aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/stable/sysfs-driver-usb-usbtmc2
-rw-r--r--Documentation/ABI/testing/debugfs-olpc16
-rw-r--r--Documentation/ABI/testing/sysfs-bus-event_source-devices-format14
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-acpi20
-rw-r--r--Documentation/CodingStyle29
-rw-r--r--Documentation/acpi/apei/einj.txt8
-rw-r--r--Documentation/aoe/aoe.txt2
-rw-r--r--Documentation/aoe/autoload.sh4
-rw-r--r--Documentation/blockdev/floppy.txt2
-rw-r--r--Documentation/cpuidle/sysfs.txt5
-rw-r--r--Documentation/devicetree/bindings/mtd/arm-versatile.txt4
-rw-r--r--Documentation/devicetree/bindings/mtd/atmel-dataflash.txt3
-rw-r--r--Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt4
-rw-r--r--Documentation/devicetree/bindings/mtd/fsmc-nand.txt33
-rw-r--r--Documentation/devicetree/bindings/mtd/gpio-control-nand.txt3
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd-physmap.txt23
-rw-r--r--Documentation/devicetree/bindings/mtd/partition.txt38
-rw-r--r--Documentation/devicetree/bindings/mtd/spear_smi.txt31
-rw-r--r--Documentation/devicetree/bindings/power_supply/max17042_battery.txt18
-rw-r--r--Documentation/devicetree/usage-model.txt412
-rw-r--r--Documentation/dontdiff1
-rw-r--r--Documentation/fb/intel810.txt2
-rw-r--r--Documentation/fb/intelfb.txt2
-rw-r--r--Documentation/feature-removal-schedule.txt10
-rw-r--r--Documentation/filesystems/files.txt4
-rw-r--r--Documentation/i2c/busses/scx200_acb2
-rw-r--r--Documentation/ide/ide.txt2
-rw-r--r--Documentation/input/input.txt4
-rw-r--r--Documentation/isdn/README.gigaset16
-rw-r--r--Documentation/kbuild/kconfig.txt8
-rw-r--r--Documentation/kernel-parameters.txt6
-rw-r--r--Documentation/laptops/sonypi.txt2
-rw-r--r--Documentation/mono.txt8
-rw-r--r--Documentation/networking/baycom.txt2
-rw-r--r--Documentation/networking/bonding.txt46
-rw-r--r--Documentation/networking/dl2k.txt11
-rw-r--r--Documentation/networking/e100.txt6
-rw-r--r--Documentation/networking/ipv6.txt6
-rw-r--r--Documentation/networking/ixgb.txt6
-rw-r--r--Documentation/networking/ltpc.txt2
-rw-r--r--Documentation/networking/vortex.txt6
-rw-r--r--Documentation/parport.txt13
-rw-r--r--Documentation/s390/3270.txt21
-rw-r--r--Documentation/scsi/00-INDEX2
-rw-r--r--Documentation/scsi/aic79xx.txt2
-rw-r--r--Documentation/scsi/aic7xxx.txt2
-rw-r--r--Documentation/scsi/osst.txt2
-rw-r--r--Documentation/scsi/st.txt4
-rw-r--r--Documentation/scsi/ufs.txt133
-rw-r--r--Documentation/serial/computone.txt8
-rw-r--r--Documentation/serial/rocket.txt2
-rw-r--r--Documentation/serial/stallion.txt4
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt10
-rw-r--r--Documentation/sound/alsa/Audiophile-Usb.txt4
-rw-r--r--Documentation/sound/alsa/MIXART.txt6
-rw-r--r--Documentation/sound/alsa/OSS-Emulation.txt2
-rw-r--r--Documentation/sound/oss/AudioExcelDSP1610
-rw-r--r--Documentation/sound/oss/CMI83305
-rw-r--r--Documentation/sound/oss/Introduction10
-rw-r--r--Documentation/sound/oss/Opti8
-rw-r--r--Documentation/sound/oss/PAS164
-rw-r--r--Documentation/sound/oss/README.modules10
-rw-r--r--Documentation/sysrq.txt5
-rw-r--r--Documentation/usb/power-management.txt3
-rw-r--r--Documentation/video4linux/CQcam.txt14
-rw-r--r--Documentation/video4linux/Zoran2
-rw-r--r--Documentation/video4linux/bttv/Modules.conf2
-rw-r--r--Documentation/video4linux/meye.txt2
-rw-r--r--MAINTAINERS6
-rw-r--r--Makefile6
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/alpha/include/asm/posix_types.h113
-rw-r--r--arch/arm/Kconfig50
-rw-r--r--arch/arm/Kconfig.debug16
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/boot/Makefile23
-rw-r--r--arch/arm/boot/compressed/.gitignore2
-rw-r--r--arch/arm/boot/compressed/Makefile15
-rw-r--r--arch/arm/boot/compressed/decompress.c6
-rw-r--r--arch/arm/boot/compressed/piggy.xzkern.S6
-rw-r--r--arch/arm/common/Kconfig3
-rw-r--r--arch/arm/common/Makefile1
-rw-r--r--arch/arm/common/gic.c13
-rw-r--r--arch/arm/common/pl330.c1960
-rw-r--r--arch/arm/configs/integrator_defconfig8
-rw-r--r--arch/arm/include/asm/assembler.h2
-rw-r--r--arch/arm/include/asm/cpuidle.h29
-rw-r--r--arch/arm/include/asm/elf.h4
-rw-r--r--arch/arm/include/asm/hardware/cache-l2x0.h6
-rw-r--r--arch/arm/include/asm/hardware/iop_adma.h2
-rw-r--r--arch/arm/include/asm/hardware/it8152.h3
-rw-r--r--arch/arm/include/asm/hardware/pl330.h217
-rw-r--r--arch/arm/include/asm/io.h71
-rw-r--r--arch/arm/include/asm/irq.h8
-rw-r--r--arch/arm/include/asm/jump_label.h41
-rw-r--r--arch/arm/include/asm/mc146818rtc.h4
-rw-r--r--arch/arm/include/asm/memory.h2
-rw-r--r--arch/arm/include/asm/mmu_context.h29
-rw-r--r--arch/arm/include/asm/opcodes.h59
-rw-r--r--arch/arm/include/asm/page.h2
-rw-r--r--arch/arm/include/asm/perf_event.h1
-rw-r--r--arch/arm/include/asm/posix_types.h55
-rw-r--r--arch/arm/include/asm/processor.h1
-rw-r--r--arch/arm/include/asm/prom.h2
-rw-r--r--arch/arm/include/asm/tlbflush.h136
-rw-r--r--arch/arm/include/asm/traps.h2
-rw-r--r--arch/arm/kernel/Makefile16
-rw-r--r--arch/arm/kernel/cpuidle.c21
-rw-r--r--arch/arm/kernel/debug.S26
-rw-r--r--arch/arm/kernel/entry-armv.S1
-rw-r--r--arch/arm/kernel/ftrace.c100
-rw-r--r--arch/arm/kernel/head.S8
-rw-r--r--arch/arm/kernel/insn.c61
-rw-r--r--arch/arm/kernel/insn.h29
-rw-r--r--arch/arm/kernel/irq.c5
-rw-r--r--arch/arm/kernel/jump_label.c39
-rw-r--r--arch/arm/kernel/kprobes.c86
-rw-r--r--arch/arm/kernel/machine_kexec.c25
-rw-r--r--arch/arm/kernel/patch.c75
-rw-r--r--arch/arm/kernel/patch.h7
-rw-r--r--arch/arm/kernel/perf_event.c3
-rw-r--r--arch/arm/kernel/perf_event_v7.c145
-rw-r--r--arch/arm/kernel/process.c36
-rw-r--r--arch/arm/kernel/sched_clock.c18
-rw-r--r--arch/arm/kernel/setup.c1
-rw-r--r--arch/arm/kernel/signal.c24
-rw-r--r--arch/arm/kernel/smp.c17
-rw-r--r--arch/arm/kernel/time.c4
-rw-r--r--arch/arm/kernel/traps.c19
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c1
-rw-r--r--arch/arm/mach-at91/at91x40.c1
-rw-r--r--arch/arm/mach-at91/cpuidle.c59
-rw-r--r--arch/arm/mach-at91/include/mach/at_hdmac.h15
-rw-r--r--arch/arm/mach-at91/include/mach/io.h31
-rw-r--r--arch/arm/mach-at91/include/mach/uncompress.h1
-rw-r--r--arch/arm/mach-at91/setup.c1
-rw-r--r--arch/arm/mach-bcmring/include/mach/io.h33
-rw-r--r--arch/arm/mach-clps711x/edb7211-mm.c1
-rw-r--r--arch/arm/mach-clps711x/include/mach/io.h36
-rw-r--r--arch/arm/mach-clps711x/include/mach/uncompress.h1
-rw-r--r--arch/arm/mach-cns3xxx/core.c8
-rw-r--r--arch/arm/mach-cns3xxx/devices.c2
-rw-r--r--arch/arm/mach-cns3xxx/include/mach/io.h17
-rw-r--r--arch/arm/mach-davinci/cpuidle.c83
-rw-r--r--arch/arm/mach-davinci/include/mach/entry-macro.S1
-rw-r--r--arch/arm/mach-davinci/include/mach/hardware.h6
-rw-r--r--arch/arm/mach-davinci/include/mach/io.h24
-rw-r--r--arch/arm/mach-davinci/include/mach/uncompress.h2
-rw-r--r--arch/arm/mach-davinci/time.c28
-rw-r--r--arch/arm/mach-dove/addr-map.c1
-rw-r--r--arch/arm/mach-dove/include/mach/io.h1
-rw-r--r--arch/arm/mach-ebsa110/core.c15
-rw-r--r--arch/arm/mach-ebsa110/include/mach/io.h9
-rw-r--r--arch/arm/mach-ep93xx/include/mach/io.h22
-rw-r--r--arch/arm/mach-exynos/include/mach/io.h26
-rw-r--r--arch/arm/mach-footbridge/include/mach/io.h13
-rw-r--r--arch/arm/mach-gemini/include/mach/io.h18
-rw-r--r--arch/arm/mach-h720x/common.c1
-rw-r--r--arch/arm/mach-h720x/include/mach/io.h22
-rw-r--r--arch/arm/mach-highbank/highbank.c1
-rw-r--r--arch/arm/mach-highbank/include/mach/io.h7
-rw-r--r--arch/arm/mach-highbank/include/mach/irqs.h6
-rw-r--r--arch/arm/mach-imx/Kconfig6
-rw-r--r--arch/arm/mach-imx/Makefile2
-rw-r--r--arch/arm/mach-imx/dma-v1.c845
-rw-r--r--arch/arm/mach-imx/include/mach/dma-v1.h103
-rw-r--r--arch/arm/mach-imx/mm-imx3.c11
-rw-r--r--arch/arm/mach-imx/mm-imx5.c1
-rw-r--r--arch/arm/mach-integrator/core.c3
-rw-r--r--arch/arm/mach-integrator/include/mach/io.h1
-rw-r--r--arch/arm/mach-integrator/include/mach/irqs.h3
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c10
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c3
-rw-r--r--arch/arm/mach-integrator/pci.c3
-rw-r--r--arch/arm/mach-integrator/pci_v3.c3
-rw-r--r--arch/arm/mach-iop13xx/include/mach/io.h13
-rw-r--r--arch/arm/mach-iop13xx/include/mach/iop13xx.h1
-rw-r--r--arch/arm/mach-iop13xx/io.c20
-rw-r--r--arch/arm/mach-iop13xx/iq81340mc.c1
-rw-r--r--arch/arm/mach-iop13xx/iq81340sc.c1
-rw-r--r--arch/arm/mach-iop13xx/pci.h6
-rw-r--r--arch/arm/mach-iop32x/include/mach/io.h1
-rw-r--r--arch/arm/mach-iop33x/include/mach/io.h1
-rw-r--r--arch/arm/mach-ixp2000/include/mach/io.h1
-rw-r--r--arch/arm/mach-ixp23xx/core.c1
-rw-r--r--arch/arm/mach-ixp23xx/include/mach/io.h1
-rw-r--r--arch/arm/mach-ixp4xx/avila-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/common.c34
-rw-r--r--arch/arm/mach-ixp4xx/coyote-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/dsmg600-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/fsg-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/gateway7001-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/goramo_mlr.c1
-rw-r--r--arch/arm/mach-ixp4xx/gtwx5715-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/io.h24
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/platform.h1
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c4
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/omixp-setup.c3
-rw-r--r--arch/arm/mach-ixp4xx/vulcan-setup.c1
-rw-r--r--arch/arm/mach-ixp4xx/wg302v2-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/cpuidle.c72
-rw-r--r--arch/arm/mach-kirkwood/include/mach/io.h2
-rw-r--r--arch/arm/mach-ks8695/include/mach/io.h19
-rw-r--r--arch/arm/mach-lpc32xx/clock.c2
-rw-r--r--arch/arm/mach-lpc32xx/include/mach/io.h27
-rw-r--r--arch/arm/mach-mmp/aspenite.c5
-rw-r--r--arch/arm/mach-mmp/avengers_lite.c1
-rw-r--r--arch/arm/mach-mmp/brownstone.c4
-rw-r--r--arch/arm/mach-mmp/flint.c3
-rw-r--r--arch/arm/mach-mmp/gplugd.c2
-rw-r--r--arch/arm/mach-mmp/include/mach/addr-map.h6
-rw-r--r--arch/arm/mach-mmp/include/mach/io.h21
-rw-r--r--arch/arm/mach-mmp/include/mach/irqs.h3
-rw-r--r--arch/arm/mach-mmp/irq-mmp2.c1
-rw-r--r--arch/arm/mach-mmp/jasper.c5
-rw-r--r--arch/arm/mach-mmp/tavorevb.c1
-rw-r--r--arch/arm/mach-mmp/teton_bga.c3
-rw-r--r--arch/arm/mach-mmp/ttc_dkb.c4
-rw-r--r--arch/arm/mach-msm/board-halibut.c6
-rw-r--r--arch/arm/mach-msm/board-trout.c6
-rw-r--r--arch/arm/mach-msm/include/mach/io.h36
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-7x00.h12
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-7x30.h4
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-8960.h4
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-8x50.h4
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap-8x60.h4
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap.h6
-rw-r--r--arch/arm/mach-msm/io.c8
-rw-r--r--arch/arm/mach-msm/timer.c12
-rw-r--r--arch/arm/mach-mv78xx0/include/mach/io.h2
-rw-r--r--arch/arm/mach-mxs/include/mach/hardware.h6
-rw-r--r--arch/arm/mach-mxs/include/mach/io.h22
-rw-r--r--arch/arm/mach-netx/generic.c2
-rw-r--r--arch/arm/mach-netx/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-netx/include/mach/io.h28
-rw-r--r--arch/arm/mach-netx/include/mach/netx-regs.h16
-rw-r--r--arch/arm/mach-nomadik/include/mach/io.h22
-rw-r--r--arch/arm/mach-omap1/ams-delta-fiq-handler.S1
-rw-r--r--arch/arm/mach-omap1/board-h2.c8
-rw-r--r--arch/arm/mach-omap1/board-h3.c9
-rw-r--r--arch/arm/mach-omap1/board-htcherald.c6
-rw-r--r--arch/arm/mach-omap1/board-innovator.c4
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c2
-rw-r--r--arch/arm/mach-omap1/board-osk.c12
-rw-r--r--arch/arm/mach-omap1/board-palmte.c2
-rw-r--r--arch/arm/mach-omap1/board-palmtt.c2
-rw-r--r--arch/arm/mach-omap1/board-palmz71.c2
-rw-r--r--arch/arm/mach-omap1/board-voiceblue.c16
-rw-r--r--arch/arm/mach-omap1/flash.c20
-rw-r--r--arch/arm/mach-omap1/include/mach/entry-macro.S1
-rw-r--r--arch/arm/mach-omap1/include/mach/io.h46
-rw-r--r--arch/arm/mach-omap1/iomap.h6
-rw-r--r--arch/arm/mach-omap1/pm.c1
-rw-r--r--arch/arm/mach-omap1/sleep.S2
-rw-r--r--arch/arm/mach-omap1/sram.S1
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c2
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c2
-rw-r--r--arch/arm/mach-omap2/board-apollon.c4
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c2
-rw-r--r--arch/arm/mach-omap2/board-h4.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c2
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c2
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c3
-rw-r--r--arch/arm/mach-omap2/board-zoom-debugboard.c3
-rw-r--r--arch/arm/mach-omap2/board-zoom-peripherals.c6
-rw-r--r--arch/arm/mach-omap2/clock3xxx_data.c1
-rw-r--r--arch/arm/mach-omap2/clock44xx_data.c1
-rw-r--r--arch/arm/mach-omap2/common-board-devices.c2
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c42
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c21
-rw-r--r--arch/arm/mach-omap2/display.c8
-rw-r--r--arch/arm/mach-omap2/include/mach/io.h49
-rw-r--r--arch/arm/mach-omap2/iomap.h6
-rw-r--r--arch/arm/mach-omap2/pm.c2
-rw-r--r--arch/arm/mach-orion5x/common.h9
-rw-r--r--arch/arm/mach-orion5x/include/mach/io.h33
-rw-r--r--arch/arm/mach-orion5x/pci.c1
-rw-r--r--arch/arm/mach-orion5x/tsx09-common.c1
-rw-r--r--arch/arm/mach-picoxcell/include/mach/io.h22
-rw-r--r--arch/arm/mach-picoxcell/include/mach/irqs.h20
-rw-r--r--arch/arm/mach-pnx4008/include/mach/io.h21
-rw-r--r--arch/arm/mach-prima2/include/mach/io.h16
-rw-r--r--arch/arm/mach-prima2/timer.c21
-rw-r--r--arch/arm/mach-pxa/Kconfig1
-rw-r--r--arch/arm/mach-pxa/capc7117.c1
-rw-r--r--arch/arm/mach-pxa/clock-pxa2xx.c1
-rw-r--r--arch/arm/mach-pxa/cm-x300.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa270.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa300.c1
-rw-r--r--arch/arm/mach-pxa/colibri-pxa320.c1
-rw-r--r--arch/arm/mach-pxa/corgi.c3
-rw-r--r--arch/arm/mach-pxa/corgi_pm.c1
-rw-r--r--arch/arm/mach-pxa/cpufreq-pxa3xx.c1
-rw-r--r--arch/arm/mach-pxa/csb726.c1
-rw-r--r--arch/arm/mach-pxa/devices.c1
-rw-r--r--arch/arm/mach-pxa/em-x270.c12
-rw-r--r--arch/arm/mach-pxa/gumstix.c1
-rw-r--r--arch/arm/mach-pxa/h5000.c1
-rw-r--r--arch/arm/mach-pxa/himalaya.c1
-rw-r--r--arch/arm/mach-pxa/hx4700.c2
-rw-r--r--arch/arm/mach-pxa/icontrol.c1
-rw-r--r--arch/arm/mach-pxa/idp.c1
-rw-r--r--arch/arm/mach-pxa/include/mach/hardware.h6
-rw-r--r--arch/arm/mach-pxa/include/mach/io.h20
-rw-r--r--arch/arm/mach-pxa/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/mainstone.h2
-rw-r--r--arch/arm/mach-pxa/magician.c2
-rw-r--r--arch/arm/mach-pxa/mfp-pxa2xx.c1
-rw-r--r--arch/arm/mach-pxa/mioa701.c1
-rw-r--r--arch/arm/mach-pxa/mp900.c1
-rw-r--r--arch/arm/mach-pxa/palmld.c1
-rw-r--r--arch/arm/mach-pxa/palmt5.c1
-rw-r--r--arch/arm/mach-pxa/palmtc.c1
-rw-r--r--arch/arm/mach-pxa/palmte2.c1
-rw-r--r--arch/arm/mach-pxa/palmtreo.c2
-rw-r--r--arch/arm/mach-pxa/palmtx.c1
-rw-r--r--arch/arm/mach-pxa/palmz72.c1
-rw-r--r--arch/arm/mach-pxa/pxa2xx.c1
-rw-r--r--arch/arm/mach-pxa/pxa300.c1
-rw-r--r--arch/arm/mach-pxa/pxa320.c1
-rw-r--r--arch/arm/mach-pxa/pxa3xx.c1
-rw-r--r--arch/arm/mach-pxa/raumfeld.c5
-rw-r--r--arch/arm/mach-pxa/saar.c1
-rw-r--r--arch/arm/mach-pxa/sharpsl_pm.c1
-rw-r--r--arch/arm/mach-pxa/spitz.c3
-rw-r--r--arch/arm/mach-pxa/stargate2.c3
-rw-r--r--arch/arm/mach-pxa/tavorevb.c1
-rw-r--r--arch/arm/mach-pxa/time.c1
-rw-r--r--arch/arm/mach-pxa/trizeps4.c2
-rw-r--r--arch/arm/mach-pxa/viper.c1
-rw-r--r--arch/arm/mach-pxa/vpac270.c1
-rw-r--r--arch/arm/mach-pxa/xcep.c1
-rw-r--r--arch/arm/mach-pxa/z2.c1
-rw-r--r--arch/arm/mach-realview/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-realview/include/mach/io.h28
-rw-r--r--arch/arm/mach-rpc/include/mach/hardware.h6
-rw-r--r--arch/arm/mach-rpc/include/mach/io.h5
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/io.h5
-rw-r--r--arch/arm/mach-s3c24xx/simtec-nor.c3
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/io.h18
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/io.h25
-rw-r--r--arch/arm/mach-s5pc100/include/mach/io.h18
-rw-r--r--arch/arm/mach-s5pv210/include/mach/io.h26
-rw-r--r--arch/arm/mach-sa1100/include/mach/io.h20
-rw-r--r--arch/arm/mach-shark/core.c1
-rw-r--r--arch/arm/mach-shark/include/mach/io.h2
-rw-r--r--arch/arm/mach-shmobile/Kconfig4
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c3
-rw-r--r--arch/arm/mach-shmobile/board-bonito.c3
-rw-r--r--arch/arm/mach-shmobile/board-g3evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-g4evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-kota2.c3
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c72
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c1
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c4
-rw-r--r--arch/arm/mach-shmobile/cpuidle.c31
-rw-r--r--arch/arm/mach-shmobile/include/mach/io.h9
-rw-r--r--arch/arm/mach-shmobile/include/mach/irqs.h6
-rw-r--r--arch/arm/mach-shmobile/intc-r8a7740.c1
-rw-r--r--arch/arm/mach-shmobile/intc-r8a7779.c4
-rw-r--r--arch/arm/mach-shmobile/intc-sh7367.c1
-rw-r--r--arch/arm/mach-shmobile/intc-sh7372.c1
-rw-r--r--arch/arm/mach-shmobile/intc-sh7377.c1
-rw-r--r--arch/arm/mach-shmobile/intc-sh73a0.c5
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7740.c1
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7779.c1
-rw-r--r--arch/arm/mach-shmobile/setup-sh7367.c1
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c1
-rw-r--r--arch/arm/mach-shmobile/setup-sh7377.c1
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c1
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7779.c4
-rw-r--r--arch/arm/mach-shmobile/smp-sh73a0.c20
-rw-r--r--arch/arm/mach-spear3xx/clock.c1
-rw-r--r--arch/arm/mach-spear3xx/include/mach/io.h19
-rw-r--r--arch/arm/mach-spear6xx/clock.c1
-rw-r--r--arch/arm/mach-spear6xx/include/mach/io.h20
-rw-r--r--arch/arm/mach-tegra/board-dt-tegra20.c6
-rw-r--r--arch/arm/mach-tegra/devices.c7
-rw-r--r--arch/arm/mach-tegra/devices.h5
-rw-r--r--arch/arm/mach-tegra/include/mach/debug-macro.S1
-rw-r--r--arch/arm/mach-tegra/include/mach/io.h49
-rw-r--r--arch/arm/mach-tegra/include/mach/iomap.h42
-rw-r--r--arch/arm/mach-tegra/io.c1
-rw-r--r--arch/arm/mach-tegra/sleep.S4
-rw-r--r--arch/arm/mach-u300/core.c2
-rw-r--r--arch/arm/mach-u300/include/mach/io.h20
-rw-r--r--arch/arm/mach-u300/include/mach/u300-regs.h11
-rw-r--r--arch/arm/mach-ux500/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-ux500/include/mach/io.h22
-rw-r--r--arch/arm/mach-versatile/include/mach/io.h28
-rw-r--r--arch/arm/mach-vexpress/include/mach/io.h26
-rw-r--r--arch/arm/mach-vt8500/include/mach/io.h26
-rw-r--r--arch/arm/mach-w90x900/dev.c1
-rw-r--r--arch/arm/mach-w90x900/include/mach/io.h30
-rw-r--r--arch/arm/mm/cache-l2x0.c22
-rw-r--r--arch/arm/mm/copypage-v4mc.c9
-rw-r--r--arch/arm/mm/copypage-v6.c20
-rw-r--r--arch/arm/mm/copypage-xscale.c9
-rw-r--r--arch/arm/mm/dma-mapping.c20
-rw-r--r--arch/arm/mm/fault.c3
-rw-r--r--arch/arm/mm/flush.c14
-rw-r--r--arch/arm/mm/highmem.c21
-rw-r--r--arch/arm/mm/init.c4
-rw-r--r--arch/arm/mm/ioremap.c17
-rw-r--r--arch/arm/mm/mm.h26
-rw-r--r--arch/arm/mm/mmu.c7
-rw-r--r--arch/arm/mm/nommu.c8
-rw-r--r--arch/arm/mm/vmregion.c76
-rw-r--r--arch/arm/mm/vmregion.h5
-rw-r--r--arch/arm/net/Makefile3
-rw-r--r--arch/arm/net/bpf_jit_32.c915
-rw-r--r--arch/arm/net/bpf_jit_32.h190
-rw-r--r--arch/arm/plat-mxc/include/mach/hardware.h7
-rw-r--r--arch/arm/plat-mxc/include/mach/io.h39
-rw-r--r--arch/arm/plat-nomadik/Kconfig1
-rw-r--r--arch/arm/plat-nomadik/include/plat/ste_dma40.h3
-rw-r--r--arch/arm/plat-omap/include/plat/gpio.h4
-rw-r--r--arch/arm/plat-omap/include/plat/hardware.h6
-rw-r--r--arch/arm/plat-omap/include/plat/sdrc.h1
-rw-r--r--arch/arm/plat-omap/include/plat/usb.h1
-rw-r--r--arch/arm/plat-s3c24xx/cpu.c1
-rw-r--r--arch/arm/plat-samsung/dma-ops.c4
-rw-r--r--arch/arm/plat-spear/include/plat/hardware.h6
-rw-r--r--arch/arm/plat-spear/include/plat/io.h22
-rw-r--r--arch/arm/plat-spear/include/plat/keyboard.h7
-rw-r--r--arch/arm/plat-versatile/Kconfig3
-rw-r--r--arch/avr32/boot/images/Makefile9
-rw-r--r--arch/avr32/include/asm/posix_types.h107
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c13
-rw-r--r--arch/avr32/mach-at32ap/include/mach/atmel-mci.h7
-rw-r--r--arch/blackfin/boot/Makefile19
-rw-r--r--arch/blackfin/kernel/setup.c7
-rw-r--r--arch/c6x/Kconfig2
-rw-r--r--arch/cris/include/asm/posix_types.h50
-rw-r--r--arch/frv/include/asm/posix_types.h53
-rw-r--r--arch/h8300/include/asm/posix_types.h49
-rw-r--r--arch/ia64/include/asm/cmpxchg.h1
-rw-r--r--arch/ia64/include/asm/posix_types.h121
-rw-r--r--arch/ia64/kernel/asm-offsets.c4
-rw-r--r--arch/ia64/kernel/fsys.S2
-rw-r--r--arch/ia64/kernel/fsyscall_gtod_data.h2
-rw-r--r--arch/ia64/kernel/process.c1
-rw-r--r--arch/ia64/kernel/time.c10
-rw-r--r--arch/m32r/include/asm/posix_types.h108
-rw-r--r--arch/m68k/include/asm/posix_types.h53
-rw-r--r--arch/microblaze/boot/Makefile10
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/cavium-octeon/flash_setup.c2
-rw-r--r--arch/mips/configs/db1300_defconfig2
-rw-r--r--arch/mips/include/asm/posix_types.h121
-rw-r--r--arch/mips/kernel/kspd.c2
-rw-r--r--arch/mn10300/include/asm/posix_types.h111
-rw-r--r--arch/parisc/include/asm/futex.h31
-rw-r--r--arch/parisc/include/asm/posix_types.h119
-rw-r--r--arch/parisc/kernel/smp.c3
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/Kconfig.debug10
-rw-r--r--arch/powerpc/configs/85xx/p1023rds_defconfig2
-rw-r--r--arch/powerpc/configs/chroma_defconfig2
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig2
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig2
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig2
-rw-r--r--arch/powerpc/configs/ppc64_defconfig2
-rw-r--r--arch/powerpc/configs/pseries_defconfig2
-rw-r--r--arch/powerpc/include/asm/posix_types.h118
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c2
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/include/asm/cpu_mf.h2
-rw-r--r--arch/s390/include/asm/mmu.h2
-rw-r--r--arch/s390/include/asm/posix_types.h70
-rw-r--r--arch/s390/kernel/lgr.c1
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c2
-rw-r--r--arch/s390/kernel/perf_event.c1
-rw-r--r--arch/s390/kernel/setup.c1
-rw-r--r--arch/s390/kernel/smp.c2
-rw-r--r--arch/sh/Kconfig6
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c104
-rw-r--r--arch/sh/boot/Makefile8
-rw-r--r--arch/sh/drivers/dma/dma-g2.c4
-rw-r--r--arch/sh/drivers/dma/dmabrg.c4
-rw-r--r--arch/sh/drivers/pci/pci-sh7780.c15
-rw-r--r--arch/sh/include/asm/io.h25
-rw-r--r--arch/sh/include/asm/irq.h11
-rw-r--r--arch/sh/include/asm/posix_types_32.h5
-rw-r--r--arch/sh/include/asm/posix_types_64.h4
-rw-r--r--arch/sh/include/asm/unistd.h37
-rw-r--r--arch/sh/include/asm/unistd_32.h102
-rw-r--r--arch/sh/include/asm/unistd_64.h106
-rw-r--r--arch/sh/include/cpu-sh4/cpu/dma-register.h32
-rw-r--r--arch/sh/include/mach-common/mach/mangle-port.h49
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7757.c20
-rw-r--r--arch/sh/kernel/cpu/shmobile/cpuidle.c10
-rw-r--r--arch/sh/kernel/cpufreq.c121
-rw-r--r--arch/sh/kernel/signal_32.c35
-rw-r--r--arch/sh/kernel/signal_64.c40
-rw-r--r--arch/sh/kernel/syscalls_32.S8
-rw-r--r--arch/sh/kernel/syscalls_64.S8
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/boot/Makefile9
-rw-r--r--arch/sparc/include/asm/posix_types.h133
-rw-r--r--arch/sparc/include/asm/ptrace.h2
-rw-r--r--arch/sparc/kernel/jump_label.c2
-rw-r--r--arch/sparc/kernel/kgdb_64.c1
-rw-r--r--arch/sparc/kernel/sun4d_smp.c1
-rw-r--r--arch/sparc/kernel/sun4m_smp.c1
-rw-r--r--arch/tile/include/asm/compat.h11
-rw-r--r--arch/tile/kernel/compat.c43
-rw-r--r--arch/unicore32/boot/Makefile12
-rw-r--r--arch/x86/Kconfig23
-rw-r--r--arch/x86/Makefile16
-rw-r--r--arch/x86/configs/i386_defconfig64
-rw-r--r--arch/x86/configs/x86_64_defconfig67
-rw-r--r--arch/x86/ia32/ia32_signal.c24
-rw-r--r--arch/x86/ia32/sys_ia32.c40
-rw-r--r--arch/x86/include/asm/Kbuild2
-rw-r--r--arch/x86/include/asm/apic.h2
-rw-r--r--arch/x86/include/asm/compat.h40
-rw-r--r--arch/x86/include/asm/elf.h31
-rw-r--r--arch/x86/include/asm/ia32.h18
-rw-r--r--arch/x86/include/asm/idle.h1
-rw-r--r--arch/x86/include/asm/io_apic.h9
-rw-r--r--arch/x86/include/asm/mtrr.h28
-rw-r--r--arch/x86/include/asm/posix_types.h4
-rw-r--r--arch/x86/include/asm/posix_types_32.h75
-rw-r--r--arch/x86/include/asm/posix_types_64.h106
-rw-r--r--arch/x86/include/asm/posix_types_x32.h19
-rw-r--r--arch/x86/include/asm/processor.h12
-rw-r--r--arch/x86/include/asm/ptrace.h1
-rw-r--r--arch/x86/include/asm/sigcontext.h57
-rw-r--r--arch/x86/include/asm/sigframe.h13
-rw-r--r--arch/x86/include/asm/sighandling.h24
-rw-r--r--arch/x86/include/asm/sys_ia32.h7
-rw-r--r--arch/x86/include/asm/syscall.h5
-rw-r--r--arch/x86/include/asm/thread_info.h18
-rw-r--r--arch/x86/include/asm/traps.h25
-rw-r--r--arch/x86/include/asm/unistd.h15
-rw-r--r--arch/x86/include/asm/vgtod.h17
-rw-r--r--arch/x86/include/asm/x2apic.h5
-rw-r--r--arch/x86/kernel/acpi/boot.c5
-rw-r--r--arch/x86/kernel/apic/apic.c13
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c3
-rw-r--r--arch/x86/kernel/apic/io_apic.c159
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c7
-rw-r--r--arch/x86/kernel/asm-offsets_64.c6
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c10
-rw-r--r--arch/x86/kernel/cpu/perf_event.c21
-rw-r--r--arch/x86/kernel/cpu/perf_event.h1
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c18
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c36
-rw-r--r--arch/x86/kernel/cpu/perf_event_p6.c19
-rw-r--r--arch/x86/kernel/dumpstack.c9
-rw-r--r--arch/x86/kernel/entry_64.S44
-rw-r--r--arch/x86/kernel/irqinit.c2
-rw-r--r--arch/x86/kernel/process.c114
-rw-r--r--arch/x86/kernel/process_32.c58
-rw-r--r--arch/x86/kernel/process_64.c134
-rw-r--r--arch/x86/kernel/ptrace.c102
-rw-r--r--arch/x86/kernel/signal.c140
-rw-r--r--arch/x86/kernel/smpboot.c9
-rw-r--r--arch/x86/kernel/sys_x86_64.c6
-rw-r--r--arch/x86/kernel/syscall_64.c8
-rw-r--r--arch/x86/kernel/tboot.c9
-rw-r--r--arch/x86/kernel/tls.c4
-rw-r--r--arch/x86/kernel/traps.c133
-rw-r--r--arch/x86/kernel/tsc.c10
-rw-r--r--arch/x86/kernel/vm86_32.c2
-rw-r--r--arch/x86/kernel/vsyscall_64.c27
-rw-r--r--arch/x86/math-emu/fpu_entry.c5
-rw-r--r--arch/x86/mm/fault.c10
-rw-r--r--arch/x86/mm/srat.c2
-rw-r--r--arch/x86/oprofile/backtrace.c2
-rw-r--r--arch/x86/platform/olpc/olpc.c97
-rw-r--r--arch/x86/syscalls/Makefile22
-rw-r--r--arch/x86/syscalls/syscall_32.tbl2
-rw-r--r--arch/x86/syscalls/syscall_64.tbl579
-rw-r--r--arch/x86/um/sys_call_table_64.c3
-rw-r--r--arch/x86/um/user-offsets.c2
-rw-r--r--arch/x86/vdso/.gitignore2
-rw-r--r--arch/x86/vdso/Makefile46
-rw-r--r--arch/x86/vdso/vclock_gettime.c135
-rw-r--r--arch/x86/vdso/vdso32-setup.c5
-rw-r--r--arch/x86/vdso/vdsox32.S22
-rw-r--r--arch/x86/vdso/vdsox32.lds.S28
-rw-r--r--arch/x86/vdso/vma.c78
-rw-r--r--arch/xtensa/configs/iss_defconfig2
-rw-r--r--arch/xtensa/include/asm/posix_types.h97
-rw-r--r--drivers/acpi/Kconfig9
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpica/Makefile4
-rw-r--r--drivers/acpi/acpica/accommon.h1
-rw-r--r--drivers/acpi/acpica/acdebug.h8
-rw-r--r--drivers/acpi/acpica/acevents.h21
-rw-r--r--drivers/acpi/acpica/acglobal.h11
-rw-r--r--drivers/acpi/acpica/achware.h32
-rw-r--r--drivers/acpi/acpica/aclocal.h1
-rw-r--r--drivers/acpi/acpica/acmacros.h6
-rw-r--r--drivers/acpi/acpica/acnamesp.h5
-rw-r--r--drivers/acpi/acpica/actables.h5
-rw-r--r--drivers/acpi/acpica/evevent.c4
-rw-r--r--drivers/acpi/acpica/evglock.c4
-rw-r--r--drivers/acpi/acpica/evgpe.c4
-rw-r--r--drivers/acpi/acpica/evgpeblk.c4
-rw-r--r--drivers/acpi/acpica/evgpeinit.c4
-rw-r--r--drivers/acpi/acpica/evgpeutil.c3
-rw-r--r--drivers/acpi/acpica/evmisc.c26
-rw-r--r--drivers/acpi/acpica/evsci.c4
-rw-r--r--drivers/acpi/acpica/evxface.c436
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c2
-rw-r--r--drivers/acpi/acpica/hwacpi.c3
-rw-r--r--drivers/acpi/acpica/hwesleep.c247
-rw-r--r--drivers/acpi/acpica/hwgpe.c4
-rw-r--r--drivers/acpi/acpica/hwregs.c16
-rw-r--r--drivers/acpi/acpica/hwsleep.c401
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c50
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c431
-rw-r--r--drivers/acpi/acpica/nsdump.c15
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c4
-rw-r--r--drivers/acpi/acpica/nsrepair.c159
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/tbfadt.c8
-rw-r--r--drivers/acpi/acpica/tbinstal.c117
-rw-r--r--drivers/acpi/acpica/tbutils.c95
-rw-r--r--drivers/acpi/acpica/utdecode.c34
-rw-r--r--drivers/acpi/acpica/utglobal.c9
-rw-r--r--drivers/acpi/acpica/utinit.c37
-rw-r--r--drivers/acpi/acpica/utxface.c6
-rw-r--r--drivers/acpi/apei/apei-base.c61
-rw-r--r--drivers/acpi/apei/cper.c2
-rw-r--r--drivers/acpi/apei/einj.c17
-rw-r--r--drivers/acpi/apei/erst.c2
-rw-r--r--drivers/acpi/bgrt.c175
-rw-r--r--drivers/acpi/bus.c1
-rw-r--r--drivers/acpi/ec.c8
-rw-r--r--drivers/acpi/nvs.c4
-rw-r--r--drivers/acpi/osl.c124
-rw-r--r--drivers/acpi/power.c166
-rw-r--r--drivers/acpi/processor_driver.c62
-rw-r--r--drivers/acpi/processor_idle.c34
-rw-r--r--drivers/acpi/processor_thermal.c45
-rw-r--r--drivers/acpi/processor_throttling.c5
-rw-r--r--drivers/acpi/reboot.c3
-rw-r--r--drivers/acpi/scan.c12
-rw-r--r--drivers/acpi/sleep.c76
-rw-r--r--drivers/acpi/thermal.c8
-rw-r--r--drivers/acpi/video.c50
-rw-r--r--drivers/char/lp.c5
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/cpuidle/cpuidle.c97
-rw-r--r--drivers/cpuidle/driver.c2
-rw-r--r--drivers/cpuidle/governors/menu.c7
-rw-r--r--drivers/cpuidle/sysfs.c40
-rw-r--r--drivers/dma/Kconfig3
-rw-r--r--drivers/dma/amba-pl08x.c46
-rw-r--r--drivers/dma/at_hdmac.c111
-rw-r--r--drivers/dma/at_hdmac_regs.h34
-rw-r--r--drivers/dma/coh901318.c41
-rw-r--r--drivers/dma/dmaengine.c8
-rw-r--r--drivers/dma/dmaengine.h89
-rw-r--r--drivers/dma/dw_dmac.c228
-rw-r--r--drivers/dma/dw_dmac_regs.h16
-rw-r--r--drivers/dma/ep93xx_dma.c31
-rw-r--r--drivers/dma/fsldma.c28
-rw-r--r--drivers/dma/fsldma.h1
-rw-r--r--drivers/dma/imx-dma.c950
-rw-r--r--drivers/dma/imx-sdma.c187
-rw-r--r--drivers/dma/intel_mid_dma.c46
-rw-r--r--drivers/dma/intel_mid_dma_regs.h2
-rw-r--r--drivers/dma/ioat/dma.c21
-rw-r--r--drivers/dma/ioat/dma.h23
-rw-r--r--drivers/dma/ioat/dma_v2.c13
-rw-r--r--drivers/dma/ioat/dma_v3.c12
-rw-r--r--drivers/dma/iop-adma.c52
-rw-r--r--drivers/dma/ipu/ipu_idmac.c25
-rw-r--r--drivers/dma/mpc512x_dma.c25
-rw-r--r--drivers/dma/mv_xor.c34
-rw-r--r--drivers/dma/mv_xor.h3
-rw-r--r--drivers/dma/mxs-dma.c60
-rw-r--r--drivers/dma/pch_dma.c37
-rw-r--r--drivers/dma/pl330.c2149
-rw-r--r--drivers/dma/ppc4xx/adma.c49
-rw-r--r--drivers/dma/ppc4xx/adma.h2
-rw-r--r--drivers/dma/shdma.c33
-rw-r--r--drivers/dma/shdma.h1
-rw-r--r--drivers/dma/sirf-dma.c27
-rw-r--r--drivers/dma/ste_dma40.c41
-rw-r--r--drivers/dma/timb_dma.c37
-rw-r--r--drivers/dma/txx9dmac.c43
-rw-r--r--drivers/dma/txx9dmac.h1
-rw-r--r--drivers/gpio/gpio-pxa.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c7
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c10
-rw-r--r--drivers/input/input-compat.c4
-rw-r--r--drivers/input/input-compat.h2
-rw-r--r--drivers/input/joystick/amijoy.c3
-rw-r--r--drivers/input/keyboard/gpio_keys.c259
-rw-r--r--drivers/input/keyboard/tegra-kbc.c1
-rw-r--r--drivers/input/mouse/sentelic.c294
-rw-r--r--drivers/input/mouse/sentelic.h35
-rw-r--r--drivers/input/serio/ams_delta_serio.c2
-rw-r--r--drivers/input/tablet/Kconfig1
-rw-r--r--drivers/input/tablet/wacom.h9
-rw-r--r--drivers/input/tablet/wacom_sys.c231
-rw-r--r--drivers/input/tablet/wacom_wac.c49
-rw-r--r--drivers/input/tablet/wacom_wac.h6
-rw-r--r--drivers/media/video/davinci/vpbe_osd.c1
-rw-r--r--drivers/media/video/davinci/vpbe_venc.c1
-rw-r--r--drivers/media/video/mx3_camera.c2
-rw-r--r--drivers/media/video/timblogiw.c2
-rw-r--r--drivers/mmc/host/atmel-mci.c21
-rw-r--r--drivers/mmc/host/mmci.c4
-rw-r--r--drivers/mmc/host/mxcmmc.c5
-rw-r--r--drivers/mmc/host/mxs-mmc.c14
-rw-r--r--drivers/mmc/host/sh_mmcif.c4
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c4
-rw-r--r--drivers/mtd/Kconfig3
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c83
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c283
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c33
-rw-r--r--drivers/mtd/chips/cfi_util.c6
-rw-r--r--drivers/mtd/chips/fwh_lock.h4
-rw-r--r--drivers/mtd/chips/map_absent.c10
-rw-r--r--drivers/mtd/chips/map_ram.c14
-rw-r--r--drivers/mtd/chips/map_rom.c13
-rw-r--r--drivers/mtd/devices/Kconfig7
-rw-r--r--drivers/mtd/devices/Makefile1
-rw-r--r--drivers/mtd/devices/block2mtd.c28
-rw-r--r--drivers/mtd/devices/doc2000.c25
-rw-r--r--drivers/mtd/devices/doc2001.c22
-rw-r--r--drivers/mtd/devices/doc2001plus.c22
-rw-r--r--drivers/mtd/devices/docg3.c201
-rw-r--r--drivers/mtd/devices/docg3.h20
-rw-r--r--drivers/mtd/devices/lart.c17
-rw-r--r--drivers/mtd/devices/m25p80.c56
-rw-r--r--drivers/mtd/devices/ms02-nv.c12
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c50
-rw-r--r--drivers/mtd/devices/mtdram.c35
-rw-r--r--drivers/mtd/devices/phram.c76
-rw-r--r--drivers/mtd/devices/pmc551.c99
-rw-r--r--drivers/mtd/devices/slram.c41
-rw-r--r--drivers/mtd/devices/spear_smi.c1147
-rw-r--r--drivers/mtd/devices/sst25l.c46
-rw-r--r--drivers/mtd/inftlcore.c2
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c37
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c4
-rw-r--r--drivers/mtd/maps/dc21285.c2
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c4
-rw-r--r--drivers/mtd/maps/h720x-flash.c4
-rw-r--r--drivers/mtd/maps/impa7.c2
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c2
-rw-r--r--drivers/mtd/maps/ixp2000.c2
-rw-r--r--drivers/mtd/maps/ixp4xx.c5
-rw-r--r--drivers/mtd/maps/l440gx.c14
-rw-r--r--drivers/mtd/maps/lantiq-flash.c6
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c5
-rw-r--r--drivers/mtd/maps/pcmciamtd.c13
-rw-r--r--drivers/mtd/maps/physmap.c24
-rw-r--r--drivers/mtd/maps/plat-ram.c5
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c3
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c4
-rw-r--r--drivers/mtd/maps/sa1100-flash.c18
-rw-r--r--drivers/mtd/maps/solutionengine.c4
-rw-r--r--drivers/mtd/maps/uclinux.c2
-rw-r--r--drivers/mtd/maps/vmu-flash.c14
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c1
-rw-r--r--drivers/mtd/mtdblock.c8
-rw-r--r--drivers/mtd/mtdchar.c57
-rw-r--r--drivers/mtd/mtdconcat.c106
-rw-r--r--drivers/mtd/mtdcore.c271
-rw-r--r--drivers/mtd/mtdoops.c9
-rw-r--r--drivers/mtd/mtdpart.c200
-rw-r--r--drivers/mtd/nand/Kconfig21
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/alauda.c9
-rw-r--r--drivers/mtd/nand/atmel_nand.c1
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c10
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c2
-rw-r--r--drivers/mtd/nand/cafe_nand.c3
-rw-r--r--drivers/mtd/nand/cmx270_nand.c2
-rw-r--r--drivers/mtd/nand/cs553x_nand.c4
-rw-r--r--drivers/mtd/nand/davinci_nand.c5
-rw-r--r--drivers/mtd/nand/denali.c3
-rw-r--r--drivers/mtd/nand/diskonchip.c1
-rw-r--r--drivers/mtd/nand/docg4.c1377
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c6
-rw-r--r--drivers/mtd/nand/fsmc_nand.c924
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c43
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c14
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h2
-rw-r--r--drivers/mtd/nand/h1910.c4
-rw-r--r--drivers/mtd/nand/jz4740_nand.c11
-rw-r--r--drivers/mtd/nand/mxc_nand.c11
-rw-r--r--drivers/mtd/nand/nand_base.c194
-rw-r--r--drivers/mtd/nand/ndfc.c1
-rw-r--r--drivers/mtd/nand/omap2.c5
-rw-r--r--drivers/mtd/nand/orion_nand.c4
-rw-r--r--drivers/mtd/nand/plat_nand.c5
-rw-r--r--drivers/mtd/nand/ppchameleonevb.c18
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c6
-rw-r--r--drivers/mtd/nand/r852.c1
-rw-r--r--drivers/mtd/nand/rtc_from4.c1
-rw-r--r--drivers/mtd/nand/s3c2410.c5
-rw-r--r--drivers/mtd/nand/sh_flctl.c106
-rw-r--r--drivers/mtd/nand/sharpsl.c5
-rw-r--r--drivers/mtd/nand/tmio_nand.c7
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c3
-rw-r--r--drivers/mtd/nftlcore.c7
-rw-r--r--drivers/mtd/onenand/generic.c6
-rw-r--r--drivers/mtd/onenand/omap2.c6
-rw-r--r--drivers/mtd/onenand/onenand_base.c68
-rw-r--r--drivers/mtd/onenand/samsung.c6
-rw-r--r--drivers/mtd/redboot.c6
-rw-r--r--drivers/mtd/sm_ftl.c2
-rw-r--r--drivers/mtd/ubi/gluebi.c29
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c4
-rw-r--r--drivers/net/ethernet/sfc/mtd.c10
-rw-r--r--drivers/net/wan/Kconfig4
-rw-r--r--drivers/pci/pci-acpi.c40
-rw-r--r--drivers/pci/pcie/aspm.c13
-rw-r--r--drivers/pcmcia/at91_cf.c52
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.c2
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c13
-rw-r--r--drivers/pcmcia/db1xxx_ss.c17
-rw-r--r--drivers/pcmcia/electra_cf.c12
-rw-r--r--drivers/pcmcia/i82092.c11
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c13
-rw-r--r--drivers/pcmcia/pd6729.c9
-rw-r--r--drivers/pcmcia/pxa2xx_viper.c13
-rw-r--r--drivers/pcmcia/vrc4173_cardu.c7
-rw-r--r--drivers/pcmcia/xxs1500_ss.c13
-rw-r--r--drivers/pcmcia/yenta_socket.c2
-rw-r--r--drivers/platform/x86/intel_ips.c13
-rw-r--r--drivers/pnp/pnpacpi/core.c7
-rw-r--r--drivers/power/Kconfig21
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/ab8500_btemp.c1124
-rw-r--r--drivers/power/ab8500_charger.c2789
-rw-r--r--drivers/power/ab8500_fg.c2637
-rw-r--r--drivers/power/abx500_chargalg.c1921
-rw-r--r--drivers/power/charger-manager.c67
-rw-r--r--drivers/power/da9052-battery.c15
-rw-r--r--drivers/power/ds2782_battery.c13
-rw-r--r--drivers/power/isp1704_charger.c1
-rw-r--r--drivers/power/lp8727_charger.c131
-rw-r--r--drivers/power/max17040_battery.c13
-rw-r--r--drivers/power/max17042_battery.c508
-rw-r--r--drivers/power/sbs-battery.c13
-rw-r--r--drivers/power/smb347-charger.c1294
-rw-r--r--drivers/power/z2_battery.c14
-rw-r--r--drivers/rtc/interface.c5
-rw-r--r--drivers/rtc/rtc-mpc5121.c2
-rw-r--r--drivers/rtc/rtc-sa1100.c1
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c8
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c8
-rw-r--r--drivers/scsi/atp870u.c4
-rw-r--r--drivers/scsi/bfa/bfa.h9
-rw-r--r--drivers/scsi/bfa/bfa_core.c693
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h2
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c2
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c5
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c188
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h17
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c151
-rw-r--r--drivers/scsi/bfa/bfa_svc.c69
-rw-r--r--drivers/scsi/bfa/bfa_svc.h4
-rw-r--r--drivers/scsi/bfa/bfad_attr.c47
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c62
-rw-r--r--drivers/scsi/bfa/bfad_bsg.h2
-rw-r--r--drivers/scsi/bfa/bfad_drv.h2
-rw-r--r--drivers/scsi/bfa/bfi_ms.h17
-rw-r--r--drivers/scsi/bfa/bfi_reg.h6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c4
-rw-r--r--drivers/scsi/fcoe/fcoe.c83
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c38
-rw-r--r--drivers/scsi/ipr.c73
-rw-r--r--drivers/scsi/ipr.h16
-rw-r--r--drivers/scsi/libfc/fc_exch.c14
-rw-r--r--drivers/scsi/libfc/fc_lport.c10
-rw-r--r--drivers/scsi/lpfc/Makefile4
-rw-r--r--drivers/scsi/lpfc/lpfc.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c80
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c41
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c488
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h13
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c62
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h4
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c6
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c18
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c10
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_debug.c27
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c8
-rw-r--r--drivers/scsi/sd.c15
-rw-r--r--drivers/scsi/st.c21
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--drivers/scsi/ufs/Kconfig49
-rw-r--r--drivers/scsi/ufs/Makefile2
-rw-r--r--drivers/scsi/ufs/ufs.h207
-rw-r--r--drivers/scsi/ufs/ufshcd.c1978
-rw-r--r--drivers/scsi/ufs/ufshci.h376
-rw-r--r--drivers/scsi/vmw_pvscsi.c65
-rw-r--r--drivers/scsi/vmw_pvscsi.h109
-rw-r--r--drivers/sh/intc/balancing.c2
-rw-r--r--drivers/sh/intc/chip.c37
-rw-r--r--drivers/sh/intc/core.c13
-rw-r--r--drivers/sh/intc/handle.c7
-rw-r--r--drivers/sh/intc/internals.h9
-rw-r--r--drivers/sh/intc/virq.c2
-rw-r--r--drivers/spi/spi-dw-mid.c7
-rw-r--r--drivers/spi/spi-ep93xx.c4
-rw-r--r--drivers/spi/spi-pl022.c6
-rw-r--r--drivers/spi/spi-topcliff-pch.c4
-rw-r--r--drivers/staging/android/binder.c13
-rw-r--r--drivers/staging/asus_oled/README2
-rw-r--r--drivers/thermal/Kconfig8
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/spear_thermal.c206
-rw-r--r--drivers/thermal/thermal_sys.c94
-rw-r--r--drivers/tty/isicom.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c9
-rw-r--r--drivers/tty/serial/pch_uart.c4
-rw-r--r--drivers/tty/serial/sh-sci.c19
-rw-r--r--drivers/usb/musb/ux500_dma.c4
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c5
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/storage/Kconfig2
-rw-r--r--drivers/video/mx3fb.c4
-rw-r--r--drivers/video/omap2/vrfb.c1
-rw-r--r--drivers/virtio/virtio_balloon.c14
-rw-r--r--drivers/virtio/virtio_pci.c74
-rw-r--r--drivers/watchdog/sa1100_wdt.c1
-rw-r--r--fs/aio.c32
-rw-r--r--fs/autofs4/dev-ioctl.c2
-rw-r--r--fs/binfmt_elf.c24
-rw-r--r--fs/btrfs/async-thread.c15
-rw-r--r--fs/btrfs/async-thread.h4
-rw-r--r--fs/btrfs/backref.c122
-rw-r--r--fs/btrfs/backref.h5
-rw-r--r--fs/btrfs/compression.c38
-rw-r--r--fs/btrfs/compression.h2
-rw-r--r--fs/btrfs/ctree.c384
-rw-r--r--fs/btrfs/ctree.h169
-rw-r--r--fs/btrfs/delayed-inode.c33
-rw-r--r--fs/btrfs/delayed-ref.c33
-rw-r--r--fs/btrfs/dir-item.c10
-rw-r--r--fs/btrfs/disk-io.c649
-rw-r--r--fs/btrfs/disk-io.h10
-rw-r--r--fs/btrfs/export.c2
-rw-r--r--fs/btrfs/extent-tree.c737
-rw-r--r--fs/btrfs/extent_io.c1035
-rw-r--r--fs/btrfs/extent_io.h62
-rw-r--r--fs/btrfs/file-item.c57
-rw-r--r--fs/btrfs/file.c52
-rw-r--r--fs/btrfs/free-space-cache.c15
-rw-r--r--fs/btrfs/inode-item.c6
-rw-r--r--fs/btrfs/inode-map.c25
-rw-r--r--fs/btrfs/inode.c457
-rw-r--r--fs/btrfs/ioctl.c194
-rw-r--r--fs/btrfs/locking.c6
-rw-r--r--fs/btrfs/locking.h4
-rw-r--r--fs/btrfs/ordered-data.c60
-rw-r--r--fs/btrfs/ordered-data.h24
-rw-r--r--fs/btrfs/orphan.c2
-rw-r--r--fs/btrfs/reada.c10
-rw-r--r--fs/btrfs/relocation.c130
-rw-r--r--fs/btrfs/root-tree.c25
-rw-r--r--fs/btrfs/scrub.c1407
-rw-r--r--fs/btrfs/struct-funcs.c53
-rw-r--r--fs/btrfs/super.c192
-rw-r--r--fs/btrfs/transaction.c213
-rw-r--r--fs/btrfs/transaction.h3
-rw-r--r--fs/btrfs/tree-log.c96
-rw-r--r--fs/btrfs/tree-log.h2
-rw-r--r--fs/btrfs/volumes.c240
-rw-r--r--fs/btrfs/volumes.h4
-rw-r--r--fs/cifs/cifs_debug.c68
-rw-r--r--fs/cifs/cifs_debug.h4
-rw-r--r--fs/cifs/cifsfs.c13
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h39
-rw-r--r--fs/cifs/cifsproto.h20
-rw-r--r--fs/cifs/cifssmb.c126
-rw-r--r--fs/cifs/connect.c1454
-rw-r--r--fs/cifs/file.c271
-rw-r--r--fs/cifs/misc.c100
-rw-r--r--fs/cifs/netmisc.c3
-rw-r--r--fs/cifs/transport.c227
-rw-r--r--fs/compat.c26
-rw-r--r--fs/exec.c8
-rw-r--r--fs/ext2/ext2.h631
-rw-r--r--fs/ext2/xattr_security.c5
-rw-r--r--fs/ext2/xattr_trusted.c5
-rw-r--r--fs/ext2/xip.c2
-rw-r--r--fs/ext3/acl.c8
-rw-r--r--fs/ext3/balloc.c10
-rw-r--r--fs/ext3/bitmap.c4
-rw-r--r--fs/ext3/dir.c7
-rw-r--r--fs/ext3/ext3.h (renamed from include/linux/ext3_fs.h)488
-rw-r--r--fs/ext3/ext3_jbd.c2
-rw-r--r--fs/ext3/file.c6
-rw-r--r--fs/ext3/fsync.c8
-rw-r--r--fs/ext3/hash.c4
-rw-r--r--fs/ext3/ialloc.c13
-rw-r--r--fs/ext3/inode.c12
-rw-r--r--fs/ext3/ioctl.c7
-rw-r--r--fs/ext3/namei.c14
-rw-r--r--fs/ext3/resize.c5
-rw-r--r--fs/ext3/super.c18
-rw-r--r--fs/ext3/symlink.c4
-rw-r--r--fs/ext3/xattr.c7
-rw-r--r--fs/ext3/xattr_security.c6
-rw-r--r--fs/ext3/xattr_trusted.c6
-rw-r--r--fs/ext3/xattr_user.c5
-rw-r--r--fs/ext4/dir.c214
-rw-r--r--fs/ext4/ext4.h6
-rw-r--r--fs/ext4/hash.c4
-rw-r--r--fs/ext4/page-io.c7
-rw-r--r--fs/fcntl.c18
-rw-r--r--fs/file.c52
-rw-r--r--fs/gfs2/file.c1
-rw-r--r--fs/jffs2/acl.c2
-rw-r--r--fs/jffs2/background.c29
-rw-r--r--fs/jffs2/build.c6
-rw-r--r--fs/jffs2/compr.c32
-rw-r--r--fs/jffs2/compr_lzo.c1
-rw-r--r--fs/jffs2/compr_rubin.c2
-rw-r--r--fs/jffs2/compr_zlib.c45
-rw-r--r--fs/jffs2/debug.c22
-rw-r--r--fs/jffs2/debug.h50
-rw-r--r--fs/jffs2/dir.c41
-rw-r--r--fs/jffs2/erase.c72
-rw-r--r--fs/jffs2/file.c33
-rw-r--r--fs/jffs2/fs.c67
-rw-r--r--fs/jffs2/gc.c322
-rw-r--r--fs/jffs2/malloc.c2
-rw-r--r--fs/jffs2/nodelist.c30
-rw-r--r--fs/jffs2/nodemgmt.c214
-rw-r--r--fs/jffs2/os-linux.h4
-rw-r--r--fs/jffs2/read.c70
-rw-r--r--fs/jffs2/readinode.c2
-rw-r--r--fs/jffs2/scan.c229
-rw-r--r--fs/jffs2/security.c4
-rw-r--r--fs/jffs2/summary.c16
-rw-r--r--fs/jffs2/super.c30
-rw-r--r--fs/jffs2/symlink.c7
-rw-r--r--fs/jffs2/wbuf.c148
-rw-r--r--fs/jffs2/write.c113
-rw-r--r--fs/jffs2/xattr.c2
-rw-r--r--fs/lockd/svc.c2
-rw-r--r--fs/namei.c195
-rw-r--r--fs/nfsd/current_stateid.h28
-rw-r--r--fs/nfsd/export.c2
-rw-r--r--fs/nfsd/netns.h34
-rw-r--r--fs/nfsd/nfs4callback.c19
-rw-r--r--fs/nfsd/nfs4idmap.c53
-rw-r--r--fs/nfsd/nfs4proc.c118
-rw-r--r--fs/nfsd/nfs4recover.c647
-rw-r--r--fs/nfsd/nfs4state.c365
-rw-r--r--fs/nfsd/nfs4xdr.c132
-rw-r--r--fs/nfsd/nfsctl.c22
-rw-r--r--fs/nfsd/nfsd.h7
-rw-r--r--fs/nfsd/nfssvc.c44
-rw-r--r--fs/nfsd/state.h47
-rw-r--r--fs/nfsd/vfs.c33
-rw-r--r--fs/nfsd/vfs.h2
-rw-r--r--fs/nfsd/xdr4.h34
-rw-r--r--fs/ocfs2/ioctl.c2
-rw-r--r--fs/open.c4
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/proc/task_mmu.c1
-rw-r--r--fs/pstore/inode.c26
-rw-r--r--fs/romfs/storage.c2
-rw-r--r--fs/select.c2
-rw-r--r--include/acpi/acconfig.h (renamed from drivers/acpi/acpica/acconfig.h)19
-rw-r--r--include/acpi/acexcep.h7
-rw-r--r--include/acpi/acnames.h12
-rw-r--r--include/acpi/acpi_bus.h7
-rw-r--r--include/acpi/acpiosxf.h13
-rw-r--r--include/acpi/acpixf.h229
-rw-r--r--include/acpi/actbl.h7
-rw-r--r--include/acpi/actypes.h22
-rw-r--r--include/asm-generic/posix_types.h109
-rw-r--r--include/asm-generic/unistd.h2
-rw-r--r--include/linux/Kbuild2
-rw-r--r--include/linux/acpi.h10
-rw-r--r--include/linux/aio_abi.h2
-rw-r--r--include/linux/amba/pl08x.h10
-rw-r--r--include/linux/amba/pl330.h1
-rw-r--r--include/linux/compat.h32
-rw-r--r--include/linux/cpuidle.h22
-rw-r--r--include/linux/cpuset.h6
-rw-r--r--include/linux/dma-mapping.h2
-rw-r--r--include/linux/dmaengine.h35
-rw-r--r--include/linux/dw_dmac.h38
-rw-r--r--include/linux/ext2_fs.h569
-rw-r--r--include/linux/ext2_fs_sb.h126
-rw-r--r--include/linux/ext3_fs_i.h151
-rw-r--r--include/linux/ext3_fs_sb.h91
-rw-r--r--include/linux/ext3_jbd.h229
-rw-r--r--include/linux/fdtable.h46
-rw-r--r--include/linux/fs.h4
-rw-r--r--include/linux/fsl/mxs-dma.h (renamed from arch/arm/mach-mxs/include/mach/dma.h)0
-rw-r--r--include/linux/ftrace_event.h2
-rw-r--r--include/linux/gpio_keys.h3
-rw-r--r--include/linux/kernel.h36
-rw-r--r--include/linux/lp8727.h18
-rw-r--r--include/linux/mfd/abx500.h273
-rw-r--r--include/linux/mfd/abx500/ab8500-bm.h474
-rw-r--r--include/linux/mfd/abx500/ux500_chargalg.h38
-rw-r--r--include/linux/mtd/bbm.h5
-rw-r--r--include/linux/mtd/blktrans.h1
-rw-r--r--include/linux/mtd/fsmc.h169
-rw-r--r--include/linux/mtd/mtd.h304
-rw-r--r--include/linux/mtd/nand.h7
-rw-r--r--include/linux/mtd/pmc551.h78
-rw-r--r--include/linux/mtd/sh_flctl.h40
-rw-r--r--include/linux/mtd/spear_smi.h65
-rw-r--r--include/linux/mtio.h1
-rw-r--r--include/linux/nfs4.h15
-rw-r--r--include/linux/nfsd/cld.h56
-rw-r--r--include/linux/perf_event.h90
-rw-r--r--include/linux/platform_data/spear_thermal.h (renamed from arch/arm/mach-zynq/include/mach/io.h)33
-rw-r--r--include/linux/power/max17042_battery.h93
-rw-r--r--include/linux/power/smb347-charger.h117
-rw-r--r--include/linux/ring_buffer.h3
-rw-r--r--include/linux/rtc.h3
-rw-r--r--include/linux/sh_intc.h17
-rw-r--r--include/linux/spinlock_api_smp.h2
-rw-r--r--include/linux/sunrpc/svc_rdma.h4
-rw-r--r--include/linux/sysinfo.h24
-rw-r--r--include/linux/tboot.h1
-rw-r--r--include/linux/time.h2
-rw-r--r--include/linux/timex.h2
-rw-r--r--include/linux/virtio.h1
-rw-r--r--include/scsi/iscsi_if.h19
-rw-r--r--include/scsi/libfcoe.h4
-rw-r--r--include/trace/events/btrfs.h44
-rw-r--r--init/do_mounts_initrd.c1
-rw-r--r--init/do_mounts_rd.c9
-rw-r--r--ipc/compat.c70
-rw-r--r--kernel/Kconfig.locks4
-rw-r--r--kernel/Kconfig.preempt1
-rw-r--r--kernel/cgroup.c2
-rw-r--r--kernel/compat.c68
-rw-r--r--kernel/cpuset.c21
-rw-r--r--kernel/events/core.c11
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/futex.c38
-rw-r--r--kernel/futex_compat.c38
-rw-r--r--kernel/irq/Kconfig15
-rw-r--r--kernel/irq/handle.c16
-rw-r--r--kernel/irq/irqdomain.c8
-rw-r--r--kernel/irq/manage.c19
-rw-r--r--kernel/irq/migration.c10
-rw-r--r--kernel/sched/core.c71
-rw-r--r--kernel/sched/fair.c16
-rw-r--r--kernel/sched/rt.c2
-rw-r--r--kernel/sched/sched.h3
-rw-r--r--kernel/spinlock.c2
-rw-r--r--kernel/time.c6
-rw-r--r--kernel/time/alarmtimer.c8
-rw-r--r--kernel/time/clocksource.c2
-rw-r--r--kernel/time/ntp.c134
-rw-r--r--kernel/time/timekeeping.c51
-rw-r--r--kernel/trace/Kconfig2
-rw-r--r--kernel/trace/ftrace.c3
-rw-r--r--kernel/trace/ring_buffer.c157
-rw-r--r--kernel/trace/trace.c113
-rw-r--r--kernel/trace/trace.h3
-rw-r--r--kernel/trace/trace_entries.h16
-rw-r--r--kernel/trace/trace_export.c2
-rw-r--r--lib/Kconfig.debug1
-rw-r--r--net/bluetooth/hci_sock.c3
-rw-r--r--net/compat.c65
-rw-r--r--net/socket.c18
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/sunrpc/rpc_pipe.c5
-rw-r--r--net/sunrpc/svcauth_unix.c2
-rw-r--r--net/sunrpc/svcsock.c2
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma.c1
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_marshal.c66
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c20
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c26
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c10
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h7
-rw-r--r--net/sunrpc/xprtsock.c1
-rw-r--r--scripts/Kbuild.include2
-rw-r--r--scripts/Makefile.build2
-rw-r--r--scripts/Makefile.lib24
-rw-r--r--scripts/coccinelle/api/ptr_ret.cocci70
-rw-r--r--scripts/coccinelle/free/clk_put.cocci67
-rw-r--r--scripts/coccinelle/free/iounmap.cocci67
-rw-r--r--scripts/coccinelle/misc/boolinit.cocci178
-rw-r--r--scripts/coccinelle/misc/cstptr.cocci41
-rw-r--r--scripts/coccinelle/null/badzero.cocci237
-rw-r--r--scripts/dtc/dtc.c5
-rw-r--r--scripts/dtc/flattree.c2
-rw-r--r--scripts/gcc-goto.sh18
-rw-r--r--scripts/headers_check.pl38
-rw-r--r--scripts/kconfig/confdata.c26
-rwxr-xr-x[-rw-r--r--]scripts/kconfig/merge_config.sh15
-rw-r--r--scripts/kconfig/symbol.c9
-rw-r--r--scripts/mod/modpost.c2
-rw-r--r--scripts/package/builddeb20
-rwxr-xr-xscripts/patch-kernel4
-rwxr-xr-xscripts/setlocalversion3
-rwxr-xr-xscripts/tags.sh13
-rw-r--r--security/selinux/avc.c56
-rw-r--r--security/selinux/hooks.c11
-rw-r--r--security/selinux/selinuxfs.c110
-rw-r--r--sound/arm/pxa2xx-ac97-lib.c3
-rw-r--r--sound/arm/pxa2xx-ac97.c1
-rw-r--r--sound/atmel/abdac.c18
-rw-r--r--sound/atmel/ac97c.c41
-rw-r--r--sound/core/seq/seq_dummy.c2
-rw-r--r--sound/drivers/Kconfig3
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c2
-rw-r--r--sound/oss/msnd_pinnacle.c2
-rw-r--r--sound/pci/asihpi/hpi_internal.h2
-rw-r--r--sound/pci/asihpi/hpios.c2
-rw-r--r--sound/pci/hda/patch_realtek.c11
-rw-r--r--sound/soc/codecs/wm8994.c2
-rw-r--r--sound/soc/imx/imx-pcm-dma-mx2.c3
-rw-r--r--sound/soc/mxs/mxs-pcm.c2
-rw-r--r--sound/soc/mxs/mxs-saif.c2
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c1
-rw-r--r--sound/soc/sh/siu_pcm.c4
-rw-r--r--sound/soc/soc-dmaengine-pcm.c2
-rw-r--r--sound/soc/txx9/txx9aclc.c2
-rw-r--r--tools/perf/Documentation/perf-report.txt5
-rw-r--r--tools/perf/Makefile66
-rw-r--r--tools/perf/builtin-diff.c60
-rw-r--r--tools/perf/builtin-report.c40
-rw-r--r--tools/perf/builtin-stat.c2
-rw-r--r--tools/perf/builtin-test.c174
-rw-r--r--tools/perf/config/feature-tests.mak15
-rw-r--r--tools/perf/util/annotate.c8
-rw-r--r--tools/perf/util/cache.h12
-rw-r--r--tools/perf/util/evlist.c6
-rw-r--r--tools/perf/util/evsel.c10
-rw-r--r--tools/perf/util/evsel.h5
-rw-r--r--tools/perf/util/gtk/browser.c189
-rw-r--r--tools/perf/util/gtk/gtk.h8
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/hist.c200
-rw-r--r--tools/perf/util/hist.h19
-rw-r--r--tools/perf/util/include/linux/export.h (renamed from tools/perf/util/include/linux/module.h)0
-rw-r--r--tools/perf/util/parse-events.c603
-rw-r--r--tools/perf/util/parse-events.h49
-rw-r--r--tools/perf/util/parse-events.l127
-rw-r--r--tools/perf/util/parse-events.y229
-rw-r--r--tools/perf/util/pmu.c469
-rw-r--r--tools/perf/util/pmu.h41
-rw-r--r--tools/perf/util/pmu.l43
-rw-r--r--tools/perf/util/pmu.y93
-rw-r--r--tools/perf/util/probe-finder.c4
-rw-r--r--tools/perf/util/session.c1
-rw-r--r--tools/perf/util/symbol.c3
-rw-r--r--tools/perf/util/trace-event-parse.c10
-rw-r--r--tools/perf/util/ui/browser.h2
-rw-r--r--tools/perf/util/ui/browsers/hists.c14
-rw-r--r--tools/perf/util/ui/keysyms.h2
-rw-r--r--tools/perf/util/ui/util.c82
-rw-r--r--tools/power/cpupower/Makefile93
-rw-r--r--tools/power/cpupower/bench/Makefile23
-rw-r--r--tools/power/cpupower/debug/i386/Makefile40
-rw-r--r--tools/power/cpupower/debug/x86_64/Makefile26
-rw-r--r--tools/power/cpupower/man/cpupower-frequency-info.14
-rw-r--r--tools/power/cpupower/man/cpupower-frequency-set.14
-rw-r--r--tools/power/cpupower/man/cpupower-idle-info.190
-rw-r--r--tools/power/cpupower/man/cpupower-monitor.12
-rw-r--r--tools/power/cpupower/utils/cpuidle-info.c12
-rw-r--r--tools/power/cpupower/utils/helpers/amd.c4
-rw-r--r--tools/power/cpupower/utils/helpers/helpers.h11
-rw-r--r--tools/power/cpupower/utils/helpers/pci.c35
-rw-r--r--tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c25
-rw-r--r--tools/power/x86/turbostat/turbostat.899
-rw-r--r--tools/power/x86/turbostat/turbostat.c245
1297 files changed, 48157 insertions, 21431 deletions
diff --git a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
index 23a43b8207e6..2a7f9a00cb0a 100644
--- a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
+++ b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
@@ -55,7 +55,7 @@ What: /sys/bus/usb/drivers/usbtmc/devices/*/auto_abort
55Date: August 2008 55Date: August 2008
56Contact: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 56Contact: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
57Description: 57Description:
58 This file determines if the the transaction of the USB TMC 58 This file determines if the transaction of the USB TMC
59 device is to be automatically aborted if there is any error. 59 device is to be automatically aborted if there is any error.
60 For more details about this, please see the document, 60 For more details about this, please see the document,
61 "Universal Serial Bus Test and Measurement Class Specification 61 "Universal Serial Bus Test and Measurement Class Specification
diff --git a/Documentation/ABI/testing/debugfs-olpc b/Documentation/ABI/testing/debugfs-olpc
new file mode 100644
index 000000000000..bd76cc6d55f9
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-olpc
@@ -0,0 +1,16 @@
1What: /sys/kernel/debug/olpc-ec/cmd
2Date: Dec 2011
3KernelVersion: 3.4
4Contact: devel@lists.laptop.org
5Description:
6
7A generic interface for executing OLPC Embedded Controller commands and
8reading their responses.
9
10To execute a command, write data with the format: CC:N A A A A
11CC is the (hex) command, N is the count of expected reply bytes, and A A A A
12are optional (hex) arguments.
13
14To read the response (if any), read from the generic node after executing
15a command. Hex reply bytes will be returned, *whether or not* they came from
16the immediately previous command.
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-format b/Documentation/ABI/testing/sysfs-bus-event_source-devices-format
new file mode 100644
index 000000000000..079afc71363d
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-format
@@ -0,0 +1,14 @@
1Where: /sys/bus/event_source/devices/<dev>/format
2Date: January 2012
3Kernel Version: 3.3
4Contact: Jiri Olsa <jolsa@redhat.com>
5Description:
6 Attribute group to describe the magic bits that go into
7 perf_event_attr::config[012] for a particular pmu.
8 Each attribute of this group defines the 'hardware' bitmask
9 we want to export, so that userspace can deal with sane
10 name/value pairs.
11
12 Example: 'config1:1,6-10,44'
13 Defines contents of attribute that occupies bits 1,6-10,44 of
14 perf_event_attr::config1.
diff --git a/Documentation/ABI/testing/sysfs-firmware-acpi b/Documentation/ABI/testing/sysfs-firmware-acpi
index 4f9ba3c2fca7..dd930c8db41f 100644
--- a/Documentation/ABI/testing/sysfs-firmware-acpi
+++ b/Documentation/ABI/testing/sysfs-firmware-acpi
@@ -1,3 +1,23 @@
1What: /sys/firmware/acpi/bgrt/
2Date: January 2012
3Contact: Matthew Garrett <mjg@redhat.com>
4Description:
5 The BGRT is an ACPI 5.0 feature that allows the OS
6 to obtain a copy of the firmware boot splash and
7 some associated metadata. This is intended to be used
8 by boot splash applications in order to interact with
9 the firmware boot splash in order to avoid jarring
10 transitions.
11
12 image: The image bitmap. Currently a 32-bit BMP.
13 status: 1 if the image is valid, 0 if firmware invalidated it.
14 type: 0 indicates image is in BMP format.
15 version: The version of the BGRT. Currently 1.
16 xoffset: The number of pixels between the left of the screen
17 and the left edge of the image.
18 yoffset: The number of pixels between the top of the screen
19 and the top edge of the image.
20
1What: /sys/firmware/acpi/interrupts/ 21What: /sys/firmware/acpi/interrupts/
2Date: February 2008 22Date: February 2008
3Contact: Len Brown <lenb@kernel.org> 23Contact: Len Brown <lenb@kernel.org>
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index 2b90d328b3ba..c58b236bbe04 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -793,6 +793,35 @@ own custom mode, or may have some other magic method for making indentation
793work correctly. 793work correctly.
794 794
795 795
796 Chapter 19: Inline assembly
797
798In architecture-specific code, you may need to use inline assembly to interface
799with CPU or platform functionality. Don't hesitate to do so when necessary.
800However, don't use inline assembly gratuitously when C can do the job. You can
801and should poke hardware from C when possible.
802
803Consider writing simple helper functions that wrap common bits of inline
804assembly, rather than repeatedly writing them with slight variations. Remember
805that inline assembly can use C parameters.
806
807Large, non-trivial assembly functions should go in .S files, with corresponding
808C prototypes defined in C header files. The C prototypes for assembly
809functions should use "asmlinkage".
810
811You may need to mark your asm statement as volatile, to prevent GCC from
812removing it if GCC doesn't notice any side effects. You don't always need to
813do so, though, and doing so unnecessarily can limit optimization.
814
815When writing a single inline assembly statement containing multiple
816instructions, put each instruction on a separate line in a separate quoted
817string, and end each string except the last with \n\t to properly indent the
818next instruction in the assembly output:
819
820 asm ("magic %reg1, #42\n\t"
821 "more_magic %reg2, %reg3"
822 : /* outputs */ : /* inputs */ : /* clobbers */);
823
824
796 825
797 Appendix I: References 826 Appendix I: References
798 827
diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/acpi/apei/einj.txt
index e7cc36397217..e20b6daaced4 100644
--- a/Documentation/acpi/apei/einj.txt
+++ b/Documentation/acpi/apei/einj.txt
@@ -53,6 +53,14 @@ directory apei/einj. The following files are provided.
53 This file is used to set the second error parameter value. Effect of 53 This file is used to set the second error parameter value. Effect of
54 parameter depends on error_type specified. 54 parameter depends on error_type specified.
55 55
56- notrigger
57 The EINJ mechanism is a two step process. First inject the error, then
58 perform some actions to trigger it. Setting "notrigger" to 1 skips the
59 trigger phase, which *may* allow the user to cause the error in some other
60 context by a simple access to the cpu, memory location, or device that is
61 the target of the error injection. Whether this actually works depends
62 on what operations the BIOS actually includes in the trigger phase.
63
56BIOS versions based in the ACPI 4.0 specification have limited options 64BIOS versions based in the ACPI 4.0 specification have limited options
57to control where the errors are injected. Your BIOS may support an 65to control where the errors are injected. Your BIOS may support an
58extension (enabled with the param_extension=1 module parameter, or 66extension (enabled with the param_extension=1 module parameter, or
diff --git a/Documentation/aoe/aoe.txt b/Documentation/aoe/aoe.txt
index b5aada9f20cc..5f5aa16047ff 100644
--- a/Documentation/aoe/aoe.txt
+++ b/Documentation/aoe/aoe.txt
@@ -35,7 +35,7 @@ CREATING DEVICE NODES
35 sh Documentation/aoe/mkshelf.sh /dev/etherd 0 35 sh Documentation/aoe/mkshelf.sh /dev/etherd 0
36 36
37 There is also an autoload script that shows how to edit 37 There is also an autoload script that shows how to edit
38 /etc/modprobe.conf to ensure that the aoe module is loaded when 38 /etc/modprobe.d/aoe.conf to ensure that the aoe module is loaded when
39 necessary. 39 necessary.
40 40
41USING DEVICE NODES 41USING DEVICE NODES
diff --git a/Documentation/aoe/autoload.sh b/Documentation/aoe/autoload.sh
index 78dad1334c6f..815dff4691c9 100644
--- a/Documentation/aoe/autoload.sh
+++ b/Documentation/aoe/autoload.sh
@@ -1,8 +1,8 @@
1#!/bin/sh 1#!/bin/sh
2# set aoe to autoload by installing the 2# set aoe to autoload by installing the
3# aliases in /etc/modprobe.conf 3# aliases in /etc/modprobe.d/
4 4
5f=/etc/modprobe.conf 5f=/etc/modprobe.d/aoe.conf
6 6
7if test ! -r $f || test ! -w $f; then 7if test ! -r $f || test ! -w $f; then
8 echo "cannot configure $f for module autoloading" 1>&2 8 echo "cannot configure $f for module autoloading" 1>&2
diff --git a/Documentation/blockdev/floppy.txt b/Documentation/blockdev/floppy.txt
index 6ccab88705cb..470fe4b5e379 100644
--- a/Documentation/blockdev/floppy.txt
+++ b/Documentation/blockdev/floppy.txt
@@ -49,7 +49,7 @@ you can put:
49 49
50 options floppy omnibook messages 50 options floppy omnibook messages
51 51
52in /etc/modprobe.conf. 52in a configuration file in /etc/modprobe.d/.
53 53
54 54
55 The floppy driver related options are: 55 The floppy driver related options are:
diff --git a/Documentation/cpuidle/sysfs.txt b/Documentation/cpuidle/sysfs.txt
index 50d7b1642759..9d28a3406e74 100644
--- a/Documentation/cpuidle/sysfs.txt
+++ b/Documentation/cpuidle/sysfs.txt
@@ -36,6 +36,7 @@ drwxr-xr-x 2 root root 0 Feb 8 10:42 state3
36/sys/devices/system/cpu/cpu0/cpuidle/state0: 36/sys/devices/system/cpu/cpu0/cpuidle/state0:
37total 0 37total 0
38-r--r--r-- 1 root root 4096 Feb 8 10:42 desc 38-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
39-rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
39-r--r--r-- 1 root root 4096 Feb 8 10:42 latency 40-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
40-r--r--r-- 1 root root 4096 Feb 8 10:42 name 41-r--r--r-- 1 root root 4096 Feb 8 10:42 name
41-r--r--r-- 1 root root 4096 Feb 8 10:42 power 42-r--r--r-- 1 root root 4096 Feb 8 10:42 power
@@ -45,6 +46,7 @@ total 0
45/sys/devices/system/cpu/cpu0/cpuidle/state1: 46/sys/devices/system/cpu/cpu0/cpuidle/state1:
46total 0 47total 0
47-r--r--r-- 1 root root 4096 Feb 8 10:42 desc 48-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
49-rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
48-r--r--r-- 1 root root 4096 Feb 8 10:42 latency 50-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
49-r--r--r-- 1 root root 4096 Feb 8 10:42 name 51-r--r--r-- 1 root root 4096 Feb 8 10:42 name
50-r--r--r-- 1 root root 4096 Feb 8 10:42 power 52-r--r--r-- 1 root root 4096 Feb 8 10:42 power
@@ -54,6 +56,7 @@ total 0
54/sys/devices/system/cpu/cpu0/cpuidle/state2: 56/sys/devices/system/cpu/cpu0/cpuidle/state2:
55total 0 57total 0
56-r--r--r-- 1 root root 4096 Feb 8 10:42 desc 58-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
59-rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
57-r--r--r-- 1 root root 4096 Feb 8 10:42 latency 60-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
58-r--r--r-- 1 root root 4096 Feb 8 10:42 name 61-r--r--r-- 1 root root 4096 Feb 8 10:42 name
59-r--r--r-- 1 root root 4096 Feb 8 10:42 power 62-r--r--r-- 1 root root 4096 Feb 8 10:42 power
@@ -63,6 +66,7 @@ total 0
63/sys/devices/system/cpu/cpu0/cpuidle/state3: 66/sys/devices/system/cpu/cpu0/cpuidle/state3:
64total 0 67total 0
65-r--r--r-- 1 root root 4096 Feb 8 10:42 desc 68-r--r--r-- 1 root root 4096 Feb 8 10:42 desc
69-rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
66-r--r--r-- 1 root root 4096 Feb 8 10:42 latency 70-r--r--r-- 1 root root 4096 Feb 8 10:42 latency
67-r--r--r-- 1 root root 4096 Feb 8 10:42 name 71-r--r--r-- 1 root root 4096 Feb 8 10:42 name
68-r--r--r-- 1 root root 4096 Feb 8 10:42 power 72-r--r--r-- 1 root root 4096 Feb 8 10:42 power
@@ -72,6 +76,7 @@ total 0
72 76
73 77
74* desc : Small description about the idle state (string) 78* desc : Small description about the idle state (string)
79* disable : Option to disable this idle state (bool)
75* latency : Latency to exit out of this idle state (in microseconds) 80* latency : Latency to exit out of this idle state (in microseconds)
76* name : Name of the idle state (string) 81* name : Name of the idle state (string)
77* power : Power consumed while in this idle state (in milliwatts) 82* power : Power consumed while in this idle state (in milliwatts)
diff --git a/Documentation/devicetree/bindings/mtd/arm-versatile.txt b/Documentation/devicetree/bindings/mtd/arm-versatile.txt
index 476845db94d0..beace4b89daa 100644
--- a/Documentation/devicetree/bindings/mtd/arm-versatile.txt
+++ b/Documentation/devicetree/bindings/mtd/arm-versatile.txt
@@ -4,5 +4,5 @@ Required properties:
4- compatible : must be "arm,versatile-flash"; 4- compatible : must be "arm,versatile-flash";
5- bank-width : width in bytes of flash interface. 5- bank-width : width in bytes of flash interface.
6 6
7Optional properties: 7The device tree may optionally contain sub-nodes describing partitions of the
8- Subnode partition map from mtd flash binding 8address space. See partition.txt for more detail.
diff --git a/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt b/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt
index ef66ddd01da0..1889a4db5b7c 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt
@@ -3,6 +3,9 @@
3Required properties: 3Required properties:
4- compatible : "atmel,<model>", "atmel,<series>", "atmel,dataflash". 4- compatible : "atmel,<model>", "atmel,<series>", "atmel,dataflash".
5 5
6The device tree may optionally contain sub-nodes describing partitions of the
7address space. See partition.txt for more detail.
8
6Example: 9Example:
7 10
8flash@1 { 11flash@1 {
diff --git a/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt b/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt
index 00f1f546b32e..fce4894f5a98 100644
--- a/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt
@@ -19,6 +19,10 @@ Optional properties:
19 read registers (tR). Required if property "gpios" is not used 19 read registers (tR). Required if property "gpios" is not used
20 (R/B# pins not connected). 20 (R/B# pins not connected).
21 21
22Each flash chip described may optionally contain additional sub-nodes
23describing partitions of the address space. See partition.txt for more
24detail.
25
22Examples: 26Examples:
23 27
24upm@1,0 { 28upm@1,0 {
diff --git a/Documentation/devicetree/bindings/mtd/fsmc-nand.txt b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
new file mode 100644
index 000000000000..e2c663b354d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
@@ -0,0 +1,33 @@
1* FSMC NAND
2
3Required properties:
4- compatible : "st,spear600-fsmc-nand"
5- reg : Address range of the mtd chip
6- reg-names: Should contain the reg names "fsmc_regs" and "nand_data"
7- st,ale-off : Chip specific offset to ALE
8- st,cle-off : Chip specific offset to CLE
9
10Optional properties:
11- bank-width : Width (in bytes) of the device. If not present, the width
12 defaults to 1 byte
13- nand-skip-bbtscan: Indicates that the BBT scanning should be skipped
14
15Example:
16
17 fsmc: flash@d1800000 {
18 compatible = "st,spear600-fsmc-nand";
19 #address-cells = <1>;
20 #size-cells = <1>;
21 reg = <0xd1800000 0x1000 /* FSMC Register */
22 0xd2000000 0x4000>; /* NAND Base */
23 reg-names = "fsmc_regs", "nand_data";
24 st,ale-off = <0x20000>;
25 st,cle-off = <0x10000>;
26
27 bank-width = <1>;
28 nand-skip-bbtscan;
29
30 partition@0 {
31 ...
32 };
33 };
diff --git a/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt b/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
index 719f4dc58df7..36ef07d3c90f 100644
--- a/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpio-control-nand.txt
@@ -25,6 +25,9 @@ Optional properties:
25 GPIO state and before and after command byte writes, this register will be 25 GPIO state and before and after command byte writes, this register will be
26 read to ensure that the GPIO accesses have completed. 26 read to ensure that the GPIO accesses have completed.
27 27
28The device tree may optionally contain sub-nodes describing partitions of the
29address space. See partition.txt for more detail.
30
28Examples: 31Examples:
29 32
30gpio-nand@1,0 { 33gpio-nand@1,0 {
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
index 80152cb567d9..a63c2bd7de2b 100644
--- a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
+++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
@@ -23,27 +23,8 @@ are defined:
23 - vendor-id : Contains the flash chip's vendor id (1 byte). 23 - vendor-id : Contains the flash chip's vendor id (1 byte).
24 - device-id : Contains the flash chip's device id (1 byte). 24 - device-id : Contains the flash chip's device id (1 byte).
25 25
26In addition to the information on the mtd bank itself, the 26The device tree may optionally contain sub-nodes describing partitions of the
27device tree may optionally contain additional information 27address space. See partition.txt for more detail.
28describing partitions of the address space. This can be
29used on platforms which have strong conventions about which
30portions of a flash are used for what purposes, but which don't
31use an on-flash partition table such as RedBoot.
32
33Each partition is represented as a sub-node of the mtd device.
34Each node's name represents the name of the corresponding
35partition of the mtd device.
36
37Flash partitions
38 - reg : The partition's offset and size within the mtd bank.
39 - label : (optional) The label / name for this partition.
40 If omitted, the label is taken from the node name (excluding
41 the unit address).
42 - read-only : (optional) This parameter, if present, is a hint to
43 Linux that this partition should only be mounted
44 read-only. This is usually used for flash partitions
45 containing early-boot firmware images or data which should not
46 be clobbered.
47 28
48Example: 29Example:
49 30
diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt
new file mode 100644
index 000000000000..f114ce1657c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partition.txt
@@ -0,0 +1,38 @@
1Representing flash partitions in devicetree
2
3Partitions can be represented by sub-nodes of an mtd device. This can be used
4on platforms which have strong conventions about which portions of a flash are
5used for what purposes, but which don't use an on-flash partition table such
6as RedBoot.
7
8#address-cells & #size-cells must both be present in the mtd device and be
9equal to 1.
10
11Required properties:
12- reg : The partition's offset and size within the mtd bank.
13
14Optional properties:
15- label : The label / name for this partition. If omitted, the label is taken
16 from the node name (excluding the unit address).
17- read-only : This parameter, if present, is a hint to Linux that this
18 partition should only be mounted read-only. This is usually used for flash
19 partitions containing early-boot firmware images or data which should not be
20 clobbered.
21
22Examples:
23
24
25flash@0 {
26 #address-cells = <1>;
27 #size-cells = <1>;
28
29 partition@0 {
30 label = "u-boot";
31 reg = <0x0000000 0x100000>;
32 read-only;
33 };
34
35 uimage@100000 {
36 reg = <0x0100000 0x200000>;
37 };
38};
diff --git a/Documentation/devicetree/bindings/mtd/spear_smi.txt b/Documentation/devicetree/bindings/mtd/spear_smi.txt
new file mode 100644
index 000000000000..7248aadd89e4
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/spear_smi.txt
@@ -0,0 +1,31 @@
1* SPEAr SMI
2
3Required properties:
4- compatible : "st,spear600-smi"
5- reg : Address range of the mtd chip
6- #address-cells, #size-cells : Must be present if the device has sub-nodes
7 representing partitions.
8- interrupt-parent: Should be the phandle for the interrupt controller
9 that services interrupts for this device
10- interrupts: Should contain the SMI interrupts
11- clock-rate : Functional clock rate of SMI in Hz
12
13Optional properties:
14- st,smi-fast-mode : Flash supports read in fast mode
15
16Example:
17
18 smi: flash@fc000000 {
19 compatible = "st,spear600-smi";
20 #address-cells = <1>;
21 #size-cells = <1>;
22 reg = <0xfc000000 0x1000>;
23 interrupt-parent = <&vic1>;
24 interrupts = <12>;
25 clock-rate = <50000000>; /* 50MHz */
26
27 flash@f8000000 {
28 st,smi-fast-mode;
29 ...
30 };
31 };
diff --git a/Documentation/devicetree/bindings/power_supply/max17042_battery.txt b/Documentation/devicetree/bindings/power_supply/max17042_battery.txt
new file mode 100644
index 000000000000..5bc9b685cf8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/max17042_battery.txt
@@ -0,0 +1,18 @@
1max17042_battery
2~~~~~~~~~~~~~~~~
3
4Required properties :
5 - compatible : "maxim,max17042"
6
7Optional properties :
8 - maxim,rsns-microohm : Resistance of rsns resistor in micro Ohms
9 (datasheet-recommended value is 10000).
10 Defining this property enables current-sense functionality.
11
12Example:
13
14 battery-charger@36 {
15 compatible = "maxim,max17042";
16 reg = <0x36>;
17 maxim,rsns-microohm = <10000>;
18 };
diff --git a/Documentation/devicetree/usage-model.txt b/Documentation/devicetree/usage-model.txt
new file mode 100644
index 000000000000..c5a80099b71c
--- /dev/null
+++ b/Documentation/devicetree/usage-model.txt
@@ -0,0 +1,412 @@
1Linux and the Device Tree
2-------------------------
3The Linux usage model for device tree data
4
5Author: Grant Likely <grant.likely@secretlab.ca>
6
7This article describes how Linux uses the device tree. An overview of
8the device tree data format can be found on the device tree usage page
9at devicetree.org[1].
10
11[1] http://devicetree.org/Device_Tree_Usage
12
13The "Open Firmware Device Tree", or simply Device Tree (DT), is a data
14structure and language for describing hardware. More specifically, it
15is a description of hardware that is readable by an operating system
16so that the operating system doesn't need to hard code details of the
17machine.
18
19Structurally, the DT is a tree, or acyclic graph with named nodes, and
20nodes may have an arbitrary number of named properties encapsulating
21arbitrary data. A mechanism also exists to create arbitrary
22links from one node to another outside of the natural tree structure.
23
24Conceptually, a common set of usage conventions, called 'bindings',
25is defined for how data should appear in the tree to describe typical
26hardware characteristics including data busses, interrupt lines, GPIO
27connections, and peripheral devices.
28
29As much as possible, hardware is described using existing bindings to
30maximize use of existing support code, but since property and node
31names are simply text strings, it is easy to extend existing bindings
32or create new ones by defining new nodes and properties. Be wary,
33however, of creating a new binding without first doing some homework
34about what already exists. There are currently two different,
35incompatible, bindings for i2c busses that came about because the new
36binding was created without first investigating how i2c devices were
37already being enumerated in existing systems.
38
391. History
40----------
41The DT was originally created by Open Firmware as part of the
42communication method for passing data from Open Firmware to a client
43program (like to an operating system). An operating system used the
44Device Tree to discover the topology of the hardware at runtime, and
45thereby support a majority of available hardware without hard coded
46information (assuming drivers were available for all devices).
47
48Since Open Firmware is commonly used on PowerPC and SPARC platforms,
49the Linux support for those architectures has for a long time used the
50Device Tree.
51
52In 2005, when PowerPC Linux began a major cleanup to merge 32-bit
53and 64-bit support, the decision was made to require DT support on all
54powerpc platforms, regardless of whether or not they used Open
55Firmware. To do this, a DT representation called the Flattened Device
56Tree (FDT) was created which could be passed to the kernel as a binary
57blob without requiring a real Open Firmware implementation. U-Boot,
58kexec, and other bootloaders were modified to support both passing a
59Device Tree Binary (dtb) and to modify a dtb at boot time. DT was
60also added to the PowerPC boot wrapper (arch/powerpc/boot/*) so that
61a dtb could be wrapped up with the kernel image to support booting
62existing non-DT aware firmware.
63
64Some time later, FDT infrastructure was generalized to be usable by
65all architectures. At the time of this writing, 6 mainlined
66architectures (arm, microblaze, mips, powerpc, sparc, and x86) and 1
67out of mainline (nios) have some level of DT support.
68
692. Data Model
70-------------
71If you haven't already read the Device Tree Usage[1] page,
72then go read it now. It's okay, I'll wait....
73
742.1 High Level View
75-------------------
76The most important thing to understand is that the DT is simply a data
77structure that describes the hardware. There is nothing magical about
78it, and it doesn't magically make all hardware configuration problems
79go away. What it does do is provide a language for decoupling the
80hardware configuration from the board and device driver support in the
81Linux kernel (or any other operating system for that matter). Using
82it allows board and device support to become data driven; to make
83setup decisions based on data passed into the kernel instead of on
84per-machine hard coded selections.
85
86Ideally, data driven platform setup should result in less code
87duplication and make it easier to support a wide range of hardware
88with a single kernel image.
89
90Linux uses DT data for three major purposes:
911) platform identification,
922) runtime configuration, and
933) device population.
94
952.2 Platform Identification
96---------------------------
97First and foremost, the kernel will use data in the DT to identify the
98specific machine. In a perfect world, the specific platform shouldn't
99matter to the kernel because all platform details would be described
100perfectly by the device tree in a consistent and reliable manner.
101Hardware is not perfect though, and so the kernel must identify the
102machine during early boot so that it has the opportunity to run
103machine-specific fixups.
104
105In the majority of cases, the machine identity is irrelevant, and the
106kernel will instead select setup code based on the machine's core
107CPU or SoC. On ARM for example, setup_arch() in
108arch/arm/kernel/setup.c will call setup_machine_fdt() in
109arch/arm/kernel/devicetree.c which searches through the machine_desc
110table and selects the machine_desc which best matches the device tree
111data. It determines the best match by looking at the 'compatible'
112property in the root device tree node, and comparing it with the
113dt_compat list in struct machine_desc.
114
115The 'compatible' property contains a sorted list of strings starting
116with the exact name of the machine, followed by an optional list of
117boards it is compatible with sorted from most compatible to least. For
118example, the root compatible properties for the TI BeagleBoard and its
119successor, the BeagleBoard xM board might look like:
120
121 compatible = "ti,omap3-beagleboard", "ti,omap3450", "ti,omap3";
122 compatible = "ti,omap3-beagleboard-xm", "ti,omap3450", "ti,omap3";
123
124Where "ti,omap3-beagleboard-xm" specifies the exact model, it also
125claims that it is compatible with the OMAP 3450 SoC, and the omap3 family
126of SoCs in general. You'll notice that the list is sorted from most
127specific (exact board) to least specific (SoC family).
128
129Astute readers might point out that the Beagle xM could also claim
130compatibility with the original Beagle board. However, one should be
131cautioned about doing so at the board level since there is typically a
132high level of change from one board to another, even within the same
133product line, and it is hard to nail down exactly what is meant when one
134board claims to be compatible with another. For the top level, it is
135better to err on the side of caution and not claim one board is
136compatible with another. The notable exception would be when one
137board is a carrier for another, such as a CPU module attached to a
138carrier board.
139
140One more note on compatible values. Any string used in a compatible
141property must be documented as to what it indicates. Add
142documentation for compatible strings in Documentation/devicetree/bindings.
143
144Again on ARM, for each machine_desc, the kernel looks to see if
145any of the dt_compat list entries appear in the compatible property.
146If one does, then that machine_desc is a candidate for driving the
147machine. After searching the entire table of machine_descs,
148setup_machine_fdt() returns the 'most compatible' machine_desc based
149on which entry in the compatible property each machine_desc matches
150against. If no matching machine_desc is found, then it returns NULL.
151
152The reasoning behind this scheme is the observation that in the majority
153of cases, a single machine_desc can support a large number of boards
154if they all use the same SoC, or same family of SoCs. However,
155invariably there will be some exceptions where a specific board will
156require special setup code that is not useful in the generic case.
157Special cases could be handled by explicitly checking for the
158troublesome board(s) in generic setup code, but doing so very quickly
159becomes ugly and/or unmaintainable if it is more than just a couple of
160cases.
161
162Instead, the compatible list allows a generic machine_desc to provide
163support for a wide common set of boards by specifying a "less
164compatible" value in the dt_compat list. In the example above,
165generic board support can claim compatibility with "ti,omap3" or
166"ti,omap3450". If a bug was discovered on the original beagleboard
167that required special workaround code during early boot, then a new
168machine_desc could be added which implements the workarounds and only
169matches on "ti,omap3-beagleboard".
170
171PowerPC uses a slightly different scheme where it calls the .probe()
172hook from each machine_desc, and the first one returning TRUE is used.
173However, this approach does not take into account the priority of the
174compatible list, and probably should be avoided for new architecture
175support.
176
1772.3 Runtime configuration
178-------------------------
179In most cases, a DT will be the sole method of communicating data from
180firmware to the kernel, so also gets used to pass in runtime and
181configuration data like the kernel parameters string and the location
182of an initrd image.
183
184Most of this data is contained in the /chosen node, and when booting
185Linux it will look something like this:
186
187 chosen {
188 bootargs = "console=ttyS0,115200 loglevel=8";
189 initrd-start = <0xc8000000>;
190 initrd-end = <0xc8200000>;
191 };
192
193The bootargs property contains the kernel arguments, and the initrd-*
194properties define the address and size of an initrd blob. The
195chosen node may also optionally contain an arbitrary number of
196additional properties for platform-specific configuration data.
197
198During early boot, the architecture setup code calls of_scan_flat_dt()
199several times with different helper callbacks to parse device tree
200data before paging is setup. The of_scan_flat_dt() code scans through
201the device tree and uses the helpers to extract information required
202during early boot. Typically the early_init_dt_scan_chosen() helper
203is used to parse the chosen node including kernel parameters,
204early_init_dt_scan_root() to initialize the DT address space model,
205and early_init_dt_scan_memory() to determine the size and
206location of usable RAM.
207
208On ARM, the function setup_machine_fdt() is responsible for early
209scanning of the device tree after selecting the correct machine_desc
210that supports the board.
211
2122.4 Device population
213---------------------
214After the board has been identified, and after the early configuration data
215has been parsed, then kernel initialization can proceed in the normal
216way. At some point in this process, unflatten_device_tree() is called
217to convert the data into a more efficient runtime representation.
218This is also when machine-specific setup hooks will get called, like
219the machine_desc .init_early(), .init_irq() and .init_machine() hooks
220on ARM. The remainder of this section uses examples from the ARM
221implementation, but all architectures will do pretty much the same
222thing when using a DT.
223
224As can be guessed by the names, .init_early() is used for any machine-
225specific setup that needs to be executed early in the boot process,
226and .init_irq() is used to set up interrupt handling. Using a DT
227doesn't materially change the behaviour of either of these functions.
228If a DT is provided, then both .init_early() and .init_irq() are able
229to call any of the DT query functions (of_* in include/linux/of*.h) to
230get additional data about the platform.
231
232The most interesting hook in the DT context is .init_machine() which
233is primarily responsible for populating the Linux device model with
234data about the platform. Historically this has been implemented on
235embedded platforms by defining a set of static clock structures,
236platform_devices, and other data in the board support .c file, and
237registering it en-masse in .init_machine(). When DT is used, then
238instead of hard coding static devices for each platform, the list of
239devices can be obtained by parsing the DT, and allocating device
240structures dynamically.
241
242The simplest case is when .init_machine() is only responsible for
243registering a block of platform_devices. A platform_device is a concept
244used by Linux for memory or I/O mapped devices which cannot be detected
245by hardware, and for 'composite' or 'virtual' devices (more on those
246later). While there is no 'platform device' terminology for the DT,
247platform devices roughly correspond to device nodes at the root of the
248tree and children of simple memory mapped bus nodes.
249
250About now is a good time to lay out an example. Here is part of the
251device tree for the NVIDIA Tegra board.
252
253/{
254 compatible = "nvidia,harmony", "nvidia,tegra20";
255 #address-cells = <1>;
256 #size-cells = <1>;
257 interrupt-parent = <&intc>;
258
259 chosen { };
260 aliases { };
261
262 memory {
263 device_type = "memory";
264 reg = <0x00000000 0x40000000>;
265 };
266
267 soc {
268 compatible = "nvidia,tegra20-soc", "simple-bus";
269 #address-cells = <1>;
270 #size-cells = <1>;
271 ranges;
272
273 intc: interrupt-controller@50041000 {
274 compatible = "nvidia,tegra20-gic";
275 interrupt-controller;
276 #interrupt-cells = <1>;
277 reg = <0x50041000 0x1000>, < 0x50040100 0x0100 >;
278 };
279
280 serial@70006300 {
281 compatible = "nvidia,tegra20-uart";
282 reg = <0x70006300 0x100>;
283 interrupts = <122>;
284 };
285
286 i2s1: i2s@70002800 {
287 compatible = "nvidia,tegra20-i2s";
288 reg = <0x70002800 0x100>;
289 interrupts = <77>;
290 codec = <&wm8903>;
291 };
292
293 i2c@7000c000 {
294 compatible = "nvidia,tegra20-i2c";
295 #address-cells = <1>;
296 #size-cells = <0>;
297 reg = <0x7000c000 0x100>;
298 interrupts = <70>;
299
300 wm8903: codec@1a {
301 compatible = "wlf,wm8903";
302 reg = <0x1a>;
303 interrupts = <347>;
304 };
305 };
306 };
307
308 sound {
309 compatible = "nvidia,harmony-sound";
310 i2s-controller = <&i2s1>;
311 i2s-codec = <&wm8903>;
312 };
313};
314
315At .init_machine() time, Tegra board support code will need to look at
316this DT and decide which nodes to create platform_devices for.
317However, looking at the tree, it is not immediately obvious what kind
318of device each node represents, or even if a node represents a device
319at all. The /chosen, /aliases, and /memory nodes are informational
320nodes that don't describe devices (although arguably memory could be
321considered a device). The children of the /soc node are memory mapped
322devices, but the codec@1a is an i2c device, and the sound node
323represents not a device, but rather how other devices are connected
324together to create the audio subsystem. I know what each device is
325because I'm familiar with the board design, but how does the kernel
326know what to do with each node?
327
328The trick is that the kernel starts at the root of the tree and looks
329for nodes that have a 'compatible' property. First, it is generally
330assumed that any node with a 'compatible' property represents a device
331of some kind, and second, it can be assumed that any node at the root
332of the tree is either directly attached to the processor bus, or is a
333miscellaneous system device that cannot be described any other way.
334For each of these nodes, Linux allocates and registers a
335platform_device, which in turn may get bound to a platform_driver.
336
337Why is using a platform_device for these nodes a safe assumption?
338Well, for the way that Linux models devices, just about all bus_types
339assume that its devices are children of a bus controller. For
340example, each i2c_client is a child of an i2c_master. Each spi_device
341is a child of an SPI bus. Similarly for USB, PCI, MDIO, etc. The
342same hierarchy is also found in the DT, where I2C device nodes only
343ever appear as children of an I2C bus node. Ditto for SPI, MDIO, USB,
344etc. The only devices which do not require a specific type of parent
345device are platform_devices (and amba_devices, but more on that
346later), which will happily live at the base of the Linux /sys/devices
347tree. Therefore, if a DT node is at the root of the tree, then it
348really probably is best registered as a platform_device.
349
350Linux board support code calls of_platform_populate(NULL, NULL, NULL, NULL)
351to kick off discovery of devices at the root of the tree. The
352parameters are all NULL because when starting from the root of the
353tree, there is no need to provide a starting node (the first NULL), an
354auxdata lookup table or a parent struct device (the last two NULLs),
355and we're not using a match table (yet). For a board that only needs to register devices,
356.init_machine() can be completely empty except for the
357of_platform_populate() call.
358
359In the Tegra example, this accounts for the /soc and /sound nodes, but
360what about the children of the SoC node? Shouldn't they be registered
361as platform devices too? For Linux DT support, the generic behaviour
362is for child devices to be registered by the parent's device driver at
363driver .probe() time. So, an i2c bus device driver will register a
364i2c_client for each child node, an SPI bus driver will register
365its spi_device children, and similarly for other bus_types.
366According to that model, a driver could be written that binds to the
367SoC node and simply registers platform_devices for each of its
368children. The board support code would allocate and register an SoC
369device, a (theoretical) SoC device driver could bind to the SoC device,
370and register platform_devices for /soc/interrupt-controller, /soc/serial,
371/soc/i2s, and /soc/i2c in its .probe() hook. Easy, right?
372
373Actually, it turns out that registering children of some
374platform_devices as more platform_devices is a common pattern, and the
375device tree support code reflects that and makes the above example
376simpler. The second argument to of_platform_populate() is an
377of_device_id table, and any node that matches an entry in that table
378will also get its child nodes registered. In the tegra case, the code
379can look something like this:
380
381static void __init harmony_init_machine(void)
382{
383 /* ... */
384 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
385}
386
387"simple-bus" is defined in the ePAPR 1.0 specification as a property
388meaning a simple memory mapped bus, so the of_platform_populate() code
389could be written to just assume simple-bus compatible nodes will
390always be traversed. However, we pass it in as an argument so that
391board support code can always override the default behaviour.
392
393[Need to add discussion of adding i2c/spi/etc child devices]
394
395Appendix A: AMBA devices
396------------------------
397
398ARM Primecells are a certain kind of device attached to the ARM AMBA
399bus which include some support for hardware detection and power
400management. In Linux, struct amba_device and the amba_bus_type is
401used to represent Primecell devices. However, the fiddly bit is that
402not all devices on an AMBA bus are Primecells, and for Linux it is
403typical for both amba_device and platform_device instances to be
404siblings of the same bus segment.
405
406When using the DT, this creates problems for of_platform_populate()
407because it must decide whether to register each node as either a
408platform_device or an amba_device. This unfortunately complicates the
409device creation model a little bit, but the solution turns out not to
410be too invasive. If a node is compatible with "arm,amba-primecell", then
411of_platform_populate() will register it as an amba_device instead of a
412platform_device.
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 0c083c5c2faa..b4a898f43c37 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -158,7 +158,6 @@ logo_*.c
158logo_*_clut224.c 158logo_*_clut224.c
159logo_*_mono.c 159logo_*_mono.c
160lxdialog 160lxdialog
161mach
162mach-types 161mach-types
163mach-types.h 162mach-types.h
164machtypes.h 163machtypes.h
diff --git a/Documentation/fb/intel810.txt b/Documentation/fb/intel810.txt
index be3e7836abef..a8e9f5bca6f3 100644
--- a/Documentation/fb/intel810.txt
+++ b/Documentation/fb/intel810.txt
@@ -211,7 +211,7 @@ Using the same setup as described above, load the module like this:
211 modprobe i810fb vram=2 xres=1024 bpp=8 hsync1=30 hsync2=55 vsync1=50 \ 211 modprobe i810fb vram=2 xres=1024 bpp=8 hsync1=30 hsync2=55 vsync1=50 \
212 vsync2=85 accel=1 mtrr=1 212 vsync2=85 accel=1 mtrr=1
213 213
214Or just add the following to /etc/modprobe.conf 214Or just add the following to a configuration file in /etc/modprobe.d/
215 215
216 options i810fb vram=2 xres=1024 bpp=16 hsync1=30 hsync2=55 vsync1=50 \ 216 options i810fb vram=2 xres=1024 bpp=16 hsync1=30 hsync2=55 vsync1=50 \
217 vsync2=85 accel=1 mtrr=1 217 vsync2=85 accel=1 mtrr=1
diff --git a/Documentation/fb/intelfb.txt b/Documentation/fb/intelfb.txt
index dd9e944ea628..feac4e4d6968 100644
--- a/Documentation/fb/intelfb.txt
+++ b/Documentation/fb/intelfb.txt
@@ -120,7 +120,7 @@ Using the same setup as described above, load the module like this:
120 120
121 modprobe intelfb mode=800x600-32@75 vram=8 accel=1 hwcursor=1 121 modprobe intelfb mode=800x600-32@75 vram=8 accel=1 hwcursor=1
122 122
123Or just add the following to /etc/modprobe.conf 123Or just add the following to a configuration file in /etc/modprobe.d/
124 124
125 options intelfb mode=800x600-32@75 vram=8 accel=1 hwcursor=1 125 options intelfb mode=800x600-32@75 vram=8 accel=1 hwcursor=1
126 126
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 0cad4803ffac..c1be8066ea59 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -529,3 +529,13 @@ When: 3.5
529Why: The old kmap_atomic() with two arguments is deprecated, we only 529Why: The old kmap_atomic() with two arguments is deprecated, we only
530 keep it for backward compatibility for few cycles and then drop it. 530 keep it for backward compatibility for few cycles and then drop it.
531Who: Cong Wang <amwang@redhat.com> 531Who: Cong Wang <amwang@redhat.com>
532
533----------------------------
534
535What: get_robust_list syscall
536When: 2013
537Why: There appear to be no production users of the get_robust_list syscall,
538 and it runs the risk of leaking address locations, allowing the bypass
539 of ASLR. It was only ever intended for debugging, so it should be
540 removed.
541Who: Kees Cook <keescook@chromium.org>
diff --git a/Documentation/filesystems/files.txt b/Documentation/filesystems/files.txt
index ac2facc50d2a..46dfc6b038c3 100644
--- a/Documentation/filesystems/files.txt
+++ b/Documentation/filesystems/files.txt
@@ -113,8 +113,8 @@ the fdtable structure -
113 if (fd >= 0) { 113 if (fd >= 0) {
114 /* locate_fd() may have expanded fdtable, load the ptr */ 114 /* locate_fd() may have expanded fdtable, load the ptr */
115 fdt = files_fdtable(files); 115 fdt = files_fdtable(files);
116 FD_SET(fd, fdt->open_fds); 116 __set_open_fd(fd, fdt);
117 FD_CLR(fd, fdt->close_on_exec); 117 __clear_close_on_exec(fd, fdt);
118 spin_unlock(&files->file_lock); 118 spin_unlock(&files->file_lock);
119 ..... 119 .....
120 120
diff --git a/Documentation/i2c/busses/scx200_acb b/Documentation/i2c/busses/scx200_acb
index 7c07883d4dfc..ce83c871fe95 100644
--- a/Documentation/i2c/busses/scx200_acb
+++ b/Documentation/i2c/busses/scx200_acb
@@ -28,5 +28,5 @@ If the scx200_acb driver is built into the kernel, add the following
28parameter to your boot command line: 28parameter to your boot command line:
29 scx200_acb.base=0x810,0x820 29 scx200_acb.base=0x810,0x820
30If the scx200_acb driver is built as a module, add the following line to 30If the scx200_acb driver is built as a module, add the following line to
31the file /etc/modprobe.conf instead: 31a configuration file in /etc/modprobe.d/ instead:
32 options scx200_acb base=0x810,0x820 32 options scx200_acb base=0x810,0x820
diff --git a/Documentation/ide/ide.txt b/Documentation/ide/ide.txt
index e77bebfa7b0d..7aca987c23d9 100644
--- a/Documentation/ide/ide.txt
+++ b/Documentation/ide/ide.txt
@@ -169,7 +169,7 @@ When using ide.c as a module in combination with kmod, add:
169 169
170 alias block-major-3 ide-probe 170 alias block-major-3 ide-probe
171 171
172to /etc/modprobe.conf. 172to a configuration file in /etc/modprobe.d/.
173 173
174When ide.c is used as a module, you can pass command line parameters to the 174When ide.c is used as a module, you can pass command line parameters to the
175driver using the "options=" keyword to insmod, while replacing any ',' with 175driver using the "options=" keyword to insmod, while replacing any ',' with
diff --git a/Documentation/input/input.txt b/Documentation/input/input.txt
index b3d6787b4fb1..666c06c5ab0c 100644
--- a/Documentation/input/input.txt
+++ b/Documentation/input/input.txt
@@ -250,8 +250,8 @@ And so on up to event31.
250a USB keyboard works and is correctly connected to the kernel keyboard 250a USB keyboard works and is correctly connected to the kernel keyboard
251driver. 251driver.
252 252
253 Doing a cat /dev/input/mouse0 (c, 13, 32) will verify that a mouse 253 Doing a "cat /dev/input/mouse0" (c, 13, 32) will verify that a mouse
254is also emulated, characters should appear if you move it. 254is also emulated; characters should appear if you move it.
255 255
256 You can test the joystick emulation with the 'jstest' utility, 256 You can test the joystick emulation with the 'jstest' utility,
257available in the joystick package (see Documentation/input/joystick.txt). 257available in the joystick package (see Documentation/input/joystick.txt).
diff --git a/Documentation/isdn/README.gigaset b/Documentation/isdn/README.gigaset
index ef3343eaa002..7534c6039adc 100644
--- a/Documentation/isdn/README.gigaset
+++ b/Documentation/isdn/README.gigaset
@@ -97,8 +97,7 @@ GigaSet 307x Device Driver
97 2.5.): 1=on (default), 0=off 97 2.5.): 1=on (default), 0=off
98 98
99 Depending on your distribution you may want to create a separate module 99 Depending on your distribution you may want to create a separate module
100 configuration file /etc/modprobe.d/gigaset for these, or add them to a 100 configuration file like /etc/modprobe.d/gigaset.conf for these.
101 custom file like /etc/modprobe.conf.local.
102 101
1032.2. Device nodes for user space programs 1022.2. Device nodes for user space programs
104 ------------------------------------ 103 ------------------------------------
@@ -212,8 +211,8 @@ GigaSet 307x Device Driver
212 211
213 options ppp_async flag_time=0 212 options ppp_async flag_time=0
214 213
215 to an appropriate module configuration file, like /etc/modprobe.d/gigaset 214 to an appropriate module configuration file, like
216 or /etc/modprobe.conf.local. 215 /etc/modprobe.d/gigaset.conf.
217 216
218 Unimodem mode is needed for making some devices [e.g. SX100] work which 217 Unimodem mode is needed for making some devices [e.g. SX100] work which
219 do not support the regular Gigaset command set. If debug output (see 218 do not support the regular Gigaset command set. If debug output (see
@@ -237,8 +236,8 @@ GigaSet 307x Device Driver
237 modprobe usb_gigaset startmode=0 236 modprobe usb_gigaset startmode=0
238 or by adding a line like 237 or by adding a line like
239 options usb_gigaset startmode=0 238 options usb_gigaset startmode=0
240 to an appropriate module configuration file, like /etc/modprobe.d/gigaset 239 to an appropriate module configuration file, like
241 or /etc/modprobe.conf.local. 240 /etc/modprobe.d/gigaset.conf.
242 241
2432.6. Call-ID (CID) mode 2422.6. Call-ID (CID) mode
244 ------------------ 243 ------------------
@@ -310,7 +309,7 @@ GigaSet 307x Device Driver
310 309
311 options isdn dialtimeout=15 310 options isdn dialtimeout=15
312 311
313 to /etc/modprobe.d/gigaset, /etc/modprobe.conf.local or a similar file. 312 to /etc/modprobe.d/gigaset.conf or a similar file.
314 313
315 Problem: 314 Problem:
316 The isdnlog program emits error messages or just doesn't work. 315 The isdnlog program emits error messages or just doesn't work.
@@ -350,8 +349,7 @@ GigaSet 307x Device Driver
350 The initial value can be set using the debug parameter when loading the 349 The initial value can be set using the debug parameter when loading the
351 module "gigaset", e.g. by adding a line 350 module "gigaset", e.g. by adding a line
352 options gigaset debug=0 351 options gigaset debug=0
353 to your module configuration file, eg. /etc/modprobe.d/gigaset or 352 to your module configuration file, eg. /etc/modprobe.d/gigaset.conf
354 /etc/modprobe.conf.local.
355 353
356 Generated debugging information can be found 354 Generated debugging information can be found
357 - as output of the command 355 - as output of the command
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index c313d71324b4..9d5f2a90dca9 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -28,12 +28,10 @@ new (default) values, so you can use:
28 28
29 grep "(NEW)" conf.new 29 grep "(NEW)" conf.new
30 30
31to see the new config symbols or you can 'diff' the previous and 31to see the new config symbols or you can use diffconfig to see the
32new .config files to see the differences: 32differences between the previous and new .config files:
33 33
34 diff .config.old .config | less 34 scripts/diffconfig .config.old .config | less
35
36(Yes, we need something better here.)
37 35
38______________________________________________________________________ 36______________________________________________________________________
39Environment variables for '*config' 37Environment variables for '*config'
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e2f8c297a8a4..c1601e5a8b71 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1699,6 +1699,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1699 The default is to send the implementation identification 1699 The default is to send the implementation identification
1700 information. 1700 information.
1701 1701
1702 nfsd.nfs4_disable_idmapping=
1703 [NFSv4] When set to the default of '1', the NFSv4
1704 server will return only numeric uids and gids to
1705 clients using auth_sys, and will accept numeric uids
1706 and gids from such clients. This is intended to ease
1707 migration from NFSv2/v3.
1702 1708
1703 objlayoutdriver.osd_login_prog= 1709 objlayoutdriver.osd_login_prog=
1704 [NFS] [OBJLAYOUT] sets the pathname to the program which 1710 [NFS] [OBJLAYOUT] sets the pathname to the program which
diff --git a/Documentation/laptops/sonypi.txt b/Documentation/laptops/sonypi.txt
index 4857acfc50f1..606bdb9ce036 100644
--- a/Documentation/laptops/sonypi.txt
+++ b/Documentation/laptops/sonypi.txt
@@ -110,7 +110,7 @@ Module use:
110----------- 110-----------
111 111
112In order to automatically load the sonypi module on use, you can put those 112In order to automatically load the sonypi module on use, you can put those
113lines in your /etc/modprobe.conf file: 113lines in a configuration file in /etc/modprobe.d/:
114 114
115 alias char-major-10-250 sonypi 115 alias char-major-10-250 sonypi
116 options sonypi minor=250 116 options sonypi minor=250
diff --git a/Documentation/mono.txt b/Documentation/mono.txt
index e8e1758e87da..d01ac6052194 100644
--- a/Documentation/mono.txt
+++ b/Documentation/mono.txt
@@ -38,11 +38,11 @@ if [ ! -e /proc/sys/fs/binfmt_misc/register ]; then
38 /sbin/modprobe binfmt_misc 38 /sbin/modprobe binfmt_misc
39 # Some distributions, like Fedora Core, perform 39 # Some distributions, like Fedora Core, perform
40 # the following command automatically when the 40 # the following command automatically when the
41 # binfmt_misc module is loaded into the kernel. 41 # binfmt_misc module is loaded into the kernel
42 # or during normal boot up (systemd-based systems).
42 # Thus, it is possible that the following line 43 # Thus, it is possible that the following line
43 # is not needed at all. Look at /etc/modprobe.conf 44 # is not needed at all.
44 # to check whether this is applicable or not. 45 mount -t binfmt_misc none /proc/sys/fs/binfmt_misc
45 mount -t binfmt_misc none /proc/sys/fs/binfmt_misc
46fi 46fi
47 47
48# Register support for .NET CLR binaries 48# Register support for .NET CLR binaries
diff --git a/Documentation/networking/baycom.txt b/Documentation/networking/baycom.txt
index 4e68849d5639..688f18fd4467 100644
--- a/Documentation/networking/baycom.txt
+++ b/Documentation/networking/baycom.txt
@@ -93,7 +93,7 @@ Every time a driver is inserted into the kernel, it has to know which
93modems it should access at which ports. This can be done with the setbaycom 93modems it should access at which ports. This can be done with the setbaycom
94utility. If you are only using one modem, you can also configure the 94utility. If you are only using one modem, you can also configure the
95driver from the insmod command line (or by means of an option line in 95driver from the insmod command line (or by means of an option line in
96/etc/modprobe.conf). 96/etc/modprobe.d/*.conf).
97 97
98Examples: 98Examples:
99 modprobe baycom_ser_fdx mode="ser12*" iobase=0x3f8 irq=4 99 modprobe baycom_ser_fdx mode="ser12*" iobase=0x3f8 irq=4
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 080ad26690ae..bfea8a338901 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -173,9 +173,8 @@ bonding module at load time, or are specified via sysfs.
173 173
174 Module options may be given as command line arguments to the 174 Module options may be given as command line arguments to the
175insmod or modprobe command, but are usually specified in either the 175insmod or modprobe command, but are usually specified in either the
176/etc/modules.conf or /etc/modprobe.conf configuration file, or in a 176/etc/modprobe.d/*.conf configuration files, or in a distro-specific
177distro-specific configuration file (some of which are detailed in the next 177configuration file (some of which are detailed in the next section).
178section).
179 178
180 Details on bonding support for sysfs is provided in the 179 Details on bonding support for sysfs is provided in the
181"Configuring Bonding Manually via Sysfs" section, below. 180"Configuring Bonding Manually via Sysfs" section, below.
@@ -1021,7 +1020,7 @@ ifcfg-bondX files.
1021 1020
1022 Because the sysconfig scripts supply the bonding module 1021 Because the sysconfig scripts supply the bonding module
1023options in the ifcfg-bondX file, it is not necessary to add them to 1022options in the ifcfg-bondX file, it is not necessary to add them to
1024the system /etc/modules.conf or /etc/modprobe.conf configuration file. 1023the system /etc/modprobe.d/*.conf configuration files.
1025 1024
10263.2 Configuration with Initscripts Support 10253.2 Configuration with Initscripts Support
1027------------------------------------------ 1026------------------------------------------
@@ -1098,15 +1097,13 @@ queried targets, e.g.,
1098 arp_ip_target=+192.168.1.1 arp_ip_target=+192.168.1.2 1097 arp_ip_target=+192.168.1.1 arp_ip_target=+192.168.1.2
1099 1098
1100 is the proper syntax to specify multiple targets. When specifying 1099 is the proper syntax to specify multiple targets. When specifying
1101options via BONDING_OPTS, it is not necessary to edit /etc/modules.conf or 1100options via BONDING_OPTS, it is not necessary to edit /etc/modprobe.d/*.conf.
1102/etc/modprobe.conf.
1103 1101
1104 For even older versions of initscripts that do not support 1102 For even older versions of initscripts that do not support
1105BONDING_OPTS, it is necessary to edit /etc/modules.conf (or 1103BONDING_OPTS, it is necessary to edit /etc/modprobe.d/*.conf, depending upon
1106/etc/modprobe.conf, depending upon your distro) to load the bonding module 1104your distro) to load the bonding module with your desired options when the
1107with your desired options when the bond0 interface is brought up. The 1105bond0 interface is brought up. The following lines in /etc/modprobe.d/*.conf
1108following lines in /etc/modules.conf (or modprobe.conf) will load the 1106will load the bonding module, and select its options:
1109bonding module, and select its options:
1110 1107
1111alias bond0 bonding 1108alias bond0 bonding
1112options bond0 mode=balance-alb miimon=100 1109options bond0 mode=balance-alb miimon=100
@@ -1152,7 +1149,7 @@ knowledge of bonding. One such distro is SuSE Linux Enterprise Server
1152version 8. 1149version 8.
1153 1150
1154 The general method for these systems is to place the bonding 1151 The general method for these systems is to place the bonding
1155module parameters into /etc/modules.conf or /etc/modprobe.conf (as 1152module parameters into a config file in /etc/modprobe.d/ (as
1156appropriate for the installed distro), then add modprobe and/or 1153appropriate for the installed distro), then add modprobe and/or
1157ifenslave commands to the system's global init script. The name of 1154ifenslave commands to the system's global init script. The name of
1158the global init script differs; for sysconfig, it is 1155the global init script differs; for sysconfig, it is
@@ -1228,7 +1225,7 @@ network initialization scripts.
1228specify a different name for each instance (the module loading system 1225specify a different name for each instance (the module loading system
1229requires that every loaded module, even multiple instances of the same 1226requires that every loaded module, even multiple instances of the same
1230module, have a unique name). This is accomplished by supplying multiple 1227module, have a unique name). This is accomplished by supplying multiple
1231sets of bonding options in /etc/modprobe.conf, for example: 1228sets of bonding options in /etc/modprobe.d/*.conf, for example:
1232 1229
1233alias bond0 bonding 1230alias bond0 bonding
1234options bond0 -o bond0 mode=balance-rr miimon=100 1231options bond0 -o bond0 mode=balance-rr miimon=100
@@ -1793,8 +1790,8 @@ route additions may cause trouble.
1793 On systems with network configuration scripts that do not 1790 On systems with network configuration scripts that do not
1794associate physical devices directly with network interface names (so 1791associate physical devices directly with network interface names (so
1795that the same physical device always has the same "ethX" name), it may 1792that the same physical device always has the same "ethX" name), it may
1796be necessary to add some special logic to either /etc/modules.conf or 1793be necessary to add some special logic to config files in
1797/etc/modprobe.conf (depending upon which is installed on the system). 1794/etc/modprobe.d/.
1798 1795
1799 For example, given a modules.conf containing the following: 1796 For example, given a modules.conf containing the following:
1800 1797
@@ -1821,20 +1818,15 @@ add above bonding e1000 tg3
1821bonding is loaded. This command is fully documented in the 1818bonding is loaded. This command is fully documented in the
1822modules.conf manual page. 1819modules.conf manual page.
1823 1820
1824 On systems utilizing modprobe.conf (or modprobe.conf.local), 1821 On systems utilizing modprobe an equivalent problem can occur.
1825an equivalent problem can occur. In this case, the following can be 1822In this case, the following can be added to config files in
1826added to modprobe.conf (or modprobe.conf.local, as appropriate), as 1823/etc/modprobe.d/ as:
1827follows (all on one line; it has been split here for clarity):
1828 1824
1829install bonding /sbin/modprobe tg3; /sbin/modprobe e1000; 1825softdep bonding pre: tg3 e1000
1830 /sbin/modprobe --ignore-install bonding
1831 1826
1832 This will, when loading the bonding module, rather than 1827 This will load tg3 and e1000 modules before loading the bonding one.
1833performing the normal action, instead execute the provided command. 1828Full documentation on this can be found in the modprobe.d and modprobe
1834This command loads the device drivers in the order needed, then calls 1829manual pages.
1835modprobe with --ignore-install to cause the normal action to then take
1836place. Full documentation on this can be found in the modprobe.conf
1837and modprobe manual pages.
1838 1830
18398.3. Painfully Slow Or No Failed Link Detection By Miimon 18318.3. Painfully Slow Or No Failed Link Detection By Miimon
1840--------------------------------------------------------- 1832---------------------------------------------------------
diff --git a/Documentation/networking/dl2k.txt b/Documentation/networking/dl2k.txt
index 10e8490fa406..cba74f7a3abc 100644
--- a/Documentation/networking/dl2k.txt
+++ b/Documentation/networking/dl2k.txt
@@ -45,12 +45,13 @@ Now eth0 should active, you can test it by "ping" or get more information by
45"ifconfig". If tested ok, continue the next step. 45"ifconfig". If tested ok, continue the next step.
46 46
474. cp dl2k.ko /lib/modules/`uname -r`/kernel/drivers/net 474. cp dl2k.ko /lib/modules/`uname -r`/kernel/drivers/net
485. Add the following line to /etc/modprobe.conf: 485. Add the following line to /etc/modprobe.d/dl2k.conf:
49 alias eth0 dl2k 49 alias eth0 dl2k
506. Run "netconfig" or "netconf" to create configuration script ifcfg-eth0 506. Run depmod to update module indexes.
517. Run "netconfig" or "netconf" to create configuration script ifcfg-eth0
51 located at /etc/sysconfig/network-scripts or create it manually. 52 located at /etc/sysconfig/network-scripts or create it manually.
52 [see - Configuration Script Sample] 53 [see - Configuration Script Sample]
537. Driver will automatically load and configure at next boot time. 548. Driver will automatically load and configure at next boot time.
54 55
55Compiling the Driver 56Compiling the Driver
56==================== 57====================
@@ -154,8 +155,8 @@ Installing the Driver
154 ----------------- 155 -----------------
155 1. Copy dl2k.o to the network modules directory, typically 156 1. Copy dl2k.o to the network modules directory, typically
156 /lib/modules/2.x.x-xx/net or /lib/modules/2.x.x/kernel/drivers/net. 157 /lib/modules/2.x.x-xx/net or /lib/modules/2.x.x/kernel/drivers/net.
157 2. Locate the boot module configuration file, most commonly modprobe.conf 158 2. Locate the boot module configuration file, most commonly in the
158 or modules.conf (for 2.4) in the /etc directory. Add the following lines: 159 /etc/modprobe.d/ directory. Add the following lines:
159 160
160 alias ethx dl2k 161 alias ethx dl2k
161 options dl2k <optional parameters> 162 options dl2k <optional parameters>
diff --git a/Documentation/networking/e100.txt b/Documentation/networking/e100.txt
index 162f323a7a1f..fcb6c71cdb69 100644
--- a/Documentation/networking/e100.txt
+++ b/Documentation/networking/e100.txt
@@ -94,8 +94,8 @@ Additional Configurations
94 94
95 Configuring a network driver to load properly when the system is started is 95 Configuring a network driver to load properly when the system is started is
96 distribution dependent. Typically, the configuration process involves adding 96 distribution dependent. Typically, the configuration process involves adding
97 an alias line to /etc/modules.conf or /etc/modprobe.conf as well as editing 97 an alias line to /etc/modprobe.d/*.conf as well as editing other system
98 other system startup scripts and/or configuration files. Many popular Linux 98 startup scripts and/or configuration files. Many popular Linux
99 distributions ship with tools to make these changes for you. To learn the 99 distributions ship with tools to make these changes for you. To learn the
100 proper way to configure a network device for your system, refer to your 100 proper way to configure a network device for your system, refer to your
101 distribution documentation. If during this process you are asked for the 101 distribution documentation. If during this process you are asked for the
@@ -103,7 +103,7 @@ Additional Configurations
103 PRO/100 Family of Adapters is e100. 103 PRO/100 Family of Adapters is e100.
104 104
105 As an example, if you install the e100 driver for two PRO/100 adapters 105 As an example, if you install the e100 driver for two PRO/100 adapters
106 (eth0 and eth1), add the following to modules.conf or modprobe.conf: 106 (eth0 and eth1), add the following to a configuration file in /etc/modprobe.d/:
107 107
108 alias eth0 e100 108 alias eth0 e100
109 alias eth1 e100 109 alias eth1 e100
diff --git a/Documentation/networking/ipv6.txt b/Documentation/networking/ipv6.txt
index 9fd7e21296c8..6cd74fa55358 100644
--- a/Documentation/networking/ipv6.txt
+++ b/Documentation/networking/ipv6.txt
@@ -2,9 +2,9 @@
2Options for the ipv6 module are supplied as parameters at load time. 2Options for the ipv6 module are supplied as parameters at load time.
3 3
4Module options may be given as command line arguments to the insmod 4Module options may be given as command line arguments to the insmod
5or modprobe command, but are usually specified in either the 5or modprobe command, but are usually specified in either
6/etc/modules.conf or /etc/modprobe.conf configuration file, or in a 6/etc/modprobe.d/*.conf configuration files, or in a distro-specific
7distro-specific configuration file. 7configuration file.
8 8
9The available ipv6 module parameters are listed below. If a parameter 9The available ipv6 module parameters are listed below. If a parameter
10is not specified the default value is used. 10is not specified the default value is used.
diff --git a/Documentation/networking/ixgb.txt b/Documentation/networking/ixgb.txt
index e196f16df313..d75a1f9565bb 100644
--- a/Documentation/networking/ixgb.txt
+++ b/Documentation/networking/ixgb.txt
@@ -274,9 +274,9 @@ Additional Configurations
274 ------------------------------------------------- 274 -------------------------------------------------
275 Configuring a network driver to load properly when the system is started is 275 Configuring a network driver to load properly when the system is started is
276 distribution dependent. Typically, the configuration process involves adding 276 distribution dependent. Typically, the configuration process involves adding
277 an alias line to /etc/modprobe.conf as well as editing other system startup 277 an alias line to files in /etc/modprobe.d/ as well as editing other system
278 scripts and/or configuration files. Many popular Linux distributions ship 278 startup scripts and/or configuration files. Many popular Linux distributions
279 with tools to make these changes for you. To learn the proper way to 279 ship with tools to make these changes for you. To learn the proper way to
280 configure a network device for your system, refer to your distribution 280 configure a network device for your system, refer to your distribution
281 documentation. If during this process you are asked for the driver or module 281 documentation. If during this process you are asked for the driver or module
282 name, the name for the Linux Base Driver for the Intel 10GbE Family of 282 name, the name for the Linux Base Driver for the Intel 10GbE Family of
diff --git a/Documentation/networking/ltpc.txt b/Documentation/networking/ltpc.txt
index fe2a9129d959..0bf3220c715b 100644
--- a/Documentation/networking/ltpc.txt
+++ b/Documentation/networking/ltpc.txt
@@ -25,7 +25,7 @@ the driver will try to determine them itself.
25 25
26If you load the driver as a module, you can pass the parameters "io=", 26If you load the driver as a module, you can pass the parameters "io=",
27"irq=", and "dma=" on the command line with insmod or modprobe, or add 27"irq=", and "dma=" on the command line with insmod or modprobe, or add
28them as options in /etc/modprobe.conf: 28them as options in a configuration file in the /etc/modprobe.d/ directory:
29 29
30 alias lt0 ltpc # autoload the module when the interface is configured 30 alias lt0 ltpc # autoload the module when the interface is configured
31 options ltpc io=0x240 irq=9 dma=1 31 options ltpc io=0x240 irq=9 dma=1
diff --git a/Documentation/networking/vortex.txt b/Documentation/networking/vortex.txt
index bd70976b8160..b4038ffb3bc5 100644
--- a/Documentation/networking/vortex.txt
+++ b/Documentation/networking/vortex.txt
@@ -67,8 +67,8 @@ Module parameters
67================= 67=================
68 68
69There are several parameters which may be provided to the driver when 69There are several parameters which may be provided to the driver when
70its module is loaded. These are usually placed in /etc/modprobe.conf 70its module is loaded. These are usually placed in /etc/modprobe.d/*.conf
71(/etc/modules.conf in 2.4). Example: 71configuration files. Example:
72 72
73options 3c59x debug=3 rx_copybreak=300 73options 3c59x debug=3 rx_copybreak=300
74 74
@@ -425,7 +425,7 @@ steps you should take:
425 1) Increase the debug level. Usually this is done via: 425 1) Increase the debug level. Usually this is done via:
426 426
427 a) modprobe driver debug=7 427 a) modprobe driver debug=7
428 b) In /etc/modprobe.conf (or /etc/modules.conf for 2.4): 428 b) In /etc/modprobe.d/driver.conf:
429 options driver debug=7 429 options driver debug=7
430 430
431 2) Recreate the problem with the higher debug level, 431 2) Recreate the problem with the higher debug level,
diff --git a/Documentation/parport.txt b/Documentation/parport.txt
index 93a7ceef398d..c208e4366c03 100644
--- a/Documentation/parport.txt
+++ b/Documentation/parport.txt
@@ -36,18 +36,17 @@ addresses should not be specified for supported PCI cards since they
36are automatically detected. 36are automatically detected.
37 37
38 38
39KMod 39modprobe
40---- 40--------
41 41
42If you use kmod, you will find it useful to edit /etc/modprobe.conf. 42If you use modprobe, you will find it useful to add lines as below to a
43Here is an example of the lines that need to be added: 43configuration file in the /etc/modprobe.d/ directory:
44 44
45 alias parport_lowlevel parport_pc 45 alias parport_lowlevel parport_pc
46 options parport_pc io=0x378,0x278 irq=7,auto 46 options parport_pc io=0x378,0x278 irq=7,auto
47 47
48KMod will then automatically load parport_pc (with the options 48modprobe will load parport_pc (with the options "io=0x378,0x278 irq=7,auto")
49"io=0x378,0x278 irq=7,auto") whenever a parallel port device driver 49whenever a parallel port device driver (such as lp) is loaded.
50(such as lp) is loaded.
51 50
52Note that these are example lines only! You shouldn't in general need 51Note that these are example lines only! You shouldn't in general need
53to specify any options to parport_pc in order to be able to use a 52to specify any options to parport_pc in order to be able to use a
diff --git a/Documentation/s390/3270.txt b/Documentation/s390/3270.txt
index 7a5c73a7ed7f..7c715de99774 100644
--- a/Documentation/s390/3270.txt
+++ b/Documentation/s390/3270.txt
@@ -47,9 +47,9 @@ including the console 3270, changes subchannel identifier relative to
47one another. ReIPL as soon as possible after running the configuration 47one another. ReIPL as soon as possible after running the configuration
48script and the resulting /tmp/mkdev3270. 48script and the resulting /tmp/mkdev3270.
49 49
50If you have chosen to make tub3270 a module, you add a line to 50If you have chosen to make tub3270 a module, you add a line to a
51/etc/modprobe.conf. If you are working on a VM virtual machine, you 51configuration file under /etc/modprobe.d/. If you are working on a VM
52can use DEF GRAF to define virtual 3270 devices. 52virtual machine, you can use DEF GRAF to define virtual 3270 devices.
53 53
54You may generate both 3270 and 3215 console support, or one or the 54You may generate both 3270 and 3215 console support, or one or the
55other, or neither. If you generate both, the console type under VM is 55other, or neither. If you generate both, the console type under VM is
@@ -60,7 +60,7 @@ at boot time to a 3270 if it is a 3215.
60 60
61In brief, these are the steps: 61In brief, these are the steps:
62 1. Install the tub3270 patch 62 1. Install the tub3270 patch
63 2. (If a module) add a line to /etc/modprobe.conf 63 2. (If a module) add a line to a file in /etc/modprobe.d/*.conf
64 3. (If VM) define devices with DEF GRAF 64 3. (If VM) define devices with DEF GRAF
65 4. Reboot 65 4. Reboot
66 5. Configure 66 5. Configure
@@ -84,13 +84,12 @@ Here are the installation steps in detail:
84 make modules_install 84 make modules_install
85 85
86 2. (Perform this step only if you have configured tub3270 as a 86 2. (Perform this step only if you have configured tub3270 as a
 87     module.) Add a line to /etc/modprobe.conf to automatically       87     module.) Add a line to a /etc/modprobe.d/*.conf file to automatically
88 load the driver when it's needed. With this line added, 88 load the driver when it's needed. With this line added, you will see
89 you will see login prompts appear on your 3270s as soon as 89 login prompts appear on your 3270s as soon as boot is complete (or
90 boot is complete (or with emulated 3270s, as soon as you dial 90 with emulated 3270s, as soon as you dial into your vm guest using the
91 into your vm guest using the command "DIAL <vmguestname>"). 91 command "DIAL <vmguestname>"). Since the line-mode major number is
92 Since the line-mode major number is 227, the line to add to 92 227, the line to add should be:
93 /etc/modprobe.conf should be:
94 alias char-major-227 tub3270 93 alias char-major-227 tub3270
95 94
96 3. Define graphic devices to your vm guest machine, if you 95 3. Define graphic devices to your vm guest machine, if you
diff --git a/Documentation/scsi/00-INDEX b/Documentation/scsi/00-INDEX
index b48ded55b555..b7dd6502bec5 100644
--- a/Documentation/scsi/00-INDEX
+++ b/Documentation/scsi/00-INDEX
@@ -94,3 +94,5 @@ sym53c8xx_2.txt
94 - info on second generation driver for sym53c8xx based adapters 94 - info on second generation driver for sym53c8xx based adapters
95tmscsim.txt 95tmscsim.txt
96 - info on driver for AM53c974 based adapters 96 - info on driver for AM53c974 based adapters
97ufs.txt
98 - info on Universal Flash Storage(UFS) and UFS host controller driver.
diff --git a/Documentation/scsi/aic79xx.txt b/Documentation/scsi/aic79xx.txt
index 64ac7093c872..e2d3273000d4 100644
--- a/Documentation/scsi/aic79xx.txt
+++ b/Documentation/scsi/aic79xx.txt
@@ -215,7 +215,7 @@ The following information is available in this file:
215 INCORRECTLY CAN RENDER YOUR SYSTEM INOPERABLE. 215 INCORRECTLY CAN RENDER YOUR SYSTEM INOPERABLE.
216 USE THEM WITH CAUTION. 216 USE THEM WITH CAUTION.
217 217
218 Edit the file "modprobe.conf" in the directory /etc and add/edit a 218 Put a .conf file in the /etc/modprobe.d/ directory and add/edit a
219 line containing 'options aic79xx aic79xx=[command[,command...]]' where 219 line containing 'options aic79xx aic79xx=[command[,command...]]' where
220 'command' is one or more of the following: 220 'command' is one or more of the following:
221 ----------------------------------------------------------------- 221 -----------------------------------------------------------------
diff --git a/Documentation/scsi/aic7xxx.txt b/Documentation/scsi/aic7xxx.txt
index 18f8d1905e6a..7c5d0223d444 100644
--- a/Documentation/scsi/aic7xxx.txt
+++ b/Documentation/scsi/aic7xxx.txt
@@ -190,7 +190,7 @@ The following information is available in this file:
190 INCORRECTLY CAN RENDER YOUR SYSTEM INOPERABLE. 190 INCORRECTLY CAN RENDER YOUR SYSTEM INOPERABLE.
191 USE THEM WITH CAUTION. 191 USE THEM WITH CAUTION.
192 192
193 Edit the file "modprobe.conf" in the directory /etc and add/edit a 193 Put a .conf file in the /etc/modprobe.d directory and add/edit a
194 line containing 'options aic7xxx aic7xxx=[command[,command...]]' where 194 line containing 'options aic7xxx aic7xxx=[command[,command...]]' where
195 'command' is one or more of the following: 195 'command' is one or more of the following:
196 ----------------------------------------------------------------- 196 -----------------------------------------------------------------
diff --git a/Documentation/scsi/osst.txt b/Documentation/scsi/osst.txt
index ad86c6d1e898..00c8ebb2fd18 100644
--- a/Documentation/scsi/osst.txt
+++ b/Documentation/scsi/osst.txt
@@ -66,7 +66,7 @@ recognized.
66If you want to have the module autoloaded on access to /dev/osst, you may 66If you want to have the module autoloaded on access to /dev/osst, you may
67add something like 67add something like
68alias char-major-206 osst 68alias char-major-206 osst
 69to your /etc/modprobe.conf (before 2.6: modules.conf).        69to a file under the /etc/modprobe.d/ directory.
70 70
71You may find it convenient to create a symbolic link 71You may find it convenient to create a symbolic link
72ln -s nosst0 /dev/tape 72ln -s nosst0 /dev/tape
diff --git a/Documentation/scsi/st.txt b/Documentation/scsi/st.txt
index 691ca292c24d..685bf3582abe 100644
--- a/Documentation/scsi/st.txt
+++ b/Documentation/scsi/st.txt
@@ -390,6 +390,10 @@ MTSETDRVBUFFER
390 MT_ST_SYSV sets the SYSV semantics (mode) 390 MT_ST_SYSV sets the SYSV semantics (mode)
391 MT_ST_NOWAIT enables immediate mode (i.e., don't wait for 391 MT_ST_NOWAIT enables immediate mode (i.e., don't wait for
392 the command to finish) for some commands (e.g., rewind) 392 the command to finish) for some commands (e.g., rewind)
393 MT_ST_NOWAIT_EOF enables immediate filemark mode (i.e. when
394 writing a filemark, don't wait for it to complete). Please
395 see the BASICS note about MTWEOFI with respect to the
396 possible dangers of writing immediate filemarks.
393 MT_ST_SILI enables setting the SILI bit in SCSI commands when 397 MT_ST_SILI enables setting the SILI bit in SCSI commands when
394 reading in variable block mode to enhance performance when 398 reading in variable block mode to enhance performance when
395 reading blocks shorter than the byte count; set this only 399 reading blocks shorter than the byte count; set this only
diff --git a/Documentation/scsi/ufs.txt b/Documentation/scsi/ufs.txt
new file mode 100644
index 000000000000..41a6164592aa
--- /dev/null
+++ b/Documentation/scsi/ufs.txt
@@ -0,0 +1,133 @@
1 Universal Flash Storage
2 =======================
3
4
5Contents
6--------
7
81. Overview
92. UFS Architecture Overview
10 2.1 Application Layer
11 2.2 UFS Transport Protocol(UTP) layer
12 2.3 UFS Interconnect(UIC) Layer
133. UFSHCD Overview
14 3.1 UFS controller initialization
15 3.2 UTP Transfer requests
16 3.3 UFS error handling
17 3.4 SCSI Error handling
18
19
201. Overview
21-----------
22
23Universal Flash Storage(UFS) is a storage specification for flash devices.
24It is aimed to provide a universal storage interface for both
25embedded and removable flash memory based storage in mobile
26devices such as smart phones and tablet computers. The specification
27is defined by JEDEC Solid State Technology Association. UFS is based
28on MIPI M-PHY physical layer standard. UFS uses MIPI M-PHY as the
29physical layer and MIPI Unipro as the link layer.
30
 31The main goals of UFS are to provide,
32 * Optimized performance:
33 For UFS version 1.0 and 1.1 the target performance is as follows,
34 Support for Gear1 is mandatory (rate A: 1248Mbps, rate B: 1457.6Mbps)
35 Support for Gear2 is optional (rate A: 2496Mbps, rate B: 2915.2Mbps)
36 Future version of the standard,
37 Gear3 (rate A: 4992Mbps, rate B: 5830.4Mbps)
38 * Low power consumption
39 * High random IOPs and low latency
40
41
422. UFS Architecture Overview
43----------------------------
44
45UFS has a layered communication architecture which is based on SCSI
46SAM-5 architectural model.
47
48UFS communication architecture consists of following layers,
49
502.1 Application Layer
51
52 The Application layer is composed of UFS command set layer(UCS),
53 Task Manager and Device manager. The UFS interface is designed to be
54 protocol agnostic, however SCSI has been selected as a baseline
55 protocol for versions 1.0 and 1.1 of UFS protocol layer.
56 UFS supports subset of SCSI commands defined by SPC-4 and SBC-3.
57 * UCS: It handles SCSI commands supported by UFS specification.
58 * Task manager: It handles task management functions defined by the
59 UFS which are meant for command queue control.
60 * Device manager: It handles device level operations and device
61 configuration operations. Device level operations mainly involve
62 device power management operations and commands to Interconnect
63 layers. Device level configurations involve handling of query
64 requests which are used to modify and retrieve configuration
65 information of the device.
66
672.2 UFS Transport Protocol(UTP) layer
68
69 UTP layer provides services for
70 the higher layers through Service Access Points. UTP defines 3
71 service access points for higher layers.
72 * UDM_SAP: Device manager service access point is exposed to device
73 manager for device level operations. These device level operations
74 are done through query requests.
75 * UTP_CMD_SAP: Command service access point is exposed to UFS command
76 set layer(UCS) to transport commands.
77 * UTP_TM_SAP: Task management service access point is exposed to task
78 manager to transport task management functions.
79 UTP transports messages through UFS protocol information unit(UPIU).
80
812.3 UFS Interconnect(UIC) Layer
82
83 UIC is the lowest layer of UFS layered architecture. It handles
84 connection between UFS host and UFS device. UIC consists of
85 MIPI UniPro and MIPI M-PHY. UIC provides 2 service access points
86 to upper layer,
87 * UIC_SAP: To transport UPIU between UFS host and UFS device.
88 * UIO_SAP: To issue commands to Unipro layers.
89
90
913. UFSHCD Overview
92------------------
93
94The UFS host controller driver is based on Linux SCSI Framework.
95UFSHCD is a low level device driver which acts as an interface between
96SCSI Midlayer and PCIe based UFS host controllers.
97
98The current UFSHCD implementation supports following functionality,
99
1003.1 UFS controller initialization
101
102 The initialization module brings UFS host controller to active state
103 and prepares the controller to transfer commands/response between
104 UFSHCD and UFS device.
105
1063.2 UTP Transfer requests
107
108 Transfer request handling module of UFSHCD receives SCSI commands
109 from SCSI Midlayer, forms UPIUs and issues the UPIUs to UFS Host
 110	controller. Also, the module decodes responses received from UFS
 111	host controller in the form of UPIUs and informs the SCSI Midlayer
112 of the status of the command.
113
1143.3 UFS error handling
115
116 Error handling module handles Host controller fatal errors,
117 Device fatal errors and UIC interconnect layer related errors.
118
1193.4 SCSI Error handling
120
121 This is done through UFSHCD SCSI error handling routines registered
122 with SCSI Midlayer. Examples of some of the error handling commands
 123	issued by SCSI Midlayer are Abort task, Lun reset and host reset.
124 UFSHCD Routines to perform these tasks are registered with
125 SCSI Midlayer through .eh_abort_handler, .eh_device_reset_handler and
126 .eh_host_reset_handler.
127
128In this version of UFSHCD Query requests and power management
129functionality are not implemented.
130
131UFS Specifications can be found at,
132UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
133UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf
diff --git a/Documentation/serial/computone.txt b/Documentation/serial/computone.txt
index 39ddcdbeeb85..a6a1158ea2ba 100644
--- a/Documentation/serial/computone.txt
+++ b/Documentation/serial/computone.txt
@@ -49,7 +49,7 @@ Hardware - If you have an ISA card, find a free interrupt and io port.
49 49
50 Note the hardware address from the Computone ISA cards installed into 50 Note the hardware address from the Computone ISA cards installed into
51 the system. These are required for editing ip2.c or editing 51 the system. These are required for editing ip2.c or editing
52 /etc/modprobe.conf, or for specification on the modprobe 52 /etc/modprobe.d/*.conf, or for specification on the modprobe
53 command line. 53 command line.
54 54
55 Note that the /etc/modules.conf should be used for older (pre-2.6) 55 Note that the /etc/modules.conf should be used for older (pre-2.6)
@@ -66,7 +66,7 @@ b) Run "make config" or "make menuconfig" or "make xconfig"
66c) Set address on ISA cards then: 66c) Set address on ISA cards then:
67 edit /usr/src/linux/drivers/char/ip2.c if needed 67 edit /usr/src/linux/drivers/char/ip2.c if needed
68 or 68 or
69 edit /etc/modprobe.conf if needed (module). 69 edit config file in /etc/modprobe.d/ if needed (module).
70 or both to match this setting. 70 or both to match this setting.
71d) Run "make modules" 71d) Run "make modules"
72e) Run "make modules_install" 72e) Run "make modules_install"
@@ -153,11 +153,11 @@ the irqs are not specified the driver uses the default in ip2.c (which
153selects polled mode). If no base addresses are specified the defaults in 153selects polled mode). If no base addresses are specified the defaults in
154ip2.c are used. If you are autoloading the driver module with kerneld or 154ip2.c are used. If you are autoloading the driver module with kerneld or
155kmod the base addresses and interrupt number must also be set in ip2.c 155kmod the base addresses and interrupt number must also be set in ip2.c
156and recompile or just insert and options line in /etc/modprobe.conf or both. 156and recompile or just insert and options line in /etc/modprobe.d/*.conf or both.
157The options line is equivalent to the command line and takes precedence over 157The options line is equivalent to the command line and takes precedence over
158what is in ip2.c. 158what is in ip2.c.
159 159
 160/etc/modprobe.conf sample:     160sample config to put in /etc/modprobe.d/*.conf:
161 options ip2 io=1,0x328 irq=1,10 161 options ip2 io=1,0x328 irq=1,10
162 alias char-major-71 ip2 162 alias char-major-71 ip2
163 alias char-major-72 ip2 163 alias char-major-72 ip2
diff --git a/Documentation/serial/rocket.txt b/Documentation/serial/rocket.txt
index 1d8582990435..60b039891057 100644
--- a/Documentation/serial/rocket.txt
+++ b/Documentation/serial/rocket.txt
@@ -62,7 +62,7 @@ in the system log at /var/log/messages.
62 62
63If installed as a module, the module must be loaded. This can be done 63If installed as a module, the module must be loaded. This can be done
64manually by entering "modprobe rocket". To have the module loaded automatically 64manually by entering "modprobe rocket". To have the module loaded automatically
65upon system boot, edit the /etc/modprobe.conf file and add the line 65upon system boot, edit a /etc/modprobe.d/*.conf file and add the line
66"alias char-major-46 rocket". 66"alias char-major-46 rocket".
67 67
68In order to use the ports, their device names (nodes) must be created with mknod. 68In order to use the ports, their device names (nodes) must be created with mknod.
diff --git a/Documentation/serial/stallion.txt b/Documentation/serial/stallion.txt
index 5c4902d9a5be..55090914a9c5 100644
--- a/Documentation/serial/stallion.txt
+++ b/Documentation/serial/stallion.txt
@@ -139,8 +139,8 @@ secondary address 0x280 and IRQ 10.
139 139
140You will probably want to enter this module load and configuration information 140You will probably want to enter this module load and configuration information
141into your system startup scripts so that the drivers are loaded and configured 141into your system startup scripts so that the drivers are loaded and configured
142on each system boot. Typically the start up script would be something like 142on each system boot. Typically configuration files are put in the
143/etc/modprobe.conf. 143/etc/modprobe.d/ directory.
144 144
145 145
1462.2 STATIC DRIVER CONFIGURATION: 1462.2 STATIC DRIVER CONFIGURATION:
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 6f75ba3b8a39..8c16d50f6cb6 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -2044,7 +2044,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
2044 Install the necessary firmware files in alsa-firmware package. 2044 Install the necessary firmware files in alsa-firmware package.
2045 When no hotplug fw loader is available, you need to load the 2045 When no hotplug fw loader is available, you need to load the
2046 firmware via vxloader utility in alsa-tools package. To invoke 2046 firmware via vxloader utility in alsa-tools package. To invoke
2047 vxloader automatically, add the following to /etc/modprobe.conf 2047 vxloader automatically, add the following to /etc/modprobe.d/alsa.conf
2048 2048
2049 install snd-vx222 /sbin/modprobe --first-time -i snd-vx222 && /usr/bin/vxloader 2049 install snd-vx222 /sbin/modprobe --first-time -i snd-vx222 && /usr/bin/vxloader
2050 2050
@@ -2168,10 +2168,10 @@ corresponds to the card index of ALSA. Usually, define this
2168as the same card module. 2168as the same card module.
2169 2169
2170An example configuration for a single emu10k1 card is like below: 2170An example configuration for a single emu10k1 card is like below:
2171----- /etc/modprobe.conf 2171----- /etc/modprobe.d/alsa.conf
2172alias snd-card-0 snd-emu10k1 2172alias snd-card-0 snd-emu10k1
2173alias sound-slot-0 snd-emu10k1 2173alias sound-slot-0 snd-emu10k1
2174----- /etc/modprobe.conf 2174----- /etc/modprobe.d/alsa.conf
2175 2175
2176The available number of auto-loaded sound cards depends on the module 2176The available number of auto-loaded sound cards depends on the module
2177option "cards_limit" of snd module. As default it's set to 1. 2177option "cards_limit" of snd module. As default it's set to 1.
@@ -2184,7 +2184,7 @@ cards is kept consistent.
2184 2184
2185An example configuration for two sound cards is like below: 2185An example configuration for two sound cards is like below:
2186 2186
2187----- /etc/modprobe.conf 2187----- /etc/modprobe.d/alsa.conf
2188# ALSA portion 2188# ALSA portion
2189options snd cards_limit=2 2189options snd cards_limit=2
2190alias snd-card-0 snd-interwave 2190alias snd-card-0 snd-interwave
@@ -2194,7 +2194,7 @@ options snd-ens1371 index=1
2194# OSS/Free portion 2194# OSS/Free portion
2195alias sound-slot-0 snd-interwave 2195alias sound-slot-0 snd-interwave
2196alias sound-slot-1 snd-ens1371 2196alias sound-slot-1 snd-ens1371
2197----- /etc/modprobe.conf 2197----- /etc/modprobe.d/alsa.conf
2198 2198
2199In this example, the interwave card is always loaded as the first card 2199In this example, the interwave card is always loaded as the first card
2200(index 0) and ens1371 as the second (index 1). 2200(index 0) and ens1371 as the second (index 1).
diff --git a/Documentation/sound/alsa/Audiophile-Usb.txt b/Documentation/sound/alsa/Audiophile-Usb.txt
index a4c53d8961e1..654dd3b694a8 100644
--- a/Documentation/sound/alsa/Audiophile-Usb.txt
+++ b/Documentation/sound/alsa/Audiophile-Usb.txt
@@ -232,7 +232,7 @@ The parameter can be given:
232 # modprobe snd-usb-audio index=1 device_setup=0x09 232 # modprobe snd-usb-audio index=1 device_setup=0x09
233 233
234 * Or while configuring the modules options in your modules configuration file 234 * Or while configuring the modules options in your modules configuration file
 235	 - For Fedora distributions, edit the /etc/modprobe.conf file:  235	   (typically a .conf file in the /etc/modprobe.d/ directory):
236 alias snd-card-1 snd-usb-audio 236 alias snd-card-1 snd-usb-audio
237 options snd-usb-audio index=1 device_setup=0x09 237 options snd-usb-audio index=1 device_setup=0x09
238 238
@@ -253,7 +253,7 @@ CAUTION when initializing the device
253 - first turn off the device 253 - first turn off the device
254 - de-register the snd-usb-audio module (modprobe -r) 254 - de-register the snd-usb-audio module (modprobe -r)
255 - change the device_setup parameter by changing the device_setup 255 - change the device_setup parameter by changing the device_setup
256 option in /etc/modprobe.conf 256 option in /etc/modprobe.d/*.conf
257 - turn on the device 257 - turn on the device
258 * A workaround for this last issue has been applied to kernel 2.6.23, but it may not 258 * A workaround for this last issue has been applied to kernel 2.6.23, but it may not
259 be enough to ensure the 'stability' of the device initialization. 259 be enough to ensure the 'stability' of the device initialization.
diff --git a/Documentation/sound/alsa/MIXART.txt b/Documentation/sound/alsa/MIXART.txt
index ef42c44fa1f2..4ee35b4fbe4a 100644
--- a/Documentation/sound/alsa/MIXART.txt
+++ b/Documentation/sound/alsa/MIXART.txt
@@ -76,9 +76,9 @@ FIRMWARE
76 when CONFIG_FW_LOADER is set. The mixartloader is necessary only 76 when CONFIG_FW_LOADER is set. The mixartloader is necessary only
77 for older versions or when you build the driver into kernel.] 77 for older versions or when you build the driver into kernel.]
78 78
 79For loading the firmware automatically after the module is loaded, use 79For loading the firmware automatically after the module is loaded, use an
80the post-install command. For example, add the following entry to 80install command. For example, add the following entry to
81/etc/modprobe.conf for miXart driver: 81/etc/modprobe.d/mixart.conf for miXart driver:
82 82
83 install snd-mixart /sbin/modprobe --first-time -i snd-mixart && \ 83 install snd-mixart /sbin/modprobe --first-time -i snd-mixart && \
84 /usr/bin/mixartloader 84 /usr/bin/mixartloader
diff --git a/Documentation/sound/alsa/OSS-Emulation.txt b/Documentation/sound/alsa/OSS-Emulation.txt
index 022aaeb0e9dd..152ca2a3f1bd 100644
--- a/Documentation/sound/alsa/OSS-Emulation.txt
+++ b/Documentation/sound/alsa/OSS-Emulation.txt
@@ -19,7 +19,7 @@ the card number and the minor unit number. Usually you don't have to
19define these aliases by yourself. 19define these aliases by yourself.
20 20
21Only necessary step for auto-loading of OSS modules is to define the 21Only necessary step for auto-loading of OSS modules is to define the
22card alias in /etc/modprobe.conf, such as 22card alias in /etc/modprobe.d/alsa.conf, such as
23 23
24 alias sound-slot-0 snd-emu10k1 24 alias sound-slot-0 snd-emu10k1
25 25
diff --git a/Documentation/sound/oss/AudioExcelDSP16 b/Documentation/sound/oss/AudioExcelDSP16
index e0dc0641b480..ea8549faede9 100644
--- a/Documentation/sound/oss/AudioExcelDSP16
+++ b/Documentation/sound/oss/AudioExcelDSP16
@@ -41,7 +41,7 @@ mpu_base I/O base address for activate MPU-401 mode
41 (0x300, 0x310, 0x320 or 0x330) 41 (0x300, 0x310, 0x320 or 0x330)
42mpu_irq MPU-401 irq line (5, 7, 9, 10 or 0) 42mpu_irq MPU-401 irq line (5, 7, 9, 10 or 0)
43 43
 44The /etc/modprobe.conf will have lines like this:      44A configuration file in the /etc/modprobe.d/ directory will have lines like this:
45 45
46options opl3 io=0x388 46options opl3 io=0x388
47options ad1848 io=0x530 irq=11 dma=3 47options ad1848 io=0x530 irq=11 dma=3
@@ -51,11 +51,11 @@ Where the aedsp16 options are the options for this driver while opl3 and
51ad1848 are the corresponding options for the MSS and OPL3 modules. 51ad1848 are the corresponding options for the MSS and OPL3 modules.
52 52
53Loading MSS and OPL3 needs to pre load the aedsp16 module to set up correctly 53Loading MSS and OPL3 needs to pre load the aedsp16 module to set up correctly
54the sound card. Installation dependencies must be written in the modprobe.conf 54the sound card. Installation dependencies must be written in configuration
55file: 55files under /etc/modprobe.d/ directory:
56 56
57install ad1848 /sbin/modprobe aedsp16 && /sbin/modprobe -i ad1848 57softdep ad1848 pre: aedsp16
58install opl3 /sbin/modprobe aedsp16 && /sbin/modprobe -i opl3 58softdep opl3 pre: aedsp16
59 59
60Then you must load the sound modules stack in this order: 60Then you must load the sound modules stack in this order:
61sound -> aedsp16 -> [ ad1848, opl3 ] 61sound -> aedsp16 -> [ ad1848, opl3 ]
diff --git a/Documentation/sound/oss/CMI8330 b/Documentation/sound/oss/CMI8330
index 9c439f1a6dba..8a5fd1611c6f 100644
--- a/Documentation/sound/oss/CMI8330
+++ b/Documentation/sound/oss/CMI8330
@@ -143,11 +143,10 @@ CONFIG_SOUND_MSS=m
143 143
144 144
145 145
146Alma Chao <elysian@ethereal.torsion.org> suggests the following /etc/modprobe.conf: 146Alma Chao <elysian@ethereal.torsion.org> suggests the following in
 147a /etc/modprobe.d/*.conf file:
147 148
148alias sound ad1848 149alias sound ad1848
149alias synth0 opl3 150alias synth0 opl3
150options ad1848 io=0x530 irq=7 dma=0 soundpro=1 151options ad1848 io=0x530 irq=7 dma=0 soundpro=1
151options opl3 io=0x388 152options opl3 io=0x388
152
153
diff --git a/Documentation/sound/oss/Introduction b/Documentation/sound/oss/Introduction
index 75d967ff9266..42da2d8fa372 100644
--- a/Documentation/sound/oss/Introduction
+++ b/Documentation/sound/oss/Introduction
@@ -167,8 +167,8 @@ in a file such as /root/soundon.sh.
167MODPROBE: 167MODPROBE:
168========= 168=========
169 169
170If loading via modprobe, these common files are automatically loaded 170If loading via modprobe, these common files are automatically loaded when
171when requested by modprobe. For example, my /etc/modprobe.conf contains: 171requested by modprobe. For example, my /etc/modprobe.d/oss.conf contains:
172 172
173alias sound sb 173alias sound sb
174options sb io=0x240 irq=9 dma=3 dma16=5 mpu_io=0x300 174options sb io=0x240 irq=9 dma=3 dma16=5 mpu_io=0x300
@@ -228,7 +228,7 @@ http://www.opensound.com. Before loading the commercial sound
228driver, you should do the following: 228driver, you should do the following:
229 229
2301. remove sound modules (detailed above) 2301. remove sound modules (detailed above)
2312. remove the sound modules from /etc/modprobe.conf 2312. remove the sound modules from /etc/modprobe.d/*.conf
2323. move the sound modules from /lib/modules/<kernel>/misc 2323. move the sound modules from /lib/modules/<kernel>/misc
233 (for example, I make a /lib/modules/<kernel>/misc/tmp 233 (for example, I make a /lib/modules/<kernel>/misc/tmp
234 directory and copy the sound module files to that 234 directory and copy the sound module files to that
@@ -265,7 +265,7 @@ twice, you need to do the following:
265 sb.o could be copied (or symlinked) to sb1.o for the 265 sb.o could be copied (or symlinked) to sb1.o for the
266 second SoundBlaster. 266 second SoundBlaster.
267 267
 2682. Make a second entry in /etc/modprobe.conf, for example,   2682. Make a second entry in /etc/modprobe.d/*.conf, for example,
269 sound1 or sb1. This second entry should refer to the 269 sound1 or sb1. This second entry should refer to the
270 new module names for example sb1, and should include 270 new module names for example sb1, and should include
271 the I/O, etc. for the second sound card. 271 the I/O, etc. for the second sound card.
@@ -369,7 +369,7 @@ There are several ways of configuring your sound:
3692) On the command line when using insmod or in a bash script 3692) On the command line when using insmod or in a bash script
370 using command line calls to load sound. 370 using command line calls to load sound.
371 371
 3723) In /etc/modprobe.conf when using modprobe.        3723) In /etc/modprobe.d/*.conf when using modprobe.
373 373
3744) Via Red Hat's GPL'd /usr/sbin/sndconfig program (text based). 3744) Via Red Hat's GPL'd /usr/sbin/sndconfig program (text based).
375 375
diff --git a/Documentation/sound/oss/Opti b/Documentation/sound/oss/Opti
index c15af3c07d46..4cd5d9ab3580 100644
--- a/Documentation/sound/oss/Opti
+++ b/Documentation/sound/oss/Opti
@@ -18,7 +18,7 @@ force the card into a mode in which it can be programmed.
18If you have another OS installed on your computer it is recommended 18If you have another OS installed on your computer it is recommended
19that Linux and the other OS use the same resources. 19that Linux and the other OS use the same resources.
20 20
21Also, it is recommended that resources specified in /etc/modprobe.conf 21Also, it is recommended that resources specified in /etc/modprobe.d/*.conf
22and resources specified in /etc/isapnp.conf agree. 22and resources specified in /etc/isapnp.conf agree.
23 23
24Compiling the sound driver 24Compiling the sound driver
@@ -67,11 +67,7 @@ address is hard-coded into the driver.
67 67
68Using kmod and autoloading the sound driver 68Using kmod and autoloading the sound driver
69------------------------------------------- 69-------------------------------------------
70Comment: as of linux-2.1.90 kmod is replacing kerneld. 70Config files in '/etc/modprobe.d/' are used as below:
71The config file '/etc/modprobe.conf' is used as before.
72
73This is the sound part of my /etc/modprobe.conf file.
74Following that I will explain each line.
75 71
76alias mixer0 mad16 72alias mixer0 mad16
77alias audio0 mad16 73alias audio0 mad16
diff --git a/Documentation/sound/oss/PAS16 b/Documentation/sound/oss/PAS16
index 3dca4b75988e..5c27229eec8c 100644
--- a/Documentation/sound/oss/PAS16
+++ b/Documentation/sound/oss/PAS16
@@ -128,7 +128,7 @@ CONFIG_SOUND_YM3812
128 You can then get OPL3 functionality by issuing the command: 128 You can then get OPL3 functionality by issuing the command:
129 insmod opl3 129 insmod opl3
130 In addition, you must either add the following line to 130 In addition, you must either add the following line to
131 /etc/modprobe.conf: 131 /etc/modprobe.d/*.conf:
132 options opl3 io=0x388 132 options opl3 io=0x388
133 or else add the following line to /etc/lilo.conf: 133 or else add the following line to /etc/lilo.conf:
134 opl3=0x388 134 opl3=0x388
@@ -158,5 +158,5 @@ following line would be appropriate:
158append="pas2=0x388,10,3,-1,0,-1,-1,-1 opl3=0x388" 158append="pas2=0x388,10,3,-1,0,-1,-1,-1 opl3=0x388"
159 159
160If sound is built totally modular, the above options may be 160If sound is built totally modular, the above options may be
161specified in /etc/modprobe.conf for pas2, sb and opl3 161specified in /etc/modprobe.d/*.conf for pas2, sb and opl3
162respectively. 162respectively.
diff --git a/Documentation/sound/oss/README.modules b/Documentation/sound/oss/README.modules
index e691d74e1e5e..cdc039421a46 100644
--- a/Documentation/sound/oss/README.modules
+++ b/Documentation/sound/oss/README.modules
@@ -26,7 +26,7 @@ Note that it is no longer necessary or possible to configure sound in the
26drivers/sound dir. Now one simply configures and makes one's kernel and 26drivers/sound dir. Now one simply configures and makes one's kernel and
27modules in the usual way. 27modules in the usual way.
28 28
29 Then, add to your /etc/modprobe.conf something like: 29 Then, add to your /etc/modprobe.d/oss.conf something like:
30 30
31alias char-major-14-* sb 31alias char-major-14-* sb
32install sb /sbin/modprobe -i sb && /sbin/modprobe adlib_card 32install sb /sbin/modprobe -i sb && /sbin/modprobe adlib_card
@@ -36,7 +36,7 @@ options adlib_card io=0x388 # FM synthesizer
36 Alternatively, if you have compiled in kernel level ISAPnP support: 36 Alternatively, if you have compiled in kernel level ISAPnP support:
37 37
38alias char-major-14 sb 38alias char-major-14 sb
39post-install sb /sbin/modprobe "-k" "adlib_card" 39softdep sb post: adlib_card
40options adlib_card io=0x388 40options adlib_card io=0x388
41 41
42 The effect of this is that the sound driver and all necessary bits and 42 The effect of this is that the sound driver and all necessary bits and
@@ -66,12 +66,12 @@ args are expected.
66 Note that at present there is no way to configure the io, irq and other 66 Note that at present there is no way to configure the io, irq and other
67parameters for the modular drivers as one does for the wired drivers.. One 67parameters for the modular drivers as one does for the wired drivers.. One
68needs to pass the modules the necessary parameters as arguments, either 68needs to pass the modules the necessary parameters as arguments, either
69with /etc/modprobe.conf or with command-line args to modprobe, e.g. 69with /etc/modprobe.d/*.conf or with command-line args to modprobe, e.g.
70 70
71modprobe sb io=0x220 irq=7 dma=1 dma16=5 mpu_io=0x330 71modprobe sb io=0x220 irq=7 dma=1 dma16=5 mpu_io=0x330
72modprobe adlib_card io=0x388 72modprobe adlib_card io=0x388
73 73
74 recommend using /etc/modprobe.conf. 74 recommend using /etc/modprobe.d/*.conf.
75 75
76Persistent DMA Buffers: 76Persistent DMA Buffers:
77 77
@@ -89,7 +89,7 @@ wasteful of RAM, but it guarantees that sound always works.
89 89
90To make the sound driver use persistent DMA buffers we need to pass the 90To make the sound driver use persistent DMA buffers we need to pass the
91sound.o module a "dmabuf=1" command-line argument. This is normally done 91sound.o module a "dmabuf=1" command-line argument. This is normally done
92in /etc/modprobe.conf like so: 92in /etc/modprobe.d/*.conf files like so:
93 93
94options sound dmabuf=1 94options sound dmabuf=1
95 95
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 312e3754e8c5..642f84495b29 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -241,9 +241,8 @@ command you are interested in.
241 241
242* I have more questions, who can I ask? 242* I have more questions, who can I ask?
243~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 243~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
244And I'll answer any questions about the registration system you got, also 244Just ask them on the linux-kernel mailing list:
245responding as soon as possible. 245 linux-kernel@vger.kernel.org
246 -Crutcher
247 246
248* Credits 247* Credits
249~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 248~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index 817df299ea07..4204eb01fd38 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -179,7 +179,8 @@ do:
179 179
180 modprobe usbcore autosuspend=5 180 modprobe usbcore autosuspend=5
181 181
182Equivalently, you could add to /etc/modprobe.conf a line saying: 182Equivalently, you could add to a configuration file in /etc/modprobe.d
183a line saying:
183 184
184 options usbcore autosuspend=5 185 options usbcore autosuspend=5
185 186
diff --git a/Documentation/video4linux/CQcam.txt b/Documentation/video4linux/CQcam.txt
index 8977e7ce4dab..6e680fec1e9c 100644
--- a/Documentation/video4linux/CQcam.txt
+++ b/Documentation/video4linux/CQcam.txt
@@ -61,29 +61,19 @@ But that is my personal preference.
612.2 Configuration 612.2 Configuration
62 62
63 The configuration requires module configuration and device 63 The configuration requires module configuration and device
64configuration. I like kmod or kerneld process with the 64configuration. The following sections detail these procedures.
65/etc/modprobe.conf file so the modules can automatically load/unload as
66they are used. The video devices could already exist, be generated
67using MAKEDEV, or need to be created. The following sections detail
68these procedures.
69 65
70 66
712.1 Module Configuration 672.1 Module Configuration
72 68
73 Using modules requires a bit of work to install and pass the 69 Using modules requires a bit of work to install and pass the
74parameters. Understand that entries in /etc/modprobe.conf of: 70parameters. Understand that entries in /etc/modprobe.d/*.conf of:
75 71
76 alias parport_lowlevel parport_pc 72 alias parport_lowlevel parport_pc
77 options parport_pc io=0x378 irq=none 73 options parport_pc io=0x378 irq=none
78 alias char-major-81 videodev 74 alias char-major-81 videodev
79 alias char-major-81-0 c-qcam 75 alias char-major-81-0 c-qcam
80 76
81will cause the kmod/modprobe to do certain things. If you are
82using kmod, then a request for a 'char-major-81-0' will cause
83the 'c-qcam' module to load. If you have other video sources with
84modules, you might want to assign the different minor numbers to
85different modules.
86
872.2 Device Configuration 772.2 Device Configuration
88 78
89 At this point, we need to ensure that the device files exist. 79 At this point, we need to ensure that the device files exist.
diff --git a/Documentation/video4linux/Zoran b/Documentation/video4linux/Zoran
index 9ed629d4874b..b5a911fd0602 100644
--- a/Documentation/video4linux/Zoran
+++ b/Documentation/video4linux/Zoran
@@ -255,7 +255,7 @@ Load zr36067.o. If it can't autodetect your card, use the card=X insmod
255option with X being the card number as given in the previous section. 255option with X being the card number as given in the previous section.
256To have more than one card, use card=X1[,X2[,X3,[X4[..]]]] 256To have more than one card, use card=X1[,X2[,X3,[X4[..]]]]
257 257
258To automate this, add the following to your /etc/modprobe.conf: 258To automate this, add the following to your /etc/modprobe.d/zoran.conf:
259 259
260options zr36067 card=X1[,X2[,X3[,X4[..]]]] 260options zr36067 card=X1[,X2[,X3[,X4[..]]]]
261alias char-major-81-0 zr36067 261alias char-major-81-0 zr36067
diff --git a/Documentation/video4linux/bttv/Modules.conf b/Documentation/video4linux/bttv/Modules.conf
index 753f15956eb8..8f258faf18f1 100644
--- a/Documentation/video4linux/bttv/Modules.conf
+++ b/Documentation/video4linux/bttv/Modules.conf
@@ -1,4 +1,4 @@
1# For modern kernels (2.6 or above), this belongs in /etc/modprobe.conf 1# For modern kernels (2.6 or above), this belongs in /etc/modprobe.d/*.conf
2# For for 2.4 kernels or earlier, this belongs in /etc/modules.conf. 2# For for 2.4 kernels or earlier, this belongs in /etc/modules.conf.
3 3
4# i2c 4# i2c
diff --git a/Documentation/video4linux/meye.txt b/Documentation/video4linux/meye.txt
index 34e2842c70ae..a051152ea99c 100644
--- a/Documentation/video4linux/meye.txt
+++ b/Documentation/video4linux/meye.txt
@@ -55,7 +55,7 @@ Module use:
55----------- 55-----------
56 56
57In order to automatically load the meye module on use, you can put those lines 57In order to automatically load the meye module on use, you can put those lines
58in your /etc/modprobe.conf file: 58in your /etc/modprobe.d/meye.conf file:
59 59
60 alias char-major-81 videodev 60 alias char-major-81 videodev
61 alias char-major-81-0 meye 61 alias char-major-81-0 meye
diff --git a/MAINTAINERS b/MAINTAINERS
index f9faadef7ab7..eecf3441ac21 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1940,7 +1940,7 @@ F: drivers/connector/
1940 1940
1941CONTROL GROUPS (CGROUPS) 1941CONTROL GROUPS (CGROUPS)
1942M: Tejun Heo <tj@kernel.org> 1942M: Tejun Heo <tj@kernel.org>
1943M: Li Zefan <lizf@cn.fujitsu.com> 1943M: Li Zefan <lizefan@huawei.com>
1944L: containers@lists.linux-foundation.org 1944L: containers@lists.linux-foundation.org
1945L: cgroups@vger.kernel.org 1945L: cgroups@vger.kernel.org
1946T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git 1946T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
@@ -5185,7 +5185,7 @@ F: kernel/delayacct.c
5185PERFORMANCE EVENTS SUBSYSTEM 5185PERFORMANCE EVENTS SUBSYSTEM
5186M: Peter Zijlstra <a.p.zijlstra@chello.nl> 5186M: Peter Zijlstra <a.p.zijlstra@chello.nl>
5187M: Paul Mackerras <paulus@samba.org> 5187M: Paul Mackerras <paulus@samba.org>
5188M: Ingo Molnar <mingo@elte.hu> 5188M: Ingo Molnar <mingo@redhat.com>
5189M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 5189M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
5190T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core 5190T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
5191S: Supported 5191S: Supported
@@ -5833,7 +5833,7 @@ S: Maintained
5833F: drivers/watchdog/sc1200wdt.c 5833F: drivers/watchdog/sc1200wdt.c
5834 5834
5835SCHEDULER 5835SCHEDULER
5836M: Ingo Molnar <mingo@elte.hu> 5836M: Ingo Molnar <mingo@redhat.com>
5837M: Peter Zijlstra <peterz@infradead.org> 5837M: Peter Zijlstra <peterz@infradead.org>
5838T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core 5838T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
5839S: Maintained 5839S: Maintained
diff --git a/Makefile b/Makefile
index 1932984478c1..5e637c23974e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 3 2PATCHLEVEL = 4
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = 4EXTRAVERSION = -rc1
5NAME = Saber-toothed Squirrel 5NAME = Saber-toothed Squirrel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -1170,7 +1170,7 @@ MRPROPER_FILES += .config .config.old .version .old_version \
1170# 1170#
1171clean: rm-dirs := $(CLEAN_DIRS) 1171clean: rm-dirs := $(CLEAN_DIRS)
1172clean: rm-files := $(CLEAN_FILES) 1172clean: rm-files := $(CLEAN_FILES)
1173clean-dirs := $(addprefix _clean_, . $(vmlinux-alldirs) Documentation) 1173clean-dirs := $(addprefix _clean_, . $(vmlinux-alldirs) Documentation samples)
1174 1174
1175PHONY += $(clean-dirs) clean archclean 1175PHONY += $(clean-dirs) clean archclean
1176$(clean-dirs): 1176$(clean-dirs):
diff --git a/arch/Kconfig b/arch/Kconfig
index a6f14f622d13..684eb5af439d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -213,4 +213,7 @@ config HAVE_CMPXCHG_LOCAL
213config HAVE_CMPXCHG_DOUBLE 213config HAVE_CMPXCHG_DOUBLE
214 bool 214 bool
215 215
216config ARCH_WANT_OLD_COMPAT_IPC
217 bool
218
216source "kernel/gcov/Kconfig" 219source "kernel/gcov/Kconfig"
diff --git a/arch/alpha/include/asm/posix_types.h b/arch/alpha/include/asm/posix_types.h
index db167413300b..24779fc95994 100644
--- a/arch/alpha/include/asm/posix_types.h
+++ b/arch/alpha/include/asm/posix_types.h
@@ -8,116 +8,13 @@
8 */ 8 */
9 9
10typedef unsigned int __kernel_ino_t; 10typedef unsigned int __kernel_ino_t;
11typedef unsigned int __kernel_mode_t; 11#define __kernel_ino_t __kernel_ino_t
12typedef unsigned int __kernel_nlink_t;
13typedef long __kernel_off_t;
14typedef long long __kernel_loff_t;
15typedef int __kernel_pid_t;
16typedef int __kernel_ipc_pid_t;
17typedef unsigned int __kernel_uid_t;
18typedef unsigned int __kernel_gid_t;
19typedef unsigned long __kernel_size_t;
20typedef long __kernel_ssize_t;
21typedef long __kernel_ptrdiff_t;
22typedef long __kernel_time_t;
23typedef long __kernel_suseconds_t;
24typedef long __kernel_clock_t;
25typedef int __kernel_daddr_t;
26typedef char * __kernel_caddr_t;
27typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30typedef int __kernel_clockid_t;
31typedef int __kernel_timer_t;
32
33typedef struct {
34 int val[2];
35} __kernel_fsid_t;
36
37typedef __kernel_uid_t __kernel_old_uid_t;
38typedef __kernel_gid_t __kernel_old_gid_t;
39typedef __kernel_uid_t __kernel_uid32_t;
40typedef __kernel_gid_t __kernel_gid32_t;
41
42typedef unsigned int __kernel_old_dev_t;
43
44#ifdef __KERNEL__
45
46#ifndef __GNUC__
47
48#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
49#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
50#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
51#define __FD_ZERO(set) \
52 ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set)))
53
54#else /* __GNUC__ */
55
56/* With GNU C, use inline functions instead so args are evaluated only once: */
57 12
58#undef __FD_SET 13typedef unsigned int __kernel_nlink_t;
59static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) 14#define __kernel_nlink_t __kernel_nlink_t
60{
61 unsigned long _tmp = fd / __NFDBITS;
62 unsigned long _rem = fd % __NFDBITS;
63 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
64}
65
66#undef __FD_CLR
67static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
68{
69 unsigned long _tmp = fd / __NFDBITS;
70 unsigned long _rem = fd % __NFDBITS;
71 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
72}
73
74#undef __FD_ISSET
75static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p)
76{
77 unsigned long _tmp = fd / __NFDBITS;
78 unsigned long _rem = fd % __NFDBITS;
79 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
80}
81
82/*
83 * This will unroll the loop for the normal constant case (8 ints,
84 * for a 256-bit fd_set)
85 */
86#undef __FD_ZERO
87static __inline__ void __FD_ZERO(__kernel_fd_set *p)
88{
89 unsigned long *tmp = p->fds_bits;
90 int i;
91
92 if (__builtin_constant_p(__FDSET_LONGS)) {
93 switch (__FDSET_LONGS) {
94 case 16:
95 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
96 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
97 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
98 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
99 return;
100
101 case 8:
102 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
103 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
104 return;
105
106 case 4:
107 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
108 return;
109 }
110 }
111 i = __FDSET_LONGS;
112 while (i) {
113 i--;
114 *tmp = 0;
115 tmp++;
116 }
117}
118 15
119#endif /* __GNUC__ */ 16typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
120 17
121#endif /* __KERNEL__ */ 18#include <asm-generic/posix_types.h>
122 19
123#endif /* _ALPHA_POSIX_TYPES_H */ 20#endif /* _ALPHA_POSIX_TYPES_H */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5098564d5879..93180845ae16 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -9,6 +9,7 @@ config ARM
9 select SYS_SUPPORTS_APM_EMULATION 9 select SYS_SUPPORTS_APM_EMULATION
10 select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI) 10 select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
11 select HAVE_OPROFILE if (HAVE_PERF_EVENTS) 11 select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
12 select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
12 select HAVE_ARCH_KGDB 13 select HAVE_ARCH_KGDB
13 select HAVE_KPROBES if !XIP_KERNEL 14 select HAVE_KPROBES if !XIP_KERNEL
14 select HAVE_KRETPROBES if (HAVE_KPROBES) 15 select HAVE_KRETPROBES if (HAVE_KPROBES)
@@ -21,6 +22,7 @@ config ARM
21 select HAVE_KERNEL_GZIP 22 select HAVE_KERNEL_GZIP
22 select HAVE_KERNEL_LZO 23 select HAVE_KERNEL_LZO
23 select HAVE_KERNEL_LZMA 24 select HAVE_KERNEL_LZMA
25 select HAVE_KERNEL_XZ
24 select HAVE_IRQ_WORK 26 select HAVE_IRQ_WORK
25 select HAVE_PERF_EVENTS 27 select HAVE_PERF_EVENTS
26 select PERF_USE_VMALLOC 28 select PERF_USE_VMALLOC
@@ -28,10 +30,10 @@ config ARM
28 select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)) 30 select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
29 select HAVE_C_RECORDMCOUNT 31 select HAVE_C_RECORDMCOUNT
30 select HAVE_GENERIC_HARDIRQS 32 select HAVE_GENERIC_HARDIRQS
31 select HAVE_SPARSE_IRQ
32 select GENERIC_IRQ_SHOW 33 select GENERIC_IRQ_SHOW
33 select CPU_PM if (SUSPEND || CPU_IDLE) 34 select CPU_PM if (SUSPEND || CPU_IDLE)
34 select GENERIC_PCI_IOMAP 35 select GENERIC_PCI_IOMAP
36 select HAVE_BPF_JIT if NET
35 help 37 help
36 The ARM series is a line of low-power-consumption RISC chip designs 38 The ARM series is a line of low-power-consumption RISC chip designs
37 licensed by ARM Ltd and targeted at embedded applications and 39 licensed by ARM Ltd and targeted at embedded applications and
@@ -52,9 +54,6 @@ config MIGHT_HAVE_PCI
52config SYS_SUPPORTS_APM_EMULATION 54config SYS_SUPPORTS_APM_EMULATION
53 bool 55 bool
54 56
55config HAVE_SCHED_CLOCK
56 bool
57
58config GENERIC_GPIO 57config GENERIC_GPIO
59 bool 58 bool
60 59
@@ -180,6 +179,9 @@ config ZONE_DMA
180config NEED_DMA_MAP_STATE 179config NEED_DMA_MAP_STATE
181 def_bool y 180 def_bool y
182 181
182config ARCH_HAS_DMA_SET_COHERENT_MASK
183 bool
184
183config GENERIC_ISA_DMA 185config GENERIC_ISA_DMA
184 bool 186 bool
185 187
@@ -217,6 +219,13 @@ config ARM_PATCH_PHYS_VIRT
217 this feature (eg, building a kernel for a single machine) and 219 this feature (eg, building a kernel for a single machine) and
218 you need to shrink the kernel to the minimal size. 220 you need to shrink the kernel to the minimal size.
219 221
222config NEED_MACH_IO_H
223 bool
224 help
225 Select this when mach/io.h is required to provide special
226 definitions for this platform. The need for mach/io.h should
227 be avoided when possible.
228
220config NEED_MACH_MEMORY_H 229config NEED_MACH_MEMORY_H
221 bool 230 bool
222 help 231 help
@@ -268,7 +277,9 @@ config ARCH_INTEGRATOR
268 select GENERIC_CLOCKEVENTS 277 select GENERIC_CLOCKEVENTS
269 select PLAT_VERSATILE 278 select PLAT_VERSATILE
270 select PLAT_VERSATILE_FPGA_IRQ 279 select PLAT_VERSATILE_FPGA_IRQ
280 select NEED_MACH_IO_H
271 select NEED_MACH_MEMORY_H 281 select NEED_MACH_MEMORY_H
282 select SPARSE_IRQ
272 help 283 help
273 Support for ARM's Integrator platform. 284 Support for ARM's Integrator platform.
274 285
@@ -315,6 +326,7 @@ config ARCH_VEXPRESS
315 select HAVE_CLK 326 select HAVE_CLK
316 select HAVE_PATA_PLATFORM 327 select HAVE_PATA_PLATFORM
317 select ICST 328 select ICST
329 select NO_IOPORT
318 select PLAT_VERSATILE 330 select PLAT_VERSATILE
319 select PLAT_VERSATILE_CLCD 331 select PLAT_VERSATILE_CLCD
320 help 332 help
@@ -354,6 +366,7 @@ config ARCH_HIGHBANK
354 select GENERIC_CLOCKEVENTS 366 select GENERIC_CLOCKEVENTS
355 select HAVE_ARM_SCU 367 select HAVE_ARM_SCU
356 select HAVE_SMP 368 select HAVE_SMP
369 select SPARSE_IRQ
357 select USE_OF 370 select USE_OF
358 help 371 help
359 Support for the Calxeda Highbank SoC based boards. 372 Support for the Calxeda Highbank SoC based boards.
@@ -404,6 +417,7 @@ config ARCH_EBSA110
404 select ISA 417 select ISA
405 select NO_IOPORT 418 select NO_IOPORT
406 select ARCH_USES_GETTIMEOFFSET 419 select ARCH_USES_GETTIMEOFFSET
420 select NEED_MACH_IO_H
407 select NEED_MACH_MEMORY_H 421 select NEED_MACH_MEMORY_H
408 help 422 help
409 This is an evaluation board for the StrongARM processor available 423 This is an evaluation board for the StrongARM processor available
@@ -430,6 +444,7 @@ config ARCH_FOOTBRIDGE
430 select FOOTBRIDGE 444 select FOOTBRIDGE
431 select GENERIC_CLOCKEVENTS 445 select GENERIC_CLOCKEVENTS
432 select HAVE_IDE 446 select HAVE_IDE
447 select NEED_MACH_IO_H
433 select NEED_MACH_MEMORY_H 448 select NEED_MACH_MEMORY_H
434 help 449 help
435 Support for systems based on the DC21285 companion chip 450 Support for systems based on the DC21285 companion chip
@@ -442,7 +457,6 @@ config ARCH_MXC
442 select CLKDEV_LOOKUP 457 select CLKDEV_LOOKUP
443 select CLKSRC_MMIO 458 select CLKSRC_MMIO
444 select GENERIC_IRQ_CHIP 459 select GENERIC_IRQ_CHIP
445 select HAVE_SCHED_CLOCK
446 select MULTI_IRQ_HANDLER 460 select MULTI_IRQ_HANDLER
447 help 461 help
448 Support for Freescale MXC/iMX-based family of processors 462 Support for Freescale MXC/iMX-based family of processors
@@ -482,6 +496,7 @@ config ARCH_IOP13XX
482 select PCI 496 select PCI
483 select ARCH_SUPPORTS_MSI 497 select ARCH_SUPPORTS_MSI
484 select VMSPLIT_1G 498 select VMSPLIT_1G
499 select NEED_MACH_IO_H
485 select NEED_MACH_MEMORY_H 500 select NEED_MACH_MEMORY_H
486 select NEED_RET_TO_USER 501 select NEED_RET_TO_USER
487 help 502 help
@@ -491,6 +506,7 @@ config ARCH_IOP32X
491 bool "IOP32x-based" 506 bool "IOP32x-based"
492 depends on MMU 507 depends on MMU
493 select CPU_XSCALE 508 select CPU_XSCALE
509 select NEED_MACH_IO_H
494 select NEED_RET_TO_USER 510 select NEED_RET_TO_USER
495 select PLAT_IOP 511 select PLAT_IOP
496 select PCI 512 select PCI
@@ -503,6 +519,7 @@ config ARCH_IOP33X
503 bool "IOP33x-based" 519 bool "IOP33x-based"
504 depends on MMU 520 depends on MMU
505 select CPU_XSCALE 521 select CPU_XSCALE
522 select NEED_MACH_IO_H
506 select NEED_RET_TO_USER 523 select NEED_RET_TO_USER
507 select PLAT_IOP 524 select PLAT_IOP
508 select PCI 525 select PCI
@@ -516,6 +533,7 @@ config ARCH_IXP23XX
516 select CPU_XSC3 533 select CPU_XSC3
517 select PCI 534 select PCI
518 select ARCH_USES_GETTIMEOFFSET 535 select ARCH_USES_GETTIMEOFFSET
536 select NEED_MACH_IO_H
519 select NEED_MACH_MEMORY_H 537 select NEED_MACH_MEMORY_H
520 help 538 help
521 Support for Intel's IXP23xx (XScale) family of processors. 539 Support for Intel's IXP23xx (XScale) family of processors.
@@ -526,6 +544,7 @@ config ARCH_IXP2000
526 select CPU_XSCALE 544 select CPU_XSCALE
527 select PCI 545 select PCI
528 select ARCH_USES_GETTIMEOFFSET 546 select ARCH_USES_GETTIMEOFFSET
547 select NEED_MACH_IO_H
529 select NEED_MACH_MEMORY_H 548 select NEED_MACH_MEMORY_H
530 help 549 help
531 Support for Intel's IXP2400/2800 (XScale) family of processors. 550 Support for Intel's IXP2400/2800 (XScale) family of processors.
@@ -533,12 +552,13 @@ config ARCH_IXP2000
533config ARCH_IXP4XX 552config ARCH_IXP4XX
534 bool "IXP4xx-based" 553 bool "IXP4xx-based"
535 depends on MMU 554 depends on MMU
555 select ARCH_HAS_DMA_SET_COHERENT_MASK
536 select CLKSRC_MMIO 556 select CLKSRC_MMIO
537 select CPU_XSCALE 557 select CPU_XSCALE
538 select GENERIC_GPIO 558 select GENERIC_GPIO
539 select GENERIC_CLOCKEVENTS 559 select GENERIC_CLOCKEVENTS
540 select HAVE_SCHED_CLOCK
541 select MIGHT_HAVE_PCI 560 select MIGHT_HAVE_PCI
561 select NEED_MACH_IO_H
542 select DMABOUNCE if PCI 562 select DMABOUNCE if PCI
543 help 563 help
544 Support for Intel's IXP4XX (XScale) family of processors. 564 Support for Intel's IXP4XX (XScale) family of processors.
@@ -549,6 +569,7 @@ config ARCH_DOVE
549 select PCI 569 select PCI
550 select ARCH_REQUIRE_GPIOLIB 570 select ARCH_REQUIRE_GPIOLIB
551 select GENERIC_CLOCKEVENTS 571 select GENERIC_CLOCKEVENTS
572 select NEED_MACH_IO_H
552 select PLAT_ORION 573 select PLAT_ORION
553 help 574 help
554 Support for the Marvell Dove SoC 88AP510 575 Support for the Marvell Dove SoC 88AP510
@@ -559,6 +580,7 @@ config ARCH_KIRKWOOD
559 select PCI 580 select PCI
560 select ARCH_REQUIRE_GPIOLIB 581 select ARCH_REQUIRE_GPIOLIB
561 select GENERIC_CLOCKEVENTS 582 select GENERIC_CLOCKEVENTS
583 select NEED_MACH_IO_H
562 select PLAT_ORION 584 select PLAT_ORION
563 help 585 help
564 Support for the following Marvell Kirkwood series SoCs: 586 Support for the following Marvell Kirkwood series SoCs:
@@ -583,6 +605,7 @@ config ARCH_MV78XX0
583 select PCI 605 select PCI
584 select ARCH_REQUIRE_GPIOLIB 606 select ARCH_REQUIRE_GPIOLIB
585 select GENERIC_CLOCKEVENTS 607 select GENERIC_CLOCKEVENTS
608 select NEED_MACH_IO_H
586 select PLAT_ORION 609 select PLAT_ORION
587 help 610 help
588 Support for the following Marvell MV78xx0 series SoCs: 611 Support for the following Marvell MV78xx0 series SoCs:
@@ -608,7 +631,6 @@ config ARCH_MMP
608 select CLKDEV_LOOKUP 631 select CLKDEV_LOOKUP
609 select GENERIC_CLOCKEVENTS 632 select GENERIC_CLOCKEVENTS
610 select GPIO_PXA 633 select GPIO_PXA
611 select HAVE_SCHED_CLOCK
612 select TICK_ONESHOT 634 select TICK_ONESHOT
613 select PLAT_PXA 635 select PLAT_PXA
614 select SPARSE_IRQ 636 select SPARSE_IRQ
@@ -649,9 +671,9 @@ config ARCH_TEGRA
649 select GENERIC_CLOCKEVENTS 671 select GENERIC_CLOCKEVENTS
650 select GENERIC_GPIO 672 select GENERIC_GPIO
651 select HAVE_CLK 673 select HAVE_CLK
652 select HAVE_SCHED_CLOCK
653 select HAVE_SMP 674 select HAVE_SMP
654 select MIGHT_HAVE_CACHE_L2X0 675 select MIGHT_HAVE_CACHE_L2X0
676 select NEED_MACH_IO_H if PCI
655 select ARCH_HAS_CPUFREQ 677 select ARCH_HAS_CPUFREQ
656 help 678 help
657 This enables support for NVIDIA Tegra based systems (Tegra APX, 679 This enables support for NVIDIA Tegra based systems (Tegra APX,
@@ -666,7 +688,6 @@ config ARCH_PICOXCELL
666 select DW_APB_TIMER 688 select DW_APB_TIMER
667 select GENERIC_CLOCKEVENTS 689 select GENERIC_CLOCKEVENTS
668 select GENERIC_GPIO 690 select GENERIC_GPIO
669 select HAVE_SCHED_CLOCK
670 select HAVE_TCM 691 select HAVE_TCM
671 select NO_IOPORT 692 select NO_IOPORT
672 select SPARSE_IRQ 693 select SPARSE_IRQ
@@ -694,7 +715,6 @@ config ARCH_PXA
694 select ARCH_REQUIRE_GPIOLIB 715 select ARCH_REQUIRE_GPIOLIB
695 select GENERIC_CLOCKEVENTS 716 select GENERIC_CLOCKEVENTS
696 select GPIO_PXA 717 select GPIO_PXA
697 select HAVE_SCHED_CLOCK
698 select TICK_ONESHOT 718 select TICK_ONESHOT
699 select PLAT_PXA 719 select PLAT_PXA
700 select SPARSE_IRQ 720 select SPARSE_IRQ
@@ -745,6 +765,7 @@ config ARCH_RPC
745 select ARCH_SPARSEMEM_ENABLE 765 select ARCH_SPARSEMEM_ENABLE
746 select ARCH_USES_GETTIMEOFFSET 766 select ARCH_USES_GETTIMEOFFSET
747 select HAVE_IDE 767 select HAVE_IDE
768 select NEED_MACH_IO_H
748 select NEED_MACH_MEMORY_H 769 select NEED_MACH_MEMORY_H
749 help 770 help
750 On the Acorn Risc-PC, Linux can support the internal IDE disk and 771 On the Acorn Risc-PC, Linux can support the internal IDE disk and
@@ -761,7 +782,6 @@ config ARCH_SA1100
761 select CPU_FREQ 782 select CPU_FREQ
762 select GENERIC_CLOCKEVENTS 783 select GENERIC_CLOCKEVENTS
763 select CLKDEV_LOOKUP 784 select CLKDEV_LOOKUP
764 select HAVE_SCHED_CLOCK
765 select TICK_ONESHOT 785 select TICK_ONESHOT
766 select ARCH_REQUIRE_GPIOLIB 786 select ARCH_REQUIRE_GPIOLIB
767 select HAVE_IDE 787 select HAVE_IDE
@@ -780,6 +800,7 @@ config ARCH_S3C24XX
780 select HAVE_S3C2410_I2C if I2C 800 select HAVE_S3C2410_I2C if I2C
781 select HAVE_S3C_RTC if RTC_CLASS 801 select HAVE_S3C_RTC if RTC_CLASS
782 select HAVE_S3C2410_WATCHDOG if WATCHDOG 802 select HAVE_S3C2410_WATCHDOG if WATCHDOG
803 select NEED_MACH_IO_H
783 help 804 help
784 Samsung S3C2410, S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443 805 Samsung S3C2410, S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443
785 and S3C2450 SoCs based systems, such as the Simtec Electronics BAST 806 and S3C2450 SoCs based systems, such as the Simtec Electronics BAST
@@ -818,7 +839,6 @@ config ARCH_S5P64X0
818 select CLKSRC_MMIO 839 select CLKSRC_MMIO
819 select HAVE_S3C2410_WATCHDOG if WATCHDOG 840 select HAVE_S3C2410_WATCHDOG if WATCHDOG
820 select GENERIC_CLOCKEVENTS 841 select GENERIC_CLOCKEVENTS
821 select HAVE_SCHED_CLOCK
822 select HAVE_S3C2410_I2C if I2C 842 select HAVE_S3C2410_I2C if I2C
823 select HAVE_S3C_RTC if RTC_CLASS 843 select HAVE_S3C_RTC if RTC_CLASS
824 help 844 help
@@ -849,7 +869,6 @@ config ARCH_S5PV210
849 select CLKSRC_MMIO 869 select CLKSRC_MMIO
850 select ARCH_HAS_CPUFREQ 870 select ARCH_HAS_CPUFREQ
851 select GENERIC_CLOCKEVENTS 871 select GENERIC_CLOCKEVENTS
852 select HAVE_SCHED_CLOCK
853 select HAVE_S3C2410_I2C if I2C 872 select HAVE_S3C2410_I2C if I2C
854 select HAVE_S3C_RTC if RTC_CLASS 873 select HAVE_S3C_RTC if RTC_CLASS
855 select HAVE_S3C2410_WATCHDOG if WATCHDOG 874 select HAVE_S3C2410_WATCHDOG if WATCHDOG
@@ -883,6 +902,7 @@ config ARCH_SHARK
883 select PCI 902 select PCI
884 select ARCH_USES_GETTIMEOFFSET 903 select ARCH_USES_GETTIMEOFFSET
885 select NEED_MACH_MEMORY_H 904 select NEED_MACH_MEMORY_H
905 select NEED_MACH_IO_H
886 help 906 help
887 Support for the StrongARM based Digital DNARD machine, also known 907 Support for the StrongARM based Digital DNARD machine, also known
888 as "Shark" (<http://www.shark-linux.de/shark.html>). 908 as "Shark" (<http://www.shark-linux.de/shark.html>).
@@ -892,7 +912,6 @@ config ARCH_U300
892 depends on MMU 912 depends on MMU
893 select CLKSRC_MMIO 913 select CLKSRC_MMIO
894 select CPU_ARM926T 914 select CPU_ARM926T
895 select HAVE_SCHED_CLOCK
896 select HAVE_TCM 915 select HAVE_TCM
897 select ARM_AMBA 916 select ARM_AMBA
898 select ARM_PATCH_PHYS_VIRT 917 select ARM_PATCH_PHYS_VIRT
@@ -951,7 +970,6 @@ config ARCH_OMAP
951 select ARCH_HAS_CPUFREQ 970 select ARCH_HAS_CPUFREQ
952 select CLKSRC_MMIO 971 select CLKSRC_MMIO
953 select GENERIC_CLOCKEVENTS 972 select GENERIC_CLOCKEVENTS
954 select HAVE_SCHED_CLOCK
955 select ARCH_HAS_HOLES_MEMORYMODEL 973 select ARCH_HAS_HOLES_MEMORYMODEL
956 help 974 help
957 Support for TI's OMAP platform (OMAP1/2/3/4). 975 Support for TI's OMAP platform (OMAP1/2/3/4).
@@ -1115,13 +1133,11 @@ config ARCH_ACORN
1115config PLAT_IOP 1133config PLAT_IOP
1116 bool 1134 bool
1117 select GENERIC_CLOCKEVENTS 1135 select GENERIC_CLOCKEVENTS
1118 select HAVE_SCHED_CLOCK
1119 1136
1120config PLAT_ORION 1137config PLAT_ORION
1121 bool 1138 bool
1122 select CLKSRC_MMIO 1139 select CLKSRC_MMIO
1123 select GENERIC_IRQ_CHIP 1140 select GENERIC_IRQ_CHIP
1124 select HAVE_SCHED_CLOCK
1125 1141
1126config PLAT_PXA 1142config PLAT_PXA
1127 bool 1143 bool
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 66ca8014ff3e..85348a09d655 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -292,6 +292,22 @@ choice
292 Note that the system will appear to hang during boot if there 292 Note that the system will appear to hang during boot if there
293 is nothing connected to read from the DCC. 293 is nothing connected to read from the DCC.
294 294
295 config DEBUG_SEMIHOSTING
296 bool "Kernel low-level debug output via semihosting I"
297 help
298 Semihosting enables code running on an ARM target to use
299 the I/O facilities on a host debugger/emulator through a
300 simple SVC calls. The host debugger or emulator must have
301 semihosting enabled for the special svc call to be trapped
302 otherwise the kernel will crash.
303
304 This is known to work with OpenOCD, as wellas
305 ARM's Fast Models, or any other controlling environment
306 that implements semihosting.
307
308 For more details about semihosting, please see
309 chapter 8 of DUI0203I_rvct_developer_guide.pdf from ARM Ltd.
310
295endchoice 311endchoice
296 312
297config EARLY_PRINTK 313config EARLY_PRINTK
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index dcb088e868fe..047a20780fc1 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -253,6 +253,7 @@ core-$(CONFIG_VFP) += arch/arm/vfp/
253 253
254# If we have a machine-specific directory, then include it in the build. 254# If we have a machine-specific directory, then include it in the build.
255core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/ 255core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
256core-y += arch/arm/net/
256core-y += $(machdirs) $(platdirs) 257core-y += $(machdirs) $(platdirs)
257 258
258drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/ 259drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index fc871e719aae..c877087d2000 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -11,8 +11,6 @@
11# Copyright (C) 1995-2002 Russell King 11# Copyright (C) 1995-2002 Russell King
12# 12#
13 13
14MKIMAGE := $(srctree)/scripts/mkuboot.sh
15
16ifneq ($(MACHINE),) 14ifneq ($(MACHINE),)
17include $(srctree)/$(MACHINE)/Makefile.boot 15include $(srctree)/$(MACHINE)/Makefile.boot
18endif 16endif
@@ -69,22 +67,19 @@ $(obj)/dtbs: $(addprefix $(obj)/, $(dtb-y))
69 67
70clean-files := *.dtb 68clean-files := *.dtb
71 69
72quiet_cmd_uimage = UIMAGE $@ 70ifneq ($(LOADADDR),)
73 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A arm -O linux -T kernel \ 71 UIMAGE_LOADADDR=$(LOADADDR)
74 -C none -a $(LOADADDR) -e $(STARTADDR) \
75 -n 'Linux-$(KERNELRELEASE)' -d $< $@
76
77ifeq ($(CONFIG_ZBOOT_ROM),y)
78$(obj)/uImage: LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
79else 72else
80$(obj)/uImage: LOADADDR=$(ZRELADDR) 73 ifeq ($(CONFIG_ZBOOT_ROM),y)
74 UIMAGE_LOADADDR=$(CONFIG_ZBOOT_ROM_TEXT)
75 else
76 UIMAGE_LOADADDR=$(ZRELADDR)
77 endif
81endif 78endif
82 79
83$(obj)/uImage: STARTADDR=$(LOADADDR)
84
85check_for_multiple_loadaddr = \ 80check_for_multiple_loadaddr = \
86if [ $(words $(LOADADDR)) -gt 1 ]; then \ 81if [ $(words $(UIMAGE_LOADADDR)) -gt 1 ]; then \
87 echo 'multiple load addresses: $(LOADADDR)'; \ 82 echo 'multiple load addresses: $(UIMAGE_LOADADDR)'; \
88 echo 'This is incompatible with uImages'; \ 83 echo 'This is incompatible with uImages'; \
89 echo 'Specify LOADADDR on the commandline to build an uImage'; \ 84 echo 'Specify LOADADDR on the commandline to build an uImage'; \
90 false; \ 85 false; \
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index e0936a148516..d0d441c429ae 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,8 +1,10 @@
1ashldi3.S
1font.c 2font.c
2lib1funcs.S 3lib1funcs.S
3piggy.gzip 4piggy.gzip
4piggy.lzo 5piggy.lzo
5piggy.lzma 6piggy.lzma
7piggy.xzkern
6vmlinux 8vmlinux
7vmlinux.lds 9vmlinux.lds
8 10
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index cf0a64ce4b83..bb267562e7ed 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -92,6 +92,7 @@ SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
92suffix_$(CONFIG_KERNEL_GZIP) = gzip 92suffix_$(CONFIG_KERNEL_GZIP) = gzip
93suffix_$(CONFIG_KERNEL_LZO) = lzo 93suffix_$(CONFIG_KERNEL_LZO) = lzo
94suffix_$(CONFIG_KERNEL_LZMA) = lzma 94suffix_$(CONFIG_KERNEL_LZMA) = lzma
95suffix_$(CONFIG_KERNEL_XZ) = xzkern
95 96
96# Borrowed libfdt files for the ATAG compatibility mode 97# Borrowed libfdt files for the ATAG compatibility mode
97 98
@@ -112,10 +113,12 @@ endif
112 113
113targets := vmlinux vmlinux.lds \ 114targets := vmlinux vmlinux.lds \
114 piggy.$(suffix_y) piggy.$(suffix_y).o \ 115 piggy.$(suffix_y) piggy.$(suffix_y).o \
115 lib1funcs.o lib1funcs.S font.o font.c head.o misc.o $(OBJS) 116 lib1funcs.o lib1funcs.S ashldi3.o ashldi3.S \
117 font.o font.c head.o misc.o $(OBJS)
116 118
117# Make sure files are removed during clean 119# Make sure files are removed during clean
118extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S $(libfdt) $(libfdt_hdrs) 120extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \
121 lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs)
119 122
120ifeq ($(CONFIG_FUNCTION_TRACER),y) 123ifeq ($(CONFIG_FUNCTION_TRACER),y)
121ORIG_CFLAGS := $(KBUILD_CFLAGS) 124ORIG_CFLAGS := $(KBUILD_CFLAGS)
@@ -151,6 +154,12 @@ lib1funcs = $(obj)/lib1funcs.o
151$(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S 154$(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S
152 $(call cmd,shipped) 155 $(call cmd,shipped)
153 156
157# For __aeabi_llsl
158ashldi3 = $(obj)/ashldi3.o
159
160$(obj)/ashldi3.S: $(srctree)/arch/$(SRCARCH)/lib/ashldi3.S
161 $(call cmd,shipped)
162
154# We need to prevent any GOTOFF relocs being used with references 163# We need to prevent any GOTOFF relocs being used with references
155# to symbols in the .bss section since we cannot relocate them 164# to symbols in the .bss section since we cannot relocate them
156# independently from the rest at run time. This can be achieved by 165# independently from the rest at run time. This can be achieved by
@@ -172,7 +181,7 @@ if [ $(words $(ZRELADDR)) -gt 1 -a "$(CONFIG_AUTO_ZRELADDR)" = "" ]; then \
172fi 181fi
173 182
174$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \ 183$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
175 $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE 184 $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) FORCE
176 @$(check_for_multiple_zreladdr) 185 @$(check_for_multiple_zreladdr)
177 $(call if_changed,ld) 186 $(call if_changed,ld)
178 @$(check_for_bad_syms) 187 @$(check_for_bad_syms)
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
index 07be5a2f8302..f41b38cafce8 100644
--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -44,6 +44,12 @@ extern void error(char *);
44#include "../../../../lib/decompress_unlzma.c" 44#include "../../../../lib/decompress_unlzma.c"
45#endif 45#endif
46 46
47#ifdef CONFIG_KERNEL_XZ
48#define memmove memmove
49#define memcpy memcpy
50#include "../../../../lib/decompress_unxz.c"
51#endif
52
47int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) 53int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
48{ 54{
49 return decompress(input, len, NULL, NULL, output, NULL, error); 55 return decompress(input, len, NULL, NULL, output, NULL, error);
diff --git a/arch/arm/boot/compressed/piggy.xzkern.S b/arch/arm/boot/compressed/piggy.xzkern.S
new file mode 100644
index 000000000000..5703f300d027
--- /dev/null
+++ b/arch/arm/boot/compressed/piggy.xzkern.S
@@ -0,0 +1,6 @@
1 .section .piggydata,#alloc
2 .globl input_data
3input_data:
4 .incbin "arch/arm/boot/compressed/piggy.xzkern"
5 .globl input_data_end
6input_data_end:
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 3bb1d7589bd9..283fa1d804f4 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -24,9 +24,6 @@ config ARM_VIC_NR
24config ICST 24config ICST
25 bool 25 bool
26 26
27config PL330
28 bool
29
30config SA1111 27config SA1111
31 bool 28 bool
32 select DMABOUNCE if !ARCH_PXA 29 select DMABOUNCE if !ARCH_PXA
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 69feafe7286c..215816f1775f 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -5,7 +5,6 @@
5obj-$(CONFIG_ARM_GIC) += gic.o 5obj-$(CONFIG_ARM_GIC) += gic.o
6obj-$(CONFIG_ARM_VIC) += vic.o 6obj-$(CONFIG_ARM_VIC) += vic.o
7obj-$(CONFIG_ICST) += icst.o 7obj-$(CONFIG_ICST) += icst.o
8obj-$(CONFIG_PL330) += pl330.o
9obj-$(CONFIG_SA1111) += sa1111.o 8obj-$(CONFIG_SA1111) += sa1111.o
10obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o 9obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
11obj-$(CONFIG_DMABOUNCE) += dmabounce.o 10obj-$(CONFIG_DMABOUNCE) += dmabounce.o
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index f0783be17352..aa5269984187 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -686,13 +686,12 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
686 * For primary GICs, skip over SGIs. 686 * For primary GICs, skip over SGIs.
687 * For secondary GICs, skip over PPIs, too. 687 * For secondary GICs, skip over PPIs, too.
688 */ 688 */
689 hwirq_base = 32; 689 if (gic_nr == 0 && (irq_start & 31) > 0) {
690 if (gic_nr == 0) { 690 hwirq_base = 16;
691 if ((irq_start & 31) > 0) { 691 if (irq_start != -1)
692 hwirq_base = 16; 692 irq_start = (irq_start & ~31) + 16;
693 if (irq_start != -1) 693 } else {
694 irq_start = (irq_start & ~31) + 16; 694 hwirq_base = 32;
695 }
696 } 695 }
697 696
698 /* 697 /*
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
deleted file mode 100644
index ff3ad2244824..000000000000
--- a/arch/arm/common/pl330.c
+++ /dev/null
@@ -1,1960 +0,0 @@
1/* linux/arch/arm/common/pl330.c
2 *
3 * Copyright (C) 2010 Samsung Electronics Co Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/string.h>
26#include <linux/io.h>
27#include <linux/delay.h>
28#include <linux/interrupt.h>
29#include <linux/dma-mapping.h>
30
31#include <asm/hardware/pl330.h>
32
33/* Register and Bit field Definitions */
34#define DS 0x0
35#define DS_ST_STOP 0x0
36#define DS_ST_EXEC 0x1
37#define DS_ST_CMISS 0x2
38#define DS_ST_UPDTPC 0x3
39#define DS_ST_WFE 0x4
40#define DS_ST_ATBRR 0x5
41#define DS_ST_QBUSY 0x6
42#define DS_ST_WFP 0x7
43#define DS_ST_KILL 0x8
44#define DS_ST_CMPLT 0x9
45#define DS_ST_FLTCMP 0xe
46#define DS_ST_FAULT 0xf
47
48#define DPC 0x4
49#define INTEN 0x20
50#define ES 0x24
51#define INTSTATUS 0x28
52#define INTCLR 0x2c
53#define FSM 0x30
54#define FSC 0x34
55#define FTM 0x38
56
57#define _FTC 0x40
58#define FTC(n) (_FTC + (n)*0x4)
59
60#define _CS 0x100
61#define CS(n) (_CS + (n)*0x8)
62#define CS_CNS (1 << 21)
63
64#define _CPC 0x104
65#define CPC(n) (_CPC + (n)*0x8)
66
67#define _SA 0x400
68#define SA(n) (_SA + (n)*0x20)
69
70#define _DA 0x404
71#define DA(n) (_DA + (n)*0x20)
72
73#define _CC 0x408
74#define CC(n) (_CC + (n)*0x20)
75
76#define CC_SRCINC (1 << 0)
77#define CC_DSTINC (1 << 14)
78#define CC_SRCPRI (1 << 8)
79#define CC_DSTPRI (1 << 22)
80#define CC_SRCNS (1 << 9)
81#define CC_DSTNS (1 << 23)
82#define CC_SRCIA (1 << 10)
83#define CC_DSTIA (1 << 24)
84#define CC_SRCBRSTLEN_SHFT 4
85#define CC_DSTBRSTLEN_SHFT 18
86#define CC_SRCBRSTSIZE_SHFT 1
87#define CC_DSTBRSTSIZE_SHFT 15
88#define CC_SRCCCTRL_SHFT 11
89#define CC_SRCCCTRL_MASK 0x7
90#define CC_DSTCCTRL_SHFT 25
91#define CC_DRCCCTRL_MASK 0x7
92#define CC_SWAP_SHFT 28
93
94#define _LC0 0x40c
95#define LC0(n) (_LC0 + (n)*0x20)
96
97#define _LC1 0x410
98#define LC1(n) (_LC1 + (n)*0x20)
99
100#define DBGSTATUS 0xd00
101#define DBG_BUSY (1 << 0)
102
103#define DBGCMD 0xd04
104#define DBGINST0 0xd08
105#define DBGINST1 0xd0c
106
107#define CR0 0xe00
108#define CR1 0xe04
109#define CR2 0xe08
110#define CR3 0xe0c
111#define CR4 0xe10
112#define CRD 0xe14
113
114#define PERIPH_ID 0xfe0
115#define PCELL_ID 0xff0
116
117#define CR0_PERIPH_REQ_SET (1 << 0)
118#define CR0_BOOT_EN_SET (1 << 1)
119#define CR0_BOOT_MAN_NS (1 << 2)
120#define CR0_NUM_CHANS_SHIFT 4
121#define CR0_NUM_CHANS_MASK 0x7
122#define CR0_NUM_PERIPH_SHIFT 12
123#define CR0_NUM_PERIPH_MASK 0x1f
124#define CR0_NUM_EVENTS_SHIFT 17
125#define CR0_NUM_EVENTS_MASK 0x1f
126
127#define CR1_ICACHE_LEN_SHIFT 0
128#define CR1_ICACHE_LEN_MASK 0x7
129#define CR1_NUM_ICACHELINES_SHIFT 4
130#define CR1_NUM_ICACHELINES_MASK 0xf
131
132#define CRD_DATA_WIDTH_SHIFT 0
133#define CRD_DATA_WIDTH_MASK 0x7
134#define CRD_WR_CAP_SHIFT 4
135#define CRD_WR_CAP_MASK 0x7
136#define CRD_WR_Q_DEP_SHIFT 8
137#define CRD_WR_Q_DEP_MASK 0xf
138#define CRD_RD_CAP_SHIFT 12
139#define CRD_RD_CAP_MASK 0x7
140#define CRD_RD_Q_DEP_SHIFT 16
141#define CRD_RD_Q_DEP_MASK 0xf
142#define CRD_DATA_BUFF_SHIFT 20
143#define CRD_DATA_BUFF_MASK 0x3ff
144
145#define PART 0x330
146#define DESIGNER 0x41
147#define REVISION 0x0
148#define INTEG_CFG 0x0
149#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))
150
151#define PCELL_ID_VAL 0xb105f00d
152
153#define PL330_STATE_STOPPED (1 << 0)
154#define PL330_STATE_EXECUTING (1 << 1)
155#define PL330_STATE_WFE (1 << 2)
156#define PL330_STATE_FAULTING (1 << 3)
157#define PL330_STATE_COMPLETING (1 << 4)
158#define PL330_STATE_WFP (1 << 5)
159#define PL330_STATE_KILLING (1 << 6)
160#define PL330_STATE_FAULT_COMPLETING (1 << 7)
161#define PL330_STATE_CACHEMISS (1 << 8)
162#define PL330_STATE_UPDTPC (1 << 9)
163#define PL330_STATE_ATBARRIER (1 << 10)
164#define PL330_STATE_QUEUEBUSY (1 << 11)
165#define PL330_STATE_INVALID (1 << 15)
166
167#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
168 | PL330_STATE_WFE | PL330_STATE_FAULTING)
169
170#define CMD_DMAADDH 0x54
171#define CMD_DMAEND 0x00
172#define CMD_DMAFLUSHP 0x35
173#define CMD_DMAGO 0xa0
174#define CMD_DMALD 0x04
175#define CMD_DMALDP 0x25
176#define CMD_DMALP 0x20
177#define CMD_DMALPEND 0x28
178#define CMD_DMAKILL 0x01
179#define CMD_DMAMOV 0xbc
180#define CMD_DMANOP 0x18
181#define CMD_DMARMB 0x12
182#define CMD_DMASEV 0x34
183#define CMD_DMAST 0x08
184#define CMD_DMASTP 0x29
185#define CMD_DMASTZ 0x0c
186#define CMD_DMAWFE 0x36
187#define CMD_DMAWFP 0x30
188#define CMD_DMAWMB 0x13
189
190#define SZ_DMAADDH 3
191#define SZ_DMAEND 1
192#define SZ_DMAFLUSHP 2
193#define SZ_DMALD 1
194#define SZ_DMALDP 2
195#define SZ_DMALP 2
196#define SZ_DMALPEND 2
197#define SZ_DMAKILL 1
198#define SZ_DMAMOV 6
199#define SZ_DMANOP 1
200#define SZ_DMARMB 1
201#define SZ_DMASEV 2
202#define SZ_DMAST 1
203#define SZ_DMASTP 2
204#define SZ_DMASTZ 1
205#define SZ_DMAWFE 2
206#define SZ_DMAWFP 2
207#define SZ_DMAWMB 1
208#define SZ_DMAGO 6
209
210#define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
211#define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))
212
213#define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
214#define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
215
216/*
217 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
218 * at 1byte/burst for P<->M and M<->M respectively.
219 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
220 * should be enough for P<->M and M<->M respectively.
221 */
222#define MCODE_BUFF_PER_REQ 256
223
224/* If the _pl330_req is available to the client */
225#define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
226
227/* Use this _only_ to wait on transient states */
228#define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax();
229
230#ifdef PL330_DEBUG_MCGEN
231static unsigned cmd_line;
232#define PL330_DBGCMD_DUMP(off, x...) do { \
233 printk("%x:", cmd_line); \
234 printk(x); \
235 cmd_line += off; \
236 } while (0)
237#define PL330_DBGMC_START(addr) (cmd_line = addr)
238#else
239#define PL330_DBGCMD_DUMP(off, x...) do {} while (0)
240#define PL330_DBGMC_START(addr) do {} while (0)
241#endif
242
243struct _xfer_spec {
244 u32 ccr;
245 struct pl330_req *r;
246 struct pl330_xfer *x;
247};
248
249enum dmamov_dst {
250 SAR = 0,
251 CCR,
252 DAR,
253};
254
255enum pl330_dst {
256 SRC = 0,
257 DST,
258};
259
260enum pl330_cond {
261 SINGLE,
262 BURST,
263 ALWAYS,
264};
265
266struct _pl330_req {
267 u32 mc_bus;
268 void *mc_cpu;
269 /* Number of bytes taken to setup MC for the req */
270 u32 mc_len;
271 struct pl330_req *r;
272 /* Hook to attach to DMAC's list of reqs with due callback */
273 struct list_head rqd;
274};
275
276/* ToBeDone for tasklet */
277struct _pl330_tbd {
278 bool reset_dmac;
279 bool reset_mngr;
280 u8 reset_chan;
281};
282
283/* A DMAC Thread */
284struct pl330_thread {
285 u8 id;
286 int ev;
287 /* If the channel is not yet acquired by any client */
288 bool free;
289 /* Parent DMAC */
290 struct pl330_dmac *dmac;
291 /* Only two at a time */
292 struct _pl330_req req[2];
293 /* Index of the last enqueued request */
294 unsigned lstenq;
295 /* Index of the last submitted request or -1 if the DMA is stopped */
296 int req_running;
297};
298
299enum pl330_dmac_state {
300 UNINIT,
301 INIT,
302 DYING,
303};
304
305/* A DMAC */
306struct pl330_dmac {
307 spinlock_t lock;
308 /* Holds list of reqs with due callbacks */
309 struct list_head req_done;
310 /* Pointer to platform specific stuff */
311 struct pl330_info *pinfo;
312 /* Maximum possible events/irqs */
313 int events[32];
314 /* BUS address of MicroCode buffer */
315 u32 mcode_bus;
316 /* CPU address of MicroCode buffer */
317 void *mcode_cpu;
318 /* List of all Channel threads */
319 struct pl330_thread *channels;
320 /* Pointer to the MANAGER thread */
321 struct pl330_thread *manager;
322 /* To handle bad news in interrupt */
323 struct tasklet_struct tasks;
324 struct _pl330_tbd dmac_tbd;
325 /* State of DMAC operation */
326 enum pl330_dmac_state state;
327};
328
329static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
330{
331 if (r && r->xfer_cb)
332 r->xfer_cb(r->token, err);
333}
334
335static inline bool _queue_empty(struct pl330_thread *thrd)
336{
337 return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
338 ? true : false;
339}
340
341static inline bool _queue_full(struct pl330_thread *thrd)
342{
343 return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
344 ? false : true;
345}
346
347static inline bool is_manager(struct pl330_thread *thrd)
348{
349 struct pl330_dmac *pl330 = thrd->dmac;
350
351 /* MANAGER is indexed at the end */
352 if (thrd->id == pl330->pinfo->pcfg.num_chan)
353 return true;
354 else
355 return false;
356}
357
358/* If manager of the thread is in Non-Secure mode */
359static inline bool _manager_ns(struct pl330_thread *thrd)
360{
361 struct pl330_dmac *pl330 = thrd->dmac;
362
363 return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
364}
365
366static inline u32 get_id(struct pl330_info *pi, u32 off)
367{
368 void __iomem *regs = pi->base;
369 u32 id = 0;
370
371 id |= (readb(regs + off + 0x0) << 0);
372 id |= (readb(regs + off + 0x4) << 8);
373 id |= (readb(regs + off + 0x8) << 16);
374 id |= (readb(regs + off + 0xc) << 24);
375
376 return id;
377}
378
379static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
380 enum pl330_dst da, u16 val)
381{
382 if (dry_run)
383 return SZ_DMAADDH;
384
385 buf[0] = CMD_DMAADDH;
386 buf[0] |= (da << 1);
387 *((u16 *)&buf[1]) = val;
388
389 PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
390 da == 1 ? "DA" : "SA", val);
391
392 return SZ_DMAADDH;
393}
394
395static inline u32 _emit_END(unsigned dry_run, u8 buf[])
396{
397 if (dry_run)
398 return SZ_DMAEND;
399
400 buf[0] = CMD_DMAEND;
401
402 PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");
403
404 return SZ_DMAEND;
405}
406
407static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
408{
409 if (dry_run)
410 return SZ_DMAFLUSHP;
411
412 buf[0] = CMD_DMAFLUSHP;
413
414 peri &= 0x1f;
415 peri <<= 3;
416 buf[1] = peri;
417
418 PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);
419
420 return SZ_DMAFLUSHP;
421}
422
423static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
424{
425 if (dry_run)
426 return SZ_DMALD;
427
428 buf[0] = CMD_DMALD;
429
430 if (cond == SINGLE)
431 buf[0] |= (0 << 1) | (1 << 0);
432 else if (cond == BURST)
433 buf[0] |= (1 << 1) | (1 << 0);
434
435 PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
436 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
437
438 return SZ_DMALD;
439}
440
441static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
442 enum pl330_cond cond, u8 peri)
443{
444 if (dry_run)
445 return SZ_DMALDP;
446
447 buf[0] = CMD_DMALDP;
448
449 if (cond == BURST)
450 buf[0] |= (1 << 1);
451
452 peri &= 0x1f;
453 peri <<= 3;
454 buf[1] = peri;
455
456 PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
457 cond == SINGLE ? 'S' : 'B', peri >> 3);
458
459 return SZ_DMALDP;
460}
461
462static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
463 unsigned loop, u8 cnt)
464{
465 if (dry_run)
466 return SZ_DMALP;
467
468 buf[0] = CMD_DMALP;
469
470 if (loop)
471 buf[0] |= (1 << 1);
472
473 cnt--; /* DMAC increments by 1 internally */
474 buf[1] = cnt;
475
476 PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);
477
478 return SZ_DMALP;
479}
480
481struct _arg_LPEND {
482 enum pl330_cond cond;
483 bool forever;
484 unsigned loop;
485 u8 bjump;
486};
487
488static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
489 const struct _arg_LPEND *arg)
490{
491 enum pl330_cond cond = arg->cond;
492 bool forever = arg->forever;
493 unsigned loop = arg->loop;
494 u8 bjump = arg->bjump;
495
496 if (dry_run)
497 return SZ_DMALPEND;
498
499 buf[0] = CMD_DMALPEND;
500
501 if (loop)
502 buf[0] |= (1 << 2);
503
504 if (!forever)
505 buf[0] |= (1 << 4);
506
507 if (cond == SINGLE)
508 buf[0] |= (0 << 1) | (1 << 0);
509 else if (cond == BURST)
510 buf[0] |= (1 << 1) | (1 << 0);
511
512 buf[1] = bjump;
513
514 PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
515 forever ? "FE" : "END",
516 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
517 loop ? '1' : '0',
518 bjump);
519
520 return SZ_DMALPEND;
521}
522
523static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
524{
525 if (dry_run)
526 return SZ_DMAKILL;
527
528 buf[0] = CMD_DMAKILL;
529
530 return SZ_DMAKILL;
531}
532
533static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
534 enum dmamov_dst dst, u32 val)
535{
536 if (dry_run)
537 return SZ_DMAMOV;
538
539 buf[0] = CMD_DMAMOV;
540 buf[1] = dst;
541 *((u32 *)&buf[2]) = val;
542
543 PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
544 dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
545
546 return SZ_DMAMOV;
547}
548
549static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
550{
551 if (dry_run)
552 return SZ_DMANOP;
553
554 buf[0] = CMD_DMANOP;
555
556 PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");
557
558 return SZ_DMANOP;
559}
560
561static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
562{
563 if (dry_run)
564 return SZ_DMARMB;
565
566 buf[0] = CMD_DMARMB;
567
568 PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");
569
570 return SZ_DMARMB;
571}
572
573static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
574{
575 if (dry_run)
576 return SZ_DMASEV;
577
578 buf[0] = CMD_DMASEV;
579
580 ev &= 0x1f;
581 ev <<= 3;
582 buf[1] = ev;
583
584 PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);
585
586 return SZ_DMASEV;
587}
588
589static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
590{
591 if (dry_run)
592 return SZ_DMAST;
593
594 buf[0] = CMD_DMAST;
595
596 if (cond == SINGLE)
597 buf[0] |= (0 << 1) | (1 << 0);
598 else if (cond == BURST)
599 buf[0] |= (1 << 1) | (1 << 0);
600
601 PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
602 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
603
604 return SZ_DMAST;
605}
606
607static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
608 enum pl330_cond cond, u8 peri)
609{
610 if (dry_run)
611 return SZ_DMASTP;
612
613 buf[0] = CMD_DMASTP;
614
615 if (cond == BURST)
616 buf[0] |= (1 << 1);
617
618 peri &= 0x1f;
619 peri <<= 3;
620 buf[1] = peri;
621
622 PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
623 cond == SINGLE ? 'S' : 'B', peri >> 3);
624
625 return SZ_DMASTP;
626}
627
628static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
629{
630 if (dry_run)
631 return SZ_DMASTZ;
632
633 buf[0] = CMD_DMASTZ;
634
635 PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");
636
637 return SZ_DMASTZ;
638}
639
640static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
641 unsigned invalidate)
642{
643 if (dry_run)
644 return SZ_DMAWFE;
645
646 buf[0] = CMD_DMAWFE;
647
648 ev &= 0x1f;
649 ev <<= 3;
650 buf[1] = ev;
651
652 if (invalidate)
653 buf[1] |= (1 << 1);
654
655 PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
656 ev >> 3, invalidate ? ", I" : "");
657
658 return SZ_DMAWFE;
659}
660
661static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
662 enum pl330_cond cond, u8 peri)
663{
664 if (dry_run)
665 return SZ_DMAWFP;
666
667 buf[0] = CMD_DMAWFP;
668
669 if (cond == SINGLE)
670 buf[0] |= (0 << 1) | (0 << 0);
671 else if (cond == BURST)
672 buf[0] |= (1 << 1) | (0 << 0);
673 else
674 buf[0] |= (0 << 1) | (1 << 0);
675
676 peri &= 0x1f;
677 peri <<= 3;
678 buf[1] = peri;
679
680 PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
681 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);
682
683 return SZ_DMAWFP;
684}
685
686static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
687{
688 if (dry_run)
689 return SZ_DMAWMB;
690
691 buf[0] = CMD_DMAWMB;
692
693 PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");
694
695 return SZ_DMAWMB;
696}
697
698struct _arg_GO {
699 u8 chan;
700 u32 addr;
701 unsigned ns;
702};
703
704static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
705 const struct _arg_GO *arg)
706{
707 u8 chan = arg->chan;
708 u32 addr = arg->addr;
709 unsigned ns = arg->ns;
710
711 if (dry_run)
712 return SZ_DMAGO;
713
714 buf[0] = CMD_DMAGO;
715 buf[0] |= (ns << 1);
716
717 buf[1] = chan & 0x7;
718
719 *((u32 *)&buf[2]) = addr;
720
721 return SZ_DMAGO;
722}
723
724#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
725
726/* Returns Time-Out */
727static bool _until_dmac_idle(struct pl330_thread *thrd)
728{
729 void __iomem *regs = thrd->dmac->pinfo->base;
730 unsigned long loops = msecs_to_loops(5);
731
732 do {
733 /* Until Manager is Idle */
734 if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
735 break;
736
737 cpu_relax();
738 } while (--loops);
739
740 if (!loops)
741 return true;
742
743 return false;
744}
745
746static inline void _execute_DBGINSN(struct pl330_thread *thrd,
747 u8 insn[], bool as_manager)
748{
749 void __iomem *regs = thrd->dmac->pinfo->base;
750 u32 val;
751
752 val = (insn[0] << 16) | (insn[1] << 24);
753 if (!as_manager) {
754 val |= (1 << 0);
755 val |= (thrd->id << 8); /* Channel Number */
756 }
757 writel(val, regs + DBGINST0);
758
759 val = *((u32 *)&insn[2]);
760 writel(val, regs + DBGINST1);
761
762 /* If timed out due to halted state-machine */
763 if (_until_dmac_idle(thrd)) {
764 dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
765 return;
766 }
767
768 /* Get going */
769 writel(0, regs + DBGCMD);
770}
771
772/*
773 * Mark a _pl330_req as free.
774 * We do it by writing DMAEND as the first instruction
775 * because no valid request is going to have DMAEND as
776 * its first instruction to execute.
777 */
778static void mark_free(struct pl330_thread *thrd, int idx)
779{
780 struct _pl330_req *req = &thrd->req[idx];
781
782 _emit_END(0, req->mc_cpu);
783 req->mc_len = 0;
784
785 thrd->req_running = -1;
786}
787
788static inline u32 _state(struct pl330_thread *thrd)
789{
790 void __iomem *regs = thrd->dmac->pinfo->base;
791 u32 val;
792
793 if (is_manager(thrd))
794 val = readl(regs + DS) & 0xf;
795 else
796 val = readl(regs + CS(thrd->id)) & 0xf;
797
798 switch (val) {
799 case DS_ST_STOP:
800 return PL330_STATE_STOPPED;
801 case DS_ST_EXEC:
802 return PL330_STATE_EXECUTING;
803 case DS_ST_CMISS:
804 return PL330_STATE_CACHEMISS;
805 case DS_ST_UPDTPC:
806 return PL330_STATE_UPDTPC;
807 case DS_ST_WFE:
808 return PL330_STATE_WFE;
809 case DS_ST_FAULT:
810 return PL330_STATE_FAULTING;
811 case DS_ST_ATBRR:
812 if (is_manager(thrd))
813 return PL330_STATE_INVALID;
814 else
815 return PL330_STATE_ATBARRIER;
816 case DS_ST_QBUSY:
817 if (is_manager(thrd))
818 return PL330_STATE_INVALID;
819 else
820 return PL330_STATE_QUEUEBUSY;
821 case DS_ST_WFP:
822 if (is_manager(thrd))
823 return PL330_STATE_INVALID;
824 else
825 return PL330_STATE_WFP;
826 case DS_ST_KILL:
827 if (is_manager(thrd))
828 return PL330_STATE_INVALID;
829 else
830 return PL330_STATE_KILLING;
831 case DS_ST_CMPLT:
832 if (is_manager(thrd))
833 return PL330_STATE_INVALID;
834 else
835 return PL330_STATE_COMPLETING;
836 case DS_ST_FLTCMP:
837 if (is_manager(thrd))
838 return PL330_STATE_INVALID;
839 else
840 return PL330_STATE_FAULT_COMPLETING;
841 default:
842 return PL330_STATE_INVALID;
843 }
844}
845
846static void _stop(struct pl330_thread *thrd)
847{
848 void __iomem *regs = thrd->dmac->pinfo->base;
849 u8 insn[6] = {0, 0, 0, 0, 0, 0};
850
851 if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
852 UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
853
854 /* Return if nothing needs to be done */
855 if (_state(thrd) == PL330_STATE_COMPLETING
856 || _state(thrd) == PL330_STATE_KILLING
857 || _state(thrd) == PL330_STATE_STOPPED)
858 return;
859
860 _emit_KILL(0, insn);
861
862 /* Stop generating interrupts for SEV */
863 writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
864
865 _execute_DBGINSN(thrd, insn, is_manager(thrd));
866}
867
868/* Start doing req 'idx' of thread 'thrd' */
869static bool _trigger(struct pl330_thread *thrd)
870{
871 void __iomem *regs = thrd->dmac->pinfo->base;
872 struct _pl330_req *req;
873 struct pl330_req *r;
874 struct _arg_GO go;
875 unsigned ns;
876 u8 insn[6] = {0, 0, 0, 0, 0, 0};
877 int idx;
878
879 /* Return if already ACTIVE */
880 if (_state(thrd) != PL330_STATE_STOPPED)
881 return true;
882
883 idx = 1 - thrd->lstenq;
884 if (!IS_FREE(&thrd->req[idx]))
885 req = &thrd->req[idx];
886 else {
887 idx = thrd->lstenq;
888 if (!IS_FREE(&thrd->req[idx]))
889 req = &thrd->req[idx];
890 else
891 req = NULL;
892 }
893
894 /* Return if no request */
895 if (!req || !req->r)
896 return true;
897
898 r = req->r;
899
900 if (r->cfg)
901 ns = r->cfg->nonsecure ? 1 : 0;
902 else if (readl(regs + CS(thrd->id)) & CS_CNS)
903 ns = 1;
904 else
905 ns = 0;
906
907 /* See 'Abort Sources' point-4 at Page 2-25 */
908 if (_manager_ns(thrd) && !ns)
909 dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
910 __func__, __LINE__);
911
912 go.chan = thrd->id;
913 go.addr = req->mc_bus;
914 go.ns = ns;
915 _emit_GO(0, insn, &go);
916
917 /* Set to generate interrupts for SEV */
918 writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);
919
920 /* Only manager can execute GO */
921 _execute_DBGINSN(thrd, insn, true);
922
923 thrd->req_running = idx;
924
925 return true;
926}
927
928static bool _start(struct pl330_thread *thrd)
929{
930 switch (_state(thrd)) {
931 case PL330_STATE_FAULT_COMPLETING:
932 UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
933
934 if (_state(thrd) == PL330_STATE_KILLING)
935 UNTIL(thrd, PL330_STATE_STOPPED)
936
937 case PL330_STATE_FAULTING:
938 _stop(thrd);
939
940 case PL330_STATE_KILLING:
941 case PL330_STATE_COMPLETING:
942 UNTIL(thrd, PL330_STATE_STOPPED)
943
944 case PL330_STATE_STOPPED:
945 return _trigger(thrd);
946
947 case PL330_STATE_WFP:
948 case PL330_STATE_QUEUEBUSY:
949 case PL330_STATE_ATBARRIER:
950 case PL330_STATE_UPDTPC:
951 case PL330_STATE_CACHEMISS:
952 case PL330_STATE_EXECUTING:
953 return true;
954
955 case PL330_STATE_WFE: /* For RESUME, nothing yet */
956 default:
957 return false;
958 }
959}
960
961static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
962 const struct _xfer_spec *pxs, int cyc)
963{
964 int off = 0;
965
966 while (cyc--) {
967 off += _emit_LD(dry_run, &buf[off], ALWAYS);
968 off += _emit_RMB(dry_run, &buf[off]);
969 off += _emit_ST(dry_run, &buf[off], ALWAYS);
970 off += _emit_WMB(dry_run, &buf[off]);
971 }
972
973 return off;
974}
975
976static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
977 const struct _xfer_spec *pxs, int cyc)
978{
979 int off = 0;
980
981 while (cyc--) {
982 off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
983 off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
984 off += _emit_ST(dry_run, &buf[off], ALWAYS);
985 off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
986 }
987
988 return off;
989}
990
991static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
992 const struct _xfer_spec *pxs, int cyc)
993{
994 int off = 0;
995
996 while (cyc--) {
997 off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
998 off += _emit_LD(dry_run, &buf[off], ALWAYS);
999 off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
1000 off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
1001 }
1002
1003 return off;
1004}
1005
1006static int _bursts(unsigned dry_run, u8 buf[],
1007 const struct _xfer_spec *pxs, int cyc)
1008{
1009 int off = 0;
1010
1011 switch (pxs->r->rqtype) {
1012 case MEMTODEV:
1013 off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
1014 break;
1015 case DEVTOMEM:
1016 off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
1017 break;
1018 case MEMTOMEM:
1019 off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
1020 break;
1021 default:
1022 off += 0x40000000; /* Scare off the Client */
1023 break;
1024 }
1025
1026 return off;
1027}
1028
1029/* Returns bytes consumed and updates bursts */
1030static inline int _loop(unsigned dry_run, u8 buf[],
1031 unsigned long *bursts, const struct _xfer_spec *pxs)
1032{
1033 int cyc, cycmax, szlp, szlpend, szbrst, off;
1034 unsigned lcnt0, lcnt1, ljmp0, ljmp1;
1035 struct _arg_LPEND lpend;
1036
1037 /* Max iterations possible in DMALP is 256 */
1038 if (*bursts >= 256*256) {
1039 lcnt1 = 256;
1040 lcnt0 = 256;
1041 cyc = *bursts / lcnt1 / lcnt0;
1042 } else if (*bursts > 256) {
1043 lcnt1 = 256;
1044 lcnt0 = *bursts / lcnt1;
1045 cyc = 1;
1046 } else {
1047 lcnt1 = *bursts;
1048 lcnt0 = 0;
1049 cyc = 1;
1050 }
1051
1052 szlp = _emit_LP(1, buf, 0, 0);
1053 szbrst = _bursts(1, buf, pxs, 1);
1054
1055 lpend.cond = ALWAYS;
1056 lpend.forever = false;
1057 lpend.loop = 0;
1058 lpend.bjump = 0;
1059 szlpend = _emit_LPEND(1, buf, &lpend);
1060
1061 if (lcnt0) {
1062 szlp *= 2;
1063 szlpend *= 2;
1064 }
1065
1066 /*
1067 * Max bursts that we can unroll due to limit on the
1068 * size of backward jump that can be encoded in DMALPEND
1069 * which is 8-bits and hence 255
1070 */
1071 cycmax = (255 - (szlp + szlpend)) / szbrst;
1072
1073 cyc = (cycmax < cyc) ? cycmax : cyc;
1074
1075 off = 0;
1076
1077 if (lcnt0) {
1078 off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
1079 ljmp0 = off;
1080 }
1081
1082 off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
1083 ljmp1 = off;
1084
1085 off += _bursts(dry_run, &buf[off], pxs, cyc);
1086
1087 lpend.cond = ALWAYS;
1088 lpend.forever = false;
1089 lpend.loop = 1;
1090 lpend.bjump = off - ljmp1;
1091 off += _emit_LPEND(dry_run, &buf[off], &lpend);
1092
1093 if (lcnt0) {
1094 lpend.cond = ALWAYS;
1095 lpend.forever = false;
1096 lpend.loop = 0;
1097 lpend.bjump = off - ljmp0;
1098 off += _emit_LPEND(dry_run, &buf[off], &lpend);
1099 }
1100
1101 *bursts = lcnt1 * cyc;
1102 if (lcnt0)
1103 *bursts *= lcnt0;
1104
1105 return off;
1106}
1107
1108static inline int _setup_loops(unsigned dry_run, u8 buf[],
1109 const struct _xfer_spec *pxs)
1110{
1111 struct pl330_xfer *x = pxs->x;
1112 u32 ccr = pxs->ccr;
1113 unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
1114 int off = 0;
1115
1116 while (bursts) {
1117 c = bursts;
1118 off += _loop(dry_run, &buf[off], &c, pxs);
1119 bursts -= c;
1120 }
1121
1122 return off;
1123}
1124
1125static inline int _setup_xfer(unsigned dry_run, u8 buf[],
1126 const struct _xfer_spec *pxs)
1127{
1128 struct pl330_xfer *x = pxs->x;
1129 int off = 0;
1130
1131 /* DMAMOV SAR, x->src_addr */
1132 off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
1133 /* DMAMOV DAR, x->dst_addr */
1134 off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
1135
1136 /* Setup Loop(s) */
1137 off += _setup_loops(dry_run, &buf[off], pxs);
1138
1139 return off;
1140}
1141
1142/*
1143 * A req is a sequence of one or more xfer units.
1144 * Returns the number of bytes taken to setup the MC for the req.
1145 */
1146static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
1147 unsigned index, struct _xfer_spec *pxs)
1148{
1149 struct _pl330_req *req = &thrd->req[index];
1150 struct pl330_xfer *x;
1151 u8 *buf = req->mc_cpu;
1152 int off = 0;
1153
1154 PL330_DBGMC_START(req->mc_bus);
1155
1156 /* DMAMOV CCR, ccr */
1157 off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
1158
1159 x = pxs->r->x;
1160 do {
1161 /* Error if xfer length is not aligned at burst size */
1162 if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
1163 return -EINVAL;
1164
1165 pxs->x = x;
1166 off += _setup_xfer(dry_run, &buf[off], pxs);
1167
1168 x = x->next;
1169 } while (x);
1170
1171 /* DMASEV peripheral/event */
1172 off += _emit_SEV(dry_run, &buf[off], thrd->ev);
1173 /* DMAEND */
1174 off += _emit_END(dry_run, &buf[off]);
1175
1176 return off;
1177}
1178
1179static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
1180{
1181 u32 ccr = 0;
1182
1183 if (rqc->src_inc)
1184 ccr |= CC_SRCINC;
1185
1186 if (rqc->dst_inc)
1187 ccr |= CC_DSTINC;
1188
1189 /* We set same protection levels for Src and DST for now */
1190 if (rqc->privileged)
1191 ccr |= CC_SRCPRI | CC_DSTPRI;
1192 if (rqc->nonsecure)
1193 ccr |= CC_SRCNS | CC_DSTNS;
1194 if (rqc->insnaccess)
1195 ccr |= CC_SRCIA | CC_DSTIA;
1196
1197 ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
1198 ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);
1199
1200 ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
1201 ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
1202
1203 ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
1204 ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
1205
1206 ccr |= (rqc->swap << CC_SWAP_SHFT);
1207
1208 return ccr;
1209}
1210
1211static inline bool _is_valid(u32 ccr)
1212{
1213 enum pl330_dstcachectrl dcctl;
1214 enum pl330_srccachectrl scctl;
1215
1216 dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
1217 scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;
1218
1219 if (dcctl == DINVALID1 || dcctl == DINVALID2
1220 || scctl == SINVALID1 || scctl == SINVALID2)
1221 return false;
1222 else
1223 return true;
1224}
1225
1226/*
1227 * Submit a list of xfers after which the client wants notification.
1228 * Client is not notified after each xfer unit, just once after all
1229 * xfer units are done or some error occurs.
1230 */
1231int pl330_submit_req(void *ch_id, struct pl330_req *r)
1232{
1233 struct pl330_thread *thrd = ch_id;
1234 struct pl330_dmac *pl330;
1235 struct pl330_info *pi;
1236 struct _xfer_spec xs;
1237 unsigned long flags;
1238 void __iomem *regs;
1239 unsigned idx;
1240 u32 ccr;
1241 int ret = 0;
1242
1243 /* No Req or Unacquired Channel or DMAC */
1244 if (!r || !thrd || thrd->free)
1245 return -EINVAL;
1246
1247 pl330 = thrd->dmac;
1248 pi = pl330->pinfo;
1249 regs = pi->base;
1250
1251 if (pl330->state == DYING
1252 || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
1253 dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
1254 __func__, __LINE__);
1255 return -EAGAIN;
1256 }
1257
1258 /* If request for non-existing peripheral */
1259 if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
1260 dev_info(thrd->dmac->pinfo->dev,
1261 "%s:%d Invalid peripheral(%u)!\n",
1262 __func__, __LINE__, r->peri);
1263 return -EINVAL;
1264 }
1265
1266 spin_lock_irqsave(&pl330->lock, flags);
1267
1268 if (_queue_full(thrd)) {
1269 ret = -EAGAIN;
1270 goto xfer_exit;
1271 }
1272
1273 /* Prefer Secure Channel */
1274 if (!_manager_ns(thrd))
1275 r->cfg->nonsecure = 0;
1276 else
1277 r->cfg->nonsecure = 1;
1278
1279 /* Use last settings, if not provided */
1280 if (r->cfg)
1281 ccr = _prepare_ccr(r->cfg);
1282 else
1283 ccr = readl(regs + CC(thrd->id));
1284
1285 /* If this req doesn't have valid xfer settings */
1286 if (!_is_valid(ccr)) {
1287 ret = -EINVAL;
1288 dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
1289 __func__, __LINE__, ccr);
1290 goto xfer_exit;
1291 }
1292
1293 idx = IS_FREE(&thrd->req[0]) ? 0 : 1;
1294
1295 xs.ccr = ccr;
1296 xs.r = r;
1297
1298 /* First dry run to check if req is acceptable */
1299 ret = _setup_req(1, thrd, idx, &xs);
1300 if (ret < 0)
1301 goto xfer_exit;
1302
1303 if (ret > pi->mcbufsz / 2) {
1304 dev_info(thrd->dmac->pinfo->dev,
1305 "%s:%d Trying increasing mcbufsz\n",
1306 __func__, __LINE__);
1307 ret = -ENOMEM;
1308 goto xfer_exit;
1309 }
1310
1311 /* Hook the request */
1312 thrd->lstenq = idx;
1313 thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
1314 thrd->req[idx].r = r;
1315
1316 ret = 0;
1317
1318xfer_exit:
1319 spin_unlock_irqrestore(&pl330->lock, flags);
1320
1321 return ret;
1322}
1323EXPORT_SYMBOL(pl330_submit_req);
1324
/*
 * Deferred (tasklet) error recovery: act on the reset flags recorded by
 * pl330_update(). Escalates a DMAC reset to a manager reset, a manager
 * reset to a reset of every channel, then stops each flagged channel
 * and aborts its queued requests via their callbacks.
 */
static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	struct pl330_info *pi = pl330->pinfo;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pi->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pi->base;
			enum pl330_op_err err;

			_stop(thrd);

			/* FSC tells whether the channel itself faulted */
			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			/*
			 * Drop the lock around the client callbacks: they
			 * may resubmit and re-enter driver entry points
			 * that take pl330->lock themselves.
			 */
			spin_unlock_irqrestore(&pl330->lock, flags);

			_callback(thrd->req[1 - thrd->lstenq].r, err);
			_callback(thrd->req[thrd->lstenq].r, err);

			spin_lock_irqsave(&pl330->lock, flags);

			/* Both request slots are now abandoned */
			thrd->req[0].r = NULL;
			thrd->req[1].r = NULL;
			mark_free(thrd, 0);
			mark_free(thrd, 1);

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return;
}
1386
/*
 * Interrupt-time housekeeping for the DMAC: record fault conditions for
 * the tasklet, acknowledge completion events, restart the affected
 * channels and run the done-callbacks with the lock dropped.
 *
 * Returns 1 if state was updated, 0 otherwise
 */
int pl330_update(const struct pl330_info *pi)
{
	struct _pl330_req *rqdone;
	struct pl330_dmac *pl330;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	if (!pi || !pi->pl330_data)
		return 0;

	regs = pi->base;
	pl330 = pi->pl330_data;

	spin_lock_irqsave(&pl330->lock, flags);

	/* Manager fault status -> schedule a manager reset */
	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	/* Per-channel fault status: stop faulting channels immediately,
	 * leave the rest of the recovery to pl330_dotask().
	 */
	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pi->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pi->dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
					readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened i.e, thread notified */
	val = readl(regs + ES);
	if (pi->pcfg.num_events < 32
			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
		/* An event beyond those the DMAC claims to have: broken HW */
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			/* Map the event back to its owning channel */
			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			rqdone = &thrd->req[active];
			mark_free(thrd, active);

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&rqdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	while (!list_empty(&pl330->req_done)) {
		struct pl330_req *r;

		rqdone = container_of(pl330->req_done.next,
					struct _pl330_req, rqd);

		list_del_init(&rqdone->rqd);

		/* Detach the req */
		r = rqdone->r;
		rqdone->r = NULL;

		/* Callbacks may resubmit, so run them unlocked */
		spin_unlock_irqrestore(&pl330->lock, flags);
		_callback(r, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	/* Defer heavier recovery work to the tasklet */
	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}
EXPORT_SYMBOL(pl330_update);
1499
1500int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
1501{
1502 struct pl330_thread *thrd = ch_id;
1503 struct pl330_dmac *pl330;
1504 unsigned long flags;
1505 int ret = 0, active;
1506
1507 if (!thrd || thrd->free || thrd->dmac->state == DYING)
1508 return -EINVAL;
1509
1510 pl330 = thrd->dmac;
1511 active = thrd->req_running;
1512
1513 spin_lock_irqsave(&pl330->lock, flags);
1514
1515 switch (op) {
1516 case PL330_OP_FLUSH:
1517 /* Make sure the channel is stopped */
1518 _stop(thrd);
1519
1520 thrd->req[0].r = NULL;
1521 thrd->req[1].r = NULL;
1522 mark_free(thrd, 0);
1523 mark_free(thrd, 1);
1524 break;
1525
1526 case PL330_OP_ABORT:
1527 /* Make sure the channel is stopped */
1528 _stop(thrd);
1529
1530 /* ABORT is only for the active req */
1531 if (active == -1)
1532 break;
1533
1534 thrd->req[active].r = NULL;
1535 mark_free(thrd, active);
1536
1537 /* Start the next */
1538 case PL330_OP_START:
1539 if ((active == -1) && !_start(thrd))
1540 ret = -EIO;
1541 break;
1542
1543 default:
1544 ret = -EINVAL;
1545 }
1546
1547 spin_unlock_irqrestore(&pl330->lock, flags);
1548 return ret;
1549}
1550EXPORT_SYMBOL(pl330_chan_ctrl);
1551
1552int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus)
1553{
1554 struct pl330_thread *thrd = ch_id;
1555 struct pl330_dmac *pl330;
1556 struct pl330_info *pi;
1557 void __iomem *regs;
1558 int active;
1559 u32 val;
1560
1561 if (!pstatus || !thrd || thrd->free)
1562 return -EINVAL;
1563
1564 pl330 = thrd->dmac;
1565 pi = pl330->pinfo;
1566 regs = pi->base;
1567
1568 /* The client should remove the DMAC and add again */
1569 if (pl330->state == DYING)
1570 pstatus->dmac_halted = true;
1571 else
1572 pstatus->dmac_halted = false;
1573
1574 val = readl(regs + FSC);
1575 if (val & (1 << thrd->id))
1576 pstatus->faulting = true;
1577 else
1578 pstatus->faulting = false;
1579
1580 active = thrd->req_running;
1581
1582 if (active == -1) {
1583 /* Indicate that the thread is not running */
1584 pstatus->top_req = NULL;
1585 pstatus->wait_req = NULL;
1586 } else {
1587 pstatus->top_req = thrd->req[active].r;
1588 pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
1589 ? thrd->req[1 - active].r : NULL;
1590 }
1591
1592 pstatus->src_addr = readl(regs + SA(thrd->id));
1593 pstatus->dst_addr = readl(regs + DA(thrd->id));
1594
1595 return 0;
1596}
1597EXPORT_SYMBOL(pl330_chan_status);
1598
1599/* Reserve an event */
1600static inline int _alloc_event(struct pl330_thread *thrd)
1601{
1602 struct pl330_dmac *pl330 = thrd->dmac;
1603 struct pl330_info *pi = pl330->pinfo;
1604 int ev;
1605
1606 for (ev = 0; ev < pi->pcfg.num_events; ev++)
1607 if (pl330->events[ev] == -1) {
1608 pl330->events[ev] = thrd->id;
1609 return ev;
1610 }
1611
1612 return -1;
1613}
1614
1615static bool _chan_ns(const struct pl330_info *pi, int i)
1616{
1617 return pi->pcfg.irq_ns & (1 << i);
1618}
1619
1620/* Upon success, returns IdentityToken for the
1621 * allocated channel, NULL otherwise.
1622 */
1623void *pl330_request_channel(const struct pl330_info *pi)
1624{
1625 struct pl330_thread *thrd = NULL;
1626 struct pl330_dmac *pl330;
1627 unsigned long flags;
1628 int chans, i;
1629
1630 if (!pi || !pi->pl330_data)
1631 return NULL;
1632
1633 pl330 = pi->pl330_data;
1634
1635 if (pl330->state == DYING)
1636 return NULL;
1637
1638 chans = pi->pcfg.num_chan;
1639
1640 spin_lock_irqsave(&pl330->lock, flags);
1641
1642 for (i = 0; i < chans; i++) {
1643 thrd = &pl330->channels[i];
1644 if ((thrd->free) && (!_manager_ns(thrd) ||
1645 _chan_ns(pi, i))) {
1646 thrd->ev = _alloc_event(thrd);
1647 if (thrd->ev >= 0) {
1648 thrd->free = false;
1649 thrd->lstenq = 1;
1650 thrd->req[0].r = NULL;
1651 mark_free(thrd, 0);
1652 thrd->req[1].r = NULL;
1653 mark_free(thrd, 1);
1654 break;
1655 }
1656 }
1657 thrd = NULL;
1658 }
1659
1660 spin_unlock_irqrestore(&pl330->lock, flags);
1661
1662 return thrd;
1663}
1664EXPORT_SYMBOL(pl330_request_channel);
1665
1666/* Release an event */
1667static inline void _free_event(struct pl330_thread *thrd, int ev)
1668{
1669 struct pl330_dmac *pl330 = thrd->dmac;
1670 struct pl330_info *pi = pl330->pinfo;
1671
1672 /* If the event is valid and was held by the thread */
1673 if (ev >= 0 && ev < pi->pcfg.num_events
1674 && pl330->events[ev] == thrd->id)
1675 pl330->events[ev] = -1;
1676}
1677
/*
 * Hand a channel thread back to the pool: stop it, abort any queued
 * requests via their callbacks, release its event and mark it free.
 */
void pl330_release_channel(void *ch_id)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	/* Callbacks run without the lock; clients may re-enter the driver */
	_callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
	_callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}
EXPORT_SYMBOL(pl330_release_channel);
1700
/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC.
 * Reads the hardware's CR0/CR3/CR4/CRD configuration registers; the
 * count fields are stored as (n - 1) in hardware, hence the "+ 1"s.
 */
static void read_dmac_config(struct pl330_info *pi)
{
	void __iomem *regs = pi->base;
	u32 val;

	/* AXI data bus width, encoded as log2(bytes) */
	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pi->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pi->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pi->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		/* Peripheral request interface present */
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pi->pcfg.num_peri = val;
		pi->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pi->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pi->pcfg.mode |= DMAC_MODE_NS;
	else
		pi->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pi->pcfg.num_events = val;

	/* Per-event interrupt security state */
	pi->pcfg.irq_ns = readl(regs + CR3);

	pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
	pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
}
1748
1749static inline void _reset_thread(struct pl330_thread *thrd)
1750{
1751 struct pl330_dmac *pl330 = thrd->dmac;
1752 struct pl330_info *pi = pl330->pinfo;
1753
1754 thrd->req[0].mc_cpu = pl330->mcode_cpu
1755 + (thrd->id * pi->mcbufsz);
1756 thrd->req[0].mc_bus = pl330->mcode_bus
1757 + (thrd->id * pi->mcbufsz);
1758 thrd->req[0].r = NULL;
1759 mark_free(thrd, 0);
1760
1761 thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
1762 + pi->mcbufsz / 2;
1763 thrd->req[1].mc_bus = thrd->req[0].mc_bus
1764 + pi->mcbufsz / 2;
1765 thrd->req[1].r = NULL;
1766 mark_free(thrd, 1);
1767}
1768
1769static int dmac_alloc_threads(struct pl330_dmac *pl330)
1770{
1771 struct pl330_info *pi = pl330->pinfo;
1772 int chans = pi->pcfg.num_chan;
1773 struct pl330_thread *thrd;
1774 int i;
1775
1776 /* Allocate 1 Manager and 'chans' Channel threads */
1777 pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
1778 GFP_KERNEL);
1779 if (!pl330->channels)
1780 return -ENOMEM;
1781
1782 /* Init Channel threads */
1783 for (i = 0; i < chans; i++) {
1784 thrd = &pl330->channels[i];
1785 thrd->id = i;
1786 thrd->dmac = pl330;
1787 _reset_thread(thrd);
1788 thrd->free = true;
1789 }
1790
1791 /* MANAGER is indexed at the end */
1792 thrd = &pl330->channels[chans];
1793 thrd->id = chans;
1794 thrd->dmac = pl330;
1795 thrd->free = false;
1796 pl330->manager = thrd;
1797
1798 return 0;
1799}
1800
1801static int dmac_alloc_resources(struct pl330_dmac *pl330)
1802{
1803 struct pl330_info *pi = pl330->pinfo;
1804 int chans = pi->pcfg.num_chan;
1805 int ret;
1806
1807 /*
1808 * Alloc MicroCode buffer for 'chans' Channel threads.
1809 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
1810 */
1811 pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
1812 chans * pi->mcbufsz,
1813 &pl330->mcode_bus, GFP_KERNEL);
1814 if (!pl330->mcode_cpu) {
1815 dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
1816 __func__, __LINE__);
1817 return -ENOMEM;
1818 }
1819
1820 ret = dmac_alloc_threads(pl330);
1821 if (ret) {
1822 dev_err(pi->dev, "%s:%d Can't to create channels for DMAC!\n",
1823 __func__, __LINE__);
1824 dma_free_coherent(pi->dev,
1825 chans * pi->mcbufsz,
1826 pl330->mcode_cpu, pl330->mcode_bus);
1827 return ret;
1828 }
1829
1830 return 0;
1831}
1832
/*
 * Register a PL330 DMAC with the core: optionally reset it, verify its
 * peripheral/primecell IDs, read its hardware configuration and
 * allocate per-channel resources. Returns 0 or a negative errno.
 */
int pl330_add(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;
	void __iomem *regs;
	int i, ret;

	if (!pi || !pi->dev)
		return -EINVAL;

	/* If already added */
	if (pi->pl330_data)
		return -EINVAL;

	/*
	 * If the SoC can perform reset on the DMAC, then do it
	 * before reading its configuration.
	 */
	if (pi->dmac_reset)
		pi->dmac_reset(pi);

	regs = pi->base;

	/* Check if we can handle this DMAC */
	if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
	   || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
		dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
			get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pi);

	/* Completion signalling needs at least one event */
	if (pi->pcfg.num_events == 0) {
		dev_err(pi->dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
	if (!pl330) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	/* Assign the info structure and private data */
	pl330->pinfo = pi;
	pi->pl330_data = pl330;

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pi->mcbufsz)
		pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pi->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pi->dev, "Unable to create channels for DMAC\n");
		kfree(pl330);
		return ret;
	}

	/* Error recovery runs from this tasklet (see pl330_dotask) */
	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

	return 0;
}
EXPORT_SYMBOL(pl330_add);
1910
1911static int dmac_free_threads(struct pl330_dmac *pl330)
1912{
1913 struct pl330_info *pi = pl330->pinfo;
1914 int chans = pi->pcfg.num_chan;
1915 struct pl330_thread *thrd;
1916 int i;
1917
1918 /* Release Channel threads */
1919 for (i = 0; i < chans; i++) {
1920 thrd = &pl330->channels[i];
1921 pl330_release_channel((void *)thrd);
1922 }
1923
1924 /* Free memory */
1925 kfree(pl330->channels);
1926
1927 return 0;
1928}
1929
1930static void dmac_free_resources(struct pl330_dmac *pl330)
1931{
1932 struct pl330_info *pi = pl330->pinfo;
1933 int chans = pi->pcfg.num_chan;
1934
1935 dmac_free_threads(pl330);
1936
1937 dma_free_coherent(pi->dev, chans * pi->mcbufsz,
1938 pl330->mcode_cpu, pl330->mcode_bus);
1939}
1940
/*
 * Unregister a DMAC from the core: stop the recovery tasklet, release
 * all resources and detach the private data from the pl330_info.
 */
void pl330_del(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;

	if (!pi || !pi->pl330_data)
		return;

	pl330 = pi->pl330_data;

	pl330->state = UNINIT;

	/* Kill before freeing: the tasklet touches pl330 */
	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_resources(pl330);

	kfree(pl330);
	pi->pl330_data = NULL;
}
EXPORT_SYMBOL(pl330_del);
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index 1103f62a1964..a8314c3ee84d 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -57,18 +57,24 @@ CONFIG_NETDEVICES=y
57CONFIG_NET_ETHERNET=y 57CONFIG_NET_ETHERNET=y
58CONFIG_NET_PCI=y 58CONFIG_NET_PCI=y
59CONFIG_E100=y 59CONFIG_E100=y
60CONFIG_SMC91X=y
60# CONFIG_KEYBOARD_ATKBD is not set 61# CONFIG_KEYBOARD_ATKBD is not set
61# CONFIG_SERIO_SERPORT is not set 62# CONFIG_SERIO_SERPORT is not set
62CONFIG_SERIAL_AMBA_PL010=y 63CONFIG_SERIAL_AMBA_PL010=y
63CONFIG_SERIAL_AMBA_PL010_CONSOLE=y 64CONFIG_SERIAL_AMBA_PL010_CONSOLE=y
64CONFIG_FB=y 65CONFIG_FB=y
65CONFIG_FB_MODE_HELPERS=y 66CONFIG_FB_MODE_HELPERS=y
67CONFIG_FB_ARMCLCD=y
66CONFIG_FB_MATROX=y 68CONFIG_FB_MATROX=y
67CONFIG_FB_MATROX_MILLENIUM=y 69CONFIG_FB_MATROX_MILLENIUM=y
68CONFIG_FB_MATROX_MYSTIQUE=y 70CONFIG_FB_MATROX_MYSTIQUE=y
71# CONFIG_VGA_CONSOLE is not set
72CONFIG_MMC=y
73CONFIG_MMC_ARMMMCI=y
69CONFIG_RTC_CLASS=y 74CONFIG_RTC_CLASS=y
70CONFIG_RTC_DRV_PL030=y 75CONFIG_RTC_DRV_PL030=y
71CONFIG_EXT2_FS=y 76CONFIG_EXT2_FS=y
77CONFIG_VFAT_FS=y
72CONFIG_TMPFS=y 78CONFIG_TMPFS=y
73CONFIG_JFFS2_FS=y 79CONFIG_JFFS2_FS=y
74CONFIG_CRAMFS=y 80CONFIG_CRAMFS=y
@@ -78,5 +84,7 @@ CONFIG_ROOT_NFS=y
78CONFIG_NFSD=y 84CONFIG_NFSD=y
79CONFIG_NFSD_V3=y 85CONFIG_NFSD_V3=y
80CONFIG_PARTITION_ADVANCED=y 86CONFIG_PARTITION_ADVANCED=y
87CONFIG_NLS_CODEPAGE_437=y
88CONFIG_NLS_ISO8859_1=y
81CONFIG_MAGIC_SYSRQ=y 89CONFIG_MAGIC_SYSRQ=y
82CONFIG_DEBUG_KERNEL=y 90CONFIG_DEBUG_KERNEL=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 23371b17b23e..03fb93621d0d 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -23,6 +23,8 @@
23#include <asm/ptrace.h> 23#include <asm/ptrace.h>
24#include <asm/domain.h> 24#include <asm/domain.h>
25 25
26#define IOMEM(x) (x)
27
26/* 28/*
27 * Endian independent macros for shifting bytes within registers. 29 * Endian independent macros for shifting bytes within registers.
28 */ 30 */
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
new file mode 100644
index 000000000000..2fca60ab513a
--- /dev/null
+++ b/arch/arm/include/asm/cpuidle.h
@@ -0,0 +1,29 @@
1#ifndef __ASM_ARM_CPUIDLE_H
2#define __ASM_ARM_CPUIDLE_H
3
4#ifdef CONFIG_CPU_IDLE
5extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
6 struct cpuidle_driver *drv, int index);
7#else
8static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
9 struct cpuidle_driver *drv, int index) { return -ENODEV; }
10#endif
11
12/* Common ARM WFI state */
13#define ARM_CPUIDLE_WFI_STATE_PWR(p) {\
14 .enter = arm_cpuidle_simple_enter,\
15 .exit_latency = 1,\
16 .target_residency = 1,\
17 .power_usage = p,\
18 .flags = CPUIDLE_FLAG_TIME_VALID,\
19 .name = "WFI",\
20 .desc = "ARM WFI",\
21}
22
23/*
24 * in case power_specified == 1, give a default WFI power value needed
25 * by some governors
26 */
27#define ARM_CPUIDLE_WFI_STATE ARM_CPUIDLE_WFI_STATE_PWR(UINT_MAX)
28
29#endif
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 0e9ce8d9686e..38050b1c4800 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -130,8 +130,4 @@ struct mm_struct;
130extern unsigned long arch_randomize_brk(struct mm_struct *mm); 130extern unsigned long arch_randomize_brk(struct mm_struct *mm);
131#define arch_randomize_brk arch_randomize_brk 131#define arch_randomize_brk arch_randomize_brk
132 132
133extern int vectors_user_mapping(void);
134#define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
135#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
136
137#endif 133#endif
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 7df239bcdf27..c4c87bc12231 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -103,11 +103,11 @@
103#define L2X0_ADDR_FILTER_EN 1 103#define L2X0_ADDR_FILTER_EN 1
104 104
105#ifndef __ASSEMBLY__ 105#ifndef __ASSEMBLY__
106extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask); 106extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask);
107#if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF) 107#if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF)
108extern int l2x0_of_init(__u32 aux_val, __u32 aux_mask); 108extern int l2x0_of_init(u32 aux_val, u32 aux_mask);
109#else 109#else
110static inline int l2x0_of_init(__u32 aux_val, __u32 aux_mask) 110static inline int l2x0_of_init(u32 aux_val, u32 aux_mask)
111{ 111{
112 return -ENODEV; 112 return -ENODEV;
113} 113}
diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h
index 59b8c3892f76..122f86d8c991 100644
--- a/arch/arm/include/asm/hardware/iop_adma.h
+++ b/arch/arm/include/asm/hardware/iop_adma.h
@@ -49,7 +49,6 @@ struct iop_adma_device {
49/** 49/**
50 * struct iop_adma_chan - internal representation of an ADMA device 50 * struct iop_adma_chan - internal representation of an ADMA device
51 * @pending: allows batching of hardware operations 51 * @pending: allows batching of hardware operations
52 * @completed_cookie: identifier for the most recently completed operation
53 * @lock: serializes enqueue/dequeue operations to the slot pool 52 * @lock: serializes enqueue/dequeue operations to the slot pool
54 * @mmr_base: memory mapped register base 53 * @mmr_base: memory mapped register base
55 * @chain: device chain view of the descriptors 54 * @chain: device chain view of the descriptors
@@ -62,7 +61,6 @@ struct iop_adma_device {
62 */ 61 */
63struct iop_adma_chan { 62struct iop_adma_chan {
64 int pending; 63 int pending;
65 dma_cookie_t completed_cookie;
66 spinlock_t lock; /* protects the descriptor slot pool */ 64 spinlock_t lock; /* protects the descriptor slot pool */
67 void __iomem *mmr_base; 65 void __iomem *mmr_base;
68 struct list_head chain; 66 struct list_head chain;
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 43cab498bc27..73f84fa4f366 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -9,6 +9,9 @@
9 9
10#ifndef __ASM_HARDWARE_IT8152_H 10#ifndef __ASM_HARDWARE_IT8152_H
11#define __ASM_HARDWARE_IT8152_H 11#define __ASM_HARDWARE_IT8152_H
12
13#include <mach/irqs.h>
14
12extern void __iomem *it8152_base_address; 15extern void __iomem *it8152_base_address;
13 16
14#define IT8152_IO_BASE (it8152_base_address + 0x03e00000) 17#define IT8152_IO_BASE (it8152_base_address + 0x03e00000)
diff --git a/arch/arm/include/asm/hardware/pl330.h b/arch/arm/include/asm/hardware/pl330.h
deleted file mode 100644
index c1821385abfa..000000000000
--- a/arch/arm/include/asm/hardware/pl330.h
+++ /dev/null
@@ -1,217 +0,0 @@
1/* linux/include/asm/hardware/pl330.h
2 *
3 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#ifndef __PL330_CORE_H
22#define __PL330_CORE_H
23
24#define PL330_MAX_CHAN 8
25#define PL330_MAX_IRQS 32
26#define PL330_MAX_PERI 32
27
28enum pl330_srccachectrl {
29 SCCTRL0 = 0, /* Noncacheable and nonbufferable */
30 SCCTRL1, /* Bufferable only */
31 SCCTRL2, /* Cacheable, but do not allocate */
32 SCCTRL3, /* Cacheable and bufferable, but do not allocate */
33 SINVALID1,
34 SINVALID2,
35 SCCTRL6, /* Cacheable write-through, allocate on reads only */
36 SCCTRL7, /* Cacheable write-back, allocate on reads only */
37};
38
39enum pl330_dstcachectrl {
40 DCCTRL0 = 0, /* Noncacheable and nonbufferable */
41 DCCTRL1, /* Bufferable only */
42 DCCTRL2, /* Cacheable, but do not allocate */
43 DCCTRL3, /* Cacheable and bufferable, but do not allocate */
44 DINVALID1, /* AWCACHE = 0x1000 */
45 DINVALID2,
46 DCCTRL6, /* Cacheable write-through, allocate on writes only */
47 DCCTRL7, /* Cacheable write-back, allocate on writes only */
48};
49
50/* Populated by the PL330 core driver for DMA API driver's info */
51struct pl330_config {
52 u32 periph_id;
53 u32 pcell_id;
54#define DMAC_MODE_NS (1 << 0)
55 unsigned int mode;
56 unsigned int data_bus_width:10; /* In number of bits */
57 unsigned int data_buf_dep:10;
58 unsigned int num_chan:4;
59 unsigned int num_peri:6;
60 u32 peri_ns;
61 unsigned int num_events:6;
62 u32 irq_ns;
63};
64
65/* Handle to the DMAC provided to the PL330 core */
66struct pl330_info {
67 /* Owning device */
68 struct device *dev;
69 /* Size of MicroCode buffers for each channel. */
70 unsigned mcbufsz;
71 /* ioremap'ed address of PL330 registers. */
72 void __iomem *base;
73 /* Client can freely use it. */
74 void *client_data;
75 /* PL330 core data, Client must not touch it. */
76 void *pl330_data;
77 /* Populated by the PL330 core driver during pl330_add */
78 struct pl330_config pcfg;
79 /*
80 * If the DMAC has some reset mechanism, then the
81 * client may want to provide pointer to the method.
82 */
83 void (*dmac_reset)(struct pl330_info *pi);
84};
85
86enum pl330_byteswap {
87 SWAP_NO = 0,
88 SWAP_2,
89 SWAP_4,
90 SWAP_8,
91 SWAP_16,
92};
93
94/**
95 * Request Configuration.
96 * The PL330 core does not modify this and uses the last
97 * working configuration if the request doesn't provide any.
98 *
99 * The Client may want to provide this info only for the
100 * first request and a request with new settings.
101 */
102struct pl330_reqcfg {
103 /* Address Incrementing */
104 unsigned dst_inc:1;
105 unsigned src_inc:1;
106
107 /*
108 * For now, the SRC & DST protection levels
109 * and burst size/length are assumed same.
110 */
111 bool nonsecure;
112 bool privileged;
113 bool insnaccess;
114 unsigned brst_len:5;
115 unsigned brst_size:3; /* in power of 2 */
116
117 enum pl330_dstcachectrl dcctl;
118 enum pl330_srccachectrl scctl;
119 enum pl330_byteswap swap;
120};
121
122/*
123 * One cycle of DMAC operation.
124 * There may be more than one xfer in a request.
125 */
126struct pl330_xfer {
127 u32 src_addr;
128 u32 dst_addr;
129 /* Size to xfer */
130 u32 bytes;
131 /*
132 * Pointer to next xfer in the list.
133 * The last xfer in the req must point to NULL.
134 */
135 struct pl330_xfer *next;
136};
137
138/* The xfer callbacks are made with one of these arguments. */
139enum pl330_op_err {
140 /* The all xfers in the request were success. */
141 PL330_ERR_NONE,
142 /* If req aborted due to global error. */
143 PL330_ERR_ABORT,
144 /* If req failed due to problem with Channel. */
145 PL330_ERR_FAIL,
146};
147
148enum pl330_reqtype {
149 MEMTOMEM,
150 MEMTODEV,
151 DEVTOMEM,
152 DEVTODEV,
153};
154
155/* A request defining Scatter-Gather List ending with NULL xfer. */
156struct pl330_req {
157 enum pl330_reqtype rqtype;
158 /* Index of peripheral for the xfer. */
159 unsigned peri:5;
160 /* Unique token for this xfer, set by the client. */
161 void *token;
162 /* Callback to be called after xfer. */
163 void (*xfer_cb)(void *token, enum pl330_op_err err);
164 /* If NULL, req will be done at last set parameters. */
165 struct pl330_reqcfg *cfg;
166 /* Pointer to first xfer in the request. */
167 struct pl330_xfer *x;
168};
169
170/*
171 * To know the status of the channel and DMAC, the client
172 * provides a pointer to this structure. The PL330 core
173 * fills it with current information.
174 */
175struct pl330_chanstatus {
176 /*
177 * If the DMAC engine halted due to some error,
178 * the client should remove-add DMAC.
179 */
180 bool dmac_halted;
181 /*
182 * If channel is halted due to some error,
183 * the client should ABORT/FLUSH and START the channel.
184 */
185 bool faulting;
186 /* Location of last load */
187 u32 src_addr;
188 /* Location of last store */
189 u32 dst_addr;
190 /*
191 * Pointer to the currently active req, NULL if channel is
192 * inactive, even though the requests may be present.
193 */
194 struct pl330_req *top_req;
195 /* Pointer to req waiting second in the queue if any. */
196 struct pl330_req *wait_req;
197};
198
199enum pl330_chan_op {
200 /* Start the channel */
201 PL330_OP_START,
202 /* Abort the active xfer */
203 PL330_OP_ABORT,
204 /* Stop xfer and flush queue */
205 PL330_OP_FLUSH,
206};
207
208extern int pl330_add(struct pl330_info *);
209extern void pl330_del(struct pl330_info *pi);
210extern int pl330_update(const struct pl330_info *pi);
211extern void pl330_release_channel(void *ch_id);
212extern void *pl330_request_channel(const struct pl330_info *pi);
213extern int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus);
214extern int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op);
215extern int pl330_submit_req(void *ch_id, struct pl330_req *r);
216
217#endif /* __PL330_CORE_H */
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index bae7eb6011d2..df0ac0bb39aa 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -82,6 +82,11 @@ extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, uns
82extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int); 82extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
83extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached); 83extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached);
84extern void __iounmap(volatile void __iomem *addr); 84extern void __iounmap(volatile void __iomem *addr);
85extern void __arm_iounmap(volatile void __iomem *addr);
86
87extern void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
88 unsigned int, void *);
89extern void (*arch_iounmap)(volatile void __iomem *);
85 90
86/* 91/*
87 * Bad read/write accesses... 92 * Bad read/write accesses...
@@ -96,6 +101,8 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
96 return (void __iomem *)addr; 101 return (void __iomem *)addr;
97} 102}
98 103
104#define IOMEM(x) ((void __force __iomem *)(x))
105
99/* IO barriers */ 106/* IO barriers */
100#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE 107#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
101#include <asm/barrier.h> 108#include <asm/barrier.h>
@@ -109,7 +116,11 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
109/* 116/*
110 * Now, pick up the machine-defined IO definitions 117 * Now, pick up the machine-defined IO definitions
111 */ 118 */
119#ifdef CONFIG_NEED_MACH_IO_H
112#include <mach/io.h> 120#include <mach/io.h>
121#else
122#define __io(a) ({ (void)(a); __typesafe_io(0); })
123#endif
113 124
114/* 125/*
115 * This is the limit of PC card/PCI/ISA IO space, which is by default 126 * This is the limit of PC card/PCI/ISA IO space, which is by default
@@ -211,18 +222,18 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
211 * Again, this are defined to perform little endian accesses. See the 222 * Again, this are defined to perform little endian accesses. See the
212 * IO port primitives for more information. 223 * IO port primitives for more information.
213 */ 224 */
214#ifdef __mem_pci 225#ifndef readl
215#define readb_relaxed(c) ({ u8 __r = __raw_readb(__mem_pci(c)); __r; }) 226#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
216#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \ 227#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
217 __raw_readw(__mem_pci(c))); __r; }) 228 __raw_readw(c)); __r; })
218#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \ 229#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
219 __raw_readl(__mem_pci(c))); __r; }) 230 __raw_readl(c)); __r; })
220 231
221#define writeb_relaxed(v,c) ((void)__raw_writeb(v,__mem_pci(c))) 232#define writeb_relaxed(v,c) ((void)__raw_writeb(v,c))
222#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \ 233#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \
223 cpu_to_le16(v),__mem_pci(c))) 234 cpu_to_le16(v),c))
224#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \ 235#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \
225 cpu_to_le32(v),__mem_pci(c))) 236 cpu_to_le32(v),c))
226 237
227#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) 238#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
228#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) 239#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
@@ -232,30 +243,19 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
232#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) 243#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
233#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) 244#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
234 245
235#define readsb(p,d,l) __raw_readsb(__mem_pci(p),d,l) 246#define readsb(p,d,l) __raw_readsb(p,d,l)
236#define readsw(p,d,l) __raw_readsw(__mem_pci(p),d,l) 247#define readsw(p,d,l) __raw_readsw(p,d,l)
237#define readsl(p,d,l) __raw_readsl(__mem_pci(p),d,l) 248#define readsl(p,d,l) __raw_readsl(p,d,l)
238
239#define writesb(p,d,l) __raw_writesb(__mem_pci(p),d,l)
240#define writesw(p,d,l) __raw_writesw(__mem_pci(p),d,l)
241#define writesl(p,d,l) __raw_writesl(__mem_pci(p),d,l)
242 249
243#define memset_io(c,v,l) _memset_io(__mem_pci(c),(v),(l)) 250#define writesb(p,d,l) __raw_writesb(p,d,l)
244#define memcpy_fromio(a,c,l) _memcpy_fromio((a),__mem_pci(c),(l)) 251#define writesw(p,d,l) __raw_writesw(p,d,l)
245#define memcpy_toio(c,a,l) _memcpy_toio(__mem_pci(c),(a),(l)) 252#define writesl(p,d,l) __raw_writesl(p,d,l)
246 253
247#elif !defined(readb) 254#define memset_io(c,v,l) _memset_io(c,(v),(l))
255#define memcpy_fromio(a,c,l) _memcpy_fromio((a),c,(l))
256#define memcpy_toio(c,a,l) _memcpy_toio(c,(a),(l))
248 257
249#define readb(c) (__readwrite_bug("readb"),0) 258#endif /* readl */
250#define readw(c) (__readwrite_bug("readw"),0)
251#define readl(c) (__readwrite_bug("readl"),0)
252#define writeb(v,c) __readwrite_bug("writeb")
253#define writew(v,c) __readwrite_bug("writew")
254#define writel(v,c) __readwrite_bug("writel")
255
256#define check_signature(io,sig,len) (0)
257
258#endif /* __mem_pci */
259 259
260/* 260/*
261 * ioremap and friends. 261 * ioremap and friends.
@@ -264,16 +264,11 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
264 * Documentation/io-mapping.txt. 264 * Documentation/io-mapping.txt.
265 * 265 *
266 */ 266 */
267#ifndef __arch_ioremap 267#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
268#define __arch_ioremap __arm_ioremap 268#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
269#define __arch_iounmap __iounmap 269#define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
270#endif 270#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC)
271 271#define iounmap __arm_iounmap
272#define ioremap(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE)
273#define ioremap_nocache(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE)
274#define ioremap_cached(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_CACHED)
275#define ioremap_wc(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_WC)
276#define iounmap __arch_iounmap
277 272
278/* 273/*
279 * io{read,write}{8,16,32} macros 274 * io{read,write}{8,16,32} macros
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 5a526afb5f18..35c21c375d81 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -1,14 +1,18 @@
1#ifndef __ASM_ARM_IRQ_H 1#ifndef __ASM_ARM_IRQ_H
2#define __ASM_ARM_IRQ_H 2#define __ASM_ARM_IRQ_H
3 3
4#define NR_IRQS_LEGACY 16
5
6#ifndef CONFIG_SPARSE_IRQ
4#include <mach/irqs.h> 7#include <mach/irqs.h>
8#else
9#define NR_IRQS NR_IRQS_LEGACY
10#endif
5 11
6#ifndef irq_canonicalize 12#ifndef irq_canonicalize
7#define irq_canonicalize(i) (i) 13#define irq_canonicalize(i) (i)
8#endif 14#endif
9 15
10#define NR_IRQS_LEGACY 16
11
12/* 16/*
13 * Use this value to indicate lack of interrupt 17 * Use this value to indicate lack of interrupt
14 * capability 18 * capability
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
new file mode 100644
index 000000000000..5c5ca2ea62b0
--- /dev/null
+++ b/arch/arm/include/asm/jump_label.h
@@ -0,0 +1,41 @@
1#ifndef _ASM_ARM_JUMP_LABEL_H
2#define _ASM_ARM_JUMP_LABEL_H
3
4#ifdef __KERNEL__
5
6#include <linux/types.h>
7#include <asm/system.h>
8
9#define JUMP_LABEL_NOP_SIZE 4
10
11#ifdef CONFIG_THUMB2_KERNEL
12#define JUMP_LABEL_NOP "nop.w"
13#else
14#define JUMP_LABEL_NOP "nop"
15#endif
16
17static __always_inline bool arch_static_branch(struct jump_label_key *key)
18{
19 asm goto("1:\n\t"
20 JUMP_LABEL_NOP "\n\t"
21 ".pushsection __jump_table, \"aw\"\n\t"
22 ".word 1b, %l[l_yes], %c0\n\t"
23 ".popsection\n\t"
24 : : "i" (key) : : l_yes);
25
26 return false;
27l_yes:
28 return true;
29}
30
31#endif /* __KERNEL__ */
32
33typedef u32 jump_label_t;
34
35struct jump_entry {
36 jump_label_t code;
37 jump_label_t target;
38 jump_label_t key;
39};
40
41#endif
diff --git a/arch/arm/include/asm/mc146818rtc.h b/arch/arm/include/asm/mc146818rtc.h
index 6b884d2b0b69..e8567bb99dfc 100644
--- a/arch/arm/include/asm/mc146818rtc.h
+++ b/arch/arm/include/asm/mc146818rtc.h
@@ -5,7 +5,9 @@
5#define _ASM_MC146818RTC_H 5#define _ASM_MC146818RTC_H
6 6
7#include <linux/io.h> 7#include <linux/io.h>
8#include <mach/irqs.h> 8#include <linux/kernel.h>
9
10#define RTC_IRQ BUILD_BUG_ON(1)
9 11
10#ifndef RTC_PORT 12#ifndef RTC_PORT
11#define RTC_PORT(x) (0x70 + (x)) 13#define RTC_PORT(x) (0x70 + (x))
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index a8997d71084e..fcb575747e5e 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -116,6 +116,8 @@
116#define MODULES_END (END_MEM) 116#define MODULES_END (END_MEM)
117#define MODULES_VADDR (PHYS_OFFSET) 117#define MODULES_VADDR (PHYS_OFFSET)
118 118
119#define XIP_VIRT_ADDR(physaddr) (physaddr)
120
119#endif /* !CONFIG_MMU */ 121#endif /* !CONFIG_MMU */
120 122
121/* 123/*
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 71605d9f8e42..a0b3cac0547c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -18,6 +18,7 @@
18#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
19#include <asm/cachetype.h> 19#include <asm/cachetype.h>
20#include <asm/proc-fns.h> 20#include <asm/proc-fns.h>
21#include <asm-generic/mm_hooks.h>
21 22
22void __check_kvm_seq(struct mm_struct *mm); 23void __check_kvm_seq(struct mm_struct *mm);
23 24
@@ -133,32 +134,4 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
133#define deactivate_mm(tsk,mm) do { } while (0) 134#define deactivate_mm(tsk,mm) do { } while (0)
134#define activate_mm(prev,next) switch_mm(prev, next, NULL) 135#define activate_mm(prev,next) switch_mm(prev, next, NULL)
135 136
136/*
137 * We are inserting a "fake" vma for the user-accessible vector page so
138 * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
139 * But we also want to remove it before the generic code gets to see it
140 * during process exit or the unmapping of it would cause total havoc.
141 * (the macro is used as remove_vma() is static to mm/mmap.c)
142 */
143#define arch_exit_mmap(mm) \
144do { \
145 struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
146 if (high_vma) { \
147 BUG_ON(high_vma->vm_next); /* it should be last */ \
148 if (high_vma->vm_prev) \
149 high_vma->vm_prev->vm_next = NULL; \
150 else \
151 mm->mmap = NULL; \
152 rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
153 mm->mmap_cache = NULL; \
154 mm->map_count--; \
155 remove_vma(high_vma); \
156 } \
157} while (0)
158
159static inline void arch_dup_mmap(struct mm_struct *oldmm,
160 struct mm_struct *mm)
161{
162}
163
164#endif 137#endif
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
index c0efdd60966f..19c48deda70f 100644
--- a/arch/arm/include/asm/opcodes.h
+++ b/arch/arm/include/asm/opcodes.h
@@ -17,4 +17,63 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
17#define ARM_OPCODE_CONDTEST_PASS 1 17#define ARM_OPCODE_CONDTEST_PASS 1
18#define ARM_OPCODE_CONDTEST_UNCOND 2 18#define ARM_OPCODE_CONDTEST_UNCOND 2
19 19
20
21/*
22 * Opcode byteswap helpers
23 *
24 * These macros help with converting instructions between a canonical integer
25 * format and in-memory representation, in an endianness-agnostic manner.
26 *
27 * __mem_to_opcode_*() convert from in-memory representation to canonical form.
28 * __opcode_to_mem_*() convert from canonical form to in-memory representation.
29 *
30 *
31 * Canonical instruction representation:
32 *
33 * ARM: 0xKKLLMMNN
34 * Thumb 16-bit: 0x0000KKLL, where KK < 0xE8
35 * Thumb 32-bit: 0xKKLLMMNN, where KK >= 0xE8
36 *
37 * There is no way to distinguish an ARM instruction in canonical representation
38 * from a Thumb instruction (just as these cannot be distinguished in memory).
39 * Where this distinction is important, it needs to be tracked separately.
40 *
41 * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not
42 * represent any valid Thumb-2 instruction. For this range,
43 * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false.
44 */
45
46#ifndef __ASSEMBLY__
47
48#include <linux/types.h>
49#include <linux/swab.h>
50
51#ifdef CONFIG_CPU_ENDIAN_BE8
52#define __opcode_to_mem_arm(x) swab32(x)
53#define __opcode_to_mem_thumb16(x) swab16(x)
54#define __opcode_to_mem_thumb32(x) swahb32(x)
55#else
56#define __opcode_to_mem_arm(x) ((u32)(x))
57#define __opcode_to_mem_thumb16(x) ((u16)(x))
58#define __opcode_to_mem_thumb32(x) swahw32(x)
59#endif
60
61#define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x)
62#define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x)
63#define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x)
64
65/* Operations specific to Thumb opcodes */
66
67/* Instruction size checks: */
68#define __opcode_is_thumb32(x) ((u32)(x) >= 0xE8000000UL)
69#define __opcode_is_thumb16(x) ((u32)(x) < 0xE800UL)
70
71/* Operations to construct or split 32-bit Thumb instructions: */
72#define __opcode_thumb32_first(x) ((u16)((x) >> 16))
73#define __opcode_thumb32_second(x) ((u16)(x))
74#define __opcode_thumb32_compose(first, second) \
75 (((u32)(u16)(first) << 16) | (u32)(u16)(second))
76
77#endif /* __ASSEMBLY__ */
78
20#endif /* __ASM_ARM_OPCODES_H */ 79#endif /* __ASM_ARM_OPCODES_H */
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 97b440c25c58..5838361c48b3 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -151,6 +151,8 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
151#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) 151#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
152extern void copy_page(void *to, const void *from); 152extern void copy_page(void *to, const void *from);
153 153
154#define __HAVE_ARCH_GATE_AREA 1
155
154#ifdef CONFIG_ARM_LPAE 156#ifdef CONFIG_ARM_LPAE
155#include <asm/pgtable-3level-types.h> 157#include <asm/pgtable-3level-types.h>
156#else 158#else
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 7523340afb8a..00cbe10a50e3 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -22,6 +22,7 @@ enum arm_perf_pmu_ids {
22 ARM_PERF_PMU_ID_CA9, 22 ARM_PERF_PMU_ID_CA9,
23 ARM_PERF_PMU_ID_CA5, 23 ARM_PERF_PMU_ID_CA5,
24 ARM_PERF_PMU_ID_CA15, 24 ARM_PERF_PMU_ID_CA15,
25 ARM_PERF_PMU_ID_CA7,
25 ARM_NUM_PMU_IDS, 26 ARM_NUM_PMU_IDS,
26}; 27};
27 28
diff --git a/arch/arm/include/asm/posix_types.h b/arch/arm/include/asm/posix_types.h
index 2446d23bfdbf..efdf99045d87 100644
--- a/arch/arm/include/asm/posix_types.h
+++ b/arch/arm/include/asm/posix_types.h
@@ -19,59 +19,22 @@
19 * assume GCC is being used. 19 * assume GCC is being used.
20 */ 20 */
21 21
22typedef unsigned long __kernel_ino_t;
23typedef unsigned short __kernel_mode_t; 22typedef unsigned short __kernel_mode_t;
23#define __kernel_mode_t __kernel_mode_t
24
24typedef unsigned short __kernel_nlink_t; 25typedef unsigned short __kernel_nlink_t;
25typedef long __kernel_off_t; 26#define __kernel_nlink_t __kernel_nlink_t
26typedef int __kernel_pid_t; 27
27typedef unsigned short __kernel_ipc_pid_t; 28typedef unsigned short __kernel_ipc_pid_t;
29#define __kernel_ipc_pid_t __kernel_ipc_pid_t
30
28typedef unsigned short __kernel_uid_t; 31typedef unsigned short __kernel_uid_t;
29typedef unsigned short __kernel_gid_t; 32typedef unsigned short __kernel_gid_t;
30typedef unsigned int __kernel_size_t; 33#define __kernel_uid_t __kernel_uid_t
31typedef int __kernel_ssize_t;
32typedef int __kernel_ptrdiff_t;
33typedef long __kernel_time_t;
34typedef long __kernel_suseconds_t;
35typedef long __kernel_clock_t;
36typedef int __kernel_timer_t;
37typedef int __kernel_clockid_t;
38typedef int __kernel_daddr_t;
39typedef char * __kernel_caddr_t;
40typedef unsigned short __kernel_uid16_t;
41typedef unsigned short __kernel_gid16_t;
42typedef unsigned int __kernel_uid32_t;
43typedef unsigned int __kernel_gid32_t;
44 34
45typedef unsigned short __kernel_old_uid_t;
46typedef unsigned short __kernel_old_gid_t;
47typedef unsigned short __kernel_old_dev_t; 35typedef unsigned short __kernel_old_dev_t;
36#define __kernel_old_dev_t __kernel_old_dev_t
48 37
49#ifdef __GNUC__ 38#include <asm-generic/posix_types.h>
50typedef long long __kernel_loff_t;
51#endif
52
53typedef struct {
54 int val[2];
55} __kernel_fsid_t;
56
57#if defined(__KERNEL__)
58
59#undef __FD_SET
60#define __FD_SET(fd, fdsetp) \
61 (((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] |= (1<<((fd) & 31)))
62
63#undef __FD_CLR
64#define __FD_CLR(fd, fdsetp) \
65 (((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] &= ~(1<<((fd) & 31)))
66
67#undef __FD_ISSET
68#define __FD_ISSET(fd, fdsetp) \
69 ((((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] & (1<<((fd) & 31))) != 0)
70
71#undef __FD_ZERO
72#define __FD_ZERO(fdsetp) \
73 (memset (fdsetp, 0, sizeof (*(fd_set *)(fdsetp))))
74
75#endif
76 39
77#endif 40#endif
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index f4d7f56ee51f..5ac8d3d3e025 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -55,7 +55,6 @@ struct thread_struct {
55#define start_thread(regs,pc,sp) \ 55#define start_thread(regs,pc,sp) \
56({ \ 56({ \
57 unsigned long *stack = (unsigned long *)sp; \ 57 unsigned long *stack = (unsigned long *)sp; \
58 set_fs(USER_DS); \
59 memset(regs->uregs, 0, sizeof(regs->uregs)); \ 58 memset(regs->uregs, 0, sizeof(regs->uregs)); \
60 if (current->personality & ADDR_LIMIT_32BIT) \ 59 if (current->personality & ADDR_LIMIT_32BIT) \
61 regs->ARM_cpsr = USR_MODE; \ 60 regs->ARM_cpsr = USR_MODE; \
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index ee0363307918..aeae9c609df4 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -13,8 +13,6 @@
13 13
14#ifdef CONFIG_OF 14#ifdef CONFIG_OF
15 15
16#include <asm/irq.h>
17
18extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys); 16extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
19extern void arm_dt_memblock_reserve(void); 17extern void arm_dt_memblock_reserve(void);
20 18
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 02b2f8203982..85fe61e73202 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -318,6 +318,21 @@ extern struct cpu_tlb_fns cpu_tlb;
318 318
319#define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f))) 319#define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
320 320
321#define __tlb_op(f, insnarg, arg) \
322 do { \
323 if (always_tlb_flags & (f)) \
324 asm("mcr " insnarg \
325 : : "r" (arg) : "cc"); \
326 else if (possible_tlb_flags & (f)) \
327 asm("tst %1, %2\n\t" \
328 "mcrne " insnarg \
329 : : "r" (arg), "r" (__tlb_flag), "Ir" (f) \
330 : "cc"); \
331 } while (0)
332
333#define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg)
334#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg)
335
321static inline void local_flush_tlb_all(void) 336static inline void local_flush_tlb_all(void)
322{ 337{
323 const int zero = 0; 338 const int zero = 0;
@@ -326,16 +341,11 @@ static inline void local_flush_tlb_all(void)
326 if (tlb_flag(TLB_WB)) 341 if (tlb_flag(TLB_WB))
327 dsb(); 342 dsb();
328 343
329 if (tlb_flag(TLB_V3_FULL)) 344 tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
330 asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc"); 345 tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
331 if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL)) 346 tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
332 asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc"); 347 tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
333 if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL)) 348 tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
334 asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
335 if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
336 asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
337 if (tlb_flag(TLB_V7_UIS_FULL))
338 asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
339 349
340 if (tlb_flag(TLB_BARRIER)) { 350 if (tlb_flag(TLB_BARRIER)) {
341 dsb(); 351 dsb();
@@ -352,29 +362,23 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
352 if (tlb_flag(TLB_WB)) 362 if (tlb_flag(TLB_WB))
353 dsb(); 363 dsb();
354 364
355 if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { 365 if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
356 if (tlb_flag(TLB_V3_FULL)) 366 if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
357 asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc"); 367 tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
358 if (tlb_flag(TLB_V4_U_FULL)) 368 tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
359 asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc"); 369 tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
360 if (tlb_flag(TLB_V4_D_FULL)) 370 tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
361 asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc"); 371 }
362 if (tlb_flag(TLB_V4_I_FULL)) 372 put_cpu();
363 asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
364 } 373 }
365 put_cpu(); 374
366 375 tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
367 if (tlb_flag(TLB_V6_U_ASID)) 376 tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
368 asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc"); 377 tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
369 if (tlb_flag(TLB_V6_D_ASID))
370 asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
371 if (tlb_flag(TLB_V6_I_ASID))
372 asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");
373 if (tlb_flag(TLB_V7_UIS_ASID))
374#ifdef CONFIG_ARM_ERRATA_720789 378#ifdef CONFIG_ARM_ERRATA_720789
375 asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); 379 tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero);
376#else 380#else
377 asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc"); 381 tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid);
378#endif 382#endif
379 383
380 if (tlb_flag(TLB_BARRIER)) 384 if (tlb_flag(TLB_BARRIER))
@@ -392,30 +396,23 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
392 if (tlb_flag(TLB_WB)) 396 if (tlb_flag(TLB_WB))
393 dsb(); 397 dsb();
394 398
395 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { 399 if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
396 if (tlb_flag(TLB_V3_PAGE)) 400 cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
397 asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc"); 401 tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
398 if (tlb_flag(TLB_V4_U_PAGE)) 402 tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
399 asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc"); 403 tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
400 if (tlb_flag(TLB_V4_D_PAGE)) 404 tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
401 asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
402 if (tlb_flag(TLB_V4_I_PAGE))
403 asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
404 if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) 405 if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
405 asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); 406 asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
406 } 407 }
407 408
408 if (tlb_flag(TLB_V6_U_PAGE)) 409 tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
409 asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc"); 410 tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
410 if (tlb_flag(TLB_V6_D_PAGE)) 411 tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
411 asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
412 if (tlb_flag(TLB_V6_I_PAGE))
413 asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
414 if (tlb_flag(TLB_V7_UIS_PAGE))
415#ifdef CONFIG_ARM_ERRATA_720789 412#ifdef CONFIG_ARM_ERRATA_720789
416 asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc"); 413 tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
417#else 414#else
418 asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc"); 415 tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);
419#endif 416#endif
420 417
421 if (tlb_flag(TLB_BARRIER)) 418 if (tlb_flag(TLB_BARRIER))
@@ -432,25 +429,17 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
432 if (tlb_flag(TLB_WB)) 429 if (tlb_flag(TLB_WB))
433 dsb(); 430 dsb();
434 431
435 if (tlb_flag(TLB_V3_PAGE)) 432 tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
436 asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc"); 433 tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
437 if (tlb_flag(TLB_V4_U_PAGE)) 434 tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
438 asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc"); 435 tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
439 if (tlb_flag(TLB_V4_D_PAGE))
440 asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
441 if (tlb_flag(TLB_V4_I_PAGE))
442 asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
443 if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) 436 if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
444 asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); 437 asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
445 438
446 if (tlb_flag(TLB_V6_U_PAGE)) 439 tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
447 asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc"); 440 tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
448 if (tlb_flag(TLB_V6_D_PAGE)) 441 tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
449 asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc"); 442 tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
450 if (tlb_flag(TLB_V6_I_PAGE))
451 asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
452 if (tlb_flag(TLB_V7_UIS_PAGE))
453 asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc");
454 443
455 if (tlb_flag(TLB_BARRIER)) { 444 if (tlb_flag(TLB_BARRIER)) {
456 dsb(); 445 dsb();
@@ -475,13 +464,8 @@ static inline void flush_pmd_entry(void *pmd)
475{ 464{
476 const unsigned int __tlb_flag = __cpu_tlb_flags; 465 const unsigned int __tlb_flag = __cpu_tlb_flags;
477 466
478 if (tlb_flag(TLB_DCLEAN)) 467 tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd);
479 asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pmd" 468 tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
480 : : "r" (pmd) : "cc");
481
482 if (tlb_flag(TLB_L2CLEAN_FR))
483 asm("mcr p15, 1, %0, c15, c9, 1 @ L2 flush_pmd"
484 : : "r" (pmd) : "cc");
485 469
486 if (tlb_flag(TLB_WB)) 470 if (tlb_flag(TLB_WB))
487 dsb(); 471 dsb();
@@ -491,15 +475,11 @@ static inline void clean_pmd_entry(void *pmd)
491{ 475{
492 const unsigned int __tlb_flag = __cpu_tlb_flags; 476 const unsigned int __tlb_flag = __cpu_tlb_flags;
493 477
494 if (tlb_flag(TLB_DCLEAN)) 478 tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd);
495 asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pmd" 479 tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
496 : : "r" (pmd) : "cc");
497
498 if (tlb_flag(TLB_L2CLEAN_FR))
499 asm("mcr p15, 1, %0, c15, c9, 1 @ L2 flush_pmd"
500 : : "r" (pmd) : "cc");
501} 480}
502 481
482#undef tlb_op
503#undef tlb_flag 483#undef tlb_flag
504#undef always_tlb_flags 484#undef always_tlb_flags
505#undef possible_tlb_flags 485#undef possible_tlb_flags
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 5b29a6673625..f555bb3664dc 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -46,7 +46,7 @@ static inline int in_exception_text(unsigned long ptr)
46 return in ? : __in_irqentry_text(ptr); 46 return in ? : __in_irqentry_text(ptr);
47} 47}
48 48
49extern void __init early_trap_init(void); 49extern void __init early_trap_init(void *);
50extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); 50extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
51extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs); 51extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);
52 52
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 3a274878412e..7b787d642af4 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -7,6 +7,8 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
7 7
8ifdef CONFIG_FUNCTION_TRACER 8ifdef CONFIG_FUNCTION_TRACER
9CFLAGS_REMOVE_ftrace.o = -pg 9CFLAGS_REMOVE_ftrace.o = -pg
10CFLAGS_REMOVE_insn.o = -pg
11CFLAGS_REMOVE_patch.o = -pg
10endif 12endif
11 13
12CFLAGS_REMOVE_return_address.o = -pg 14CFLAGS_REMOVE_return_address.o = -pg
@@ -14,14 +16,14 @@ CFLAGS_REMOVE_return_address.o = -pg
14# Object file lists. 16# Object file lists.
15 17
16obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \ 18obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \
17 process.o ptrace.o return_address.o setup.o signal.o \ 19 process.o ptrace.o return_address.o sched_clock.o \
18 sys_arm.o stacktrace.o time.o traps.o 20 setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
19 21
20obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += compat.o 22obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += compat.o
21 23
22obj-$(CONFIG_LEDS) += leds.o 24obj-$(CONFIG_LEDS) += leds.o
23obj-$(CONFIG_OC_ETM) += etm.o 25obj-$(CONFIG_OC_ETM) += etm.o
24 26obj-$(CONFIG_CPU_IDLE) += cpuidle.o
25obj-$(CONFIG_ISA_DMA_API) += dma.o 27obj-$(CONFIG_ISA_DMA_API) += dma.o
26obj-$(CONFIG_FIQ) += fiq.o fiqasm.o 28obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
27obj-$(CONFIG_MODULES) += armksyms.o module.o 29obj-$(CONFIG_MODULES) += armksyms.o module.o
@@ -29,14 +31,14 @@ obj-$(CONFIG_ARTHUR) += arthur.o
29obj-$(CONFIG_ISA_DMA) += dma-isa.o 31obj-$(CONFIG_ISA_DMA) += dma-isa.o
30obj-$(CONFIG_PCI) += bios32.o isa.o 32obj-$(CONFIG_PCI) += bios32.o isa.o
31obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o 33obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
32obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
33obj-$(CONFIG_SMP) += smp.o smp_tlb.o 34obj-$(CONFIG_SMP) += smp.o smp_tlb.o
34obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o 35obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
35obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o 36obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
36obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 37obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o
37obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 38obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o
39obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o
38obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o 40obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
39obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o 41obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o patch.o
40ifdef CONFIG_THUMB2_KERNEL 42ifdef CONFIG_THUMB2_KERNEL
41obj-$(CONFIG_KPROBES) += kprobes-thumb.o 43obj-$(CONFIG_KPROBES) += kprobes-thumb.o
42else 44else
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
new file mode 100644
index 000000000000..89545f6c8403
--- /dev/null
+++ b/arch/arm/kernel/cpuidle.c
@@ -0,0 +1,21 @@
1/*
2 * Copyright 2012 Linaro Ltd.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11
12#include <linux/cpuidle.h>
13#include <asm/proc-fns.h>
14
15int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
16 struct cpuidle_driver *drv, int index)
17{
18 cpu_do_idle();
19
20 return index;
21}
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index 204e2160cfcc..c45522c36787 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -10,6 +10,7 @@
10 * 32-bit debugging code 10 * 32-bit debugging code
11 */ 11 */
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/assembler.h>
13 14
14 .text 15 .text
15 16
@@ -100,7 +101,7 @@
100 101
101#endif /* CONFIG_CPU_V6 */ 102#endif /* CONFIG_CPU_V6 */
102 103
103#else 104#elif !defined(CONFIG_DEBUG_SEMIHOSTING)
104#include <mach/debug-macro.S> 105#include <mach/debug-macro.S>
105#endif /* CONFIG_DEBUG_ICEDCC */ 106#endif /* CONFIG_DEBUG_ICEDCC */
106 107
@@ -155,6 +156,8 @@ hexbuf: .space 16
155 156
156 .ltorg 157 .ltorg
157 158
159#ifndef CONFIG_DEBUG_SEMIHOSTING
160
158ENTRY(printascii) 161ENTRY(printascii)
159 addruart_current r3, r1, r2 162 addruart_current r3, r1, r2
160 b 2f 163 b 2f
@@ -177,3 +180,24 @@ ENTRY(printch)
177 mov r0, #0 180 mov r0, #0
178 b 1b 181 b 1b
179ENDPROC(printch) 182ENDPROC(printch)
183
184#else
185
186ENTRY(printascii)
187 mov r1, r0
188 mov r0, #0x04 @ SYS_WRITE0
189 ARM( svc #0x123456 )
190 THUMB( svc #0xab )
191 mov pc, lr
192ENDPROC(printascii)
193
194ENTRY(printch)
195 adr r1, hexbuf
196 strb r0, [r1]
197 mov r0, #0x03 @ SYS_WRITEC
198 ARM( svc #0x123456 )
199 THUMB( svc #0xab )
200 mov pc, lr
201ENDPROC(printch)
202
203#endif
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 8ec5eed55e37..7fd3ad048da9 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -15,6 +15,7 @@
15 * that causes it to save wrong values... Be aware! 15 * that causes it to save wrong values... Be aware!
16 */ 16 */
17 17
18#include <asm/assembler.h>
18#include <asm/memory.h> 19#include <asm/memory.h>
19#include <asm/glue-df.h> 20#include <asm/glue-df.h>
20#include <asm/glue-pf.h> 21#include <asm/glue-pf.h>
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index c0062ad1e847..df0bf0c8cb79 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -16,10 +16,13 @@
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17 17
18#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
19#include <asm/opcodes.h>
19#include <asm/ftrace.h> 20#include <asm/ftrace.h>
20 21
22#include "insn.h"
23
21#ifdef CONFIG_THUMB2_KERNEL 24#ifdef CONFIG_THUMB2_KERNEL
22#define NOP 0xeb04f85d /* pop.w {lr} */ 25#define NOP 0xf85deb04 /* pop.w {lr} */
23#else 26#else
24#define NOP 0xe8bd4000 /* pop {lr} */ 27#define NOP 0xe8bd4000 /* pop {lr} */
25#endif 28#endif
@@ -60,76 +63,31 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
60} 63}
61#endif 64#endif
62 65
63#ifdef CONFIG_THUMB2_KERNEL
64static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
65 bool link)
66{
67 unsigned long s, j1, j2, i1, i2, imm10, imm11;
68 unsigned long first, second;
69 long offset;
70
71 offset = (long)addr - (long)(pc + 4);
72 if (offset < -16777216 || offset > 16777214) {
73 WARN_ON_ONCE(1);
74 return 0;
75 }
76
77 s = (offset >> 24) & 0x1;
78 i1 = (offset >> 23) & 0x1;
79 i2 = (offset >> 22) & 0x1;
80 imm10 = (offset >> 12) & 0x3ff;
81 imm11 = (offset >> 1) & 0x7ff;
82
83 j1 = (!i1) ^ s;
84 j2 = (!i2) ^ s;
85
86 first = 0xf000 | (s << 10) | imm10;
87 second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
88 if (link)
89 second |= 1 << 14;
90
91 return (second << 16) | first;
92}
93#else
94static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
95 bool link)
96{
97 unsigned long opcode = 0xea000000;
98 long offset;
99
100 if (link)
101 opcode |= 1 << 24;
102
103 offset = (long)addr - (long)(pc + 8);
104 if (unlikely(offset < -33554432 || offset > 33554428)) {
105 /* Can't generate branches that far (from ARM ARM). Ftrace
106 * doesn't generate branches outside of kernel text.
107 */
108 WARN_ON_ONCE(1);
109 return 0;
110 }
111
112 offset = (offset >> 2) & 0x00ffffff;
113
114 return opcode | offset;
115}
116#endif
117
118static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) 66static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
119{ 67{
120 return ftrace_gen_branch(pc, addr, true); 68 return arm_gen_branch_link(pc, addr);
121} 69}
122 70
123static int ftrace_modify_code(unsigned long pc, unsigned long old, 71static int ftrace_modify_code(unsigned long pc, unsigned long old,
124 unsigned long new) 72 unsigned long new, bool validate)
125{ 73{
126 unsigned long replaced; 74 unsigned long replaced;
127 75
128 if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE)) 76 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
129 return -EFAULT; 77 old = __opcode_to_mem_thumb32(old);
78 new = __opcode_to_mem_thumb32(new);
79 } else {
80 old = __opcode_to_mem_arm(old);
81 new = __opcode_to_mem_arm(new);
82 }
130 83
131 if (replaced != old) 84 if (validate) {
132 return -EINVAL; 85 if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
86 return -EFAULT;
87
88 if (replaced != old)
89 return -EINVAL;
90 }
133 91
134 if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE)) 92 if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
135 return -EPERM; 93 return -EPERM;
@@ -141,23 +99,21 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old,
141 99
142int ftrace_update_ftrace_func(ftrace_func_t func) 100int ftrace_update_ftrace_func(ftrace_func_t func)
143{ 101{
144 unsigned long pc, old; 102 unsigned long pc;
145 unsigned long new; 103 unsigned long new;
146 int ret; 104 int ret;
147 105
148 pc = (unsigned long)&ftrace_call; 106 pc = (unsigned long)&ftrace_call;
149 memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
150 new = ftrace_call_replace(pc, (unsigned long)func); 107 new = ftrace_call_replace(pc, (unsigned long)func);
151 108
152 ret = ftrace_modify_code(pc, old, new); 109 ret = ftrace_modify_code(pc, 0, new, false);
153 110
154#ifdef CONFIG_OLD_MCOUNT 111#ifdef CONFIG_OLD_MCOUNT
155 if (!ret) { 112 if (!ret) {
156 pc = (unsigned long)&ftrace_call_old; 113 pc = (unsigned long)&ftrace_call_old;
157 memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE);
158 new = ftrace_call_replace(pc, (unsigned long)func); 114 new = ftrace_call_replace(pc, (unsigned long)func);
159 115
160 ret = ftrace_modify_code(pc, old, new); 116 ret = ftrace_modify_code(pc, 0, new, false);
161 } 117 }
162#endif 118#endif
163 119
@@ -172,7 +128,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
172 old = ftrace_nop_replace(rec); 128 old = ftrace_nop_replace(rec);
173 new = ftrace_call_replace(ip, adjust_address(rec, addr)); 129 new = ftrace_call_replace(ip, adjust_address(rec, addr));
174 130
175 return ftrace_modify_code(rec->ip, old, new); 131 return ftrace_modify_code(rec->ip, old, new, true);
176} 132}
177 133
178int ftrace_make_nop(struct module *mod, 134int ftrace_make_nop(struct module *mod,
@@ -185,7 +141,7 @@ int ftrace_make_nop(struct module *mod,
185 141
186 old = ftrace_call_replace(ip, adjust_address(rec, addr)); 142 old = ftrace_call_replace(ip, adjust_address(rec, addr));
187 new = ftrace_nop_replace(rec); 143 new = ftrace_nop_replace(rec);
188 ret = ftrace_modify_code(ip, old, new); 144 ret = ftrace_modify_code(ip, old, new, true);
189 145
190#ifdef CONFIG_OLD_MCOUNT 146#ifdef CONFIG_OLD_MCOUNT
191 if (ret == -EINVAL && addr == MCOUNT_ADDR) { 147 if (ret == -EINVAL && addr == MCOUNT_ADDR) {
@@ -193,7 +149,7 @@ int ftrace_make_nop(struct module *mod,
193 149
194 old = ftrace_call_replace(ip, adjust_address(rec, addr)); 150 old = ftrace_call_replace(ip, adjust_address(rec, addr));
195 new = ftrace_nop_replace(rec); 151 new = ftrace_nop_replace(rec);
196 ret = ftrace_modify_code(ip, old, new); 152 ret = ftrace_modify_code(ip, old, new, true);
197 } 153 }
198#endif 154#endif
199 155
@@ -249,12 +205,12 @@ static int __ftrace_modify_caller(unsigned long *callsite,
249{ 205{
250 unsigned long caller_fn = (unsigned long) func; 206 unsigned long caller_fn = (unsigned long) func;
251 unsigned long pc = (unsigned long) callsite; 207 unsigned long pc = (unsigned long) callsite;
252 unsigned long branch = ftrace_gen_branch(pc, caller_fn, false); 208 unsigned long branch = arm_gen_branch(pc, caller_fn);
253 unsigned long nop = 0xe1a00000; /* mov r0, r0 */ 209 unsigned long nop = 0xe1a00000; /* mov r0, r0 */
254 unsigned long old = enable ? nop : branch; 210 unsigned long old = enable ? nop : branch;
255 unsigned long new = enable ? branch : nop; 211 unsigned long new = enable ? branch : nop;
256 212
257 return ftrace_modify_code(pc, old, new); 213 return ftrace_modify_code(pc, old, new, true);
258} 214}
259 215
260static int ftrace_modify_graph_caller(bool enable) 216static int ftrace_modify_graph_caller(bool enable)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index a2e9694a68ee..3bf0c7f8b043 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -265,7 +265,7 @@ __create_page_tables:
265 str r6, [r3] 265 str r6, [r3]
266 266
267#ifdef CONFIG_DEBUG_LL 267#ifdef CONFIG_DEBUG_LL
268#ifndef CONFIG_DEBUG_ICEDCC 268#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
269 /* 269 /*
270 * Map in IO space for serial debugging. 270 * Map in IO space for serial debugging.
271 * This allows debug messages to be output 271 * This allows debug messages to be output
@@ -297,10 +297,10 @@ __create_page_tables:
297 cmp r0, r6 297 cmp r0, r6
298 blo 1b 298 blo 1b
299 299
300#else /* CONFIG_DEBUG_ICEDCC */ 300#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
301 /* we don't need any serial debugging mappings for ICEDCC */ 301 /* we don't need any serial debugging mappings */
302 ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags 302 ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
303#endif /* !CONFIG_DEBUG_ICEDCC */ 303#endif
304 304
305#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS) 305#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
306 /* 306 /*
diff --git a/arch/arm/kernel/insn.c b/arch/arm/kernel/insn.c
new file mode 100644
index 000000000000..ab312e516546
--- /dev/null
+++ b/arch/arm/kernel/insn.c
@@ -0,0 +1,61 @@
1#include <linux/kernel.h>
2#include <asm/opcodes.h>
3
4static unsigned long
5__arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
6{
7 unsigned long s, j1, j2, i1, i2, imm10, imm11;
8 unsigned long first, second;
9 long offset;
10
11 offset = (long)addr - (long)(pc + 4);
12 if (offset < -16777216 || offset > 16777214) {
13 WARN_ON_ONCE(1);
14 return 0;
15 }
16
17 s = (offset >> 24) & 0x1;
18 i1 = (offset >> 23) & 0x1;
19 i2 = (offset >> 22) & 0x1;
20 imm10 = (offset >> 12) & 0x3ff;
21 imm11 = (offset >> 1) & 0x7ff;
22
23 j1 = (!i1) ^ s;
24 j2 = (!i2) ^ s;
25
26 first = 0xf000 | (s << 10) | imm10;
27 second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
28 if (link)
29 second |= 1 << 14;
30
31 return __opcode_thumb32_compose(first, second);
32}
33
34static unsigned long
35__arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
36{
37 unsigned long opcode = 0xea000000;
38 long offset;
39
40 if (link)
41 opcode |= 1 << 24;
42
43 offset = (long)addr - (long)(pc + 8);
44 if (unlikely(offset < -33554432 || offset > 33554428)) {
45 WARN_ON_ONCE(1);
46 return 0;
47 }
48
49 offset = (offset >> 2) & 0x00ffffff;
50
51 return opcode | offset;
52}
53
54unsigned long
55__arm_gen_branch(unsigned long pc, unsigned long addr, bool link)
56{
57 if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
58 return __arm_gen_branch_thumb2(pc, addr, link);
59 else
60 return __arm_gen_branch_arm(pc, addr, link);
61}
diff --git a/arch/arm/kernel/insn.h b/arch/arm/kernel/insn.h
new file mode 100644
index 000000000000..e96065da4dae
--- /dev/null
+++ b/arch/arm/kernel/insn.h
@@ -0,0 +1,29 @@
1#ifndef __ASM_ARM_INSN_H
2#define __ASM_ARM_INSN_H
3
4static inline unsigned long
5arm_gen_nop(void)
6{
7#ifdef CONFIG_THUMB2_KERNEL
8 return 0xf3af8000; /* nop.w */
9#else
10 return 0xe1a00000; /* mov r0, r0 */
11#endif
12}
13
14unsigned long
15__arm_gen_branch(unsigned long pc, unsigned long addr, bool link);
16
17static inline unsigned long
18arm_gen_branch(unsigned long pc, unsigned long addr)
19{
20 return __arm_gen_branch(pc, addr, false);
21}
22
23static inline unsigned long
24arm_gen_branch_link(unsigned long pc, unsigned long addr)
25{
26 return __arm_gen_branch(pc, addr, true);
27}
28
29#endif
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 6a6a097edd61..71ccdbfed662 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -180,10 +180,7 @@ void migrate_irqs(void)
180 local_irq_save(flags); 180 local_irq_save(flags);
181 181
182 for_each_irq_desc(i, desc) { 182 for_each_irq_desc(i, desc) {
183 bool affinity_broken = false; 183 bool affinity_broken;
184
185 if (!desc)
186 continue;
187 184
188 raw_spin_lock(&desc->lock); 185 raw_spin_lock(&desc->lock);
189 affinity_broken = migrate_one_irq(desc); 186 affinity_broken = migrate_one_irq(desc);
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
new file mode 100644
index 000000000000..4ce4f789446d
--- /dev/null
+++ b/arch/arm/kernel/jump_label.c
@@ -0,0 +1,39 @@
1#include <linux/kernel.h>
2#include <linux/jump_label.h>
3
4#include "insn.h"
5#include "patch.h"
6
7#ifdef HAVE_JUMP_LABEL
8
9static void __arch_jump_label_transform(struct jump_entry *entry,
10 enum jump_label_type type,
11 bool is_static)
12{
13 void *addr = (void *)entry->code;
14 unsigned int insn;
15
16 if (type == JUMP_LABEL_ENABLE)
17 insn = arm_gen_branch(entry->code, entry->target);
18 else
19 insn = arm_gen_nop();
20
21 if (is_static)
22 __patch_text(addr, insn);
23 else
24 patch_text(addr, insn);
25}
26
27void arch_jump_label_transform(struct jump_entry *entry,
28 enum jump_label_type type)
29{
30 __arch_jump_label_transform(entry, type, false);
31}
32
33void arch_jump_label_transform_static(struct jump_entry *entry,
34 enum jump_label_type type)
35{
36 __arch_jump_label_transform(entry, type, true);
37}
38
39#endif
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index a79e5c75a96e..4dd41fc9e235 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -29,6 +29,7 @@
29#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
30 30
31#include "kprobes.h" 31#include "kprobes.h"
32#include "patch.h"
32 33
33#define MIN_STACK_SIZE(addr) \ 34#define MIN_STACK_SIZE(addr) \
34 min((unsigned long)MAX_STACK_SIZE, \ 35 min((unsigned long)MAX_STACK_SIZE, \
@@ -103,57 +104,33 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
103 return 0; 104 return 0;
104} 105}
105 106
106#ifdef CONFIG_THUMB2_KERNEL
107
108/*
109 * For a 32-bit Thumb breakpoint spanning two memory words we need to take
110 * special precautions to insert the breakpoint atomically, especially on SMP
111 * systems. This is achieved by calling this arming function using stop_machine.
112 */
113static int __kprobes set_t32_breakpoint(void *addr)
114{
115 ((u16 *)addr)[0] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION >> 16;
116 ((u16 *)addr)[1] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION & 0xffff;
117 flush_insns(addr, 2*sizeof(u16));
118 return 0;
119}
120
121void __kprobes arch_arm_kprobe(struct kprobe *p) 107void __kprobes arch_arm_kprobe(struct kprobe *p)
122{ 108{
123 uintptr_t addr = (uintptr_t)p->addr & ~1; /* Remove any Thumb flag */ 109 unsigned int brkp;
124 110 void *addr;
125 if (!is_wide_instruction(p->opcode)) { 111
126 *(u16 *)addr = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION; 112 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
127 flush_insns(addr, sizeof(u16)); 113 /* Remove any Thumb flag */
128 } else if (addr & 2) { 114 addr = (void *)((uintptr_t)p->addr & ~1);
129 /* A 32-bit instruction spanning two words needs special care */ 115
130 stop_machine(set_t32_breakpoint, (void *)addr, cpu_online_mask); 116 if (is_wide_instruction(p->opcode))
117 brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
118 else
119 brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
131 } else { 120 } else {
132 /* Word aligned 32-bit instruction can be written atomically */ 121 kprobe_opcode_t insn = p->opcode;
133 u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
134#ifndef __ARMEB__ /* Swap halfwords for little-endian */
135 bkp = (bkp >> 16) | (bkp << 16);
136#endif
137 *(u32 *)addr = bkp;
138 flush_insns(addr, sizeof(u32));
139 }
140}
141 122
142#else /* !CONFIG_THUMB2_KERNEL */ 123 addr = p->addr;
124 brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
143 125
144void __kprobes arch_arm_kprobe(struct kprobe *p) 126 if (insn >= 0xe0000000)
145{ 127 brkp |= 0xe0000000; /* Unconditional instruction */
146 kprobe_opcode_t insn = p->opcode; 128 else
147 kprobe_opcode_t brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION; 129 brkp |= insn & 0xf0000000; /* Copy condition from insn */
148 if (insn >= 0xe0000000) 130 }
149 brkp |= 0xe0000000; /* Unconditional instruction */
150 else
151 brkp |= insn & 0xf0000000; /* Copy condition from insn */
152 *p->addr = brkp;
153 flush_insns(p->addr, sizeof(p->addr[0]));
154}
155 131
156#endif /* !CONFIG_THUMB2_KERNEL */ 132 patch_text(addr, brkp);
133}
157 134
158/* 135/*
159 * The actual disarming is done here on each CPU and synchronized using 136 * The actual disarming is done here on each CPU and synchronized using
@@ -166,25 +143,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
166int __kprobes __arch_disarm_kprobe(void *p) 143int __kprobes __arch_disarm_kprobe(void *p)
167{ 144{
168 struct kprobe *kp = p; 145 struct kprobe *kp = p;
169#ifdef CONFIG_THUMB2_KERNEL 146 void *addr = (void *)((uintptr_t)kp->addr & ~1);
170 u16 *addr = (u16 *)((uintptr_t)kp->addr & ~1);
171 kprobe_opcode_t insn = kp->opcode;
172 unsigned int len;
173 147
174 if (is_wide_instruction(insn)) { 148 __patch_text(addr, kp->opcode);
175 ((u16 *)addr)[0] = insn>>16;
176 ((u16 *)addr)[1] = insn;
177 len = 2*sizeof(u16);
178 } else {
179 ((u16 *)addr)[0] = insn;
180 len = sizeof(u16);
181 }
182 flush_insns(addr, len);
183 149
184#else /* !CONFIG_THUMB2_KERNEL */
185 *kp->addr = kp->opcode;
186 flush_insns(kp->addr, sizeof(kp->addr[0]));
187#endif
188 return 0; 150 return 0;
189} 151}
190 152
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 56995983eed8..dfcdb9f7c126 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -7,6 +7,7 @@
7#include <linux/delay.h> 7#include <linux/delay.h>
8#include <linux/reboot.h> 8#include <linux/reboot.h>
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/irq.h>
10#include <asm/pgtable.h> 11#include <asm/pgtable.h>
11#include <asm/pgalloc.h> 12#include <asm/pgalloc.h>
12#include <asm/mmu_context.h> 13#include <asm/mmu_context.h>
@@ -53,6 +54,29 @@ void machine_crash_nonpanic_core(void *unused)
53 cpu_relax(); 54 cpu_relax();
54} 55}
55 56
57static void machine_kexec_mask_interrupts(void)
58{
59 unsigned int i;
60 struct irq_desc *desc;
61
62 for_each_irq_desc(i, desc) {
63 struct irq_chip *chip;
64
65 chip = irq_desc_get_chip(desc);
66 if (!chip)
67 continue;
68
69 if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
70 chip->irq_eoi(&desc->irq_data);
71
72 if (chip->irq_mask)
73 chip->irq_mask(&desc->irq_data);
74
75 if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
76 chip->irq_disable(&desc->irq_data);
77 }
78}
79
56void machine_crash_shutdown(struct pt_regs *regs) 80void machine_crash_shutdown(struct pt_regs *regs)
57{ 81{
58 unsigned long msecs; 82 unsigned long msecs;
@@ -70,6 +94,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
70 printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n"); 94 printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n");
71 95
72 crash_save_cpu(regs, smp_processor_id()); 96 crash_save_cpu(regs, smp_processor_id());
97 machine_kexec_mask_interrupts();
73 98
74 printk(KERN_INFO "Loading crashdump kernel...\n"); 99 printk(KERN_INFO "Loading crashdump kernel...\n");
75} 100}
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
new file mode 100644
index 000000000000..07314af47733
--- /dev/null
+++ b/arch/arm/kernel/patch.c
@@ -0,0 +1,75 @@
1#include <linux/kernel.h>
2#include <linux/kprobes.h>
3#include <linux/stop_machine.h>
4
5#include <asm/cacheflush.h>
6#include <asm/smp_plat.h>
7#include <asm/opcodes.h>
8
9#include "patch.h"
10
11struct patch {
12 void *addr;
13 unsigned int insn;
14};
15
16void __kprobes __patch_text(void *addr, unsigned int insn)
17{
18 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
19 int size;
20
21 if (thumb2 && __opcode_is_thumb16(insn)) {
22 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
23 size = sizeof(u16);
24 } else if (thumb2 && ((uintptr_t)addr & 2)) {
25 u16 first = __opcode_thumb32_first(insn);
26 u16 second = __opcode_thumb32_second(insn);
27 u16 *addrh = addr;
28
29 addrh[0] = __opcode_to_mem_thumb16(first);
30 addrh[1] = __opcode_to_mem_thumb16(second);
31
32 size = sizeof(u32);
33 } else {
34 if (thumb2)
35 insn = __opcode_to_mem_thumb32(insn);
36 else
37 insn = __opcode_to_mem_arm(insn);
38
39 *(u32 *)addr = insn;
40 size = sizeof(u32);
41 }
42
43 flush_icache_range((uintptr_t)(addr),
44 (uintptr_t)(addr) + size);
45}
46
47static int __kprobes patch_text_stop_machine(void *data)
48{
49 struct patch *patch = data;
50
51 __patch_text(patch->addr, patch->insn);
52
53 return 0;
54}
55
56void __kprobes patch_text(void *addr, unsigned int insn)
57{
58 struct patch patch = {
59 .addr = addr,
60 .insn = insn,
61 };
62
63 if (cache_ops_need_broadcast()) {
64 stop_machine(patch_text_stop_machine, &patch, cpu_online_mask);
65 } else {
66 bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL)
67 && __opcode_is_thumb32(insn)
68 && ((uintptr_t)addr & 2);
69
70 if (straddles_word)
71 stop_machine(patch_text_stop_machine, &patch, NULL);
72 else
73 __patch_text(addr, insn);
74 }
75}
diff --git a/arch/arm/kernel/patch.h b/arch/arm/kernel/patch.h
new file mode 100644
index 000000000000..b4731f2dac38
--- /dev/null
+++ b/arch/arm/kernel/patch.h
@@ -0,0 +1,7 @@
1#ifndef _ARM_KERNEL_PATCH_H
2#define _ARM_KERNEL_PATCH_H
3
4void patch_text(void *addr, unsigned int insn);
5void __patch_text(void *addr, unsigned int insn);
6
7#endif
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 8a89d3b7626b..186c8cb982c5 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -738,6 +738,9 @@ init_hw_perf_events(void)
738 case 0xC0F0: /* Cortex-A15 */ 738 case 0xC0F0: /* Cortex-A15 */
739 cpu_pmu = armv7_a15_pmu_init(); 739 cpu_pmu = armv7_a15_pmu_init();
740 break; 740 break;
741 case 0xC070: /* Cortex-A7 */
742 cpu_pmu = armv7_a7_pmu_init();
743 break;
741 } 744 }
742 /* Intel CPUs [xscale]. */ 745 /* Intel CPUs [xscale]. */
743 } else if (0x69 == implementor) { 746 } else if (0x69 == implementor) {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4d7095af2ab3..00755d82e2f2 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -610,6 +610,130 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
610}; 610};
611 611
612/* 612/*
613 * Cortex-A7 HW events mapping
614 */
615static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
616 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
617 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
618 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
619 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
620 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
621 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
622 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
623 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
624 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
625};
626
627static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
628 [PERF_COUNT_HW_CACHE_OP_MAX]
629 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
630 [C(L1D)] = {
631 /*
632 * The performance counters don't differentiate between read
633 * and write accesses/misses so this isn't strictly correct,
634 * but it's the best we can do. Writes and reads get
635 * combined.
636 */
637 [C(OP_READ)] = {
638 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
639 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
640 },
641 [C(OP_WRITE)] = {
642 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
643 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
644 },
645 [C(OP_PREFETCH)] = {
646 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
647 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
648 },
649 },
650 [C(L1I)] = {
651 [C(OP_READ)] = {
652 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
653 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
654 },
655 [C(OP_WRITE)] = {
656 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
657 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
658 },
659 [C(OP_PREFETCH)] = {
660 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
661 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
662 },
663 },
664 [C(LL)] = {
665 [C(OP_READ)] = {
666 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
667 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
668 },
669 [C(OP_WRITE)] = {
670 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
671 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
672 },
673 [C(OP_PREFETCH)] = {
674 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
675 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
676 },
677 },
678 [C(DTLB)] = {
679 [C(OP_READ)] = {
680 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
681 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
682 },
683 [C(OP_WRITE)] = {
684 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
685 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
686 },
687 [C(OP_PREFETCH)] = {
688 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
689 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
690 },
691 },
692 [C(ITLB)] = {
693 [C(OP_READ)] = {
694 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
695 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
696 },
697 [C(OP_WRITE)] = {
698 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
699 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
700 },
701 [C(OP_PREFETCH)] = {
702 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
703 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
704 },
705 },
706 [C(BPU)] = {
707 [C(OP_READ)] = {
708 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
709 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
710 },
711 [C(OP_WRITE)] = {
712 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
713 [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
714 },
715 [C(OP_PREFETCH)] = {
716 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
717 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
718 },
719 },
720 [C(NODE)] = {
721 [C(OP_READ)] = {
722 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
723 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
724 },
725 [C(OP_WRITE)] = {
726 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
727 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
728 },
729 [C(OP_PREFETCH)] = {
730 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
731 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
732 },
733 },
734};
735
736/*
613 * Perf Events' indices 737 * Perf Events' indices
614 */ 738 */
615#define ARMV7_IDX_CYCLE_COUNTER 0 739#define ARMV7_IDX_CYCLE_COUNTER 0
@@ -1104,6 +1228,12 @@ static int armv7_a15_map_event(struct perf_event *event)
1104 &armv7_a15_perf_cache_map, 0xFF); 1228 &armv7_a15_perf_cache_map, 0xFF);
1105} 1229}
1106 1230
1231static int armv7_a7_map_event(struct perf_event *event)
1232{
1233 return map_cpu_event(event, &armv7_a7_perf_map,
1234 &armv7_a7_perf_cache_map, 0xFF);
1235}
1236
1107static struct arm_pmu armv7pmu = { 1237static struct arm_pmu armv7pmu = {
1108 .handle_irq = armv7pmu_handle_irq, 1238 .handle_irq = armv7pmu_handle_irq,
1109 .enable = armv7pmu_enable_event, 1239 .enable = armv7pmu_enable_event,
@@ -1164,6 +1294,16 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)
1164 armv7pmu.set_event_filter = armv7pmu_set_event_filter; 1294 armv7pmu.set_event_filter = armv7pmu_set_event_filter;
1165 return &armv7pmu; 1295 return &armv7pmu;
1166} 1296}
1297
1298static struct arm_pmu *__init armv7_a7_pmu_init(void)
1299{
1300 armv7pmu.id = ARM_PERF_PMU_ID_CA7;
1301 armv7pmu.name = "ARMv7 Cortex-A7";
1302 armv7pmu.map_event = armv7_a7_map_event;
1303 armv7pmu.num_events = armv7_read_num_pmnc_events();
1304 armv7pmu.set_event_filter = armv7pmu_set_event_filter;
1305 return &armv7pmu;
1306}
1167#else 1307#else
1168static struct arm_pmu *__init armv7_a8_pmu_init(void) 1308static struct arm_pmu *__init armv7_a8_pmu_init(void)
1169{ 1309{
@@ -1184,4 +1324,9 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)
1184{ 1324{
1185 return NULL; 1325 return NULL;
1186} 1326}
1327
1328static struct arm_pmu *__init armv7_a7_pmu_init(void)
1329{
1330 return NULL;
1331}
1187#endif /* CONFIG_CPU_V7 */ 1332#endif /* CONFIG_CPU_V7 */
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 7b9cddef6e53..2b7b017a20cd 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -528,21 +528,39 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
528#ifdef CONFIG_MMU 528#ifdef CONFIG_MMU
529/* 529/*
530 * The vectors page is always readable from user space for the 530 * The vectors page is always readable from user space for the
531 * atomic helpers and the signal restart code. Let's declare a mapping 531 * atomic helpers and the signal restart code. Insert it into the
532 * for it so it is visible through ptrace and /proc/<pid>/mem. 532 * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
533 */ 533 */
534static struct vm_area_struct gate_vma;
534 535
535int vectors_user_mapping(void) 536static int __init gate_vma_init(void)
536{ 537{
537 struct mm_struct *mm = current->mm; 538 gate_vma.vm_start = 0xffff0000;
538 return install_special_mapping(mm, 0xffff0000, PAGE_SIZE, 539 gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
539 VM_READ | VM_EXEC | 540 gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
540 VM_MAYREAD | VM_MAYEXEC | VM_RESERVED, 541 gate_vma.vm_flags = VM_READ | VM_EXEC |
541 NULL); 542 VM_MAYREAD | VM_MAYEXEC;
543 return 0;
544}
545arch_initcall(gate_vma_init);
546
547struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
548{
549 return &gate_vma;
550}
551
552int in_gate_area(struct mm_struct *mm, unsigned long addr)
553{
554 return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
555}
556
557int in_gate_area_no_mm(unsigned long addr)
558{
559 return in_gate_area(NULL, addr);
542} 560}
543 561
544const char *arch_vma_name(struct vm_area_struct *vma) 562const char *arch_vma_name(struct vm_area_struct *vma)
545{ 563{
546 return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL; 564 return (vma == &gate_vma) ? "[vectors]" : NULL;
547} 565}
548#endif 566#endif
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 5416c7c12528..27d186abbc06 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -10,6 +10,7 @@
10#include <linux/jiffies.h> 10#include <linux/jiffies.h>
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/syscore_ops.h>
13#include <linux/timer.h> 14#include <linux/timer.h>
14 15
15#include <asm/sched_clock.h> 16#include <asm/sched_clock.h>
@@ -164,3 +165,20 @@ void __init sched_clock_postinit(void)
164 165
165 sched_clock_poll(sched_clock_timer.data); 166 sched_clock_poll(sched_clock_timer.data);
166} 167}
168
169static int sched_clock_suspend(void)
170{
171 sched_clock_poll(sched_clock_timer.data);
172 return 0;
173}
174
175static struct syscore_ops sched_clock_ops = {
176 .suspend = sched_clock_suspend,
177};
178
179static int __init sched_clock_syscore_init(void)
180{
181 register_syscore_ops(&sched_clock_ops);
182 return 0;
183}
184device_initcall(sched_clock_syscore_init);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 9e0fdb3a1988..b91411371ae1 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -976,7 +976,6 @@ void __init setup_arch(char **cmdline_p)
976 conswitchp = &dummy_con; 976 conswitchp = &dummy_con;
977#endif 977#endif
978#endif 978#endif
979 early_trap_init();
980 979
981 if (mdesc->init_early) 980 if (mdesc->init_early)
982 mdesc->init_early(); 981 mdesc->init_early();
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 9e617bd4a146..7cb532fc8aa4 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -66,12 +66,13 @@ const unsigned long syscall_restart_code[2] = {
66 */ 66 */
67asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask) 67asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
68{ 68{
69 mask &= _BLOCKABLE; 69 sigset_t blocked;
70 spin_lock_irq(&current->sighand->siglock); 70
71 current->saved_sigmask = current->blocked; 71 current->saved_sigmask = current->blocked;
72 siginitset(&current->blocked, mask); 72
73 recalc_sigpending(); 73 mask &= _BLOCKABLE;
74 spin_unlock_irq(&current->sighand->siglock); 74 siginitset(&blocked, mask);
75 set_current_blocked(&blocked);
75 76
76 current->state = TASK_INTERRUPTIBLE; 77 current->state = TASK_INTERRUPTIBLE;
77 schedule(); 78 schedule();
@@ -280,10 +281,7 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
280 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); 281 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
281 if (err == 0) { 282 if (err == 0) {
282 sigdelsetmask(&set, ~_BLOCKABLE); 283 sigdelsetmask(&set, ~_BLOCKABLE);
283 spin_lock_irq(&current->sighand->siglock); 284 set_current_blocked(&set);
284 current->blocked = set;
285 recalc_sigpending();
286 spin_unlock_irq(&current->sighand->siglock);
287 } 285 }
288 286
289 __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); 287 __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
@@ -636,13 +634,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
636 /* 634 /*
637 * Block the signal if we were successful. 635 * Block the signal if we were successful.
638 */ 636 */
639 spin_lock_irq(&tsk->sighand->siglock); 637 block_sigmask(ka, sig);
640 sigorsets(&tsk->blocked, &tsk->blocked,
641 &ka->sa.sa_mask);
642 if (!(ka->sa.sa_flags & SA_NODEFER))
643 sigaddset(&tsk->blocked, sig);
644 recalc_sigpending();
645 spin_unlock_irq(&tsk->sighand->siglock);
646 638
647 return 0; 639 return 0;
648} 640}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 9a4bdde909ce..addbbe8028c2 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -58,6 +58,8 @@ enum ipi_msg_type {
58 IPI_CPU_STOP, 58 IPI_CPU_STOP,
59}; 59};
60 60
61static DECLARE_COMPLETION(cpu_running);
62
61int __cpuinit __cpu_up(unsigned int cpu) 63int __cpuinit __cpu_up(unsigned int cpu)
62{ 64{
63 struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); 65 struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
@@ -98,20 +100,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
98 */ 100 */
99 ret = boot_secondary(cpu, idle); 101 ret = boot_secondary(cpu, idle);
100 if (ret == 0) { 102 if (ret == 0) {
101 unsigned long timeout;
102
103 /* 103 /*
104 * CPU was successfully started, wait for it 104 * CPU was successfully started, wait for it
105 * to come online or time out. 105 * to come online or time out.
106 */ 106 */
107 timeout = jiffies + HZ; 107 wait_for_completion_timeout(&cpu_running,
108 while (time_before(jiffies, timeout)) { 108 msecs_to_jiffies(1000));
109 if (cpu_online(cpu))
110 break;
111
112 udelay(10);
113 barrier();
114 }
115 109
116 if (!cpu_online(cpu)) { 110 if (!cpu_online(cpu)) {
117 pr_crit("CPU%u: failed to come online\n", cpu); 111 pr_crit("CPU%u: failed to come online\n", cpu);
@@ -288,9 +282,10 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
288 /* 282 /*
289 * OK, now it's safe to let the boot CPU continue. Wait for 283 * OK, now it's safe to let the boot CPU continue. Wait for
290 * the CPU migration code to notice that the CPU is online 284 * the CPU migration code to notice that the CPU is online
291 * before we continue. 285 * before we continue - which happens after __cpu_up returns.
292 */ 286 */
293 set_cpu_online(cpu, true); 287 set_cpu_online(cpu, true);
288 complete(&cpu_running);
294 289
295 /* 290 /*
296 * Setup the percpu timer for this CPU. 291 * Setup the percpu timer for this CPU.
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 8c57dd3680e9..fe31b22f18fd 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -25,8 +25,6 @@
25#include <linux/timer.h> 25#include <linux/timer.h>
26#include <linux/irq.h> 26#include <linux/irq.h>
27 27
28#include <linux/mc146818rtc.h>
29
30#include <asm/leds.h> 28#include <asm/leds.h>
31#include <asm/thread_info.h> 29#include <asm/thread_info.h>
32#include <asm/sched_clock.h> 30#include <asm/sched_clock.h>
@@ -149,8 +147,6 @@ void __init time_init(void)
149{ 147{
150 system_timer = machine_desc->timer; 148 system_timer = machine_desc->timer;
151 system_timer->init(); 149 system_timer->init();
152#ifdef CONFIG_HAVE_SCHED_CLOCK
153 sched_clock_postinit(); 150 sched_clock_postinit();
154#endif
155} 151}
156 152
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cd77743472a2..778454750a6c 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -227,6 +227,11 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
227#else 227#else
228#define S_SMP "" 228#define S_SMP ""
229#endif 229#endif
230#ifdef CONFIG_THUMB2_KERNEL
231#define S_ISA " THUMB2"
232#else
233#define S_ISA " ARM"
234#endif
230 235
231static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) 236static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
232{ 237{
@@ -234,8 +239,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
234 static int die_counter; 239 static int die_counter;
235 int ret; 240 int ret;
236 241
237 printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", 242 printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP
238 str, err, ++die_counter); 243 S_ISA "\n", str, err, ++die_counter);
239 244
240 /* trap and error numbers are mostly meaningless on ARM */ 245 /* trap and error numbers are mostly meaningless on ARM */
241 ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); 246 ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
@@ -784,18 +789,16 @@ static void __init kuser_get_tls_init(unsigned long vectors)
784 memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4); 789 memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
785} 790}
786 791
787void __init early_trap_init(void) 792void __init early_trap_init(void *vectors_base)
788{ 793{
789#if defined(CONFIG_CPU_USE_DOMAINS) 794 unsigned long vectors = (unsigned long)vectors_base;
790 unsigned long vectors = CONFIG_VECTORS_BASE;
791#else
792 unsigned long vectors = (unsigned long)vectors_page;
793#endif
794 extern char __stubs_start[], __stubs_end[]; 795 extern char __stubs_start[], __stubs_end[];
795 extern char __vectors_start[], __vectors_end[]; 796 extern char __vectors_start[], __vectors_end[];
796 extern char __kuser_helper_start[], __kuser_helper_end[]; 797 extern char __kuser_helper_start[], __kuser_helper_end[];
797 int kuser_sz = __kuser_helper_end - __kuser_helper_start; 798 int kuser_sz = __kuser_helper_end - __kuser_helper_start;
798 799
800 vectors_page = vectors_base;
801
799 /* 802 /*
800 * Copy the vectors, stubs and kuser helpers (in entry-armv.S) 803 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
801 * into the vector page, mapped at 0xffff0000, and ensure these 804 * into the vector page, mapped at 0xffff0000, and ensure these
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 4320b2096789..698479f1e197 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -437,7 +437,6 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
437 437
438 /* DMA slave channel configuration */ 438 /* DMA slave channel configuration */
439 atslave->dma_dev = &at_hdmac_device.dev; 439 atslave->dma_dev = &at_hdmac_device.dev;
440 atslave->reg_width = AT_DMA_SLAVE_WIDTH_32BIT;
441 atslave->cfg = ATC_FIFOCFG_HALFFIFO 440 atslave->cfg = ATC_FIFOCFG_HALFFIFO
442 | ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW; 441 | ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW;
443 atslave->ctrla = ATC_SCSIZE_16 | ATC_DCSIZE_16; 442 atslave->ctrla = ATC_SCSIZE_16 | ATC_DCSIZE_16;
diff --git a/arch/arm/mach-at91/at91x40.c b/arch/arm/mach-at91/at91x40.c
index 5400a1d65035..d62fe090d814 100644
--- a/arch/arm/mach-at91/at91x40.c
+++ b/arch/arm/mach-at91/at91x40.c
@@ -14,6 +14,7 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/irq.h> 15#include <linux/irq.h>
16#include <asm/proc-fns.h> 16#include <asm/proc-fns.h>
17#include <asm/system_misc.h>
17#include <asm/mach/arch.h> 18#include <asm/mach/arch.h>
18#include <mach/at91x40.h> 19#include <mach/at91x40.h>
19#include <mach/at91_st.h> 20#include <mach/at91_st.h>
diff --git a/arch/arm/mach-at91/cpuidle.c b/arch/arm/mach-at91/cpuidle.c
index 555d956b3a57..ece1f9aefb47 100644
--- a/arch/arm/mach-at91/cpuidle.c
+++ b/arch/arm/mach-at91/cpuidle.c
@@ -17,9 +17,10 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/cpuidle.h> 19#include <linux/cpuidle.h>
20#include <asm/proc-fns.h>
21#include <linux/io.h> 20#include <linux/io.h>
22#include <linux/export.h> 21#include <linux/export.h>
22#include <asm/proc-fns.h>
23#include <asm/cpuidle.h>
23 24
24#include "pm.h" 25#include "pm.h"
25 26
@@ -27,61 +28,39 @@
27 28
28static DEFINE_PER_CPU(struct cpuidle_device, at91_cpuidle_device); 29static DEFINE_PER_CPU(struct cpuidle_device, at91_cpuidle_device);
29 30
30static struct cpuidle_driver at91_idle_driver = {
31 .name = "at91_idle",
32 .owner = THIS_MODULE,
33};
34
35/* Actual code that puts the SoC in different idle states */ 31/* Actual code that puts the SoC in different idle states */
36static int at91_enter_idle(struct cpuidle_device *dev, 32static int at91_enter_idle(struct cpuidle_device *dev,
37 struct cpuidle_driver *drv, 33 struct cpuidle_driver *drv,
38 int index) 34 int index)
39{ 35{
40 struct timeval before, after; 36 at91_standby();
41 int idle_time;
42
43 local_irq_disable();
44 do_gettimeofday(&before);
45 if (index == 0)
46 /* Wait for interrupt state */
47 cpu_do_idle();
48 else if (index == 1)
49 at91_standby();
50 37
51 do_gettimeofday(&after);
52 local_irq_enable();
53 idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
54 (after.tv_usec - before.tv_usec);
55
56 dev->last_residency = idle_time;
57 return index; 38 return index;
58} 39}
59 40
41static struct cpuidle_driver at91_idle_driver = {
42 .name = "at91_idle",
43 .owner = THIS_MODULE,
44 .en_core_tk_irqen = 1,
45 .states[0] = ARM_CPUIDLE_WFI_STATE,
46 .states[1] = {
47 .enter = at91_enter_idle,
48 .exit_latency = 10,
49 .target_residency = 100000,
50 .flags = CPUIDLE_FLAG_TIME_VALID,
51 .name = "RAM_SR",
52 .desc = "WFI and DDR Self Refresh",
53 },
54 .state_count = AT91_MAX_STATES,
55};
56
60/* Initialize CPU idle by registering the idle states */ 57/* Initialize CPU idle by registering the idle states */
61static int at91_init_cpuidle(void) 58static int at91_init_cpuidle(void)
62{ 59{
63 struct cpuidle_device *device; 60 struct cpuidle_device *device;
64 struct cpuidle_driver *driver = &at91_idle_driver;
65 61
66 device = &per_cpu(at91_cpuidle_device, smp_processor_id()); 62 device = &per_cpu(at91_cpuidle_device, smp_processor_id());
67 device->state_count = AT91_MAX_STATES; 63 device->state_count = AT91_MAX_STATES;
68 driver->state_count = AT91_MAX_STATES;
69
70 /* Wait for interrupt state */
71 driver->states[0].enter = at91_enter_idle;
72 driver->states[0].exit_latency = 1;
73 driver->states[0].target_residency = 10000;
74 driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
75 strcpy(driver->states[0].name, "WFI");
76 strcpy(driver->states[0].desc, "Wait for interrupt");
77
78 /* Wait for interrupt and RAM self refresh state */
79 driver->states[1].enter = at91_enter_idle;
80 driver->states[1].exit_latency = 10;
81 driver->states[1].target_residency = 10000;
82 driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
83 strcpy(driver->states[1].name, "RAM_SR");
84 strcpy(driver->states[1].desc, "WFI and RAM Self Refresh");
85 64
86 cpuidle_register_driver(&at91_idle_driver); 65 cpuidle_register_driver(&at91_idle_driver);
87 66
diff --git a/arch/arm/mach-at91/include/mach/at_hdmac.h b/arch/arm/mach-at91/include/mach/at_hdmac.h
index 187cb58345c0..fff48d1a0f4e 100644
--- a/arch/arm/mach-at91/include/mach/at_hdmac.h
+++ b/arch/arm/mach-at91/include/mach/at_hdmac.h
@@ -24,18 +24,6 @@ struct at_dma_platform_data {
24}; 24};
25 25
26/** 26/**
27 * enum at_dma_slave_width - DMA slave register access width.
28 * @AT_DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
29 * @AT_DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
30 * @AT_DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
31 */
32enum at_dma_slave_width {
33 AT_DMA_SLAVE_WIDTH_8BIT = 0,
34 AT_DMA_SLAVE_WIDTH_16BIT,
35 AT_DMA_SLAVE_WIDTH_32BIT,
36};
37
38/**
39 * struct at_dma_slave - Controller-specific information about a slave 27 * struct at_dma_slave - Controller-specific information about a slave
40 * @dma_dev: required DMA master device 28 * @dma_dev: required DMA master device
41 * @tx_reg: physical address of data register used for 29 * @tx_reg: physical address of data register used for
@@ -48,9 +36,6 @@ enum at_dma_slave_width {
48 */ 36 */
49struct at_dma_slave { 37struct at_dma_slave {
50 struct device *dma_dev; 38 struct device *dma_dev;
51 dma_addr_t tx_reg;
52 dma_addr_t rx_reg;
53 enum at_dma_slave_width reg_width;
54 u32 cfg; 39 u32 cfg;
55 u32 ctrla; 40 u32 ctrla;
56}; 41};
diff --git a/arch/arm/mach-at91/include/mach/io.h b/arch/arm/mach-at91/include/mach/io.h
deleted file mode 100644
index 4003001eca3d..000000000000
--- a/arch/arm/mach-at91/include/mach/io.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * arch/arm/mach-at91/include/mach/io.h
3 *
4 * Copyright (C) 2003 SAN People
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef __ASM_ARCH_IO_H
22#define __ASM_ARCH_IO_H
23
24#include <mach/hardware.h>
25
26#define IO_SPACE_LIMIT 0xFFFFFFFF
27
28#define __io(a) __typesafe_io(a)
29#define __mem_pci(a) (a)
30
31#endif
diff --git a/arch/arm/mach-at91/include/mach/uncompress.h b/arch/arm/mach-at91/include/mach/uncompress.h
index 0234fd9d20d6..4218647c1fcd 100644
--- a/arch/arm/mach-at91/include/mach/uncompress.h
+++ b/arch/arm/mach-at91/include/mach/uncompress.h
@@ -23,6 +23,7 @@
23 23
24#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/atmel_serial.h> 25#include <linux/atmel_serial.h>
26#include <mach/hardware.h>
26 27
27#if defined(CONFIG_AT91_EARLY_DBGU0) 28#if defined(CONFIG_AT91_EARLY_DBGU0)
28#define UART_OFFSET AT91_BASE_DBGU0 29#define UART_OFFSET AT91_BASE_DBGU0
diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
index 1083739e3065..97cc04dc8073 100644
--- a/arch/arm/mach-at91/setup.c
+++ b/arch/arm/mach-at91/setup.c
@@ -11,6 +11,7 @@
11#include <linux/pm.h> 11#include <linux/pm.h>
12#include <linux/of_address.h> 12#include <linux/of_address.h>
13 13
14#include <asm/system_misc.h>
14#include <asm/mach/map.h> 15#include <asm/mach/map.h>
15 16
16#include <mach/hardware.h> 17#include <mach/hardware.h>
diff --git a/arch/arm/mach-bcmring/include/mach/io.h b/arch/arm/mach-bcmring/include/mach/io.h
deleted file mode 100644
index dae5e9b166ea..000000000000
--- a/arch/arm/mach-bcmring/include/mach/io.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 *
3 * Copyright (C) 1999 ARM Limited
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19#ifndef __ASM_ARM_ARCH_IO_H
20#define __ASM_ARM_ARCH_IO_H
21
22#include <mach/hardware.h>
23
24#define IO_SPACE_LIMIT 0xffffffff
25
26/*
27 * We don't actually have real ISA nor PCI buses, but there is so many
28 * drivers out there that might just work if we fake them...
29 */
30#define __io(a) __typesafe_io(a)
31#define __mem_pci(a) (a)
32
33#endif
diff --git a/arch/arm/mach-clps711x/edb7211-mm.c b/arch/arm/mach-clps711x/edb7211-mm.c
index 0bea1454ae03..4372f06c9929 100644
--- a/arch/arm/mach-clps711x/edb7211-mm.c
+++ b/arch/arm/mach-clps711x/edb7211-mm.c
@@ -21,6 +21,7 @@
21 */ 21 */
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/bug.h>
24 25
25#include <mach/hardware.h> 26#include <mach/hardware.h>
26#include <asm/page.h> 27#include <asm/page.h>
diff --git a/arch/arm/mach-clps711x/include/mach/io.h b/arch/arm/mach-clps711x/include/mach/io.h
deleted file mode 100644
index 2e0b3ced8f07..000000000000
--- a/arch/arm/mach-clps711x/include/mach/io.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * arch/arm/mach-clps711x/include/mach/io.h
3 *
4 * Copyright (C) 1999 ARM Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#ifndef __ASM_ARM_ARCH_IO_H
21#define __ASM_ARM_ARCH_IO_H
22
23#define IO_SPACE_LIMIT 0xffffffff
24
25#define __io(a) __typesafe_io(a)
26#define __mem_pci(a) (a)
27
28/*
29 * We don't support ins[lb]/outs[lb]. Make them fault.
30 */
31#define __raw_readsb(p,d,l) do { *(int *)0 = 0; } while (0)
32#define __raw_readsl(p,d,l) do { *(int *)0 = 0; } while (0)
33#define __raw_writesb(p,d,l) do { *(int *)0 = 0; } while (0)
34#define __raw_writesl(p,d,l) do { *(int *)0 = 0; } while (0)
35
36#endif
diff --git a/arch/arm/mach-clps711x/include/mach/uncompress.h b/arch/arm/mach-clps711x/include/mach/uncompress.h
index 7164310dea7c..35ed731b9f16 100644
--- a/arch/arm/mach-clps711x/include/mach/uncompress.h
+++ b/arch/arm/mach-clps711x/include/mach/uncompress.h
@@ -17,7 +17,6 @@
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 19 */
20#include <mach/io.h>
21#include <mach/hardware.h> 20#include <mach/hardware.h>
22#include <asm/hardware/clps7111.h> 21#include <asm/hardware/clps7111.h>
23 22
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index 941a308e1253..031805b1428d 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -72,13 +72,13 @@ void __init cns3xxx_map_io(void)
72/* used by entry-macro.S */ 72/* used by entry-macro.S */
73void __init cns3xxx_init_irq(void) 73void __init cns3xxx_init_irq(void)
74{ 74{
75 gic_init(0, 29, __io(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT), 75 gic_init(0, 29, IOMEM(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT),
76 __io(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT)); 76 IOMEM(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT));
77} 77}
78 78
79void cns3xxx_power_off(void) 79void cns3xxx_power_off(void)
80{ 80{
81 u32 __iomem *pm_base = __io(CNS3XXX_PM_BASE_VIRT); 81 u32 __iomem *pm_base = IOMEM(CNS3XXX_PM_BASE_VIRT);
82 u32 clkctrl; 82 u32 clkctrl;
83 83
84 printk(KERN_INFO "powering system down...\n"); 84 printk(KERN_INFO "powering system down...\n");
@@ -237,7 +237,7 @@ static void __init __cns3xxx_timer_init(unsigned int timer_irq)
237 237
238static void __init cns3xxx_timer_init(void) 238static void __init cns3xxx_timer_init(void)
239{ 239{
240 cns3xxx_tmr1 = __io(CNS3XXX_TIMER1_2_3_BASE_VIRT); 240 cns3xxx_tmr1 = IOMEM(CNS3XXX_TIMER1_2_3_BASE_VIRT);
241 241
242 __cns3xxx_timer_init(IRQ_CNS3XXX_TIMER0); 242 __cns3xxx_timer_init(IRQ_CNS3XXX_TIMER0);
243} 243}
diff --git a/arch/arm/mach-cns3xxx/devices.c b/arch/arm/mach-cns3xxx/devices.c
index 79d1fb02c23f..1e40c99b015f 100644
--- a/arch/arm/mach-cns3xxx/devices.c
+++ b/arch/arm/mach-cns3xxx/devices.c
@@ -98,7 +98,7 @@ static struct platform_device cns3xxx_sdhci_pdev = {
98 98
99void __init cns3xxx_sdhci_init(void) 99void __init cns3xxx_sdhci_init(void)
100{ 100{
101 u32 __iomem *gpioa = __io(CNS3XXX_MISC_BASE_VIRT + 0x0014); 101 u32 __iomem *gpioa = IOMEM(CNS3XXX_MISC_BASE_VIRT + 0x0014);
102 u32 gpioa_pins = __raw_readl(gpioa); 102 u32 gpioa_pins = __raw_readl(gpioa);
103 103
104 /* MMC/SD pins share with GPIOA */ 104 /* MMC/SD pins share with GPIOA */
diff --git a/arch/arm/mach-cns3xxx/include/mach/io.h b/arch/arm/mach-cns3xxx/include/mach/io.h
deleted file mode 100644
index 33b6fc1ece7c..000000000000
--- a/arch/arm/mach-cns3xxx/include/mach/io.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/*
2 * Copyright 2008 Cavium Networks
3 * Copyright 2003 ARM Limited
4 *
5 * This file is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, Version 2, as
7 * published by the Free Software Foundation.
8 */
9#ifndef __MACH_IO_H
10#define __MACH_IO_H
11
12#define IO_SPACE_LIMIT 0xffffffff
13
14#define __io(a) __typesafe_io(a)
15#define __mem_pci(a) (a)
16
17#endif
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index a30c7c5a6d83..9107691adbdb 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -18,6 +18,7 @@
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/export.h> 19#include <linux/export.h>
20#include <asm/proc-fns.h> 20#include <asm/proc-fns.h>
21#include <asm/cpuidle.h>
21 22
22#include <mach/cpuidle.h> 23#include <mach/cpuidle.h>
23#include <mach/ddr2.h> 24#include <mach/ddr2.h>
@@ -30,12 +31,43 @@ struct davinci_ops {
30 u32 flags; 31 u32 flags;
31}; 32};
32 33
34/* Actual code that puts the SoC in different idle states */
35static int davinci_enter_idle(struct cpuidle_device *dev,
36 struct cpuidle_driver *drv,
37 int index)
38{
39 struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
40 struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
41
42 if (ops && ops->enter)
43 ops->enter(ops->flags);
44
45 index = cpuidle_wrap_enter(dev, drv, index,
46 arm_cpuidle_simple_enter);
47
48 if (ops && ops->exit)
49 ops->exit(ops->flags);
50
51 return index;
52}
53
33/* fields in davinci_ops.flags */ 54/* fields in davinci_ops.flags */
34#define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN BIT(0) 55#define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN BIT(0)
35 56
36static struct cpuidle_driver davinci_idle_driver = { 57static struct cpuidle_driver davinci_idle_driver = {
37 .name = "cpuidle-davinci", 58 .name = "cpuidle-davinci",
38 .owner = THIS_MODULE, 59 .owner = THIS_MODULE,
60 .en_core_tk_irqen = 1,
61 .states[0] = ARM_CPUIDLE_WFI_STATE,
62 .states[1] = {
63 .enter = davinci_enter_idle,
64 .exit_latency = 10,
65 .target_residency = 100000,
66 .flags = CPUIDLE_FLAG_TIME_VALID,
67 .name = "DDR SR",
68 .desc = "WFI and DDR Self Refresh",
69 },
70 .state_count = DAVINCI_CPUIDLE_MAX_STATES,
39}; 71};
40 72
41static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device); 73static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
@@ -77,41 +109,10 @@ static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
77 }, 109 },
78}; 110};
79 111
80/* Actual code that puts the SoC in different idle states */
81static int davinci_enter_idle(struct cpuidle_device *dev,
82 struct cpuidle_driver *drv,
83 int index)
84{
85 struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
86 struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
87 struct timeval before, after;
88 int idle_time;
89
90 local_irq_disable();
91 do_gettimeofday(&before);
92
93 if (ops && ops->enter)
94 ops->enter(ops->flags);
95 /* Wait for interrupt state */
96 cpu_do_idle();
97 if (ops && ops->exit)
98 ops->exit(ops->flags);
99
100 do_gettimeofday(&after);
101 local_irq_enable();
102 idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
103 (after.tv_usec - before.tv_usec);
104
105 dev->last_residency = idle_time;
106
107 return index;
108}
109
110static int __init davinci_cpuidle_probe(struct platform_device *pdev) 112static int __init davinci_cpuidle_probe(struct platform_device *pdev)
111{ 113{
112 int ret; 114 int ret;
113 struct cpuidle_device *device; 115 struct cpuidle_device *device;
114 struct cpuidle_driver *driver = &davinci_idle_driver;
115 struct davinci_cpuidle_config *pdata = pdev->dev.platform_data; 116 struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
116 117
117 device = &per_cpu(davinci_cpuidle_device, smp_processor_id()); 118 device = &per_cpu(davinci_cpuidle_device, smp_processor_id());
@@ -123,27 +124,11 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
123 124
124 ddr2_reg_base = pdata->ddr2_ctlr_base; 125 ddr2_reg_base = pdata->ddr2_ctlr_base;
125 126
126 /* Wait for interrupt state */
127 driver->states[0].enter = davinci_enter_idle;
128 driver->states[0].exit_latency = 1;
129 driver->states[0].target_residency = 10000;
130 driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
131 strcpy(driver->states[0].name, "WFI");
132 strcpy(driver->states[0].desc, "Wait for interrupt");
133
134 /* Wait for interrupt and DDR self refresh state */
135 driver->states[1].enter = davinci_enter_idle;
136 driver->states[1].exit_latency = 10;
137 driver->states[1].target_residency = 10000;
138 driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
139 strcpy(driver->states[1].name, "DDR SR");
140 strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
141 if (pdata->ddr2_pdown) 127 if (pdata->ddr2_pdown)
142 davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN; 128 davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
143 cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]); 129 cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
144 130
145 device->state_count = DAVINCI_CPUIDLE_MAX_STATES; 131 device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
146 driver->state_count = DAVINCI_CPUIDLE_MAX_STATES;
147 132
148 ret = cpuidle_register_driver(&davinci_idle_driver); 133 ret = cpuidle_register_driver(&davinci_idle_driver);
149 if (ret) { 134 if (ret) {
diff --git a/arch/arm/mach-davinci/include/mach/entry-macro.S b/arch/arm/mach-davinci/include/mach/entry-macro.S
index c1661d2feca9..768b3c060214 100644
--- a/arch/arm/mach-davinci/include/mach/entry-macro.S
+++ b/arch/arm/mach-davinci/include/mach/entry-macro.S
@@ -8,7 +8,6 @@
8 * is licensed "as is" without any warranty of any kind, whether express 8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied. 9 * or implied.
10 */ 10 */
11#include <mach/io.h>
12#include <mach/irqs.h> 11#include <mach/irqs.h>
13 12
14 .macro get_irqnr_preamble, base, tmp 13 .macro get_irqnr_preamble, base, tmp
diff --git a/arch/arm/mach-davinci/include/mach/hardware.h b/arch/arm/mach-davinci/include/mach/hardware.h
index 0209b1fc22a1..2184691ebc2f 100644
--- a/arch/arm/mach-davinci/include/mach/hardware.h
+++ b/arch/arm/mach-davinci/include/mach/hardware.h
@@ -30,10 +30,4 @@
30#define __IO_ADDRESS(x) ((x) + IO_OFFSET) 30#define __IO_ADDRESS(x) ((x) + IO_OFFSET)
31#define IO_ADDRESS(pa) IOMEM(__IO_ADDRESS(pa)) 31#define IO_ADDRESS(pa) IOMEM(__IO_ADDRESS(pa))
32 32
33#ifdef __ASSEMBLER__
34#define IOMEM(x) x
35#else
36#define IOMEM(x) ((void __force __iomem *)(x))
37#endif
38
39#endif /* __ASM_ARCH_HARDWARE_H */ 33#endif /* __ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-davinci/include/mach/io.h b/arch/arm/mach-davinci/include/mach/io.h
deleted file mode 100644
index b2267d1e1a71..000000000000
--- a/arch/arm/mach-davinci/include/mach/io.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * DaVinci IO address definitions
3 *
4 * Copied from include/asm/arm/arch-omap/io.h
5 *
6 * 2007 (c) MontaVista Software, Inc. This file is licensed under
7 * the terms of the GNU General Public License version 2. This program
8 * is licensed "as is" without any warranty of any kind, whether express
9 * or implied.
10 */
11#ifndef __ASM_ARCH_IO_H
12#define __ASM_ARCH_IO_H
13
14#define IO_SPACE_LIMIT 0xffffffff
15
16/*
17 * We don't actually have real ISA nor PCI buses, but there is so many
18 * drivers out there that might just work if we fake them...
19 */
20#define __io(a) __typesafe_io(a)
21#define __mem_pci(a) (a)
22#define __mem_isa(a) (a)
23
24#endif /* __ASM_ARCH_IO_H */
diff --git a/arch/arm/mach-davinci/include/mach/uncompress.h b/arch/arm/mach-davinci/include/mach/uncompress.h
index 9dc7cf9664fe..da2fb2c2155a 100644
--- a/arch/arm/mach-davinci/include/mach/uncompress.h
+++ b/arch/arm/mach-davinci/include/mach/uncompress.h
@@ -25,6 +25,8 @@
25 25
26#include <mach/serial.h> 26#include <mach/serial.h>
27 27
28#define IOMEM(x) ((void __force __iomem *)(x))
29
28u32 *uart; 30u32 *uart;
29 31
30/* PORT_16C550A, in polled non-fifo mode */ 32/* PORT_16C550A, in polled non-fifo mode */
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index e1969ce904dc..75da315b6587 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -19,11 +19,14 @@
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21 21
22#include <mach/hardware.h> 22#include <asm/sched_clock.h>
23#include <asm/mach/irq.h> 23#include <asm/mach/irq.h>
24#include <asm/mach/time.h> 24#include <asm/mach/time.h>
25
25#include <mach/cputype.h> 26#include <mach/cputype.h>
27#include <mach/hardware.h>
26#include <mach/time.h> 28#include <mach/time.h>
29
27#include "clock.h" 30#include "clock.h"
28 31
29static struct clock_event_device clockevent_davinci; 32static struct clock_event_device clockevent_davinci;
@@ -272,19 +275,9 @@ static cycle_t read_cycles(struct clocksource *cs)
272 return (cycles_t)timer32_read(t); 275 return (cycles_t)timer32_read(t);
273} 276}
274 277
275/*
276 * Kernel assumes that sched_clock can be called early but may not have
277 * things ready yet.
278 */
279static cycle_t read_dummy(struct clocksource *cs)
280{
281 return 0;
282}
283
284
285static struct clocksource clocksource_davinci = { 278static struct clocksource clocksource_davinci = {
286 .rating = 300, 279 .rating = 300,
287 .read = read_dummy, 280 .read = read_cycles,
288 .mask = CLOCKSOURCE_MASK(32), 281 .mask = CLOCKSOURCE_MASK(32),
289 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 282 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
290}; 283};
@@ -292,12 +285,9 @@ static struct clocksource clocksource_davinci = {
292/* 285/*
293 * Overwrite weak default sched_clock with something more precise 286 * Overwrite weak default sched_clock with something more precise
294 */ 287 */
295unsigned long long notrace sched_clock(void) 288static u32 notrace davinci_read_sched_clock(void)
296{ 289{
297 const cycle_t cyc = clocksource_davinci.read(&clocksource_davinci); 290 return timer32_read(&timers[TID_CLOCKSOURCE]);
298
299 return clocksource_cyc2ns(cyc, clocksource_davinci.mult,
300 clocksource_davinci.shift);
301} 291}
302 292
303/* 293/*
@@ -397,12 +387,14 @@ static void __init davinci_timer_init(void)
397 davinci_clock_tick_rate = clk_get_rate(timer_clk); 387 davinci_clock_tick_rate = clk_get_rate(timer_clk);
398 388
399 /* setup clocksource */ 389 /* setup clocksource */
400 clocksource_davinci.read = read_cycles;
401 clocksource_davinci.name = id_to_name[clocksource_id]; 390 clocksource_davinci.name = id_to_name[clocksource_id];
402 if (clocksource_register_hz(&clocksource_davinci, 391 if (clocksource_register_hz(&clocksource_davinci,
403 davinci_clock_tick_rate)) 392 davinci_clock_tick_rate))
404 printk(err, clocksource_davinci.name); 393 printk(err, clocksource_davinci.name);
405 394
395 setup_sched_clock(davinci_read_sched_clock, 32,
396 davinci_clock_tick_rate);
397
406 /* setup clockevent */ 398 /* setup clockevent */
407 clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id]; 399 clockevent_davinci.name = id_to_name[timers[TID_CLOCKEVENT].id];
408 clockevent_davinci.mult = div_sc(davinci_clock_tick_rate, NSEC_PER_SEC, 400 clockevent_davinci.mult = div_sc(davinci_clock_tick_rate, NSEC_PER_SEC,
diff --git a/arch/arm/mach-dove/addr-map.c b/arch/arm/mach-dove/addr-map.c
index 98b8c83b09ab..2a06c0163418 100644
--- a/arch/arm/mach-dove/addr-map.c
+++ b/arch/arm/mach-dove/addr-map.c
@@ -14,6 +14,7 @@
14#include <linux/io.h> 14#include <linux/io.h>
15#include <asm/mach/arch.h> 15#include <asm/mach/arch.h>
16#include <asm/setup.h> 16#include <asm/setup.h>
17#include <mach/dove.h>
17#include <plat/addr-map.h> 18#include <plat/addr-map.h>
18#include "common.h" 19#include "common.h"
19 20
diff --git a/arch/arm/mach-dove/include/mach/io.h b/arch/arm/mach-dove/include/mach/io.h
index eb4936ff90ad..29c8b85355a5 100644
--- a/arch/arm/mach-dove/include/mach/io.h
+++ b/arch/arm/mach-dove/include/mach/io.h
@@ -15,6 +15,5 @@
15 15
16#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \ 16#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
17 DOVE_PCIE0_IO_VIRT_BASE)) 17 DOVE_PCIE0_IO_VIRT_BASE))
18#define __mem_pci(a) (a)
19 18
20#endif 19#endif
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
index 8c9f56a3e8ec..6f8068692edf 100644
--- a/arch/arm/mach-ebsa110/core.c
+++ b/arch/arm/mach-ebsa110/core.c
@@ -116,6 +116,20 @@ static void __init ebsa110_map_io(void)
116 iotable_init(ebsa110_io_desc, ARRAY_SIZE(ebsa110_io_desc)); 116 iotable_init(ebsa110_io_desc, ARRAY_SIZE(ebsa110_io_desc));
117} 117}
118 118
119static void __iomem *ebsa110_ioremap_caller(unsigned long cookie, size_t size,
120 unsigned int flags, void *caller)
121{
122 return (void __iomem *)cookie;
123}
124
125static void ebsa110_iounmap(volatile void __iomem *io_addr)
126{}
127
128static void __init ebsa110_init_early(void)
129{
130 arch_ioremap_caller = ebsa110_ioremap_caller;
131 arch_iounmap = ebsa110_iounmap;
132}
119 133
120#define PIT_CTRL (PIT_BASE + 0x0d) 134#define PIT_CTRL (PIT_BASE + 0x0d)
121#define PIT_T2 (PIT_BASE + 0x09) 135#define PIT_T2 (PIT_BASE + 0x09)
@@ -312,6 +326,7 @@ MACHINE_START(EBSA110, "EBSA110")
312 .reserve_lp2 = 1, 326 .reserve_lp2 = 1,
313 .restart_mode = 's', 327 .restart_mode = 's',
314 .map_io = ebsa110_map_io, 328 .map_io = ebsa110_map_io,
329 .init_early = ebsa110_init_early,
315 .init_irq = ebsa110_init_irq, 330 .init_irq = ebsa110_init_irq,
316 .timer = &ebsa110_timer, 331 .timer = &ebsa110_timer,
317 .restart = ebsa110_restart, 332 .restart = ebsa110_restart,
diff --git a/arch/arm/mach-ebsa110/include/mach/io.h b/arch/arm/mach-ebsa110/include/mach/io.h
index 44679db672fb..11bb0799424b 100644
--- a/arch/arm/mach-ebsa110/include/mach/io.h
+++ b/arch/arm/mach-ebsa110/include/mach/io.h
@@ -62,15 +62,6 @@ void __writel(u32 val, void __iomem *addr);
62#define writew(v,b) __writew(v,b) 62#define writew(v,b) __writew(v,b)
63#define writel(v,b) __writel(v,b) 63#define writel(v,b) __writel(v,b)
64 64
65static inline void __iomem *__arch_ioremap(unsigned long cookie, size_t size,
66 unsigned int flags)
67{
68 return (void __iomem *)cookie;
69}
70
71#define __arch_ioremap __arch_ioremap
72#define __arch_iounmap(cookie) do { } while (0)
73
74extern void insb(unsigned int port, void *buf, int sz); 65extern void insb(unsigned int port, void *buf, int sz);
75extern void insw(unsigned int port, void *buf, int sz); 66extern void insw(unsigned int port, void *buf, int sz);
76extern void insl(unsigned int port, void *buf, int sz); 67extern void insl(unsigned int port, void *buf, int sz);
diff --git a/arch/arm/mach-ep93xx/include/mach/io.h b/arch/arm/mach-ep93xx/include/mach/io.h
deleted file mode 100644
index 594b77f21054..000000000000
--- a/arch/arm/mach-ep93xx/include/mach/io.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * arch/arm/mach-ep93xx/include/mach/io.h
3 */
4
5#ifndef __ASM_MACH_IO_H
6#define __ASM_MACH_IO_H
7
8#define IO_SPACE_LIMIT 0xffffffff
9
10#define __io(p) __typesafe_io(p)
11#define __mem_pci(p) (p)
12
13/*
14 * A typesafe __io() variation for variable initialisers
15 */
16#ifdef __ASSEMBLER__
17#define IOMEM(p) p
18#else
19#define IOMEM(p) ((void __iomem __force *)(p))
20#endif
21
22#endif /* __ASM_MACH_IO_H */
diff --git a/arch/arm/mach-exynos/include/mach/io.h b/arch/arm/mach-exynos/include/mach/io.h
deleted file mode 100644
index d5478d247535..000000000000
--- a/arch/arm/mach-exynos/include/mach/io.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/* linux/arch/arm/mach-exynos4/include/mach/io.h
2 *
3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Copyright 2008-2010 Ben Dooks <ben-linux@fluff.org>
7 *
8 * Based on arch/arm/mach-s5p6442/include/mach/io.h
9 *
10 * Default IO routines for EXYNOS4
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15*/
16
17#ifndef __ASM_ARM_ARCH_IO_H
18#define __ASM_ARM_ARCH_IO_H __FILE__
19
20/* No current ISA/PCI bus support. */
21#define __io(a) __typesafe_io(a)
22#define __mem_pci(a) (a)
23
24#define IO_SPACE_LIMIT (0xFFFFFFFF)
25
26#endif /* __ASM_ARM_ARCH_IO_H */
diff --git a/arch/arm/mach-footbridge/include/mach/io.h b/arch/arm/mach-footbridge/include/mach/io.h
index 15a70396c27d..aba531eebbc6 100644
--- a/arch/arm/mach-footbridge/include/mach/io.h
+++ b/arch/arm/mach-footbridge/include/mach/io.h
@@ -27,18 +27,5 @@
27 * Translation of various region addresses to virtual addresses 27 * Translation of various region addresses to virtual addresses
28 */ 28 */
29#define __io(a) ((void __iomem *)(PCIO_BASE + (a))) 29#define __io(a) ((void __iomem *)(PCIO_BASE + (a)))
30#if 1
31#define __mem_pci(a) (a)
32#else
33
34static inline void __iomem *___mem_pci(void __iomem *p)
35{
36 unsigned long a = (unsigned long)p;
37 BUG_ON(a <= 0xc0000000 || a >= 0xe0000000);
38 return p;
39}
40
41#define __mem_pci(a) ___mem_pci(a)
42#endif
43 30
44#endif 31#endif
diff --git a/arch/arm/mach-gemini/include/mach/io.h b/arch/arm/mach-gemini/include/mach/io.h
deleted file mode 100644
index c548056b98b2..000000000000
--- a/arch/arm/mach-gemini/include/mach/io.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * Copyright (C) 2001-2006 Storlink, Corp.
3 * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10#ifndef __MACH_IO_H
11#define __MACH_IO_H
12
13#define IO_SPACE_LIMIT 0xffffffff
14
15#define __io(a) __typesafe_io(a)
16#define __mem_pci(a) (a)
17
18#endif /* __MACH_IO_H */
diff --git a/arch/arm/mach-h720x/common.c b/arch/arm/mach-h720x/common.c
index e756d1ac00c2..aa1331e86bcf 100644
--- a/arch/arm/mach-h720x/common.c
+++ b/arch/arm/mach-h720x/common.c
@@ -24,6 +24,7 @@
24#include <asm/dma.h> 24#include <asm/dma.h>
25#include <mach/hardware.h> 25#include <mach/hardware.h>
26#include <asm/irq.h> 26#include <asm/irq.h>
27#include <asm/system_misc.h>
27#include <asm/mach/irq.h> 28#include <asm/mach/irq.h>
28#include <asm/mach/map.h> 29#include <asm/mach/map.h>
29#include <mach/irqs.h> 30#include <mach/irqs.h>
diff --git a/arch/arm/mach-h720x/include/mach/io.h b/arch/arm/mach-h720x/include/mach/io.h
deleted file mode 100644
index 2c8659c21a93..000000000000
--- a/arch/arm/mach-h720x/include/mach/io.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * arch/arm/mach-h720x/include/mach/io.h
3 *
4 * Copyright (C) 2000 Steve Hill (sjhill@cotw.com)
5 *
6 * Changelog:
7 *
8 * 09-19-2001 JJKIM
9 * Created from arch/arm/mach-l7200/include/mach/io.h
10 *
11 * 03-27-2003 Robert Schwebel <r.schwebel@pengutronix.de>:
12 * re-unified header files for h720x
13 */
14#ifndef __ASM_ARM_ARCH_IO_H
15#define __ASM_ARM_ARCH_IO_H
16
17#define IO_SPACE_LIMIT 0xffffffff
18
19#define __io(a) __typesafe_io(a)
20#define __mem_pci(a) (a)
21
22#endif
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 808b055289b2..410a112bb52e 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -35,7 +35,6 @@
35#include <asm/mach/arch.h> 35#include <asm/mach/arch.h>
36#include <asm/mach/map.h> 36#include <asm/mach/map.h>
37#include <asm/mach/time.h> 37#include <asm/mach/time.h>
38#include <mach/irqs.h>
39 38
40#include "core.h" 39#include "core.h"
41#include "sysregs.h" 40#include "sysregs.h"
diff --git a/arch/arm/mach-highbank/include/mach/io.h b/arch/arm/mach-highbank/include/mach/io.h
deleted file mode 100644
index 70cfa3ba7697..000000000000
--- a/arch/arm/mach-highbank/include/mach/io.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef __MACH_IO_H
2#define __MACH_IO_H
3
4#define __io(a) ({ (void)(a); __typesafe_io(0); })
5#define __mem_pci(a) (a)
6
7#endif
diff --git a/arch/arm/mach-highbank/include/mach/irqs.h b/arch/arm/mach-highbank/include/mach/irqs.h
deleted file mode 100644
index 9746aab14e9a..000000000000
--- a/arch/arm/mach-highbank/include/mach/irqs.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __MACH_IRQS_H
2#define __MACH_IRQS_H
3
4#define NR_IRQS 192
5
6#endif
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 52359f80c42d..7561eca131b0 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -1,6 +1,3 @@
1config IMX_HAVE_DMA_V1
2 bool
3
4config HAVE_IMX_GPC 1config HAVE_IMX_GPC
5 bool 2 bool
6 3
@@ -38,7 +35,6 @@ config SOC_IMX1
38 bool 35 bool
39 select ARCH_MX1 36 select ARCH_MX1
40 select CPU_ARM920T 37 select CPU_ARM920T
41 select IMX_HAVE_DMA_V1
42 select IMX_HAVE_IOMUX_V1 38 select IMX_HAVE_IOMUX_V1
43 select MXC_AVIC 39 select MXC_AVIC
44 40
@@ -46,7 +42,6 @@ config SOC_IMX21
46 bool 42 bool
47 select MACH_MX21 43 select MACH_MX21
48 select CPU_ARM926T 44 select CPU_ARM926T
49 select IMX_HAVE_DMA_V1
50 select IMX_HAVE_IOMUX_V1 45 select IMX_HAVE_IOMUX_V1
51 select MXC_AVIC 46 select MXC_AVIC
52 47
@@ -61,7 +56,6 @@ config SOC_IMX27
61 bool 56 bool
62 select MACH_MX27 57 select MACH_MX27
63 select CPU_ARM926T 58 select CPU_ARM926T
64 select IMX_HAVE_DMA_V1
65 select IMX_HAVE_IOMUX_V1 59 select IMX_HAVE_IOMUX_V1
66 select MXC_AVIC 60 select MXC_AVIC
67 61
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 35fc450fa263..ab939c5046c3 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -1,5 +1,3 @@
1obj-$(CONFIG_IMX_HAVE_DMA_V1) += dma-v1.o
2
3obj-$(CONFIG_SOC_IMX1) += clock-imx1.o mm-imx1.o 1obj-$(CONFIG_SOC_IMX1) += clock-imx1.o mm-imx1.o
4obj-$(CONFIG_SOC_IMX21) += clock-imx21.o mm-imx21.o 2obj-$(CONFIG_SOC_IMX21) += clock-imx21.o mm-imx21.o
5 3
diff --git a/arch/arm/mach-imx/dma-v1.c b/arch/arm/mach-imx/dma-v1.c
deleted file mode 100644
index 3189a6004cf9..000000000000
--- a/arch/arm/mach-imx/dma-v1.c
+++ /dev/null
@@ -1,845 +0,0 @@
1/*
2 * linux/arch/arm/plat-mxc/dma-v1.c
3 *
4 * i.MX DMA registration and IRQ dispatching
5 *
6 * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz>
7 * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de>
8 * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
22 * MA 02110-1301, USA.
23 */
24
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/kernel.h>
28#include <linux/interrupt.h>
29#include <linux/err.h>
30#include <linux/errno.h>
31#include <linux/clk.h>
32#include <linux/scatterlist.h>
33#include <linux/io.h>
34
35#include <asm/irq.h>
36#include <mach/hardware.h>
37#include <mach/dma-v1.h>
38
39#define DMA_DCR 0x00 /* Control Register */
40#define DMA_DISR 0x04 /* Interrupt status Register */
41#define DMA_DIMR 0x08 /* Interrupt mask Register */
42#define DMA_DBTOSR 0x0c /* Burst timeout status Register */
43#define DMA_DRTOSR 0x10 /* Request timeout Register */
44#define DMA_DSESR 0x14 /* Transfer Error Status Register */
45#define DMA_DBOSR 0x18 /* Buffer overflow status Register */
46#define DMA_DBTOCR 0x1c /* Burst timeout control Register */
47#define DMA_WSRA 0x40 /* W-Size Register A */
48#define DMA_XSRA 0x44 /* X-Size Register A */
49#define DMA_YSRA 0x48 /* Y-Size Register A */
50#define DMA_WSRB 0x4c /* W-Size Register B */
51#define DMA_XSRB 0x50 /* X-Size Register B */
52#define DMA_YSRB 0x54 /* Y-Size Register B */
53#define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */
54#define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */
55#define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */
56#define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */
57#define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */
58#define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */
59#define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */
60#define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */
61#define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */
62
63#define DCR_DRST (1<<1)
64#define DCR_DEN (1<<0)
65#define DBTOCR_EN (1<<15)
66#define DBTOCR_CNT(x) ((x) & 0x7fff)
67#define CNTR_CNT(x) ((x) & 0xffffff)
68#define CCR_ACRPT (1<<14)
69#define CCR_DMOD_LINEAR (0x0 << 12)
70#define CCR_DMOD_2D (0x1 << 12)
71#define CCR_DMOD_FIFO (0x2 << 12)
72#define CCR_DMOD_EOBFIFO (0x3 << 12)
73#define CCR_SMOD_LINEAR (0x0 << 10)
74#define CCR_SMOD_2D (0x1 << 10)
75#define CCR_SMOD_FIFO (0x2 << 10)
76#define CCR_SMOD_EOBFIFO (0x3 << 10)
77#define CCR_MDIR_DEC (1<<9)
78#define CCR_MSEL_B (1<<8)
79#define CCR_DSIZ_32 (0x0 << 6)
80#define CCR_DSIZ_8 (0x1 << 6)
81#define CCR_DSIZ_16 (0x2 << 6)
82#define CCR_SSIZ_32 (0x0 << 4)
83#define CCR_SSIZ_8 (0x1 << 4)
84#define CCR_SSIZ_16 (0x2 << 4)
85#define CCR_REN (1<<3)
86#define CCR_RPT (1<<2)
87#define CCR_FRC (1<<1)
88#define CCR_CEN (1<<0)
89#define RTOR_EN (1<<15)
90#define RTOR_CLK (1<<14)
91#define RTOR_PSC (1<<13)
92
93/*
94 * struct imx_dma_channel - i.MX specific DMA extension
95 * @name: name specified by DMA client
96 * @irq_handler: client callback for end of transfer
97 * @err_handler: client callback for error condition
98 * @data: clients context data for callbacks
99 * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE
100 * @sg: pointer to the actual read/written chunk for scatter-gather emulation
101 * @resbytes: total residual number of bytes to transfer
102 * (it can be lower or same as sum of SG mapped chunk sizes)
103 * @sgcount: number of chunks to be read/written
104 *
105 * Structure is used for IMX DMA processing. It would be probably good
106 * @struct dma_struct in the future for external interfacing and use
107 * @struct imx_dma_channel only as extension to it.
108 */
109
110struct imx_dma_channel {
111 const char *name;
112 void (*irq_handler) (int, void *);
113 void (*err_handler) (int, void *, int errcode);
114 void (*prog_handler) (int, void *, struct scatterlist *);
115 void *data;
116 unsigned int dma_mode;
117 struct scatterlist *sg;
118 unsigned int resbytes;
119 int dma_num;
120
121 int in_use;
122
123 u32 ccr_from_device;
124 u32 ccr_to_device;
125
126 struct timer_list watchdog;
127
128 int hw_chaining;
129};
130
131static void __iomem *imx_dmav1_baseaddr;
132
133static void imx_dmav1_writel(unsigned val, unsigned offset)
134{
135 __raw_writel(val, imx_dmav1_baseaddr + offset);
136}
137
138static unsigned imx_dmav1_readl(unsigned offset)
139{
140 return __raw_readl(imx_dmav1_baseaddr + offset);
141}
142
143static struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS];
144
145static struct clk *dma_clk;
146
147static int imx_dma_hw_chain(struct imx_dma_channel *imxdma)
148{
149 if (cpu_is_mx27())
150 return imxdma->hw_chaining;
151 else
152 return 0;
153}
154
155/*
156 * imx_dma_sg_next - prepare next chunk for scatter-gather DMA emulation
157 */
158static inline int imx_dma_sg_next(int channel, struct scatterlist *sg)
159{
160 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
161 unsigned long now;
162
163 if (!imxdma->name) {
164 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
165 __func__, channel);
166 return 0;
167 }
168
169 now = min(imxdma->resbytes, sg->length);
170 if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
171 imxdma->resbytes -= now;
172
173 if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
174 imx_dmav1_writel(sg->dma_address, DMA_DAR(channel));
175 else
176 imx_dmav1_writel(sg->dma_address, DMA_SAR(channel));
177
178 imx_dmav1_writel(now, DMA_CNTR(channel));
179
180 pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
181 "size 0x%08x\n", channel,
182 imx_dmav1_readl(DMA_DAR(channel)),
183 imx_dmav1_readl(DMA_SAR(channel)),
184 imx_dmav1_readl(DMA_CNTR(channel)));
185
186 return now;
187}
188
189/**
190 * imx_dma_setup_single - setup i.MX DMA channel for linear memory to/from
191 * device transfer
192 *
193 * @channel: i.MX DMA channel number
194 * @dma_address: the DMA/physical memory address of the linear data block
195 * to transfer
196 * @dma_length: length of the data block in bytes
197 * @dev_addr: physical device port address
198 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
199 * or %DMA_MODE_WRITE from memory to the device
200 *
201 * Return value: if incorrect parameters are provided -%EINVAL.
202 * Zero indicates success.
203 */
204int
205imx_dma_setup_single(int channel, dma_addr_t dma_address,
206 unsigned int dma_length, unsigned int dev_addr,
207 unsigned int dmamode)
208{
209 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
210
211 imxdma->sg = NULL;
212 imxdma->dma_mode = dmamode;
213
214 if (!dma_address) {
215 printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
216 channel);
217 return -EINVAL;
218 }
219
220 if (!dma_length) {
221 printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
222 channel);
223 return -EINVAL;
224 }
225
226 if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
227 pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d "
228 "dev_addr=0x%08x for read\n",
229 channel, __func__, (unsigned int)dma_address,
230 dma_length, dev_addr);
231
232 imx_dmav1_writel(dev_addr, DMA_SAR(channel));
233 imx_dmav1_writel(dma_address, DMA_DAR(channel));
234 imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
235 } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
236 pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d "
237 "dev_addr=0x%08x for write\n",
238 channel, __func__, (unsigned int)dma_address,
239 dma_length, dev_addr);
240
241 imx_dmav1_writel(dma_address, DMA_SAR(channel));
242 imx_dmav1_writel(dev_addr, DMA_DAR(channel));
243 imx_dmav1_writel(imxdma->ccr_to_device,
244 DMA_CCR(channel));
245 } else {
246 printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
247 channel);
248 return -EINVAL;
249 }
250
251 imx_dmav1_writel(dma_length, DMA_CNTR(channel));
252
253 return 0;
254}
255EXPORT_SYMBOL(imx_dma_setup_single);
256
257/**
258 * imx_dma_setup_sg - setup i.MX DMA channel SG list to/from device transfer
259 * @channel: i.MX DMA channel number
260 * @sg: pointer to the scatter-gather list/vector
261 * @sgcount: scatter-gather list hungs count
262 * @dma_length: total length of the transfer request in bytes
263 * @dev_addr: physical device port address
264 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
265 * or %DMA_MODE_WRITE from memory to the device
266 *
267 * The function sets up DMA channel state and registers to be ready for
268 * transfer specified by provided parameters. The scatter-gather emulation
269 * is set up according to the parameters.
270 *
271 * The full preparation of the transfer requires setup of more register
272 * by the caller before imx_dma_enable() can be called.
273 *
274 * %BLR(channel) holds transfer burst length in bytes, 0 means 64 bytes
275 *
276 * %RSSR(channel) has to be set to the DMA request line source %DMA_REQ_xxx
277 *
278 * %CCR(channel) has to specify transfer parameters, the next settings is
279 * typical for linear or simple scatter-gather transfers if %DMA_MODE_READ is
280 * specified
281 *
282 * %CCR_DMOD_LINEAR | %CCR_DSIZ_32 | %CCR_SMOD_FIFO | %CCR_SSIZ_x
283 *
284 * The typical setup for %DMA_MODE_WRITE is specified by next options
285 * combination
286 *
287 * %CCR_SMOD_LINEAR | %CCR_SSIZ_32 | %CCR_DMOD_FIFO | %CCR_DSIZ_x
288 *
289 * Be careful here and do not mistakenly mix source and target device
290 * port sizes constants, they are really different:
291 * %CCR_SSIZ_8, %CCR_SSIZ_16, %CCR_SSIZ_32,
292 * %CCR_DSIZ_8, %CCR_DSIZ_16, %CCR_DSIZ_32
293 *
294 * Return value: if incorrect parameters are provided -%EINVAL.
295 * Zero indicates success.
296 */
297int
298imx_dma_setup_sg(int channel,
299 struct scatterlist *sg, unsigned int sgcount,
300 unsigned int dma_length, unsigned int dev_addr,
301 unsigned int dmamode)
302{
303 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
304
305 if (imxdma->in_use)
306 return -EBUSY;
307
308 imxdma->sg = sg;
309 imxdma->dma_mode = dmamode;
310 imxdma->resbytes = dma_length;
311
312 if (!sg || !sgcount) {
313 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
314 channel);
315 return -EINVAL;
316 }
317
318 if (!sg->length) {
319 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
320 channel);
321 return -EINVAL;
322 }
323
324 if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
325 pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
326 "dev_addr=0x%08x for read\n",
327 channel, __func__, sg, sgcount, dma_length, dev_addr);
328
329 imx_dmav1_writel(dev_addr, DMA_SAR(channel));
330 imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
331 } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
332 pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
333 "dev_addr=0x%08x for write\n",
334 channel, __func__, sg, sgcount, dma_length, dev_addr);
335
336 imx_dmav1_writel(dev_addr, DMA_DAR(channel));
337 imx_dmav1_writel(imxdma->ccr_to_device, DMA_CCR(channel));
338 } else {
339 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
340 channel);
341 return -EINVAL;
342 }
343
344 imx_dma_sg_next(channel, sg);
345
346 return 0;
347}
348EXPORT_SYMBOL(imx_dma_setup_sg);
349
350int
351imx_dma_config_channel(int channel, unsigned int config_port,
352 unsigned int config_mem, unsigned int dmareq, int hw_chaining)
353{
354 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
355 u32 dreq = 0;
356
357 imxdma->hw_chaining = 0;
358
359 if (hw_chaining) {
360 imxdma->hw_chaining = 1;
361 if (!imx_dma_hw_chain(imxdma))
362 return -EINVAL;
363 }
364
365 if (dmareq)
366 dreq = CCR_REN;
367
368 imxdma->ccr_from_device = config_port | (config_mem << 2) | dreq;
369 imxdma->ccr_to_device = config_mem | (config_port << 2) | dreq;
370
371 imx_dmav1_writel(dmareq, DMA_RSSR(channel));
372
373 return 0;
374}
375EXPORT_SYMBOL(imx_dma_config_channel);
376
377void imx_dma_config_burstlen(int channel, unsigned int burstlen)
378{
379 imx_dmav1_writel(burstlen, DMA_BLR(channel));
380}
381EXPORT_SYMBOL(imx_dma_config_burstlen);
382
383/**
384 * imx_dma_setup_handlers - setup i.MX DMA channel end and error notification
385 * handlers
386 * @channel: i.MX DMA channel number
387 * @irq_handler: the pointer to the function called if the transfer
388 * ends successfully
389 * @err_handler: the pointer to the function called if the premature
390 * end caused by error occurs
391 * @data: user specified value to be passed to the handlers
392 */
393int
394imx_dma_setup_handlers(int channel,
395 void (*irq_handler) (int, void *),
396 void (*err_handler) (int, void *, int),
397 void *data)
398{
399 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
400 unsigned long flags;
401
402 if (!imxdma->name) {
403 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
404 __func__, channel);
405 return -ENODEV;
406 }
407
408 local_irq_save(flags);
409 imx_dmav1_writel(1 << channel, DMA_DISR);
410 imxdma->irq_handler = irq_handler;
411 imxdma->err_handler = err_handler;
412 imxdma->data = data;
413 local_irq_restore(flags);
414 return 0;
415}
416EXPORT_SYMBOL(imx_dma_setup_handlers);
417
418/**
419 * imx_dma_setup_progression_handler - setup i.MX DMA channel progression
420 * handlers
421 * @channel: i.MX DMA channel number
422 * @prog_handler: the pointer to the function called if the transfer progresses
423 */
424int
425imx_dma_setup_progression_handler(int channel,
426 void (*prog_handler) (int, void*, struct scatterlist*))
427{
428 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
429 unsigned long flags;
430
431 if (!imxdma->name) {
432 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
433 __func__, channel);
434 return -ENODEV;
435 }
436
437 local_irq_save(flags);
438 imxdma->prog_handler = prog_handler;
439 local_irq_restore(flags);
440 return 0;
441}
442EXPORT_SYMBOL(imx_dma_setup_progression_handler);
443
444/**
445 * imx_dma_enable - function to start i.MX DMA channel operation
446 * @channel: i.MX DMA channel number
447 *
448 * The channel has to be allocated by driver through imx_dma_request()
449 * or imx_dma_request_by_prio() function.
450 * The transfer parameters has to be set to the channel registers through
451 * call of the imx_dma_setup_single() or imx_dma_setup_sg() function
452 * and registers %BLR(channel), %RSSR(channel) and %CCR(channel) has to
453 * be set prior this function call by the channel user.
454 */
455void imx_dma_enable(int channel)
456{
457 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
458 unsigned long flags;
459
460 pr_debug("imxdma%d: imx_dma_enable\n", channel);
461
462 if (!imxdma->name) {
463 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
464 __func__, channel);
465 return;
466 }
467
468 if (imxdma->in_use)
469 return;
470
471 local_irq_save(flags);
472
473 imx_dmav1_writel(1 << channel, DMA_DISR);
474 imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
475 imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
476 CCR_ACRPT, DMA_CCR(channel));
477
478 if ((cpu_is_mx21() || cpu_is_mx27()) &&
479 imxdma->sg && imx_dma_hw_chain(imxdma)) {
480 imxdma->sg = sg_next(imxdma->sg);
481 if (imxdma->sg) {
482 u32 tmp;
483 imx_dma_sg_next(channel, imxdma->sg);
484 tmp = imx_dmav1_readl(DMA_CCR(channel));
485 imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
486 DMA_CCR(channel));
487 }
488 }
489 imxdma->in_use = 1;
490
491 local_irq_restore(flags);
492}
493EXPORT_SYMBOL(imx_dma_enable);
494
495/**
496 * imx_dma_disable - stop, finish i.MX DMA channel operatin
497 * @channel: i.MX DMA channel number
498 */
499void imx_dma_disable(int channel)
500{
501 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
502 unsigned long flags;
503
504 pr_debug("imxdma%d: imx_dma_disable\n", channel);
505
506 if (imx_dma_hw_chain(imxdma))
507 del_timer(&imxdma->watchdog);
508
509 local_irq_save(flags);
510 imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
511 imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
512 DMA_CCR(channel));
513 imx_dmav1_writel(1 << channel, DMA_DISR);
514 imxdma->in_use = 0;
515 local_irq_restore(flags);
516}
517EXPORT_SYMBOL(imx_dma_disable);
518
519static void imx_dma_watchdog(unsigned long chno)
520{
521 struct imx_dma_channel *imxdma = &imx_dma_channels[chno];
522
523 imx_dmav1_writel(0, DMA_CCR(chno));
524 imxdma->in_use = 0;
525 imxdma->sg = NULL;
526
527 if (imxdma->err_handler)
528 imxdma->err_handler(chno, imxdma->data, IMX_DMA_ERR_TIMEOUT);
529}
530
531static irqreturn_t dma_err_handler(int irq, void *dev_id)
532{
533 int i, disr;
534 struct imx_dma_channel *imxdma;
535 unsigned int err_mask;
536 int errcode;
537
538 disr = imx_dmav1_readl(DMA_DISR);
539
540 err_mask = imx_dmav1_readl(DMA_DBTOSR) |
541 imx_dmav1_readl(DMA_DRTOSR) |
542 imx_dmav1_readl(DMA_DSESR) |
543 imx_dmav1_readl(DMA_DBOSR);
544
545 if (!err_mask)
546 return IRQ_HANDLED;
547
548 imx_dmav1_writel(disr & err_mask, DMA_DISR);
549
550 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
551 if (!(err_mask & (1 << i)))
552 continue;
553 imxdma = &imx_dma_channels[i];
554 errcode = 0;
555
556 if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
557 imx_dmav1_writel(1 << i, DMA_DBTOSR);
558 errcode |= IMX_DMA_ERR_BURST;
559 }
560 if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
561 imx_dmav1_writel(1 << i, DMA_DRTOSR);
562 errcode |= IMX_DMA_ERR_REQUEST;
563 }
564 if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
565 imx_dmav1_writel(1 << i, DMA_DSESR);
566 errcode |= IMX_DMA_ERR_TRANSFER;
567 }
568 if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
569 imx_dmav1_writel(1 << i, DMA_DBOSR);
570 errcode |= IMX_DMA_ERR_BUFFER;
571 }
572 if (imxdma->name && imxdma->err_handler) {
573 imxdma->err_handler(i, imxdma->data, errcode);
574 continue;
575 }
576
577 imx_dma_channels[i].sg = NULL;
578
579 printk(KERN_WARNING
580 "DMA timeout on channel %d (%s) -%s%s%s%s\n",
581 i, imxdma->name,
582 errcode & IMX_DMA_ERR_BURST ? " burst" : "",
583 errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
584 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
585 errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
586 }
587 return IRQ_HANDLED;
588}
589
/*
 * Handle the completion interrupt for one DMA channel: advance to the
 * next scatterlist entry if any remain (restarting or hw-chaining the
 * channel), otherwise stop the channel and notify the client through
 * its irq_handler callback.
 */
590static void dma_irq_handle_channel(int chno)
591{
592	struct imx_dma_channel *imxdma = &imx_dma_channels[chno];
593
594	if (!imxdma->name) {
595		/*
596		 * IRQ for an unregistered DMA channel:
597		 * let's clear the interrupts and disable it.
598		 */
599		printk(KERN_WARNING
600		       "spurious IRQ for DMA channel %d\n", chno);
601		return;
602	}
603
604	if (imxdma->sg) {
605		u32 tmp;
606		struct scatterlist *current_sg = imxdma->sg;
607		imxdma->sg = sg_next(imxdma->sg);
608
609		if (imxdma->sg) {
			/* Program the controller for the next sg entry. */
610			imx_dma_sg_next(chno, imxdma->sg);
611
612			tmp = imx_dmav1_readl(DMA_CCR(chno));
613
614			if (imx_dma_hw_chain(imxdma)) {
615				/* FIXME: The timeout should probably be
616				 * configurable
617				 */
618				mod_timer(&imxdma->watchdog,
619					jiffies + msecs_to_jiffies(500));
620
621				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
622				imx_dmav1_writel(tmp, DMA_CCR(chno));
623			} else {
				/*
				 * No hardware chaining: pulse CEN low here;
				 * the write below re-enables the channel so
				 * it restarts with the new sg parameters.
				 */
624				imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
625				tmp |= CCR_CEN;
626			}
627
			/* NOTE(review): in the hw-chain branch CCR is written
			 * twice with the same value (here and above) — looks
			 * harmless but worth confirming against the TRM. */
628			imx_dmav1_writel(tmp, DMA_CCR(chno));
629
630			if (imxdma->prog_handler)
631				imxdma->prog_handler(chno, imxdma->data,
632						current_sg);
633
634			return;
635		}
636
		/* Last sg entry completed: stop the hw-chain watchdog. */
637		if (imx_dma_hw_chain(imxdma)) {
638			del_timer(&imxdma->watchdog);
639			return;
640		}
641	}
642
	/* Transfer finished (or nothing queued): stop the channel. */
643	imx_dmav1_writel(0, DMA_CCR(chno));
644	imxdma->in_use = 0;
645	if (imxdma->irq_handler)
646		imxdma->irq_handler(chno, imxdma->data);
647}
648
/*
 * Top-level DMA interrupt handler: acknowledge all pending channel
 * interrupts in DISR and dispatch each set bit to
 * dma_irq_handle_channel().
 */
649static irqreturn_t dma_irq_handler(int irq, void *dev_id)
650{
651	int i, disr;
652
	/* MX21/MX27 deliver DMA errors through this handler too —
	 * presumably a shared interrupt source; service errors first. */
653	if (cpu_is_mx21() || cpu_is_mx27())
654		dma_err_handler(irq, dev_id);
655
656	disr = imx_dmav1_readl(DMA_DISR);
657
658	pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
659		     disr);
660
	/* Acknowledge the interrupts we are about to service. */
661	imx_dmav1_writel(disr, DMA_DISR);
662	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
663		if (disr & (1 << i))
664			dma_irq_handle_channel(i);
665	}
666
667	return IRQ_HANDLED;
668}
669
670/**
671 * imx_dma_request - request/allocate specified channel number
672 * @channel: i.MX DMA channel number
673 * @name: the driver/caller own non-%NULL identification
 *
 * Return: 0 on success, -%EINVAL for a missing @name or out-of-range
 * @channel, -%EBUSY if the channel is already taken, or the
 * request_irq() error code on MX21/MX27.
674 */
675int imx_dma_request(int channel, const char *name)
676{
677	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
678	unsigned long flags;
679	int ret = 0;
680
681	/* basic sanity checks */
682	if (!name)
683		return -EINVAL;
684
	/* NOTE(review): a negative @channel is not rejected, and the
	 * &imx_dma_channels[channel] lookup above already indexed the
	 * array before this bounds check — worth confirming callers
	 * never pass a negative/unchecked value. */
685	if (channel >= IMX_DMA_CHANNELS) {
686		printk(KERN_CRIT "%s: called for  non-existed channel %d\n",
687		       __func__, channel);
688		return -EINVAL;
689	}
690
	/* Claim the channel atomically w.r.t. local interrupts. */
691	local_irq_save(flags);
692	if (imxdma->name) {
693		local_irq_restore(flags);
694		return -EBUSY;
695	}
696	memset(imxdma, 0, sizeof(*imxdma));
697	imxdma->name = name;
698	local_irq_restore(flags); /* request_irq() can block */
699
	/* MX21/MX27 provide one interrupt line per DMA channel. */
700	if (cpu_is_mx21() || cpu_is_mx27()) {
701		ret = request_irq(MX2x_INT_DMACH0 + channel,
702			dma_irq_handler, 0, "DMA", NULL);
703		if (ret) {
704			imxdma->name = NULL;
705			pr_crit("Can't register IRQ %d for DMA channel %d\n",
706			       MX2x_INT_DMACH0 + channel, channel);
707			return ret;
708		}
		/* Watchdog timer used to recover stalled hw-chained
		 * transfers (armed in dma_irq_handle_channel()). */
709		init_timer(&imxdma->watchdog);
710		imxdma->watchdog.function = &imx_dma_watchdog;
711		imxdma->watchdog.data = channel;
712	}
713
714	return ret;
715}
716EXPORT_SYMBOL(imx_dma_request);
717
718/**
719 * imx_dma_free - release previously acquired channel
720 * @channel: i.MX DMA channel number
 *
 * Complains (and does nothing) if the channel was not allocated.
721 */
722void imx_dma_free(int channel)
723{
724	unsigned long flags;
725	struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
726
727	if (!imxdma->name) {
728		printk(KERN_CRIT
729		       "%s: trying to free free channel %d\n",
730		       __func__, channel);
731		return;
732	}
733
734	local_irq_save(flags);
735	/* Disable interrupts */
736	imx_dma_disable(channel);
737	imxdma->name = NULL;
738
	/* Release the per-channel IRQ line allocated on MX21/MX27.
	 * NOTE(review): free_irq() may sleep, yet it is called here with
	 * local interrupts disabled — imx_dma_request() by contrast drops
	 * irqs before request_irq(); worth confirming. */
739	if (cpu_is_mx21() || cpu_is_mx27())
740		free_irq(MX2x_INT_DMACH0 + channel, NULL);
741
742	local_irq_restore(flags);
743}
744EXPORT_SYMBOL(imx_dma_free);
745
746/**
747 * imx_dma_request_by_prio - find and request some of free channels best
748 * suiting requested priority
749 * @name: the driver/caller own non-%NULL identification
750 * @prio: requested priority group (%DMA_PRIO_HIGH, %DMA_PRIO_MEDIUM or
 *        %DMA_PRIO_LOW)
751 *
752 * This function tries to find a free channel in the specified priority group
753 * if the priority cannot be achieved it tries to look for free channel
754 * in the higher and then even lower priority groups.
755 *
756 * Return value: If there is no free channel to allocate, -%ENODEV is returned.
757 * On successful allocation channel is returned.
758 */
759int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio)
760{
761	int i;
762	int best;
763
	/* Map the priority group to the first channel index of that group. */
764	switch (prio) {
765	case (DMA_PRIO_HIGH):
766		best = 8;
767		break;
768	case (DMA_PRIO_MEDIUM):
769		best = 4;
770		break;
771	case (DMA_PRIO_LOW):
772	default:
773		best = 0;
774		break;
775	}
776
	/* First try the requested group and higher channel numbers... */
777	for (i = best; i < IMX_DMA_CHANNELS; i++)
778		if (!imx_dma_request(i, name))
779			return i;
780
	/* ...then fall back to the lower-numbered channels. */
781	for (i = best - 1; i >= 0; i--)
782		if (!imx_dma_request(i, name))
783			return i;
784
785	printk(KERN_ERR "%s: no free DMA channel found\n", __func__);
786
787	return -ENODEV;
788}
789EXPORT_SYMBOL(imx_dma_request_by_prio);
790
/*
 * Module init: map the DMA controller for the detected SoC, enable its
 * clock, reset the engine and install the shared IRQ handlers (MX1 only;
 * MX21/MX27 request per-channel IRQs in imx_dma_request()).
 */
791static int __init imx_dma_init(void)
792{
793	int ret = 0;
794	int i;
795
796	if (cpu_is_mx1())
797		imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
798	else if (cpu_is_mx21())
799		imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
800	else if (cpu_is_mx27())
801		imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
802	else
		/* Not a DMAv1 SoC: nothing to initialize. */
803		return 0;
804
805	dma_clk = clk_get(NULL, "dma");
806	if (IS_ERR(dma_clk))
807		return PTR_ERR(dma_clk);
808	clk_enable(dma_clk);
809
810	/* reset DMA module */
811	imx_dmav1_writel(DCR_DRST, DMA_DCR);
812
	/* MX1 has a single DMA IRQ plus a separate error IRQ.
	 * NOTE(review): dma_clk stays enabled on these error paths —
	 * confirm whether that is intentional. */
813	if (cpu_is_mx1()) {
814		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", NULL);
815		if (ret) {
816			pr_crit("Wow! Can't register IRQ for DMA\n");
817			return ret;
818		}
819
820		ret = request_irq(MX1_DMA_ERR, dma_err_handler, 0, "DMA", NULL);
821		if (ret) {
822			pr_crit("Wow! Can't register ERRIRQ for DMA\n");
823			free_irq(MX1_DMA_INT, NULL);
824			return ret;
825		}
826	}
827
828	/* enable DMA module */
829	imx_dmav1_writel(DCR_DEN, DMA_DCR);
830
831	/* clear all interrupts */
832	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);
833
834	/* disable interrupts */
835	imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);
836
	/* Start with an empty per-channel bookkeeping table. */
837	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
838		imx_dma_channels[i].sg = NULL;
839		imx_dma_channels[i].dma_num = i;
840	}
841
842	return ret;
843}
844
845arch_initcall(imx_dma_init);
diff --git a/arch/arm/mach-imx/include/mach/dma-v1.h b/arch/arm/mach-imx/include/mach/dma-v1.h
deleted file mode 100644
index ac6fd713828a..000000000000
--- a/arch/arm/mach-imx/include/mach/dma-v1.h
+++ /dev/null
@@ -1,103 +0,0 @@
1/*
2 * linux/arch/arm/mach-imx/include/mach/dma-v1.h
3 *
4 * i.MX DMA registration and IRQ dispatching
5 *
6 * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz>
7 * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de>
8 * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
22 * MA 02110-1301, USA.
23 */
24
25#ifndef __MACH_DMA_V1_H__
26#define __MACH_DMA_V1_H__
27
/* True on any SoC served by this v1 DMA engine. */
28#define imx_has_dma_v1() (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
29
30#include <mach/dma.h>
31
32#define IMX_DMA_CHANNELS 16
33
/* Transfer direction values for the dmamode arguments below. */
34#define DMA_MODE_READ 0
35#define DMA_MODE_WRITE 1
36#define DMA_MODE_MASK 1
37
38#define MX1_DMA_REG(offset) MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR + (offset))
39
40/* DMA Interrupt Mask Register */
41#define MX1_DMA_DIMR MX1_DMA_REG(0x08)
42
43/* Channel Control Register */
44#define MX1_DMA_CCR(x) MX1_DMA_REG(0x8c + ((x) << 6))
45
/* Presumably CCR access-size / transfer-type fields — confirm against
 * the i.MX reference manual. */
46#define IMX_DMA_MEMSIZE_32 (0 << 4)
47#define IMX_DMA_MEMSIZE_8 (1 << 4)
48#define IMX_DMA_MEMSIZE_16 (2 << 4)
49#define IMX_DMA_TYPE_LINEAR (0 << 10)
50#define IMX_DMA_TYPE_2D (1 << 10)
51#define IMX_DMA_TYPE_FIFO (2 << 10)
52
/* Error bits OR-ed into the errcode passed to the err_handler callback. */
53#define IMX_DMA_ERR_BURST (1 << 0)
54#define IMX_DMA_ERR_REQUEST (1 << 1)
55#define IMX_DMA_ERR_TRANSFER (1 << 2)
56#define IMX_DMA_ERR_BUFFER (1 << 3)
57#define IMX_DMA_ERR_TIMEOUT (1 << 4)
58
59int
60imx_dma_config_channel(int channel, unsigned int config_port,
61	unsigned int config_mem, unsigned int dmareq, int hw_chaining);
62
63void
64imx_dma_config_burstlen(int channel, unsigned int burstlen);
65
66int
67imx_dma_setup_single(int channel, dma_addr_t dma_address,
68		unsigned int dma_length, unsigned int dev_addr,
69		unsigned int dmamode);
70
71
72/*
73 * Use this flag as the dma_length argument to imx_dma_setup_sg()
74 * to create an endless running dma loop. The end of the scatterlist
75 * must be linked to the beginning for this to work.
76 */
77#define IMX_DMA_LENGTH_LOOP ((unsigned int)-1)
78
79int
80imx_dma_setup_sg(int channel, struct scatterlist *sg,
81	unsigned int sgcount, unsigned int dma_length,
82	unsigned int dev_addr, unsigned int dmamode);
83
84int
85imx_dma_setup_handlers(int channel,
86	void (*irq_handler) (int, void *),
87	void (*err_handler) (int, void *, int), void *data);
88
89int
90imx_dma_setup_progression_handler(int channel,
91	void (*prog_handler) (int, void*, struct scatterlist*));
92
93void imx_dma_enable(int channel);
94
95void imx_dma_disable(int channel);
96
/* Claim @channel exclusively; 0 on success, -EINVAL/-EBUSY or a
 * request_irq() error on failure. */
97int imx_dma_request(int channel, const char *name);
98
/* Release a channel previously obtained with imx_dma_request(). */
99void imx_dma_free(int channel);
100
/* Allocate any free channel, preferring the @prio group; returns the
 * channel number, or -ENODEV if none is free. */
101int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio);
102
103#endif /* __MACH_DMA_V1_H__ */
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index f8ca96c354f2..74127389e7ab 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -21,6 +21,7 @@
21#include <linux/err.h> 21#include <linux/err.h>
22 22
23#include <asm/pgtable.h> 23#include <asm/pgtable.h>
24#include <asm/system_misc.h>
24#include <asm/hardware/cache-l2x0.h> 25#include <asm/hardware/cache-l2x0.h>
25#include <asm/mach/map.h> 26#include <asm/mach/map.h>
26 27
@@ -61,8 +62,8 @@ static void imx3_idle(void)
61 : "=r" (reg)); 62 : "=r" (reg));
62} 63}
63 64
64static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size, 65static void __iomem *imx3_ioremap_caller(unsigned long phys_addr, size_t size,
65 unsigned int mtype) 66 unsigned int mtype, void *caller)
66{ 67{
67 if (mtype == MT_DEVICE) { 68 if (mtype == MT_DEVICE) {
68 /* 69 /*
@@ -75,7 +76,7 @@ static void __iomem *imx3_ioremap(unsigned long phys_addr, size_t size,
75 mtype = MT_DEVICE_NONSHARED; 76 mtype = MT_DEVICE_NONSHARED;
76 } 77 }
77 78
78 return __arm_ioremap(phys_addr, size, mtype); 79 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
79} 80}
80 81
81void __init imx3_init_l2x0(void) 82void __init imx3_init_l2x0(void)
@@ -134,7 +135,7 @@ void __init imx31_init_early(void)
134{ 135{
135 mxc_set_cpu_type(MXC_CPU_MX31); 136 mxc_set_cpu_type(MXC_CPU_MX31);
136 mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR)); 137 mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
137 imx_ioremap = imx3_ioremap; 138 arch_ioremap_caller = imx3_ioremap_caller;
138 arm_pm_idle = imx3_idle; 139 arm_pm_idle = imx3_idle;
139} 140}
140 141
@@ -208,7 +209,7 @@ void __init imx35_init_early(void)
208 mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR)); 209 mxc_iomux_v3_init(MX35_IO_ADDRESS(MX35_IOMUXC_BASE_ADDR));
209 mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR)); 210 mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
210 arm_pm_idle = imx3_idle; 211 arm_pm_idle = imx3_idle;
211 imx_ioremap = imx3_ioremap; 212 arch_ioremap_caller = imx3_ioremap_caller;
212} 213}
213 214
214void __init mx35_init_irq(void) 215void __init mx35_init_irq(void)
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index 51af9fa56944..05250aed61fb 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/clk.h> 16#include <linux/clk.h>
17 17
18#include <asm/system_misc.h>
18#include <asm/mach/map.h> 19#include <asm/mach/map.h>
19 20
20#include <mach/hardware.h> 21#include <mach/hardware.h>
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 1a65d77bd55d..eaf6c6366ffa 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -25,8 +25,9 @@
25 25
26#include <mach/hardware.h> 26#include <mach/hardware.h>
27#include <mach/platform.h> 27#include <mach/platform.h>
28#include <asm/irq.h>
29#include <mach/cm.h> 28#include <mach/cm.h>
29#include <mach/irqs.h>
30
30#include <asm/leds.h> 31#include <asm/leds.h>
31#include <asm/mach-types.h> 32#include <asm/mach-types.h>
32#include <asm/mach/time.h> 33#include <asm/mach/time.h>
diff --git a/arch/arm/mach-integrator/include/mach/io.h b/arch/arm/mach-integrator/include/mach/io.h
index 37beed3fa3ed..8de70de3dd0a 100644
--- a/arch/arm/mach-integrator/include/mach/io.h
+++ b/arch/arm/mach-integrator/include/mach/io.h
@@ -29,6 +29,5 @@
29#define PCI_IO_VADDR 0xee000000 29#define PCI_IO_VADDR 0xee000000
30 30
31#define __io(a) ((void __iomem *)(PCI_IO_VADDR + (a))) 31#define __io(a) ((void __iomem *)(PCI_IO_VADDR + (a)))
32#define __mem_pci(a) (a)
33 32
34#endif 33#endif
diff --git a/arch/arm/mach-integrator/include/mach/irqs.h b/arch/arm/mach-integrator/include/mach/irqs.h
index 1fbe6d190222..a19a1a2fcf6b 100644
--- a/arch/arm/mach-integrator/include/mach/irqs.h
+++ b/arch/arm/mach-integrator/include/mach/irqs.h
@@ -78,5 +78,6 @@
78#define IRQ_SIC_CP_LMINT7 46 78#define IRQ_SIC_CP_LMINT7 46
79#define IRQ_SIC_END 46 79#define IRQ_SIC_END 46
80 80
81#define NR_IRQS 47 81#define NR_IRQS_INTEGRATOR_AP 34
82#define NR_IRQS_INTEGRATOR_CP 47
82 83
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 21a1d6cbef40..871f148ffd72 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -38,12 +38,13 @@
38#include <mach/hardware.h> 38#include <mach/hardware.h>
39#include <mach/platform.h> 39#include <mach/platform.h>
40#include <asm/hardware/arm_timer.h> 40#include <asm/hardware/arm_timer.h>
41#include <asm/irq.h>
42#include <asm/setup.h> 41#include <asm/setup.h>
43#include <asm/param.h> /* HZ */ 42#include <asm/param.h> /* HZ */
44#include <asm/mach-types.h> 43#include <asm/mach-types.h>
44#include <asm/sched_clock.h>
45 45
46#include <mach/lm.h> 46#include <mach/lm.h>
47#include <mach/irqs.h>
47 48
48#include <asm/mach/arch.h> 49#include <asm/mach/arch.h>
49#include <asm/mach/irq.h> 50#include <asm/mach/irq.h>
@@ -325,6 +326,11 @@ static void __init ap_init(void)
325 326
326static unsigned long timer_reload; 327static unsigned long timer_reload;
327 328
329static u32 notrace integrator_read_sched_clock(void)
330{
331 return -readl((void __iomem *) TIMER2_VA_BASE + TIMER_VALUE);
332}
333
328static void integrator_clocksource_init(unsigned long inrate) 334static void integrator_clocksource_init(unsigned long inrate)
329{ 335{
330 void __iomem *base = (void __iomem *)TIMER2_VA_BASE; 336 void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
@@ -341,6 +347,7 @@ static void integrator_clocksource_init(unsigned long inrate)
341 347
342 clocksource_mmio_init(base + TIMER_VALUE, "timer2", 348 clocksource_mmio_init(base + TIMER_VALUE, "timer2",
343 rate, 200, 16, clocksource_mmio_readl_down); 349 rate, 200, 16, clocksource_mmio_readl_down);
350 setup_sched_clock(integrator_read_sched_clock, 16, rate);
344} 351}
345 352
346static void __iomem * const clkevt_base = (void __iomem *)TIMER1_VA_BASE; 353static void __iomem * const clkevt_base = (void __iomem *)TIMER1_VA_BASE;
@@ -468,6 +475,7 @@ MACHINE_START(INTEGRATOR, "ARM-Integrator")
468 .atag_offset = 0x100, 475 .atag_offset = 0x100,
469 .reserve = integrator_reserve, 476 .reserve = integrator_reserve,
470 .map_io = ap_map_io, 477 .map_io = ap_map_io,
478 .nr_irqs = NR_IRQS_INTEGRATOR_AP,
471 .init_early = integrator_init_early, 479 .init_early = integrator_init_early,
472 .init_irq = ap_init_irq, 480 .init_irq = ap_init_irq,
473 .timer = &ap_timer, 481 .timer = &ap_timer,
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index be9ead4a3bcc..48a115a91d9d 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -26,7 +26,6 @@
26 26
27#include <mach/hardware.h> 27#include <mach/hardware.h>
28#include <mach/platform.h> 28#include <mach/platform.h>
29#include <asm/irq.h>
30#include <asm/setup.h> 29#include <asm/setup.h>
31#include <asm/mach-types.h> 30#include <asm/mach-types.h>
32#include <asm/hardware/arm_timer.h> 31#include <asm/hardware/arm_timer.h>
@@ -34,6 +33,7 @@
34 33
35#include <mach/cm.h> 34#include <mach/cm.h>
36#include <mach/lm.h> 35#include <mach/lm.h>
36#include <mach/irqs.h>
37 37
38#include <asm/mach/arch.h> 38#include <asm/mach/arch.h>
39#include <asm/mach/irq.h> 39#include <asm/mach/irq.h>
@@ -464,6 +464,7 @@ MACHINE_START(CINTEGRATOR, "ARM-IntegratorCP")
464 .atag_offset = 0x100, 464 .atag_offset = 0x100,
465 .reserve = integrator_reserve, 465 .reserve = integrator_reserve,
466 .map_io = intcp_map_io, 466 .map_io = intcp_map_io,
467 .nr_irqs = NR_IRQS_INTEGRATOR_CP,
467 .init_early = intcp_init_early, 468 .init_early = intcp_init_early,
468 .init_irq = intcp_init_irq, 469 .init_irq = intcp_init_irq,
469 .timer = &cp_timer, 470 .timer = &cp_timer,
diff --git a/arch/arm/mach-integrator/pci.c b/arch/arm/mach-integrator/pci.c
index 36068f438f2b..f1ca9c122861 100644
--- a/arch/arm/mach-integrator/pci.c
+++ b/arch/arm/mach-integrator/pci.c
@@ -26,10 +26,11 @@
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/init.h> 27#include <linux/init.h>
28 28
29#include <asm/irq.h>
30#include <asm/mach/pci.h> 29#include <asm/mach/pci.h>
31#include <asm/mach-types.h> 30#include <asm/mach-types.h>
32 31
32#include <mach/irqs.h>
33
33/* 34/*
34 * A small note about bridges and interrupts. The DECchip 21050 (and 35 * A small note about bridges and interrupts. The DECchip 21050 (and
35 * later) adheres to the PCI-PCI bridge specification. This says that 36 * later) adheres to the PCI-PCI bridge specification. This says that
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index 4be172c3cbe0..67e6f9a9d1a0 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -30,7 +30,8 @@
30 30
31#include <mach/hardware.h> 31#include <mach/hardware.h>
32#include <mach/platform.h> 32#include <mach/platform.h>
33#include <asm/irq.h> 33#include <mach/irqs.h>
34
34#include <asm/signal.h> 35#include <asm/signal.h>
35#include <asm/mach/pci.h> 36#include <asm/mach/pci.h>
36#include <asm/irq_regs.h> 37#include <asm/irq_regs.h>
diff --git a/arch/arm/mach-iop13xx/include/mach/io.h b/arch/arm/mach-iop13xx/include/mach/io.h
index dffb234bb967..f13188518025 100644
--- a/arch/arm/mach-iop13xx/include/mach/io.h
+++ b/arch/arm/mach-iop13xx/include/mach/io.h
@@ -22,20 +22,7 @@
22#define IO_SPACE_LIMIT 0xffffffff 22#define IO_SPACE_LIMIT 0xffffffff
23 23
24#define __io(a) __iop13xx_io(a) 24#define __io(a) __iop13xx_io(a)
25#define __mem_pci(a) (a)
26#define __mem_isa(a) (a)
27 25
28extern void __iomem * __iop13xx_io(unsigned long io_addr); 26extern void __iomem * __iop13xx_io(unsigned long io_addr);
29extern void __iomem *__iop13xx_ioremap(unsigned long cookie, size_t size,
30 unsigned int mtype);
31extern void __iop13xx_iounmap(void __iomem *addr);
32
33extern u32 iop13xx_atue_mem_base;
34extern u32 iop13xx_atux_mem_base;
35extern size_t iop13xx_atue_mem_size;
36extern size_t iop13xx_atux_mem_size;
37
38#define __arch_ioremap __iop13xx_ioremap
39#define __arch_iounmap __iop13xx_iounmap
40 27
41#endif 28#endif
diff --git a/arch/arm/mach-iop13xx/include/mach/iop13xx.h b/arch/arm/mach-iop13xx/include/mach/iop13xx.h
index 07e9ff7adafb..e190dcd7d72d 100644
--- a/arch/arm/mach-iop13xx/include/mach/iop13xx.h
+++ b/arch/arm/mach-iop13xx/include/mach/iop13xx.h
@@ -5,6 +5,7 @@
5/* The ATU offsets can change based on the strapping */ 5/* The ATU offsets can change based on the strapping */
6extern u32 iop13xx_atux_pmmr_offset; 6extern u32 iop13xx_atux_pmmr_offset;
7extern u32 iop13xx_atue_pmmr_offset; 7extern u32 iop13xx_atue_pmmr_offset;
8void iop13xx_init_early(void);
8void iop13xx_init_irq(void); 9void iop13xx_init_irq(void);
9void iop13xx_map_io(void); 10void iop13xx_map_io(void);
10void iop13xx_platform_init(void); 11void iop13xx_platform_init(void);
diff --git a/arch/arm/mach-iop13xx/io.c b/arch/arm/mach-iop13xx/io.c
index 48642e66c566..3c364198db9c 100644
--- a/arch/arm/mach-iop13xx/io.c
+++ b/arch/arm/mach-iop13xx/io.c
@@ -21,6 +21,8 @@
21#include <linux/io.h> 21#include <linux/io.h>
22#include <mach/hardware.h> 22#include <mach/hardware.h>
23 23
24#include "pci.h"
25
24void * __iomem __iop13xx_io(unsigned long io_addr) 26void * __iomem __iop13xx_io(unsigned long io_addr)
25{ 27{
26 void __iomem * io_virt; 28 void __iomem * io_virt;
@@ -40,8 +42,8 @@ void * __iomem __iop13xx_io(unsigned long io_addr)
40} 42}
41EXPORT_SYMBOL(__iop13xx_io); 43EXPORT_SYMBOL(__iop13xx_io);
42 44
43void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size, 45static void __iomem *__iop13xx_ioremap_caller(unsigned long cookie,
44 unsigned int mtype) 46 size_t size, unsigned int mtype, void *caller)
45{ 47{
46 void __iomem * retval; 48 void __iomem * retval;
47 49
@@ -76,17 +78,14 @@ void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size,
76 break; 78 break;
77 default: 79 default:
78 retval = __arm_ioremap_caller(cookie, size, mtype, 80 retval = __arm_ioremap_caller(cookie, size, mtype,
79 __builtin_return_address(0)); 81 caller);
80 } 82 }
81 83
82 return retval; 84 return retval;
83} 85}
84EXPORT_SYMBOL(__iop13xx_ioremap);
85 86
86void __iop13xx_iounmap(void __iomem *addr) 87static void __iop13xx_iounmap(volatile void __iomem *addr)
87{ 88{
88 extern void __iounmap(volatile void __iomem *addr);
89
90 if (iop13xx_atue_mem_base) 89 if (iop13xx_atue_mem_base)
91 if (addr >= (void __iomem *) iop13xx_atue_mem_base && 90 if (addr >= (void __iomem *) iop13xx_atue_mem_base &&
92 addr < (void __iomem *) (iop13xx_atue_mem_base + 91 addr < (void __iomem *) (iop13xx_atue_mem_base +
@@ -110,4 +109,9 @@ void __iop13xx_iounmap(void __iomem *addr)
110skip: 109skip:
111 return; 110 return;
112} 111}
113EXPORT_SYMBOL(__iop13xx_iounmap); 112
113void __init iop13xx_init_early(void)
114{
115 arch_ioremap_caller = __iop13xx_ioremap_caller;
116 arch_iounmap = __iop13xx_iounmap;
117}
diff --git a/arch/arm/mach-iop13xx/iq81340mc.c b/arch/arm/mach-iop13xx/iq81340mc.c
index abaee8833588..5c96b73e6964 100644
--- a/arch/arm/mach-iop13xx/iq81340mc.c
+++ b/arch/arm/mach-iop13xx/iq81340mc.c
@@ -92,6 +92,7 @@ static struct sys_timer iq81340mc_timer = {
92MACHINE_START(IQ81340MC, "Intel IQ81340MC") 92MACHINE_START(IQ81340MC, "Intel IQ81340MC")
93 /* Maintainer: Dan Williams <dan.j.williams@intel.com> */ 93 /* Maintainer: Dan Williams <dan.j.williams@intel.com> */
94 .atag_offset = 0x100, 94 .atag_offset = 0x100,
95 .init_early = iop13xx_init_early,
95 .map_io = iop13xx_map_io, 96 .map_io = iop13xx_map_io,
96 .init_irq = iop13xx_init_irq, 97 .init_irq = iop13xx_init_irq,
97 .timer = &iq81340mc_timer, 98 .timer = &iq81340mc_timer,
diff --git a/arch/arm/mach-iop13xx/iq81340sc.c b/arch/arm/mach-iop13xx/iq81340sc.c
index 690916a09dc6..aa4dd750135a 100644
--- a/arch/arm/mach-iop13xx/iq81340sc.c
+++ b/arch/arm/mach-iop13xx/iq81340sc.c
@@ -94,6 +94,7 @@ static struct sys_timer iq81340sc_timer = {
94MACHINE_START(IQ81340SC, "Intel IQ81340SC") 94MACHINE_START(IQ81340SC, "Intel IQ81340SC")
95 /* Maintainer: Dan Williams <dan.j.williams@intel.com> */ 95 /* Maintainer: Dan Williams <dan.j.williams@intel.com> */
96 .atag_offset = 0x100, 96 .atag_offset = 0x100,
97 .init_early = iop13xx_init_early,
97 .map_io = iop13xx_map_io, 98 .map_io = iop13xx_map_io,
98 .init_irq = iop13xx_init_irq, 99 .init_irq = iop13xx_init_irq,
99 .timer = &iq81340sc_timer, 100 .timer = &iq81340sc_timer,
diff --git a/arch/arm/mach-iop13xx/pci.h b/arch/arm/mach-iop13xx/pci.h
new file mode 100644
index 000000000000..c70cf5b41e31
--- /dev/null
+++ b/arch/arm/mach-iop13xx/pci.h
@@ -0,0 +1,6 @@
1#include <linux/types.h>
2
3extern u32 iop13xx_atue_mem_base;
4extern u32 iop13xx_atux_mem_base;
5extern size_t iop13xx_atue_mem_size;
6extern size_t iop13xx_atux_mem_size;
diff --git a/arch/arm/mach-iop32x/include/mach/io.h b/arch/arm/mach-iop32x/include/mach/io.h
index 2d88264b9863..e2ada265bb8d 100644
--- a/arch/arm/mach-iop32x/include/mach/io.h
+++ b/arch/arm/mach-iop32x/include/mach/io.h
@@ -15,6 +15,5 @@
15 15
16#define IO_SPACE_LIMIT 0xffffffff 16#define IO_SPACE_LIMIT 0xffffffff
17#define __io(p) ((void __iomem *)IOP3XX_PCI_IO_PHYS_TO_VIRT(p)) 17#define __io(p) ((void __iomem *)IOP3XX_PCI_IO_PHYS_TO_VIRT(p))
18#define __mem_pci(a) (a)
19 18
20#endif 19#endif
diff --git a/arch/arm/mach-iop33x/include/mach/io.h b/arch/arm/mach-iop33x/include/mach/io.h
index a8a66fc8fbdb..f7c1b6595660 100644
--- a/arch/arm/mach-iop33x/include/mach/io.h
+++ b/arch/arm/mach-iop33x/include/mach/io.h
@@ -15,6 +15,5 @@
15 15
16#define IO_SPACE_LIMIT 0xffffffff 16#define IO_SPACE_LIMIT 0xffffffff
17#define __io(p) ((void __iomem *)IOP3XX_PCI_IO_PHYS_TO_VIRT(p)) 17#define __io(p) ((void __iomem *)IOP3XX_PCI_IO_PHYS_TO_VIRT(p))
18#define __mem_pci(a) (a)
19 18
20#endif 19#endif
diff --git a/arch/arm/mach-ixp2000/include/mach/io.h b/arch/arm/mach-ixp2000/include/mach/io.h
index 859e584914d9..f6552d6f35ab 100644
--- a/arch/arm/mach-ixp2000/include/mach/io.h
+++ b/arch/arm/mach-ixp2000/include/mach/io.h
@@ -18,7 +18,6 @@
18#include <mach/hardware.h> 18#include <mach/hardware.h>
19 19
20#define IO_SPACE_LIMIT 0xffffffff 20#define IO_SPACE_LIMIT 0xffffffff
21#define __mem_pci(a) (a)
22 21
23/* 22/*
24 * The A? revisions of the IXP2000s assert byte lanes for PCI I/O 23 * The A? revisions of the IXP2000s assert byte lanes for PCI I/O
diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c
index d2c2dc35cbdd..d34542425990 100644
--- a/arch/arm/mach-ixp23xx/core.c
+++ b/arch/arm/mach-ixp23xx/core.c
@@ -36,6 +36,7 @@
36#include <asm/irq.h> 36#include <asm/irq.h>
37#include <asm/tlbflush.h> 37#include <asm/tlbflush.h>
38#include <asm/pgtable.h> 38#include <asm/pgtable.h>
39#include <asm/system_misc.h>
39 40
40#include <asm/mach/map.h> 41#include <asm/mach/map.h>
41#include <asm/mach/time.h> 42#include <asm/mach/time.h>
diff --git a/arch/arm/mach-ixp23xx/include/mach/io.h b/arch/arm/mach-ixp23xx/include/mach/io.h
index 4ce4353b9f72..a7aceb55c130 100644
--- a/arch/arm/mach-ixp23xx/include/mach/io.h
+++ b/arch/arm/mach-ixp23xx/include/mach/io.h
@@ -18,6 +18,5 @@
18#define IO_SPACE_LIMIT 0xffffffff 18#define IO_SPACE_LIMIT 0xffffffff
19 19
20#define __io(p) ((void __iomem*)((p) + IXP23XX_PCI_IO_VIRT)) 20#define __io(p) ((void __iomem*)((p) + IXP23XX_PCI_IO_VIRT))
21#define __mem_pci(a) (a)
22 21
23#endif 22#endif
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c
index a7277ad470a5..90e42e9982cb 100644
--- a/arch/arm/mach-ixp4xx/avila-setup.c
+++ b/arch/arm/mach-ixp4xx/avila-setup.c
@@ -165,6 +165,7 @@ static void __init avila_init(void)
165MACHINE_START(AVILA, "Gateworks Avila Network Platform") 165MACHINE_START(AVILA, "Gateworks Avila Network Platform")
166 /* Maintainer: Deepak Saxena <dsaxena@plexity.net> */ 166 /* Maintainer: Deepak Saxena <dsaxena@plexity.net> */
167 .map_io = ixp4xx_map_io, 167 .map_io = ixp4xx_map_io,
168 .init_early = ixp4xx_init_early,
168 .init_irq = ixp4xx_init_irq, 169 .init_irq = ixp4xx_init_irq,
169 .timer = &ixp4xx_timer, 170 .timer = &ixp4xx_timer,
170 .atag_offset = 0x100, 171 .atag_offset = 0x100,
@@ -184,6 +185,7 @@ MACHINE_END
184MACHINE_START(LOFT, "Giant Shoulder Inc Loft board") 185MACHINE_START(LOFT, "Giant Shoulder Inc Loft board")
185 /* Maintainer: Tom Billman <kernel@giantshoulderinc.com> */ 186 /* Maintainer: Tom Billman <kernel@giantshoulderinc.com> */
186 .map_io = ixp4xx_map_io, 187 .map_io = ixp4xx_map_io,
188 .init_early = ixp4xx_init_early,
187 .init_irq = ixp4xx_init_irq, 189 .init_irq = ixp4xx_init_irq,
188 .timer = &ixp4xx_timer, 190 .timer = &ixp4xx_timer,
189 .atag_offset = 0x100, 191 .atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index a6329a0a8ec4..ebbd7fc90eb4 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -31,11 +31,13 @@
31 31
32#include <mach/udc.h> 32#include <mach/udc.h>
33#include <mach/hardware.h> 33#include <mach/hardware.h>
34#include <mach/io.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include <asm/pgtable.h> 36#include <asm/pgtable.h>
36#include <asm/page.h> 37#include <asm/page.h>
37#include <asm/irq.h> 38#include <asm/irq.h>
38#include <asm/sched_clock.h> 39#include <asm/sched_clock.h>
40#include <asm/system_misc.h>
39 41
40#include <asm/mach/map.h> 42#include <asm/mach/map.h>
41#include <asm/mach/irq.h> 43#include <asm/mach/irq.h>
@@ -517,3 +519,35 @@ void ixp4xx_restart(char mode, const char *cmd)
517 *IXP4XX_OSWE = IXP4XX_WDT_RESET_ENABLE | IXP4XX_WDT_COUNT_ENABLE; 519 *IXP4XX_OSWE = IXP4XX_WDT_RESET_ENABLE | IXP4XX_WDT_COUNT_ENABLE;
518 } 520 }
519} 521}
522
523#ifdef CONFIG_IXP4XX_INDIRECT_PCI
524/*
525 * In the case of using indirect PCI, we simply return the actual PCI
526 * address and our read/write implementation use that to drive the
527 * access registers. If something outside of PCI is ioremap'd, we
528 * fallback to the default.
529 */
530
531static void __iomem *ixp4xx_ioremap_caller(unsigned long addr, size_t size,
532 unsigned int mtype, void *caller)
533{
534 if (!is_pci_memory(addr))
535 return __arm_ioremap_caller(addr, size, mtype, caller);
536
537 return (void __iomem *)addr;
538}
539
540static void ixp4xx_iounmap(void __iomem *addr)
541{
542 if (!is_pci_memory((__force u32)addr))
543 __iounmap(addr);
544}
545
546void __init ixp4xx_init_early(void)
547{
548 arch_ioremap_caller = ixp4xx_ioremap_caller;
549 arch_iounmap = ixp4xx_iounmap;
550}
551#else
552void __init ixp4xx_init_early(void) {}
553#endif
diff --git a/arch/arm/mach-ixp4xx/coyote-setup.c b/arch/arm/mach-ixp4xx/coyote-setup.c
index a74f86ce8bcc..1b83110028d6 100644
--- a/arch/arm/mach-ixp4xx/coyote-setup.c
+++ b/arch/arm/mach-ixp4xx/coyote-setup.c
@@ -110,6 +110,7 @@ static void __init coyote_init(void)
110MACHINE_START(ADI_COYOTE, "ADI Engineering Coyote") 110MACHINE_START(ADI_COYOTE, "ADI Engineering Coyote")
111 /* Maintainer: MontaVista Software, Inc. */ 111 /* Maintainer: MontaVista Software, Inc. */
112 .map_io = ixp4xx_map_io, 112 .map_io = ixp4xx_map_io,
113 .init_early = ixp4xx_init_early,
113 .init_irq = ixp4xx_init_irq, 114 .init_irq = ixp4xx_init_irq,
114 .timer = &ixp4xx_timer, 115 .timer = &ixp4xx_timer,
115 .atag_offset = 0x100, 116 .atag_offset = 0x100,
@@ -129,6 +130,7 @@ MACHINE_END
129MACHINE_START(IXDPG425, "Intel IXDPG425") 130MACHINE_START(IXDPG425, "Intel IXDPG425")
130 /* Maintainer: MontaVista Software, Inc. */ 131 /* Maintainer: MontaVista Software, Inc. */
131 .map_io = ixp4xx_map_io, 132 .map_io = ixp4xx_map_io,
133 .init_early = ixp4xx_init_early,
132 .init_irq = ixp4xx_init_irq, 134 .init_irq = ixp4xx_init_irq,
133 .timer = &ixp4xx_timer, 135 .timer = &ixp4xx_timer,
134 .atag_offset = 0x100, 136 .atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index 67be177b336a..97a0af8f1955 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -280,6 +280,7 @@ MACHINE_START(DSMG600, "D-Link DSM-G600 RevA")
280 /* Maintainer: www.nslu2-linux.org */ 280 /* Maintainer: www.nslu2-linux.org */
281 .atag_offset = 0x100, 281 .atag_offset = 0x100,
282 .map_io = ixp4xx_map_io, 282 .map_io = ixp4xx_map_io,
283 .init_early = ixp4xx_init_early,
283 .init_irq = ixp4xx_init_irq, 284 .init_irq = ixp4xx_init_irq,
284 .timer = &dsmg600_timer, 285 .timer = &dsmg600_timer,
285 .init_machine = dsmg600_init, 286 .init_machine = dsmg600_init,
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c
index 6d5818285af8..9175a25a7511 100644
--- a/arch/arm/mach-ixp4xx/fsg-setup.c
+++ b/arch/arm/mach-ixp4xx/fsg-setup.c
@@ -270,6 +270,7 @@ static void __init fsg_init(void)
270MACHINE_START(FSG, "Freecom FSG-3") 270MACHINE_START(FSG, "Freecom FSG-3")
271 /* Maintainer: www.nslu2-linux.org */ 271 /* Maintainer: www.nslu2-linux.org */
272 .map_io = ixp4xx_map_io, 272 .map_io = ixp4xx_map_io,
273 .init_early = ixp4xx_init_early,
273 .init_irq = ixp4xx_init_irq, 274 .init_irq = ixp4xx_init_irq,
274 .timer = &ixp4xx_timer, 275 .timer = &ixp4xx_timer,
275 .atag_offset = 0x100, 276 .atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/gateway7001-setup.c b/arch/arm/mach-ixp4xx/gateway7001-setup.c
index 7ecf9b28f1c0..033c71758953 100644
--- a/arch/arm/mach-ixp4xx/gateway7001-setup.c
+++ b/arch/arm/mach-ixp4xx/gateway7001-setup.c
@@ -97,6 +97,7 @@ static void __init gateway7001_init(void)
97MACHINE_START(GATEWAY7001, "Gateway 7001 AP") 97MACHINE_START(GATEWAY7001, "Gateway 7001 AP")
98 /* Maintainer: Imre Kaloz <kaloz@openwrt.org> */ 98 /* Maintainer: Imre Kaloz <kaloz@openwrt.org> */
99 .map_io = ixp4xx_map_io, 99 .map_io = ixp4xx_map_io,
100 .init_early = ixp4xx_init_early,
100 .init_irq = ixp4xx_init_irq, 101 .init_irq = ixp4xx_init_irq,
101 .timer = &ixp4xx_timer, 102 .timer = &ixp4xx_timer,
102 .atag_offset = 0x100, 103 .atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c
index 78ae12c46261..46bb924962ee 100644
--- a/arch/arm/mach-ixp4xx/goramo_mlr.c
+++ b/arch/arm/mach-ixp4xx/goramo_mlr.c
@@ -496,6 +496,7 @@ subsys_initcall(gmlr_pci_init);
496MACHINE_START(GORAMO_MLR, "MultiLink") 496MACHINE_START(GORAMO_MLR, "MultiLink")
497 /* Maintainer: Krzysztof Halasa */ 497 /* Maintainer: Krzysztof Halasa */
498 .map_io = ixp4xx_map_io, 498 .map_io = ixp4xx_map_io,
499 .init_early = ixp4xx_init_early,
499 .init_irq = ixp4xx_init_irq, 500 .init_irq = ixp4xx_init_irq,
500 .timer = &ixp4xx_timer, 501 .timer = &ixp4xx_timer,
501 .atag_offset = 0x100, 502 .atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-setup.c b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
index a23f89391458..18ebc6be7969 100644
--- a/arch/arm/mach-ixp4xx/gtwx5715-setup.c
+++ b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
@@ -165,6 +165,7 @@ static void __init gtwx5715_init(void)
165MACHINE_START(GTWX5715, "Gemtek GTWX5715 (Linksys WRV54G)") 165MACHINE_START(GTWX5715, "Gemtek GTWX5715 (Linksys WRV54G)")
166 /* Maintainer: George Joseph */ 166 /* Maintainer: George Joseph */
167 .map_io = ixp4xx_map_io, 167 .map_io = ixp4xx_map_io,
168 .init_early = ixp4xx_init_early,
168 .init_irq = ixp4xx_init_irq, 169 .init_irq = ixp4xx_init_irq,
169 .timer = &ixp4xx_timer, 170 .timer = &ixp4xx_timer,
170 .atag_offset = 0x100, 171 .atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/include/mach/hardware.h b/arch/arm/mach-ixp4xx/include/mach/hardware.h
index c30e7e923a73..034bb2a1b805 100644
--- a/arch/arm/mach-ixp4xx/include/mach/hardware.h
+++ b/arch/arm/mach-ixp4xx/include/mach/hardware.h
@@ -23,8 +23,6 @@
23#define PCIBIOS_MAX_MEM 0x4BFFFFFF 23#define PCIBIOS_MAX_MEM 0x4BFFFFFF
24#endif 24#endif
25 25
26#define ARCH_HAS_DMA_SET_COHERENT_MASK
27
28/* Register locations and bits */ 26/* Register locations and bits */
29#include "ixp4xx-regs.h" 27#include "ixp4xx-regs.h"
30 28
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index ffb9d6afb89f..5cf30d1b78d2 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -39,11 +39,7 @@ extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
39 * but in some cases the performance hit is acceptable. In addition, you 39 * but in some cases the performance hit is acceptable. In addition, you
40 * cannot mmap() PCI devices in this case. 40 * cannot mmap() PCI devices in this case.
41 */ 41 */
42#ifndef CONFIG_IXP4XX_INDIRECT_PCI 42#ifdef CONFIG_IXP4XX_INDIRECT_PCI
43
44#define __mem_pci(a) (a)
45
46#else
47 43
48/* 44/*
49 * In the case of using indirect PCI, we simply return the actual PCI 45 * In the case of using indirect PCI, we simply return the actual PCI
@@ -57,24 +53,6 @@ static inline int is_pci_memory(u32 addr)
57 return (addr >= PCIBIOS_MIN_MEM) && (addr <= 0x4FFFFFFF); 53 return (addr >= PCIBIOS_MIN_MEM) && (addr <= 0x4FFFFFFF);
58} 54}
59 55
60static inline void __iomem * __indirect_ioremap(unsigned long addr, size_t size,
61 unsigned int mtype)
62{
63 if (!is_pci_memory(addr))
64 return __arm_ioremap(addr, size, mtype);
65
66 return (void __iomem *)addr;
67}
68
69static inline void __indirect_iounmap(void __iomem *addr)
70{
71 if (!is_pci_memory((__force u32)addr))
72 __iounmap(addr);
73}
74
75#define __arch_ioremap __indirect_ioremap
76#define __arch_iounmap __indirect_iounmap
77
78#define writeb(v, p) __indirect_writeb(v, p) 56#define writeb(v, p) __indirect_writeb(v, p)
79#define writew(v, p) __indirect_writew(v, p) 57#define writew(v, p) __indirect_writew(v, p)
80#define writel(v, p) __indirect_writel(v, p) 58#define writel(v, p) __indirect_writel(v, p)
diff --git a/arch/arm/mach-ixp4xx/include/mach/platform.h b/arch/arm/mach-ixp4xx/include/mach/platform.h
index df9250bbf13d..b66bedc64de1 100644
--- a/arch/arm/mach-ixp4xx/include/mach/platform.h
+++ b/arch/arm/mach-ixp4xx/include/mach/platform.h
@@ -121,6 +121,7 @@ extern unsigned long ixp4xx_timer_freq;
121 * Functions used by platform-level setup code 121 * Functions used by platform-level setup code
122 */ 122 */
123extern void ixp4xx_map_io(void); 123extern void ixp4xx_map_io(void);
124extern void ixp4xx_init_early(void);
124extern void ixp4xx_init_irq(void); 125extern void ixp4xx_init_irq(void);
125extern void ixp4xx_sys_init(void); 126extern void ixp4xx_sys_init(void);
126extern void ixp4xx_timer_init(void); 127extern void ixp4xx_timer_init(void);
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index 8a38b39999f8..3d742aee1773 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -254,6 +254,7 @@ static void __init ixdp425_init(void)
254MACHINE_START(IXDP425, "Intel IXDP425 Development Platform") 254MACHINE_START(IXDP425, "Intel IXDP425 Development Platform")
255 /* Maintainer: MontaVista Software, Inc. */ 255 /* Maintainer: MontaVista Software, Inc. */
256 .map_io = ixp4xx_map_io, 256 .map_io = ixp4xx_map_io,
257 .init_early = ixp4xx_init_early,
257 .init_irq = ixp4xx_init_irq, 258 .init_irq = ixp4xx_init_irq,
258 .timer = &ixp4xx_timer, 259 .timer = &ixp4xx_timer,
259 .atag_offset = 0x100, 260 .atag_offset = 0x100,
@@ -269,6 +270,7 @@ MACHINE_END
269MACHINE_START(IXDP465, "Intel IXDP465 Development Platform") 270MACHINE_START(IXDP465, "Intel IXDP465 Development Platform")
270 /* Maintainer: MontaVista Software, Inc. */ 271 /* Maintainer: MontaVista Software, Inc. */
271 .map_io = ixp4xx_map_io, 272 .map_io = ixp4xx_map_io,
273 .init_early = ixp4xx_init_early,
272 .init_irq = ixp4xx_init_irq, 274 .init_irq = ixp4xx_init_irq,
273 .timer = &ixp4xx_timer, 275 .timer = &ixp4xx_timer,
274 .atag_offset = 0x100, 276 .atag_offset = 0x100,
@@ -283,6 +285,7 @@ MACHINE_END
283MACHINE_START(IXCDP1100, "Intel IXCDP1100 Development Platform") 285MACHINE_START(IXCDP1100, "Intel IXCDP1100 Development Platform")
284 /* Maintainer: MontaVista Software, Inc. */ 286 /* Maintainer: MontaVista Software, Inc. */
285 .map_io = ixp4xx_map_io, 287 .map_io = ixp4xx_map_io,
288 .init_early = ixp4xx_init_early,
286 .init_irq = ixp4xx_init_irq, 289 .init_irq = ixp4xx_init_irq,
287 .timer = &ixp4xx_timer, 290 .timer = &ixp4xx_timer,
288 .atag_offset = 0x100, 291 .atag_offset = 0x100,
@@ -297,6 +300,7 @@ MACHINE_END
297MACHINE_START(KIXRP435, "Intel KIXRP435 Reference Platform") 300MACHINE_START(KIXRP435, "Intel KIXRP435 Reference Platform")
298 /* Maintainer: MontaVista Software, Inc. */ 301 /* Maintainer: MontaVista Software, Inc. */
299 .map_io = ixp4xx_map_io, 302 .map_io = ixp4xx_map_io,
303 .init_early = ixp4xx_init_early,
300 .init_irq = ixp4xx_init_irq, 304 .init_irq = ixp4xx_init_irq,
301 .timer = &ixp4xx_timer, 305 .timer = &ixp4xx_timer,
302 .atag_offset = 0x100, 306 .atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index 1010eb7b0083..33cb0955b6bf 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -315,6 +315,7 @@ MACHINE_START(NAS100D, "Iomega NAS 100d")
315 /* Maintainer: www.nslu2-linux.org */ 315 /* Maintainer: www.nslu2-linux.org */
316 .atag_offset = 0x100, 316 .atag_offset = 0x100,
317 .map_io = ixp4xx_map_io, 317 .map_io = ixp4xx_map_io,
318 .init_early = ixp4xx_init_early,
318 .init_irq = ixp4xx_init_irq, 319 .init_irq = ixp4xx_init_irq,
319 .timer = &ixp4xx_timer, 320 .timer = &ixp4xx_timer,
320 .init_machine = nas100d_init, 321 .init_machine = nas100d_init,
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index aa355c360d57..e2903faaebb3 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -301,6 +301,7 @@ MACHINE_START(NSLU2, "Linksys NSLU2")
301 /* Maintainer: www.nslu2-linux.org */ 301 /* Maintainer: www.nslu2-linux.org */
302 .atag_offset = 0x100, 302 .atag_offset = 0x100,
303 .map_io = ixp4xx_map_io, 303 .map_io = ixp4xx_map_io,
304 .init_early = ixp4xx_init_early,
304 .init_irq = ixp4xx_init_irq, 305 .init_irq = ixp4xx_init_irq,
305 .timer = &nslu2_timer, 306 .timer = &nslu2_timer,
306 .init_machine = nslu2_init, 307 .init_machine = nslu2_init,
diff --git a/arch/arm/mach-ixp4xx/omixp-setup.c b/arch/arm/mach-ixp4xx/omixp-setup.c
index 0940869fcfdd..158ddb79821d 100644
--- a/arch/arm/mach-ixp4xx/omixp-setup.c
+++ b/arch/arm/mach-ixp4xx/omixp-setup.c
@@ -243,6 +243,7 @@ static void __init omixp_init(void)
243MACHINE_START(DEVIXP, "Omicron DEVIXP") 243MACHINE_START(DEVIXP, "Omicron DEVIXP")
244 .atag_offset = 0x100, 244 .atag_offset = 0x100,
245 .map_io = ixp4xx_map_io, 245 .map_io = ixp4xx_map_io,
246 .init_early = ixp4xx_init_early,
246 .init_irq = ixp4xx_init_irq, 247 .init_irq = ixp4xx_init_irq,
247 .timer = &ixp4xx_timer, 248 .timer = &ixp4xx_timer,
248 .init_machine = omixp_init, 249 .init_machine = omixp_init,
@@ -254,6 +255,7 @@ MACHINE_END
254MACHINE_START(MICCPT, "Omicron MICCPT") 255MACHINE_START(MICCPT, "Omicron MICCPT")
255 .atag_offset = 0x100, 256 .atag_offset = 0x100,
256 .map_io = ixp4xx_map_io, 257 .map_io = ixp4xx_map_io,
258 .init_early = ixp4xx_init_early,
257 .init_irq = ixp4xx_init_irq, 259 .init_irq = ixp4xx_init_irq,
258 .timer = &ixp4xx_timer, 260 .timer = &ixp4xx_timer,
259 .init_machine = omixp_init, 261 .init_machine = omixp_init,
@@ -268,6 +270,7 @@ MACHINE_END
268MACHINE_START(MIC256, "Omicron MIC256") 270MACHINE_START(MIC256, "Omicron MIC256")
269 .atag_offset = 0x100, 271 .atag_offset = 0x100,
270 .map_io = ixp4xx_map_io, 272 .map_io = ixp4xx_map_io,
273 .init_early = ixp4xx_init_early,
271 .init_irq = ixp4xx_init_irq, 274 .init_irq = ixp4xx_init_irq,
272 .timer = &ixp4xx_timer, 275 .timer = &ixp4xx_timer,
273 .init_machine = omixp_init, 276 .init_machine = omixp_init,
diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c
index 9dec20683291..2798f435aaf4 100644
--- a/arch/arm/mach-ixp4xx/vulcan-setup.c
+++ b/arch/arm/mach-ixp4xx/vulcan-setup.c
@@ -237,6 +237,7 @@ static void __init vulcan_init(void)
237MACHINE_START(ARCOM_VULCAN, "Arcom/Eurotech Vulcan") 237MACHINE_START(ARCOM_VULCAN, "Arcom/Eurotech Vulcan")
238 /* Maintainer: Marc Zyngier <maz@misterjones.org> */ 238 /* Maintainer: Marc Zyngier <maz@misterjones.org> */
239 .map_io = ixp4xx_map_io, 239 .map_io = ixp4xx_map_io,
240 .init_early = ixp4xx_init_early,
240 .init_irq = ixp4xx_init_irq, 241 .init_irq = ixp4xx_init_irq,
241 .timer = &ixp4xx_timer, 242 .timer = &ixp4xx_timer,
242 .atag_offset = 0x100, 243 .atag_offset = 0x100,
diff --git a/arch/arm/mach-ixp4xx/wg302v2-setup.c b/arch/arm/mach-ixp4xx/wg302v2-setup.c
index 5ac0f0a0fd8c..a785175b115b 100644
--- a/arch/arm/mach-ixp4xx/wg302v2-setup.c
+++ b/arch/arm/mach-ixp4xx/wg302v2-setup.c
@@ -98,6 +98,7 @@ static void __init wg302v2_init(void)
98MACHINE_START(WG302V2, "Netgear WG302 v2 / WAG302 v2") 98MACHINE_START(WG302V2, "Netgear WG302 v2 / WAG302 v2")
99 /* Maintainer: Imre Kaloz <kaloz@openwrt.org> */ 99 /* Maintainer: Imre Kaloz <kaloz@openwrt.org> */
100 .map_io = ixp4xx_map_io, 100 .map_io = ixp4xx_map_io,
101 .init_early = ixp4xx_init_early,
101 .init_irq = ixp4xx_init_irq, 102 .init_irq = ixp4xx_init_irq,
102 .timer = &ixp4xx_timer, 103 .timer = &ixp4xx_timer,
103 .atag_offset = 0x100, 104 .atag_offset = 0x100,
diff --git a/arch/arm/mach-kirkwood/cpuidle.c b/arch/arm/mach-kirkwood/cpuidle.c
index 7088180b018b..0f1710941878 100644
--- a/arch/arm/mach-kirkwood/cpuidle.c
+++ b/arch/arm/mach-kirkwood/cpuidle.c
@@ -20,77 +20,47 @@
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/export.h> 21#include <linux/export.h>
22#include <asm/proc-fns.h> 22#include <asm/proc-fns.h>
23#include <asm/cpuidle.h>
23#include <mach/kirkwood.h> 24#include <mach/kirkwood.h>
24 25
25#define KIRKWOOD_MAX_STATES 2 26#define KIRKWOOD_MAX_STATES 2
26 27
27static struct cpuidle_driver kirkwood_idle_driver = {
28 .name = "kirkwood_idle",
29 .owner = THIS_MODULE,
30};
31
32static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
33
34/* Actual code that puts the SoC in different idle states */ 28/* Actual code that puts the SoC in different idle states */
35static int kirkwood_enter_idle(struct cpuidle_device *dev, 29static int kirkwood_enter_idle(struct cpuidle_device *dev,
36 struct cpuidle_driver *drv, 30 struct cpuidle_driver *drv,
37 int index) 31 int index)
38{ 32{
39 struct timeval before, after; 33 writel(0x7, DDR_OPERATION_BASE);
40 int idle_time; 34 cpu_do_idle();
41
42 local_irq_disable();
43 do_gettimeofday(&before);
44 if (index == 0)
45 /* Wait for interrupt state */
46 cpu_do_idle();
47 else if (index == 1) {
48 /*
49 * Following write will put DDR in self refresh.
50 * Note that we have 256 cycles before DDR puts it
51 * self in self-refresh, so the wait-for-interrupt
52 * call afterwards won't get the DDR from self refresh
53 * mode.
54 */
55 writel(0x7, DDR_OPERATION_BASE);
56 cpu_do_idle();
57 }
58 do_gettimeofday(&after);
59 local_irq_enable();
60 idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
61 (after.tv_usec - before.tv_usec);
62
63 /* Update last residency */
64 dev->last_residency = idle_time;
65 35
66 return index; 36 return index;
67} 37}
68 38
39static struct cpuidle_driver kirkwood_idle_driver = {
40 .name = "kirkwood_idle",
41 .owner = THIS_MODULE,
42 .en_core_tk_irqen = 1,
43 .states[0] = ARM_CPUIDLE_WFI_STATE,
44 .states[1] = {
45 .enter = kirkwood_enter_idle,
46 .exit_latency = 10,
47 .target_residency = 100000,
48 .flags = CPUIDLE_FLAG_TIME_VALID,
49 .name = "DDR SR",
50 .desc = "WFI and DDR Self Refresh",
51 },
52 .state_count = KIRKWOOD_MAX_STATES,
53};
54
55static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
56
69/* Initialize CPU idle by registering the idle states */ 57/* Initialize CPU idle by registering the idle states */
70static int kirkwood_init_cpuidle(void) 58static int kirkwood_init_cpuidle(void)
71{ 59{
72 struct cpuidle_device *device; 60 struct cpuidle_device *device;
73 struct cpuidle_driver *driver = &kirkwood_idle_driver;
74 61
75 device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id()); 62 device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
76 device->state_count = KIRKWOOD_MAX_STATES; 63 device->state_count = KIRKWOOD_MAX_STATES;
77 driver->state_count = KIRKWOOD_MAX_STATES;
78
79 /* Wait for interrupt state */
80 driver->states[0].enter = kirkwood_enter_idle;
81 driver->states[0].exit_latency = 1;
82 driver->states[0].target_residency = 10000;
83 driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
84 strcpy(driver->states[0].name, "WFI");
85 strcpy(driver->states[0].desc, "Wait for interrupt");
86
87 /* Wait for interrupt and DDR self refresh state */
88 driver->states[1].enter = kirkwood_enter_idle;
89 driver->states[1].exit_latency = 10;
90 driver->states[1].target_residency = 10000;
91 driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
92 strcpy(driver->states[1].name, "DDR SR");
93 strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
94 64
95 cpuidle_register_driver(&kirkwood_idle_driver); 65 cpuidle_register_driver(&kirkwood_idle_driver);
96 if (cpuidle_register_device(device)) { 66 if (cpuidle_register_device(device)) {
diff --git a/arch/arm/mach-kirkwood/include/mach/io.h b/arch/arm/mach-kirkwood/include/mach/io.h
index 49dd0cb5e166..5d0ab61700d2 100644
--- a/arch/arm/mach-kirkwood/include/mach/io.h
+++ b/arch/arm/mach-kirkwood/include/mach/io.h
@@ -20,7 +20,5 @@ static inline void __iomem *__io(unsigned long addr)
20} 20}
21 21
22#define __io(a) __io(a) 22#define __io(a) __io(a)
23#define __mem_pci(a) (a)
24
25 23
26#endif 24#endif
diff --git a/arch/arm/mach-ks8695/include/mach/io.h b/arch/arm/mach-ks8695/include/mach/io.h
deleted file mode 100644
index a7a63ac3ba4e..000000000000
--- a/arch/arm/mach-ks8695/include/mach/io.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * arch/arm/mach-ks8695/include/mach/io.h
3 *
4 * Copyright (C) 2006 Andrew Victor
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10
11#ifndef __ASM_ARCH_IO_H
12#define __ASM_ARCH_IO_H
13
14#define IO_SPACE_LIMIT 0xffffffff
15
16#define __io(a) __typesafe_io(a)
17#define __mem_pci(a) (a)
18
19#endif
diff --git a/arch/arm/mach-lpc32xx/clock.c b/arch/arm/mach-lpc32xx/clock.c
index b7ef51119d37..2fc24ca12054 100644
--- a/arch/arm/mach-lpc32xx/clock.c
+++ b/arch/arm/mach-lpc32xx/clock.c
@@ -1134,7 +1134,7 @@ static struct clk_lookup lookups[] = {
1134 _REGISTER_CLOCK(NULL, "i2s1_ck", clk_i2s1) 1134 _REGISTER_CLOCK(NULL, "i2s1_ck", clk_i2s1)
1135 _REGISTER_CLOCK("ts-lpc32xx", NULL, clk_tsc) 1135 _REGISTER_CLOCK("ts-lpc32xx", NULL, clk_tsc)
1136 _REGISTER_CLOCK("dev:mmc0", NULL, clk_mmc) 1136 _REGISTER_CLOCK("dev:mmc0", NULL, clk_mmc)
1137 _REGISTER_CLOCK("lpc-net.0", NULL, clk_net) 1137 _REGISTER_CLOCK("lpc-eth.0", NULL, clk_net)
1138 _REGISTER_CLOCK("dev:clcd", NULL, clk_lcd) 1138 _REGISTER_CLOCK("dev:clcd", NULL, clk_lcd)
1139 _REGISTER_CLOCK("lpc32xx_udc", "ck_usbd", clk_usbd) 1139 _REGISTER_CLOCK("lpc32xx_udc", "ck_usbd", clk_usbd)
1140 _REGISTER_CLOCK("lpc32xx_rtc", NULL, clk_rtc) 1140 _REGISTER_CLOCK("lpc32xx_rtc", NULL, clk_rtc)
diff --git a/arch/arm/mach-lpc32xx/include/mach/io.h b/arch/arm/mach-lpc32xx/include/mach/io.h
deleted file mode 100644
index 9b59ab5cef89..000000000000
--- a/arch/arm/mach-lpc32xx/include/mach/io.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * arch/arm/mach-lpc32xx/include/mach/io.h
3 *
4 * Author: Kevin Wells <kevin.wells@nxp.com>
5 *
6 * Copyright (C) 2010 NXP Semiconductors
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#ifndef __ASM_ARM_ARCH_IO_H
20#define __ASM_ARM_ARCH_IO_H
21
22#define IO_SPACE_LIMIT 0xffffffff
23
24#define __io(a) __typesafe_io(a)
25#define __mem_pci(a) (a)
26
27#endif
diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c
index 3588a5584153..bf5d8e195c3e 100644
--- a/arch/arm/mach-mmp/aspenite.c
+++ b/arch/arm/mach-mmp/aspenite.c
@@ -23,6 +23,7 @@
23#include <mach/addr-map.h> 23#include <mach/addr-map.h>
24#include <mach/mfp-pxa168.h> 24#include <mach/mfp-pxa168.h>
25#include <mach/pxa168.h> 25#include <mach/pxa168.h>
26#include <mach/irqs.h>
26#include <video/pxa168fb.h> 27#include <video/pxa168fb.h>
27#include <linux/input.h> 28#include <linux/input.h>
28#include <plat/pxa27x_keypad.h> 29#include <plat/pxa27x_keypad.h>
@@ -239,7 +240,7 @@ static void __init common_init(void)
239 240
240MACHINE_START(ASPENITE, "PXA168-based Aspenite Development Platform") 241MACHINE_START(ASPENITE, "PXA168-based Aspenite Development Platform")
241 .map_io = mmp_map_io, 242 .map_io = mmp_map_io,
242 .nr_irqs = IRQ_BOARD_START, 243 .nr_irqs = MMP_NR_IRQS,
243 .init_irq = pxa168_init_irq, 244 .init_irq = pxa168_init_irq,
244 .timer = &pxa168_timer, 245 .timer = &pxa168_timer,
245 .init_machine = common_init, 246 .init_machine = common_init,
@@ -248,7 +249,7 @@ MACHINE_END
248 249
249MACHINE_START(ZYLONITE2, "PXA168-based Zylonite2 Development Platform") 250MACHINE_START(ZYLONITE2, "PXA168-based Zylonite2 Development Platform")
250 .map_io = mmp_map_io, 251 .map_io = mmp_map_io,
251 .nr_irqs = IRQ_BOARD_START, 252 .nr_irqs = MMP_NR_IRQS,
252 .init_irq = pxa168_init_irq, 253 .init_irq = pxa168_init_irq,
253 .timer = &pxa168_timer, 254 .timer = &pxa168_timer,
254 .init_machine = common_init, 255 .init_machine = common_init,
diff --git a/arch/arm/mach-mmp/avengers_lite.c b/arch/arm/mach-mmp/avengers_lite.c
index b148a9dc5a44..603542ae6fbd 100644
--- a/arch/arm/mach-mmp/avengers_lite.c
+++ b/arch/arm/mach-mmp/avengers_lite.c
@@ -43,6 +43,7 @@ static void __init avengers_lite_init(void)
43 43
44MACHINE_START(AVENGERS_LITE, "PXA168 Avengers lite Development Platform") 44MACHINE_START(AVENGERS_LITE, "PXA168 Avengers lite Development Platform")
45 .map_io = mmp_map_io, 45 .map_io = mmp_map_io,
46 .nr_irqs = MMP_NR_IRQS,
46 .init_irq = pxa168_init_irq, 47 .init_irq = pxa168_init_irq,
47 .timer = &pxa168_timer, 48 .timer = &pxa168_timer,
48 .init_machine = avengers_lite_init, 49 .init_machine = avengers_lite_init,
diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c
index d839fe6421e6..5cb769cd26d9 100644
--- a/arch/arm/mach-mmp/brownstone.c
+++ b/arch/arm/mach-mmp/brownstone.c
@@ -28,7 +28,7 @@
28 28
29#include "common.h" 29#include "common.h"
30 30
31#define BROWNSTONE_NR_IRQS (IRQ_BOARD_START + 40) 31#define BROWNSTONE_NR_IRQS (MMP_NR_IRQS + 40)
32 32
33#define GPIO_5V_ENABLE (89) 33#define GPIO_5V_ENABLE (89)
34 34
@@ -158,7 +158,7 @@ static struct platform_device brownstone_v_5vp_device = {
158}; 158};
159 159
160static struct max8925_platform_data brownstone_max8925_info = { 160static struct max8925_platform_data brownstone_max8925_info = {
161 .irq_base = IRQ_BOARD_START, 161 .irq_base = MMP_NR_IRQS,
162}; 162};
163 163
164static struct i2c_board_info brownstone_twsi1_info[] = { 164static struct i2c_board_info brownstone_twsi1_info[] = {
diff --git a/arch/arm/mach-mmp/flint.c b/arch/arm/mach-mmp/flint.c
index 2ee8cd7829dd..8059cc0905c6 100644
--- a/arch/arm/mach-mmp/flint.c
+++ b/arch/arm/mach-mmp/flint.c
@@ -23,10 +23,11 @@
23#include <mach/addr-map.h> 23#include <mach/addr-map.h>
24#include <mach/mfp-mmp2.h> 24#include <mach/mfp-mmp2.h>
25#include <mach/mmp2.h> 25#include <mach/mmp2.h>
26#include <mach/irqs.h>
26 27
27#include "common.h" 28#include "common.h"
28 29
29#define FLINT_NR_IRQS (IRQ_BOARD_START + 48) 30#define FLINT_NR_IRQS (MMP_NR_IRQS + 48)
30 31
31static unsigned long flint_pin_config[] __initdata = { 32static unsigned long flint_pin_config[] __initdata = {
32 /* UART1 */ 33 /* UART1 */
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index 87765467de63..f516e74ce0d5 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -191,7 +191,7 @@ static void __init gplugd_init(void)
191 191
192MACHINE_START(GPLUGD, "PXA168-based GuruPlug Display (gplugD) Platform") 192MACHINE_START(GPLUGD, "PXA168-based GuruPlug Display (gplugD) Platform")
193 .map_io = mmp_map_io, 193 .map_io = mmp_map_io,
194 .nr_irqs = IRQ_BOARD_START, 194 .nr_irqs = MMP_NR_IRQS,
195 .init_irq = pxa168_init_irq, 195 .init_irq = pxa168_init_irq,
196 .timer = &pxa168_timer, 196 .timer = &pxa168_timer,
197 .init_machine = gplugd_init, 197 .init_machine = gplugd_init,
diff --git a/arch/arm/mach-mmp/include/mach/addr-map.h b/arch/arm/mach-mmp/include/mach/addr-map.h
index 3e404acd6ff4..b1ece08174e8 100644
--- a/arch/arm/mach-mmp/include/mach/addr-map.h
+++ b/arch/arm/mach-mmp/include/mach/addr-map.h
@@ -11,12 +11,6 @@
11#ifndef __ASM_MACH_ADDR_MAP_H 11#ifndef __ASM_MACH_ADDR_MAP_H
12#define __ASM_MACH_ADDR_MAP_H 12#define __ASM_MACH_ADDR_MAP_H
13 13
14#ifndef __ASSEMBLER__
15#define IOMEM(x) ((void __iomem *)(x))
16#else
17#define IOMEM(x) (x)
18#endif
19
20/* APB - Application Subsystem Peripheral Bus 14/* APB - Application Subsystem Peripheral Bus
21 * 15 *
22 * NOTE: the DMA controller registers are actually on the AXI fabric #1 16 * NOTE: the DMA controller registers are actually on the AXI fabric #1
diff --git a/arch/arm/mach-mmp/include/mach/io.h b/arch/arm/mach-mmp/include/mach/io.h
deleted file mode 100644
index e7adf3d012c1..000000000000
--- a/arch/arm/mach-mmp/include/mach/io.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * linux/arch/arm/mach-mmp/include/mach/io.h
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __ASM_MACH_IO_H
10#define __ASM_MACH_IO_H
11
12#define IO_SPACE_LIMIT 0xffffffff
13
14/*
15 * We don't actually have real ISA nor PCI buses, but there is so many
16 * drivers out there that might just work if we fake them...
17 */
18#define __io(a) __typesafe_io(a)
19#define __mem_pci(a) (a)
20
21#endif /* __ASM_MACH_IO_H */
diff --git a/arch/arm/mach-mmp/include/mach/irqs.h b/arch/arm/mach-mmp/include/mach/irqs.h
index 34635a0bbb59..d0e746626a3d 100644
--- a/arch/arm/mach-mmp/include/mach/irqs.h
+++ b/arch/arm/mach-mmp/include/mach/irqs.h
@@ -223,7 +223,6 @@
223#define MMP_GPIO_TO_IRQ(gpio) (IRQ_GPIO_START + (gpio)) 223#define MMP_GPIO_TO_IRQ(gpio) (IRQ_GPIO_START + (gpio))
224 224
225#define IRQ_BOARD_START (IRQ_GPIO_START + MMP_NR_BUILTIN_GPIO) 225#define IRQ_BOARD_START (IRQ_GPIO_START + MMP_NR_BUILTIN_GPIO)
226 226#define MMP_NR_IRQS IRQ_BOARD_START
227#define NR_IRQS (IRQ_BOARD_START)
228 227
229#endif /* __ASM_MACH_IRQS_H */ 228#endif /* __ASM_MACH_IRQS_H */
diff --git a/arch/arm/mach-mmp/irq-mmp2.c b/arch/arm/mach-mmp/irq-mmp2.c
index d21c5441a3d0..7895d277421e 100644
--- a/arch/arm/mach-mmp/irq-mmp2.c
+++ b/arch/arm/mach-mmp/irq-mmp2.c
@@ -15,6 +15,7 @@
15#include <linux/irq.h> 15#include <linux/irq.h>
16#include <linux/io.h> 16#include <linux/io.h>
17 17
18#include <mach/irqs.h>
18#include <mach/regs-icu.h> 19#include <mach/regs-icu.h>
19#include <mach/mmp2.h> 20#include <mach/mmp2.h>
20 21
diff --git a/arch/arm/mach-mmp/jasper.c b/arch/arm/mach-mmp/jasper.c
index 96cf5c8fe47d..ff73249884d0 100644
--- a/arch/arm/mach-mmp/jasper.c
+++ b/arch/arm/mach-mmp/jasper.c
@@ -19,6 +19,7 @@
19#include <linux/mfd/max8925.h> 19#include <linux/mfd/max8925.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21 21
22#include <mach/irqs.h>
22#include <asm/mach-types.h> 23#include <asm/mach-types.h>
23#include <asm/mach/arch.h> 24#include <asm/mach/arch.h>
24#include <mach/addr-map.h> 25#include <mach/addr-map.h>
@@ -27,7 +28,7 @@
27 28
28#include "common.h" 29#include "common.h"
29 30
30#define JASPER_NR_IRQS (IRQ_BOARD_START + 48) 31#define JASPER_NR_IRQS (MMP_NR_IRQS + 48)
31 32
32static unsigned long jasper_pin_config[] __initdata = { 33static unsigned long jasper_pin_config[] __initdata = {
33 /* UART1 */ 34 /* UART1 */
@@ -135,7 +136,7 @@ static struct max8925_power_pdata jasper_power_data = {
135static struct max8925_platform_data jasper_max8925_info = { 136static struct max8925_platform_data jasper_max8925_info = {
136 .backlight = &jasper_backlight_data, 137 .backlight = &jasper_backlight_data,
137 .power = &jasper_power_data, 138 .power = &jasper_power_data,
138 .irq_base = IRQ_BOARD_START, 139 .irq_base = MMP_NR_IRQS,
139}; 140};
140 141
141static struct i2c_board_info jasper_twsi1_info[] = { 142static struct i2c_board_info jasper_twsi1_info[] = {
diff --git a/arch/arm/mach-mmp/tavorevb.c b/arch/arm/mach-mmp/tavorevb.c
index bc97170125bf..b28f9084dfff 100644
--- a/arch/arm/mach-mmp/tavorevb.c
+++ b/arch/arm/mach-mmp/tavorevb.c
@@ -101,6 +101,7 @@ static void __init tavorevb_init(void)
101 101
102MACHINE_START(TAVOREVB, "PXA910 Evaluation Board (aka TavorEVB)") 102MACHINE_START(TAVOREVB, "PXA910 Evaluation Board (aka TavorEVB)")
103 .map_io = mmp_map_io, 103 .map_io = mmp_map_io,
104 .nr_irqs = MMP_NR_IRQS,
104 .init_irq = pxa910_init_irq, 105 .init_irq = pxa910_init_irq,
105 .timer = &pxa910_timer, 106 .timer = &pxa910_timer,
106 .init_machine = tavorevb_init, 107 .init_machine = tavorevb_init,
diff --git a/arch/arm/mach-mmp/teton_bga.c b/arch/arm/mach-mmp/teton_bga.c
index 0523e422990e..42bef6674ecf 100644
--- a/arch/arm/mach-mmp/teton_bga.c
+++ b/arch/arm/mach-mmp/teton_bga.c
@@ -26,6 +26,7 @@
26#include <mach/mfp-pxa168.h> 26#include <mach/mfp-pxa168.h>
27#include <mach/pxa168.h> 27#include <mach/pxa168.h>
28#include <mach/teton_bga.h> 28#include <mach/teton_bga.h>
29#include <mach/irqs.h>
29 30
30#include "common.h" 31#include "common.h"
31 32
@@ -83,7 +84,7 @@ static void __init teton_bga_init(void)
83 84
84MACHINE_START(TETON_BGA, "PXA168-based Teton BGA Development Platform") 85MACHINE_START(TETON_BGA, "PXA168-based Teton BGA Development Platform")
85 .map_io = mmp_map_io, 86 .map_io = mmp_map_io,
86 .nr_irqs = IRQ_BOARD_START, 87 .nr_irqs = MMP_NR_IRQS,
87 .init_irq = pxa168_init_irq, 88 .init_irq = pxa168_init_irq,
88 .timer = &pxa168_timer, 89 .timer = &pxa168_timer,
89 .init_machine = teton_bga_init, 90 .init_machine = teton_bga_init,
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index e72c709da44f..3fc9ed21f97d 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -38,7 +38,7 @@
38 * 16 board interrupts -- PCA9575 GPIO expander 38 * 16 board interrupts -- PCA9575 GPIO expander
39 * 24 board interrupts -- 88PM860x PMIC 39 * 24 board interrupts -- 88PM860x PMIC
40 */ 40 */
41#define TTCDKB_NR_IRQS (IRQ_BOARD_START + 16 + 16 + 24) 41#define TTCDKB_NR_IRQS (MMP_NR_IRQS + 16 + 16 + 24)
42 42
43static unsigned long ttc_dkb_pin_config[] __initdata = { 43static unsigned long ttc_dkb_pin_config[] __initdata = {
44 /* UART2 */ 44 /* UART2 */
@@ -131,7 +131,7 @@ static struct platform_device *ttc_dkb_devices[] = {
131static struct pca953x_platform_data max7312_data[] = { 131static struct pca953x_platform_data max7312_data[] = {
132 { 132 {
133 .gpio_base = TTCDKB_GPIO_EXT0(0), 133 .gpio_base = TTCDKB_GPIO_EXT0(0),
134 .irq_base = IRQ_BOARD_START, 134 .irq_base = MMP_NR_IRQS,
135 }, 135 },
136}; 136};
137 137
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index a60ab6d04ec5..3698a370d636 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -68,6 +68,11 @@ static struct platform_device *devices[] __initdata = {
68 68
69extern struct sys_timer msm_timer; 69extern struct sys_timer msm_timer;
70 70
71static void __init halibut_init_early(void)
72{
73 arch_ioremap_caller = __msm_ioremap_caller;
74}
75
71static void __init halibut_init_irq(void) 76static void __init halibut_init_irq(void)
72{ 77{
73 msm_init_irq(); 78 msm_init_irq();
@@ -96,6 +101,7 @@ MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
96 .atag_offset = 0x100, 101 .atag_offset = 0x100,
97 .fixup = halibut_fixup, 102 .fixup = halibut_fixup,
98 .map_io = halibut_map_io, 103 .map_io = halibut_map_io,
104 .init_early = halibut_init_early,
99 .init_irq = halibut_init_irq, 105 .init_irq = halibut_init_irq,
100 .init_machine = halibut_init, 106 .init_machine = halibut_init,
101 .timer = &msm_timer, 107 .timer = &msm_timer,
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c
index 6b9b227c87c5..5414f76ec0a9 100644
--- a/arch/arm/mach-msm/board-trout.c
+++ b/arch/arm/mach-msm/board-trout.c
@@ -43,6 +43,11 @@ static struct platform_device *devices[] __initdata = {
43 43
44extern struct sys_timer msm_timer; 44extern struct sys_timer msm_timer;
45 45
46static void __init trout_init_early(void)
47{
48 arch_ioremap_caller = __msm_ioremap_caller;
49}
50
46static void __init trout_init_irq(void) 51static void __init trout_init_irq(void)
47{ 52{
48 msm_init_irq(); 53 msm_init_irq();
@@ -96,6 +101,7 @@ MACHINE_START(TROUT, "HTC Dream")
96 .atag_offset = 0x100, 101 .atag_offset = 0x100,
97 .fixup = trout_fixup, 102 .fixup = trout_fixup,
98 .map_io = trout_map_io, 103 .map_io = trout_map_io,
104 .init_early = trout_init_early,
99 .init_irq = trout_init_irq, 105 .init_irq = trout_init_irq,
100 .init_machine = trout_init, 106 .init_machine = trout_init,
101 .timer = &msm_timer, 107 .timer = &msm_timer,
diff --git a/arch/arm/mach-msm/include/mach/io.h b/arch/arm/mach-msm/include/mach/io.h
deleted file mode 100644
index dc1b928745e9..000000000000
--- a/arch/arm/mach-msm/include/mach/io.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/* arch/arm/mach-msm/include/mach/io.h
2 *
3 * Copyright (C) 2007 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#ifndef __ASM_ARM_ARCH_IO_H
17#define __ASM_ARM_ARCH_IO_H
18
19#define IO_SPACE_LIMIT 0xffffffff
20
21#define __arch_ioremap __msm_ioremap
22#define __arch_iounmap __iounmap
23
24void __iomem *__msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype);
25
26#define __io(a) __typesafe_io(a)
27#define __mem_pci(a) (a)
28
29void msm_map_qsd8x50_io(void);
30void msm_map_msm7x30_io(void);
31void msm_map_msm8x60_io(void);
32void msm_map_msm8960_io(void);
33
34extern unsigned int msm_shared_ram_phys;
35
36#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
index 8af46123dab6..6c4046c21296 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x00.h
@@ -38,12 +38,6 @@
38 * 38 *
39 */ 39 */
40 40
41#ifdef __ASSEMBLY__
42#define IOMEM(x) x
43#else
44#define IOMEM(x) ((void __force __iomem *)(x))
45#endif
46
47#define MSM_VIC_BASE IOMEM(0xE0000000) 41#define MSM_VIC_BASE IOMEM(0xE0000000)
48#define MSM_VIC_PHYS 0xC0000000 42#define MSM_VIC_PHYS 0xC0000000
49#define MSM_VIC_SIZE SZ_4K 43#define MSM_VIC_SIZE SZ_4K
@@ -111,5 +105,11 @@
111#define MSM_AD5_PHYS 0xAC000000 105#define MSM_AD5_PHYS 0xAC000000
112#define MSM_AD5_SIZE (SZ_1M*13) 106#define MSM_AD5_SIZE (SZ_1M*13)
113 107
108#ifndef __ASSEMBLY__
109
110extern void __iomem *__msm_ioremap_caller(unsigned long phys_addr, size_t size,
111 unsigned int mtype, void *caller);
112
113#endif
114 114
115#endif 115#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
index 198202c267c8..f944fe65a657 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-7x30.h
@@ -100,4 +100,8 @@
100#define MSM_HSUSB_PHYS 0xA3600000 100#define MSM_HSUSB_PHYS 0xA3600000
101#define MSM_HSUSB_SIZE SZ_1K 101#define MSM_HSUSB_SIZE SZ_1K
102 102
103#ifndef __ASSEMBLY__
104extern void msm_map_msm7x30_io(void);
105#endif
106
103#endif 107#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8960.h b/arch/arm/mach-msm/include/mach/msm_iomap-8960.h
index 800b55767e6b..a1752c0284fc 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8960.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8960.h
@@ -50,4 +50,8 @@
50#define MSM_DEBUG_UART_PHYS 0x16440000 50#define MSM_DEBUG_UART_PHYS 0x16440000
51#endif 51#endif
52 52
53#ifndef __ASSEMBLY__
54extern void msm_map_msm8960_io(void);
55#endif
56
53#endif 57#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
index 0faa894729b7..da77cc1d545d 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8x50.h
@@ -122,4 +122,8 @@
122#define MSM_SDC4_PHYS 0xA0600000 122#define MSM_SDC4_PHYS 0xA0600000
123#define MSM_SDC4_SIZE SZ_4K 123#define MSM_SDC4_SIZE SZ_4K
124 124
125#ifndef __ASSEMBLY__
126extern void msm_map_qsd8x50_io(void);
127#endif
128
125#endif 129#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h b/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
index 54e12caa8d86..5aed57dc808c 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap-8x60.h
@@ -67,4 +67,8 @@
67#define MSM_DEBUG_UART_PHYS 0x19C40000 67#define MSM_DEBUG_UART_PHYS 0x19C40000
68#endif 68#endif
69 69
70#ifndef __ASSEMBLY__
71extern void msm_map_msm8x60_io(void);
72#endif
73
70#endif 74#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 90682f4599d3..00afdfb8c38f 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -37,12 +37,6 @@
37 * 37 *
38 */ 38 */
39 39
40#ifdef __ASSEMBLY__
41#define IOMEM(x) x
42#else
43#define IOMEM(x) ((void __force __iomem *)(x))
44#endif
45
46#if defined(CONFIG_ARCH_MSM7X30) 40#if defined(CONFIG_ARCH_MSM7X30)
47#include "msm_iomap-7x30.h" 41#include "msm_iomap-7x30.h"
48#elif defined(CONFIG_ARCH_QSD8X50) 42#elif defined(CONFIG_ARCH_QSD8X50)
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 578b04e42deb..a1e7b1168850 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -172,8 +172,8 @@ void __init msm_map_msm7x30_io(void)
172} 172}
173#endif /* CONFIG_ARCH_MSM7X30 */ 173#endif /* CONFIG_ARCH_MSM7X30 */
174 174
175void __iomem * 175void __iomem *__msm_ioremap_caller(unsigned long phys_addr, size_t size,
176__msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) 176 unsigned int mtype, void *caller)
177{ 177{
178 if (mtype == MT_DEVICE) { 178 if (mtype == MT_DEVICE) {
179 /* The peripherals in the 88000000 - D0000000 range 179 /* The peripherals in the 88000000 - D0000000 range
@@ -184,7 +184,5 @@ __msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
184 mtype = MT_DEVICE_NONSHARED; 184 mtype = MT_DEVICE_NONSHARED;
185 } 185 }
186 186
187 return __arm_ioremap_caller(phys_addr, size, mtype, 187 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
188 __builtin_return_address(0));
189} 188}
190EXPORT_SYMBOL(__msm_ioremap);
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 75f4be40b3e5..812808254936 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -24,6 +24,7 @@
24#include <asm/mach/time.h> 24#include <asm/mach/time.h>
25#include <asm/hardware/gic.h> 25#include <asm/hardware/gic.h>
26#include <asm/localtimer.h> 26#include <asm/localtimer.h>
27#include <asm/sched_clock.h>
27 28
28#include <mach/msm_iomap.h> 29#include <mach/msm_iomap.h>
29#include <mach/cpu.h> 30#include <mach/cpu.h>
@@ -105,12 +106,12 @@ static union {
105 106
106static void __iomem *source_base; 107static void __iomem *source_base;
107 108
108static cycle_t msm_read_timer_count(struct clocksource *cs) 109static notrace cycle_t msm_read_timer_count(struct clocksource *cs)
109{ 110{
110 return readl_relaxed(source_base + TIMER_COUNT_VAL); 111 return readl_relaxed(source_base + TIMER_COUNT_VAL);
111} 112}
112 113
113static cycle_t msm_read_timer_count_shift(struct clocksource *cs) 114static notrace cycle_t msm_read_timer_count_shift(struct clocksource *cs)
114{ 115{
115 /* 116 /*
116 * Shift timer count down by a constant due to unreliable lower bits 117 * Shift timer count down by a constant due to unreliable lower bits
@@ -166,6 +167,11 @@ static struct local_timer_ops msm_local_timer_ops __cpuinitdata = {
166}; 167};
167#endif /* CONFIG_LOCAL_TIMERS */ 168#endif /* CONFIG_LOCAL_TIMERS */
168 169
170static notrace u32 msm_sched_clock_read(void)
171{
172 return msm_clocksource.read(&msm_clocksource);
173}
174
169static void __init msm_timer_init(void) 175static void __init msm_timer_init(void)
170{ 176{
171 struct clock_event_device *ce = &msm_clockevent; 177 struct clock_event_device *ce = &msm_clockevent;
@@ -232,6 +238,8 @@ err:
232 res = clocksource_register_hz(cs, dgt_hz); 238 res = clocksource_register_hz(cs, dgt_hz);
233 if (res) 239 if (res)
234 pr_err("clocksource_register failed\n"); 240 pr_err("clocksource_register failed\n");
241 setup_sched_clock(msm_sched_clock_read,
242 cpu_is_msm7x01() ? 32 - MSM_DGT_SHIFT : 32, dgt_hz);
235} 243}
236 244
237struct sys_timer msm_timer = { 245struct sys_timer msm_timer = {
diff --git a/arch/arm/mach-mv78xx0/include/mach/io.h b/arch/arm/mach-mv78xx0/include/mach/io.h
index 450e0e1ad092..c7d9d00d8fc1 100644
--- a/arch/arm/mach-mv78xx0/include/mach/io.h
+++ b/arch/arm/mach-mv78xx0/include/mach/io.h
@@ -20,7 +20,5 @@ static inline void __iomem *__io(unsigned long addr)
20} 20}
21 21
22#define __io(a) __io(a) 22#define __io(a) __io(a)
23#define __mem_pci(a) (a)
24
25 23
26#endif 24#endif
diff --git a/arch/arm/mach-mxs/include/mach/hardware.h b/arch/arm/mach-mxs/include/mach/hardware.h
index 53e89a09bf0d..4c0e8a64d8c7 100644
--- a/arch/arm/mach-mxs/include/mach/hardware.h
+++ b/arch/arm/mach-mxs/include/mach/hardware.h
@@ -20,10 +20,4 @@
20#ifndef __MACH_MXS_HARDWARE_H__ 20#ifndef __MACH_MXS_HARDWARE_H__
21#define __MACH_MXS_HARDWARE_H__ 21#define __MACH_MXS_HARDWARE_H__
22 22
23#ifdef __ASSEMBLER__
24#define IOMEM(addr) (addr)
25#else
26#define IOMEM(addr) ((void __force __iomem *)(addr))
27#endif
28
29#endif /* __MACH_MXS_HARDWARE_H__ */ 23#endif /* __MACH_MXS_HARDWARE_H__ */
diff --git a/arch/arm/mach-mxs/include/mach/io.h b/arch/arm/mach-mxs/include/mach/io.h
deleted file mode 100644
index 289b7227e072..000000000000
--- a/arch/arm/mach-mxs/include/mach/io.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 */
4
5/*
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __MACH_MXS_IO_H__
12#define __MACH_MXS_IO_H__
13
14/* Allow IO space to be anywhere in the memory */
15#define IO_SPACE_LIMIT 0xffffffff
16
17/* io address mapping macro */
18#define __io(a) __typesafe_io(a)
19
20#define __mem_pci(a) (a)
21
22#endif /* __MACH_MXS_IO_H__ */
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
index 59e67979f197..aa627465d914 100644
--- a/arch/arm/mach-netx/generic.c
+++ b/arch/arm/mach-netx/generic.c
@@ -168,7 +168,7 @@ void __init netx_init_irq(void)
168{ 168{
169 int irq; 169 int irq;
170 170
171 vic_init(__io(io_p2v(NETX_PA_VIC)), 0, ~0, 0); 171 vic_init(io_p2v(NETX_PA_VIC), 0, ~0, 0);
172 172
173 for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) { 173 for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
174 irq_set_chip_and_handler(irq, &netx_hif_chip, 174 irq_set_chip_and_handler(irq, &netx_hif_chip,
diff --git a/arch/arm/mach-netx/include/mach/hardware.h b/arch/arm/mach-netx/include/mach/hardware.h
index 517a2bd37842..b661af2f2145 100644
--- a/arch/arm/mach-netx/include/mach/hardware.h
+++ b/arch/arm/mach-netx/include/mach/hardware.h
@@ -33,7 +33,7 @@
33#define XMAC_MEM_SIZE 0x1000 33#define XMAC_MEM_SIZE 0x1000
34#define SRAM_MEM_SIZE 0x8000 34#define SRAM_MEM_SIZE 0x8000
35 35
36#define io_p2v(x) ((x) - NETX_IO_PHYS + NETX_IO_VIRT) 36#define io_p2v(x) IOMEM((x) - NETX_IO_PHYS + NETX_IO_VIRT)
37#define io_v2p(x) ((x) - NETX_IO_VIRT + NETX_IO_PHYS) 37#define io_v2p(x) ((x) - NETX_IO_VIRT + NETX_IO_PHYS)
38 38
39#endif 39#endif
diff --git a/arch/arm/mach-netx/include/mach/io.h b/arch/arm/mach-netx/include/mach/io.h
deleted file mode 100644
index c3921cb3b6a6..000000000000
--- a/arch/arm/mach-netx/include/mach/io.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * arch/arm/mach-netx/include/mach/io.h
3 *
4 * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#ifndef __ASM_ARM_ARCH_IO_H
21#define __ASM_ARM_ARCH_IO_H
22
23#define IO_SPACE_LIMIT 0xffffffff
24
25#define __io(a) __typesafe_io(a)
26#define __mem_pci(a) (a)
27
28#endif
diff --git a/arch/arm/mach-netx/include/mach/netx-regs.h b/arch/arm/mach-netx/include/mach/netx-regs.h
index 5a03e7ccb01a..fdde22b58ac3 100644
--- a/arch/arm/mach-netx/include/mach/netx-regs.h
+++ b/arch/arm/mach-netx/include/mach/netx-regs.h
@@ -115,7 +115,7 @@
115 *********************************/ 115 *********************************/
116 116
117/* Registers */ 117/* Registers */
118#define NETX_SYSTEM_REG(ofs) __io(NETX_VA_SYSTEM + (ofs)) 118#define NETX_SYSTEM_REG(ofs) IOMEM(NETX_VA_SYSTEM + (ofs))
119#define NETX_SYSTEM_BOO_SR NETX_SYSTEM_REG(0x00) 119#define NETX_SYSTEM_BOO_SR NETX_SYSTEM_REG(0x00)
120#define NETX_SYSTEM_IOC_CR NETX_SYSTEM_REG(0x04) 120#define NETX_SYSTEM_IOC_CR NETX_SYSTEM_REG(0x04)
121#define NETX_SYSTEM_IOC_MR NETX_SYSTEM_REG(0x08) 121#define NETX_SYSTEM_IOC_MR NETX_SYSTEM_REG(0x08)
@@ -185,7 +185,7 @@
185 *******************************/ 185 *******************************/
186 186
187/* Registers */ 187/* Registers */
188#define NETX_GPIO_REG(ofs) __io(NETX_VA_GPIO + (ofs)) 188#define NETX_GPIO_REG(ofs) IOMEM(NETX_VA_GPIO + (ofs))
189#define NETX_GPIO_CFG(gpio) NETX_GPIO_REG(0x0 + ((gpio)<<2)) 189#define NETX_GPIO_CFG(gpio) NETX_GPIO_REG(0x0 + ((gpio)<<2))
190#define NETX_GPIO_THRESHOLD_CAPTURE(gpio) NETX_GPIO_REG(0x40 + ((gpio)<<2)) 190#define NETX_GPIO_THRESHOLD_CAPTURE(gpio) NETX_GPIO_REG(0x40 + ((gpio)<<2))
191#define NETX_GPIO_COUNTER_CTRL(counter) NETX_GPIO_REG(0x80 + ((counter)<<2)) 191#define NETX_GPIO_COUNTER_CTRL(counter) NETX_GPIO_REG(0x80 + ((counter)<<2))
@@ -230,7 +230,7 @@
230 *******************************/ 230 *******************************/
231 231
232/* Registers */ 232/* Registers */
233#define NETX_PIO_REG(ofs) __io(NETX_VA_PIO + (ofs)) 233#define NETX_PIO_REG(ofs) IOMEM(NETX_VA_PIO + (ofs))
234#define NETX_PIO_INPIO NETX_PIO_REG(0x0) 234#define NETX_PIO_INPIO NETX_PIO_REG(0x0)
235#define NETX_PIO_OUTPIO NETX_PIO_REG(0x4) 235#define NETX_PIO_OUTPIO NETX_PIO_REG(0x4)
236#define NETX_PIO_OEPIO NETX_PIO_REG(0x8) 236#define NETX_PIO_OEPIO NETX_PIO_REG(0x8)
@@ -240,7 +240,7 @@
240 *******************************/ 240 *******************************/
241 241
242/* Registers */ 242/* Registers */
243#define NETX_MIIMU __io(NETX_VA_MIIMU) 243#define NETX_MIIMU IOMEM(NETX_VA_MIIMU)
244 244
245/* Bits */ 245/* Bits */
246#define MIIMU_SNRDY (1<<0) 246#define MIIMU_SNRDY (1<<0)
@@ -317,7 +317,7 @@
317 *******************************/ 317 *******************************/
318 318
319/* Registers */ 319/* Registers */
320#define NETX_PFIFO_REG(ofs) __io(NETX_VA_PFIFO + (ofs)) 320#define NETX_PFIFO_REG(ofs) IOMEM(NETX_VA_PFIFO + (ofs))
321#define NETX_PFIFO_BASE(pfifo) NETX_PFIFO_REG(0x00 + ((pfifo)<<2)) 321#define NETX_PFIFO_BASE(pfifo) NETX_PFIFO_REG(0x00 + ((pfifo)<<2))
322#define NETX_PFIFO_BORDER_BASE(pfifo) NETX_PFIFO_REG(0x80 + ((pfifo)<<2)) 322#define NETX_PFIFO_BORDER_BASE(pfifo) NETX_PFIFO_REG(0x80 + ((pfifo)<<2))
323#define NETX_PFIFO_RESET NETX_PFIFO_REG(0x100) 323#define NETX_PFIFO_RESET NETX_PFIFO_REG(0x100)
@@ -334,7 +334,7 @@
334 *******************************/ 334 *******************************/
335 335
336/* Registers */ 336/* Registers */
337#define NETX_MEMCR_REG(ofs) __io(NETX_VA_MEMCR + (ofs)) 337#define NETX_MEMCR_REG(ofs) IOMEM(NETX_VA_MEMCR + (ofs))
338#define NETX_MEMCR_SRAM_CTRL(cs) NETX_MEMCR_REG(0x0 + 4 * (cs)) /* SRAM for CS 0..2 */ 338#define NETX_MEMCR_SRAM_CTRL(cs) NETX_MEMCR_REG(0x0 + 4 * (cs)) /* SRAM for CS 0..2 */
339#define NETX_MEMCR_SDRAM_CFG_CTRL NETX_MEMCR_REG(0x40) 339#define NETX_MEMCR_SDRAM_CFG_CTRL NETX_MEMCR_REG(0x40)
340#define NETX_MEMCR_SDRAM_TIMING_CTRL NETX_MEMCR_REG(0x44) 340#define NETX_MEMCR_SDRAM_TIMING_CTRL NETX_MEMCR_REG(0x44)
@@ -355,7 +355,7 @@
355 *******************************/ 355 *******************************/
356 356
357/* Registers */ 357/* Registers */
358#define NETX_DPMAS_REG(ofs) __io(NETX_VA_DPMAS + (ofs)) 358#define NETX_DPMAS_REG(ofs) IOMEM(NETX_VA_DPMAS + (ofs))
359#define NETX_DPMAS_SYS_STAT NETX_DPMAS_REG(0x4d8) 359#define NETX_DPMAS_SYS_STAT NETX_DPMAS_REG(0x4d8)
360#define NETX_DPMAS_INT_STAT NETX_DPMAS_REG(0x4e0) 360#define NETX_DPMAS_INT_STAT NETX_DPMAS_REG(0x4e0)
361#define NETX_DPMAS_INT_EN NETX_DPMAS_REG(0x4f0) 361#define NETX_DPMAS_INT_EN NETX_DPMAS_REG(0x4f0)
@@ -425,7 +425,7 @@
425/******************************* 425/*******************************
426 * I2C * 426 * I2C *
427 *******************************/ 427 *******************************/
428#define NETX_I2C_REG(ofs) __io(NETX_VA_I2C, (ofs)) 428#define NETX_I2C_REG(ofs) IOMEM(NETX_VA_I2C, (ofs))
429#define NETX_I2C_CTRL NETX_I2C_REG(0x0) 429#define NETX_I2C_CTRL NETX_I2C_REG(0x0)
430#define NETX_I2C_DATA NETX_I2C_REG(0x4) 430#define NETX_I2C_DATA NETX_I2C_REG(0x4)
431 431
diff --git a/arch/arm/mach-nomadik/include/mach/io.h b/arch/arm/mach-nomadik/include/mach/io.h
deleted file mode 100644
index 2e1eca1b8243..000000000000
--- a/arch/arm/mach-nomadik/include/mach/io.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * arch/arm/mach-nomadik/include/mach/io.h (copied from mach-sa1100)
3 *
4 * Copyright (C) 1997-1999 Russell King
5 *
6 * Modifications:
7 * 06-12-1997 RMK Created.
8 * 07-04-1999 RMK Major cleanup
9 */
10#ifndef __ASM_ARM_ARCH_IO_H
11#define __ASM_ARM_ARCH_IO_H
12
13#define IO_SPACE_LIMIT 0xffffffff
14
15/*
16 * We don't actually have real ISA nor PCI buses, but there is so many
17 * drivers out there that might just work if we fake them...
18 */
19#define __io(a) __typesafe_io(a)
20#define __mem_pci(a) (a)
21
22#endif
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
index 399c4c49722f..a051cb8ae57f 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <asm/assembler.h>
17 18
18#include <plat/board-ams-delta.h> 19#include <plat/board-ams-delta.h>
19 20
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index c3068622fdcb..553a2e535764 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -245,8 +245,6 @@ static struct resource h2_smc91x_resources[] = {
245 .flags = IORESOURCE_MEM, 245 .flags = IORESOURCE_MEM,
246 }, 246 },
247 [1] = { 247 [1] = {
248 .start = OMAP_GPIO_IRQ(0),
249 .end = OMAP_GPIO_IRQ(0),
250 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, 248 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
251 }, 249 },
252}; 250};
@@ -359,11 +357,9 @@ static struct tps65010_board tps_board = {
359static struct i2c_board_info __initdata h2_i2c_board_info[] = { 357static struct i2c_board_info __initdata h2_i2c_board_info[] = {
360 { 358 {
361 I2C_BOARD_INFO("tps65010", 0x48), 359 I2C_BOARD_INFO("tps65010", 0x48),
362 .irq = OMAP_GPIO_IRQ(58),
363 .platform_data = &tps_board, 360 .platform_data = &tps_board,
364 }, { 361 }, {
365 I2C_BOARD_INFO("isp1301_omap", 0x2d), 362 I2C_BOARD_INFO("isp1301_omap", 0x2d),
366 .irq = OMAP_GPIO_IRQ(2),
367 }, 363 },
368}; 364};
369 365
@@ -428,8 +424,12 @@ static void __init h2_init(void)
428 omap_cfg_reg(E19_1610_KBR4); 424 omap_cfg_reg(E19_1610_KBR4);
429 omap_cfg_reg(N19_1610_KBR5); 425 omap_cfg_reg(N19_1610_KBR5);
430 426
427 h2_smc91x_resources[1].start = gpio_to_irq(0);
428 h2_smc91x_resources[1].end = gpio_to_irq(0);
431 platform_add_devices(h2_devices, ARRAY_SIZE(h2_devices)); 429 platform_add_devices(h2_devices, ARRAY_SIZE(h2_devices));
432 omap_serial_init(); 430 omap_serial_init();
431 h2_i2c_board_info[0].irq = gpio_to_irq(58);
432 h2_i2c_board_info[1].irq = gpio_to_irq(2);
433 omap_register_i2c_bus(1, 100, h2_i2c_board_info, 433 omap_register_i2c_bus(1, 100, h2_i2c_board_info,
434 ARRAY_SIZE(h2_i2c_board_info)); 434 ARRAY_SIZE(h2_i2c_board_info));
435 omap1_usb_init(&h2_usb_config); 435 omap1_usb_init(&h2_usb_config);
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index 64b8584f64ce..4c19f4c06851 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -247,8 +247,6 @@ static struct resource smc91x_resources[] = {
247 .flags = IORESOURCE_MEM, 247 .flags = IORESOURCE_MEM,
248 }, 248 },
249 [1] = { 249 [1] = {
250 .start = OMAP_GPIO_IRQ(40),
251 .end = OMAP_GPIO_IRQ(40),
252 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, 250 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
253 }, 251 },
254}; 252};
@@ -338,7 +336,6 @@ static struct spi_board_info h3_spi_board_info[] __initdata = {
338 .modalias = "tsc2101", 336 .modalias = "tsc2101",
339 .bus_num = 2, 337 .bus_num = 2,
340 .chip_select = 0, 338 .chip_select = 0,
341 .irq = OMAP_GPIO_IRQ(H3_TS_GPIO),
342 .max_speed_hz = 16000000, 339 .max_speed_hz = 16000000,
343 /* .platform_data = &tsc_platform_data, */ 340 /* .platform_data = &tsc_platform_data, */
344 }, 341 },
@@ -374,11 +371,9 @@ static struct omap_lcd_config h3_lcd_config __initdata = {
374static struct i2c_board_info __initdata h3_i2c_board_info[] = { 371static struct i2c_board_info __initdata h3_i2c_board_info[] = {
375 { 372 {
376 I2C_BOARD_INFO("tps65013", 0x48), 373 I2C_BOARD_INFO("tps65013", 0x48),
377 /* .irq = OMAP_GPIO_IRQ(??), */
378 }, 374 },
379 { 375 {
380 I2C_BOARD_INFO("isp1301_omap", 0x2d), 376 I2C_BOARD_INFO("isp1301_omap", 0x2d),
381 .irq = OMAP_GPIO_IRQ(14),
382 }, 377 },
383}; 378};
384 379
@@ -420,10 +415,14 @@ static void __init h3_init(void)
420 omap_cfg_reg(E19_1610_KBR4); 415 omap_cfg_reg(E19_1610_KBR4);
421 omap_cfg_reg(N19_1610_KBR5); 416 omap_cfg_reg(N19_1610_KBR5);
422 417
418 smc91x_resources[1].start = gpio_to_irq(40);
419 smc91x_resources[1].end = gpio_to_irq(40);
423 platform_add_devices(devices, ARRAY_SIZE(devices)); 420 platform_add_devices(devices, ARRAY_SIZE(devices));
421 h3_spi_board_info[0].irq = gpio_to_irq(H3_TS_GPIO);
424 spi_register_board_info(h3_spi_board_info, 422 spi_register_board_info(h3_spi_board_info,
425 ARRAY_SIZE(h3_spi_board_info)); 423 ARRAY_SIZE(h3_spi_board_info));
426 omap_serial_init(); 424 omap_serial_init();
425 h3_i2c_board_info[1].irq = gpio_to_irq(14);
427 omap_register_i2c_bus(1, 100, h3_i2c_board_info, 426 omap_register_i2c_bus(1, 100, h3_i2c_board_info,
428 ARRAY_SIZE(h3_i2c_board_info)); 427 ARRAY_SIZE(h3_i2c_board_info));
429 omap1_usb_init(&h3_usb_config); 428 omap1_usb_init(&h3_usb_config);
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index 827d83a96af8..60c06ee23855 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -324,8 +324,6 @@ static struct platform_device gpio_leds_device = {
324 324
325static struct resource htcpld_resources[] = { 325static struct resource htcpld_resources[] = {
326 [0] = { 326 [0] = {
327 .start = OMAP_GPIO_IRQ(HTCHERALD_GIRQ_BTNS),
328 .end = OMAP_GPIO_IRQ(HTCHERALD_GIRQ_BTNS),
329 .flags = IORESOURCE_IRQ, 327 .flags = IORESOURCE_IRQ,
330 }, 328 },
331}; 329};
@@ -450,7 +448,6 @@ static struct spi_board_info __initdata htcherald_spi_board_info[] = {
450 { 448 {
451 .modalias = "ads7846", 449 .modalias = "ads7846",
452 .platform_data = &htcherald_ts_platform_data, 450 .platform_data = &htcherald_ts_platform_data,
453 .irq = OMAP_GPIO_IRQ(HTCHERALD_GPIO_TS),
454 .max_speed_hz = 2500000, 451 .max_speed_hz = 2500000,
455 .bus_num = 2, 452 .bus_num = 2,
456 .chip_select = 1, 453 .chip_select = 1,
@@ -576,6 +573,8 @@ static void __init htcherald_init(void)
576 printk(KERN_INFO "HTC Herald init.\n"); 573 printk(KERN_INFO "HTC Herald init.\n");
577 574
578 /* Do board initialization before we register all the devices */ 575 /* Do board initialization before we register all the devices */
576 htcpld_resources[0].start = gpio_to_irq(HTCHERALD_GIRQ_BTNS);
577 htcpld_resources[0].end = gpio_to_irq(HTCHERALD_GIRQ_BTNS);
579 platform_add_devices(devices, ARRAY_SIZE(devices)); 578 platform_add_devices(devices, ARRAY_SIZE(devices));
580 579
581 htcherald_disable_watchdog(); 580 htcherald_disable_watchdog();
@@ -583,6 +582,7 @@ static void __init htcherald_init(void)
583 htcherald_usb_enable(); 582 htcherald_usb_enable();
584 omap1_usb_init(&htcherald_usb_config); 583 omap1_usb_init(&htcherald_usb_config);
585 584
585 htcherald_spi_board_info[0].irq = gpio_to_irq(HTCHERALD_GPIO_TS);
586 spi_register_board_info(htcherald_spi_board_info, 586 spi_register_board_info(htcherald_spi_board_info,
587 ARRAY_SIZE(htcherald_spi_board_info)); 587 ARRAY_SIZE(htcherald_spi_board_info));
588 588
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index 61219182d16a..67d7fd57a692 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -248,8 +248,6 @@ static struct resource innovator1610_smc91x_resources[] = {
248 .flags = IORESOURCE_MEM, 248 .flags = IORESOURCE_MEM,
249 }, 249 },
250 [1] = { 250 [1] = {
251 .start = OMAP_GPIO_IRQ(0),
252 .end = OMAP_GPIO_IRQ(0),
253 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, 251 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
254 }, 252 },
255}; 253};
@@ -409,6 +407,8 @@ static void __init innovator_init(void)
409#endif 407#endif
410#ifdef CONFIG_ARCH_OMAP16XX 408#ifdef CONFIG_ARCH_OMAP16XX
411 if (!cpu_is_omap1510()) { 409 if (!cpu_is_omap1510()) {
410 innovator1610_smc91x_resources[1].start = gpio_to_irq(0);
411 innovator1610_smc91x_resources[1].end = gpio_to_irq(0);
412 platform_add_devices(innovator1610_devices, ARRAY_SIZE(innovator1610_devices)); 412 platform_add_devices(innovator1610_devices, ARRAY_SIZE(innovator1610_devices));
413 } 413 }
414#endif 414#endif
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index fe95ec5f6f03..d21dcc2fbc5a 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -147,7 +147,6 @@ static struct spi_board_info nokia770_spi_board_info[] __initdata = {
147 .bus_num = 2, 147 .bus_num = 2,
148 .chip_select = 0, 148 .chip_select = 0,
149 .max_speed_hz = 2500000, 149 .max_speed_hz = 2500000,
150 .irq = OMAP_GPIO_IRQ(15),
151 .platform_data = &nokia770_ads7846_platform_data, 150 .platform_data = &nokia770_ads7846_platform_data,
152 }, 151 },
153}; 152};
@@ -237,6 +236,7 @@ static void __init omap_nokia770_init(void)
237 omap_writew((omap_readw(0xfffb5004) & ~2), 0xfffb5004); 236 omap_writew((omap_readw(0xfffb5004) & ~2), 0xfffb5004);
238 237
239 platform_add_devices(nokia770_devices, ARRAY_SIZE(nokia770_devices)); 238 platform_add_devices(nokia770_devices, ARRAY_SIZE(nokia770_devices));
239 nokia770_spi_board_info[1].irq = gpio_to_irq(15);
240 spi_register_board_info(nokia770_spi_board_info, 240 spi_register_board_info(nokia770_spi_board_info,
241 ARRAY_SIZE(nokia770_spi_board_info)); 241 ARRAY_SIZE(nokia770_spi_board_info));
242 omap_serial_init(); 242 omap_serial_init();
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index 1fe347396f4d..a5f85dda3f69 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -129,8 +129,6 @@ static struct resource osk5912_smc91x_resources[] = {
129 .flags = IORESOURCE_MEM, 129 .flags = IORESOURCE_MEM,
130 }, 130 },
131 [1] = { 131 [1] = {
132 .start = OMAP_GPIO_IRQ(0),
133 .end = OMAP_GPIO_IRQ(0),
134 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, 132 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
135 }, 133 },
136}; 134};
@@ -147,8 +145,6 @@ static struct platform_device osk5912_smc91x_device = {
147 145
148static struct resource osk5912_cf_resources[] = { 146static struct resource osk5912_cf_resources[] = {
149 [0] = { 147 [0] = {
150 .start = OMAP_GPIO_IRQ(62),
151 .end = OMAP_GPIO_IRQ(62),
152 .flags = IORESOURCE_IRQ, 148 .flags = IORESOURCE_IRQ,
153 }, 149 },
154}; 150};
@@ -240,7 +236,6 @@ static struct tps65010_board tps_board = {
240static struct i2c_board_info __initdata osk_i2c_board_info[] = { 236static struct i2c_board_info __initdata osk_i2c_board_info[] = {
241 { 237 {
242 I2C_BOARD_INFO("tps65010", 0x48), 238 I2C_BOARD_INFO("tps65010", 0x48),
243 .irq = OMAP_GPIO_IRQ(OMAP_MPUIO(1)),
244 .platform_data = &tps_board, 239 .platform_data = &tps_board,
245 240
246 }, 241 },
@@ -408,7 +403,6 @@ static struct spi_board_info __initdata mistral_boardinfo[] = { {
408 /* MicroWire (bus 2) CS0 has an ads7846e */ 403 /* MicroWire (bus 2) CS0 has an ads7846e */
409 .modalias = "ads7846", 404 .modalias = "ads7846",
410 .platform_data = &mistral_ts_info, 405 .platform_data = &mistral_ts_info,
411 .irq = OMAP_GPIO_IRQ(4),
412 .max_speed_hz = 120000 /* max sample rate at 3V */ 406 .max_speed_hz = 120000 /* max sample rate at 3V */
413 * 26 /* command + data + overhead */, 407 * 26 /* command + data + overhead */,
414 .bus_num = 2, 408 .bus_num = 2,
@@ -471,6 +465,7 @@ static void __init osk_mistral_init(void)
471 gpio_direction_input(4); 465 gpio_direction_input(4);
472 irq_set_irq_type(gpio_to_irq(4), IRQ_TYPE_EDGE_FALLING); 466 irq_set_irq_type(gpio_to_irq(4), IRQ_TYPE_EDGE_FALLING);
473 467
468 mistral_boardinfo[0].irq = gpio_to_irq(4);
474 spi_register_board_info(mistral_boardinfo, 469 spi_register_board_info(mistral_boardinfo,
475 ARRAY_SIZE(mistral_boardinfo)); 470 ARRAY_SIZE(mistral_boardinfo));
476 471
@@ -542,6 +537,10 @@ static void __init osk_init(void)
542 537
543 osk_flash_resource.end = osk_flash_resource.start = omap_cs3_phys(); 538 osk_flash_resource.end = osk_flash_resource.start = omap_cs3_phys();
544 osk_flash_resource.end += SZ_32M - 1; 539 osk_flash_resource.end += SZ_32M - 1;
540 osk5912_smc91x_resources[1].start = gpio_to_irq(0);
541 osk5912_smc91x_resources[1].end = gpio_to_irq(0);
542 osk5912_cf_resources[0].start = gpio_to_irq(62);
543 osk5912_cf_resources[0].end = gpio_to_irq(62);
545 platform_add_devices(osk5912_devices, ARRAY_SIZE(osk5912_devices)); 544 platform_add_devices(osk5912_devices, ARRAY_SIZE(osk5912_devices));
546 545
547 l = omap_readl(USB_TRANSCEIVER_CTRL); 546 l = omap_readl(USB_TRANSCEIVER_CTRL);
@@ -556,6 +555,7 @@ static void __init osk_init(void)
556 gpio_direction_input(OMAP_MPUIO(1)); 555 gpio_direction_input(OMAP_MPUIO(1));
557 556
558 omap_serial_init(); 557 omap_serial_init();
558 osk_i2c_board_info[0].irq = gpio_to_irq(OMAP_MPUIO(1));
559 omap_register_i2c_bus(1, 400, osk_i2c_board_info, 559 omap_register_i2c_bus(1, 400, osk_i2c_board_info,
560 ARRAY_SIZE(osk_i2c_board_info)); 560 ARRAY_SIZE(osk_i2c_board_info));
561 osk_mistral_init(); 561 osk_mistral_init();
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index 0863d8e2bdf1..a60e6c22f816 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -217,7 +217,6 @@ static struct spi_board_info palmte_spi_info[] __initdata = {
217 .modalias = "tsc2102", 217 .modalias = "tsc2102",
218 .bus_num = 2, /* uWire (officially) */ 218 .bus_num = 2, /* uWire (officially) */
219 .chip_select = 0, /* As opposed to 3 */ 219 .chip_select = 0, /* As opposed to 3 */
220 .irq = OMAP_GPIO_IRQ(PALMTE_PINTDAV_GPIO),
221 .max_speed_hz = 8000000, 220 .max_speed_hz = 8000000,
222 }, 221 },
223}; 222};
@@ -251,6 +250,7 @@ static void __init omap_palmte_init(void)
251 250
252 platform_add_devices(palmte_devices, ARRAY_SIZE(palmte_devices)); 251 platform_add_devices(palmte_devices, ARRAY_SIZE(palmte_devices));
253 252
253 palmte_spi_info[0].irq = gpio_to_irq(PALMTE_PINTDAV_GPIO);
254 spi_register_board_info(palmte_spi_info, ARRAY_SIZE(palmte_spi_info)); 254 spi_register_board_info(palmte_spi_info, ARRAY_SIZE(palmte_spi_info));
255 palmte_misc_gpio_setup(); 255 palmte_misc_gpio_setup();
256 omap_serial_init(); 256 omap_serial_init();
diff --git a/arch/arm/mach-omap1/board-palmtt.c b/arch/arm/mach-omap1/board-palmtt.c
index 4ff699c509c0..8d854878547b 100644
--- a/arch/arm/mach-omap1/board-palmtt.c
+++ b/arch/arm/mach-omap1/board-palmtt.c
@@ -257,7 +257,6 @@ static struct spi_board_info __initdata palmtt_boardinfo[] = {
257 /* MicroWire (bus 2) CS0 has an ads7846e */ 257 /* MicroWire (bus 2) CS0 has an ads7846e */
258 .modalias = "ads7846", 258 .modalias = "ads7846",
259 .platform_data = &palmtt_ts_info, 259 .platform_data = &palmtt_ts_info,
260 .irq = OMAP_GPIO_IRQ(6),
261 .max_speed_hz = 120000 /* max sample rate at 3V */ 260 .max_speed_hz = 120000 /* max sample rate at 3V */
262 * 26 /* command + data + overhead */, 261 * 26 /* command + data + overhead */,
263 .bus_num = 2, 262 .bus_num = 2,
@@ -298,6 +297,7 @@ static void __init omap_palmtt_init(void)
298 297
299 platform_add_devices(palmtt_devices, ARRAY_SIZE(palmtt_devices)); 298 platform_add_devices(palmtt_devices, ARRAY_SIZE(palmtt_devices));
300 299
300 palmtt_boardinfo[0].irq = gpio_to_irq(6);
301 spi_register_board_info(palmtt_boardinfo,ARRAY_SIZE(palmtt_boardinfo)); 301 spi_register_board_info(palmtt_boardinfo,ARRAY_SIZE(palmtt_boardinfo));
302 omap_serial_init(); 302 omap_serial_init();
303 omap1_usb_init(&palmtt_usb_config); 303 omap1_usb_init(&palmtt_usb_config);
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index abcbbd339aeb..a2c5abcd7c84 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -224,7 +224,6 @@ static struct spi_board_info __initdata palmz71_boardinfo[] = { {
224 /* MicroWire (bus 2) CS0 has an ads7846e */ 224 /* MicroWire (bus 2) CS0 has an ads7846e */
225 .modalias = "ads7846", 225 .modalias = "ads7846",
226 .platform_data = &palmz71_ts_info, 226 .platform_data = &palmz71_ts_info,
227 .irq = OMAP_GPIO_IRQ(PALMZ71_PENIRQ_GPIO),
228 .max_speed_hz = 120000 /* max sample rate at 3V */ 227 .max_speed_hz = 120000 /* max sample rate at 3V */
229 * 26 /* command + data + overhead */, 228 * 26 /* command + data + overhead */,
230 .bus_num = 2, 229 .bus_num = 2,
@@ -313,6 +312,7 @@ omap_palmz71_init(void)
313 312
314 platform_add_devices(devices, ARRAY_SIZE(devices)); 313 platform_add_devices(devices, ARRAY_SIZE(devices));
315 314
315 palmz71_boardinfo[0].irq = gpio_to_irq(PALMZ71_PENIRQ_GPIO);
316 spi_register_board_info(palmz71_boardinfo, 316 spi_register_board_info(palmz71_boardinfo,
317 ARRAY_SIZE(palmz71_boardinfo)); 317 ARRAY_SIZE(palmz71_boardinfo));
318 omap1_usb_init(&palmz71_usb_config); 318 omap1_usb_init(&palmz71_usb_config);
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 659d0f75de2c..37232d04233f 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -44,7 +44,6 @@
44static struct plat_serial8250_port voiceblue_ports[] = { 44static struct plat_serial8250_port voiceblue_ports[] = {
45 { 45 {
46 .mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x40000), 46 .mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x40000),
47 .irq = OMAP_GPIO_IRQ(12),
48 .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, 47 .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
49 .iotype = UPIO_MEM, 48 .iotype = UPIO_MEM,
50 .regshift = 1, 49 .regshift = 1,
@@ -52,7 +51,6 @@ static struct plat_serial8250_port voiceblue_ports[] = {
52 }, 51 },
53 { 52 {
54 .mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x50000), 53 .mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x50000),
55 .irq = OMAP_GPIO_IRQ(13),
56 .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, 54 .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
57 .iotype = UPIO_MEM, 55 .iotype = UPIO_MEM,
58 .regshift = 1, 56 .regshift = 1,
@@ -60,7 +58,6 @@ static struct plat_serial8250_port voiceblue_ports[] = {
60 }, 58 },
61 { 59 {
62 .mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x60000), 60 .mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x60000),
63 .irq = OMAP_GPIO_IRQ(14),
64 .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, 61 .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
65 .iotype = UPIO_MEM, 62 .iotype = UPIO_MEM,
66 .regshift = 1, 63 .regshift = 1,
@@ -68,7 +65,6 @@ static struct plat_serial8250_port voiceblue_ports[] = {
68 }, 65 },
69 { 66 {
70 .mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x70000), 67 .mapbase = (unsigned long)(OMAP_CS1_PHYS + 0x70000),
71 .irq = OMAP_GPIO_IRQ(15),
72 .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP, 68 .flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP,
73 .iotype = UPIO_MEM, 69 .iotype = UPIO_MEM,
74 .regshift = 1, 70 .regshift = 1,
@@ -80,9 +76,6 @@ static struct plat_serial8250_port voiceblue_ports[] = {
80static struct platform_device serial_device = { 76static struct platform_device serial_device = {
81 .name = "serial8250", 77 .name = "serial8250",
82 .id = PLAT8250_DEV_PLATFORM1, 78 .id = PLAT8250_DEV_PLATFORM1,
83 .dev = {
84 .platform_data = voiceblue_ports,
85 },
86}; 79};
87 80
88static int __init ext_uart_init(void) 81static int __init ext_uart_init(void)
@@ -90,6 +83,11 @@ static int __init ext_uart_init(void)
90 if (!machine_is_voiceblue()) 83 if (!machine_is_voiceblue())
91 return -ENODEV; 84 return -ENODEV;
92 85
86 voiceblue_ports[0].irq = gpio_to_irq(12);
87 voiceblue_ports[1].irq = gpio_to_irq(13);
88 voiceblue_ports[2].irq = gpio_to_irq(14);
89 voiceblue_ports[3].irq = gpio_to_irq(15);
90 serial_device.dev.platform_data = voiceblue_ports;
93 return platform_device_register(&serial_device); 91 return platform_device_register(&serial_device);
94} 92}
95arch_initcall(ext_uart_init); 93arch_initcall(ext_uart_init);
@@ -128,8 +126,6 @@ static struct resource voiceblue_smc91x_resources[] = {
128 .flags = IORESOURCE_MEM, 126 .flags = IORESOURCE_MEM,
129 }, 127 },
130 [1] = { 128 [1] = {
131 .start = OMAP_GPIO_IRQ(8),
132 .end = OMAP_GPIO_IRQ(8),
133 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, 129 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
134 }, 130 },
135}; 131};
@@ -275,6 +271,8 @@ static void __init voiceblue_init(void)
275 irq_set_irq_type(gpio_to_irq(14), IRQ_TYPE_EDGE_RISING); 271 irq_set_irq_type(gpio_to_irq(14), IRQ_TYPE_EDGE_RISING);
276 irq_set_irq_type(gpio_to_irq(15), IRQ_TYPE_EDGE_RISING); 272 irq_set_irq_type(gpio_to_irq(15), IRQ_TYPE_EDGE_RISING);
277 273
274 voiceblue_smc91x_resources[1].start = gpio_to_irq(8);
275 voiceblue_smc91x_resources[1].end = gpio_to_irq(8);
278 platform_add_devices(voiceblue_devices, ARRAY_SIZE(voiceblue_devices)); 276 platform_add_devices(voiceblue_devices, ARRAY_SIZE(voiceblue_devices));
279 omap_board_config = voiceblue_config; 277 omap_board_config = voiceblue_config;
280 omap_board_config_size = ARRAY_SIZE(voiceblue_config); 278 omap_board_config_size = ARRAY_SIZE(voiceblue_config);
diff --git a/arch/arm/mach-omap1/flash.c b/arch/arm/mach-omap1/flash.c
index f9bf78d4fdfb..401eb3c080c2 100644
--- a/arch/arm/mach-omap1/flash.c
+++ b/arch/arm/mach-omap1/flash.c
@@ -17,20 +17,12 @@
17 17
18void omap1_set_vpp(struct platform_device *pdev, int enable) 18void omap1_set_vpp(struct platform_device *pdev, int enable)
19{ 19{
20 static int count;
21 u32 l; 20 u32 l;
22 21
23 if (enable) { 22 l = omap_readl(EMIFS_CONFIG);
24 if (count++ == 0) { 23 if (enable)
25 l = omap_readl(EMIFS_CONFIG); 24 l |= OMAP_EMIFS_CONFIG_WP;
26 l |= OMAP_EMIFS_CONFIG_WP; 25 else
27 omap_writel(l, EMIFS_CONFIG); 26 l &= ~OMAP_EMIFS_CONFIG_WP;
28 } 27 omap_writel(l, EMIFS_CONFIG);
29 } else {
30 if (count && (--count == 0)) {
31 l = omap_readl(EMIFS_CONFIG);
32 l &= ~OMAP_EMIFS_CONFIG_WP;
33 omap_writel(l, EMIFS_CONFIG);
34 }
35 }
36} 28}
diff --git a/arch/arm/mach-omap1/include/mach/entry-macro.S b/arch/arm/mach-omap1/include/mach/entry-macro.S
index fa0f32a686aa..88f08cab1717 100644
--- a/arch/arm/mach-omap1/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap1/include/mach/entry-macro.S
@@ -11,7 +11,6 @@
11 */ 11 */
12 12
13#include <mach/hardware.h> 13#include <mach/hardware.h>
14#include <mach/io.h>
15#include <mach/irqs.h> 14#include <mach/irqs.h>
16 15
17#include "../../iomap.h" 16#include "../../iomap.h"
diff --git a/arch/arm/mach-omap1/include/mach/io.h b/arch/arm/mach-omap1/include/mach/io.h
deleted file mode 100644
index 37b12e1fd022..000000000000
--- a/arch/arm/mach-omap1/include/mach/io.h
+++ /dev/null
@@ -1,46 +0,0 @@
1/*
2 * arch/arm/mach-omap1/include/mach/io.h
3 *
4 * IO definitions for TI OMAP processors and boards
5 *
6 * Copied from arch/arm/mach-sa1100/include/mach/io.h
7 * Copyright (C) 1997-1999 Russell King
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
16 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
17 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
20 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 *
25 * You should have received a copy of the GNU General Public License along
26 * with this program; if not, write to the Free Software Foundation, Inc.,
27 * 675 Mass Ave, Cambridge, MA 02139, USA.
28 *
29 * Modifications:
30 * 06-12-1997 RMK Created.
31 * 07-04-1999 RMK Major cleanup
32 */
33
34#ifndef __ASM_ARM_ARCH_IO_H
35#define __ASM_ARM_ARCH_IO_H
36
37#define IO_SPACE_LIMIT 0xffffffff
38
39/*
40 * We don't actually have real ISA nor PCI buses, but there is so many
41 * drivers out there that might just work if we fake them...
42 */
43#define __io(a) __typesafe_io(a)
44#define __mem_pci(a) (a)
45
46#endif
diff --git a/arch/arm/mach-omap1/iomap.h b/arch/arm/mach-omap1/iomap.h
index d68175761c3d..330c4716b028 100644
--- a/arch/arm/mach-omap1/iomap.h
+++ b/arch/arm/mach-omap1/iomap.h
@@ -22,12 +22,6 @@
22 * 675 Mass Ave, Cambridge, MA 02139, USA. 22 * 675 Mass Ave, Cambridge, MA 02139, USA.
23 */ 23 */
24 24
25#ifdef __ASSEMBLER__
26#define IOMEM(x) (x)
27#else
28#define IOMEM(x) ((void __force __iomem *)(x))
29#endif
30
31#define OMAP1_IO_OFFSET 0x01000000 /* Virtual IO = 0xfefb0000 */ 25#define OMAP1_IO_OFFSET 0x01000000 /* Virtual IO = 0xfefb0000 */
32#define OMAP1_IO_ADDRESS(pa) IOMEM((pa) - OMAP1_IO_OFFSET) 26#define OMAP1_IO_ADDRESS(pa) IOMEM((pa) - OMAP1_IO_OFFSET)
33 27
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 306beaca14c5..f66c32912b22 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -44,6 +44,7 @@
44#include <linux/io.h> 44#include <linux/io.h>
45#include <linux/atomic.h> 45#include <linux/atomic.h>
46 46
47#include <asm/system_misc.h>
47#include <asm/irq.h> 48#include <asm/irq.h>
48#include <asm/mach/time.h> 49#include <asm/mach/time.h>
49#include <asm/mach/irq.h> 50#include <asm/mach/irq.h>
diff --git a/arch/arm/mach-omap1/sleep.S b/arch/arm/mach-omap1/sleep.S
index 0779db150da7..0e628743bd03 100644
--- a/arch/arm/mach-omap1/sleep.S
+++ b/arch/arm/mach-omap1/sleep.S
@@ -36,8 +36,6 @@
36 36
37#include <asm/assembler.h> 37#include <asm/assembler.h>
38 38
39#include <mach/io.h>
40
41#include "iomap.h" 39#include "iomap.h"
42#include "pm.h" 40#include "pm.h"
43 41
diff --git a/arch/arm/mach-omap1/sram.S b/arch/arm/mach-omap1/sram.S
index 2ce0b9ab20e5..00e9d9e9adf1 100644
--- a/arch/arm/mach-omap1/sram.S
+++ b/arch/arm/mach-omap1/sram.S
@@ -12,7 +12,6 @@
12 12
13#include <asm/assembler.h> 13#include <asm/assembler.h>
14 14
15#include <mach/io.h>
16#include <mach/hardware.h> 15#include <mach/hardware.h>
17 16
18#include "iomap.h" 17#include "iomap.h"
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index c8bda62900d8..e658f835d0de 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -230,12 +230,12 @@ static struct i2c_board_info __initdata sdp2430_i2c1_boardinfo[] = {
230 { 230 {
231 I2C_BOARD_INFO("isp1301_omap", 0x2D), 231 I2C_BOARD_INFO("isp1301_omap", 0x2D),
232 .flags = I2C_CLIENT_WAKE, 232 .flags = I2C_CLIENT_WAKE,
233 .irq = OMAP_GPIO_IRQ(78),
234 }, 233 },
235}; 234};
236 235
237static int __init omap2430_i2c_init(void) 236static int __init omap2430_i2c_init(void)
238{ 237{
238 sdp2430_i2c1_boardinfo[0].irq = gpio_to_irq(78);
239 omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo, 239 omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo,
240 ARRAY_SIZE(sdp2430_i2c1_boardinfo)); 240 ARRAY_SIZE(sdp2430_i2c1_boardinfo));
241 omap_pmic_init(2, 100, "twl4030", INT_24XX_SYS_NIRQ, 241 omap_pmic_init(2, 100, "twl4030", INT_24XX_SYS_NIRQ,
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 37dcb1bc025e..a39fc4bbd2b8 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -907,7 +907,6 @@ static void __init omap4_sdp4430_wifi_mux_init(void)
907} 907}
908 908
909static struct wl12xx_platform_data omap4_sdp4430_wlan_data __initdata = { 909static struct wl12xx_platform_data omap4_sdp4430_wlan_data __initdata = {
910 .irq = OMAP_GPIO_IRQ(GPIO_WIFI_IRQ),
911 .board_ref_clock = WL12XX_REFCLOCK_26, 910 .board_ref_clock = WL12XX_REFCLOCK_26,
912 .board_tcxo_clock = WL12XX_TCXOCLOCK_26, 911 .board_tcxo_clock = WL12XX_TCXOCLOCK_26,
913}; 912};
@@ -917,6 +916,7 @@ static void __init omap4_sdp4430_wifi_init(void)
917 int ret; 916 int ret;
918 917
919 omap4_sdp4430_wifi_mux_init(); 918 omap4_sdp4430_wifi_mux_init();
919 omap4_sdp4430_wlan_data.irq = gpio_to_irq(GPIO_WIFI_IRQ);
920 ret = wl12xx_set_platform_data(&omap4_sdp4430_wlan_data); 920 ret = wl12xx_set_platform_data(&omap4_sdp4430_wlan_data);
921 if (ret) 921 if (ret)
922 pr_err("Error setting wl12xx data: %d\n", ret); 922 pr_err("Error setting wl12xx data: %d\n", ret);
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index ac773829941f..768ece2e9c3b 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -136,8 +136,6 @@ static struct resource apollon_smc91x_resources[] = {
136 .flags = IORESOURCE_MEM, 136 .flags = IORESOURCE_MEM,
137 }, 137 },
138 [1] = { 138 [1] = {
139 .start = OMAP_GPIO_IRQ(APOLLON_ETHR_GPIO_IRQ),
140 .end = OMAP_GPIO_IRQ(APOLLON_ETHR_GPIO_IRQ),
141 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, 139 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
142 }, 140 },
143}; 141};
@@ -341,6 +339,8 @@ static void __init omap_apollon_init(void)
341 * You have to mux them off in device drivers later on 339 * You have to mux them off in device drivers later on
342 * if not needed. 340 * if not needed.
343 */ 341 */
342 apollon_smc91x_resources[1].start = gpio_to_irq(APOLLON_ETHR_GPIO_IRQ);
343 apollon_smc91x_resources[1].end = gpio_to_irq(APOLLON_ETHR_GPIO_IRQ);
344 platform_add_devices(apollon_devices, ARRAY_SIZE(apollon_devices)); 344 platform_add_devices(apollon_devices, ARRAY_SIZE(apollon_devices));
345 omap_serial_init(); 345 omap_serial_init();
346 omap_sdrc_init(NULL, NULL); 346 omap_sdrc_init(NULL, NULL);
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 11cd2a806093..a2010f07de31 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -411,7 +411,6 @@ static struct resource omap_dm9000_resources[] = {
411 .flags = IORESOURCE_MEM, 411 .flags = IORESOURCE_MEM,
412 }, 412 },
413 [2] = { 413 [2] = {
414 .start = OMAP_GPIO_IRQ(OMAP_DM9000_GPIO_IRQ),
415 .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, 414 .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
416 }, 415 },
417}; 416};
@@ -639,6 +638,7 @@ static void __init devkit8000_init(void)
639 638
640 omap_hsmmc_init(mmc); 639 omap_hsmmc_init(mmc);
641 devkit8000_i2c_init(); 640 devkit8000_i2c_init();
641 omap_dm9000_resources[2].start = gpio_to_irq(OMAP_DM9000_GPIO_IRQ);
642 platform_add_devices(devkit8000_devices, 642 platform_add_devices(devkit8000_devices,
643 ARRAY_SIZE(devkit8000_devices)); 643 ARRAY_SIZE(devkit8000_devices));
644 644
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 54af800d143c..0bbbabe28fcc 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -348,7 +348,6 @@ static struct at24_platform_data m24c01 = {
348static struct i2c_board_info __initdata h4_i2c_board_info[] = { 348static struct i2c_board_info __initdata h4_i2c_board_info[] = {
349 { 349 {
350 I2C_BOARD_INFO("isp1301_omap", 0x2d), 350 I2C_BOARD_INFO("isp1301_omap", 0x2d),
351 .irq = OMAP_GPIO_IRQ(125),
352 }, 351 },
353 { /* EEPROM on mainboard */ 352 { /* EEPROM on mainboard */
354 I2C_BOARD_INFO("24c01", 0x52), 353 I2C_BOARD_INFO("24c01", 0x52),
@@ -377,6 +376,7 @@ static void __init omap_h4_init(void)
377 */ 376 */
378 377
379 board_mkp_init(); 378 board_mkp_init();
379 h4_i2c_board_info[0].irq = gpio_to_irq(125);
380 i2c_register_board_info(1, h4_i2c_board_info, 380 i2c_register_board_info(1, h4_i2c_board_info,
381 ARRAY_SIZE(h4_i2c_board_info)); 381 ARRAY_SIZE(h4_i2c_board_info));
382 382
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index a659e198892b..4c90f078abe1 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -487,7 +487,6 @@ static struct platform_device omap3evm_wlan_regulator = {
487}; 487};
488 488
489struct wl12xx_platform_data omap3evm_wlan_data __initdata = { 489struct wl12xx_platform_data omap3evm_wlan_data __initdata = {
490 .irq = OMAP_GPIO_IRQ(OMAP3EVM_WLAN_IRQ_GPIO),
491 .board_ref_clock = WL12XX_REFCLOCK_38, /* 38.4 MHz */ 490 .board_ref_clock = WL12XX_REFCLOCK_38, /* 38.4 MHz */
492}; 491};
493#endif 492#endif
@@ -623,6 +622,7 @@ static void __init omap3_evm_wl12xx_init(void)
623 int ret; 622 int ret;
624 623
625 /* WL12xx WLAN Init */ 624 /* WL12xx WLAN Init */
625 omap3evm_wlan_data.irq = gpio_to_irq(OMAP3EVM_WLAN_IRQ_GPIO);
626 ret = wl12xx_set_platform_data(&omap3evm_wlan_data); 626 ret = wl12xx_set_platform_data(&omap3evm_wlan_data);
627 if (ret) 627 if (ret)
628 pr_err("error setting wl12xx data: %d\n", ret); 628 pr_err("error setting wl12xx data: %d\n", ret);
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 8bf8e99c358e..d8c0e89f0126 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -231,7 +231,6 @@ static struct platform_device omap_vwlan_device = {
231}; 231};
232 232
233struct wl12xx_platform_data omap_panda_wlan_data __initdata = { 233struct wl12xx_platform_data omap_panda_wlan_data __initdata = {
234 .irq = OMAP_GPIO_IRQ(GPIO_WIFI_IRQ),
235 /* PANDA ref clock is 38.4 MHz */ 234 /* PANDA ref clock is 38.4 MHz */
236 .board_ref_clock = 2, 235 .board_ref_clock = 2,
237}; 236};
@@ -558,6 +557,7 @@ static void __init omap4_panda_init(void)
558 package = OMAP_PACKAGE_CBL; 557 package = OMAP_PACKAGE_CBL;
559 omap4_mux_init(board_mux, NULL, package); 558 omap4_mux_init(board_mux, NULL, package);
560 559
560 omap_panda_wlan_data.irq = gpio_to_irq(GPIO_WIFI_IRQ);
561 ret = wl12xx_set_platform_data(&omap_panda_wlan_data); 561 ret = wl12xx_set_platform_data(&omap_panda_wlan_data);
562 if (ret) 562 if (ret)
563 pr_err("error setting wl12xx data: %d\n", ret); 563 pr_err("error setting wl12xx data: %d\n", ret);
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index f120997309af..d87ee0612098 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -170,7 +170,6 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
170 .modalias = "tsc2005", 170 .modalias = "tsc2005",
171 .bus_num = 1, 171 .bus_num = 1,
172 .chip_select = 0, 172 .chip_select = 0,
173 .irq = OMAP_GPIO_IRQ(RX51_TSC2005_IRQ_GPIO),
174 .max_speed_hz = 6000000, 173 .max_speed_hz = 6000000,
175 .controller_data = &tsc2005_mcspi_config, 174 .controller_data = &tsc2005_mcspi_config,
176 .platform_data = &tsc2005_pdata, 175 .platform_data = &tsc2005_pdata,
@@ -1129,6 +1128,8 @@ static void __init rx51_init_tsc2005(void)
1129 } 1128 }
1130 1129
1131 tsc2005_pdata.set_reset = rx51_tsc2005_set_reset; 1130 tsc2005_pdata.set_reset = rx51_tsc2005_set_reset;
1131 rx51_peripherals_spi_board_info[RX51_SPI_TSC2005].irq =
1132 gpio_to_irq(RX51_TSC2005_IRQ_GPIO);
1132} 1133}
1133 1134
1134void __init rx51_peripherals_init(void) 1135void __init rx51_peripherals_init(void)
diff --git a/arch/arm/mach-omap2/board-zoom-debugboard.c b/arch/arm/mach-omap2/board-zoom-debugboard.c
index 369c2eb7715b..1e8540eabde9 100644
--- a/arch/arm/mach-omap2/board-zoom-debugboard.c
+++ b/arch/arm/mach-omap2/board-zoom-debugboard.c
@@ -43,7 +43,6 @@ static inline void __init zoom_init_smsc911x(void)
43static struct plat_serial8250_port serial_platform_data[] = { 43static struct plat_serial8250_port serial_platform_data[] = {
44 { 44 {
45 .mapbase = ZOOM_UART_BASE, 45 .mapbase = ZOOM_UART_BASE,
46 .irq = OMAP_GPIO_IRQ(102),
47 .flags = UPF_BOOT_AUTOCONF|UPF_IOREMAP|UPF_SHARE_IRQ, 46 .flags = UPF_BOOT_AUTOCONF|UPF_IOREMAP|UPF_SHARE_IRQ,
48 .irqflags = IRQF_SHARED | IRQF_TRIGGER_RISING, 47 .irqflags = IRQF_SHARED | IRQF_TRIGGER_RISING,
49 .iotype = UPIO_MEM, 48 .iotype = UPIO_MEM,
@@ -89,6 +88,8 @@ static inline void __init zoom_init_quaduart(void)
89 if (gpio_request_one(quart_gpio, GPIOF_IN, "TL16CP754C GPIO") < 0) 88 if (gpio_request_one(quart_gpio, GPIOF_IN, "TL16CP754C GPIO") < 0)
90 printk(KERN_ERR "Failed to request GPIO%d for TL16CP754C\n", 89 printk(KERN_ERR "Failed to request GPIO%d for TL16CP754C\n",
91 quart_gpio); 90 quart_gpio);
91
92 serial_platform_data[0].irq = gpio_to_irq(102);
92} 93}
93 94
94static inline int omap_zoom_debugboard_detect(void) 95static inline int omap_zoom_debugboard_detect(void)
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 3d39cdb2e250..b797cb279618 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -193,7 +193,6 @@ static struct platform_device omap_vwlan_device = {
193}; 193};
194 194
195static struct wl12xx_platform_data omap_zoom_wlan_data __initdata = { 195static struct wl12xx_platform_data omap_zoom_wlan_data __initdata = {
196 .irq = OMAP_GPIO_IRQ(OMAP_ZOOM_WLAN_IRQ_GPIO),
197 /* ZOOM ref clock is 26 MHz */ 196 /* ZOOM ref clock is 26 MHz */
198 .board_ref_clock = 1, 197 .board_ref_clock = 1,
199}; 198};
@@ -297,7 +296,10 @@ static void enable_board_wakeup_source(void)
297 296
298void __init zoom_peripherals_init(void) 297void __init zoom_peripherals_init(void)
299{ 298{
300 int ret = wl12xx_set_platform_data(&omap_zoom_wlan_data); 299 int ret;
300
301 omap_zoom_wlan_data.irq = gpio_to_irq(OMAP_ZOOM_WLAN_IRQ_GPIO);
302 ret = wl12xx_set_platform_data(&omap_zoom_wlan_data);
301 303
302 if (ret) 304 if (ret)
303 pr_err("error setting wl12xx data: %d\n", ret); 305 pr_err("error setting wl12xx data: %d\n", ret);
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 981b9f9111a4..480fb8f09aed 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -19,6 +19,7 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/io.h>
22 23
23#include <plat/hardware.h> 24#include <plat/hardware.h>
24#include <plat/clkdev_omap.h> 25#include <plat/clkdev_omap.h>
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 79b98f22f207..c03c1108468e 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -26,6 +26,7 @@
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/clk.h> 28#include <linux/clk.h>
29#include <linux/io.h>
29 30
30#include <plat/hardware.h> 31#include <plat/hardware.h>
31#include <plat/clkdev_omap.h> 32#include <plat/clkdev_omap.h>
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
index 9498b0f5fbd0..1706ebcec08d 100644
--- a/arch/arm/mach-omap2/common-board-devices.c
+++ b/arch/arm/mach-omap2/common-board-devices.c
@@ -76,7 +76,7 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
76 } 76 }
77 77
78 spi_bi->bus_num = bus_num; 78 spi_bi->bus_num = bus_num;
79 spi_bi->irq = OMAP_GPIO_IRQ(gpio_pendown); 79 spi_bi->irq = gpio_to_irq(gpio_pendown);
80 80
81 if (board_pdata) { 81 if (board_pdata) {
82 board_pdata->gpio_pendown = gpio_pendown; 82 board_pdata->gpio_pendown = gpio_pendown;
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 464cffde58fe..535866489ce3 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -87,29 +87,14 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
87 return 0; 87 return 0;
88} 88}
89 89
90/** 90static int __omap3_enter_idle(struct cpuidle_device *dev,
91 * omap3_enter_idle - Programs OMAP3 to enter the specified state
92 * @dev: cpuidle device
93 * @drv: cpuidle driver
94 * @index: the index of state to be entered
95 *
96 * Called from the CPUidle framework to program the device to the
97 * specified target state selected by the governor.
98 */
99static int omap3_enter_idle(struct cpuidle_device *dev,
100 struct cpuidle_driver *drv, 91 struct cpuidle_driver *drv,
101 int index) 92 int index)
102{ 93{
103 struct omap3_idle_statedata *cx = 94 struct omap3_idle_statedata *cx =
104 cpuidle_get_statedata(&dev->states_usage[index]); 95 cpuidle_get_statedata(&dev->states_usage[index]);
105 struct timespec ts_preidle, ts_postidle, ts_idle;
106 u32 mpu_state = cx->mpu_state, core_state = cx->core_state; 96 u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
107 int idle_time;
108
109 /* Used to keep track of the total time in idle */
110 getnstimeofday(&ts_preidle);
111 97
112 local_irq_disable();
113 local_fiq_disable(); 98 local_fiq_disable();
114 99
115 pwrdm_set_next_pwrst(mpu_pd, mpu_state); 100 pwrdm_set_next_pwrst(mpu_pd, mpu_state);
@@ -148,22 +133,29 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
148 } 133 }
149 134
150return_sleep_time: 135return_sleep_time:
151 getnstimeofday(&ts_postidle);
152 ts_idle = timespec_sub(ts_postidle, ts_preidle);
153 136
154 local_irq_enable();
155 local_fiq_enable(); 137 local_fiq_enable();
156 138
157 idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
158 USEC_PER_SEC;
159
160 /* Update cpuidle counters */
161 dev->last_residency = idle_time;
162
163 return index; 139 return index;
164} 140}
165 141
166/** 142/**
143 * omap3_enter_idle - Programs OMAP3 to enter the specified state
144 * @dev: cpuidle device
145 * @drv: cpuidle driver
146 * @index: the index of state to be entered
147 *
148 * Called from the CPUidle framework to program the device to the
149 * specified target state selected by the governor.
150 */
151static inline int omap3_enter_idle(struct cpuidle_device *dev,
152 struct cpuidle_driver *drv,
153 int index)
154{
155 return cpuidle_wrap_enter(dev, drv, index, __omap3_enter_idle);
156}
157
158/**
167 * next_valid_state - Find next valid C-state 159 * next_valid_state - Find next valid C-state
168 * @dev: cpuidle device 160 * @dev: cpuidle device
169 * @drv: cpuidle driver 161 * @drv: cpuidle driver
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 72e018b9b260..f386cbe9c889 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -62,15 +62,9 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
62{ 62{
63 struct omap4_idle_statedata *cx = 63 struct omap4_idle_statedata *cx =
64 cpuidle_get_statedata(&dev->states_usage[index]); 64 cpuidle_get_statedata(&dev->states_usage[index]);
65 struct timespec ts_preidle, ts_postidle, ts_idle;
66 u32 cpu1_state; 65 u32 cpu1_state;
67 int idle_time;
68 int cpu_id = smp_processor_id(); 66 int cpu_id = smp_processor_id();
69 67
70 /* Used to keep track of the total time in idle */
71 getnstimeofday(&ts_preidle);
72
73 local_irq_disable();
74 local_fiq_disable(); 68 local_fiq_disable();
75 69
76 /* 70 /*
@@ -128,26 +122,17 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
128 if (index > 0) 122 if (index > 0)
129 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id); 123 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
130 124
131 getnstimeofday(&ts_postidle);
132 ts_idle = timespec_sub(ts_postidle, ts_preidle);
133
134 local_irq_enable();
135 local_fiq_enable(); 125 local_fiq_enable();
136 126
137 idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
138 USEC_PER_SEC;
139
140 /* Update cpuidle counters */
141 dev->last_residency = idle_time;
142
143 return index; 127 return index;
144} 128}
145 129
146DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev); 130DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
147 131
148struct cpuidle_driver omap4_idle_driver = { 132struct cpuidle_driver omap4_idle_driver = {
149 .name = "omap4_idle", 133 .name = "omap4_idle",
150 .owner = THIS_MODULE, 134 .owner = THIS_MODULE,
135 .en_core_tk_irqen = 1,
151}; 136};
152 137
153static inline void _fill_cstate(struct cpuidle_driver *drv, 138static inline void _fill_cstate(struct cpuidle_driver *drv,
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 9706c648bc19..db5a88a36c63 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -99,7 +99,7 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initdata = {
99 { "dss_hdmi", "omapdss_hdmi", -1 }, 99 { "dss_hdmi", "omapdss_hdmi", -1 },
100}; 100};
101 101
102static void omap4_hdmi_mux_pads(enum omap_hdmi_flags flags) 102static void __init omap4_hdmi_mux_pads(enum omap_hdmi_flags flags)
103{ 103{
104 u32 reg; 104 u32 reg;
105 u16 control_i2c_1; 105 u16 control_i2c_1;
@@ -125,7 +125,7 @@ static void omap4_hdmi_mux_pads(enum omap_hdmi_flags flags)
125 } 125 }
126} 126}
127 127
128static int __init omap4_dsi_mux_pads(int dsi_id, unsigned lanes) 128static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
129{ 129{
130 u32 enable_mask, enable_shift; 130 u32 enable_mask, enable_shift;
131 u32 pipd_mask, pipd_shift; 131 u32 pipd_mask, pipd_shift;
@@ -166,7 +166,7 @@ int __init omap_hdmi_init(enum omap_hdmi_flags flags)
166 return 0; 166 return 0;
167} 167}
168 168
169static int __init omap_dsi_enable_pads(int dsi_id, unsigned lane_mask) 169static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
170{ 170{
171 if (cpu_is_omap44xx()) 171 if (cpu_is_omap44xx())
172 return omap4_dsi_mux_pads(dsi_id, lane_mask); 172 return omap4_dsi_mux_pads(dsi_id, lane_mask);
@@ -174,7 +174,7 @@ static int __init omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
174 return 0; 174 return 0;
175} 175}
176 176
177static void __init omap_dsi_disable_pads(int dsi_id, unsigned lane_mask) 177static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
178{ 178{
179 if (cpu_is_omap44xx()) 179 if (cpu_is_omap44xx())
180 omap4_dsi_mux_pads(dsi_id, 0); 180 omap4_dsi_mux_pads(dsi_id, 0);
diff --git a/arch/arm/mach-omap2/include/mach/io.h b/arch/arm/mach-omap2/include/mach/io.h
deleted file mode 100644
index b8758c8a9394..000000000000
--- a/arch/arm/mach-omap2/include/mach/io.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * arch/arm/mach-omap2/include/mach/io.h
3 *
4 * IO definitions for TI OMAP processors and boards
5 *
6 * Copied from arch/arm/mach-sa1100/include/mach/io.h
7 * Copyright (C) 1997-1999 Russell King
8 *
9 * Copyright (C) 2009 Texas Instruments
10 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
24 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * You should have received a copy of the GNU General Public License along
29 * with this program; if not, write to the Free Software Foundation, Inc.,
30 * 675 Mass Ave, Cambridge, MA 02139, USA.
31 *
32 * Modifications:
33 * 06-12-1997 RMK Created.
34 * 07-04-1999 RMK Major cleanup
35 */
36
37#ifndef __ASM_ARM_ARCH_IO_H
38#define __ASM_ARM_ARCH_IO_H
39
40#define IO_SPACE_LIMIT 0xffffffff
41
42/*
43 * We don't actually have real ISA nor PCI buses, but there is so many
44 * drivers out there that might just work if we fake them...
45 */
46#define __io(a) __typesafe_io(a)
47#define __mem_pci(a) (a)
48
49#endif
diff --git a/arch/arm/mach-omap2/iomap.h b/arch/arm/mach-omap2/iomap.h
index e6f958165296..0812b154f5b5 100644
--- a/arch/arm/mach-omap2/iomap.h
+++ b/arch/arm/mach-omap2/iomap.h
@@ -22,12 +22,6 @@
22 * 675 Mass Ave, Cambridge, MA 02139, USA. 22 * 675 Mass Ave, Cambridge, MA 02139, USA.
23 */ 23 */
24 24
25#ifdef __ASSEMBLER__
26#define IOMEM(x) (x)
27#else
28#define IOMEM(x) ((void __force __iomem *)(x))
29#endif
30
31#define OMAP2_L3_IO_OFFSET 0x90000000 25#define OMAP2_L3_IO_OFFSET 0x90000000
32#define OMAP2_L3_IO_ADDRESS(pa) IOMEM((pa) + OMAP2_L3_IO_OFFSET) /* L3 */ 26#define OMAP2_L3_IO_ADDRESS(pa) IOMEM((pa) + OMAP2_L3_IO_OFFSET) /* L3 */
33 27
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index a7bdec69a2b3..d0c1c9695996 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -17,6 +17,8 @@
17#include <linux/export.h> 17#include <linux/export.h>
18#include <linux/suspend.h> 18#include <linux/suspend.h>
19 19
20#include <asm/system_misc.h>
21
20#include <plat/omap-pm.h> 22#include <plat/omap-pm.h>
21#include <plat/omap_device.h> 23#include <plat/omap_device.h>
22#include "common.h" 24#include "common.h"
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index d2513ac79ff5..2e6454c8d4ba 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -57,5 +57,14 @@ struct meminfo;
57struct tag; 57struct tag;
58extern void __init tag_fixup_mem32(struct tag *, char **, struct meminfo *); 58extern void __init tag_fixup_mem32(struct tag *, char **, struct meminfo *);
59 59
60/*****************************************************************************
61 * Helpers to access Orion registers
62 ****************************************************************************/
63/*
64 * These are not preempt-safe. Locks, if needed, must be taken
65 * care of by the caller.
66 */
67#define orion5x_setbits(r, mask) writel(readl(r) | (mask), (r))
68#define orion5x_clrbits(r, mask) writel(readl(r) & ~(mask), (r))
60 69
61#endif 70#endif
diff --git a/arch/arm/mach-orion5x/include/mach/io.h b/arch/arm/mach-orion5x/include/mach/io.h
deleted file mode 100644
index e9d9afdc2659..000000000000
--- a/arch/arm/mach-orion5x/include/mach/io.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * arch/arm/mach-orion5x/include/mach/io.h
3 *
4 * Tzachi Perelstein <tzachi@marvell.com>
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10
11#ifndef __ASM_ARCH_IO_H
12#define __ASM_ARCH_IO_H
13
14#include "orion5x.h"
15
16#define IO_SPACE_LIMIT 0xffffffff
17
18#define __io(a) __typesafe_io(a)
19#define __mem_pci(a) (a)
20
21
22/*****************************************************************************
23 * Helpers to access Orion registers
24 ****************************************************************************/
25/*
26 * These are not preempt-safe. Locks, if needed, must be taken
27 * care of by the caller.
28 */
29#define orion5x_setbits(r, mask) writel(readl(r) | (mask), (r))
30#define orion5x_clrbits(r, mask) writel(readl(r) & ~(mask), (r))
31
32
33#endif
diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
index d6a91948e4dc..cb19e1661bb3 100644
--- a/arch/arm/mach-orion5x/pci.c
+++ b/arch/arm/mach-orion5x/pci.c
@@ -19,6 +19,7 @@
19#include <asm/mach/pci.h> 19#include <asm/mach/pci.h>
20#include <plat/pcie.h> 20#include <plat/pcie.h>
21#include <plat/addr-map.h> 21#include <plat/addr-map.h>
22#include <mach/orion5x.h>
22#include "common.h" 23#include "common.h"
23 24
24/***************************************************************************** 25/*****************************************************************************
diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c
index c9abb8fbfa70..7189827d641d 100644
--- a/arch/arm/mach-orion5x/tsx09-common.c
+++ b/arch/arm/mach-orion5x/tsx09-common.c
@@ -15,6 +15,7 @@
15#include <linux/mv643xx_eth.h> 15#include <linux/mv643xx_eth.h>
16#include <linux/timex.h> 16#include <linux/timex.h>
17#include <linux/serial_reg.h> 17#include <linux/serial_reg.h>
18#include <mach/orion5x.h>
18#include "tsx09-common.h" 19#include "tsx09-common.h"
19#include "common.h" 20#include "common.h"
20 21
diff --git a/arch/arm/mach-picoxcell/include/mach/io.h b/arch/arm/mach-picoxcell/include/mach/io.h
deleted file mode 100644
index 7573ec7d10a3..000000000000
--- a/arch/arm/mach-picoxcell/include/mach/io.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright (c) 2011 Picochip Ltd., Jamie Iles
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#ifndef __ASM_ARM_ARCH_IO_H
15#define __ASM_ARM_ARCH_IO_H
16
17/* No ioports, but needed for driver compatibility. */
18#define __io(a) __typesafe_io(a)
19/* No PCI possible on picoxcell. */
20#define __mem_pci(a) (a)
21
22#endif /* __ASM_ARM_ARCH_IO_H */
diff --git a/arch/arm/mach-picoxcell/include/mach/irqs.h b/arch/arm/mach-picoxcell/include/mach/irqs.h
deleted file mode 100644
index 59eac1ee2820..000000000000
--- a/arch/arm/mach-picoxcell/include/mach/irqs.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * Copyright (c) 2011 Picochip Ltd., Jamie Iles
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#ifndef __MACH_IRQS_H
15#define __MACH_IRQS_H
16
17/* We dynamically allocate our irq_desc's. */
18#define NR_IRQS 0
19
20#endif /* __MACH_IRQS_H */
diff --git a/arch/arm/mach-pnx4008/include/mach/io.h b/arch/arm/mach-pnx4008/include/mach/io.h
deleted file mode 100644
index cbf0904540ea..000000000000
--- a/arch/arm/mach-pnx4008/include/mach/io.h
+++ /dev/null
@@ -1,21 +0,0 @@
1
2/*
3 * arch/arm/mach-pnx4008/include/mach/io.h
4 *
5 * Author: Dmitry Chigirev <chigirev@ru.mvista.com>
6 *
7 * 2005 (c) MontaVista Software, Inc. This file is licensed under
8 * the terms of the GNU General Public License version 2. This program
9 * is licensed "as is" without any warranty of any kind, whether express
10 * or implied.
11 */
12
13#ifndef __ASM_ARM_ARCH_IO_H
14#define __ASM_ARM_ARCH_IO_H
15
16#define IO_SPACE_LIMIT 0xffffffff
17
18#define __io(a) __typesafe_io(a)
19#define __mem_pci(a) (a)
20
21#endif
diff --git a/arch/arm/mach-prima2/include/mach/io.h b/arch/arm/mach-prima2/include/mach/io.h
deleted file mode 100644
index 6c31e9ec279e..000000000000
--- a/arch/arm/mach-prima2/include/mach/io.h
+++ /dev/null
@@ -1,16 +0,0 @@
1/*
2 * arch/arm/mach-prima2/include/mach/io.h
3 *
4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
5 *
6 * Licensed under GPLv2 or later.
7 */
8
9#ifndef __MACH_PRIMA2_IO_H
10#define __MACH_PRIMA2_IO_H
11
12#define IO_SPACE_LIMIT ((resource_size_t)0)
13
14#define __mem_pci(a) (a)
15
16#endif
diff --git a/arch/arm/mach-prima2/timer.c b/arch/arm/mach-prima2/timer.c
index b7a6091ce791..0d024b1e916d 100644
--- a/arch/arm/mach-prima2/timer.c
+++ b/arch/arm/mach-prima2/timer.c
@@ -18,6 +18,7 @@
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/of_address.h> 19#include <linux/of_address.h>
20#include <mach/map.h> 20#include <mach/map.h>
21#include <asm/sched_clock.h>
21#include <asm/mach/time.h> 22#include <asm/mach/time.h>
22 23
23#define SIRFSOC_TIMER_COUNTER_LO 0x0000 24#define SIRFSOC_TIMER_COUNTER_LO 0x0000
@@ -165,21 +166,9 @@ static struct irqaction sirfsoc_timer_irq = {
165}; 166};
166 167
167/* Overwrite weak default sched_clock with more precise one */ 168/* Overwrite weak default sched_clock with more precise one */
168unsigned long long notrace sched_clock(void) 169static u32 notrace sirfsoc_read_sched_clock(void)
169{ 170{
170 static int is_mapped; 171 return (u32)(sirfsoc_timer_read(NULL) & 0xffffffff);
171
172 /*
173 * sched_clock is called earlier than .init of sys_timer
174 * if we map timer memory in .init of sys_timer, system
175 * will panic due to illegal memory access
176 */
177 if (!is_mapped) {
178 sirfsoc_of_timer_map();
179 is_mapped = 1;
180 }
181
182 return sirfsoc_timer_read(NULL) * (NSEC_PER_SEC / CLOCK_TICK_RATE);
183} 172}
184 173
185static void __init sirfsoc_clockevent_init(void) 174static void __init sirfsoc_clockevent_init(void)
@@ -210,6 +199,8 @@ static void __init sirfsoc_timer_init(void)
210 BUG_ON(rate < CLOCK_TICK_RATE); 199 BUG_ON(rate < CLOCK_TICK_RATE);
211 BUG_ON(rate % CLOCK_TICK_RATE); 200 BUG_ON(rate % CLOCK_TICK_RATE);
212 201
202 sirfsoc_of_timer_map();
203
213 writel_relaxed(rate / CLOCK_TICK_RATE / 2 - 1, sirfsoc_timer_base + SIRFSOC_TIMER_DIV); 204 writel_relaxed(rate / CLOCK_TICK_RATE / 2 - 1, sirfsoc_timer_base + SIRFSOC_TIMER_DIV);
214 writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO); 205 writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
215 writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); 206 writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
@@ -217,6 +208,8 @@ static void __init sirfsoc_timer_init(void)
217 208
218 BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE)); 209 BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, CLOCK_TICK_RATE));
219 210
211 setup_sched_clock(sirfsoc_read_sched_clock, 32, CLOCK_TICK_RATE);
212
220 BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq)); 213 BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
221 214
222 sirfsoc_clockevent_init(); 215 sirfsoc_clockevent_init();
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 61d3c72ded84..109ccd2a8885 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -108,6 +108,7 @@ config CSB726_CSB701
108 108
109config MACH_ARMCORE 109config MACH_ARMCORE
110 bool "CompuLab CM-X255/CM-X270 modules" 110 bool "CompuLab CM-X255/CM-X270 modules"
111 select ARCH_HAS_DMA_SET_COHERENT_MASK if PCI
111 select PXA27x 112 select PXA27x
112 select IWMMXT 113 select IWMMXT
113 select PXA25x 114 select PXA25x
diff --git a/arch/arm/mach-pxa/capc7117.c b/arch/arm/mach-pxa/capc7117.c
index c91727d1fe09..9a8760b72913 100644
--- a/arch/arm/mach-pxa/capc7117.c
+++ b/arch/arm/mach-pxa/capc7117.c
@@ -150,6 +150,7 @@ MACHINE_START(CAPC7117,
150 "Embedian CAPC-7117 evaluation kit based on the MXM-8x10 CoM") 150 "Embedian CAPC-7117 evaluation kit based on the MXM-8x10 CoM")
151 .atag_offset = 0x100, 151 .atag_offset = 0x100,
152 .map_io = pxa3xx_map_io, 152 .map_io = pxa3xx_map_io,
153 .nr_irqs = PXA_NR_IRQS,
153 .init_irq = pxa3xx_init_irq, 154 .init_irq = pxa3xx_init_irq,
154 .handle_irq = pxa3xx_handle_irq, 155 .handle_irq = pxa3xx_handle_irq,
155 .timer = &pxa_timer, 156 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/clock-pxa2xx.c b/arch/arm/mach-pxa/clock-pxa2xx.c
index 1d5859d9a0e3..9ee2ad6a0a07 100644
--- a/arch/arm/mach-pxa/clock-pxa2xx.c
+++ b/arch/arm/mach-pxa/clock-pxa2xx.c
@@ -9,6 +9,7 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/io.h>
12#include <linux/syscore_ops.h> 13#include <linux/syscore_ops.h>
13 14
14#include <mach/pxa2xx-regs.h> 15#include <mach/pxa2xx-regs.h>
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index 895ee8c45009..313274016277 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -714,7 +714,6 @@ struct da9030_battery_info cm_x300_battery_info = {
714 714
715static struct regulator_consumer_supply buck2_consumers[] = { 715static struct regulator_consumer_supply buck2_consumers[] = {
716 { 716 {
717 .dev = NULL,
718 .supply = "vcc_core", 717 .supply = "vcc_core",
719 }, 718 },
720}; 719};
@@ -854,6 +853,7 @@ static void __init cm_x300_fixup(struct tag *tags, char **cmdline,
854MACHINE_START(CM_X300, "CM-X300 module") 853MACHINE_START(CM_X300, "CM-X300 module")
855 .atag_offset = 0x100, 854 .atag_offset = 0x100,
856 .map_io = pxa3xx_map_io, 855 .map_io = pxa3xx_map_io,
856 .nr_irqs = PXA_NR_IRQS,
857 .init_irq = pxa3xx_init_irq, 857 .init_irq = pxa3xx_init_irq,
858 .handle_irq = pxa3xx_handle_irq, 858 .handle_irq = pxa3xx_handle_irq,
859 .timer = &pxa_timer, 859 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c
index 29d5d541f602..b2f227d36125 100644
--- a/arch/arm/mach-pxa/colibri-pxa270.c
+++ b/arch/arm/mach-pxa/colibri-pxa270.c
@@ -310,6 +310,7 @@ MACHINE_START(COLIBRI, "Toradex Colibri PXA270")
310 .atag_offset = 0x100, 310 .atag_offset = 0x100,
311 .init_machine = colibri_pxa270_init, 311 .init_machine = colibri_pxa270_init,
312 .map_io = pxa27x_map_io, 312 .map_io = pxa27x_map_io,
313 .nr_irqs = PXA_NR_IRQS,
313 .init_irq = pxa27x_init_irq, 314 .init_irq = pxa27x_init_irq,
314 .handle_irq = pxa27x_handle_irq, 315 .handle_irq = pxa27x_handle_irq,
315 .timer = &pxa_timer, 316 .timer = &pxa_timer,
@@ -320,6 +321,7 @@ MACHINE_START(INCOME, "Income s.r.o. SH-Dmaster PXA270 SBC")
320 .atag_offset = 0x100, 321 .atag_offset = 0x100,
321 .init_machine = colibri_pxa270_income_init, 322 .init_machine = colibri_pxa270_income_init,
322 .map_io = pxa27x_map_io, 323 .map_io = pxa27x_map_io,
324 .nr_irqs = PXA_NR_IRQS,
323 .init_irq = pxa27x_init_irq, 325 .init_irq = pxa27x_init_irq,
324 .handle_irq = pxa27x_handle_irq, 326 .handle_irq = pxa27x_handle_irq,
325 .timer = &pxa_timer, 327 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c
index 0846d210cb05..bb6def8ec979 100644
--- a/arch/arm/mach-pxa/colibri-pxa300.c
+++ b/arch/arm/mach-pxa/colibri-pxa300.c
@@ -186,6 +186,7 @@ MACHINE_START(COLIBRI300, "Toradex Colibri PXA300")
186 .atag_offset = 0x100, 186 .atag_offset = 0x100,
187 .init_machine = colibri_pxa300_init, 187 .init_machine = colibri_pxa300_init,
188 .map_io = pxa3xx_map_io, 188 .map_io = pxa3xx_map_io,
189 .nr_irqs = PXA_NR_IRQS,
189 .init_irq = pxa3xx_init_irq, 190 .init_irq = pxa3xx_init_irq,
190 .handle_irq = pxa3xx_handle_irq, 191 .handle_irq = pxa3xx_handle_irq,
191 .timer = &pxa_timer, 192 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index 6ad3359063af..d88e7b37f1da 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -256,6 +256,7 @@ MACHINE_START(COLIBRI320, "Toradex Colibri PXA320")
256 .atag_offset = 0x100, 256 .atag_offset = 0x100,
257 .init_machine = colibri_pxa320_init, 257 .init_machine = colibri_pxa320_init,
258 .map_io = pxa3xx_map_io, 258 .map_io = pxa3xx_map_io,
259 .nr_irqs = PXA_NR_IRQS,
259 .init_irq = pxa3xx_init_irq, 260 .init_irq = pxa3xx_init_irq,
260 .handle_irq = pxa3xx_handle_irq, 261 .handle_irq = pxa3xx_handle_irq,
261 .timer = &pxa_timer, 262 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index de9d45e673fd..c1fe32db4755 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -729,6 +729,7 @@ static void __init fixup_corgi(struct tag *tags, char **cmdline,
729MACHINE_START(CORGI, "SHARP Corgi") 729MACHINE_START(CORGI, "SHARP Corgi")
730 .fixup = fixup_corgi, 730 .fixup = fixup_corgi,
731 .map_io = pxa25x_map_io, 731 .map_io = pxa25x_map_io,
732 .nr_irqs = PXA_NR_IRQS,
732 .init_irq = pxa25x_init_irq, 733 .init_irq = pxa25x_init_irq,
733 .handle_irq = pxa25x_handle_irq, 734 .handle_irq = pxa25x_handle_irq,
734 .init_machine = corgi_init, 735 .init_machine = corgi_init,
@@ -741,6 +742,7 @@ MACHINE_END
741MACHINE_START(SHEPHERD, "SHARP Shepherd") 742MACHINE_START(SHEPHERD, "SHARP Shepherd")
742 .fixup = fixup_corgi, 743 .fixup = fixup_corgi,
743 .map_io = pxa25x_map_io, 744 .map_io = pxa25x_map_io,
745 .nr_irqs = PXA_NR_IRQS,
744 .init_irq = pxa25x_init_irq, 746 .init_irq = pxa25x_init_irq,
745 .handle_irq = pxa25x_handle_irq, 747 .handle_irq = pxa25x_handle_irq,
746 .init_machine = corgi_init, 748 .init_machine = corgi_init,
@@ -753,6 +755,7 @@ MACHINE_END
753MACHINE_START(HUSKY, "SHARP Husky") 755MACHINE_START(HUSKY, "SHARP Husky")
754 .fixup = fixup_corgi, 756 .fixup = fixup_corgi,
755 .map_io = pxa25x_map_io, 757 .map_io = pxa25x_map_io,
758 .nr_irqs = PXA_NR_IRQS,
756 .init_irq = pxa25x_init_irq, 759 .init_irq = pxa25x_init_irq,
757 .handle_irq = pxa25x_handle_irq, 760 .handle_irq = pxa25x_handle_irq,
758 .init_machine = corgi_init, 761 .init_machine = corgi_init,
diff --git a/arch/arm/mach-pxa/corgi_pm.c b/arch/arm/mach-pxa/corgi_pm.c
index 39e265cfc86d..048c4299473c 100644
--- a/arch/arm/mach-pxa/corgi_pm.c
+++ b/arch/arm/mach-pxa/corgi_pm.c
@@ -19,6 +19,7 @@
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/apm-emulation.h> 21#include <linux/apm-emulation.h>
22#include <linux/io.h>
22 23
23#include <asm/irq.h> 24#include <asm/irq.h>
24#include <asm/mach-types.h> 25#include <asm/mach-types.h>
diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
index 88fbec05ec50..b85b4ab7aac6 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/cpufreq.h> 16#include <linux/cpufreq.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/io.h>
18 19
19#include <mach/pxa3xx-regs.h> 20#include <mach/pxa3xx-regs.h>
20 21
diff --git a/arch/arm/mach-pxa/csb726.c b/arch/arm/mach-pxa/csb726.c
index fb5a51d834e5..67f0de37f46e 100644
--- a/arch/arm/mach-pxa/csb726.c
+++ b/arch/arm/mach-pxa/csb726.c
@@ -274,6 +274,7 @@ static void __init csb726_init(void)
274MACHINE_START(CSB726, "Cogent CSB726") 274MACHINE_START(CSB726, "Cogent CSB726")
275 .atag_offset = 0x100, 275 .atag_offset = 0x100,
276 .map_io = pxa27x_map_io, 276 .map_io = pxa27x_map_io,
277 .nr_irqs = PXA_NR_IRQS,
277 .init_irq = pxa27x_init_irq, 278 .init_irq = pxa27x_init_irq,
278 .handle_irq = pxa27x_handle_irq, 279 .handle_irq = pxa27x_handle_irq,
279 .init_machine = csb726_init, 280 .init_machine = csb726_init,
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 84f2d7015cfe..166eee5b8a70 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -12,6 +12,7 @@
12#include <mach/pxafb.h> 12#include <mach/pxafb.h>
13#include <mach/mmc.h> 13#include <mach/mmc.h>
14#include <mach/irda.h> 14#include <mach/irda.h>
15#include <mach/irqs.h>
15#include <mach/ohci.h> 16#include <mach/ohci.h>
16#include <plat/pxa27x_keypad.h> 17#include <plat/pxa27x_keypad.h>
17#include <mach/camera.h> 18#include <mach/camera.h>
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index d80c0ba9a095..16ec557b8e43 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -1083,19 +1083,19 @@ static void __init em_x270_userspace_consumers_init(void)
1083} 1083}
1084 1084
1085/* DA9030 related initializations */ 1085/* DA9030 related initializations */
1086#define REGULATOR_CONSUMER(_name, _dev, _supply) \ 1086#define REGULATOR_CONSUMER(_name, _dev_name, _supply) \
1087 static struct regulator_consumer_supply _name##_consumers[] = { \ 1087 static struct regulator_consumer_supply _name##_consumers[] = { \
1088 { \ 1088 { \
1089 .dev = _dev, \ 1089 .dev_name = _dev_name, \
1090 .supply = _supply, \ 1090 .supply = _supply, \
1091 }, \ 1091 }, \
1092 } 1092 }
1093 1093
1094REGULATOR_CONSUMER(ldo3, &em_x270_gps_userspace_consumer.dev, "vcc gps"); 1094REGULATOR_CONSUMER(ldo3, "reg-userspace-consumer.0", "vcc gps");
1095REGULATOR_CONSUMER(ldo5, NULL, "vcc cam"); 1095REGULATOR_CONSUMER(ldo5, NULL, "vcc cam");
1096REGULATOR_CONSUMER(ldo10, &pxa_device_mci.dev, "vcc sdio"); 1096REGULATOR_CONSUMER(ldo10, "pxa2xx-mci", "vcc sdio");
1097REGULATOR_CONSUMER(ldo12, NULL, "vcc usb"); 1097REGULATOR_CONSUMER(ldo12, NULL, "vcc usb");
1098REGULATOR_CONSUMER(ldo19, &em_x270_gprs_userspace_consumer.dev, "vcc gprs"); 1098REGULATOR_CONSUMER(ldo19, "reg-userspace-consumer.1", "vcc gprs");
1099REGULATOR_CONSUMER(buck2, NULL, "vcc_core"); 1099REGULATOR_CONSUMER(buck2, NULL, "vcc_core");
1100 1100
1101#define REGULATOR_INIT(_ldo, _min_uV, _max_uV, _ops_mask) \ 1101#define REGULATOR_INIT(_ldo, _min_uV, _max_uV, _ops_mask) \
@@ -1301,6 +1301,7 @@ static void __init em_x270_init(void)
1301MACHINE_START(EM_X270, "Compulab EM-X270") 1301MACHINE_START(EM_X270, "Compulab EM-X270")
1302 .atag_offset = 0x100, 1302 .atag_offset = 0x100,
1303 .map_io = pxa27x_map_io, 1303 .map_io = pxa27x_map_io,
1304 .nr_irqs = PXA_NR_IRQS,
1304 .init_irq = pxa27x_init_irq, 1305 .init_irq = pxa27x_init_irq,
1305 .handle_irq = pxa27x_handle_irq, 1306 .handle_irq = pxa27x_handle_irq,
1306 .timer = &pxa_timer, 1307 .timer = &pxa_timer,
@@ -1311,6 +1312,7 @@ MACHINE_END
1311MACHINE_START(EXEDA, "Compulab eXeda") 1312MACHINE_START(EXEDA, "Compulab eXeda")
1312 .atag_offset = 0x100, 1313 .atag_offset = 0x100,
1313 .map_io = pxa27x_map_io, 1314 .map_io = pxa27x_map_io,
1315 .nr_irqs = PXA_NR_IRQS,
1314 .init_irq = pxa27x_init_irq, 1316 .init_irq = pxa27x_init_irq,
1315 .handle_irq = pxa27x_handle_irq, 1317 .handle_irq = pxa27x_handle_irq,
1316 .timer = &pxa_timer, 1318 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index ac3b1cef4751..e529a35a44ce 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -235,6 +235,7 @@ static void __init gumstix_init(void)
235MACHINE_START(GUMSTIX, "Gumstix") 235MACHINE_START(GUMSTIX, "Gumstix")
236 .atag_offset = 0x100, /* match u-boot bi_boot_params */ 236 .atag_offset = 0x100, /* match u-boot bi_boot_params */
237 .map_io = pxa25x_map_io, 237 .map_io = pxa25x_map_io,
238 .nr_irqs = PXA_NR_IRQS,
238 .init_irq = pxa25x_init_irq, 239 .init_irq = pxa25x_init_irq,
239 .handle_irq = pxa25x_handle_irq, 240 .handle_irq = pxa25x_handle_irq,
240 .timer = &pxa_timer, 241 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/h5000.c b/arch/arm/mach-pxa/h5000.c
index fde6b4c873c4..e7dec589f014 100644
--- a/arch/arm/mach-pxa/h5000.c
+++ b/arch/arm/mach-pxa/h5000.c
@@ -205,6 +205,7 @@ static void __init h5000_init(void)
205MACHINE_START(H5400, "HP iPAQ H5000") 205MACHINE_START(H5400, "HP iPAQ H5000")
206 .atag_offset = 0x100, 206 .atag_offset = 0x100,
207 .map_io = pxa25x_map_io, 207 .map_io = pxa25x_map_io,
208 .nr_irqs = PXA_NR_IRQS,
208 .init_irq = pxa25x_init_irq, 209 .init_irq = pxa25x_init_irq,
209 .handle_irq = pxa25x_handle_irq, 210 .handle_irq = pxa25x_handle_irq,
210 .timer = &pxa_timer, 211 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/himalaya.c b/arch/arm/mach-pxa/himalaya.c
index 26d069a9f900..2962de898da9 100644
--- a/arch/arm/mach-pxa/himalaya.c
+++ b/arch/arm/mach-pxa/himalaya.c
@@ -160,6 +160,7 @@ static void __init himalaya_init(void)
160MACHINE_START(HIMALAYA, "HTC Himalaya") 160MACHINE_START(HIMALAYA, "HTC Himalaya")
161 .atag_offset = 0x100, 161 .atag_offset = 0x100,
162 .map_io = pxa25x_map_io, 162 .map_io = pxa25x_map_io,
163 .nr_irqs = PXA_NR_IRQS,
163 .init_irq = pxa25x_init_irq, 164 .init_irq = pxa25x_init_irq,
164 .handle_irq = pxa25x_handle_irq, 165 .handle_irq = pxa25x_handle_irq,
165 .init_machine = himalaya_init, 166 .init_machine = himalaya_init,
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index 3fa929d4a4f5..b83b95a29503 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -681,11 +681,9 @@ static struct platform_device power_supply = {
681 681
682static struct regulator_consumer_supply bq24022_consumers[] = { 682static struct regulator_consumer_supply bq24022_consumers[] = {
683 { 683 {
684 .dev = &gpio_vbus.dev,
685 .supply = "vbus_draw", 684 .supply = "vbus_draw",
686 }, 685 },
687 { 686 {
688 .dev = &power_supply.dev,
689 .supply = "ac_draw", 687 .supply = "ac_draw",
690 }, 688 },
691}; 689};
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index 67400192ed3b..1d02eabc9c65 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -193,6 +193,7 @@ static void __init icontrol_init(void)
193MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM") 193MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM")
194 .atag_offset = 0x100, 194 .atag_offset = 0x100,
195 .map_io = pxa3xx_map_io, 195 .map_io = pxa3xx_map_io,
196 .nr_irqs = PXA_NR_IRQS,
196 .init_irq = pxa3xx_init_irq, 197 .init_irq = pxa3xx_init_irq,
197 .handle_irq = pxa3xx_handle_irq, 198 .handle_irq = pxa3xx_handle_irq,
198 .timer = &pxa_timer, 199 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c
index 8af1840e12cc..6ff466bd43e8 100644
--- a/arch/arm/mach-pxa/idp.c
+++ b/arch/arm/mach-pxa/idp.c
@@ -195,6 +195,7 @@ static void __init idp_map_io(void)
195MACHINE_START(PXA_IDP, "Vibren PXA255 IDP") 195MACHINE_START(PXA_IDP, "Vibren PXA255 IDP")
196 /* Maintainer: Vibren Technologies */ 196 /* Maintainer: Vibren Technologies */
197 .map_io = idp_map_io, 197 .map_io = idp_map_io,
198 .nr_irqs = PXA_NR_IRQS,
198 .init_irq = pxa25x_init_irq, 199 .init_irq = pxa25x_init_irq,
199 .handle_irq = pxa25x_handle_irq, 200 .handle_irq = pxa25x_handle_irq,
200 .timer = &pxa_timer, 201 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h
index 8184669dde28..56d92e5cad85 100644
--- a/arch/arm/mach-pxa/include/mach/hardware.h
+++ b/arch/arm/mach-pxa/include/mach/hardware.h
@@ -40,7 +40,6 @@
40#define io_p2v(x) IOMEM(0xf2000000 + ((x) & 0x01ffffff) + (((x) & 0x1c000000) >> 1)) 40#define io_p2v(x) IOMEM(0xf2000000 + ((x) & 0x01ffffff) + (((x) & 0x1c000000) >> 1))
41 41
42#ifndef __ASSEMBLY__ 42#ifndef __ASSEMBLY__
43# define IOMEM(x) ((void __iomem *)(x))
44# define __REG(x) (*((volatile u32 __iomem *)io_p2v(x))) 43# define __REG(x) (*((volatile u32 __iomem *)io_p2v(x)))
45 44
46/* With indexed regs we don't want to feed the index through io_p2v() 45/* With indexed regs we don't want to feed the index through io_p2v()
@@ -52,7 +51,6 @@
52 51
53#else 52#else
54 53
55# define IOMEM(x) x
56# define __REG(x) io_p2v(x) 54# define __REG(x) io_p2v(x)
57# define __PREG(x) io_v2p(x) 55# define __PREG(x) io_v2p(x)
58 56
@@ -337,8 +335,4 @@ extern unsigned int get_memclk_frequency_10khz(void);
337extern unsigned long get_clock_tick_rate(void); 335extern unsigned long get_clock_tick_rate(void);
338#endif 336#endif
339 337
340#if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI)
341#define ARCH_HAS_DMA_SET_COHERENT_MASK
342#endif
343
344#endif /* _ASM_ARCH_HARDWARE_H */ 338#endif /* _ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-pxa/include/mach/io.h b/arch/arm/mach-pxa/include/mach/io.h
deleted file mode 100644
index fdca3be47d9b..000000000000
--- a/arch/arm/mach-pxa/include/mach/io.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * arch/arm/mach-pxa/include/mach/io.h
3 *
4 * Copied from asm/arch/sa1100/io.h
5 */
6#ifndef __ASM_ARM_ARCH_IO_H
7#define __ASM_ARM_ARCH_IO_H
8
9#include <mach/hardware.h>
10
11#define IO_SPACE_LIMIT 0xffffffff
12
13/*
14 * We don't actually have real ISA nor PCI buses, but there is so many
15 * drivers out there that might just work if we fake them...
16 */
17#define __io(a) __typesafe_io(a)
18#define __mem_pci(a) (a)
19
20#endif
diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h
index 32975adf3ca4..8765782dd955 100644
--- a/arch/arm/mach-pxa/include/mach/irqs.h
+++ b/arch/arm/mach-pxa/include/mach/irqs.h
@@ -100,7 +100,7 @@
100 */ 100 */
101#define IRQ_BOARD_START (PXA_GPIO_IRQ_BASE + PXA_NR_BUILTIN_GPIO) 101#define IRQ_BOARD_START (PXA_GPIO_IRQ_BASE + PXA_NR_BUILTIN_GPIO)
102 102
103#define NR_IRQS (IRQ_BOARD_START) 103#define PXA_NR_IRQS (IRQ_BOARD_START)
104 104
105#ifndef __ASSEMBLY__ 105#ifndef __ASSEMBLY__
106struct irq_data; 106struct irq_data;
diff --git a/arch/arm/mach-pxa/include/mach/mainstone.h b/arch/arm/mach-pxa/include/mach/mainstone.h
index 4c2d11cd824d..1bfc4e822a41 100644
--- a/arch/arm/mach-pxa/include/mach/mainstone.h
+++ b/arch/arm/mach-pxa/include/mach/mainstone.h
@@ -13,6 +13,8 @@
13#ifndef ASM_ARCH_MAINSTONE_H 13#ifndef ASM_ARCH_MAINSTONE_H
14#define ASM_ARCH_MAINSTONE_H 14#define ASM_ARCH_MAINSTONE_H
15 15
16#include <mach/irqs.h>
17
16#define MST_ETH_PHYS PXA_CS4_PHYS 18#define MST_ETH_PHYS PXA_CS4_PHYS
17 19
18#define MST_FPGA_PHYS PXA_CS2_PHYS 20#define MST_FPGA_PHYS PXA_CS2_PHYS
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index 6f4785b347c2..8de0651d7efb 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -580,11 +580,9 @@ static struct platform_device power_supply = {
580 580
581static struct regulator_consumer_supply bq24022_consumers[] = { 581static struct regulator_consumer_supply bq24022_consumers[] = {
582 { 582 {
583 .dev = &gpio_vbus.dev,
584 .supply = "vbus_draw", 583 .supply = "vbus_draw",
585 }, 584 },
586 { 585 {
587 .dev = &power_supply.dev,
588 .supply = "ac_draw", 586 .supply = "ac_draw",
589 }, 587 },
590}; 588};
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index 29b62afc6f7c..b0a842887780 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/io.h>
20#include <linux/syscore_ops.h> 21#include <linux/syscore_ops.h>
21 22
22#include <mach/pxa2xx-regs.h> 23#include <mach/pxa2xx-regs.h>
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index e80a3db735c2..061d57009cee 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -758,6 +758,7 @@ MACHINE_START(MIOA701, "MIO A701")
758 .atag_offset = 0x100, 758 .atag_offset = 0x100,
759 .restart_mode = 's', 759 .restart_mode = 's',
760 .map_io = &pxa27x_map_io, 760 .map_io = &pxa27x_map_io,
761 .nr_irqs = PXA_NR_IRQS,
761 .init_irq = &pxa27x_init_irq, 762 .init_irq = &pxa27x_init_irq,
762 .handle_irq = &pxa27x_handle_irq, 763 .handle_irq = &pxa27x_handle_irq,
763 .init_machine = mioa701_machine_init, 764 .init_machine = mioa701_machine_init,
diff --git a/arch/arm/mach-pxa/mp900.c b/arch/arm/mach-pxa/mp900.c
index 169bf8f97af0..152efbf093f6 100644
--- a/arch/arm/mach-pxa/mp900.c
+++ b/arch/arm/mach-pxa/mp900.c
@@ -95,6 +95,7 @@ MACHINE_START(NEC_MP900, "MobilePro900/C")
95 .atag_offset = 0x220100, 95 .atag_offset = 0x220100,
96 .timer = &pxa_timer, 96 .timer = &pxa_timer,
97 .map_io = pxa25x_map_io, 97 .map_io = pxa25x_map_io,
98 .nr_irqs = PXA_NR_IRQS,
98 .init_irq = pxa25x_init_irq, 99 .init_irq = pxa25x_init_irq,
99 .handle_irq = pxa25x_handle_irq, 100 .handle_irq = pxa25x_handle_irq,
100 .init_machine = mp900c_init, 101 .init_machine = mp900c_init,
diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c
index 1fa80f4f80c8..31e0433d83ba 100644
--- a/arch/arm/mach-pxa/palmld.c
+++ b/arch/arm/mach-pxa/palmld.c
@@ -344,6 +344,7 @@ static void __init palmld_init(void)
344MACHINE_START(PALMLD, "Palm LifeDrive") 344MACHINE_START(PALMLD, "Palm LifeDrive")
345 .atag_offset = 0x100, 345 .atag_offset = 0x100,
346 .map_io = palmld_map_io, 346 .map_io = palmld_map_io,
347 .nr_irqs = PXA_NR_IRQS,
347 .init_irq = pxa27x_init_irq, 348 .init_irq = pxa27x_init_irq,
348 .handle_irq = pxa27x_handle_irq, 349 .handle_irq = pxa27x_handle_irq,
349 .timer = &pxa_timer, 350 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/palmt5.c b/arch/arm/mach-pxa/palmt5.c
index 5ba14316bd9c..0f6bd4fcfa3b 100644
--- a/arch/arm/mach-pxa/palmt5.c
+++ b/arch/arm/mach-pxa/palmt5.c
@@ -205,6 +205,7 @@ MACHINE_START(PALMT5, "Palm Tungsten|T5")
205 .atag_offset = 0x100, 205 .atag_offset = 0x100,
206 .map_io = pxa27x_map_io, 206 .map_io = pxa27x_map_io,
207 .reserve = palmt5_reserve, 207 .reserve = palmt5_reserve,
208 .nr_irqs = PXA_NR_IRQS,
208 .init_irq = pxa27x_init_irq, 209 .init_irq = pxa27x_init_irq,
209 .handle_irq = pxa27x_handle_irq, 210 .handle_irq = pxa27x_handle_irq,
210 .timer = &pxa_timer, 211 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index 29b51b40f09d..e2d97eed07a7 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -539,6 +539,7 @@ static void __init palmtc_init(void)
539MACHINE_START(PALMTC, "Palm Tungsten|C") 539MACHINE_START(PALMTC, "Palm Tungsten|C")
540 .atag_offset = 0x100, 540 .atag_offset = 0x100,
541 .map_io = pxa25x_map_io, 541 .map_io = pxa25x_map_io,
542 .nr_irqs = PXA_NR_IRQS,
542 .init_irq = pxa25x_init_irq, 543 .init_irq = pxa25x_init_irq,
543 .handle_irq = pxa25x_handle_irq, 544 .handle_irq = pxa25x_handle_irq,
544 .timer = &pxa_timer, 545 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c
index 5ebf49acb827..c054827c567f 100644
--- a/arch/arm/mach-pxa/palmte2.c
+++ b/arch/arm/mach-pxa/palmte2.c
@@ -358,6 +358,7 @@ static void __init palmte2_init(void)
358MACHINE_START(PALMTE2, "Palm Tungsten|E2") 358MACHINE_START(PALMTE2, "Palm Tungsten|E2")
359 .atag_offset = 0x100, 359 .atag_offset = 0x100,
360 .map_io = pxa25x_map_io, 360 .map_io = pxa25x_map_io,
361 .nr_irqs = PXA_NR_IRQS,
361 .init_irq = pxa25x_init_irq, 362 .init_irq = pxa25x_init_irq,
362 .handle_irq = pxa25x_handle_irq, 363 .handle_irq = pxa25x_handle_irq,
363 .timer = &pxa_timer, 364 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/palmtreo.c b/arch/arm/mach-pxa/palmtreo.c
index ec8249156c08..fbdebee39a53 100644
--- a/arch/arm/mach-pxa/palmtreo.c
+++ b/arch/arm/mach-pxa/palmtreo.c
@@ -448,6 +448,7 @@ MACHINE_START(TREO680, "Palm Treo 680")
448 .atag_offset = 0x100, 448 .atag_offset = 0x100,
449 .map_io = pxa27x_map_io, 449 .map_io = pxa27x_map_io,
450 .reserve = treo_reserve, 450 .reserve = treo_reserve,
451 .nr_irqs = PXA_NR_IRQS,
451 .init_irq = pxa27x_init_irq, 452 .init_irq = pxa27x_init_irq,
452 .handle_irq = pxa27x_handle_irq, 453 .handle_irq = pxa27x_handle_irq,
453 .timer = &pxa_timer, 454 .timer = &pxa_timer,
@@ -461,6 +462,7 @@ MACHINE_START(CENTRO, "Palm Centro 685")
461 .atag_offset = 0x100, 462 .atag_offset = 0x100,
462 .map_io = pxa27x_map_io, 463 .map_io = pxa27x_map_io,
463 .reserve = treo_reserve, 464 .reserve = treo_reserve,
465 .nr_irqs = PXA_NR_IRQS,
464 .init_irq = pxa27x_init_irq, 466 .init_irq = pxa27x_init_irq,
465 .handle_irq = pxa27x_handle_irq, 467 .handle_irq = pxa27x_handle_irq,
466 .timer = &pxa_timer, 468 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c
index 6170d76dfba8..9507605ed547 100644
--- a/arch/arm/mach-pxa/palmtx.c
+++ b/arch/arm/mach-pxa/palmtx.c
@@ -366,6 +366,7 @@ static void __init palmtx_init(void)
366MACHINE_START(PALMTX, "Palm T|X") 366MACHINE_START(PALMTX, "Palm T|X")
367 .atag_offset = 0x100, 367 .atag_offset = 0x100,
368 .map_io = palmtx_map_io, 368 .map_io = palmtx_map_io,
369 .nr_irqs = PXA_NR_IRQS,
369 .init_irq = pxa27x_init_irq, 370 .init_irq = pxa27x_init_irq,
370 .handle_irq = pxa27x_handle_irq, 371 .handle_irq = pxa27x_handle_irq,
371 .timer = &pxa_timer, 372 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index b2dff9d415eb..a97b59965bb9 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -401,6 +401,7 @@ static void __init palmz72_init(void)
401MACHINE_START(PALMZ72, "Palm Zire72") 401MACHINE_START(PALMZ72, "Palm Zire72")
402 .atag_offset = 0x100, 402 .atag_offset = 0x100,
403 .map_io = pxa27x_map_io, 403 .map_io = pxa27x_map_io,
404 .nr_irqs = PXA_NR_IRQS,
404 .init_irq = pxa27x_init_irq, 405 .init_irq = pxa27x_init_irq,
405 .handle_irq = pxa27x_handle_irq, 406 .handle_irq = pxa27x_handle_irq,
406 .timer = &pxa_timer, 407 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/pxa2xx.c b/arch/arm/mach-pxa/pxa2xx.c
index 868270421b8c..f8ec85450c42 100644
--- a/arch/arm/mach-pxa/pxa2xx.c
+++ b/arch/arm/mach-pxa/pxa2xx.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/io.h>
16 17
17#include <mach/hardware.h> 18#include <mach/hardware.h>
18#include <mach/pxa2xx-regs.h> 19#include <mach/pxa2xx-regs.h>
diff --git a/arch/arm/mach-pxa/pxa300.c b/arch/arm/mach-pxa/pxa300.c
index 40bb16501d86..17cbc0c7bdb8 100644
--- a/arch/arm/mach-pxa/pxa300.c
+++ b/arch/arm/mach-pxa/pxa300.c
@@ -16,6 +16,7 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/io.h>
19 20
20#include <mach/pxa300.h> 21#include <mach/pxa300.h>
21 22
diff --git a/arch/arm/mach-pxa/pxa320.c b/arch/arm/mach-pxa/pxa320.c
index 8d614ecd8e99..6dc99d4f2dc6 100644
--- a/arch/arm/mach-pxa/pxa320.c
+++ b/arch/arm/mach-pxa/pxa320.c
@@ -16,6 +16,7 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/io.h>
19 20
20#include <mach/pxa320.h> 21#include <mach/pxa320.h>
21 22
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 1570d457fea3..dffb7e813d98 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -31,6 +31,7 @@
31#include <mach/pm.h> 31#include <mach/pm.h>
32#include <mach/dma.h> 32#include <mach/dma.h>
33#include <mach/smemc.h> 33#include <mach/smemc.h>
34#include <mach/irqs.h>
34 35
35#include "generic.h" 36#include "generic.h"
36#include "devices.h" 37#include "devices.h"
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 22818c7694a8..5905ed130e94 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -43,6 +43,8 @@
43#include <linux/regulator/consumer.h> 43#include <linux/regulator/consumer.h>
44#include <linux/delay.h> 44#include <linux/delay.h>
45 45
46#include <asm/system_info.h>
47
46#include <asm/mach-types.h> 48#include <asm/mach-types.h>
47#include <asm/mach/arch.h> 49#include <asm/mach/arch.h>
48 50
@@ -1090,6 +1092,7 @@ MACHINE_START(RAUMFELD_RC, "Raumfeld Controller")
1090 .atag_offset = 0x100, 1092 .atag_offset = 0x100,
1091 .init_machine = raumfeld_controller_init, 1093 .init_machine = raumfeld_controller_init,
1092 .map_io = pxa3xx_map_io, 1094 .map_io = pxa3xx_map_io,
1095 .nr_irqs = PXA_NR_IRQS,
1093 .init_irq = pxa3xx_init_irq, 1096 .init_irq = pxa3xx_init_irq,
1094 .handle_irq = pxa3xx_handle_irq, 1097 .handle_irq = pxa3xx_handle_irq,
1095 .timer = &pxa_timer, 1098 .timer = &pxa_timer,
@@ -1102,6 +1105,7 @@ MACHINE_START(RAUMFELD_CONNECTOR, "Raumfeld Connector")
1102 .atag_offset = 0x100, 1105 .atag_offset = 0x100,
1103 .init_machine = raumfeld_connector_init, 1106 .init_machine = raumfeld_connector_init,
1104 .map_io = pxa3xx_map_io, 1107 .map_io = pxa3xx_map_io,
1108 .nr_irqs = PXA_NR_IRQS,
1105 .init_irq = pxa3xx_init_irq, 1109 .init_irq = pxa3xx_init_irq,
1106 .handle_irq = pxa3xx_handle_irq, 1110 .handle_irq = pxa3xx_handle_irq,
1107 .timer = &pxa_timer, 1111 .timer = &pxa_timer,
@@ -1114,6 +1118,7 @@ MACHINE_START(RAUMFELD_SPEAKER, "Raumfeld Speaker")
1114 .atag_offset = 0x100, 1118 .atag_offset = 0x100,
1115 .init_machine = raumfeld_speaker_init, 1119 .init_machine = raumfeld_speaker_init,
1116 .map_io = pxa3xx_map_io, 1120 .map_io = pxa3xx_map_io,
1121 .nr_irqs = PXA_NR_IRQS,
1117 .init_irq = pxa3xx_init_irq, 1122 .init_irq = pxa3xx_init_irq,
1118 .handle_irq = pxa3xx_handle_irq, 1123 .handle_irq = pxa3xx_handle_irq,
1119 .timer = &pxa_timer, 1124 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index 0fe354efb931..86c95a5d8533 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -598,6 +598,7 @@ MACHINE_START(SAAR, "PXA930 Handheld Platform (aka SAAR)")
598 /* Maintainer: Eric Miao <eric.miao@marvell.com> */ 598 /* Maintainer: Eric Miao <eric.miao@marvell.com> */
599 .atag_offset = 0x100, 599 .atag_offset = 0x100,
600 .map_io = pxa3xx_map_io, 600 .map_io = pxa3xx_map_io,
601 .nr_irqs = PXA_NR_IRQS,
601 .init_irq = pxa3xx_init_irq, 602 .init_irq = pxa3xx_init_irq,
602 .handle_irq = pxa3xx_handle_irq, 603 .handle_irq = pxa3xx_handle_irq,
603 .timer = &pxa_timer, 604 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index 30989baf7f2a..bdf4cb88ca0a 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -24,6 +24,7 @@
24#include <linux/leds.h> 24#include <linux/leds.h>
25#include <linux/suspend.h> 25#include <linux/suspend.h>
26#include <linux/gpio.h> 26#include <linux/gpio.h>
27#include <linux/io.h>
27 28
28#include <asm/mach-types.h> 29#include <asm/mach-types.h>
29#include <mach/pm.h> 30#include <mach/pm.h>
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index abf355d0c92f..df2ab0fb2ace 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -984,6 +984,7 @@ MACHINE_START(SPITZ, "SHARP Spitz")
984 .restart_mode = 'g', 984 .restart_mode = 'g',
985 .fixup = spitz_fixup, 985 .fixup = spitz_fixup,
986 .map_io = pxa27x_map_io, 986 .map_io = pxa27x_map_io,
987 .nr_irqs = PXA_NR_IRQS,
987 .init_irq = pxa27x_init_irq, 988 .init_irq = pxa27x_init_irq,
988 .handle_irq = pxa27x_handle_irq, 989 .handle_irq = pxa27x_handle_irq,
989 .init_machine = spitz_init, 990 .init_machine = spitz_init,
@@ -997,6 +998,7 @@ MACHINE_START(BORZOI, "SHARP Borzoi")
997 .restart_mode = 'g', 998 .restart_mode = 'g',
998 .fixup = spitz_fixup, 999 .fixup = spitz_fixup,
999 .map_io = pxa27x_map_io, 1000 .map_io = pxa27x_map_io,
1001 .nr_irqs = PXA_NR_IRQS,
1000 .init_irq = pxa27x_init_irq, 1002 .init_irq = pxa27x_init_irq,
1001 .handle_irq = pxa27x_handle_irq, 1003 .handle_irq = pxa27x_handle_irq,
1002 .init_machine = spitz_init, 1004 .init_machine = spitz_init,
@@ -1010,6 +1012,7 @@ MACHINE_START(AKITA, "SHARP Akita")
1010 .restart_mode = 'g', 1012 .restart_mode = 'g',
1011 .fixup = spitz_fixup, 1013 .fixup = spitz_fixup,
1012 .map_io = pxa27x_map_io, 1014 .map_io = pxa27x_map_io,
1015 .nr_irqs = PXA_NR_IRQS,
1013 .init_irq = pxa27x_init_irq, 1016 .init_irq = pxa27x_init_irq,
1014 .handle_irq = pxa27x_handle_irq, 1017 .handle_irq = pxa27x_handle_irq,
1015 .init_machine = spitz_init, 1018 .init_machine = spitz_init,
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index b0656e158d90..4cd645e29b64 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -152,7 +152,7 @@ static struct platform_device sht15 = {
152 152
153static struct regulator_consumer_supply stargate2_sensor_3_con[] = { 153static struct regulator_consumer_supply stargate2_sensor_3_con[] = {
154 { 154 {
155 .dev = &sht15.dev, 155 .dev_name = "sht15",
156 .supply = "vcc", 156 .supply = "vcc",
157 }, 157 },
158}; 158};
@@ -1006,6 +1006,7 @@ static void __init stargate2_init(void)
1006#ifdef CONFIG_MACH_INTELMOTE2 1006#ifdef CONFIG_MACH_INTELMOTE2
1007MACHINE_START(INTELMOTE2, "IMOTE 2") 1007MACHINE_START(INTELMOTE2, "IMOTE 2")
1008 .map_io = pxa27x_map_io, 1008 .map_io = pxa27x_map_io,
1009 .nr_irqs = PXA_NR_IRQS,
1009 .init_irq = pxa27x_init_irq, 1010 .init_irq = pxa27x_init_irq,
1010 .handle_irq = pxa27x_handle_irq, 1011 .handle_irq = pxa27x_handle_irq,
1011 .timer = &pxa_timer, 1012 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/tavorevb.c b/arch/arm/mach-pxa/tavorevb.c
index 9fb38e80e076..736bfdc50ee6 100644
--- a/arch/arm/mach-pxa/tavorevb.c
+++ b/arch/arm/mach-pxa/tavorevb.c
@@ -491,6 +491,7 @@ MACHINE_START(TAVOREVB, "PXA930 Evaluation Board (aka TavorEVB)")
491 /* Maintainer: Eric Miao <eric.miao@marvell.com> */ 491 /* Maintainer: Eric Miao <eric.miao@marvell.com> */
492 .atag_offset = 0x100, 492 .atag_offset = 0x100,
493 .map_io = pxa3xx_map_io, 493 .map_io = pxa3xx_map_io,
494 .nr_irqs = PXA_NR_IRQS,
494 .init_irq = pxa3xx_init_irq, 495 .init_irq = pxa3xx_init_irq,
495 .handle_irq = pxa3xx_handle_irq, 496 .handle_irq = pxa3xx_handle_irq,
496 .timer = &pxa_timer, 497 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
index b503049d6d26..3d6c9bd90de6 100644
--- a/arch/arm/mach-pxa/time.c
+++ b/arch/arm/mach-pxa/time.c
@@ -22,6 +22,7 @@
22#include <asm/mach/time.h> 22#include <asm/mach/time.h>
23#include <asm/sched_clock.h> 23#include <asm/sched_clock.h>
24#include <mach/regs-ost.h> 24#include <mach/regs-ost.h>
25#include <mach/irqs.h>
25 26
26/* 27/*
27 * This is PXA's sched_clock implementation. This has a resolution 28 * This is PXA's sched_clock implementation. This has a resolution
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 0f30af617d8f..2b6ac00b2cd9 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -558,6 +558,7 @@ MACHINE_START(TRIZEPS4, "Keith und Koep Trizeps IV module")
558 .atag_offset = 0x100, 558 .atag_offset = 0x100,
559 .init_machine = trizeps4_init, 559 .init_machine = trizeps4_init,
560 .map_io = trizeps4_map_io, 560 .map_io = trizeps4_map_io,
561 .nr_irqs = PXA_NR_IRQS,
561 .init_irq = pxa27x_init_irq, 562 .init_irq = pxa27x_init_irq,
562 .handle_irq = pxa27x_handle_irq, 563 .handle_irq = pxa27x_handle_irq,
563 .timer = &pxa_timer, 564 .timer = &pxa_timer,
@@ -569,6 +570,7 @@ MACHINE_START(TRIZEPS4WL, "Keith und Koep Trizeps IV-WL module")
569 .atag_offset = 0x100, 570 .atag_offset = 0x100,
570 .init_machine = trizeps4_init, 571 .init_machine = trizeps4_init,
571 .map_io = trizeps4_map_io, 572 .map_io = trizeps4_map_io,
573 .nr_irqs = PXA_NR_IRQS,
572 .init_irq = pxa27x_init_irq, 574 .init_irq = pxa27x_init_irq,
573 .handle_irq = pxa27x_handle_irq, 575 .handle_irq = pxa27x_handle_irq,
574 .timer = &pxa_timer, 576 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 7a3d342a7732..130379fb9d0f 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -995,6 +995,7 @@ MACHINE_START(VIPER, "Arcom/Eurotech VIPER SBC")
995 /* Maintainer: Marc Zyngier <maz@misterjones.org> */ 995 /* Maintainer: Marc Zyngier <maz@misterjones.org> */
996 .atag_offset = 0x100, 996 .atag_offset = 0x100,
997 .map_io = viper_map_io, 997 .map_io = viper_map_io,
998 .nr_irqs = PXA_NR_IRQS,
998 .init_irq = viper_init_irq, 999 .init_irq = viper_init_irq,
999 .handle_irq = pxa25x_handle_irq, 1000 .handle_irq = pxa25x_handle_irq,
1000 .timer = &pxa_timer, 1001 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index 1f5cfa96f6d6..c57ab636ea9c 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -718,6 +718,7 @@ static void __init vpac270_init(void)
718MACHINE_START(VPAC270, "Voipac PXA270") 718MACHINE_START(VPAC270, "Voipac PXA270")
719 .atag_offset = 0x100, 719 .atag_offset = 0x100,
720 .map_io = pxa27x_map_io, 720 .map_io = pxa27x_map_io,
721 .nr_irqs = PXA_NR_IRQS,
721 .init_irq = pxa27x_init_irq, 722 .init_irq = pxa27x_init_irq,
722 .handle_irq = pxa27x_handle_irq, 723 .handle_irq = pxa27x_handle_irq,
723 .timer = &pxa_timer, 724 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/xcep.c b/arch/arm/mach-pxa/xcep.c
index 4bbe9a36fe74..4275713ccd10 100644
--- a/arch/arm/mach-pxa/xcep.c
+++ b/arch/arm/mach-pxa/xcep.c
@@ -182,6 +182,7 @@ MACHINE_START(XCEP, "Iskratel XCEP")
182 .atag_offset = 0x100, 182 .atag_offset = 0x100,
183 .init_machine = xcep_init, 183 .init_machine = xcep_init,
184 .map_io = pxa25x_map_io, 184 .map_io = pxa25x_map_io,
185 .nr_irqs = PXA_NR_IRQS,
185 .init_irq = pxa25x_init_irq, 186 .init_irq = pxa25x_init_irq,
186 .handle_irq = pxa25x_handle_irq, 187 .handle_irq = pxa25x_handle_irq,
187 .timer = &pxa_timer, 188 .timer = &pxa_timer,
diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c
index b6476848b561..fa8619970841 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -721,6 +721,7 @@ static void __init z2_init(void)
721MACHINE_START(ZIPIT2, "Zipit Z2") 721MACHINE_START(ZIPIT2, "Zipit Z2")
722 .atag_offset = 0x100, 722 .atag_offset = 0x100,
723 .map_io = pxa27x_map_io, 723 .map_io = pxa27x_map_io,
724 .nr_irqs = PXA_NR_IRQS,
724 .init_irq = pxa27x_init_irq, 725 .init_irq = pxa27x_init_irq,
725 .handle_irq = pxa27x_handle_irq, 726 .handle_irq = pxa27x_handle_irq,
726 .timer = &pxa_timer, 727 .timer = &pxa_timer,
diff --git a/arch/arm/mach-realview/include/mach/hardware.h b/arch/arm/mach-realview/include/mach/hardware.h
index 8a638d15797f..281e71c97525 100644
--- a/arch/arm/mach-realview/include/mach/hardware.h
+++ b/arch/arm/mach-realview/include/mach/hardware.h
@@ -37,6 +37,6 @@
37#else 37#else
38#define IO_ADDRESS(x) (x) 38#define IO_ADDRESS(x) (x)
39#endif 39#endif
40#define __io_address(n) __io(IO_ADDRESS(n)) 40#define __io_address(n) IOMEM(IO_ADDRESS(n))
41 41
42#endif 42#endif
diff --git a/arch/arm/mach-realview/include/mach/io.h b/arch/arm/mach-realview/include/mach/io.h
deleted file mode 100644
index f05bcdf605d8..000000000000
--- a/arch/arm/mach-realview/include/mach/io.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * arch/arm/mach-realview/include/mach/io.h
3 *
4 * Copyright (C) 2003 ARM Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#ifndef __ASM_ARM_ARCH_IO_H
21#define __ASM_ARM_ARCH_IO_H
22
23#define IO_SPACE_LIMIT 0xffffffff
24
25#define __io(a) __typesafe_io(a)
26#define __mem_pci(a) (a)
27
28#endif
diff --git a/arch/arm/mach-rpc/include/mach/hardware.h b/arch/arm/mach-rpc/include/mach/hardware.h
index 050d63c74cc1..257166b21f3d 100644
--- a/arch/arm/mach-rpc/include/mach/hardware.h
+++ b/arch/arm/mach-rpc/include/mach/hardware.h
@@ -14,12 +14,6 @@
14 14
15#include <mach/memory.h> 15#include <mach/memory.h>
16 16
17#ifndef __ASSEMBLY__
18#define IOMEM(x) ((void __iomem *)(unsigned long)(x))
19#else
20#define IOMEM(x) x
21#endif /* __ASSEMBLY__ */
22
23/* 17/*
24 * What hardware must be present 18 * What hardware must be present
25 */ 19 */
diff --git a/arch/arm/mach-rpc/include/mach/io.h b/arch/arm/mach-rpc/include/mach/io.h
index 695f4ed2e11b..707071a7ea4e 100644
--- a/arch/arm/mach-rpc/include/mach/io.h
+++ b/arch/arm/mach-rpc/include/mach/io.h
@@ -28,9 +28,4 @@
28 */ 28 */
29#define __io(a) (PCIO_BASE + ((a) << 2)) 29#define __io(a) (PCIO_BASE + ((a) << 2))
30 30
31/*
32 * 1:1 mapping for ioremapped regions.
33 */
34#define __mem_pci(x) (x)
35
36#endif 31#endif
diff --git a/arch/arm/mach-s3c24xx/include/mach/io.h b/arch/arm/mach-s3c24xx/include/mach/io.h
index 118749f37c4c..5dd1db4e2677 100644
--- a/arch/arm/mach-s3c24xx/include/mach/io.h
+++ b/arch/arm/mach-s3c24xx/include/mach/io.h
@@ -208,9 +208,4 @@ DECLARE_IO(int,l,"")
208#define outsw(p,d,l) __raw_writesw(__ioaddr(p),d,l) 208#define outsw(p,d,l) __raw_writesw(__ioaddr(p),d,l)
209#define outsl(p,d,l) __raw_writesl(__ioaddr(p),d,l) 209#define outsl(p,d,l) __raw_writesl(__ioaddr(p),d,l)
210 210
211/*
212 * 1:1 mapping for ioremapped regions.
213 */
214#define __mem_pci(x) (x)
215
216#endif 211#endif
diff --git a/arch/arm/mach-s3c24xx/simtec-nor.c b/arch/arm/mach-s3c24xx/simtec-nor.c
index 2119ca6a73bc..b9d6d4f92c03 100644
--- a/arch/arm/mach-s3c24xx/simtec-nor.c
+++ b/arch/arm/mach-s3c24xx/simtec-nor.c
@@ -35,9 +35,7 @@
35static void simtec_nor_vpp(struct platform_device *pdev, int vpp) 35static void simtec_nor_vpp(struct platform_device *pdev, int vpp)
36{ 36{
37 unsigned int val; 37 unsigned int val;
38 unsigned long flags;
39 38
40 local_irq_save(flags);
41 val = __raw_readb(BAST_VA_CTRL3); 39 val = __raw_readb(BAST_VA_CTRL3);
42 40
43 printk(KERN_DEBUG "%s(%d)\n", __func__, vpp); 41 printk(KERN_DEBUG "%s(%d)\n", __func__, vpp);
@@ -48,7 +46,6 @@ static void simtec_nor_vpp(struct platform_device *pdev, int vpp)
48 val &= ~BAST_CPLD_CTRL3_ROMWEN; 46 val &= ~BAST_CPLD_CTRL3_ROMWEN;
49 47
50 __raw_writeb(val, BAST_VA_CTRL3); 48 __raw_writeb(val, BAST_VA_CTRL3);
51 local_irq_restore(flags);
52} 49}
53 50
54static struct physmap_flash_data simtec_nor_pdata = { 51static struct physmap_flash_data simtec_nor_pdata = {
diff --git a/arch/arm/mach-s3c64xx/include/mach/io.h b/arch/arm/mach-s3c64xx/include/mach/io.h
deleted file mode 100644
index de5716dbbd65..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/io.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/* arch/arm/mach-s3c64xxinclude/mach/io.h
2 *
3 * Copyright 2008 Simtec Electronics
4 * Ben Dooks <ben-linux@fluff.org>
5 *
6 * Default IO routines for S3C64XX based
7 */
8
9#ifndef __ASM_ARM_ARCH_IO_H
10#define __ASM_ARM_ARCH_IO_H
11
12/* No current ISA/PCI bus support. */
13#define __io(a) __typesafe_io(a)
14#define __mem_pci(a) (a)
15
16#define IO_SPACE_LIMIT (0xFFFFFFFF)
17
18#endif
diff --git a/arch/arm/mach-s5p64x0/include/mach/io.h b/arch/arm/mach-s5p64x0/include/mach/io.h
deleted file mode 100644
index a3e095c02fb5..000000000000
--- a/arch/arm/mach-s5p64x0/include/mach/io.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/* linux/arch/arm/mach-s5p64x0/include/mach/io.h
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Copyright 2008 Simtec Electronics
7 * Ben Dooks <ben-linux@fluff.org>
8 *
9 * Default IO routines for S5P64X0 based
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14*/
15
16#ifndef __ASM_ARM_ARCH_IO_H
17#define __ASM_ARM_ARCH_IO_H
18
19/* No current ISA/PCI bus support. */
20#define __io(a) __typesafe_io(a)
21#define __mem_pci(a) (a)
22
23#define IO_SPACE_LIMIT (0xFFFFFFFF)
24
25#endif
diff --git a/arch/arm/mach-s5pc100/include/mach/io.h b/arch/arm/mach-s5pc100/include/mach/io.h
deleted file mode 100644
index 819acf5eaf89..000000000000
--- a/arch/arm/mach-s5pc100/include/mach/io.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/* arch/arm/mach-s5pc100/include/mach/io.h
2 *
3 * Copyright 2008 Simtec Electronics
4 * Ben Dooks <ben-linux@fluff.org>
5 *
6 * Default IO routines for S5PC100 systems
7 */
8
9#ifndef __ASM_ARM_ARCH_IO_H
10#define __ASM_ARM_ARCH_IO_H
11
12/* No current ISA/PCI bus support. */
13#define __io(a) __typesafe_io(a)
14#define __mem_pci(a) (a)
15
16#define IO_SPACE_LIMIT (0xFFFFFFFF)
17
18#endif
diff --git a/arch/arm/mach-s5pv210/include/mach/io.h b/arch/arm/mach-s5pv210/include/mach/io.h
deleted file mode 100644
index 5ab9d560bc86..000000000000
--- a/arch/arm/mach-s5pv210/include/mach/io.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/* linux/arch/arm/mach-s5pv210/include/mach/io.h
2 *
3 * Copyright 2008-2010 Ben Dooks <ben-linux@fluff.org>
4 *
5 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 *
8 * Based on arch/arm/mach-s5p6442/include/mach/io.h
9 *
10 * Default IO routines for S5PV210
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15*/
16
17#ifndef __ASM_ARM_ARCH_IO_H
18#define __ASM_ARM_ARCH_IO_H __FILE__
19
20/* No current ISA/PCI bus support. */
21#define __io(a) __typesafe_io(a)
22#define __mem_pci(a) (a)
23
24#define IO_SPACE_LIMIT (0xFFFFFFFF)
25
26#endif /* __ASM_ARM_ARCH_IO_H */
diff --git a/arch/arm/mach-sa1100/include/mach/io.h b/arch/arm/mach-sa1100/include/mach/io.h
deleted file mode 100644
index dfc27ff08344..000000000000
--- a/arch/arm/mach-sa1100/include/mach/io.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * arch/arm/mach-sa1100/include/mach/io.h
3 *
4 * Copyright (C) 1997-1999 Russell King
5 *
6 * Modifications:
7 * 06-12-1997 RMK Created.
8 * 07-04-1999 RMK Major cleanup
9 */
10#ifndef __ASM_ARM_ARCH_IO_H
11#define __ASM_ARM_ARCH_IO_H
12
13/*
14 * __io() is required to be an equivalent mapping to __mem_pci() for
15 * SOC_COMMON to work.
16 */
17#define __io(a) __typesafe_io(a)
18#define __mem_pci(a) (a)
19
20#endif
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c
index 6a2a7f2c2557..2704bcd869cd 100644
--- a/arch/arm/mach-shark/core.c
+++ b/arch/arm/mach-shark/core.c
@@ -15,6 +15,7 @@
15#include <asm/mach-types.h> 15#include <asm/mach-types.h>
16#include <asm/leds.h> 16#include <asm/leds.h>
17#include <asm/param.h> 17#include <asm/param.h>
18#include <asm/system_misc.h>
18 19
19#include <asm/mach/map.h> 20#include <asm/mach/map.h>
20#include <asm/mach/arch.h> 21#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-shark/include/mach/io.h b/arch/arm/mach-shark/include/mach/io.h
index 9ccbcecc430b..1a45fc01ff1d 100644
--- a/arch/arm/mach-shark/include/mach/io.h
+++ b/arch/arm/mach-shark/include/mach/io.h
@@ -15,6 +15,4 @@
15 15
16#define __io(a) ((void __iomem *)(0xe0000000 + (a))) 16#define __io(a) ((void __iomem *)(0xe0000000 + (a)))
17 17
18#define __mem_pci(addr) (addr)
19
20#endif 18#endif
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 060e5644c49c..34560cab45d9 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -100,6 +100,10 @@ config MACH_MARZEN
100 100
101comment "SH-Mobile System Configuration" 101comment "SH-Mobile System Configuration"
102 102
103config CPU_HAS_INTEVT
104 bool
105 default y
106
103menu "Memory configuration" 107menu "Memory configuration"
104 108
105config MEMORY_START 109config MEMORY_START
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index f50d7c8b1221..cb224a344af0 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -43,6 +43,7 @@
43#include <video/sh_mipi_dsi.h> 43#include <video/sh_mipi_dsi.h>
44#include <sound/sh_fsi.h> 44#include <sound/sh_fsi.h>
45#include <mach/hardware.h> 45#include <mach/hardware.h>
46#include <mach/irqs.h>
46#include <mach/sh73a0.h> 47#include <mach/sh73a0.h>
47#include <mach/common.h> 48#include <mach/common.h>
48#include <asm/mach-types.h> 49#include <asm/mach-types.h>
@@ -584,7 +585,7 @@ static void __init ag5evm_init(void)
584 585
585#ifdef CONFIG_CACHE_L2X0 586#ifdef CONFIG_CACHE_L2X0
586 /* Shared attribute override enable, 64K*8way */ 587 /* Shared attribute override enable, 64K*8way */
587 l2x0_init(__io(0xf0100000), 0x00460000, 0xc2000fff); 588 l2x0_init(IOMEM(0xf0100000), 0x00460000, 0xc2000fff);
588#endif 589#endif
589 sh73a0_add_standard_devices(); 590 sh73a0_add_standard_devices();
590 platform_add_devices(ag5evm_devices, ARRAY_SIZE(ag5evm_devices)); 591 platform_add_devices(ag5evm_devices, ARRAY_SIZE(ag5evm_devices));
diff --git a/arch/arm/mach-shmobile/board-bonito.c b/arch/arm/mach-shmobile/board-bonito.c
index 8b2124da245d..81fd95f7f52a 100644
--- a/arch/arm/mach-shmobile/board-bonito.c
+++ b/arch/arm/mach-shmobile/board-bonito.c
@@ -35,6 +35,7 @@
35#include <asm/mach/time.h> 35#include <asm/mach/time.h>
36#include <asm/hardware/cache-l2x0.h> 36#include <asm/hardware/cache-l2x0.h>
37#include <mach/r8a7740.h> 37#include <mach/r8a7740.h>
38#include <mach/irqs.h>
38#include <video/sh_mobile_lcdc.h> 39#include <video/sh_mobile_lcdc.h>
39 40
40/* 41/*
@@ -370,7 +371,7 @@ static void __init bonito_init(void)
370 371
371#ifdef CONFIG_CACHE_L2X0 372#ifdef CONFIG_CACHE_L2X0
372 /* Early BRESP enable, Shared attribute override enable, 32K*8way */ 373 /* Early BRESP enable, Shared attribute override enable, 32K*8way */
373 l2x0_init(__io(0xf0002000), 0x40440000, 0x82000fff); 374 l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff);
374#endif 375#endif
375 376
376 r8a7740_add_standard_devices(); 377 r8a7740_add_standard_devices();
diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c
index b627e89037f5..39b6cf85ced6 100644
--- a/arch/arm/mach-shmobile/board-g3evm.c
+++ b/arch/arm/mach-shmobile/board-g3evm.c
@@ -33,6 +33,7 @@
33#include <linux/input.h> 33#include <linux/input.h>
34#include <linux/input/sh_keysc.h> 34#include <linux/input/sh_keysc.h>
35#include <linux/dma-mapping.h> 35#include <linux/dma-mapping.h>
36#include <mach/irqs.h>
36#include <mach/sh7367.h> 37#include <mach/sh7367.h>
37#include <mach/common.h> 38#include <mach/common.h>
38#include <asm/mach-types.h> 39#include <asm/mach-types.h>
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index 46d757d2759d..0e5a39c670bc 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -34,6 +34,7 @@
34#include <linux/mmc/sh_mobile_sdhi.h> 34#include <linux/mmc/sh_mobile_sdhi.h>
35#include <linux/gpio.h> 35#include <linux/gpio.h>
36#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
37#include <mach/irqs.h>
37#include <mach/sh7377.h> 38#include <mach/sh7377.h>
38#include <mach/common.h> 39#include <mach/common.h>
39#include <asm/mach-types.h> 40#include <asm/mach-types.h>
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c
index 61c067294660..200dcd42a3a0 100644
--- a/arch/arm/mach-shmobile/board-kota2.c
+++ b/arch/arm/mach-shmobile/board-kota2.c
@@ -39,6 +39,7 @@
39#include <linux/mfd/tmio.h> 39#include <linux/mfd/tmio.h>
40#include <linux/mmc/sh_mobile_sdhi.h> 40#include <linux/mmc/sh_mobile_sdhi.h>
41#include <mach/hardware.h> 41#include <mach/hardware.h>
42#include <mach/irqs.h>
42#include <mach/sh73a0.h> 43#include <mach/sh73a0.h>
43#include <mach/common.h> 44#include <mach/common.h>
44#include <asm/mach-types.h> 45#include <asm/mach-types.h>
@@ -507,7 +508,7 @@ static void __init kota2_init(void)
507 508
508#ifdef CONFIG_CACHE_L2X0 509#ifdef CONFIG_CACHE_L2X0
509 /* Early BRESP enable, Shared attribute override enable, 64K*8way */ 510 /* Early BRESP enable, Shared attribute override enable, 64K*8way */
510 l2x0_init(__io(0xf0100000), 0x40460000, 0x82000fff); 511 l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
511#endif 512#endif
512 sh73a0_add_standard_devices(); 513 sh73a0_add_standard_devices();
513 platform_add_devices(kota2_devices, ARRAY_SIZE(kota2_devices)); 514 platform_add_devices(kota2_devices, ARRAY_SIZE(kota2_devices));
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index ca609502d6cd..f49e28abe0ab 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -39,6 +39,7 @@
39#include <linux/mtd/mtd.h> 39#include <linux/mtd/mtd.h>
40#include <linux/mtd/partitions.h> 40#include <linux/mtd/partitions.h>
41#include <linux/mtd/physmap.h> 41#include <linux/mtd/physmap.h>
42#include <linux/mtd/sh_flctl.h>
42#include <linux/pm_clock.h> 43#include <linux/pm_clock.h>
43#include <linux/smsc911x.h> 44#include <linux/smsc911x.h>
44#include <linux/sh_intc.h> 45#include <linux/sh_intc.h>
@@ -54,6 +55,7 @@
54#include <sound/sh_fsi.h> 55#include <sound/sh_fsi.h>
55 56
56#include <mach/common.h> 57#include <mach/common.h>
58#include <mach/irqs.h>
57#include <mach/sh7372.h> 59#include <mach/sh7372.h>
58 60
59#include <asm/mach/arch.h> 61#include <asm/mach/arch.h>
@@ -955,6 +957,50 @@ static struct platform_device fsi_ak4643_device = {
955 }, 957 },
956}; 958};
957 959
960/* FLCTL */
961static struct mtd_partition nand_partition_info[] = {
962 {
963 .name = "system",
964 .offset = 0,
965 .size = 128 * 1024 * 1024,
966 },
967 {
968 .name = "userdata",
969 .offset = MTDPART_OFS_APPEND,
970 .size = 256 * 1024 * 1024,
971 },
972 {
973 .name = "cache",
974 .offset = MTDPART_OFS_APPEND,
975 .size = 128 * 1024 * 1024,
976 },
977};
978
979static struct resource nand_flash_resources[] = {
980 [0] = {
981 .start = 0xe6a30000,
982 .end = 0xe6a3009b,
983 .flags = IORESOURCE_MEM,
984 }
985};
986
987static struct sh_flctl_platform_data nand_flash_data = {
988 .parts = nand_partition_info,
989 .nr_parts = ARRAY_SIZE(nand_partition_info),
990 .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET
991 | SHBUSSEL | SEL_16BIT | SNAND_E,
992 .use_holden = 1,
993};
994
995static struct platform_device nand_flash_device = {
996 .name = "sh_flctl",
997 .resource = nand_flash_resources,
998 .num_resources = ARRAY_SIZE(nand_flash_resources),
999 .dev = {
1000 .platform_data = &nand_flash_data,
1001 },
1002};
1003
958/* 1004/*
959 * The card detect pin of the top SD/MMC slot (CN7) is active low and is 1005 * The card detect pin of the top SD/MMC slot (CN7) is active low and is
960 * connected to GPIO A22 of SH7372 (GPIO_PORT41). 1006 * connected to GPIO A22 of SH7372 (GPIO_PORT41).
@@ -1258,6 +1304,7 @@ static struct platform_device *mackerel_devices[] __initdata = {
1258 &fsi_device, 1304 &fsi_device,
1259 &fsi_ak4643_device, 1305 &fsi_ak4643_device,
1260 &fsi_hdmi_device, 1306 &fsi_hdmi_device,
1307 &nand_flash_device,
1261 &sdhi0_device, 1308 &sdhi0_device,
1262#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) 1309#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
1263 &sdhi1_device, 1310 &sdhi1_device,
@@ -1487,6 +1534,30 @@ static void __init mackerel_init(void)
1487 gpio_request(GPIO_FN_MMCCMD0, NULL); 1534 gpio_request(GPIO_FN_MMCCMD0, NULL);
1488 gpio_request(GPIO_FN_MMCCLK0, NULL); 1535 gpio_request(GPIO_FN_MMCCLK0, NULL);
1489 1536
1537 /* FLCTL */
1538 gpio_request(GPIO_FN_D0_NAF0, NULL);
1539 gpio_request(GPIO_FN_D1_NAF1, NULL);
1540 gpio_request(GPIO_FN_D2_NAF2, NULL);
1541 gpio_request(GPIO_FN_D3_NAF3, NULL);
1542 gpio_request(GPIO_FN_D4_NAF4, NULL);
1543 gpio_request(GPIO_FN_D5_NAF5, NULL);
1544 gpio_request(GPIO_FN_D6_NAF6, NULL);
1545 gpio_request(GPIO_FN_D7_NAF7, NULL);
1546 gpio_request(GPIO_FN_D8_NAF8, NULL);
1547 gpio_request(GPIO_FN_D9_NAF9, NULL);
1548 gpio_request(GPIO_FN_D10_NAF10, NULL);
1549 gpio_request(GPIO_FN_D11_NAF11, NULL);
1550 gpio_request(GPIO_FN_D12_NAF12, NULL);
1551 gpio_request(GPIO_FN_D13_NAF13, NULL);
1552 gpio_request(GPIO_FN_D14_NAF14, NULL);
1553 gpio_request(GPIO_FN_D15_NAF15, NULL);
1554 gpio_request(GPIO_FN_FCE0, NULL);
1555 gpio_request(GPIO_FN_WE0_FWE, NULL);
1556 gpio_request(GPIO_FN_FRB, NULL);
1557 gpio_request(GPIO_FN_A4_FOE, NULL);
1558 gpio_request(GPIO_FN_A5_FCDE, NULL);
1559 gpio_request(GPIO_FN_RD_FSC, NULL);
1560
1490 /* enable GPS module (GT-720F) */ 1561 /* enable GPS module (GT-720F) */
1491 gpio_request(GPIO_FN_SCIFA2_TXD1, NULL); 1562 gpio_request(GPIO_FN_SCIFA2_TXD1, NULL);
1492 gpio_request(GPIO_FN_SCIFA2_RXD1, NULL); 1563 gpio_request(GPIO_FN_SCIFA2_RXD1, NULL);
@@ -1531,6 +1602,7 @@ static void __init mackerel_init(void)
1531 sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); 1602 sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
1532 sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs0_device); 1603 sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs0_device);
1533 sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs1_device); 1604 sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs1_device);
1605 sh7372_add_device_to_domain(&sh7372_a3sp, &nand_flash_device);
1534 sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device); 1606 sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device);
1535 sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device); 1607 sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device);
1536#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) 1608#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index cbd5e4cd06d2..ef0e13bf0b3a 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -31,6 +31,7 @@
31#include <mach/hardware.h> 31#include <mach/hardware.h>
32#include <mach/r8a7779.h> 32#include <mach/r8a7779.h>
33#include <mach/common.h> 33#include <mach/common.h>
34#include <mach/irqs.h>
34#include <asm/mach-types.h> 35#include <asm/mach-types.h>
35#include <asm/mach/arch.h> 36#include <asm/mach/arch.h>
36#include <asm/hardware/gic.h> 37#include <asm/hardware/gic.h>
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index de243e3c8392..94d1f88246d3 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -511,7 +511,7 @@ enum { MSTP001, MSTP000,
511 MSTP223, 511 MSTP223,
512 MSTP218, MSTP217, MSTP216, MSTP214, MSTP208, MSTP207, 512 MSTP218, MSTP217, MSTP216, MSTP214, MSTP208, MSTP207,
513 MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 513 MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
514 MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312, 514 MSTP328, MSTP323, MSTP322, MSTP315, MSTP314, MSTP313, MSTP312,
515 MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP407, MSTP406, 515 MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP407, MSTP406,
516 MSTP405, MSTP404, MSTP403, MSTP400, 516 MSTP405, MSTP404, MSTP403, MSTP400,
517 MSTP_NR }; 517 MSTP_NR };
@@ -553,6 +553,7 @@ static struct clk mstp_clks[MSTP_NR] = {
553 [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */ 553 [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */
554 [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */ 554 [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */
555 [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */ 555 [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */
556 [MSTP315] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 15, 0), /* FLCTL*/
556 [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */ 557 [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */
557 [MSTP313] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 13, 0), /* SDHI1 */ 558 [MSTP313] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 13, 0), /* SDHI1 */
558 [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMC */ 559 [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMC */
@@ -653,6 +654,7 @@ static struct clk_lookup lookups[] = {
653 CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */ 654 CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */
654 CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */ 655 CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */
655 CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP322]), /* USB0 */ 656 CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP322]), /* USB0 */
657 CLKDEV_DEV_ID("sh_flctl.0", &mstp_clks[MSTP315]), /* FLCTL */
656 CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */ 658 CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
657 CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */ 659 CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
658 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */ 660 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
index 21b09b6455e4..7e6559105d40 100644
--- a/arch/arm/mach-shmobile/cpuidle.c
+++ b/arch/arm/mach-shmobile/cpuidle.c
@@ -13,6 +13,7 @@
13#include <linux/suspend.h> 13#include <linux/suspend.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <asm/cpuidle.h>
16#include <asm/io.h> 17#include <asm/io.h>
17 18
18static void shmobile_enter_wfi(void) 19static void shmobile_enter_wfi(void)
@@ -28,37 +29,19 @@ static int shmobile_cpuidle_enter(struct cpuidle_device *dev,
28 struct cpuidle_driver *drv, 29 struct cpuidle_driver *drv,
29 int index) 30 int index)
30{ 31{
31 ktime_t before, after;
32
33 before = ktime_get();
34
35 local_irq_disable();
36 local_fiq_disable();
37
38 shmobile_cpuidle_modes[index](); 32 shmobile_cpuidle_modes[index]();
39 33
40 local_irq_enable();
41 local_fiq_enable();
42
43 after = ktime_get();
44 dev->last_residency = ktime_to_ns(ktime_sub(after, before)) >> 10;
45
46 return index; 34 return index;
47} 35}
48 36
49static struct cpuidle_device shmobile_cpuidle_dev; 37static struct cpuidle_device shmobile_cpuidle_dev;
50static struct cpuidle_driver shmobile_cpuidle_driver = { 38static struct cpuidle_driver shmobile_cpuidle_driver = {
51 .name = "shmobile_cpuidle", 39 .name = "shmobile_cpuidle",
52 .owner = THIS_MODULE, 40 .owner = THIS_MODULE,
53 .states[0] = { 41 .en_core_tk_irqen = 1,
54 .name = "C1", 42 .states[0] = ARM_CPUIDLE_WFI_STATE,
55 .desc = "WFI", 43 .safe_state_index = 0, /* C1 */
56 .exit_latency = 1, 44 .state_count = 1,
57 .target_residency = 1 * 2,
58 .flags = CPUIDLE_FLAG_TIME_VALID,
59 },
60 .safe_state_index = 0, /* C1 */
61 .state_count = 1,
62}; 45};
63 46
64void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv); 47void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
diff --git a/arch/arm/mach-shmobile/include/mach/io.h b/arch/arm/mach-shmobile/include/mach/io.h
deleted file mode 100644
index 7339fe46cb7c..000000000000
--- a/arch/arm/mach-shmobile/include/mach/io.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef __ASM_MACH_IO_H
2#define __ASM_MACH_IO_H
3
4#define IO_SPACE_LIMIT 0xffffffff
5
6#define __io(a) ((void __iomem *)(a))
7#define __mem_pci(a) (a)
8
9#endif /* __ASM_MACH_IO_H */
diff --git a/arch/arm/mach-shmobile/include/mach/irqs.h b/arch/arm/mach-shmobile/include/mach/irqs.h
index dcb714f4d75a..4e686cc201fc 100644
--- a/arch/arm/mach-shmobile/include/mach/irqs.h
+++ b/arch/arm/mach-shmobile/include/mach/irqs.h
@@ -1,15 +1,11 @@
1#ifndef __ASM_MACH_IRQS_H 1#ifndef __ASM_MACH_IRQS_H
2#define __ASM_MACH_IRQS_H 2#define __ASM_MACH_IRQS_H
3 3
4#define NR_IRQS 1024 4#include <linux/sh_intc.h>
5 5
6/* GIC */ 6/* GIC */
7#define gic_spi(nr) ((nr) + 32) 7#define gic_spi(nr) ((nr) + 32)
8 8
9/* INTCA */
10#define evt2irq(evt) (((evt) >> 5) - 16)
11#define irq2evt(irq) (((irq) + 16) << 5)
12
13/* INTCS */ 9/* INTCS */
14#define INTCS_VECT_BASE 0x2200 10#define INTCS_VECT_BASE 0x2200
15#define INTCS_VECT(n, vect) INTC_VECT((n), INTCS_VECT_BASE + (vect)) 11#define INTCS_VECT(n, vect) INTC_VECT((n), INTCS_VECT_BASE + (vect))
diff --git a/arch/arm/mach-shmobile/intc-r8a7740.c b/arch/arm/mach-shmobile/intc-r8a7740.c
index 272c84c20c83..09c42afcb22d 100644
--- a/arch/arm/mach-shmobile/intc-r8a7740.c
+++ b/arch/arm/mach-shmobile/intc-r8a7740.c
@@ -25,6 +25,7 @@
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/sh_intc.h> 26#include <linux/sh_intc.h>
27#include <mach/intc.h> 27#include <mach/intc.h>
28#include <mach/irqs.h>
28#include <asm/mach-types.h> 29#include <asm/mach-types.h>
29#include <asm/mach/arch.h> 30#include <asm/mach/arch.h>
30 31
diff --git a/arch/arm/mach-shmobile/intc-r8a7779.c b/arch/arm/mach-shmobile/intc-r8a7779.c
index 5d92fcde2bc3..550b23df4fd4 100644
--- a/arch/arm/mach-shmobile/intc-r8a7779.c
+++ b/arch/arm/mach-shmobile/intc-r8a7779.c
@@ -42,8 +42,8 @@ static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
42 42
43void __init r8a7779_init_irq(void) 43void __init r8a7779_init_irq(void)
44{ 44{
45 void __iomem *gic_dist_base = __io(0xf0001000); 45 void __iomem *gic_dist_base = IOMEM(0xf0001000);
46 void __iomem *gic_cpu_base = __io(0xf0000100); 46 void __iomem *gic_cpu_base = IOMEM(0xf0000100);
47 47
48 /* use GIC to handle interrupts */ 48 /* use GIC to handle interrupts */
49 gic_init(0, 29, gic_dist_base, gic_cpu_base); 49 gic_init(0, 29, gic_dist_base, gic_cpu_base);
diff --git a/arch/arm/mach-shmobile/intc-sh7367.c b/arch/arm/mach-shmobile/intc-sh7367.c
index cfde9bfc3669..5bf776495b75 100644
--- a/arch/arm/mach-shmobile/intc-sh7367.c
+++ b/arch/arm/mach-shmobile/intc-sh7367.c
@@ -23,6 +23,7 @@
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/sh_intc.h> 24#include <linux/sh_intc.h>
25#include <mach/intc.h> 25#include <mach/intc.h>
26#include <mach/irqs.h>
26#include <asm/mach-types.h> 27#include <asm/mach-types.h>
27#include <asm/mach/arch.h> 28#include <asm/mach/arch.h>
28 29
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 89afcaba99a1..6447e0af52d4 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -23,6 +23,7 @@
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/sh_intc.h> 24#include <linux/sh_intc.h>
25#include <mach/intc.h> 25#include <mach/intc.h>
26#include <mach/irqs.h>
26#include <asm/mach-types.h> 27#include <asm/mach-types.h>
27#include <asm/mach/arch.h> 28#include <asm/mach/arch.h>
28 29
diff --git a/arch/arm/mach-shmobile/intc-sh7377.c b/arch/arm/mach-shmobile/intc-sh7377.c
index 2af4e6e9bc5b..b84a460a3405 100644
--- a/arch/arm/mach-shmobile/intc-sh7377.c
+++ b/arch/arm/mach-shmobile/intc-sh7377.c
@@ -23,6 +23,7 @@
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/sh_intc.h> 24#include <linux/sh_intc.h>
25#include <mach/intc.h> 25#include <mach/intc.h>
26#include <mach/irqs.h>
26#include <asm/mach-types.h> 27#include <asm/mach-types.h>
27#include <asm/mach/arch.h> 28#include <asm/mach/arch.h>
28 29
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
index 9857595eaa79..ee447404c857 100644
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -24,6 +24,7 @@
24#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/sh_intc.h> 25#include <linux/sh_intc.h>
26#include <mach/intc.h> 26#include <mach/intc.h>
27#include <mach/irqs.h>
27#include <mach/sh73a0.h> 28#include <mach/sh73a0.h>
28#include <asm/hardware/gic.h> 29#include <asm/hardware/gic.h>
29#include <asm/mach-types.h> 30#include <asm/mach-types.h>
@@ -420,8 +421,8 @@ static irqreturn_t sh73a0_pint1_demux(int irq, void *dev_id)
420 421
421void __init sh73a0_init_irq(void) 422void __init sh73a0_init_irq(void)
422{ 423{
423 void __iomem *gic_dist_base = __io(0xf0001000); 424 void __iomem *gic_dist_base = IOMEM(0xf0001000);
424 void __iomem *gic_cpu_base = __io(0xf0000100); 425 void __iomem *gic_cpu_base = IOMEM(0xf0000100);
425 void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE); 426 void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
426 int k, n; 427 int k, n;
427 428
diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c
index 74e52341dd1b..14edb5cffa7f 100644
--- a/arch/arm/mach-shmobile/setup-r8a7740.c
+++ b/arch/arm/mach-shmobile/setup-r8a7740.c
@@ -26,6 +26,7 @@
26#include <linux/sh_timer.h> 26#include <linux/sh_timer.h>
27#include <mach/r8a7740.h> 27#include <mach/r8a7740.h>
28#include <mach/common.h> 28#include <mach/common.h>
29#include <mach/irqs.h>
29#include <asm/mach-types.h> 30#include <asm/mach-types.h>
30#include <asm/mach/map.h> 31#include <asm/mach/map.h>
31#include <asm/mach/arch.h> 32#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 6820d785493d..12c6f529ab89 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -29,6 +29,7 @@
29#include <linux/sh_intc.h> 29#include <linux/sh_intc.h>
30#include <linux/sh_timer.h> 30#include <linux/sh_timer.h>
31#include <mach/hardware.h> 31#include <mach/hardware.h>
32#include <mach/irqs.h>
32#include <mach/r8a7779.h> 33#include <mach/r8a7779.h>
33#include <mach/common.h> 34#include <mach/common.h>
34#include <asm/mach-types.h> 35#include <asm/mach-types.h>
diff --git a/arch/arm/mach-shmobile/setup-sh7367.c b/arch/arm/mach-shmobile/setup-sh7367.c
index a51e1a1e6996..2e3074ab75b3 100644
--- a/arch/arm/mach-shmobile/setup-sh7367.c
+++ b/arch/arm/mach-shmobile/setup-sh7367.c
@@ -30,6 +30,7 @@
30#include <linux/sh_timer.h> 30#include <linux/sh_timer.h>
31#include <mach/hardware.h> 31#include <mach/hardware.h>
32#include <mach/common.h> 32#include <mach/common.h>
33#include <mach/irqs.h>
33#include <asm/mach-types.h> 34#include <asm/mach-types.h>
34#include <asm/mach/arch.h> 35#include <asm/mach/arch.h>
35#include <asm/mach/map.h> 36#include <asm/mach/map.h>
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 4e818b7de781..2fe8f83ca124 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -33,6 +33,7 @@
33#include <linux/pm_domain.h> 33#include <linux/pm_domain.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35#include <mach/hardware.h> 35#include <mach/hardware.h>
36#include <mach/irqs.h>
36#include <mach/sh7372.h> 37#include <mach/sh7372.h>
37#include <mach/common.h> 38#include <mach/common.h>
38#include <asm/mach/map.h> 39#include <asm/mach/map.h>
diff --git a/arch/arm/mach-shmobile/setup-sh7377.c b/arch/arm/mach-shmobile/setup-sh7377.c
index 9f146095098b..d576a6abbade 100644
--- a/arch/arm/mach-shmobile/setup-sh7377.c
+++ b/arch/arm/mach-shmobile/setup-sh7377.c
@@ -32,6 +32,7 @@
32#include <mach/hardware.h> 32#include <mach/hardware.h>
33#include <mach/common.h> 33#include <mach/common.h>
34#include <asm/mach/map.h> 34#include <asm/mach/map.h>
35#include <mach/irqs.h>
35#include <asm/mach-types.h> 36#include <asm/mach-types.h>
36#include <asm/mach/arch.h> 37#include <asm/mach/arch.h>
37#include <asm/mach/time.h> 38#include <asm/mach/time.h>
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index b6a0734a738e..5bebffc10455 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -31,6 +31,7 @@
31#include <linux/sh_intc.h> 31#include <linux/sh_intc.h>
32#include <linux/sh_timer.h> 32#include <linux/sh_timer.h>
33#include <mach/hardware.h> 33#include <mach/hardware.h>
34#include <mach/irqs.h>
34#include <mach/sh73a0.h> 35#include <mach/sh73a0.h>
35#include <mach/common.h> 36#include <mach/common.h>
36#include <asm/mach-types.h> 37#include <asm/mach-types.h>
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index 9bb7b8575a1f..b62e19d4c9af 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -30,7 +30,7 @@
30#include <asm/smp_twd.h> 30#include <asm/smp_twd.h>
31#include <asm/hardware/gic.h> 31#include <asm/hardware/gic.h>
32 32
33#define AVECR 0xfe700040 33#define AVECR IOMEM(0xfe700040)
34 34
35static struct r8a7779_pm_ch r8a7779_ch_cpu1 = { 35static struct r8a7779_pm_ch r8a7779_ch_cpu1 = {
36 .chan_offs = 0x40, /* PWRSR0 .. PWRER0 */ 36 .chan_offs = 0x40, /* PWRSR0 .. PWRER0 */
@@ -138,7 +138,7 @@ void __init r8a7779_smp_prepare_cpus(void)
138 scu_enable(scu_base_addr()); 138 scu_enable(scu_base_addr());
139 139
140 /* Map the reset vector (in headsmp.S) */ 140 /* Map the reset vector (in headsmp.S) */
141 __raw_writel(__pa(shmobile_secondary_vector), __io(AVECR)); 141 __raw_writel(__pa(shmobile_secondary_vector), AVECR);
142 142
143 /* enable cache coherency on CPU0 */ 143 /* enable cache coherency on CPU0 */
144 modify_scu_cpu_psr(0, 3 << (cpu * 8)); 144 modify_scu_cpu_psr(0, 3 << (cpu * 8));
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index c0a9093ba3a8..14ad8b052f1a 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -28,11 +28,11 @@
28#include <asm/smp_twd.h> 28#include <asm/smp_twd.h>
29#include <asm/hardware/gic.h> 29#include <asm/hardware/gic.h>
30 30
31#define WUPCR 0xe6151010 31#define WUPCR IOMEM(0xe6151010)
32#define SRESCR 0xe6151018 32#define SRESCR IOMEM(0xe6151018)
33#define PSTR 0xe6151040 33#define PSTR IOMEM(0xe6151040)
34#define SBAR 0xe6180020 34#define SBAR IOMEM(0xe6180020)
35#define APARMBAREA 0xe6f10020 35#define APARMBAREA IOMEM(0xe6f10020)
36 36
37static void __iomem *scu_base_addr(void) 37static void __iomem *scu_base_addr(void)
38{ 38{
@@ -78,10 +78,10 @@ int __cpuinit sh73a0_boot_secondary(unsigned int cpu)
78 /* enable cache coherency */ 78 /* enable cache coherency */
79 modify_scu_cpu_psr(0, 3 << (cpu * 8)); 79 modify_scu_cpu_psr(0, 3 << (cpu * 8));
80 80
81 if (((__raw_readl(__io(PSTR)) >> (4 * cpu)) & 3) == 3) 81 if (((__raw_readl(PSTR) >> (4 * cpu)) & 3) == 3)
82 __raw_writel(1 << cpu, __io(WUPCR)); /* wake up */ 82 __raw_writel(1 << cpu, WUPCR); /* wake up */
83 else 83 else
84 __raw_writel(1 << cpu, __io(SRESCR)); /* reset */ 84 __raw_writel(1 << cpu, SRESCR); /* reset */
85 85
86 return 0; 86 return 0;
87} 87}
@@ -93,8 +93,8 @@ void __init sh73a0_smp_prepare_cpus(void)
93 scu_enable(scu_base_addr()); 93 scu_enable(scu_base_addr());
94 94
95 /* Map the reset vector (in headsmp.S) */ 95 /* Map the reset vector (in headsmp.S) */
96 __raw_writel(0, __io(APARMBAREA)); /* 4k */ 96 __raw_writel(0, APARMBAREA); /* 4k */
97 __raw_writel(__pa(shmobile_secondary_vector), __io(SBAR)); 97 __raw_writel(__pa(shmobile_secondary_vector), SBAR);
98 98
99 /* enable cache coherency on CPU0 */ 99 /* enable cache coherency on CPU0 */
100 modify_scu_cpu_psr(0, 3 << (cpu * 8)); 100 modify_scu_cpu_psr(0, 3 << (cpu * 8));
diff --git a/arch/arm/mach-spear3xx/clock.c b/arch/arm/mach-spear3xx/clock.c
index f67860cd649f..6c4841f55223 100644
--- a/arch/arm/mach-spear3xx/clock.c
+++ b/arch/arm/mach-spear3xx/clock.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/io.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <asm/mach-types.h> 17#include <asm/mach-types.h>
17#include <plat/clock.h> 18#include <plat/clock.h>
diff --git a/arch/arm/mach-spear3xx/include/mach/io.h b/arch/arm/mach-spear3xx/include/mach/io.h
deleted file mode 100644
index 30cff8a1f6b5..000000000000
--- a/arch/arm/mach-spear3xx/include/mach/io.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * arch/arm/mach-spear3xx/include/mach/io.h
3 *
4 * IO definitions for SPEAr3xx machine family
5 *
6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#ifndef __MACH_IO_H
15#define __MACH_IO_H
16
17#include <plat/io.h>
18
19#endif /* __MACH_IO_H */
diff --git a/arch/arm/mach-spear6xx/clock.c b/arch/arm/mach-spear6xx/clock.c
index 358f2800f17b..a86499a8a15f 100644
--- a/arch/arm/mach-spear6xx/clock.c
+++ b/arch/arm/mach-spear6xx/clock.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/io.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
16#include <plat/clock.h> 17#include <plat/clock.h>
17#include <mach/misc_regs.h> 18#include <mach/misc_regs.h>
diff --git a/arch/arm/mach-spear6xx/include/mach/io.h b/arch/arm/mach-spear6xx/include/mach/io.h
deleted file mode 100644
index fb7c106cea94..000000000000
--- a/arch/arm/mach-spear6xx/include/mach/io.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * arch/arm/mach-spear6xx/include/mach/io.h
3 *
4 * IO definitions for SPEAr6xx machine family
5 *
6 * Copyright (C) 2009 ST Microelectronics
7 * Rajeev Kumar Kumar<rajeev-dlh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#ifndef __MACH_IO_H
15#define __MACH_IO_H
16
17#include <plat/io.h>
18
19#endif /* __MACH_IO_H */
20
diff --git a/arch/arm/mach-tegra/board-dt-tegra20.c b/arch/arm/mach-tegra/board-dt-tegra20.c
index e20b419d5983..0952494f481a 100644
--- a/arch/arm/mach-tegra/board-dt-tegra20.c
+++ b/arch/arm/mach-tegra/board-dt-tegra20.c
@@ -68,11 +68,11 @@ struct of_dev_auxdata tegra20_auxdata_lookup[] __initdata = {
68 OF_DEV_AUXDATA("nvidia,tegra20-i2s", TEGRA_I2S2_BASE, "tegra-i2s.1", NULL), 68 OF_DEV_AUXDATA("nvidia,tegra20-i2s", TEGRA_I2S2_BASE, "tegra-i2s.1", NULL),
69 OF_DEV_AUXDATA("nvidia,tegra20-das", TEGRA_APB_MISC_DAS_BASE, "tegra-das", NULL), 69 OF_DEV_AUXDATA("nvidia,tegra20-das", TEGRA_APB_MISC_DAS_BASE, "tegra-das", NULL),
70 OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB_BASE, "tegra-ehci.0", 70 OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB_BASE, "tegra-ehci.0",
71 &tegra_ehci1_device.dev.platform_data), 71 &tegra_ehci1_pdata),
72 OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB2_BASE, "tegra-ehci.1", 72 OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB2_BASE, "tegra-ehci.1",
73 &tegra_ehci2_device.dev.platform_data), 73 &tegra_ehci2_pdata),
74 OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB3_BASE, "tegra-ehci.2", 74 OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB3_BASE, "tegra-ehci.2",
75 &tegra_ehci3_device.dev.platform_data), 75 &tegra_ehci3_pdata),
76 {} 76 {}
77}; 77};
78 78
diff --git a/arch/arm/mach-tegra/devices.c b/arch/arm/mach-tegra/devices.c
index 7a2a02dbd632..5f6b867e20b4 100644
--- a/arch/arm/mach-tegra/devices.c
+++ b/arch/arm/mach-tegra/devices.c
@@ -23,7 +23,6 @@
23#include <linux/fsl_devices.h> 23#include <linux/fsl_devices.h>
24#include <linux/serial_8250.h> 24#include <linux/serial_8250.h>
25#include <linux/i2c-tegra.h> 25#include <linux/i2c-tegra.h>
26#include <linux/platform_data/tegra_usb.h>
27#include <asm/pmu.h> 26#include <asm/pmu.h>
28#include <mach/irqs.h> 27#include <mach/irqs.h>
29#include <mach/iomap.h> 28#include <mach/iomap.h>
@@ -446,18 +445,18 @@ static struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = {
446 .clk = "cdev2", 445 .clk = "cdev2",
447}; 446};
448 447
449static struct tegra_ehci_platform_data tegra_ehci1_pdata = { 448struct tegra_ehci_platform_data tegra_ehci1_pdata = {
450 .operating_mode = TEGRA_USB_OTG, 449 .operating_mode = TEGRA_USB_OTG,
451 .power_down_on_bus_suspend = 1, 450 .power_down_on_bus_suspend = 1,
452}; 451};
453 452
454static struct tegra_ehci_platform_data tegra_ehci2_pdata = { 453struct tegra_ehci_platform_data tegra_ehci2_pdata = {
455 .phy_config = &tegra_ehci2_ulpi_phy_config, 454 .phy_config = &tegra_ehci2_ulpi_phy_config,
456 .operating_mode = TEGRA_USB_HOST, 455 .operating_mode = TEGRA_USB_HOST,
457 .power_down_on_bus_suspend = 1, 456 .power_down_on_bus_suspend = 1,
458}; 457};
459 458
460static struct tegra_ehci_platform_data tegra_ehci3_pdata = { 459struct tegra_ehci_platform_data tegra_ehci3_pdata = {
461 .operating_mode = TEGRA_USB_HOST, 460 .operating_mode = TEGRA_USB_HOST,
462 .power_down_on_bus_suspend = 1, 461 .power_down_on_bus_suspend = 1,
463}; 462};
diff --git a/arch/arm/mach-tegra/devices.h b/arch/arm/mach-tegra/devices.h
index 873ecb2f8ae6..ec455679b219 100644
--- a/arch/arm/mach-tegra/devices.h
+++ b/arch/arm/mach-tegra/devices.h
@@ -20,6 +20,11 @@
20#define __MACH_TEGRA_DEVICES_H 20#define __MACH_TEGRA_DEVICES_H
21 21
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/platform_data/tegra_usb.h>
24
25extern struct tegra_ehci_platform_data tegra_ehci1_pdata;
26extern struct tegra_ehci_platform_data tegra_ehci2_pdata;
27extern struct tegra_ehci_platform_data tegra_ehci3_pdata;
23 28
24extern struct platform_device tegra_gpio_device; 29extern struct platform_device tegra_gpio_device;
25extern struct platform_device tegra_pinmux_device; 30extern struct platform_device tegra_pinmux_device;
diff --git a/arch/arm/mach-tegra/include/mach/debug-macro.S b/arch/arm/mach-tegra/include/mach/debug-macro.S
index 90069abd37bd..8ce0661b8a3d 100644
--- a/arch/arm/mach-tegra/include/mach/debug-macro.S
+++ b/arch/arm/mach-tegra/include/mach/debug-macro.S
@@ -26,7 +26,6 @@
26 26
27#include <linux/serial_reg.h> 27#include <linux/serial_reg.h>
28 28
29#include <mach/io.h>
30#include <mach/iomap.h> 29#include <mach/iomap.h>
31#include <mach/irammap.h> 30#include <mach/irammap.h>
32 31
diff --git a/arch/arm/mach-tegra/include/mach/io.h b/arch/arm/mach-tegra/include/mach/io.h
index f15defffb5d2..fe700f9ce7dc 100644
--- a/arch/arm/mach-tegra/include/mach/io.h
+++ b/arch/arm/mach-tegra/include/mach/io.h
@@ -23,56 +23,8 @@
23 23
24#define IO_SPACE_LIMIT 0xffff 24#define IO_SPACE_LIMIT 0xffff
25 25
26/* On TEGRA, many peripherals are very closely packed in
27 * two 256MB io windows (that actually only use about 64KB
28 * at the start of each).
29 *
30 * We will just map the first 1MB of each window (to minimize
31 * pt entries needed) and provide a macro to transform physical
32 * io addresses to an appropriate void __iomem *.
33 *
34 */
35
36#ifdef __ASSEMBLY__
37#define IOMEM(x) (x)
38#else
39#define IOMEM(x) ((void __force __iomem *)(x))
40#endif
41
42#define IO_IRAM_PHYS 0x40000000
43#define IO_IRAM_VIRT IOMEM(0xFE400000)
44#define IO_IRAM_SIZE SZ_256K
45
46#define IO_CPU_PHYS 0x50040000
47#define IO_CPU_VIRT IOMEM(0xFE000000)
48#define IO_CPU_SIZE SZ_16K
49
50#define IO_PPSB_PHYS 0x60000000
51#define IO_PPSB_VIRT IOMEM(0xFE200000)
52#define IO_PPSB_SIZE SZ_1M
53
54#define IO_APB_PHYS 0x70000000
55#define IO_APB_VIRT IOMEM(0xFE300000)
56#define IO_APB_SIZE SZ_1M
57
58#define IO_TO_VIRT_BETWEEN(p, st, sz) ((p) >= (st) && (p) < ((st) + (sz)))
59#define IO_TO_VIRT_XLATE(p, pst, vst) (((p) - (pst) + (vst)))
60
61#define IO_TO_VIRT(n) ( \
62 IO_TO_VIRT_BETWEEN((n), IO_PPSB_PHYS, IO_PPSB_SIZE) ? \
63 IO_TO_VIRT_XLATE((n), IO_PPSB_PHYS, IO_PPSB_VIRT) : \
64 IO_TO_VIRT_BETWEEN((n), IO_APB_PHYS, IO_APB_SIZE) ? \
65 IO_TO_VIRT_XLATE((n), IO_APB_PHYS, IO_APB_VIRT) : \
66 IO_TO_VIRT_BETWEEN((n), IO_CPU_PHYS, IO_CPU_SIZE) ? \
67 IO_TO_VIRT_XLATE((n), IO_CPU_PHYS, IO_CPU_VIRT) : \
68 IO_TO_VIRT_BETWEEN((n), IO_IRAM_PHYS, IO_IRAM_SIZE) ? \
69 IO_TO_VIRT_XLATE((n), IO_IRAM_PHYS, IO_IRAM_VIRT) : \
70 NULL)
71
72#ifndef __ASSEMBLER__ 26#ifndef __ASSEMBLER__
73 27
74#define IO_ADDRESS(n) (IO_TO_VIRT(n))
75
76#ifdef CONFIG_TEGRA_PCI 28#ifdef CONFIG_TEGRA_PCI
77extern void __iomem *tegra_pcie_io_base; 29extern void __iomem *tegra_pcie_io_base;
78 30
@@ -88,7 +40,6 @@ static inline void __iomem *__io(unsigned long addr)
88#endif 40#endif
89 41
90#define __io(a) __io(a) 42#define __io(a) __io(a)
91#define __mem_pci(a) (a)
92 43
93#endif 44#endif
94 45
diff --git a/arch/arm/mach-tegra/include/mach/iomap.h b/arch/arm/mach-tegra/include/mach/iomap.h
index cff672a344f4..7e76da73121c 100644
--- a/arch/arm/mach-tegra/include/mach/iomap.h
+++ b/arch/arm/mach-tegra/include/mach/iomap.h
@@ -277,4 +277,46 @@
277# define TEGRA_DEBUG_UART_BASE TEGRA_UARTE_BASE 277# define TEGRA_DEBUG_UART_BASE TEGRA_UARTE_BASE
278#endif 278#endif
279 279
280/* On TEGRA, many peripherals are very closely packed in
281 * two 256MB io windows (that actually only use about 64KB
282 * at the start of each).
283 *
284 * We will just map the first 1MB of each window (to minimize
285 * pt entries needed) and provide a macro to transform physical
286 * io addresses to an appropriate void __iomem *.
287 *
288 */
289
290#define IO_IRAM_PHYS 0x40000000
291#define IO_IRAM_VIRT IOMEM(0xFE400000)
292#define IO_IRAM_SIZE SZ_256K
293
294#define IO_CPU_PHYS 0x50040000
295#define IO_CPU_VIRT IOMEM(0xFE000000)
296#define IO_CPU_SIZE SZ_16K
297
298#define IO_PPSB_PHYS 0x60000000
299#define IO_PPSB_VIRT IOMEM(0xFE200000)
300#define IO_PPSB_SIZE SZ_1M
301
302#define IO_APB_PHYS 0x70000000
303#define IO_APB_VIRT IOMEM(0xFE300000)
304#define IO_APB_SIZE SZ_1M
305
306#define IO_TO_VIRT_BETWEEN(p, st, sz) ((p) >= (st) && (p) < ((st) + (sz)))
307#define IO_TO_VIRT_XLATE(p, pst, vst) (((p) - (pst) + (vst)))
308
309#define IO_TO_VIRT(n) ( \
310 IO_TO_VIRT_BETWEEN((n), IO_PPSB_PHYS, IO_PPSB_SIZE) ? \
311 IO_TO_VIRT_XLATE((n), IO_PPSB_PHYS, IO_PPSB_VIRT) : \
312 IO_TO_VIRT_BETWEEN((n), IO_APB_PHYS, IO_APB_SIZE) ? \
313 IO_TO_VIRT_XLATE((n), IO_APB_PHYS, IO_APB_VIRT) : \
314 IO_TO_VIRT_BETWEEN((n), IO_CPU_PHYS, IO_CPU_SIZE) ? \
315 IO_TO_VIRT_XLATE((n), IO_CPU_PHYS, IO_CPU_VIRT) : \
316 IO_TO_VIRT_BETWEEN((n), IO_IRAM_PHYS, IO_IRAM_SIZE) ? \
317 IO_TO_VIRT_XLATE((n), IO_IRAM_PHYS, IO_IRAM_VIRT) : \
318 NULL)
319
320#define IO_ADDRESS(n) (IO_TO_VIRT(n))
321
280#endif 322#endif
diff --git a/arch/arm/mach-tegra/io.c b/arch/arm/mach-tegra/io.c
index d23ee2db2827..58b4baf9c483 100644
--- a/arch/arm/mach-tegra/io.c
+++ b/arch/arm/mach-tegra/io.c
@@ -26,6 +26,7 @@
26 26
27#include <asm/page.h> 27#include <asm/page.h>
28#include <asm/mach/map.h> 28#include <asm/mach/map.h>
29#include <mach/iomap.h>
29 30
30#include "board.h" 31#include "board.h"
31 32
diff --git a/arch/arm/mach-tegra/sleep.S b/arch/arm/mach-tegra/sleep.S
index 8f9fde161c34..5b20197bae7f 100644
--- a/arch/arm/mach-tegra/sleep.S
+++ b/arch/arm/mach-tegra/sleep.S
@@ -23,7 +23,9 @@
23 */ 23 */
24 24
25#include <linux/linkage.h> 25#include <linux/linkage.h>
26#include <mach/io.h> 26
27#include <asm/assembler.h>
28
27#include <mach/iomap.h> 29#include <mach/iomap.h>
28 30
29#include "flowctrl.h" 31#include "flowctrl.h"
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index 8b90c44d237f..1621ad07d284 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -1544,6 +1544,8 @@ static struct fsmc_nand_platform_data nand_platform_data = {
1544 .nr_partitions = ARRAY_SIZE(u300_partitions), 1544 .nr_partitions = ARRAY_SIZE(u300_partitions),
1545 .options = NAND_SKIP_BBTSCAN, 1545 .options = NAND_SKIP_BBTSCAN,
1546 .width = FSMC_NAND_BW8, 1546 .width = FSMC_NAND_BW8,
1547 .ale_off = PLAT_NAND_ALE,
1548 .cle_off = PLAT_NAND_CLE,
1547}; 1549};
1548 1550
1549static struct platform_device nand_device = { 1551static struct platform_device nand_device = {
diff --git a/arch/arm/mach-u300/include/mach/io.h b/arch/arm/mach-u300/include/mach/io.h
deleted file mode 100644
index 5d6b4c13b3a0..000000000000
--- a/arch/arm/mach-u300/include/mach/io.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 *
3 * arch/arm/mach-u300/include/mach/io.h
4 *
5 *
6 * Copyright (C) 2006-2009 ST-Ericsson AB
7 * License terms: GNU General Public License (GPL) version 2
8 * Dummy IO map for being able to use writew()/readw(),
9 * writel()/readw() and similar accessor functions.
10 * Author: Linus Walleij <linus.walleij@stericsson.com>
11 */
12#ifndef __MACH_IO_H
13#define __MACH_IO_H
14
15#define IO_SPACE_LIMIT 0xffffffff
16
17#define __io(a) __typesafe_io(a)
18#define __mem_pci(a) (a)
19
20#endif
diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h
index 035fdc9dbdb0..65f87c523892 100644
--- a/arch/arm/mach-u300/include/mach/u300-regs.h
+++ b/arch/arm/mach-u300/include/mach/u300-regs.h
@@ -18,18 +18,17 @@
18 * the defines are used for setting up the I/O memory mapping. 18 * the defines are used for setting up the I/O memory mapping.
19 */ 19 */
20 20
21#ifdef __ASSEMBLER__
22#define IOMEM(a) (a)
23#else
24#define IOMEM(a) (void __iomem *) a
25#endif
26
27/* NAND Flash CS0 */ 21/* NAND Flash CS0 */
28#define U300_NAND_CS0_PHYS_BASE 0x80000000 22#define U300_NAND_CS0_PHYS_BASE 0x80000000
29 23
30/* NFIF */ 24/* NFIF */
31#define U300_NAND_IF_PHYS_BASE 0x9f800000 25#define U300_NAND_IF_PHYS_BASE 0x9f800000
32 26
27/* ALE, CLE offset for FSMC NAND */
28#define PLAT_NAND_CLE (1 << 16)
29#define PLAT_NAND_ALE (1 << 17)
30
31
33/* AHB Peripherals */ 32/* AHB Peripherals */
34#define U300_AHB_PER_PHYS_BASE 0xa0000000 33#define U300_AHB_PER_PHYS_BASE 0xa0000000
35#define U300_AHB_PER_VIRT_BASE 0xff010000 34#define U300_AHB_PER_VIRT_BASE 0xff010000
diff --git a/arch/arm/mach-ux500/include/mach/hardware.h b/arch/arm/mach-ux500/include/mach/hardware.h
index d93d6dbef25b..f84698936d36 100644
--- a/arch/arm/mach-ux500/include/mach/hardware.h
+++ b/arch/arm/mach-ux500/include/mach/hardware.h
@@ -23,7 +23,7 @@
23 (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + U8500_IO_VIRTUAL) 23 (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + U8500_IO_VIRTUAL)
24 24
25/* typesafe io address */ 25/* typesafe io address */
26#define __io_address(n) __io(IO_ADDRESS(n)) 26#define __io_address(n) IOMEM(IO_ADDRESS(n))
27/* Used by some plat-nomadik code */ 27/* Used by some plat-nomadik code */
28#define io_p2v(n) __io_address(n) 28#define io_p2v(n) __io_address(n)
29 29
diff --git a/arch/arm/mach-ux500/include/mach/io.h b/arch/arm/mach-ux500/include/mach/io.h
deleted file mode 100644
index 1cf3f44ce5b2..000000000000
--- a/arch/arm/mach-ux500/include/mach/io.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * arch/arm/mach-u8500/include/mach/io.h
3 *
4 * Copyright (C) 1997-1999 Russell King
5 *
6 * Modifications:
7 * 06-12-1997 RMK Created.
8 * 07-04-1999 RMK Major cleanup
9 */
10#ifndef __ASM_ARM_ARCH_IO_H
11#define __ASM_ARM_ARCH_IO_H
12
13#define IO_SPACE_LIMIT 0xffffffff
14
15/*
16 * We don't actually have real ISA nor PCI buses, but there is so many
17 * drivers out there that might just work if we fake them...
18 */
19#define __io(a) __typesafe_io(a)
20#define __mem_pci(a) (a)
21
22#endif
diff --git a/arch/arm/mach-versatile/include/mach/io.h b/arch/arm/mach-versatile/include/mach/io.h
deleted file mode 100644
index f067c14c7182..000000000000
--- a/arch/arm/mach-versatile/include/mach/io.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * arch/arm/mach-versatile/include/mach/io.h
3 *
4 * Copyright (C) 2003 ARM Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#ifndef __ASM_ARM_ARCH_IO_H
21#define __ASM_ARM_ARCH_IO_H
22
23#define IO_SPACE_LIMIT 0xffffffff
24
25#define __io(a) __typesafe_io(a)
26#define __mem_pci(a) (a)
27
28#endif
diff --git a/arch/arm/mach-vexpress/include/mach/io.h b/arch/arm/mach-vexpress/include/mach/io.h
deleted file mode 100644
index 13522d86685e..000000000000
--- a/arch/arm/mach-vexpress/include/mach/io.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * arch/arm/mach-vexpress/include/mach/io.h
3 *
4 * Copyright (C) 2003 ARM Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#ifndef __ASM_ARM_ARCH_IO_H
21#define __ASM_ARM_ARCH_IO_H
22
23#define __io(a) __typesafe_io(a)
24#define __mem_pci(a) (a)
25
26#endif
diff --git a/arch/arm/mach-vt8500/include/mach/io.h b/arch/arm/mach-vt8500/include/mach/io.h
deleted file mode 100644
index 46181eecf273..000000000000
--- a/arch/arm/mach-vt8500/include/mach/io.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * arch/arm/mach-vt8500/include/mach/io.h
3 *
4 * Copyright (C) 2010 Alexey Charkov
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#ifndef __ASM_ARM_ARCH_IO_H
21#define __ASM_ARM_ARCH_IO_H
22
23#define __io(a) __typesafe_io((a) + 0xf0000000)
24#define __mem_pci(a) (a)
25
26#endif
diff --git a/arch/arm/mach-w90x900/dev.c b/arch/arm/mach-w90x900/dev.c
index db82568a998a..48f5b9fdfb7f 100644
--- a/arch/arm/mach-w90x900/dev.c
+++ b/arch/arm/mach-w90x900/dev.c
@@ -27,6 +27,7 @@
27#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
28#include <linux/spi/flash.h> 28#include <linux/spi/flash.h>
29 29
30#include <asm/system_misc.h>
30#include <asm/mach/arch.h> 31#include <asm/mach/arch.h>
31#include <asm/mach/map.h> 32#include <asm/mach/map.h>
32#include <asm/mach/irq.h> 33#include <asm/mach/irq.h>
diff --git a/arch/arm/mach-w90x900/include/mach/io.h b/arch/arm/mach-w90x900/include/mach/io.h
deleted file mode 100644
index d96ab99df05b..000000000000
--- a/arch/arm/mach-w90x900/include/mach/io.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * arch/arm/mach-w90x900/include/mach/io.h
3 *
4 * Copyright (c) 2008 Nuvoton technology corporation
5 * All rights reserved.
6 *
7 * Wan ZongShun <mcuos.com@gmail.com>
8 *
9 * Based on arch/arm/mach-s3c2410/include/mach/io.h
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 */
17
18#ifndef __ASM_ARM_ARCH_IO_H
19#define __ASM_ARM_ARCH_IO_H
20
21#define IO_SPACE_LIMIT 0xffffffff
22
23/*
24 * 1:1 mapping for ioremapped regions.
25 */
26
27#define __mem_pci(a) (a)
28#define __io(a) __typesafe_io(a)
29
30#endif
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index b1e192ba8c24..a53fd2aaa2f4 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -30,13 +30,13 @@
30 30
31static void __iomem *l2x0_base; 31static void __iomem *l2x0_base;
32static DEFINE_RAW_SPINLOCK(l2x0_lock); 32static DEFINE_RAW_SPINLOCK(l2x0_lock);
33static uint32_t l2x0_way_mask; /* Bitmask of active ways */ 33static u32 l2x0_way_mask; /* Bitmask of active ways */
34static uint32_t l2x0_size; 34static u32 l2x0_size;
35 35
36struct l2x0_regs l2x0_saved_regs; 36struct l2x0_regs l2x0_saved_regs;
37 37
38struct l2x0_of_data { 38struct l2x0_of_data {
39 void (*setup)(const struct device_node *, __u32 *, __u32 *); 39 void (*setup)(const struct device_node *, u32 *, u32 *);
40 void (*save)(void); 40 void (*save)(void);
41 void (*resume)(void); 41 void (*resume)(void);
42}; 42};
@@ -288,7 +288,7 @@ static void l2x0_disable(void)
288 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 288 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
289} 289}
290 290
291static void l2x0_unlock(__u32 cache_id) 291static void l2x0_unlock(u32 cache_id)
292{ 292{
293 int lockregs; 293 int lockregs;
294 int i; 294 int i;
@@ -307,11 +307,11 @@ static void l2x0_unlock(__u32 cache_id)
307 } 307 }
308} 308}
309 309
310void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) 310void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
311{ 311{
312 __u32 aux; 312 u32 aux;
313 __u32 cache_id; 313 u32 cache_id;
314 __u32 way_size = 0; 314 u32 way_size = 0;
315 int ways; 315 int ways;
316 const char *type; 316 const char *type;
317 317
@@ -388,7 +388,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
388 388
389#ifdef CONFIG_OF 389#ifdef CONFIG_OF
390static void __init l2x0_of_setup(const struct device_node *np, 390static void __init l2x0_of_setup(const struct device_node *np,
391 __u32 *aux_val, __u32 *aux_mask) 391 u32 *aux_val, u32 *aux_mask)
392{ 392{
393 u32 data[2] = { 0, 0 }; 393 u32 data[2] = { 0, 0 };
394 u32 tag = 0; 394 u32 tag = 0;
@@ -422,7 +422,7 @@ static void __init l2x0_of_setup(const struct device_node *np,
422} 422}
423 423
424static void __init pl310_of_setup(const struct device_node *np, 424static void __init pl310_of_setup(const struct device_node *np,
425 __u32 *aux_val, __u32 *aux_mask) 425 u32 *aux_val, u32 *aux_mask)
426{ 426{
427 u32 data[3] = { 0, 0, 0 }; 427 u32 data[3] = { 0, 0, 0 };
428 u32 tag[3] = { 0, 0, 0 }; 428 u32 tag[3] = { 0, 0, 0 };
@@ -548,7 +548,7 @@ static const struct of_device_id l2x0_ids[] __initconst = {
548 {} 548 {}
549}; 549};
550 550
551int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask) 551int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
552{ 552{
553 struct device_node *np; 553 struct device_node *np;
554 struct l2x0_of_data *data; 554 struct l2x0_of_data *data;
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index ec8c3befb9c8..1267e64133b9 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -23,10 +23,6 @@
23 23
24#include "mm.h" 24#include "mm.h"
25 25
26/*
27 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
28 * specific hacks for copying pages efficiently.
29 */
30#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ 26#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
31 L_PTE_MT_MINICACHE) 27 L_PTE_MT_MINICACHE)
32 28
@@ -78,10 +74,9 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
78 74
79 raw_spin_lock(&minicache_lock); 75 raw_spin_lock(&minicache_lock);
80 76
81 set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); 77 set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
82 flush_tlb_kernel_page(0xffff8000);
83 78
84 mc_copy_user_page((void *)0xffff8000, kto); 79 mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
85 80
86 raw_spin_unlock(&minicache_lock); 81 raw_spin_unlock(&minicache_lock);
87 82
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8b03a5814d00..b9bcc9d79176 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -24,9 +24,6 @@
24#error FIX ME 24#error FIX ME
25#endif 25#endif
26 26
27#define from_address (0xffff8000)
28#define to_address (0xffffc000)
29
30static DEFINE_RAW_SPINLOCK(v6_lock); 27static DEFINE_RAW_SPINLOCK(v6_lock);
31 28
32/* 29/*
@@ -90,14 +87,11 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
90 */ 87 */
91 raw_spin_lock(&v6_lock); 88 raw_spin_lock(&v6_lock);
92 89
93 set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0); 90 kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
94 set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0); 91 kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
95
96 kfrom = from_address + (offset << PAGE_SHIFT);
97 kto = to_address + (offset << PAGE_SHIFT);
98 92
99 flush_tlb_kernel_page(kfrom); 93 set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
100 flush_tlb_kernel_page(kto); 94 set_top_pte(kto, mk_pte(to, PAGE_KERNEL));
101 95
102 copy_page((void *)kto, (void *)kfrom); 96 copy_page((void *)kto, (void *)kfrom);
103 97
@@ -111,8 +105,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
111 */ 105 */
112static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr) 106static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
113{ 107{
114 unsigned int offset = CACHE_COLOUR(vaddr); 108 unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
115 unsigned long to = to_address + (offset << PAGE_SHIFT);
116 109
117 /* FIXME: not highmem safe */ 110 /* FIXME: not highmem safe */
118 discard_old_kernel_data(page_address(page)); 111 discard_old_kernel_data(page_address(page));
@@ -123,8 +116,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
123 */ 116 */
124 raw_spin_lock(&v6_lock); 117 raw_spin_lock(&v6_lock);
125 118
126 set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0); 119 set_top_pte(to, mk_pte(page, PAGE_KERNEL));
127 flush_tlb_kernel_page(to);
128 clear_page((void *)to); 120 clear_page((void *)to);
129 121
130 raw_spin_unlock(&v6_lock); 122 raw_spin_unlock(&v6_lock);
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 439d106ae638..0fb85025344d 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -23,12 +23,6 @@
23 23
24#include "mm.h" 24#include "mm.h"
25 25
26/*
27 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
28 * specific hacks for copying pages efficiently.
29 */
30#define COPYPAGE_MINICACHE 0xffff8000
31
32#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ 26#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
33 L_PTE_MT_MINICACHE) 27 L_PTE_MT_MINICACHE)
34 28
@@ -100,8 +94,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
100 94
101 raw_spin_lock(&minicache_lock); 95 raw_spin_lock(&minicache_lock);
102 96
103 set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); 97 set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
104 flush_tlb_kernel_page(COPYPAGE_MINICACHE);
105 98
106 mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); 99 mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
107 100
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1aa664a1999f..db23ae4aaaab 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -214,7 +214,8 @@ static int __init consistent_init(void)
214core_initcall(consistent_init); 214core_initcall(consistent_init);
215 215
216static void * 216static void *
217__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) 217__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
218 const void *caller)
218{ 219{
219 struct arm_vmregion *c; 220 struct arm_vmregion *c;
220 size_t align; 221 size_t align;
@@ -241,7 +242,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
241 * Allocate a virtual address in the consistent mapping region. 242 * Allocate a virtual address in the consistent mapping region.
242 */ 243 */
243 c = arm_vmregion_alloc(&consistent_head, align, size, 244 c = arm_vmregion_alloc(&consistent_head, align, size,
244 gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); 245 gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
245 if (c) { 246 if (c) {
246 pte_t *pte; 247 pte_t *pte;
247 int idx = CONSISTENT_PTE_INDEX(c->vm_start); 248 int idx = CONSISTENT_PTE_INDEX(c->vm_start);
@@ -320,14 +321,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
320 321
321#else /* !CONFIG_MMU */ 322#else /* !CONFIG_MMU */
322 323
323#define __dma_alloc_remap(page, size, gfp, prot) page_address(page) 324#define __dma_alloc_remap(page, size, gfp, prot, c) page_address(page)
324#define __dma_free_remap(addr, size) do { } while (0) 325#define __dma_free_remap(addr, size) do { } while (0)
325 326
326#endif /* CONFIG_MMU */ 327#endif /* CONFIG_MMU */
327 328
328static void * 329static void *
329__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, 330__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
330 pgprot_t prot) 331 pgprot_t prot, const void *caller)
331{ 332{
332 struct page *page; 333 struct page *page;
333 void *addr; 334 void *addr;
@@ -349,7 +350,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
349 return NULL; 350 return NULL;
350 351
351 if (!arch_is_coherent()) 352 if (!arch_is_coherent())
352 addr = __dma_alloc_remap(page, size, gfp, prot); 353 addr = __dma_alloc_remap(page, size, gfp, prot, caller);
353 else 354 else
354 addr = page_address(page); 355 addr = page_address(page);
355 356
@@ -374,7 +375,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf
374 return memory; 375 return memory;
375 376
376 return __dma_alloc(dev, size, handle, gfp, 377 return __dma_alloc(dev, size, handle, gfp,
377 pgprot_dmacoherent(pgprot_kernel)); 378 pgprot_dmacoherent(pgprot_kernel),
379 __builtin_return_address(0));
378} 380}
379EXPORT_SYMBOL(dma_alloc_coherent); 381EXPORT_SYMBOL(dma_alloc_coherent);
380 382
@@ -386,7 +388,8 @@ void *
386dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) 388dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
387{ 389{
388 return __dma_alloc(dev, size, handle, gfp, 390 return __dma_alloc(dev, size, handle, gfp,
389 pgprot_writecombine(pgprot_kernel)); 391 pgprot_writecombine(pgprot_kernel),
392 __builtin_return_address(0));
390} 393}
391EXPORT_SYMBOL(dma_alloc_writecombine); 394EXPORT_SYMBOL(dma_alloc_writecombine);
392 395
@@ -723,6 +726,9 @@ EXPORT_SYMBOL(dma_set_mask);
723 726
724static int __init dma_debug_do_init(void) 727static int __init dma_debug_do_init(void)
725{ 728{
729#ifdef CONFIG_MMU
730 arm_vmregion_create_proc("dma-mappings", &consistent_head);
731#endif
726 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); 732 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
727 return 0; 733 return 0;
728} 734}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5bdff5c3e6cb..9055b5a84ec5 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -165,7 +165,8 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
165 struct siginfo si; 165 struct siginfo si;
166 166
167#ifdef CONFIG_DEBUG_USER 167#ifdef CONFIG_DEBUG_USER
168 if (user_debug & UDBG_SEGV) { 168 if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
169 ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
169 printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n", 170 printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
170 tsk->comm, sig, addr, fsr); 171 tsk->comm, sig, addr, fsr);
171 show_pte(tsk->mm, addr); 172 show_pte(tsk->mm, addr);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 062d61a1f87d..77458548e031 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -22,15 +22,12 @@
22 22
23#ifdef CONFIG_CPU_CACHE_VIPT 23#ifdef CONFIG_CPU_CACHE_VIPT
24 24
25#define ALIAS_FLUSH_START 0xffff4000
26
27static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) 25static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
28{ 26{
29 unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); 27 unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
30 const int zero = 0; 28 const int zero = 0;
31 29
32 set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0); 30 set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
33 flush_tlb_kernel_page(to);
34 31
35 asm( "mcrr p15, 0, %1, %0, c14\n" 32 asm( "mcrr p15, 0, %1, %0, c14\n"
36 " mcr p15, 0, %2, c7, c10, 4" 33 " mcr p15, 0, %2, c7, c10, 4"
@@ -41,13 +38,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
41 38
42static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len) 39static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
43{ 40{
44 unsigned long colour = CACHE_COLOUR(vaddr); 41 unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
45 unsigned long offset = vaddr & (PAGE_SIZE - 1); 42 unsigned long offset = vaddr & (PAGE_SIZE - 1);
46 unsigned long to; 43 unsigned long to;
47 44
48 set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0); 45 set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
49 to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset; 46 to = va + offset;
50 flush_tlb_kernel_page(to);
51 flush_icache_range(to, to + len); 47 flush_icache_range(to, to + len);
52} 48}
53 49
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 5a21505d7550..21b9e1bf9b77 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -69,15 +69,14 @@ void *kmap_atomic(struct page *page)
69 * With debugging enabled, kunmap_atomic forces that entry to 0. 69 * With debugging enabled, kunmap_atomic forces that entry to 0.
70 * Make sure it was indeed properly unmapped. 70 * Make sure it was indeed properly unmapped.
71 */ 71 */
72 BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); 72 BUG_ON(!pte_none(get_top_pte(vaddr)));
73#endif 73#endif
74 set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
75 /* 74 /*
76 * When debugging is off, kunmap_atomic leaves the previous mapping 75 * When debugging is off, kunmap_atomic leaves the previous mapping
77 * in place, so this TLB flush ensures the TLB is updated with the 76 * in place, so the contained TLB flush ensures the TLB is updated
78 * new mapping. 77 * with the new mapping.
79 */ 78 */
80 local_flush_tlb_kernel_page(vaddr); 79 set_top_pte(vaddr, mk_pte(page, kmap_prot));
81 80
82 return (void *)vaddr; 81 return (void *)vaddr;
83} 82}
@@ -96,8 +95,7 @@ void __kunmap_atomic(void *kvaddr)
96 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); 95 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
97#ifdef CONFIG_DEBUG_HIGHMEM 96#ifdef CONFIG_DEBUG_HIGHMEM
98 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); 97 BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
99 set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); 98 set_top_pte(vaddr, __pte(0));
100 local_flush_tlb_kernel_page(vaddr);
101#else 99#else
102 (void) idx; /* to kill a warning */ 100 (void) idx; /* to kill a warning */
103#endif 101#endif
@@ -121,10 +119,9 @@ void *kmap_atomic_pfn(unsigned long pfn)
121 idx = type + KM_TYPE_NR * smp_processor_id(); 119 idx = type + KM_TYPE_NR * smp_processor_id();
122 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 120 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
123#ifdef CONFIG_DEBUG_HIGHMEM 121#ifdef CONFIG_DEBUG_HIGHMEM
124 BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); 122 BUG_ON(!pte_none(get_top_pte(vaddr)));
125#endif 123#endif
126 set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0); 124 set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
127 local_flush_tlb_kernel_page(vaddr);
128 125
129 return (void *)vaddr; 126 return (void *)vaddr;
130} 127}
@@ -132,11 +129,9 @@ void *kmap_atomic_pfn(unsigned long pfn)
132struct page *kmap_atomic_to_page(const void *ptr) 129struct page *kmap_atomic_to_page(const void *ptr)
133{ 130{
134 unsigned long vaddr = (unsigned long)ptr; 131 unsigned long vaddr = (unsigned long)ptr;
135 pte_t *pte;
136 132
137 if (vaddr < FIXADDR_START) 133 if (vaddr < FIXADDR_START)
138 return virt_to_page(ptr); 134 return virt_to_page(ptr);
139 135
140 pte = TOP_PTE(vaddr); 136 return pte_page(get_top_pte(vaddr));
141 return pte_page(*pte);
142} 137}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 245a55a0a5bb..595079fa9d1d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -658,7 +658,9 @@ void __init mem_init(void)
658#ifdef CONFIG_HIGHMEM 658#ifdef CONFIG_HIGHMEM
659 " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" 659 " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
660#endif 660#endif
661#ifdef CONFIG_MODULES
661 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" 662 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
663#endif
662 " .text : 0x%p" " - 0x%p" " (%4d kB)\n" 664 " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
663 " .init : 0x%p" " - 0x%p" " (%4d kB)\n" 665 " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
664 " .data : 0x%p" " - 0x%p" " (%4d kB)\n" 666 " .data : 0x%p" " - 0x%p" " (%4d kB)\n"
@@ -677,7 +679,9 @@ void __init mem_init(void)
677 MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * 679 MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
678 (PAGE_SIZE)), 680 (PAGE_SIZE)),
679#endif 681#endif
682#ifdef CONFIG_MODULES
680 MLM(MODULES_VADDR, MODULES_END), 683 MLM(MODULES_VADDR, MODULES_END),
684#endif
681 685
682 MLK_ROUNDUP(_text, _etext), 686 MLK_ROUNDUP(_text, _etext),
683 MLK_ROUNDUP(__init_begin, __init_end), 687 MLK_ROUNDUP(__init_begin, __init_end),
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 6780b49f2c69..4f55f5062ab7 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -308,11 +308,15 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
308} 308}
309EXPORT_SYMBOL(__arm_ioremap_pfn); 309EXPORT_SYMBOL(__arm_ioremap_pfn);
310 310
311void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
312 unsigned int, void *) =
313 __arm_ioremap_caller;
314
311void __iomem * 315void __iomem *
312__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) 316__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
313{ 317{
314 return __arm_ioremap_caller(phys_addr, size, mtype, 318 return arch_ioremap_caller(phys_addr, size, mtype,
315 __builtin_return_address(0)); 319 __builtin_return_address(0));
316} 320}
317EXPORT_SYMBOL(__arm_ioremap); 321EXPORT_SYMBOL(__arm_ioremap);
318 322
@@ -371,4 +375,11 @@ void __iounmap(volatile void __iomem *io_addr)
371 375
372 vunmap(addr); 376 vunmap(addr);
373} 377}
374EXPORT_SYMBOL(__iounmap); 378
379void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
380
381void __arm_iounmap(volatile void __iomem *io_addr)
382{
383 arch_iounmap(io_addr);
384}
385EXPORT_SYMBOL(__arm_iounmap);
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 70f6d3ea4834..27f4a619b35d 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -3,7 +3,31 @@
3/* the upper-most page table pointer */ 3/* the upper-most page table pointer */
4extern pmd_t *top_pmd; 4extern pmd_t *top_pmd;
5 5
6#define TOP_PTE(x) pte_offset_kernel(top_pmd, x) 6/*
7 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
8 * specific hacks for copying pages efficiently, while 0xffff4000
9 * is reserved for VIPT aliasing flushing by generic code.
10 *
11 * Note that we don't allow VIPT aliasing caches with SMP.
12 */
13#define COPYPAGE_MINICACHE 0xffff8000
14#define COPYPAGE_V6_FROM 0xffff8000
15#define COPYPAGE_V6_TO 0xffffc000
16/* PFN alias flushing, for VIPT caches */
17#define FLUSH_ALIAS_START 0xffff4000
18
19static inline void set_top_pte(unsigned long va, pte_t pte)
20{
21 pte_t *ptep = pte_offset_kernel(top_pmd, va);
22 set_pte_ext(ptep, pte, 0);
23 local_flush_tlb_kernel_page(va);
24}
25
26static inline pte_t get_top_pte(unsigned long va)
27{
28 pte_t *ptep = pte_offset_kernel(top_pmd, va);
29 return *ptep;
30}
7 31
8static inline pmd_t *pmd_off_k(unsigned long virt) 32static inline pmd_t *pmd_off_k(unsigned long virt)
9{ 33{
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index cd439c1dd506..b86f8933ff91 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -999,11 +999,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
999{ 999{
1000 struct map_desc map; 1000 struct map_desc map;
1001 unsigned long addr; 1001 unsigned long addr;
1002 void *vectors;
1002 1003
1003 /* 1004 /*
1004 * Allocate the vector page early. 1005 * Allocate the vector page early.
1005 */ 1006 */
1006 vectors_page = early_alloc(PAGE_SIZE); 1007 vectors = early_alloc(PAGE_SIZE);
1008
1009 early_trap_init(vectors);
1007 1010
1008 for (addr = VMALLOC_START; addr; addr += PMD_SIZE) 1011 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
1009 pmd_clear(pmd_off_k(addr)); 1012 pmd_clear(pmd_off_k(addr));
@@ -1043,7 +1046,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
1043 * location (0xffff0000). If we aren't using high-vectors, also 1046 * location (0xffff0000). If we aren't using high-vectors, also
1044 * create a mapping at the low-vectors virtual address. 1047 * create a mapping at the low-vectors virtual address.
1045 */ 1048 */
1046 map.pfn = __phys_to_pfn(virt_to_phys(vectors_page)); 1049 map.pfn = __phys_to_pfn(virt_to_phys(vectors));
1047 map.virtual = 0xffff0000; 1050 map.virtual = 0xffff0000;
1048 map.length = PAGE_SIZE; 1051 map.length = PAGE_SIZE;
1049 map.type = MT_HIGH_VECTORS; 1052 map.type = MT_HIGH_VECTORS;
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 4fc6794cca4b..6486d2f253cd 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -86,13 +86,17 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
86} 86}
87EXPORT_SYMBOL(__arm_ioremap); 87EXPORT_SYMBOL(__arm_ioremap);
88 88
89void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *);
90
89void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, 91void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
90 unsigned int mtype, void *caller) 92 unsigned int mtype, void *caller)
91{ 93{
92 return __arm_ioremap(phys_addr, size, mtype); 94 return __arm_ioremap(phys_addr, size, mtype);
93} 95}
94 96
95void __iounmap(volatile void __iomem *addr) 97void (*arch_iounmap)(volatile void __iomem *);
98
99void __arm_iounmap(volatile void __iomem *addr)
96{ 100{
97} 101}
98EXPORT_SYMBOL(__iounmap); 102EXPORT_SYMBOL(__arm_iounmap);
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index 036fdbfdd62f..a631016e1f8f 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -1,5 +1,8 @@
1#include <linux/fs.h>
1#include <linux/spinlock.h> 2#include <linux/spinlock.h>
2#include <linux/list.h> 3#include <linux/list.h>
4#include <linux/proc_fs.h>
5#include <linux/seq_file.h>
3#include <linux/slab.h> 6#include <linux/slab.h>
4 7
5#include "vmregion.h" 8#include "vmregion.h"
@@ -36,7 +39,7 @@
36 39
37struct arm_vmregion * 40struct arm_vmregion *
38arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, 41arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
39 size_t size, gfp_t gfp) 42 size_t size, gfp_t gfp, const void *caller)
40{ 43{
41 unsigned long start = head->vm_start, addr = head->vm_end; 44 unsigned long start = head->vm_start, addr = head->vm_end;
42 unsigned long flags; 45 unsigned long flags;
@@ -52,6 +55,8 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
52 if (!new) 55 if (!new)
53 goto out; 56 goto out;
54 57
58 new->caller = caller;
59
55 spin_lock_irqsave(&head->vm_lock, flags); 60 spin_lock_irqsave(&head->vm_lock, flags);
56 61
57 addr = rounddown(addr - size, align); 62 addr = rounddown(addr - size, align);
@@ -129,3 +134,72 @@ void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
129 134
130 kfree(c); 135 kfree(c);
131} 136}
137
138#ifdef CONFIG_PROC_FS
139static int arm_vmregion_show(struct seq_file *m, void *p)
140{
141 struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);
142
143 seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
144 c->vm_end - c->vm_start);
145 if (c->caller)
146 seq_printf(m, " %pS", (void *)c->caller);
147 seq_putc(m, '\n');
148 return 0;
149}
150
151static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
152{
153 struct arm_vmregion_head *h = m->private;
154 spin_lock_irq(&h->vm_lock);
155 return seq_list_start(&h->vm_list, *pos);
156}
157
158static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
159{
160 struct arm_vmregion_head *h = m->private;
161 return seq_list_next(p, &h->vm_list, pos);
162}
163
164static void arm_vmregion_stop(struct seq_file *m, void *p)
165{
166 struct arm_vmregion_head *h = m->private;
167 spin_unlock_irq(&h->vm_lock);
168}
169
170static const struct seq_operations arm_vmregion_ops = {
171 .start = arm_vmregion_start,
172 .stop = arm_vmregion_stop,
173 .next = arm_vmregion_next,
174 .show = arm_vmregion_show,
175};
176
177static int arm_vmregion_open(struct inode *inode, struct file *file)
178{
179 struct arm_vmregion_head *h = PDE(inode)->data;
180 int ret = seq_open(file, &arm_vmregion_ops);
181 if (!ret) {
182 struct seq_file *m = file->private_data;
183 m->private = h;
184 }
185 return ret;
186}
187
188static const struct file_operations arm_vmregion_fops = {
189 .open = arm_vmregion_open,
190 .read = seq_read,
191 .llseek = seq_lseek,
192 .release = seq_release,
193};
194
195int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
196{
197 proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
198 return 0;
199}
200#else
201int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
202{
203 return 0;
204}
205#endif
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 15e9f044db9f..162be662c088 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -19,11 +19,14 @@ struct arm_vmregion {
19 unsigned long vm_end; 19 unsigned long vm_end;
20 struct page *vm_pages; 20 struct page *vm_pages;
21 int vm_active; 21 int vm_active;
22 const void *caller;
22}; 23};
23 24
24struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t); 25struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
25struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long); 26struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
26struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long); 27struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
27void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *); 28void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
28 29
30int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
31
29#endif 32#endif
diff --git a/arch/arm/net/Makefile b/arch/arm/net/Makefile
new file mode 100644
index 000000000000..c2c10841b6be
--- /dev/null
+++ b/arch/arm/net/Makefile
@@ -0,0 +1,3 @@
1# ARM-specific networking code
2
3obj-$(CONFIG_BPF_JIT) += bpf_jit_32.o
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
new file mode 100644
index 000000000000..62135849f48b
--- /dev/null
+++ b/arch/arm/net/bpf_jit_32.c
@@ -0,0 +1,915 @@
1/*
2 * Just-In-Time compiler for BPF filters on 32bit ARM
3 *
4 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License.
9 */
10
11#include <linux/bitops.h>
12#include <linux/compiler.h>
13#include <linux/errno.h>
14#include <linux/filter.h>
15#include <linux/moduleloader.h>
16#include <linux/netdevice.h>
17#include <linux/string.h>
18#include <linux/slab.h>
19#include <asm/cacheflush.h>
20#include <asm/hwcap.h>
21
22#include "bpf_jit_32.h"
23
24/*
25 * ABI:
26 *
27 * r0 scratch register
28 * r4 BPF register A
29 * r5 BPF register X
30 * r6 pointer to the skb
31 * r7 skb->data
32 * r8 skb_headlen(skb)
33 */
34
35#define r_scratch ARM_R0
36/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
37#define r_off ARM_R1
38#define r_A ARM_R4
39#define r_X ARM_R5
40#define r_skb ARM_R6
41#define r_skb_data ARM_R7
42#define r_skb_hl ARM_R8
43
44#define SCRATCH_SP_OFFSET 0
45#define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + (k))
46
47#define SEEN_MEM ((1 << BPF_MEMWORDS) - 1)
48#define SEEN_MEM_WORD(k) (1 << (k))
49#define SEEN_X (1 << BPF_MEMWORDS)
50#define SEEN_CALL (1 << (BPF_MEMWORDS + 1))
51#define SEEN_SKB (1 << (BPF_MEMWORDS + 2))
52#define SEEN_DATA (1 << (BPF_MEMWORDS + 3))
53
54#define FLAG_NEED_X_RESET (1 << 0)
55
56struct jit_ctx {
57 const struct sk_filter *skf;
58 unsigned idx;
59 unsigned prologue_bytes;
60 int ret0_fp_idx;
61 u32 seen;
62 u32 flags;
63 u32 *offsets;
64 u32 *target;
65#if __LINUX_ARM_ARCH__ < 7
66 u16 epilogue_bytes;
67 u16 imm_count;
68 u32 *imms;
69#endif
70};
71
72int bpf_jit_enable __read_mostly;
73
74static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
75{
76 u8 ret;
77 int err;
78
79 err = skb_copy_bits(skb, offset, &ret, 1);
80
81 return (u64)err << 32 | ret;
82}
83
84static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
85{
86 u16 ret;
87 int err;
88
89 err = skb_copy_bits(skb, offset, &ret, 2);
90
91 return (u64)err << 32 | ntohs(ret);
92}
93
94static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
95{
96 u32 ret;
97 int err;
98
99 err = skb_copy_bits(skb, offset, &ret, 4);
100
101 return (u64)err << 32 | ntohl(ret);
102}
103
104/*
105 * Wrapper that handles both OABI and EABI and assures Thumb2 interworking
106 * (where the assembly routines like __aeabi_uidiv could cause problems).
107 */
108static u32 jit_udiv(u32 dividend, u32 divisor)
109{
110 return dividend / divisor;
111}
112
113static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
114{
115 if (ctx->target != NULL)
116 ctx->target[ctx->idx] = inst | (cond << 28);
117
118 ctx->idx++;
119}
120
121/*
122 * Emit an instruction that will be executed unconditionally.
123 */
124static inline void emit(u32 inst, struct jit_ctx *ctx)
125{
126 _emit(ARM_COND_AL, inst, ctx);
127}
128
129static u16 saved_regs(struct jit_ctx *ctx)
130{
131 u16 ret = 0;
132
133 if ((ctx->skf->len > 1) ||
134 (ctx->skf->insns[0].code == BPF_S_RET_A))
135 ret |= 1 << r_A;
136
137#ifdef CONFIG_FRAME_POINTER
138 ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
139#else
140 if (ctx->seen & SEEN_CALL)
141 ret |= 1 << ARM_LR;
142#endif
143 if (ctx->seen & (SEEN_DATA | SEEN_SKB))
144 ret |= 1 << r_skb;
145 if (ctx->seen & SEEN_DATA)
146 ret |= (1 << r_skb_data) | (1 << r_skb_hl);
147 if (ctx->seen & SEEN_X)
148 ret |= 1 << r_X;
149
150 return ret;
151}
152
153static inline int mem_words_used(struct jit_ctx *ctx)
154{
155 /* yes, we do waste some stack space IF there are "holes" in the set" */
156 return fls(ctx->seen & SEEN_MEM);
157}
158
159static inline bool is_load_to_a(u16 inst)
160{
161 switch (inst) {
162 case BPF_S_LD_W_LEN:
163 case BPF_S_LD_W_ABS:
164 case BPF_S_LD_H_ABS:
165 case BPF_S_LD_B_ABS:
166 case BPF_S_ANC_CPU:
167 case BPF_S_ANC_IFINDEX:
168 case BPF_S_ANC_MARK:
169 case BPF_S_ANC_PROTOCOL:
170 case BPF_S_ANC_RXHASH:
171 case BPF_S_ANC_QUEUE:
172 return true;
173 default:
174 return false;
175 }
176}
177
178static void build_prologue(struct jit_ctx *ctx)
179{
180 u16 reg_set = saved_regs(ctx);
181 u16 first_inst = ctx->skf->insns[0].code;
182 u16 off;
183
184#ifdef CONFIG_FRAME_POINTER
185 emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
186 emit(ARM_PUSH(reg_set), ctx);
187 emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
188#else
189 if (reg_set)
190 emit(ARM_PUSH(reg_set), ctx);
191#endif
192
193 if (ctx->seen & (SEEN_DATA | SEEN_SKB))
194 emit(ARM_MOV_R(r_skb, ARM_R0), ctx);
195
196 if (ctx->seen & SEEN_DATA) {
197 off = offsetof(struct sk_buff, data);
198 emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
199 /* headlen = len - data_len */
200 off = offsetof(struct sk_buff, len);
201 emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
202 off = offsetof(struct sk_buff, data_len);
203 emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
204 emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
205 }
206
207 if (ctx->flags & FLAG_NEED_X_RESET)
208 emit(ARM_MOV_I(r_X, 0), ctx);
209
210 /* do not leak kernel data to userspace */
211 if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
212 emit(ARM_MOV_I(r_A, 0), ctx);
213
214 /* stack space for the BPF_MEM words */
215 if (ctx->seen & SEEN_MEM)
216 emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
217}
218
219static void build_epilogue(struct jit_ctx *ctx)
220{
221 u16 reg_set = saved_regs(ctx);
222
223 if (ctx->seen & SEEN_MEM)
224 emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
225
226 reg_set &= ~(1 << ARM_LR);
227
228#ifdef CONFIG_FRAME_POINTER
229 /* the first instruction of the prologue was: mov ip, sp */
230 reg_set &= ~(1 << ARM_IP);
231 reg_set |= (1 << ARM_SP);
232 emit(ARM_LDM(ARM_SP, reg_set), ctx);
233#else
234 if (reg_set) {
235 if (ctx->seen & SEEN_CALL)
236 reg_set |= 1 << ARM_PC;
237 emit(ARM_POP(reg_set), ctx);
238 }
239
240 if (!(ctx->seen & SEEN_CALL))
241 emit(ARM_BX(ARM_LR), ctx);
242#endif
243}
244
245static int16_t imm8m(u32 x)
246{
247 u32 rot;
248
249 for (rot = 0; rot < 16; rot++)
250 if ((x & ~ror32(0xff, 2 * rot)) == 0)
251 return rol32(x, 2 * rot) | (rot << 8);
252
253 return -1;
254}
255
256#if __LINUX_ARM_ARCH__ < 7
257
258static u16 imm_offset(u32 k, struct jit_ctx *ctx)
259{
260 unsigned i = 0, offset;
261 u16 imm;
262
263 /* on the "fake" run we just count them (duplicates included) */
264 if (ctx->target == NULL) {
265 ctx->imm_count++;
266 return 0;
267 }
268
269 while ((i < ctx->imm_count) && ctx->imms[i]) {
270 if (ctx->imms[i] == k)
271 break;
272 i++;
273 }
274
275 if (ctx->imms[i] == 0)
276 ctx->imms[i] = k;
277
278 /* constants go just after the epilogue */
279 offset = ctx->offsets[ctx->skf->len];
280 offset += ctx->prologue_bytes;
281 offset += ctx->epilogue_bytes;
282 offset += i * 4;
283
284 ctx->target[offset / 4] = k;
285
286 /* PC in ARM mode == address of the instruction + 8 */
287 imm = offset - (8 + ctx->idx * 4);
288
289 return imm;
290}
291
292#endif /* __LINUX_ARM_ARCH__ */
293
294/*
295 * Move an immediate that's not an imm8m to a core register.
296 */
297static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
298{
299#if __LINUX_ARM_ARCH__ < 7
300 emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
301#else
302 emit(ARM_MOVW(rd, val & 0xffff), ctx);
303 if (val > 0xffff)
304 emit(ARM_MOVT(rd, val >> 16), ctx);
305#endif
306}
307
308static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
309{
310 int imm12 = imm8m(val);
311
312 if (imm12 >= 0)
313 emit(ARM_MOV_I(rd, imm12), ctx);
314 else
315 emit_mov_i_no8m(rd, val, ctx);
316}
317
318#if __LINUX_ARM_ARCH__ < 6
319
320static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
321{
322 _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
323 _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
324 _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
325 _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
326 _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
327 _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
328 _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
329 _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
330}
331
332static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
333{
334 _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
335 _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
336 _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
337}
338
339static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
340{
341 emit(ARM_LSL_R(ARM_R1, r_src, 8), ctx);
342 emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSL, 8), ctx);
343 emit(ARM_LSL_I(r_dst, r_dst, 8), ctx);
344 emit(ARM_LSL_R(r_dst, r_dst, 8), ctx);
345}
346
347#else /* ARMv6+ */
348
349static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
350{
351 _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
352#ifdef __LITTLE_ENDIAN
353 _emit(cond, ARM_REV(r_res, r_res), ctx);
354#endif
355}
356
357static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
358{
359 _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
360#ifdef __LITTLE_ENDIAN
361 _emit(cond, ARM_REV16(r_res, r_res), ctx);
362#endif
363}
364
365static inline void emit_swap16(u8 r_dst __maybe_unused,
366 u8 r_src __maybe_unused,
367 struct jit_ctx *ctx __maybe_unused)
368{
369#ifdef __LITTLE_ENDIAN
370 emit(ARM_REV16(r_dst, r_src), ctx);
371#endif
372}
373
374#endif /* __LINUX_ARM_ARCH__ < 6 */
375
376
377/* Compute the immediate value for a PC-relative branch. */
378static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
379{
380 u32 imm;
381
382 if (ctx->target == NULL)
383 return 0;
384 /*
385 * BPF allows only forward jumps and the offset of the target is
386 * still the one computed during the first pass.
387 */
388 imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);
389
390 return imm >> 2;
391}
392
393#define OP_IMM3(op, r1, r2, imm_val, ctx) \
394 do { \
395 imm12 = imm8m(imm_val); \
396 if (imm12 < 0) { \
397 emit_mov_i_no8m(r_scratch, imm_val, ctx); \
398 emit(op ## _R((r1), (r2), r_scratch), ctx); \
399 } else { \
400 emit(op ## _I((r1), (r2), imm12), ctx); \
401 } \
402 } while (0)
403
404static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
405{
406 if (ctx->ret0_fp_idx >= 0) {
407 _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
408 /* NOP to keep the size constant between passes */
409 emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
410 } else {
411 _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
412 _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
413 }
414}
415
416static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
417{
418#if __LINUX_ARM_ARCH__ < 5
419 emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
420
421 if (elf_hwcap & HWCAP_THUMB)
422 emit(ARM_BX(tgt_reg), ctx);
423 else
424 emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
425#else
426 emit(ARM_BLX_R(tgt_reg), ctx);
427#endif
428}
429
430static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
431{
432#if __LINUX_ARM_ARCH__ == 7
433 if (elf_hwcap & HWCAP_IDIVA) {
434 emit(ARM_UDIV(rd, rm, rn), ctx);
435 return;
436 }
437#endif
438 if (rm != ARM_R0)
439 emit(ARM_MOV_R(ARM_R0, rm), ctx);
440 if (rn != ARM_R1)
441 emit(ARM_MOV_R(ARM_R1, rn), ctx);
442
443 ctx->seen |= SEEN_CALL;
444 emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
445 emit_blx_r(ARM_R3, ctx);
446
447 if (rd != ARM_R0)
448 emit(ARM_MOV_R(rd, ARM_R0), ctx);
449}
450
451static inline void update_on_xread(struct jit_ctx *ctx)
452{
453 if (!(ctx->seen & SEEN_X))
454 ctx->flags |= FLAG_NEED_X_RESET;
455
456 ctx->seen |= SEEN_X;
457}
458
459static int build_body(struct jit_ctx *ctx)
460{
461 void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
462 const struct sk_filter *prog = ctx->skf;
463 const struct sock_filter *inst;
464 unsigned i, load_order, off, condt;
465 int imm12;
466 u32 k;
467
468 for (i = 0; i < prog->len; i++) {
469 inst = &(prog->insns[i]);
470 /* K as an immediate value operand */
471 k = inst->k;
472
473 /* compute offsets only in the fake pass */
474 if (ctx->target == NULL)
475 ctx->offsets[i] = ctx->idx * 4;
476
477 switch (inst->code) {
478 case BPF_S_LD_IMM:
479 emit_mov_i(r_A, k, ctx);
480 break;
481 case BPF_S_LD_W_LEN:
482 ctx->seen |= SEEN_SKB;
483 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
484 emit(ARM_LDR_I(r_A, r_skb,
485 offsetof(struct sk_buff, len)), ctx);
486 break;
487 case BPF_S_LD_MEM:
488 /* A = scratch[k] */
489 ctx->seen |= SEEN_MEM_WORD(k);
490 emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
491 break;
492 case BPF_S_LD_W_ABS:
493 load_order = 2;
494 goto load;
495 case BPF_S_LD_H_ABS:
496 load_order = 1;
497 goto load;
498 case BPF_S_LD_B_ABS:
499 load_order = 0;
500load:
501 /* the interpreter will deal with the negative K */
502 if ((int)k < 0)
503 return -ENOTSUPP;
504 emit_mov_i(r_off, k, ctx);
505load_common:
506 ctx->seen |= SEEN_DATA | SEEN_CALL;
507
508 if (load_order > 0) {
509 emit(ARM_SUB_I(r_scratch, r_skb_hl,
510 1 << load_order), ctx);
511 emit(ARM_CMP_R(r_scratch, r_off), ctx);
512 condt = ARM_COND_HS;
513 } else {
514 emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
515 condt = ARM_COND_HI;
516 }
517
518 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
519 ctx);
520
521 if (load_order == 0)
522 _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
523 ctx);
524 else if (load_order == 1)
525 emit_load_be16(condt, r_A, r_scratch, ctx);
526 else if (load_order == 2)
527 emit_load_be32(condt, r_A, r_scratch, ctx);
528
529 _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);
530
531 /* the slowpath */
532 emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
533 emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
534 /* the offset is already in R1 */
535 emit_blx_r(ARM_R3, ctx);
536 /* check the result of skb_copy_bits */
537 emit(ARM_CMP_I(ARM_R1, 0), ctx);
538 emit_err_ret(ARM_COND_NE, ctx);
539 emit(ARM_MOV_R(r_A, ARM_R0), ctx);
540 break;
541 case BPF_S_LD_W_IND:
542 load_order = 2;
543 goto load_ind;
544 case BPF_S_LD_H_IND:
545 load_order = 1;
546 goto load_ind;
547 case BPF_S_LD_B_IND:
548 load_order = 0;
549load_ind:
550 OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
551 goto load_common;
552 case BPF_S_LDX_IMM:
553 ctx->seen |= SEEN_X;
554 emit_mov_i(r_X, k, ctx);
555 break;
556 case BPF_S_LDX_W_LEN:
557 ctx->seen |= SEEN_X | SEEN_SKB;
558 emit(ARM_LDR_I(r_X, r_skb,
559 offsetof(struct sk_buff, len)), ctx);
560 break;
561 case BPF_S_LDX_MEM:
562 ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
563 emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
564 break;
565 case BPF_S_LDX_B_MSH:
566 /* x = ((*(frame + k)) & 0xf) << 2; */
567 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
568 /* the interpreter should deal with the negative K */
569 if (k < 0)
570 return -1;
571 /* offset in r1: we might have to take the slow path */
572 emit_mov_i(r_off, k, ctx);
573 emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
574
575 /* load in r0: common with the slowpath */
576 _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
577 ARM_R1), ctx);
578 /*
579 * emit_mov_i() might generate one or two instructions,
580 * the same holds for emit_blx_r()
581 */
582 _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);
583
584 emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
585 /* r_off is r1 */
586 emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
587 emit_blx_r(ARM_R3, ctx);
588 /* check the return value of skb_copy_bits */
589 emit(ARM_CMP_I(ARM_R1, 0), ctx);
590 emit_err_ret(ARM_COND_NE, ctx);
591
592 emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
593 emit(ARM_LSL_I(r_X, r_X, 2), ctx);
594 break;
595 case BPF_S_ST:
596 ctx->seen |= SEEN_MEM_WORD(k);
597 emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
598 break;
599 case BPF_S_STX:
600 update_on_xread(ctx);
601 ctx->seen |= SEEN_MEM_WORD(k);
602 emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
603 break;
604 case BPF_S_ALU_ADD_K:
605 /* A += K */
606 OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
607 break;
608 case BPF_S_ALU_ADD_X:
609 update_on_xread(ctx);
610 emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
611 break;
612 case BPF_S_ALU_SUB_K:
613 /* A -= K */
614 OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
615 break;
616 case BPF_S_ALU_SUB_X:
617 update_on_xread(ctx);
618 emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
619 break;
620 case BPF_S_ALU_MUL_K:
621 /* A *= K */
622 emit_mov_i(r_scratch, k, ctx);
623 emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
624 break;
625 case BPF_S_ALU_MUL_X:
626 update_on_xread(ctx);
627 emit(ARM_MUL(r_A, r_A, r_X), ctx);
628 break;
629 case BPF_S_ALU_DIV_K:
630 /* current k == reciprocal_value(userspace k) */
631 emit_mov_i(r_scratch, k, ctx);
632 /* A = top 32 bits of the product */
633 emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
634 break;
635 case BPF_S_ALU_DIV_X:
636 update_on_xread(ctx);
637 emit(ARM_CMP_I(r_X, 0), ctx);
638 emit_err_ret(ARM_COND_EQ, ctx);
639 emit_udiv(r_A, r_A, r_X, ctx);
640 break;
641 case BPF_S_ALU_OR_K:
642 /* A |= K */
643 OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
644 break;
645 case BPF_S_ALU_OR_X:
646 update_on_xread(ctx);
647 emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
648 break;
649 case BPF_S_ALU_AND_K:
650 /* A &= K */
651 OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
652 break;
653 case BPF_S_ALU_AND_X:
654 update_on_xread(ctx);
655 emit(ARM_AND_R(r_A, r_A, r_X), ctx);
656 break;
657 case BPF_S_ALU_LSH_K:
658 if (unlikely(k > 31))
659 return -1;
660 emit(ARM_LSL_I(r_A, r_A, k), ctx);
661 break;
662 case BPF_S_ALU_LSH_X:
663 update_on_xread(ctx);
664 emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
665 break;
666 case BPF_S_ALU_RSH_K:
667 if (unlikely(k > 31))
668 return -1;
669 emit(ARM_LSR_I(r_A, r_A, k), ctx);
670 break;
671 case BPF_S_ALU_RSH_X:
672 update_on_xread(ctx);
673 emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
674 break;
675 case BPF_S_ALU_NEG:
676 /* A = -A */
677 emit(ARM_RSB_I(r_A, r_A, 0), ctx);
678 break;
679 case BPF_S_JMP_JA:
680 /* pc += K */
681 emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
682 break;
683 case BPF_S_JMP_JEQ_K:
684 /* pc += (A == K) ? pc->jt : pc->jf */
685 condt = ARM_COND_EQ;
686 goto cmp_imm;
687 case BPF_S_JMP_JGT_K:
688 /* pc += (A > K) ? pc->jt : pc->jf */
689 condt = ARM_COND_HI;
690 goto cmp_imm;
691 case BPF_S_JMP_JGE_K:
692 /* pc += (A >= K) ? pc->jt : pc->jf */
693 condt = ARM_COND_HS;
694cmp_imm:
695 imm12 = imm8m(k);
696 if (imm12 < 0) {
697 emit_mov_i_no8m(r_scratch, k, ctx);
698 emit(ARM_CMP_R(r_A, r_scratch), ctx);
699 } else {
700 emit(ARM_CMP_I(r_A, imm12), ctx);
701 }
702cond_jump:
703 if (inst->jt)
704 _emit(condt, ARM_B(b_imm(i + inst->jt + 1,
705 ctx)), ctx);
706 if (inst->jf)
707 _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
708 ctx)), ctx);
709 break;
710 case BPF_S_JMP_JEQ_X:
711 /* pc += (A == X) ? pc->jt : pc->jf */
712 condt = ARM_COND_EQ;
713 goto cmp_x;
714 case BPF_S_JMP_JGT_X:
715 /* pc += (A > X) ? pc->jt : pc->jf */
716 condt = ARM_COND_HI;
717 goto cmp_x;
718 case BPF_S_JMP_JGE_X:
719 /* pc += (A >= X) ? pc->jt : pc->jf */
720 condt = ARM_COND_CS;
721cmp_x:
722 update_on_xread(ctx);
723 emit(ARM_CMP_R(r_A, r_X), ctx);
724 goto cond_jump;
725 case BPF_S_JMP_JSET_K:
726 /* pc += (A & K) ? pc->jt : pc->jf */
727 condt = ARM_COND_NE;
728 /* not set iff all zeroes iff Z==1 iff EQ */
729
730 imm12 = imm8m(k);
731 if (imm12 < 0) {
732 emit_mov_i_no8m(r_scratch, k, ctx);
733 emit(ARM_TST_R(r_A, r_scratch), ctx);
734 } else {
735 emit(ARM_TST_I(r_A, imm12), ctx);
736 }
737 goto cond_jump;
738 case BPF_S_JMP_JSET_X:
739 /* pc += (A & X) ? pc->jt : pc->jf */
740 update_on_xread(ctx);
741 condt = ARM_COND_NE;
742 emit(ARM_TST_R(r_A, r_X), ctx);
743 goto cond_jump;
744 case BPF_S_RET_A:
745 emit(ARM_MOV_R(ARM_R0, r_A), ctx);
746 goto b_epilogue;
747 case BPF_S_RET_K:
748 if ((k == 0) && (ctx->ret0_fp_idx < 0))
749 ctx->ret0_fp_idx = i;
750 emit_mov_i(ARM_R0, k, ctx);
751b_epilogue:
752 if (i != ctx->skf->len - 1)
753 emit(ARM_B(b_imm(prog->len, ctx)), ctx);
754 break;
755 case BPF_S_MISC_TAX:
756 /* X = A */
757 ctx->seen |= SEEN_X;
758 emit(ARM_MOV_R(r_X, r_A), ctx);
759 break;
760 case BPF_S_MISC_TXA:
761 /* A = X */
762 update_on_xread(ctx);
763 emit(ARM_MOV_R(r_A, r_X), ctx);
764 break;
765 case BPF_S_ANC_PROTOCOL:
766 /* A = ntohs(skb->protocol) */
767 ctx->seen |= SEEN_SKB;
768 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
769 protocol) != 2);
770 off = offsetof(struct sk_buff, protocol);
771 emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
772 emit_swap16(r_A, r_scratch, ctx);
773 break;
774 case BPF_S_ANC_CPU:
775 /* r_scratch = current_thread_info() */
776 OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
777 /* A = current_thread_info()->cpu */
778 BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
779 off = offsetof(struct thread_info, cpu);
780 emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
781 break;
782 case BPF_S_ANC_IFINDEX:
783 /* A = skb->dev->ifindex */
784 ctx->seen |= SEEN_SKB;
785 off = offsetof(struct sk_buff, dev);
786 emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
787
788 emit(ARM_CMP_I(r_scratch, 0), ctx);
789 emit_err_ret(ARM_COND_EQ, ctx);
790
791 BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
792 ifindex) != 4);
793 off = offsetof(struct net_device, ifindex);
794 emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
795 break;
796 case BPF_S_ANC_MARK:
797 ctx->seen |= SEEN_SKB;
798 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
799 off = offsetof(struct sk_buff, mark);
800 emit(ARM_LDR_I(r_A, r_skb, off), ctx);
801 break;
802 case BPF_S_ANC_RXHASH:
803 ctx->seen |= SEEN_SKB;
804 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
805 off = offsetof(struct sk_buff, rxhash);
806 emit(ARM_LDR_I(r_A, r_skb, off), ctx);
807 break;
808 case BPF_S_ANC_QUEUE:
809 ctx->seen |= SEEN_SKB;
810 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
811 queue_mapping) != 2);
812 BUILD_BUG_ON(offsetof(struct sk_buff,
813 queue_mapping) > 0xff);
814 off = offsetof(struct sk_buff, queue_mapping);
815 emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
816 break;
817 default:
818 return -1;
819 }
820 }
821
822 /* compute offsets only during the first pass */
823 if (ctx->target == NULL)
824 ctx->offsets[i] = ctx->idx * 4;
825
826 return 0;
827}
828
829
830void bpf_jit_compile(struct sk_filter *fp)
831{
832 struct jit_ctx ctx;
833 unsigned tmp_idx;
834 unsigned alloc_size;
835
836 if (!bpf_jit_enable)
837 return;
838
839 memset(&ctx, 0, sizeof(ctx));
840 ctx.skf = fp;
841 ctx.ret0_fp_idx = -1;
842
843 ctx.offsets = kzalloc(GFP_KERNEL, 4 * (ctx.skf->len + 1));
844 if (ctx.offsets == NULL)
845 return;
846
847 /* fake pass to fill in the ctx->seen */
848 if (unlikely(build_body(&ctx)))
849 goto out;
850
851 tmp_idx = ctx.idx;
852 build_prologue(&ctx);
853 ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
854
855#if __LINUX_ARM_ARCH__ < 7
856 tmp_idx = ctx.idx;
857 build_epilogue(&ctx);
858 ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
859
860 ctx.idx += ctx.imm_count;
861 if (ctx.imm_count) {
862 ctx.imms = kzalloc(GFP_KERNEL, 4 * ctx.imm_count);
863 if (ctx.imms == NULL)
864 goto out;
865 }
866#else
867 /* there's nothing after the epilogue on ARMv7 */
868 build_epilogue(&ctx);
869#endif
870
871 alloc_size = 4 * ctx.idx;
872 ctx.target = module_alloc(max(sizeof(struct work_struct),
873 alloc_size));
874 if (unlikely(ctx.target == NULL))
875 goto out;
876
877 ctx.idx = 0;
878 build_prologue(&ctx);
879 build_body(&ctx);
880 build_epilogue(&ctx);
881
882 flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
883
884#if __LINUX_ARM_ARCH__ < 7
885 if (ctx.imm_count)
886 kfree(ctx.imms);
887#endif
888
889 if (bpf_jit_enable > 1)
890 print_hex_dump(KERN_INFO, "BPF JIT code: ",
891 DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
892 alloc_size, false);
893
894 fp->bpf_func = (void *)ctx.target;
895out:
896 kfree(ctx.offsets);
897 return;
898}
899
900static void bpf_jit_free_worker(struct work_struct *work)
901{
902 module_free(NULL, work);
903}
904
905void bpf_jit_free(struct sk_filter *fp)
906{
907 struct work_struct *work;
908
909 if (fp->bpf_func != sk_run_filter) {
910 work = (struct work_struct *)fp->bpf_func;
911
912 INIT_WORK(work, bpf_jit_free_worker);
913 schedule_work(work);
914 }
915}
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
new file mode 100644
index 000000000000..99ae5e3f46d2
--- /dev/null
+++ b/arch/arm/net/bpf_jit_32.h
@@ -0,0 +1,190 @@
/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#ifndef PFILTER_OPCODES_ARM_H
#define PFILTER_OPCODES_ARM_H

/* ARM core register numbers */
#define ARM_R0	0
#define ARM_R1	1
#define ARM_R2	2
#define ARM_R3	3
#define ARM_R4	4
#define ARM_R5	5
#define ARM_R6	6
#define ARM_R7	7
#define ARM_R8	8
#define ARM_R9	9
#define ARM_R10	10
#define ARM_FP	11
#define ARM_IP	12
#define ARM_SP	13
#define ARM_LR	14
#define ARM_PC	15

/* condition codes (bits [31:28] of an ARM instruction) */
#define ARM_COND_EQ		0x0
#define ARM_COND_NE		0x1
#define ARM_COND_CS		0x2
#define ARM_COND_HS		ARM_COND_CS
#define ARM_COND_CC		0x3
#define ARM_COND_LO		ARM_COND_CC
#define ARM_COND_MI		0x4
#define ARM_COND_PL		0x5
#define ARM_COND_VS		0x6
#define ARM_COND_VC		0x7
#define ARM_COND_HI		0x8
#define ARM_COND_LS		0x9
#define ARM_COND_GE		0xa
#define ARM_COND_LT		0xb
#define ARM_COND_GT		0xc
#define ARM_COND_LE		0xd
#define ARM_COND_AL		0xe

/* register shift types */
#define SRTYPE_LSL		0
#define SRTYPE_LSR		1
#define SRTYPE_ASR		2
#define SRTYPE_ROR		3

/*
 * Base opcodes; _R variants take a register operand, _I variants an
 * immediate.  Register/immediate fields are OR-ed in by the macros below.
 */
#define ARM_INST_ADD_R		0x00800000
#define ARM_INST_ADD_I		0x02800000

#define ARM_INST_AND_R		0x00000000
#define ARM_INST_AND_I		0x02000000

#define ARM_INST_BIC_R		0x01c00000
#define ARM_INST_BIC_I		0x03c00000

#define ARM_INST_B		0x0a000000
#define ARM_INST_BX		0x012fff10
#define ARM_INST_BLX_R		0x012fff30

#define ARM_INST_CMP_R		0x01500000
#define ARM_INST_CMP_I		0x03500000

#define ARM_INST_LDRB_I		0x05d00000
#define ARM_INST_LDRB_R		0x07d00000
#define ARM_INST_LDRH_I		0x01d000b0
#define ARM_INST_LDR_I		0x05900000

#define ARM_INST_LDM		0x08900000

#define ARM_INST_LSL_I		0x01a00000
#define ARM_INST_LSL_R		0x01a00010

#define ARM_INST_LSR_I		0x01a00020
#define ARM_INST_LSR_R		0x01a00030

#define ARM_INST_MOV_R		0x01a00000
#define ARM_INST_MOV_I		0x03a00000
#define ARM_INST_MOVW		0x03000000
#define ARM_INST_MOVT		0x03400000

#define ARM_INST_MUL		0x00000090

#define ARM_INST_POP		0x08bd0000
#define ARM_INST_PUSH		0x092d0000

#define ARM_INST_ORR_R		0x01800000
#define ARM_INST_ORR_I		0x03800000

#define ARM_INST_REV		0x06bf0f30
#define ARM_INST_REV16		0x06bf0fb0

#define ARM_INST_RSB_I		0x02600000

#define ARM_INST_SUB_R		0x00400000
#define ARM_INST_SUB_I		0x02400000

#define ARM_INST_STR_I		0x05800000

#define ARM_INST_TST_R		0x01100000
#define ARM_INST_TST_I		0x03100000

#define ARM_INST_UDIV		0x0730f010

#define ARM_INST_UMULL		0x00800090

/* generic 3-operand data-processing encoder, register operand */
#define _AL3_R(op, rd, rn, rm)	((op ## _R) | (rd) << 12 | (rn) << 16 | (rm))
/* generic 3-operand data-processing encoder, immediate operand */
#define _AL3_I(op, rd, rn, imm)	((op ## _I) | (rd) << 12 | (rn) << 16 | (imm))

#define ARM_ADD_R(rd, rn, rm)	_AL3_R(ARM_INST_ADD, rd, rn, rm)
#define ARM_ADD_I(rd, rn, imm)	_AL3_I(ARM_INST_ADD, rd, rn, imm)

#define ARM_AND_R(rd, rn, rm)	_AL3_R(ARM_INST_AND, rd, rn, rm)
#define ARM_AND_I(rd, rn, imm)	_AL3_I(ARM_INST_AND, rd, rn, imm)

#define ARM_BIC_R(rd, rn, rm)	_AL3_R(ARM_INST_BIC, rd, rn, rm)
#define ARM_BIC_I(rd, rn, imm)	_AL3_I(ARM_INST_BIC, rd, rn, imm)

#define ARM_B(imm24)		(ARM_INST_B | ((imm24) & 0xffffff))
#define ARM_BX(rm)		(ARM_INST_BX | (rm))
#define ARM_BLX_R(rm)		(ARM_INST_BLX_R | (rm))

#define ARM_CMP_R(rn, rm)	_AL3_R(ARM_INST_CMP, 0, rn, rm)
#define ARM_CMP_I(rn, imm)	_AL3_I(ARM_INST_CMP, 0, rn, imm)

#define ARM_LDR_I(rt, rn, off)	(ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
				 | (off))
#define ARM_LDRB_I(rt, rn, off)	(ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \
				 | (off))
#define ARM_LDRB_R(rt, rn, rm)	(ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \
				 | (rm))
/* halfword loads split the 8-bit immediate into two 4-bit fields */
#define ARM_LDRH_I(rt, rn, off)	(ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \
				 | (((off) & 0xf0) << 4) | ((off) & 0xf))

#define ARM_LDM(rn, regs)	(ARM_INST_LDM | (rn) << 16 | (regs))

#define ARM_LSL_R(rd, rn, rm)	(_AL3_R(ARM_INST_LSL, rd, 0, rn) | (rm) << 8)
#define ARM_LSL_I(rd, rn, imm)	(_AL3_I(ARM_INST_LSL, rd, 0, rn) | (imm) << 7)

#define ARM_LSR_R(rd, rn, rm)	(_AL3_R(ARM_INST_LSR, rd, 0, rn) | (rm) << 8)
#define ARM_LSR_I(rd, rn, imm)	(_AL3_I(ARM_INST_LSR, rd, 0, rn) | (imm) << 7)

#define ARM_MOV_R(rd, rm)	_AL3_R(ARM_INST_MOV, rd, 0, rm)
#define ARM_MOV_I(rd, imm)	_AL3_I(ARM_INST_MOV, rd, 0, imm)

#define ARM_MOVW(rd, imm)	\
	(ARM_INST_MOVW | ((imm) >> 12) << 16 | (rd) << 12 | ((imm) & 0x0fff))

#define ARM_MOVT(rd, imm)	\
	(ARM_INST_MOVT | ((imm) >> 12) << 16 | (rd) << 12 | ((imm) & 0x0fff))

#define ARM_MUL(rd, rm, rn)	(ARM_INST_MUL | (rd) << 16 | (rm) << 8 | (rn))

#define ARM_POP(regs)		(ARM_INST_POP | (regs))
#define ARM_PUSH(regs)		(ARM_INST_PUSH | (regs))

#define ARM_ORR_R(rd, rn, rm)	_AL3_R(ARM_INST_ORR, rd, rn, rm)
#define ARM_ORR_I(rd, rn, imm)	_AL3_I(ARM_INST_ORR, rd, rn, imm)
#define ARM_ORR_S(rd, rn, rm, type, rs)	\
	(ARM_ORR_R(rd, rn, rm) | (type) << 5 | (rs) << 7)

#define ARM_REV(rd, rm)		(ARM_INST_REV | (rd) << 12 | (rm))
#define ARM_REV16(rd, rm)	(ARM_INST_REV16 | (rd) << 12 | (rm))

#define ARM_RSB_I(rd, rn, imm)	_AL3_I(ARM_INST_RSB, rd, rn, imm)

#define ARM_SUB_R(rd, rn, rm)	_AL3_R(ARM_INST_SUB, rd, rn, rm)
#define ARM_SUB_I(rd, rn, imm)	_AL3_I(ARM_INST_SUB, rd, rn, imm)

#define ARM_STR_I(rt, rn, off)	(ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \
				 | (off))

#define ARM_TST_R(rn, rm)	_AL3_R(ARM_INST_TST, 0, rn, rm)
#define ARM_TST_I(rn, imm)	_AL3_I(ARM_INST_TST, 0, rn, imm)

#define ARM_UDIV(rd, rn, rm)	(ARM_INST_UDIV | (rd) << 16 | (rn) | (rm) << 8)

/*
 * NOTE: the original expansion left "rn" unparenthesized, so a
 * low-precedence argument expression (e.g. a conditional) would bind to
 * the surrounding ORs incorrectly.  All macro arguments must be
 * parenthesized in the expansion.
 */
#define ARM_UMULL(rd_lo, rd_hi, rn, rm)	(ARM_INST_UMULL | (rd_hi) << 16 \
					 | (rd_lo) << 12 | (rm) << 8 | (rn))

#endif /* PFILTER_OPCODES_ARM_H */
diff --git a/arch/arm/plat-mxc/include/mach/hardware.h b/arch/arm/plat-mxc/include/mach/hardware.h
index a599f01f8b92..0630513554de 100644
--- a/arch/arm/plat-mxc/include/mach/hardware.h
+++ b/arch/arm/plat-mxc/include/mach/hardware.h
@@ -22,11 +22,8 @@
22 22
23#include <asm/sizes.h> 23#include <asm/sizes.h>
24 24
25#ifdef __ASSEMBLER__ 25#define addr_in_module(addr, mod) \
26#define IOMEM(addr) (addr) 26 ((unsigned long)(addr) - mod ## _BASE_ADDR < mod ## _SIZE)
27#else
28#define IOMEM(addr) ((void __force __iomem *)(addr))
29#endif
30 27
31#define IMX_IO_P2V_MODULE(addr, module) \ 28#define IMX_IO_P2V_MODULE(addr, module) \
32 (((addr) - module ## _BASE_ADDR) < module ## _SIZE ? \ 29 (((addr) - module ## _BASE_ADDR) < module ## _SIZE ? \
diff --git a/arch/arm/plat-mxc/include/mach/io.h b/arch/arm/plat-mxc/include/mach/io.h
deleted file mode 100644
index 338300b18b00..000000000000
--- a/arch/arm/plat-mxc/include/mach/io.h
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 */
4
5/*
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __ASM_ARCH_MXC_IO_H__
12#define __ASM_ARCH_MXC_IO_H__
13
14/* Allow IO space to be anywhere in the memory */
15#define IO_SPACE_LIMIT 0xffffffff
16
17#define __arch_ioremap __imx_ioremap
18#define __arch_iounmap __iounmap
19
20#define addr_in_module(addr, mod) \
21 ((unsigned long)(addr) - mod ## _BASE_ADDR < mod ## _SIZE)
22
23extern void __iomem *(*imx_ioremap)(unsigned long, size_t, unsigned int);
24
25static inline void __iomem *
26__imx_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
27{
28 if (imx_ioremap != NULL)
29 return imx_ioremap(phys_addr, size, mtype);
30 else
31 return __arm_ioremap(phys_addr, size, mtype);
32}
33
34/* io address mapping macro */
35#define __io(a) __typesafe_io(a)
36
37#define __mem_pci(a) (a)
38
39#endif
diff --git a/arch/arm/plat-nomadik/Kconfig b/arch/arm/plat-nomadik/Kconfig
index bca4914b4b9d..4c48c8b60b54 100644
--- a/arch/arm/plat-nomadik/Kconfig
+++ b/arch/arm/plat-nomadik/Kconfig
@@ -23,7 +23,6 @@ config HAS_MTU
23config NOMADIK_MTU_SCHED_CLOCK 23config NOMADIK_MTU_SCHED_CLOCK
24 bool 24 bool
25 depends on HAS_MTU 25 depends on HAS_MTU
26 select HAVE_SCHED_CLOCK
27 help 26 help
28 Use the Multi Timer Unit as the sched_clock. 27 Use the Multi Timer Unit as the sched_clock.
29 28
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index fd0ee84c45d1..9ff93b065686 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -200,8 +200,7 @@ dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
200 sg.dma_address = addr; 200 sg.dma_address = addr;
201 sg.length = size; 201 sg.length = size;
202 202
203 return chan->device->device_prep_slave_sg(chan, &sg, 1, 203 return dmaengine_prep_slave_sg(chan, &sg, 1, direction, flags);
204 direction, flags);
205} 204}
206 205
207#else 206#else
diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h
index b8a96c6a1a30..2f6e9924a814 100644
--- a/arch/arm/plat-omap/include/plat/gpio.h
+++ b/arch/arm/plat-omap/include/plat/gpio.h
@@ -158,10 +158,6 @@
158#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr)) 158#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
159#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES) 159#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
160 160
161#define OMAP_GPIO_IRQ(nr) (OMAP_GPIO_IS_MPUIO(nr) ? \
162 IH_MPUIO_BASE + ((nr) & 0x0f) : \
163 IH_GPIO_BASE + (nr))
164
165struct omap_gpio_dev_attr { 161struct omap_gpio_dev_attr {
166 int bank_width; /* GPIO bank width */ 162 int bank_width; /* GPIO bank width */
167 bool dbck_flag; /* dbck required or not - True for OMAP3&4 */ 163 bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
diff --git a/arch/arm/plat-omap/include/plat/hardware.h b/arch/arm/plat-omap/include/plat/hardware.h
index 537b05ae1f51..e897978371c2 100644
--- a/arch/arm/plat-omap/include/plat/hardware.h
+++ b/arch/arm/plat-omap/include/plat/hardware.h
@@ -43,12 +43,6 @@
43#endif 43#endif
44#include <plat/serial.h> 44#include <plat/serial.h>
45 45
46#ifdef __ASSEMBLER__
47#define IOMEM(x) (x)
48#else
49#define IOMEM(x) ((void __force __iomem *)(x))
50#endif
51
52/* 46/*
53 * --------------------------------------------------------------------------- 47 * ---------------------------------------------------------------------------
54 * Common definitions for all OMAP processors 48 * Common definitions for all OMAP processors
diff --git a/arch/arm/plat-omap/include/plat/sdrc.h b/arch/arm/plat-omap/include/plat/sdrc.h
index 925b12b500dc..9bb978ecd884 100644
--- a/arch/arm/plat-omap/include/plat/sdrc.h
+++ b/arch/arm/plat-omap/include/plat/sdrc.h
@@ -16,7 +16,6 @@
16 * published by the Free Software Foundation. 16 * published by the Free Software Foundation.
17 */ 17 */
18 18
19#include <mach/io.h>
20 19
21/* SDRC register offsets - read/write with sdrc_{read,write}_reg() */ 20/* SDRC register offsets - read/write with sdrc_{read,write}_reg() */
22 21
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index d0fc9f4dc155..762eeb0626c1 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -112,7 +112,6 @@ extern int omap4430_phy_suspend(struct device *dev, int suspend);
112 */ 112 */
113 113
114#define OMAP2_L4_IO_OFFSET 0xb2000000 114#define OMAP2_L4_IO_OFFSET 0xb2000000
115#define IOMEM(x) ((void __force __iomem *)(x))
116#define OMAP2_L4_IO_ADDRESS(pa) IOMEM((pa) + OMAP2_L4_IO_OFFSET) 115#define OMAP2_L4_IO_ADDRESS(pa) IOMEM((pa) + OMAP2_L4_IO_OFFSET)
117 116
118static inline u8 omap_readb(u32 pa) 117static inline u8 omap_readb(u32 pa)
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/plat-s3c24xx/cpu.c
index 0db73ae646bc..290942d9adda 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/plat-s3c24xx/cpu.c
@@ -36,6 +36,7 @@
36#include <asm/irq.h> 36#include <asm/irq.h>
37#include <asm/cacheflush.h> 37#include <asm/cacheflush.h>
38#include <asm/system_info.h> 38#include <asm/system_info.h>
39#include <asm/system_misc.h>
39 40
40#include <asm/mach/arch.h> 41#include <asm/mach/arch.h>
41#include <asm/mach/map.h> 42#include <asm/mach/map.h>
diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c
index 301d9c319d0b..eb9f4f534006 100644
--- a/arch/arm/plat-samsung/dma-ops.c
+++ b/arch/arm/plat-samsung/dma-ops.c
@@ -79,11 +79,11 @@ static int samsung_dmadev_prepare(unsigned ch,
79 info->len, offset_in_page(info->buf)); 79 info->len, offset_in_page(info->buf));
80 sg_dma_address(&sg) = info->buf; 80 sg_dma_address(&sg) = info->buf;
81 81
82 desc = chan->device->device_prep_slave_sg(chan, 82 desc = dmaengine_prep_slave_sg(chan,
83 &sg, 1, info->direction, DMA_PREP_INTERRUPT); 83 &sg, 1, info->direction, DMA_PREP_INTERRUPT);
84 break; 84 break;
85 case DMA_CYCLIC: 85 case DMA_CYCLIC:
86 desc = chan->device->device_prep_dma_cyclic(chan, 86 desc = dmaengine_prep_dma_cyclic(chan,
87 info->buf, info->len, info->period, info->direction); 87 info->buf, info->len, info->period, info->direction);
88 break; 88 break;
89 default: 89 default:
diff --git a/arch/arm/plat-spear/include/plat/hardware.h b/arch/arm/plat-spear/include/plat/hardware.h
index 66d677225d15..70187d763e26 100644
--- a/arch/arm/plat-spear/include/plat/hardware.h
+++ b/arch/arm/plat-spear/include/plat/hardware.h
@@ -14,10 +14,4 @@
14#ifndef __PLAT_HARDWARE_H 14#ifndef __PLAT_HARDWARE_H
15#define __PLAT_HARDWARE_H 15#define __PLAT_HARDWARE_H
16 16
17#ifndef __ASSEMBLY__
18#define IOMEM(x) ((void __iomem __force *)(x))
19#else
20#define IOMEM(x) (x)
21#endif
22
23#endif /* __PLAT_HARDWARE_H */ 17#endif /* __PLAT_HARDWARE_H */
diff --git a/arch/arm/plat-spear/include/plat/io.h b/arch/arm/plat-spear/include/plat/io.h
deleted file mode 100644
index 4d4ba822b3eb..000000000000
--- a/arch/arm/plat-spear/include/plat/io.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * arch/arm/plat-spear/include/plat/io.h
3 *
4 * IO definitions for SPEAr platform
5 *
6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#ifndef __PLAT_IO_H
15#define __PLAT_IO_H
16
17#define IO_SPACE_LIMIT 0xFFFFFFFF
18
19#define __io(a) __typesafe_io(a)
20#define __mem_pci(a) (a)
21
22#endif /* __PLAT_IO_H */
diff --git a/arch/arm/plat-spear/include/plat/keyboard.h b/arch/arm/plat-spear/include/plat/keyboard.h
index c16cc31ecbed..0562f134621d 100644
--- a/arch/arm/plat-spear/include/plat/keyboard.h
+++ b/arch/arm/plat-spear/include/plat/keyboard.h
@@ -159,11 +159,4 @@ struct kbd_platform_data {
159 unsigned int mode; 159 unsigned int mode;
160}; 160};
161 161
162/* This function is used to set platform data field of pdev->dev */
163static inline void
164kbd_set_plat_data(struct platform_device *pdev, struct kbd_platform_data *data)
165{
166 pdev->dev.platform_data = data;
167}
168
169#endif /* __PLAT_KEYBOARD_H */ 162#endif /* __PLAT_KEYBOARD_H */
diff --git a/arch/arm/plat-versatile/Kconfig b/arch/arm/plat-versatile/Kconfig
index 52353beb369d..043f7b02a9e7 100644
--- a/arch/arm/plat-versatile/Kconfig
+++ b/arch/arm/plat-versatile/Kconfig
@@ -11,7 +11,6 @@ config PLAT_VERSATILE_LEDS
11 depends on ARCH_REALVIEW || ARCH_VERSATILE 11 depends on ARCH_REALVIEW || ARCH_VERSATILE
12 12
13config PLAT_VERSATILE_SCHED_CLOCK 13config PLAT_VERSATILE_SCHED_CLOCK
14 def_bool y if !ARCH_INTEGRATOR_AP 14 def_bool y
15 select HAVE_SCHED_CLOCK
16 15
17endif 16endif
diff --git a/arch/avr32/boot/images/Makefile b/arch/avr32/boot/images/Makefile
index 1848bf0d7f62..2a3b53978a3b 100644
--- a/arch/avr32/boot/images/Makefile
+++ b/arch/avr32/boot/images/Makefile
@@ -6,8 +6,6 @@
6# for more details. 6# for more details.
7# 7#
8 8
9MKIMAGE := $(srctree)/scripts/mkuboot.sh
10
11extra-y := vmlinux.bin vmlinux.gz 9extra-y := vmlinux.bin vmlinux.gz
12 10
13OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note.gnu.build-id 11OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note.gnu.build-id
@@ -17,10 +15,9 @@ $(obj)/vmlinux.bin: vmlinux FORCE
17$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE 15$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
18 $(call if_changed,gzip) 16 $(call if_changed,gzip)
19 17
20quiet_cmd_uimage = UIMAGE $@ 18UIMAGE_LOADADDR = $(CONFIG_LOAD_ADDRESS)
21 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A avr32 -O linux -T kernel \ 19UIMAGE_ENTRYADDR = $(CONFIG_ENTRY_ADDRESS)
22 -C gzip -a $(CONFIG_LOAD_ADDRESS) -e $(CONFIG_ENTRY_ADDRESS) \ 20UIMAGE_COMPRESSION = gzip
23 -n 'Linux-$(KERNELRELEASE)' -d $< $@
24 21
25targets += uImage uImage.srec 22targets += uImage uImage.srec
26$(obj)/uImage: $(obj)/vmlinux.gz 23$(obj)/uImage: $(obj)/vmlinux.gz
diff --git a/arch/avr32/include/asm/posix_types.h b/arch/avr32/include/asm/posix_types.h
index fe0c0c014389..74667bfc88cc 100644
--- a/arch/avr32/include/asm/posix_types.h
+++ b/arch/avr32/include/asm/posix_types.h
@@ -14,112 +14,27 @@
14 * assume GCC is being used. 14 * assume GCC is being used.
15 */ 15 */
16 16
17typedef unsigned long __kernel_ino_t;
18typedef unsigned short __kernel_mode_t; 17typedef unsigned short __kernel_mode_t;
18#define __kernel_mode_t __kernel_mode_t
19
19typedef unsigned short __kernel_nlink_t; 20typedef unsigned short __kernel_nlink_t;
20typedef long __kernel_off_t; 21#define __kernel_nlink_t __kernel_nlink_t
21typedef int __kernel_pid_t; 22
22typedef unsigned short __kernel_ipc_pid_t; 23typedef unsigned short __kernel_ipc_pid_t;
23typedef unsigned int __kernel_uid_t; 24#define __kernel_ipc_pid_t __kernel_ipc_pid_t
24typedef unsigned int __kernel_gid_t; 25
25typedef unsigned long __kernel_size_t; 26typedef unsigned long __kernel_size_t;
26typedef long __kernel_ssize_t; 27typedef long __kernel_ssize_t;
27typedef int __kernel_ptrdiff_t; 28typedef int __kernel_ptrdiff_t;
28typedef long __kernel_time_t; 29#define __kernel_size_t __kernel_size_t
29typedef long __kernel_suseconds_t;
30typedef long __kernel_clock_t;
31typedef int __kernel_timer_t;
32typedef int __kernel_clockid_t;
33typedef int __kernel_daddr_t;
34typedef char * __kernel_caddr_t;
35typedef unsigned short __kernel_uid16_t;
36typedef unsigned short __kernel_gid16_t;
37typedef unsigned int __kernel_uid32_t;
38typedef unsigned int __kernel_gid32_t;
39 30
40typedef unsigned short __kernel_old_uid_t; 31typedef unsigned short __kernel_old_uid_t;
41typedef unsigned short __kernel_old_gid_t; 32typedef unsigned short __kernel_old_gid_t;
42typedef unsigned short __kernel_old_dev_t; 33#define __kernel_old_uid_t __kernel_old_uid_t
43
44#ifdef __GNUC__
45typedef long long __kernel_loff_t;
46#endif
47
48typedef struct {
49 int val[2];
50} __kernel_fsid_t;
51
52#if defined(__KERNEL__)
53
54#undef __FD_SET
55static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
56{
57 unsigned long __tmp = __fd / __NFDBITS;
58 unsigned long __rem = __fd % __NFDBITS;
59 __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
60}
61
62#undef __FD_CLR
63static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
64{
65 unsigned long __tmp = __fd / __NFDBITS;
66 unsigned long __rem = __fd % __NFDBITS;
67 __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
68}
69 34
35typedef unsigned short __kernel_old_dev_t;
36#define __kernel_old_dev_t __kernel_old_dev_t
70 37
71#undef __FD_ISSET 38#include <asm-generic/posix_types.h>
72static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
73{
74 unsigned long __tmp = __fd / __NFDBITS;
75 unsigned long __rem = __fd % __NFDBITS;
76 return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
77}
78
79/*
80 * This will unroll the loop for the normal constant case (8 ints,
81 * for a 256-bit fd_set)
82 */
83#undef __FD_ZERO
84static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
85{
86 unsigned long *__tmp = __p->fds_bits;
87 int __i;
88
89 if (__builtin_constant_p(__FDSET_LONGS)) {
90 switch (__FDSET_LONGS) {
91 case 16:
92 __tmp[ 0] = 0; __tmp[ 1] = 0;
93 __tmp[ 2] = 0; __tmp[ 3] = 0;
94 __tmp[ 4] = 0; __tmp[ 5] = 0;
95 __tmp[ 6] = 0; __tmp[ 7] = 0;
96 __tmp[ 8] = 0; __tmp[ 9] = 0;
97 __tmp[10] = 0; __tmp[11] = 0;
98 __tmp[12] = 0; __tmp[13] = 0;
99 __tmp[14] = 0; __tmp[15] = 0;
100 return;
101
102 case 8:
103 __tmp[ 0] = 0; __tmp[ 1] = 0;
104 __tmp[ 2] = 0; __tmp[ 3] = 0;
105 __tmp[ 4] = 0; __tmp[ 5] = 0;
106 __tmp[ 6] = 0; __tmp[ 7] = 0;
107 return;
108
109 case 4:
110 __tmp[ 0] = 0; __tmp[ 1] = 0;
111 __tmp[ 2] = 0; __tmp[ 3] = 0;
112 return;
113 }
114 }
115 __i = __FDSET_LONGS;
116 while (__i) {
117 __i--;
118 *__tmp = 0;
119 __tmp++;
120 }
121}
122
123#endif /* defined(__KERNEL__) */
124 39
125#endif /* __ASM_AVR32_POSIX_TYPES_H */ 40#endif /* __ASM_AVR32_POSIX_TYPES_H */
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 889c544688ca..0445c4fd67e3 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1351,7 +1351,6 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
1351 goto fail; 1351 goto fail;
1352 1352
1353 slave->sdata.dma_dev = &dw_dmac0_device.dev; 1353 slave->sdata.dma_dev = &dw_dmac0_device.dev;
1354 slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
1355 slave->sdata.cfg_hi = (DWC_CFGH_SRC_PER(0) 1354 slave->sdata.cfg_hi = (DWC_CFGH_SRC_PER(0)
1356 | DWC_CFGH_DST_PER(1)); 1355 | DWC_CFGH_DST_PER(1));
1357 slave->sdata.cfg_lo &= ~(DWC_CFGL_HS_DST_POL 1356 slave->sdata.cfg_lo &= ~(DWC_CFGL_HS_DST_POL
@@ -2046,27 +2045,19 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data,
2046 /* Check if DMA slave interface for capture should be configured. */ 2045 /* Check if DMA slave interface for capture should be configured. */
2047 if (flags & AC97C_CAPTURE) { 2046 if (flags & AC97C_CAPTURE) {
2048 rx_dws->dma_dev = &dw_dmac0_device.dev; 2047 rx_dws->dma_dev = &dw_dmac0_device.dev;
2049 rx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
2050 rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3); 2048 rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3);
2051 rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); 2049 rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
2052 rx_dws->src_master = 0; 2050 rx_dws->src_master = 0;
2053 rx_dws->dst_master = 1; 2051 rx_dws->dst_master = 1;
2054 rx_dws->src_msize = DW_DMA_MSIZE_1;
2055 rx_dws->dst_msize = DW_DMA_MSIZE_1;
2056 rx_dws->fc = DW_DMA_FC_D_P2M;
2057 } 2052 }
2058 2053
2059 /* Check if DMA slave interface for playback should be configured. */ 2054 /* Check if DMA slave interface for playback should be configured. */
2060 if (flags & AC97C_PLAYBACK) { 2055 if (flags & AC97C_PLAYBACK) {
2061 tx_dws->dma_dev = &dw_dmac0_device.dev; 2056 tx_dws->dma_dev = &dw_dmac0_device.dev;
2062 tx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
2063 tx_dws->cfg_hi = DWC_CFGH_DST_PER(4); 2057 tx_dws->cfg_hi = DWC_CFGH_DST_PER(4);
2064 tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); 2058 tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
2065 tx_dws->src_master = 0; 2059 tx_dws->src_master = 0;
2066 tx_dws->dst_master = 1; 2060 tx_dws->dst_master = 1;
2067 tx_dws->src_msize = DW_DMA_MSIZE_1;
2068 tx_dws->dst_msize = DW_DMA_MSIZE_1;
2069 tx_dws->fc = DW_DMA_FC_D_M2P;
2070 } 2061 }
2071 2062
2072 if (platform_device_add_data(pdev, data, 2063 if (platform_device_add_data(pdev, data,
@@ -2136,14 +2127,10 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data)
2136 dws = &data->dws; 2127 dws = &data->dws;
2137 2128
2138 dws->dma_dev = &dw_dmac0_device.dev; 2129 dws->dma_dev = &dw_dmac0_device.dev;
2139 dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
2140 dws->cfg_hi = DWC_CFGH_DST_PER(2); 2130 dws->cfg_hi = DWC_CFGH_DST_PER(2);
2141 dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL); 2131 dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
2142 dws->src_master = 0; 2132 dws->src_master = 0;
2143 dws->dst_master = 1; 2133 dws->dst_master = 1;
2144 dws->src_msize = DW_DMA_MSIZE_1;
2145 dws->dst_msize = DW_DMA_MSIZE_1;
2146 dws->fc = DW_DMA_FC_D_M2P;
2147 2134
2148 if (platform_device_add_data(pdev, data, 2135 if (platform_device_add_data(pdev, data,
2149 sizeof(struct atmel_abdac_pdata))) 2136 sizeof(struct atmel_abdac_pdata)))
diff --git a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h
index a9b38967f703..4bba58561d5c 100644
--- a/arch/avr32/mach-at32ap/include/mach/atmel-mci.h
+++ b/arch/avr32/mach-at32ap/include/mach/atmel-mci.h
@@ -14,11 +14,4 @@ struct mci_dma_data {
14#define slave_data_ptr(s) (&(s)->sdata) 14#define slave_data_ptr(s) (&(s)->sdata)
15#define find_slave_dev(s) ((s)->sdata.dma_dev) 15#define find_slave_dev(s) ((s)->sdata.dma_dev)
16 16
17#define setup_dma_addr(s, t, r) do { \
18 if (s) { \
19 (s)->sdata.tx_reg = (t); \
20 (s)->sdata.rx_reg = (r); \
21 } \
22} while (0)
23
24#endif /* __MACH_ATMEL_MCI_H */ 17#endif /* __MACH_ATMEL_MCI_H */
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index 0a49279e3428..f7d27d50d02c 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -6,20 +6,17 @@
6# for more details. 6# for more details.
7# 7#
8 8
9MKIMAGE := $(srctree)/scripts/mkuboot.sh
10
11targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip 9targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip
12extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.xip 10extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.xip
13 11
14UIMAGE_OPTS-y := 12ifeq ($(CONFIG_RAMKERNEL),y)
15UIMAGE_OPTS-$(CONFIG_RAMKERNEL) += -a $(CONFIG_BOOT_LOAD) 13UIMAGE_LOADADDR = $(CONFIG_BOOT_LOAD)
16UIMAGE_OPTS-$(CONFIG_ROMKERNEL) += -a $(CONFIG_ROM_BASE) -x 14else # CONFIG_ROMKERNEL must be set
17 15UIMAGE_LOADADDR = $(CONFIG_ROM_BASE)
18quiet_cmd_uimage = UIMAGE $@ 16endif
19 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \ 17UIMAGE_ENTRYADDR = $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}')
20 -C $(2) -n '$(CPU_REV)-$(KERNELRELEASE)' \ 18UIMAGE_NAME = '$(CPU_REV)-$(KERNELRELEASE)'
21 -e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \ 19UIMAGE_OPTS-$(CONFIG_ROMKERNEL) += -x
22 $(UIMAGE_OPTS-y) -d $< $@
23 20
24$(obj)/vmlinux.bin: vmlinux FORCE 21$(obj)/vmlinux.bin: vmlinux FORCE
25 $(call if_changed,objcopy) 22 $(call if_changed,objcopy)
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 2aa019368504..2ad747e909fb 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -550,6 +550,7 @@ static __init void memory_setup(void)
550{ 550{
551#ifdef CONFIG_MTD_UCLINUX 551#ifdef CONFIG_MTD_UCLINUX
552 unsigned long mtd_phys = 0; 552 unsigned long mtd_phys = 0;
553 unsigned long n;
553#endif 554#endif
554 unsigned long max_mem; 555 unsigned long max_mem;
555 556
@@ -593,9 +594,9 @@ static __init void memory_setup(void)
593 mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8))); 594 mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));
594 595
595# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS) 596# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
596 if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC) 597 n = ext2_image_size((void *)(mtd_phys + 0x400));
597 mtd_size = 598 if (n)
598 PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10); 599 mtd_size = PAGE_ALIGN(n * 1024);
599# endif 600# endif
600 601
601# if defined(CONFIG_CRAMFS) 602# if defined(CONFIG_CRAMFS)
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 3c64b2894c13..1c3ccd416d50 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -11,7 +11,7 @@ config TMS320C6X
11 select HAVE_DMA_API_DEBUG 11 select HAVE_DMA_API_DEBUG
12 select HAVE_GENERIC_HARDIRQS 12 select HAVE_GENERIC_HARDIRQS
13 select HAVE_MEMBLOCK 13 select HAVE_MEMBLOCK
14 select HAVE_SPARSE_IRQ 14 select SPARSE_IRQ
15 select IRQ_DOMAIN 15 select IRQ_DOMAIN
16 select OF 16 select OF
17 select OF_EARLY_FLATTREE 17 select OF_EARLY_FLATTREE
diff --git a/arch/cris/include/asm/posix_types.h b/arch/cris/include/asm/posix_types.h
index ce3fb25a460b..72b3cd6eda0b 100644
--- a/arch/cris/include/asm/posix_types.h
+++ b/arch/cris/include/asm/posix_types.h
@@ -12,55 +12,25 @@
12 * assume GCC is being used. 12 * assume GCC is being used.
13 */ 13 */
14 14
15typedef unsigned long __kernel_ino_t;
16typedef unsigned short __kernel_mode_t; 15typedef unsigned short __kernel_mode_t;
16#define __kernel_mode_t __kernel_mode_t
17
17typedef unsigned short __kernel_nlink_t; 18typedef unsigned short __kernel_nlink_t;
18typedef long __kernel_off_t; 19#define __kernel_nlink_t __kernel_nlink_t
19typedef int __kernel_pid_t; 20
20typedef unsigned short __kernel_ipc_pid_t; 21typedef unsigned short __kernel_ipc_pid_t;
22#define __kernel_ipc_pid_t __kernel_ipc_pid_t
23
21typedef unsigned short __kernel_uid_t; 24typedef unsigned short __kernel_uid_t;
22typedef unsigned short __kernel_gid_t; 25typedef unsigned short __kernel_gid_t;
26#define __kernel_uid_t __kernel_uid_t
27
23typedef __SIZE_TYPE__ __kernel_size_t; 28typedef __SIZE_TYPE__ __kernel_size_t;
24typedef long __kernel_ssize_t; 29typedef long __kernel_ssize_t;
25typedef int __kernel_ptrdiff_t; 30typedef int __kernel_ptrdiff_t;
26typedef long __kernel_time_t; 31#define __kernel_size_t __kernel_size_t
27typedef long __kernel_suseconds_t;
28typedef long __kernel_clock_t;
29typedef int __kernel_timer_t;
30typedef int __kernel_clockid_t;
31typedef int __kernel_daddr_t;
32typedef char * __kernel_caddr_t;
33typedef unsigned short __kernel_uid16_t;
34typedef unsigned short __kernel_gid16_t;
35typedef unsigned int __kernel_uid32_t;
36typedef unsigned int __kernel_gid32_t;
37 32
38typedef unsigned short __kernel_old_uid_t;
39typedef unsigned short __kernel_old_gid_t;
40typedef unsigned short __kernel_old_dev_t; 33typedef unsigned short __kernel_old_dev_t;
41 34#define __kernel_old_dev_t __kernel_old_dev_t
42#ifdef __GNUC__
43typedef long long __kernel_loff_t;
44#endif
45
46typedef struct {
47 int val[2];
48} __kernel_fsid_t;
49
50#ifdef __KERNEL__
51
52#undef __FD_SET
53#define __FD_SET(fd,fdsetp) set_bit(fd, (void *)(fdsetp))
54
55#undef __FD_CLR
56#define __FD_CLR(fd,fdsetp) clear_bit(fd, (void *)(fdsetp))
57
58#undef __FD_ISSET
59#define __FD_ISSET(fd,fdsetp) test_bit(fd, (void *)(fdsetp))
60
61#undef __FD_ZERO
62#define __FD_ZERO(fdsetp) memset((void *)(fdsetp), 0, __FDSET_LONGS << 2)
63
64#endif /* __KERNEL__ */
65 35
66#endif /* __ARCH_CRIS_POSIX_TYPES_H */ 36#endif /* __ARCH_CRIS_POSIX_TYPES_H */
diff --git a/arch/frv/include/asm/posix_types.h b/arch/frv/include/asm/posix_types.h
index a9f1f5be0632..3f34cb45fbb3 100644
--- a/arch/frv/include/asm/posix_types.h
+++ b/arch/frv/include/asm/posix_types.h
@@ -7,56 +7,23 @@
7 * assume GCC is being used. 7 * assume GCC is being used.
8 */ 8 */
9 9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned short __kernel_mode_t; 10typedef unsigned short __kernel_mode_t;
11#define __kernel_mode_t __kernel_mode_t
12
12typedef unsigned short __kernel_nlink_t; 13typedef unsigned short __kernel_nlink_t;
13typedef long __kernel_off_t; 14#define __kernel_nlink_t __kernel_nlink_t
14typedef int __kernel_pid_t; 15
15typedef unsigned short __kernel_ipc_pid_t; 16typedef unsigned short __kernel_ipc_pid_t;
17#define __kernel_ipc_pid_t __kernel_ipc_pid_t
18
16typedef unsigned short __kernel_uid_t; 19typedef unsigned short __kernel_uid_t;
17typedef unsigned short __kernel_gid_t; 20typedef unsigned short __kernel_gid_t;
18typedef unsigned int __kernel_size_t; 21#define __kernel_uid_t __kernel_uid_t
19typedef int __kernel_ssize_t;
20typedef int __kernel_ptrdiff_t;
21typedef long __kernel_time_t;
22typedef long __kernel_suseconds_t;
23typedef long __kernel_clock_t;
24typedef int __kernel_timer_t;
25typedef int __kernel_clockid_t;
26typedef int __kernel_daddr_t;
27typedef char * __kernel_caddr_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30typedef unsigned int __kernel_uid32_t;
31typedef unsigned int __kernel_gid32_t;
32
33typedef unsigned short __kernel_old_uid_t;
34typedef unsigned short __kernel_old_gid_t;
35typedef unsigned short __kernel_old_dev_t;
36
37#ifdef __GNUC__
38typedef long long __kernel_loff_t;
39#endif
40 22
41typedef struct { 23typedef unsigned short __kernel_old_dev_t;
42 int val[2]; 24#define __kernel_old_dev_t __kernel_old_dev_t
43} __kernel_fsid_t;
44
45#if defined(__KERNEL__)
46
47#undef __FD_SET
48#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
49
50#undef __FD_CLR
51#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
52
53#undef __FD_ISSET
54#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
55
56#undef __FD_ZERO
57#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
58 25
59#endif /* defined(__KERNEL__) */ 26#include <asm-generic/posix_types.h>
60 27
61#endif 28#endif
62 29
diff --git a/arch/h8300/include/asm/posix_types.h b/arch/h8300/include/asm/posix_types.h
index 6f833a16f694..bc4c34efb1ad 100644
--- a/arch/h8300/include/asm/posix_types.h
+++ b/arch/h8300/include/asm/posix_types.h
@@ -7,54 +7,23 @@
7 * assume GCC is being used. 7 * assume GCC is being used.
8 */ 8 */
9 9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned short __kernel_mode_t; 10typedef unsigned short __kernel_mode_t;
11#define __kernel_mode_t __kernel_mode_t
12
12typedef unsigned short __kernel_nlink_t; 13typedef unsigned short __kernel_nlink_t;
13typedef long __kernel_off_t; 14#define __kernel_nlink_t __kernel_nlink_t
14typedef int __kernel_pid_t; 15
15typedef unsigned short __kernel_ipc_pid_t; 16typedef unsigned short __kernel_ipc_pid_t;
17#define __kernel_ipc_pid_t __kernel_ipc_pid_t
18
16typedef unsigned short __kernel_uid_t; 19typedef unsigned short __kernel_uid_t;
17typedef unsigned short __kernel_gid_t; 20typedef unsigned short __kernel_gid_t;
18typedef unsigned int __kernel_size_t; 21#define __kernel_uid_t __kernel_uid_t
19typedef int __kernel_ssize_t;
20typedef int __kernel_ptrdiff_t;
21typedef long __kernel_time_t;
22typedef long __kernel_suseconds_t;
23typedef long __kernel_clock_t;
24typedef int __kernel_timer_t;
25typedef int __kernel_clockid_t;
26typedef int __kernel_daddr_t;
27typedef char * __kernel_caddr_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30typedef unsigned int __kernel_uid32_t;
31typedef unsigned int __kernel_gid32_t;
32 22
33typedef unsigned short __kernel_old_uid_t; 23typedef unsigned short __kernel_old_uid_t;
34typedef unsigned short __kernel_old_gid_t; 24typedef unsigned short __kernel_old_gid_t;
25#define __kernel_old_uid_t __kernel_old_uid_t
35 26
36#ifdef __GNUC__ 27#include <asm-generic/posix_types.h>
37typedef long long __kernel_loff_t;
38#endif
39
40typedef struct {
41 int val[2];
42} __kernel_fsid_t;
43
44#if defined(__KERNEL__)
45
46#undef __FD_SET
47#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
48
49#undef __FD_CLR
50#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
51
52#undef __FD_ISSET
53#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
54
55#undef __FD_ZERO
56#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
57
58#endif /* defined(__KERNEL__) */
59 28
60#endif 29#endif
diff --git a/arch/ia64/include/asm/cmpxchg.h b/arch/ia64/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..4c96187e2049
--- /dev/null
+++ b/arch/ia64/include/asm/cmpxchg.h
@@ -0,0 +1 @@
#include <asm/intrinsics.h>
diff --git a/arch/ia64/include/asm/posix_types.h b/arch/ia64/include/asm/posix_types.h
index 17885567b731..7323ab9467eb 100644
--- a/arch/ia64/include/asm/posix_types.h
+++ b/arch/ia64/include/asm/posix_types.h
@@ -1,126 +1,11 @@
1#ifndef _ASM_IA64_POSIX_TYPES_H 1#ifndef _ASM_IA64_POSIX_TYPES_H
2#define _ASM_IA64_POSIX_TYPES_H 2#define _ASM_IA64_POSIX_TYPES_H
3 3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 *
9 * Based on <asm-alpha/posix_types.h>.
10 *
11 * Modified 1998-2000, 2003
12 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
13 */
14
15typedef unsigned long __kernel_ino_t;
16typedef unsigned int __kernel_mode_t;
17typedef unsigned int __kernel_nlink_t; 4typedef unsigned int __kernel_nlink_t;
18typedef long __kernel_off_t; 5#define __kernel_nlink_t __kernel_nlink_t
19typedef long long __kernel_loff_t;
20typedef int __kernel_pid_t;
21typedef int __kernel_ipc_pid_t;
22typedef unsigned int __kernel_uid_t;
23typedef unsigned int __kernel_gid_t;
24typedef unsigned long __kernel_size_t;
25typedef long __kernel_ssize_t;
26typedef long __kernel_ptrdiff_t;
27typedef long __kernel_time_t;
28typedef long __kernel_suseconds_t;
29typedef long __kernel_clock_t;
30typedef int __kernel_timer_t;
31typedef int __kernel_clockid_t;
32typedef int __kernel_daddr_t;
33typedef char * __kernel_caddr_t;
34typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
35typedef unsigned short __kernel_uid16_t;
36typedef unsigned short __kernel_gid16_t;
37
38typedef struct {
39 int val[2];
40} __kernel_fsid_t;
41
42typedef __kernel_uid_t __kernel_old_uid_t;
43typedef __kernel_gid_t __kernel_old_gid_t;
44typedef __kernel_uid_t __kernel_uid32_t;
45typedef __kernel_gid_t __kernel_gid32_t;
46
47typedef unsigned int __kernel_old_dev_t;
48
49# ifdef __KERNEL__
50
51# ifndef __GNUC__
52
53#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
54#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
55#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
56#define __FD_ZERO(set) \
57 ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set)))
58 6
59# else /* !__GNUC__ */ 7typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
60
61/* With GNU C, use inline functions instead so args are evaluated only once: */
62
63#undef __FD_SET
64static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
65{
66 unsigned long _tmp = fd / __NFDBITS;
67 unsigned long _rem = fd % __NFDBITS;
68 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
69}
70
71#undef __FD_CLR
72static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
73{
74 unsigned long _tmp = fd / __NFDBITS;
75 unsigned long _rem = fd % __NFDBITS;
76 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
77}
78
79#undef __FD_ISSET
80static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p)
81{
82 unsigned long _tmp = fd / __NFDBITS;
83 unsigned long _rem = fd % __NFDBITS;
84 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
85}
86
87/*
88 * This will unroll the loop for the normal constant case (8 ints,
89 * for a 256-bit fd_set)
90 */
91#undef __FD_ZERO
92static __inline__ void __FD_ZERO(__kernel_fd_set *p)
93{
94 unsigned long *tmp = p->fds_bits;
95 int i;
96
97 if (__builtin_constant_p(__FDSET_LONGS)) {
98 switch (__FDSET_LONGS) {
99 case 16:
100 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
101 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
102 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
103 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
104 return;
105
106 case 8:
107 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
108 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
109 return;
110 8
111 case 4: 9#include <asm-generic/posix_types.h>
112 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
113 return;
114 }
115 }
116 i = __FDSET_LONGS;
117 while (i) {
118 i--;
119 *tmp = 0;
120 tmp++;
121 }
122}
123 10
124# endif /* !__GNUC__ */
125# endif /* __KERNEL__ */
126#endif /* _ASM_IA64_POSIX_TYPES_H */ 11#endif /* _ASM_IA64_POSIX_TYPES_H */
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index af5650169043..a48bd9a9927b 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -269,8 +269,8 @@ void foo(void)
269 BLANK(); 269 BLANK();
270 270
271 /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ 271 /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
272 DEFINE(IA64_GTOD_LOCK_OFFSET, 272 DEFINE(IA64_GTOD_SEQ_OFFSET,
273 offsetof (struct fsyscall_gtod_data_t, lock)); 273 offsetof (struct fsyscall_gtod_data_t, seq));
274 DEFINE(IA64_GTOD_WALL_TIME_OFFSET, 274 DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
275 offsetof (struct fsyscall_gtod_data_t, wall_time)); 275 offsetof (struct fsyscall_gtod_data_t, wall_time));
276 DEFINE(IA64_GTOD_MONO_TIME_OFFSET, 276 DEFINE(IA64_GTOD_MONO_TIME_OFFSET,
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index f15d8601827f..cc26edac0ec6 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -173,7 +173,7 @@ ENTRY(fsys_set_tid_address)
173 FSYS_RETURN 173 FSYS_RETURN
174END(fsys_set_tid_address) 174END(fsys_set_tid_address)
175 175
176#if IA64_GTOD_LOCK_OFFSET !=0 176#if IA64_GTOD_SEQ_OFFSET !=0
177#error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t 177#error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t
178#endif 178#endif
179#if IA64_ITC_JITTER_OFFSET !=0 179#if IA64_ITC_JITTER_OFFSET !=0
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
index 57d2ee6c83e1..146b15b5fec3 100644
--- a/arch/ia64/kernel/fsyscall_gtod_data.h
+++ b/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -6,7 +6,7 @@
6 */ 6 */
7 7
8struct fsyscall_gtod_data_t { 8struct fsyscall_gtod_data_t {
9 seqlock_t lock; 9 seqcount_t seq;
10 struct timespec wall_time; 10 struct timespec wall_time;
11 struct timespec monotonic_time; 11 struct timespec monotonic_time;
12 cycle_t clk_mask; 12 cycle_t clk_mask;
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 9dc52b63fc87..ce74e143aea3 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -38,6 +38,7 @@
38#include <asm/pgalloc.h> 38#include <asm/pgalloc.h>
39#include <asm/processor.h> 39#include <asm/processor.h>
40#include <asm/sal.h> 40#include <asm/sal.h>
41#include <asm/switch_to.h>
41#include <asm/tlbflush.h> 42#include <asm/tlbflush.h>
42#include <asm/uaccess.h> 43#include <asm/uaccess.h>
43#include <asm/unwind.h> 44#include <asm/unwind.h>
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index aa94bdda9de8..ecc904b33c5f 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -34,9 +34,7 @@
34 34
35static cycle_t itc_get_cycles(struct clocksource *cs); 35static cycle_t itc_get_cycles(struct clocksource *cs);
36 36
37struct fsyscall_gtod_data_t fsyscall_gtod_data = { 37struct fsyscall_gtod_data_t fsyscall_gtod_data;
38 .lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
39};
40 38
41struct itc_jitter_data_t itc_jitter_data; 39struct itc_jitter_data_t itc_jitter_data;
42 40
@@ -459,9 +457,7 @@ void update_vsyscall_tz(void)
459void update_vsyscall(struct timespec *wall, struct timespec *wtm, 457void update_vsyscall(struct timespec *wall, struct timespec *wtm,
460 struct clocksource *c, u32 mult) 458 struct clocksource *c, u32 mult)
461{ 459{
462 unsigned long flags; 460 write_seqcount_begin(&fsyscall_gtod_data.seq);
463
464 write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
465 461
466 /* copy fsyscall clock data */ 462 /* copy fsyscall clock data */
467 fsyscall_gtod_data.clk_mask = c->mask; 463 fsyscall_gtod_data.clk_mask = c->mask;
@@ -484,6 +480,6 @@ void update_vsyscall(struct timespec *wall, struct timespec *wtm,
484 fsyscall_gtod_data.monotonic_time.tv_sec++; 480 fsyscall_gtod_data.monotonic_time.tv_sec++;
485 } 481 }
486 482
487 write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags); 483 write_seqcount_end(&fsyscall_gtod_data.seq);
488} 484}
489 485
diff --git a/arch/m32r/include/asm/posix_types.h b/arch/m32r/include/asm/posix_types.h
index b309c5858637..0195850e1f88 100644
--- a/arch/m32r/include/asm/posix_types.h
+++ b/arch/m32r/include/asm/posix_types.h
@@ -7,112 +7,22 @@
7 * assume GCC is being used. 7 * assume GCC is being used.
8 */ 8 */
9 9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned short __kernel_mode_t; 10typedef unsigned short __kernel_mode_t;
11#define __kernel_mode_t __kernel_mode_t
12
12typedef unsigned short __kernel_nlink_t; 13typedef unsigned short __kernel_nlink_t;
13typedef long __kernel_off_t; 14#define __kernel_nlink_t __kernel_nlink_t
14typedef int __kernel_pid_t; 15
15typedef unsigned short __kernel_ipc_pid_t; 16typedef unsigned short __kernel_ipc_pid_t;
17#define __kernel_ipc_pid_t __kernel_ipc_pid_t
18
16typedef unsigned short __kernel_uid_t; 19typedef unsigned short __kernel_uid_t;
17typedef unsigned short __kernel_gid_t; 20typedef unsigned short __kernel_gid_t;
18typedef unsigned int __kernel_size_t; 21#define __kernel_uid_t __kernel_uid_t
19typedef int __kernel_ssize_t;
20typedef int __kernel_ptrdiff_t;
21typedef long __kernel_time_t;
22typedef long __kernel_suseconds_t;
23typedef long __kernel_clock_t;
24typedef int __kernel_timer_t;
25typedef int __kernel_clockid_t;
26typedef int __kernel_daddr_t;
27typedef char * __kernel_caddr_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30typedef unsigned int __kernel_uid32_t;
31typedef unsigned int __kernel_gid32_t;
32 22
33typedef unsigned short __kernel_old_uid_t;
34typedef unsigned short __kernel_old_gid_t;
35typedef unsigned short __kernel_old_dev_t; 23typedef unsigned short __kernel_old_dev_t;
24#define __kernel_old_dev_t __kernel_old_dev_t
36 25
37#ifdef __GNUC__ 26#include <asm-generic/posix_types.h>
38typedef long long __kernel_loff_t;
39#endif
40
41typedef struct {
42 int val[2];
43} __kernel_fsid_t;
44
45#if defined(__KERNEL__)
46
47#undef __FD_SET
48static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
49{
50 unsigned long __tmp = __fd / __NFDBITS;
51 unsigned long __rem = __fd % __NFDBITS;
52 __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
53}
54
55#undef __FD_CLR
56static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
57{
58 unsigned long __tmp = __fd / __NFDBITS;
59 unsigned long __rem = __fd % __NFDBITS;
60 __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
61}
62
63
64#undef __FD_ISSET
65static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
66{
67 unsigned long __tmp = __fd / __NFDBITS;
68 unsigned long __rem = __fd % __NFDBITS;
69 return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
70}
71
72/*
73 * This will unroll the loop for the normal constant case (8 ints,
74 * for a 256-bit fd_set)
75 */
76#undef __FD_ZERO
77static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
78{
79 unsigned long *__tmp = __p->fds_bits;
80 int __i;
81
82 if (__builtin_constant_p(__FDSET_LONGS)) {
83 switch (__FDSET_LONGS) {
84 case 16:
85 __tmp[ 0] = 0; __tmp[ 1] = 0;
86 __tmp[ 2] = 0; __tmp[ 3] = 0;
87 __tmp[ 4] = 0; __tmp[ 5] = 0;
88 __tmp[ 6] = 0; __tmp[ 7] = 0;
89 __tmp[ 8] = 0; __tmp[ 9] = 0;
90 __tmp[10] = 0; __tmp[11] = 0;
91 __tmp[12] = 0; __tmp[13] = 0;
92 __tmp[14] = 0; __tmp[15] = 0;
93 return;
94
95 case 8:
96 __tmp[ 0] = 0; __tmp[ 1] = 0;
97 __tmp[ 2] = 0; __tmp[ 3] = 0;
98 __tmp[ 4] = 0; __tmp[ 5] = 0;
99 __tmp[ 6] = 0; __tmp[ 7] = 0;
100 return;
101
102 case 4:
103 __tmp[ 0] = 0; __tmp[ 1] = 0;
104 __tmp[ 2] = 0; __tmp[ 3] = 0;
105 return;
106 }
107 }
108 __i = __FDSET_LONGS;
109 while (__i) {
110 __i--;
111 *__tmp = 0;
112 __tmp++;
113 }
114}
115
116#endif /* defined(__KERNEL__) */
117 27
118#endif /* _ASM_M32R_POSIX_TYPES_H */ 28#endif /* _ASM_M32R_POSIX_TYPES_H */
diff --git a/arch/m68k/include/asm/posix_types.h b/arch/m68k/include/asm/posix_types.h
index 98d0970d9bad..6373093be72b 100644
--- a/arch/m68k/include/asm/posix_types.h
+++ b/arch/m68k/include/asm/posix_types.h
@@ -7,55 +7,22 @@
7 * assume GCC is being used. 7 * assume GCC is being used.
8 */ 8 */
9 9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned short __kernel_mode_t; 10typedef unsigned short __kernel_mode_t;
11#define __kernel_mode_t __kernel_mode_t
12
12typedef unsigned short __kernel_nlink_t; 13typedef unsigned short __kernel_nlink_t;
13typedef long __kernel_off_t; 14#define __kernel_nlink_t __kernel_nlink_t
14typedef int __kernel_pid_t; 15
15typedef unsigned short __kernel_ipc_pid_t; 16typedef unsigned short __kernel_ipc_pid_t;
17#define __kernel_ipc_pid_t __kernel_ipc_pid_t
18
16typedef unsigned short __kernel_uid_t; 19typedef unsigned short __kernel_uid_t;
17typedef unsigned short __kernel_gid_t; 20typedef unsigned short __kernel_gid_t;
18typedef unsigned int __kernel_size_t; 21#define __kernel_uid_t __kernel_uid_t
19typedef int __kernel_ssize_t;
20typedef int __kernel_ptrdiff_t;
21typedef long __kernel_time_t;
22typedef long __kernel_suseconds_t;
23typedef long __kernel_clock_t;
24typedef int __kernel_timer_t;
25typedef int __kernel_clockid_t;
26typedef int __kernel_daddr_t;
27typedef char * __kernel_caddr_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30typedef unsigned int __kernel_uid32_t;
31typedef unsigned int __kernel_gid32_t;
32
33typedef unsigned short __kernel_old_uid_t;
34typedef unsigned short __kernel_old_gid_t;
35typedef unsigned short __kernel_old_dev_t;
36
37#ifdef __GNUC__
38typedef long long __kernel_loff_t;
39#endif
40 22
41typedef struct { 23typedef unsigned short __kernel_old_dev_t;
42 int val[2]; 24#define __kernel_old_dev_t __kernel_old_dev_t
43} __kernel_fsid_t;
44
45#if defined(__KERNEL__)
46
47#undef __FD_SET
48#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
49
50#undef __FD_CLR
51#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
52
53#undef __FD_ISSET
54#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
55
56#undef __FD_ZERO
57#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
58 25
59#endif /* defined(__KERNEL__) */ 26#include <asm-generic/posix_types.h>
60 27
61#endif 28#endif
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 34940c828def..fa83ea497db7 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -2,8 +2,6 @@
2# arch/microblaze/boot/Makefile 2# arch/microblaze/boot/Makefile
3# 3#
4 4
5MKIMAGE := $(srctree)/scripts/mkuboot.sh
6
7obj-y += linked_dtb.o 5obj-y += linked_dtb.o
8 6
9targets := linux.bin linux.bin.gz simpleImage.% 7targets := linux.bin linux.bin.gz simpleImage.%
@@ -35,11 +33,9 @@ quiet_cmd_strip = STRIP $@
35 cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \ 33 cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \
36 -K _fdt_start vmlinux -o $@ 34 -K _fdt_start vmlinux -o $@
37 35
38quiet_cmd_uimage = UIMAGE $@.ub 36UIMAGE_IN = $@
39 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A microblaze -O linux -T kernel \ 37UIMAGE_OUT = $@.ub
40 -C none -n 'Linux-$(KERNELRELEASE)' \ 38UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR)
41 -a $(CONFIG_KERNEL_BASE_ADDR) -e $(CONFIG_KERNEL_BASE_ADDR) \
42 -d $@ $@.ub
43 39
44$(obj)/simpleImage.%: vmlinux FORCE 40$(obj)/simpleImage.%: vmlinux FORCE
45 $(call if_changed,cp,.unstrip) 41 $(call if_changed,cp,.unstrip)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index edbbae17e820..ce30e2f91d77 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2457,6 +2457,7 @@ config MIPS32_COMPAT
2457config COMPAT 2457config COMPAT
2458 bool 2458 bool
2459 depends on MIPS32_COMPAT 2459 depends on MIPS32_COMPAT
2460 select ARCH_WANT_OLD_COMPAT_IPC
2460 default y 2461 default y
2461 2462
2462config SYSVIPC_COMPAT 2463config SYSVIPC_COMPAT
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
index 0a430e06f5e5..e44a55bc7f0d 100644
--- a/arch/mips/cavium-octeon/flash_setup.c
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -60,7 +60,7 @@ static int __init flash_init(void)
60 if (mymtd) { 60 if (mymtd) {
61 mymtd->owner = THIS_MODULE; 61 mymtd->owner = THIS_MODULE;
62 mtd_device_parse_register(mymtd, part_probe_types, 62 mtd_device_parse_register(mymtd, part_probe_types,
63 0, NULL, 0); 63 NULL, NULL, 0);
64 } else { 64 } else {
65 pr_err("Failed to register MTD device for flash\n"); 65 pr_err("Failed to register MTD device for flash\n");
66 } 66 }
diff --git a/arch/mips/configs/db1300_defconfig b/arch/mips/configs/db1300_defconfig
index c38b190151c4..3590ab5d9791 100644
--- a/arch/mips/configs/db1300_defconfig
+++ b/arch/mips/configs/db1300_defconfig
@@ -133,7 +133,7 @@ CONFIG_BLK_DEV_BSG=y
133CONFIG_IOSCHED_NOOP=y 133CONFIG_IOSCHED_NOOP=y
134CONFIG_DEFAULT_NOOP=y 134CONFIG_DEFAULT_NOOP=y
135CONFIG_DEFAULT_IOSCHED="noop" 135CONFIG_DEFAULT_IOSCHED="noop"
136CONFIG_INLINE_SPIN_UNLOCK=y 136# CONFIG_UNINLINE_SPIN_UNLOCK is not set
137CONFIG_INLINE_SPIN_UNLOCK_IRQ=y 137CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
138CONFIG_INLINE_READ_UNLOCK=y 138CONFIG_INLINE_READ_UNLOCK=y
139CONFIG_INLINE_READ_UNLOCK_IRQ=y 139CONFIG_INLINE_READ_UNLOCK_IRQ=y
diff --git a/arch/mips/include/asm/posix_types.h b/arch/mips/include/asm/posix_types.h
index c200102c8586..e0308dcca135 100644
--- a/arch/mips/include/asm/posix_types.h
+++ b/arch/mips/include/asm/posix_types.h
@@ -17,128 +17,21 @@
17 * assume GCC is being used. 17 * assume GCC is being used.
18 */ 18 */
19 19
20typedef unsigned long __kernel_ino_t;
21typedef unsigned int __kernel_mode_t;
22#if (_MIPS_SZLONG == 32)
23typedef unsigned long __kernel_nlink_t;
24#endif
25#if (_MIPS_SZLONG == 64) 20#if (_MIPS_SZLONG == 64)
26typedef unsigned int __kernel_nlink_t; 21typedef unsigned int __kernel_nlink_t;
22#define __kernel_nlink_t __kernel_nlink_t
27#endif 23#endif
28typedef long __kernel_off_t;
29typedef int __kernel_pid_t;
30typedef int __kernel_ipc_pid_t;
31typedef unsigned int __kernel_uid_t;
32typedef unsigned int __kernel_gid_t;
33#if (_MIPS_SZLONG == 32)
34typedef unsigned int __kernel_size_t;
35typedef int __kernel_ssize_t;
36typedef int __kernel_ptrdiff_t;
37#endif
38#if (_MIPS_SZLONG == 64)
39typedef unsigned long __kernel_size_t;
40typedef long __kernel_ssize_t;
41typedef long __kernel_ptrdiff_t;
42#endif
43typedef long __kernel_time_t;
44typedef long __kernel_suseconds_t;
45typedef long __kernel_clock_t;
46typedef int __kernel_timer_t;
47typedef int __kernel_clockid_t;
48typedef long __kernel_daddr_t;
49typedef char * __kernel_caddr_t;
50 24
51typedef unsigned short __kernel_uid16_t; 25typedef long __kernel_daddr_t;
52typedef unsigned short __kernel_gid16_t; 26#define __kernel_daddr_t __kernel_daddr_t
53typedef unsigned int __kernel_uid32_t;
54typedef unsigned int __kernel_gid32_t;
55typedef __kernel_uid_t __kernel_old_uid_t;
56typedef __kernel_gid_t __kernel_old_gid_t;
57typedef unsigned int __kernel_old_dev_t;
58
59#ifdef __GNUC__
60typedef long long __kernel_loff_t;
61#endif
62 27
63typedef struct {
64#if (_MIPS_SZLONG == 32) 28#if (_MIPS_SZLONG == 32)
29typedef struct {
65 long val[2]; 30 long val[2];
66#endif
67#if (_MIPS_SZLONG == 64)
68 int val[2];
69#endif
70} __kernel_fsid_t; 31} __kernel_fsid_t;
32#define __kernel_fsid_t __kernel_fsid_t
33#endif
71 34
72#if defined(__KERNEL__) 35#include <asm-generic/posix_types.h>
73
74#undef __FD_SET
75static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
76{
77 unsigned long __tmp = __fd / __NFDBITS;
78 unsigned long __rem = __fd % __NFDBITS;
79 __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
80}
81
82#undef __FD_CLR
83static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
84{
85 unsigned long __tmp = __fd / __NFDBITS;
86 unsigned long __rem = __fd % __NFDBITS;
87 __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
88}
89
90#undef __FD_ISSET
91static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
92{
93 unsigned long __tmp = __fd / __NFDBITS;
94 unsigned long __rem = __fd % __NFDBITS;
95 return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
96}
97
98/*
99 * This will unroll the loop for the normal constant case (8 ints,
100 * for a 256-bit fd_set)
101 */
102#undef __FD_ZERO
103static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
104{
105 unsigned long *__tmp = __p->fds_bits;
106 int __i;
107
108 if (__builtin_constant_p(__FDSET_LONGS)) {
109 switch (__FDSET_LONGS) {
110 case 16:
111 __tmp[ 0] = 0; __tmp[ 1] = 0;
112 __tmp[ 2] = 0; __tmp[ 3] = 0;
113 __tmp[ 4] = 0; __tmp[ 5] = 0;
114 __tmp[ 6] = 0; __tmp[ 7] = 0;
115 __tmp[ 8] = 0; __tmp[ 9] = 0;
116 __tmp[10] = 0; __tmp[11] = 0;
117 __tmp[12] = 0; __tmp[13] = 0;
118 __tmp[14] = 0; __tmp[15] = 0;
119 return;
120
121 case 8:
122 __tmp[ 0] = 0; __tmp[ 1] = 0;
123 __tmp[ 2] = 0; __tmp[ 3] = 0;
124 __tmp[ 4] = 0; __tmp[ 5] = 0;
125 __tmp[ 6] = 0; __tmp[ 7] = 0;
126 return;
127
128 case 4:
129 __tmp[ 0] = 0; __tmp[ 1] = 0;
130 __tmp[ 2] = 0; __tmp[ 3] = 0;
131 return;
132 }
133 }
134 __i = __FDSET_LONGS;
135 while (__i) {
136 __i--;
137 *__tmp = 0;
138 __tmp++;
139 }
140}
141
142#endif /* defined(__KERNEL__) */
143 36
144#endif /* _ASM_POSIX_TYPES_H */ 37#endif /* _ASM_POSIX_TYPES_H */
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index 29811f043399..84d0639e4580 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -326,7 +326,7 @@ static void sp_cleanup(void)
326 i = j * __NFDBITS; 326 i = j * __NFDBITS;
327 if (i >= fdt->max_fds) 327 if (i >= fdt->max_fds)
328 break; 328 break;
329 set = fdt->open_fds->fds_bits[j++]; 329 set = fdt->open_fds[j++];
330 while (set) { 330 while (set) {
331 if (set & 1) { 331 if (set & 1) {
332 struct file * file = xchg(&fdt->fd[i], NULL); 332 struct file * file = xchg(&fdt->fd[i], NULL);
diff --git a/arch/mn10300/include/asm/posix_types.h b/arch/mn10300/include/asm/posix_types.h
index 56ffbc158798..ab506181ec31 100644
--- a/arch/mn10300/include/asm/posix_types.h
+++ b/arch/mn10300/include/asm/posix_types.h
@@ -17,14 +17,19 @@
17 * assume GCC is being used. 17 * assume GCC is being used.
18 */ 18 */
19 19
20typedef unsigned long __kernel_ino_t;
21typedef unsigned short __kernel_mode_t; 20typedef unsigned short __kernel_mode_t;
21#define __kernel_mode_t __kernel_mode_t
22
22typedef unsigned short __kernel_nlink_t; 23typedef unsigned short __kernel_nlink_t;
23typedef long __kernel_off_t; 24#define __kernel_nlink_t __kernel_nlink_t
24typedef int __kernel_pid_t; 25
25typedef unsigned short __kernel_ipc_pid_t; 26typedef unsigned short __kernel_ipc_pid_t;
27#define __kernel_ipc_pid_t __kernel_ipc_pid_t
28
26typedef unsigned short __kernel_uid_t; 29typedef unsigned short __kernel_uid_t;
27typedef unsigned short __kernel_gid_t; 30typedef unsigned short __kernel_gid_t;
31#define __kernel_uid_t __kernel_uid_t
32
28#if __GNUC__ == 4 33#if __GNUC__ == 4
29typedef unsigned int __kernel_size_t; 34typedef unsigned int __kernel_size_t;
30typedef signed int __kernel_ssize_t; 35typedef signed int __kernel_ssize_t;
@@ -33,105 +38,11 @@ typedef unsigned long __kernel_size_t;
33typedef signed long __kernel_ssize_t; 38typedef signed long __kernel_ssize_t;
34#endif 39#endif
35typedef int __kernel_ptrdiff_t; 40typedef int __kernel_ptrdiff_t;
36typedef long __kernel_time_t; 41#define __kernel_size_t __kernel_size_t
37typedef long __kernel_suseconds_t;
38typedef long __kernel_clock_t;
39typedef int __kernel_timer_t;
40typedef int __kernel_clockid_t;
41typedef int __kernel_daddr_t;
42typedef char * __kernel_caddr_t;
43typedef unsigned short __kernel_uid16_t;
44typedef unsigned short __kernel_gid16_t;
45typedef unsigned int __kernel_uid32_t;
46typedef unsigned int __kernel_gid32_t;
47 42
48typedef unsigned short __kernel_old_uid_t;
49typedef unsigned short __kernel_old_gid_t;
50typedef unsigned short __kernel_old_dev_t; 43typedef unsigned short __kernel_old_dev_t;
44#define __kernel_old_dev_t __kernel_old_dev_t
51 45
52#ifdef __GNUC__ 46#include <asm-generic/posix_types.h>
53typedef long long __kernel_loff_t;
54#endif
55
56typedef struct {
57#if defined(__KERNEL__) || defined(__USE_ALL)
58 int val[2];
59#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
60 int __val[2];
61#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
62} __kernel_fsid_t;
63
64#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
65
66#undef __FD_SET
67static inline void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
68{
69 unsigned long __tmp = __fd / __NFDBITS;
70 unsigned long __rem = __fd % __NFDBITS;
71 __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
72}
73
74#undef __FD_CLR
75static inline void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
76{
77 unsigned long __tmp = __fd / __NFDBITS;
78 unsigned long __rem = __fd % __NFDBITS;
79 __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
80}
81
82
83#undef __FD_ISSET
84static inline int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
85{
86 unsigned long __tmp = __fd / __NFDBITS;
87 unsigned long __rem = __fd % __NFDBITS;
88 return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
89}
90
91/*
92 * This will unroll the loop for the normal constant case (8 ints,
93 * for a 256-bit fd_set)
94 */
95#undef __FD_ZERO
96static inline void __FD_ZERO(__kernel_fd_set *__p)
97{
98 unsigned long *__tmp = __p->fds_bits;
99 int __i;
100
101 if (__builtin_constant_p(__FDSET_LONGS)) {
102 switch (__FDSET_LONGS) {
103 case 16:
104 __tmp[ 0] = 0; __tmp[ 1] = 0;
105 __tmp[ 2] = 0; __tmp[ 3] = 0;
106 __tmp[ 4] = 0; __tmp[ 5] = 0;
107 __tmp[ 6] = 0; __tmp[ 7] = 0;
108 __tmp[ 8] = 0; __tmp[ 9] = 0;
109 __tmp[10] = 0; __tmp[11] = 0;
110 __tmp[12] = 0; __tmp[13] = 0;
111 __tmp[14] = 0; __tmp[15] = 0;
112 return;
113
114 case 8:
115 __tmp[ 0] = 0; __tmp[ 1] = 0;
116 __tmp[ 2] = 0; __tmp[ 3] = 0;
117 __tmp[ 4] = 0; __tmp[ 5] = 0;
118 __tmp[ 6] = 0; __tmp[ 7] = 0;
119 return;
120
121 case 4:
122 __tmp[ 0] = 0; __tmp[ 1] = 0;
123 __tmp[ 2] = 0; __tmp[ 3] = 0;
124 return;
125 }
126 }
127 __i = __FDSET_LONGS;
128 while (__i) {
129 __i--;
130 *__tmp = 0;
131 __tmp++;
132 }
133}
134
135#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
136 47
137#endif /* _ASM_POSIX_TYPES_H */ 48#endif /* _ASM_POSIX_TYPES_H */
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 2388bdb32832..49df14805a9b 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -8,6 +8,29 @@
8#include <asm/atomic.h> 8#include <asm/atomic.h>
9#include <asm/errno.h> 9#include <asm/errno.h>
10 10
11/* The following has to match the LWS code in syscall.S. We have
12 sixteen four-word locks. */
13
14static inline void
15_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
16{
17 extern u32 lws_lock_start[];
18 long index = ((long)uaddr & 0xf0) >> 2;
19 arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
20 local_irq_save(*flags);
21 arch_spin_lock(s);
22}
23
24static inline void
25_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
26{
27 extern u32 lws_lock_start[];
28 long index = ((long)uaddr & 0xf0) >> 2;
29 arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
30 arch_spin_unlock(s);
31 local_irq_restore(*flags);
32}
33
11static inline int 34static inline int
12futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) 35futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
13{ 36{
@@ -26,7 +49,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
26 49
27 pagefault_disable(); 50 pagefault_disable();
28 51
29 _atomic_spin_lock_irqsave(uaddr, flags); 52 _futex_spin_lock_irqsave(uaddr, &flags);
30 53
31 switch (op) { 54 switch (op) {
32 case FUTEX_OP_SET: 55 case FUTEX_OP_SET:
@@ -71,7 +94,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
71 ret = -ENOSYS; 94 ret = -ENOSYS;
72 } 95 }
73 96
74 _atomic_spin_unlock_irqrestore(uaddr, flags); 97 _futex_spin_unlock_irqrestore(uaddr, &flags);
75 98
76 pagefault_enable(); 99 pagefault_enable();
77 100
@@ -113,7 +136,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
113 * address. This should scale to a couple of CPUs. 136 * address. This should scale to a couple of CPUs.
114 */ 137 */
115 138
116 _atomic_spin_lock_irqsave(uaddr, flags); 139 _futex_spin_lock_irqsave(uaddr, &flags);
117 140
118 ret = get_user(val, uaddr); 141 ret = get_user(val, uaddr);
119 142
@@ -122,7 +145,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
122 145
123 *uval = val; 146 *uval = val;
124 147
125 _atomic_spin_unlock_irqrestore(uaddr, flags); 148 _futex_spin_unlock_irqrestore(uaddr, &flags);
126 149
127 return ret; 150 return ret;
128} 151}
diff --git a/arch/parisc/include/asm/posix_types.h b/arch/parisc/include/asm/posix_types.h
index 00da29a340ba..5212b0357daf 100644
--- a/arch/parisc/include/asm/posix_types.h
+++ b/arch/parisc/include/asm/posix_types.h
@@ -6,123 +6,22 @@
6 * be a little careful about namespace pollution etc. Also, we cannot 6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used. 7 * assume GCC is being used.
8 */ 8 */
9typedef unsigned long __kernel_ino_t; 9
10typedef unsigned short __kernel_mode_t; 10typedef unsigned short __kernel_mode_t;
11#define __kernel_mode_t __kernel_mode_t
12
11typedef unsigned short __kernel_nlink_t; 13typedef unsigned short __kernel_nlink_t;
12typedef long __kernel_off_t; 14#define __kernel_nlink_t __kernel_nlink_t
13typedef int __kernel_pid_t; 15
14typedef unsigned short __kernel_ipc_pid_t; 16typedef unsigned short __kernel_ipc_pid_t;
15typedef unsigned int __kernel_uid_t; 17#define __kernel_ipc_pid_t __kernel_ipc_pid_t
16typedef unsigned int __kernel_gid_t;
17typedef int __kernel_suseconds_t;
18typedef long __kernel_clock_t;
19typedef int __kernel_timer_t;
20typedef int __kernel_clockid_t;
21typedef int __kernel_daddr_t;
22/* Note these change from narrow to wide kernels */
23#ifdef CONFIG_64BIT
24typedef unsigned long __kernel_size_t;
25typedef long __kernel_ssize_t;
26typedef long __kernel_ptrdiff_t;
27#else
28typedef unsigned int __kernel_size_t;
29typedef int __kernel_ssize_t;
30typedef int __kernel_ptrdiff_t;
31#endif
32typedef long __kernel_time_t;
33typedef char * __kernel_caddr_t;
34 18
35typedef unsigned short __kernel_uid16_t; 19typedef int __kernel_suseconds_t;
36typedef unsigned short __kernel_gid16_t; 20#define __kernel_suseconds_t __kernel_suseconds_t
37typedef unsigned int __kernel_uid32_t;
38typedef unsigned int __kernel_gid32_t;
39 21
40#ifdef __GNUC__
41typedef long long __kernel_loff_t;
42typedef long long __kernel_off64_t; 22typedef long long __kernel_off64_t;
43typedef unsigned long long __kernel_ino64_t; 23typedef unsigned long long __kernel_ino64_t;
44#endif
45
46typedef unsigned int __kernel_old_dev_t;
47
48typedef struct {
49 int val[2];
50} __kernel_fsid_t;
51
52/* compatibility stuff */
53typedef __kernel_uid_t __kernel_old_uid_t;
54typedef __kernel_gid_t __kernel_old_gid_t;
55
56#if defined(__KERNEL__)
57
58#undef __FD_SET
59static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
60{
61 unsigned long __tmp = __fd / __NFDBITS;
62 unsigned long __rem = __fd % __NFDBITS;
63 __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
64}
65
66#undef __FD_CLR
67static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
68{
69 unsigned long __tmp = __fd / __NFDBITS;
70 unsigned long __rem = __fd % __NFDBITS;
71 __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
72}
73
74#undef __FD_ISSET
75static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
76{
77 unsigned long __tmp = __fd / __NFDBITS;
78 unsigned long __rem = __fd % __NFDBITS;
79 return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
80}
81
82/*
83 * This will unroll the loop for the normal constant case (8 ints,
84 * for a 256-bit fd_set)
85 */
86#undef __FD_ZERO
87static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
88{
89 unsigned long *__tmp = __p->fds_bits;
90 int __i;
91
92 if (__builtin_constant_p(__FDSET_LONGS)) {
93 switch (__FDSET_LONGS) {
94 case 16:
95 __tmp[ 0] = 0; __tmp[ 1] = 0;
96 __tmp[ 2] = 0; __tmp[ 3] = 0;
97 __tmp[ 4] = 0; __tmp[ 5] = 0;
98 __tmp[ 6] = 0; __tmp[ 7] = 0;
99 __tmp[ 8] = 0; __tmp[ 9] = 0;
100 __tmp[10] = 0; __tmp[11] = 0;
101 __tmp[12] = 0; __tmp[13] = 0;
102 __tmp[14] = 0; __tmp[15] = 0;
103 return;
104
105 case 8:
106 __tmp[ 0] = 0; __tmp[ 1] = 0;
107 __tmp[ 2] = 0; __tmp[ 3] = 0;
108 __tmp[ 4] = 0; __tmp[ 5] = 0;
109 __tmp[ 6] = 0; __tmp[ 7] = 0;
110 return;
111
112 case 4:
113 __tmp[ 0] = 0; __tmp[ 1] = 0;
114 __tmp[ 2] = 0; __tmp[ 3] = 0;
115 return;
116 }
117 }
118 __i = __FDSET_LONGS;
119 while (__i) {
120 __i--;
121 *__tmp = 0;
122 __tmp++;
123 }
124}
125 24
126#endif /* defined(__KERNEL__) */ 25#include <asm-generic/posix_types.h>
127 26
128#endif 27#endif
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 5006e8ea3051..0bb1d63907f8 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -290,8 +290,7 @@ smp_cpu_init(int cpunum)
290 mb(); 290 mb();
291 291
292 /* Well, support 2.4 linux scheme as well. */ 292 /* Well, support 2.4 linux scheme as well. */
293 if (cpu_isset(cpunum, cpu_online_map)) 293 if (cpu_online(cpunum)) {
294 {
295 extern void machine_halt(void); /* arch/parisc.../process.c */ 294 extern void machine_halt(void); /* arch/parisc.../process.c */
296 295
297 printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum); 296 printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d219ebecabf0..feab3bad6d0f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -133,7 +133,6 @@ config PPC
133 select HAVE_REGS_AND_STACK_ACCESS_API 133 select HAVE_REGS_AND_STACK_ACCESS_API
134 select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 134 select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
135 select HAVE_GENERIC_HARDIRQS 135 select HAVE_GENERIC_HARDIRQS
136 select HAVE_SPARSE_IRQ
137 select SPARSE_IRQ 136 select SPARSE_IRQ
138 select IRQ_PER_CPU 137 select IRQ_PER_CPU
139 select IRQ_DOMAIN 138 select IRQ_DOMAIN
@@ -154,6 +153,7 @@ config COMPAT
154 bool 153 bool
155 default y if PPC64 154 default y if PPC64
156 select COMPAT_BINFMT_ELF 155 select COMPAT_BINFMT_ELF
156 select ARCH_WANT_OLD_COMPAT_IPC
157 157
158config SYSVIPC_COMPAT 158config SYSVIPC_COMPAT
159 bool 159 bool
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 72d55dbc6119..e5f26890a69e 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -114,16 +114,6 @@ config DEBUGGER
114 depends on KGDB || XMON 114 depends on KGDB || XMON
115 default y 115 default y
116 116
117config VIRQ_DEBUG
118 bool "Expose hardware/virtual IRQ mapping via debugfs"
119 depends on DEBUG_FS
120 help
121 This option will show the mapping relationship between hardware irq
122 numbers and virtual irq numbers. The mapping is exposed via debugfs
123 in the file powerpc/virq_mapping.
124
125 If you don't know what this means you don't need it.
126
127config BDI_SWITCH 117config BDI_SWITCH
128 bool "Include BDI-2000 user context switcher" 118 bool "Include BDI-2000 user context switcher"
129 depends on DEBUG_KERNEL && PPC32 119 depends on DEBUG_KERNEL && PPC32
diff --git a/arch/powerpc/configs/85xx/p1023rds_defconfig b/arch/powerpc/configs/85xx/p1023rds_defconfig
index c091aaf7685f..f4337bacd0e7 100644
--- a/arch/powerpc/configs/85xx/p1023rds_defconfig
+++ b/arch/powerpc/configs/85xx/p1023rds_defconfig
@@ -165,7 +165,7 @@ CONFIG_DETECT_HUNG_TASK=y
165CONFIG_DEBUG_INFO=y 165CONFIG_DEBUG_INFO=y
166# CONFIG_RCU_CPU_STALL_DETECTOR is not set 166# CONFIG_RCU_CPU_STALL_DETECTOR is not set
167CONFIG_SYSCTL_SYSCALL_CHECK=y 167CONFIG_SYSCTL_SYSCALL_CHECK=y
168CONFIG_VIRQ_DEBUG=y 168CONFIG_IRQ_DOMAIN_DEBUG=y
169CONFIG_CRYPTO_PCBC=m 169CONFIG_CRYPTO_PCBC=m
170CONFIG_CRYPTO_SHA256=y 170CONFIG_CRYPTO_SHA256=y
171CONFIG_CRYPTO_SHA512=y 171CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig
index acf7fb280464..f104ccde6b53 100644
--- a/arch/powerpc/configs/chroma_defconfig
+++ b/arch/powerpc/configs/chroma_defconfig
@@ -279,7 +279,7 @@ CONFIG_FTRACE_SYSCALLS=y
279CONFIG_PPC_EMULATED_STATS=y 279CONFIG_PPC_EMULATED_STATS=y
280CONFIG_XMON=y 280CONFIG_XMON=y
281CONFIG_XMON_DEFAULT=y 281CONFIG_XMON_DEFAULT=y
282CONFIG_VIRQ_DEBUG=y 282CONFIG_IRQ_DOMAIN_DEBUG=y
283CONFIG_PPC_EARLY_DEBUG=y 283CONFIG_PPC_EARLY_DEBUG=y
284CONFIG_KEYS_DEBUG_PROC_KEYS=y 284CONFIG_KEYS_DEBUG_PROC_KEYS=y
285CONFIG_CRYPTO_NULL=m 285CONFIG_CRYPTO_NULL=m
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 7ed8d4cf2719..82b13bfcf3c0 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -95,7 +95,7 @@ CONFIG_DEBUG_FS=y
95CONFIG_DETECT_HUNG_TASK=y 95CONFIG_DETECT_HUNG_TASK=y
96CONFIG_DEBUG_INFO=y 96CONFIG_DEBUG_INFO=y
97CONFIG_SYSCTL_SYSCALL_CHECK=y 97CONFIG_SYSCTL_SYSCALL_CHECK=y
98CONFIG_VIRQ_DEBUG=y 98CONFIG_IRQ_DOMAIN_DEBUG=y
99CONFIG_CRYPTO_PCBC=m 99CONFIG_CRYPTO_PCBC=m
100CONFIG_CRYPTO_SHA256=y 100CONFIG_CRYPTO_SHA256=y
101CONFIG_CRYPTO_SHA512=y 101CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 5fb0c8a94811..cc87a8441566 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -214,7 +214,7 @@ CONFIG_DEBUG_FS=y
214CONFIG_DETECT_HUNG_TASK=y 214CONFIG_DETECT_HUNG_TASK=y
215CONFIG_DEBUG_INFO=y 215CONFIG_DEBUG_INFO=y
216CONFIG_SYSCTL_SYSCALL_CHECK=y 216CONFIG_SYSCTL_SYSCALL_CHECK=y
217CONFIG_VIRQ_DEBUG=y 217CONFIG_IRQ_DOMAIN_DEBUG=y
218CONFIG_CRYPTO_PCBC=m 218CONFIG_CRYPTO_PCBC=m
219CONFIG_CRYPTO_SHA256=y 219CONFIG_CRYPTO_SHA256=y
220CONFIG_CRYPTO_SHA512=y 220CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index fb51bc90edd2..48d6682f2434 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -216,7 +216,7 @@ CONFIG_DEBUG_FS=y
216CONFIG_DETECT_HUNG_TASK=y 216CONFIG_DETECT_HUNG_TASK=y
217CONFIG_DEBUG_INFO=y 217CONFIG_DEBUG_INFO=y
218CONFIG_SYSCTL_SYSCALL_CHECK=y 218CONFIG_SYSCTL_SYSCALL_CHECK=y
219CONFIG_VIRQ_DEBUG=y 219CONFIG_IRQ_DOMAIN_DEBUG=y
220CONFIG_CRYPTO_PCBC=m 220CONFIG_CRYPTO_PCBC=m
221CONFIG_CRYPTO_SHA256=y 221CONFIG_CRYPTO_SHA256=y
222CONFIG_CRYPTO_SHA512=y 222CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 1acf65026773..c1442a3758ae 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -457,7 +457,7 @@ CONFIG_CODE_PATCHING_SELFTEST=y
457CONFIG_FTR_FIXUP_SELFTEST=y 457CONFIG_FTR_FIXUP_SELFTEST=y
458CONFIG_MSI_BITMAP_SELFTEST=y 458CONFIG_MSI_BITMAP_SELFTEST=y
459CONFIG_XMON=y 459CONFIG_XMON=y
460CONFIG_VIRQ_DEBUG=y 460CONFIG_IRQ_DOMAIN_DEBUG=y
461CONFIG_BOOTX_TEXT=y 461CONFIG_BOOTX_TEXT=y
462CONFIG_CRYPTO_NULL=m 462CONFIG_CRYPTO_NULL=m
463CONFIG_CRYPTO_TEST=m 463CONFIG_CRYPTO_TEST=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 30e7d0d20e49..6608232663cb 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -340,7 +340,7 @@ CONFIG_FTR_FIXUP_SELFTEST=y
340CONFIG_MSI_BITMAP_SELFTEST=y 340CONFIG_MSI_BITMAP_SELFTEST=y
341CONFIG_XMON=y 341CONFIG_XMON=y
342CONFIG_XMON_DEFAULT=y 342CONFIG_XMON_DEFAULT=y
343CONFIG_VIRQ_DEBUG=y 343CONFIG_IRQ_DOMAIN_DEBUG=y
344CONFIG_CRYPTO_NULL=m 344CONFIG_CRYPTO_NULL=m
345CONFIG_CRYPTO_TEST=m 345CONFIG_CRYPTO_TEST=m
346CONFIG_CRYPTO_CCM=m 346CONFIG_CRYPTO_CCM=m
diff --git a/arch/powerpc/include/asm/posix_types.h b/arch/powerpc/include/asm/posix_types.h
index c4e396b540df..f1393252bbda 100644
--- a/arch/powerpc/include/asm/posix_types.h
+++ b/arch/powerpc/include/asm/posix_types.h
@@ -7,122 +7,22 @@
7 * assume GCC is being used. 7 * assume GCC is being used.
8 */ 8 */
9 9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned int __kernel_mode_t;
12typedef long __kernel_off_t;
13typedef int __kernel_pid_t;
14typedef unsigned int __kernel_uid_t;
15typedef unsigned int __kernel_gid_t;
16typedef long __kernel_ptrdiff_t;
17typedef long __kernel_time_t;
18typedef long __kernel_clock_t;
19typedef int __kernel_timer_t;
20typedef int __kernel_clockid_t;
21typedef long __kernel_suseconds_t;
22typedef int __kernel_daddr_t;
23typedef char * __kernel_caddr_t;
24typedef unsigned short __kernel_uid16_t;
25typedef unsigned short __kernel_gid16_t;
26typedef unsigned int __kernel_uid32_t;
27typedef unsigned int __kernel_gid32_t;
28typedef unsigned int __kernel_old_uid_t;
29typedef unsigned int __kernel_old_gid_t;
30
31#ifdef __powerpc64__ 10#ifdef __powerpc64__
32typedef unsigned long __kernel_nlink_t;
33typedef int __kernel_ipc_pid_t;
34typedef unsigned long __kernel_size_t;
35typedef long __kernel_ssize_t;
36typedef unsigned long __kernel_old_dev_t; 11typedef unsigned long __kernel_old_dev_t;
12#define __kernel_old_dev_t __kernel_old_dev_t
37#else 13#else
38typedef unsigned short __kernel_nlink_t;
39typedef short __kernel_ipc_pid_t;
40typedef unsigned int __kernel_size_t; 14typedef unsigned int __kernel_size_t;
41typedef int __kernel_ssize_t; 15typedef int __kernel_ssize_t;
42typedef unsigned int __kernel_old_dev_t; 16typedef long __kernel_ptrdiff_t;
43#endif 17#define __kernel_size_t __kernel_size_t
44
45#ifdef __powerpc64__
46typedef long long __kernel_loff_t;
47#else
48#ifdef __GNUC__
49typedef long long __kernel_loff_t;
50#endif
51#endif
52
53typedef struct {
54 int val[2];
55} __kernel_fsid_t;
56
57#ifndef __GNUC__
58
59#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
60#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
61#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
62#define __FD_ZERO(set) \
63 ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set)))
64
65#else /* __GNUC__ */
66
67#if defined(__KERNEL__)
68/* With GNU C, use inline functions instead so args are evaluated only once: */
69
70#undef __FD_SET
71static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
72{
73 unsigned long _tmp = fd / __NFDBITS;
74 unsigned long _rem = fd % __NFDBITS;
75 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
76}
77
78#undef __FD_CLR
79static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
80{
81 unsigned long _tmp = fd / __NFDBITS;
82 unsigned long _rem = fd % __NFDBITS;
83 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
84}
85
86#undef __FD_ISSET
87static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
88{
89 unsigned long _tmp = fd / __NFDBITS;
90 unsigned long _rem = fd % __NFDBITS;
91 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
92}
93
94/*
95 * This will unroll the loop for the normal constant case (8 ints,
96 * for a 256-bit fd_set)
97 */
98#undef __FD_ZERO
99static __inline__ void __FD_ZERO(__kernel_fd_set *p)
100{
101 unsigned long *tmp = (unsigned long *)p->fds_bits;
102 int i;
103 18
104 if (__builtin_constant_p(__FDSET_LONGS)) { 19typedef unsigned short __kernel_nlink_t;
105 switch (__FDSET_LONGS) { 20#define __kernel_nlink_t __kernel_nlink_t
106 case 16:
107 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
108 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
109 21
110 case 8: 22typedef short __kernel_ipc_pid_t;
111 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; 23#define __kernel_ipc_pid_t __kernel_ipc_pid_t
24#endif
112 25
113 case 4: 26#include <asm-generic/posix_types.h>
114 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
115 return;
116 }
117 }
118 i = __FDSET_LONGS;
119 while (i) {
120 i--;
121 *tmp = 0;
122 tmp++;
123 }
124}
125 27
126#endif /* defined(__KERNEL__) */
127#endif /* __GNUC__ */
128#endif /* _ASM_POWERPC_POSIX_TYPES_H */ 28#endif /* _ASM_POWERPC_POSIX_TYPES_H */
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 03c5fce2a5b3..c2c5b078ba80 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -122,7 +122,7 @@ static struct spu_context *coredump_next_context(int *fd)
122 struct spu_context *ctx = NULL; 122 struct spu_context *ctx = NULL;
123 123
124 for (; *fd < fdt->max_fds; (*fd)++) { 124 for (; *fd < fdt->max_fds; (*fd)++) {
125 if (!FD_ISSET(*fd, fdt->open_fds)) 125 if (!fd_is_open(*fd, fdt))
126 continue; 126 continue;
127 127
128 file = fcheck(*fd); 128 file = fcheck(*fd);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 465d5be1f0f4..2b7c0fbe578e 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -219,6 +219,7 @@ config COMPAT
219 prompt "Kernel support for 31 bit emulation" 219 prompt "Kernel support for 31 bit emulation"
220 depends on 64BIT 220 depends on 64BIT
221 select COMPAT_BINFMT_ELF 221 select COMPAT_BINFMT_ELF
222 select ARCH_WANT_OLD_COMPAT_IPC
222 help 223 help
223 Select this option if you want to enable your system kernel to 224 Select this option if you want to enable your system kernel to
224 handle system-calls from ELF binaries for 31 bit ESA. This option 225 handle system-calls from ELF binaries for 31 bit ESA. This option
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index e49db5d5d06f..a3afecdae145 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -12,6 +12,8 @@
12#ifndef _ASM_S390_CPU_MF_H 12#ifndef _ASM_S390_CPU_MF_H
13#define _ASM_S390_CPU_MF_H 13#define _ASM_S390_CPU_MF_H
14 14
15#include <asm/facility.h>
16
15#define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */ 17#define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */
16#define CPU_MF_INT_SF_ISE (1 << 30) /* incorrect SDBT entry */ 18#define CPU_MF_INT_SF_ISE (1 << 30) /* incorrect SDBT entry */
17#define CPU_MF_INT_SF_PRA (1 << 29) /* program request alert */ 19#define CPU_MF_INT_SF_PRA (1 << 29) /* program request alert */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 1c7d6ce328bf..6340178748bf 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -1,6 +1,8 @@
1#ifndef __MMU_H 1#ifndef __MMU_H
2#define __MMU_H 2#define __MMU_H
3 3
4#include <linux/errno.h>
5
4typedef struct { 6typedef struct {
5 atomic_t attach_count; 7 atomic_t attach_count;
6 unsigned int flush_mm; 8 unsigned int flush_mm;
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index 8cc113f92523..edf8527ff08d 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -3,7 +3,6 @@
3 * 3 *
4 * S390 version 4 * S390 version
5 * 5 *
6 * Derived from "include/asm-i386/posix_types.h"
7 */ 6 */
8 7
9#ifndef __ARCH_S390_POSIX_TYPES_H 8#ifndef __ARCH_S390_POSIX_TYPES_H
@@ -15,22 +14,11 @@
15 * assume GCC is being used. 14 * assume GCC is being used.
16 */ 15 */
17 16
18typedef long __kernel_off_t;
19typedef int __kernel_pid_t;
20typedef unsigned long __kernel_size_t; 17typedef unsigned long __kernel_size_t;
21typedef long __kernel_time_t; 18#define __kernel_size_t __kernel_size_t
22typedef long __kernel_suseconds_t;
23typedef long __kernel_clock_t;
24typedef int __kernel_timer_t;
25typedef int __kernel_clockid_t;
26typedef int __kernel_daddr_t;
27typedef char * __kernel_caddr_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30 19
31#ifdef __GNUC__ 20typedef unsigned short __kernel_old_dev_t;
32typedef long long __kernel_loff_t; 21#define __kernel_old_dev_t __kernel_old_dev_t
33#endif
34 22
35#ifndef __s390x__ 23#ifndef __s390x__
36 24
@@ -42,11 +30,6 @@ typedef unsigned short __kernel_uid_t;
42typedef unsigned short __kernel_gid_t; 30typedef unsigned short __kernel_gid_t;
43typedef int __kernel_ssize_t; 31typedef int __kernel_ssize_t;
44typedef int __kernel_ptrdiff_t; 32typedef int __kernel_ptrdiff_t;
45typedef unsigned int __kernel_uid32_t;
46typedef unsigned int __kernel_gid32_t;
47typedef unsigned short __kernel_old_uid_t;
48typedef unsigned short __kernel_old_gid_t;
49typedef unsigned short __kernel_old_dev_t;
50 33
51#else /* __s390x__ */ 34#else /* __s390x__ */
52 35
@@ -59,49 +42,16 @@ typedef unsigned int __kernel_gid_t;
59typedef long __kernel_ssize_t; 42typedef long __kernel_ssize_t;
60typedef long __kernel_ptrdiff_t; 43typedef long __kernel_ptrdiff_t;
61typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ 44typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
62typedef __kernel_uid_t __kernel_old_uid_t;
63typedef __kernel_gid_t __kernel_old_gid_t;
64typedef __kernel_uid_t __kernel_uid32_t;
65typedef __kernel_gid_t __kernel_gid32_t;
66typedef unsigned short __kernel_old_dev_t;
67 45
68#endif /* __s390x__ */ 46#endif /* __s390x__ */
69 47
70typedef struct { 48#define __kernel_ino_t __kernel_ino_t
71 int val[2]; 49#define __kernel_mode_t __kernel_mode_t
72} __kernel_fsid_t; 50#define __kernel_nlink_t __kernel_nlink_t
73 51#define __kernel_ipc_pid_t __kernel_ipc_pid_t
74 52#define __kernel_uid_t __kernel_uid_t
75#ifdef __KERNEL__ 53#define __kernel_gid_t __kernel_gid_t
76
77#undef __FD_SET
78static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
79{
80 unsigned long _tmp = fd / __NFDBITS;
81 unsigned long _rem = fd % __NFDBITS;
82 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
83}
84
85#undef __FD_CLR
86static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
87{
88 unsigned long _tmp = fd / __NFDBITS;
89 unsigned long _rem = fd % __NFDBITS;
90 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
91}
92
93#undef __FD_ISSET
94static inline int __FD_ISSET(unsigned long fd, const __kernel_fd_set *fdsetp)
95{
96 unsigned long _tmp = fd / __NFDBITS;
97 unsigned long _rem = fd % __NFDBITS;
98 return (fdsetp->fds_bits[_tmp] & (1UL<<_rem)) != 0;
99}
100
101#undef __FD_ZERO
102#define __FD_ZERO(fdsetp) \
103 ((void) memset ((void *) (fdsetp), 0, sizeof (__kernel_fd_set)))
104 54
105#endif /* __KERNEL__ */ 55#include <asm-generic/posix_types.h>
106 56
107#endif 57#endif
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index ac39e7a731fc..87f080b17af1 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -8,6 +8,7 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/timer.h> 9#include <linux/timer.h>
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <asm/facility.h>
11#include <asm/sysinfo.h> 12#include <asm/sysinfo.h>
12#include <asm/ebcdic.h> 13#include <asm/ebcdic.h>
13#include <asm/debug.h> 14#include <asm/debug.h>
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 8481ecf2ad71..46405086479c 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -18,7 +18,7 @@
18#include <linux/notifier.h> 18#include <linux/notifier.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/export.h> 20#include <linux/export.h>
21#include <asm/system.h> 21#include <asm/ctl_reg.h>
22#include <asm/irq.h> 22#include <asm/irq.h>
23#include <asm/cpu_mf.h> 23#include <asm/cpu_mf.h>
24 24
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 609f985198cf..f58f37f66824 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -15,7 +15,6 @@
15#include <linux/perf_event.h> 15#include <linux/perf_event.h>
16#include <linux/percpu.h> 16#include <linux/percpu.h>
17#include <linux/export.h> 17#include <linux/export.h>
18#include <asm/system.h>
19#include <asm/irq.h> 18#include <asm/irq.h>
20#include <asm/cpu_mf.h> 19#include <asm/cpu_mf.h>
21#include <asm/lowcore.h> 20#include <asm/lowcore.h>
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 1581ea2e027a..06264ae8ccd9 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -50,6 +50,7 @@
50 50
51#include <asm/ipl.h> 51#include <asm/ipl.h>
52#include <asm/uaccess.h> 52#include <asm/uaccess.h>
53#include <asm/facility.h>
53#include <asm/smp.h> 54#include <asm/smp.h>
54#include <asm/mmu_context.h> 55#include <asm/mmu_context.h>
55#include <asm/cpcmd.h> 56#include <asm/cpcmd.h>
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a8bf9994b086..1f77227669e8 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -32,6 +32,8 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/crash_dump.h> 33#include <linux/crash_dump.h>
34#include <asm/asm-offsets.h> 34#include <asm/asm-offsets.h>
35#include <asm/switch_to.h>
36#include <asm/facility.h>
35#include <asm/ipl.h> 37#include <asm/ipl.h>
36#include <asm/setup.h> 38#include <asm/setup.h>
37#include <asm/irq.h> 39#include <asm/irq.h>
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 713fb58ca507..ff9e033ce626 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -5,6 +5,7 @@ config SUPERH
5 select HAVE_IDE if HAS_IOPORT 5 select HAVE_IDE if HAS_IOPORT
6 select HAVE_MEMBLOCK 6 select HAVE_MEMBLOCK
7 select HAVE_MEMBLOCK_NODE_MAP 7 select HAVE_MEMBLOCK_NODE_MAP
8 select ARCH_DISCARD_MEMBLOCK
8 select HAVE_OPROFILE 9 select HAVE_OPROFILE
9 select HAVE_GENERIC_DMA_COHERENT 10 select HAVE_GENERIC_DMA_COHERENT
10 select HAVE_ARCH_TRACEHOOK 11 select HAVE_ARCH_TRACEHOOK
@@ -22,7 +23,7 @@ config SUPERH
22 select HAVE_SYSCALL_TRACEPOINTS 23 select HAVE_SYSCALL_TRACEPOINTS
23 select HAVE_REGS_AND_STACK_ACCESS_API 24 select HAVE_REGS_AND_STACK_ACCESS_API
24 select HAVE_GENERIC_HARDIRQS 25 select HAVE_GENERIC_HARDIRQS
25 select HAVE_SPARSE_IRQ 26 select MAY_HAVE_SPARSE_IRQ
26 select IRQ_FORCED_THREADING 27 select IRQ_FORCED_THREADING
27 select RTC_LIB 28 select RTC_LIB
28 select GENERIC_ATOMIC64 29 select GENERIC_ATOMIC64
@@ -161,6 +162,9 @@ config NO_IOPORT
161config IO_TRAPPED 162config IO_TRAPPED
162 bool 163 bool
163 164
165config SWAP_IO_SPACE
166 bool
167
164config DMA_COHERENT 168config DMA_COHERENT
165 bool 169 bool
166 170
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index e5ac12b2ce65..d12fe9ddf3da 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -522,11 +522,18 @@ static void sdhi0_set_pwr(struct platform_device *pdev, int state)
522 gpio_set_value(GPIO_PTB6, state); 522 gpio_set_value(GPIO_PTB6, state);
523} 523}
524 524
525static int sdhi0_get_cd(struct platform_device *pdev)
526{
527 return !gpio_get_value(GPIO_PTY7);
528}
529
525static struct sh_mobile_sdhi_info sdhi0_info = { 530static struct sh_mobile_sdhi_info sdhi0_info = {
526 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, 531 .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
527 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, 532 .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
528 .set_pwr = sdhi0_set_pwr, 533 .set_pwr = sdhi0_set_pwr,
529 .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD, 534 .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
535 MMC_CAP_NEEDS_POLL,
536 .get_cd = sdhi0_get_cd,
530}; 537};
531 538
532static struct resource sdhi0_resources[] = { 539static struct resource sdhi0_resources[] = {
@@ -559,11 +566,18 @@ static void sdhi1_set_pwr(struct platform_device *pdev, int state)
559 gpio_set_value(GPIO_PTB7, state); 566 gpio_set_value(GPIO_PTB7, state);
560} 567}
561 568
569static int sdhi1_get_cd(struct platform_device *pdev)
570{
571 return !gpio_get_value(GPIO_PTW7);
572}
573
562static struct sh_mobile_sdhi_info sdhi1_info = { 574static struct sh_mobile_sdhi_info sdhi1_info = {
563 .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, 575 .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
564 .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, 576 .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
565 .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD, 577 .tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
578 MMC_CAP_NEEDS_POLL,
566 .set_pwr = sdhi1_set_pwr, 579 .set_pwr = sdhi1_set_pwr,
580 .get_cd = sdhi1_get_cd,
567}; 581};
568 582
569static struct resource sdhi1_resources[] = { 583static struct resource sdhi1_resources[] = {
@@ -1001,6 +1015,7 @@ extern char ecovec24_sdram_leave_end;
1001static int __init arch_setup(void) 1015static int __init arch_setup(void)
1002{ 1016{
1003 struct clk *clk; 1017 struct clk *clk;
1018 bool cn12_enabled = false;
1004 1019
1005 /* register board specific self-refresh code */ 1020 /* register board specific self-refresh code */
1006 sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF | 1021 sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF |
@@ -1201,9 +1216,13 @@ static int __init arch_setup(void)
1201 gpio_direction_input(GPIO_PTR5); 1216 gpio_direction_input(GPIO_PTR5);
1202 gpio_direction_input(GPIO_PTR6); 1217 gpio_direction_input(GPIO_PTR6);
1203 1218
1219 /* SD-card slot CN11 */
1220 /* Card-detect, used on CN11, either with SDHI0 or with SPI */
1221 gpio_request(GPIO_PTY7, NULL);
1222 gpio_direction_input(GPIO_PTY7);
1223
1204#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) 1224#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
1205 /* enable SDHI0 on CN11 (needs DS2.4 set to ON) */ 1225 /* enable SDHI0 on CN11 (needs DS2.4 set to ON) */
1206 gpio_request(GPIO_FN_SDHI0CD, NULL);
1207 gpio_request(GPIO_FN_SDHI0WP, NULL); 1226 gpio_request(GPIO_FN_SDHI0WP, NULL);
1208 gpio_request(GPIO_FN_SDHI0CMD, NULL); 1227 gpio_request(GPIO_FN_SDHI0CMD, NULL);
1209 gpio_request(GPIO_FN_SDHI0CLK, NULL); 1228 gpio_request(GPIO_FN_SDHI0CLK, NULL);
@@ -1213,23 +1232,6 @@ static int __init arch_setup(void)
1213 gpio_request(GPIO_FN_SDHI0D0, NULL); 1232 gpio_request(GPIO_FN_SDHI0D0, NULL);
1214 gpio_request(GPIO_PTB6, NULL); 1233 gpio_request(GPIO_PTB6, NULL);
1215 gpio_direction_output(GPIO_PTB6, 0); 1234 gpio_direction_output(GPIO_PTB6, 0);
1216
1217#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
1218 /* enable SDHI1 on CN12 (needs DS2.6,7 set to ON,OFF) */
1219 gpio_request(GPIO_FN_SDHI1CD, NULL);
1220 gpio_request(GPIO_FN_SDHI1WP, NULL);
1221 gpio_request(GPIO_FN_SDHI1CMD, NULL);
1222 gpio_request(GPIO_FN_SDHI1CLK, NULL);
1223 gpio_request(GPIO_FN_SDHI1D3, NULL);
1224 gpio_request(GPIO_FN_SDHI1D2, NULL);
1225 gpio_request(GPIO_FN_SDHI1D1, NULL);
1226 gpio_request(GPIO_FN_SDHI1D0, NULL);
1227 gpio_request(GPIO_PTB7, NULL);
1228 gpio_direction_output(GPIO_PTB7, 0);
1229
1230 /* I/O buffer drive ability is high for SDHI1 */
1231 __raw_writew((__raw_readw(IODRIVEA) & ~0x3000) | 0x2000 , IODRIVEA);
1232#endif /* CONFIG_MMC_SH_MMCIF */
1233#else 1235#else
1234 /* enable MSIOF0 on CN11 (needs DS2.4 set to OFF) */ 1236 /* enable MSIOF0 on CN11 (needs DS2.4 set to OFF) */
1235 gpio_request(GPIO_FN_MSIOF0_TXD, NULL); 1237 gpio_request(GPIO_FN_MSIOF0_TXD, NULL);
@@ -1241,12 +1243,51 @@ static int __init arch_setup(void)
1241 gpio_direction_output(GPIO_PTB6, 0); /* disable power by default */ 1243 gpio_direction_output(GPIO_PTB6, 0); /* disable power by default */
1242 gpio_request(GPIO_PTY6, NULL); /* write protect */ 1244 gpio_request(GPIO_PTY6, NULL); /* write protect */
1243 gpio_direction_input(GPIO_PTY6); 1245 gpio_direction_input(GPIO_PTY6);
1244 gpio_request(GPIO_PTY7, NULL); /* card detect */
1245 gpio_direction_input(GPIO_PTY7);
1246 1246
1247 spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus)); 1247 spi_register_board_info(spi_bus, ARRAY_SIZE(spi_bus));
1248#endif 1248#endif
1249 1249
1250 /* MMC/SD-card slot CN12 */
1251#if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE)
1252 /* enable MMCIF (needs DS2.6,7 set to OFF,ON) */
1253 gpio_request(GPIO_FN_MMC_D7, NULL);
1254 gpio_request(GPIO_FN_MMC_D6, NULL);
1255 gpio_request(GPIO_FN_MMC_D5, NULL);
1256 gpio_request(GPIO_FN_MMC_D4, NULL);
1257 gpio_request(GPIO_FN_MMC_D3, NULL);
1258 gpio_request(GPIO_FN_MMC_D2, NULL);
1259 gpio_request(GPIO_FN_MMC_D1, NULL);
1260 gpio_request(GPIO_FN_MMC_D0, NULL);
1261 gpio_request(GPIO_FN_MMC_CLK, NULL);
1262 gpio_request(GPIO_FN_MMC_CMD, NULL);
1263 gpio_request(GPIO_PTB7, NULL);
1264 gpio_direction_output(GPIO_PTB7, 0);
1265
1266 cn12_enabled = true;
1267#elif defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
1268 /* enable SDHI1 on CN12 (needs DS2.6,7 set to ON,OFF) */
1269 gpio_request(GPIO_FN_SDHI1WP, NULL);
1270 gpio_request(GPIO_FN_SDHI1CMD, NULL);
1271 gpio_request(GPIO_FN_SDHI1CLK, NULL);
1272 gpio_request(GPIO_FN_SDHI1D3, NULL);
1273 gpio_request(GPIO_FN_SDHI1D2, NULL);
1274 gpio_request(GPIO_FN_SDHI1D1, NULL);
1275 gpio_request(GPIO_FN_SDHI1D0, NULL);
1276 gpio_request(GPIO_PTB7, NULL);
1277 gpio_direction_output(GPIO_PTB7, 0);
1278
1279 /* Card-detect, used on CN12 with SDHI1 */
1280 gpio_request(GPIO_PTW7, NULL);
1281 gpio_direction_input(GPIO_PTW7);
1282
1283 cn12_enabled = true;
1284#endif
1285
1286 if (cn12_enabled)
1287 /* I/O buffer drive ability is high for CN12 */
1288 __raw_writew((__raw_readw(IODRIVEA) & ~0x3000) | 0x2000,
1289 IODRIVEA);
1290
1250 /* enable Video */ 1291 /* enable Video */
1251 gpio_request(GPIO_PTU2, NULL); 1292 gpio_request(GPIO_PTU2, NULL);
1252 gpio_direction_output(GPIO_PTU2, 1); 1293 gpio_direction_output(GPIO_PTU2, 1);
@@ -1305,25 +1346,6 @@ static int __init arch_setup(void)
1305 gpio_request(GPIO_PTU5, NULL); 1346 gpio_request(GPIO_PTU5, NULL);
1306 gpio_direction_output(GPIO_PTU5, 0); 1347 gpio_direction_output(GPIO_PTU5, 0);
1307 1348
1308#if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE)
1309 /* enable MMCIF (needs DS2.6,7 set to OFF,ON) */
1310 gpio_request(GPIO_FN_MMC_D7, NULL);
1311 gpio_request(GPIO_FN_MMC_D6, NULL);
1312 gpio_request(GPIO_FN_MMC_D5, NULL);
1313 gpio_request(GPIO_FN_MMC_D4, NULL);
1314 gpio_request(GPIO_FN_MMC_D3, NULL);
1315 gpio_request(GPIO_FN_MMC_D2, NULL);
1316 gpio_request(GPIO_FN_MMC_D1, NULL);
1317 gpio_request(GPIO_FN_MMC_D0, NULL);
1318 gpio_request(GPIO_FN_MMC_CLK, NULL);
1319 gpio_request(GPIO_FN_MMC_CMD, NULL);
1320 gpio_request(GPIO_PTB7, NULL);
1321 gpio_direction_output(GPIO_PTB7, 0);
1322
1323 /* I/O buffer drive ability is high for MMCIF */
1324 __raw_writew((__raw_readw(IODRIVEA) & ~0x3000) | 0x2000 , IODRIVEA);
1325#endif
1326
1327 /* enable I2C device */ 1349 /* enable I2C device */
1328 i2c_register_board_info(0, i2c0_devices, 1350 i2c_register_board_info(0, i2c0_devices,
1329 ARRAY_SIZE(i2c0_devices)); 1351 ARRAY_SIZE(i2c0_devices));
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index e4ea31a62c55..58592dfa5cb6 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -8,8 +8,6 @@
8# Copyright (C) 1999 Stuart Menefy 8# Copyright (C) 1999 Stuart Menefy
9# 9#
10 10
11MKIMAGE := $(srctree)/scripts/mkuboot.sh
12
13# 11#
14# Assign safe dummy values if these variables are not defined, 12# Assign safe dummy values if these variables are not defined,
15# in order to suppress error message. 13# in order to suppress error message.
@@ -61,10 +59,8 @@ KERNEL_ENTRY := $(shell /bin/bash -c 'printf "0x%08x" \
61 $(KERNEL_MEMORY) + \ 59 $(KERNEL_MEMORY) + \
62 $(CONFIG_ZERO_PAGE_OFFSET) + $(CONFIG_ENTRY_OFFSET)]') 60 $(CONFIG_ZERO_PAGE_OFFSET) + $(CONFIG_ENTRY_OFFSET)]')
63 61
64quiet_cmd_uimage = UIMAGE $@ 62UIMAGE_LOADADDR = $(KERNEL_LOAD)
65 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A sh -O linux -T kernel \ 63UIMAGE_ENTRYADDR = $(KERNEL_ENTRY)
66 -C $(2) -a $(KERNEL_LOAD) -e $(KERNEL_ENTRY) \
67 -n 'Linux-$(KERNELRELEASE)' -d $< $@
68 64
69$(obj)/vmlinux.bin: vmlinux FORCE 65$(obj)/vmlinux.bin: vmlinux FORCE
70 $(call if_changed,objcopy) 66 $(call if_changed,objcopy)
diff --git a/arch/sh/drivers/dma/dma-g2.c b/arch/sh/drivers/dma/dma-g2.c
index be9ca7ca0ce4..e1ab6eb3c04b 100644
--- a/arch/sh/drivers/dma/dma-g2.c
+++ b/arch/sh/drivers/dma/dma-g2.c
@@ -181,14 +181,14 @@ static int __init g2_dma_init(void)
181 181
182 ret = register_dmac(&g2_dma_info); 182 ret = register_dmac(&g2_dma_info);
183 if (unlikely(ret != 0)) 183 if (unlikely(ret != 0))
184 free_irq(HW_EVENT_G2_DMA, 0); 184 free_irq(HW_EVENT_G2_DMA, &g2_dma_info);
185 185
186 return ret; 186 return ret;
187} 187}
188 188
189static void __exit g2_dma_exit(void) 189static void __exit g2_dma_exit(void)
190{ 190{
191 free_irq(HW_EVENT_G2_DMA, 0); 191 free_irq(HW_EVENT_G2_DMA, &g2_dma_info);
192 unregister_dmac(&g2_dma_info); 192 unregister_dmac(&g2_dma_info);
193} 193}
194 194
diff --git a/arch/sh/drivers/dma/dmabrg.c b/arch/sh/drivers/dma/dmabrg.c
index 3d66a32ce610..c0dd904483c7 100644
--- a/arch/sh/drivers/dma/dmabrg.c
+++ b/arch/sh/drivers/dma/dmabrg.c
@@ -189,8 +189,8 @@ static int __init dmabrg_init(void)
189 if (ret == 0) 189 if (ret == 0)
190 return ret; 190 return ret;
191 191
192 free_irq(DMABRGI1, 0); 192 free_irq(DMABRGI1, NULL);
193out1: free_irq(DMABRGI0, 0); 193out1: free_irq(DMABRGI0, NULL);
194out0: kfree(dmabrg_handlers); 194out0: kfree(dmabrg_handlers);
195 return ret; 195 return ret;
196} 196}
diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c
index fb8f14990743..5a6dab6e27d9 100644
--- a/arch/sh/drivers/pci/pci-sh7780.c
+++ b/arch/sh/drivers/pci/pci-sh7780.c
@@ -21,6 +21,13 @@
21#include <asm/mmu.h> 21#include <asm/mmu.h>
22#include <asm/sizes.h> 22#include <asm/sizes.h>
23 23
24#if defined(CONFIG_CPU_BIG_ENDIAN)
25# define PCICR_ENDIANNESS SH4_PCICR_BSWP
26#else
27# define PCICR_ENDIANNESS 0
28#endif
29
30
24static struct resource sh7785_pci_resources[] = { 31static struct resource sh7785_pci_resources[] = {
25 { 32 {
26 .name = "PCI IO", 33 .name = "PCI IO",
@@ -254,7 +261,7 @@ static int __init sh7780_pci_init(void)
254 __raw_writel(PCIECR_ENBL, PCIECR); 261 __raw_writel(PCIECR_ENBL, PCIECR);
255 262
256 /* Reset */ 263 /* Reset */
257 __raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_PRST, 264 __raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_PRST | PCICR_ENDIANNESS,
258 chan->reg_base + SH4_PCICR); 265 chan->reg_base + SH4_PCICR);
259 266
260 /* 267 /*
@@ -290,7 +297,8 @@ static int __init sh7780_pci_init(void)
290 * Now throw it in to register initialization mode and 297 * Now throw it in to register initialization mode and
291 * start the real work. 298 * start the real work.
292 */ 299 */
293 __raw_writel(SH4_PCICR_PREFIX, chan->reg_base + SH4_PCICR); 300 __raw_writel(SH4_PCICR_PREFIX | PCICR_ENDIANNESS,
301 chan->reg_base + SH4_PCICR);
294 302
295 memphys = __pa(memory_start); 303 memphys = __pa(memory_start);
296 memsize = roundup_pow_of_two(memory_end - memory_start); 304 memsize = roundup_pow_of_two(memory_end - memory_start);
@@ -380,7 +388,8 @@ static int __init sh7780_pci_init(void)
380 * Initialization mode complete, release the control register and 388 * Initialization mode complete, release the control register and
381 * enable round robin mode to stop device overruns/starvation. 389 * enable round robin mode to stop device overruns/starvation.
382 */ 390 */
383 __raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_FTO, 391 __raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_FTO |
392 PCICR_ENDIANNESS,
384 chan->reg_base + SH4_PCICR); 393 chan->reg_base + SH4_PCICR);
385 394
386 ret = register_pci_controller(chan); 395 ret = register_pci_controller(chan);
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 35fc8b077cb1..ec464a6b95fe 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -23,6 +23,7 @@
23#define __IO_PREFIX generic 23#define __IO_PREFIX generic
24#include <asm/io_generic.h> 24#include <asm/io_generic.h>
25#include <asm/io_trapped.h> 25#include <asm/io_trapped.h>
26#include <mach/mangle-port.h>
26 27
27#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v)) 28#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v))
28#define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v)) 29#define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
@@ -34,21 +35,15 @@
34#define __raw_readl(a) (__chk_io_ptr(a), *(volatile u32 __force *)(a)) 35#define __raw_readl(a) (__chk_io_ptr(a), *(volatile u32 __force *)(a))
35#define __raw_readq(a) (__chk_io_ptr(a), *(volatile u64 __force *)(a)) 36#define __raw_readq(a) (__chk_io_ptr(a), *(volatile u64 __force *)(a))
36 37
37#define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; }) 38#define readb_relaxed(c) ({ u8 __v = ioswabb(__raw_readb(c)); __v; })
38#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \ 39#define readw_relaxed(c) ({ u16 __v = ioswabw(__raw_readw(c)); __v; })
39 __raw_readw(c)); __v; }) 40#define readl_relaxed(c) ({ u32 __v = ioswabl(__raw_readl(c)); __v; })
40#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \ 41#define readq_relaxed(c) ({ u64 __v = ioswabq(__raw_readq(c)); __v; })
41 __raw_readl(c)); __v; }) 42
42#define readq_relaxed(c) ({ u64 __v = le64_to_cpu((__force __le64) \ 43#define writeb_relaxed(v,c) ((void)__raw_writeb((__force u8)ioswabb(v),c))
43 __raw_readq(c)); __v; }) 44#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)ioswabw(v),c))
44 45#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)ioswabl(v),c))
45#define writeb_relaxed(v,c) ((void)__raw_writeb(v,c)) 46#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)ioswabq(v),c))
46#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \
47 cpu_to_le16(v),c))
48#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \
49 cpu_to_le32(v),c))
50#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64) \
51 cpu_to_le64(v),c))
52 47
53#define readb(a) ({ u8 r_ = readb_relaxed(a); rmb(); r_; }) 48#define readb(a) ({ u8 r_ = readb_relaxed(a); rmb(); r_; })
54#define readw(a) ({ u16 r_ = readw_relaxed(a); rmb(); r_; }) 49#define readw(a) ({ u16 r_ = readw_relaxed(a); rmb(); r_; })
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index 45d08b6a5ef7..2a62017eb275 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -21,17 +21,6 @@
21#define NO_IRQ_IGNORE ((unsigned int)-1) 21#define NO_IRQ_IGNORE ((unsigned int)-1)
22 22
23/* 23/*
24 * Convert back and forth between INTEVT and IRQ values.
25 */
26#ifdef CONFIG_CPU_HAS_INTEVT
27#define evt2irq(evt) (((evt) >> 5) - 16)
28#define irq2evt(irq) (((irq) + 16) << 5)
29#else
30#define evt2irq(evt) (evt)
31#define irq2evt(irq) (irq)
32#endif
33
34/*
35 * Simple Mask Register Support 24 * Simple Mask Register Support
36 */ 25 */
37extern void make_maskreg_irq(unsigned int irq); 26extern void make_maskreg_irq(unsigned int irq);
diff --git a/arch/sh/include/asm/posix_types_32.h b/arch/sh/include/asm/posix_types_32.h
index 6a9ceaaf1aea..abda58467ece 100644
--- a/arch/sh/include/asm/posix_types_32.h
+++ b/arch/sh/include/asm/posix_types_32.h
@@ -12,11 +12,6 @@ typedef unsigned short __kernel_uid_t;
12typedef unsigned short __kernel_gid_t; 12typedef unsigned short __kernel_gid_t;
13#define __kernel_gid_t __kernel_gid_t 13#define __kernel_gid_t __kernel_gid_t
14 14
15typedef unsigned int __kernel_uid32_t;
16#define __kernel_uid32_t __kernel_uid32_t
17typedef unsigned int __kernel_gid32_t;
18#define __kernel_gid32_t __kernel_gid32_t
19
20typedef unsigned short __kernel_old_uid_t; 15typedef unsigned short __kernel_old_uid_t;
21#define __kernel_old_uid_t __kernel_old_uid_t 16#define __kernel_old_uid_t __kernel_old_uid_t
22typedef unsigned short __kernel_old_gid_t; 17typedef unsigned short __kernel_old_gid_t;
diff --git a/arch/sh/include/asm/posix_types_64.h b/arch/sh/include/asm/posix_types_64.h
index 8cd11485c06b..fcda07b4a616 100644
--- a/arch/sh/include/asm/posix_types_64.h
+++ b/arch/sh/include/asm/posix_types_64.h
@@ -17,10 +17,6 @@ typedef int __kernel_ssize_t;
17#define __kernel_ssize_t __kernel_ssize_t 17#define __kernel_ssize_t __kernel_ssize_t
18typedef int __kernel_ptrdiff_t; 18typedef int __kernel_ptrdiff_t;
19#define __kernel_ptrdiff_t __kernel_ptrdiff_t 19#define __kernel_ptrdiff_t __kernel_ptrdiff_t
20typedef unsigned int __kernel_uid32_t;
21#define __kernel_uid32_t __kernel_uid32_t
22typedef unsigned int __kernel_gid32_t;
23#define __kernel_gid32_t __kernel_gid32_t
24 20
25typedef unsigned short __kernel_old_uid_t; 21typedef unsigned short __kernel_old_uid_t;
26#define __kernel_old_uid_t __kernel_old_uid_t 22#define __kernel_old_uid_t __kernel_old_uid_t
diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h
index 65be656ead7d..a42a5610a36a 100644
--- a/arch/sh/include/asm/unistd.h
+++ b/arch/sh/include/asm/unistd.h
@@ -1,9 +1,46 @@
1#ifdef __KERNEL__ 1#ifdef __KERNEL__
2# ifdef CONFIG_SUPERH32 2# ifdef CONFIG_SUPERH32
3
3# include "unistd_32.h" 4# include "unistd_32.h"
5# define __ARCH_WANT_SYS_RT_SIGSUSPEND
6
4# else 7# else
5# include "unistd_64.h" 8# include "unistd_64.h"
6# endif 9# endif
10
11# define __ARCH_WANT_IPC_PARSE_VERSION
12# define __ARCH_WANT_OLD_READDIR
13# define __ARCH_WANT_OLD_STAT
14# define __ARCH_WANT_STAT64
15# define __ARCH_WANT_SYS_ALARM
16# define __ARCH_WANT_SYS_GETHOSTNAME
17# define __ARCH_WANT_SYS_IPC
18# define __ARCH_WANT_SYS_PAUSE
19# define __ARCH_WANT_SYS_SGETMASK
20# define __ARCH_WANT_SYS_SIGNAL
21# define __ARCH_WANT_SYS_TIME
22# define __ARCH_WANT_SYS_UTIME
23# define __ARCH_WANT_SYS_WAITPID
24# define __ARCH_WANT_SYS_SOCKETCALL
25# define __ARCH_WANT_SYS_FADVISE64
26# define __ARCH_WANT_SYS_GETPGRP
27# define __ARCH_WANT_SYS_LLSEEK
28# define __ARCH_WANT_SYS_NICE
29# define __ARCH_WANT_SYS_OLD_GETRLIMIT
30# define __ARCH_WANT_SYS_OLD_UNAME
31# define __ARCH_WANT_SYS_OLDUMOUNT
32# define __ARCH_WANT_SYS_SIGPENDING
33# define __ARCH_WANT_SYS_SIGPROCMASK
34# define __ARCH_WANT_SYS_RT_SIGACTION
35
36/*
37 * "Conditional" syscalls
38 *
39 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
40 * but it doesn't work on all toolchains, so we just do it by hand
41 */
42# define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
43
7#else 44#else
8# ifdef __SH5__ 45# ifdef __SH5__
9# include "unistd_64.h" 46# include "unistd_64.h"
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index 152b8627a184..72fd1e061006 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -1,5 +1,5 @@
1#ifndef __ASM_SH_UNISTD_H 1#ifndef __ASM_SH_UNISTD_32_H
2#define __ASM_SH_UNISTD_H 2#define __ASM_SH_UNISTD_32_H
3 3
4/* 4/*
5 * Copyright (C) 1999 Niibe Yutaka 5 * Copyright (C) 1999 Niibe Yutaka
@@ -26,7 +26,7 @@
26#define __NR_mknod 14 26#define __NR_mknod 14
27#define __NR_chmod 15 27#define __NR_chmod 15
28#define __NR_lchown 16 28#define __NR_lchown 16
29#define __NR_break 17 29 /* 17 was sys_break */
30#define __NR_oldstat 18 30#define __NR_oldstat 18
31#define __NR_lseek 19 31#define __NR_lseek 19
32#define __NR_getpid 20 32#define __NR_getpid 20
@@ -40,11 +40,11 @@
40#define __NR_oldfstat 28 40#define __NR_oldfstat 28
41#define __NR_pause 29 41#define __NR_pause 29
42#define __NR_utime 30 42#define __NR_utime 30
43#define __NR_stty 31 43 /* 31 was sys_stty */
44#define __NR_gtty 32 44 /* 32 was sys_gtty */
45#define __NR_access 33 45#define __NR_access 33
46#define __NR_nice 34 46#define __NR_nice 34
47#define __NR_ftime 35 47 /* 35 was sys_ftime */
48#define __NR_sync 36 48#define __NR_sync 36
49#define __NR_kill 37 49#define __NR_kill 37
50#define __NR_rename 38 50#define __NR_rename 38
@@ -53,7 +53,7 @@
53#define __NR_dup 41 53#define __NR_dup 41
54#define __NR_pipe 42 54#define __NR_pipe 42
55#define __NR_times 43 55#define __NR_times 43
56#define __NR_prof 44 56 /* 44 was sys_prof */
57#define __NR_brk 45 57#define __NR_brk 45
58#define __NR_setgid 46 58#define __NR_setgid 46
59#define __NR_getgid 47 59#define __NR_getgid 47
@@ -62,13 +62,13 @@
62#define __NR_getegid 50 62#define __NR_getegid 50
63#define __NR_acct 51 63#define __NR_acct 51
64#define __NR_umount2 52 64#define __NR_umount2 52
65#define __NR_lock 53 65 /* 53 was sys_lock */
66#define __NR_ioctl 54 66#define __NR_ioctl 54
67#define __NR_fcntl 55 67#define __NR_fcntl 55
68#define __NR_mpx 56 68 /* 56 was sys_mpx */
69#define __NR_setpgid 57 69#define __NR_setpgid 57
70#define __NR_ulimit 58 70 /* 58 was sys_ulimit */
71#define __NR_oldolduname 59 71 /* 59 was sys_olduname */
72#define __NR_umask 60 72#define __NR_umask 60
73#define __NR_chroot 61 73#define __NR_chroot 61
74#define __NR_ustat 62 74#define __NR_ustat 62
@@ -91,7 +91,7 @@
91#define __NR_settimeofday 79 91#define __NR_settimeofday 79
92#define __NR_getgroups 80 92#define __NR_getgroups 80
93#define __NR_setgroups 81 93#define __NR_setgroups 81
94#define __NR_select 82 94 /* 82 was sys_oldselect */
95#define __NR_symlink 83 95#define __NR_symlink 83
96#define __NR_oldlstat 84 96#define __NR_oldlstat 84
97#define __NR_readlink 85 97#define __NR_readlink 85
@@ -107,10 +107,10 @@
107#define __NR_fchown 95 107#define __NR_fchown 95
108#define __NR_getpriority 96 108#define __NR_getpriority 96
109#define __NR_setpriority 97 109#define __NR_setpriority 97
110#define __NR_profil 98 110 /* 98 was sys_profil */
111#define __NR_statfs 99 111#define __NR_statfs 99
112#define __NR_fstatfs 100 112#define __NR_fstatfs 100
113#define __NR_ioperm 101 113 /* 101 was sys_ioperm */
114#define __NR_socketcall 102 114#define __NR_socketcall 102
115#define __NR_syslog 103 115#define __NR_syslog 103
116#define __NR_setitimer 104 116#define __NR_setitimer 104
@@ -119,10 +119,10 @@
119#define __NR_lstat 107 119#define __NR_lstat 107
120#define __NR_fstat 108 120#define __NR_fstat 108
121#define __NR_olduname 109 121#define __NR_olduname 109
122#define __NR_iopl 110 122 /* 110 was sys_iopl */
123#define __NR_vhangup 111 123#define __NR_vhangup 111
124#define __NR_idle 112 124 /* 112 was sys_idle */
125#define __NR_vm86old 113 125 /* 113 was sys_vm86old */
126#define __NR_wait4 114 126#define __NR_wait4 114
127#define __NR_swapoff 115 127#define __NR_swapoff 115
128#define __NR_sysinfo 116 128#define __NR_sysinfo 116
@@ -136,17 +136,17 @@
136#define __NR_adjtimex 124 136#define __NR_adjtimex 124
137#define __NR_mprotect 125 137#define __NR_mprotect 125
138#define __NR_sigprocmask 126 138#define __NR_sigprocmask 126
139#define __NR_create_module 127 139 /* 127 was sys_create_module */
140#define __NR_init_module 128 140#define __NR_init_module 128
141#define __NR_delete_module 129 141#define __NR_delete_module 129
142#define __NR_get_kernel_syms 130 142 /* 130 was sys_get_kernel_syms */
143#define __NR_quotactl 131 143#define __NR_quotactl 131
144#define __NR_getpgid 132 144#define __NR_getpgid 132
145#define __NR_fchdir 133 145#define __NR_fchdir 133
146#define __NR_bdflush 134 146#define __NR_bdflush 134
147#define __NR_sysfs 135 147#define __NR_sysfs 135
148#define __NR_personality 136 148#define __NR_personality 136
149#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ 149 /* 137 was sys_afs_syscall */
150#define __NR_setfsuid 138 150#define __NR_setfsuid 138
151#define __NR_setfsgid 139 151#define __NR_setfsgid 139
152#define __NR__llseek 140 152#define __NR__llseek 140
@@ -175,8 +175,8 @@
175#define __NR_mremap 163 175#define __NR_mremap 163
176#define __NR_setresuid 164 176#define __NR_setresuid 164
177#define __NR_getresuid 165 177#define __NR_getresuid 165
178#define __NR_vm86 166 178 /* 166 was sys_vm86 */
179#define __NR_query_module 167 179 /* 167 was sys_query_module */
180#define __NR_poll 168 180#define __NR_poll 168
181#define __NR_nfsservctl 169 181#define __NR_nfsservctl 169
182#define __NR_setresgid 170 182#define __NR_setresgid 170
@@ -197,8 +197,8 @@
197#define __NR_capset 185 197#define __NR_capset 185
198#define __NR_sigaltstack 186 198#define __NR_sigaltstack 186
199#define __NR_sendfile 187 199#define __NR_sendfile 187
200#define __NR_streams1 188 /* some people actually want it */ 200 /* 188 reserved for sys_getpmsg */
201#define __NR_streams2 189 /* some people actually want it */ 201 /* 189 reserved for sys_putpmsg */
202#define __NR_vfork 190 202#define __NR_vfork 190
203#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ 203#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
204#define __NR_mmap2 192 204#define __NR_mmap2 192
@@ -231,7 +231,8 @@
231#define __NR_madvise 219 231#define __NR_madvise 219
232#define __NR_getdents64 220 232#define __NR_getdents64 220
233#define __NR_fcntl64 221 233#define __NR_fcntl64 221
234/* 223 is unused */ 234 /* 222 is reserved for tux */
235 /* 223 is unused */
235#define __NR_gettid 224 236#define __NR_gettid 224
236#define __NR_readahead 225 237#define __NR_readahead 225
237#define __NR_setxattr 226 238#define __NR_setxattr 226
@@ -251,15 +252,15 @@
251#define __NR_futex 240 252#define __NR_futex 240
252#define __NR_sched_setaffinity 241 253#define __NR_sched_setaffinity 241
253#define __NR_sched_getaffinity 242 254#define __NR_sched_getaffinity 242
254#define __NR_set_thread_area 243 255 /* 243 is reserved for set_thread_area */
255#define __NR_get_thread_area 244 256 /* 244 is reserved for get_thread_area */
256#define __NR_io_setup 245 257#define __NR_io_setup 245
257#define __NR_io_destroy 246 258#define __NR_io_destroy 246
258#define __NR_io_getevents 247 259#define __NR_io_getevents 247
259#define __NR_io_submit 248 260#define __NR_io_submit 248
260#define __NR_io_cancel 249 261#define __NR_io_cancel 249
261#define __NR_fadvise64 250 262#define __NR_fadvise64 250
262 263 /* 251 is unused */
263#define __NR_exit_group 252 264#define __NR_exit_group 252
264#define __NR_lookup_dcookie 253 265#define __NR_lookup_dcookie 253
265#define __NR_epoll_create 254 266#define __NR_epoll_create 254
@@ -281,7 +282,7 @@
281#define __NR_tgkill 270 282#define __NR_tgkill 270
282#define __NR_utimes 271 283#define __NR_utimes 271
283#define __NR_fadvise64_64 272 284#define __NR_fadvise64_64 272
284#define __NR_vserver 273 285 /* 273 is reserved for vserver */
285#define __NR_mbind 274 286#define __NR_mbind 274
286#define __NR_get_mempolicy 275 287#define __NR_get_mempolicy 275
287#define __NR_set_mempolicy 276 288#define __NR_set_mempolicy 276
@@ -301,7 +302,7 @@
301#define __NR_inotify_init 290 302#define __NR_inotify_init 290
302#define __NR_inotify_add_watch 291 303#define __NR_inotify_add_watch 291
303#define __NR_inotify_rm_watch 292 304#define __NR_inotify_rm_watch 292
304/* 293 is unused */ 305 /* 293 is unused */
305#define __NR_migrate_pages 294 306#define __NR_migrate_pages 294
306#define __NR_openat 295 307#define __NR_openat 295
307#define __NR_mkdirat 296 308#define __NR_mkdirat 296
@@ -380,43 +381,4 @@
380 381
381#define NR_syscalls 367 382#define NR_syscalls 367
382 383
383#ifdef __KERNEL__ 384#endif /* __ASM_SH_UNISTD_32_H */
384
385#define __ARCH_WANT_IPC_PARSE_VERSION
386#define __ARCH_WANT_OLD_READDIR
387#define __ARCH_WANT_OLD_STAT
388#define __ARCH_WANT_STAT64
389#define __ARCH_WANT_SYS_ALARM
390#define __ARCH_WANT_SYS_GETHOSTNAME
391#define __ARCH_WANT_SYS_IPC
392#define __ARCH_WANT_SYS_PAUSE
393#define __ARCH_WANT_SYS_SGETMASK
394#define __ARCH_WANT_SYS_SIGNAL
395#define __ARCH_WANT_SYS_TIME
396#define __ARCH_WANT_SYS_UTIME
397#define __ARCH_WANT_SYS_WAITPID
398#define __ARCH_WANT_SYS_SOCKETCALL
399#define __ARCH_WANT_SYS_FADVISE64
400#define __ARCH_WANT_SYS_GETPGRP
401#define __ARCH_WANT_SYS_LLSEEK
402#define __ARCH_WANT_SYS_NICE
403#define __ARCH_WANT_SYS_OLD_GETRLIMIT
404#define __ARCH_WANT_SYS_OLD_UNAME
405#define __ARCH_WANT_SYS_OLDUMOUNT
406#define __ARCH_WANT_SYS_SIGPENDING
407#define __ARCH_WANT_SYS_SIGPROCMASK
408#define __ARCH_WANT_SYS_RT_SIGACTION
409#define __ARCH_WANT_SYS_RT_SIGSUSPEND
410
411/*
412 * "Conditional" syscalls
413 *
414 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
415 * but it doesn't work on all toolchains, so we just do it by hand
416 */
417#ifndef cond_syscall
418#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
419#endif
420
421#endif /* __KERNEL__ */
422#endif /* __ASM_SH_UNISTD_H */
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index c330c23db5a0..a28edc329692 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -31,7 +31,7 @@
31#define __NR_mknod 14 31#define __NR_mknod 14
32#define __NR_chmod 15 32#define __NR_chmod 15
33#define __NR_lchown 16 33#define __NR_lchown 16
34#define __NR_break 17 34 /* 17 was sys_break */
35#define __NR_oldstat 18 35#define __NR_oldstat 18
36#define __NR_lseek 19 36#define __NR_lseek 19
37#define __NR_getpid 20 37#define __NR_getpid 20
@@ -45,11 +45,11 @@
45#define __NR_oldfstat 28 45#define __NR_oldfstat 28
46#define __NR_pause 29 46#define __NR_pause 29
47#define __NR_utime 30 47#define __NR_utime 30
48#define __NR_stty 31 48 /* 31 was sys_stty */
49#define __NR_gtty 32 49 /* 32 was sys_gtty */
50#define __NR_access 33 50#define __NR_access 33
51#define __NR_nice 34 51#define __NR_nice 34
52#define __NR_ftime 35 52 /* 35 was sys_ftime */
53#define __NR_sync 36 53#define __NR_sync 36
54#define __NR_kill 37 54#define __NR_kill 37
55#define __NR_rename 38 55#define __NR_rename 38
@@ -58,7 +58,7 @@
58#define __NR_dup 41 58#define __NR_dup 41
59#define __NR_pipe 42 59#define __NR_pipe 42
60#define __NR_times 43 60#define __NR_times 43
61#define __NR_prof 44 61 /* 44 was sys_prof */
62#define __NR_brk 45 62#define __NR_brk 45
63#define __NR_setgid 46 63#define __NR_setgid 46
64#define __NR_getgid 47 64#define __NR_getgid 47
@@ -67,13 +67,13 @@
67#define __NR_getegid 50 67#define __NR_getegid 50
68#define __NR_acct 51 68#define __NR_acct 51
69#define __NR_umount2 52 69#define __NR_umount2 52
70#define __NR_lock 53 70 /* 53 was sys_lock */
71#define __NR_ioctl 54 71#define __NR_ioctl 54
72#define __NR_fcntl 55 72#define __NR_fcntl 55
73#define __NR_mpx 56 73 /* 56 was sys_mpx */
74#define __NR_setpgid 57 74#define __NR_setpgid 57
75#define __NR_ulimit 58 75 /* 58 was sys_ulimit */
76#define __NR_oldolduname 59 76 /* 59 was sys_olduname */
77#define __NR_umask 60 77#define __NR_umask 60
78#define __NR_chroot 61 78#define __NR_chroot 61
79#define __NR_ustat 62 79#define __NR_ustat 62
@@ -96,7 +96,7 @@
96#define __NR_settimeofday 79 96#define __NR_settimeofday 79
97#define __NR_getgroups 80 97#define __NR_getgroups 80
98#define __NR_setgroups 81 98#define __NR_setgroups 81
99#define __NR_select 82 99 /* 82 was sys_select */
100#define __NR_symlink 83 100#define __NR_symlink 83
101#define __NR_oldlstat 84 101#define __NR_oldlstat 84
102#define __NR_readlink 85 102#define __NR_readlink 85
@@ -112,10 +112,10 @@
112#define __NR_fchown 95 112#define __NR_fchown 95
113#define __NR_getpriority 96 113#define __NR_getpriority 96
114#define __NR_setpriority 97 114#define __NR_setpriority 97
115#define __NR_profil 98 115 /* 98 was sys_profil */
116#define __NR_statfs 99 116#define __NR_statfs 99
117#define __NR_fstatfs 100 117#define __NR_fstatfs 100
118#define __NR_ioperm 101 118 /* 101 was sys_ioperm */
119#define __NR_socketcall 102 /* old implementation of socket systemcall */ 119#define __NR_socketcall 102 /* old implementation of socket systemcall */
120#define __NR_syslog 103 120#define __NR_syslog 103
121#define __NR_setitimer 104 121#define __NR_setitimer 104
@@ -124,10 +124,10 @@
124#define __NR_lstat 107 124#define __NR_lstat 107
125#define __NR_fstat 108 125#define __NR_fstat 108
126#define __NR_olduname 109 126#define __NR_olduname 109
127#define __NR_iopl 110 127 /* 110 was sys_iopl */
128#define __NR_vhangup 111 128#define __NR_vhangup 111
129#define __NR_idle 112 129 /* 112 was sys_idle */
130#define __NR_vm86old 113 130 /* 113 was sys_vm86old */
131#define __NR_wait4 114 131#define __NR_wait4 114
132#define __NR_swapoff 115 132#define __NR_swapoff 115
133#define __NR_sysinfo 116 133#define __NR_sysinfo 116
@@ -141,17 +141,17 @@
141#define __NR_adjtimex 124 141#define __NR_adjtimex 124
142#define __NR_mprotect 125 142#define __NR_mprotect 125
143#define __NR_sigprocmask 126 143#define __NR_sigprocmask 126
144#define __NR_create_module 127 144 /* 127 was sys_create_module */
145#define __NR_init_module 128 145#define __NR_init_module 128
146#define __NR_delete_module 129 146#define __NR_delete_module 129
147#define __NR_get_kernel_syms 130 147 /* 130 was sys_get_kernel_syms */
148#define __NR_quotactl 131 148#define __NR_quotactl 131
149#define __NR_getpgid 132 149#define __NR_getpgid 132
150#define __NR_fchdir 133 150#define __NR_fchdir 133
151#define __NR_bdflush 134 151#define __NR_bdflush 134
152#define __NR_sysfs 135 152#define __NR_sysfs 135
153#define __NR_personality 136 153#define __NR_personality 136
154#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ 154 /* 137 was sys_afs_syscall */
155#define __NR_setfsuid 138 155#define __NR_setfsuid 138
156#define __NR_setfsgid 139 156#define __NR_setfsgid 139
157#define __NR__llseek 140 157#define __NR__llseek 140
@@ -180,8 +180,8 @@
180#define __NR_mremap 163 180#define __NR_mremap 163
181#define __NR_setresuid 164 181#define __NR_setresuid 164
182#define __NR_getresuid 165 182#define __NR_getresuid 165
183#define __NR_vm86 166 183 /* 166 was sys_vm86 */
184#define __NR_query_module 167 184 /* 167 was sys_query_module */
185#define __NR_poll 168 185#define __NR_poll 168
186#define __NR_nfsservctl 169 186#define __NR_nfsservctl 169
187#define __NR_setresgid 170 187#define __NR_setresgid 170
@@ -202,8 +202,8 @@
202#define __NR_capset 185 202#define __NR_capset 185
203#define __NR_sigaltstack 186 203#define __NR_sigaltstack 186
204#define __NR_sendfile 187 204#define __NR_sendfile 187
205#define __NR_streams1 188 /* some people actually want it */ 205 /* 188 reserved for getpmsg */
206#define __NR_streams2 189 /* some people actually want it */ 206 /* 189 reserved for putpmsg */
207#define __NR_vfork 190 207#define __NR_vfork 190
208#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ 208#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
209#define __NR_mmap2 192 209#define __NR_mmap2 192
@@ -262,16 +262,15 @@
262#define __NR_msgrcv 241 262#define __NR_msgrcv 241
263#define __NR_msgget 242 263#define __NR_msgget 242
264#define __NR_msgctl 243 264#define __NR_msgctl 243
265#if 0 265#define __NR_shmat 244
266#define __NR_shmatcall 244
267#endif
268#define __NR_shmdt 245 266#define __NR_shmdt 245
269#define __NR_shmget 246 267#define __NR_shmget 246
270#define __NR_shmctl 247 268#define __NR_shmctl 247
271 269
272#define __NR_getdents64 248 270#define __NR_getdents64 248
273#define __NR_fcntl64 249 271#define __NR_fcntl64 249
274/* 223 is unused */ 272 /* 250 is reserved for tux */
273 /* 251 is unused */
275#define __NR_gettid 252 274#define __NR_gettid 252
276#define __NR_readahead 253 275#define __NR_readahead 253
277#define __NR_setxattr 254 276#define __NR_setxattr 254
@@ -291,14 +290,15 @@
291#define __NR_futex 268 290#define __NR_futex 268
292#define __NR_sched_setaffinity 269 291#define __NR_sched_setaffinity 269
293#define __NR_sched_getaffinity 270 292#define __NR_sched_getaffinity 270
294#define __NR_set_thread_area 271 293 /* 271 is reserved for set_thread_area */
295#define __NR_get_thread_area 272 294 /* 272 is reserved for get_thread_area */
296#define __NR_io_setup 273 295#define __NR_io_setup 273
297#define __NR_io_destroy 274 296#define __NR_io_destroy 274
298#define __NR_io_getevents 275 297#define __NR_io_getevents 275
299#define __NR_io_submit 276 298#define __NR_io_submit 276
300#define __NR_io_cancel 277 299#define __NR_io_cancel 277
301#define __NR_fadvise64 278 300#define __NR_fadvise64 278
301 /* 279 is unused */
302#define __NR_exit_group 280 302#define __NR_exit_group 280
303 303
304#define __NR_lookup_dcookie 281 304#define __NR_lookup_dcookie 281
@@ -321,17 +321,17 @@
321#define __NR_tgkill 298 321#define __NR_tgkill 298
322#define __NR_utimes 299 322#define __NR_utimes 299
323#define __NR_fadvise64_64 300 323#define __NR_fadvise64_64 300
324#define __NR_vserver 301 324 /* 301 is reserved for vserver */
325#define __NR_mbind 302 325 /* 302 is reserved for mbind */
326#define __NR_get_mempolicy 303 326 /* 303 is reserved for get_mempolicy */
327#define __NR_set_mempolicy 304 327 /* 304 is reserved for set_mempolicy */
328#define __NR_mq_open 305 328#define __NR_mq_open 305
329#define __NR_mq_unlink (__NR_mq_open+1) 329#define __NR_mq_unlink (__NR_mq_open+1)
330#define __NR_mq_timedsend (__NR_mq_open+2) 330#define __NR_mq_timedsend (__NR_mq_open+2)
331#define __NR_mq_timedreceive (__NR_mq_open+3) 331#define __NR_mq_timedreceive (__NR_mq_open+3)
332#define __NR_mq_notify (__NR_mq_open+4) 332#define __NR_mq_notify (__NR_mq_open+4)
333#define __NR_mq_getsetattr (__NR_mq_open+5) 333#define __NR_mq_getsetattr (__NR_mq_open+5)
334#define __NR_kexec_load 311 334 /* 311 is reserved for kexec */
335#define __NR_waitid 312 335#define __NR_waitid 312
336#define __NR_add_key 313 336#define __NR_add_key 313
337#define __NR_request_key 314 337#define __NR_request_key 314
@@ -341,7 +341,7 @@
341#define __NR_inotify_init 318 341#define __NR_inotify_init 318
342#define __NR_inotify_add_watch 319 342#define __NR_inotify_add_watch 319
343#define __NR_inotify_rm_watch 320 343#define __NR_inotify_rm_watch 320
344/* 321 is unused */ 344 /* 321 is unused */
345#define __NR_migrate_pages 322 345#define __NR_migrate_pages 322
346#define __NR_openat 323 346#define __NR_openat 323
347#define __NR_mkdirat 324 347#define __NR_mkdirat 324
@@ -399,44 +399,6 @@
399#define __NR_process_vm_readv 376 399#define __NR_process_vm_readv 376
400#define __NR_process_vm_writev 377 400#define __NR_process_vm_writev 377
401 401
402#ifdef __KERNEL__
403
404#define NR_syscalls 378 402#define NR_syscalls 378
405 403
406#define __ARCH_WANT_IPC_PARSE_VERSION
407#define __ARCH_WANT_OLD_READDIR
408#define __ARCH_WANT_OLD_STAT
409#define __ARCH_WANT_STAT64
410#define __ARCH_WANT_SYS_ALARM
411#define __ARCH_WANT_SYS_GETHOSTNAME
412#define __ARCH_WANT_SYS_IPC
413#define __ARCH_WANT_SYS_PAUSE
414#define __ARCH_WANT_SYS_SGETMASK
415#define __ARCH_WANT_SYS_SIGNAL
416#define __ARCH_WANT_SYS_TIME
417#define __ARCH_WANT_SYS_UTIME
418#define __ARCH_WANT_SYS_WAITPID
419#define __ARCH_WANT_SYS_SOCKETCALL
420#define __ARCH_WANT_SYS_FADVISE64
421#define __ARCH_WANT_SYS_GETPGRP
422#define __ARCH_WANT_SYS_LLSEEK
423#define __ARCH_WANT_SYS_NICE
424#define __ARCH_WANT_SYS_OLD_GETRLIMIT
425#define __ARCH_WANT_SYS_OLD_UNAME
426#define __ARCH_WANT_SYS_OLDUMOUNT
427#define __ARCH_WANT_SYS_SIGPENDING
428#define __ARCH_WANT_SYS_SIGPROCMASK
429#define __ARCH_WANT_SYS_RT_SIGACTION
430
431/*
432 * "Conditional" syscalls
433 *
434 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
435 * but it doesn't work on all toolchains, so we just do it by hand
436 */
437#ifndef cond_syscall
438#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
439#endif
440
441#endif /* __KERNEL__ */
442#endif /* __ASM_SH_UNISTD_64_H */ 404#endif /* __ASM_SH_UNISTD_64_H */
diff --git a/arch/sh/include/cpu-sh4/cpu/dma-register.h b/arch/sh/include/cpu-sh4/cpu/dma-register.h
index 18fa80aba15e..02788b6a03b7 100644
--- a/arch/sh/include/cpu-sh4/cpu/dma-register.h
+++ b/arch/sh/include/cpu-sh4/cpu/dma-register.h
@@ -16,45 +16,29 @@
16 16
17#define DMAOR_INIT DMAOR_DME 17#define DMAOR_INIT DMAOR_DME
18 18
19#if defined(CONFIG_CPU_SUBTYPE_SH7343) || \ 19#if defined(CONFIG_CPU_SUBTYPE_SH7343)
20 defined(CONFIG_CPU_SUBTYPE_SH7730)
21#define CHCR_TS_LOW_MASK 0x00000018 20#define CHCR_TS_LOW_MASK 0x00000018
22#define CHCR_TS_LOW_SHIFT 3 21#define CHCR_TS_LOW_SHIFT 3
23#define CHCR_TS_HIGH_MASK 0 22#define CHCR_TS_HIGH_MASK 0
24#define CHCR_TS_HIGH_SHIFT 0 23#define CHCR_TS_HIGH_SHIFT 0
25#elif defined(CONFIG_CPU_SUBTYPE_SH7722) || \ 24#elif defined(CONFIG_CPU_SUBTYPE_SH7722) || \
25 defined(CONFIG_CPU_SUBTYPE_SH7723) || \
26 defined(CONFIG_CPU_SUBTYPE_SH7724) || \ 26 defined(CONFIG_CPU_SUBTYPE_SH7724) || \
27 defined(CONFIG_CPU_SUBTYPE_SH7730) || \
27 defined(CONFIG_CPU_SUBTYPE_SH7786) 28 defined(CONFIG_CPU_SUBTYPE_SH7786)
28#define CHCR_TS_LOW_MASK 0x00000018 29#define CHCR_TS_LOW_MASK 0x00000018
29#define CHCR_TS_LOW_SHIFT 3 30#define CHCR_TS_LOW_SHIFT 3
30#define CHCR_TS_HIGH_MASK 0x00300000 31#define CHCR_TS_HIGH_MASK 0x00300000
31#define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */ 32#define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */
32#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 33#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \
33 defined(CONFIG_CPU_SUBTYPE_SH7764) 34 defined(CONFIG_CPU_SUBTYPE_SH7763) || \
34#define CHCR_TS_LOW_MASK 0x00000018 35 defined(CONFIG_CPU_SUBTYPE_SH7764) || \
35#define CHCR_TS_LOW_SHIFT 3 36 defined(CONFIG_CPU_SUBTYPE_SH7780) || \
36#define CHCR_TS_HIGH_MASK 0 37 defined(CONFIG_CPU_SUBTYPE_SH7785)
37#define CHCR_TS_HIGH_SHIFT 0
38#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
39#define CHCR_TS_LOW_MASK 0x00000018
40#define CHCR_TS_LOW_SHIFT 3
41#define CHCR_TS_HIGH_MASK 0
42#define CHCR_TS_HIGH_SHIFT 0
43#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
44#define CHCR_TS_LOW_MASK 0x00000018 38#define CHCR_TS_LOW_MASK 0x00000018
45#define CHCR_TS_LOW_SHIFT 3 39#define CHCR_TS_LOW_SHIFT 3
46#define CHCR_TS_HIGH_MASK 0x00100000 40#define CHCR_TS_HIGH_MASK 0x00100000
47#define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */ 41#define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */
48#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
49#define CHCR_TS_LOW_MASK 0x00000018
50#define CHCR_TS_LOW_SHIFT 3
51#define CHCR_TS_HIGH_MASK 0
52#define CHCR_TS_HIGH_SHIFT 0
53#else /* SH7785 */
54#define CHCR_TS_LOW_MASK 0x00000018
55#define CHCR_TS_LOW_SHIFT 3
56#define CHCR_TS_HIGH_MASK 0
57#define CHCR_TS_HIGH_SHIFT 0
58#endif 42#endif
59 43
60/* Transmit sizes and respective CHCR register values */ 44/* Transmit sizes and respective CHCR register values */
diff --git a/arch/sh/include/mach-common/mach/mangle-port.h b/arch/sh/include/mach-common/mach/mangle-port.h
new file mode 100644
index 000000000000..4ca1769a0f12
--- /dev/null
+++ b/arch/sh/include/mach-common/mach/mangle-port.h
@@ -0,0 +1,49 @@
1/*
2 * SH version cribbed from the MIPS copy:
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2003, 2004 Ralf Baechle
9 */
10#ifndef __MACH_COMMON_MANGLE_PORT_H
11#define __MACH_COMMON_MANGLE_PORT_H
12
13/*
14 * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
15 * less sane hardware forces software to fiddle with this...
16 *
17 * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
18 * you can't have the numerical value of data and byte addresses within
19 * multibyte quantities both preserved at the same time. Hence two
20 * variations of functions: non-prefixed ones that preserve the value
21 * and prefixed ones that preserve byte addresses. The latters are
22 * typically used for moving raw data between a peripheral and memory (cf.
23 * string I/O functions), hence the "__mem_" prefix.
24 */
25#if defined(CONFIG_SWAP_IO_SPACE)
26
27# define ioswabb(x) (x)
28# define __mem_ioswabb(x) (x)
29# define ioswabw(x) le16_to_cpu(x)
30# define __mem_ioswabw(x) (x)
31# define ioswabl(x) le32_to_cpu(x)
32# define __mem_ioswabl(x) (x)
33# define ioswabq(x) le64_to_cpu(x)
34# define __mem_ioswabq(x) (x)
35
36#else
37
38# define ioswabb(x) (x)
39# define __mem_ioswabb(x) (x)
40# define ioswabw(x) (x)
41# define __mem_ioswabw(x) cpu_to_le16(x)
42# define ioswabl(x) (x)
43# define __mem_ioswabl(x) cpu_to_le32(x)
44# define ioswabq(x) (x)
45# define __mem_ioswabq(x) cpu_to_le32(x)
46
47#endif
48
49#endif /* __MACH_COMMON_MANGLE_PORT_H */
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index 2875e8be4f72..c8836cffa216 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -680,6 +680,25 @@ static struct platform_device spi1_device = {
680 .resource = spi1_resources, 680 .resource = spi1_resources,
681}; 681};
682 682
683static struct resource rspi_resources[] = {
684 {
685 .start = 0xfe480000,
686 .end = 0xfe4800ff,
687 .flags = IORESOURCE_MEM,
688 },
689 {
690 .start = 220,
691 .flags = IORESOURCE_IRQ,
692 },
693};
694
695static struct platform_device rspi_device = {
696 .name = "rspi",
697 .id = 2,
698 .num_resources = ARRAY_SIZE(rspi_resources),
699 .resource = rspi_resources,
700};
701
683static struct resource usb_ehci_resources[] = { 702static struct resource usb_ehci_resources[] = {
684 [0] = { 703 [0] = {
685 .start = 0xfe4f1000, 704 .start = 0xfe4f1000,
@@ -740,6 +759,7 @@ static struct platform_device *sh7757_devices[] __initdata = {
740 &dma3_device, 759 &dma3_device,
741 &spi0_device, 760 &spi0_device,
742 &spi1_device, 761 &spi1_device,
762 &rspi_device,
743 &usb_ehci_device, 763 &usb_ehci_device,
744 &usb_ohci_device, 764 &usb_ohci_device,
745}; 765};
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 6d62eb40e750..1ddc876d3b26 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -29,7 +29,6 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev,
29 int index) 29 int index)
30{ 30{
31 unsigned long allowed_mode = SUSP_SH_SLEEP; 31 unsigned long allowed_mode = SUSP_SH_SLEEP;
32 ktime_t before, after;
33 int requested_state = index; 32 int requested_state = index;
34 int allowed_state; 33 int allowed_state;
35 int k; 34 int k;
@@ -47,19 +46,16 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev,
47 */ 46 */
48 k = min_t(int, allowed_state, requested_state); 47 k = min_t(int, allowed_state, requested_state);
49 48
50 before = ktime_get();
51 sh_mobile_call_standby(cpuidle_mode[k]); 49 sh_mobile_call_standby(cpuidle_mode[k]);
52 after = ktime_get();
53
54 dev->last_residency = (int)ktime_to_ns(ktime_sub(after, before)) >> 10;
55 50
56 return k; 51 return k;
57} 52}
58 53
59static struct cpuidle_device cpuidle_dev; 54static struct cpuidle_device cpuidle_dev;
60static struct cpuidle_driver cpuidle_driver = { 55static struct cpuidle_driver cpuidle_driver = {
61 .name = "sh_idle", 56 .name = "sh_idle",
62 .owner = THIS_MODULE, 57 .owner = THIS_MODULE,
58 .en_core_tk_irqen = 1,
63}; 59};
64 60
65void sh_mobile_setup_cpuidle(void) 61void sh_mobile_setup_cpuidle(void)
diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
index 0fffacea6ed9..e68b45b6f3f9 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/arch/sh/kernel/cpufreq.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * cpufreq driver for the SuperH processors. 4 * cpufreq driver for the SuperH processors.
5 * 5 *
6 * Copyright (C) 2002 - 2007 Paul Mundt 6 * Copyright (C) 2002 - 2012 Paul Mundt
7 * Copyright (C) 2002 M. R. Brown 7 * Copyright (C) 2002 M. R. Brown
8 * 8 *
9 * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c 9 * Clock framework bits from arch/avr32/mach-at32ap/cpufreq.c
@@ -14,6 +14,8 @@
14 * License. See the file "COPYING" in the main directory of this archive 14 * License. See the file "COPYING" in the main directory of this archive
15 * for more details. 15 * for more details.
16 */ 16 */
17#define pr_fmt(fmt) "cpufreq: " fmt
18
17#include <linux/types.h> 19#include <linux/types.h>
18#include <linux/cpufreq.h> 20#include <linux/cpufreq.h>
19#include <linux/kernel.h> 21#include <linux/kernel.h>
@@ -21,15 +23,18 @@
21#include <linux/init.h> 23#include <linux/init.h>
22#include <linux/err.h> 24#include <linux/err.h>
23#include <linux/cpumask.h> 25#include <linux/cpumask.h>
26#include <linux/cpu.h>
24#include <linux/smp.h> 27#include <linux/smp.h>
25#include <linux/sched.h> /* set_cpus_allowed() */ 28#include <linux/sched.h> /* set_cpus_allowed() */
26#include <linux/clk.h> 29#include <linux/clk.h>
30#include <linux/percpu.h>
31#include <linux/sh_clk.h>
27 32
28static struct clk *cpuclk; 33static DEFINE_PER_CPU(struct clk, sh_cpuclk);
29 34
30static unsigned int sh_cpufreq_get(unsigned int cpu) 35static unsigned int sh_cpufreq_get(unsigned int cpu)
31{ 36{
32 return (clk_get_rate(cpuclk) + 500) / 1000; 37 return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
33} 38}
34 39
35/* 40/*
@@ -40,8 +45,10 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
40 unsigned int relation) 45 unsigned int relation)
41{ 46{
42 unsigned int cpu = policy->cpu; 47 unsigned int cpu = policy->cpu;
48 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
43 cpumask_t cpus_allowed; 49 cpumask_t cpus_allowed;
44 struct cpufreq_freqs freqs; 50 struct cpufreq_freqs freqs;
51 struct device *dev;
45 long freq; 52 long freq;
46 53
47 if (!cpu_online(cpu)) 54 if (!cpu_online(cpu))
@@ -52,13 +59,15 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
52 59
53 BUG_ON(smp_processor_id() != cpu); 60 BUG_ON(smp_processor_id() != cpu);
54 61
62 dev = get_cpu_device(cpu);
63
55 /* Convert target_freq from kHz to Hz */ 64 /* Convert target_freq from kHz to Hz */
56 freq = clk_round_rate(cpuclk, target_freq * 1000); 65 freq = clk_round_rate(cpuclk, target_freq * 1000);
57 66
58 if (freq < (policy->min * 1000) || freq > (policy->max * 1000)) 67 if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
59 return -EINVAL; 68 return -EINVAL;
60 69
61 pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000); 70 dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
62 71
63 freqs.cpu = cpu; 72 freqs.cpu = cpu;
64 freqs.old = sh_cpufreq_get(cpu); 73 freqs.old = sh_cpufreq_get(cpu);
@@ -70,78 +79,112 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
70 clk_set_rate(cpuclk, freq); 79 clk_set_rate(cpuclk, freq);
71 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 80 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
72 81
73 pr_debug("cpufreq: set frequency %lu Hz\n", freq); 82 dev_dbg(dev, "set frequency %lu Hz\n", freq);
83
84 return 0;
85}
86
87static int sh_cpufreq_verify(struct cpufreq_policy *policy)
88{
89 struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
90 struct cpufreq_frequency_table *freq_table;
91
92 freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
93 if (freq_table)
94 return cpufreq_frequency_table_verify(policy, freq_table);
95
96 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
97 policy->cpuinfo.max_freq);
98
99 policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
100 policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
101
102 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
103 policy->cpuinfo.max_freq);
74 104
75 return 0; 105 return 0;
76} 106}
77 107
78static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) 108static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
79{ 109{
80 if (!cpu_online(policy->cpu)) 110 unsigned int cpu = policy->cpu;
111 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
112 struct cpufreq_frequency_table *freq_table;
113 struct device *dev;
114
115 if (!cpu_online(cpu))
81 return -ENODEV; 116 return -ENODEV;
82 117
83 cpuclk = clk_get(NULL, "cpu_clk"); 118 dev = get_cpu_device(cpu);
119
120 cpuclk = clk_get(dev, "cpu_clk");
84 if (IS_ERR(cpuclk)) { 121 if (IS_ERR(cpuclk)) {
85 printk(KERN_ERR "cpufreq: couldn't get CPU#%d clk\n", 122 dev_err(dev, "couldn't get CPU clk\n");
86 policy->cpu);
87 return PTR_ERR(cpuclk); 123 return PTR_ERR(cpuclk);
88 } 124 }
89 125
90 /* cpuinfo and default policy values */ 126 policy->cur = policy->min = policy->max = sh_cpufreq_get(cpu);
91 policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
92 policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
93 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
94 127
95 policy->cur = sh_cpufreq_get(policy->cpu); 128 freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
96 policy->min = policy->cpuinfo.min_freq; 129 if (freq_table) {
97 policy->max = policy->cpuinfo.max_freq; 130 int result;
98 131
99 /* 132 result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
100 * Catch the cases where the clock framework hasn't been wired up 133 if (!result)
101 * properly to support scaling. 134 cpufreq_frequency_table_get_attr(freq_table, cpu);
102 */ 135 } else {
103 if (unlikely(policy->min == policy->max)) { 136 dev_notice(dev, "no frequency table found, falling back "
104 printk(KERN_ERR "cpufreq: clock framework rate rounding " 137 "to rate rounding.\n");
105 "not supported on CPU#%d.\n", policy->cpu);
106 138
107 clk_put(cpuclk); 139 policy->cpuinfo.min_freq =
108 return -EINVAL; 140 (clk_round_rate(cpuclk, 1) + 500) / 1000;
141 policy->cpuinfo.max_freq =
142 (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
109 } 143 }
110 144
111 printk(KERN_INFO "cpufreq: CPU#%d Frequencies - Minimum %u.%03u MHz, " 145 policy->min = policy->cpuinfo.min_freq;
146 policy->max = policy->cpuinfo.max_freq;
147
148 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
149
150 dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
112 "Maximum %u.%03u MHz.\n", 151 "Maximum %u.%03u MHz.\n",
113 policy->cpu, policy->min / 1000, policy->min % 1000, 152 policy->min / 1000, policy->min % 1000,
114 policy->max / 1000, policy->max % 1000); 153 policy->max / 1000, policy->max % 1000);
115 154
116 return 0; 155 return 0;
117} 156}
118 157
119static int sh_cpufreq_verify(struct cpufreq_policy *policy) 158static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
120{ 159{
121 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, 160 unsigned int cpu = policy->cpu;
122 policy->cpuinfo.max_freq); 161 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
123 return 0;
124}
125 162
126static int sh_cpufreq_exit(struct cpufreq_policy *policy) 163 cpufreq_frequency_table_put_attr(cpu);
127{
128 clk_put(cpuclk); 164 clk_put(cpuclk);
165
129 return 0; 166 return 0;
130} 167}
131 168
169static struct freq_attr *sh_freq_attr[] = {
170 &cpufreq_freq_attr_scaling_available_freqs,
171 NULL,
172};
173
132static struct cpufreq_driver sh_cpufreq_driver = { 174static struct cpufreq_driver sh_cpufreq_driver = {
133 .owner = THIS_MODULE, 175 .owner = THIS_MODULE,
134 .name = "sh", 176 .name = "sh",
135 .init = sh_cpufreq_cpu_init,
136 .verify = sh_cpufreq_verify,
137 .target = sh_cpufreq_target,
138 .get = sh_cpufreq_get, 177 .get = sh_cpufreq_get,
139 .exit = sh_cpufreq_exit, 178 .target = sh_cpufreq_target,
179 .verify = sh_cpufreq_verify,
180 .init = sh_cpufreq_cpu_init,
181 .exit = sh_cpufreq_cpu_exit,
182 .attr = sh_freq_attr,
140}; 183};
141 184
142static int __init sh_cpufreq_module_init(void) 185static int __init sh_cpufreq_module_init(void)
143{ 186{
144 printk(KERN_INFO "cpufreq: SuperH CPU frequency driver.\n"); 187 pr_notice("SuperH CPU frequency driver.\n");
145 return cpufreq_register_driver(&sh_cpufreq_driver); 188 return cpufreq_register_driver(&sh_cpufreq_driver);
146} 189}
147 190
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 0bc58866add1..5901fba3176e 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -57,12 +57,13 @@ sys_sigsuspend(old_sigset_t mask,
57 unsigned long r5, unsigned long r6, unsigned long r7, 57 unsigned long r5, unsigned long r6, unsigned long r7,
58 struct pt_regs __regs) 58 struct pt_regs __regs)
59{ 59{
60 mask &= _BLOCKABLE; 60 sigset_t blocked;
61 spin_lock_irq(&current->sighand->siglock); 61
62 current->saved_sigmask = current->blocked; 62 current->saved_sigmask = current->blocked;
63 siginitset(&current->blocked, mask); 63
64 recalc_sigpending(); 64 mask &= _BLOCKABLE;
65 spin_unlock_irq(&current->sighand->siglock); 65 siginitset(&blocked, mask);
66 set_current_blocked(&blocked);
66 67
67 current->state = TASK_INTERRUPTIBLE; 68 current->state = TASK_INTERRUPTIBLE;
68 schedule(); 69 schedule();
@@ -239,11 +240,7 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
239 goto badframe; 240 goto badframe;
240 241
241 sigdelsetmask(&set, ~_BLOCKABLE); 242 sigdelsetmask(&set, ~_BLOCKABLE);
242 243 set_current_blocked(&set);
243 spin_lock_irq(&current->sighand->siglock);
244 current->blocked = set;
245 recalc_sigpending();
246 spin_unlock_irq(&current->sighand->siglock);
247 244
248 if (restore_sigcontext(regs, &frame->sc, &r0)) 245 if (restore_sigcontext(regs, &frame->sc, &r0))
249 goto badframe; 246 goto badframe;
@@ -273,10 +270,7 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
273 goto badframe; 270 goto badframe;
274 271
275 sigdelsetmask(&set, ~_BLOCKABLE); 272 sigdelsetmask(&set, ~_BLOCKABLE);
276 spin_lock_irq(&current->sighand->siglock); 273 set_current_blocked(&set);
277 current->blocked = set;
278 recalc_sigpending();
279 spin_unlock_irq(&current->sighand->siglock);
280 274
281 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) 275 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
282 goto badframe; 276 goto badframe;
@@ -547,17 +541,8 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
547 else 541 else
548 ret = setup_frame(sig, ka, oldset, regs); 542 ret = setup_frame(sig, ka, oldset, regs);
549 543
550 if (ka->sa.sa_flags & SA_ONESHOT) 544 if (ret == 0)
551 ka->sa.sa_handler = SIG_DFL; 545 block_sigmask(ka, sig);
552
553 if (ret == 0) {
554 spin_lock_irq(&current->sighand->siglock);
555 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
556 if (!(ka->sa.sa_flags & SA_NODEFER))
557 sigaddset(&current->blocked,sig);
558 recalc_sigpending();
559 spin_unlock_irq(&current->sighand->siglock);
560 }
561 546
562 return ret; 547 return ret;
563} 548}
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 6b5603fe274b..3c9a6f7dcdce 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -159,14 +159,13 @@ sys_sigsuspend(old_sigset_t mask,
159 unsigned long r6, unsigned long r7, 159 unsigned long r6, unsigned long r7,
160 struct pt_regs * regs) 160 struct pt_regs * regs)
161{ 161{
162 sigset_t saveset; 162 sigset_t saveset, blocked;
163 163
164 mask &= _BLOCKABLE;
165 spin_lock_irq(&current->sighand->siglock);
166 saveset = current->blocked; 164 saveset = current->blocked;
167 siginitset(&current->blocked, mask); 165
168 recalc_sigpending(); 166 mask &= _BLOCKABLE;
169 spin_unlock_irq(&current->sighand->siglock); 167 siginitset(&blocked, mask);
168 set_current_blocked(&blocked);
170 169
171 REF_REG_RET = -EINTR; 170 REF_REG_RET = -EINTR;
172 while (1) { 171 while (1) {
@@ -198,11 +197,8 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
198 if (copy_from_user(&newset, unewset, sizeof(newset))) 197 if (copy_from_user(&newset, unewset, sizeof(newset)))
199 return -EFAULT; 198 return -EFAULT;
200 sigdelsetmask(&newset, ~_BLOCKABLE); 199 sigdelsetmask(&newset, ~_BLOCKABLE);
201 spin_lock_irq(&current->sighand->siglock);
202 saveset = current->blocked; 200 saveset = current->blocked;
203 current->blocked = newset; 201 set_current_blocked(&newset);
204 recalc_sigpending();
205 spin_unlock_irq(&current->sighand->siglock);
206 202
207 REF_REG_RET = -EINTR; 203 REF_REG_RET = -EINTR;
208 while (1) { 204 while (1) {
@@ -408,11 +404,7 @@ asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
408 goto badframe; 404 goto badframe;
409 405
410 sigdelsetmask(&set, ~_BLOCKABLE); 406 sigdelsetmask(&set, ~_BLOCKABLE);
411 407 set_current_blocked(&set);
412 spin_lock_irq(&current->sighand->siglock);
413 current->blocked = set;
414 recalc_sigpending();
415 spin_unlock_irq(&current->sighand->siglock);
416 408
417 if (restore_sigcontext(regs, &frame->sc, &ret)) 409 if (restore_sigcontext(regs, &frame->sc, &ret))
418 goto badframe; 410 goto badframe;
@@ -445,10 +437,7 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
445 goto badframe; 437 goto badframe;
446 438
447 sigdelsetmask(&set, ~_BLOCKABLE); 439 sigdelsetmask(&set, ~_BLOCKABLE);
448 spin_lock_irq(&current->sighand->siglock); 440 set_current_blocked(&set);
449 current->blocked = set;
450 recalc_sigpending();
451 spin_unlock_irq(&current->sighand->siglock);
452 441
453 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret)) 442 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
454 goto badframe; 443 goto badframe;
@@ -734,17 +723,8 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
734 else 723 else
735 ret = setup_frame(sig, ka, oldset, regs); 724 ret = setup_frame(sig, ka, oldset, regs);
736 725
737 if (ka->sa.sa_flags & SA_ONESHOT) 726 if (ret == 0)
738 ka->sa.sa_handler = SIG_DFL; 727 block_sigmask(ka, sig);
739
740 if (ret == 0) {
741 spin_lock_irq(&current->sighand->siglock);
742 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
743 if (!(ka->sa.sa_flags & SA_NODEFER))
744 sigaddset(&current->blocked,sig);
745 recalc_sigpending();
746 spin_unlock_irq(&current->sighand->siglock);
747 }
748 728
749 return ret; 729 return ret;
750} 730}
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index ee56a9b1a981..4b68f0f79761 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -204,8 +204,8 @@ ENTRY(sys_call_table)
204 .long sys_capset /* 185 */ 204 .long sys_capset /* 185 */
205 .long sys_sigaltstack 205 .long sys_sigaltstack
206 .long sys_sendfile 206 .long sys_sendfile
207 .long sys_ni_syscall /* streams1 */ 207 .long sys_ni_syscall /* getpmsg */
208 .long sys_ni_syscall /* streams2 */ 208 .long sys_ni_syscall /* putpmsg */
209 .long sys_vfork /* 190 */ 209 .long sys_vfork /* 190 */
210 .long sys_getrlimit 210 .long sys_getrlimit
211 .long sys_mmap2 211 .long sys_mmap2
@@ -259,8 +259,8 @@ ENTRY(sys_call_table)
259 .long sys_futex /* 240 */ 259 .long sys_futex /* 240 */
260 .long sys_sched_setaffinity 260 .long sys_sched_setaffinity
261 .long sys_sched_getaffinity 261 .long sys_sched_getaffinity
262 .long sys_ni_syscall 262 .long sys_ni_syscall /* reserved for set_thread_area */
263 .long sys_ni_syscall 263 .long sys_ni_syscall /* reserved for get_thread_area */
264 .long sys_io_setup /* 245 */ 264 .long sys_io_setup /* 245 */
265 .long sys_io_destroy 265 .long sys_io_destroy
266 .long sys_io_getevents 266 .long sys_io_getevents
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 9af7de26fb71..0956345b36ef 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -208,8 +208,8 @@ sys_call_table:
208 .long sys_capset /* 185 */ 208 .long sys_capset /* 185 */
209 .long sys_sigaltstack 209 .long sys_sigaltstack
210 .long sys_sendfile 210 .long sys_sendfile
211 .long sys_ni_syscall /* streams1 */ 211 .long sys_ni_syscall /* getpmsg */
212 .long sys_ni_syscall /* streams2 */ 212 .long sys_ni_syscall /* putpmsg */
213 .long sys_vfork /* 190 */ 213 .long sys_vfork /* 190 */
214 .long sys_getrlimit 214 .long sys_getrlimit
215 .long sys_mmap2 215 .long sys_mmap2
@@ -296,8 +296,8 @@ sys_call_table:
296 .long sys_futex 296 .long sys_futex
297 .long sys_sched_setaffinity 297 .long sys_sched_setaffinity
298 .long sys_sched_getaffinity /* 270 */ 298 .long sys_sched_getaffinity /* 270 */
299 .long sys_ni_syscall 299 .long sys_ni_syscall /* reserved for set_thread_area */
300 .long sys_ni_syscall 300 .long sys_ni_syscall /* reserved for get_thread_area */
301 .long sys_io_setup 301 .long sys_io_setup
302 .long sys_io_destroy 302 .long sys_io_destroy
303 .long sys_io_getevents /* 275 */ 303 .long sys_io_getevents /* 275 */
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 1666de84d477..6c0683d3fcba 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -577,6 +577,7 @@ config COMPAT
577 depends on SPARC64 577 depends on SPARC64
578 default y 578 default y
579 select COMPAT_BINFMT_ELF 579 select COMPAT_BINFMT_ELF
580 select ARCH_WANT_OLD_COMPAT_IPC
580 581
581config SYSVIPC_COMPAT 582config SYSVIPC_COMPAT
582 bool 583 bool
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
index 9205416b1e67..d56d199c1aa8 100644
--- a/arch/sparc/boot/Makefile
+++ b/arch/sparc/boot/Makefile
@@ -5,7 +5,6 @@
5 5
6ROOT_IMG := /usr/src/root.img 6ROOT_IMG := /usr/src/root.img
7ELFTOAOUT := elftoaout 7ELFTOAOUT := elftoaout
8MKIMAGE := $(srctree)/scripts/mkuboot.sh
9 8
10hostprogs-y := piggyback btfixupprep 9hostprogs-y := piggyback btfixupprep
11targets := tftpboot.img btfix.o btfix.S image zImage vmlinux.aout 10targets := tftpboot.img btfix.o btfix.S image zImage vmlinux.aout
@@ -92,11 +91,9 @@ $(obj)/image.bin: $(obj)/image FORCE
92$(obj)/image.gz: $(obj)/image.bin 91$(obj)/image.gz: $(obj)/image.bin
93 $(call if_changed,gzip) 92 $(call if_changed,gzip)
94 93
95quiet_cmd_uimage = UIMAGE $@ 94UIMAGE_LOADADDR = $(CONFIG_UBOOT_LOAD_ADDR)
96 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A sparc -O linux -T kernel \ 95UIMAGE_ENTRYADDR = $(CONFIG_UBOOT_ENTRY_ADDR)
97 -C gzip -a $(CONFIG_UBOOT_LOAD_ADDR) \ 96UIMAGE_COMPRESSION = gzip
98 -e $(CONFIG_UBOOT_ENTRY_ADDR) -n 'Linux-$(KERNELRELEASE)' \
99 -d $< $@
100 97
101quiet_cmd_uimage.o = UIMAGE.O $@ 98quiet_cmd_uimage.o = UIMAGE.O $@
102 cmd_uimage.o = $(LD) -Tdata $(CONFIG_UBOOT_FLASH_ADDR) \ 99 cmd_uimage.o = $(LD) -Tdata $(CONFIG_UBOOT_FLASH_ADDR) \
diff --git a/arch/sparc/include/asm/posix_types.h b/arch/sparc/include/asm/posix_types.h
index dbfc1a34b3a2..3070f25ae90a 100644
--- a/arch/sparc/include/asm/posix_types.h
+++ b/arch/sparc/include/asm/posix_types.h
@@ -9,35 +9,16 @@
9 9
10#if defined(__sparc__) && defined(__arch64__) 10#if defined(__sparc__) && defined(__arch64__)
11/* sparc 64 bit */ 11/* sparc 64 bit */
12typedef unsigned long __kernel_size_t;
13typedef long __kernel_ssize_t;
14typedef long __kernel_ptrdiff_t;
15typedef long __kernel_time_t;
16typedef long __kernel_clock_t;
17typedef int __kernel_pid_t;
18typedef int __kernel_ipc_pid_t;
19typedef unsigned int __kernel_uid_t;
20typedef unsigned int __kernel_gid_t;
21typedef unsigned long __kernel_ino_t;
22typedef unsigned int __kernel_mode_t;
23typedef unsigned int __kernel_nlink_t; 12typedef unsigned int __kernel_nlink_t;
24typedef int __kernel_daddr_t; 13#define __kernel_nlink_t __kernel_nlink_t
25typedef long __kernel_off_t;
26typedef char * __kernel_caddr_t;
27typedef unsigned short __kernel_uid16_t;
28typedef unsigned short __kernel_gid16_t;
29typedef int __kernel_clockid_t;
30typedef int __kernel_timer_t;
31 14
32typedef unsigned short __kernel_old_uid_t; 15typedef unsigned short __kernel_old_uid_t;
33typedef unsigned short __kernel_old_gid_t; 16typedef unsigned short __kernel_old_gid_t;
34typedef __kernel_uid_t __kernel_uid32_t; 17#define __kernel_old_uid_t __kernel_old_uid_t
35typedef __kernel_gid_t __kernel_gid32_t;
36
37typedef unsigned int __kernel_old_dev_t;
38 18
39/* Note this piece of asymmetry from the v9 ABI. */ 19/* Note this piece of asymmetry from the v9 ABI. */
40typedef int __kernel_suseconds_t; 20typedef int __kernel_suseconds_t;
21#define __kernel_suseconds_t __kernel_suseconds_t
41 22
42#else 23#else
43/* sparc 32 bit */ 24/* sparc 32 bit */
@@ -45,109 +26,29 @@ typedef int __kernel_suseconds_t;
45typedef unsigned int __kernel_size_t; 26typedef unsigned int __kernel_size_t;
46typedef int __kernel_ssize_t; 27typedef int __kernel_ssize_t;
47typedef long int __kernel_ptrdiff_t; 28typedef long int __kernel_ptrdiff_t;
48typedef long __kernel_time_t; 29#define __kernel_size_t __kernel_size_t
49typedef long __kernel_suseconds_t; 30
50typedef long __kernel_clock_t;
51typedef int __kernel_pid_t;
52typedef unsigned short __kernel_ipc_pid_t; 31typedef unsigned short __kernel_ipc_pid_t;
32#define __kernel_ipc_pid_t __kernel_ipc_pid_t
33
53typedef unsigned short __kernel_uid_t; 34typedef unsigned short __kernel_uid_t;
54typedef unsigned short __kernel_gid_t; 35typedef unsigned short __kernel_gid_t;
55typedef unsigned long __kernel_ino_t; 36#define __kernel_uid_t __kernel_uid_t
37
56typedef unsigned short __kernel_mode_t; 38typedef unsigned short __kernel_mode_t;
39#define __kernel_mode_t __kernel_mode_t
40
57typedef short __kernel_nlink_t; 41typedef short __kernel_nlink_t;
42#define __kernel_nlink_t __kernel_nlink_t
43
58typedef long __kernel_daddr_t; 44typedef long __kernel_daddr_t;
59typedef long __kernel_off_t; 45#define __kernel_daddr_t __kernel_daddr_t
60typedef char * __kernel_caddr_t; 46
61typedef unsigned short __kernel_uid16_t;
62typedef unsigned short __kernel_gid16_t;
63typedef unsigned int __kernel_uid32_t;
64typedef unsigned int __kernel_gid32_t;
65typedef unsigned short __kernel_old_uid_t;
66typedef unsigned short __kernel_old_gid_t;
67typedef unsigned short __kernel_old_dev_t; 47typedef unsigned short __kernel_old_dev_t;
68typedef int __kernel_clockid_t; 48#define __kernel_old_dev_t __kernel_old_dev_t
69typedef int __kernel_timer_t;
70 49
71#endif /* defined(__sparc__) && defined(__arch64__) */ 50#endif /* defined(__sparc__) && defined(__arch64__) */
72 51
73#ifdef __GNUC__ 52#include <asm-generic/posix_types.h>
74typedef long long __kernel_loff_t;
75#endif
76
77typedef struct {
78 int val[2];
79} __kernel_fsid_t;
80
81#ifdef __KERNEL__
82
83#undef __FD_SET
84static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
85{
86 unsigned long _tmp = fd / __NFDBITS;
87 unsigned long _rem = fd % __NFDBITS;
88 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
89}
90
91#undef __FD_CLR
92static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
93{
94 unsigned long _tmp = fd / __NFDBITS;
95 unsigned long _rem = fd % __NFDBITS;
96 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
97}
98
99#undef __FD_ISSET
100static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
101{
102 unsigned long _tmp = fd / __NFDBITS;
103 unsigned long _rem = fd % __NFDBITS;
104 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
105}
106
107/*
108 * This will unroll the loop for the normal constant cases (8 or 32 longs,
109 * for 256 and 1024-bit fd_sets respectively)
110 */
111#undef __FD_ZERO
112static inline void __FD_ZERO(__kernel_fd_set *p)
113{
114 unsigned long *tmp = p->fds_bits;
115 int i;
116
117 if (__builtin_constant_p(__FDSET_LONGS)) {
118 switch (__FDSET_LONGS) {
119 case 32:
120 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
121 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
122 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
123 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
124 tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
125 tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
126 tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
127 tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
128 return;
129 case 16:
130 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
131 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
132 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
133 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
134 return;
135 case 8:
136 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
137 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
138 return;
139 case 4:
140 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
141 return;
142 }
143 }
144 i = __FDSET_LONGS;
145 while (i) {
146 i--;
147 *tmp = 0;
148 tmp++;
149 }
150}
151 53
152#endif /* __KERNEL__ */
153#endif /* __SPARC_POSIX_TYPES_H */ 54#endif /* __SPARC_POSIX_TYPES_H */
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index ef8c7c068f53..fd9c3f21cbf0 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -165,6 +165,7 @@ struct sparc_stackf {
165#ifdef __KERNEL__ 165#ifdef __KERNEL__
166 166
167#include <linux/threads.h> 167#include <linux/threads.h>
168#include <asm/switch_to.h>
168 169
169static inline int pt_regs_trap_type(struct pt_regs *regs) 170static inline int pt_regs_trap_type(struct pt_regs *regs)
170{ 171{
@@ -240,6 +241,7 @@ extern unsigned long profile_pc(struct pt_regs *);
240#ifndef __ASSEMBLY__ 241#ifndef __ASSEMBLY__
241 242
242#ifdef __KERNEL__ 243#ifdef __KERNEL__
244#include <asm/switch_to.h>
243 245
244static inline bool pt_regs_is_syscall(struct pt_regs *regs) 246static inline bool pt_regs_is_syscall(struct pt_regs *regs)
245{ 247{
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 971fd435a281..48565c11e82a 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -6,6 +6,8 @@
6#include <linux/jump_label.h> 6#include <linux/jump_label.h>
7#include <linux/memory.h> 7#include <linux/memory.h>
8 8
9#include <asm/cacheflush.h>
10
9#ifdef HAVE_JUMP_LABEL 11#ifdef HAVE_JUMP_LABEL
10 12
11void arch_jump_label_transform(struct jump_entry *entry, 13void arch_jump_label_transform(struct jump_entry *entry,
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index 768290a6c028..c8759550799f 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -7,6 +7,7 @@
7#include <linux/kdebug.h> 7#include <linux/kdebug.h>
8#include <linux/ftrace.h> 8#include <linux/ftrace.h>
9 9
10#include <asm/cacheflush.h>
10#include <asm/kdebug.h> 11#include <asm/kdebug.h>
11#include <asm/ptrace.h> 12#include <asm/ptrace.h>
12#include <asm/irq.h> 13#include <asm/irq.h>
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 133387980b56..540b2fec09f0 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -14,6 +14,7 @@
14#include <asm/sbi.h> 14#include <asm/sbi.h>
15#include <asm/mmu.h> 15#include <asm/mmu.h>
16#include <asm/tlbflush.h> 16#include <asm/tlbflush.h>
17#include <asm/switch_to.h>
17#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
18 19
19#include "kernel.h" 20#include "kernel.h"
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 594768686525..02db9a0412ce 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -10,6 +10,7 @@
10#include <linux/cpu.h> 10#include <linux/cpu.h>
11 11
12#include <asm/cacheflush.h> 12#include <asm/cacheflush.h>
13#include <asm/switch_to.h>
13#include <asm/tlbflush.h> 14#include <asm/tlbflush.h>
14 15
15#include "irq.h" 16#include "irq.h"
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index bf95f55b82b0..4b4b28969a65 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -242,17 +242,6 @@ long compat_sys_fallocate(int fd, int mode,
242long compat_sys_sched_rr_get_interval(compat_pid_t pid, 242long compat_sys_sched_rr_get_interval(compat_pid_t pid,
243 struct compat_timespec __user *interval); 243 struct compat_timespec __user *interval);
244 244
245/* Versions of compat functions that differ from generic Linux. */
246struct compat_msgbuf;
247long tile_compat_sys_msgsnd(int msqid,
248 struct compat_msgbuf __user *msgp,
249 size_t msgsz, int msgflg);
250long tile_compat_sys_msgrcv(int msqid,
251 struct compat_msgbuf __user *msgp,
252 size_t msgsz, long msgtyp, int msgflg);
253long tile_compat_sys_ptrace(compat_long_t request, compat_long_t pid,
254 compat_long_t addr, compat_long_t data);
255
256/* Tilera Linux syscalls that don't have "compat" versions. */ 245/* Tilera Linux syscalls that don't have "compat" versions. */
257#define compat_sys_flush_cache sys_flush_cache 246#define compat_sys_flush_cache sys_flush_cache
258 247
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index bf5e9d70266c..d67459b9ac2a 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -16,7 +16,6 @@
16#define __SYSCALL_COMPAT 16#define __SYSCALL_COMPAT
17 17
18#include <linux/compat.h> 18#include <linux/compat.h>
19#include <linux/msg.h>
20#include <linux/syscalls.h> 19#include <linux/syscalls.h>
21#include <linux/kdev_t.h> 20#include <linux/kdev_t.h>
22#include <linux/fs.h> 21#include <linux/fs.h>
@@ -95,52 +94,10 @@ long compat_sys_sched_rr_get_interval(compat_pid_t pid,
95 return ret; 94 return ret;
96} 95}
97 96
98/*
99 * The usual compat_sys_msgsnd() and _msgrcv() seem to be assuming
100 * some different calling convention than our normal 32-bit tile code.
101 */
102
103/* Already defined in ipc/compat.c, but we need it here. */
104struct compat_msgbuf {
105 compat_long_t mtype;
106 char mtext[1];
107};
108
109long tile_compat_sys_msgsnd(int msqid,
110 struct compat_msgbuf __user *msgp,
111 size_t msgsz, int msgflg)
112{
113 compat_long_t mtype;
114
115 if (get_user(mtype, &msgp->mtype))
116 return -EFAULT;
117 return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
118}
119
120long tile_compat_sys_msgrcv(int msqid,
121 struct compat_msgbuf __user *msgp,
122 size_t msgsz, long msgtyp, int msgflg)
123{
124 long err, mtype;
125
126 err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
127 if (err < 0)
128 goto out;
129
130 if (put_user(mtype, &msgp->mtype))
131 err = -EFAULT;
132 out:
133 return err;
134}
135
136/* Provide the compat syscall number to call mapping. */ 97/* Provide the compat syscall number to call mapping. */
137#undef __SYSCALL 98#undef __SYSCALL
138#define __SYSCALL(nr, call) [nr] = (call), 99#define __SYSCALL(nr, call) [nr] = (call),
139 100
140/* The generic versions of these don't work for Tile. */
141#define compat_sys_msgrcv tile_compat_sys_msgrcv
142#define compat_sys_msgsnd tile_compat_sys_msgsnd
143
144/* See comments in sys.c */ 101/* See comments in sys.c */
145#define compat_sys_fadvise64_64 sys32_fadvise64_64 102#define compat_sys_fadvise64_64 sys32_fadvise64_64
146#define compat_sys_readahead sys32_readahead 103#define compat_sys_readahead sys32_readahead
diff --git a/arch/unicore32/boot/Makefile b/arch/unicore32/boot/Makefile
index 79e5f88845d9..ec7fb70b412b 100644
--- a/arch/unicore32/boot/Makefile
+++ b/arch/unicore32/boot/Makefile
@@ -11,8 +11,6 @@
11# Copyright (C) 2001~2010 GUAN Xue-tao 11# Copyright (C) 2001~2010 GUAN Xue-tao
12# 12#
13 13
14MKIMAGE := $(srctree)/scripts/mkuboot.sh
15
16targets := Image zImage uImage 14targets := Image zImage uImage
17 15
18$(obj)/Image: vmlinux FORCE 16$(obj)/Image: vmlinux FORCE
@@ -26,14 +24,8 @@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE
26 $(call if_changed,objcopy) 24 $(call if_changed,objcopy)
27 @echo ' Kernel: $@ is ready' 25 @echo ' Kernel: $@ is ready'
28 26
29quiet_cmd_uimage = UIMAGE $@ 27UIMAGE_ARCH = unicore
30 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A unicore -O linux -T kernel \ 28UIMAGE_LOADADDR = 0x0
31 -C none -a $(LOADADDR) -e $(STARTADDR) \
32 -n 'Linux-$(KERNELRELEASE)' -d $< $@
33
34$(obj)/uImage: LOADADDR=0x0
35
36$(obj)/uImage: STARTADDR=$(LOADADDR)
37 29
38$(obj)/uImage: $(obj)/zImage FORCE 30$(obj)/uImage: $(obj)/zImage FORCE
39 $(call if_changed,uimage) 31 $(call if_changed,uimage)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 3ad653de7100..1d14cc6b79ad 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -69,7 +69,6 @@ config X86
69 select HAVE_ARCH_JUMP_LABEL 69 select HAVE_ARCH_JUMP_LABEL
70 select HAVE_TEXT_POKE_SMP 70 select HAVE_TEXT_POKE_SMP
71 select HAVE_GENERIC_HARDIRQS 71 select HAVE_GENERIC_HARDIRQS
72 select HAVE_SPARSE_IRQ
73 select SPARSE_IRQ 72 select SPARSE_IRQ
74 select GENERIC_FIND_FIRST_BIT 73 select GENERIC_FIND_FIRST_BIT
75 select GENERIC_IRQ_PROBE 74 select GENERIC_IRQ_PROBE
@@ -2164,9 +2163,9 @@ config IA32_EMULATION
2164 depends on X86_64 2163 depends on X86_64
2165 select COMPAT_BINFMT_ELF 2164 select COMPAT_BINFMT_ELF
2166 ---help--- 2165 ---help---
2167 Include code to run 32-bit programs under a 64-bit kernel. You should 2166 Include code to run legacy 32-bit programs under a
2168 likely turn this on, unless you're 100% sure that you don't have any 2167 64-bit kernel. You should likely turn this on, unless you're
2169 32-bit programs left. 2168 100% sure that you don't have any 32-bit programs left.
2170 2169
2171config IA32_AOUT 2170config IA32_AOUT
2172 tristate "IA32 a.out support" 2171 tristate "IA32 a.out support"
@@ -2174,9 +2173,23 @@ config IA32_AOUT
2174 ---help--- 2173 ---help---
2175 Support old a.out binaries in the 32bit emulation. 2174 Support old a.out binaries in the 32bit emulation.
2176 2175
2176config X86_X32
2177 bool "x32 ABI for 64-bit mode (EXPERIMENTAL)"
2178 depends on X86_64 && IA32_EMULATION && EXPERIMENTAL
2179 ---help---
2180 Include code to run binaries for the x32 native 32-bit ABI
2181 for 64-bit processors. An x32 process gets access to the
2182 full 64-bit register file and wide data path while leaving
2183 pointers at 32 bits for smaller memory footprint.
2184
2185 You will need a recent binutils (2.22 or later) with
2186 elf32_x86_64 support enabled to compile a kernel with this
2187 option set.
2188
2177config COMPAT 2189config COMPAT
2178 def_bool y 2190 def_bool y
2179 depends on IA32_EMULATION 2191 depends on IA32_EMULATION || X86_X32
2192 select ARCH_WANT_OLD_COMPAT_IPC
2180 2193
2181config COMPAT_FOR_U64_ALIGNMENT 2194config COMPAT_FOR_U64_ALIGNMENT
2182 def_bool COMPAT 2195 def_bool COMPAT
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 209ba1294592..968dbe24a255 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -82,6 +82,22 @@ ifdef CONFIG_CC_STACKPROTECTOR
82 endif 82 endif
83endif 83endif
84 84
85ifdef CONFIG_X86_X32
86 x32_ld_ok := $(call try-run,\
87 /bin/echo -e '1: .quad 1b' | \
88 $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" - && \
89 $(OBJCOPY) -O elf32-x86-64 "$$TMP" "$$TMPO" && \
90 $(LD) -m elf32_x86_64 "$$TMPO" -o "$$TMP",y,n)
91 ifeq ($(x32_ld_ok),y)
92 CONFIG_X86_X32_ABI := y
93 KBUILD_AFLAGS += -DCONFIG_X86_X32_ABI
94 KBUILD_CFLAGS += -DCONFIG_X86_X32_ABI
95 else
96 $(warning CONFIG_X86_X32 enabled but no binutils support)
97 endif
98endif
99export CONFIG_X86_X32_ABI
100
85# Don't unroll struct assignments with kmemcheck enabled 101# Don't unroll struct assignments with kmemcheck enabled
86ifeq ($(CONFIG_KMEMCHECK),y) 102ifeq ($(CONFIG_KMEMCHECK),y)
87 KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy) 103 KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 2bf18059fbea..119db67dcb03 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -15,23 +15,28 @@ CONFIG_CPUSETS=y
15CONFIG_CGROUP_CPUACCT=y 15CONFIG_CGROUP_CPUACCT=y
16CONFIG_RESOURCE_COUNTERS=y 16CONFIG_RESOURCE_COUNTERS=y
17CONFIG_CGROUP_SCHED=y 17CONFIG_CGROUP_SCHED=y
18CONFIG_UTS_NS=y
19CONFIG_IPC_NS=y
20CONFIG_USER_NS=y
21CONFIG_PID_NS=y
22CONFIG_NET_NS=y
23CONFIG_BLK_DEV_INITRD=y 18CONFIG_BLK_DEV_INITRD=y
24CONFIG_KALLSYMS_EXTRA_PASS=y
25# CONFIG_COMPAT_BRK is not set 19# CONFIG_COMPAT_BRK is not set
26CONFIG_PROFILING=y 20CONFIG_PROFILING=y
27CONFIG_KPROBES=y 21CONFIG_KPROBES=y
28CONFIG_MODULES=y 22CONFIG_MODULES=y
29CONFIG_MODULE_UNLOAD=y 23CONFIG_MODULE_UNLOAD=y
30CONFIG_MODULE_FORCE_UNLOAD=y 24CONFIG_MODULE_FORCE_UNLOAD=y
25CONFIG_PARTITION_ADVANCED=y
26CONFIG_OSF_PARTITION=y
27CONFIG_AMIGA_PARTITION=y
28CONFIG_MAC_PARTITION=y
29CONFIG_BSD_DISKLABEL=y
30CONFIG_MINIX_SUBPARTITION=y
31CONFIG_SOLARIS_X86_PARTITION=y
32CONFIG_UNIXWARE_DISKLABEL=y
33CONFIG_SGI_PARTITION=y
34CONFIG_SUN_PARTITION=y
35CONFIG_KARMA_PARTITION=y
36CONFIG_EFI_PARTITION=y
31CONFIG_NO_HZ=y 37CONFIG_NO_HZ=y
32CONFIG_HIGH_RES_TIMERS=y 38CONFIG_HIGH_RES_TIMERS=y
33CONFIG_SMP=y 39CONFIG_SMP=y
34CONFIG_SPARSE_IRQ=y
35CONFIG_X86_GENERIC=y 40CONFIG_X86_GENERIC=y
36CONFIG_HPET_TIMER=y 41CONFIG_HPET_TIMER=y
37CONFIG_SCHED_SMT=y 42CONFIG_SCHED_SMT=y
@@ -51,14 +56,12 @@ CONFIG_HZ_1000=y
51CONFIG_KEXEC=y 56CONFIG_KEXEC=y
52CONFIG_CRASH_DUMP=y 57CONFIG_CRASH_DUMP=y
53# CONFIG_COMPAT_VDSO is not set 58# CONFIG_COMPAT_VDSO is not set
54CONFIG_PM=y 59CONFIG_HIBERNATION=y
55CONFIG_PM_DEBUG=y 60CONFIG_PM_DEBUG=y
56CONFIG_PM_TRACE_RTC=y 61CONFIG_PM_TRACE_RTC=y
57CONFIG_HIBERNATION=y
58CONFIG_ACPI_PROCFS=y 62CONFIG_ACPI_PROCFS=y
59CONFIG_ACPI_DOCK=y 63CONFIG_ACPI_DOCK=y
60CONFIG_CPU_FREQ=y 64CONFIG_CPU_FREQ=y
61CONFIG_CPU_FREQ_DEBUG=y
62# CONFIG_CPU_FREQ_STAT is not set 65# CONFIG_CPU_FREQ_STAT is not set
63CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y 66CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
64CONFIG_CPU_FREQ_GOV_PERFORMANCE=y 67CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
@@ -69,7 +72,6 @@ CONFIG_PCI_MSI=y
69CONFIG_PCCARD=y 72CONFIG_PCCARD=y
70CONFIG_YENTA=y 73CONFIG_YENTA=y
71CONFIG_HOTPLUG_PCI=y 74CONFIG_HOTPLUG_PCI=y
72CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
73CONFIG_BINFMT_MISC=y 75CONFIG_BINFMT_MISC=y
74CONFIG_NET=y 76CONFIG_NET=y
75CONFIG_PACKET=y 77CONFIG_PACKET=y
@@ -120,7 +122,6 @@ CONFIG_NF_CONNTRACK_IPV4=y
120CONFIG_IP_NF_IPTABLES=y 122CONFIG_IP_NF_IPTABLES=y
121CONFIG_IP_NF_FILTER=y 123CONFIG_IP_NF_FILTER=y
122CONFIG_IP_NF_TARGET_REJECT=y 124CONFIG_IP_NF_TARGET_REJECT=y
123CONFIG_IP_NF_TARGET_LOG=y
124CONFIG_IP_NF_TARGET_ULOG=y 125CONFIG_IP_NF_TARGET_ULOG=y
125CONFIG_NF_NAT=y 126CONFIG_NF_NAT=y
126CONFIG_IP_NF_TARGET_MASQUERADE=y 127CONFIG_IP_NF_TARGET_MASQUERADE=y
@@ -128,7 +129,6 @@ CONFIG_IP_NF_MANGLE=y
128CONFIG_NF_CONNTRACK_IPV6=y 129CONFIG_NF_CONNTRACK_IPV6=y
129CONFIG_IP6_NF_IPTABLES=y 130CONFIG_IP6_NF_IPTABLES=y
130CONFIG_IP6_NF_MATCH_IPV6HEADER=y 131CONFIG_IP6_NF_MATCH_IPV6HEADER=y
131CONFIG_IP6_NF_TARGET_LOG=y
132CONFIG_IP6_NF_FILTER=y 132CONFIG_IP6_NF_FILTER=y
133CONFIG_IP6_NF_TARGET_REJECT=y 133CONFIG_IP6_NF_TARGET_REJECT=y
134CONFIG_IP6_NF_MANGLE=y 134CONFIG_IP6_NF_MANGLE=y
@@ -169,25 +169,20 @@ CONFIG_DM_ZERO=y
169CONFIG_MACINTOSH_DRIVERS=y 169CONFIG_MACINTOSH_DRIVERS=y
170CONFIG_MAC_EMUMOUSEBTN=y 170CONFIG_MAC_EMUMOUSEBTN=y
171CONFIG_NETDEVICES=y 171CONFIG_NETDEVICES=y
172CONFIG_NET_ETHERNET=y 172CONFIG_NETCONSOLE=y
173CONFIG_NET_VENDOR_3COM=y 173CONFIG_BNX2=y
174CONFIG_TIGON3=y
174CONFIG_NET_TULIP=y 175CONFIG_NET_TULIP=y
175CONFIG_NET_PCI=y
176CONFIG_FORCEDETH=y
177CONFIG_E100=y 176CONFIG_E100=y
177CONFIG_E1000=y
178CONFIG_E1000E=y
179CONFIG_SKY2=y
178CONFIG_NE2K_PCI=y 180CONFIG_NE2K_PCI=y
181CONFIG_FORCEDETH=y
179CONFIG_8139TOO=y 182CONFIG_8139TOO=y
180# CONFIG_8139TOO_PIO is not set 183# CONFIG_8139TOO_PIO is not set
181CONFIG_E1000=y
182CONFIG_E1000E=y
183CONFIG_R8169=y 184CONFIG_R8169=y
184CONFIG_SKY2=y
185CONFIG_TIGON3=y
186CONFIG_BNX2=y
187CONFIG_TR=y
188CONFIG_NET_PCMCIA=y
189CONFIG_FDDI=y 185CONFIG_FDDI=y
190CONFIG_NETCONSOLE=y
191CONFIG_INPUT_POLLDEV=y 186CONFIG_INPUT_POLLDEV=y
192# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 187# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
193CONFIG_INPUT_EVDEV=y 188CONFIG_INPUT_EVDEV=y
@@ -196,6 +191,7 @@ CONFIG_INPUT_TABLET=y
196CONFIG_INPUT_TOUCHSCREEN=y 191CONFIG_INPUT_TOUCHSCREEN=y
197CONFIG_INPUT_MISC=y 192CONFIG_INPUT_MISC=y
198CONFIG_VT_HW_CONSOLE_BINDING=y 193CONFIG_VT_HW_CONSOLE_BINDING=y
194# CONFIG_LEGACY_PTYS is not set
199CONFIG_SERIAL_NONSTANDARD=y 195CONFIG_SERIAL_NONSTANDARD=y
200CONFIG_SERIAL_8250=y 196CONFIG_SERIAL_8250=y
201CONFIG_SERIAL_8250_CONSOLE=y 197CONFIG_SERIAL_8250_CONSOLE=y
@@ -205,7 +201,6 @@ CONFIG_SERIAL_8250_MANY_PORTS=y
205CONFIG_SERIAL_8250_SHARE_IRQ=y 201CONFIG_SERIAL_8250_SHARE_IRQ=y
206CONFIG_SERIAL_8250_DETECT_IRQ=y 202CONFIG_SERIAL_8250_DETECT_IRQ=y
207CONFIG_SERIAL_8250_RSA=y 203CONFIG_SERIAL_8250_RSA=y
208# CONFIG_LEGACY_PTYS is not set
209CONFIG_HW_RANDOM=y 204CONFIG_HW_RANDOM=y
210CONFIG_NVRAM=y 205CONFIG_NVRAM=y
211CONFIG_HPET=y 206CONFIG_HPET=y
@@ -220,7 +215,6 @@ CONFIG_DRM_I915=y
220CONFIG_FB_MODE_HELPERS=y 215CONFIG_FB_MODE_HELPERS=y
221CONFIG_FB_TILEBLITTING=y 216CONFIG_FB_TILEBLITTING=y
222CONFIG_FB_EFI=y 217CONFIG_FB_EFI=y
223CONFIG_BACKLIGHT_LCD_SUPPORT=y
224# CONFIG_LCD_CLASS_DEVICE is not set 218# CONFIG_LCD_CLASS_DEVICE is not set
225CONFIG_VGACON_SOFT_SCROLLBACK=y 219CONFIG_VGACON_SOFT_SCROLLBACK=y
226CONFIG_LOGO=y 220CONFIG_LOGO=y
@@ -283,7 +277,6 @@ CONFIG_ZISOFS=y
283CONFIG_MSDOS_FS=y 277CONFIG_MSDOS_FS=y
284CONFIG_VFAT_FS=y 278CONFIG_VFAT_FS=y
285CONFIG_PROC_KCORE=y 279CONFIG_PROC_KCORE=y
286CONFIG_TMPFS=y
287CONFIG_TMPFS_POSIX_ACL=y 280CONFIG_TMPFS_POSIX_ACL=y
288CONFIG_HUGETLBFS=y 281CONFIG_HUGETLBFS=y
289CONFIG_NFS_FS=y 282CONFIG_NFS_FS=y
@@ -291,18 +284,6 @@ CONFIG_NFS_V3=y
291CONFIG_NFS_V3_ACL=y 284CONFIG_NFS_V3_ACL=y
292CONFIG_NFS_V4=y 285CONFIG_NFS_V4=y
293CONFIG_ROOT_NFS=y 286CONFIG_ROOT_NFS=y
294CONFIG_PARTITION_ADVANCED=y
295CONFIG_OSF_PARTITION=y
296CONFIG_AMIGA_PARTITION=y
297CONFIG_MAC_PARTITION=y
298CONFIG_BSD_DISKLABEL=y
299CONFIG_MINIX_SUBPARTITION=y
300CONFIG_SOLARIS_X86_PARTITION=y
301CONFIG_UNIXWARE_DISKLABEL=y
302CONFIG_SGI_PARTITION=y
303CONFIG_SUN_PARTITION=y
304CONFIG_KARMA_PARTITION=y
305CONFIG_EFI_PARTITION=y
306CONFIG_NLS_DEFAULT="utf8" 287CONFIG_NLS_DEFAULT="utf8"
307CONFIG_NLS_CODEPAGE_437=y 288CONFIG_NLS_CODEPAGE_437=y
308CONFIG_NLS_ASCII=y 289CONFIG_NLS_ASCII=y
@@ -317,13 +298,12 @@ CONFIG_DEBUG_KERNEL=y
317# CONFIG_SCHED_DEBUG is not set 298# CONFIG_SCHED_DEBUG is not set
318CONFIG_SCHEDSTATS=y 299CONFIG_SCHEDSTATS=y
319CONFIG_TIMER_STATS=y 300CONFIG_TIMER_STATS=y
320# CONFIG_RCU_CPU_STALL_DETECTOR is not set 301CONFIG_DEBUG_STACK_USAGE=y
321CONFIG_SYSCTL_SYSCALL_CHECK=y 302CONFIG_SYSCTL_SYSCALL_CHECK=y
322CONFIG_BLK_DEV_IO_TRACE=y 303CONFIG_BLK_DEV_IO_TRACE=y
323CONFIG_PROVIDE_OHCI1394_DMA_INIT=y 304CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
324CONFIG_EARLY_PRINTK_DBGP=y 305CONFIG_EARLY_PRINTK_DBGP=y
325CONFIG_DEBUG_STACKOVERFLOW=y 306CONFIG_DEBUG_STACKOVERFLOW=y
326CONFIG_DEBUG_STACK_USAGE=y
327# CONFIG_DEBUG_RODATA_TEST is not set 307# CONFIG_DEBUG_RODATA_TEST is not set
328CONFIG_DEBUG_NX_TEST=m 308CONFIG_DEBUG_NX_TEST=m
329CONFIG_DEBUG_BOOT_PARAMS=y 309CONFIG_DEBUG_BOOT_PARAMS=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 058a35b8286c..76eb2903809f 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,4 +1,3 @@
1CONFIG_64BIT=y
2CONFIG_EXPERIMENTAL=y 1CONFIG_EXPERIMENTAL=y
3# CONFIG_LOCALVERSION_AUTO is not set 2# CONFIG_LOCALVERSION_AUTO is not set
4CONFIG_SYSVIPC=y 3CONFIG_SYSVIPC=y
@@ -16,26 +15,29 @@ CONFIG_CPUSETS=y
16CONFIG_CGROUP_CPUACCT=y 15CONFIG_CGROUP_CPUACCT=y
17CONFIG_RESOURCE_COUNTERS=y 16CONFIG_RESOURCE_COUNTERS=y
18CONFIG_CGROUP_SCHED=y 17CONFIG_CGROUP_SCHED=y
19CONFIG_UTS_NS=y
20CONFIG_IPC_NS=y
21CONFIG_USER_NS=y
22CONFIG_PID_NS=y
23CONFIG_NET_NS=y
24CONFIG_BLK_DEV_INITRD=y 18CONFIG_BLK_DEV_INITRD=y
25CONFIG_KALLSYMS_EXTRA_PASS=y
26# CONFIG_COMPAT_BRK is not set 19# CONFIG_COMPAT_BRK is not set
27CONFIG_PROFILING=y 20CONFIG_PROFILING=y
28CONFIG_KPROBES=y 21CONFIG_KPROBES=y
29CONFIG_MODULES=y 22CONFIG_MODULES=y
30CONFIG_MODULE_UNLOAD=y 23CONFIG_MODULE_UNLOAD=y
31CONFIG_MODULE_FORCE_UNLOAD=y 24CONFIG_MODULE_FORCE_UNLOAD=y
25CONFIG_PARTITION_ADVANCED=y
26CONFIG_OSF_PARTITION=y
27CONFIG_AMIGA_PARTITION=y
28CONFIG_MAC_PARTITION=y
29CONFIG_BSD_DISKLABEL=y
30CONFIG_MINIX_SUBPARTITION=y
31CONFIG_SOLARIS_X86_PARTITION=y
32CONFIG_UNIXWARE_DISKLABEL=y
33CONFIG_SGI_PARTITION=y
34CONFIG_SUN_PARTITION=y
35CONFIG_KARMA_PARTITION=y
36CONFIG_EFI_PARTITION=y
32CONFIG_NO_HZ=y 37CONFIG_NO_HZ=y
33CONFIG_HIGH_RES_TIMERS=y 38CONFIG_HIGH_RES_TIMERS=y
34CONFIG_SMP=y 39CONFIG_SMP=y
35CONFIG_SPARSE_IRQ=y
36CONFIG_CALGARY_IOMMU=y 40CONFIG_CALGARY_IOMMU=y
37CONFIG_AMD_IOMMU=y
38CONFIG_AMD_IOMMU_STATS=y
39CONFIG_NR_CPUS=64 41CONFIG_NR_CPUS=64
40CONFIG_SCHED_SMT=y 42CONFIG_SCHED_SMT=y
41CONFIG_PREEMPT_VOLUNTARY=y 43CONFIG_PREEMPT_VOLUNTARY=y
@@ -53,27 +55,22 @@ CONFIG_HZ_1000=y
53CONFIG_KEXEC=y 55CONFIG_KEXEC=y
54CONFIG_CRASH_DUMP=y 56CONFIG_CRASH_DUMP=y
55# CONFIG_COMPAT_VDSO is not set 57# CONFIG_COMPAT_VDSO is not set
56CONFIG_PM=y 58CONFIG_HIBERNATION=y
57CONFIG_PM_DEBUG=y 59CONFIG_PM_DEBUG=y
58CONFIG_PM_TRACE_RTC=y 60CONFIG_PM_TRACE_RTC=y
59CONFIG_HIBERNATION=y
60CONFIG_ACPI_PROCFS=y 61CONFIG_ACPI_PROCFS=y
61CONFIG_ACPI_DOCK=y 62CONFIG_ACPI_DOCK=y
62CONFIG_CPU_FREQ=y 63CONFIG_CPU_FREQ=y
63CONFIG_CPU_FREQ_DEBUG=y
64# CONFIG_CPU_FREQ_STAT is not set 64# CONFIG_CPU_FREQ_STAT is not set
65CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y 65CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
66CONFIG_CPU_FREQ_GOV_PERFORMANCE=y 66CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
67CONFIG_CPU_FREQ_GOV_ONDEMAND=y 67CONFIG_CPU_FREQ_GOV_ONDEMAND=y
68CONFIG_X86_ACPI_CPUFREQ=y 68CONFIG_X86_ACPI_CPUFREQ=y
69CONFIG_PCI_MMCONFIG=y 69CONFIG_PCI_MMCONFIG=y
70CONFIG_INTEL_IOMMU=y
71# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
72CONFIG_PCIEPORTBUS=y 70CONFIG_PCIEPORTBUS=y
73CONFIG_PCCARD=y 71CONFIG_PCCARD=y
74CONFIG_YENTA=y 72CONFIG_YENTA=y
75CONFIG_HOTPLUG_PCI=y 73CONFIG_HOTPLUG_PCI=y
76CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
77CONFIG_BINFMT_MISC=y 74CONFIG_BINFMT_MISC=y
78CONFIG_IA32_EMULATION=y 75CONFIG_IA32_EMULATION=y
79CONFIG_NET=y 76CONFIG_NET=y
@@ -125,7 +122,6 @@ CONFIG_NF_CONNTRACK_IPV4=y
125CONFIG_IP_NF_IPTABLES=y 122CONFIG_IP_NF_IPTABLES=y
126CONFIG_IP_NF_FILTER=y 123CONFIG_IP_NF_FILTER=y
127CONFIG_IP_NF_TARGET_REJECT=y 124CONFIG_IP_NF_TARGET_REJECT=y
128CONFIG_IP_NF_TARGET_LOG=y
129CONFIG_IP_NF_TARGET_ULOG=y 125CONFIG_IP_NF_TARGET_ULOG=y
130CONFIG_NF_NAT=y 126CONFIG_NF_NAT=y
131CONFIG_IP_NF_TARGET_MASQUERADE=y 127CONFIG_IP_NF_TARGET_MASQUERADE=y
@@ -133,7 +129,6 @@ CONFIG_IP_NF_MANGLE=y
133CONFIG_NF_CONNTRACK_IPV6=y 129CONFIG_NF_CONNTRACK_IPV6=y
134CONFIG_IP6_NF_IPTABLES=y 130CONFIG_IP6_NF_IPTABLES=y
135CONFIG_IP6_NF_MATCH_IPV6HEADER=y 131CONFIG_IP6_NF_MATCH_IPV6HEADER=y
136CONFIG_IP6_NF_TARGET_LOG=y
137CONFIG_IP6_NF_FILTER=y 132CONFIG_IP6_NF_FILTER=y
138CONFIG_IP6_NF_TARGET_REJECT=y 133CONFIG_IP6_NF_TARGET_REJECT=y
139CONFIG_IP6_NF_MANGLE=y 134CONFIG_IP6_NF_MANGLE=y
@@ -172,20 +167,15 @@ CONFIG_DM_ZERO=y
172CONFIG_MACINTOSH_DRIVERS=y 167CONFIG_MACINTOSH_DRIVERS=y
173CONFIG_MAC_EMUMOUSEBTN=y 168CONFIG_MAC_EMUMOUSEBTN=y
174CONFIG_NETDEVICES=y 169CONFIG_NETDEVICES=y
175CONFIG_NET_ETHERNET=y 170CONFIG_NETCONSOLE=y
176CONFIG_NET_VENDOR_3COM=y 171CONFIG_TIGON3=y
177CONFIG_NET_TULIP=y 172CONFIG_NET_TULIP=y
178CONFIG_NET_PCI=y
179CONFIG_FORCEDETH=y
180CONFIG_E100=y 173CONFIG_E100=y
181CONFIG_8139TOO=y
182CONFIG_E1000=y 174CONFIG_E1000=y
183CONFIG_SKY2=y 175CONFIG_SKY2=y
184CONFIG_TIGON3=y 176CONFIG_FORCEDETH=y
185CONFIG_TR=y 177CONFIG_8139TOO=y
186CONFIG_NET_PCMCIA=y
187CONFIG_FDDI=y 178CONFIG_FDDI=y
188CONFIG_NETCONSOLE=y
189CONFIG_INPUT_POLLDEV=y 179CONFIG_INPUT_POLLDEV=y
190# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 180# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
191CONFIG_INPUT_EVDEV=y 181CONFIG_INPUT_EVDEV=y
@@ -194,6 +184,7 @@ CONFIG_INPUT_TABLET=y
194CONFIG_INPUT_TOUCHSCREEN=y 184CONFIG_INPUT_TOUCHSCREEN=y
195CONFIG_INPUT_MISC=y 185CONFIG_INPUT_MISC=y
196CONFIG_VT_HW_CONSOLE_BINDING=y 186CONFIG_VT_HW_CONSOLE_BINDING=y
187# CONFIG_LEGACY_PTYS is not set
197CONFIG_SERIAL_NONSTANDARD=y 188CONFIG_SERIAL_NONSTANDARD=y
198CONFIG_SERIAL_8250=y 189CONFIG_SERIAL_8250=y
199CONFIG_SERIAL_8250_CONSOLE=y 190CONFIG_SERIAL_8250_CONSOLE=y
@@ -203,7 +194,6 @@ CONFIG_SERIAL_8250_MANY_PORTS=y
203CONFIG_SERIAL_8250_SHARE_IRQ=y 194CONFIG_SERIAL_8250_SHARE_IRQ=y
204CONFIG_SERIAL_8250_DETECT_IRQ=y 195CONFIG_SERIAL_8250_DETECT_IRQ=y
205CONFIG_SERIAL_8250_RSA=y 196CONFIG_SERIAL_8250_RSA=y
206# CONFIG_LEGACY_PTYS is not set
207CONFIG_HW_RANDOM=y 197CONFIG_HW_RANDOM=y
208# CONFIG_HW_RANDOM_INTEL is not set 198# CONFIG_HW_RANDOM_INTEL is not set
209# CONFIG_HW_RANDOM_AMD is not set 199# CONFIG_HW_RANDOM_AMD is not set
@@ -221,7 +211,6 @@ CONFIG_DRM_I915_KMS=y
221CONFIG_FB_MODE_HELPERS=y 211CONFIG_FB_MODE_HELPERS=y
222CONFIG_FB_TILEBLITTING=y 212CONFIG_FB_TILEBLITTING=y
223CONFIG_FB_EFI=y 213CONFIG_FB_EFI=y
224CONFIG_BACKLIGHT_LCD_SUPPORT=y
225# CONFIG_LCD_CLASS_DEVICE is not set 214# CONFIG_LCD_CLASS_DEVICE is not set
226CONFIG_VGACON_SOFT_SCROLLBACK=y 215CONFIG_VGACON_SOFT_SCROLLBACK=y
227CONFIG_LOGO=y 216CONFIG_LOGO=y
@@ -268,6 +257,10 @@ CONFIG_RTC_CLASS=y
268# CONFIG_RTC_HCTOSYS is not set 257# CONFIG_RTC_HCTOSYS is not set
269CONFIG_DMADEVICES=y 258CONFIG_DMADEVICES=y
270CONFIG_EEEPC_LAPTOP=y 259CONFIG_EEEPC_LAPTOP=y
260CONFIG_AMD_IOMMU=y
261CONFIG_AMD_IOMMU_STATS=y
262CONFIG_INTEL_IOMMU=y
263# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
271CONFIG_EFI_VARS=y 264CONFIG_EFI_VARS=y
272CONFIG_EXT3_FS=y 265CONFIG_EXT3_FS=y
273# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 266# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -284,7 +277,6 @@ CONFIG_ZISOFS=y
284CONFIG_MSDOS_FS=y 277CONFIG_MSDOS_FS=y
285CONFIG_VFAT_FS=y 278CONFIG_VFAT_FS=y
286CONFIG_PROC_KCORE=y 279CONFIG_PROC_KCORE=y
287CONFIG_TMPFS=y
288CONFIG_TMPFS_POSIX_ACL=y 280CONFIG_TMPFS_POSIX_ACL=y
289CONFIG_HUGETLBFS=y 281CONFIG_HUGETLBFS=y
290CONFIG_NFS_FS=y 282CONFIG_NFS_FS=y
@@ -292,18 +284,6 @@ CONFIG_NFS_V3=y
292CONFIG_NFS_V3_ACL=y 284CONFIG_NFS_V3_ACL=y
293CONFIG_NFS_V4=y 285CONFIG_NFS_V4=y
294CONFIG_ROOT_NFS=y 286CONFIG_ROOT_NFS=y
295CONFIG_PARTITION_ADVANCED=y
296CONFIG_OSF_PARTITION=y
297CONFIG_AMIGA_PARTITION=y
298CONFIG_MAC_PARTITION=y
299CONFIG_BSD_DISKLABEL=y
300CONFIG_MINIX_SUBPARTITION=y
301CONFIG_SOLARIS_X86_PARTITION=y
302CONFIG_UNIXWARE_DISKLABEL=y
303CONFIG_SGI_PARTITION=y
304CONFIG_SUN_PARTITION=y
305CONFIG_KARMA_PARTITION=y
306CONFIG_EFI_PARTITION=y
307CONFIG_NLS_DEFAULT="utf8" 287CONFIG_NLS_DEFAULT="utf8"
308CONFIG_NLS_CODEPAGE_437=y 288CONFIG_NLS_CODEPAGE_437=y
309CONFIG_NLS_ASCII=y 289CONFIG_NLS_ASCII=y
@@ -317,13 +297,12 @@ CONFIG_DEBUG_KERNEL=y
317# CONFIG_SCHED_DEBUG is not set 297# CONFIG_SCHED_DEBUG is not set
318CONFIG_SCHEDSTATS=y 298CONFIG_SCHEDSTATS=y
319CONFIG_TIMER_STATS=y 299CONFIG_TIMER_STATS=y
320# CONFIG_RCU_CPU_STALL_DETECTOR is not set 300CONFIG_DEBUG_STACK_USAGE=y
321CONFIG_SYSCTL_SYSCALL_CHECK=y 301CONFIG_SYSCTL_SYSCALL_CHECK=y
322CONFIG_BLK_DEV_IO_TRACE=y 302CONFIG_BLK_DEV_IO_TRACE=y
323CONFIG_PROVIDE_OHCI1394_DMA_INIT=y 303CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
324CONFIG_EARLY_PRINTK_DBGP=y 304CONFIG_EARLY_PRINTK_DBGP=y
325CONFIG_DEBUG_STACKOVERFLOW=y 305CONFIG_DEBUG_STACKOVERFLOW=y
326CONFIG_DEBUG_STACK_USAGE=y
327# CONFIG_DEBUG_RODATA_TEST is not set 306# CONFIG_DEBUG_RODATA_TEST is not set
328CONFIG_DEBUG_NX_TEST=m 307CONFIG_DEBUG_NX_TEST=m
329CONFIG_DEBUG_BOOT_PARAMS=y 308CONFIG_DEBUG_BOOT_PARAMS=y
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 5563ba1cf513..a69245ba27e3 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -12,10 +12,8 @@
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/signal.h>
16#include <linux/errno.h> 15#include <linux/errno.h>
17#include <linux/wait.h> 16#include <linux/wait.h>
18#include <linux/ptrace.h>
19#include <linux/unistd.h> 17#include <linux/unistd.h>
20#include <linux/stddef.h> 18#include <linux/stddef.h>
21#include <linux/personality.h> 19#include <linux/personality.h>
@@ -32,20 +30,15 @@
32#include <asm/proto.h> 30#include <asm/proto.h>
33#include <asm/vdso.h> 31#include <asm/vdso.h>
34#include <asm/sigframe.h> 32#include <asm/sigframe.h>
33#include <asm/sighandling.h>
35#include <asm/sys_ia32.h> 34#include <asm/sys_ia32.h>
36 35
37#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 36#define FIX_EFLAGS __FIX_EFLAGS
38
39#define FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
40 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
41 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
42 X86_EFLAGS_CF)
43
44void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
45 37
46int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) 38int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
47{ 39{
48 int err = 0; 40 int err = 0;
41 bool ia32 = is_ia32_task();
49 42
50 if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) 43 if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
51 return -EFAULT; 44 return -EFAULT;
@@ -75,8 +68,13 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
75 case __SI_FAULT >> 16: 68 case __SI_FAULT >> 16:
76 break; 69 break;
77 case __SI_CHLD >> 16: 70 case __SI_CHLD >> 16:
78 put_user_ex(from->si_utime, &to->si_utime); 71 if (ia32) {
79 put_user_ex(from->si_stime, &to->si_stime); 72 put_user_ex(from->si_utime, &to->si_utime);
73 put_user_ex(from->si_stime, &to->si_stime);
74 } else {
75 put_user_ex(from->si_utime, &to->_sifields._sigchld_x32._utime);
76 put_user_ex(from->si_stime, &to->_sifields._sigchld_x32._stime);
77 }
80 put_user_ex(from->si_status, &to->si_status); 78 put_user_ex(from->si_status, &to->si_status);
81 /* FALL THROUGH */ 79 /* FALL THROUGH */
82 default: 80 default:
@@ -348,7 +346,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
348 put_user_ex(regs->dx, &sc->dx); 346 put_user_ex(regs->dx, &sc->dx);
349 put_user_ex(regs->cx, &sc->cx); 347 put_user_ex(regs->cx, &sc->cx);
350 put_user_ex(regs->ax, &sc->ax); 348 put_user_ex(regs->ax, &sc->ax);
351 put_user_ex(current->thread.trap_no, &sc->trapno); 349 put_user_ex(current->thread.trap_nr, &sc->trapno);
352 put_user_ex(current->thread.error_code, &sc->err); 350 put_user_ex(current->thread.error_code, &sc->err);
353 put_user_ex(regs->ip, &sc->ip); 351 put_user_ex(regs->ip, &sc->ip);
354 put_user_ex(regs->cs, (unsigned int __user *)&sc->cs); 352 put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index f6f5c53dc903..aec2202a596c 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -287,46 +287,6 @@ asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
287 return ret; 287 return ret;
288} 288}
289 289
290asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
291 compat_sigset_t __user *oset,
292 unsigned int sigsetsize)
293{
294 sigset_t s;
295 compat_sigset_t s32;
296 int ret;
297 mm_segment_t old_fs = get_fs();
298
299 if (set) {
300 if (copy_from_user(&s32, set, sizeof(compat_sigset_t)))
301 return -EFAULT;
302 switch (_NSIG_WORDS) {
303 case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
304 case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
305 case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
306 case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
307 }
308 }
309 set_fs(KERNEL_DS);
310 ret = sys_rt_sigprocmask(how,
311 set ? (sigset_t __user *)&s : NULL,
312 oset ? (sigset_t __user *)&s : NULL,
313 sigsetsize);
314 set_fs(old_fs);
315 if (ret)
316 return ret;
317 if (oset) {
318 switch (_NSIG_WORDS) {
319 case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
320 case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
321 case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
322 case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
323 }
324 if (copy_to_user(oset, &s32, sizeof(compat_sigset_t)))
325 return -EFAULT;
326 }
327 return 0;
328}
329
330asmlinkage long sys32_alarm(unsigned int seconds) 290asmlinkage long sys32_alarm(unsigned int seconds)
331{ 291{
332 return alarm_setitimer(seconds); 292 return alarm_setitimer(seconds);
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index b57e6a43a37a..f9c0d3ba9e84 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -14,6 +14,7 @@ header-y += msr.h
14header-y += mtrr.h 14header-y += mtrr.h
15header-y += posix_types_32.h 15header-y += posix_types_32.h
16header-y += posix_types_64.h 16header-y += posix_types_64.h
17header-y += posix_types_x32.h
17header-y += prctl.h 18header-y += prctl.h
18header-y += processor-flags.h 19header-y += processor-flags.h
19header-y += ptrace-abi.h 20header-y += ptrace-abi.h
@@ -24,3 +25,4 @@ header-y += vsyscall.h
24 25
25genhdr-y += unistd_32.h 26genhdr-y += unistd_32.h
26genhdr-y += unistd_64.h 27genhdr-y += unistd_64.h
28genhdr-y += unistd_x32.h
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 4b2caeefe1a2..d85410171260 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -534,7 +534,7 @@ static inline unsigned int read_apic_id(void)
534 534
535static inline int default_apic_id_valid(int apicid) 535static inline int default_apic_id_valid(int apicid)
536{ 536{
537 return x2apic_mode || (apicid < 255); 537 return (apicid < 255);
538} 538}
539 539
540extern void default_setup_apic_routing(void); 540extern void default_setup_apic_routing(void);
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 30d737ef2a42..d6805798d6fc 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -6,7 +6,9 @@
6 */ 6 */
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/sched.h> 8#include <linux/sched.h>
9#include <asm/processor.h>
9#include <asm/user32.h> 10#include <asm/user32.h>
11#include <asm/unistd.h>
10 12
11#define COMPAT_USER_HZ 100 13#define COMPAT_USER_HZ 100
12#define COMPAT_UTS_MACHINE "i686\0\0" 14#define COMPAT_UTS_MACHINE "i686\0\0"
@@ -186,7 +188,20 @@ struct compat_shmid64_ds {
186/* 188/*
187 * The type of struct elf_prstatus.pr_reg in compatible core dumps. 189 * The type of struct elf_prstatus.pr_reg in compatible core dumps.
188 */ 190 */
191#ifdef CONFIG_X86_X32_ABI
192typedef struct user_regs_struct compat_elf_gregset_t;
193
194#define PR_REG_SIZE(S) (test_thread_flag(TIF_IA32) ? 68 : 216)
195#define PRSTATUS_SIZE(S) (test_thread_flag(TIF_IA32) ? 144 : 296)
196#define SET_PR_FPVALID(S,V) \
197 do { *(int *) (((void *) &((S)->pr_reg)) + PR_REG_SIZE(0)) = (V); } \
198 while (0)
199
200#define COMPAT_USE_64BIT_TIME \
201 (!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT))
202#else
189typedef struct user_regs_struct32 compat_elf_gregset_t; 203typedef struct user_regs_struct32 compat_elf_gregset_t;
204#endif
190 205
191/* 206/*
192 * A pointer passed in from user mode. This should not 207 * A pointer passed in from user mode. This should not
@@ -208,13 +223,30 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
208 223
209static inline void __user *arch_compat_alloc_user_space(long len) 224static inline void __user *arch_compat_alloc_user_space(long len)
210{ 225{
211 struct pt_regs *regs = task_pt_regs(current); 226 compat_uptr_t sp;
212 return (void __user *)regs->sp - len; 227
228 if (test_thread_flag(TIF_IA32)) {
229 sp = task_pt_regs(current)->sp;
230 } else {
231 /* -128 for the x32 ABI redzone */
232 sp = percpu_read(old_rsp) - 128;
233 }
234
235 return (void __user *)round_down(sp - len, 16);
236}
237
238static inline bool is_x32_task(void)
239{
240#ifdef CONFIG_X86_X32_ABI
241 if (task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)
242 return true;
243#endif
244 return false;
213} 245}
214 246
215static inline int is_compat_task(void) 247static inline bool is_compat_task(void)
216{ 248{
217 return current_thread_info()->status & TS_COMPAT; 249 return is_ia32_task() || is_x32_task();
218} 250}
219 251
220#endif /* _ASM_X86_COMPAT_H */ 252#endif /* _ASM_X86_COMPAT_H */
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index f27f79abe021..5939f44fe0c0 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -155,7 +155,12 @@ do { \
155#define elf_check_arch(x) \ 155#define elf_check_arch(x) \
156 ((x)->e_machine == EM_X86_64) 156 ((x)->e_machine == EM_X86_64)
157 157
158#define compat_elf_check_arch(x) elf_check_arch_ia32(x) 158#define compat_elf_check_arch(x) \
159 (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
160
161#if __USER32_DS != __USER_DS
162# error "The following code assumes __USER32_DS == __USER_DS"
163#endif
159 164
160static inline void elf_common_init(struct thread_struct *t, 165static inline void elf_common_init(struct thread_struct *t,
161 struct pt_regs *regs, const u16 ds) 166 struct pt_regs *regs, const u16 ds)
@@ -178,8 +183,9 @@ static inline void elf_common_init(struct thread_struct *t,
178void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp); 183void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp);
179#define compat_start_thread start_thread_ia32 184#define compat_start_thread start_thread_ia32
180 185
181void set_personality_ia32(void); 186void set_personality_ia32(bool);
182#define COMPAT_SET_PERSONALITY(ex) set_personality_ia32() 187#define COMPAT_SET_PERSONALITY(ex) \
188 set_personality_ia32((ex).e_machine == EM_X86_64)
183 189
184#define COMPAT_ELF_PLATFORM ("i686") 190#define COMPAT_ELF_PLATFORM ("i686")
185 191
@@ -286,7 +292,7 @@ do { \
286#define VDSO_HIGH_BASE 0xffffe000U /* CONFIG_COMPAT_VDSO address */ 292#define VDSO_HIGH_BASE 0xffffe000U /* CONFIG_COMPAT_VDSO address */
287 293
288/* 1GB for 64bit, 8MB for 32bit */ 294/* 1GB for 64bit, 8MB for 32bit */
289#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff) 295#define STACK_RND_MASK (test_thread_flag(TIF_ADDR32) ? 0x7ff : 0x3fffff)
290 296
291#define ARCH_DLINFO \ 297#define ARCH_DLINFO \
292do { \ 298do { \
@@ -295,9 +301,20 @@ do { \
295 (unsigned long)current->mm->context.vdso); \ 301 (unsigned long)current->mm->context.vdso); \
296} while (0) 302} while (0)
297 303
304#define ARCH_DLINFO_X32 \
305do { \
306 if (vdso_enabled) \
307 NEW_AUX_ENT(AT_SYSINFO_EHDR, \
308 (unsigned long)current->mm->context.vdso); \
309} while (0)
310
298#define AT_SYSINFO 32 311#define AT_SYSINFO 32
299 312
300#define COMPAT_ARCH_DLINFO ARCH_DLINFO_IA32(sysctl_vsyscall32) 313#define COMPAT_ARCH_DLINFO \
314if (test_thread_flag(TIF_X32)) \
315 ARCH_DLINFO_X32; \
316else \
317 ARCH_DLINFO_IA32(sysctl_vsyscall32)
301 318
302#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) 319#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
303 320
@@ -313,6 +330,8 @@ struct linux_binprm;
313#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 330#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
314extern int arch_setup_additional_pages(struct linux_binprm *bprm, 331extern int arch_setup_additional_pages(struct linux_binprm *bprm,
315 int uses_interp); 332 int uses_interp);
333extern int x32_setup_additional_pages(struct linux_binprm *bprm,
334 int uses_interp);
316 335
317extern int syscall32_setup_pages(struct linux_binprm *, int exstack); 336extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
318#define compat_arch_setup_additional_pages syscall32_setup_pages 337#define compat_arch_setup_additional_pages syscall32_setup_pages
@@ -329,7 +348,7 @@ static inline int mmap_is_ia32(void)
329 return 1; 348 return 1;
330#endif 349#endif
331#ifdef CONFIG_IA32_EMULATION 350#ifdef CONFIG_IA32_EMULATION
332 if (test_thread_flag(TIF_IA32)) 351 if (test_thread_flag(TIF_ADDR32))
333 return 1; 352 return 1;
334#endif 353#endif
335 return 0; 354 return 0;
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index 1f7e62517284..ee52760549f0 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -43,6 +43,15 @@ struct ucontext_ia32 {
43 compat_sigset_t uc_sigmask; /* mask last for extensibility */ 43 compat_sigset_t uc_sigmask; /* mask last for extensibility */
44}; 44};
45 45
46struct ucontext_x32 {
47 unsigned int uc_flags;
48 unsigned int uc_link;
49 stack_ia32_t uc_stack;
50 unsigned int uc__pad0; /* needed for alignment */
51 struct sigcontext uc_mcontext; /* the 64-bit sigcontext type */
52 compat_sigset_t uc_sigmask; /* mask last for extensibility */
53};
54
46/* This matches struct stat64 in glibc2.2, hence the absolutely 55/* This matches struct stat64 in glibc2.2, hence the absolutely
47 * insane amounts of padding around dev_t's. 56 * insane amounts of padding around dev_t's.
48 */ 57 */
@@ -116,6 +125,15 @@ typedef struct compat_siginfo {
116 compat_clock_t _stime; 125 compat_clock_t _stime;
117 } _sigchld; 126 } _sigchld;
118 127
128 /* SIGCHLD (x32 version) */
129 struct {
130 unsigned int _pid; /* which child */
131 unsigned int _uid; /* sender's uid */
132 int _status; /* exit code */
133 compat_s64 _utime;
134 compat_s64 _stime;
135 } _sigchld_x32;
136
119 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ 137 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
120 struct { 138 struct {
121 unsigned int _addr; /* faulting insn/memory ref. */ 139 unsigned int _addr; /* faulting insn/memory ref. */
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index f49253d75710..c5d1785373ed 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -14,6 +14,7 @@ void exit_idle(void);
14#else /* !CONFIG_X86_64 */ 14#else /* !CONFIG_X86_64 */
15static inline void enter_idle(void) { } 15static inline void enter_idle(void) { }
16static inline void exit_idle(void) { } 16static inline void exit_idle(void) { }
17static inline void __exit_idle(void) { }
17#endif /* CONFIG_X86_64 */ 18#endif /* CONFIG_X86_64 */
18 19
19void amd_e400_remove_cpu(int cpu); 20void amd_e400_remove_cpu(int cpu);
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 690d1cc9a877..2c4943de5150 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -21,6 +21,15 @@
21#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15) 21#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15)
22#define IO_APIC_REDIR_MASKED (1 << 16) 22#define IO_APIC_REDIR_MASKED (1 << 16)
23 23
24struct io_apic_ops {
25 void (*init) (void);
26 unsigned int (*read) (unsigned int apic, unsigned int reg);
27 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
28 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
29};
30
31void __init set_io_apic_ops(const struct io_apic_ops *);
32
24/* 33/*
25 * The structure of the IO-APIC: 34 * The structure of the IO-APIC:
26 */ 35 */
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 4365ffdb461f..7e3f17f92c66 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -29,18 +29,18 @@
29 29
30#define MTRR_IOCTL_BASE 'M' 30#define MTRR_IOCTL_BASE 'M'
31 31
32struct mtrr_sentry {
33 unsigned long base; /* Base address */
34 unsigned int size; /* Size of region */
35 unsigned int type; /* Type of region */
36};
37
38/* Warning: this structure has a different order from i386 32/* Warning: this structure has a different order from i386
39 on x86-64. The 32bit emulation code takes care of that. 33 on x86-64. The 32bit emulation code takes care of that.
40 But you need to use this for 64bit, otherwise your X server 34 But you need to use this for 64bit, otherwise your X server
41 will break. */ 35 will break. */
42 36
43#ifdef __i386__ 37#ifdef __i386__
38struct mtrr_sentry {
39 unsigned long base; /* Base address */
40 unsigned int size; /* Size of region */
41 unsigned int type; /* Type of region */
42};
43
44struct mtrr_gentry { 44struct mtrr_gentry {
45 unsigned int regnum; /* Register number */ 45 unsigned int regnum; /* Register number */
46 unsigned long base; /* Base address */ 46 unsigned long base; /* Base address */
@@ -50,12 +50,20 @@ struct mtrr_gentry {
50 50
51#else /* __i386__ */ 51#else /* __i386__ */
52 52
53struct mtrr_sentry {
54 __u64 base; /* Base address */
55 __u32 size; /* Size of region */
56 __u32 type; /* Type of region */
57};
58
53struct mtrr_gentry { 59struct mtrr_gentry {
54 unsigned long base; /* Base address */ 60 __u64 base; /* Base address */
55 unsigned int size; /* Size of region */ 61 __u32 size; /* Size of region */
56 unsigned int regnum; /* Register number */ 62 __u32 regnum; /* Register number */
57 unsigned int type; /* Type of region */ 63 __u32 type; /* Type of region */
64 __u32 _pad; /* Unused */
58}; 65};
66
59#endif /* !__i386__ */ 67#endif /* !__i386__ */
60 68
61struct mtrr_var_range { 69struct mtrr_var_range {
diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h
index bb7133dc155d..3427b7798dbc 100644
--- a/arch/x86/include/asm/posix_types.h
+++ b/arch/x86/include/asm/posix_types.h
@@ -7,7 +7,9 @@
7#else 7#else
8# ifdef __i386__ 8# ifdef __i386__
9# include "posix_types_32.h" 9# include "posix_types_32.h"
10# else 10# elif defined(__LP64__)
11# include "posix_types_64.h" 11# include "posix_types_64.h"
12# else
13# include "posix_types_x32.h"
12# endif 14# endif
13#endif 15#endif
diff --git a/arch/x86/include/asm/posix_types_32.h b/arch/x86/include/asm/posix_types_32.h
index f7d9adf82e53..99f262e04b91 100644
--- a/arch/x86/include/asm/posix_types_32.h
+++ b/arch/x86/include/asm/posix_types_32.h
@@ -7,79 +7,22 @@
7 * assume GCC is being used. 7 * assume GCC is being used.
8 */ 8 */
9 9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned short __kernel_mode_t; 10typedef unsigned short __kernel_mode_t;
11#define __kernel_mode_t __kernel_mode_t
12
12typedef unsigned short __kernel_nlink_t; 13typedef unsigned short __kernel_nlink_t;
13typedef long __kernel_off_t; 14#define __kernel_nlink_t __kernel_nlink_t
14typedef int __kernel_pid_t; 15
15typedef unsigned short __kernel_ipc_pid_t; 16typedef unsigned short __kernel_ipc_pid_t;
17#define __kernel_ipc_pid_t __kernel_ipc_pid_t
18
16typedef unsigned short __kernel_uid_t; 19typedef unsigned short __kernel_uid_t;
17typedef unsigned short __kernel_gid_t; 20typedef unsigned short __kernel_gid_t;
18typedef unsigned int __kernel_size_t; 21#define __kernel_uid_t __kernel_uid_t
19typedef int __kernel_ssize_t;
20typedef int __kernel_ptrdiff_t;
21typedef long __kernel_time_t;
22typedef long __kernel_suseconds_t;
23typedef long __kernel_clock_t;
24typedef int __kernel_timer_t;
25typedef int __kernel_clockid_t;
26typedef int __kernel_daddr_t;
27typedef char * __kernel_caddr_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30typedef unsigned int __kernel_uid32_t;
31typedef unsigned int __kernel_gid32_t;
32 22
33typedef unsigned short __kernel_old_uid_t;
34typedef unsigned short __kernel_old_gid_t;
35typedef unsigned short __kernel_old_dev_t; 23typedef unsigned short __kernel_old_dev_t;
24#define __kernel_old_dev_t __kernel_old_dev_t
36 25
37#ifdef __GNUC__ 26#include <asm-generic/posix_types.h>
38typedef long long __kernel_loff_t;
39#endif
40
41typedef struct {
42 int val[2];
43} __kernel_fsid_t;
44
45#if defined(__KERNEL__)
46
47#undef __FD_SET
48#define __FD_SET(fd,fdsetp) \
49 asm volatile("btsl %1,%0": \
50 "+m" (*(__kernel_fd_set *)(fdsetp)) \
51 : "r" ((int)(fd)))
52
53#undef __FD_CLR
54#define __FD_CLR(fd,fdsetp) \
55 asm volatile("btrl %1,%0": \
56 "+m" (*(__kernel_fd_set *)(fdsetp)) \
57 : "r" ((int) (fd)))
58
59#undef __FD_ISSET
60#define __FD_ISSET(fd,fdsetp) \
61 (__extension__ \
62 ({ \
63 unsigned char __result; \
64 asm volatile("btl %1,%2 ; setb %0" \
65 : "=q" (__result) \
66 : "r" ((int)(fd)), \
67 "m" (*(__kernel_fd_set *)(fdsetp))); \
68 __result; \
69}))
70
71#undef __FD_ZERO
72#define __FD_ZERO(fdsetp) \
73do { \
74 int __d0, __d1; \
75 asm volatile("cld ; rep ; stosl" \
76 : "=m" (*(__kernel_fd_set *)(fdsetp)), \
77 "=&c" (__d0), "=&D" (__d1) \
78 : "a" (0), "1" (__FDSET_LONGS), \
79 "2" ((__kernel_fd_set *)(fdsetp)) \
80 : "memory"); \
81} while (0)
82
83#endif /* defined(__KERNEL__) */
84 27
85#endif /* _ASM_X86_POSIX_TYPES_32_H */ 28#endif /* _ASM_X86_POSIX_TYPES_32_H */
diff --git a/arch/x86/include/asm/posix_types_64.h b/arch/x86/include/asm/posix_types_64.h
index eb8d2d92b63e..cba0c1ead162 100644
--- a/arch/x86/include/asm/posix_types_64.h
+++ b/arch/x86/include/asm/posix_types_64.h
@@ -7,113 +7,13 @@
7 * assume GCC is being used. 7 * assume GCC is being used.
8 */ 8 */
9 9
10typedef unsigned long __kernel_ino_t;
11typedef unsigned int __kernel_mode_t;
12typedef unsigned long __kernel_nlink_t;
13typedef long __kernel_off_t;
14typedef int __kernel_pid_t;
15typedef int __kernel_ipc_pid_t;
16typedef unsigned int __kernel_uid_t;
17typedef unsigned int __kernel_gid_t;
18typedef unsigned long __kernel_size_t;
19typedef long __kernel_ssize_t;
20typedef long __kernel_ptrdiff_t;
21typedef long __kernel_time_t;
22typedef long __kernel_suseconds_t;
23typedef long __kernel_clock_t;
24typedef int __kernel_timer_t;
25typedef int __kernel_clockid_t;
26typedef int __kernel_daddr_t;
27typedef char * __kernel_caddr_t;
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30
31#ifdef __GNUC__
32typedef long long __kernel_loff_t;
33#endif
34
35typedef struct {
36 int val[2];
37} __kernel_fsid_t;
38
39typedef unsigned short __kernel_old_uid_t; 10typedef unsigned short __kernel_old_uid_t;
40typedef unsigned short __kernel_old_gid_t; 11typedef unsigned short __kernel_old_gid_t;
41typedef __kernel_uid_t __kernel_uid32_t; 12#define __kernel_old_uid_t __kernel_old_uid_t
42typedef __kernel_gid_t __kernel_gid32_t;
43 13
44typedef unsigned long __kernel_old_dev_t; 14typedef unsigned long __kernel_old_dev_t;
15#define __kernel_old_dev_t __kernel_old_dev_t
45 16
46#ifdef __KERNEL__ 17#include <asm-generic/posix_types.h>
47
48#undef __FD_SET
49static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
50{
51 unsigned long _tmp = fd / __NFDBITS;
52 unsigned long _rem = fd % __NFDBITS;
53 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
54}
55
56#undef __FD_CLR
57static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
58{
59 unsigned long _tmp = fd / __NFDBITS;
60 unsigned long _rem = fd % __NFDBITS;
61 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
62}
63
64#undef __FD_ISSET
65static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
66{
67 unsigned long _tmp = fd / __NFDBITS;
68 unsigned long _rem = fd % __NFDBITS;
69 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
70}
71
72/*
73 * This will unroll the loop for the normal constant cases (8 or 32 longs,
74 * for 256 and 1024-bit fd_sets respectively)
75 */
76#undef __FD_ZERO
77static inline void __FD_ZERO(__kernel_fd_set *p)
78{
79 unsigned long *tmp = p->fds_bits;
80 int i;
81
82 if (__builtin_constant_p(__FDSET_LONGS)) {
83 switch (__FDSET_LONGS) {
84 case 32:
85 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
86 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
87 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
88 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
89 tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
90 tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
91 tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
92 tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
93 return;
94 case 16:
95 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
96 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
97 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
98 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
99 return;
100 case 8:
101 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
102 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
103 return;
104 case 4:
105 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
106 return;
107 }
108 }
109 i = __FDSET_LONGS;
110 while (i) {
111 i--;
112 *tmp = 0;
113 tmp++;
114 }
115}
116
117#endif /* defined(__KERNEL__) */
118 18
119#endif /* _ASM_X86_POSIX_TYPES_64_H */ 19#endif /* _ASM_X86_POSIX_TYPES_64_H */
diff --git a/arch/x86/include/asm/posix_types_x32.h b/arch/x86/include/asm/posix_types_x32.h
new file mode 100644
index 000000000000..85f9bdafa93c
--- /dev/null
+++ b/arch/x86/include/asm/posix_types_x32.h
@@ -0,0 +1,19 @@
1#ifndef _ASM_X86_POSIX_TYPES_X32_H
2#define _ASM_X86_POSIX_TYPES_X32_H
3
4/*
5 * This file is only used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 *
9 * These types should generally match the ones used by the 64-bit kernel,
10 *
11 */
12
13typedef long long __kernel_long_t;
14typedef unsigned long long __kernel_ulong_t;
15#define __kernel_long_t __kernel_long_t
16
17#include <asm/posix_types_64.h>
18
19#endif /* _ASM_X86_POSIX_TYPES_X32_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index a19542c1685e..7284c9a6a0b5 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -463,7 +463,7 @@ struct thread_struct {
463 unsigned long ptrace_dr7; 463 unsigned long ptrace_dr7;
464 /* Fault info: */ 464 /* Fault info: */
465 unsigned long cr2; 465 unsigned long cr2;
466 unsigned long trap_no; 466 unsigned long trap_nr;
467 unsigned long error_code; 467 unsigned long error_code;
468 /* floating point and extended processor state */ 468 /* floating point and extended processor state */
469 struct fpu fpu; 469 struct fpu fpu;
@@ -873,9 +873,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
873#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ 873#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
874 0xc0000000 : 0xFFFFe000) 874 0xc0000000 : 0xFFFFe000)
875 875
876#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ 876#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
877 IA32_PAGE_OFFSET : TASK_SIZE_MAX) 877 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
878#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \ 878#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
879 IA32_PAGE_OFFSET : TASK_SIZE_MAX) 879 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
880 880
881#define STACK_TOP TASK_SIZE 881#define STACK_TOP TASK_SIZE
@@ -897,6 +897,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
897 897
898#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) 898#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
899extern unsigned long KSTK_ESP(struct task_struct *task); 899extern unsigned long KSTK_ESP(struct task_struct *task);
900
901/*
902 * User space RSP while inside the SYSCALL fast path
903 */
904DECLARE_PER_CPU(unsigned long, old_rsp);
905
900#endif /* CONFIG_X86_64 */ 906#endif /* CONFIG_X86_64 */
901 907
902extern void start_thread(struct pt_regs *regs, unsigned long new_ip, 908extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 35664547125b..dcfde52979c3 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -145,7 +145,6 @@ extern unsigned long
145convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); 145convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
146extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, 146extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
147 int error_code, int si_code); 147 int error_code, int si_code);
148void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
149 148
150extern long syscall_trace_enter(struct pt_regs *); 149extern long syscall_trace_enter(struct pt_regs *);
151extern void syscall_trace_leave(struct pt_regs *); 150extern void syscall_trace_leave(struct pt_regs *);
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 04459d25e66e..4a085383af27 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -230,34 +230,37 @@ struct sigcontext {
230 * User-space might still rely on the old definition: 230 * User-space might still rely on the old definition:
231 */ 231 */
232struct sigcontext { 232struct sigcontext {
233 unsigned long r8; 233 __u64 r8;
234 unsigned long r9; 234 __u64 r9;
235 unsigned long r10; 235 __u64 r10;
236 unsigned long r11; 236 __u64 r11;
237 unsigned long r12; 237 __u64 r12;
238 unsigned long r13; 238 __u64 r13;
239 unsigned long r14; 239 __u64 r14;
240 unsigned long r15; 240 __u64 r15;
241 unsigned long rdi; 241 __u64 rdi;
242 unsigned long rsi; 242 __u64 rsi;
243 unsigned long rbp; 243 __u64 rbp;
244 unsigned long rbx; 244 __u64 rbx;
245 unsigned long rdx; 245 __u64 rdx;
246 unsigned long rax; 246 __u64 rax;
247 unsigned long rcx; 247 __u64 rcx;
248 unsigned long rsp; 248 __u64 rsp;
249 unsigned long rip; 249 __u64 rip;
250 unsigned long eflags; /* RFLAGS */ 250 __u64 eflags; /* RFLAGS */
251 unsigned short cs; 251 __u16 cs;
252 unsigned short gs; 252 __u16 gs;
253 unsigned short fs; 253 __u16 fs;
254 unsigned short __pad0; 254 __u16 __pad0;
255 unsigned long err; 255 __u64 err;
256 unsigned long trapno; 256 __u64 trapno;
257 unsigned long oldmask; 257 __u64 oldmask;
258 unsigned long cr2; 258 __u64 cr2;
259 struct _fpstate __user *fpstate; /* zero when no FPU context */ 259 struct _fpstate __user *fpstate; /* zero when no FPU context */
260 unsigned long reserved1[8]; 260#ifndef __LP64__
261 __u32 __fpstate_pad;
262#endif
263 __u64 reserved1[8];
261}; 264};
262#endif /* !__KERNEL__ */ 265#endif /* !__KERNEL__ */
263 266
diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h
index 4e0fe26d27d3..7c7c27c97daa 100644
--- a/arch/x86/include/asm/sigframe.h
+++ b/arch/x86/include/asm/sigframe.h
@@ -59,12 +59,25 @@ struct rt_sigframe_ia32 {
59#endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */ 59#endif /* defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) */
60 60
61#ifdef CONFIG_X86_64 61#ifdef CONFIG_X86_64
62
62struct rt_sigframe { 63struct rt_sigframe {
63 char __user *pretcode; 64 char __user *pretcode;
64 struct ucontext uc; 65 struct ucontext uc;
65 struct siginfo info; 66 struct siginfo info;
66 /* fp state follows here */ 67 /* fp state follows here */
67}; 68};
69
70#ifdef CONFIG_X86_X32_ABI
71
72struct rt_sigframe_x32 {
73 u64 pretcode;
74 struct ucontext_x32 uc;
75 compat_siginfo_t info;
76 /* fp state follows here */
77};
78
79#endif /* CONFIG_X86_X32_ABI */
80
68#endif /* CONFIG_X86_64 */ 81#endif /* CONFIG_X86_64 */
69 82
70#endif /* _ASM_X86_SIGFRAME_H */ 83#endif /* _ASM_X86_SIGFRAME_H */
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
new file mode 100644
index 000000000000..ada93b3b8c66
--- /dev/null
+++ b/arch/x86/include/asm/sighandling.h
@@ -0,0 +1,24 @@
1#ifndef _ASM_X86_SIGHANDLING_H
2#define _ASM_X86_SIGHANDLING_H
3
4#include <linux/compiler.h>
5#include <linux/ptrace.h>
6#include <linux/signal.h>
7
8#include <asm/processor-flags.h>
9
10#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
11
12#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
13 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
14 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
15 X86_EFLAGS_CF)
16
17void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
18
19int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
20 unsigned long *pax);
21int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
22 struct pt_regs *regs, unsigned long mask);
23
24#endif /* _ASM_X86_SIGHANDLING_H */
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index cb238526a9f1..3fda9db48819 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -10,6 +10,8 @@
10#ifndef _ASM_X86_SYS_IA32_H 10#ifndef _ASM_X86_SYS_IA32_H
11#define _ASM_X86_SYS_IA32_H 11#define _ASM_X86_SYS_IA32_H
12 12
13#ifdef CONFIG_COMPAT
14
13#include <linux/compiler.h> 15#include <linux/compiler.h>
14#include <linux/linkage.h> 16#include <linux/linkage.h>
15#include <linux/types.h> 17#include <linux/types.h>
@@ -36,8 +38,6 @@ asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *,
36 struct sigaction32 __user *, unsigned int); 38 struct sigaction32 __user *, unsigned int);
37asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *, 39asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
38 struct old_sigaction32 __user *); 40 struct old_sigaction32 __user *);
39asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
40 compat_sigset_t __user *, unsigned int);
41asmlinkage long sys32_alarm(unsigned int); 41asmlinkage long sys32_alarm(unsigned int);
42 42
43asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int); 43asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
@@ -83,4 +83,7 @@ asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32);
83 83
84asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int, 84asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
85 const char __user *); 85 const char __user *);
86
87#endif /* CONFIG_COMPAT */
88
86#endif /* _ASM_X86_SYS_IA32_H */ 89#endif /* _ASM_X86_SYS_IA32_H */
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index d962e5652a73..386b78686c4d 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -16,6 +16,7 @@
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <asm/asm-offsets.h> /* For NR_syscalls */ 18#include <asm/asm-offsets.h> /* For NR_syscalls */
19#include <asm/unistd.h>
19 20
20extern const unsigned long sys_call_table[]; 21extern const unsigned long sys_call_table[];
21 22
@@ -26,13 +27,13 @@ extern const unsigned long sys_call_table[];
26 */ 27 */
27static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) 28static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
28{ 29{
29 return regs->orig_ax; 30 return regs->orig_ax & __SYSCALL_MASK;
30} 31}
31 32
32static inline void syscall_rollback(struct task_struct *task, 33static inline void syscall_rollback(struct task_struct *task,
33 struct pt_regs *regs) 34 struct pt_regs *regs)
34{ 35{
35 regs->ax = regs->orig_ax; 36 regs->ax = regs->orig_ax & __SYSCALL_MASK;
36} 37}
37 38
38static inline long syscall_get_error(struct task_struct *task, 39static inline long syscall_get_error(struct task_struct *task,
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index cfd8144d5527..ad6df8ccd715 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -86,7 +86,7 @@ struct thread_info {
86#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ 86#define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */
87#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ 87#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
88#define TIF_NOTSC 16 /* TSC is not accessible in userland */ 88#define TIF_NOTSC 16 /* TSC is not accessible in userland */
89#define TIF_IA32 17 /* 32bit process */ 89#define TIF_IA32 17 /* IA32 compatibility process */
90#define TIF_FORK 18 /* ret_from_fork */ 90#define TIF_FORK 18 /* ret_from_fork */
91#define TIF_MEMDIE 20 /* is terminating due to OOM killer */ 91#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
92#define TIF_DEBUG 21 /* uses debug registers */ 92#define TIF_DEBUG 21 /* uses debug registers */
@@ -95,6 +95,8 @@ struct thread_info {
95#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ 95#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
96#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ 96#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
97#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ 97#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
98#define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
99#define TIF_X32 30 /* 32-bit native x86-64 binary */
98 100
99#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 101#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
100#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 102#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -116,6 +118,8 @@ struct thread_info {
116#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) 118#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
117#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) 119#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
118#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) 120#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
121#define _TIF_ADDR32 (1 << TIF_ADDR32)
122#define _TIF_X32 (1 << TIF_X32)
119 123
120/* work to do in syscall_trace_enter() */ 124/* work to do in syscall_trace_enter() */
121#define _TIF_WORK_SYSCALL_ENTRY \ 125#define _TIF_WORK_SYSCALL_ENTRY \
@@ -262,6 +266,18 @@ static inline void set_restore_sigmask(void)
262 ti->status |= TS_RESTORE_SIGMASK; 266 ti->status |= TS_RESTORE_SIGMASK;
263 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); 267 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
264} 268}
269
270static inline bool is_ia32_task(void)
271{
272#ifdef CONFIG_X86_32
273 return true;
274#endif
275#ifdef CONFIG_IA32_EMULATION
276 if (current_thread_info()->status & TS_COMPAT)
277 return true;
278#endif
279 return false;
280}
265#endif /* !__ASSEMBLY__ */ 281#endif /* !__ASSEMBLY__ */
266 282
267#ifndef __ASSEMBLY__ 283#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 0012d0902c5f..88eae2aec619 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -89,4 +89,29 @@ asmlinkage void smp_thermal_interrupt(void);
89asmlinkage void mce_threshold_interrupt(void); 89asmlinkage void mce_threshold_interrupt(void);
90#endif 90#endif
91 91
92/* Interrupts/Exceptions */
93enum {
94 X86_TRAP_DE = 0, /* 0, Divide-by-zero */
95 X86_TRAP_DB, /* 1, Debug */
96 X86_TRAP_NMI, /* 2, Non-maskable Interrupt */
97 X86_TRAP_BP, /* 3, Breakpoint */
98 X86_TRAP_OF, /* 4, Overflow */
99 X86_TRAP_BR, /* 5, Bound Range Exceeded */
100 X86_TRAP_UD, /* 6, Invalid Opcode */
101 X86_TRAP_NM, /* 7, Device Not Available */
102 X86_TRAP_DF, /* 8, Double Fault */
103 X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */
104 X86_TRAP_TS, /* 10, Invalid TSS */
105 X86_TRAP_NP, /* 11, Segment Not Present */
106 X86_TRAP_SS, /* 12, Stack Segment Fault */
107 X86_TRAP_GP, /* 13, General Protection Fault */
108 X86_TRAP_PF, /* 14, Page Fault */
109 X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */
110 X86_TRAP_MF, /* 16, x87 Floating-Point Exception */
111 X86_TRAP_AC, /* 17, Alignment Check */
112 X86_TRAP_MC, /* 18, Machine Check */
113 X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */
114 X86_TRAP_IRET = 32, /* 32, IRET Exception */
115};
116
92#endif /* _ASM_X86_TRAPS_H */ 117#endif /* _ASM_X86_TRAPS_H */
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 21f77b89e47a..37cdc9d99bb1 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -1,7 +1,17 @@
1#ifndef _ASM_X86_UNISTD_H 1#ifndef _ASM_X86_UNISTD_H
2#define _ASM_X86_UNISTD_H 1 2#define _ASM_X86_UNISTD_H 1
3 3
4/* x32 syscall flag bit */
5#define __X32_SYSCALL_BIT 0x40000000
6
4#ifdef __KERNEL__ 7#ifdef __KERNEL__
8
9# ifdef CONFIG_X86_X32_ABI
10# define __SYSCALL_MASK (~(__X32_SYSCALL_BIT))
11# else
12# define __SYSCALL_MASK (~0)
13# endif
14
5# ifdef CONFIG_X86_32 15# ifdef CONFIG_X86_32
6 16
7# include <asm/unistd_32.h> 17# include <asm/unistd_32.h>
@@ -14,6 +24,7 @@
14# else 24# else
15 25
16# include <asm/unistd_64.h> 26# include <asm/unistd_64.h>
27# include <asm/unistd_64_x32.h>
17# define __ARCH_WANT_COMPAT_SYS_TIME 28# define __ARCH_WANT_COMPAT_SYS_TIME
18 29
19# endif 30# endif
@@ -52,8 +63,10 @@
52#else 63#else
53# ifdef __i386__ 64# ifdef __i386__
54# include <asm/unistd_32.h> 65# include <asm/unistd_32.h>
55# else 66# elif defined(__LP64__)
56# include <asm/unistd_64.h> 67# include <asm/unistd_64.h>
68# else
69# include <asm/unistd_x32.h>
57# endif 70# endif
58#endif 71#endif
59 72
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 815285bcaceb..8b38be2de9e1 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -5,13 +5,8 @@
5#include <linux/clocksource.h> 5#include <linux/clocksource.h>
6 6
7struct vsyscall_gtod_data { 7struct vsyscall_gtod_data {
8 seqlock_t lock; 8 seqcount_t seq;
9 9
10 /* open coded 'struct timespec' */
11 time_t wall_time_sec;
12 u32 wall_time_nsec;
13
14 struct timezone sys_tz;
15 struct { /* extract of a clocksource struct */ 10 struct { /* extract of a clocksource struct */
16 int vclock_mode; 11 int vclock_mode;
17 cycle_t cycle_last; 12 cycle_t cycle_last;
@@ -19,8 +14,16 @@ struct vsyscall_gtod_data {
19 u32 mult; 14 u32 mult;
20 u32 shift; 15 u32 shift;
21 } clock; 16 } clock;
22 struct timespec wall_to_monotonic; 17
18 /* open coded 'struct timespec' */
19 time_t wall_time_sec;
20 u32 wall_time_nsec;
21 u32 monotonic_time_nsec;
22 time_t monotonic_time_sec;
23
24 struct timezone sys_tz;
23 struct timespec wall_time_coarse; 25 struct timespec wall_time_coarse;
26 struct timespec monotonic_time_coarse;
24}; 27};
25extern struct vsyscall_gtod_data vsyscall_gtod_data; 28extern struct vsyscall_gtod_data vsyscall_gtod_data;
26 29
diff --git a/arch/x86/include/asm/x2apic.h b/arch/x86/include/asm/x2apic.h
index 6bf5b8e478c0..92e54abf89e0 100644
--- a/arch/x86/include/asm/x2apic.h
+++ b/arch/x86/include/asm/x2apic.h
@@ -18,6 +18,11 @@ static const struct cpumask *x2apic_target_cpus(void)
18 return cpu_online_mask; 18 return cpu_online_mask;
19} 19}
20 20
21static int x2apic_apic_id_valid(int apicid)
22{
23 return 1;
24}
25
21static int x2apic_apic_id_registered(void) 26static int x2apic_apic_id_registered(void)
22{ 27{
23 return 1; 28 return 1;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 406ed77216d0..a415b1f44365 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -239,7 +239,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
239 * to not preallocating memory for all NR_CPUS 239 * to not preallocating memory for all NR_CPUS
240 * when we use CPU hotplug. 240 * when we use CPU hotplug.
241 */ 241 */
242 if (!cpu_has_x2apic && (apic_id >= 0xff) && enabled) 242 if (!apic->apic_id_valid(apic_id) && enabled)
243 printk(KERN_WARNING PREFIX "x2apic entry ignored\n"); 243 printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
244 else 244 else
245 acpi_register_lapic(apic_id, enabled); 245 acpi_register_lapic(apic_id, enabled);
@@ -642,6 +642,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
642 kfree(buffer.pointer); 642 kfree(buffer.pointer);
643 buffer.length = ACPI_ALLOCATE_BUFFER; 643 buffer.length = ACPI_ALLOCATE_BUFFER;
644 buffer.pointer = NULL; 644 buffer.pointer = NULL;
645 lapic = NULL;
645 646
646 if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL)) 647 if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
647 goto out; 648 goto out;
@@ -650,7 +651,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
650 goto free_tmp_map; 651 goto free_tmp_map;
651 652
652 cpumask_copy(tmp_map, cpu_present_mask); 653 cpumask_copy(tmp_map, cpu_present_mask);
653 acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED); 654 acpi_register_lapic(physid, ACPI_MADT_ENABLED);
654 655
655 /* 656 /*
656 * If mp_register_lapic successfully generates a new logical cpu 657 * If mp_register_lapic successfully generates a new logical cpu
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 2eec05b6d1b8..11544d8f1e97 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -383,20 +383,25 @@ static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
383 383
384static unsigned int reserve_eilvt_offset(int offset, unsigned int new) 384static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
385{ 385{
386 unsigned int rsvd; /* 0: uninitialized */ 386 unsigned int rsvd, vector;
387 387
388 if (offset >= APIC_EILVT_NR_MAX) 388 if (offset >= APIC_EILVT_NR_MAX)
389 return ~0; 389 return ~0;
390 390
391 rsvd = atomic_read(&eilvt_offsets[offset]) & ~APIC_EILVT_MASKED; 391 rsvd = atomic_read(&eilvt_offsets[offset]);
392 do { 392 do {
393 if (rsvd && 393 vector = rsvd & ~APIC_EILVT_MASKED; /* 0: unassigned */
394 !eilvt_entry_is_changeable(rsvd, new)) 394 if (vector && !eilvt_entry_is_changeable(vector, new))
395 /* may not change if vectors are different */ 395 /* may not change if vectors are different */
396 return rsvd; 396 return rsvd;
397 rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new); 397 rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
398 } while (rsvd != new); 398 } while (rsvd != new);
399 399
400 rsvd &= ~APIC_EILVT_MASKED;
401 if (rsvd && rsvd != vector)
402 pr_info("LVT offset %d assigned for vector 0x%02x\n",
403 offset, rsvd);
404
400 return new; 405 return new;
401} 406}
402 407
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index d9ea5f331ac5..899803e03214 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -229,11 +229,10 @@ static int __init numachip_system_init(void)
229} 229}
230early_initcall(numachip_system_init); 230early_initcall(numachip_system_init);
231 231
232static int __cpuinit numachip_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 232static int numachip_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
233{ 233{
234 if (!strncmp(oem_id, "NUMASC", 6)) { 234 if (!strncmp(oem_id, "NUMASC", 6)) {
235 numachip_system = 1; 235 numachip_system = 1;
236 setup_force_cpu_cap(X86_FEATURE_X2APIC);
237 return 1; 236 return 1;
238 } 237 }
239 238
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6d10a66fc5a9..e88300d8e80a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -64,9 +64,28 @@
64#include <asm/apic.h> 64#include <asm/apic.h>
65 65
66#define __apicdebuginit(type) static type __init 66#define __apicdebuginit(type) static type __init
67
67#define for_each_irq_pin(entry, head) \ 68#define for_each_irq_pin(entry, head) \
68 for (entry = head; entry; entry = entry->next) 69 for (entry = head; entry; entry = entry->next)
69 70
71static void __init __ioapic_init_mappings(void);
72
73static unsigned int __io_apic_read (unsigned int apic, unsigned int reg);
74static void __io_apic_write (unsigned int apic, unsigned int reg, unsigned int val);
75static void __io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
76
77static struct io_apic_ops io_apic_ops = {
78 .init = __ioapic_init_mappings,
79 .read = __io_apic_read,
80 .write = __io_apic_write,
81 .modify = __io_apic_modify,
82};
83
84void __init set_io_apic_ops(const struct io_apic_ops *ops)
85{
86 io_apic_ops = *ops;
87}
88
70/* 89/*
71 * Is the SiS APIC rmw bug present ? 90 * Is the SiS APIC rmw bug present ?
72 * -1 = don't know, 0 = no, 1 = yes 91 * -1 = don't know, 0 = no, 1 = yes
@@ -294,6 +313,22 @@ static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
294 irq_free_desc(at); 313 irq_free_desc(at);
295} 314}
296 315
316static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
317{
318 return io_apic_ops.read(apic, reg);
319}
320
321static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
322{
323 io_apic_ops.write(apic, reg, value);
324}
325
326static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
327{
328 io_apic_ops.modify(apic, reg, value);
329}
330
331
297struct io_apic { 332struct io_apic {
298 unsigned int index; 333 unsigned int index;
299 unsigned int unused[3]; 334 unsigned int unused[3];
@@ -314,16 +349,17 @@ static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
314 writel(vector, &io_apic->eoi); 349 writel(vector, &io_apic->eoi);
315} 350}
316 351
317static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) 352static unsigned int __io_apic_read(unsigned int apic, unsigned int reg)
318{ 353{
319 struct io_apic __iomem *io_apic = io_apic_base(apic); 354 struct io_apic __iomem *io_apic = io_apic_base(apic);
320 writel(reg, &io_apic->index); 355 writel(reg, &io_apic->index);
321 return readl(&io_apic->data); 356 return readl(&io_apic->data);
322} 357}
323 358
324static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) 359static void __io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
325{ 360{
326 struct io_apic __iomem *io_apic = io_apic_base(apic); 361 struct io_apic __iomem *io_apic = io_apic_base(apic);
362
327 writel(reg, &io_apic->index); 363 writel(reg, &io_apic->index);
328 writel(value, &io_apic->data); 364 writel(value, &io_apic->data);
329} 365}
@@ -334,7 +370,7 @@ static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned i
334 * 370 *
335 * Older SiS APIC requires we rewrite the index register 371 * Older SiS APIC requires we rewrite the index register
336 */ 372 */
337static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) 373static void __io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
338{ 374{
339 struct io_apic __iomem *io_apic = io_apic_base(apic); 375 struct io_apic __iomem *io_apic = io_apic_base(apic);
340 376
@@ -377,6 +413,7 @@ static struct IO_APIC_route_entry __ioapic_read_entry(int apic, int pin)
377 413
378 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); 414 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
379 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); 415 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
416
380 return eu.entry; 417 return eu.entry;
381} 418}
382 419
@@ -384,9 +421,11 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
384{ 421{
385 union entry_union eu; 422 union entry_union eu;
386 unsigned long flags; 423 unsigned long flags;
424
387 raw_spin_lock_irqsave(&ioapic_lock, flags); 425 raw_spin_lock_irqsave(&ioapic_lock, flags);
388 eu.entry = __ioapic_read_entry(apic, pin); 426 eu.entry = __ioapic_read_entry(apic, pin);
389 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 427 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
428
390 return eu.entry; 429 return eu.entry;
391} 430}
392 431
@@ -396,8 +435,7 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
396 * the interrupt, and we need to make sure the entry is fully populated 435 * the interrupt, and we need to make sure the entry is fully populated
397 * before that happens. 436 * before that happens.
398 */ 437 */
399static void 438static void __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
400__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
401{ 439{
402 union entry_union eu = {{0, 0}}; 440 union entry_union eu = {{0, 0}};
403 441
@@ -409,6 +447,7 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
409static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 447static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
410{ 448{
411 unsigned long flags; 449 unsigned long flags;
450
412 raw_spin_lock_irqsave(&ioapic_lock, flags); 451 raw_spin_lock_irqsave(&ioapic_lock, flags);
413 __ioapic_write_entry(apic, pin, e); 452 __ioapic_write_entry(apic, pin, e);
414 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 453 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -435,8 +474,7 @@ static void ioapic_mask_entry(int apic, int pin)
435 * shared ISA-space IRQs, so we have to support them. We are super 474 * shared ISA-space IRQs, so we have to support them. We are super
436 * fast in the common case, and fast for shared ISA-space IRQs. 475 * fast in the common case, and fast for shared ISA-space IRQs.
437 */ 476 */
438static int 477static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
439__add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
440{ 478{
441 struct irq_pin_list **last, *entry; 479 struct irq_pin_list **last, *entry;
442 480
@@ -521,6 +559,7 @@ static void io_apic_sync(struct irq_pin_list *entry)
521 * a dummy read from the IO-APIC 559 * a dummy read from the IO-APIC
522 */ 560 */
523 struct io_apic __iomem *io_apic; 561 struct io_apic __iomem *io_apic;
562
524 io_apic = io_apic_base(entry->apic); 563 io_apic = io_apic_base(entry->apic);
525 readl(&io_apic->data); 564 readl(&io_apic->data);
526} 565}
@@ -2512,21 +2551,73 @@ static void ack_apic_edge(struct irq_data *data)
2512 2551
2513atomic_t irq_mis_count; 2552atomic_t irq_mis_count;
2514 2553
2515static void ack_apic_level(struct irq_data *data)
2516{
2517 struct irq_cfg *cfg = data->chip_data;
2518 int i, do_unmask_irq = 0, irq = data->irq;
2519 unsigned long v;
2520
2521 irq_complete_move(cfg);
2522#ifdef CONFIG_GENERIC_PENDING_IRQ 2554#ifdef CONFIG_GENERIC_PENDING_IRQ
2555static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
2556{
2523 /* If we are moving the irq we need to mask it */ 2557 /* If we are moving the irq we need to mask it */
2524 if (unlikely(irqd_is_setaffinity_pending(data))) { 2558 if (unlikely(irqd_is_setaffinity_pending(data))) {
2525 do_unmask_irq = 1;
2526 mask_ioapic(cfg); 2559 mask_ioapic(cfg);
2560 return true;
2527 } 2561 }
2562 return false;
2563}
2564
2565static inline void ioapic_irqd_unmask(struct irq_data *data,
2566 struct irq_cfg *cfg, bool masked)
2567{
2568 if (unlikely(masked)) {
2569 /* Only migrate the irq if the ack has been received.
2570 *
2571 * On rare occasions the broadcast level triggered ack gets
2572 * delayed going to ioapics, and if we reprogram the
2573 * vector while Remote IRR is still set the irq will never
2574 * fire again.
2575 *
2576 * To prevent this scenario we read the Remote IRR bit
2577 * of the ioapic. This has two effects.
2578 * - On any sane system the read of the ioapic will
2579 * flush writes (and acks) going to the ioapic from
2580 * this cpu.
2581 * - We get to see if the ACK has actually been delivered.
2582 *
2583 * Based on failed experiments of reprogramming the
2584 * ioapic entry from outside of irq context starting
2585 * with masking the ioapic entry and then polling until
2586 * Remote IRR was clear before reprogramming the
2587 * ioapic I don't trust the Remote IRR bit to be
2588 * completey accurate.
2589 *
2590 * However there appears to be no other way to plug
2591 * this race, so if the Remote IRR bit is not
2592 * accurate and is causing problems then it is a hardware bug
2593 * and you can go talk to the chipset vendor about it.
2594 */
2595 if (!io_apic_level_ack_pending(cfg))
2596 irq_move_masked_irq(data);
2597 unmask_ioapic(cfg);
2598 }
2599}
2600#else
2601static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
2602{
2603 return false;
2604}
2605static inline void ioapic_irqd_unmask(struct irq_data *data,
2606 struct irq_cfg *cfg, bool masked)
2607{
2608}
2528#endif 2609#endif
2529 2610
2611static void ack_apic_level(struct irq_data *data)
2612{
2613 struct irq_cfg *cfg = data->chip_data;
2614 int i, irq = data->irq;
2615 unsigned long v;
2616 bool masked;
2617
2618 irq_complete_move(cfg);
2619 masked = ioapic_irqd_mask(data, cfg);
2620
2530 /* 2621 /*
2531 * It appears there is an erratum which affects at least version 0x11 2622 * It appears there is an erratum which affects at least version 0x11
2532 * of I/O APIC (that's the 82093AA and cores integrated into various 2623 * of I/O APIC (that's the 82093AA and cores integrated into various
@@ -2581,38 +2672,7 @@ static void ack_apic_level(struct irq_data *data)
2581 eoi_ioapic_irq(irq, cfg); 2672 eoi_ioapic_irq(irq, cfg);
2582 } 2673 }
2583 2674
2584 /* Now we can move and renable the irq */ 2675 ioapic_irqd_unmask(data, cfg, masked);
2585 if (unlikely(do_unmask_irq)) {
2586 /* Only migrate the irq if the ack has been received.
2587 *
2588 * On rare occasions the broadcast level triggered ack gets
2589 * delayed going to ioapics, and if we reprogram the
2590 * vector while Remote IRR is still set the irq will never
2591 * fire again.
2592 *
2593 * To prevent this scenario we read the Remote IRR bit
2594 * of the ioapic. This has two effects.
2595 * - On any sane system the read of the ioapic will
2596 * flush writes (and acks) going to the ioapic from
2597 * this cpu.
2598 * - We get to see if the ACK has actually been delivered.
2599 *
2600 * Based on failed experiments of reprogramming the
2601 * ioapic entry from outside of irq context starting
2602 * with masking the ioapic entry and then polling until
2603 * Remote IRR was clear before reprogramming the
2604 * ioapic I don't trust the Remote IRR bit to be
2605 * completey accurate.
2606 *
2607 * However there appears to be no other way to plug
2608 * this race, so if the Remote IRR bit is not
2609 * accurate and is causing problems then it is a hardware bug
2610 * and you can go talk to the chipset vendor about it.
2611 */
2612 if (!io_apic_level_ack_pending(cfg))
2613 irq_move_masked_irq(data);
2614 unmask_ioapic(cfg);
2615 }
2616} 2676}
2617 2677
2618#ifdef CONFIG_IRQ_REMAP 2678#ifdef CONFIG_IRQ_REMAP
@@ -3873,6 +3933,11 @@ static struct resource * __init ioapic_setup_resources(int nr_ioapics)
3873 3933
3874void __init ioapic_and_gsi_init(void) 3934void __init ioapic_and_gsi_init(void)
3875{ 3935{
3936 io_apic_ops.init();
3937}
3938
3939static void __init __ioapic_init_mappings(void)
3940{
3876 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; 3941 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
3877 struct resource *ioapic_res; 3942 struct resource *ioapic_res;
3878 int i; 3943 int i;
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 9193713060a9..48f3103b3c93 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -213,7 +213,7 @@ static struct apic apic_x2apic_cluster = {
213 .name = "cluster x2apic", 213 .name = "cluster x2apic",
214 .probe = x2apic_cluster_probe, 214 .probe = x2apic_cluster_probe,
215 .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, 215 .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
216 .apic_id_valid = default_apic_id_valid, 216 .apic_id_valid = x2apic_apic_id_valid,
217 .apic_id_registered = x2apic_apic_id_registered, 217 .apic_id_registered = x2apic_apic_id_registered,
218 218
219 .irq_delivery_mode = dest_LowestPrio, 219 .irq_delivery_mode = dest_LowestPrio,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index bcd1db6eaca9..8a778db45e3a 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -119,7 +119,7 @@ static struct apic apic_x2apic_phys = {
119 .name = "physical x2apic", 119 .name = "physical x2apic",
120 .probe = x2apic_phys_probe, 120 .probe = x2apic_phys_probe,
121 .acpi_madt_oem_check = x2apic_acpi_madt_oem_check, 121 .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
122 .apic_id_valid = default_apic_id_valid, 122 .apic_id_valid = x2apic_apic_id_valid,
123 .apic_id_registered = x2apic_apic_id_registered, 123 .apic_id_registered = x2apic_apic_id_registered,
124 124
125 .irq_delivery_mode = dest_Fixed, 125 .irq_delivery_mode = dest_Fixed,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index fc4771425852..87bfa69e216e 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -266,6 +266,11 @@ static void uv_send_IPI_all(int vector)
266 uv_send_IPI_mask(cpu_online_mask, vector); 266 uv_send_IPI_mask(cpu_online_mask, vector);
267} 267}
268 268
269static int uv_apic_id_valid(int apicid)
270{
271 return 1;
272}
273
269static int uv_apic_id_registered(void) 274static int uv_apic_id_registered(void)
270{ 275{
271 return 1; 276 return 1;
@@ -351,7 +356,7 @@ static struct apic __refdata apic_x2apic_uv_x = {
351 .name = "UV large system", 356 .name = "UV large system",
352 .probe = uv_probe, 357 .probe = uv_probe,
353 .acpi_madt_oem_check = uv_acpi_madt_oem_check, 358 .acpi_madt_oem_check = uv_acpi_madt_oem_check,
354 .apic_id_valid = default_apic_id_valid, 359 .apic_id_valid = uv_apic_id_valid,
355 .apic_id_registered = uv_apic_id_registered, 360 .apic_id_registered = uv_apic_id_registered,
356 361
357 .irq_delivery_mode = dest_Fixed, 362 .irq_delivery_mode = dest_Fixed,
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 834e897b1e25..1b4754f82ba7 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -1,6 +1,12 @@
1#include <asm/ia32.h> 1#include <asm/ia32.h>
2 2
3#define __SYSCALL_64(nr, sym, compat) [nr] = 1, 3#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
4#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
5#ifdef CONFIG_X86_X32_ABI
6# define __SYSCALL_X32(nr, sym, compat) [nr] = 1,
7#else
8# define __SYSCALL_X32(nr, sym, compat) /* nothing */
9#endif
4static char syscalls_64[] = { 10static char syscalls_64[] = {
5#include <asm/syscalls_64.h> 11#include <asm/syscalls_64.h>
6}; 12};
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e49477444fff..67e258362a3d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -999,7 +999,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
999 else 999 else
1000 printk(KERN_CONT "\n"); 1000 printk(KERN_CONT "\n");
1001 1001
1002 __print_cpu_msr(); 1002 print_cpu_msr(c);
1003} 1003}
1004 1004
1005void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c) 1005void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 79289632cb27..a041e094b8b9 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -167,6 +167,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
167{ 167{
168 int err = 0; 168 int err = 0;
169 mtrr_type type; 169 mtrr_type type;
170 unsigned long base;
170 unsigned long size; 171 unsigned long size;
171 struct mtrr_sentry sentry; 172 struct mtrr_sentry sentry;
172 struct mtrr_gentry gentry; 173 struct mtrr_gentry gentry;
@@ -267,14 +268,14 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
267#endif 268#endif
268 if (gentry.regnum >= num_var_ranges) 269 if (gentry.regnum >= num_var_ranges)
269 return -EINVAL; 270 return -EINVAL;
270 mtrr_if->get(gentry.regnum, &gentry.base, &size, &type); 271 mtrr_if->get(gentry.regnum, &base, &size, &type);
271 272
272 /* Hide entries that go above 4GB */ 273 /* Hide entries that go above 4GB */
273 if (gentry.base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)) 274 if (base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
274 || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))) 275 || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)))
275 gentry.base = gentry.size = gentry.type = 0; 276 gentry.base = gentry.size = gentry.type = 0;
276 else { 277 else {
277 gentry.base <<= PAGE_SHIFT; 278 gentry.base = base << PAGE_SHIFT;
278 gentry.size = size << PAGE_SHIFT; 279 gentry.size = size << PAGE_SHIFT;
279 gentry.type = type; 280 gentry.type = type;
280 } 281 }
@@ -321,11 +322,12 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
321#endif 322#endif
322 if (gentry.regnum >= num_var_ranges) 323 if (gentry.regnum >= num_var_ranges)
323 return -EINVAL; 324 return -EINVAL;
324 mtrr_if->get(gentry.regnum, &gentry.base, &size, &type); 325 mtrr_if->get(gentry.regnum, &base, &size, &type);
325 /* Hide entries that would overflow */ 326 /* Hide entries that would overflow */
326 if (size != (__typeof__(gentry.size))size) 327 if (size != (__typeof__(gentry.size))size)
327 gentry.base = gentry.size = gentry.type = 0; 328 gentry.base = gentry.size = gentry.type = 0;
328 else { 329 else {
330 gentry.base = base;
329 gentry.size = size; 331 gentry.size = size;
330 gentry.type = type; 332 gentry.type = type;
331 } 333 }
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index fa2900c0e398..bb8e03407e18 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -29,7 +29,6 @@
29#include <asm/apic.h> 29#include <asm/apic.h>
30#include <asm/stacktrace.h> 30#include <asm/stacktrace.h>
31#include <asm/nmi.h> 31#include <asm/nmi.h>
32#include <asm/compat.h>
33#include <asm/smp.h> 32#include <asm/smp.h>
34#include <asm/alternative.h> 33#include <asm/alternative.h>
35#include <asm/timer.h> 34#include <asm/timer.h>
@@ -1314,6 +1313,11 @@ static void __init pmu_check_apic(void)
1314 pr_info("no hardware sampling interrupt available.\n"); 1313 pr_info("no hardware sampling interrupt available.\n");
1315} 1314}
1316 1315
1316static struct attribute_group x86_pmu_format_group = {
1317 .name = "format",
1318 .attrs = NULL,
1319};
1320
1317static int __init init_hw_perf_events(void) 1321static int __init init_hw_perf_events(void)
1318{ 1322{
1319 struct x86_pmu_quirk *quirk; 1323 struct x86_pmu_quirk *quirk;
@@ -1388,6 +1392,7 @@ static int __init init_hw_perf_events(void)
1388 } 1392 }
1389 1393
1390 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ 1394 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
1395 x86_pmu_format_group.attrs = x86_pmu.format_attrs;
1391 1396
1392 pr_info("... version: %d\n", x86_pmu.version); 1397 pr_info("... version: %d\n", x86_pmu.version);
1393 pr_info("... bit width: %d\n", x86_pmu.cntval_bits); 1398 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
@@ -1616,6 +1621,9 @@ static int x86_pmu_event_idx(struct perf_event *event)
1616{ 1621{
1617 int idx = event->hw.idx; 1622 int idx = event->hw.idx;
1618 1623
1624 if (!x86_pmu.attr_rdpmc)
1625 return 0;
1626
1619 if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) { 1627 if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
1620 idx -= X86_PMC_IDX_FIXED; 1628 idx -= X86_PMC_IDX_FIXED;
1621 idx |= 1 << 30; 1629 idx |= 1 << 30;
@@ -1668,6 +1676,7 @@ static struct attribute_group x86_pmu_attr_group = {
1668 1676
1669static const struct attribute_group *x86_pmu_attr_groups[] = { 1677static const struct attribute_group *x86_pmu_attr_groups[] = {
1670 &x86_pmu_attr_group, 1678 &x86_pmu_attr_group,
1679 &x86_pmu_format_group,
1671 NULL, 1680 NULL,
1672}; 1681};
1673 1682
@@ -1699,14 +1708,19 @@ static struct pmu pmu = {
1699 .flush_branch_stack = x86_pmu_flush_branch_stack, 1708 .flush_branch_stack = x86_pmu_flush_branch_stack,
1700}; 1709};
1701 1710
1702void perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now) 1711void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
1703{ 1712{
1713 userpg->cap_usr_time = 0;
1714 userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
1715 userpg->pmc_width = x86_pmu.cntval_bits;
1716
1704 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 1717 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1705 return; 1718 return;
1706 1719
1707 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) 1720 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
1708 return; 1721 return;
1709 1722
1723 userpg->cap_usr_time = 1;
1710 userpg->time_mult = this_cpu_read(cyc2ns); 1724 userpg->time_mult = this_cpu_read(cyc2ns);
1711 userpg->time_shift = CYC2NS_SCALE_FACTOR; 1725 userpg->time_shift = CYC2NS_SCALE_FACTOR;
1712 userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; 1726 userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
@@ -1748,6 +1762,9 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
1748} 1762}
1749 1763
1750#ifdef CONFIG_COMPAT 1764#ifdef CONFIG_COMPAT
1765
1766#include <asm/compat.h>
1767
1751static inline int 1768static inline int
1752perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) 1769perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1753{ 1770{
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 8484e77c211e..6638aaf54493 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -339,6 +339,7 @@ struct x86_pmu {
339 * sysfs attrs 339 * sysfs attrs
340 */ 340 */
341 int attr_rdpmc; 341 int attr_rdpmc;
342 struct attribute **format_attrs;
342 343
343 /* 344 /*
344 * CPU Hotplug hooks 345 * CPU Hotplug hooks
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index dd002faff7a6..95e7fe1c5f0b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -404,6 +404,21 @@ static void amd_pmu_cpu_dead(int cpu)
404 } 404 }
405} 405}
406 406
407PMU_FORMAT_ATTR(event, "config:0-7,32-35");
408PMU_FORMAT_ATTR(umask, "config:8-15" );
409PMU_FORMAT_ATTR(edge, "config:18" );
410PMU_FORMAT_ATTR(inv, "config:23" );
411PMU_FORMAT_ATTR(cmask, "config:24-31" );
412
413static struct attribute *amd_format_attr[] = {
414 &format_attr_event.attr,
415 &format_attr_umask.attr,
416 &format_attr_edge.attr,
417 &format_attr_inv.attr,
418 &format_attr_cmask.attr,
419 NULL,
420};
421
407static __initconst const struct x86_pmu amd_pmu = { 422static __initconst const struct x86_pmu amd_pmu = {
408 .name = "AMD", 423 .name = "AMD",
409 .handle_irq = x86_pmu_handle_irq, 424 .handle_irq = x86_pmu_handle_irq,
@@ -426,6 +441,8 @@ static __initconst const struct x86_pmu amd_pmu = {
426 .get_event_constraints = amd_get_event_constraints, 441 .get_event_constraints = amd_get_event_constraints,
427 .put_event_constraints = amd_put_event_constraints, 442 .put_event_constraints = amd_put_event_constraints,
428 443
444 .format_attrs = amd_format_attr,
445
429 .cpu_prepare = amd_pmu_cpu_prepare, 446 .cpu_prepare = amd_pmu_cpu_prepare,
430 .cpu_starting = amd_pmu_cpu_starting, 447 .cpu_starting = amd_pmu_cpu_starting,
431 .cpu_dead = amd_pmu_cpu_dead, 448 .cpu_dead = amd_pmu_cpu_dead,
@@ -596,6 +613,7 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
596 .cpu_dead = amd_pmu_cpu_dead, 613 .cpu_dead = amd_pmu_cpu_dead,
597#endif 614#endif
598 .cpu_starting = amd_pmu_cpu_starting, 615 .cpu_starting = amd_pmu_cpu_starting,
616 .format_attrs = amd_format_attr,
599}; 617};
600 618
601__init int amd_pmu_init(void) 619__init int amd_pmu_init(void)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 6a84e7f28f05..26b3e2fef104 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1431,6 +1431,24 @@ static void core_pmu_enable_all(int added)
1431 } 1431 }
1432} 1432}
1433 1433
1434PMU_FORMAT_ATTR(event, "config:0-7" );
1435PMU_FORMAT_ATTR(umask, "config:8-15" );
1436PMU_FORMAT_ATTR(edge, "config:18" );
1437PMU_FORMAT_ATTR(pc, "config:19" );
1438PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
1439PMU_FORMAT_ATTR(inv, "config:23" );
1440PMU_FORMAT_ATTR(cmask, "config:24-31" );
1441
1442static struct attribute *intel_arch_formats_attr[] = {
1443 &format_attr_event.attr,
1444 &format_attr_umask.attr,
1445 &format_attr_edge.attr,
1446 &format_attr_pc.attr,
1447 &format_attr_inv.attr,
1448 &format_attr_cmask.attr,
1449 NULL,
1450};
1451
1434static __initconst const struct x86_pmu core_pmu = { 1452static __initconst const struct x86_pmu core_pmu = {
1435 .name = "core", 1453 .name = "core",
1436 .handle_irq = x86_pmu_handle_irq, 1454 .handle_irq = x86_pmu_handle_irq,
@@ -1455,6 +1473,7 @@ static __initconst const struct x86_pmu core_pmu = {
1455 .put_event_constraints = intel_put_event_constraints, 1473 .put_event_constraints = intel_put_event_constraints,
1456 .event_constraints = intel_core_event_constraints, 1474 .event_constraints = intel_core_event_constraints,
1457 .guest_get_msrs = core_guest_get_msrs, 1475 .guest_get_msrs = core_guest_get_msrs,
1476 .format_attrs = intel_arch_formats_attr,
1458}; 1477};
1459 1478
1460struct intel_shared_regs *allocate_shared_regs(int cpu) 1479struct intel_shared_regs *allocate_shared_regs(int cpu)
@@ -1553,6 +1572,21 @@ static void intel_pmu_flush_branch_stack(void)
1553 intel_pmu_lbr_reset(); 1572 intel_pmu_lbr_reset();
1554} 1573}
1555 1574
1575PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
1576
1577static struct attribute *intel_arch3_formats_attr[] = {
1578 &format_attr_event.attr,
1579 &format_attr_umask.attr,
1580 &format_attr_edge.attr,
1581 &format_attr_pc.attr,
1582 &format_attr_any.attr,
1583 &format_attr_inv.attr,
1584 &format_attr_cmask.attr,
1585
1586 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
1587 NULL,
1588};
1589
1556static __initconst const struct x86_pmu intel_pmu = { 1590static __initconst const struct x86_pmu intel_pmu = {
1557 .name = "Intel", 1591 .name = "Intel",
1558 .handle_irq = intel_pmu_handle_irq, 1592 .handle_irq = intel_pmu_handle_irq,
@@ -1576,6 +1610,8 @@ static __initconst const struct x86_pmu intel_pmu = {
1576 .get_event_constraints = intel_get_event_constraints, 1610 .get_event_constraints = intel_get_event_constraints,
1577 .put_event_constraints = intel_put_event_constraints, 1611 .put_event_constraints = intel_put_event_constraints,
1578 1612
1613 .format_attrs = intel_arch3_formats_attr,
1614
1579 .cpu_prepare = intel_pmu_cpu_prepare, 1615 .cpu_prepare = intel_pmu_cpu_prepare,
1580 .cpu_starting = intel_pmu_cpu_starting, 1616 .cpu_starting = intel_pmu_cpu_starting,
1581 .cpu_dying = intel_pmu_cpu_dying, 1617 .cpu_dying = intel_pmu_cpu_dying,
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index c7181befecde..32bcfc7dd230 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -87,6 +87,23 @@ static void p6_pmu_enable_event(struct perf_event *event)
87 (void)checking_wrmsrl(hwc->config_base, val); 87 (void)checking_wrmsrl(hwc->config_base, val);
88} 88}
89 89
90PMU_FORMAT_ATTR(event, "config:0-7" );
91PMU_FORMAT_ATTR(umask, "config:8-15" );
92PMU_FORMAT_ATTR(edge, "config:18" );
93PMU_FORMAT_ATTR(pc, "config:19" );
94PMU_FORMAT_ATTR(inv, "config:23" );
95PMU_FORMAT_ATTR(cmask, "config:24-31" );
96
97static struct attribute *intel_p6_formats_attr[] = {
98 &format_attr_event.attr,
99 &format_attr_umask.attr,
100 &format_attr_edge.attr,
101 &format_attr_pc.attr,
102 &format_attr_inv.attr,
103 &format_attr_cmask.attr,
104 NULL,
105};
106
90static __initconst const struct x86_pmu p6_pmu = { 107static __initconst const struct x86_pmu p6_pmu = {
91 .name = "p6", 108 .name = "p6",
92 .handle_irq = x86_pmu_handle_irq, 109 .handle_irq = x86_pmu_handle_irq,
@@ -115,6 +132,8 @@ static __initconst const struct x86_pmu p6_pmu = {
115 .cntval_mask = (1ULL << 32) - 1, 132 .cntval_mask = (1ULL << 32) - 1,
116 .get_event_constraints = x86_get_event_constraints, 133 .get_event_constraints = x86_get_event_constraints,
117 .event_constraints = p6_event_constraints, 134 .event_constraints = p6_event_constraints,
135
136 .format_attrs = intel_p6_formats_attr,
118}; 137};
119 138
120__init int p6_pmu_init(void) 139__init int p6_pmu_init(void)
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 4025fe4f928f..1b81839b6c88 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -37,13 +37,16 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
37 const struct stacktrace_ops *ops, 37 const struct stacktrace_ops *ops,
38 struct thread_info *tinfo, int *graph) 38 struct thread_info *tinfo, int *graph)
39{ 39{
40 struct task_struct *task = tinfo->task; 40 struct task_struct *task;
41 unsigned long ret_addr; 41 unsigned long ret_addr;
42 int index = task->curr_ret_stack; 42 int index;
43 43
44 if (addr != (unsigned long)return_to_handler) 44 if (addr != (unsigned long)return_to_handler)
45 return; 45 return;
46 46
47 task = tinfo->task;
48 index = task->curr_ret_stack;
49
47 if (!task->ret_stack || index < *graph) 50 if (!task->ret_stack || index < *graph)
48 return; 51 return;
49 52
@@ -265,7 +268,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
265#endif 268#endif
266 printk("\n"); 269 printk("\n");
267 if (notify_die(DIE_OOPS, str, regs, err, 270 if (notify_die(DIE_OOPS, str, regs, err,
268 current->thread.trap_no, SIGSEGV) == NOTIFY_STOP) 271 current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
269 return 1; 272 return 1;
270 273
271 show_registers(regs); 274 show_registers(regs);
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 734ebd1d3caa..cdc79b5cfcd9 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -481,7 +481,12 @@ GLOBAL(system_call_after_swapgs)
481 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) 481 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
482 jnz tracesys 482 jnz tracesys
483system_call_fastpath: 483system_call_fastpath:
484#if __SYSCALL_MASK == ~0
484 cmpq $__NR_syscall_max,%rax 485 cmpq $__NR_syscall_max,%rax
486#else
487 andl $__SYSCALL_MASK,%eax
488 cmpl $__NR_syscall_max,%eax
489#endif
485 ja badsys 490 ja badsys
486 movq %r10,%rcx 491 movq %r10,%rcx
487 call *sys_call_table(,%rax,8) # XXX: rip relative 492 call *sys_call_table(,%rax,8) # XXX: rip relative
@@ -595,7 +600,12 @@ tracesys:
595 */ 600 */
596 LOAD_ARGS ARGOFFSET, 1 601 LOAD_ARGS ARGOFFSET, 1
597 RESTORE_REST 602 RESTORE_REST
603#if __SYSCALL_MASK == ~0
598 cmpq $__NR_syscall_max,%rax 604 cmpq $__NR_syscall_max,%rax
605#else
606 andl $__SYSCALL_MASK,%eax
607 cmpl $__NR_syscall_max,%eax
608#endif
599 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */ 609 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
600 movq %r10,%rcx /* fixup for C */ 610 movq %r10,%rcx /* fixup for C */
601 call *sys_call_table(,%rax,8) 611 call *sys_call_table(,%rax,8)
@@ -735,6 +745,40 @@ ENTRY(stub_rt_sigreturn)
735 CFI_ENDPROC 745 CFI_ENDPROC
736END(stub_rt_sigreturn) 746END(stub_rt_sigreturn)
737 747
748#ifdef CONFIG_X86_X32_ABI
749 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
750
751ENTRY(stub_x32_rt_sigreturn)
752 CFI_STARTPROC
753 addq $8, %rsp
754 PARTIAL_FRAME 0
755 SAVE_REST
756 movq %rsp,%rdi
757 FIXUP_TOP_OF_STACK %r11
758 call sys32_x32_rt_sigreturn
759 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
760 RESTORE_REST
761 jmp int_ret_from_sys_call
762 CFI_ENDPROC
763END(stub_x32_rt_sigreturn)
764
765ENTRY(stub_x32_execve)
766 CFI_STARTPROC
767 addq $8, %rsp
768 PARTIAL_FRAME 0
769 SAVE_REST
770 FIXUP_TOP_OF_STACK %r11
771 movq %rsp, %rcx
772 call sys32_execve
773 RESTORE_TOP_OF_STACK %r11
774 movq %rax,RAX(%rsp)
775 RESTORE_REST
776 jmp int_ret_from_sys_call
777 CFI_ENDPROC
778END(stub_x32_execve)
779
780#endif
781
738/* 782/*
739 * Build the entry stubs and pointer table with some assembler magic. 783 * Build the entry stubs and pointer table with some assembler magic.
740 * We pack 7 stubs into a single 32-byte chunk, which will fit in a 784 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 6d5fc8cfd5d6..252981afd6c4 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -60,7 +60,7 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
60 outb(0, 0xF0); 60 outb(0, 0xF0);
61 if (ignore_fpu_irq || !boot_cpu_data.hard_math) 61 if (ignore_fpu_irq || !boot_cpu_data.hard_math)
62 return IRQ_NONE; 62 return IRQ_NONE;
63 math_error(get_irq_regs(), 0, 16); 63 math_error(get_irq_regs(), 0, X86_TRAP_MF);
64 return IRQ_HANDLED; 64 return IRQ_HANDLED;
65} 65}
66 66
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9b24f36eb55f..a33afaa5ddb7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -12,6 +12,9 @@
12#include <linux/user-return-notifier.h> 12#include <linux/user-return-notifier.h>
13#include <linux/dmi.h> 13#include <linux/dmi.h>
14#include <linux/utsname.h> 14#include <linux/utsname.h>
15#include <linux/stackprotector.h>
16#include <linux/tick.h>
17#include <linux/cpuidle.h>
15#include <trace/events/power.h> 18#include <trace/events/power.h>
16#include <linux/hw_breakpoint.h> 19#include <linux/hw_breakpoint.h>
17#include <asm/cpu.h> 20#include <asm/cpu.h>
@@ -22,6 +25,24 @@
22#include <asm/i387.h> 25#include <asm/i387.h>
23#include <asm/fpu-internal.h> 26#include <asm/fpu-internal.h>
24#include <asm/debugreg.h> 27#include <asm/debugreg.h>
28#include <asm/nmi.h>
29
30#ifdef CONFIG_X86_64
31static DEFINE_PER_CPU(unsigned char, is_idle);
32static ATOMIC_NOTIFIER_HEAD(idle_notifier);
33
34void idle_notifier_register(struct notifier_block *n)
35{
36 atomic_notifier_chain_register(&idle_notifier, n);
37}
38EXPORT_SYMBOL_GPL(idle_notifier_register);
39
40void idle_notifier_unregister(struct notifier_block *n)
41{
42 atomic_notifier_chain_unregister(&idle_notifier, n);
43}
44EXPORT_SYMBOL_GPL(idle_notifier_unregister);
45#endif
25 46
26struct kmem_cache *task_xstate_cachep; 47struct kmem_cache *task_xstate_cachep;
27EXPORT_SYMBOL_GPL(task_xstate_cachep); 48EXPORT_SYMBOL_GPL(task_xstate_cachep);
@@ -370,6 +391,99 @@ static inline int hlt_use_halt(void)
370} 391}
371#endif 392#endif
372 393
394#ifndef CONFIG_SMP
395static inline void play_dead(void)
396{
397 BUG();
398}
399#endif
400
401#ifdef CONFIG_X86_64
402void enter_idle(void)
403{
404 percpu_write(is_idle, 1);
405 atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
406}
407
408static void __exit_idle(void)
409{
410 if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
411 return;
412 atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
413}
414
415/* Called from interrupts to signify idle end */
416void exit_idle(void)
417{
418 /* idle loop has pid 0 */
419 if (current->pid)
420 return;
421 __exit_idle();
422}
423#endif
424
425/*
426 * The idle thread. There's no useful work to be
427 * done, so just try to conserve power and have a
428 * low exit latency (ie sit in a loop waiting for
429 * somebody to say that they'd like to reschedule)
430 */
431void cpu_idle(void)
432{
433 /*
434 * If we're the non-boot CPU, nothing set the stack canary up
435 * for us. CPU0 already has it initialized but no harm in
436 * doing it again. This is a good place for updating it, as
437 * we wont ever return from this function (so the invalid
438 * canaries already on the stack wont ever trigger).
439 */
440 boot_init_stack_canary();
441 current_thread_info()->status |= TS_POLLING;
442
443 while (1) {
444 tick_nohz_idle_enter();
445
446 while (!need_resched()) {
447 rmb();
448
449 if (cpu_is_offline(smp_processor_id()))
450 play_dead();
451
452 /*
453 * Idle routines should keep interrupts disabled
454 * from here on, until they go to idle.
455 * Otherwise, idle callbacks can misfire.
456 */
457 local_touch_nmi();
458 local_irq_disable();
459
460 enter_idle();
461
462 /* Don't trace irqs off for idle */
463 stop_critical_timings();
464
465 /* enter_idle() needs rcu for notifiers */
466 rcu_idle_enter();
467
468 if (cpuidle_idle_call())
469 pm_idle();
470
471 rcu_idle_exit();
472 start_critical_timings();
473
474 /* In many cases the interrupt that ended idle
475 has already called exit_idle. But some idle
476 loops can be woken up without interrupt. */
477 __exit_idle();
478 }
479
480 tick_nohz_idle_exit();
481 preempt_enable_no_resched();
482 schedule();
483 preempt_disable();
484 }
485}
486
373/* 487/*
374 * We use this if we don't have any better 488 * We use this if we don't have any better
375 * idle routine.. 489 * idle routine..
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index aae4f4bbbe88..ae6847303e26 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -9,7 +9,6 @@
9 * This file handles the architecture-dependent parts of process handling.. 9 * This file handles the architecture-dependent parts of process handling..
10 */ 10 */
11 11
12#include <linux/stackprotector.h>
13#include <linux/cpu.h> 12#include <linux/cpu.h>
14#include <linux/errno.h> 13#include <linux/errno.h>
15#include <linux/sched.h> 14#include <linux/sched.h>
@@ -31,14 +30,12 @@
31#include <linux/kallsyms.h> 30#include <linux/kallsyms.h>
32#include <linux/ptrace.h> 31#include <linux/ptrace.h>
33#include <linux/personality.h> 32#include <linux/personality.h>
34#include <linux/tick.h>
35#include <linux/percpu.h> 33#include <linux/percpu.h>
36#include <linux/prctl.h> 34#include <linux/prctl.h>
37#include <linux/ftrace.h> 35#include <linux/ftrace.h>
38#include <linux/uaccess.h> 36#include <linux/uaccess.h>
39#include <linux/io.h> 37#include <linux/io.h>
40#include <linux/kdebug.h> 38#include <linux/kdebug.h>
41#include <linux/cpuidle.h>
42 39
43#include <asm/pgtable.h> 40#include <asm/pgtable.h>
44#include <asm/ldt.h> 41#include <asm/ldt.h>
@@ -57,7 +54,6 @@
57#include <asm/idle.h> 54#include <asm/idle.h>
58#include <asm/syscalls.h> 55#include <asm/syscalls.h>
59#include <asm/debugreg.h> 56#include <asm/debugreg.h>
60#include <asm/nmi.h>
61#include <asm/switch_to.h> 57#include <asm/switch_to.h>
62 58
63asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 59asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@@ -70,60 +66,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
70 return ((unsigned long *)tsk->thread.sp)[3]; 66 return ((unsigned long *)tsk->thread.sp)[3];
71} 67}
72 68
73#ifndef CONFIG_SMP
74static inline void play_dead(void)
75{
76 BUG();
77}
78#endif
79
80/*
81 * The idle thread. There's no useful work to be
82 * done, so just try to conserve power and have a
83 * low exit latency (ie sit in a loop waiting for
84 * somebody to say that they'd like to reschedule)
85 */
86void cpu_idle(void)
87{
88 int cpu = smp_processor_id();
89
90 /*
91 * If we're the non-boot CPU, nothing set the stack canary up
92 * for us. CPU0 already has it initialized but no harm in
93 * doing it again. This is a good place for updating it, as
94 * we wont ever return from this function (so the invalid
95 * canaries already on the stack wont ever trigger).
96 */
97 boot_init_stack_canary();
98
99 current_thread_info()->status |= TS_POLLING;
100
101 /* endless idle loop with no priority at all */
102 while (1) {
103 tick_nohz_idle_enter();
104 rcu_idle_enter();
105 while (!need_resched()) {
106
107 check_pgt_cache();
108 rmb();
109
110 if (cpu_is_offline(cpu))
111 play_dead();
112
113 local_touch_nmi();
114 local_irq_disable();
115 /* Don't trace irqs off for idle */
116 stop_critical_timings();
117 if (cpuidle_idle_call())
118 pm_idle();
119 start_critical_timings();
120 }
121 rcu_idle_exit();
122 tick_nohz_idle_exit();
123 schedule_preempt_disabled();
124 }
125}
126
127void __show_regs(struct pt_regs *regs, int all) 69void __show_regs(struct pt_regs *regs, int all)
128{ 70{
129 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; 71 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 61270e8d428a..733ca39f367e 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -14,7 +14,6 @@
14 * This file handles the architecture-dependent parts of process handling.. 14 * This file handles the architecture-dependent parts of process handling..
15 */ 15 */
16 16
17#include <linux/stackprotector.h>
18#include <linux/cpu.h> 17#include <linux/cpu.h>
19#include <linux/errno.h> 18#include <linux/errno.h>
20#include <linux/sched.h> 19#include <linux/sched.h>
@@ -32,12 +31,10 @@
32#include <linux/notifier.h> 31#include <linux/notifier.h>
33#include <linux/kprobes.h> 32#include <linux/kprobes.h>
34#include <linux/kdebug.h> 33#include <linux/kdebug.h>
35#include <linux/tick.h>
36#include <linux/prctl.h> 34#include <linux/prctl.h>
37#include <linux/uaccess.h> 35#include <linux/uaccess.h>
38#include <linux/io.h> 36#include <linux/io.h>
39#include <linux/ftrace.h> 37#include <linux/ftrace.h>
40#include <linux/cpuidle.h>
41 38
42#include <asm/pgtable.h> 39#include <asm/pgtable.h>
43#include <asm/processor.h> 40#include <asm/processor.h>
@@ -51,115 +48,11 @@
51#include <asm/idle.h> 48#include <asm/idle.h>
52#include <asm/syscalls.h> 49#include <asm/syscalls.h>
53#include <asm/debugreg.h> 50#include <asm/debugreg.h>
54#include <asm/nmi.h>
55#include <asm/switch_to.h> 51#include <asm/switch_to.h>
56 52
57asmlinkage extern void ret_from_fork(void); 53asmlinkage extern void ret_from_fork(void);
58 54
59DEFINE_PER_CPU(unsigned long, old_rsp); 55DEFINE_PER_CPU(unsigned long, old_rsp);
60static DEFINE_PER_CPU(unsigned char, is_idle);
61
62static ATOMIC_NOTIFIER_HEAD(idle_notifier);
63
64void idle_notifier_register(struct notifier_block *n)
65{
66 atomic_notifier_chain_register(&idle_notifier, n);
67}
68EXPORT_SYMBOL_GPL(idle_notifier_register);
69
70void idle_notifier_unregister(struct notifier_block *n)
71{
72 atomic_notifier_chain_unregister(&idle_notifier, n);
73}
74EXPORT_SYMBOL_GPL(idle_notifier_unregister);
75
76void enter_idle(void)
77{
78 percpu_write(is_idle, 1);
79 atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
80}
81
82static void __exit_idle(void)
83{
84 if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
85 return;
86 atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
87}
88
89/* Called from interrupts to signify idle end */
90void exit_idle(void)
91{
92 /* idle loop has pid 0 */
93 if (current->pid)
94 return;
95 __exit_idle();
96}
97
98#ifndef CONFIG_SMP
99static inline void play_dead(void)
100{
101 BUG();
102}
103#endif
104
105/*
106 * The idle thread. There's no useful work to be
107 * done, so just try to conserve power and have a
108 * low exit latency (ie sit in a loop waiting for
109 * somebody to say that they'd like to reschedule)
110 */
111void cpu_idle(void)
112{
113 current_thread_info()->status |= TS_POLLING;
114
115 /*
116 * If we're the non-boot CPU, nothing set the stack canary up
117 * for us. CPU0 already has it initialized but no harm in
118 * doing it again. This is a good place for updating it, as
119 * we wont ever return from this function (so the invalid
120 * canaries already on the stack wont ever trigger).
121 */
122 boot_init_stack_canary();
123
124 /* endless idle loop with no priority at all */
125 while (1) {
126 tick_nohz_idle_enter();
127 while (!need_resched()) {
128
129 rmb();
130
131 if (cpu_is_offline(smp_processor_id()))
132 play_dead();
133 /*
134 * Idle routines should keep interrupts disabled
135 * from here on, until they go to idle.
136 * Otherwise, idle callbacks can misfire.
137 */
138 local_touch_nmi();
139 local_irq_disable();
140 enter_idle();
141 /* Don't trace irqs off for idle */
142 stop_critical_timings();
143
144 /* enter_idle() needs rcu for notifiers */
145 rcu_idle_enter();
146
147 if (cpuidle_idle_call())
148 pm_idle();
149
150 rcu_idle_exit();
151 start_critical_timings();
152
153 /* In many cases the interrupt that ended idle
154 has already called exit_idle. But some idle
155 loops can be woken up without interrupt. */
156 __exit_idle();
157 }
158
159 tick_nohz_idle_exit();
160 schedule_preempt_disabled();
161 }
162}
163 56
164/* Prints also some state that isn't saved in the pt_regs */ 57/* Prints also some state that isn't saved in the pt_regs */
165void __show_regs(struct pt_regs *regs, int all) 58void __show_regs(struct pt_regs *regs, int all)
@@ -365,7 +258,9 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
365void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp) 258void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
366{ 259{
367 start_thread_common(regs, new_ip, new_sp, 260 start_thread_common(regs, new_ip, new_sp,
368 __USER32_CS, __USER32_DS, __USER32_DS); 261 test_thread_flag(TIF_X32)
262 ? __USER_CS : __USER32_CS,
263 __USER_DS, __USER_DS);
369} 264}
370#endif 265#endif
371 266
@@ -488,6 +383,8 @@ void set_personality_64bit(void)
488 383
489 /* Make sure to be in 64bit mode */ 384 /* Make sure to be in 64bit mode */
490 clear_thread_flag(TIF_IA32); 385 clear_thread_flag(TIF_IA32);
386 clear_thread_flag(TIF_ADDR32);
387 clear_thread_flag(TIF_X32);
491 388
492 /* Ensure the corresponding mm is not marked. */ 389 /* Ensure the corresponding mm is not marked. */
493 if (current->mm) 390 if (current->mm)
@@ -500,20 +397,31 @@ void set_personality_64bit(void)
500 current->personality &= ~READ_IMPLIES_EXEC; 397 current->personality &= ~READ_IMPLIES_EXEC;
501} 398}
502 399
503void set_personality_ia32(void) 400void set_personality_ia32(bool x32)
504{ 401{
505 /* inherit personality from parent */ 402 /* inherit personality from parent */
506 403
507 /* Make sure to be in 32bit mode */ 404 /* Make sure to be in 32bit mode */
508 set_thread_flag(TIF_IA32); 405 set_thread_flag(TIF_ADDR32);
509 current->personality |= force_personality32;
510 406
511 /* Mark the associated mm as containing 32-bit tasks. */ 407 /* Mark the associated mm as containing 32-bit tasks. */
512 if (current->mm) 408 if (current->mm)
513 current->mm->context.ia32_compat = 1; 409 current->mm->context.ia32_compat = 1;
514 410
515 /* Prepare the first "return" to user space */ 411 if (x32) {
516 current_thread_info()->status |= TS_COMPAT; 412 clear_thread_flag(TIF_IA32);
413 set_thread_flag(TIF_X32);
414 current->personality &= ~READ_IMPLIES_EXEC;
415 /* is_compat_task() uses the presence of the x32
416 syscall bit flag to determine compat status */
417 current_thread_info()->status &= ~TS_COMPAT;
418 } else {
419 set_thread_flag(TIF_IA32);
420 clear_thread_flag(TIF_X32);
421 current->personality |= force_personality32;
422 /* Prepare the first "return" to user space */
423 current_thread_info()->status |= TS_COMPAT;
424 }
517} 425}
518 426
519unsigned long get_wchan(struct task_struct *p) 427unsigned long get_wchan(struct task_struct *p)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 8a634c887652..685845cf16e0 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -33,6 +33,7 @@
33#include <asm/prctl.h> 33#include <asm/prctl.h>
34#include <asm/proto.h> 34#include <asm/proto.h>
35#include <asm/hw_breakpoint.h> 35#include <asm/hw_breakpoint.h>
36#include <asm/traps.h>
36 37
37#include "tls.h" 38#include "tls.h"
38 39
@@ -1130,6 +1131,100 @@ static int genregs32_set(struct task_struct *target,
1130 return ret; 1131 return ret;
1131} 1132}
1132 1133
1134#ifdef CONFIG_X86_X32_ABI
1135static long x32_arch_ptrace(struct task_struct *child,
1136 compat_long_t request, compat_ulong_t caddr,
1137 compat_ulong_t cdata)
1138{
1139 unsigned long addr = caddr;
1140 unsigned long data = cdata;
1141 void __user *datap = compat_ptr(data);
1142 int ret;
1143
1144 switch (request) {
1145 /* Read 32bits at location addr in the USER area. Only allow
1146 to return the lower 32bits of segment and debug registers. */
1147 case PTRACE_PEEKUSR: {
1148 u32 tmp;
1149
1150 ret = -EIO;
1151 if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
1152 addr < offsetof(struct user_regs_struct, cs))
1153 break;
1154
1155 tmp = 0; /* Default return condition */
1156 if (addr < sizeof(struct user_regs_struct))
1157 tmp = getreg(child, addr);
1158 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1159 addr <= offsetof(struct user, u_debugreg[7])) {
1160 addr -= offsetof(struct user, u_debugreg[0]);
1161 tmp = ptrace_get_debugreg(child, addr / sizeof(data));
1162 }
1163 ret = put_user(tmp, (__u32 __user *)datap);
1164 break;
1165 }
1166
1167 /* Write the word at location addr in the USER area. Only allow
1168 to update segment and debug registers with the upper 32bits
1169 zero-extended. */
1170 case PTRACE_POKEUSR:
1171 ret = -EIO;
1172 if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
1173 addr < offsetof(struct user_regs_struct, cs))
1174 break;
1175
1176 if (addr < sizeof(struct user_regs_struct))
1177 ret = putreg(child, addr, data);
1178 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
1179 addr <= offsetof(struct user, u_debugreg[7])) {
1180 addr -= offsetof(struct user, u_debugreg[0]);
1181 ret = ptrace_set_debugreg(child,
1182 addr / sizeof(data), data);
1183 }
1184 break;
1185
1186 case PTRACE_GETREGS: /* Get all gp regs from the child. */
1187 return copy_regset_to_user(child,
1188 task_user_regset_view(current),
1189 REGSET_GENERAL,
1190 0, sizeof(struct user_regs_struct),
1191 datap);
1192
1193 case PTRACE_SETREGS: /* Set all gp regs in the child. */
1194 return copy_regset_from_user(child,
1195 task_user_regset_view(current),
1196 REGSET_GENERAL,
1197 0, sizeof(struct user_regs_struct),
1198 datap);
1199
1200 case PTRACE_GETFPREGS: /* Get the child FPU state. */
1201 return copy_regset_to_user(child,
1202 task_user_regset_view(current),
1203 REGSET_FP,
1204 0, sizeof(struct user_i387_struct),
1205 datap);
1206
1207 case PTRACE_SETFPREGS: /* Set the child FPU state. */
1208 return copy_regset_from_user(child,
1209 task_user_regset_view(current),
1210 REGSET_FP,
1211 0, sizeof(struct user_i387_struct),
1212 datap);
1213
1214 /* normal 64bit interface to access TLS data.
1215 Works just like arch_prctl, except that the arguments
1216 are reversed. */
1217 case PTRACE_ARCH_PRCTL:
1218 return do_arch_prctl(child, data, addr);
1219
1220 default:
1221 return compat_ptrace_request(child, request, addr, data);
1222 }
1223
1224 return ret;
1225}
1226#endif
1227
1133long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 1228long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1134 compat_ulong_t caddr, compat_ulong_t cdata) 1229 compat_ulong_t caddr, compat_ulong_t cdata)
1135{ 1230{
@@ -1139,6 +1234,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1139 int ret; 1234 int ret;
1140 __u32 val; 1235 __u32 val;
1141 1236
1237#ifdef CONFIG_X86_X32_ABI
1238 if (!is_ia32_task())
1239 return x32_arch_ptrace(child, request, caddr, cdata);
1240#endif
1241
1142 switch (request) { 1242 switch (request) {
1143 case PTRACE_PEEKUSR: 1243 case PTRACE_PEEKUSR:
1144 ret = getreg32(child, addr, &val); 1244 ret = getreg32(child, addr, &val);
@@ -1326,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
1326 int error_code, int si_code, 1426 int error_code, int si_code,
1327 struct siginfo *info) 1427 struct siginfo *info)
1328{ 1428{
1329 tsk->thread.trap_no = 1; 1429 tsk->thread.trap_nr = X86_TRAP_DB;
1330 tsk->thread.error_code = error_code; 1430 tsk->thread.error_code = error_code;
1331 1431
1332 memset(info, 0, sizeof(*info)); 1432 memset(info, 0, sizeof(*info));
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 25edcfc9ba5b..115eac431483 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -10,10 +10,8 @@
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/signal.h>
14#include <linux/errno.h> 13#include <linux/errno.h>
15#include <linux/wait.h> 14#include <linux/wait.h>
16#include <linux/ptrace.h>
17#include <linux/tracehook.h> 15#include <linux/tracehook.h>
18#include <linux/unistd.h> 16#include <linux/unistd.h>
19#include <linux/stddef.h> 17#include <linux/stddef.h>
@@ -27,10 +25,12 @@
27#include <asm/fpu-internal.h> 25#include <asm/fpu-internal.h>
28#include <asm/vdso.h> 26#include <asm/vdso.h>
29#include <asm/mce.h> 27#include <asm/mce.h>
28#include <asm/sighandling.h>
30 29
31#ifdef CONFIG_X86_64 30#ifdef CONFIG_X86_64
32#include <asm/proto.h> 31#include <asm/proto.h>
33#include <asm/ia32_unistd.h> 32#include <asm/ia32_unistd.h>
33#include <asm/sys_ia32.h>
34#endif /* CONFIG_X86_64 */ 34#endif /* CONFIG_X86_64 */
35 35
36#include <asm/syscall.h> 36#include <asm/syscall.h>
@@ -38,13 +38,6 @@
38 38
39#include <asm/sigframe.h> 39#include <asm/sigframe.h>
40 40
41#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
42
43#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
44 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
45 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
46 X86_EFLAGS_CF)
47
48#ifdef CONFIG_X86_32 41#ifdef CONFIG_X86_32
49# define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF) 42# define FIX_EFLAGS (__FIX_EFLAGS | X86_EFLAGS_RF)
50#else 43#else
@@ -69,9 +62,8 @@
69 regs->seg = GET_SEG(seg) | 3; \ 62 regs->seg = GET_SEG(seg) | 3; \
70} while (0) 63} while (0)
71 64
72static int 65int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
73restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, 66 unsigned long *pax)
74 unsigned long *pax)
75{ 67{
76 void __user *buf; 68 void __user *buf;
77 unsigned int tmpflags; 69 unsigned int tmpflags;
@@ -126,9 +118,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
126 return err; 118 return err;
127} 119}
128 120
129static int 121int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
130setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, 122 struct pt_regs *regs, unsigned long mask)
131 struct pt_regs *regs, unsigned long mask)
132{ 123{
133 int err = 0; 124 int err = 0;
134 125
@@ -160,7 +151,7 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
160 put_user_ex(regs->r15, &sc->r15); 151 put_user_ex(regs->r15, &sc->r15);
161#endif /* CONFIG_X86_64 */ 152#endif /* CONFIG_X86_64 */
162 153
163 put_user_ex(current->thread.trap_no, &sc->trapno); 154 put_user_ex(current->thread.trap_nr, &sc->trapno);
164 put_user_ex(current->thread.error_code, &sc->err); 155 put_user_ex(current->thread.error_code, &sc->err);
165 put_user_ex(regs->ip, &sc->ip); 156 put_user_ex(regs->ip, &sc->ip);
166#ifdef CONFIG_X86_32 157#ifdef CONFIG_X86_32
@@ -643,6 +634,16 @@ static int signr_convert(int sig)
643#define is_ia32 0 634#define is_ia32 0
644#endif /* CONFIG_IA32_EMULATION */ 635#endif /* CONFIG_IA32_EMULATION */
645 636
637#ifdef CONFIG_X86_X32_ABI
638#define is_x32 test_thread_flag(TIF_X32)
639
640static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
641 siginfo_t *info, compat_sigset_t *set,
642 struct pt_regs *regs);
643#else /* !CONFIG_X86_X32_ABI */
644#define is_x32 0
645#endif /* CONFIG_X86_X32_ABI */
646
646int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 647int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
647 sigset_t *set, struct pt_regs *regs); 648 sigset_t *set, struct pt_regs *regs);
648int ia32_setup_frame(int sig, struct k_sigaction *ka, 649int ia32_setup_frame(int sig, struct k_sigaction *ka,
@@ -667,8 +668,14 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
667 ret = ia32_setup_rt_frame(usig, ka, info, set, regs); 668 ret = ia32_setup_rt_frame(usig, ka, info, set, regs);
668 else 669 else
669 ret = ia32_setup_frame(usig, ka, set, regs); 670 ret = ia32_setup_frame(usig, ka, set, regs);
670 } else 671#ifdef CONFIG_X86_X32_ABI
672 } else if (is_x32) {
673 ret = x32_setup_rt_frame(usig, ka, info,
674 (compat_sigset_t *)set, regs);
675#endif
676 } else {
671 ret = __setup_rt_frame(sig, ka, info, set, regs); 677 ret = __setup_rt_frame(sig, ka, info, set, regs);
678 }
672 679
673 if (ret) { 680 if (ret) {
674 force_sigsegv(sig, current); 681 force_sigsegv(sig, current);
@@ -851,3 +858,102 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
851 858
852 force_sig(SIGSEGV, me); 859 force_sig(SIGSEGV, me);
853} 860}
861
862#ifdef CONFIG_X86_X32_ABI
863static int x32_setup_rt_frame(int sig, struct k_sigaction *ka,
864 siginfo_t *info, compat_sigset_t *set,
865 struct pt_regs *regs)
866{
867 struct rt_sigframe_x32 __user *frame;
868 void __user *restorer;
869 int err = 0;
870 void __user *fpstate = NULL;
871
872 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
873
874 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
875 return -EFAULT;
876
877 if (ka->sa.sa_flags & SA_SIGINFO) {
878 if (copy_siginfo_to_user32(&frame->info, info))
879 return -EFAULT;
880 }
881
882 put_user_try {
883 /* Create the ucontext. */
884 if (cpu_has_xsave)
885 put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
886 else
887 put_user_ex(0, &frame->uc.uc_flags);
888 put_user_ex(0, &frame->uc.uc_link);
889 put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
890 put_user_ex(sas_ss_flags(regs->sp),
891 &frame->uc.uc_stack.ss_flags);
892 put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
893 put_user_ex(0, &frame->uc.uc__pad0);
894 err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
895 regs, set->sig[0]);
896 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
897
898 if (ka->sa.sa_flags & SA_RESTORER) {
899 restorer = ka->sa.sa_restorer;
900 } else {
901 /* could use a vstub here */
902 restorer = NULL;
903 err |= -EFAULT;
904 }
905 put_user_ex(restorer, &frame->pretcode);
906 } put_user_catch(err);
907
908 if (err)
909 return -EFAULT;
910
911 /* Set up registers for signal handler */
912 regs->sp = (unsigned long) frame;
913 regs->ip = (unsigned long) ka->sa.sa_handler;
914
915 /* We use the x32 calling convention here... */
916 regs->di = sig;
917 regs->si = (unsigned long) &frame->info;
918 regs->dx = (unsigned long) &frame->uc;
919
920 loadsegment(ds, __USER_DS);
921 loadsegment(es, __USER_DS);
922
923 regs->cs = __USER_CS;
924 regs->ss = __USER_DS;
925
926 return 0;
927}
928
929asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs)
930{
931 struct rt_sigframe_x32 __user *frame;
932 sigset_t set;
933 unsigned long ax;
934 struct pt_regs tregs;
935
936 frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);
937
938 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
939 goto badframe;
940 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
941 goto badframe;
942
943 sigdelsetmask(&set, ~_BLOCKABLE);
944 set_current_blocked(&set);
945
946 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
947 goto badframe;
948
949 tregs = *regs;
950 if (sys32_sigaltstack(&frame->uc.uc_stack, NULL, &tregs) == -EFAULT)
951 goto badframe;
952
953 return ax;
954
955badframe:
956 signal_fault(regs, frame, "x32 rt_sigreturn");
957 return 0;
958}
959#endif
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5104a2b685cf..6e1e406038c2 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -50,6 +50,7 @@
50#include <linux/tboot.h> 50#include <linux/tboot.h>
51#include <linux/stackprotector.h> 51#include <linux/stackprotector.h>
52#include <linux/gfp.h> 52#include <linux/gfp.h>
53#include <linux/cpuidle.h>
53 54
54#include <asm/acpi.h> 55#include <asm/acpi.h>
55#include <asm/desc.h> 56#include <asm/desc.h>
@@ -219,14 +220,9 @@ static void __cpuinit smp_callin(void)
219 * Update loops_per_jiffy in cpu_data. Previous call to 220 * Update loops_per_jiffy in cpu_data. Previous call to
220 * smp_store_cpu_info() stored a value that is close but not as 221 * smp_store_cpu_info() stored a value that is close but not as
221 * accurate as the value just calculated. 222 * accurate as the value just calculated.
222 *
223 * Need to enable IRQs because it can take longer and then
224 * the NMI watchdog might kill us.
225 */ 223 */
226 local_irq_enable();
227 calibrate_delay(); 224 calibrate_delay();
228 cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy; 225 cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
229 local_irq_disable();
230 pr_debug("Stack at about %p\n", &cpuid); 226 pr_debug("Stack at about %p\n", &cpuid);
231 227
232 /* 228 /*
@@ -1409,7 +1405,8 @@ void native_play_dead(void)
1409 tboot_shutdown(TB_SHUTDOWN_WFS); 1405 tboot_shutdown(TB_SHUTDOWN_WFS);
1410 1406
1411 mwait_play_dead(); /* Only returns on failure */ 1407 mwait_play_dead(); /* Only returns on failure */
1412 hlt_play_dead(); 1408 if (cpuidle_play_dead())
1409 hlt_play_dead();
1413} 1410}
1414 1411
1415#else /* ... !CONFIG_HOTPLUG_CPU */ 1412#else /* ... !CONFIG_HOTPLUG_CPU */
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index ef59642ff1bf..b4d3c3927dd8 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -98,7 +98,7 @@ out:
98static void find_start_end(unsigned long flags, unsigned long *begin, 98static void find_start_end(unsigned long flags, unsigned long *begin,
99 unsigned long *end) 99 unsigned long *end)
100{ 100{
101 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) { 101 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
102 unsigned long new_begin; 102 unsigned long new_begin;
103 /* This is usually used needed to map code in small 103 /* This is usually used needed to map code in small
104 model, so it needs to be in the first 31bit. Limit 104 model, so it needs to be in the first 31bit. Limit
@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
144 (!vma || addr + len <= vma->vm_start)) 144 (!vma || addr + len <= vma->vm_start))
145 return addr; 145 return addr;
146 } 146 }
147 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) 147 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
148 && len <= mm->cached_hole_size) { 148 && len <= mm->cached_hole_size) {
149 mm->cached_hole_size = 0; 149 mm->cached_hole_size = 0;
150 mm->free_area_cache = begin; 150 mm->free_area_cache = begin;
@@ -205,7 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
205 return addr; 205 return addr;
206 206
207 /* for MAP_32BIT mappings we force the legact mmap base */ 207 /* for MAP_32BIT mappings we force the legact mmap base */
208 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) 208 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
209 goto bottomup; 209 goto bottomup;
210 210
211 /* requesting a specific address */ 211 /* requesting a specific address */
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c
index 7ac7943be02c..5c7f8c20da74 100644
--- a/arch/x86/kernel/syscall_64.c
+++ b/arch/x86/kernel/syscall_64.c
@@ -5,6 +5,14 @@
5#include <linux/cache.h> 5#include <linux/cache.h>
6#include <asm/asm-offsets.h> 6#include <asm/asm-offsets.h>
7 7
8#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
9
10#ifdef CONFIG_X86_X32_ABI
11# define __SYSCALL_X32(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
12#else
13# define __SYSCALL_X32(nr, sym, compat) /* nothing */
14#endif
15
8#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ; 16#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
9#include <asm/syscalls_64.h> 17#include <asm/syscalls_64.h>
10#undef __SYSCALL_64 18#undef __SYSCALL_64
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index e2410e27f97e..6410744ac5cb 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -272,7 +272,7 @@ static void tboot_copy_fadt(const struct acpi_table_fadt *fadt)
272 offsetof(struct acpi_table_facs, firmware_waking_vector); 272 offsetof(struct acpi_table_facs, firmware_waking_vector);
273} 273}
274 274
275void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control) 275static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
276{ 276{
277 static u32 acpi_shutdown_map[ACPI_S_STATE_COUNT] = { 277 static u32 acpi_shutdown_map[ACPI_S_STATE_COUNT] = {
278 /* S0,1,2: */ -1, -1, -1, 278 /* S0,1,2: */ -1, -1, -1,
@@ -281,7 +281,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
281 /* S5: */ TB_SHUTDOWN_S5 }; 281 /* S5: */ TB_SHUTDOWN_S5 };
282 282
283 if (!tboot_enabled()) 283 if (!tboot_enabled())
284 return; 284 return 0;
285 285
286 tboot_copy_fadt(&acpi_gbl_FADT); 286 tboot_copy_fadt(&acpi_gbl_FADT);
287 tboot->acpi_sinfo.pm1a_cnt_val = pm1a_control; 287 tboot->acpi_sinfo.pm1a_cnt_val = pm1a_control;
@@ -292,10 +292,11 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
292 if (sleep_state >= ACPI_S_STATE_COUNT || 292 if (sleep_state >= ACPI_S_STATE_COUNT ||
293 acpi_shutdown_map[sleep_state] == -1) { 293 acpi_shutdown_map[sleep_state] == -1) {
294 pr_warning("unsupported sleep state 0x%x\n", sleep_state); 294 pr_warning("unsupported sleep state 0x%x\n", sleep_state);
295 return; 295 return -1;
296 } 296 }
297 297
298 tboot_shutdown(acpi_shutdown_map[sleep_state]); 298 tboot_shutdown(acpi_shutdown_map[sleep_state]);
299 return 0;
299} 300}
300 301
301static atomic_t ap_wfs_count; 302static atomic_t ap_wfs_count;
@@ -345,6 +346,8 @@ static __init int tboot_late_init(void)
345 346
346 atomic_set(&ap_wfs_count, 0); 347 atomic_set(&ap_wfs_count, 0);
347 register_hotcpu_notifier(&tboot_cpu_notifier); 348 register_hotcpu_notifier(&tboot_cpu_notifier);
349
350 acpi_os_set_prepare_sleep(&tboot_sleep);
348 return 0; 351 return 0;
349} 352}
350 353
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 73920e4c6dc5..9d9d2f9e77a5 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -162,7 +162,7 @@ int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
162{ 162{
163 const struct desc_struct *tls; 163 const struct desc_struct *tls;
164 164
165 if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || 165 if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
166 (pos % sizeof(struct user_desc)) != 0 || 166 (pos % sizeof(struct user_desc)) != 0 ||
167 (count % sizeof(struct user_desc)) != 0) 167 (count % sizeof(struct user_desc)) != 0)
168 return -EINVAL; 168 return -EINVAL;
@@ -197,7 +197,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
197 struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES]; 197 struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
198 const struct user_desc *info; 198 const struct user_desc *info;
199 199
200 if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || 200 if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
201 (pos % sizeof(struct user_desc)) != 0 || 201 (pos % sizeof(struct user_desc)) != 0 ||
202 (count % sizeof(struct user_desc)) != 0) 202 (count % sizeof(struct user_desc)) != 0)
203 return -EINVAL; 203 return -EINVAL;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 860f126ca233..ff9281f16029 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -119,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
119 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. 119 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
120 * On nmi (interrupt 2), do_trap should not be called. 120 * On nmi (interrupt 2), do_trap should not be called.
121 */ 121 */
122 if (trapnr < 6) 122 if (trapnr < X86_TRAP_UD)
123 goto vm86_trap; 123 goto vm86_trap;
124 goto trap_signal; 124 goto trap_signal;
125 } 125 }
@@ -132,7 +132,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
132trap_signal: 132trap_signal:
133#endif 133#endif
134 /* 134 /*
135 * We want error_code and trap_no set for userspace faults and 135 * We want error_code and trap_nr set for userspace faults and
136 * kernelspace faults which result in die(), but not 136 * kernelspace faults which result in die(), but not
137 * kernelspace faults which are fixed up. die() gives the 137 * kernelspace faults which are fixed up. die() gives the
138 * process no chance to handle the signal and notice the 138 * process no chance to handle the signal and notice the
@@ -141,7 +141,7 @@ trap_signal:
141 * delivered, faults. See also do_general_protection below. 141 * delivered, faults. See also do_general_protection below.
142 */ 142 */
143 tsk->thread.error_code = error_code; 143 tsk->thread.error_code = error_code;
144 tsk->thread.trap_no = trapnr; 144 tsk->thread.trap_nr = trapnr;
145 145
146#ifdef CONFIG_X86_64 146#ifdef CONFIG_X86_64
147 if (show_unhandled_signals && unhandled_signal(tsk, signr) && 147 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
@@ -164,7 +164,7 @@ trap_signal:
164kernel_trap: 164kernel_trap:
165 if (!fixup_exception(regs)) { 165 if (!fixup_exception(regs)) {
166 tsk->thread.error_code = error_code; 166 tsk->thread.error_code = error_code;
167 tsk->thread.trap_no = trapnr; 167 tsk->thread.trap_nr = trapnr;
168 die(str, regs, error_code); 168 die(str, regs, error_code);
169 } 169 }
170 return; 170 return;
@@ -203,27 +203,31 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
203 do_trap(trapnr, signr, str, regs, error_code, &info); \ 203 do_trap(trapnr, signr, str, regs, error_code, &info); \
204} 204}
205 205
206DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) 206DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
207DO_ERROR(4, SIGSEGV, "overflow", overflow) 207 regs->ip)
208DO_ERROR(5, SIGSEGV, "bounds", bounds) 208DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
209DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip) 209DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
210DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) 210DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
211DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) 211 regs->ip)
212DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) 212DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
213 coprocessor_segment_overrun)
214DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
215DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
213#ifdef CONFIG_X86_32 216#ifdef CONFIG_X86_32
214DO_ERROR(12, SIGBUS, "stack segment", stack_segment) 217DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
215#endif 218#endif
216DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) 219DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
220 BUS_ADRALN, 0)
217 221
218#ifdef CONFIG_X86_64 222#ifdef CONFIG_X86_64
219/* Runs on IST stack */ 223/* Runs on IST stack */
220dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) 224dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
221{ 225{
222 if (notify_die(DIE_TRAP, "stack segment", regs, error_code, 226 if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
223 12, SIGBUS) == NOTIFY_STOP) 227 X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
224 return; 228 return;
225 preempt_conditional_sti(regs); 229 preempt_conditional_sti(regs);
226 do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL); 230 do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
227 preempt_conditional_cli(regs); 231 preempt_conditional_cli(regs);
228} 232}
229 233
@@ -233,10 +237,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
233 struct task_struct *tsk = current; 237 struct task_struct *tsk = current;
234 238
235 /* Return not checked because double check cannot be ignored */ 239 /* Return not checked because double check cannot be ignored */
236 notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV); 240 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
237 241
238 tsk->thread.error_code = error_code; 242 tsk->thread.error_code = error_code;
239 tsk->thread.trap_no = 8; 243 tsk->thread.trap_nr = X86_TRAP_DF;
240 244
241 /* 245 /*
242 * This is always a kernel trap and never fixable (and thus must 246 * This is always a kernel trap and never fixable (and thus must
@@ -264,7 +268,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
264 goto gp_in_kernel; 268 goto gp_in_kernel;
265 269
266 tsk->thread.error_code = error_code; 270 tsk->thread.error_code = error_code;
267 tsk->thread.trap_no = 13; 271 tsk->thread.trap_nr = X86_TRAP_GP;
268 272
269 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && 273 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
270 printk_ratelimit()) { 274 printk_ratelimit()) {
@@ -291,9 +295,9 @@ gp_in_kernel:
291 return; 295 return;
292 296
293 tsk->thread.error_code = error_code; 297 tsk->thread.error_code = error_code;
294 tsk->thread.trap_no = 13; 298 tsk->thread.trap_nr = X86_TRAP_GP;
295 if (notify_die(DIE_GPF, "general protection fault", regs, 299 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
296 error_code, 13, SIGSEGV) == NOTIFY_STOP) 300 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
297 return; 301 return;
298 die("general protection fault", regs, error_code); 302 die("general protection fault", regs, error_code);
299} 303}
@@ -302,13 +306,13 @@ gp_in_kernel:
302dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) 306dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
303{ 307{
304#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 308#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
305 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) 309 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
306 == NOTIFY_STOP) 310 SIGTRAP) == NOTIFY_STOP)
307 return; 311 return;
308#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ 312#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
309 313
310 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) 314 if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
311 == NOTIFY_STOP) 315 SIGTRAP) == NOTIFY_STOP)
312 return; 316 return;
313 317
314 /* 318 /*
@@ -317,7 +321,7 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
317 */ 321 */
318 debug_stack_usage_inc(); 322 debug_stack_usage_inc();
319 preempt_conditional_sti(regs); 323 preempt_conditional_sti(regs);
320 do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); 324 do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
321 preempt_conditional_cli(regs); 325 preempt_conditional_cli(regs);
322 debug_stack_usage_dec(); 326 debug_stack_usage_dec();
323} 327}
@@ -422,8 +426,8 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
422 preempt_conditional_sti(regs); 426 preempt_conditional_sti(regs);
423 427
424 if (regs->flags & X86_VM_MASK) { 428 if (regs->flags & X86_VM_MASK) {
425 handle_vm86_trap((struct kernel_vm86_regs *) regs, 429 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
426 error_code, 1); 430 X86_TRAP_DB);
427 preempt_conditional_cli(regs); 431 preempt_conditional_cli(regs);
428 debug_stack_usage_dec(); 432 debug_stack_usage_dec();
429 return; 433 return;
@@ -460,7 +464,8 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
460 struct task_struct *task = current; 464 struct task_struct *task = current;
461 siginfo_t info; 465 siginfo_t info;
462 unsigned short err; 466 unsigned short err;
463 char *str = (trapnr == 16) ? "fpu exception" : "simd exception"; 467 char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
468 "simd exception";
464 469
465 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) 470 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
466 return; 471 return;
@@ -470,7 +475,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
470 { 475 {
471 if (!fixup_exception(regs)) { 476 if (!fixup_exception(regs)) {
472 task->thread.error_code = error_code; 477 task->thread.error_code = error_code;
473 task->thread.trap_no = trapnr; 478 task->thread.trap_nr = trapnr;
474 die(str, regs, error_code); 479 die(str, regs, error_code);
475 } 480 }
476 return; 481 return;
@@ -480,12 +485,12 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
480 * Save the info for the exception handler and clear the error. 485 * Save the info for the exception handler and clear the error.
481 */ 486 */
482 save_init_fpu(task); 487 save_init_fpu(task);
483 task->thread.trap_no = trapnr; 488 task->thread.trap_nr = trapnr;
484 task->thread.error_code = error_code; 489 task->thread.error_code = error_code;
485 info.si_signo = SIGFPE; 490 info.si_signo = SIGFPE;
486 info.si_errno = 0; 491 info.si_errno = 0;
487 info.si_addr = (void __user *)regs->ip; 492 info.si_addr = (void __user *)regs->ip;
488 if (trapnr == 16) { 493 if (trapnr == X86_TRAP_MF) {
489 unsigned short cwd, swd; 494 unsigned short cwd, swd;
490 /* 495 /*
491 * (~cwd & swd) will mask out exceptions that are not set to unmasked 496 * (~cwd & swd) will mask out exceptions that are not set to unmasked
@@ -529,10 +534,11 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
529 info.si_code = FPE_FLTRES; 534 info.si_code = FPE_FLTRES;
530 } else { 535 } else {
531 /* 536 /*
532 * If we're using IRQ 13, or supposedly even some trap 16 537 * If we're using IRQ 13, or supposedly even some trap
533 * implementations, it's possible we get a spurious trap... 538 * X86_TRAP_MF implementations, it's possible
539 * we get a spurious trap, which is not an error.
534 */ 540 */
535 return; /* Spurious trap, no error */ 541 return;
536 } 542 }
537 force_sig_info(SIGFPE, &info, task); 543 force_sig_info(SIGFPE, &info, task);
538} 544}
@@ -543,13 +549,13 @@ dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
543 ignore_fpu_irq = 1; 549 ignore_fpu_irq = 1;
544#endif 550#endif
545 551
546 math_error(regs, error_code, 16); 552 math_error(regs, error_code, X86_TRAP_MF);
547} 553}
548 554
549dotraplinkage void 555dotraplinkage void
550do_simd_coprocessor_error(struct pt_regs *regs, long error_code) 556do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
551{ 557{
552 math_error(regs, error_code, 19); 558 math_error(regs, error_code, X86_TRAP_XF);
553} 559}
554 560
555dotraplinkage void 561dotraplinkage void
@@ -643,20 +649,21 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
643 info.si_errno = 0; 649 info.si_errno = 0;
644 info.si_code = ILL_BADSTK; 650 info.si_code = ILL_BADSTK;
645 info.si_addr = NULL; 651 info.si_addr = NULL;
646 if (notify_die(DIE_TRAP, "iret exception", 652 if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
647 regs, error_code, 32, SIGILL) == NOTIFY_STOP) 653 X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
648 return; 654 return;
649 do_trap(32, SIGILL, "iret exception", regs, error_code, &info); 655 do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
656 &info);
650} 657}
651#endif 658#endif
652 659
653/* Set of traps needed for early debugging. */ 660/* Set of traps needed for early debugging. */
654void __init early_trap_init(void) 661void __init early_trap_init(void)
655{ 662{
656 set_intr_gate_ist(1, &debug, DEBUG_STACK); 663 set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
657 /* int3 can be called from all */ 664 /* int3 can be called from all */
658 set_system_intr_gate_ist(3, &int3, DEBUG_STACK); 665 set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
659 set_intr_gate(14, &page_fault); 666 set_intr_gate(X86_TRAP_PF, &page_fault);
660 load_idt(&idt_descr); 667 load_idt(&idt_descr);
661} 668}
662 669
@@ -672,30 +679,30 @@ void __init trap_init(void)
672 early_iounmap(p, 4); 679 early_iounmap(p, 4);
673#endif 680#endif
674 681
675 set_intr_gate(0, &divide_error); 682 set_intr_gate(X86_TRAP_DE, &divide_error);
676 set_intr_gate_ist(2, &nmi, NMI_STACK); 683 set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
677 /* int4 can be called from all */ 684 /* int4 can be called from all */
678 set_system_intr_gate(4, &overflow); 685 set_system_intr_gate(X86_TRAP_OF, &overflow);
679 set_intr_gate(5, &bounds); 686 set_intr_gate(X86_TRAP_BR, &bounds);
680 set_intr_gate(6, &invalid_op); 687 set_intr_gate(X86_TRAP_UD, &invalid_op);
681 set_intr_gate(7, &device_not_available); 688 set_intr_gate(X86_TRAP_NM, &device_not_available);
682#ifdef CONFIG_X86_32 689#ifdef CONFIG_X86_32
683 set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS); 690 set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
684#else 691#else
685 set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK); 692 set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
686#endif 693#endif
687 set_intr_gate(9, &coprocessor_segment_overrun); 694 set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
688 set_intr_gate(10, &invalid_TSS); 695 set_intr_gate(X86_TRAP_TS, &invalid_TSS);
689 set_intr_gate(11, &segment_not_present); 696 set_intr_gate(X86_TRAP_NP, &segment_not_present);
690 set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK); 697 set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
691 set_intr_gate(13, &general_protection); 698 set_intr_gate(X86_TRAP_GP, &general_protection);
692 set_intr_gate(15, &spurious_interrupt_bug); 699 set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
693 set_intr_gate(16, &coprocessor_error); 700 set_intr_gate(X86_TRAP_MF, &coprocessor_error);
694 set_intr_gate(17, &alignment_check); 701 set_intr_gate(X86_TRAP_AC, &alignment_check);
695#ifdef CONFIG_X86_MCE 702#ifdef CONFIG_X86_MCE
696 set_intr_gate_ist(18, &machine_check, MCE_STACK); 703 set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
697#endif 704#endif
698 set_intr_gate(19, &simd_coprocessor_error); 705 set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);
699 706
700 /* Reserve all the builtin and the syscall vector: */ 707 /* Reserve all the builtin and the syscall vector: */
701 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) 708 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
@@ -720,7 +727,7 @@ void __init trap_init(void)
720 727
721#ifdef CONFIG_X86_64 728#ifdef CONFIG_X86_64
722 memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16); 729 memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
723 set_nmi_gate(1, &debug); 730 set_nmi_gate(X86_TRAP_DB, &debug);
724 set_nmi_gate(3, &int3); 731 set_nmi_gate(X86_TRAP_BP, &int3);
725#endif 732#endif
726} 733}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 899a03f2d181..fc0a147e3727 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -933,6 +933,16 @@ static int __init init_tsc_clocksource(void)
933 clocksource_tsc.rating = 0; 933 clocksource_tsc.rating = 0;
934 clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; 934 clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
935 } 935 }
936
937 /*
938 * Trust the results of the earlier calibration on systems
939 * exporting a reliable TSC.
940 */
941 if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
942 clocksource_register_khz(&clocksource_tsc, tsc_khz);
943 return 0;
944 }
945
936 schedule_delayed_work(&tsc_irqwork, 0); 946 schedule_delayed_work(&tsc_irqwork, 0);
937 return 0; 947 return 0;
938} 948}
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 328cb37bb827..255f58ae71e8 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -569,7 +569,7 @@ int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
569 } 569 }
570 if (trapno != 1) 570 if (trapno != 1)
571 return 1; /* we let this handle by the calling routine */ 571 return 1; /* we let this handle by the calling routine */
572 current->thread.trap_no = trapno; 572 current->thread.trap_nr = trapno;
573 current->thread.error_code = error_code; 573 current->thread.error_code = error_code;
574 force_sig(SIGTRAP, current); 574 force_sig(SIGTRAP, current);
575 return 0; 575 return 0;
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index b07ba9393564..f386dc49f988 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -52,10 +52,7 @@
52#include "vsyscall_trace.h" 52#include "vsyscall_trace.h"
53 53
54DEFINE_VVAR(int, vgetcpu_mode); 54DEFINE_VVAR(int, vgetcpu_mode);
55DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = 55DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
56{
57 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
58};
59 56
60static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; 57static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
61 58
@@ -80,20 +77,15 @@ early_param("vsyscall", vsyscall_setup);
80 77
81void update_vsyscall_tz(void) 78void update_vsyscall_tz(void)
82{ 79{
83 unsigned long flags;
84
85 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
86 /* sys_tz has changed */
87 vsyscall_gtod_data.sys_tz = sys_tz; 80 vsyscall_gtod_data.sys_tz = sys_tz;
88 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
89} 81}
90 82
91void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, 83void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
92 struct clocksource *clock, u32 mult) 84 struct clocksource *clock, u32 mult)
93{ 85{
94 unsigned long flags; 86 struct timespec monotonic;
95 87
96 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); 88 write_seqcount_begin(&vsyscall_gtod_data.seq);
97 89
98 /* copy vsyscall data */ 90 /* copy vsyscall data */
99 vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode; 91 vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
@@ -101,12 +93,19 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
101 vsyscall_gtod_data.clock.mask = clock->mask; 93 vsyscall_gtod_data.clock.mask = clock->mask;
102 vsyscall_gtod_data.clock.mult = mult; 94 vsyscall_gtod_data.clock.mult = mult;
103 vsyscall_gtod_data.clock.shift = clock->shift; 95 vsyscall_gtod_data.clock.shift = clock->shift;
96
104 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec; 97 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
105 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec; 98 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
106 vsyscall_gtod_data.wall_to_monotonic = *wtm; 99
100 monotonic = timespec_add(*wall_time, *wtm);
101 vsyscall_gtod_data.monotonic_time_sec = monotonic.tv_sec;
102 vsyscall_gtod_data.monotonic_time_nsec = monotonic.tv_nsec;
103
107 vsyscall_gtod_data.wall_time_coarse = __current_kernel_time(); 104 vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
105 vsyscall_gtod_data.monotonic_time_coarse =
106 timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);
108 107
109 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags); 108 write_seqcount_end(&vsyscall_gtod_data.seq);
110} 109}
111 110
112static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, 111static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
@@ -153,7 +152,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
153 152
154 thread->error_code = 6; /* user fault, no page, write */ 153 thread->error_code = 6; /* user fault, no page, write */
155 thread->cr2 = ptr; 154 thread->cr2 = ptr;
156 thread->trap_no = 14; 155 thread->trap_nr = X86_TRAP_PF;
157 156
158 memset(&info, 0, sizeof(info)); 157 memset(&info, 0, sizeof(info));
159 info.si_signo = SIGSEGV; 158 info.si_signo = SIGSEGV;
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 7718541541d4..9b868124128d 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -28,6 +28,7 @@
28#include <linux/regset.h> 28#include <linux/regset.h>
29 29
30#include <asm/uaccess.h> 30#include <asm/uaccess.h>
31#include <asm/traps.h>
31#include <asm/desc.h> 32#include <asm/desc.h>
32#include <asm/user.h> 33#include <asm/user.h>
33#include <asm/i387.h> 34#include <asm/i387.h>
@@ -269,7 +270,7 @@ void math_emulate(struct math_emu_info *info)
269 FPU_EIP = FPU_ORIG_EIP; /* Point to current FPU instruction. */ 270 FPU_EIP = FPU_ORIG_EIP; /* Point to current FPU instruction. */
270 271
271 RE_ENTRANT_CHECK_OFF; 272 RE_ENTRANT_CHECK_OFF;
272 current->thread.trap_no = 16; 273 current->thread.trap_nr = X86_TRAP_MF;
273 current->thread.error_code = 0; 274 current->thread.error_code = 0;
274 send_sig(SIGFPE, current, 1); 275 send_sig(SIGFPE, current, 1);
275 return; 276 return;
@@ -662,7 +663,7 @@ static int valid_prefix(u_char *Byte, u_char __user **fpu_eip,
662void math_abort(struct math_emu_info *info, unsigned int signal) 663void math_abort(struct math_emu_info *info, unsigned int signal)
663{ 664{
664 FPU_EIP = FPU_ORIG_EIP; 665 FPU_EIP = FPU_ORIG_EIP;
665 current->thread.trap_no = 16; 666 current->thread.trap_nr = X86_TRAP_MF;
666 current->thread.error_code = 0; 667 current->thread.error_code = 0;
667 send_sig(signal, current, 1); 668 send_sig(signal, current, 1);
668 RE_ENTRANT_CHECK_OFF; 669 RE_ENTRANT_CHECK_OFF;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f0b4caf85c1a..3ecfd1aaf214 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -615,7 +615,7 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code,
615 dump_pagetable(address); 615 dump_pagetable(address);
616 616
617 tsk->thread.cr2 = address; 617 tsk->thread.cr2 = address;
618 tsk->thread.trap_no = 14; 618 tsk->thread.trap_nr = X86_TRAP_PF;
619 tsk->thread.error_code = error_code; 619 tsk->thread.error_code = error_code;
620 620
621 if (__die("Bad pagetable", regs, error_code)) 621 if (__die("Bad pagetable", regs, error_code))
@@ -636,7 +636,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
636 /* Are we prepared to handle this kernel fault? */ 636 /* Are we prepared to handle this kernel fault? */
637 if (fixup_exception(regs)) { 637 if (fixup_exception(regs)) {
638 if (current_thread_info()->sig_on_uaccess_error && signal) { 638 if (current_thread_info()->sig_on_uaccess_error && signal) {
639 tsk->thread.trap_no = 14; 639 tsk->thread.trap_nr = X86_TRAP_PF;
640 tsk->thread.error_code = error_code | PF_USER; 640 tsk->thread.error_code = error_code | PF_USER;
641 tsk->thread.cr2 = address; 641 tsk->thread.cr2 = address;
642 642
@@ -676,7 +676,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
676 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 676 printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
677 677
678 tsk->thread.cr2 = address; 678 tsk->thread.cr2 = address;
679 tsk->thread.trap_no = 14; 679 tsk->thread.trap_nr = X86_TRAP_PF;
680 tsk->thread.error_code = error_code; 680 tsk->thread.error_code = error_code;
681 681
682 sig = SIGKILL; 682 sig = SIGKILL;
@@ -754,7 +754,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
754 /* Kernel addresses are always protection faults: */ 754 /* Kernel addresses are always protection faults: */
755 tsk->thread.cr2 = address; 755 tsk->thread.cr2 = address;
756 tsk->thread.error_code = error_code | (address >= TASK_SIZE); 756 tsk->thread.error_code = error_code | (address >= TASK_SIZE);
757 tsk->thread.trap_no = 14; 757 tsk->thread.trap_nr = X86_TRAP_PF;
758 758
759 force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); 759 force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
760 760
@@ -838,7 +838,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
838 838
839 tsk->thread.cr2 = address; 839 tsk->thread.cr2 = address;
840 tsk->thread.error_code = error_code; 840 tsk->thread.error_code = error_code;
841 tsk->thread.trap_no = 14; 841 tsk->thread.trap_nr = X86_TRAP_PF;
842 842
843#ifdef CONFIG_MEMORY_FAILURE 843#ifdef CONFIG_MEMORY_FAILURE
844 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) { 844 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 1c1c4f46a7c1..efb5b4b93711 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -70,7 +70,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
70 return; 70 return;
71 pxm = pa->proximity_domain; 71 pxm = pa->proximity_domain;
72 apic_id = pa->apic_id; 72 apic_id = pa->apic_id;
73 if (!cpu_has_x2apic && (apic_id >= 0xff)) { 73 if (!apic->apic_id_valid(apic_id)) {
74 printk(KERN_INFO "SRAT: PXM %u -> X2APIC 0x%04x ignored\n", 74 printk(KERN_INFO "SRAT: PXM %u -> X2APIC 0x%04x ignored\n",
75 pxm, apic_id); 75 pxm, apic_id);
76 return; 76 return;
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index bff89dfe3619..d6aa6e8315d1 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -67,7 +67,7 @@ x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
67{ 67{
68 struct stack_frame_ia32 *head; 68 struct stack_frame_ia32 *head;
69 69
70 /* User process is 32-bit */ 70 /* User process is IA32 */
71 if (!current || !test_thread_flag(TIF_IA32)) 71 if (!current || !test_thread_flag(TIF_IA32))
72 return 0; 72 return 0;
73 73
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
index 7cce722667b8..a4bee53c2e54 100644
--- a/arch/x86/platform/olpc/olpc.c
+++ b/arch/x86/platform/olpc/olpc.c
@@ -20,6 +20,8 @@
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/syscore_ops.h> 22#include <linux/syscore_ops.h>
23#include <linux/debugfs.h>
24#include <linux/mutex.h>
23 25
24#include <asm/geode.h> 26#include <asm/geode.h>
25#include <asm/setup.h> 27#include <asm/setup.h>
@@ -31,6 +33,15 @@ EXPORT_SYMBOL_GPL(olpc_platform_info);
31 33
32static DEFINE_SPINLOCK(ec_lock); 34static DEFINE_SPINLOCK(ec_lock);
33 35
36/* debugfs interface to EC commands */
37#define EC_MAX_CMD_ARGS (5 + 1) /* cmd byte + 5 args */
38#define EC_MAX_CMD_REPLY (8)
39
40static struct dentry *ec_debugfs_dir;
41static DEFINE_MUTEX(ec_debugfs_cmd_lock);
42static unsigned char ec_debugfs_resp[EC_MAX_CMD_REPLY];
43static unsigned int ec_debugfs_resp_bytes;
44
34/* EC event mask to be applied during suspend (defining wakeup sources). */ 45/* EC event mask to be applied during suspend (defining wakeup sources). */
35static u16 ec_wakeup_mask; 46static u16 ec_wakeup_mask;
36 47
@@ -269,6 +280,91 @@ int olpc_ec_sci_query(u16 *sci_value)
269} 280}
270EXPORT_SYMBOL_GPL(olpc_ec_sci_query); 281EXPORT_SYMBOL_GPL(olpc_ec_sci_query);
271 282
283static ssize_t ec_debugfs_cmd_write(struct file *file, const char __user *buf,
284 size_t size, loff_t *ppos)
285{
286 int i, m;
287 unsigned char ec_cmd[EC_MAX_CMD_ARGS];
288 unsigned int ec_cmd_int[EC_MAX_CMD_ARGS];
289 char cmdbuf[64];
290 int ec_cmd_bytes;
291
292 mutex_lock(&ec_debugfs_cmd_lock);
293
294 size = simple_write_to_buffer(cmdbuf, sizeof(cmdbuf), ppos, buf, size);
295
296 m = sscanf(cmdbuf, "%x:%u %x %x %x %x %x", &ec_cmd_int[0],
297 &ec_debugfs_resp_bytes,
298 &ec_cmd_int[1], &ec_cmd_int[2], &ec_cmd_int[3],
299 &ec_cmd_int[4], &ec_cmd_int[5]);
300 if (m < 2 || ec_debugfs_resp_bytes > EC_MAX_CMD_REPLY) {
301 /* reset to prevent overflow on read */
302 ec_debugfs_resp_bytes = 0;
303
304 printk(KERN_DEBUG "olpc-ec: bad ec cmd: "
305 "cmd:response-count [arg1 [arg2 ...]]\n");
306 size = -EINVAL;
307 goto out;
308 }
309
310 /* convert scanf'd ints to char */
311 ec_cmd_bytes = m - 2;
312 for (i = 0; i <= ec_cmd_bytes; i++)
313 ec_cmd[i] = ec_cmd_int[i];
314
315 printk(KERN_DEBUG "olpc-ec: debugfs cmd 0x%02x with %d args "
316 "%02x %02x %02x %02x %02x, want %d returns\n",
317 ec_cmd[0], ec_cmd_bytes, ec_cmd[1], ec_cmd[2], ec_cmd[3],
318 ec_cmd[4], ec_cmd[5], ec_debugfs_resp_bytes);
319
320 olpc_ec_cmd(ec_cmd[0], (ec_cmd_bytes == 0) ? NULL : &ec_cmd[1],
321 ec_cmd_bytes, ec_debugfs_resp, ec_debugfs_resp_bytes);
322
323 printk(KERN_DEBUG "olpc-ec: response "
324 "%02x %02x %02x %02x %02x %02x %02x %02x (%d bytes expected)\n",
325 ec_debugfs_resp[0], ec_debugfs_resp[1], ec_debugfs_resp[2],
326 ec_debugfs_resp[3], ec_debugfs_resp[4], ec_debugfs_resp[5],
327 ec_debugfs_resp[6], ec_debugfs_resp[7], ec_debugfs_resp_bytes);
328
329out:
330 mutex_unlock(&ec_debugfs_cmd_lock);
331 return size;
332}
333
334static ssize_t ec_debugfs_cmd_read(struct file *file, char __user *buf,
335 size_t size, loff_t *ppos)
336{
337 unsigned int i, r;
338 char *rp;
339 char respbuf[64];
340
341 mutex_lock(&ec_debugfs_cmd_lock);
342 rp = respbuf;
343 rp += sprintf(rp, "%02x", ec_debugfs_resp[0]);
344 for (i = 1; i < ec_debugfs_resp_bytes; i++)
345 rp += sprintf(rp, ", %02x", ec_debugfs_resp[i]);
346 mutex_unlock(&ec_debugfs_cmd_lock);
347 rp += sprintf(rp, "\n");
348
349 r = rp - respbuf;
350 return simple_read_from_buffer(buf, size, ppos, respbuf, r);
351}
352
353static const struct file_operations ec_debugfs_genops = {
354 .write = ec_debugfs_cmd_write,
355 .read = ec_debugfs_cmd_read,
356};
357
358static void setup_debugfs(void)
359{
360 ec_debugfs_dir = debugfs_create_dir("olpc-ec", 0);
361 if (ec_debugfs_dir == ERR_PTR(-ENODEV))
362 return;
363
364 debugfs_create_file("cmd", 0600, ec_debugfs_dir, NULL,
365 &ec_debugfs_genops);
366}
367
272static int olpc_ec_suspend(void) 368static int olpc_ec_suspend(void)
273{ 369{
274 return olpc_ec_mask_write(ec_wakeup_mask); 370 return olpc_ec_mask_write(ec_wakeup_mask);
@@ -372,6 +468,7 @@ static int __init olpc_init(void)
372 } 468 }
373 469
374 register_syscore_ops(&olpc_syscore_ops); 470 register_syscore_ops(&olpc_syscore_ops);
471 setup_debugfs();
375 472
376 return 0; 473 return 0;
377} 474}
diff --git a/arch/x86/syscalls/Makefile b/arch/x86/syscalls/Makefile
index 564b2476fede..3236aebc828d 100644
--- a/arch/x86/syscalls/Makefile
+++ b/arch/x86/syscalls/Makefile
@@ -10,8 +10,10 @@ syshdr := $(srctree)/$(src)/syscallhdr.sh
10systbl := $(srctree)/$(src)/syscalltbl.sh 10systbl := $(srctree)/$(src)/syscalltbl.sh
11 11
12quiet_cmd_syshdr = SYSHDR $@ 12quiet_cmd_syshdr = SYSHDR $@
13 cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' $< $@ \ 13 cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
14 $(syshdr_abi_$(basetarget)) $(syshdr_pfx_$(basetarget)) 14 '$(syshdr_abi_$(basetarget))' \
15 '$(syshdr_pfx_$(basetarget))' \
16 '$(syshdr_offset_$(basetarget))'
15quiet_cmd_systbl = SYSTBL $@ 17quiet_cmd_systbl = SYSTBL $@
16 cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@ 18 cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@
17 19
@@ -24,18 +26,28 @@ syshdr_pfx_unistd_32_ia32 := ia32_
24$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr) 26$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr)
25 $(call if_changed,syshdr) 27 $(call if_changed,syshdr)
26 28
27syshdr_abi_unistd_64 := 64 29syshdr_abi_unistd_x32 := common,x32
30syshdr_offset_unistd_x32 := __X32_SYSCALL_BIT
31$(out)/unistd_x32.h: $(syscall64) $(syshdr)
32 $(call if_changed,syshdr)
33
34syshdr_abi_unistd_64 := common,64
28$(out)/unistd_64.h: $(syscall64) $(syshdr) 35$(out)/unistd_64.h: $(syscall64) $(syshdr)
29 $(call if_changed,syshdr) 36 $(call if_changed,syshdr)
30 37
38syshdr_abi_unistd_64_x32 := x32
39syshdr_pfx_unistd_64_x32 := x32_
40$(out)/unistd_64_x32.h: $(syscall64) $(syshdr)
41 $(call if_changed,syshdr)
42
31$(out)/syscalls_32.h: $(syscall32) $(systbl) 43$(out)/syscalls_32.h: $(syscall32) $(systbl)
32 $(call if_changed,systbl) 44 $(call if_changed,systbl)
33$(out)/syscalls_64.h: $(syscall64) $(systbl) 45$(out)/syscalls_64.h: $(syscall64) $(systbl)
34 $(call if_changed,systbl) 46 $(call if_changed,systbl)
35 47
36syshdr-y += unistd_32.h unistd_64.h 48syshdr-y += unistd_32.h unistd_64.h unistd_x32.h
37syshdr-y += syscalls_32.h 49syshdr-y += syscalls_32.h
38syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h 50syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h unistd_64_x32.h
39syshdr-$(CONFIG_X86_64) += syscalls_64.h 51syshdr-$(CONFIG_X86_64) += syscalls_64.h
40 52
41targets += $(syshdr-y) 53targets += $(syshdr-y)
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index e7e67cc3c14b..29f9f0554f7d 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -181,7 +181,7 @@
181172 i386 prctl sys_prctl 181172 i386 prctl sys_prctl
182173 i386 rt_sigreturn ptregs_rt_sigreturn stub32_rt_sigreturn 182173 i386 rt_sigreturn ptregs_rt_sigreturn stub32_rt_sigreturn
183174 i386 rt_sigaction sys_rt_sigaction sys32_rt_sigaction 183174 i386 rt_sigaction sys_rt_sigaction sys32_rt_sigaction
184175 i386 rt_sigprocmask sys_rt_sigprocmask sys32_rt_sigprocmask 184175 i386 rt_sigprocmask sys_rt_sigprocmask
185176 i386 rt_sigpending sys_rt_sigpending sys32_rt_sigpending 185176 i386 rt_sigpending sys_rt_sigpending sys32_rt_sigpending
186177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait 186177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
187178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo sys32_rt_sigqueueinfo 187178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo sys32_rt_sigqueueinfo
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index b440a8f7eefa..dd29a9ea27c5 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -4,317 +4,350 @@
4# The format is: 4# The format is:
5# <number> <abi> <name> <entry point> 5# <number> <abi> <name> <entry point>
6# 6#
7# The abi is always "64" for this file (for now.) 7# The abi is "common", "64" or "x32" for this file.
8# 8#
90 64 read sys_read 90 common read sys_read
101 64 write sys_write 101 common write sys_write
112 64 open sys_open 112 common open sys_open
123 64 close sys_close 123 common close sys_close
134 64 stat sys_newstat 134 common stat sys_newstat
145 64 fstat sys_newfstat 145 common fstat sys_newfstat
156 64 lstat sys_newlstat 156 common lstat sys_newlstat
167 64 poll sys_poll 167 common poll sys_poll
178 64 lseek sys_lseek 178 common lseek sys_lseek
189 64 mmap sys_mmap 189 common mmap sys_mmap
1910 64 mprotect sys_mprotect 1910 common mprotect sys_mprotect
2011 64 munmap sys_munmap 2011 common munmap sys_munmap
2112 64 brk sys_brk 2112 common brk sys_brk
2213 64 rt_sigaction sys_rt_sigaction 2213 64 rt_sigaction sys_rt_sigaction
2314 64 rt_sigprocmask sys_rt_sigprocmask 2314 common rt_sigprocmask sys_rt_sigprocmask
2415 64 rt_sigreturn stub_rt_sigreturn 2415 64 rt_sigreturn stub_rt_sigreturn
2516 64 ioctl sys_ioctl 2516 64 ioctl sys_ioctl
2617 64 pread64 sys_pread64 2617 common pread64 sys_pread64
2718 64 pwrite64 sys_pwrite64 2718 common pwrite64 sys_pwrite64
2819 64 readv sys_readv 2819 64 readv sys_readv
2920 64 writev sys_writev 2920 64 writev sys_writev
3021 64 access sys_access 3021 common access sys_access
3122 64 pipe sys_pipe 3122 common pipe sys_pipe
3223 64 select sys_select 3223 common select sys_select
3324 64 sched_yield sys_sched_yield 3324 common sched_yield sys_sched_yield
3425 64 mremap sys_mremap 3425 common mremap sys_mremap
3526 64 msync sys_msync 3526 common msync sys_msync
3627 64 mincore sys_mincore 3627 common mincore sys_mincore
3728 64 madvise sys_madvise 3728 common madvise sys_madvise
3829 64 shmget sys_shmget 3829 common shmget sys_shmget
3930 64 shmat sys_shmat 3930 common shmat sys_shmat
4031 64 shmctl sys_shmctl 4031 common shmctl sys_shmctl
4132 64 dup sys_dup 4132 common dup sys_dup
4233 64 dup2 sys_dup2 4233 common dup2 sys_dup2
4334 64 pause sys_pause 4334 common pause sys_pause
4435 64 nanosleep sys_nanosleep 4435 common nanosleep sys_nanosleep
4536 64 getitimer sys_getitimer 4536 common getitimer sys_getitimer
4637 64 alarm sys_alarm 4637 common alarm sys_alarm
4738 64 setitimer sys_setitimer 4738 common setitimer sys_setitimer
4839 64 getpid sys_getpid 4839 common getpid sys_getpid
4940 64 sendfile sys_sendfile64 4940 common sendfile sys_sendfile64
5041 64 socket sys_socket 5041 common socket sys_socket
5142 64 connect sys_connect 5142 common connect sys_connect
5243 64 accept sys_accept 5243 common accept sys_accept
5344 64 sendto sys_sendto 5344 common sendto sys_sendto
5445 64 recvfrom sys_recvfrom 5445 64 recvfrom sys_recvfrom
5546 64 sendmsg sys_sendmsg 5546 64 sendmsg sys_sendmsg
5647 64 recvmsg sys_recvmsg 5647 64 recvmsg sys_recvmsg
5748 64 shutdown sys_shutdown 5748 common shutdown sys_shutdown
5849 64 bind sys_bind 5849 common bind sys_bind
5950 64 listen sys_listen 5950 common listen sys_listen
6051 64 getsockname sys_getsockname 6051 common getsockname sys_getsockname
6152 64 getpeername sys_getpeername 6152 common getpeername sys_getpeername
6253 64 socketpair sys_socketpair 6253 common socketpair sys_socketpair
6354 64 setsockopt sys_setsockopt 6354 common setsockopt sys_setsockopt
6455 64 getsockopt sys_getsockopt 6455 common getsockopt sys_getsockopt
6556 64 clone stub_clone 6556 common clone stub_clone
6657 64 fork stub_fork 6657 common fork stub_fork
6758 64 vfork stub_vfork 6758 common vfork stub_vfork
6859 64 execve stub_execve 6859 64 execve stub_execve
6960 64 exit sys_exit 6960 common exit sys_exit
7061 64 wait4 sys_wait4 7061 common wait4 sys_wait4
7162 64 kill sys_kill 7162 common kill sys_kill
7263 64 uname sys_newuname 7263 common uname sys_newuname
7364 64 semget sys_semget 7364 common semget sys_semget
7465 64 semop sys_semop 7465 common semop sys_semop
7566 64 semctl sys_semctl 7566 common semctl sys_semctl
7667 64 shmdt sys_shmdt 7667 common shmdt sys_shmdt
7768 64 msgget sys_msgget 7768 common msgget sys_msgget
7869 64 msgsnd sys_msgsnd 7869 common msgsnd sys_msgsnd
7970 64 msgrcv sys_msgrcv 7970 common msgrcv sys_msgrcv
8071 64 msgctl sys_msgctl 8071 common msgctl sys_msgctl
8172 64 fcntl sys_fcntl 8172 common fcntl sys_fcntl
8273 64 flock sys_flock 8273 common flock sys_flock
8374 64 fsync sys_fsync 8374 common fsync sys_fsync
8475 64 fdatasync sys_fdatasync 8475 common fdatasync sys_fdatasync
8576 64 truncate sys_truncate 8576 common truncate sys_truncate
8677 64 ftruncate sys_ftruncate 8677 common ftruncate sys_ftruncate
8778 64 getdents sys_getdents 8778 common getdents sys_getdents
8879 64 getcwd sys_getcwd 8879 common getcwd sys_getcwd
8980 64 chdir sys_chdir 8980 common chdir sys_chdir
9081 64 fchdir sys_fchdir 9081 common fchdir sys_fchdir
9182 64 rename sys_rename 9182 common rename sys_rename
9283 64 mkdir sys_mkdir 9283 common mkdir sys_mkdir
9384 64 rmdir sys_rmdir 9384 common rmdir sys_rmdir
9485 64 creat sys_creat 9485 common creat sys_creat
9586 64 link sys_link 9586 common link sys_link
9687 64 unlink sys_unlink 9687 common unlink sys_unlink
9788 64 symlink sys_symlink 9788 common symlink sys_symlink
9889 64 readlink sys_readlink 9889 common readlink sys_readlink
9990 64 chmod sys_chmod 9990 common chmod sys_chmod
10091 64 fchmod sys_fchmod 10091 common fchmod sys_fchmod
10192 64 chown sys_chown 10192 common chown sys_chown
10293 64 fchown sys_fchown 10293 common fchown sys_fchown
10394 64 lchown sys_lchown 10394 common lchown sys_lchown
10495 64 umask sys_umask 10495 common umask sys_umask
10596 64 gettimeofday sys_gettimeofday 10596 common gettimeofday sys_gettimeofday
10697 64 getrlimit sys_getrlimit 10697 common getrlimit sys_getrlimit
10798 64 getrusage sys_getrusage 10798 common getrusage sys_getrusage
10899 64 sysinfo sys_sysinfo 10899 common sysinfo sys_sysinfo
109100 64 times sys_times 109100 common times sys_times
110101 64 ptrace sys_ptrace 110101 64 ptrace sys_ptrace
111102 64 getuid sys_getuid 111102 common getuid sys_getuid
112103 64 syslog sys_syslog 112103 common syslog sys_syslog
113104 64 getgid sys_getgid 113104 common getgid sys_getgid
114105 64 setuid sys_setuid 114105 common setuid sys_setuid
115106 64 setgid sys_setgid 115106 common setgid sys_setgid
116107 64 geteuid sys_geteuid 116107 common geteuid sys_geteuid
117108 64 getegid sys_getegid 117108 common getegid sys_getegid
118109 64 setpgid sys_setpgid 118109 common setpgid sys_setpgid
119110 64 getppid sys_getppid 119110 common getppid sys_getppid
120111 64 getpgrp sys_getpgrp 120111 common getpgrp sys_getpgrp
121112 64 setsid sys_setsid 121112 common setsid sys_setsid
122113 64 setreuid sys_setreuid 122113 common setreuid sys_setreuid
123114 64 setregid sys_setregid 123114 common setregid sys_setregid
124115 64 getgroups sys_getgroups 124115 common getgroups sys_getgroups
125116 64 setgroups sys_setgroups 125116 common setgroups sys_setgroups
126117 64 setresuid sys_setresuid 126117 common setresuid sys_setresuid
127118 64 getresuid sys_getresuid 127118 common getresuid sys_getresuid
128119 64 setresgid sys_setresgid 128119 common setresgid sys_setresgid
129120 64 getresgid sys_getresgid 129120 common getresgid sys_getresgid
130121 64 getpgid sys_getpgid 130121 common getpgid sys_getpgid
131122 64 setfsuid sys_setfsuid 131122 common setfsuid sys_setfsuid
132123 64 setfsgid sys_setfsgid 132123 common setfsgid sys_setfsgid
133124 64 getsid sys_getsid 133124 common getsid sys_getsid
134125 64 capget sys_capget 134125 common capget sys_capget
135126 64 capset sys_capset 135126 common capset sys_capset
136127 64 rt_sigpending sys_rt_sigpending 136127 64 rt_sigpending sys_rt_sigpending
137128 64 rt_sigtimedwait sys_rt_sigtimedwait 137128 64 rt_sigtimedwait sys_rt_sigtimedwait
138129 64 rt_sigqueueinfo sys_rt_sigqueueinfo 138129 64 rt_sigqueueinfo sys_rt_sigqueueinfo
139130 64 rt_sigsuspend sys_rt_sigsuspend 139130 common rt_sigsuspend sys_rt_sigsuspend
140131 64 sigaltstack stub_sigaltstack 140131 64 sigaltstack stub_sigaltstack
141132 64 utime sys_utime 141132 common utime sys_utime
142133 64 mknod sys_mknod 142133 common mknod sys_mknod
143134 64 uselib 143134 64 uselib
144135 64 personality sys_personality 144135 common personality sys_personality
145136 64 ustat sys_ustat 145136 common ustat sys_ustat
146137 64 statfs sys_statfs 146137 common statfs sys_statfs
147138 64 fstatfs sys_fstatfs 147138 common fstatfs sys_fstatfs
148139 64 sysfs sys_sysfs 148139 common sysfs sys_sysfs
149140 64 getpriority sys_getpriority 149140 common getpriority sys_getpriority
150141 64 setpriority sys_setpriority 150141 common setpriority sys_setpriority
151142 64 sched_setparam sys_sched_setparam 151142 common sched_setparam sys_sched_setparam
152143 64 sched_getparam sys_sched_getparam 152143 common sched_getparam sys_sched_getparam
153144 64 sched_setscheduler sys_sched_setscheduler 153144 common sched_setscheduler sys_sched_setscheduler
154145 64 sched_getscheduler sys_sched_getscheduler 154145 common sched_getscheduler sys_sched_getscheduler
155146 64 sched_get_priority_max sys_sched_get_priority_max 155146 common sched_get_priority_max sys_sched_get_priority_max
156147 64 sched_get_priority_min sys_sched_get_priority_min 156147 common sched_get_priority_min sys_sched_get_priority_min
157148 64 sched_rr_get_interval sys_sched_rr_get_interval 157148 common sched_rr_get_interval sys_sched_rr_get_interval
158149 64 mlock sys_mlock 158149 common mlock sys_mlock
159150 64 munlock sys_munlock 159150 common munlock sys_munlock
160151 64 mlockall sys_mlockall 160151 common mlockall sys_mlockall
161152 64 munlockall sys_munlockall 161152 common munlockall sys_munlockall
162153 64 vhangup sys_vhangup 162153 common vhangup sys_vhangup
163154 64 modify_ldt sys_modify_ldt 163154 common modify_ldt sys_modify_ldt
164155 64 pivot_root sys_pivot_root 164155 common pivot_root sys_pivot_root
165156 64 _sysctl sys_sysctl 165156 64 _sysctl sys_sysctl
166157 64 prctl sys_prctl 166157 common prctl sys_prctl
167158 64 arch_prctl sys_arch_prctl 167158 common arch_prctl sys_arch_prctl
168159 64 adjtimex sys_adjtimex 168159 common adjtimex sys_adjtimex
169160 64 setrlimit sys_setrlimit 169160 common setrlimit sys_setrlimit
170161 64 chroot sys_chroot 170161 common chroot sys_chroot
171162 64 sync sys_sync 171162 common sync sys_sync
172163 64 acct sys_acct 172163 common acct sys_acct
173164 64 settimeofday sys_settimeofday 173164 common settimeofday sys_settimeofday
174165 64 mount sys_mount 174165 common mount sys_mount
175166 64 umount2 sys_umount 175166 common umount2 sys_umount
176167 64 swapon sys_swapon 176167 common swapon sys_swapon
177168 64 swapoff sys_swapoff 177168 common swapoff sys_swapoff
178169 64 reboot sys_reboot 178169 common reboot sys_reboot
179170 64 sethostname sys_sethostname 179170 common sethostname sys_sethostname
180171 64 setdomainname sys_setdomainname 180171 common setdomainname sys_setdomainname
181172 64 iopl stub_iopl 181172 common iopl stub_iopl
182173 64 ioperm sys_ioperm 182173 common ioperm sys_ioperm
183174 64 create_module 183174 64 create_module
184175 64 init_module sys_init_module 184175 common init_module sys_init_module
185176 64 delete_module sys_delete_module 185176 common delete_module sys_delete_module
186177 64 get_kernel_syms 186177 64 get_kernel_syms
187178 64 query_module 187178 64 query_module
188179 64 quotactl sys_quotactl 188179 common quotactl sys_quotactl
189180 64 nfsservctl 189180 64 nfsservctl
190181 64 getpmsg 190181 common getpmsg
191182 64 putpmsg 191182 common putpmsg
192183 64 afs_syscall 192183 common afs_syscall
193184 64 tuxcall 193184 common tuxcall
194185 64 security 194185 common security
195186 64 gettid sys_gettid 195186 common gettid sys_gettid
196187 64 readahead sys_readahead 196187 common readahead sys_readahead
197188 64 setxattr sys_setxattr 197188 common setxattr sys_setxattr
198189 64 lsetxattr sys_lsetxattr 198189 common lsetxattr sys_lsetxattr
199190 64 fsetxattr sys_fsetxattr 199190 common fsetxattr sys_fsetxattr
200191 64 getxattr sys_getxattr 200191 common getxattr sys_getxattr
201192 64 lgetxattr sys_lgetxattr 201192 common lgetxattr sys_lgetxattr
202193 64 fgetxattr sys_fgetxattr 202193 common fgetxattr sys_fgetxattr
203194 64 listxattr sys_listxattr 203194 common listxattr sys_listxattr
204195 64 llistxattr sys_llistxattr 204195 common llistxattr sys_llistxattr
205196 64 flistxattr sys_flistxattr 205196 common flistxattr sys_flistxattr
206197 64 removexattr sys_removexattr 206197 common removexattr sys_removexattr
207198 64 lremovexattr sys_lremovexattr 207198 common lremovexattr sys_lremovexattr
208199 64 fremovexattr sys_fremovexattr 208199 common fremovexattr sys_fremovexattr
209200 64 tkill sys_tkill 209200 common tkill sys_tkill
210201 64 time sys_time 210201 common time sys_time
211202 64 futex sys_futex 211202 common futex sys_futex
212203 64 sched_setaffinity sys_sched_setaffinity 212203 common sched_setaffinity sys_sched_setaffinity
213204 64 sched_getaffinity sys_sched_getaffinity 213204 common sched_getaffinity sys_sched_getaffinity
214205 64 set_thread_area 214205 64 set_thread_area
215206 64 io_setup sys_io_setup 215206 common io_setup sys_io_setup
216207 64 io_destroy sys_io_destroy 216207 common io_destroy sys_io_destroy
217208 64 io_getevents sys_io_getevents 217208 common io_getevents sys_io_getevents
218209 64 io_submit sys_io_submit 218209 common io_submit sys_io_submit
219210 64 io_cancel sys_io_cancel 219210 common io_cancel sys_io_cancel
220211 64 get_thread_area 220211 64 get_thread_area
221212 64 lookup_dcookie sys_lookup_dcookie 221212 common lookup_dcookie sys_lookup_dcookie
222213 64 epoll_create sys_epoll_create 222213 common epoll_create sys_epoll_create
223214 64 epoll_ctl_old 223214 64 epoll_ctl_old
224215 64 epoll_wait_old 224215 64 epoll_wait_old
225216 64 remap_file_pages sys_remap_file_pages 225216 common remap_file_pages sys_remap_file_pages
226217 64 getdents64 sys_getdents64 226217 common getdents64 sys_getdents64
227218 64 set_tid_address sys_set_tid_address 227218 common set_tid_address sys_set_tid_address
228219 64 restart_syscall sys_restart_syscall 228219 common restart_syscall sys_restart_syscall
229220 64 semtimedop sys_semtimedop 229220 common semtimedop sys_semtimedop
230221 64 fadvise64 sys_fadvise64 230221 common fadvise64 sys_fadvise64
231222 64 timer_create sys_timer_create 231222 64 timer_create sys_timer_create
232223 64 timer_settime sys_timer_settime 232223 common timer_settime sys_timer_settime
233224 64 timer_gettime sys_timer_gettime 233224 common timer_gettime sys_timer_gettime
234225 64 timer_getoverrun sys_timer_getoverrun 234225 common timer_getoverrun sys_timer_getoverrun
235226 64 timer_delete sys_timer_delete 235226 common timer_delete sys_timer_delete
236227 64 clock_settime sys_clock_settime 236227 common clock_settime sys_clock_settime
237228 64 clock_gettime sys_clock_gettime 237228 common clock_gettime sys_clock_gettime
238229 64 clock_getres sys_clock_getres 238229 common clock_getres sys_clock_getres
239230 64 clock_nanosleep sys_clock_nanosleep 239230 common clock_nanosleep sys_clock_nanosleep
240231 64 exit_group sys_exit_group 240231 common exit_group sys_exit_group
241232 64 epoll_wait sys_epoll_wait 241232 common epoll_wait sys_epoll_wait
242233 64 epoll_ctl sys_epoll_ctl 242233 common epoll_ctl sys_epoll_ctl
243234 64 tgkill sys_tgkill 243234 common tgkill sys_tgkill
244235 64 utimes sys_utimes 244235 common utimes sys_utimes
245236 64 vserver 245236 64 vserver
246237 64 mbind sys_mbind 246237 common mbind sys_mbind
247238 64 set_mempolicy sys_set_mempolicy 247238 common set_mempolicy sys_set_mempolicy
248239 64 get_mempolicy sys_get_mempolicy 248239 common get_mempolicy sys_get_mempolicy
249240 64 mq_open sys_mq_open 249240 common mq_open sys_mq_open
250241 64 mq_unlink sys_mq_unlink 250241 common mq_unlink sys_mq_unlink
251242 64 mq_timedsend sys_mq_timedsend 251242 common mq_timedsend sys_mq_timedsend
252243 64 mq_timedreceive sys_mq_timedreceive 252243 common mq_timedreceive sys_mq_timedreceive
253244 64 mq_notify sys_mq_notify 253244 64 mq_notify sys_mq_notify
254245 64 mq_getsetattr sys_mq_getsetattr 254245 common mq_getsetattr sys_mq_getsetattr
255246 64 kexec_load sys_kexec_load 255246 64 kexec_load sys_kexec_load
256247 64 waitid sys_waitid 256247 64 waitid sys_waitid
257248 64 add_key sys_add_key 257248 common add_key sys_add_key
258249 64 request_key sys_request_key 258249 common request_key sys_request_key
259250 64 keyctl sys_keyctl 259250 common keyctl sys_keyctl
260251 64 ioprio_set sys_ioprio_set 260251 common ioprio_set sys_ioprio_set
261252 64 ioprio_get sys_ioprio_get 261252 common ioprio_get sys_ioprio_get
262253 64 inotify_init sys_inotify_init 262253 common inotify_init sys_inotify_init
263254 64 inotify_add_watch sys_inotify_add_watch 263254 common inotify_add_watch sys_inotify_add_watch
264255 64 inotify_rm_watch sys_inotify_rm_watch 264255 common inotify_rm_watch sys_inotify_rm_watch
265256 64 migrate_pages sys_migrate_pages 265256 common migrate_pages sys_migrate_pages
266257 64 openat sys_openat 266257 common openat sys_openat
267258 64 mkdirat sys_mkdirat 267258 common mkdirat sys_mkdirat
268259 64 mknodat sys_mknodat 268259 common mknodat sys_mknodat
269260 64 fchownat sys_fchownat 269260 common fchownat sys_fchownat
270261 64 futimesat sys_futimesat 270261 common futimesat sys_futimesat
271262 64 newfstatat sys_newfstatat 271262 common newfstatat sys_newfstatat
272263 64 unlinkat sys_unlinkat 272263 common unlinkat sys_unlinkat
273264 64 renameat sys_renameat 273264 common renameat sys_renameat
274265 64 linkat sys_linkat 274265 common linkat sys_linkat
275266 64 symlinkat sys_symlinkat 275266 common symlinkat sys_symlinkat
276267 64 readlinkat sys_readlinkat 276267 common readlinkat sys_readlinkat
277268 64 fchmodat sys_fchmodat 277268 common fchmodat sys_fchmodat
278269 64 faccessat sys_faccessat 278269 common faccessat sys_faccessat
279270 64 pselect6 sys_pselect6 279270 common pselect6 sys_pselect6
280271 64 ppoll sys_ppoll 280271 common ppoll sys_ppoll
281272 64 unshare sys_unshare 281272 common unshare sys_unshare
282273 64 set_robust_list sys_set_robust_list 282273 64 set_robust_list sys_set_robust_list
283274 64 get_robust_list sys_get_robust_list 283274 64 get_robust_list sys_get_robust_list
284275 64 splice sys_splice 284275 common splice sys_splice
285276 64 tee sys_tee 285276 common tee sys_tee
286277 64 sync_file_range sys_sync_file_range 286277 common sync_file_range sys_sync_file_range
287278 64 vmsplice sys_vmsplice 287278 64 vmsplice sys_vmsplice
288279 64 move_pages sys_move_pages 288279 64 move_pages sys_move_pages
289280 64 utimensat sys_utimensat 289280 common utimensat sys_utimensat
290281 64 epoll_pwait sys_epoll_pwait 290281 common epoll_pwait sys_epoll_pwait
291282 64 signalfd sys_signalfd 291282 common signalfd sys_signalfd
292283 64 timerfd_create sys_timerfd_create 292283 common timerfd_create sys_timerfd_create
293284 64 eventfd sys_eventfd 293284 common eventfd sys_eventfd
294285 64 fallocate sys_fallocate 294285 common fallocate sys_fallocate
295286 64 timerfd_settime sys_timerfd_settime 295286 common timerfd_settime sys_timerfd_settime
296287 64 timerfd_gettime sys_timerfd_gettime 296287 common timerfd_gettime sys_timerfd_gettime
297288 64 accept4 sys_accept4 297288 common accept4 sys_accept4
298289 64 signalfd4 sys_signalfd4 298289 common signalfd4 sys_signalfd4
299290 64 eventfd2 sys_eventfd2 299290 common eventfd2 sys_eventfd2
300291 64 epoll_create1 sys_epoll_create1 300291 common epoll_create1 sys_epoll_create1
301292 64 dup3 sys_dup3 301292 common dup3 sys_dup3
302293 64 pipe2 sys_pipe2 302293 common pipe2 sys_pipe2
303294 64 inotify_init1 sys_inotify_init1 303294 common inotify_init1 sys_inotify_init1
304295 64 preadv sys_preadv 304295 64 preadv sys_preadv
305296 64 pwritev sys_pwritev 305296 64 pwritev sys_pwritev
306297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo 306297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
307298 64 perf_event_open sys_perf_event_open 307298 common perf_event_open sys_perf_event_open
308299 64 recvmmsg sys_recvmmsg 308299 64 recvmmsg sys_recvmmsg
309300 64 fanotify_init sys_fanotify_init 309300 common fanotify_init sys_fanotify_init
310301 64 fanotify_mark sys_fanotify_mark 310301 common fanotify_mark sys_fanotify_mark
311302 64 prlimit64 sys_prlimit64 311302 common prlimit64 sys_prlimit64
312303 64 name_to_handle_at sys_name_to_handle_at 312303 common name_to_handle_at sys_name_to_handle_at
313304 64 open_by_handle_at sys_open_by_handle_at 313304 common open_by_handle_at sys_open_by_handle_at
314305 64 clock_adjtime sys_clock_adjtime 314305 common clock_adjtime sys_clock_adjtime
315306 64 syncfs sys_syncfs 315306 common syncfs sys_syncfs
316307 64 sendmmsg sys_sendmmsg 316307 64 sendmmsg sys_sendmmsg
317308 64 setns sys_setns 317308 common setns sys_setns
318309 64 getcpu sys_getcpu 318309 common getcpu sys_getcpu
319310 64 process_vm_readv sys_process_vm_readv 319310 64 process_vm_readv sys_process_vm_readv
320311 64 process_vm_writev sys_process_vm_writev 320311 64 process_vm_writev sys_process_vm_writev
321#
322# x32-specific system call numbers start at 512 to avoid cache impact
323# for native 64-bit operation.
324#
325512 x32 rt_sigaction sys32_rt_sigaction
326513 x32 rt_sigreturn stub_x32_rt_sigreturn
327514 x32 ioctl compat_sys_ioctl
328515 x32 readv compat_sys_readv
329516 x32 writev compat_sys_writev
330517 x32 recvfrom compat_sys_recvfrom
331518 x32 sendmsg compat_sys_sendmsg
332519 x32 recvmsg compat_sys_recvmsg
333520 x32 execve stub_x32_execve
334521 x32 ptrace compat_sys_ptrace
335522 x32 rt_sigpending sys32_rt_sigpending
336523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait
337524 x32 rt_sigqueueinfo sys32_rt_sigqueueinfo
338525 x32 sigaltstack stub_x32_sigaltstack
339526 x32 timer_create compat_sys_timer_create
340527 x32 mq_notify compat_sys_mq_notify
341528 x32 kexec_load compat_sys_kexec_load
342529 x32 waitid compat_sys_waitid
343530 x32 set_robust_list compat_sys_set_robust_list
344531 x32 get_robust_list compat_sys_get_robust_list
345532 x32 vmsplice compat_sys_vmsplice
346533 x32 move_pages compat_sys_move_pages
347534 x32 preadv compat_sys_preadv64
348535 x32 pwritev compat_sys_pwritev64
349536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
350537 x32 recvmmsg compat_sys_recvmmsg
351538 x32 sendmmsg compat_sys_sendmmsg
352539 x32 process_vm_readv compat_sys_process_vm_readv
353540 x32 process_vm_writev compat_sys_process_vm_writev
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index fe626c3ba01b..9924776f4265 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -35,6 +35,9 @@
35#define stub_sigaltstack sys_sigaltstack 35#define stub_sigaltstack sys_sigaltstack
36#define stub_rt_sigreturn sys_rt_sigreturn 36#define stub_rt_sigreturn sys_rt_sigreturn
37 37
38#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
39#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
40
38#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ; 41#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
39#include <asm/syscalls_64.h> 42#include <asm/syscalls_64.h>
40 43
diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c
index 5edf4f4bbf53..ce7e3607a870 100644
--- a/arch/x86/um/user-offsets.c
+++ b/arch/x86/um/user-offsets.c
@@ -15,6 +15,8 @@ static char syscalls[] = {
15}; 15};
16#else 16#else
17#define __SYSCALL_64(nr, sym, compat) [nr] = 1, 17#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
18#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
19#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
18static char syscalls[] = { 20static char syscalls[] = {
19#include <asm/syscalls_64.h> 21#include <asm/syscalls_64.h>
20}; 22};
diff --git a/arch/x86/vdso/.gitignore b/arch/x86/vdso/.gitignore
index 60274d5746e1..3282874bc61d 100644
--- a/arch/x86/vdso/.gitignore
+++ b/arch/x86/vdso/.gitignore
@@ -1,5 +1,7 @@
1vdso.lds 1vdso.lds
2vdso-syms.lds 2vdso-syms.lds
3vdsox32.lds
4vdsox32-syms.lds
3vdso32-syms.lds 5vdso32-syms.lds
4vdso32-syscall-syms.lds 6vdso32-syscall-syms.lds
5vdso32-sysenter-syms.lds 7vdso32-sysenter-syms.lds
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 5d179502a52c..fd14be1d1472 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -3,21 +3,29 @@
3# 3#
4 4
5VDSO64-$(CONFIG_X86_64) := y 5VDSO64-$(CONFIG_X86_64) := y
6VDSOX32-$(CONFIG_X86_X32_ABI) := y
6VDSO32-$(CONFIG_X86_32) := y 7VDSO32-$(CONFIG_X86_32) := y
7VDSO32-$(CONFIG_COMPAT) := y 8VDSO32-$(CONFIG_COMPAT) := y
8 9
9vdso-install-$(VDSO64-y) += vdso.so 10vdso-install-$(VDSO64-y) += vdso.so
11vdso-install-$(VDSOX32-y) += vdsox32.so
10vdso-install-$(VDSO32-y) += $(vdso32-images) 12vdso-install-$(VDSO32-y) += $(vdso32-images)
11 13
12 14
13# files to link into the vdso 15# files to link into the vdso
14vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o 16vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
15 17
18vobjs-$(VDSOX32-y) += $(vobjx32s-compat)
19
20# Filter out x32 objects.
21vobj64s := $(filter-out $(vobjx32s-compat),$(vobjs-y))
22
16# files to link into kernel 23# files to link into kernel
17obj-$(VDSO64-y) += vma.o vdso.o 24obj-$(VDSO64-y) += vma.o vdso.o
25obj-$(VDSOX32-y) += vdsox32.o
18obj-$(VDSO32-y) += vdso32.o vdso32-setup.o 26obj-$(VDSO32-y) += vdso32.o vdso32-setup.o
19 27
20vobjs := $(foreach F,$(vobjs-y),$(obj)/$F) 28vobjs := $(foreach F,$(vobj64s),$(obj)/$F)
21 29
22$(obj)/vdso.o: $(obj)/vdso.so 30$(obj)/vdso.o: $(obj)/vdso.so
23 31
@@ -73,6 +81,42 @@ $(obj)/%-syms.lds: $(obj)/%.so.dbg FORCE
73 $(call if_changed,vdsosym) 81 $(call if_changed,vdsosym)
74 82
75# 83#
84# X32 processes use x32 vDSO to access 64bit kernel data.
85#
86# Build x32 vDSO image:
87# 1. Compile x32 vDSO as 64bit.
88# 2. Convert object files to x32.
89# 3. Build x32 VDSO image with x32 objects, which contains 64bit codes
90# so that it can reach 64bit address space with 64bit pointers.
91#
92
93targets += vdsox32-syms.lds
94obj-$(VDSOX32-y) += vdsox32-syms.lds
95
96CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
97VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
98 -Wl,-soname=linux-vdso.so.1 \
99 -Wl,-z,max-page-size=4096 \
100 -Wl,-z,common-page-size=4096
101
102vobjx32s-y := $(vobj64s:.o=-x32.o)
103vobjx32s := $(foreach F,$(vobjx32s-y),$(obj)/$F)
104
105# Convert 64bit object file to x32 for x32 vDSO.
106quiet_cmd_x32 = X32 $@
107 cmd_x32 = $(OBJCOPY) -O elf32-x86-64 $< $@
108
109$(obj)/%-x32.o: $(obj)/%.o FORCE
110 $(call if_changed,x32)
111
112targets += vdsox32.so vdsox32.so.dbg vdsox32.lds $(vobjx32s-y)
113
114$(obj)/vdsox32.o: $(src)/vdsox32.S $(obj)/vdsox32.so
115
116$(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
117 $(call if_changed,vdso)
118
119#
76# Build multiple 32-bit vDSO images to choose from at boot time. 120# Build multiple 32-bit vDSO images to choose from at boot time.
77# 121#
78obj-$(VDSO32-y) += vdso32-syms.lds 122obj-$(VDSO32-y) += vdso32-syms.lds
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 6bc0e723b6e8..885eff49d6ab 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -70,100 +70,98 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
70 return ret; 70 return ret;
71} 71}
72 72
73notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
74{
75 long ret;
76
77 asm("syscall" : "=a" (ret) :
78 "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
79 return ret;
80}
81
82
73notrace static inline long vgetns(void) 83notrace static inline long vgetns(void)
74{ 84{
75 long v; 85 long v;
76 cycles_t cycles; 86 cycles_t cycles;
77 if (gtod->clock.vclock_mode == VCLOCK_TSC) 87 if (gtod->clock.vclock_mode == VCLOCK_TSC)
78 cycles = vread_tsc(); 88 cycles = vread_tsc();
79 else 89 else if (gtod->clock.vclock_mode == VCLOCK_HPET)
80 cycles = vread_hpet(); 90 cycles = vread_hpet();
91 else
92 return 0;
81 v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask; 93 v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
82 return (v * gtod->clock.mult) >> gtod->clock.shift; 94 return (v * gtod->clock.mult) >> gtod->clock.shift;
83} 95}
84 96
85notrace static noinline int do_realtime(struct timespec *ts) 97/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
98notrace static int __always_inline do_realtime(struct timespec *ts)
86{ 99{
87 unsigned long seq, ns; 100 unsigned long seq, ns;
101 int mode;
102
88 do { 103 do {
89 seq = read_seqbegin(&gtod->lock); 104 seq = read_seqcount_begin(&gtod->seq);
105 mode = gtod->clock.vclock_mode;
90 ts->tv_sec = gtod->wall_time_sec; 106 ts->tv_sec = gtod->wall_time_sec;
91 ts->tv_nsec = gtod->wall_time_nsec; 107 ts->tv_nsec = gtod->wall_time_nsec;
92 ns = vgetns(); 108 ns = vgetns();
93 } while (unlikely(read_seqretry(&gtod->lock, seq))); 109 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
110
94 timespec_add_ns(ts, ns); 111 timespec_add_ns(ts, ns);
95 return 0; 112 return mode;
96} 113}
97 114
98notrace static noinline int do_monotonic(struct timespec *ts) 115notrace static int do_monotonic(struct timespec *ts)
99{ 116{
100 unsigned long seq, ns, secs; 117 unsigned long seq, ns;
118 int mode;
119
101 do { 120 do {
102 seq = read_seqbegin(&gtod->lock); 121 seq = read_seqcount_begin(&gtod->seq);
103 secs = gtod->wall_time_sec; 122 mode = gtod->clock.vclock_mode;
104 ns = gtod->wall_time_nsec + vgetns(); 123 ts->tv_sec = gtod->monotonic_time_sec;
105 secs += gtod->wall_to_monotonic.tv_sec; 124 ts->tv_nsec = gtod->monotonic_time_nsec;
106 ns += gtod->wall_to_monotonic.tv_nsec; 125 ns = vgetns();
107 } while (unlikely(read_seqretry(&gtod->lock, seq))); 126 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
108 127 timespec_add_ns(ts, ns);
109 /* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
110 * are all guaranteed to be nonnegative.
111 */
112 while (ns >= NSEC_PER_SEC) {
113 ns -= NSEC_PER_SEC;
114 ++secs;
115 }
116 ts->tv_sec = secs;
117 ts->tv_nsec = ns;
118 128
119 return 0; 129 return mode;
120} 130}
121 131
122notrace static noinline int do_realtime_coarse(struct timespec *ts) 132notrace static int do_realtime_coarse(struct timespec *ts)
123{ 133{
124 unsigned long seq; 134 unsigned long seq;
125 do { 135 do {
126 seq = read_seqbegin(&gtod->lock); 136 seq = read_seqcount_begin(&gtod->seq);
127 ts->tv_sec = gtod->wall_time_coarse.tv_sec; 137 ts->tv_sec = gtod->wall_time_coarse.tv_sec;
128 ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; 138 ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
129 } while (unlikely(read_seqretry(&gtod->lock, seq))); 139 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
130 return 0; 140 return 0;
131} 141}
132 142
133notrace static noinline int do_monotonic_coarse(struct timespec *ts) 143notrace static int do_monotonic_coarse(struct timespec *ts)
134{ 144{
135 unsigned long seq, ns, secs; 145 unsigned long seq;
136 do { 146 do {
137 seq = read_seqbegin(&gtod->lock); 147 seq = read_seqcount_begin(&gtod->seq);
138 secs = gtod->wall_time_coarse.tv_sec; 148 ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
139 ns = gtod->wall_time_coarse.tv_nsec; 149 ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
140 secs += gtod->wall_to_monotonic.tv_sec; 150 } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
141 ns += gtod->wall_to_monotonic.tv_nsec;
142 } while (unlikely(read_seqretry(&gtod->lock, seq)));
143
144 /* wall_time_nsec and wall_to_monotonic.tv_nsec are
145 * guaranteed to be between 0 and NSEC_PER_SEC.
146 */
147 if (ns >= NSEC_PER_SEC) {
148 ns -= NSEC_PER_SEC;
149 ++secs;
150 }
151 ts->tv_sec = secs;
152 ts->tv_nsec = ns;
153 151
154 return 0; 152 return 0;
155} 153}
156 154
157notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) 155notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
158{ 156{
157 int ret = VCLOCK_NONE;
158
159 switch (clock) { 159 switch (clock) {
160 case CLOCK_REALTIME: 160 case CLOCK_REALTIME:
161 if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) 161 ret = do_realtime(ts);
162 return do_realtime(ts);
163 break; 162 break;
164 case CLOCK_MONOTONIC: 163 case CLOCK_MONOTONIC:
165 if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) 164 ret = do_monotonic(ts);
166 return do_monotonic(ts);
167 break; 165 break;
168 case CLOCK_REALTIME_COARSE: 166 case CLOCK_REALTIME_COARSE:
169 return do_realtime_coarse(ts); 167 return do_realtime_coarse(ts);
@@ -171,32 +169,33 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
171 return do_monotonic_coarse(ts); 169 return do_monotonic_coarse(ts);
172 } 170 }
173 171
174 return vdso_fallback_gettime(clock, ts); 172 if (ret == VCLOCK_NONE)
173 return vdso_fallback_gettime(clock, ts);
174 return 0;
175} 175}
176int clock_gettime(clockid_t, struct timespec *) 176int clock_gettime(clockid_t, struct timespec *)
177 __attribute__((weak, alias("__vdso_clock_gettime"))); 177 __attribute__((weak, alias("__vdso_clock_gettime")));
178 178
179notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) 179notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
180{ 180{
181 long ret; 181 long ret = VCLOCK_NONE;
182 if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) { 182
183 if (likely(tv != NULL)) { 183 if (likely(tv != NULL)) {
184 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) != 184 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
185 offsetof(struct timespec, tv_nsec) || 185 offsetof(struct timespec, tv_nsec) ||
186 sizeof(*tv) != sizeof(struct timespec)); 186 sizeof(*tv) != sizeof(struct timespec));
187 do_realtime((struct timespec *)tv); 187 ret = do_realtime((struct timespec *)tv);
188 tv->tv_usec /= 1000; 188 tv->tv_usec /= 1000;
189 }
190 if (unlikely(tz != NULL)) {
191 /* Avoid memcpy. Some old compilers fail to inline it */
192 tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
193 tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
194 }
195 return 0;
196 } 189 }
197 asm("syscall" : "=a" (ret) : 190 if (unlikely(tz != NULL)) {
198 "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory"); 191 /* Avoid memcpy. Some old compilers fail to inline it */
199 return ret; 192 tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
193 tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
194 }
195
196 if (ret == VCLOCK_NONE)
197 return vdso_fallback_gtod(tv, tz);
198 return 0;
200} 199}
201int gettimeofday(struct timeval *, struct timezone *) 200int gettimeofday(struct timeval *, struct timezone *)
202 __attribute__((weak, alias("__vdso_gettimeofday"))); 201 __attribute__((weak, alias("__vdso_gettimeofday")));
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index a944020fa859..66e6d9359826 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -311,6 +311,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
311 int ret = 0; 311 int ret = 0;
312 bool compat; 312 bool compat;
313 313
314#ifdef CONFIG_X86_X32_ABI
315 if (test_thread_flag(TIF_X32))
316 return x32_setup_additional_pages(bprm, uses_interp);
317#endif
318
314 if (vdso_enabled == VDSO_DISABLED) 319 if (vdso_enabled == VDSO_DISABLED)
315 return 0; 320 return 0;
316 321
diff --git a/arch/x86/vdso/vdsox32.S b/arch/x86/vdso/vdsox32.S
new file mode 100644
index 000000000000..d6b9a7f42a8a
--- /dev/null
+++ b/arch/x86/vdso/vdsox32.S
@@ -0,0 +1,22 @@
1#include <asm/page_types.h>
2#include <linux/linkage.h>
3#include <linux/init.h>
4
5__PAGE_ALIGNED_DATA
6
7 .globl vdsox32_start, vdsox32_end
8 .align PAGE_SIZE
9vdsox32_start:
10 .incbin "arch/x86/vdso/vdsox32.so"
11vdsox32_end:
12 .align PAGE_SIZE /* extra data here leaks to userspace. */
13
14.previous
15
16 .globl vdsox32_pages
17 .bss
18 .align 8
19 .type vdsox32_pages, @object
20vdsox32_pages:
21 .zero (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE * 8
22 .size vdsox32_pages, .-vdsox32_pages
diff --git a/arch/x86/vdso/vdsox32.lds.S b/arch/x86/vdso/vdsox32.lds.S
new file mode 100644
index 000000000000..62272aa2ae0a
--- /dev/null
+++ b/arch/x86/vdso/vdsox32.lds.S
@@ -0,0 +1,28 @@
1/*
2 * Linker script for x32 vDSO.
3 * We #include the file to define the layout details.
4 * Here we only choose the prelinked virtual address.
5 *
6 * This file defines the version script giving the user-exported symbols in
7 * the DSO. We can define local symbols here called VDSO* to make their
8 * values visible using the asm-x86/vdso.h macros from the kernel proper.
9 */
10
11#define VDSO_PRELINK 0
12#include "vdso-layout.lds.S"
13
14/*
15 * This controls what userland symbols we export from the vDSO.
16 */
17VERSION {
18 LINUX_2.6 {
19 global:
20 __vdso_clock_gettime;
21 __vdso_gettimeofday;
22 __vdso_getcpu;
23 __vdso_time;
24 local: *;
25 };
26}
27
28VDSOX32_PRELINK = VDSO_PRELINK;
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 17e18279649f..00aaf047b39f 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -24,7 +24,44 @@ extern unsigned short vdso_sync_cpuid;
24extern struct page *vdso_pages[]; 24extern struct page *vdso_pages[];
25static unsigned vdso_size; 25static unsigned vdso_size;
26 26
27static void __init patch_vdso(void *vdso, size_t len) 27#ifdef CONFIG_X86_X32_ABI
28extern char vdsox32_start[], vdsox32_end[];
29extern struct page *vdsox32_pages[];
30static unsigned vdsox32_size;
31
32static void __init patch_vdsox32(void *vdso, size_t len)
33{
34 Elf32_Ehdr *hdr = vdso;
35 Elf32_Shdr *sechdrs, *alt_sec = 0;
36 char *secstrings;
37 void *alt_data;
38 int i;
39
40 BUG_ON(len < sizeof(Elf32_Ehdr));
41 BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
42
43 sechdrs = (void *)hdr + hdr->e_shoff;
44 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
45
46 for (i = 1; i < hdr->e_shnum; i++) {
47 Elf32_Shdr *shdr = &sechdrs[i];
48 if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
49 alt_sec = shdr;
50 goto found;
51 }
52 }
53
54 /* If we get here, it's probably a bug. */
55 pr_warning("patch_vdsox32: .altinstructions not found\n");
56 return; /* nothing to patch */
57
58found:
59 alt_data = (void *)hdr + alt_sec->sh_offset;
60 apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
61}
62#endif
63
64static void __init patch_vdso64(void *vdso, size_t len)
28{ 65{
29 Elf64_Ehdr *hdr = vdso; 66 Elf64_Ehdr *hdr = vdso;
30 Elf64_Shdr *sechdrs, *alt_sec = 0; 67 Elf64_Shdr *sechdrs, *alt_sec = 0;
@@ -47,7 +84,7 @@ static void __init patch_vdso(void *vdso, size_t len)
47 } 84 }
48 85
49 /* If we get here, it's probably a bug. */ 86 /* If we get here, it's probably a bug. */
50 pr_warning("patch_vdso: .altinstructions not found\n"); 87 pr_warning("patch_vdso64: .altinstructions not found\n");
51 return; /* nothing to patch */ 88 return; /* nothing to patch */
52 89
53found: 90found:
@@ -60,12 +97,20 @@ static int __init init_vdso(void)
60 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE; 97 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
61 int i; 98 int i;
62 99
63 patch_vdso(vdso_start, vdso_end - vdso_start); 100 patch_vdso64(vdso_start, vdso_end - vdso_start);
64 101
65 vdso_size = npages << PAGE_SHIFT; 102 vdso_size = npages << PAGE_SHIFT;
66 for (i = 0; i < npages; i++) 103 for (i = 0; i < npages; i++)
67 vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE); 104 vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
68 105
106#ifdef CONFIG_X86_X32_ABI
107 patch_vdsox32(vdsox32_start, vdsox32_end - vdsox32_start);
108 npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
109 vdsox32_size = npages << PAGE_SHIFT;
110 for (i = 0; i < npages; i++)
111 vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
112#endif
113
69 return 0; 114 return 0;
70} 115}
71subsys_initcall(init_vdso); 116subsys_initcall(init_vdso);
@@ -103,7 +148,10 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
103 148
104/* Setup a VMA at program startup for the vsyscall page. 149/* Setup a VMA at program startup for the vsyscall page.
105 Not called for compat tasks */ 150 Not called for compat tasks */
106int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) 151static int setup_additional_pages(struct linux_binprm *bprm,
152 int uses_interp,
153 struct page **pages,
154 unsigned size)
107{ 155{
108 struct mm_struct *mm = current->mm; 156 struct mm_struct *mm = current->mm;
109 unsigned long addr; 157 unsigned long addr;
@@ -113,8 +161,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
113 return 0; 161 return 0;
114 162
115 down_write(&mm->mmap_sem); 163 down_write(&mm->mmap_sem);
116 addr = vdso_addr(mm->start_stack, vdso_size); 164 addr = vdso_addr(mm->start_stack, size);
117 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0); 165 addr = get_unmapped_area(NULL, addr, size, 0, 0);
118 if (IS_ERR_VALUE(addr)) { 166 if (IS_ERR_VALUE(addr)) {
119 ret = addr; 167 ret = addr;
120 goto up_fail; 168 goto up_fail;
@@ -122,10 +170,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
122 170
123 current->mm->context.vdso = (void *)addr; 171 current->mm->context.vdso = (void *)addr;
124 172
125 ret = install_special_mapping(mm, addr, vdso_size, 173 ret = install_special_mapping(mm, addr, size,
126 VM_READ|VM_EXEC| 174 VM_READ|VM_EXEC|
127 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 175 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
128 vdso_pages); 176 pages);
129 if (ret) { 177 if (ret) {
130 current->mm->context.vdso = NULL; 178 current->mm->context.vdso = NULL;
131 goto up_fail; 179 goto up_fail;
@@ -136,6 +184,20 @@ up_fail:
136 return ret; 184 return ret;
137} 185}
138 186
187int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
188{
189 return setup_additional_pages(bprm, uses_interp, vdso_pages,
190 vdso_size);
191}
192
193#ifdef CONFIG_X86_X32_ABI
194int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
195{
196 return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
197 vdsox32_size);
198}
199#endif
200
139static __init int vdso_setup(char *s) 201static __init int vdso_setup(char *s)
140{ 202{
141 vdso_enabled = simple_strtoul(s, NULL, 0); 203 vdso_enabled = simple_strtoul(s, NULL, 0);
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index f932b30b47fb..ddab37b24741 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -113,7 +113,7 @@ CONFIG_DEFAULT_IOSCHED="noop"
113# CONFIG_INLINE_SPIN_LOCK_BH is not set 113# CONFIG_INLINE_SPIN_LOCK_BH is not set
114# CONFIG_INLINE_SPIN_LOCK_IRQ is not set 114# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
115# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set 115# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
116CONFIG_INLINE_SPIN_UNLOCK=y 116# CONFIG_UNINLINE_SPIN_UNLOCK is not set
117# CONFIG_INLINE_SPIN_UNLOCK_BH is not set 117# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
118CONFIG_INLINE_SPIN_UNLOCK_IRQ=y 118CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
119# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set 119# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
diff --git a/arch/xtensa/include/asm/posix_types.h b/arch/xtensa/include/asm/posix_types.h
index 6b2190c35882..6e96be0d02d3 100644
--- a/arch/xtensa/include/asm/posix_types.h
+++ b/arch/xtensa/include/asm/posix_types.h
@@ -19,104 +19,21 @@
19 * assume GCC is being used. 19 * assume GCC is being used.
20 */ 20 */
21 21
22typedef unsigned long __kernel_ino_t;
23typedef unsigned int __kernel_mode_t;
24typedef unsigned long __kernel_nlink_t;
25typedef long __kernel_off_t;
26typedef int __kernel_pid_t;
27typedef unsigned short __kernel_ipc_pid_t; 22typedef unsigned short __kernel_ipc_pid_t;
28typedef unsigned int __kernel_uid_t; 23#define __kernel_ipc_pid_t __kernel_ipc_pid_t
29typedef unsigned int __kernel_gid_t; 24
30typedef unsigned int __kernel_size_t; 25typedef unsigned int __kernel_size_t;
31typedef int __kernel_ssize_t; 26typedef int __kernel_ssize_t;
32typedef long __kernel_ptrdiff_t; 27typedef long __kernel_ptrdiff_t;
33typedef long __kernel_time_t; 28#define __kernel_size_t __kernel_size_t
34typedef long __kernel_suseconds_t;
35typedef long __kernel_clock_t;
36typedef int __kernel_timer_t;
37typedef int __kernel_clockid_t;
38typedef int __kernel_daddr_t;
39typedef char * __kernel_caddr_t;
40typedef unsigned short __kernel_uid16_t;
41typedef unsigned short __kernel_gid16_t;
42typedef unsigned int __kernel_uid32_t;
43typedef unsigned int __kernel_gid32_t;
44 29
45typedef unsigned short __kernel_old_uid_t; 30typedef unsigned short __kernel_old_uid_t;
46typedef unsigned short __kernel_old_gid_t; 31typedef unsigned short __kernel_old_gid_t;
47typedef unsigned short __kernel_old_dev_t; 32#define __kernel_old_uid_t __kernel_old_uid_t
48
49#ifdef __GNUC__
50typedef long long __kernel_loff_t;
51#endif
52
53typedef struct {
54 int val[2];
55} __kernel_fsid_t;
56
57#ifndef __GNUC__
58
59#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
60#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
61#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
62#define __FD_ZERO(set) \
63 ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set)))
64
65#else /* __GNUC__ */
66 33
67#if defined(__KERNEL__) 34typedef unsigned short __kernel_old_dev_t;
68/* With GNU C, use inline functions instead so args are evaluated only once: */ 35#define __kernel_old_dev_t __kernel_old_dev_t
69
70#undef __FD_SET
71static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
72{
73 unsigned long _tmp = fd / __NFDBITS;
74 unsigned long _rem = fd % __NFDBITS;
75 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
76}
77
78#undef __FD_CLR
79static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
80{
81 unsigned long _tmp = fd / __NFDBITS;
82 unsigned long _rem = fd % __NFDBITS;
83 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
84}
85
86#undef __FD_ISSET
87static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
88{
89 unsigned long _tmp = fd / __NFDBITS;
90 unsigned long _rem = fd % __NFDBITS;
91 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
92}
93
94/*
95 * This will unroll the loop for the normal constant case (8 ints,
96 * for a 256-bit fd_set)
97 */
98#undef __FD_ZERO
99static __inline__ void __FD_ZERO(__kernel_fd_set *p)
100{
101 unsigned int *tmp = (unsigned int *)p->fds_bits;
102 int i;
103 36
104 if (__builtin_constant_p(__FDSET_LONGS)) { 37#include <asm-generic/posix_types.h>
105 switch (__FDSET_LONGS) {
106 case 8:
107 tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0;
108 tmp[4] = 0; tmp[5] = 0; tmp[6] = 0; tmp[7] = 0;
109 return;
110 }
111 }
112 i = __FDSET_LONGS;
113 while (i) {
114 i--;
115 *tmp = 0;
116 tmp++;
117 }
118}
119 38
120#endif /* defined(__KERNEL__) */
121#endif /* __GNUC__ */
122#endif /* _XTENSA_POSIX_TYPES_H */ 39#endif /* _XTENSA_POSIX_TYPES_H */
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7556913aba45..47768ff87343 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -384,6 +384,15 @@ config ACPI_CUSTOM_METHOD
384 load additional kernel modules after boot, this feature may be used 384 load additional kernel modules after boot, this feature may be used
385 to override that restriction). 385 to override that restriction).
386 386
387config ACPI_BGRT
388 tristate "Boottime Graphics Resource Table support"
389 default n
390 help
391 This driver adds support for exposing the ACPI Boottime Graphics
392 Resource Table, which allows the operating system to obtain
393 data from the firmware boot splash. It will appear under
394 /sys/firmware/acpi/bgrt/ .
395
387source "drivers/acpi/apei/Kconfig" 396source "drivers/acpi/apei/Kconfig"
388 397
389endif # ACPI 398endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 1567028d2038..47199e2a9130 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_ACPI_SBS) += sbs.o
62obj-$(CONFIG_ACPI_HED) += hed.o 62obj-$(CONFIG_ACPI_HED) += hed.o
63obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o 63obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
64obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o 64obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
65obj-$(CONFIG_ACPI_BGRT) += bgrt.o
65 66
66# processor has its own "processor." module_param namespace 67# processor has its own "processor." module_param namespace
67processor-y := processor_driver.o processor_throttling.o 68processor-y := processor_driver.o processor_throttling.o
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 0ca208b6dcf0..793b8cc8e256 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -68,12 +68,14 @@ acpi-y += \
68 68
69acpi-y += \ 69acpi-y += \
70 hwacpi.o \ 70 hwacpi.o \
71 hwesleep.o \
71 hwgpe.o \ 72 hwgpe.o \
72 hwpci.o \ 73 hwpci.o \
73 hwregs.o \ 74 hwregs.o \
74 hwsleep.o \ 75 hwsleep.o \
75 hwvalid.o \ 76 hwvalid.o \
76 hwxface.o 77 hwxface.o \
78 hwxfsleep.o
77 79
78acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o 80acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
79 81
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index a44bd424f9f4..8a7d51bfb3b3 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -51,7 +51,6 @@
51 * 51 *
52 * Note: The order of these include files is important. 52 * Note: The order of these include files is important.
53 */ 53 */
54#include "acconfig.h" /* Global configuration constants */
55#include "acmacros.h" /* C macros */ 54#include "acmacros.h" /* C macros */
56#include "aclocal.h" /* Internal data types */ 55#include "aclocal.h" /* Internal data types */
57#include "acobject.h" /* ACPI internal object */ 56#include "acobject.h" /* ACPI internal object */
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index deaa81979561..5e8abb07724f 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -111,7 +111,7 @@ acpi_status acpi_db_find_name_in_namespace(char *name_arg);
111 111
112void acpi_db_set_scope(char *name); 112void acpi_db_set_scope(char *name);
113 113
114acpi_status acpi_db_sleep(char *object_arg); 114ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_db_sleep(char *object_arg))
115 115
116void acpi_db_find_references(char *object_arg); 116void acpi_db_find_references(char *object_arg);
117 117
@@ -119,11 +119,13 @@ void acpi_db_display_locks(void);
119 119
120void acpi_db_display_resources(char *object_arg); 120void acpi_db_display_resources(char *object_arg);
121 121
122void acpi_db_display_gpes(void); 122ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_display_gpes(void))
123 123
124void acpi_db_check_integrity(void); 124void acpi_db_check_integrity(void);
125 125
126void acpi_db_generate_gpe(char *gpe_arg, char *block_arg); 126ACPI_HW_DEPENDENT_RETURN_VOID(void
127 acpi_db_generate_gpe(char *gpe_arg,
128 char *block_arg))
127 129
128void acpi_db_check_predefined_names(void); 130void acpi_db_check_predefined_names(void);
129 131
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index c53caa521a30..d700f63e4701 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -69,11 +69,10 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
69 */ 69 */
70acpi_status acpi_ev_init_global_lock_handler(void); 70acpi_status acpi_ev_init_global_lock_handler(void);
71 71
72acpi_status acpi_ev_acquire_global_lock(u16 timeout); 72ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
73 73 acpi_ev_acquire_global_lock(u16 timeout))
74acpi_status acpi_ev_release_global_lock(void); 74 ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
75 75 acpi_status acpi_ev_remove_global_lock_handler(void);
76acpi_status acpi_ev_remove_global_lock_handler(void);
77 76
78/* 77/*
79 * evgpe - Low-level GPE support 78 * evgpe - Low-level GPE support
@@ -114,7 +113,9 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
114 struct acpi_gpe_block_info *gpe_block, 113 struct acpi_gpe_block_info *gpe_block,
115 void *context); 114 void *context);
116 115
117acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block); 116ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
117 acpi_ev_delete_gpe_block(struct acpi_gpe_block_info
118 *gpe_block))
118 119
119u32 120u32
120acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, 121acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
@@ -126,9 +127,10 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
126 */ 127 */
127acpi_status acpi_ev_gpe_initialize(void); 128acpi_status acpi_ev_gpe_initialize(void);
128 129
129void acpi_ev_update_gpes(acpi_owner_id table_owner_id); 130ACPI_HW_DEPENDENT_RETURN_VOID(void
131 acpi_ev_update_gpes(acpi_owner_id table_owner_id))
130 132
131acpi_status 133 acpi_status
132acpi_ev_match_gpe_method(acpi_handle obj_handle, 134acpi_ev_match_gpe_method(acpi_handle obj_handle,
133 u32 level, void *context, void **return_value); 135 u32 level, void *context, void **return_value);
134 136
@@ -237,6 +239,5 @@ acpi_status acpi_ev_remove_sci_handler(void);
237 239
238u32 acpi_ev_initialize_sCI(u32 program_sCI); 240u32 acpi_ev_initialize_sCI(u32 program_sCI);
239 241
240void acpi_ev_terminate(void); 242ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void))
241
242#endif /* __ACEVENTS_H__ */ 243#endif /* __ACEVENTS_H__ */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 2853f7673f3b..4f7d3f57d05c 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -147,7 +147,7 @@ u8 acpi_gbl_system_awake_and_running;
147 */ 147 */
148u8 acpi_gbl_reduced_hardware; 148u8 acpi_gbl_reduced_hardware;
149 149
150#endif 150#endif /* DEFINE_ACPI_GLOBALS */
151 151
152/* Do not disassemble buffers to resource descriptors */ 152/* Do not disassemble buffers to resource descriptors */
153 153
@@ -184,8 +184,12 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
184 * found in the RSDT/XSDT. 184 * found in the RSDT/XSDT.
185 */ 185 */
186ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list; 186ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list;
187
188#if (!ACPI_REDUCED_HARDWARE)
187ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS; 189ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
188 190
191#endif /* !ACPI_REDUCED_HARDWARE */
192
189/* These addresses are calculated from the FADT Event Block addresses */ 193/* These addresses are calculated from the FADT Event Block addresses */
190 194
191ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_status; 195ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_status;
@@ -397,10 +401,15 @@ ACPI_EXTERN struct acpi_fixed_event_handler
397ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; 401ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
398ACPI_EXTERN struct acpi_gpe_block_info 402ACPI_EXTERN struct acpi_gpe_block_info
399*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; 403*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
404
405#if (!ACPI_REDUCED_HARDWARE)
406
400ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized; 407ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
401ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler; 408ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler;
402ACPI_EXTERN void *acpi_gbl_global_event_handler_context; 409ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
403 410
411#endif /* !ACPI_REDUCED_HARDWARE */
412
404/***************************************************************************** 413/*****************************************************************************
405 * 414 *
406 * Debugger globals 415 * Debugger globals
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 677793e938f5..5ccb99ae3a6f 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -81,6 +81,26 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value);
81acpi_status acpi_hw_clear_acpi_status(void); 81acpi_status acpi_hw_clear_acpi_status(void);
82 82
83/* 83/*
84 * hwsleep - sleep/wake support (Legacy sleep registers)
85 */
86acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags);
87
88acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags);
89
90acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags);
91
92/*
93 * hwesleep - sleep/wake support (Extended FADT-V5 sleep registers)
94 */
95void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument);
96
97acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags);
98
99acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags);
100
101acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags);
102
103/*
84 * hwvalid - Port I/O with validation 104 * hwvalid - Port I/O with validation
85 */ 105 */
86acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width); 106acpi_status acpi_hw_read_port(acpi_io_address address, u32 *value, u32 width);
@@ -128,16 +148,4 @@ acpi_status
128acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id, 148acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id,
129 acpi_handle root_pci_device, acpi_handle pci_region); 149 acpi_handle root_pci_device, acpi_handle pci_region);
130 150
131#ifdef ACPI_FUTURE_USAGE
132/*
133 * hwtimer - ACPI Timer prototypes
134 */
135acpi_status acpi_get_timer_resolution(u32 * resolution);
136
137acpi_status acpi_get_timer(u32 * ticks);
138
139acpi_status
140acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed);
141#endif /* ACPI_FUTURE_USAGE */
142
143#endif /* __ACHWARE_H__ */ 151#endif /* __ACHWARE_H__ */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 3f24068837d5..e3922ca20e7f 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -370,6 +370,7 @@ struct acpi_predefined_data {
370/* Defines for Flags field above */ 370/* Defines for Flags field above */
371 371
372#define ACPI_OBJECT_REPAIRED 1 372#define ACPI_OBJECT_REPAIRED 1
373#define ACPI_OBJECT_WRAPPED 2
373 374
374/* 375/*
375 * Bitmapped return value types 376 * Bitmapped return value types
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index ef338a96f5b2..f119f473f71a 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -516,6 +516,12 @@
516 516
517#endif /* ACPI_DEBUG_OUTPUT */ 517#endif /* ACPI_DEBUG_OUTPUT */
518 518
519#if (!ACPI_REDUCED_HARDWARE)
520#define ACPI_HW_OPTIONAL_FUNCTION(addr) addr
521#else
522#define ACPI_HW_OPTIONAL_FUNCTION(addr) NULL
523#endif
524
519/* 525/*
520 * Some code only gets executed when the debugger is built in. 526 * Some code only gets executed when the debugger is built in.
521 * Note that this is entirely independent of whether the 527 * Note that this is entirely independent of whether the
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 2c9e0f049523..9b19d4b86424 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -283,8 +283,9 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
283 union acpi_operand_object **return_object_ptr); 283 union acpi_operand_object **return_object_ptr);
284 284
285acpi_status 285acpi_status
286acpi_ns_repair_package_list(struct acpi_predefined_data *data, 286acpi_ns_wrap_with_package(struct acpi_predefined_data *data,
287 union acpi_operand_object **obj_desc_ptr); 287 union acpi_operand_object *original_object,
288 union acpi_operand_object **obj_desc_ptr);
288 289
289acpi_status 290acpi_status
290acpi_ns_repair_null_element(struct acpi_predefined_data *data, 291acpi_ns_repair_null_element(struct acpi_predefined_data *data,
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index d5bec304c823..6712965ba8ae 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -67,6 +67,11 @@ acpi_status acpi_tb_resize_root_table_list(void);
67 67
68acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc); 68acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc);
69 69
70struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
71 *table_header,
72 struct acpi_table_desc
73 *table_desc);
74
70acpi_status 75acpi_status
71acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index); 76acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index);
72 77
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 6729ebe2f1e6..07e4dc44f81c 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -47,7 +47,7 @@
47 47
48#define _COMPONENT ACPI_EVENTS 48#define _COMPONENT ACPI_EVENTS
49ACPI_MODULE_NAME("evevent") 49ACPI_MODULE_NAME("evevent")
50 50#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
51/* Local prototypes */ 51/* Local prototypes */
52static acpi_status acpi_ev_fixed_event_initialize(void); 52static acpi_status acpi_ev_fixed_event_initialize(void);
53 53
@@ -291,3 +291,5 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
291 return ((acpi_gbl_fixed_event_handlers[event]. 291 return ((acpi_gbl_fixed_event_handlers[event].
292 handler) (acpi_gbl_fixed_event_handlers[event].context)); 292 handler) (acpi_gbl_fixed_event_handlers[event].context));
293} 293}
294
295#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 5e5683cb1f0d..cfeab38795d8 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -48,7 +48,7 @@
48 48
49#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evglock") 50ACPI_MODULE_NAME("evglock")
51 51#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
52/* Local prototypes */ 52/* Local prototypes */
53static u32 acpi_ev_global_lock_handler(void *context); 53static u32 acpi_ev_global_lock_handler(void *context);
54 54
@@ -339,3 +339,5 @@ acpi_status acpi_ev_release_global_lock(void)
339 acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex); 339 acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
340 return_ACPI_STATUS(status); 340 return_ACPI_STATUS(status);
341} 341}
342
343#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 9e88cb6fb25e..8ba0e5f17091 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -48,7 +48,7 @@
48 48
49#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evgpe") 50ACPI_MODULE_NAME("evgpe")
51 51#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
52/* Local prototypes */ 52/* Local prototypes */
53static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); 53static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
54 54
@@ -766,3 +766,5 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
766 766
767 return_UINT32(ACPI_INTERRUPT_HANDLED); 767 return_UINT32(ACPI_INTERRUPT_HANDLED);
768} 768}
769
770#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index be75339cd5dd..23a3ca86b2eb 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -48,7 +48,7 @@
48 48
49#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evgpeblk") 50ACPI_MODULE_NAME("evgpeblk")
51 51#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
52/* Local prototypes */ 52/* Local prototypes */
53static acpi_status 53static acpi_status
54acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block, 54acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
@@ -504,3 +504,5 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
504 504
505 return_ACPI_STATUS(AE_OK); 505 return_ACPI_STATUS(AE_OK);
506} 506}
507
508#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index adf7494da9db..da0add858f81 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -48,7 +48,7 @@
48 48
49#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evgpeinit") 50ACPI_MODULE_NAME("evgpeinit")
51 51#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
52/* 52/*
53 * Note: History of _PRW support in ACPICA 53 * Note: History of _PRW support in ACPICA
54 * 54 *
@@ -440,3 +440,5 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
440 name, gpe_number)); 440 name, gpe_number));
441 return_ACPI_STATUS(AE_OK); 441 return_ACPI_STATUS(AE_OK);
442} 442}
443
444#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 25073932aa10..3c43796b8361 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -48,6 +48,7 @@
48#define _COMPONENT ACPI_EVENTS 48#define _COMPONENT ACPI_EVENTS
49ACPI_MODULE_NAME("evgpeutil") 49ACPI_MODULE_NAME("evgpeutil")
50 50
51#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
51/******************************************************************************* 52/*******************************************************************************
52 * 53 *
53 * FUNCTION: acpi_ev_walk_gpe_list 54 * FUNCTION: acpi_ev_walk_gpe_list
@@ -374,3 +375,5 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
374 375
375 return_ACPI_STATUS(AE_OK); 376 return_ACPI_STATUS(AE_OK);
376} 377}
378
379#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 84966f416463..51ef9f5e002d 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -108,27 +108,30 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
108 ACPI_FUNCTION_NAME(ev_queue_notify_request); 108 ACPI_FUNCTION_NAME(ev_queue_notify_request);
109 109
110 /* 110 /*
111 * For value 3 (Ejection Request), some device method may need to be run. 111 * For value 0x03 (Ejection Request), may need to run a device method.
112 * For value 2 (Device Wake) if _PRW exists, the _PS0 method may need 112 * For value 0x02 (Device Wake), if _PRW exists, may need to run
113 * to be run. 113 * the _PS0 method.
114 * For value 0x80 (Status Change) on the power button or sleep button, 114 * For value 0x80 (Status Change) on the power button or sleep button,
115 * initiate soft-off or sleep operation? 115 * initiate soft-off or sleep operation.
116 *
117 * For all cases, simply dispatch the notify to the handler.
116 */ 118 */
117 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 119 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
118 "Dispatching Notify on [%4.4s] Node %p Value 0x%2.2X (%s)\n", 120 "Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n",
119 acpi_ut_get_node_name(node), node, notify_value, 121 acpi_ut_get_node_name(node),
120 acpi_ut_get_notify_name(notify_value))); 122 acpi_ut_get_type_name(node->type), notify_value,
123 acpi_ut_get_notify_name(notify_value), node));
121 124
122 /* Get the notify object attached to the NS Node */ 125 /* Get the notify object attached to the NS Node */
123 126
124 obj_desc = acpi_ns_get_attached_object(node); 127 obj_desc = acpi_ns_get_attached_object(node);
125 if (obj_desc) { 128 if (obj_desc) {
126 129
127 /* We have the notify object, Get the right handler */ 130 /* We have the notify object, Get the correct handler */
128 131
129 switch (node->type) { 132 switch (node->type) {
130 133
131 /* Notify allowed only on these types */ 134 /* Notify is allowed only on these types */
132 135
133 case ACPI_TYPE_DEVICE: 136 case ACPI_TYPE_DEVICE:
134 case ACPI_TYPE_THERMAL: 137 case ACPI_TYPE_THERMAL:
@@ -152,7 +155,7 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
152 } 155 }
153 156
154 /* 157 /*
155 * If there is any handler to run, schedule the dispatcher. 158 * If there is a handler to run, schedule the dispatcher.
156 * Check for: 159 * Check for:
157 * 1) Global system notify handler 160 * 1) Global system notify handler
158 * 2) Global device notify handler 161 * 2) Global device notify handler
@@ -270,6 +273,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
270 acpi_ut_delete_generic_state(notify_info); 273 acpi_ut_delete_generic_state(notify_info);
271} 274}
272 275
276#if (!ACPI_REDUCED_HARDWARE)
273/****************************************************************************** 277/******************************************************************************
274 * 278 *
275 * FUNCTION: acpi_ev_terminate 279 * FUNCTION: acpi_ev_terminate
@@ -338,3 +342,5 @@ void acpi_ev_terminate(void)
338 } 342 }
339 return_VOID; 343 return_VOID;
340} 344}
345
346#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 26065c612e76..6a57aa2d70d1 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -48,7 +48,7 @@
48 48
49#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evsci") 50ACPI_MODULE_NAME("evsci")
51 51#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
52/* Local prototypes */ 52/* Local prototypes */
53static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context); 53static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
54 54
@@ -181,3 +181,5 @@ acpi_status acpi_ev_remove_sci_handler(void)
181 181
182 return_ACPI_STATUS(status); 182 return_ACPI_STATUS(status);
183} 183}
184
185#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 61944e89565a..44bef5744ebb 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -51,222 +51,6 @@
51#define _COMPONENT ACPI_EVENTS 51#define _COMPONENT ACPI_EVENTS
52ACPI_MODULE_NAME("evxface") 52ACPI_MODULE_NAME("evxface")
53 53
54/*******************************************************************************
55 *
56 * FUNCTION: acpi_install_exception_handler
57 *
58 * PARAMETERS: Handler - Pointer to the handler function for the
59 * event
60 *
61 * RETURN: Status
62 *
63 * DESCRIPTION: Saves the pointer to the handler function
64 *
65 ******************************************************************************/
66#ifdef ACPI_FUTURE_USAGE
67acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
68{
69 acpi_status status;
70
71 ACPI_FUNCTION_TRACE(acpi_install_exception_handler);
72
73 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
74 if (ACPI_FAILURE(status)) {
75 return_ACPI_STATUS(status);
76 }
77
78 /* Don't allow two handlers. */
79
80 if (acpi_gbl_exception_handler) {
81 status = AE_ALREADY_EXISTS;
82 goto cleanup;
83 }
84
85 /* Install the handler */
86
87 acpi_gbl_exception_handler = handler;
88
89 cleanup:
90 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
91 return_ACPI_STATUS(status);
92}
93
94ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
95#endif /* ACPI_FUTURE_USAGE */
96
97/*******************************************************************************
98 *
99 * FUNCTION: acpi_install_global_event_handler
100 *
101 * PARAMETERS: Handler - Pointer to the global event handler function
102 * Context - Value passed to the handler on each event
103 *
104 * RETURN: Status
105 *
106 * DESCRIPTION: Saves the pointer to the handler function. The global handler
107 * is invoked upon each incoming GPE and Fixed Event. It is
108 * invoked at interrupt level at the time of the event dispatch.
109 * Can be used to update event counters, etc.
110 *
111 ******************************************************************************/
112acpi_status
113acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
114{
115 acpi_status status;
116
117 ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);
118
119 /* Parameter validation */
120
121 if (!handler) {
122 return_ACPI_STATUS(AE_BAD_PARAMETER);
123 }
124
125 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
126 if (ACPI_FAILURE(status)) {
127 return_ACPI_STATUS(status);
128 }
129
130 /* Don't allow two handlers. */
131
132 if (acpi_gbl_global_event_handler) {
133 status = AE_ALREADY_EXISTS;
134 goto cleanup;
135 }
136
137 acpi_gbl_global_event_handler = handler;
138 acpi_gbl_global_event_handler_context = context;
139
140 cleanup:
141 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
142 return_ACPI_STATUS(status);
143}
144
145ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
146
147/*******************************************************************************
148 *
149 * FUNCTION: acpi_install_fixed_event_handler
150 *
151 * PARAMETERS: Event - Event type to enable.
152 * Handler - Pointer to the handler function for the
153 * event
154 * Context - Value passed to the handler on each GPE
155 *
156 * RETURN: Status
157 *
158 * DESCRIPTION: Saves the pointer to the handler function and then enables the
159 * event.
160 *
161 ******************************************************************************/
162acpi_status
163acpi_install_fixed_event_handler(u32 event,
164 acpi_event_handler handler, void *context)
165{
166 acpi_status status;
167
168 ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
169
170 /* Parameter validation */
171
172 if (event > ACPI_EVENT_MAX) {
173 return_ACPI_STATUS(AE_BAD_PARAMETER);
174 }
175
176 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
177 if (ACPI_FAILURE(status)) {
178 return_ACPI_STATUS(status);
179 }
180
181 /* Don't allow two handlers. */
182
183 if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
184 status = AE_ALREADY_EXISTS;
185 goto cleanup;
186 }
187
188 /* Install the handler before enabling the event */
189
190 acpi_gbl_fixed_event_handlers[event].handler = handler;
191 acpi_gbl_fixed_event_handlers[event].context = context;
192
193 status = acpi_clear_event(event);
194 if (ACPI_SUCCESS(status))
195 status = acpi_enable_event(event, 0);
196 if (ACPI_FAILURE(status)) {
197 ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
198 event));
199
200 /* Remove the handler */
201
202 acpi_gbl_fixed_event_handlers[event].handler = NULL;
203 acpi_gbl_fixed_event_handlers[event].context = NULL;
204 } else {
205 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
206 "Enabled fixed event %X, Handler=%p\n", event,
207 handler));
208 }
209
210 cleanup:
211 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
212 return_ACPI_STATUS(status);
213}
214
215ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
216
217/*******************************************************************************
218 *
219 * FUNCTION: acpi_remove_fixed_event_handler
220 *
221 * PARAMETERS: Event - Event type to disable.
222 * Handler - Address of the handler
223 *
224 * RETURN: Status
225 *
226 * DESCRIPTION: Disables the event and unregisters the event handler.
227 *
228 ******************************************************************************/
229acpi_status
230acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
231{
232 acpi_status status = AE_OK;
233
234 ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);
235
236 /* Parameter validation */
237
238 if (event > ACPI_EVENT_MAX) {
239 return_ACPI_STATUS(AE_BAD_PARAMETER);
240 }
241
242 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
243 if (ACPI_FAILURE(status)) {
244 return_ACPI_STATUS(status);
245 }
246
247 /* Disable the event before removing the handler */
248
249 status = acpi_disable_event(event, 0);
250
251 /* Always Remove the handler */
252
253 acpi_gbl_fixed_event_handlers[event].handler = NULL;
254 acpi_gbl_fixed_event_handlers[event].context = NULL;
255
256 if (ACPI_FAILURE(status)) {
257 ACPI_WARNING((AE_INFO,
258 "Could not write to fixed event enable register 0x%X",
259 event));
260 } else {
261 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
262 event));
263 }
264
265 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
266 return_ACPI_STATUS(status);
267}
268
269ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
270 54
271/******************************************************************************* 55/*******************************************************************************
272 * 56 *
@@ -334,6 +118,7 @@ acpi_add_handler_object(struct acpi_object_notify_handler *parent_obj,
334 return AE_OK; 118 return AE_OK;
335} 119}
336 120
121
337/******************************************************************************* 122/*******************************************************************************
338 * 123 *
339 * FUNCTION: acpi_install_notify_handler 124 * FUNCTION: acpi_install_notify_handler
@@ -705,6 +490,224 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
705 490
706/******************************************************************************* 491/*******************************************************************************
707 * 492 *
493 * FUNCTION: acpi_install_exception_handler
494 *
495 * PARAMETERS: Handler - Pointer to the handler function for the
496 * event
497 *
498 * RETURN: Status
499 *
500 * DESCRIPTION: Saves the pointer to the handler function
501 *
502 ******************************************************************************/
#ifdef ACPI_FUTURE_USAGE
/*
 * acpi_install_exception_handler - save the pointer to a user-supplied
 * exception handler. Only one handler may be registered at a time;
 * a second attempt fails with AE_ALREADY_EXISTS.
 */
acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_install_exception_handler);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Only a single handler may be installed */

	if (acpi_gbl_exception_handler != NULL) {
		status = AE_ALREADY_EXISTS;
	} else {
		/* Install the handler */

		acpi_gbl_exception_handler = handler;
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
#endif				/* ACPI_FUTURE_USAGE */
533
534#if (!ACPI_REDUCED_HARDWARE)
535/*******************************************************************************
536 *
537 * FUNCTION: acpi_install_global_event_handler
538 *
539 * PARAMETERS: Handler - Pointer to the global event handler function
540 * Context - Value passed to the handler on each event
541 *
542 * RETURN: Status
543 *
544 * DESCRIPTION: Saves the pointer to the handler function. The global handler
545 * is invoked upon each incoming GPE and Fixed Event. It is
546 * invoked at interrupt level at the time of the event dispatch.
547 * Can be used to update event counters, etc.
548 *
549 ******************************************************************************/
550acpi_status
551acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
552{
553 acpi_status status;
554
555 ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);
556
557 /* Parameter validation */
558
559 if (!handler) {
560 return_ACPI_STATUS(AE_BAD_PARAMETER);
561 }
562
563 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
564 if (ACPI_FAILURE(status)) {
565 return_ACPI_STATUS(status);
566 }
567
568 /* Don't allow two handlers. */
569
570 if (acpi_gbl_global_event_handler) {
571 status = AE_ALREADY_EXISTS;
572 goto cleanup;
573 }
574
575 acpi_gbl_global_event_handler = handler;
576 acpi_gbl_global_event_handler_context = context;
577
578 cleanup:
579 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
580 return_ACPI_STATUS(status);
581}
582
583ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
584
585/*******************************************************************************
586 *
587 * FUNCTION: acpi_install_fixed_event_handler
588 *
589 * PARAMETERS: Event - Event type to enable.
590 * Handler - Pointer to the handler function for the
591 * event
592 * Context - Value passed to the handler on each GPE
593 *
594 * RETURN: Status
595 *
596 * DESCRIPTION: Saves the pointer to the handler function and then enables the
597 * event.
598 *
599 ******************************************************************************/
600acpi_status
601acpi_install_fixed_event_handler(u32 event,
602 acpi_event_handler handler, void *context)
603{
604 acpi_status status;
605
606 ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
607
608 /* Parameter validation */
609
610 if (event > ACPI_EVENT_MAX) {
611 return_ACPI_STATUS(AE_BAD_PARAMETER);
612 }
613
614 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
615 if (ACPI_FAILURE(status)) {
616 return_ACPI_STATUS(status);
617 }
618
619 /* Don't allow two handlers. */
620
621 if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
622 status = AE_ALREADY_EXISTS;
623 goto cleanup;
624 }
625
626 /* Install the handler before enabling the event */
627
628 acpi_gbl_fixed_event_handlers[event].handler = handler;
629 acpi_gbl_fixed_event_handlers[event].context = context;
630
631 status = acpi_clear_event(event);
632 if (ACPI_SUCCESS(status))
633 status = acpi_enable_event(event, 0);
634 if (ACPI_FAILURE(status)) {
635 ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
636 event));
637
638 /* Remove the handler */
639
640 acpi_gbl_fixed_event_handlers[event].handler = NULL;
641 acpi_gbl_fixed_event_handlers[event].context = NULL;
642 } else {
643 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
644 "Enabled fixed event %X, Handler=%p\n", event,
645 handler));
646 }
647
648 cleanup:
649 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
650 return_ACPI_STATUS(status);
651}
652
653ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
654
655/*******************************************************************************
656 *
657 * FUNCTION: acpi_remove_fixed_event_handler
658 *
659 * PARAMETERS: Event - Event type to disable.
660 * Handler - Address of the handler
661 *
662 * RETURN: Status
663 *
664 * DESCRIPTION: Disables the event and unregisters the event handler.
665 *
666 ******************************************************************************/
667acpi_status
668acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
669{
670 acpi_status status = AE_OK;
671
672 ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);
673
674 /* Parameter validation */
675
676 if (event > ACPI_EVENT_MAX) {
677 return_ACPI_STATUS(AE_BAD_PARAMETER);
678 }
679
680 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
681 if (ACPI_FAILURE(status)) {
682 return_ACPI_STATUS(status);
683 }
684
685 /* Disable the event before removing the handler */
686
687 status = acpi_disable_event(event, 0);
688
689 /* Always Remove the handler */
690
691 acpi_gbl_fixed_event_handlers[event].handler = NULL;
692 acpi_gbl_fixed_event_handlers[event].context = NULL;
693
694 if (ACPI_FAILURE(status)) {
695 ACPI_WARNING((AE_INFO,
696 "Could not write to fixed event enable register 0x%X",
697 event));
698 } else {
699 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
700 event));
701 }
702
703 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
704 return_ACPI_STATUS(status);
705}
706
707ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
708
709/*******************************************************************************
710 *
708 * FUNCTION: acpi_install_gpe_handler 711 * FUNCTION: acpi_install_gpe_handler
709 * 712 *
710 * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT 713 * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
@@ -984,3 +987,4 @@ acpi_status acpi_release_global_lock(u32 handle)
984} 987}
985 988
986ACPI_EXPORT_SYMBOL(acpi_release_global_lock) 989ACPI_EXPORT_SYMBOL(acpi_release_global_lock)
990#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 1768bbec1002..77cee5a5e891 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -49,6 +49,7 @@
49#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evxfevnt") 50ACPI_MODULE_NAME("evxfevnt")
51 51
52#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
52/******************************************************************************* 53/*******************************************************************************
53 * 54 *
54 * FUNCTION: acpi_enable 55 * FUNCTION: acpi_enable
@@ -352,3 +353,4 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
352} 353}
353 354
354ACPI_EXPORT_SYMBOL(acpi_get_event_status) 355ACPI_EXPORT_SYMBOL(acpi_get_event_status)
356#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 33388fd69df4..86f9b343ebd4 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -50,6 +50,7 @@
50#define _COMPONENT ACPI_EVENTS 50#define _COMPONENT ACPI_EVENTS
51ACPI_MODULE_NAME("evxfgpe") 51ACPI_MODULE_NAME("evxfgpe")
52 52
53#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
53/****************************************************************************** 54/******************************************************************************
54 * 55 *
55 * FUNCTION: acpi_update_all_gpes 56 * FUNCTION: acpi_update_all_gpes
@@ -695,3 +696,4 @@ acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
695} 696}
696 697
697ACPI_EXPORT_SYMBOL(acpi_get_gpe_device) 698ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
699#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index d21ec5f0b3a9..d0b9ed5df97e 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -48,6 +48,7 @@
48#define _COMPONENT ACPI_HARDWARE 48#define _COMPONENT ACPI_HARDWARE
49ACPI_MODULE_NAME("hwacpi") 49ACPI_MODULE_NAME("hwacpi")
50 50
51#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
51/****************************************************************************** 52/******************************************************************************
52 * 53 *
53 * FUNCTION: acpi_hw_set_mode 54 * FUNCTION: acpi_hw_set_mode
@@ -166,3 +167,5 @@ u32 acpi_hw_get_mode(void)
166 return_UINT32(ACPI_SYS_MODE_LEGACY); 167 return_UINT32(ACPI_SYS_MODE_LEGACY);
167 } 168 }
168} 169}
170
171#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
new file mode 100644
index 000000000000..29e859293edd
--- /dev/null
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -0,0 +1,247 @@
1/******************************************************************************
2 *
3 * Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the
4 * extended FADT-V5 sleep registers.
5 *
6 *****************************************************************************/
7
8/*
9 * Copyright (C) 2000 - 2012, Intel Corp.
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions, and the following disclaimer,
17 * without modification.
18 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
19 * substantially similar to the "NO WARRANTY" disclaimer below
20 * ("Disclaimer") and any redistribution must be conditioned upon
21 * including a substantially similar Disclaimer requirement for further
22 * binary redistribution.
23 * 3. Neither the names of the above-listed copyright holders nor the names
24 * of any contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * Alternatively, this software may be distributed under the terms of the
28 * GNU General Public License ("GPL") version 2 as published by the Free
29 * Software Foundation.
30 *
31 * NO WARRANTY
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
35 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
36 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
41 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGES.
43 */
44
45#include <acpi/acpi.h>
46#include "accommon.h"
47
48#define _COMPONENT ACPI_HARDWARE
49ACPI_MODULE_NAME("hwesleep")
50
51/*******************************************************************************
52 *
53 * FUNCTION: acpi_hw_execute_sleep_method
54 *
55 * PARAMETERS: method_pathname - Pathname of method to execute
56 * integer_argument - Argument to pass to the method
57 *
58 * RETURN: None
59 *
60 * DESCRIPTION: Execute a sleep/wake related method with one integer argument
61 * and no return value.
62 *
63 ******************************************************************************/
64void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
65{
66 struct acpi_object_list arg_list;
67 union acpi_object arg;
68 acpi_status status;
69
70 ACPI_FUNCTION_TRACE(hw_execute_sleep_method);
71
72 /* One argument, integer_argument; No return value expected */
73
74 arg_list.count = 1;
75 arg_list.pointer = &arg;
76 arg.type = ACPI_TYPE_INTEGER;
77 arg.integer.value = (u64)integer_argument;
78
79 status = acpi_evaluate_object(NULL, method_pathname, &arg_list, NULL);
80 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
81 ACPI_EXCEPTION((AE_INFO, status, "While executing method %s",
82 method_pathname));
83 }
84
85 return_VOID;
86}
87
88/*******************************************************************************
89 *
90 * FUNCTION: acpi_hw_extended_sleep
91 *
92 * PARAMETERS: sleep_state - Which sleep state to enter
93 * Flags - ACPI_EXECUTE_GTS to run optional method
94 *
95 * RETURN: Status
96 *
97 * DESCRIPTION: Enter a system sleep state via the extended FADT sleep
98 * registers (V5 FADT).
99 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
100 *
101 ******************************************************************************/
102
103acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
104{
105 acpi_status status;
106 u8 sleep_type_value;
107 u64 sleep_status;
108
109 ACPI_FUNCTION_TRACE(hw_extended_sleep);
110
111 /* Extended sleep registers must be valid */
112
113 if (!acpi_gbl_FADT.sleep_control.address ||
114 !acpi_gbl_FADT.sleep_status.address) {
115 return_ACPI_STATUS(AE_NOT_EXIST);
116 }
117
118 /* Clear wake status (WAK_STS) */
119
120 status = acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
121 if (ACPI_FAILURE(status)) {
122 return_ACPI_STATUS(status);
123 }
124
125 acpi_gbl_system_awake_and_running = FALSE;
126
127 /* Optionally execute _GTS (Going To Sleep) */
128
129 if (flags & ACPI_EXECUTE_GTS) {
130 acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
131 }
132
133 /* Flush caches, as per ACPI specification */
134
135 ACPI_FLUSH_CPU_CACHE();
136
137 /*
138 * Set the SLP_TYP and SLP_EN bits.
139 *
140 * Note: We only use the first value returned by the \_Sx method
141 * (acpi_gbl_sleep_type_a) - As per ACPI specification.
142 */
143 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
144 "Entering sleep state [S%u]\n", sleep_state));
145
146 sleep_type_value =
147 ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
148 ACPI_X_SLEEP_TYPE_MASK);
149
150 status = acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE),
151 &acpi_gbl_FADT.sleep_control);
152 if (ACPI_FAILURE(status)) {
153 return_ACPI_STATUS(status);
154 }
155
156 /* Wait for transition back to Working State */
157
158 do {
159 status = acpi_read(&sleep_status, &acpi_gbl_FADT.sleep_status);
160 if (ACPI_FAILURE(status)) {
161 return_ACPI_STATUS(status);
162 }
163
164 } while (!(((u8)sleep_status) & ACPI_X_WAKE_STATUS));
165
166 return_ACPI_STATUS(AE_OK);
167}
168
169/*******************************************************************************
170 *
171 * FUNCTION: acpi_hw_extended_wake_prep
172 *
173 * PARAMETERS: sleep_state - Which sleep state we just exited
174 * Flags - ACPI_EXECUTE_BFS to run optional method
175 *
176 * RETURN: Status
177 *
178 * DESCRIPTION: Perform first part of OS-independent ACPI cleanup after
179 * a sleep. Called with interrupts ENABLED.
180 *
181 ******************************************************************************/
182
183acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
184{
185 acpi_status status;
186 u8 sleep_type_value;
187
188 ACPI_FUNCTION_TRACE(hw_extended_wake_prep);
189
190 status = acpi_get_sleep_type_data(ACPI_STATE_S0,
191 &acpi_gbl_sleep_type_a,
192 &acpi_gbl_sleep_type_b);
193 if (ACPI_SUCCESS(status)) {
194 sleep_type_value =
195 ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
196 ACPI_X_SLEEP_TYPE_MASK);
197
198 (void)acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE),
199 &acpi_gbl_FADT.sleep_control);
200 }
201
202 /* Optionally execute _BFS (Back From Sleep) */
203
204 if (flags & ACPI_EXECUTE_BFS) {
205 acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
206 }
207 return_ACPI_STATUS(AE_OK);
208}
209
210/*******************************************************************************
211 *
212 * FUNCTION: acpi_hw_extended_wake
213 *
214 * PARAMETERS: sleep_state - Which sleep state we just exited
215 * Flags - Reserved, set to zero
216 *
217 * RETURN: Status
218 *
219 * DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
220 * Called with interrupts ENABLED.
221 *
222 ******************************************************************************/
223
224acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags)
225{
226 ACPI_FUNCTION_TRACE(hw_extended_wake);
227
228 /* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */
229
230 acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID;
231
232 /* Execute the wake methods */
233
234 acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WAKING);
235 acpi_hw_execute_sleep_method(METHOD_PATHNAME__WAK, sleep_state);
236
237 /*
238 * Some BIOS code assumes that WAK_STS will be cleared on resume
239 * and use it to determine whether the system is rebooting or
240 * resuming. Clear WAK_STS for compatibility.
241 */
242 (void)acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
243 acpi_gbl_system_awake_and_running = TRUE;
244
245 acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
246 return_ACPI_STATUS(AE_OK);
247}
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 1a6894afef79..25bd28c4ae8d 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -48,7 +48,7 @@
48 48
49#define _COMPONENT ACPI_HARDWARE 49#define _COMPONENT ACPI_HARDWARE
50ACPI_MODULE_NAME("hwgpe") 50ACPI_MODULE_NAME("hwgpe")
51 51#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
52/* Local prototypes */ 52/* Local prototypes */
53static acpi_status 53static acpi_status
54acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 54acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
@@ -479,3 +479,5 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
479 status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block, NULL); 479 status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block, NULL);
480 return_ACPI_STATUS(status); 480 return_ACPI_STATUS(status);
481} 481}
482
483#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 4ea4eeb51bfd..6b6c83b87b52 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -51,6 +51,7 @@
51#define _COMPONENT ACPI_HARDWARE 51#define _COMPONENT ACPI_HARDWARE
52ACPI_MODULE_NAME("hwregs") 52ACPI_MODULE_NAME("hwregs")
53 53
54#if (!ACPI_REDUCED_HARDWARE)
54/* Local Prototypes */ 55/* Local Prototypes */
55static acpi_status 56static acpi_status
56acpi_hw_read_multiple(u32 *value, 57acpi_hw_read_multiple(u32 *value,
@@ -62,6 +63,8 @@ acpi_hw_write_multiple(u32 value,
62 struct acpi_generic_address *register_a, 63 struct acpi_generic_address *register_a,
63 struct acpi_generic_address *register_b); 64 struct acpi_generic_address *register_b);
64 65
66#endif /* !ACPI_REDUCED_HARDWARE */
67
65/****************************************************************************** 68/******************************************************************************
66 * 69 *
67 * FUNCTION: acpi_hw_validate_register 70 * FUNCTION: acpi_hw_validate_register
@@ -154,6 +157,7 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
154acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) 157acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
155{ 158{
156 u64 address; 159 u64 address;
160 u64 value64;
157 acpi_status status; 161 acpi_status status;
158 162
159 ACPI_FUNCTION_NAME(hw_read); 163 ACPI_FUNCTION_NAME(hw_read);
@@ -175,7 +179,9 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
175 */ 179 */
176 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 180 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
177 status = acpi_os_read_memory((acpi_physical_address) 181 status = acpi_os_read_memory((acpi_physical_address)
178 address, value, reg->bit_width); 182 address, &value64, reg->bit_width);
183
184 *value = (u32)value64;
179 } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ 185 } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
180 186
181 status = acpi_hw_read_port((acpi_io_address) 187 status = acpi_hw_read_port((acpi_io_address)
@@ -225,7 +231,8 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
225 */ 231 */
226 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 232 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
227 status = acpi_os_write_memory((acpi_physical_address) 233 status = acpi_os_write_memory((acpi_physical_address)
228 address, value, reg->bit_width); 234 address, (u64)value,
235 reg->bit_width);
229 } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ 236 } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
230 237
231 status = acpi_hw_write_port((acpi_io_address) 238 status = acpi_hw_write_port((acpi_io_address)
@@ -240,6 +247,7 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
240 return (status); 247 return (status);
241} 248}
242 249
250#if (!ACPI_REDUCED_HARDWARE)
243/******************************************************************************* 251/*******************************************************************************
244 * 252 *
245 * FUNCTION: acpi_hw_clear_acpi_status 253 * FUNCTION: acpi_hw_clear_acpi_status
@@ -285,7 +293,7 @@ exit:
285 293
286/******************************************************************************* 294/*******************************************************************************
287 * 295 *
288 * FUNCTION: acpi_hw_get_register_bit_mask 296 * FUNCTION: acpi_hw_get_bit_register_info
289 * 297 *
290 * PARAMETERS: register_id - Index of ACPI Register to access 298 * PARAMETERS: register_id - Index of ACPI Register to access
291 * 299 *
@@ -658,3 +666,5 @@ acpi_hw_write_multiple(u32 value,
658 666
659 return (status); 667 return (status);
660} 668}
669
670#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 3c4a922a9fc2..0ed85cac3231 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -1,7 +1,7 @@
1
2/****************************************************************************** 1/******************************************************************************
3 * 2 *
4 * Name: hwsleep.c - ACPI Hardware Sleep/Wake Interface 3 * Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the
4 * original/legacy sleep/PM registers.
5 * 5 *
6 *****************************************************************************/ 6 *****************************************************************************/
7 7
@@ -43,213 +43,37 @@
43 */ 43 */
44 44
45#include <acpi/acpi.h> 45#include <acpi/acpi.h>
46#include <linux/acpi.h>
46#include "accommon.h" 47#include "accommon.h"
47#include "actables.h"
48#include <linux/tboot.h>
49#include <linux/module.h> 48#include <linux/module.h>
50 49
51#define _COMPONENT ACPI_HARDWARE 50#define _COMPONENT ACPI_HARDWARE
52ACPI_MODULE_NAME("hwsleep") 51ACPI_MODULE_NAME("hwsleep")
53 52
53#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
54/******************************************************************************* 54/*******************************************************************************
55 * 55 *
56 * FUNCTION: acpi_set_firmware_waking_vector 56 * FUNCTION: acpi_hw_legacy_sleep
57 *
58 * PARAMETERS: physical_address - 32-bit physical address of ACPI real mode
59 * entry point.
60 *
61 * RETURN: Status
62 *
63 * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
64 *
65 ******************************************************************************/
66acpi_status
67acpi_set_firmware_waking_vector(u32 physical_address)
68{
69 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
70
71
72 /*
73 * According to the ACPI specification 2.0c and later, the 64-bit
74 * waking vector should be cleared and the 32-bit waking vector should
75 * be used, unless we want the wake-up code to be called by the BIOS in
76 * Protected Mode. Some systems (for example HP dv5-1004nr) are known
77 * to fail to resume if the 64-bit vector is used.
78 */
79
80 /* Set the 32-bit vector */
81
82 acpi_gbl_FACS->firmware_waking_vector = physical_address;
83
84 /* Clear the 64-bit vector if it exists */
85
86 if ((acpi_gbl_FACS->length > 32) && (acpi_gbl_FACS->version >= 1)) {
87 acpi_gbl_FACS->xfirmware_waking_vector = 0;
88 }
89
90 return_ACPI_STATUS(AE_OK);
91}
92
93ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
94
95#if ACPI_MACHINE_WIDTH == 64
96/*******************************************************************************
97 *
98 * FUNCTION: acpi_set_firmware_waking_vector64
99 *
100 * PARAMETERS: physical_address - 64-bit physical address of ACPI protected
101 * mode entry point.
102 *
103 * RETURN: Status
104 *
105 * DESCRIPTION: Sets the 64-bit X_firmware_waking_vector field of the FACS, if
106 * it exists in the table. This function is intended for use with
107 * 64-bit host operating systems.
108 *
109 ******************************************************************************/
110acpi_status
111acpi_set_firmware_waking_vector64(u64 physical_address)
112{
113 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);
114
115
116 /* Determine if the 64-bit vector actually exists */
117
118 if ((acpi_gbl_FACS->length <= 32) || (acpi_gbl_FACS->version < 1)) {
119 return_ACPI_STATUS(AE_NOT_EXIST);
120 }
121
122 /* Clear 32-bit vector, set the 64-bit X_ vector */
123
124 acpi_gbl_FACS->firmware_waking_vector = 0;
125 acpi_gbl_FACS->xfirmware_waking_vector = physical_address;
126
127 return_ACPI_STATUS(AE_OK);
128}
129
130ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
131#endif
132
133/*******************************************************************************
134 *
135 * FUNCTION: acpi_enter_sleep_state_prep
136 *
137 * PARAMETERS: sleep_state - Which sleep state to enter
138 *
139 * RETURN: Status
140 *
141 * DESCRIPTION: Prepare to enter a system sleep state (see ACPI 2.0 spec p 231)
142 * This function must execute with interrupts enabled.
143 * We break sleeping into 2 stages so that OSPM can handle
144 * various OS-specific tasks between the two steps.
145 *
146 ******************************************************************************/
147acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
148{
149 acpi_status status;
150 struct acpi_object_list arg_list;
151 union acpi_object arg;
152
153 ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
154
155 /* _PSW methods could be run here to enable wake-on keyboard, LAN, etc. */
156
157 status = acpi_get_sleep_type_data(sleep_state,
158 &acpi_gbl_sleep_type_a,
159 &acpi_gbl_sleep_type_b);
160 if (ACPI_FAILURE(status)) {
161 return_ACPI_STATUS(status);
162 }
163
164 /* Setup parameter object */
165
166 arg_list.count = 1;
167 arg_list.pointer = &arg;
168
169 arg.type = ACPI_TYPE_INTEGER;
170 arg.integer.value = sleep_state;
171
172 /* Run the _PTS method */
173
174 status = acpi_evaluate_object(NULL, METHOD_NAME__PTS, &arg_list, NULL);
175 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
176 return_ACPI_STATUS(status);
177 }
178
179 /* Setup the argument to _SST */
180
181 switch (sleep_state) {
182 case ACPI_STATE_S0:
183 arg.integer.value = ACPI_SST_WORKING;
184 break;
185
186 case ACPI_STATE_S1:
187 case ACPI_STATE_S2:
188 case ACPI_STATE_S3:
189 arg.integer.value = ACPI_SST_SLEEPING;
190 break;
191
192 case ACPI_STATE_S4:
193 arg.integer.value = ACPI_SST_SLEEP_CONTEXT;
194 break;
195
196 default:
197 arg.integer.value = ACPI_SST_INDICATOR_OFF; /* Default is off */
198 break;
199 }
200
201 /*
202 * Set the system indicators to show the desired sleep state.
203 * _SST is an optional method (return no error if not found)
204 */
205 status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
206 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
207 ACPI_EXCEPTION((AE_INFO, status,
208 "While executing method _SST"));
209 }
210
211 return_ACPI_STATUS(AE_OK);
212}
213
214ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
215
216static unsigned int gts, bfs;
217module_param(gts, uint, 0644);
218module_param(bfs, uint, 0644);
219MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
220MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
221
222/*******************************************************************************
223 *
224 * FUNCTION: acpi_enter_sleep_state
225 * 57 *
226 * PARAMETERS: sleep_state - Which sleep state to enter 58 * PARAMETERS: sleep_state - Which sleep state to enter
59 * Flags - ACPI_EXECUTE_GTS to run optional method
227 * 60 *
228 * RETURN: Status 61 * RETURN: Status
229 * 62 *
230 * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231) 63 * DESCRIPTION: Enter a system sleep state via the legacy FADT PM registers
231 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED 64 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
232 * 65 *
233 ******************************************************************************/ 66 ******************************************************************************/
234acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state) 67acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
235{ 68{
236 u32 pm1a_control;
237 u32 pm1b_control;
238 struct acpi_bit_register_info *sleep_type_reg_info; 69 struct acpi_bit_register_info *sleep_type_reg_info;
239 struct acpi_bit_register_info *sleep_enable_reg_info; 70 struct acpi_bit_register_info *sleep_enable_reg_info;
71 u32 pm1a_control;
72 u32 pm1b_control;
240 u32 in_value; 73 u32 in_value;
241 struct acpi_object_list arg_list;
242 union acpi_object arg;
243 acpi_status status; 74 acpi_status status;
244 75
245 ACPI_FUNCTION_TRACE(acpi_enter_sleep_state); 76 ACPI_FUNCTION_TRACE(hw_legacy_sleep);
246
247 if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
248 (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
249 ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
250 acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
251 return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
252 }
253 77
254 sleep_type_reg_info = 78 sleep_type_reg_info =
255 acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE); 79 acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE);
@@ -271,6 +95,18 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
271 return_ACPI_STATUS(status); 95 return_ACPI_STATUS(status);
272 } 96 }
273 97
98 if (sleep_state != ACPI_STATE_S5) {
99 /*
100 * Disable BM arbitration. This feature is contained within an
101 * optional register (PM2 Control), so ignore a BAD_ADDRESS
102 * exception.
103 */
104 status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
105 if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
106 return_ACPI_STATUS(status);
107 }
108 }
109
274 /* 110 /*
275 * 1) Disable/Clear all GPEs 111 * 1) Disable/Clear all GPEs
276 * 2) Enable all wakeup GPEs 112 * 2) Enable all wakeup GPEs
@@ -286,18 +122,10 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
286 return_ACPI_STATUS(status); 122 return_ACPI_STATUS(status);
287 } 123 }
288 124
289 if (gts) { 125 /* Optionally execute _GTS (Going To Sleep) */
290 /* Execute the _GTS method */
291
292 arg_list.count = 1;
293 arg_list.pointer = &arg;
294 arg.type = ACPI_TYPE_INTEGER;
295 arg.integer.value = sleep_state;
296 126
297 status = acpi_evaluate_object(NULL, METHOD_NAME__GTS, &arg_list, NULL); 127 if (flags & ACPI_EXECUTE_GTS) {
298 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 128 acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
299 return_ACPI_STATUS(status);
300 }
301 } 129 }
302 130
303 /* Get current value of PM1A control */ 131 /* Get current value of PM1A control */
@@ -344,8 +172,12 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
344 172
345 ACPI_FLUSH_CPU_CACHE(); 173 ACPI_FLUSH_CPU_CACHE();
346 174
347 tboot_sleep(sleep_state, pm1a_control, pm1b_control); 175 status = acpi_os_prepare_sleep(sleep_state, pm1a_control,
348 176 pm1b_control);
177 if (ACPI_SKIP(status))
178 return_ACPI_STATUS(AE_OK);
179 if (ACPI_FAILURE(status))
180 return_ACPI_STATUS(status);
349 /* Write #2: Write both SLP_TYP + SLP_EN */ 181 /* Write #2: Write both SLP_TYP + SLP_EN */
350 182
351 status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control); 183 status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
@@ -375,114 +207,44 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
375 } 207 }
376 } 208 }
377 209
378 /* Wait until we enter sleep state */ 210 /* Wait for transition back to Working State */
379
380 do {
381 status = acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS,
382 &in_value);
383 if (ACPI_FAILURE(status)) {
384 return_ACPI_STATUS(status);
385 }
386
387 /* Spin until we wake */
388
389 } while (!in_value);
390
391 return_ACPI_STATUS(AE_OK);
392}
393
394ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
395
396/*******************************************************************************
397 *
398 * FUNCTION: acpi_enter_sleep_state_s4bios
399 *
400 * PARAMETERS: None
401 *
402 * RETURN: Status
403 *
404 * DESCRIPTION: Perform a S4 bios request.
405 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
406 *
407 ******************************************************************************/
408acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
409{
410 u32 in_value;
411 acpi_status status;
412
413 ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
414
415 /* Clear the wake status bit (PM1) */
416
417 status =
418 acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
419 if (ACPI_FAILURE(status)) {
420 return_ACPI_STATUS(status);
421 }
422
423 status = acpi_hw_clear_acpi_status();
424 if (ACPI_FAILURE(status)) {
425 return_ACPI_STATUS(status);
426 }
427
428 /*
429 * 1) Disable/Clear all GPEs
430 * 2) Enable all wakeup GPEs
431 */
432 status = acpi_hw_disable_all_gpes();
433 if (ACPI_FAILURE(status)) {
434 return_ACPI_STATUS(status);
435 }
436 acpi_gbl_system_awake_and_running = FALSE;
437
438 status = acpi_hw_enable_all_wakeup_gpes();
439 if (ACPI_FAILURE(status)) {
440 return_ACPI_STATUS(status);
441 }
442
443 ACPI_FLUSH_CPU_CACHE();
444
445 status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
446 (u32) acpi_gbl_FADT.S4bios_request, 8);
447 211
448 do { 212 do {
449 acpi_os_stall(1000);
450 status = 213 status =
451 acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value); 214 acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
452 if (ACPI_FAILURE(status)) { 215 if (ACPI_FAILURE(status)) {
453 return_ACPI_STATUS(status); 216 return_ACPI_STATUS(status);
454 } 217 }
218
455 } while (!in_value); 219 } while (!in_value);
456 220
457 return_ACPI_STATUS(AE_OK); 221 return_ACPI_STATUS(AE_OK);
458} 222}
459 223
460ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
461
462/******************************************************************************* 224/*******************************************************************************
463 * 225 *
464 * FUNCTION: acpi_leave_sleep_state_prep 226 * FUNCTION: acpi_hw_legacy_wake_prep
465 * 227 *
466 * PARAMETERS: sleep_state - Which sleep state we are exiting 228 * PARAMETERS: sleep_state - Which sleep state we just exited
229 * Flags - ACPI_EXECUTE_BFS to run optional method
467 * 230 *
468 * RETURN: Status 231 * RETURN: Status
469 * 232 *
470 * DESCRIPTION: Perform the first state of OS-independent ACPI cleanup after a 233 * DESCRIPTION: Perform the first state of OS-independent ACPI cleanup after a
471 * sleep. 234 * sleep.
472 * Called with interrupts DISABLED. 235 * Called with interrupts ENABLED.
473 * 236 *
474 ******************************************************************************/ 237 ******************************************************************************/
475acpi_status acpi_leave_sleep_state_prep(u8 sleep_state) 238
239acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
476{ 240{
477 struct acpi_object_list arg_list;
478 union acpi_object arg;
479 acpi_status status; 241 acpi_status status;
480 struct acpi_bit_register_info *sleep_type_reg_info; 242 struct acpi_bit_register_info *sleep_type_reg_info;
481 struct acpi_bit_register_info *sleep_enable_reg_info; 243 struct acpi_bit_register_info *sleep_enable_reg_info;
482 u32 pm1a_control; 244 u32 pm1a_control;
483 u32 pm1b_control; 245 u32 pm1b_control;
484 246
485 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep); 247 ACPI_FUNCTION_TRACE(hw_legacy_wake_prep);
486 248
487 /* 249 /*
488 * Set SLP_TYPE and SLP_EN to state S0. 250 * Set SLP_TYPE and SLP_EN to state S0.
@@ -525,27 +287,20 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
525 } 287 }
526 } 288 }
527 289
528 if (bfs) { 290 /* Optionally execute _BFS (Back From Sleep) */
529 /* Execute the _BFS method */
530 291
531 arg_list.count = 1; 292 if (flags & ACPI_EXECUTE_BFS) {
532 arg_list.pointer = &arg; 293 acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
533 arg.type = ACPI_TYPE_INTEGER;
534 arg.integer.value = sleep_state;
535
536 status = acpi_evaluate_object(NULL, METHOD_NAME__BFS, &arg_list, NULL);
537 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
538 ACPI_EXCEPTION((AE_INFO, status, "During Method _BFS"));
539 }
540 } 294 }
541 return_ACPI_STATUS(status); 295 return_ACPI_STATUS(status);
542} 296}
543 297
544/******************************************************************************* 298/*******************************************************************************
545 * 299 *
546 * FUNCTION: acpi_leave_sleep_state 300 * FUNCTION: acpi_hw_legacy_wake
547 * 301 *
548 * PARAMETERS: sleep_state - Which sleep state we just exited 302 * PARAMETERS: sleep_state - Which sleep state we just exited
303 * Flags - Reserved, set to zero
549 * 304 *
550 * RETURN: Status 305 * RETURN: Status
551 * 306 *
@@ -553,31 +308,17 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
553 * Called with interrupts ENABLED. 308 * Called with interrupts ENABLED.
554 * 309 *
555 ******************************************************************************/ 310 ******************************************************************************/
556acpi_status acpi_leave_sleep_state(u8 sleep_state) 311
312acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
557{ 313{
558 struct acpi_object_list arg_list;
559 union acpi_object arg;
560 acpi_status status; 314 acpi_status status;
561 315
562 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state); 316 ACPI_FUNCTION_TRACE(hw_legacy_wake);
563 317
564 /* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */ 318 /* Ensure enter_sleep_state_prep -> enter_sleep_state ordering */
565 319
566 acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID; 320 acpi_gbl_sleep_type_a = ACPI_SLEEP_TYPE_INVALID;
567 321 acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WAKING);
568 /* Setup parameter object */
569
570 arg_list.count = 1;
571 arg_list.pointer = &arg;
572 arg.type = ACPI_TYPE_INTEGER;
573
574 /* Ignore any errors from these methods */
575
576 arg.integer.value = ACPI_SST_WAKING;
577 status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
578 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
579 ACPI_EXCEPTION((AE_INFO, status, "During Method _SST"));
580 }
581 322
582 /* 323 /*
583 * GPEs must be enabled before _WAK is called as GPEs 324 * GPEs must be enabled before _WAK is called as GPEs
@@ -591,46 +332,50 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
591 if (ACPI_FAILURE(status)) { 332 if (ACPI_FAILURE(status)) {
592 return_ACPI_STATUS(status); 333 return_ACPI_STATUS(status);
593 } 334 }
335
594 status = acpi_hw_enable_all_runtime_gpes(); 336 status = acpi_hw_enable_all_runtime_gpes();
595 if (ACPI_FAILURE(status)) { 337 if (ACPI_FAILURE(status)) {
596 return_ACPI_STATUS(status); 338 return_ACPI_STATUS(status);
597 } 339 }
598 340
599 arg.integer.value = sleep_state; 341 /*
600 status = acpi_evaluate_object(NULL, METHOD_NAME__WAK, &arg_list, NULL); 342 * Now we can execute _WAK, etc. Some machines require that the GPEs
601 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 343 * are enabled before the wake methods are executed.
602 ACPI_EXCEPTION((AE_INFO, status, "During Method _WAK")); 344 */
603 } 345 acpi_hw_execute_sleep_method(METHOD_PATHNAME__WAK, sleep_state);
604 /* TBD: _WAK "sometimes" returns stuff - do we want to look at it? */
605 346
606 /* 347 /*
607 * Some BIOSes assume that WAK_STS will be cleared on resume and use 348 * Some BIOS code assumes that WAK_STS will be cleared on resume
608 * it to determine whether the system is rebooting or resuming. Clear 349 * and use it to determine whether the system is rebooting or
609 * it for compatibility. 350 * resuming. Clear WAK_STS for compatibility.
610 */ 351 */
611 acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, 1); 352 acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, 1);
612
613 acpi_gbl_system_awake_and_running = TRUE; 353 acpi_gbl_system_awake_and_running = TRUE;
614 354
615 /* Enable power button */ 355 /* Enable power button */
616 356
617 (void) 357 (void)
618 acpi_write_bit_register(acpi_gbl_fixed_event_info 358 acpi_write_bit_register(acpi_gbl_fixed_event_info
619 [ACPI_EVENT_POWER_BUTTON]. 359 [ACPI_EVENT_POWER_BUTTON].
620 enable_register_id, ACPI_ENABLE_EVENT); 360 enable_register_id, ACPI_ENABLE_EVENT);
621 361
622 (void) 362 (void)
623 acpi_write_bit_register(acpi_gbl_fixed_event_info 363 acpi_write_bit_register(acpi_gbl_fixed_event_info
624 [ACPI_EVENT_POWER_BUTTON]. 364 [ACPI_EVENT_POWER_BUTTON].
625 status_register_id, ACPI_CLEAR_STATUS); 365 status_register_id, ACPI_CLEAR_STATUS);
626 366
627 arg.integer.value = ACPI_SST_WORKING; 367 /*
628 status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL); 368 * Enable BM arbitration. This feature is contained within an
629 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 369 * optional register (PM2 Control), so ignore a BAD_ADDRESS
630 ACPI_EXCEPTION((AE_INFO, status, "During Method _SST")); 370 * exception.
371 */
372 status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
373 if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
374 return_ACPI_STATUS(status);
631 } 375 }
632 376
377 acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
633 return_ACPI_STATUS(status); 378 return_ACPI_STATUS(status);
634} 379}
635 380
636ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state) 381#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index d4973d9da9f1..f1b2c3b94cac 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -49,6 +49,7 @@
49#define _COMPONENT ACPI_HARDWARE 49#define _COMPONENT ACPI_HARDWARE
50ACPI_MODULE_NAME("hwtimer") 50ACPI_MODULE_NAME("hwtimer")
51 51
52#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
52/****************************************************************************** 53/******************************************************************************
53 * 54 *
54 * FUNCTION: acpi_get_timer_resolution 55 * FUNCTION: acpi_get_timer_resolution
@@ -187,3 +188,4 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
187} 188}
188 189
189ACPI_EXPORT_SYMBOL(acpi_get_timer_duration) 190ACPI_EXPORT_SYMBOL(acpi_get_timer_duration)
191#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 9d38eb6c0d0b..ab513a972c95 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -74,8 +74,7 @@ acpi_status acpi_reset(void)
74 74
75 /* Check if the reset register is supported */ 75 /* Check if the reset register is supported */
76 76
77 if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) || 77 if (!reset_reg->address) {
78 !reset_reg->address) {
79 return_ACPI_STATUS(AE_NOT_EXIST); 78 return_ACPI_STATUS(AE_NOT_EXIST);
80 } 79 }
81 80
@@ -138,11 +137,6 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
138 return (status); 137 return (status);
139 } 138 }
140 139
141 width = reg->bit_width;
142 if (width == 64) {
143 width = 32; /* Break into two 32-bit transfers */
144 }
145
146 /* Initialize entire 64-bit return value to zero */ 140 /* Initialize entire 64-bit return value to zero */
147 141
148 *return_value = 0; 142 *return_value = 0;
@@ -154,24 +148,17 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
154 */ 148 */
155 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 149 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
156 status = acpi_os_read_memory((acpi_physical_address) 150 status = acpi_os_read_memory((acpi_physical_address)
157 address, &value, width); 151 address, return_value,
152 reg->bit_width);
158 if (ACPI_FAILURE(status)) { 153 if (ACPI_FAILURE(status)) {
159 return (status); 154 return (status);
160 } 155 }
161 *return_value = value; 156 } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
162
163 if (reg->bit_width == 64) {
164
165 /* Read the top 32 bits */
166 157
167 status = acpi_os_read_memory((acpi_physical_address) 158 width = reg->bit_width;
168 (address + 4), &value, 32); 159 if (width == 64) {
169 if (ACPI_FAILURE(status)) { 160 width = 32; /* Break into two 32-bit transfers */
170 return (status);
171 }
172 *return_value |= ((u64)value << 32);
173 } 161 }
174 } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
175 162
176 status = acpi_hw_read_port((acpi_io_address) 163 status = acpi_hw_read_port((acpi_io_address)
177 address, &value, width); 164 address, &value, width);
@@ -231,32 +218,22 @@ acpi_status acpi_write(u64 value, struct acpi_generic_address *reg)
231 return (status); 218 return (status);
232 } 219 }
233 220
234 width = reg->bit_width;
235 if (width == 64) {
236 width = 32; /* Break into two 32-bit transfers */
237 }
238
239 /* 221 /*
240 * Two address spaces supported: Memory or IO. PCI_Config is 222 * Two address spaces supported: Memory or IO. PCI_Config is
241 * not supported here because the GAS structure is insufficient 223 * not supported here because the GAS structure is insufficient
242 */ 224 */
243 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 225 if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
244 status = acpi_os_write_memory((acpi_physical_address) 226 status = acpi_os_write_memory((acpi_physical_address)
245 address, ACPI_LODWORD(value), 227 address, value, reg->bit_width);
246 width);
247 if (ACPI_FAILURE(status)) { 228 if (ACPI_FAILURE(status)) {
248 return (status); 229 return (status);
249 } 230 }
231 } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
250 232
251 if (reg->bit_width == 64) { 233 width = reg->bit_width;
252 status = acpi_os_write_memory((acpi_physical_address) 234 if (width == 64) {
253 (address + 4), 235 width = 32; /* Break into two 32-bit transfers */
254 ACPI_HIDWORD(value), 32);
255 if (ACPI_FAILURE(status)) {
256 return (status);
257 }
258 } 236 }
259 } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
260 237
261 status = acpi_hw_write_port((acpi_io_address) 238 status = acpi_hw_write_port((acpi_io_address)
262 address, ACPI_LODWORD(value), 239 address, ACPI_LODWORD(value),
@@ -286,6 +263,7 @@ acpi_status acpi_write(u64 value, struct acpi_generic_address *reg)
286 263
287ACPI_EXPORT_SYMBOL(acpi_write) 264ACPI_EXPORT_SYMBOL(acpi_write)
288 265
266#if (!ACPI_REDUCED_HARDWARE)
289/******************************************************************************* 267/*******************************************************************************
290 * 268 *
291 * FUNCTION: acpi_read_bit_register 269 * FUNCTION: acpi_read_bit_register
@@ -453,7 +431,7 @@ unlock_and_exit:
453} 431}
454 432
455ACPI_EXPORT_SYMBOL(acpi_write_bit_register) 433ACPI_EXPORT_SYMBOL(acpi_write_bit_register)
456 434#endif /* !ACPI_REDUCED_HARDWARE */
457/******************************************************************************* 435/*******************************************************************************
458 * 436 *
459 * FUNCTION: acpi_get_sleep_type_data 437 * FUNCTION: acpi_get_sleep_type_data
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
new file mode 100644
index 000000000000..762d059bb508
--- /dev/null
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -0,0 +1,431 @@
1/******************************************************************************
2 *
3 * Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2012, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include <linux/module.h>
47
48#define _COMPONENT ACPI_HARDWARE
49ACPI_MODULE_NAME("hwxfsleep")
50
51/* Local prototypes */
52static acpi_status
53acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id);
54
55/*
56 * Dispatch table used to efficiently branch to the various sleep
57 * functions.
58 */
59#define ACPI_SLEEP_FUNCTION_ID 0
60#define ACPI_WAKE_PREP_FUNCTION_ID 1
61#define ACPI_WAKE_FUNCTION_ID 2
62
63/* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
64
65static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
66 {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
67 acpi_hw_extended_sleep},
68 {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
69 acpi_hw_extended_wake_prep},
70 {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
71};
72
73/*
74 * These functions are removed for the ACPI_REDUCED_HARDWARE case:
75 * acpi_set_firmware_waking_vector
76 * acpi_set_firmware_waking_vector64
77 * acpi_enter_sleep_state_s4bios
78 */
79
80#if (!ACPI_REDUCED_HARDWARE)
81/*******************************************************************************
82 *
83 * FUNCTION: acpi_set_firmware_waking_vector
84 *
85 * PARAMETERS: physical_address - 32-bit physical address of ACPI real mode
86 * entry point.
87 *
88 * RETURN: Status
89 *
90 * DESCRIPTION: Sets the 32-bit firmware_waking_vector field of the FACS
91 *
92 ******************************************************************************/
93
94acpi_status acpi_set_firmware_waking_vector(u32 physical_address)
95{
96 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector);
97
98
99 /*
100 * According to the ACPI specification 2.0c and later, the 64-bit
101 * waking vector should be cleared and the 32-bit waking vector should
102 * be used, unless we want the wake-up code to be called by the BIOS in
103 * Protected Mode. Some systems (for example HP dv5-1004nr) are known
104 * to fail to resume if the 64-bit vector is used.
105 */
106
107 /* Set the 32-bit vector */
108
109 acpi_gbl_FACS->firmware_waking_vector = physical_address;
110
111 /* Clear the 64-bit vector if it exists */
112
113 if ((acpi_gbl_FACS->length > 32) && (acpi_gbl_FACS->version >= 1)) {
114 acpi_gbl_FACS->xfirmware_waking_vector = 0;
115 }
116
117 return_ACPI_STATUS(AE_OK);
118}
119
120ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector)
121
122#if ACPI_MACHINE_WIDTH == 64
123/*******************************************************************************
124 *
125 * FUNCTION: acpi_set_firmware_waking_vector64
126 *
127 * PARAMETERS: physical_address - 64-bit physical address of ACPI protected
128 * mode entry point.
129 *
130 * RETURN: Status
131 *
132 * DESCRIPTION: Sets the 64-bit X_firmware_waking_vector field of the FACS, if
133 * it exists in the table. This function is intended for use with
134 * 64-bit host operating systems.
135 *
136 ******************************************************************************/
137acpi_status acpi_set_firmware_waking_vector64(u64 physical_address)
138{
139 ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vector64);
140
141
142 /* Determine if the 64-bit vector actually exists */
143
144 if ((acpi_gbl_FACS->length <= 32) || (acpi_gbl_FACS->version < 1)) {
145 return_ACPI_STATUS(AE_NOT_EXIST);
146 }
147
148 /* Clear 32-bit vector, set the 64-bit X_ vector */
149
150 acpi_gbl_FACS->firmware_waking_vector = 0;
151 acpi_gbl_FACS->xfirmware_waking_vector = physical_address;
152 return_ACPI_STATUS(AE_OK);
153}
154
155ACPI_EXPORT_SYMBOL(acpi_set_firmware_waking_vector64)
156#endif
157
158/*******************************************************************************
159 *
160 * FUNCTION: acpi_enter_sleep_state_s4bios
161 *
162 * PARAMETERS: None
163 *
164 * RETURN: Status
165 *
166 * DESCRIPTION: Perform a S4 bios request.
167 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
168 *
169 ******************************************************************************/
170acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
171{
172 u32 in_value;
173 acpi_status status;
174
175 ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_s4bios);
176
177 /* Clear the wake status bit (PM1) */
178
179 status =
180 acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
181 if (ACPI_FAILURE(status)) {
182 return_ACPI_STATUS(status);
183 }
184
185 status = acpi_hw_clear_acpi_status();
186 if (ACPI_FAILURE(status)) {
187 return_ACPI_STATUS(status);
188 }
189
190 /*
191 * 1) Disable/Clear all GPEs
192 * 2) Enable all wakeup GPEs
193 */
194 status = acpi_hw_disable_all_gpes();
195 if (ACPI_FAILURE(status)) {
196 return_ACPI_STATUS(status);
197 }
198 acpi_gbl_system_awake_and_running = FALSE;
199
200 status = acpi_hw_enable_all_wakeup_gpes();
201 if (ACPI_FAILURE(status)) {
202 return_ACPI_STATUS(status);
203 }
204
205 ACPI_FLUSH_CPU_CACHE();
206
207 status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
208 (u32)acpi_gbl_FADT.S4bios_request, 8);
209
210 do {
211 acpi_os_stall(1000);
212 status =
213 acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
214 if (ACPI_FAILURE(status)) {
215 return_ACPI_STATUS(status);
216 }
217 } while (!in_value);
218
219 return_ACPI_STATUS(AE_OK);
220}
221
222ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
223#endif /* !ACPI_REDUCED_HARDWARE */
224/*******************************************************************************
225 *
226 * FUNCTION: acpi_hw_sleep_dispatch
227 *
228 * PARAMETERS: sleep_state - Which sleep state to enter/exit
229 * function_id - Sleep, wake_prep, or Wake
230 *
231 * RETURN: Status from the invoked sleep handling function.
232 *
233 * DESCRIPTION: Dispatch a sleep/wake request to the appropriate handling
234 * function.
235 *
236 ******************************************************************************/
237static acpi_status
238acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
239{
240 acpi_status status;
241 struct acpi_sleep_functions *sleep_functions =
242 &acpi_sleep_dispatch[function_id];
243
244#if (!ACPI_REDUCED_HARDWARE)
245
246 /*
247 * If the Hardware Reduced flag is set (from the FADT), we must
248 * use the extended sleep registers
249 */
250 if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) {
251 status = sleep_functions->extended_function(sleep_state, flags);
252 } else {
253 /* Legacy sleep */
254
255 status = sleep_functions->legacy_function(sleep_state, flags);
256 }
257
258 return (status);
259
260#else
261 /*
262 * For the case where reduced-hardware-only code is being generated,
263 * we know that only the extended sleep registers are available
264 */
265 status = sleep_functions->extended_function(sleep_state, flags);
266 return (status);
267
268#endif /* !ACPI_REDUCED_HARDWARE */
269}
270
271/*******************************************************************************
272 *
273 * FUNCTION: acpi_enter_sleep_state_prep
274 *
275 * PARAMETERS: sleep_state - Which sleep state to enter
276 *
277 * RETURN: Status
278 *
279 * DESCRIPTION: Prepare to enter a system sleep state.
280 * This function must execute with interrupts enabled.
281 * We break sleeping into 2 stages so that OSPM can handle
282 * various OS-specific tasks between the two steps.
283 *
284 ******************************************************************************/
285
286acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
287{
288 acpi_status status;
289 struct acpi_object_list arg_list;
290 union acpi_object arg;
291 u32 sst_value;
292
293 ACPI_FUNCTION_TRACE(acpi_enter_sleep_state_prep);
294
295 status = acpi_get_sleep_type_data(sleep_state,
296 &acpi_gbl_sleep_type_a,
297 &acpi_gbl_sleep_type_b);
298 if (ACPI_FAILURE(status)) {
299 return_ACPI_STATUS(status);
300 }
301
302 /* Execute the _PTS method (Prepare To Sleep) */
303
304 arg_list.count = 1;
305 arg_list.pointer = &arg;
306 arg.type = ACPI_TYPE_INTEGER;
307 arg.integer.value = sleep_state;
308
309 status =
310 acpi_evaluate_object(NULL, METHOD_PATHNAME__PTS, &arg_list, NULL);
311 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
312 return_ACPI_STATUS(status);
313 }
314
315 /* Setup the argument to the _SST method (System STatus) */
316
317 switch (sleep_state) {
318 case ACPI_STATE_S0:
319 sst_value = ACPI_SST_WORKING;
320 break;
321
322 case ACPI_STATE_S1:
323 case ACPI_STATE_S2:
324 case ACPI_STATE_S3:
325 sst_value = ACPI_SST_SLEEPING;
326 break;
327
328 case ACPI_STATE_S4:
329 sst_value = ACPI_SST_SLEEP_CONTEXT;
330 break;
331
332 default:
333 sst_value = ACPI_SST_INDICATOR_OFF; /* Default is off */
334 break;
335 }
336
337 /*
338 * Set the system indicators to show the desired sleep state.
339 * _SST is an optional method (return no error if not found)
340 */
341 acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, sst_value);
342 return_ACPI_STATUS(AE_OK);
343}
344
345ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
346
347/*******************************************************************************
348 *
349 * FUNCTION: acpi_enter_sleep_state
350 *
351 * PARAMETERS: sleep_state - Which sleep state to enter
352 * Flags - ACPI_EXECUTE_GTS to run optional method
353 *
354 * RETURN: Status
355 *
356 * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
357 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
358 *
359 ******************************************************************************/
360acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
361{
362 acpi_status status;
363
364 ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);
365
366 if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
367 (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
368 ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
369 acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
370 return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
371 }
372
373 status =
374 acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID);
375 return_ACPI_STATUS(status);
376}
377
378ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
379
380/*******************************************************************************
381 *
382 * FUNCTION: acpi_leave_sleep_state_prep
383 *
384 * PARAMETERS: sleep_state - Which sleep state we are exiting
385 * Flags - ACPI_EXECUTE_BFS to run optional method
386 *
387 * RETURN: Status
388 *
389 * DESCRIPTION: Perform the first state of OS-independent ACPI cleanup after a
390 * sleep.
391 * Called with interrupts DISABLED.
392 *
393 ******************************************************************************/
394acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags)
395{
396 acpi_status status;
397
398 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
399
400 status =
401 acpi_hw_sleep_dispatch(sleep_state, flags,
402 ACPI_WAKE_PREP_FUNCTION_ID);
403 return_ACPI_STATUS(status);
404}
405
406ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state_prep)
407
408/*******************************************************************************
409 *
410 * FUNCTION: acpi_leave_sleep_state
411 *
412 * PARAMETERS: sleep_state - Which sleep state we are exiting
413 *
414 * RETURN: Status
415 *
416 * DESCRIPTION: Perform OS-independent ACPI cleanup after a sleep
417 * Called with interrupts ENABLED.
418 *
419 ******************************************************************************/
420acpi_status acpi_leave_sleep_state(u8 sleep_state)
421{
422 acpi_status status;
423
424 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
425
426
427 status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID);
428 return_ACPI_STATUS(status);
429}
430
431ACPI_EXPORT_SYMBOL(acpi_leave_sleep_state)
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index b7f2b3be79ac..3f7f3f6e7dd5 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -242,7 +242,20 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
242 242
243 if (!obj_desc) { 243 if (!obj_desc) {
244 244
245 /* No attached object, we are done */ 245 /* No attached object. Some types should always have an object */
246
247 switch (type) {
248 case ACPI_TYPE_INTEGER:
249 case ACPI_TYPE_PACKAGE:
250 case ACPI_TYPE_BUFFER:
251 case ACPI_TYPE_STRING:
252 case ACPI_TYPE_METHOD:
253 acpi_os_printf("<No attached object>");
254 break;
255
256 default:
257 break;
258 }
246 259
247 acpi_os_printf("\n"); 260 acpi_os_printf("\n");
248 return (AE_OK); 261 return (AE_OK);
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 30ea5bc53a78..3b5acb0eb406 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -121,7 +121,7 @@ void acpi_ns_dump_root_devices(void)
121 return; 121 return;
122 } 122 }
123 123
124 status = acpi_get_handle(NULL, ACPI_NS_SYSTEM_BUS, &sys_bus_handle); 124 status = acpi_get_handle(NULL, METHOD_NAME__SB_, &sys_bus_handle);
125 if (ACPI_FAILURE(status)) { 125 if (ACPI_FAILURE(status)) {
126 return; 126 return;
127 } 127 }
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index bbe46a447d34..23ce09686418 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -638,8 +638,8 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
638 /* Create the new outer package and populate it */ 638 /* Create the new outer package and populate it */
639 639
640 status = 640 status =
641 acpi_ns_repair_package_list(data, 641 acpi_ns_wrap_with_package(data, *elements,
642 return_object_ptr); 642 return_object_ptr);
643 if (ACPI_FAILURE(status)) { 643 if (ACPI_FAILURE(status)) {
644 return (status); 644 return (status);
645 } 645 }
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 9c35d20eb52b..5519a64a353f 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -71,11 +71,10 @@ ACPI_MODULE_NAME("nsrepair")
71 * Buffer -> String 71 * Buffer -> String
72 * Buffer -> Package of Integers 72 * Buffer -> Package of Integers
73 * Package -> Package of one Package 73 * Package -> Package of one Package
74 * An incorrect standalone object is wrapped with required outer package
74 * 75 *
75 * Additional possible repairs: 76 * Additional possible repairs:
76 *
77 * Required package elements that are NULL replaced by Integer/String/Buffer 77 * Required package elements that are NULL replaced by Integer/String/Buffer
78 * Incorrect standalone package wrapped with required outer package
79 * 78 *
80 ******************************************************************************/ 79 ******************************************************************************/
81/* Local prototypes */ 80/* Local prototypes */
@@ -91,10 +90,6 @@ static acpi_status
91acpi_ns_convert_to_buffer(union acpi_operand_object *original_object, 90acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
92 union acpi_operand_object **return_object); 91 union acpi_operand_object **return_object);
93 92
94static acpi_status
95acpi_ns_convert_to_package(union acpi_operand_object *original_object,
96 union acpi_operand_object **return_object);
97
98/******************************************************************************* 93/*******************************************************************************
99 * 94 *
100 * FUNCTION: acpi_ns_repair_object 95 * FUNCTION: acpi_ns_repair_object
@@ -151,9 +146,24 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
151 } 146 }
152 } 147 }
153 if (expected_btypes & ACPI_RTYPE_PACKAGE) { 148 if (expected_btypes & ACPI_RTYPE_PACKAGE) {
154 status = acpi_ns_convert_to_package(return_object, &new_object); 149 /*
150 * A package is expected. We will wrap the existing object with a
151 * new package object. It is often the case that if a variable-length
152 * package is required, but there is only a single object needed, the
153 * BIOS will return that object instead of wrapping it with a Package
154 * object. Note: after the wrapping, the package will be validated
155 * for correct contents (expected object type or types).
156 */
157 status =
158 acpi_ns_wrap_with_package(data, return_object, &new_object);
155 if (ACPI_SUCCESS(status)) { 159 if (ACPI_SUCCESS(status)) {
156 goto object_repaired; 160 /*
161 * The original object just had its reference count
162 * incremented for being inserted into the new package.
163 */
164 *return_object_ptr = new_object; /* New Package object */
165 data->flags |= ACPI_OBJECT_REPAIRED;
166 return (AE_OK);
157 } 167 }
158 } 168 }
159 169
@@ -165,22 +175,27 @@ acpi_ns_repair_object(struct acpi_predefined_data *data,
165 175
166 /* Object was successfully repaired */ 176 /* Object was successfully repaired */
167 177
168 /*
169 * If the original object is a package element, we need to:
170 * 1. Set the reference count of the new object to match the
171 * reference count of the old object.
172 * 2. Decrement the reference count of the original object.
173 */
174 if (package_index != ACPI_NOT_PACKAGE_ELEMENT) { 178 if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
175 new_object->common.reference_count = 179 /*
176 return_object->common.reference_count; 180 * The original object is a package element. We need to
181 * decrement the reference count of the original object,
182 * for removing it from the package.
183 *
184 * However, if the original object was just wrapped with a
185 * package object as part of the repair, we don't need to
186 * change the reference count.
187 */
188 if (!(data->flags & ACPI_OBJECT_WRAPPED)) {
189 new_object->common.reference_count =
190 return_object->common.reference_count;
177 191
178 if (return_object->common.reference_count > 1) { 192 if (return_object->common.reference_count > 1) {
179 return_object->common.reference_count--; 193 return_object->common.reference_count--;
194 }
180 } 195 }
181 196
182 ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, 197 ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
183 "%s: Converted %s to expected %s at index %u\n", 198 "%s: Converted %s to expected %s at Package index %u\n",
184 data->pathname, 199 data->pathname,
185 acpi_ut_get_object_type_name(return_object), 200 acpi_ut_get_object_type_name(return_object),
186 acpi_ut_get_object_type_name(new_object), 201 acpi_ut_get_object_type_name(new_object),
@@ -453,65 +468,6 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
453 468
454/******************************************************************************* 469/*******************************************************************************
455 * 470 *
456 * FUNCTION: acpi_ns_convert_to_package
457 *
458 * PARAMETERS: original_object - Object to be converted
459 * return_object - Where the new converted object is returned
460 *
461 * RETURN: Status. AE_OK if conversion was successful.
462 *
463 * DESCRIPTION: Attempt to convert a Buffer object to a Package. Each byte of
464 * the buffer is converted to a single integer package element.
465 *
466 ******************************************************************************/
467
468static acpi_status
469acpi_ns_convert_to_package(union acpi_operand_object *original_object,
470 union acpi_operand_object **return_object)
471{
472 union acpi_operand_object *new_object;
473 union acpi_operand_object **elements;
474 u32 length;
475 u8 *buffer;
476
477 switch (original_object->common.type) {
478 case ACPI_TYPE_BUFFER:
479
480 /* Buffer-to-Package conversion */
481
482 length = original_object->buffer.length;
483 new_object = acpi_ut_create_package_object(length);
484 if (!new_object) {
485 return (AE_NO_MEMORY);
486 }
487
488 /* Convert each buffer byte to an integer package element */
489
490 elements = new_object->package.elements;
491 buffer = original_object->buffer.pointer;
492
493 while (length--) {
494 *elements =
495 acpi_ut_create_integer_object((u64) *buffer);
496 if (!*elements) {
497 acpi_ut_remove_reference(new_object);
498 return (AE_NO_MEMORY);
499 }
500 elements++;
501 buffer++;
502 }
503 break;
504
505 default:
506 return (AE_AML_OPERAND_TYPE);
507 }
508
509 *return_object = new_object;
510 return (AE_OK);
511}
512
513/*******************************************************************************
514 *
515 * FUNCTION: acpi_ns_repair_null_element 471 * FUNCTION: acpi_ns_repair_null_element
516 * 472 *
517 * PARAMETERS: Data - Pointer to validation data structure 473 * PARAMETERS: Data - Pointer to validation data structure
@@ -677,55 +633,56 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
677 633
678/******************************************************************************* 634/*******************************************************************************
679 * 635 *
680 * FUNCTION: acpi_ns_repair_package_list 636 * FUNCTION: acpi_ns_wrap_with_package
681 * 637 *
682 * PARAMETERS: Data - Pointer to validation data structure 638 * PARAMETERS: Data - Pointer to validation data structure
683 * obj_desc_ptr - Pointer to the object to repair. The new 639 * original_object - Pointer to the object to repair.
684 * package object is returned here, 640 * obj_desc_ptr - The new package object is returned here
685 * overwriting the old object.
686 * 641 *
687 * RETURN: Status, new object in *obj_desc_ptr 642 * RETURN: Status, new object in *obj_desc_ptr
688 * 643 *
689 * DESCRIPTION: Repair a common problem with objects that are defined to return 644 * DESCRIPTION: Repair a common problem with objects that are defined to
690 * a variable-length Package of Packages. If the variable-length 645 * return a variable-length Package of sub-objects. If there is
691 * is one, some BIOS code mistakenly simply declares a single 646 * only one sub-object, some BIOS code mistakenly simply declares
692 * Package instead of a Package with one sub-Package. This 647 * the single object instead of a Package with one sub-object.
693 * function attempts to repair this error by wrapping a Package 648 * This function attempts to repair this error by wrapping a
694 * object around the original Package, creating the correct 649 * Package object around the original object, creating the
695 * Package with one sub-Package. 650 * correct and expected Package with one sub-object.
696 * 651 *
697 * Names that can be repaired in this manner include: 652 * Names that can be repaired in this manner include:
698 * _ALR, _CSD, _HPX, _MLS, _PRT, _PSS, _TRT, TSS 653 * _ALR, _CSD, _HPX, _MLS, _PLD, _PRT, _PSS, _TRT, _TSS,
654 * _BCL, _DOD, _FIX, _Sx
699 * 655 *
700 ******************************************************************************/ 656 ******************************************************************************/
701 657
702acpi_status 658acpi_status
703acpi_ns_repair_package_list(struct acpi_predefined_data *data, 659acpi_ns_wrap_with_package(struct acpi_predefined_data *data,
704 union acpi_operand_object **obj_desc_ptr) 660 union acpi_operand_object *original_object,
661 union acpi_operand_object **obj_desc_ptr)
705{ 662{
706 union acpi_operand_object *pkg_obj_desc; 663 union acpi_operand_object *pkg_obj_desc;
707 664
708 ACPI_FUNCTION_NAME(ns_repair_package_list); 665 ACPI_FUNCTION_NAME(ns_wrap_with_package);
709 666
710 /* 667 /*
711 * Create the new outer package and populate it. The new package will 668 * Create the new outer package and populate it. The new package will
712 * have a single element, the lone subpackage. 669 * have a single element, the lone sub-object.
713 */ 670 */
714 pkg_obj_desc = acpi_ut_create_package_object(1); 671 pkg_obj_desc = acpi_ut_create_package_object(1);
715 if (!pkg_obj_desc) { 672 if (!pkg_obj_desc) {
716 return (AE_NO_MEMORY); 673 return (AE_NO_MEMORY);
717 } 674 }
718 675
719 pkg_obj_desc->package.elements[0] = *obj_desc_ptr; 676 pkg_obj_desc->package.elements[0] = original_object;
677
678 ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
679 "%s: Wrapped %s with expected Package object\n",
680 data->pathname,
681 acpi_ut_get_object_type_name(original_object)));
720 682
721 /* Return the new object in the object pointer */ 683 /* Return the new object in the object pointer */
722 684
723 *obj_desc_ptr = pkg_obj_desc; 685 *obj_desc_ptr = pkg_obj_desc;
724 data->flags |= ACPI_OBJECT_REPAIRED; 686 data->flags |= ACPI_OBJECT_REPAIRED | ACPI_OBJECT_WRAPPED;
725
726 ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
727 "%s: Repaired incorrectly formed Package\n",
728 data->pathname));
729
730 return (AE_OK); 687 return (AE_OK);
731} 688}
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index a535b7afda5c..75113759f69d 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -341,7 +341,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
341 341
342 if (!acpi_ns_valid_path_separator(*external_name) && 342 if (!acpi_ns_valid_path_separator(*external_name) &&
343 (*external_name != 0)) { 343 (*external_name != 0)) {
344 return_ACPI_STATUS(AE_BAD_PARAMETER); 344 return_ACPI_STATUS(AE_BAD_PATHNAME);
345 } 345 }
346 346
347 /* Move on the next segment */ 347 /* Move on the next segment */
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index c5d870406f41..4c9c760db4a4 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -363,10 +363,6 @@ static void acpi_tb_convert_fadt(void)
363 u32 address32; 363 u32 address32;
364 u32 i; 364 u32 i;
365 365
366 /* Update the local FADT table header length */
367
368 acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
369
370 /* 366 /*
371 * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary. 367 * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
372 * Later code will always use the X 64-bit field. Also, check for an 368 * Later code will always use the X 64-bit field. Also, check for an
@@ -408,6 +404,10 @@ static void acpi_tb_convert_fadt(void)
408 acpi_gbl_FADT.boot_flags = 0; 404 acpi_gbl_FADT.boot_flags = 0;
409 } 405 }
410 406
407 /* Update the local FADT table header length */
408
409 acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
410
411 /* 411 /*
412 * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X" 412 * Expand the ACPI 1.0 32-bit addresses to the ACPI 2.0 64-bit "X"
413 * generic address structures as necessary. Later code will always use 413 * generic address structures as necessary. Later code will always use
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 1aecf7baa4e0..c03500b4cc7a 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -114,7 +114,6 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
114{ 114{
115 u32 i; 115 u32 i;
116 acpi_status status = AE_OK; 116 acpi_status status = AE_OK;
117 struct acpi_table_header *override_table = NULL;
118 117
119 ACPI_FUNCTION_TRACE(tb_add_table); 118 ACPI_FUNCTION_TRACE(tb_add_table);
120 119
@@ -224,25 +223,10 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
224 /* 223 /*
225 * ACPI Table Override: 224 * ACPI Table Override:
226 * Allow the host to override dynamically loaded tables. 225 * Allow the host to override dynamically loaded tables.
226 * NOTE: the table is fully mapped at this point, and the mapping will
227 * be deleted by tb_table_override if the table is actually overridden.
227 */ 228 */
228 status = acpi_os_table_override(table_desc->pointer, &override_table); 229 (void)acpi_tb_table_override(table_desc->pointer, table_desc);
229 if (ACPI_SUCCESS(status) && override_table) {
230 ACPI_INFO((AE_INFO,
231 "%4.4s @ 0x%p Table override, replaced with:",
232 table_desc->pointer->signature,
233 ACPI_CAST_PTR(void, table_desc->address)));
234
235 /* We can delete the table that was passed as a parameter */
236
237 acpi_tb_delete_table(table_desc);
238
239 /* Setup descriptor for the new table */
240
241 table_desc->address = ACPI_PTR_TO_PHYSADDR(override_table);
242 table_desc->pointer = override_table;
243 table_desc->length = override_table->length;
244 table_desc->flags = ACPI_TABLE_ORIGIN_OVERRIDE;
245 }
246 230
247 /* Add the table to the global root table list */ 231 /* Add the table to the global root table list */
248 232
@@ -263,6 +247,95 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
263 247
264/******************************************************************************* 248/*******************************************************************************
265 * 249 *
250 * FUNCTION: acpi_tb_table_override
251 *
252 * PARAMETERS: table_header - Header for the original table
253 * table_desc - Table descriptor initialized for the
254 * original table. May or may not be mapped.
255 *
256 * RETURN: Pointer to the entire new table. NULL if table not overridden.
257 * If overridden, installs the new table within the input table
258 * descriptor.
259 *
260 * DESCRIPTION: Attempt table override by calling the OSL override functions.
261 * Note: If the table is overridden, then the entire new table
262 * is mapped and returned by this function.
263 *
264 ******************************************************************************/
265
266struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
267 *table_header,
268 struct acpi_table_desc
269 *table_desc)
270{
271 acpi_status status;
272 struct acpi_table_header *new_table = NULL;
273 acpi_physical_address new_address = 0;
274 u32 new_table_length = 0;
275 u8 new_flags;
276 char *override_type;
277
278 /* (1) Attempt logical override (returns a logical address) */
279
280 status = acpi_os_table_override(table_header, &new_table);
281 if (ACPI_SUCCESS(status) && new_table) {
282 new_address = ACPI_PTR_TO_PHYSADDR(new_table);
283 new_table_length = new_table->length;
284 new_flags = ACPI_TABLE_ORIGIN_OVERRIDE;
285 override_type = "Logical";
286 goto finish_override;
287 }
288
289 /* (2) Attempt physical override (returns a physical address) */
290
291 status = acpi_os_physical_table_override(table_header,
292 &new_address,
293 &new_table_length);
294 if (ACPI_SUCCESS(status) && new_address && new_table_length) {
295
296 /* Map the entire new table */
297
298 new_table = acpi_os_map_memory(new_address, new_table_length);
299 if (!new_table) {
300 ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
301 "%4.4s %p Attempted physical table override failed",
302 table_header->signature,
303 ACPI_CAST_PTR(void,
304 table_desc->address)));
305 return (NULL);
306 }
307
308 override_type = "Physical";
309 new_flags = ACPI_TABLE_ORIGIN_MAPPED;
310 goto finish_override;
311 }
312
313 return (NULL); /* There was no override */
314
315 finish_override:
316
317 ACPI_INFO((AE_INFO,
318 "%4.4s %p %s table override, new table: %p",
319 table_header->signature,
320 ACPI_CAST_PTR(void, table_desc->address),
321 override_type, new_table));
322
323 /* We can now unmap/delete the original table (if fully mapped) */
324
325 acpi_tb_delete_table(table_desc);
326
327 /* Setup descriptor for the new table */
328
329 table_desc->address = new_address;
330 table_desc->pointer = new_table;
331 table_desc->length = new_table_length;
332 table_desc->flags = new_flags;
333
334 return (new_table);
335}
336
337/*******************************************************************************
338 *
266 * FUNCTION: acpi_tb_resize_root_table_list 339 * FUNCTION: acpi_tb_resize_root_table_list
267 * 340 *
268 * PARAMETERS: None 341 * PARAMETERS: None
@@ -396,7 +469,11 @@ void acpi_tb_delete_table(struct acpi_table_desc *table_desc)
396 case ACPI_TABLE_ORIGIN_ALLOCATED: 469 case ACPI_TABLE_ORIGIN_ALLOCATED:
397 ACPI_FREE(table_desc->pointer); 470 ACPI_FREE(table_desc->pointer);
398 break; 471 break;
399 default:; 472
473 /* Not mapped or allocated, there is nothing we can do */
474
475 default:
476 return;
400 } 477 }
401 478
402 table_desc->pointer = NULL; 479 table_desc->pointer = NULL;
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 09ca39e14337..0a706cac37de 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -118,6 +118,7 @@ acpi_tb_check_xsdt(acpi_physical_address address)
118 return AE_OK; 118 return AE_OK;
119} 119}
120 120
121#if (!ACPI_REDUCED_HARDWARE)
121/******************************************************************************* 122/*******************************************************************************
122 * 123 *
123 * FUNCTION: acpi_tb_initialize_facs 124 * FUNCTION: acpi_tb_initialize_facs
@@ -148,6 +149,7 @@ acpi_status acpi_tb_initialize_facs(void)
148 &acpi_gbl_FACS)); 149 &acpi_gbl_FACS));
149 return status; 150 return status;
150} 151}
152#endif /* !ACPI_REDUCED_HARDWARE */
151 153
152/******************************************************************************* 154/*******************************************************************************
153 * 155 *
@@ -444,7 +446,7 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
444 * RETURN: None 446 * RETURN: None
445 * 447 *
446 * DESCRIPTION: Install an ACPI table into the global data structure. The 448 * DESCRIPTION: Install an ACPI table into the global data structure. The
447 * table override mechanism is implemented here to allow the host 449 * table override mechanism is called to allow the host
448 * OS to replace any table before it is installed in the root 450 * OS to replace any table before it is installed in the root
449 * table array. 451 * table array.
450 * 452 *
@@ -454,11 +456,9 @@ void
454acpi_tb_install_table(acpi_physical_address address, 456acpi_tb_install_table(acpi_physical_address address,
455 char *signature, u32 table_index) 457 char *signature, u32 table_index)
456{ 458{
457 u8 flags; 459 struct acpi_table_header *table;
458 acpi_status status; 460 struct acpi_table_header *final_table;
459 struct acpi_table_header *table_to_install; 461 struct acpi_table_desc *table_desc;
460 struct acpi_table_header *mapped_table;
461 struct acpi_table_header *override_table = NULL;
462 462
463 if (!address) { 463 if (!address) {
464 ACPI_ERROR((AE_INFO, 464 ACPI_ERROR((AE_INFO,
@@ -469,69 +469,78 @@ acpi_tb_install_table(acpi_physical_address address,
469 469
470 /* Map just the table header */ 470 /* Map just the table header */
471 471
472 mapped_table = 472 table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
473 acpi_os_map_memory(address, sizeof(struct acpi_table_header)); 473 if (!table) {
474 if (!mapped_table) { 474 ACPI_ERROR((AE_INFO,
475 "Could not map memory for table [%s] at %p",
476 signature, ACPI_CAST_PTR(void, address)));
475 return; 477 return;
476 } 478 }
477 479
478 /* If a particular signature is expected (DSDT/FACS), it must match */ 480 /* If a particular signature is expected (DSDT/FACS), it must match */
479 481
480 if (signature && !ACPI_COMPARE_NAME(mapped_table->signature, signature)) { 482 if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) {
481 ACPI_ERROR((AE_INFO, 483 ACPI_ERROR((AE_INFO,
482 "Invalid signature 0x%X for ACPI table, expected [%s]", 484 "Invalid signature 0x%X for ACPI table, expected [%s]",
483 *ACPI_CAST_PTR(u32, mapped_table->signature), 485 *ACPI_CAST_PTR(u32, table->signature), signature));
484 signature));
485 goto unmap_and_exit; 486 goto unmap_and_exit;
486 } 487 }
487 488
488 /* 489 /*
490 * Initialize the table entry. Set the pointer to NULL, since the
491 * table is not fully mapped at this time.
492 */
493 table_desc = &acpi_gbl_root_table_list.tables[table_index];
494
495 table_desc->address = address;
496 table_desc->pointer = NULL;
497 table_desc->length = table->length;
498 table_desc->flags = ACPI_TABLE_ORIGIN_MAPPED;
499 ACPI_MOVE_32_TO_32(table_desc->signature.ascii, table->signature);
500
501 /*
489 * ACPI Table Override: 502 * ACPI Table Override:
490 * 503 *
491 * Before we install the table, let the host OS override it with a new 504 * Before we install the table, let the host OS override it with a new
492 * one if desired. Any table within the RSDT/XSDT can be replaced, 505 * one if desired. Any table within the RSDT/XSDT can be replaced,
493 * including the DSDT which is pointed to by the FADT. 506 * including the DSDT which is pointed to by the FADT.
507 *
508 * NOTE: If the table is overridden, then final_table will contain a
509 * mapped pointer to the full new table. If the table is not overridden,
510 * or if there has been a physical override, then the table will be
511 * fully mapped later (in verify table). In any case, we must
512 * unmap the header that was mapped above.
494 */ 513 */
495 status = acpi_os_table_override(mapped_table, &override_table); 514 final_table = acpi_tb_table_override(table, table_desc);
496 if (ACPI_SUCCESS(status) && override_table) { 515 if (!final_table) {
497 ACPI_INFO((AE_INFO, 516 final_table = table; /* There was no override */
498 "%4.4s @ 0x%p Table override, replaced with:",
499 mapped_table->signature, ACPI_CAST_PTR(void,
500 address)));
501
502 acpi_gbl_root_table_list.tables[table_index].pointer =
503 override_table;
504 address = ACPI_PTR_TO_PHYSADDR(override_table);
505
506 table_to_install = override_table;
507 flags = ACPI_TABLE_ORIGIN_OVERRIDE;
508 } else {
509 table_to_install = mapped_table;
510 flags = ACPI_TABLE_ORIGIN_MAPPED;
511 } 517 }
512 518
513 /* Initialize the table entry */ 519 acpi_tb_print_table_header(table_desc->address, final_table);
514 520
515 acpi_gbl_root_table_list.tables[table_index].address = address; 521 /* Set the global integer width (based upon revision of the DSDT) */
516 acpi_gbl_root_table_list.tables[table_index].length =
517 table_to_install->length;
518 acpi_gbl_root_table_list.tables[table_index].flags = flags;
519
520 ACPI_MOVE_32_TO_32(&
521 (acpi_gbl_root_table_list.tables[table_index].
522 signature), table_to_install->signature);
523
524 acpi_tb_print_table_header(address, table_to_install);
525 522
526 if (table_index == ACPI_TABLE_INDEX_DSDT) { 523 if (table_index == ACPI_TABLE_INDEX_DSDT) {
524 acpi_ut_set_integer_width(final_table->revision);
525 }
527 526
528 /* Global integer width is based upon revision of the DSDT */ 527 /*
529 528 * If we have a physical override during this early loading of the ACPI
530 acpi_ut_set_integer_width(table_to_install->revision); 529 * tables, unmap the table for now. It will be mapped again later when
530 * it is actually used. This supports very early loading of ACPI tables,
531 * before virtual memory is fully initialized and running within the
532 * host OS. Note: A logical override has the ACPI_TABLE_ORIGIN_OVERRIDE
533 * flag set and will not be deleted below.
534 */
535 if (final_table != table) {
536 acpi_tb_delete_table(table_desc);
531 } 537 }
532 538
533 unmap_and_exit: 539 unmap_and_exit:
534 acpi_os_unmap_memory(mapped_table, sizeof(struct acpi_table_header)); 540
541 /* Always unmap the table header that we mapped above */
542
543 acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
535} 544}
536 545
537/******************************************************************************* 546/*******************************************************************************
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index d42ede5260c7..684849949bf3 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -497,19 +497,20 @@ char *acpi_ut_get_mutex_name(u32 mutex_id)
497 497
498/* Names for Notify() values, used for debug output */ 498/* Names for Notify() values, used for debug output */
499 499
500static const char *acpi_gbl_notify_value_names[] = { 500static const char *acpi_gbl_notify_value_names[ACPI_NOTIFY_MAX + 1] = {
501 "Bus Check", 501 /* 00 */ "Bus Check",
502 "Device Check", 502 /* 01 */ "Device Check",
503 "Device Wake", 503 /* 02 */ "Device Wake",
504 "Eject Request", 504 /* 03 */ "Eject Request",
505 "Device Check Light", 505 /* 04 */ "Device Check Light",
506 "Frequency Mismatch", 506 /* 05 */ "Frequency Mismatch",
507 "Bus Mode Mismatch", 507 /* 06 */ "Bus Mode Mismatch",
508 "Power Fault", 508 /* 07 */ "Power Fault",
509 "Capabilities Check", 509 /* 08 */ "Capabilities Check",
510 "Device PLD Check", 510 /* 09 */ "Device PLD Check",
511 "Reserved", 511 /* 10 */ "Reserved",
512 "System Locality Update" 512 /* 11 */ "System Locality Update",
513 /* 12 */ "Shutdown Request"
513}; 514};
514 515
515const char *acpi_ut_get_notify_name(u32 notify_value) 516const char *acpi_ut_get_notify_name(u32 notify_value)
@@ -519,9 +520,10 @@ const char *acpi_ut_get_notify_name(u32 notify_value)
519 return (acpi_gbl_notify_value_names[notify_value]); 520 return (acpi_gbl_notify_value_names[notify_value]);
520 } else if (notify_value <= ACPI_MAX_SYS_NOTIFY) { 521 } else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
521 return ("Reserved"); 522 return ("Reserved");
522 } else { /* Greater or equal to 0x80 */ 523 } else if (notify_value <= ACPI_MAX_DEVICE_SPECIFIC_NOTIFY) {
523 524 return ("Device Specific");
524 return ("**Device Specific**"); 525 } else {
526 return ("Hardware Specific");
525 } 527 }
526} 528}
527#endif 529#endif
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 4153584cf526..90f53b42eca9 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -140,6 +140,7 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
140 {NULL, ACPI_TYPE_ANY, NULL} 140 {NULL, ACPI_TYPE_ANY, NULL}
141}; 141};
142 142
143#if (!ACPI_REDUCED_HARDWARE)
143/****************************************************************************** 144/******************************************************************************
144 * 145 *
145 * Event and Hardware globals 146 * Event and Hardware globals
@@ -236,6 +237,7 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
236 ACPI_BITMASK_RT_CLOCK_STATUS, 237 ACPI_BITMASK_RT_CLOCK_STATUS,
237 ACPI_BITMASK_RT_CLOCK_ENABLE}, 238 ACPI_BITMASK_RT_CLOCK_ENABLE},
238}; 239};
240#endif /* !ACPI_REDUCED_HARDWARE */
239 241
240/******************************************************************************* 242/*******************************************************************************
241 * 243 *
@@ -286,6 +288,8 @@ acpi_status acpi_ut_init_globals(void)
286 288
287 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000; 289 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
288 290
291#if (!ACPI_REDUCED_HARDWARE)
292
289 /* GPE support */ 293 /* GPE support */
290 294
291 acpi_gbl_gpe_xrupt_list_head = NULL; 295 acpi_gbl_gpe_xrupt_list_head = NULL;
@@ -294,6 +298,10 @@ acpi_status acpi_ut_init_globals(void)
294 acpi_current_gpe_count = 0; 298 acpi_current_gpe_count = 0;
295 acpi_gbl_all_gpes_initialized = FALSE; 299 acpi_gbl_all_gpes_initialized = FALSE;
296 300
301 acpi_gbl_global_event_handler = NULL;
302
303#endif /* !ACPI_REDUCED_HARDWARE */
304
297 /* Global handlers */ 305 /* Global handlers */
298 306
299 acpi_gbl_system_notify.handler = NULL; 307 acpi_gbl_system_notify.handler = NULL;
@@ -302,7 +310,6 @@ acpi_status acpi_ut_init_globals(void)
302 acpi_gbl_init_handler = NULL; 310 acpi_gbl_init_handler = NULL;
303 acpi_gbl_table_handler = NULL; 311 acpi_gbl_table_handler = NULL;
304 acpi_gbl_interface_handler = NULL; 312 acpi_gbl_interface_handler = NULL;
305 acpi_gbl_global_event_handler = NULL;
306 313
307 /* Global Lock support */ 314 /* Global Lock support */
308 315
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 8359c0c5dc98..246798e4c938 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -53,27 +53,35 @@ ACPI_MODULE_NAME("utinit")
53/* Local prototypes */ 53/* Local prototypes */
54static void acpi_ut_terminate(void); 54static void acpi_ut_terminate(void);
55 55
56#if (!ACPI_REDUCED_HARDWARE)
57
58static void acpi_ut_free_gpe_lists(void);
59
60#else
61
62#define acpi_ut_free_gpe_lists()
63#endif /* !ACPI_REDUCED_HARDWARE */
64
65#if (!ACPI_REDUCED_HARDWARE)
56/****************************************************************************** 66/******************************************************************************
57 * 67 *
58 * FUNCTION: acpi_ut_terminate 68 * FUNCTION: acpi_ut_free_gpe_lists
59 * 69 *
60 * PARAMETERS: none 70 * PARAMETERS: none
61 * 71 *
62 * RETURN: none 72 * RETURN: none
63 * 73 *
64 * DESCRIPTION: Free global memory 74 * DESCRIPTION: Free global GPE lists
65 * 75 *
66 ******************************************************************************/ 76 ******************************************************************************/
67 77
68static void acpi_ut_terminate(void) 78static void acpi_ut_free_gpe_lists(void)
69{ 79{
70 struct acpi_gpe_block_info *gpe_block; 80 struct acpi_gpe_block_info *gpe_block;
71 struct acpi_gpe_block_info *next_gpe_block; 81 struct acpi_gpe_block_info *next_gpe_block;
72 struct acpi_gpe_xrupt_info *gpe_xrupt_info; 82 struct acpi_gpe_xrupt_info *gpe_xrupt_info;
73 struct acpi_gpe_xrupt_info *next_gpe_xrupt_info; 83 struct acpi_gpe_xrupt_info *next_gpe_xrupt_info;
74 84
75 ACPI_FUNCTION_TRACE(ut_terminate);
76
77 /* Free global GPE blocks and related info structures */ 85 /* Free global GPE blocks and related info structures */
78 86
79 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head; 87 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
@@ -91,7 +99,26 @@ static void acpi_ut_terminate(void)
91 ACPI_FREE(gpe_xrupt_info); 99 ACPI_FREE(gpe_xrupt_info);
92 gpe_xrupt_info = next_gpe_xrupt_info; 100 gpe_xrupt_info = next_gpe_xrupt_info;
93 } 101 }
102}
103#endif /* !ACPI_REDUCED_HARDWARE */
104
105/******************************************************************************
106 *
107 * FUNCTION: acpi_ut_terminate
108 *
109 * PARAMETERS: none
110 *
111 * RETURN: none
112 *
113 * DESCRIPTION: Free global memory
114 *
115 ******************************************************************************/
116
117static void acpi_ut_terminate(void)
118{
119 ACPI_FUNCTION_TRACE(ut_terminate);
94 120
121 acpi_ut_free_gpe_lists();
95 acpi_ut_delete_address_lists(); 122 acpi_ut_delete_address_lists();
96 return_VOID; 123 return_VOID;
97} 124}
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 644e8c8ebc4b..afa94f51ff0b 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -145,6 +145,8 @@ acpi_status acpi_enable_subsystem(u32 flags)
145 145
146 ACPI_FUNCTION_TRACE(acpi_enable_subsystem); 146 ACPI_FUNCTION_TRACE(acpi_enable_subsystem);
147 147
148#if (!ACPI_REDUCED_HARDWARE)
149
148 /* Enable ACPI mode */ 150 /* Enable ACPI mode */
149 151
150 if (!(flags & ACPI_NO_ACPI_ENABLE)) { 152 if (!(flags & ACPI_NO_ACPI_ENABLE)) {
@@ -169,6 +171,7 @@ acpi_status acpi_enable_subsystem(u32 flags)
169 ACPI_WARNING((AE_INFO, "Could not map the FACS table")); 171 ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
170 return_ACPI_STATUS(status); 172 return_ACPI_STATUS(status);
171 } 173 }
174#endif /* !ACPI_REDUCED_HARDWARE */
172 175
173 /* 176 /*
174 * Install the default op_region handlers. These are installed unless 177 * Install the default op_region handlers. These are installed unless
@@ -184,7 +187,7 @@ acpi_status acpi_enable_subsystem(u32 flags)
184 return_ACPI_STATUS(status); 187 return_ACPI_STATUS(status);
185 } 188 }
186 } 189 }
187 190#if (!ACPI_REDUCED_HARDWARE)
188 /* 191 /*
189 * Initialize ACPI Event handling (Fixed and General Purpose) 192 * Initialize ACPI Event handling (Fixed and General Purpose)
190 * 193 *
@@ -220,6 +223,7 @@ acpi_status acpi_enable_subsystem(u32 flags)
220 return_ACPI_STATUS(status); 223 return_ACPI_STATUS(status);
221 } 224 }
222 } 225 }
226#endif /* !ACPI_REDUCED_HARDWARE */
223 227
224 return_ACPI_STATUS(status); 228 return_ACPI_STATUS(status);
225} 229}
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index e5d53b7ddc7e..5577762daee1 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -558,33 +558,48 @@ void apei_resources_release(struct apei_resources *resources)
558} 558}
559EXPORT_SYMBOL_GPL(apei_resources_release); 559EXPORT_SYMBOL_GPL(apei_resources_release);
560 560
561static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr) 561static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
562 u32 *access_bit_width)
562{ 563{
563 u32 width, space_id; 564 u32 bit_width, bit_offset, access_size_code, space_id;
564 565
565 width = reg->bit_width; 566 bit_width = reg->bit_width;
567 bit_offset = reg->bit_offset;
568 access_size_code = reg->access_width;
566 space_id = reg->space_id; 569 space_id = reg->space_id;
567 /* Handle possible alignment issues */ 570 /* Handle possible alignment issues */
568 memcpy(paddr, &reg->address, sizeof(*paddr)); 571 memcpy(paddr, &reg->address, sizeof(*paddr));
569 if (!*paddr) { 572 if (!*paddr) {
570 pr_warning(FW_BUG APEI_PFX 573 pr_warning(FW_BUG APEI_PFX
571 "Invalid physical address in GAR [0x%llx/%u/%u]\n", 574 "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
572 *paddr, width, space_id); 575 *paddr, bit_width, bit_offset, access_size_code,
576 space_id);
573 return -EINVAL; 577 return -EINVAL;
574 } 578 }
575 579
576 if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) { 580 if (access_size_code < 1 || access_size_code > 4) {
577 pr_warning(FW_BUG APEI_PFX 581 pr_warning(FW_BUG APEI_PFX
578 "Invalid bit width in GAR [0x%llx/%u/%u]\n", 582 "Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
579 *paddr, width, space_id); 583 *paddr, bit_width, bit_offset, access_size_code,
584 space_id);
585 return -EINVAL;
586 }
587 *access_bit_width = 1UL << (access_size_code + 2);
588
589 if ((bit_width + bit_offset) > *access_bit_width) {
590 pr_warning(FW_BUG APEI_PFX
591 "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
592 *paddr, bit_width, bit_offset, access_size_code,
593 space_id);
580 return -EINVAL; 594 return -EINVAL;
581 } 595 }
582 596
583 if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY && 597 if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
584 space_id != ACPI_ADR_SPACE_SYSTEM_IO) { 598 space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
585 pr_warning(FW_BUG APEI_PFX 599 pr_warning(FW_BUG APEI_PFX
586 "Invalid address space type in GAR [0x%llx/%u/%u]\n", 600 "Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
587 *paddr, width, space_id); 601 *paddr, bit_width, bit_offset, access_size_code,
602 space_id);
588 return -EINVAL; 603 return -EINVAL;
589 } 604 }
590 605
@@ -595,23 +610,25 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
595int apei_read(u64 *val, struct acpi_generic_address *reg) 610int apei_read(u64 *val, struct acpi_generic_address *reg)
596{ 611{
597 int rc; 612 int rc;
613 u32 access_bit_width;
598 u64 address; 614 u64 address;
599 acpi_status status; 615 acpi_status status;
600 616
601 rc = apei_check_gar(reg, &address); 617 rc = apei_check_gar(reg, &address, &access_bit_width);
602 if (rc) 618 if (rc)
603 return rc; 619 return rc;
604 620
605 *val = 0; 621 *val = 0;
606 switch(reg->space_id) { 622 switch(reg->space_id) {
607 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 623 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
608 status = acpi_os_read_memory64((acpi_physical_address) 624 status = acpi_os_read_memory((acpi_physical_address) address,
609 address, val, reg->bit_width); 625 val, access_bit_width);
610 if (ACPI_FAILURE(status)) 626 if (ACPI_FAILURE(status))
611 return -EIO; 627 return -EIO;
612 break; 628 break;
613 case ACPI_ADR_SPACE_SYSTEM_IO: 629 case ACPI_ADR_SPACE_SYSTEM_IO:
614 status = acpi_os_read_port(address, (u32 *)val, reg->bit_width); 630 status = acpi_os_read_port(address, (u32 *)val,
631 access_bit_width);
615 if (ACPI_FAILURE(status)) 632 if (ACPI_FAILURE(status))
616 return -EIO; 633 return -EIO;
617 break; 634 break;
@@ -627,22 +644,23 @@ EXPORT_SYMBOL_GPL(apei_read);
627int apei_write(u64 val, struct acpi_generic_address *reg) 644int apei_write(u64 val, struct acpi_generic_address *reg)
628{ 645{
629 int rc; 646 int rc;
647 u32 access_bit_width;
630 u64 address; 648 u64 address;
631 acpi_status status; 649 acpi_status status;
632 650
633 rc = apei_check_gar(reg, &address); 651 rc = apei_check_gar(reg, &address, &access_bit_width);
634 if (rc) 652 if (rc)
635 return rc; 653 return rc;
636 654
637 switch (reg->space_id) { 655 switch (reg->space_id) {
638 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 656 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
639 status = acpi_os_write_memory64((acpi_physical_address) 657 status = acpi_os_write_memory((acpi_physical_address) address,
640 address, val, reg->bit_width); 658 val, access_bit_width);
641 if (ACPI_FAILURE(status)) 659 if (ACPI_FAILURE(status))
642 return -EIO; 660 return -EIO;
643 break; 661 break;
644 case ACPI_ADR_SPACE_SYSTEM_IO: 662 case ACPI_ADR_SPACE_SYSTEM_IO:
645 status = acpi_os_write_port(address, val, reg->bit_width); 663 status = acpi_os_write_port(address, val, access_bit_width);
646 if (ACPI_FAILURE(status)) 664 if (ACPI_FAILURE(status))
647 return -EIO; 665 return -EIO;
648 break; 666 break;
@@ -661,23 +679,24 @@ static int collect_res_callback(struct apei_exec_context *ctx,
661 struct apei_resources *resources = data; 679 struct apei_resources *resources = data;
662 struct acpi_generic_address *reg = &entry->register_region; 680 struct acpi_generic_address *reg = &entry->register_region;
663 u8 ins = entry->instruction; 681 u8 ins = entry->instruction;
682 u32 access_bit_width;
664 u64 paddr; 683 u64 paddr;
665 int rc; 684 int rc;
666 685
667 if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)) 686 if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
668 return 0; 687 return 0;
669 688
670 rc = apei_check_gar(reg, &paddr); 689 rc = apei_check_gar(reg, &paddr, &access_bit_width);
671 if (rc) 690 if (rc)
672 return rc; 691 return rc;
673 692
674 switch (reg->space_id) { 693 switch (reg->space_id) {
675 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 694 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
676 return apei_res_add(&resources->iomem, paddr, 695 return apei_res_add(&resources->iomem, paddr,
677 reg->bit_width / 8); 696 access_bit_width / 8);
678 case ACPI_ADR_SPACE_SYSTEM_IO: 697 case ACPI_ADR_SPACE_SYSTEM_IO:
679 return apei_res_add(&resources->ioport, paddr, 698 return apei_res_add(&resources->ioport, paddr,
680 reg->bit_width / 8); 699 access_bit_width / 8);
681 default: 700 default:
682 return -EINVAL; 701 return -EINVAL;
683 } 702 }
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
index 5d4189464d63..e6defd86b424 100644
--- a/drivers/acpi/apei/cper.c
+++ b/drivers/acpi/apei/cper.c
@@ -362,6 +362,7 @@ void apei_estatus_print(const char *pfx,
362 gedata_len = gdata->error_data_length; 362 gedata_len = gdata->error_data_length;
363 apei_estatus_print_section(pfx, gdata, sec_no); 363 apei_estatus_print_section(pfx, gdata, sec_no);
364 data_len -= gedata_len + sizeof(*gdata); 364 data_len -= gedata_len + sizeof(*gdata);
365 gdata = (void *)(gdata + 1) + gedata_len;
365 sec_no++; 366 sec_no++;
366 } 367 }
367} 368}
@@ -396,6 +397,7 @@ int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
396 if (gedata_len > data_len - sizeof(*gdata)) 397 if (gedata_len > data_len - sizeof(*gdata))
397 return -EINVAL; 398 return -EINVAL;
398 data_len -= gedata_len + sizeof(*gdata); 399 data_len -= gedata_len + sizeof(*gdata);
400 gdata = (void *)(gdata + 1) + gedata_len;
399 } 401 }
400 if (data_len) 402 if (data_len)
401 return -EINVAL; 403 return -EINVAL;
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 4ca087dd5f4f..8e1793649ec0 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -74,6 +74,8 @@ struct vendor_error_type_extension {
74 u8 reserved[3]; 74 u8 reserved[3];
75}; 75};
76 76
77static u32 notrigger;
78
77static u32 vendor_flags; 79static u32 vendor_flags;
78static struct debugfs_blob_wrapper vendor_blob; 80static struct debugfs_blob_wrapper vendor_blob;
79static char vendor_dev[64]; 81static char vendor_dev[64];
@@ -238,7 +240,7 @@ static void *einj_get_parameter_address(void)
238 return v5param; 240 return v5param;
239 } 241 }
240 } 242 }
241 if (paddrv4) { 243 if (param_extension && paddrv4) {
242 struct einj_parameter *v4param; 244 struct einj_parameter *v4param;
243 245
244 v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param)); 246 v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param));
@@ -496,9 +498,11 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
496 if (rc) 498 if (rc)
497 return rc; 499 return rc;
498 trigger_paddr = apei_exec_ctx_get_output(&ctx); 500 trigger_paddr = apei_exec_ctx_get_output(&ctx);
499 rc = __einj_error_trigger(trigger_paddr, type, param1, param2); 501 if (notrigger == 0) {
500 if (rc) 502 rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
501 return rc; 503 if (rc)
504 return rc;
505 }
502 rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION); 506 rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
503 507
504 return rc; 508 return rc;
@@ -700,6 +704,11 @@ static int __init einj_init(void)
700 einj_debug_dir, &error_param2); 704 einj_debug_dir, &error_param2);
701 if (!fentry) 705 if (!fentry)
702 goto err_unmap; 706 goto err_unmap;
707
708 fentry = debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR,
709 einj_debug_dir, &notrigger);
710 if (!fentry)
711 goto err_unmap;
703 } 712 }
704 713
705 if (vendor_dev[0]) { 714 if (vendor_dev[0]) {
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index eb9fab5b96e4..e4d9d24eb73d 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -917,7 +917,7 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
917{ 917{
918 if ((erst_tab->header_length != 918 if ((erst_tab->header_length !=
919 (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header))) 919 (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
920 && (erst_tab->header_length != sizeof(struct acpi_table_einj))) 920 && (erst_tab->header_length != sizeof(struct acpi_table_erst)))
921 return -EINVAL; 921 return -EINVAL;
922 if (erst_tab->header.length < sizeof(struct acpi_table_erst)) 922 if (erst_tab->header.length < sizeof(struct acpi_table_erst))
923 return -EINVAL; 923 return -EINVAL;
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
new file mode 100644
index 000000000000..8cf6c46e99fb
--- /dev/null
+++ b/drivers/acpi/bgrt.c
@@ -0,0 +1,175 @@
1/*
2 * Copyright 2012 Red Hat, Inc <mjg@redhat.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/device.h>
13#include <linux/sysfs.h>
14#include <acpi/acpi.h>
15#include <acpi/acpi_bus.h>
16
17static struct acpi_table_bgrt *bgrt_tab;
18static struct kobject *bgrt_kobj;
19
20struct bmp_header {
21 u16 id;
22 u32 size;
23} __attribute ((packed));
24
25static struct bmp_header bmp_header;
26
27static ssize_t show_version(struct device *dev,
28 struct device_attribute *attr, char *buf)
29{
30 return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->version);
31}
32static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
33
34static ssize_t show_status(struct device *dev,
35 struct device_attribute *attr, char *buf)
36{
37 return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->status);
38}
39static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
40
41static ssize_t show_type(struct device *dev,
42 struct device_attribute *attr, char *buf)
43{
44 return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_type);
45}
46static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
47
48static ssize_t show_xoffset(struct device *dev,
49 struct device_attribute *attr, char *buf)
50{
51 return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_x);
52}
53static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL);
54
55static ssize_t show_yoffset(struct device *dev,
56 struct device_attribute *attr, char *buf)
57{
58 return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab->image_offset_y);
59}
60static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL);
61
62static ssize_t show_image(struct file *file, struct kobject *kobj,
63 struct bin_attribute *attr, char *buf, loff_t off, size_t count)
64{
65 int size = attr->size;
66 void __iomem *image = attr->private;
67
68 if (off >= size) {
69 count = 0;
70 } else {
71 if (off + count > size)
72 count = size - off;
73
74 memcpy_fromio(buf, image+off, count);
75 }
76
77 return count;
78}
79
80static struct bin_attribute image_attr = {
81 .attr = {
82 .name = "image",
83 .mode = S_IRUGO,
84 },
85 .read = show_image,
86};
87
88static struct attribute *bgrt_attributes[] = {
89 &dev_attr_version.attr,
90 &dev_attr_status.attr,
91 &dev_attr_type.attr,
92 &dev_attr_xoffset.attr,
93 &dev_attr_yoffset.attr,
94 NULL,
95};
96
97static struct attribute_group bgrt_attribute_group = {
98 .attrs = bgrt_attributes,
99};
100
101static int __init bgrt_init(void)
102{
103 acpi_status status;
104 int ret;
105 void __iomem *bgrt;
106
107 if (acpi_disabled)
108 return -ENODEV;
109
110 status = acpi_get_table("BGRT", 0,
111 (struct acpi_table_header **)&bgrt_tab);
112
113 if (ACPI_FAILURE(status))
114 return -ENODEV;
115
116 sysfs_bin_attr_init(&image_attr);
117
118 bgrt = ioremap(bgrt_tab->image_address, sizeof(struct bmp_header));
119
120 if (!bgrt) {
121 ret = -EINVAL;
122 goto out_err;
123 }
124
125 memcpy_fromio(&bmp_header, bgrt, sizeof(bmp_header));
126 image_attr.size = bmp_header.size;
127 iounmap(bgrt);
128
129 image_attr.private = ioremap(bgrt_tab->image_address, image_attr.size);
130
131 if (!image_attr.private) {
132 ret = -EINVAL;
133 goto out_err;
134 }
135
136
137 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
138 if (!bgrt_kobj) {
139 ret = -EINVAL;
140 goto out_iounmap;
141 }
142
143 ret = sysfs_create_group(bgrt_kobj, &bgrt_attribute_group);
144 if (ret)
145 goto out_kobject;
146
147 ret = sysfs_create_bin_file(bgrt_kobj, &image_attr);
148 if (ret)
149 goto out_group;
150
151 return 0;
152
153out_group:
154 sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group);
155out_kobject:
156 kobject_put(bgrt_kobj);
157out_iounmap:
158 iounmap(image_attr.private);
159out_err:
160 return ret;
161}
162
163static void __exit bgrt_exit(void)
164{
165 iounmap(image_attr.private);
166 sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group);
167 sysfs_remove_bin_file(bgrt_kobj, &image_attr);
168}
169
170module_init(bgrt_init);
171module_exit(bgrt_exit);
172
173MODULE_AUTHOR("Matthew Garrett");
174MODULE_DESCRIPTION("BGRT boot graphic support");
175MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 9ecec98bc76e..3263b68cdfa3 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1010,6 +1010,7 @@ static int __init acpi_bus_init(void)
1010} 1010}
1011 1011
1012struct kobject *acpi_kobj; 1012struct kobject *acpi_kobj;
1013EXPORT_SYMBOL_GPL(acpi_kobj);
1013 1014
1014static int __init acpi_init(void) 1015static int __init acpi_init(void)
1015{ 1016{
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index e37615f310d7..7edaccce6640 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -822,10 +822,10 @@ static int acpi_ec_add(struct acpi_device *device)
822 first_ec = ec; 822 first_ec = ec;
823 device->driver_data = ec; 823 device->driver_data = ec;
824 824
825 WARN(!request_region(ec->data_addr, 1, "EC data"), 825 ret = !!request_region(ec->data_addr, 1, "EC data");
826 "Could not request EC data io port 0x%lx", ec->data_addr); 826 WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
827 WARN(!request_region(ec->command_addr, 1, "EC cmd"), 827 ret = !!request_region(ec->command_addr, 1, "EC cmd");
828 "Could not request EC cmd io port 0x%lx", ec->command_addr); 828 WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
829 829
830 pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n", 830 pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
831 ec->gpe, ec->command_addr, ec->data_addr); 831 ec->gpe, ec->command_addr, ec->data_addr);
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 7a2035fa8c71..266bc58ce0ce 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -95,8 +95,8 @@ static int suspend_nvs_register(unsigned long start, unsigned long size)
95{ 95{
96 struct nvs_page *entry, *next; 96 struct nvs_page *entry, *next;
97 97
98 pr_info("PM: Registering ACPI NVS region at %lx (%ld bytes)\n", 98 pr_info("PM: Registering ACPI NVS region [mem %#010lx-%#010lx] (%ld bytes)\n",
99 start, size); 99 start, start + size - 1, size);
100 100
101 while (size > 0) { 101 while (size > 0) {
102 unsigned int nr_bytes; 102 unsigned int nr_bytes;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 412a1e04a922..ba14fb93c929 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -77,6 +77,9 @@ EXPORT_SYMBOL(acpi_in_debugger);
77extern char line_buf[80]; 77extern char line_buf[80];
78#endif /*ENABLE_DEBUGGER */ 78#endif /*ENABLE_DEBUGGER */
79 79
80static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
81 u32 pm1b_ctrl);
82
80static acpi_osd_handler acpi_irq_handler; 83static acpi_osd_handler acpi_irq_handler;
81static void *acpi_irq_context; 84static void *acpi_irq_context;
82static struct workqueue_struct *kacpid_wq; 85static struct workqueue_struct *kacpid_wq;
@@ -347,7 +350,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
347 unsigned long pfn; 350 unsigned long pfn;
348 351
349 pfn = pg_off >> PAGE_SHIFT; 352 pfn = pg_off >> PAGE_SHIFT;
350 if (page_is_ram(pfn)) 353 if (should_use_kmap(pfn))
351 kunmap(pfn_to_page(pfn)); 354 kunmap(pfn_to_page(pfn));
352 else 355 else
353 iounmap(vaddr); 356 iounmap(vaddr);
@@ -554,6 +557,15 @@ acpi_os_table_override(struct acpi_table_header * existing_table,
554 return AE_OK; 557 return AE_OK;
555} 558}
556 559
560acpi_status
561acpi_os_physical_table_override(struct acpi_table_header *existing_table,
562 acpi_physical_address * new_address,
563 u32 *new_table_length)
564{
565 return AE_SUPPORT;
566}
567
568
557static irqreturn_t acpi_irq(int irq, void *dev_id) 569static irqreturn_t acpi_irq(int irq, void *dev_id)
558{ 570{
559 u32 handled; 571 u32 handled;
@@ -595,7 +607,8 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
595 607
596 acpi_irq_handler = handler; 608 acpi_irq_handler = handler;
597 acpi_irq_context = context; 609 acpi_irq_context = context;
598 if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) { 610 if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED, "acpi",
611 acpi_irq)) {
599 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); 612 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
600 acpi_irq_handler = NULL; 613 acpi_irq_handler = NULL;
601 return AE_NOT_ACQUIRED; 614 return AE_NOT_ACQUIRED;
@@ -699,49 +712,6 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
699 712
700EXPORT_SYMBOL(acpi_os_write_port); 713EXPORT_SYMBOL(acpi_os_write_port);
701 714
702acpi_status
703acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
704{
705 void __iomem *virt_addr;
706 unsigned int size = width / 8;
707 bool unmap = false;
708 u32 dummy;
709
710 rcu_read_lock();
711 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
712 if (!virt_addr) {
713 rcu_read_unlock();
714 virt_addr = acpi_os_ioremap(phys_addr, size);
715 if (!virt_addr)
716 return AE_BAD_ADDRESS;
717 unmap = true;
718 }
719
720 if (!value)
721 value = &dummy;
722
723 switch (width) {
724 case 8:
725 *(u8 *) value = readb(virt_addr);
726 break;
727 case 16:
728 *(u16 *) value = readw(virt_addr);
729 break;
730 case 32:
731 *(u32 *) value = readl(virt_addr);
732 break;
733 default:
734 BUG();
735 }
736
737 if (unmap)
738 iounmap(virt_addr);
739 else
740 rcu_read_unlock();
741
742 return AE_OK;
743}
744
745#ifdef readq 715#ifdef readq
746static inline u64 read64(const volatile void __iomem *addr) 716static inline u64 read64(const volatile void __iomem *addr)
747{ 717{
@@ -758,7 +728,7 @@ static inline u64 read64(const volatile void __iomem *addr)
758#endif 728#endif
759 729
760acpi_status 730acpi_status
761acpi_os_read_memory64(acpi_physical_address phys_addr, u64 *value, u32 width) 731acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
762{ 732{
763 void __iomem *virt_addr; 733 void __iomem *virt_addr;
764 unsigned int size = width / 8; 734 unsigned int size = width / 8;
@@ -803,45 +773,6 @@ acpi_os_read_memory64(acpi_physical_address phys_addr, u64 *value, u32 width)
803 return AE_OK; 773 return AE_OK;
804} 774}
805 775
806acpi_status
807acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
808{
809 void __iomem *virt_addr;
810 unsigned int size = width / 8;
811 bool unmap = false;
812
813 rcu_read_lock();
814 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
815 if (!virt_addr) {
816 rcu_read_unlock();
817 virt_addr = acpi_os_ioremap(phys_addr, size);
818 if (!virt_addr)
819 return AE_BAD_ADDRESS;
820 unmap = true;
821 }
822
823 switch (width) {
824 case 8:
825 writeb(value, virt_addr);
826 break;
827 case 16:
828 writew(value, virt_addr);
829 break;
830 case 32:
831 writel(value, virt_addr);
832 break;
833 default:
834 BUG();
835 }
836
837 if (unmap)
838 iounmap(virt_addr);
839 else
840 rcu_read_unlock();
841
842 return AE_OK;
843}
844
845#ifdef writeq 776#ifdef writeq
846static inline void write64(u64 val, volatile void __iomem *addr) 777static inline void write64(u64 val, volatile void __iomem *addr)
847{ 778{
@@ -856,7 +787,7 @@ static inline void write64(u64 val, volatile void __iomem *addr)
856#endif 787#endif
857 788
858acpi_status 789acpi_status
859acpi_os_write_memory64(acpi_physical_address phys_addr, u64 value, u32 width) 790acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
860{ 791{
861 void __iomem *virt_addr; 792 void __iomem *virt_addr;
862 unsigned int size = width / 8; 793 unsigned int size = width / 8;
@@ -1641,3 +1572,24 @@ acpi_status acpi_os_terminate(void)
1641 1572
1642 return AE_OK; 1573 return AE_OK;
1643} 1574}
1575
1576acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
1577 u32 pm1b_control)
1578{
1579 int rc = 0;
1580 if (__acpi_os_prepare_sleep)
1581 rc = __acpi_os_prepare_sleep(sleep_state,
1582 pm1a_control, pm1b_control);
1583 if (rc < 0)
1584 return AE_ERROR;
1585 else if (rc > 0)
1586 return AE_CTRL_SKIP;
1587
1588 return AE_OK;
1589}
1590
1591void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
1592 u32 pm1a_ctrl, u32 pm1b_ctrl))
1593{
1594 __acpi_os_prepare_sleep = func;
1595}
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 9ac2a9fa90ff..7049a7d27c4f 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -40,9 +40,11 @@
40#include <linux/init.h> 40#include <linux/init.h>
41#include <linux/types.h> 41#include <linux/types.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/pm_runtime.h>
43#include <acpi/acpi_bus.h> 44#include <acpi/acpi_bus.h>
44#include <acpi/acpi_drivers.h> 45#include <acpi/acpi_drivers.h>
45#include "sleep.h" 46#include "sleep.h"
47#include "internal.h"
46 48
47#define PREFIX "ACPI: " 49#define PREFIX "ACPI: "
48 50
@@ -77,6 +79,20 @@ static struct acpi_driver acpi_power_driver = {
77 }, 79 },
78}; 80};
79 81
82/*
83 * A power managed device
84 * A device may rely on multiple power resources.
85 * */
86struct acpi_power_managed_device {
87 struct device *dev; /* The physical device */
88 acpi_handle *handle;
89};
90
91struct acpi_power_resource_device {
92 struct acpi_power_managed_device *device;
93 struct acpi_power_resource_device *next;
94};
95
80struct acpi_power_resource { 96struct acpi_power_resource {
81 struct acpi_device * device; 97 struct acpi_device * device;
82 acpi_bus_id name; 98 acpi_bus_id name;
@@ -84,6 +100,9 @@ struct acpi_power_resource {
84 u32 order; 100 u32 order;
85 unsigned int ref_count; 101 unsigned int ref_count;
86 struct mutex resource_lock; 102 struct mutex resource_lock;
103
104 /* List of devices relying on this power resource */
105 struct acpi_power_resource_device *devices;
87}; 106};
88 107
89static struct list_head acpi_power_resource_list; 108static struct list_head acpi_power_resource_list;
@@ -183,8 +202,26 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
183 return 0; 202 return 0;
184} 203}
185 204
205/* Resume the device when all power resources in _PR0 are on */
206static void acpi_power_on_device(struct acpi_power_managed_device *device)
207{
208 struct acpi_device *acpi_dev;
209 acpi_handle handle = device->handle;
210 int state;
211
212 if (acpi_bus_get_device(handle, &acpi_dev))
213 return;
214
215 if(acpi_power_get_inferred_state(acpi_dev, &state))
216 return;
217
218 if (state == ACPI_STATE_D0 && pm_runtime_suspended(device->dev))
219 pm_request_resume(device->dev);
220}
221
186static int __acpi_power_on(struct acpi_power_resource *resource) 222static int __acpi_power_on(struct acpi_power_resource *resource)
187{ 223{
224 struct acpi_power_resource_device *device_list = resource->devices;
188 acpi_status status = AE_OK; 225 acpi_status status = AE_OK;
189 226
190 status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL); 227 status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
@@ -197,6 +234,12 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
197 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n", 234 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
198 resource->name)); 235 resource->name));
199 236
237 while (device_list) {
238 acpi_power_on_device(device_list->device);
239
240 device_list = device_list->next;
241 }
242
200 return 0; 243 return 0;
201} 244}
202 245
@@ -299,6 +342,125 @@ static int acpi_power_on_list(struct acpi_handle_list *list)
299 return result; 342 return result;
300} 343}
301 344
345static void __acpi_power_resource_unregister_device(struct device *dev,
346 acpi_handle res_handle)
347{
348 struct acpi_power_resource *resource = NULL;
349 struct acpi_power_resource_device *prev, *curr;
350
351 if (acpi_power_get_context(res_handle, &resource))
352 return;
353
354 mutex_lock(&resource->resource_lock);
355 prev = NULL;
356 curr = resource->devices;
357 while (curr) {
358 if (curr->device->dev == dev) {
359 if (!prev)
360 resource->devices = curr->next;
361 else
362 prev->next = curr->next;
363
364 kfree(curr);
365 break;
366 }
367
368 prev = curr;
369 curr = curr->next;
370 }
371 mutex_unlock(&resource->resource_lock);
372}
373
374/* Unlink dev from all power resources in _PR0 */
375void acpi_power_resource_unregister_device(struct device *dev, acpi_handle handle)
376{
377 struct acpi_device *acpi_dev;
378 struct acpi_handle_list *list;
379 int i;
380
381 if (!dev || !handle)
382 return;
383
384 if (acpi_bus_get_device(handle, &acpi_dev))
385 return;
386
387 list = &acpi_dev->power.states[ACPI_STATE_D0].resources;
388
389 for (i = 0; i < list->count; i++)
390 __acpi_power_resource_unregister_device(dev,
391 list->handles[i]);
392}
393
394static int __acpi_power_resource_register_device(
395 struct acpi_power_managed_device *powered_device, acpi_handle handle)
396{
397 struct acpi_power_resource *resource = NULL;
398 struct acpi_power_resource_device *power_resource_device;
399 int result;
400
401 result = acpi_power_get_context(handle, &resource);
402 if (result)
403 return result;
404
405 power_resource_device = kzalloc(
406 sizeof(*power_resource_device), GFP_KERNEL);
407 if (!power_resource_device)
408 return -ENOMEM;
409
410 power_resource_device->device = powered_device;
411
412 mutex_lock(&resource->resource_lock);
413 power_resource_device->next = resource->devices;
414 resource->devices = power_resource_device;
415 mutex_unlock(&resource->resource_lock);
416
417 return 0;
418}
419
420/* Link dev to all power resources in _PR0 */
421int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
422{
423 struct acpi_device *acpi_dev;
424 struct acpi_handle_list *list;
425 struct acpi_power_managed_device *powered_device;
426 int i, ret;
427
428 if (!dev || !handle)
429 return -ENODEV;
430
431 ret = acpi_bus_get_device(handle, &acpi_dev);
432 if (ret)
433 goto no_power_resource;
434
435 if (!acpi_dev->power.flags.power_resources)
436 goto no_power_resource;
437
438 powered_device = kzalloc(sizeof(*powered_device), GFP_KERNEL);
439 if (!powered_device)
440 return -ENOMEM;
441
442 powered_device->dev = dev;
443 powered_device->handle = handle;
444
445 list = &acpi_dev->power.states[ACPI_STATE_D0].resources;
446
447 for (i = 0; i < list->count; i++) {
448 ret = __acpi_power_resource_register_device(powered_device,
449 list->handles[i]);
450
451 if (ret) {
452 acpi_power_resource_unregister_device(dev, handle);
453 break;
454 }
455 }
456
457 return ret;
458
459no_power_resource:
460 printk(KERN_WARNING PREFIX "Invalid Power Resource to register!");
461 return -ENODEV;
462}
463
302/** 464/**
303 * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in 465 * acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
304 * ACPI 3.0) _PSW (Power State Wake) 466 * ACPI 3.0) _PSW (Power State Wake)
@@ -500,14 +662,14 @@ int acpi_power_transition(struct acpi_device *device, int state)
500{ 662{
501 int result; 663 int result;
502 664
503 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) 665 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
504 return -EINVAL; 666 return -EINVAL;
505 667
506 if (device->power.state == state) 668 if (device->power.state == state)
507 return 0; 669 return 0;
508 670
509 if ((device->power.state < ACPI_STATE_D0) 671 if ((device->power.state < ACPI_STATE_D0)
510 || (device->power.state > ACPI_STATE_D3)) 672 || (device->power.state > ACPI_STATE_D3_COLD))
511 return -ENODEV; 673 return -ENODEV;
512 674
513 /* TBD: Resources must be ordered. */ 675 /* TBD: Resources must be ordered. */
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index d4d9cb7e016a..0734086537b8 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -67,6 +67,7 @@
67#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80 67#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
68#define ACPI_PROCESSOR_NOTIFY_POWER 0x81 68#define ACPI_PROCESSOR_NOTIFY_POWER 0x81
69#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82 69#define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82
70#define ACPI_PROCESSOR_DEVICE_HID "ACPI0007"
70 71
71#define ACPI_PROCESSOR_LIMIT_USER 0 72#define ACPI_PROCESSOR_LIMIT_USER 0
72#define ACPI_PROCESSOR_LIMIT_THERMAL 1 73#define ACPI_PROCESSOR_LIMIT_THERMAL 1
@@ -87,7 +88,7 @@ static int acpi_processor_start(struct acpi_processor *pr);
87 88
88static const struct acpi_device_id processor_device_ids[] = { 89static const struct acpi_device_id processor_device_ids[] = {
89 {ACPI_PROCESSOR_OBJECT_HID, 0}, 90 {ACPI_PROCESSOR_OBJECT_HID, 0},
90 {"ACPI0007", 0}, 91 {ACPI_PROCESSOR_DEVICE_HID, 0},
91 {"", 0}, 92 {"", 0},
92}; 93};
93MODULE_DEVICE_TABLE(acpi, processor_device_ids); 94MODULE_DEVICE_TABLE(acpi, processor_device_ids);
@@ -535,8 +536,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
535 return -ENOMEM; 536 return -ENOMEM;
536 537
537 if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { 538 if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
538 kfree(pr); 539 result = -ENOMEM;
539 return -ENOMEM; 540 goto err_free_pr;
540 } 541 }
541 542
542 pr->handle = device->handle; 543 pr->handle = device->handle;
@@ -576,7 +577,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
576 dev = get_cpu_device(pr->id); 577 dev = get_cpu_device(pr->id);
577 if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) { 578 if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) {
578 result = -EFAULT; 579 result = -EFAULT;
579 goto err_free_cpumask; 580 goto err_clear_processor;
580 } 581 }
581 582
582 /* 583 /*
@@ -594,9 +595,15 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
594 595
595err_remove_sysfs: 596err_remove_sysfs:
596 sysfs_remove_link(&device->dev.kobj, "sysdev"); 597 sysfs_remove_link(&device->dev.kobj, "sysdev");
598err_clear_processor:
599 /*
600 * processor_device_array is not cleared to allow checks for buggy BIOS
601 */
602 per_cpu(processors, pr->id) = NULL;
597err_free_cpumask: 603err_free_cpumask:
598 free_cpumask_var(pr->throttling.shared_cpu_map); 604 free_cpumask_var(pr->throttling.shared_cpu_map);
599 605err_free_pr:
606 kfree(pr);
600 return result; 607 return result;
601} 608}
602 609
@@ -741,20 +748,46 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
741 return; 748 return;
742} 749}
743 750
751static acpi_status is_processor_device(acpi_handle handle)
752{
753 struct acpi_device_info *info;
754 char *hid;
755 acpi_status status;
756
757 status = acpi_get_object_info(handle, &info);
758 if (ACPI_FAILURE(status))
759 return status;
760
761 if (info->type == ACPI_TYPE_PROCESSOR) {
762 kfree(info);
763 return AE_OK; /* found a processor object */
764 }
765
766 if (!(info->valid & ACPI_VALID_HID)) {
767 kfree(info);
768 return AE_ERROR;
769 }
770
771 hid = info->hardware_id.string;
772 if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) {
773 kfree(info);
774 return AE_ERROR;
775 }
776
777 kfree(info);
778 return AE_OK; /* found a processor device object */
779}
780
744static acpi_status 781static acpi_status
745processor_walk_namespace_cb(acpi_handle handle, 782processor_walk_namespace_cb(acpi_handle handle,
746 u32 lvl, void *context, void **rv) 783 u32 lvl, void *context, void **rv)
747{ 784{
748 acpi_status status; 785 acpi_status status;
749 int *action = context; 786 int *action = context;
750 acpi_object_type type = 0;
751 787
752 status = acpi_get_type(handle, &type); 788 status = is_processor_device(handle);
753 if (ACPI_FAILURE(status)) 789 if (ACPI_FAILURE(status))
754 return (AE_OK); 790 return AE_OK; /* not a processor; continue to walk */
755
756 if (type != ACPI_TYPE_PROCESSOR)
757 return (AE_OK);
758 791
759 switch (*action) { 792 switch (*action) {
760 case INSTALL_NOTIFY_HANDLER: 793 case INSTALL_NOTIFY_HANDLER:
@@ -772,7 +805,8 @@ processor_walk_namespace_cb(acpi_handle handle,
772 break; 805 break;
773 } 806 }
774 807
775 return (AE_OK); 808 /* found a processor; skip walking underneath */
809 return AE_CTRL_DEPTH;
776} 810}
777 811
778static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr) 812static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
@@ -830,7 +864,7 @@ void acpi_processor_install_hotplug_notify(void)
830{ 864{
831#ifdef CONFIG_ACPI_HOTPLUG_CPU 865#ifdef CONFIG_ACPI_HOTPLUG_CPU
832 int action = INSTALL_NOTIFY_HANDLER; 866 int action = INSTALL_NOTIFY_HANDLER;
833 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, 867 acpi_walk_namespace(ACPI_TYPE_ANY,
834 ACPI_ROOT_OBJECT, 868 ACPI_ROOT_OBJECT,
835 ACPI_UINT32_MAX, 869 ACPI_UINT32_MAX,
836 processor_walk_namespace_cb, NULL, &action, NULL); 870 processor_walk_namespace_cb, NULL, &action, NULL);
@@ -843,7 +877,7 @@ void acpi_processor_uninstall_hotplug_notify(void)
843{ 877{
844#ifdef CONFIG_ACPI_HOTPLUG_CPU 878#ifdef CONFIG_ACPI_HOTPLUG_CPU
845 int action = UNINSTALL_NOTIFY_HANDLER; 879 int action = UNINSTALL_NOTIFY_HANDLER;
846 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, 880 acpi_walk_namespace(ACPI_TYPE_ANY,
847 ACPI_ROOT_OBJECT, 881 ACPI_ROOT_OBJECT,
848 ACPI_UINT32_MAX, 882 ACPI_UINT32_MAX,
849 processor_walk_namespace_cb, NULL, &action, NULL); 883 processor_walk_namespace_cb, NULL, &action, NULL);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 0e8e2de2ed3e..b3447f63e46b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -770,6 +770,35 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
770 return index; 770 return index;
771} 771}
772 772
773
774/**
775 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
776 * @dev: the target CPU
777 * @index: the index of suggested state
778 */
779static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
780{
781 struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
782 struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
783
784 ACPI_FLUSH_CPU_CACHE();
785
786 while (1) {
787
788 if (cx->entry_method == ACPI_CSTATE_HALT)
789 halt();
790 else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
791 inb(cx->address);
792 /* See comment in acpi_idle_do_entry() */
793 inl(acpi_gbl_FADT.xpm_timer_block.address);
794 } else
795 return -ENODEV;
796 }
797
798 /* Never reached */
799 return 0;
800}
801
773/** 802/**
774 * acpi_idle_enter_simple - enters an ACPI state without BM handling 803 * acpi_idle_enter_simple - enters an ACPI state without BM handling
775 * @dev: the target CPU 804 * @dev: the target CPU
@@ -1077,12 +1106,14 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
1077 state->flags |= CPUIDLE_FLAG_TIME_VALID; 1106 state->flags |= CPUIDLE_FLAG_TIME_VALID;
1078 1107
1079 state->enter = acpi_idle_enter_c1; 1108 state->enter = acpi_idle_enter_c1;
1109 state->enter_dead = acpi_idle_play_dead;
1080 drv->safe_state_index = count; 1110 drv->safe_state_index = count;
1081 break; 1111 break;
1082 1112
1083 case ACPI_STATE_C2: 1113 case ACPI_STATE_C2:
1084 state->flags |= CPUIDLE_FLAG_TIME_VALID; 1114 state->flags |= CPUIDLE_FLAG_TIME_VALID;
1085 state->enter = acpi_idle_enter_simple; 1115 state->enter = acpi_idle_enter_simple;
1116 state->enter_dead = acpi_idle_play_dead;
1086 drv->safe_state_index = count; 1117 drv->safe_state_index = count;
1087 break; 1118 break;
1088 1119
@@ -1159,8 +1190,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
1159 * to make the code that updates C-States be called once. 1190 * to make the code that updates C-States be called once.
1160 */ 1191 */
1161 1192
1162 if (smp_processor_id() == 0 && 1193 if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
1163 cpuidle_get_driver() == &acpi_idle_driver) {
1164 1194
1165 cpuidle_pause_and_lock(); 1195 cpuidle_pause_and_lock();
1166 /* Protect against cpu-hotplug */ 1196 /* Protect against cpu-hotplug */
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 3b599abf2b40..641b5450a0db 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -57,6 +57,27 @@ ACPI_MODULE_NAME("processor_thermal");
57static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg); 57static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
58static unsigned int acpi_thermal_cpufreq_is_init = 0; 58static unsigned int acpi_thermal_cpufreq_is_init = 0;
59 59
60#define reduction_pctg(cpu) \
61 per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
62
63/*
64 * Emulate "per package data" using per cpu data (which should really be
65 * provided elsewhere)
66 *
67 * Note we can lose a CPU on cpu hotunplug, in this case we forget the state
68 * temporarily. Fortunately that's not a big issue here (I hope)
69 */
70static int phys_package_first_cpu(int cpu)
71{
72 int i;
73 int id = topology_physical_package_id(cpu);
74
75 for_each_online_cpu(i)
76 if (topology_physical_package_id(i) == id)
77 return i;
78 return 0;
79}
80
60static int cpu_has_cpufreq(unsigned int cpu) 81static int cpu_has_cpufreq(unsigned int cpu)
61{ 82{
62 struct cpufreq_policy policy; 83 struct cpufreq_policy policy;
@@ -76,7 +97,7 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
76 97
77 max_freq = ( 98 max_freq = (
78 policy->cpuinfo.max_freq * 99 policy->cpuinfo.max_freq *
79 (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20) 100 (100 - reduction_pctg(policy->cpu) * 20)
80 ) / 100; 101 ) / 100;
81 102
82 cpufreq_verify_within_limits(policy, 0, max_freq); 103 cpufreq_verify_within_limits(policy, 0, max_freq);
@@ -102,16 +123,28 @@ static int cpufreq_get_cur_state(unsigned int cpu)
102 if (!cpu_has_cpufreq(cpu)) 123 if (!cpu_has_cpufreq(cpu))
103 return 0; 124 return 0;
104 125
105 return per_cpu(cpufreq_thermal_reduction_pctg, cpu); 126 return reduction_pctg(cpu);
106} 127}
107 128
108static int cpufreq_set_cur_state(unsigned int cpu, int state) 129static int cpufreq_set_cur_state(unsigned int cpu, int state)
109{ 130{
131 int i;
132
110 if (!cpu_has_cpufreq(cpu)) 133 if (!cpu_has_cpufreq(cpu))
111 return 0; 134 return 0;
112 135
113 per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state; 136 reduction_pctg(cpu) = state;
114 cpufreq_update_policy(cpu); 137
138 /*
139 * Update all the CPUs in the same package because they all
140 * contribute to the temperature and often share the same
141 * frequency.
142 */
143 for_each_online_cpu(i) {
144 if (topology_physical_package_id(i) ==
145 topology_physical_package_id(cpu))
146 cpufreq_update_policy(i);
147 }
115 return 0; 148 return 0;
116} 149}
117 150
@@ -119,10 +152,6 @@ void acpi_thermal_cpufreq_init(void)
119{ 152{
120 int i; 153 int i;
121 154
122 for (i = 0; i < nr_cpu_ids; i++)
123 if (cpu_present(i))
124 per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;
125
126 i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block, 155 i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
127 CPUFREQ_POLICY_NOTIFIER); 156 CPUFREQ_POLICY_NOTIFIER);
128 if (!i) 157 if (!i)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 605a2954ef17..1d02b7b5ade0 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -769,7 +769,7 @@ static int acpi_read_throttling_status(struct acpi_processor *pr,
769 u64 *value) 769 u64 *value)
770{ 770{
771 u32 bit_width, bit_offset; 771 u32 bit_width, bit_offset;
772 u64 ptc_value; 772 u32 ptc_value;
773 u64 ptc_mask; 773 u64 ptc_mask;
774 struct acpi_processor_throttling *throttling; 774 struct acpi_processor_throttling *throttling;
775 int ret = -1; 775 int ret = -1;
@@ -777,12 +777,11 @@ static int acpi_read_throttling_status(struct acpi_processor *pr,
777 throttling = &pr->throttling; 777 throttling = &pr->throttling;
778 switch (throttling->status_register.space_id) { 778 switch (throttling->status_register.space_id) {
779 case ACPI_ADR_SPACE_SYSTEM_IO: 779 case ACPI_ADR_SPACE_SYSTEM_IO:
780 ptc_value = 0;
781 bit_width = throttling->status_register.bit_width; 780 bit_width = throttling->status_register.bit_width;
782 bit_offset = throttling->status_register.bit_offset; 781 bit_offset = throttling->status_register.bit_offset;
783 782
784 acpi_os_read_port((acpi_io_address) throttling->status_register. 783 acpi_os_read_port((acpi_io_address) throttling->status_register.
785 address, (u32 *) &ptc_value, 784 address, &ptc_value,
786 (u32) (bit_width + bit_offset)); 785 (u32) (bit_width + bit_offset));
787 ptc_mask = (1 << bit_width) - 1; 786 ptc_mask = (1 << bit_width) - 1;
788 *value = (u64) ((ptc_value >> bit_offset) & ptc_mask); 787 *value = (u64) ((ptc_value >> bit_offset) & ptc_mask);
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
index a6c77e8b37bd..c1d612435939 100644
--- a/drivers/acpi/reboot.c
+++ b/drivers/acpi/reboot.c
@@ -23,8 +23,7 @@ void acpi_reboot(void)
23 /* Is the reset register supported? The spec says we should be 23 /* Is the reset register supported? The spec says we should be
24 * checking the bit width and bit offset, but Windows ignores 24 * checking the bit width and bit offset, but Windows ignores
25 * these fields */ 25 * these fields */
26 if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)) 26 /* Ignore also acpi_gbl_FADT.flags.ACPI_FADT_RESET_REGISTER */
27 return;
28 27
29 reset_value = acpi_gbl_FADT.reset_value; 28 reset_value = acpi_gbl_FADT.reset_value;
30 29
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 8ab80bafe3f1..767e2dcb9616 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -880,18 +880,22 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
880 int j; 880 int j;
881 881
882 device->power.flags.power_resources = 1; 882 device->power.flags.power_resources = 1;
883 ps->flags.valid = 1;
884 for (j = 0; j < ps->resources.count; j++) 883 for (j = 0; j < ps->resources.count; j++)
885 acpi_bus_add_power_resource(ps->resources.handles[j]); 884 acpi_bus_add_power_resource(ps->resources.handles[j]);
886 } 885 }
887 886
887 /* The exist of _PR3 indicates D3Cold support */
888 if (i == ACPI_STATE_D3) {
889 status = acpi_get_handle(device->handle, object_name, &handle);
890 if (ACPI_SUCCESS(status))
891 device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
892 }
893
888 /* Evaluate "_PSx" to see if we can do explicit sets */ 894 /* Evaluate "_PSx" to see if we can do explicit sets */
889 object_name[2] = 'S'; 895 object_name[2] = 'S';
890 status = acpi_get_handle(device->handle, object_name, &handle); 896 status = acpi_get_handle(device->handle, object_name, &handle);
891 if (ACPI_SUCCESS(status)) { 897 if (ACPI_SUCCESS(status))
892 ps->flags.explicit_set = 1; 898 ps->flags.explicit_set = 1;
893 ps->flags.valid = 1;
894 }
895 899
896 /* State is valid if we have some power control */ 900 /* State is valid if we have some power control */
897 if (ps->resources.count || ps->flags.explicit_set) 901 if (ps->resources.count || ps->flags.explicit_set)
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index ca191ff97844..1d661b5c3287 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -17,6 +17,8 @@
17#include <linux/suspend.h> 17#include <linux/suspend.h>
18#include <linux/reboot.h> 18#include <linux/reboot.h>
19#include <linux/acpi.h> 19#include <linux/acpi.h>
20#include <linux/module.h>
21#include <linux/pm_runtime.h>
20 22
21#include <asm/io.h> 23#include <asm/io.h>
22 24
@@ -26,6 +28,24 @@
26#include "internal.h" 28#include "internal.h"
27#include "sleep.h" 29#include "sleep.h"
28 30
31static unsigned int gts, bfs;
32module_param(gts, uint, 0644);
33module_param(bfs, uint, 0644);
34MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
35MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
36
37static u8 wake_sleep_flags(void)
38{
39 u8 flags = ACPI_NO_OPTIONAL_METHODS;
40
41 if (gts)
42 flags |= ACPI_EXECUTE_GTS;
43 if (bfs)
44 flags |= ACPI_EXECUTE_BFS;
45
46 return flags;
47}
48
29static u8 sleep_states[ACPI_S_STATE_COUNT]; 49static u8 sleep_states[ACPI_S_STATE_COUNT];
30 50
31static void acpi_sleep_tts_switch(u32 acpi_state) 51static void acpi_sleep_tts_switch(u32 acpi_state)
@@ -243,6 +263,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
243{ 263{
244 acpi_status status = AE_OK; 264 acpi_status status = AE_OK;
245 u32 acpi_state = acpi_target_sleep_state; 265 u32 acpi_state = acpi_target_sleep_state;
266 u8 flags = wake_sleep_flags();
246 int error; 267 int error;
247 268
248 ACPI_FLUSH_CPU_CACHE(); 269 ACPI_FLUSH_CPU_CACHE();
@@ -250,7 +271,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
250 switch (acpi_state) { 271 switch (acpi_state) {
251 case ACPI_STATE_S1: 272 case ACPI_STATE_S1:
252 barrier(); 273 barrier();
253 status = acpi_enter_sleep_state(acpi_state); 274 status = acpi_enter_sleep_state(acpi_state, flags);
254 break; 275 break;
255 276
256 case ACPI_STATE_S3: 277 case ACPI_STATE_S3:
@@ -265,7 +286,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
265 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); 286 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
266 287
267 /* Reprogram control registers and execute _BFS */ 288 /* Reprogram control registers and execute _BFS */
268 acpi_leave_sleep_state_prep(acpi_state); 289 acpi_leave_sleep_state_prep(acpi_state, flags);
269 290
270 /* ACPI 3.0 specs (P62) says that it's the responsibility 291 /* ACPI 3.0 specs (P62) says that it's the responsibility
271 * of the OSPM to clear the status bit [ implying that the 292 * of the OSPM to clear the status bit [ implying that the
@@ -529,27 +550,30 @@ static int acpi_hibernation_begin(void)
529 550
530static int acpi_hibernation_enter(void) 551static int acpi_hibernation_enter(void)
531{ 552{
553 u8 flags = wake_sleep_flags();
532 acpi_status status = AE_OK; 554 acpi_status status = AE_OK;
533 555
534 ACPI_FLUSH_CPU_CACHE(); 556 ACPI_FLUSH_CPU_CACHE();
535 557
536 /* This shouldn't return. If it returns, we have a problem */ 558 /* This shouldn't return. If it returns, we have a problem */
537 status = acpi_enter_sleep_state(ACPI_STATE_S4); 559 status = acpi_enter_sleep_state(ACPI_STATE_S4, flags);
538 /* Reprogram control registers and execute _BFS */ 560 /* Reprogram control registers and execute _BFS */
539 acpi_leave_sleep_state_prep(ACPI_STATE_S4); 561 acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags);
540 562
541 return ACPI_SUCCESS(status) ? 0 : -EFAULT; 563 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
542} 564}
543 565
544static void acpi_hibernation_leave(void) 566static void acpi_hibernation_leave(void)
545{ 567{
568 u8 flags = wake_sleep_flags();
569
546 /* 570 /*
547 * If ACPI is not enabled by the BIOS and the boot kernel, we need to 571 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
548 * enable it here. 572 * enable it here.
549 */ 573 */
550 acpi_enable(); 574 acpi_enable();
551 /* Reprogram control registers and execute _BFS */ 575 /* Reprogram control registers and execute _BFS */
552 acpi_leave_sleep_state_prep(ACPI_STATE_S4); 576 acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags);
553 /* Check the hardware signature */ 577 /* Check the hardware signature */
554 if (facs && s4_hardware_signature != facs->hardware_signature) { 578 if (facs && s4_hardware_signature != facs->hardware_signature) {
555 printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " 579 printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -730,6 +754,40 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
730 754
731#ifdef CONFIG_PM_SLEEP 755#ifdef CONFIG_PM_SLEEP
732/** 756/**
757 * acpi_pm_device_run_wake - Enable/disable wake-up for given device.
758 * @phys_dev: Device to enable/disable the platform to wake-up the system for.
759 * @enable: Whether enable or disable the wake-up functionality.
760 *
761 * Find the ACPI device object corresponding to @pci_dev and try to
762 * enable/disable the GPE associated with it.
763 */
764int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
765{
766 struct acpi_device *dev;
767 acpi_handle handle;
768
769 if (!device_run_wake(phys_dev))
770 return -EINVAL;
771
772 handle = DEVICE_ACPI_HANDLE(phys_dev);
773 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) {
774 dev_dbg(phys_dev, "ACPI handle has no context in %s!\n",
775 __func__);
776 return -ENODEV;
777 }
778
779 if (enable) {
780 acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
781 acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
782 } else {
783 acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
784 acpi_disable_wakeup_device_power(dev);
785 }
786
787 return 0;
788}
789
790/**
733 * acpi_pm_device_sleep_wake - enable or disable the system wake-up 791 * acpi_pm_device_sleep_wake - enable or disable the system wake-up
734 * capability of given device 792 * capability of given device
735 * @dev: device to handle 793 * @dev: device to handle
@@ -770,10 +828,12 @@ static void acpi_power_off_prepare(void)
770 828
771static void acpi_power_off(void) 829static void acpi_power_off(void)
772{ 830{
831 u8 flags = wake_sleep_flags();
832
773 /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ 833 /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
774 printk(KERN_DEBUG "%s called\n", __func__); 834 printk(KERN_DEBUG "%s called\n", __func__);
775 local_irq_disable(); 835 local_irq_disable();
776 acpi_enter_sleep_state(ACPI_STATE_S5); 836 acpi_enter_sleep_state(ACPI_STATE_S5, flags);
777} 837}
778 838
779/* 839/*
@@ -788,13 +848,13 @@ static void __init acpi_gts_bfs_check(void)
788{ 848{
789 acpi_handle dummy; 849 acpi_handle dummy;
790 850
791 if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_NAME__GTS, &dummy))) 851 if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy)))
792 { 852 {
793 printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n"); 853 printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n");
794 printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, " 854 printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, "
795 "please notify linux-acpi@vger.kernel.org\n"); 855 "please notify linux-acpi@vger.kernel.org\n");
796 } 856 }
797 if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_NAME__BFS, &dummy))) 857 if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy)))
798 { 858 {
799 printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n"); 859 printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n");
800 printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, " 860 printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, "
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 48fbc647b178..7dbebea1ec31 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -941,13 +941,13 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
941 if (!tz) 941 if (!tz)
942 return -EINVAL; 942 return -EINVAL;
943 943
944 /* Get temperature [_TMP] (required) */ 944 /* Get trip points [_CRT, _PSV, etc.] (required) */
945 result = acpi_thermal_get_temperature(tz); 945 result = acpi_thermal_get_trip_points(tz);
946 if (result) 946 if (result)
947 return result; 947 return result;
948 948
949 /* Get trip points [_CRT, _PSV, etc.] (required) */ 949 /* Get temperature [_TMP] (required) */
950 result = acpi_thermal_get_trip_points(tz); 950 result = acpi_thermal_get_temperature(tz);
951 if (result) 951 if (result)
952 return result; 952 return result;
953 953
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index eaef02afc7cf..9577b6fa2650 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -548,27 +548,27 @@ acpi_video_device_EDID(struct acpi_video_device *device,
548 * 1. The system BIOS should NOT automatically control the brightness 548 * 1. The system BIOS should NOT automatically control the brightness
549 * level of the LCD when the power changes from AC to DC. 549 * level of the LCD when the power changes from AC to DC.
550 * Return Value: 550 * Return Value:
551 * -1 wrong arg. 551 * -EINVAL wrong arg.
552 */ 552 */
553 553
554static int 554static int
555acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag) 555acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
556{ 556{
557 u64 status = 0; 557 acpi_status status;
558 union acpi_object arg0 = { ACPI_TYPE_INTEGER }; 558 union acpi_object arg0 = { ACPI_TYPE_INTEGER };
559 struct acpi_object_list args = { 1, &arg0 }; 559 struct acpi_object_list args = { 1, &arg0 };
560 560
561 561
562 if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1) { 562 if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
563 status = -1; 563 return -EINVAL;
564 goto Failed;
565 }
566 arg0.integer.value = (lcd_flag << 2) | bios_flag; 564 arg0.integer.value = (lcd_flag << 2) | bios_flag;
567 video->dos_setting = arg0.integer.value; 565 video->dos_setting = arg0.integer.value;
568 acpi_evaluate_object(video->device->handle, "_DOS", &args, NULL); 566 status = acpi_evaluate_object(video->device->handle, "_DOS",
567 &args, NULL);
568 if (ACPI_FAILURE(status))
569 return -EIO;
569 570
570 Failed: 571 return 0;
571 return status;
572} 572}
573 573
574/* 574/*
@@ -1343,15 +1343,17 @@ static int
1343acpi_video_bus_get_devices(struct acpi_video_bus *video, 1343acpi_video_bus_get_devices(struct acpi_video_bus *video,
1344 struct acpi_device *device) 1344 struct acpi_device *device)
1345{ 1345{
1346 int status = 0; 1346 int status;
1347 struct acpi_device *dev; 1347 struct acpi_device *dev;
1348 1348
1349 acpi_video_device_enumerate(video); 1349 status = acpi_video_device_enumerate(video);
1350 if (status)
1351 return status;
1350 1352
1351 list_for_each_entry(dev, &device->children, node) { 1353 list_for_each_entry(dev, &device->children, node) {
1352 1354
1353 status = acpi_video_bus_get_one_device(dev, video); 1355 status = acpi_video_bus_get_one_device(dev, video);
1354 if (ACPI_FAILURE(status)) { 1356 if (status) {
1355 printk(KERN_WARNING PREFIX 1357 printk(KERN_WARNING PREFIX
1356 "Can't attach device\n"); 1358 "Can't attach device\n");
1357 continue; 1359 continue;
@@ -1653,15 +1655,20 @@ static int acpi_video_bus_add(struct acpi_device *device)
1653 mutex_init(&video->device_list_lock); 1655 mutex_init(&video->device_list_lock);
1654 INIT_LIST_HEAD(&video->video_device_list); 1656 INIT_LIST_HEAD(&video->video_device_list);
1655 1657
1656 acpi_video_bus_get_devices(video, device); 1658 error = acpi_video_bus_get_devices(video, device);
1657 acpi_video_bus_start_devices(video); 1659 if (error)
1660 goto err_free_video;
1658 1661
1659 video->input = input = input_allocate_device(); 1662 video->input = input = input_allocate_device();
1660 if (!input) { 1663 if (!input) {
1661 error = -ENOMEM; 1664 error = -ENOMEM;
1662 goto err_stop_video; 1665 goto err_put_video;
1663 } 1666 }
1664 1667
1668 error = acpi_video_bus_start_devices(video);
1669 if (error)
1670 goto err_free_input_dev;
1671
1665 snprintf(video->phys, sizeof(video->phys), 1672 snprintf(video->phys, sizeof(video->phys),
1666 "%s/video/input0", acpi_device_hid(video->device)); 1673 "%s/video/input0", acpi_device_hid(video->device));
1667 1674
@@ -1682,7 +1689,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
1682 1689
1683 error = input_register_device(input); 1690 error = input_register_device(input);
1684 if (error) 1691 if (error)
1685 goto err_free_input_dev; 1692 goto err_stop_video;
1686 1693
1687 printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n", 1694 printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
1688 ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), 1695 ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
@@ -1692,14 +1699,19 @@ static int acpi_video_bus_add(struct acpi_device *device)
1692 1699
1693 video->pm_nb.notifier_call = acpi_video_resume; 1700 video->pm_nb.notifier_call = acpi_video_resume;
1694 video->pm_nb.priority = 0; 1701 video->pm_nb.priority = 0;
1695 register_pm_notifier(&video->pm_nb); 1702 error = register_pm_notifier(&video->pm_nb);
1703 if (error)
1704 goto err_unregister_input_dev;
1696 1705
1697 return 0; 1706 return 0;
1698 1707
1699 err_free_input_dev: 1708 err_unregister_input_dev:
1700 input_free_device(input); 1709 input_unregister_device(input);
1701 err_stop_video: 1710 err_stop_video:
1702 acpi_video_bus_stop_devices(video); 1711 acpi_video_bus_stop_devices(video);
1712 err_free_input_dev:
1713 input_free_device(input);
1714 err_put_video:
1703 acpi_video_bus_put_devices(video); 1715 acpi_video_bus_put_devices(video);
1704 kfree(video->attached_array); 1716 kfree(video->attached_array);
1705 err_free_video: 1717 err_free_video:
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 0fbf1a776b52..a741e418b456 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -705,16 +705,13 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd,
705{ 705{
706 unsigned int minor; 706 unsigned int minor;
707 struct timeval par_timeout; 707 struct timeval par_timeout;
708 struct compat_timeval __user *tc;
709 int ret; 708 int ret;
710 709
711 minor = iminor(file->f_path.dentry->d_inode); 710 minor = iminor(file->f_path.dentry->d_inode);
712 mutex_lock(&lp_mutex); 711 mutex_lock(&lp_mutex);
713 switch (cmd) { 712 switch (cmd) {
714 case LPSETTIMEOUT: 713 case LPSETTIMEOUT:
715 tc = compat_ptr(arg); 714 if (compat_get_timeval(&par_timeout, compat_ptr(arg))) {
716 if (get_user(par_timeout.tv_sec, &tc->tv_sec) ||
717 get_user(par_timeout.tv_usec, &tc->tv_usec)) {
718 ret = -EFAULT; 715 ret = -EFAULT;
719 break; 716 break;
720 } 717 }
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 999d6a03e436..5138927a416c 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -26,7 +26,6 @@ config CLKSRC_DBX500_PRCMU
26config CLKSRC_DBX500_PRCMU_SCHED_CLOCK 26config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
27 bool "Clocksource PRCMU Timer sched_clock" 27 bool "Clocksource PRCMU Timer sched_clock"
28 depends on (CLKSRC_DBX500_PRCMU && !NOMADIK_MTU_SCHED_CLOCK) 28 depends on (CLKSRC_DBX500_PRCMU && !NOMADIK_MTU_SCHED_CLOCK)
29 select HAVE_SCHED_CLOCK
30 default y 29 default y
31 help 30 help
32 Use the always on PRCMU Timer as sched_clock 31 Use the always on PRCMU Timer as sched_clock
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 6588f43017bd..87411cebc577 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -53,6 +53,52 @@ static void cpuidle_kick_cpus(void) {}
53 53
54static int __cpuidle_register_device(struct cpuidle_device *dev); 54static int __cpuidle_register_device(struct cpuidle_device *dev);
55 55
56static inline int cpuidle_enter(struct cpuidle_device *dev,
57 struct cpuidle_driver *drv, int index)
58{
59 struct cpuidle_state *target_state = &drv->states[index];
60 return target_state->enter(dev, drv, index);
61}
62
63static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
64 struct cpuidle_driver *drv, int index)
65{
66 return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
67}
68
69typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
70 struct cpuidle_driver *drv, int index);
71
72static cpuidle_enter_t cpuidle_enter_ops;
73
74/**
75 * cpuidle_play_dead - cpu off-lining
76 *
77 * Only returns in case of an error
78 */
79int cpuidle_play_dead(void)
80{
81 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
82 struct cpuidle_driver *drv = cpuidle_get_driver();
83 int i, dead_state = -1;
84 int power_usage = -1;
85
86 /* Find lowest-power state that supports long-term idle */
87 for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
88 struct cpuidle_state *s = &drv->states[i];
89
90 if (s->power_usage < power_usage && s->enter_dead) {
91 power_usage = s->power_usage;
92 dead_state = i;
93 }
94 }
95
96 if (dead_state != -1)
97 return drv->states[dead_state].enter_dead(dev, dead_state);
98
99 return -ENODEV;
100}
101
56/** 102/**
57 * cpuidle_idle_call - the main idle loop 103 * cpuidle_idle_call - the main idle loop
58 * 104 *
@@ -63,7 +109,6 @@ int cpuidle_idle_call(void)
63{ 109{
64 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); 110 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
65 struct cpuidle_driver *drv = cpuidle_get_driver(); 111 struct cpuidle_driver *drv = cpuidle_get_driver();
66 struct cpuidle_state *target_state;
67 int next_state, entered_state; 112 int next_state, entered_state;
68 113
69 if (off) 114 if (off)
@@ -92,12 +137,10 @@ int cpuidle_idle_call(void)
92 return 0; 137 return 0;
93 } 138 }
94 139
95 target_state = &drv->states[next_state];
96
97 trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu); 140 trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
98 trace_cpu_idle_rcuidle(next_state, dev->cpu); 141 trace_cpu_idle_rcuidle(next_state, dev->cpu);
99 142
100 entered_state = target_state->enter(dev, drv, next_state); 143 entered_state = cpuidle_enter_ops(dev, drv, next_state);
101 144
102 trace_power_end_rcuidle(dev->cpu); 145 trace_power_end_rcuidle(dev->cpu);
103 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); 146 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
@@ -110,6 +153,8 @@ int cpuidle_idle_call(void)
110 dev->states_usage[entered_state].time += 153 dev->states_usage[entered_state].time +=
111 (unsigned long long)dev->last_residency; 154 (unsigned long long)dev->last_residency;
112 dev->states_usage[entered_state].usage++; 155 dev->states_usage[entered_state].usage++;
156 } else {
157 dev->last_residency = 0;
113 } 158 }
114 159
115 /* give the governor an opportunity to reflect on the outcome */ 160 /* give the governor an opportunity to reflect on the outcome */
@@ -164,6 +209,37 @@ void cpuidle_resume_and_unlock(void)
164 209
165EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); 210EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
166 211
212/**
213 * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
214 * @dev: pointer to a valid cpuidle_device object
215 * @drv: pointer to a valid cpuidle_driver object
216 * @index: index of the target cpuidle state.
217 */
218int cpuidle_wrap_enter(struct cpuidle_device *dev,
219 struct cpuidle_driver *drv, int index,
220 int (*enter)(struct cpuidle_device *dev,
221 struct cpuidle_driver *drv, int index))
222{
223 ktime_t time_start, time_end;
224 s64 diff;
225
226 time_start = ktime_get();
227
228 index = enter(dev, drv, index);
229
230 time_end = ktime_get();
231
232 local_irq_enable();
233
234 diff = ktime_to_us(ktime_sub(time_end, time_start));
235 if (diff > INT_MAX)
236 diff = INT_MAX;
237
238 dev->last_residency = (int) diff;
239
240 return index;
241}
242
167#ifdef CONFIG_ARCH_HAS_CPU_RELAX 243#ifdef CONFIG_ARCH_HAS_CPU_RELAX
168static int poll_idle(struct cpuidle_device *dev, 244static int poll_idle(struct cpuidle_device *dev,
169 struct cpuidle_driver *drv, int index) 245 struct cpuidle_driver *drv, int index)
@@ -197,6 +273,7 @@ static void poll_idle_init(struct cpuidle_driver *drv)
197 state->power_usage = -1; 273 state->power_usage = -1;
198 state->flags = 0; 274 state->flags = 0;
199 state->enter = poll_idle; 275 state->enter = poll_idle;
276 state->disable = 0;
200} 277}
201#else 278#else
202static void poll_idle_init(struct cpuidle_driver *drv) {} 279static void poll_idle_init(struct cpuidle_driver *drv) {}
@@ -212,13 +289,14 @@ static void poll_idle_init(struct cpuidle_driver *drv) {}
212int cpuidle_enable_device(struct cpuidle_device *dev) 289int cpuidle_enable_device(struct cpuidle_device *dev)
213{ 290{
214 int ret, i; 291 int ret, i;
292 struct cpuidle_driver *drv = cpuidle_get_driver();
215 293
216 if (dev->enabled) 294 if (dev->enabled)
217 return 0; 295 return 0;
218 if (!cpuidle_get_driver() || !cpuidle_curr_governor) 296 if (!drv || !cpuidle_curr_governor)
219 return -EIO; 297 return -EIO;
220 if (!dev->state_count) 298 if (!dev->state_count)
221 return -EINVAL; 299 dev->state_count = drv->state_count;
222 300
223 if (dev->registered == 0) { 301 if (dev->registered == 0) {
224 ret = __cpuidle_register_device(dev); 302 ret = __cpuidle_register_device(dev);
@@ -226,13 +304,16 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
226 return ret; 304 return ret;
227 } 305 }
228 306
229 poll_idle_init(cpuidle_get_driver()); 307 cpuidle_enter_ops = drv->en_core_tk_irqen ?
308 cpuidle_enter_tk : cpuidle_enter;
309
310 poll_idle_init(drv);
230 311
231 if ((ret = cpuidle_add_state_sysfs(dev))) 312 if ((ret = cpuidle_add_state_sysfs(dev)))
232 return ret; 313 return ret;
233 314
234 if (cpuidle_curr_governor->enable && 315 if (cpuidle_curr_governor->enable &&
235 (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev))) 316 (ret = cpuidle_curr_governor->enable(drv, dev)))
236 goto fail_sysfs; 317 goto fail_sysfs;
237 318
238 for (i = 0; i < dev->state_count; i++) { 319 for (i = 0; i < dev->state_count; i++) {
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 284d7af5a9c8..40cd3f3024df 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -47,7 +47,7 @@ static void __cpuidle_register_driver(struct cpuidle_driver *drv)
47 */ 47 */
48int cpuidle_register_driver(struct cpuidle_driver *drv) 48int cpuidle_register_driver(struct cpuidle_driver *drv)
49{ 49{
50 if (!drv) 50 if (!drv || !drv->state_count)
51 return -EINVAL; 51 return -EINVAL;
52 52
53 if (cpuidle_disabled()) 53 if (cpuidle_disabled())
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index ad0952601ae2..06335756ea14 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -236,7 +236,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
236{ 236{
237 struct menu_device *data = &__get_cpu_var(menu_devices); 237 struct menu_device *data = &__get_cpu_var(menu_devices);
238 int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); 238 int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
239 unsigned int power_usage = -1; 239 int power_usage = -1;
240 int i; 240 int i;
241 int multiplier; 241 int multiplier;
242 struct timespec t; 242 struct timespec t;
@@ -280,7 +280,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
280 * We want to default to C1 (hlt), not to busy polling 280 * We want to default to C1 (hlt), not to busy polling
281 * unless the timer is happening really really soon. 281 * unless the timer is happening really really soon.
282 */ 282 */
283 if (data->expected_us > 5) 283 if (data->expected_us > 5 &&
284 drv->states[CPUIDLE_DRIVER_STATE_START].disable == 0)
284 data->last_state_idx = CPUIDLE_DRIVER_STATE_START; 285 data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
285 286
286 /* 287 /*
@@ -290,6 +291,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
290 for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { 291 for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
291 struct cpuidle_state *s = &drv->states[i]; 292 struct cpuidle_state *s = &drv->states[i];
292 293
294 if (s->disable)
295 continue;
293 if (s->target_residency > data->predicted_us) 296 if (s->target_residency > data->predicted_us)
294 continue; 297 continue;
295 if (s->exit_latency > latency_req) 298 if (s->exit_latency > latency_req)
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 3fe41fe4851a..88032b4dc6d2 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -11,6 +11,7 @@
11#include <linux/sysfs.h> 11#include <linux/sysfs.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/cpu.h> 13#include <linux/cpu.h>
14#include <linux/capability.h>
14 15
15#include "cpuidle.h" 16#include "cpuidle.h"
16 17
@@ -222,6 +223,9 @@ struct cpuidle_state_attr {
222#define define_one_state_ro(_name, show) \ 223#define define_one_state_ro(_name, show) \
223static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL) 224static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
224 225
226#define define_one_state_rw(_name, show, store) \
227static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store)
228
225#define define_show_state_function(_name) \ 229#define define_show_state_function(_name) \
226static ssize_t show_state_##_name(struct cpuidle_state *state, \ 230static ssize_t show_state_##_name(struct cpuidle_state *state, \
227 struct cpuidle_state_usage *state_usage, char *buf) \ 231 struct cpuidle_state_usage *state_usage, char *buf) \
@@ -229,6 +233,24 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
229 return sprintf(buf, "%u\n", state->_name);\ 233 return sprintf(buf, "%u\n", state->_name);\
230} 234}
231 235
236#define define_store_state_function(_name) \
237static ssize_t store_state_##_name(struct cpuidle_state *state, \
238 const char *buf, size_t size) \
239{ \
240 long value; \
241 int err; \
242 if (!capable(CAP_SYS_ADMIN)) \
243 return -EPERM; \
244 err = kstrtol(buf, 0, &value); \
245 if (err) \
246 return err; \
247 if (value) \
248 state->disable = 1; \
249 else \
250 state->disable = 0; \
251 return size; \
252}
253
232#define define_show_state_ull_function(_name) \ 254#define define_show_state_ull_function(_name) \
233static ssize_t show_state_##_name(struct cpuidle_state *state, \ 255static ssize_t show_state_##_name(struct cpuidle_state *state, \
234 struct cpuidle_state_usage *state_usage, char *buf) \ 256 struct cpuidle_state_usage *state_usage, char *buf) \
@@ -251,6 +273,8 @@ define_show_state_ull_function(usage)
251define_show_state_ull_function(time) 273define_show_state_ull_function(time)
252define_show_state_str_function(name) 274define_show_state_str_function(name)
253define_show_state_str_function(desc) 275define_show_state_str_function(desc)
276define_show_state_function(disable)
277define_store_state_function(disable)
254 278
255define_one_state_ro(name, show_state_name); 279define_one_state_ro(name, show_state_name);
256define_one_state_ro(desc, show_state_desc); 280define_one_state_ro(desc, show_state_desc);
@@ -258,6 +282,7 @@ define_one_state_ro(latency, show_state_exit_latency);
258define_one_state_ro(power, show_state_power_usage); 282define_one_state_ro(power, show_state_power_usage);
259define_one_state_ro(usage, show_state_usage); 283define_one_state_ro(usage, show_state_usage);
260define_one_state_ro(time, show_state_time); 284define_one_state_ro(time, show_state_time);
285define_one_state_rw(disable, show_state_disable, store_state_disable);
261 286
262static struct attribute *cpuidle_state_default_attrs[] = { 287static struct attribute *cpuidle_state_default_attrs[] = {
263 &attr_name.attr, 288 &attr_name.attr,
@@ -266,6 +291,7 @@ static struct attribute *cpuidle_state_default_attrs[] = {
266 &attr_power.attr, 291 &attr_power.attr,
267 &attr_usage.attr, 292 &attr_usage.attr,
268 &attr_time.attr, 293 &attr_time.attr,
294 &attr_disable.attr,
269 NULL 295 NULL
270}; 296};
271 297
@@ -287,8 +313,22 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
287 return ret; 313 return ret;
288} 314}
289 315
316static ssize_t cpuidle_state_store(struct kobject *kobj,
317 struct attribute *attr, const char *buf, size_t size)
318{
319 int ret = -EIO;
320 struct cpuidle_state *state = kobj_to_state(kobj);
321 struct cpuidle_state_attr *cattr = attr_to_stateattr(attr);
322
323 if (cattr->store)
324 ret = cattr->store(state, buf, size);
325
326 return ret;
327}
328
290static const struct sysfs_ops cpuidle_state_sysfs_ops = { 329static const struct sysfs_ops cpuidle_state_sysfs_ops = {
291 .show = cpuidle_state_show, 330 .show = cpuidle_state_show,
331 .store = cpuidle_state_store,
292}; 332};
293 333
294static void cpuidle_state_sysfs_release(struct kobject *kobj) 334static void cpuidle_state_sysfs_release(struct kobject *kobj)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 4a6c46dea8a0..cf9da362d64f 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -201,7 +201,6 @@ config PL330_DMA
201 tristate "DMA API Driver for PL330" 201 tristate "DMA API Driver for PL330"
202 select DMA_ENGINE 202 select DMA_ENGINE
203 depends on ARM_AMBA 203 depends on ARM_AMBA
204 select PL330
205 help 204 help
206 Select if your platform has one or more PL330 DMACs. 205 Select if your platform has one or more PL330 DMACs.
207 You need to provide platform specific settings via 206 You need to provide platform specific settings via
@@ -231,7 +230,7 @@ config IMX_SDMA
231 230
232config IMX_DMA 231config IMX_DMA
233 tristate "i.MX DMA support" 232 tristate "i.MX DMA support"
234 depends on IMX_HAVE_DMA_V1 233 depends on ARCH_MXC
235 select DMA_ENGINE 234 select DMA_ENGINE
236 help 235 help
237 Support the i.MX DMA engine. This engine is integrated into 236 Support the i.MX DMA engine. This engine is integrated into
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 8a281584458b..c301a8ec31aa 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -85,6 +85,8 @@
85#include <linux/slab.h> 85#include <linux/slab.h>
86#include <asm/hardware/pl080.h> 86#include <asm/hardware/pl080.h>
87 87
88#include "dmaengine.h"
89
88#define DRIVER_NAME "pl08xdmac" 90#define DRIVER_NAME "pl08xdmac"
89 91
90static struct amba_driver pl08x_amba_driver; 92static struct amba_driver pl08x_amba_driver;
@@ -649,7 +651,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
649 } 651 }
650 652
651 if ((bd.srcbus.addr % bd.srcbus.buswidth) || 653 if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
652 (bd.srcbus.addr % bd.srcbus.buswidth)) { 654 (bd.dstbus.addr % bd.dstbus.buswidth)) {
653 dev_err(&pl08x->adev->dev, 655 dev_err(&pl08x->adev->dev,
654 "%s src & dst address must be aligned to src" 656 "%s src & dst address must be aligned to src"
655 " & dst width if peripheral is flow controller", 657 " & dst width if peripheral is flow controller",
@@ -919,13 +921,10 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
919 struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); 921 struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
920 struct pl08x_txd *txd = to_pl08x_txd(tx); 922 struct pl08x_txd *txd = to_pl08x_txd(tx);
921 unsigned long flags; 923 unsigned long flags;
924 dma_cookie_t cookie;
922 925
923 spin_lock_irqsave(&plchan->lock, flags); 926 spin_lock_irqsave(&plchan->lock, flags);
924 927 cookie = dma_cookie_assign(tx);
925 plchan->chan.cookie += 1;
926 if (plchan->chan.cookie < 0)
927 plchan->chan.cookie = 1;
928 tx->cookie = plchan->chan.cookie;
929 928
930 /* Put this onto the pending list */ 929 /* Put this onto the pending list */
931 list_add_tail(&txd->node, &plchan->pend_list); 930 list_add_tail(&txd->node, &plchan->pend_list);
@@ -945,7 +944,7 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
945 944
946 spin_unlock_irqrestore(&plchan->lock, flags); 945 spin_unlock_irqrestore(&plchan->lock, flags);
947 946
948 return tx->cookie; 947 return cookie;
949} 948}
950 949
951static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( 950static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
@@ -965,31 +964,17 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
965 dma_cookie_t cookie, struct dma_tx_state *txstate) 964 dma_cookie_t cookie, struct dma_tx_state *txstate)
966{ 965{
967 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 966 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
968 dma_cookie_t last_used;
969 dma_cookie_t last_complete;
970 enum dma_status ret; 967 enum dma_status ret;
971 u32 bytesleft = 0;
972 968
973 last_used = plchan->chan.cookie; 969 ret = dma_cookie_status(chan, cookie, txstate);
974 last_complete = plchan->lc; 970 if (ret == DMA_SUCCESS)
975
976 ret = dma_async_is_complete(cookie, last_complete, last_used);
977 if (ret == DMA_SUCCESS) {
978 dma_set_tx_state(txstate, last_complete, last_used, 0);
979 return ret; 971 return ret;
980 }
981 972
982 /* 973 /*
983 * This cookie not complete yet 974 * This cookie not complete yet
975 * Get number of bytes left in the active transactions and queue
984 */ 976 */
985 last_used = plchan->chan.cookie; 977 dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
986 last_complete = plchan->lc;
987
988 /* Get number of bytes left in the active transactions and queue */
989 bytesleft = pl08x_getbytes_chan(plchan);
990
991 dma_set_tx_state(txstate, last_complete, last_used,
992 bytesleft);
993 978
994 if (plchan->state == PL08X_CHAN_PAUSED) 979 if (plchan->state == PL08X_CHAN_PAUSED)
995 return DMA_PAUSED; 980 return DMA_PAUSED;
@@ -1139,6 +1124,8 @@ static int dma_set_runtime_config(struct dma_chan *chan,
1139 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; 1124 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
1140 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; 1125 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
1141 1126
1127 plchan->device_fc = config->device_fc;
1128
1142 if (plchan->runtime_direction == DMA_DEV_TO_MEM) { 1129 if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
1143 plchan->src_addr = config->src_addr; 1130 plchan->src_addr = config->src_addr;
1144 plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | 1131 plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
@@ -1326,7 +1313,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1326static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1313static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1327 struct dma_chan *chan, struct scatterlist *sgl, 1314 struct dma_chan *chan, struct scatterlist *sgl,
1328 unsigned int sg_len, enum dma_transfer_direction direction, 1315 unsigned int sg_len, enum dma_transfer_direction direction,
1329 unsigned long flags) 1316 unsigned long flags, void *context)
1330{ 1317{
1331 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1318 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1332 struct pl08x_driver_data *pl08x = plchan->host; 1319 struct pl08x_driver_data *pl08x = plchan->host;
@@ -1370,7 +1357,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1370 return NULL; 1357 return NULL;
1371 } 1358 }
1372 1359
1373 if (plchan->cd->device_fc) 1360 if (plchan->device_fc)
1374 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : 1361 tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
1375 PL080_FLOW_PER2MEM_PER; 1362 PL080_FLOW_PER2MEM_PER;
1376 else 1363 else
@@ -1541,7 +1528,7 @@ static void pl08x_tasklet(unsigned long data)
1541 1528
1542 if (txd) { 1529 if (txd) {
1543 /* Update last completed */ 1530 /* Update last completed */
1544 plchan->lc = txd->tx.cookie; 1531 dma_cookie_complete(&txd->tx);
1545 } 1532 }
1546 1533
1547 /* If a new descriptor is queued, set it up plchan->at is NULL here */ 1534 /* If a new descriptor is queued, set it up plchan->at is NULL here */
@@ -1722,8 +1709,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
1722 chan->name); 1709 chan->name);
1723 1710
1724 chan->chan.device = dmadev; 1711 chan->chan.device = dmadev;
1725 chan->chan.cookie = 0; 1712 dma_cookie_init(&chan->chan);
1726 chan->lc = 0;
1727 1713
1728 spin_lock_init(&chan->lock); 1714 spin_lock_init(&chan->lock);
1729 INIT_LIST_HEAD(&chan->pend_list); 1715 INIT_LIST_HEAD(&chan->pend_list);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index f4aed5fc2cb6..7aa58d204892 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -27,6 +27,7 @@
27#include <linux/of_device.h> 27#include <linux/of_device.h>
28 28
29#include "at_hdmac_regs.h" 29#include "at_hdmac_regs.h"
30#include "dmaengine.h"
30 31
31/* 32/*
32 * Glossary 33 * Glossary
@@ -192,27 +193,6 @@ static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
192} 193}
193 194
194/** 195/**
195 * atc_assign_cookie - compute and assign new cookie
196 * @atchan: channel we work on
197 * @desc: descriptor to assign cookie for
198 *
199 * Called with atchan->lock held and bh disabled
200 */
201static dma_cookie_t
202atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
203{
204 dma_cookie_t cookie = atchan->chan_common.cookie;
205
206 if (++cookie < 0)
207 cookie = 1;
208
209 atchan->chan_common.cookie = cookie;
210 desc->txd.cookie = cookie;
211
212 return cookie;
213}
214
215/**
216 * atc_dostart - starts the DMA engine for real 196 * atc_dostart - starts the DMA engine for real
217 * @atchan: the channel we want to start 197 * @atchan: the channel we want to start
218 * @first: first descriptor in the list we want to begin with 198 * @first: first descriptor in the list we want to begin with
@@ -269,7 +249,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
269 dev_vdbg(chan2dev(&atchan->chan_common), 249 dev_vdbg(chan2dev(&atchan->chan_common),
270 "descriptor %u complete\n", txd->cookie); 250 "descriptor %u complete\n", txd->cookie);
271 251
272 atchan->completed_cookie = txd->cookie; 252 dma_cookie_complete(txd);
273 253
274 /* move children to free_list */ 254 /* move children to free_list */
275 list_splice_init(&desc->tx_list, &atchan->free_list); 255 list_splice_init(&desc->tx_list, &atchan->free_list);
@@ -547,7 +527,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
547 unsigned long flags; 527 unsigned long flags;
548 528
549 spin_lock_irqsave(&atchan->lock, flags); 529 spin_lock_irqsave(&atchan->lock, flags);
550 cookie = atc_assign_cookie(atchan, desc); 530 cookie = dma_cookie_assign(tx);
551 531
552 if (list_empty(&atchan->active_list)) { 532 if (list_empty(&atchan->active_list)) {
553 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", 533 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
@@ -659,14 +639,16 @@ err_desc_get:
659 * @sg_len: number of entries in @scatterlist 639 * @sg_len: number of entries in @scatterlist
660 * @direction: DMA direction 640 * @direction: DMA direction
661 * @flags: tx descriptor status flags 641 * @flags: tx descriptor status flags
642 * @context: transaction context (ignored)
662 */ 643 */
663static struct dma_async_tx_descriptor * 644static struct dma_async_tx_descriptor *
664atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 645atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
665 unsigned int sg_len, enum dma_transfer_direction direction, 646 unsigned int sg_len, enum dma_transfer_direction direction,
666 unsigned long flags) 647 unsigned long flags, void *context)
667{ 648{
668 struct at_dma_chan *atchan = to_at_dma_chan(chan); 649 struct at_dma_chan *atchan = to_at_dma_chan(chan);
669 struct at_dma_slave *atslave = chan->private; 650 struct at_dma_slave *atslave = chan->private;
651 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
670 struct at_desc *first = NULL; 652 struct at_desc *first = NULL;
671 struct at_desc *prev = NULL; 653 struct at_desc *prev = NULL;
672 u32 ctrla; 654 u32 ctrla;
@@ -688,19 +670,18 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
688 return NULL; 670 return NULL;
689 } 671 }
690 672
691 reg_width = atslave->reg_width;
692
693 ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; 673 ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
694 ctrlb = ATC_IEN; 674 ctrlb = ATC_IEN;
695 675
696 switch (direction) { 676 switch (direction) {
697 case DMA_MEM_TO_DEV: 677 case DMA_MEM_TO_DEV:
678 reg_width = convert_buswidth(sconfig->dst_addr_width);
698 ctrla |= ATC_DST_WIDTH(reg_width); 679 ctrla |= ATC_DST_WIDTH(reg_width);
699 ctrlb |= ATC_DST_ADDR_MODE_FIXED 680 ctrlb |= ATC_DST_ADDR_MODE_FIXED
700 | ATC_SRC_ADDR_MODE_INCR 681 | ATC_SRC_ADDR_MODE_INCR
701 | ATC_FC_MEM2PER 682 | ATC_FC_MEM2PER
702 | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF); 683 | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
703 reg = atslave->tx_reg; 684 reg = sconfig->dst_addr;
704 for_each_sg(sgl, sg, sg_len, i) { 685 for_each_sg(sgl, sg, sg_len, i) {
705 struct at_desc *desc; 686 struct at_desc *desc;
706 u32 len; 687 u32 len;
@@ -728,13 +709,14 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
728 } 709 }
729 break; 710 break;
730 case DMA_DEV_TO_MEM: 711 case DMA_DEV_TO_MEM:
712 reg_width = convert_buswidth(sconfig->src_addr_width);
731 ctrla |= ATC_SRC_WIDTH(reg_width); 713 ctrla |= ATC_SRC_WIDTH(reg_width);
732 ctrlb |= ATC_DST_ADDR_MODE_INCR 714 ctrlb |= ATC_DST_ADDR_MODE_INCR
733 | ATC_SRC_ADDR_MODE_FIXED 715 | ATC_SRC_ADDR_MODE_FIXED
734 | ATC_FC_PER2MEM 716 | ATC_FC_PER2MEM
735 | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF); 717 | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
736 718
737 reg = atslave->rx_reg; 719 reg = sconfig->src_addr;
738 for_each_sg(sgl, sg, sg_len, i) { 720 for_each_sg(sgl, sg, sg_len, i) {
739 struct at_desc *desc; 721 struct at_desc *desc;
740 u32 len; 722 u32 len;
@@ -810,12 +792,15 @@ err_out:
810 * atc_dma_cyclic_fill_desc - Fill one period decriptor 792 * atc_dma_cyclic_fill_desc - Fill one period decriptor
811 */ 793 */
812static int 794static int
813atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, 795atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
814 unsigned int period_index, dma_addr_t buf_addr, 796 unsigned int period_index, dma_addr_t buf_addr,
815 size_t period_len, enum dma_transfer_direction direction) 797 unsigned int reg_width, size_t period_len,
798 enum dma_transfer_direction direction)
816{ 799{
817 u32 ctrla; 800 struct at_dma_chan *atchan = to_at_dma_chan(chan);
818 unsigned int reg_width = atslave->reg_width; 801 struct at_dma_slave *atslave = chan->private;
802 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
803 u32 ctrla;
819 804
820 /* prepare common CRTLA value */ 805 /* prepare common CRTLA value */
821 ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla 806 ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
@@ -826,7 +811,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
826 switch (direction) { 811 switch (direction) {
827 case DMA_MEM_TO_DEV: 812 case DMA_MEM_TO_DEV:
828 desc->lli.saddr = buf_addr + (period_len * period_index); 813 desc->lli.saddr = buf_addr + (period_len * period_index);
829 desc->lli.daddr = atslave->tx_reg; 814 desc->lli.daddr = sconfig->dst_addr;
830 desc->lli.ctrla = ctrla; 815 desc->lli.ctrla = ctrla;
831 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED 816 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
832 | ATC_SRC_ADDR_MODE_INCR 817 | ATC_SRC_ADDR_MODE_INCR
@@ -836,7 +821,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
836 break; 821 break;
837 822
838 case DMA_DEV_TO_MEM: 823 case DMA_DEV_TO_MEM:
839 desc->lli.saddr = atslave->rx_reg; 824 desc->lli.saddr = sconfig->src_addr;
840 desc->lli.daddr = buf_addr + (period_len * period_index); 825 desc->lli.daddr = buf_addr + (period_len * period_index);
841 desc->lli.ctrla = ctrla; 826 desc->lli.ctrla = ctrla;
842 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR 827 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
@@ -860,16 +845,20 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
860 * @buf_len: total number of bytes for the entire buffer 845 * @buf_len: total number of bytes for the entire buffer
861 * @period_len: number of bytes for each period 846 * @period_len: number of bytes for each period
862 * @direction: transfer direction, to or from device 847 * @direction: transfer direction, to or from device
848 * @context: transfer context (ignored)
863 */ 849 */
864static struct dma_async_tx_descriptor * 850static struct dma_async_tx_descriptor *
865atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 851atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
866 size_t period_len, enum dma_transfer_direction direction) 852 size_t period_len, enum dma_transfer_direction direction,
853 void *context)
867{ 854{
868 struct at_dma_chan *atchan = to_at_dma_chan(chan); 855 struct at_dma_chan *atchan = to_at_dma_chan(chan);
869 struct at_dma_slave *atslave = chan->private; 856 struct at_dma_slave *atslave = chan->private;
857 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
870 struct at_desc *first = NULL; 858 struct at_desc *first = NULL;
871 struct at_desc *prev = NULL; 859 struct at_desc *prev = NULL;
872 unsigned long was_cyclic; 860 unsigned long was_cyclic;
861 unsigned int reg_width;
873 unsigned int periods = buf_len / period_len; 862 unsigned int periods = buf_len / period_len;
874 unsigned int i; 863 unsigned int i;
875 864
@@ -889,8 +878,13 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
889 return NULL; 878 return NULL;
890 } 879 }
891 880
881 if (sconfig->direction == DMA_MEM_TO_DEV)
882 reg_width = convert_buswidth(sconfig->dst_addr_width);
883 else
884 reg_width = convert_buswidth(sconfig->src_addr_width);
885
892 /* Check for too big/unaligned periods and unaligned DMA buffer */ 886 /* Check for too big/unaligned periods and unaligned DMA buffer */
893 if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr, 887 if (atc_dma_cyclic_check_values(reg_width, buf_addr,
894 period_len, direction)) 888 period_len, direction))
895 goto err_out; 889 goto err_out;
896 890
@@ -902,8 +896,8 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
902 if (!desc) 896 if (!desc)
903 goto err_desc_get; 897 goto err_desc_get;
904 898
905 if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr, 899 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
906 period_len, direction)) 900 reg_width, period_len, direction))
907 goto err_desc_get; 901 goto err_desc_get;
908 902
909 atc_desc_chain(&first, &prev, desc); 903 atc_desc_chain(&first, &prev, desc);
@@ -926,6 +920,23 @@ err_out:
926 return NULL; 920 return NULL;
927} 921}
928 922
923static int set_runtime_config(struct dma_chan *chan,
924 struct dma_slave_config *sconfig)
925{
926 struct at_dma_chan *atchan = to_at_dma_chan(chan);
927
928 /* Check if it is chan is configured for slave transfers */
929 if (!chan->private)
930 return -EINVAL;
931
932 memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
933
934 convert_burst(&atchan->dma_sconfig.src_maxburst);
935 convert_burst(&atchan->dma_sconfig.dst_maxburst);
936
937 return 0;
938}
939
929 940
930static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 941static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
931 unsigned long arg) 942 unsigned long arg)
@@ -986,6 +997,8 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
986 clear_bit(ATC_IS_CYCLIC, &atchan->status); 997 clear_bit(ATC_IS_CYCLIC, &atchan->status);
987 998
988 spin_unlock_irqrestore(&atchan->lock, flags); 999 spin_unlock_irqrestore(&atchan->lock, flags);
1000 } else if (cmd == DMA_SLAVE_CONFIG) {
1001 return set_runtime_config(chan, (struct dma_slave_config *)arg);
989 } else { 1002 } else {
990 return -ENXIO; 1003 return -ENXIO;
991 } 1004 }
@@ -1016,26 +1029,20 @@ atc_tx_status(struct dma_chan *chan,
1016 1029
1017 spin_lock_irqsave(&atchan->lock, flags); 1030 spin_lock_irqsave(&atchan->lock, flags);
1018 1031
1019 last_complete = atchan->completed_cookie; 1032 ret = dma_cookie_status(chan, cookie, txstate);
1020 last_used = chan->cookie;
1021
1022 ret = dma_async_is_complete(cookie, last_complete, last_used);
1023 if (ret != DMA_SUCCESS) { 1033 if (ret != DMA_SUCCESS) {
1024 atc_cleanup_descriptors(atchan); 1034 atc_cleanup_descriptors(atchan);
1025 1035
1026 last_complete = atchan->completed_cookie; 1036 ret = dma_cookie_status(chan, cookie, txstate);
1027 last_used = chan->cookie;
1028
1029 ret = dma_async_is_complete(cookie, last_complete, last_used);
1030 } 1037 }
1031 1038
1039 last_complete = chan->completed_cookie;
1040 last_used = chan->cookie;
1041
1032 spin_unlock_irqrestore(&atchan->lock, flags); 1042 spin_unlock_irqrestore(&atchan->lock, flags);
1033 1043
1034 if (ret != DMA_SUCCESS) 1044 if (ret != DMA_SUCCESS)
1035 dma_set_tx_state(txstate, last_complete, last_used, 1045 dma_set_residue(txstate, atc_first_active(atchan)->len);
1036 atc_first_active(atchan)->len);
1037 else
1038 dma_set_tx_state(txstate, last_complete, last_used, 0);
1039 1046
1040 if (atc_chan_is_paused(atchan)) 1047 if (atc_chan_is_paused(atchan))
1041 ret = DMA_PAUSED; 1048 ret = DMA_PAUSED;
@@ -1129,7 +1136,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
1129 spin_lock_irqsave(&atchan->lock, flags); 1136 spin_lock_irqsave(&atchan->lock, flags);
1130 atchan->descs_allocated = i; 1137 atchan->descs_allocated = i;
1131 list_splice(&tmp_list, &atchan->free_list); 1138 list_splice(&tmp_list, &atchan->free_list);
1132 atchan->completed_cookie = chan->cookie = 1; 1139 dma_cookie_init(chan);
1133 spin_unlock_irqrestore(&atchan->lock, flags); 1140 spin_unlock_irqrestore(&atchan->lock, flags);
1134 1141
1135 /* channel parameters */ 1142 /* channel parameters */
@@ -1329,7 +1336,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
1329 struct at_dma_chan *atchan = &atdma->chan[i]; 1336 struct at_dma_chan *atchan = &atdma->chan[i];
1330 1337
1331 atchan->chan_common.device = &atdma->dma_common; 1338 atchan->chan_common.device = &atdma->dma_common;
1332 atchan->chan_common.cookie = atchan->completed_cookie = 1; 1339 dma_cookie_init(&atchan->chan_common);
1333 list_add_tail(&atchan->chan_common.device_node, 1340 list_add_tail(&atchan->chan_common.device_node,
1334 &atdma->dma_common.channels); 1341 &atdma->dma_common.channels);
1335 1342
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index a8d3277d60b5..897a8bcaec90 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -207,8 +207,8 @@ enum atc_status {
207 * @save_cfg: configuration register that is saved on suspend/resume cycle 207 * @save_cfg: configuration register that is saved on suspend/resume cycle
208 * @save_dscr: for cyclic operations, preserve next descriptor address in 208 * @save_dscr: for cyclic operations, preserve next descriptor address in
209 * the cyclic list on suspend/resume cycle 209 * the cyclic list on suspend/resume cycle
210 * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG
210 * @lock: serializes enqueue/dequeue operations to descriptors lists 211 * @lock: serializes enqueue/dequeue operations to descriptors lists
211 * @completed_cookie: identifier for the most recently completed operation
212 * @active_list: list of descriptors dmaengine is being running on 212 * @active_list: list of descriptors dmaengine is being running on
213 * @queue: list of descriptors ready to be submitted to engine 213 * @queue: list of descriptors ready to be submitted to engine
214 * @free_list: list of descriptors usable by the channel 214 * @free_list: list of descriptors usable by the channel
@@ -223,11 +223,11 @@ struct at_dma_chan {
223 struct tasklet_struct tasklet; 223 struct tasklet_struct tasklet;
224 u32 save_cfg; 224 u32 save_cfg;
225 u32 save_dscr; 225 u32 save_dscr;
226 struct dma_slave_config dma_sconfig;
226 227
227 spinlock_t lock; 228 spinlock_t lock;
228 229
229 /* these other elements are all protected by lock */ 230 /* these other elements are all protected by lock */
230 dma_cookie_t completed_cookie;
231 struct list_head active_list; 231 struct list_head active_list;
232 struct list_head queue; 232 struct list_head queue;
233 struct list_head free_list; 233 struct list_head free_list;
@@ -245,6 +245,36 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
245 return container_of(dchan, struct at_dma_chan, chan_common); 245 return container_of(dchan, struct at_dma_chan, chan_common);
246} 246}
247 247
248/*
249 * Fix sconfig's burst size according to at_hdmac. We need to convert them as:
250 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4, 64 -> 5, 128 -> 6, 256 -> 7.
251 *
252 * This can be done by finding most significant bit set.
253 */
254static inline void convert_burst(u32 *maxburst)
255{
256 if (*maxburst > 1)
257 *maxburst = fls(*maxburst) - 2;
258 else
259 *maxburst = 0;
260}
261
262/*
263 * Fix sconfig's bus width according to at_hdmac.
264 * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2.
265 */
266static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
267{
268 switch (addr_width) {
269 case DMA_SLAVE_BUSWIDTH_2_BYTES:
270 return 1;
271 case DMA_SLAVE_BUSWIDTH_4_BYTES:
272 return 2;
273 default:
274 /* For 1 byte width or fallback */
275 return 0;
276 }
277}
248 278
249/*-- Controller ------------------------------------------------------*/ 279/*-- Controller ------------------------------------------------------*/
250 280
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index d65a718c0f9b..dc89455f5550 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -24,6 +24,7 @@
24#include <mach/coh901318.h> 24#include <mach/coh901318.h>
25 25
26#include "coh901318_lli.h" 26#include "coh901318_lli.h"
27#include "dmaengine.h"
27 28
28#define COHC_2_DEV(cohc) (&cohc->chan.dev->device) 29#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
29 30
@@ -59,7 +60,6 @@ struct coh901318_base {
59struct coh901318_chan { 60struct coh901318_chan {
60 spinlock_t lock; 61 spinlock_t lock;
61 int allocated; 62 int allocated;
62 int completed;
63 int id; 63 int id;
64 int stopped; 64 int stopped;
65 65
@@ -318,20 +318,6 @@ static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
318 318
319 return 0; 319 return 0;
320} 320}
321static dma_cookie_t
322coh901318_assign_cookie(struct coh901318_chan *cohc,
323 struct coh901318_desc *cohd)
324{
325 dma_cookie_t cookie = cohc->chan.cookie;
326
327 if (++cookie < 0)
328 cookie = 1;
329
330 cohc->chan.cookie = cookie;
331 cohd->desc.cookie = cookie;
332
333 return cookie;
334}
335 321
336static struct coh901318_desc * 322static struct coh901318_desc *
337coh901318_desc_get(struct coh901318_chan *cohc) 323coh901318_desc_get(struct coh901318_chan *cohc)
@@ -705,7 +691,7 @@ static void dma_tasklet(unsigned long data)
705 callback_param = cohd_fin->desc.callback_param; 691 callback_param = cohd_fin->desc.callback_param;
706 692
707 /* sign this job as completed on the channel */ 693 /* sign this job as completed on the channel */
708 cohc->completed = cohd_fin->desc.cookie; 694 dma_cookie_complete(&cohd_fin->desc);
709 695
710 /* release the lli allocation and remove the descriptor */ 696 /* release the lli allocation and remove the descriptor */
711 coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli); 697 coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
@@ -929,7 +915,7 @@ static int coh901318_alloc_chan_resources(struct dma_chan *chan)
929 coh901318_config(cohc, NULL); 915 coh901318_config(cohc, NULL);
930 916
931 cohc->allocated = 1; 917 cohc->allocated = 1;
932 cohc->completed = chan->cookie = 1; 918 dma_cookie_init(chan);
933 919
934 spin_unlock_irqrestore(&cohc->lock, flags); 920 spin_unlock_irqrestore(&cohc->lock, flags);
935 921
@@ -966,16 +952,16 @@ coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
966 desc); 952 desc);
967 struct coh901318_chan *cohc = to_coh901318_chan(tx->chan); 953 struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
968 unsigned long flags; 954 unsigned long flags;
955 dma_cookie_t cookie;
969 956
970 spin_lock_irqsave(&cohc->lock, flags); 957 spin_lock_irqsave(&cohc->lock, flags);
971 958 cookie = dma_cookie_assign(tx);
972 tx->cookie = coh901318_assign_cookie(cohc, cohd);
973 959
974 coh901318_desc_queue(cohc, cohd); 960 coh901318_desc_queue(cohc, cohd);
975 961
976 spin_unlock_irqrestore(&cohc->lock, flags); 962 spin_unlock_irqrestore(&cohc->lock, flags);
977 963
978 return tx->cookie; 964 return cookie;
979} 965}
980 966
981static struct dma_async_tx_descriptor * 967static struct dma_async_tx_descriptor *
@@ -1035,7 +1021,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1035static struct dma_async_tx_descriptor * 1021static struct dma_async_tx_descriptor *
1036coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 1022coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1037 unsigned int sg_len, enum dma_transfer_direction direction, 1023 unsigned int sg_len, enum dma_transfer_direction direction,
1038 unsigned long flags) 1024 unsigned long flags, void *context)
1039{ 1025{
1040 struct coh901318_chan *cohc = to_coh901318_chan(chan); 1026 struct coh901318_chan *cohc = to_coh901318_chan(chan);
1041 struct coh901318_lli *lli; 1027 struct coh901318_lli *lli;
@@ -1165,17 +1151,12 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1165 struct dma_tx_state *txstate) 1151 struct dma_tx_state *txstate)
1166{ 1152{
1167 struct coh901318_chan *cohc = to_coh901318_chan(chan); 1153 struct coh901318_chan *cohc = to_coh901318_chan(chan);
1168 dma_cookie_t last_used; 1154 enum dma_status ret;
1169 dma_cookie_t last_complete;
1170 int ret;
1171
1172 last_complete = cohc->completed;
1173 last_used = chan->cookie;
1174 1155
1175 ret = dma_async_is_complete(cookie, last_complete, last_used); 1156 ret = dma_cookie_status(chan, cookie, txstate);
1157 /* FIXME: should be conditional on ret != DMA_SUCCESS? */
1158 dma_set_residue(txstate, coh901318_get_bytes_left(chan));
1176 1159
1177 dma_set_tx_state(txstate, last_complete, last_used,
1178 coh901318_get_bytes_left(chan));
1179 if (ret == DMA_IN_PROGRESS && cohc->stopped) 1160 if (ret == DMA_IN_PROGRESS && cohc->stopped)
1180 ret = DMA_PAUSED; 1161 ret = DMA_PAUSED;
1181 1162
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a6c6051ec858..767bcc31b365 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -510,8 +510,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
510 dma_chan_name(chan)); 510 dma_chan_name(chan));
511 list_del_rcu(&device->global_node); 511 list_del_rcu(&device->global_node);
512 } else if (err) 512 } else if (err)
513 pr_debug("dmaengine: failed to get %s: (%d)\n", 513 pr_debug("%s: failed to get %s: (%d)\n",
514 dma_chan_name(chan), err); 514 __func__, dma_chan_name(chan), err);
515 else 515 else
516 break; 516 break;
517 if (--device->privatecnt == 0) 517 if (--device->privatecnt == 0)
@@ -564,8 +564,8 @@ void dmaengine_get(void)
564 list_del_rcu(&device->global_node); 564 list_del_rcu(&device->global_node);
565 break; 565 break;
566 } else if (err) 566 } else if (err)
567 pr_err("dmaengine: failed to get %s: (%d)\n", 567 pr_err("%s: failed to get %s: (%d)\n",
568 dma_chan_name(chan), err); 568 __func__, dma_chan_name(chan), err);
569 } 569 }
570 } 570 }
571 571
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
new file mode 100644
index 000000000000..17f983a4e9ba
--- /dev/null
+++ b/drivers/dma/dmaengine.h
@@ -0,0 +1,89 @@
1/*
2 * The contents of this file are private to DMA engine drivers, and is not
3 * part of the API to be used by DMA engine users.
4 */
5#ifndef DMAENGINE_H
6#define DMAENGINE_H
7
8#include <linux/bug.h>
9#include <linux/dmaengine.h>
10
11/**
12 * dma_cookie_init - initialize the cookies for a DMA channel
13 * @chan: dma channel to initialize
14 */
15static inline void dma_cookie_init(struct dma_chan *chan)
16{
17 chan->cookie = DMA_MIN_COOKIE;
18 chan->completed_cookie = DMA_MIN_COOKIE;
19}
20
21/**
22 * dma_cookie_assign - assign a DMA engine cookie to the descriptor
23 * @tx: descriptor needing cookie
24 *
25 * Assign a unique non-zero per-channel cookie to the descriptor.
26 * Note: caller is expected to hold a lock to prevent concurrency.
27 */
28static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
29{
30 struct dma_chan *chan = tx->chan;
31 dma_cookie_t cookie;
32
33 cookie = chan->cookie + 1;
34 if (cookie < DMA_MIN_COOKIE)
35 cookie = DMA_MIN_COOKIE;
36 tx->cookie = chan->cookie = cookie;
37
38 return cookie;
39}
40
41/**
42 * dma_cookie_complete - complete a descriptor
43 * @tx: descriptor to complete
44 *
45 * Mark this descriptor complete by updating the channels completed
46 * cookie marker. Zero the descriptors cookie to prevent accidental
47 * repeated completions.
48 *
49 * Note: caller is expected to hold a lock to prevent concurrency.
50 */
51static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
52{
53 BUG_ON(tx->cookie < DMA_MIN_COOKIE);
54 tx->chan->completed_cookie = tx->cookie;
55 tx->cookie = 0;
56}
57
58/**
59 * dma_cookie_status - report cookie status
60 * @chan: dma channel
61 * @cookie: cookie we are interested in
62 * @state: dma_tx_state structure to return last/used cookies
63 *
64 * Report the status of the cookie, filling in the state structure if
65 * non-NULL. No locking is required.
66 */
67static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
68 dma_cookie_t cookie, struct dma_tx_state *state)
69{
70 dma_cookie_t used, complete;
71
72 used = chan->cookie;
73 complete = chan->completed_cookie;
74 barrier();
75 if (state) {
76 state->last = complete;
77 state->used = used;
78 state->residue = 0;
79 }
80 return dma_async_is_complete(cookie, complete, used);
81}
82
83static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
84{
85 if (state)
86 state->residue = residue;
87}
88
89#endif
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 9b592b02b5f4..7439079f5eed 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -9,6 +9,7 @@
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12#include <linux/bitops.h>
12#include <linux/clk.h> 13#include <linux/clk.h>
13#include <linux/delay.h> 14#include <linux/delay.h>
14#include <linux/dmaengine.h> 15#include <linux/dmaengine.h>
@@ -22,6 +23,7 @@
22#include <linux/slab.h> 23#include <linux/slab.h>
23 24
24#include "dw_dmac_regs.h" 25#include "dw_dmac_regs.h"
26#include "dmaengine.h"
25 27
26/* 28/*
27 * This supports the Synopsys "DesignWare AHB Central DMA Controller", 29 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
@@ -33,19 +35,23 @@
33 * which does not support descriptor writeback. 35 * which does not support descriptor writeback.
34 */ 36 */
35 37
36#define DWC_DEFAULT_CTLLO(private) ({ \ 38#define DWC_DEFAULT_CTLLO(_chan) ({ \
37 struct dw_dma_slave *__slave = (private); \ 39 struct dw_dma_slave *__slave = (_chan->private); \
38 int dms = __slave ? __slave->dst_master : 0; \ 40 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \
39 int sms = __slave ? __slave->src_master : 1; \ 41 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
40 u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \ 42 int _dms = __slave ? __slave->dst_master : 0; \
41 u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \ 43 int _sms = __slave ? __slave->src_master : 1; \
44 u8 _smsize = __slave ? _sconfig->src_maxburst : \
45 DW_DMA_MSIZE_16; \
46 u8 _dmsize = __slave ? _sconfig->dst_maxburst : \
47 DW_DMA_MSIZE_16; \
42 \ 48 \
43 (DWC_CTLL_DST_MSIZE(dmsize) \ 49 (DWC_CTLL_DST_MSIZE(_dmsize) \
44 | DWC_CTLL_SRC_MSIZE(smsize) \ 50 | DWC_CTLL_SRC_MSIZE(_smsize) \
45 | DWC_CTLL_LLP_D_EN \ 51 | DWC_CTLL_LLP_D_EN \
46 | DWC_CTLL_LLP_S_EN \ 52 | DWC_CTLL_LLP_S_EN \
47 | DWC_CTLL_DMS(dms) \ 53 | DWC_CTLL_DMS(_dms) \
48 | DWC_CTLL_SMS(sms)); \ 54 | DWC_CTLL_SMS(_sms)); \
49 }) 55 })
50 56
51/* 57/*
@@ -151,21 +157,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
151 } 157 }
152} 158}
153 159
154/* Called with dwc->lock held and bh disabled */
155static dma_cookie_t
156dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
157{
158 dma_cookie_t cookie = dwc->chan.cookie;
159
160 if (++cookie < 0)
161 cookie = 1;
162
163 dwc->chan.cookie = cookie;
164 desc->txd.cookie = cookie;
165
166 return cookie;
167}
168
169static void dwc_initialize(struct dw_dma_chan *dwc) 160static void dwc_initialize(struct dw_dma_chan *dwc)
170{ 161{
171 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 162 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
@@ -192,7 +183,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
192 183
193 /* Enable interrupts */ 184 /* Enable interrupts */
194 channel_set_bit(dw, MASK.XFER, dwc->mask); 185 channel_set_bit(dw, MASK.XFER, dwc->mask);
195 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
196 channel_set_bit(dw, MASK.ERROR, dwc->mask); 186 channel_set_bit(dw, MASK.ERROR, dwc->mask);
197 187
198 dwc->initialized = true; 188 dwc->initialized = true;
@@ -245,7 +235,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
245 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); 235 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
246 236
247 spin_lock_irqsave(&dwc->lock, flags); 237 spin_lock_irqsave(&dwc->lock, flags);
248 dwc->completed = txd->cookie; 238 dma_cookie_complete(txd);
249 if (callback_required) { 239 if (callback_required) {
250 callback = txd->callback; 240 callback = txd->callback;
251 param = txd->callback_param; 241 param = txd->callback_param;
@@ -329,12 +319,6 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
329 unsigned long flags; 319 unsigned long flags;
330 320
331 spin_lock_irqsave(&dwc->lock, flags); 321 spin_lock_irqsave(&dwc->lock, flags);
332 /*
333 * Clear block interrupt flag before scanning so that we don't
334 * miss any, and read LLP before RAW_XFER to ensure it is
335 * valid if we decide to scan the list.
336 */
337 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
338 llp = channel_readl(dwc, LLP); 322 llp = channel_readl(dwc, LLP);
339 status_xfer = dma_readl(dw, RAW.XFER); 323 status_xfer = dma_readl(dw, RAW.XFER);
340 324
@@ -470,17 +454,16 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
470 454
471/* called with dwc->lock held and all DMAC interrupts disabled */ 455/* called with dwc->lock held and all DMAC interrupts disabled */
472static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, 456static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
473 u32 status_block, u32 status_err, u32 status_xfer) 457 u32 status_err, u32 status_xfer)
474{ 458{
475 unsigned long flags; 459 unsigned long flags;
476 460
477 if (status_block & dwc->mask) { 461 if (dwc->mask) {
478 void (*callback)(void *param); 462 void (*callback)(void *param);
479 void *callback_param; 463 void *callback_param;
480 464
481 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", 465 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
482 channel_readl(dwc, LLP)); 466 channel_readl(dwc, LLP));
483 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
484 467
485 callback = dwc->cdesc->period_callback; 468 callback = dwc->cdesc->period_callback;
486 callback_param = dwc->cdesc->period_callback_param; 469 callback_param = dwc->cdesc->period_callback_param;
@@ -520,7 +503,6 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
520 channel_writel(dwc, CTL_LO, 0); 503 channel_writel(dwc, CTL_LO, 0);
521 channel_writel(dwc, CTL_HI, 0); 504 channel_writel(dwc, CTL_HI, 0);
522 505
523 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
524 dma_writel(dw, CLEAR.ERROR, dwc->mask); 506 dma_writel(dw, CLEAR.ERROR, dwc->mask);
525 dma_writel(dw, CLEAR.XFER, dwc->mask); 507 dma_writel(dw, CLEAR.XFER, dwc->mask);
526 508
@@ -537,36 +519,29 @@ static void dw_dma_tasklet(unsigned long data)
537{ 519{
538 struct dw_dma *dw = (struct dw_dma *)data; 520 struct dw_dma *dw = (struct dw_dma *)data;
539 struct dw_dma_chan *dwc; 521 struct dw_dma_chan *dwc;
540 u32 status_block;
541 u32 status_xfer; 522 u32 status_xfer;
542 u32 status_err; 523 u32 status_err;
543 int i; 524 int i;
544 525
545 status_block = dma_readl(dw, RAW.BLOCK);
546 status_xfer = dma_readl(dw, RAW.XFER); 526 status_xfer = dma_readl(dw, RAW.XFER);
547 status_err = dma_readl(dw, RAW.ERROR); 527 status_err = dma_readl(dw, RAW.ERROR);
548 528
549 dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", 529 dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);
550 status_block, status_err);
551 530
552 for (i = 0; i < dw->dma.chancnt; i++) { 531 for (i = 0; i < dw->dma.chancnt; i++) {
553 dwc = &dw->chan[i]; 532 dwc = &dw->chan[i];
554 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) 533 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
555 dwc_handle_cyclic(dw, dwc, status_block, status_err, 534 dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
556 status_xfer);
557 else if (status_err & (1 << i)) 535 else if (status_err & (1 << i))
558 dwc_handle_error(dw, dwc); 536 dwc_handle_error(dw, dwc);
559 else if ((status_block | status_xfer) & (1 << i)) 537 else if (status_xfer & (1 << i))
560 dwc_scan_descriptors(dw, dwc); 538 dwc_scan_descriptors(dw, dwc);
561 } 539 }
562 540
563 /* 541 /*
564 * Re-enable interrupts. Block Complete interrupts are only 542 * Re-enable interrupts.
565 * enabled if the INT_EN bit in the descriptor is set. This
566 * will trigger a scan before the whole list is done.
567 */ 543 */
568 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); 544 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
569 channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
570 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); 545 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
571} 546}
572 547
@@ -583,7 +558,6 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
583 * softirq handler. 558 * softirq handler.
584 */ 559 */
585 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 560 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
586 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
587 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 561 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
588 562
589 status = dma_readl(dw, STATUS_INT); 563 status = dma_readl(dw, STATUS_INT);
@@ -594,7 +568,6 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
594 568
595 /* Try to recover */ 569 /* Try to recover */
596 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); 570 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
597 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
598 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); 571 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
599 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); 572 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
600 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); 573 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
@@ -615,7 +588,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
615 unsigned long flags; 588 unsigned long flags;
616 589
617 spin_lock_irqsave(&dwc->lock, flags); 590 spin_lock_irqsave(&dwc->lock, flags);
618 cookie = dwc_assign_cookie(dwc, desc); 591 cookie = dma_cookie_assign(tx);
619 592
620 /* 593 /*
621 * REVISIT: We should attempt to chain as many descriptors as 594 * REVISIT: We should attempt to chain as many descriptors as
@@ -674,7 +647,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
674 else 647 else
675 src_width = dst_width = 0; 648 src_width = dst_width = 0;
676 649
677 ctllo = DWC_DEFAULT_CTLLO(chan->private) 650 ctllo = DWC_DEFAULT_CTLLO(chan)
678 | DWC_CTLL_DST_WIDTH(dst_width) 651 | DWC_CTLL_DST_WIDTH(dst_width)
679 | DWC_CTLL_SRC_WIDTH(src_width) 652 | DWC_CTLL_SRC_WIDTH(src_width)
680 | DWC_CTLL_DST_INC 653 | DWC_CTLL_DST_INC
@@ -731,10 +704,11 @@ err_desc_get:
731static struct dma_async_tx_descriptor * 704static struct dma_async_tx_descriptor *
732dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 705dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
733 unsigned int sg_len, enum dma_transfer_direction direction, 706 unsigned int sg_len, enum dma_transfer_direction direction,
734 unsigned long flags) 707 unsigned long flags, void *context)
735{ 708{
736 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 709 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
737 struct dw_dma_slave *dws = chan->private; 710 struct dw_dma_slave *dws = chan->private;
711 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
738 struct dw_desc *prev; 712 struct dw_desc *prev;
739 struct dw_desc *first; 713 struct dw_desc *first;
740 u32 ctllo; 714 u32 ctllo;
@@ -750,25 +724,34 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
750 if (unlikely(!dws || !sg_len)) 724 if (unlikely(!dws || !sg_len))
751 return NULL; 725 return NULL;
752 726
753 reg_width = dws->reg_width;
754 prev = first = NULL; 727 prev = first = NULL;
755 728
756 switch (direction) { 729 switch (direction) {
757 case DMA_MEM_TO_DEV: 730 case DMA_MEM_TO_DEV:
758 ctllo = (DWC_DEFAULT_CTLLO(chan->private) 731 reg_width = __fls(sconfig->dst_addr_width);
732 reg = sconfig->dst_addr;
733 ctllo = (DWC_DEFAULT_CTLLO(chan)
759 | DWC_CTLL_DST_WIDTH(reg_width) 734 | DWC_CTLL_DST_WIDTH(reg_width)
760 | DWC_CTLL_DST_FIX 735 | DWC_CTLL_DST_FIX
761 | DWC_CTLL_SRC_INC 736 | DWC_CTLL_SRC_INC);
762 | DWC_CTLL_FC(dws->fc)); 737
763 reg = dws->tx_reg; 738 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
739 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
740
764 for_each_sg(sgl, sg, sg_len, i) { 741 for_each_sg(sgl, sg, sg_len, i) {
765 struct dw_desc *desc; 742 struct dw_desc *desc;
766 u32 len, dlen, mem; 743 u32 len, dlen, mem;
767 744
768 mem = sg_phys(sg); 745 mem = sg_phys(sg);
769 len = sg_dma_len(sg); 746 len = sg_dma_len(sg);
770 mem_width = 2; 747
771 if (unlikely(mem & 3 || len & 3)) 748 if (!((mem | len) & 7))
749 mem_width = 3;
750 else if (!((mem | len) & 3))
751 mem_width = 2;
752 else if (!((mem | len) & 1))
753 mem_width = 1;
754 else
772 mem_width = 0; 755 mem_width = 0;
773 756
774slave_sg_todev_fill_desc: 757slave_sg_todev_fill_desc:
@@ -812,21 +795,30 @@ slave_sg_todev_fill_desc:
812 } 795 }
813 break; 796 break;
814 case DMA_DEV_TO_MEM: 797 case DMA_DEV_TO_MEM:
815 ctllo = (DWC_DEFAULT_CTLLO(chan->private) 798 reg_width = __fls(sconfig->src_addr_width);
799 reg = sconfig->src_addr;
800 ctllo = (DWC_DEFAULT_CTLLO(chan)
816 | DWC_CTLL_SRC_WIDTH(reg_width) 801 | DWC_CTLL_SRC_WIDTH(reg_width)
817 | DWC_CTLL_DST_INC 802 | DWC_CTLL_DST_INC
818 | DWC_CTLL_SRC_FIX 803 | DWC_CTLL_SRC_FIX);
819 | DWC_CTLL_FC(dws->fc)); 804
805 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
806 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
820 807
821 reg = dws->rx_reg;
822 for_each_sg(sgl, sg, sg_len, i) { 808 for_each_sg(sgl, sg, sg_len, i) {
823 struct dw_desc *desc; 809 struct dw_desc *desc;
824 u32 len, dlen, mem; 810 u32 len, dlen, mem;
825 811
826 mem = sg_phys(sg); 812 mem = sg_phys(sg);
827 len = sg_dma_len(sg); 813 len = sg_dma_len(sg);
828 mem_width = 2; 814
829 if (unlikely(mem & 3 || len & 3)) 815 if (!((mem | len) & 7))
816 mem_width = 3;
817 else if (!((mem | len) & 3))
818 mem_width = 2;
819 else if (!((mem | len) & 1))
820 mem_width = 1;
821 else
830 mem_width = 0; 822 mem_width = 0;
831 823
832slave_sg_fromdev_fill_desc: 824slave_sg_fromdev_fill_desc:
@@ -890,6 +882,39 @@ err_desc_get:
890 return NULL; 882 return NULL;
891} 883}
892 884
885/*
886 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
887 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
888 *
889 * NOTE: burst size 2 is not supported by controller.
890 *
891 * This can be done by finding least significant bit set: n & (n - 1)
892 */
893static inline void convert_burst(u32 *maxburst)
894{
895 if (*maxburst > 1)
896 *maxburst = fls(*maxburst) - 2;
897 else
898 *maxburst = 0;
899}
900
901static int
902set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
903{
904 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
905
906 /* Check if it is chan is configured for slave transfers */
907 if (!chan->private)
908 return -EINVAL;
909
910 memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
911
912 convert_burst(&dwc->dma_sconfig.src_maxburst);
913 convert_burst(&dwc->dma_sconfig.dst_maxburst);
914
915 return 0;
916}
917
893static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 918static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
894 unsigned long arg) 919 unsigned long arg)
895{ 920{
@@ -939,8 +964,11 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
939 /* Flush all pending and queued descriptors */ 964 /* Flush all pending and queued descriptors */
940 list_for_each_entry_safe(desc, _desc, &list, desc_node) 965 list_for_each_entry_safe(desc, _desc, &list, desc_node)
941 dwc_descriptor_complete(dwc, desc, false); 966 dwc_descriptor_complete(dwc, desc, false);
942 } else 967 } else if (cmd == DMA_SLAVE_CONFIG) {
968 return set_runtime_config(chan, (struct dma_slave_config *)arg);
969 } else {
943 return -ENXIO; 970 return -ENXIO;
971 }
944 972
945 return 0; 973 return 0;
946} 974}
@@ -951,28 +979,17 @@ dwc_tx_status(struct dma_chan *chan,
951 struct dma_tx_state *txstate) 979 struct dma_tx_state *txstate)
952{ 980{
953 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 981 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
954 dma_cookie_t last_used; 982 enum dma_status ret;
955 dma_cookie_t last_complete;
956 int ret;
957
958 last_complete = dwc->completed;
959 last_used = chan->cookie;
960 983
961 ret = dma_async_is_complete(cookie, last_complete, last_used); 984 ret = dma_cookie_status(chan, cookie, txstate);
962 if (ret != DMA_SUCCESS) { 985 if (ret != DMA_SUCCESS) {
963 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 986 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
964 987
965 last_complete = dwc->completed; 988 ret = dma_cookie_status(chan, cookie, txstate);
966 last_used = chan->cookie;
967
968 ret = dma_async_is_complete(cookie, last_complete, last_used);
969 } 989 }
970 990
971 if (ret != DMA_SUCCESS) 991 if (ret != DMA_SUCCESS)
972 dma_set_tx_state(txstate, last_complete, last_used, 992 dma_set_residue(txstate, dwc_first_active(dwc)->len);
973 dwc_first_active(dwc)->len);
974 else
975 dma_set_tx_state(txstate, last_complete, last_used, 0);
976 993
977 if (dwc->paused) 994 if (dwc->paused)
978 return DMA_PAUSED; 995 return DMA_PAUSED;
@@ -1004,7 +1021,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
1004 return -EIO; 1021 return -EIO;
1005 } 1022 }
1006 1023
1007 dwc->completed = chan->cookie = 1; 1024 dma_cookie_init(chan);
1008 1025
1009 /* 1026 /*
1010 * NOTE: some controllers may have additional features that we 1027 * NOTE: some controllers may have additional features that we
@@ -1068,7 +1085,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1068 1085
1069 /* Disable interrupts */ 1086 /* Disable interrupts */
1070 channel_clear_bit(dw, MASK.XFER, dwc->mask); 1087 channel_clear_bit(dw, MASK.XFER, dwc->mask);
1071 channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
1072 channel_clear_bit(dw, MASK.ERROR, dwc->mask); 1088 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1073 1089
1074 spin_unlock_irqrestore(&dwc->lock, flags); 1090 spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1120,7 +1136,6 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
1120 return -EBUSY; 1136 return -EBUSY;
1121 } 1137 }
1122 1138
1123 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
1124 dma_writel(dw, CLEAR.ERROR, dwc->mask); 1139 dma_writel(dw, CLEAR.ERROR, dwc->mask);
1125 dma_writel(dw, CLEAR.XFER, dwc->mask); 1140 dma_writel(dw, CLEAR.XFER, dwc->mask);
1126 1141
@@ -1175,11 +1190,11 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1175 enum dma_transfer_direction direction) 1190 enum dma_transfer_direction direction)
1176{ 1191{
1177 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1192 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1193 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
1178 struct dw_cyclic_desc *cdesc; 1194 struct dw_cyclic_desc *cdesc;
1179 struct dw_cyclic_desc *retval = NULL; 1195 struct dw_cyclic_desc *retval = NULL;
1180 struct dw_desc *desc; 1196 struct dw_desc *desc;
1181 struct dw_desc *last = NULL; 1197 struct dw_desc *last = NULL;
1182 struct dw_dma_slave *dws = chan->private;
1183 unsigned long was_cyclic; 1198 unsigned long was_cyclic;
1184 unsigned int reg_width; 1199 unsigned int reg_width;
1185 unsigned int periods; 1200 unsigned int periods;
@@ -1203,7 +1218,12 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1203 } 1218 }
1204 1219
1205 retval = ERR_PTR(-EINVAL); 1220 retval = ERR_PTR(-EINVAL);
1206 reg_width = dws->reg_width; 1221
1222 if (direction == DMA_MEM_TO_DEV)
1223 reg_width = __ffs(sconfig->dst_addr_width);
1224 else
1225 reg_width = __ffs(sconfig->src_addr_width);
1226
1207 periods = buf_len / period_len; 1227 periods = buf_len / period_len;
1208 1228
1209 /* Check for too big/unaligned periods and unaligned DMA buffer. */ 1229 /* Check for too big/unaligned periods and unaligned DMA buffer. */
@@ -1236,26 +1256,34 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1236 1256
1237 switch (direction) { 1257 switch (direction) {
1238 case DMA_MEM_TO_DEV: 1258 case DMA_MEM_TO_DEV:
1239 desc->lli.dar = dws->tx_reg; 1259 desc->lli.dar = sconfig->dst_addr;
1240 desc->lli.sar = buf_addr + (period_len * i); 1260 desc->lli.sar = buf_addr + (period_len * i);
1241 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) 1261 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1242 | DWC_CTLL_DST_WIDTH(reg_width) 1262 | DWC_CTLL_DST_WIDTH(reg_width)
1243 | DWC_CTLL_SRC_WIDTH(reg_width) 1263 | DWC_CTLL_SRC_WIDTH(reg_width)
1244 | DWC_CTLL_DST_FIX 1264 | DWC_CTLL_DST_FIX
1245 | DWC_CTLL_SRC_INC 1265 | DWC_CTLL_SRC_INC
1246 | DWC_CTLL_FC(dws->fc)
1247 | DWC_CTLL_INT_EN); 1266 | DWC_CTLL_INT_EN);
1267
1268 desc->lli.ctllo |= sconfig->device_fc ?
1269 DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
1270 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
1271
1248 break; 1272 break;
1249 case DMA_DEV_TO_MEM: 1273 case DMA_DEV_TO_MEM:
1250 desc->lli.dar = buf_addr + (period_len * i); 1274 desc->lli.dar = buf_addr + (period_len * i);
1251 desc->lli.sar = dws->rx_reg; 1275 desc->lli.sar = sconfig->src_addr;
1252 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) 1276 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1253 | DWC_CTLL_SRC_WIDTH(reg_width) 1277 | DWC_CTLL_SRC_WIDTH(reg_width)
1254 | DWC_CTLL_DST_WIDTH(reg_width) 1278 | DWC_CTLL_DST_WIDTH(reg_width)
1255 | DWC_CTLL_DST_INC 1279 | DWC_CTLL_DST_INC
1256 | DWC_CTLL_SRC_FIX 1280 | DWC_CTLL_SRC_FIX
1257 | DWC_CTLL_FC(dws->fc)
1258 | DWC_CTLL_INT_EN); 1281 | DWC_CTLL_INT_EN);
1282
1283 desc->lli.ctllo |= sconfig->device_fc ?
1284 DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
1285 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
1286
1259 break; 1287 break;
1260 default: 1288 default:
1261 break; 1289 break;
@@ -1322,7 +1350,6 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
1322 while (dma_readl(dw, CH_EN) & dwc->mask) 1350 while (dma_readl(dw, CH_EN) & dwc->mask)
1323 cpu_relax(); 1351 cpu_relax();
1324 1352
1325 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
1326 dma_writel(dw, CLEAR.ERROR, dwc->mask); 1353 dma_writel(dw, CLEAR.ERROR, dwc->mask);
1327 dma_writel(dw, CLEAR.XFER, dwc->mask); 1354 dma_writel(dw, CLEAR.XFER, dwc->mask);
1328 1355
@@ -1347,7 +1374,6 @@ static void dw_dma_off(struct dw_dma *dw)
1347 dma_writel(dw, CFG, 0); 1374 dma_writel(dw, CFG, 0);
1348 1375
1349 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 1376 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1350 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1351 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); 1377 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1352 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); 1378 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1353 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 1379 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@@ -1369,7 +1395,7 @@ static int __init dw_probe(struct platform_device *pdev)
1369 int err; 1395 int err;
1370 int i; 1396 int i;
1371 1397
1372 pdata = pdev->dev.platform_data; 1398 pdata = dev_get_platdata(&pdev->dev);
1373 if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) 1399 if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
1374 return -EINVAL; 1400 return -EINVAL;
1375 1401
@@ -1423,7 +1449,7 @@ static int __init dw_probe(struct platform_device *pdev)
1423 struct dw_dma_chan *dwc = &dw->chan[i]; 1449 struct dw_dma_chan *dwc = &dw->chan[i];
1424 1450
1425 dwc->chan.device = &dw->dma; 1451 dwc->chan.device = &dw->dma;
1426 dwc->chan.cookie = dwc->completed = 1; 1452 dma_cookie_init(&dwc->chan);
1427 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) 1453 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1428 list_add_tail(&dwc->chan.device_node, 1454 list_add_tail(&dwc->chan.device_node,
1429 &dw->dma.channels); 1455 &dw->dma.channels);
@@ -1432,7 +1458,7 @@ static int __init dw_probe(struct platform_device *pdev)
1432 1458
1433 /* 7 is highest priority & 0 is lowest. */ 1459 /* 7 is highest priority & 0 is lowest. */
1434 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) 1460 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1435 dwc->priority = 7 - i; 1461 dwc->priority = pdata->nr_channels - i - 1;
1436 else 1462 else
1437 dwc->priority = i; 1463 dwc->priority = i;
1438 1464
@@ -1449,13 +1475,11 @@ static int __init dw_probe(struct platform_device *pdev)
1449 1475
1450 /* Clear/disable all interrupts on all channels. */ 1476 /* Clear/disable all interrupts on all channels. */
1451 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); 1477 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1452 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1453 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); 1478 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1454 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); 1479 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1455 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); 1480 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1456 1481
1457 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 1482 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1458 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1459 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); 1483 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1460 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); 1484 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1461 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 1485 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@@ -1562,6 +1586,10 @@ static int dw_resume_noirq(struct device *dev)
1562static const struct dev_pm_ops dw_dev_pm_ops = { 1586static const struct dev_pm_ops dw_dev_pm_ops = {
1563 .suspend_noirq = dw_suspend_noirq, 1587 .suspend_noirq = dw_suspend_noirq,
1564 .resume_noirq = dw_resume_noirq, 1588 .resume_noirq = dw_resume_noirq,
1589 .freeze_noirq = dw_suspend_noirq,
1590 .thaw_noirq = dw_resume_noirq,
1591 .restore_noirq = dw_resume_noirq,
1592 .poweroff_noirq = dw_suspend_noirq,
1565}; 1593};
1566 1594
1567static struct platform_driver dw_driver = { 1595static struct platform_driver dw_driver = {
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 5eef6946a367..f298f69ecbf9 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -13,6 +13,18 @@
13 13
14#define DW_DMA_MAX_NR_CHANNELS 8 14#define DW_DMA_MAX_NR_CHANNELS 8
15 15
16/* flow controller */
17enum dw_dma_fc {
18 DW_DMA_FC_D_M2M,
19 DW_DMA_FC_D_M2P,
20 DW_DMA_FC_D_P2M,
21 DW_DMA_FC_D_P2P,
22 DW_DMA_FC_P_P2M,
23 DW_DMA_FC_SP_P2P,
24 DW_DMA_FC_P_M2P,
25 DW_DMA_FC_DP_P2P,
26};
27
16/* 28/*
17 * Redefine this macro to handle differences between 32- and 64-bit 29 * Redefine this macro to handle differences between 32- and 64-bit
18 * addressing, big vs. little endian, etc. 30 * addressing, big vs. little endian, etc.
@@ -146,13 +158,15 @@ struct dw_dma_chan {
146 158
147 /* these other elements are all protected by lock */ 159 /* these other elements are all protected by lock */
148 unsigned long flags; 160 unsigned long flags;
149 dma_cookie_t completed;
150 struct list_head active_list; 161 struct list_head active_list;
151 struct list_head queue; 162 struct list_head queue;
152 struct list_head free_list; 163 struct list_head free_list;
153 struct dw_cyclic_desc *cdesc; 164 struct dw_cyclic_desc *cdesc;
154 165
155 unsigned int descs_allocated; 166 unsigned int descs_allocated;
167
168 /* configuration passed via DMA_SLAVE_CONFIG */
169 struct dma_slave_config dma_sconfig;
156}; 170};
157 171
158static inline struct dw_dma_chan_regs __iomem * 172static inline struct dw_dma_chan_regs __iomem *
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 59e7a965772b..e6f133b78dc2 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -28,6 +28,8 @@
28 28
29#include <mach/dma.h> 29#include <mach/dma.h>
30 30
31#include "dmaengine.h"
32
31/* M2P registers */ 33/* M2P registers */
32#define M2P_CONTROL 0x0000 34#define M2P_CONTROL 0x0000
33#define M2P_CONTROL_STALLINT BIT(0) 35#define M2P_CONTROL_STALLINT BIT(0)
@@ -122,7 +124,6 @@ struct ep93xx_dma_desc {
122 * @lock: lock protecting the fields following 124 * @lock: lock protecting the fields following
123 * @flags: flags for the channel 125 * @flags: flags for the channel
124 * @buffer: which buffer to use next (0/1) 126 * @buffer: which buffer to use next (0/1)
125 * @last_completed: last completed cookie value
126 * @active: flattened chain of descriptors currently being processed 127 * @active: flattened chain of descriptors currently being processed
127 * @queue: pending descriptors which are handled next 128 * @queue: pending descriptors which are handled next
128 * @free_list: list of free descriptors which can be used 129 * @free_list: list of free descriptors which can be used
@@ -157,7 +158,6 @@ struct ep93xx_dma_chan {
157#define EP93XX_DMA_IS_CYCLIC 0 158#define EP93XX_DMA_IS_CYCLIC 0
158 159
159 int buffer; 160 int buffer;
160 dma_cookie_t last_completed;
161 struct list_head active; 161 struct list_head active;
162 struct list_head queue; 162 struct list_head queue;
163 struct list_head free_list; 163 struct list_head free_list;
@@ -703,7 +703,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
703 desc = ep93xx_dma_get_active(edmac); 703 desc = ep93xx_dma_get_active(edmac);
704 if (desc) { 704 if (desc) {
705 if (desc->complete) { 705 if (desc->complete) {
706 edmac->last_completed = desc->txd.cookie; 706 dma_cookie_complete(&desc->txd);
707 list_splice_init(&edmac->active, &list); 707 list_splice_init(&edmac->active, &list);
708 } 708 }
709 callback = desc->txd.callback; 709 callback = desc->txd.callback;
@@ -783,17 +783,10 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
783 unsigned long flags; 783 unsigned long flags;
784 784
785 spin_lock_irqsave(&edmac->lock, flags); 785 spin_lock_irqsave(&edmac->lock, flags);
786 786 cookie = dma_cookie_assign(tx);
787 cookie = edmac->chan.cookie;
788
789 if (++cookie < 0)
790 cookie = 1;
791 787
792 desc = container_of(tx, struct ep93xx_dma_desc, txd); 788 desc = container_of(tx, struct ep93xx_dma_desc, txd);
793 789
794 edmac->chan.cookie = cookie;
795 desc->txd.cookie = cookie;
796
797 /* 790 /*
798 * If nothing is currently prosessed, we push this descriptor 791 * If nothing is currently prosessed, we push this descriptor
799 * directly to the hardware. Otherwise we put the descriptor 792 * directly to the hardware. Otherwise we put the descriptor
@@ -861,8 +854,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
861 goto fail_clk_disable; 854 goto fail_clk_disable;
862 855
863 spin_lock_irq(&edmac->lock); 856 spin_lock_irq(&edmac->lock);
864 edmac->last_completed = 1; 857 dma_cookie_init(&edmac->chan);
865 edmac->chan.cookie = 1;
866 ret = edmac->edma->hw_setup(edmac); 858 ret = edmac->edma->hw_setup(edmac);
867 spin_unlock_irq(&edmac->lock); 859 spin_unlock_irq(&edmac->lock);
868 860
@@ -983,13 +975,14 @@ fail:
983 * @sg_len: number of entries in @sgl 975 * @sg_len: number of entries in @sgl
984 * @dir: direction of tha DMA transfer 976 * @dir: direction of tha DMA transfer
985 * @flags: flags for the descriptor 977 * @flags: flags for the descriptor
978 * @context: operation context (ignored)
986 * 979 *
987 * Returns a valid DMA descriptor or %NULL in case of failure. 980 * Returns a valid DMA descriptor or %NULL in case of failure.
988 */ 981 */
989static struct dma_async_tx_descriptor * 982static struct dma_async_tx_descriptor *
990ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 983ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
991 unsigned int sg_len, enum dma_transfer_direction dir, 984 unsigned int sg_len, enum dma_transfer_direction dir,
992 unsigned long flags) 985 unsigned long flags, void *context)
993{ 986{
994 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); 987 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
995 struct ep93xx_dma_desc *desc, *first; 988 struct ep93xx_dma_desc *desc, *first;
@@ -1056,6 +1049,7 @@ fail:
1056 * @buf_len: length of the buffer (in bytes) 1049 * @buf_len: length of the buffer (in bytes)
1057 * @period_len: lenght of a single period 1050 * @period_len: lenght of a single period
1058 * @dir: direction of the operation 1051 * @dir: direction of the operation
1052 * @context: operation context (ignored)
1059 * 1053 *
1060 * Prepares a descriptor for cyclic DMA operation. This means that once the 1054 * Prepares a descriptor for cyclic DMA operation. This means that once the
1061 * descriptor is submitted, we will be submitting in a @period_len sized 1055 * descriptor is submitted, we will be submitting in a @period_len sized
@@ -1068,7 +1062,7 @@ fail:
1068static struct dma_async_tx_descriptor * 1062static struct dma_async_tx_descriptor *
1069ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, 1063ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
1070 size_t buf_len, size_t period_len, 1064 size_t buf_len, size_t period_len,
1071 enum dma_transfer_direction dir) 1065 enum dma_transfer_direction dir, void *context)
1072{ 1066{
1073 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); 1067 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1074 struct ep93xx_dma_desc *desc, *first; 1068 struct ep93xx_dma_desc *desc, *first;
@@ -1248,18 +1242,13 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
1248 struct dma_tx_state *state) 1242 struct dma_tx_state *state)
1249{ 1243{
1250 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); 1244 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1251 dma_cookie_t last_used, last_completed;
1252 enum dma_status ret; 1245 enum dma_status ret;
1253 unsigned long flags; 1246 unsigned long flags;
1254 1247
1255 spin_lock_irqsave(&edmac->lock, flags); 1248 spin_lock_irqsave(&edmac->lock, flags);
1256 last_used = chan->cookie; 1249 ret = dma_cookie_status(chan, cookie, state);
1257 last_completed = edmac->last_completed;
1258 spin_unlock_irqrestore(&edmac->lock, flags); 1250 spin_unlock_irqrestore(&edmac->lock, flags);
1259 1251
1260 ret = dma_async_is_complete(cookie, last_completed, last_used);
1261 dma_set_tx_state(state, last_completed, last_used, 0);
1262
1263 return ret; 1252 return ret;
1264} 1253}
1265 1254
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index b98070c33ca9..8f84761f98ba 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -35,6 +35,7 @@
35#include <linux/dmapool.h> 35#include <linux/dmapool.h>
36#include <linux/of_platform.h> 36#include <linux/of_platform.h>
37 37
38#include "dmaengine.h"
38#include "fsldma.h" 39#include "fsldma.h"
39 40
40#define chan_dbg(chan, fmt, arg...) \ 41#define chan_dbg(chan, fmt, arg...) \
@@ -413,17 +414,10 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
413 * assign cookies to all of the software descriptors 414 * assign cookies to all of the software descriptors
414 * that make up this transaction 415 * that make up this transaction
415 */ 416 */
416 cookie = chan->common.cookie;
417 list_for_each_entry(child, &desc->tx_list, node) { 417 list_for_each_entry(child, &desc->tx_list, node) {
418 cookie++; 418 cookie = dma_cookie_assign(&child->async_tx);
419 if (cookie < DMA_MIN_COOKIE)
420 cookie = DMA_MIN_COOKIE;
421
422 child->async_tx.cookie = cookie;
423 } 419 }
424 420
425 chan->common.cookie = cookie;
426
427 /* put this transaction onto the tail of the pending queue */ 421 /* put this transaction onto the tail of the pending queue */
428 append_ld_queue(chan, desc); 422 append_ld_queue(chan, desc);
429 423
@@ -765,6 +759,7 @@ fail:
765 * @sg_len: number of entries in @scatterlist 759 * @sg_len: number of entries in @scatterlist
766 * @direction: DMA direction 760 * @direction: DMA direction
767 * @flags: DMAEngine flags 761 * @flags: DMAEngine flags
762 * @context: transaction context (ignored)
768 * 763 *
769 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the 764 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
770 * DMA_SLAVE API, this gets the device-specific information from the 765 * DMA_SLAVE API, this gets the device-specific information from the
@@ -772,7 +767,8 @@ fail:
772 */ 767 */
773static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( 768static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
774 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, 769 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
775 enum dma_transfer_direction direction, unsigned long flags) 770 enum dma_transfer_direction direction, unsigned long flags,
771 void *context)
776{ 772{
777 /* 773 /*
778 * This operation is not supported on the Freescale DMA controller 774 * This operation is not supported on the Freescale DMA controller
@@ -984,19 +980,14 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
984 struct dma_tx_state *txstate) 980 struct dma_tx_state *txstate)
985{ 981{
986 struct fsldma_chan *chan = to_fsl_chan(dchan); 982 struct fsldma_chan *chan = to_fsl_chan(dchan);
987 dma_cookie_t last_complete; 983 enum dma_status ret;
988 dma_cookie_t last_used;
989 unsigned long flags; 984 unsigned long flags;
990 985
991 spin_lock_irqsave(&chan->desc_lock, flags); 986 spin_lock_irqsave(&chan->desc_lock, flags);
992 987 ret = dma_cookie_status(dchan, cookie, txstate);
993 last_complete = chan->completed_cookie;
994 last_used = dchan->cookie;
995
996 spin_unlock_irqrestore(&chan->desc_lock, flags); 988 spin_unlock_irqrestore(&chan->desc_lock, flags);
997 989
998 dma_set_tx_state(txstate, last_complete, last_used, 0); 990 return ret;
999 return dma_async_is_complete(cookie, last_complete, last_used);
1000} 991}
1001 992
1002/*----------------------------------------------------------------------------*/ 993/*----------------------------------------------------------------------------*/
@@ -1087,8 +1078,8 @@ static void dma_do_tasklet(unsigned long data)
1087 1078
1088 desc = to_fsl_desc(chan->ld_running.prev); 1079 desc = to_fsl_desc(chan->ld_running.prev);
1089 cookie = desc->async_tx.cookie; 1080 cookie = desc->async_tx.cookie;
1081 dma_cookie_complete(&desc->async_tx);
1090 1082
1091 chan->completed_cookie = cookie;
1092 chan_dbg(chan, "completed_cookie=%d\n", cookie); 1083 chan_dbg(chan, "completed_cookie=%d\n", cookie);
1093 } 1084 }
1094 1085
@@ -1303,6 +1294,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
1303 chan->idle = true; 1294 chan->idle = true;
1304 1295
1305 chan->common.device = &fdev->common; 1296 chan->common.device = &fdev->common;
1297 dma_cookie_init(&chan->common);
1306 1298
1307 /* find the IRQ line, if it exists in the device tree */ 1299 /* find the IRQ line, if it exists in the device tree */
1308 chan->irq = irq_of_parse_and_map(node, 0); 1300 chan->irq = irq_of_parse_and_map(node, 0);
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 9cb5aa57c677..f5c38791fc74 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -137,7 +137,6 @@ struct fsldma_device {
137struct fsldma_chan { 137struct fsldma_chan {
138 char name[8]; /* Channel name */ 138 char name[8]; /* Channel name */
139 struct fsldma_chan_regs __iomem *regs; 139 struct fsldma_chan_regs __iomem *regs;
140 dma_cookie_t completed_cookie; /* The maximum cookie completed */
141 spinlock_t desc_lock; /* Descriptor operation lock */ 140 spinlock_t desc_lock; /* Descriptor operation lock */
142 struct list_head ld_pending; /* Link descriptors queue */ 141 struct list_head ld_pending; /* Link descriptors queue */
143 struct list_head ld_running; /* Link descriptors queue */ 142 struct list_head ld_running; /* Link descriptors queue */
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 38586ba8da91..a45b5d2a5987 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -5,6 +5,7 @@
5 * found on i.MX1/21/27 5 * found on i.MX1/21/27
6 * 6 *
7 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> 7 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
8 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
8 * 9 *
9 * The code contained herein is licensed under the GNU General Public 10 * The code contained herein is licensed under the GNU General Public
10 * License. You may obtain a copy of the GNU General Public License 11 * License. You may obtain a copy of the GNU General Public License
@@ -22,37 +23,159 @@
22#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
24#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/clk.h>
25#include <linux/dmaengine.h> 27#include <linux/dmaengine.h>
26#include <linux/module.h> 28#include <linux/module.h>
27 29
28#include <asm/irq.h> 30#include <asm/irq.h>
29#include <mach/dma-v1.h> 31#include <mach/dma.h>
30#include <mach/hardware.h> 32#include <mach/hardware.h>
31 33
34#include "dmaengine.h"
35#define IMXDMA_MAX_CHAN_DESCRIPTORS 16
36#define IMX_DMA_CHANNELS 16
37
38#define IMX_DMA_2D_SLOTS 2
39#define IMX_DMA_2D_SLOT_A 0
40#define IMX_DMA_2D_SLOT_B 1
41
42#define IMX_DMA_LENGTH_LOOP ((unsigned int)-1)
43#define IMX_DMA_MEMSIZE_32 (0 << 4)
44#define IMX_DMA_MEMSIZE_8 (1 << 4)
45#define IMX_DMA_MEMSIZE_16 (2 << 4)
46#define IMX_DMA_TYPE_LINEAR (0 << 10)
47#define IMX_DMA_TYPE_2D (1 << 10)
48#define IMX_DMA_TYPE_FIFO (2 << 10)
49
50#define IMX_DMA_ERR_BURST (1 << 0)
51#define IMX_DMA_ERR_REQUEST (1 << 1)
52#define IMX_DMA_ERR_TRANSFER (1 << 2)
53#define IMX_DMA_ERR_BUFFER (1 << 3)
54#define IMX_DMA_ERR_TIMEOUT (1 << 4)
55
56#define DMA_DCR 0x00 /* Control Register */
57#define DMA_DISR 0x04 /* Interrupt status Register */
58#define DMA_DIMR 0x08 /* Interrupt mask Register */
59#define DMA_DBTOSR 0x0c /* Burst timeout status Register */
60#define DMA_DRTOSR 0x10 /* Request timeout Register */
61#define DMA_DSESR 0x14 /* Transfer Error Status Register */
62#define DMA_DBOSR 0x18 /* Buffer overflow status Register */
63#define DMA_DBTOCR 0x1c /* Burst timeout control Register */
64#define DMA_WSRA 0x40 /* W-Size Register A */
65#define DMA_XSRA 0x44 /* X-Size Register A */
66#define DMA_YSRA 0x48 /* Y-Size Register A */
67#define DMA_WSRB 0x4c /* W-Size Register B */
68#define DMA_XSRB 0x50 /* X-Size Register B */
69#define DMA_YSRB 0x54 /* Y-Size Register B */
70#define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */
71#define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */
72#define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */
73#define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */
74#define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */
75#define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */
76#define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */
77#define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */
78#define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */
79
80#define DCR_DRST (1<<1)
81#define DCR_DEN (1<<0)
82#define DBTOCR_EN (1<<15)
83#define DBTOCR_CNT(x) ((x) & 0x7fff)
84#define CNTR_CNT(x) ((x) & 0xffffff)
85#define CCR_ACRPT (1<<14)
86#define CCR_DMOD_LINEAR (0x0 << 12)
87#define CCR_DMOD_2D (0x1 << 12)
88#define CCR_DMOD_FIFO (0x2 << 12)
89#define CCR_DMOD_EOBFIFO (0x3 << 12)
90#define CCR_SMOD_LINEAR (0x0 << 10)
91#define CCR_SMOD_2D (0x1 << 10)
92#define CCR_SMOD_FIFO (0x2 << 10)
93#define CCR_SMOD_EOBFIFO (0x3 << 10)
94#define CCR_MDIR_DEC (1<<9)
95#define CCR_MSEL_B (1<<8)
96#define CCR_DSIZ_32 (0x0 << 6)
97#define CCR_DSIZ_8 (0x1 << 6)
98#define CCR_DSIZ_16 (0x2 << 6)
99#define CCR_SSIZ_32 (0x0 << 4)
100#define CCR_SSIZ_8 (0x1 << 4)
101#define CCR_SSIZ_16 (0x2 << 4)
102#define CCR_REN (1<<3)
103#define CCR_RPT (1<<2)
104#define CCR_FRC (1<<1)
105#define CCR_CEN (1<<0)
106#define RTOR_EN (1<<15)
107#define RTOR_CLK (1<<14)
108#define RTOR_PSC (1<<13)
109
110enum imxdma_prep_type {
111 IMXDMA_DESC_MEMCPY,
112 IMXDMA_DESC_INTERLEAVED,
113 IMXDMA_DESC_SLAVE_SG,
114 IMXDMA_DESC_CYCLIC,
115};
116
117struct imx_dma_2d_config {
118 u16 xsr;
119 u16 ysr;
120 u16 wsr;
121 int count;
122};
123
124struct imxdma_desc {
125 struct list_head node;
126 struct dma_async_tx_descriptor desc;
127 enum dma_status status;
128 dma_addr_t src;
129 dma_addr_t dest;
130 size_t len;
131 enum dma_transfer_direction direction;
132 enum imxdma_prep_type type;
133 /* For memcpy and interleaved */
134 unsigned int config_port;
135 unsigned int config_mem;
136 /* For interleaved transfers */
137 unsigned int x;
138 unsigned int y;
139 unsigned int w;
140 /* For slave sg and cyclic */
141 struct scatterlist *sg;
142 unsigned int sgcount;
143};
144
32struct imxdma_channel { 145struct imxdma_channel {
146 int hw_chaining;
147 struct timer_list watchdog;
33 struct imxdma_engine *imxdma; 148 struct imxdma_engine *imxdma;
34 unsigned int channel; 149 unsigned int channel;
35 unsigned int imxdma_channel;
36 150
151 struct tasklet_struct dma_tasklet;
152 struct list_head ld_free;
153 struct list_head ld_queue;
154 struct list_head ld_active;
155 int descs_allocated;
37 enum dma_slave_buswidth word_size; 156 enum dma_slave_buswidth word_size;
38 dma_addr_t per_address; 157 dma_addr_t per_address;
39 u32 watermark_level; 158 u32 watermark_level;
40 struct dma_chan chan; 159 struct dma_chan chan;
41 spinlock_t lock;
42 struct dma_async_tx_descriptor desc; 160 struct dma_async_tx_descriptor desc;
43 dma_cookie_t last_completed;
44 enum dma_status status; 161 enum dma_status status;
45 int dma_request; 162 int dma_request;
46 struct scatterlist *sg_list; 163 struct scatterlist *sg_list;
164 u32 ccr_from_device;
165 u32 ccr_to_device;
166 bool enabled_2d;
167 int slot_2d;
47}; 168};
48 169
49#define MAX_DMA_CHANNELS 8
50
51struct imxdma_engine { 170struct imxdma_engine {
52 struct device *dev; 171 struct device *dev;
53 struct device_dma_parameters dma_parms; 172 struct device_dma_parameters dma_parms;
54 struct dma_device dma_device; 173 struct dma_device dma_device;
55 struct imxdma_channel channel[MAX_DMA_CHANNELS]; 174 void __iomem *base;
175 struct clk *dma_clk;
176 spinlock_t lock;
177 struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS];
178 struct imxdma_channel channel[IMX_DMA_CHANNELS];
56}; 179};
57 180
58static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan) 181static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
@@ -60,36 +183,418 @@ static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
60 return container_of(chan, struct imxdma_channel, chan); 183 return container_of(chan, struct imxdma_channel, chan);
61} 184}
62 185
63static void imxdma_handle(struct imxdma_channel *imxdmac) 186static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
187{
188 struct imxdma_desc *desc;
189
190 if (!list_empty(&imxdmac->ld_active)) {
191 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
192 node);
193 if (desc->type == IMXDMA_DESC_CYCLIC)
194 return true;
195 }
196 return false;
197}
198
199
200
201static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
202 unsigned offset)
203{
204 __raw_writel(val, imxdma->base + offset);
205}
206
207static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
208{
209 return __raw_readl(imxdma->base + offset);
210}
211
212static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
213{
214 if (cpu_is_mx27())
215 return imxdmac->hw_chaining;
216 else
217 return 0;
218}
219
220/*
221 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
222 */
223static inline int imxdma_sg_next(struct imxdma_desc *d)
64{ 224{
65 if (imxdmac->desc.callback) 225 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
66 imxdmac->desc.callback(imxdmac->desc.callback_param); 226 struct imxdma_engine *imxdma = imxdmac->imxdma;
67 imxdmac->last_completed = imxdmac->desc.cookie; 227 struct scatterlist *sg = d->sg;
228 unsigned long now;
229
230 now = min(d->len, sg->length);
231 if (d->len != IMX_DMA_LENGTH_LOOP)
232 d->len -= now;
233
234 if (d->direction == DMA_DEV_TO_MEM)
235 imx_dmav1_writel(imxdma, sg->dma_address,
236 DMA_DAR(imxdmac->channel));
237 else
238 imx_dmav1_writel(imxdma, sg->dma_address,
239 DMA_SAR(imxdmac->channel));
240
241 imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));
242
243 dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
244 "size 0x%08x\n", __func__, imxdmac->channel,
245 imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
246 imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
247 imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
248
249 return now;
68} 250}
69 251
70static void imxdma_irq_handler(int channel, void *data) 252static void imxdma_enable_hw(struct imxdma_desc *d)
71{ 253{
72 struct imxdma_channel *imxdmac = data; 254 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
255 struct imxdma_engine *imxdma = imxdmac->imxdma;
256 int channel = imxdmac->channel;
257 unsigned long flags;
258
259 dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
260
261 local_irq_save(flags);
262
263 imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
264 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
265 ~(1 << channel), DMA_DIMR);
266 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
267 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));
268
269 if ((cpu_is_mx21() || cpu_is_mx27()) &&
270 d->sg && imxdma_hw_chain(imxdmac)) {
271 d->sg = sg_next(d->sg);
272 if (d->sg) {
273 u32 tmp;
274 imxdma_sg_next(d);
275 tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
276 imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
277 DMA_CCR(channel));
278 }
279 }
280
281 local_irq_restore(flags);
282}
283
284static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
285{
286 struct imxdma_engine *imxdma = imxdmac->imxdma;
287 int channel = imxdmac->channel;
288 unsigned long flags;
289
290 dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);
291
292 if (imxdma_hw_chain(imxdmac))
293 del_timer(&imxdmac->watchdog);
294
295 local_irq_save(flags);
296 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
297 (1 << channel), DMA_DIMR);
298 imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
299 ~CCR_CEN, DMA_CCR(channel));
300 imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
301 local_irq_restore(flags);
302}
303
304static void imxdma_watchdog(unsigned long data)
305{
306 struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
307 struct imxdma_engine *imxdma = imxdmac->imxdma;
308 int channel = imxdmac->channel;
309
310 imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));
73 311
74 imxdmac->status = DMA_SUCCESS; 312 /* Tasklet watchdog error handler */
75 imxdma_handle(imxdmac); 313 tasklet_schedule(&imxdmac->dma_tasklet);
314 dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
315 imxdmac->channel);
76} 316}
77 317
78static void imxdma_err_handler(int channel, void *data, int error) 318static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
79{ 319{
80 struct imxdma_channel *imxdmac = data; 320 struct imxdma_engine *imxdma = dev_id;
321 unsigned int err_mask;
322 int i, disr;
323 int errcode;
324
325 disr = imx_dmav1_readl(imxdma, DMA_DISR);
326
327 err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
328 imx_dmav1_readl(imxdma, DMA_DRTOSR) |
329 imx_dmav1_readl(imxdma, DMA_DSESR) |
330 imx_dmav1_readl(imxdma, DMA_DBOSR);
331
332 if (!err_mask)
333 return IRQ_HANDLED;
334
335 imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);
336
337 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
338 if (!(err_mask & (1 << i)))
339 continue;
340 errcode = 0;
341
342 if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
343 imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
344 errcode |= IMX_DMA_ERR_BURST;
345 }
346 if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
347 imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
348 errcode |= IMX_DMA_ERR_REQUEST;
349 }
350 if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
351 imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
352 errcode |= IMX_DMA_ERR_TRANSFER;
353 }
354 if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
355 imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
356 errcode |= IMX_DMA_ERR_BUFFER;
357 }
358 /* Tasklet error handler */
359 tasklet_schedule(&imxdma->channel[i].dma_tasklet);
360
361 printk(KERN_WARNING
362 "DMA timeout on channel %d -%s%s%s%s\n", i,
363 errcode & IMX_DMA_ERR_BURST ? " burst" : "",
364 errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
365 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
366 errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
367 }
368 return IRQ_HANDLED;
369}
370
371static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
372{
373 struct imxdma_engine *imxdma = imxdmac->imxdma;
374 int chno = imxdmac->channel;
375 struct imxdma_desc *desc;
376
377 spin_lock(&imxdma->lock);
378 if (list_empty(&imxdmac->ld_active)) {
379 spin_unlock(&imxdma->lock);
380 goto out;
381 }
382
383 desc = list_first_entry(&imxdmac->ld_active,
384 struct imxdma_desc,
385 node);
386 spin_unlock(&imxdma->lock);
387
388 if (desc->sg) {
389 u32 tmp;
390 desc->sg = sg_next(desc->sg);
391
392 if (desc->sg) {
393 imxdma_sg_next(desc);
394
395 tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));
396
397 if (imxdma_hw_chain(imxdmac)) {
398 /* FIXME: The timeout should probably be
399 * configurable
400 */
401 mod_timer(&imxdmac->watchdog,
402 jiffies + msecs_to_jiffies(500));
403
404 tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
405 imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
406 } else {
407 imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
408 DMA_CCR(chno));
409 tmp |= CCR_CEN;
410 }
411
412 imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
413
414 if (imxdma_chan_is_doing_cyclic(imxdmac))
415 /* Tasklet progression */
416 tasklet_schedule(&imxdmac->dma_tasklet);
417
418 return;
419 }
420
421 if (imxdma_hw_chain(imxdmac)) {
422 del_timer(&imxdmac->watchdog);
423 return;
424 }
425 }
426
427out:
428 imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
429 /* Tasklet irq */
430 tasklet_schedule(&imxdmac->dma_tasklet);
431}
432
433static irqreturn_t dma_irq_handler(int irq, void *dev_id)
434{
435 struct imxdma_engine *imxdma = dev_id;
436 int i, disr;
437
438 if (cpu_is_mx21() || cpu_is_mx27())
439 imxdma_err_handler(irq, dev_id);
440
441 disr = imx_dmav1_readl(imxdma, DMA_DISR);
442
443 dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);
444
445 imx_dmav1_writel(imxdma, disr, DMA_DISR);
446 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
447 if (disr & (1 << i))
448 dma_irq_handle_channel(&imxdma->channel[i]);
449 }
450
451 return IRQ_HANDLED;
452}
453
454static int imxdma_xfer_desc(struct imxdma_desc *d)
455{
456 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
457 struct imxdma_engine *imxdma = imxdmac->imxdma;
458 unsigned long flags;
459 int slot = -1;
460 int i;
461
462 /* Configure and enable */
463 switch (d->type) {
464 case IMXDMA_DESC_INTERLEAVED:
465 /* Try to get a free 2D slot */
466 spin_lock_irqsave(&imxdma->lock, flags);
467 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
468 if ((imxdma->slots_2d[i].count > 0) &&
469 ((imxdma->slots_2d[i].xsr != d->x) ||
470 (imxdma->slots_2d[i].ysr != d->y) ||
471 (imxdma->slots_2d[i].wsr != d->w)))
472 continue;
473 slot = i;
474 break;
475 }
476 if (slot < 0)
477 return -EBUSY;
478
479 imxdma->slots_2d[slot].xsr = d->x;
480 imxdma->slots_2d[slot].ysr = d->y;
481 imxdma->slots_2d[slot].wsr = d->w;
482 imxdma->slots_2d[slot].count++;
483
484 imxdmac->slot_2d = slot;
485 imxdmac->enabled_2d = true;
486 spin_unlock_irqrestore(&imxdma->lock, flags);
487
488 if (slot == IMX_DMA_2D_SLOT_A) {
489 d->config_mem &= ~CCR_MSEL_B;
490 d->config_port &= ~CCR_MSEL_B;
491 imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
492 imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
493 imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
494 } else {
495 d->config_mem |= CCR_MSEL_B;
496 d->config_port |= CCR_MSEL_B;
497 imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
498 imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
499 imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
500 }
501 /*
502 * We fall-through here intentionally, since a 2D transfer is
503 * similar to MEMCPY just adding the 2D slot configuration.
504 */
505 case IMXDMA_DESC_MEMCPY:
506 imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
507 imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
508 imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
509 DMA_CCR(imxdmac->channel));
510
511 imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));
512
513 dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
514 "dma_length=%d\n", __func__, imxdmac->channel,
515 d->dest, d->src, d->len);
516
517 break;
518 /* Cyclic transfer is the same as slave_sg with special sg configuration. */
519 case IMXDMA_DESC_CYCLIC:
520 case IMXDMA_DESC_SLAVE_SG:
521 if (d->direction == DMA_DEV_TO_MEM) {
522 imx_dmav1_writel(imxdma, imxdmac->per_address,
523 DMA_SAR(imxdmac->channel));
524 imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
525 DMA_CCR(imxdmac->channel));
526
527 dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
528 "total length=%d dev_addr=0x%08x (dev2mem)\n",
529 __func__, imxdmac->channel, d->sg, d->sgcount,
530 d->len, imxdmac->per_address);
531 } else if (d->direction == DMA_MEM_TO_DEV) {
532 imx_dmav1_writel(imxdma, imxdmac->per_address,
533 DMA_DAR(imxdmac->channel));
534 imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
535 DMA_CCR(imxdmac->channel));
536
537 dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
538 "total length=%d dev_addr=0x%08x (mem2dev)\n",
539 __func__, imxdmac->channel, d->sg, d->sgcount,
540 d->len, imxdmac->per_address);
541 } else {
542 dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
543 __func__, imxdmac->channel);
544 return -EINVAL;
545 }
546
547 imxdma_sg_next(d);
81 548
82 imxdmac->status = DMA_ERROR; 549 break;
83 imxdma_handle(imxdmac); 550 default:
551 return -EINVAL;
552 }
553 imxdma_enable_hw(d);
554 return 0;
84} 555}
85 556
86static void imxdma_progression(int channel, void *data, 557static void imxdma_tasklet(unsigned long data)
87 struct scatterlist *sg)
88{ 558{
89 struct imxdma_channel *imxdmac = data; 559 struct imxdma_channel *imxdmac = (void *)data;
560 struct imxdma_engine *imxdma = imxdmac->imxdma;
561 struct imxdma_desc *desc;
90 562
91 imxdmac->status = DMA_SUCCESS; 563 spin_lock(&imxdma->lock);
92 imxdma_handle(imxdmac); 564
565 if (list_empty(&imxdmac->ld_active)) {
566 /* Someone might have called terminate all */
567 goto out;
568 }
569 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
570
571 if (desc->desc.callback)
572 desc->desc.callback(desc->desc.callback_param);
573
574 dma_cookie_complete(&desc->desc);
575
576 /* If we are dealing with a cyclic descriptor keep it on ld_active */
577 if (imxdma_chan_is_doing_cyclic(imxdmac))
578 goto out;
579
580 /* Free 2D slot if it was an interleaved transfer */
581 if (imxdmac->enabled_2d) {
582 imxdma->slots_2d[imxdmac->slot_2d].count--;
583 imxdmac->enabled_2d = false;
584 }
585
586 list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
587
588 if (!list_empty(&imxdmac->ld_queue)) {
589 desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
590 node);
591 list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
592 if (imxdma_xfer_desc(desc) < 0)
593 dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
594 __func__, imxdmac->channel);
595 }
596out:
597 spin_unlock(&imxdma->lock);
93} 598}
94 599
95static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 600static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -97,13 +602,18 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
97{ 602{
98 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 603 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
99 struct dma_slave_config *dmaengine_cfg = (void *)arg; 604 struct dma_slave_config *dmaengine_cfg = (void *)arg;
100 int ret; 605 struct imxdma_engine *imxdma = imxdmac->imxdma;
606 unsigned long flags;
101 unsigned int mode = 0; 607 unsigned int mode = 0;
102 608
103 switch (cmd) { 609 switch (cmd) {
104 case DMA_TERMINATE_ALL: 610 case DMA_TERMINATE_ALL:
105 imxdmac->status = DMA_ERROR; 611 imxdma_disable_hw(imxdmac);
106 imx_dma_disable(imxdmac->imxdma_channel); 612
613 spin_lock_irqsave(&imxdma->lock, flags);
614 list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
615 list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
616 spin_unlock_irqrestore(&imxdma->lock, flags);
107 return 0; 617 return 0;
108 case DMA_SLAVE_CONFIG: 618 case DMA_SLAVE_CONFIG:
109 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { 619 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
@@ -128,16 +638,22 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
128 mode = IMX_DMA_MEMSIZE_32; 638 mode = IMX_DMA_MEMSIZE_32;
129 break; 639 break;
130 } 640 }
131 ret = imx_dma_config_channel(imxdmac->imxdma_channel,
132 mode | IMX_DMA_TYPE_FIFO,
133 IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
134 imxdmac->dma_request, 1);
135
136 if (ret)
137 return ret;
138 641
139 imx_dma_config_burstlen(imxdmac->imxdma_channel, 642 imxdmac->hw_chaining = 1;
140 imxdmac->watermark_level * imxdmac->word_size); 643 if (!imxdma_hw_chain(imxdmac))
644 return -EINVAL;
645 imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
646 ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
647 CCR_REN;
648 imxdmac->ccr_to_device =
649 (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
650 ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
651 imx_dmav1_writel(imxdma, imxdmac->dma_request,
652 DMA_RSSR(imxdmac->channel));
653
654 /* Set burst length */
655 imx_dmav1_writel(imxdma, imxdmac->watermark_level *
656 imxdmac->word_size, DMA_BLR(imxdmac->channel));
141 657
142 return 0; 658 return 0;
143 default: 659 default:
@@ -151,43 +667,20 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan,
151 dma_cookie_t cookie, 667 dma_cookie_t cookie,
152 struct dma_tx_state *txstate) 668 struct dma_tx_state *txstate)
153{ 669{
154 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 670 return dma_cookie_status(chan, cookie, txstate);
155 dma_cookie_t last_used;
156 enum dma_status ret;
157
158 last_used = chan->cookie;
159
160 ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
161 dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);
162
163 return ret;
164}
165
166static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
167{
168 dma_cookie_t cookie = imxdma->chan.cookie;
169
170 if (++cookie < 0)
171 cookie = 1;
172
173 imxdma->chan.cookie = cookie;
174 imxdma->desc.cookie = cookie;
175
176 return cookie;
177} 671}
178 672
179static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) 673static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
180{ 674{
181 struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); 675 struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
676 struct imxdma_engine *imxdma = imxdmac->imxdma;
182 dma_cookie_t cookie; 677 dma_cookie_t cookie;
678 unsigned long flags;
183 679
184 spin_lock_irq(&imxdmac->lock); 680 spin_lock_irqsave(&imxdma->lock, flags);
185 681 list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
186 cookie = imxdma_assign_cookie(imxdmac); 682 cookie = dma_cookie_assign(tx);
187 683 spin_unlock_irqrestore(&imxdma->lock, flags);
188 imx_dma_enable(imxdmac->imxdma_channel);
189
190 spin_unlock_irq(&imxdmac->lock);
191 684
192 return cookie; 685 return cookie;
193} 686}
@@ -197,23 +690,52 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
197 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 690 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
198 struct imx_dma_data *data = chan->private; 691 struct imx_dma_data *data = chan->private;
199 692
200 imxdmac->dma_request = data->dma_request; 693 if (data != NULL)
694 imxdmac->dma_request = data->dma_request;
201 695
202 dma_async_tx_descriptor_init(&imxdmac->desc, chan); 696 while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
203 imxdmac->desc.tx_submit = imxdma_tx_submit; 697 struct imxdma_desc *desc;
204 /* txd.flags will be overwritten in prep funcs */
205 imxdmac->desc.flags = DMA_CTRL_ACK;
206 698
207 imxdmac->status = DMA_SUCCESS; 699 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
700 if (!desc)
701 break;
702 __memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor));
703 dma_async_tx_descriptor_init(&desc->desc, chan);
704 desc->desc.tx_submit = imxdma_tx_submit;
705 /* txd.flags will be overwritten in prep funcs */
706 desc->desc.flags = DMA_CTRL_ACK;
707 desc->status = DMA_SUCCESS;
708
709 list_add_tail(&desc->node, &imxdmac->ld_free);
710 imxdmac->descs_allocated++;
711 }
208 712
209 return 0; 713 if (!imxdmac->descs_allocated)
714 return -ENOMEM;
715
716 return imxdmac->descs_allocated;
210} 717}
211 718
212static void imxdma_free_chan_resources(struct dma_chan *chan) 719static void imxdma_free_chan_resources(struct dma_chan *chan)
213{ 720{
214 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 721 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
722 struct imxdma_engine *imxdma = imxdmac->imxdma;
723 struct imxdma_desc *desc, *_desc;
724 unsigned long flags;
725
726 spin_lock_irqsave(&imxdma->lock, flags);
727
728 imxdma_disable_hw(imxdmac);
729 list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
730 list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
215 731
216 imx_dma_disable(imxdmac->imxdma_channel); 732 spin_unlock_irqrestore(&imxdma->lock, flags);
733
734 list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
735 kfree(desc);
736 imxdmac->descs_allocated--;
737 }
738 INIT_LIST_HEAD(&imxdmac->ld_free);
217 739
218 if (imxdmac->sg_list) { 740 if (imxdmac->sg_list) {
219 kfree(imxdmac->sg_list); 741 kfree(imxdmac->sg_list);
@@ -224,27 +746,23 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
224static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( 746static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
225 struct dma_chan *chan, struct scatterlist *sgl, 747 struct dma_chan *chan, struct scatterlist *sgl,
226 unsigned int sg_len, enum dma_transfer_direction direction, 748 unsigned int sg_len, enum dma_transfer_direction direction,
227 unsigned long flags) 749 unsigned long flags, void *context)
228{ 750{
229 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 751 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
230 struct scatterlist *sg; 752 struct scatterlist *sg;
231 int i, ret, dma_length = 0; 753 int i, dma_length = 0;
232 unsigned int dmamode; 754 struct imxdma_desc *desc;
233 755
234 if (imxdmac->status == DMA_IN_PROGRESS) 756 if (list_empty(&imxdmac->ld_free) ||
757 imxdma_chan_is_doing_cyclic(imxdmac))
235 return NULL; 758 return NULL;
236 759
237 imxdmac->status = DMA_IN_PROGRESS; 760 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
238 761
239 for_each_sg(sgl, sg, sg_len, i) { 762 for_each_sg(sgl, sg, sg_len, i) {
240 dma_length += sg->length; 763 dma_length += sg->length;
241 } 764 }
242 765
243 if (direction == DMA_DEV_TO_MEM)
244 dmamode = DMA_MODE_READ;
245 else
246 dmamode = DMA_MODE_WRITE;
247
248 switch (imxdmac->word_size) { 766 switch (imxdmac->word_size) {
249 case DMA_SLAVE_BUSWIDTH_4_BYTES: 767 case DMA_SLAVE_BUSWIDTH_4_BYTES:
250 if (sgl->length & 3 || sgl->dma_address & 3) 768 if (sgl->length & 3 || sgl->dma_address & 3)
@@ -260,37 +778,41 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
260 return NULL; 778 return NULL;
261 } 779 }
262 780
263 ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, 781 desc->type = IMXDMA_DESC_SLAVE_SG;
264 dma_length, imxdmac->per_address, dmamode); 782 desc->sg = sgl;
265 if (ret) 783 desc->sgcount = sg_len;
266 return NULL; 784 desc->len = dma_length;
785 desc->direction = direction;
786 if (direction == DMA_DEV_TO_MEM) {
787 desc->src = imxdmac->per_address;
788 } else {
789 desc->dest = imxdmac->per_address;
790 }
791 desc->desc.callback = NULL;
792 desc->desc.callback_param = NULL;
267 793
268 return &imxdmac->desc; 794 return &desc->desc;
269} 795}
270 796
271static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( 797static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
272 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 798 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
273 size_t period_len, enum dma_transfer_direction direction) 799 size_t period_len, enum dma_transfer_direction direction,
800 void *context)
274{ 801{
275 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 802 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
276 struct imxdma_engine *imxdma = imxdmac->imxdma; 803 struct imxdma_engine *imxdma = imxdmac->imxdma;
277 int i, ret; 804 struct imxdma_desc *desc;
805 int i;
278 unsigned int periods = buf_len / period_len; 806 unsigned int periods = buf_len / period_len;
279 unsigned int dmamode;
280 807
281 dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", 808 dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
282 __func__, imxdmac->channel, buf_len, period_len); 809 __func__, imxdmac->channel, buf_len, period_len);
283 810
284 if (imxdmac->status == DMA_IN_PROGRESS) 811 if (list_empty(&imxdmac->ld_free) ||
812 imxdma_chan_is_doing_cyclic(imxdmac))
285 return NULL; 813 return NULL;
286 imxdmac->status = DMA_IN_PROGRESS;
287 814
288 ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel, 815 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
289 imxdma_progression);
290 if (ret) {
291 dev_err(imxdma->dev, "Failed to setup the DMA handler\n");
292 return NULL;
293 }
294 816
295 if (imxdmac->sg_list) 817 if (imxdmac->sg_list)
296 kfree(imxdmac->sg_list); 818 kfree(imxdmac->sg_list);
@@ -316,62 +838,221 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
316 imxdmac->sg_list[periods].page_link = 838 imxdmac->sg_list[periods].page_link =
317 ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; 839 ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
318 840
319 if (direction == DMA_DEV_TO_MEM) 841 desc->type = IMXDMA_DESC_CYCLIC;
320 dmamode = DMA_MODE_READ; 842 desc->sg = imxdmac->sg_list;
321 else 843 desc->sgcount = periods;
322 dmamode = DMA_MODE_WRITE; 844 desc->len = IMX_DMA_LENGTH_LOOP;
845 desc->direction = direction;
846 if (direction == DMA_DEV_TO_MEM) {
847 desc->src = imxdmac->per_address;
848 } else {
849 desc->dest = imxdmac->per_address;
850 }
851 desc->desc.callback = NULL;
852 desc->desc.callback_param = NULL;
853
854 return &desc->desc;
855}
856
857static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
858 struct dma_chan *chan, dma_addr_t dest,
859 dma_addr_t src, size_t len, unsigned long flags)
860{
861 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
862 struct imxdma_engine *imxdma = imxdmac->imxdma;
863 struct imxdma_desc *desc;
323 864
324 ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods, 865 dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
325 IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode); 866 __func__, imxdmac->channel, src, dest, len);
326 if (ret) 867
868 if (list_empty(&imxdmac->ld_free) ||
869 imxdma_chan_is_doing_cyclic(imxdmac))
327 return NULL; 870 return NULL;
328 871
329 return &imxdmac->desc; 872 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
873
874 desc->type = IMXDMA_DESC_MEMCPY;
875 desc->src = src;
876 desc->dest = dest;
877 desc->len = len;
878 desc->direction = DMA_MEM_TO_MEM;
879 desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
880 desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
881 desc->desc.callback = NULL;
882 desc->desc.callback_param = NULL;
883
884 return &desc->desc;
885}
886
887static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
888 struct dma_chan *chan, struct dma_interleaved_template *xt,
889 unsigned long flags)
890{
891 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
892 struct imxdma_engine *imxdma = imxdmac->imxdma;
893 struct imxdma_desc *desc;
894
895 dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
896 " src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
897 imxdmac->channel, xt->src_start, xt->dst_start,
898 xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
899 xt->numf, xt->frame_size);
900
901 if (list_empty(&imxdmac->ld_free) ||
902 imxdma_chan_is_doing_cyclic(imxdmac))
903 return NULL;
904
905 if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
906 return NULL;
907
908 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
909
910 desc->type = IMXDMA_DESC_INTERLEAVED;
911 desc->src = xt->src_start;
912 desc->dest = xt->dst_start;
913 desc->x = xt->sgl[0].size;
914 desc->y = xt->numf;
915 desc->w = xt->sgl[0].icg + desc->x;
916 desc->len = desc->x * desc->y;
917 desc->direction = DMA_MEM_TO_MEM;
918 desc->config_port = IMX_DMA_MEMSIZE_32;
919 desc->config_mem = IMX_DMA_MEMSIZE_32;
920 if (xt->src_sgl)
921 desc->config_mem |= IMX_DMA_TYPE_2D;
922 if (xt->dst_sgl)
923 desc->config_port |= IMX_DMA_TYPE_2D;
924 desc->desc.callback = NULL;
925 desc->desc.callback_param = NULL;
926
927 return &desc->desc;
330} 928}
331 929
332static void imxdma_issue_pending(struct dma_chan *chan) 930static void imxdma_issue_pending(struct dma_chan *chan)
333{ 931{
334 /* 932 struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
335 * Nothing to do. We only have a single descriptor 933 struct imxdma_engine *imxdma = imxdmac->imxdma;
336 */ 934 struct imxdma_desc *desc;
935 unsigned long flags;
936
937 spin_lock_irqsave(&imxdma->lock, flags);
938 if (list_empty(&imxdmac->ld_active) &&
939 !list_empty(&imxdmac->ld_queue)) {
940 desc = list_first_entry(&imxdmac->ld_queue,
941 struct imxdma_desc, node);
942
943 if (imxdma_xfer_desc(desc) < 0) {
944 dev_warn(imxdma->dev,
945 "%s: channel: %d couldn't issue DMA xfer\n",
946 __func__, imxdmac->channel);
947 } else {
948 list_move_tail(imxdmac->ld_queue.next,
949 &imxdmac->ld_active);
950 }
951 }
952 spin_unlock_irqrestore(&imxdma->lock, flags);
337} 953}
338 954
339static int __init imxdma_probe(struct platform_device *pdev) 955static int __init imxdma_probe(struct platform_device *pdev)
340{ 956 {
341 struct imxdma_engine *imxdma; 957 struct imxdma_engine *imxdma;
342 int ret, i; 958 int ret, i;
343 959
960
344 imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL); 961 imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
345 if (!imxdma) 962 if (!imxdma)
346 return -ENOMEM; 963 return -ENOMEM;
347 964
965 if (cpu_is_mx1()) {
966 imxdma->base = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
967 } else if (cpu_is_mx21()) {
968 imxdma->base = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
969 } else if (cpu_is_mx27()) {
970 imxdma->base = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
971 } else {
972 kfree(imxdma);
973 return 0;
974 }
975
976 imxdma->dma_clk = clk_get(NULL, "dma");
977 if (IS_ERR(imxdma->dma_clk))
978 return PTR_ERR(imxdma->dma_clk);
979 clk_enable(imxdma->dma_clk);
980
981 /* reset DMA module */
982 imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
983
984 if (cpu_is_mx1()) {
985 ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
986 if (ret) {
987 dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
988 kfree(imxdma);
989 return ret;
990 }
991
992 ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
993 if (ret) {
994 dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
995 free_irq(MX1_DMA_INT, NULL);
996 kfree(imxdma);
997 return ret;
998 }
999 }
1000
1001 /* enable DMA module */
1002 imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);
1003
1004 /* clear all interrupts */
1005 imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);
1006
1007 /* disable interrupts */
1008 imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);
1009
348 INIT_LIST_HEAD(&imxdma->dma_device.channels); 1010 INIT_LIST_HEAD(&imxdma->dma_device.channels);
349 1011
350 dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); 1012 dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
351 dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); 1013 dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
1014 dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
1015 dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);
1016
1017 /* Initialize 2D global parameters */
1018 for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
1019 imxdma->slots_2d[i].count = 0;
1020
1021 spin_lock_init(&imxdma->lock);
352 1022
353 /* Initialize channel parameters */ 1023 /* Initialize channel parameters */
354 for (i = 0; i < MAX_DMA_CHANNELS; i++) { 1024 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
355 struct imxdma_channel *imxdmac = &imxdma->channel[i]; 1025 struct imxdma_channel *imxdmac = &imxdma->channel[i];
356 1026
357 imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine", 1027 if (cpu_is_mx21() || cpu_is_mx27()) {
358 DMA_PRIO_MEDIUM); 1028 ret = request_irq(MX2x_INT_DMACH0 + i,
359 if ((int)imxdmac->channel < 0) { 1029 dma_irq_handler, 0, "DMA", imxdma);
360 ret = -ENODEV; 1030 if (ret) {
361 goto err_init; 1031 dev_warn(imxdma->dev, "Can't register IRQ %d "
1032 "for DMA channel %d\n",
1033 MX2x_INT_DMACH0 + i, i);
1034 goto err_init;
1035 }
1036 init_timer(&imxdmac->watchdog);
1037 imxdmac->watchdog.function = &imxdma_watchdog;
1038 imxdmac->watchdog.data = (unsigned long)imxdmac;
362 } 1039 }
363 1040
364 imx_dma_setup_handlers(imxdmac->imxdma_channel,
365 imxdma_irq_handler, imxdma_err_handler, imxdmac);
366
367 imxdmac->imxdma = imxdma; 1041 imxdmac->imxdma = imxdma;
368 spin_lock_init(&imxdmac->lock);
369 1042
1043 INIT_LIST_HEAD(&imxdmac->ld_queue);
1044 INIT_LIST_HEAD(&imxdmac->ld_free);
1045 INIT_LIST_HEAD(&imxdmac->ld_active);
1046
1047 tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
1048 (unsigned long)imxdmac);
370 imxdmac->chan.device = &imxdma->dma_device; 1049 imxdmac->chan.device = &imxdma->dma_device;
1050 dma_cookie_init(&imxdmac->chan);
371 imxdmac->channel = i; 1051 imxdmac->channel = i;
372 1052
373 /* Add the channel to the DMAC list */ 1053 /* Add the channel to the DMAC list */
374 list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels); 1054 list_add_tail(&imxdmac->chan.device_node,
1055 &imxdma->dma_device.channels);
375 } 1056 }
376 1057
377 imxdma->dev = &pdev->dev; 1058 imxdma->dev = &pdev->dev;
@@ -382,11 +1063,14 @@ static int __init imxdma_probe(struct platform_device *pdev)
382 imxdma->dma_device.device_tx_status = imxdma_tx_status; 1063 imxdma->dma_device.device_tx_status = imxdma_tx_status;
383 imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg; 1064 imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
384 imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; 1065 imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
1066 imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
1067 imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
385 imxdma->dma_device.device_control = imxdma_control; 1068 imxdma->dma_device.device_control = imxdma_control;
386 imxdma->dma_device.device_issue_pending = imxdma_issue_pending; 1069 imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
387 1070
388 platform_set_drvdata(pdev, imxdma); 1071 platform_set_drvdata(pdev, imxdma);
389 1072
1073 imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
390 imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms; 1074 imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
391 dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff); 1075 dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
392 1076
@@ -399,9 +1083,13 @@ static int __init imxdma_probe(struct platform_device *pdev)
399 return 0; 1083 return 0;
400 1084
401err_init: 1085err_init:
402 while (--i >= 0) { 1086
403 struct imxdma_channel *imxdmac = &imxdma->channel[i]; 1087 if (cpu_is_mx21() || cpu_is_mx27()) {
404 imx_dma_free(imxdmac->imxdma_channel); 1088 while (--i >= 0)
1089 free_irq(MX2x_INT_DMACH0 + i, NULL);
1090 } else if cpu_is_mx1() {
1091 free_irq(MX1_DMA_INT, NULL);
1092 free_irq(MX1_DMA_ERR, NULL);
405 } 1093 }
406 1094
407 kfree(imxdma); 1095 kfree(imxdma);
@@ -415,10 +1103,12 @@ static int __exit imxdma_remove(struct platform_device *pdev)
415 1103
416 dma_async_device_unregister(&imxdma->dma_device); 1104 dma_async_device_unregister(&imxdma->dma_device);
417 1105
418 for (i = 0; i < MAX_DMA_CHANNELS; i++) { 1106 if (cpu_is_mx21() || cpu_is_mx27()) {
419 struct imxdma_channel *imxdmac = &imxdma->channel[i]; 1107 for (i = 0; i < IMX_DMA_CHANNELS; i++)
420 1108 free_irq(MX2x_INT_DMACH0 + i, NULL);
421 imx_dma_free(imxdmac->imxdma_channel); 1109 } else if cpu_is_mx1() {
1110 free_irq(MX1_DMA_INT, NULL);
1111 free_irq(MX1_DMA_ERR, NULL);
422 } 1112 }
423 1113
424 kfree(imxdma); 1114 kfree(imxdma);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 63540d3e2153..d3e38e28bb6b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -20,6 +20,7 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/bitops.h>
23#include <linux/mm.h> 24#include <linux/mm.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25#include <linux/clk.h> 26#include <linux/clk.h>
@@ -41,6 +42,8 @@
41#include <mach/dma.h> 42#include <mach/dma.h>
42#include <mach/hardware.h> 43#include <mach/hardware.h>
43 44
45#include "dmaengine.h"
46
44/* SDMA registers */ 47/* SDMA registers */
45#define SDMA_H_C0PTR 0x000 48#define SDMA_H_C0PTR 0x000
46#define SDMA_H_INTR 0x004 49#define SDMA_H_INTR 0x004
@@ -259,19 +262,18 @@ struct sdma_channel {
259 unsigned int pc_from_device, pc_to_device; 262 unsigned int pc_from_device, pc_to_device;
260 unsigned long flags; 263 unsigned long flags;
261 dma_addr_t per_address; 264 dma_addr_t per_address;
262 u32 event_mask0, event_mask1; 265 unsigned long event_mask[2];
263 u32 watermark_level; 266 unsigned long watermark_level;
264 u32 shp_addr, per_addr; 267 u32 shp_addr, per_addr;
265 struct dma_chan chan; 268 struct dma_chan chan;
266 spinlock_t lock; 269 spinlock_t lock;
267 struct dma_async_tx_descriptor desc; 270 struct dma_async_tx_descriptor desc;
268 dma_cookie_t last_completed;
269 enum dma_status status; 271 enum dma_status status;
270 unsigned int chn_count; 272 unsigned int chn_count;
271 unsigned int chn_real_count; 273 unsigned int chn_real_count;
272}; 274};
273 275
274#define IMX_DMA_SG_LOOP (1 << 0) 276#define IMX_DMA_SG_LOOP BIT(0)
275 277
276#define MAX_DMA_CHANNELS 32 278#define MAX_DMA_CHANNELS 32
277#define MXC_SDMA_DEFAULT_PRIORITY 1 279#define MXC_SDMA_DEFAULT_PRIORITY 1
@@ -345,9 +347,9 @@ static const struct of_device_id sdma_dt_ids[] = {
345}; 347};
346MODULE_DEVICE_TABLE(of, sdma_dt_ids); 348MODULE_DEVICE_TABLE(of, sdma_dt_ids);
347 349
348#define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */ 350#define SDMA_H_CONFIG_DSPDMA BIT(12) /* indicates if the DSPDMA is used */
349#define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */ 351#define SDMA_H_CONFIG_RTD_PINS BIT(11) /* indicates if Real-Time Debug pins are enabled */
350#define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */ 352#define SDMA_H_CONFIG_ACR BIT(4) /* indicates if AHB freq /core freq = 2 or 1 */
351#define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/ 353#define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/
352 354
353static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) 355static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
@@ -362,37 +364,42 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
362{ 364{
363 struct sdma_engine *sdma = sdmac->sdma; 365 struct sdma_engine *sdma = sdmac->sdma;
364 int channel = sdmac->channel; 366 int channel = sdmac->channel;
365 u32 evt, mcu, dsp; 367 unsigned long evt, mcu, dsp;
366 368
367 if (event_override && mcu_override && dsp_override) 369 if (event_override && mcu_override && dsp_override)
368 return -EINVAL; 370 return -EINVAL;
369 371
370 evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR); 372 evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
371 mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR); 373 mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
372 dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR); 374 dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
373 375
374 if (dsp_override) 376 if (dsp_override)
375 dsp &= ~(1 << channel); 377 __clear_bit(channel, &dsp);
376 else 378 else
377 dsp |= (1 << channel); 379 __set_bit(channel, &dsp);
378 380
379 if (event_override) 381 if (event_override)
380 evt &= ~(1 << channel); 382 __clear_bit(channel, &evt);
381 else 383 else
382 evt |= (1 << channel); 384 __set_bit(channel, &evt);
383 385
384 if (mcu_override) 386 if (mcu_override)
385 mcu &= ~(1 << channel); 387 __clear_bit(channel, &mcu);
386 else 388 else
387 mcu |= (1 << channel); 389 __set_bit(channel, &mcu);
388 390
389 __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR); 391 writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
390 __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR); 392 writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
391 __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR); 393 writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
392 394
393 return 0; 395 return 0;
394} 396}
395 397
398static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
399{
400 writel(BIT(channel), sdma->regs + SDMA_H_START);
401}
402
396/* 403/*
397 * sdma_run_channel - run a channel and wait till it's done 404 * sdma_run_channel - run a channel and wait till it's done
398 */ 405 */
@@ -404,7 +411,7 @@ static int sdma_run_channel(struct sdma_channel *sdmac)
404 411
405 init_completion(&sdmac->done); 412 init_completion(&sdmac->done);
406 413
407 __raw_writel(1 << channel, sdma->regs + SDMA_H_START); 414 sdma_enable_channel(sdma, channel);
408 415
409 ret = wait_for_completion_timeout(&sdmac->done, HZ); 416 ret = wait_for_completion_timeout(&sdmac->done, HZ);
410 417
@@ -451,12 +458,12 @@ static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
451{ 458{
452 struct sdma_engine *sdma = sdmac->sdma; 459 struct sdma_engine *sdma = sdmac->sdma;
453 int channel = sdmac->channel; 460 int channel = sdmac->channel;
454 u32 val; 461 unsigned long val;
455 u32 chnenbl = chnenbl_ofs(sdma, event); 462 u32 chnenbl = chnenbl_ofs(sdma, event);
456 463
457 val = __raw_readl(sdma->regs + chnenbl); 464 val = readl_relaxed(sdma->regs + chnenbl);
458 val |= (1 << channel); 465 __set_bit(channel, &val);
459 __raw_writel(val, sdma->regs + chnenbl); 466 writel_relaxed(val, sdma->regs + chnenbl);
460} 467}
461 468
462static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) 469static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
@@ -464,11 +471,11 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
464 struct sdma_engine *sdma = sdmac->sdma; 471 struct sdma_engine *sdma = sdmac->sdma;
465 int channel = sdmac->channel; 472 int channel = sdmac->channel;
466 u32 chnenbl = chnenbl_ofs(sdma, event); 473 u32 chnenbl = chnenbl_ofs(sdma, event);
467 u32 val; 474 unsigned long val;
468 475
469 val = __raw_readl(sdma->regs + chnenbl); 476 val = readl_relaxed(sdma->regs + chnenbl);
470 val &= ~(1 << channel); 477 __clear_bit(channel, &val);
471 __raw_writel(val, sdma->regs + chnenbl); 478 writel_relaxed(val, sdma->regs + chnenbl);
472} 479}
473 480
474static void sdma_handle_channel_loop(struct sdma_channel *sdmac) 481static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
@@ -522,7 +529,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
522 else 529 else
523 sdmac->status = DMA_SUCCESS; 530 sdmac->status = DMA_SUCCESS;
524 531
525 sdmac->last_completed = sdmac->desc.cookie; 532 dma_cookie_complete(&sdmac->desc);
526 if (sdmac->desc.callback) 533 if (sdmac->desc.callback)
527 sdmac->desc.callback(sdmac->desc.callback_param); 534 sdmac->desc.callback(sdmac->desc.callback_param);
528} 535}
@@ -544,10 +551,10 @@ static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
544static irqreturn_t sdma_int_handler(int irq, void *dev_id) 551static irqreturn_t sdma_int_handler(int irq, void *dev_id)
545{ 552{
546 struct sdma_engine *sdma = dev_id; 553 struct sdma_engine *sdma = dev_id;
547 u32 stat; 554 unsigned long stat;
548 555
549 stat = __raw_readl(sdma->regs + SDMA_H_INTR); 556 stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
550 __raw_writel(stat, sdma->regs + SDMA_H_INTR); 557 writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
551 558
552 while (stat) { 559 while (stat) {
553 int channel = fls(stat) - 1; 560 int channel = fls(stat) - 1;
@@ -555,7 +562,7 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
555 562
556 mxc_sdma_handle_channel(sdmac); 563 mxc_sdma_handle_channel(sdmac);
557 564
558 stat &= ~(1 << channel); 565 __clear_bit(channel, &stat);
559 } 566 }
560 567
561 return IRQ_HANDLED; 568 return IRQ_HANDLED;
@@ -663,11 +670,11 @@ static int sdma_load_context(struct sdma_channel *sdmac)
663 return load_address; 670 return load_address;
664 671
665 dev_dbg(sdma->dev, "load_address = %d\n", load_address); 672 dev_dbg(sdma->dev, "load_address = %d\n", load_address);
666 dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level); 673 dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
667 dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr); 674 dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
668 dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr); 675 dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
669 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); 676 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
670 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); 677 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
671 678
672 mutex_lock(&sdma->channel_0_lock); 679 mutex_lock(&sdma->channel_0_lock);
673 680
@@ -677,8 +684,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
677 /* Send by context the event mask,base address for peripheral 684 /* Send by context the event mask,base address for peripheral
678 * and watermark level 685 * and watermark level
679 */ 686 */
680 context->gReg[0] = sdmac->event_mask1; 687 context->gReg[0] = sdmac->event_mask[1];
681 context->gReg[1] = sdmac->event_mask0; 688 context->gReg[1] = sdmac->event_mask[0];
682 context->gReg[2] = sdmac->per_addr; 689 context->gReg[2] = sdmac->per_addr;
683 context->gReg[6] = sdmac->shp_addr; 690 context->gReg[6] = sdmac->shp_addr;
684 context->gReg[7] = sdmac->watermark_level; 691 context->gReg[7] = sdmac->watermark_level;
@@ -701,7 +708,7 @@ static void sdma_disable_channel(struct sdma_channel *sdmac)
701 struct sdma_engine *sdma = sdmac->sdma; 708 struct sdma_engine *sdma = sdmac->sdma;
702 int channel = sdmac->channel; 709 int channel = sdmac->channel;
703 710
704 __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP); 711 writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
705 sdmac->status = DMA_ERROR; 712 sdmac->status = DMA_ERROR;
706} 713}
707 714
@@ -711,13 +718,13 @@ static int sdma_config_channel(struct sdma_channel *sdmac)
711 718
712 sdma_disable_channel(sdmac); 719 sdma_disable_channel(sdmac);
713 720
714 sdmac->event_mask0 = 0; 721 sdmac->event_mask[0] = 0;
715 sdmac->event_mask1 = 0; 722 sdmac->event_mask[1] = 0;
716 sdmac->shp_addr = 0; 723 sdmac->shp_addr = 0;
717 sdmac->per_addr = 0; 724 sdmac->per_addr = 0;
718 725
719 if (sdmac->event_id0) { 726 if (sdmac->event_id0) {
720 if (sdmac->event_id0 > 32) 727 if (sdmac->event_id0 >= sdmac->sdma->num_events)
721 return -EINVAL; 728 return -EINVAL;
722 sdma_event_enable(sdmac, sdmac->event_id0); 729 sdma_event_enable(sdmac, sdmac->event_id0);
723 } 730 }
@@ -740,15 +747,14 @@ static int sdma_config_channel(struct sdma_channel *sdmac)
740 (sdmac->peripheral_type != IMX_DMATYPE_DSP)) { 747 (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
741 /* Handle multiple event channels differently */ 748 /* Handle multiple event channels differently */
742 if (sdmac->event_id1) { 749 if (sdmac->event_id1) {
743 sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32); 750 sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
744 if (sdmac->event_id1 > 31) 751 if (sdmac->event_id1 > 31)
745 sdmac->watermark_level |= 1 << 31; 752 __set_bit(31, &sdmac->watermark_level);
746 sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32); 753 sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
747 if (sdmac->event_id0 > 31) 754 if (sdmac->event_id0 > 31)
748 sdmac->watermark_level |= 1 << 30; 755 __set_bit(30, &sdmac->watermark_level);
749 } else { 756 } else {
750 sdmac->event_mask0 = 1 << sdmac->event_id0; 757 __set_bit(sdmac->event_id0, sdmac->event_mask);
751 sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32);
752 } 758 }
753 /* Watermark Level */ 759 /* Watermark Level */
754 sdmac->watermark_level |= sdmac->watermark_level; 760 sdmac->watermark_level |= sdmac->watermark_level;
@@ -774,7 +780,7 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac,
774 return -EINVAL; 780 return -EINVAL;
775 } 781 }
776 782
777 __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); 783 writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
778 784
779 return 0; 785 return 0;
780} 786}
@@ -796,8 +802,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
796 sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; 802 sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
797 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 803 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
798 804
799 clk_enable(sdma->clk);
800
801 sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); 805 sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
802 806
803 init_completion(&sdmac->done); 807 init_completion(&sdmac->done);
@@ -810,24 +814,6 @@ out:
810 return ret; 814 return ret;
811} 815}
812 816
813static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
814{
815 __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
816}
817
818static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
819{
820 dma_cookie_t cookie = sdmac->chan.cookie;
821
822 if (++cookie < 0)
823 cookie = 1;
824
825 sdmac->chan.cookie = cookie;
826 sdmac->desc.cookie = cookie;
827
828 return cookie;
829}
830
831static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) 817static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
832{ 818{
833 return container_of(chan, struct sdma_channel, chan); 819 return container_of(chan, struct sdma_channel, chan);
@@ -837,14 +823,11 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
837{ 823{
838 unsigned long flags; 824 unsigned long flags;
839 struct sdma_channel *sdmac = to_sdma_chan(tx->chan); 825 struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
840 struct sdma_engine *sdma = sdmac->sdma;
841 dma_cookie_t cookie; 826 dma_cookie_t cookie;
842 827
843 spin_lock_irqsave(&sdmac->lock, flags); 828 spin_lock_irqsave(&sdmac->lock, flags);
844 829
845 cookie = sdma_assign_cookie(sdmac); 830 cookie = dma_cookie_assign(tx);
846
847 sdma_enable_channel(sdma, sdmac->channel);
848 831
849 spin_unlock_irqrestore(&sdmac->lock, flags); 832 spin_unlock_irqrestore(&sdmac->lock, flags);
850 833
@@ -875,11 +858,14 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
875 858
876 sdmac->peripheral_type = data->peripheral_type; 859 sdmac->peripheral_type = data->peripheral_type;
877 sdmac->event_id0 = data->dma_request; 860 sdmac->event_id0 = data->dma_request;
878 ret = sdma_set_channel_priority(sdmac, prio); 861
862 clk_enable(sdmac->sdma->clk);
863
864 ret = sdma_request_channel(sdmac);
879 if (ret) 865 if (ret)
880 return ret; 866 return ret;
881 867
882 ret = sdma_request_channel(sdmac); 868 ret = sdma_set_channel_priority(sdmac, prio);
883 if (ret) 869 if (ret)
884 return ret; 870 return ret;
885 871
@@ -916,7 +902,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
916static struct dma_async_tx_descriptor *sdma_prep_slave_sg( 902static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
917 struct dma_chan *chan, struct scatterlist *sgl, 903 struct dma_chan *chan, struct scatterlist *sgl,
918 unsigned int sg_len, enum dma_transfer_direction direction, 904 unsigned int sg_len, enum dma_transfer_direction direction,
919 unsigned long flags) 905 unsigned long flags, void *context)
920{ 906{
921 struct sdma_channel *sdmac = to_sdma_chan(chan); 907 struct sdma_channel *sdmac = to_sdma_chan(chan);
922 struct sdma_engine *sdma = sdmac->sdma; 908 struct sdma_engine *sdma = sdmac->sdma;
@@ -1014,7 +1000,8 @@ err_out:
1014 1000
1015static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( 1001static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1016 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 1002 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1017 size_t period_len, enum dma_transfer_direction direction) 1003 size_t period_len, enum dma_transfer_direction direction,
1004 void *context)
1018{ 1005{
1019 struct sdma_channel *sdmac = to_sdma_chan(chan); 1006 struct sdma_channel *sdmac = to_sdma_chan(chan);
1020 struct sdma_engine *sdma = sdmac->sdma; 1007 struct sdma_engine *sdma = sdmac->sdma;
@@ -1128,7 +1115,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
1128 1115
1129 last_used = chan->cookie; 1116 last_used = chan->cookie;
1130 1117
1131 dma_set_tx_state(txstate, sdmac->last_completed, last_used, 1118 dma_set_tx_state(txstate, chan->completed_cookie, last_used,
1132 sdmac->chn_count - sdmac->chn_real_count); 1119 sdmac->chn_count - sdmac->chn_real_count);
1133 1120
1134 return sdmac->status; 1121 return sdmac->status;
@@ -1136,9 +1123,11 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
1136 1123
1137static void sdma_issue_pending(struct dma_chan *chan) 1124static void sdma_issue_pending(struct dma_chan *chan)
1138{ 1125{
1139 /* 1126 struct sdma_channel *sdmac = to_sdma_chan(chan);
1140 * Nothing to do. We only have a single descriptor 1127 struct sdma_engine *sdma = sdmac->sdma;
1141 */ 1128
1129 if (sdmac->status == DMA_IN_PROGRESS)
1130 sdma_enable_channel(sdma, sdmac->channel);
1142} 1131}
1143 1132
1144#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 1133#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
@@ -1230,7 +1219,7 @@ static int __init sdma_init(struct sdma_engine *sdma)
1230 clk_enable(sdma->clk); 1219 clk_enable(sdma->clk);
1231 1220
1232 /* Be sure SDMA has not started yet */ 1221 /* Be sure SDMA has not started yet */
1233 __raw_writel(0, sdma->regs + SDMA_H_C0PTR); 1222 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
1234 1223
1235 sdma->channel_control = dma_alloc_coherent(NULL, 1224 sdma->channel_control = dma_alloc_coherent(NULL,
1236 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + 1225 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
@@ -1253,11 +1242,11 @@ static int __init sdma_init(struct sdma_engine *sdma)
1253 1242
1254 /* disable all channels */ 1243 /* disable all channels */
1255 for (i = 0; i < sdma->num_events; i++) 1244 for (i = 0; i < sdma->num_events; i++)
1256 __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i)); 1245 writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
1257 1246
1258 /* All channels have priority 0 */ 1247 /* All channels have priority 0 */
1259 for (i = 0; i < MAX_DMA_CHANNELS; i++) 1248 for (i = 0; i < MAX_DMA_CHANNELS; i++)
1260 __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); 1249 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1261 1250
1262 ret = sdma_request_channel(&sdma->channel[0]); 1251 ret = sdma_request_channel(&sdma->channel[0]);
1263 if (ret) 1252 if (ret)
@@ -1266,16 +1255,16 @@ static int __init sdma_init(struct sdma_engine *sdma)
1266 sdma_config_ownership(&sdma->channel[0], false, true, false); 1255 sdma_config_ownership(&sdma->channel[0], false, true, false);
1267 1256
1268 /* Set Command Channel (Channel Zero) */ 1257 /* Set Command Channel (Channel Zero) */
1269 __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR); 1258 writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
1270 1259
1271 /* Set bits of CONFIG register but with static context switching */ 1260 /* Set bits of CONFIG register but with static context switching */
1272 /* FIXME: Check whether to set ACR bit depending on clock ratios */ 1261 /* FIXME: Check whether to set ACR bit depending on clock ratios */
1273 __raw_writel(0, sdma->regs + SDMA_H_CONFIG); 1262 writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
1274 1263
1275 __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR); 1264 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1276 1265
1277 /* Set bits of CONFIG register with given context switching mode */ 1266 /* Set bits of CONFIG register with given context switching mode */
1278 __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); 1267 writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
1279 1268
1280 /* Initializes channel's priorities */ 1269 /* Initializes channel's priorities */
1281 sdma_set_channel_priority(&sdma->channel[0], 7); 1270 sdma_set_channel_priority(&sdma->channel[0], 7);
@@ -1367,6 +1356,7 @@ static int __init sdma_probe(struct platform_device *pdev)
1367 spin_lock_init(&sdmac->lock); 1356 spin_lock_init(&sdmac->lock);
1368 1357
1369 sdmac->chan.device = &sdma->dma_device; 1358 sdmac->chan.device = &sdma->dma_device;
1359 dma_cookie_init(&sdmac->chan);
1370 sdmac->channel = i; 1360 sdmac->channel = i;
1371 1361
1372 /* 1362 /*
@@ -1387,7 +1377,9 @@ static int __init sdma_probe(struct platform_device *pdev)
1387 sdma_add_scripts(sdma, pdata->script_addrs); 1377 sdma_add_scripts(sdma, pdata->script_addrs);
1388 1378
1389 if (pdata) { 1379 if (pdata) {
1390 sdma_get_firmware(sdma, pdata->fw_name); 1380 ret = sdma_get_firmware(sdma, pdata->fw_name);
1381 if (ret)
1382 dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
1391 } else { 1383 } else {
1392 /* 1384 /*
1393 * Because that device tree does not encode ROM script address, 1385 * Because that device tree does not encode ROM script address,
@@ -1396,15 +1388,12 @@ static int __init sdma_probe(struct platform_device *pdev)
1396 */ 1388 */
1397 ret = of_property_read_string(np, "fsl,sdma-ram-script-name", 1389 ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
1398 &fw_name); 1390 &fw_name);
1399 if (ret) { 1391 if (ret)
1400 dev_err(&pdev->dev, "failed to get firmware name\n"); 1392 dev_warn(&pdev->dev, "failed to get firmware name\n");
1401 goto err_init; 1393 else {
1402 } 1394 ret = sdma_get_firmware(sdma, fw_name);
1403 1395 if (ret)
1404 ret = sdma_get_firmware(sdma, fw_name); 1396 dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
1405 if (ret) {
1406 dev_err(&pdev->dev, "failed to get firmware\n");
1407 goto err_init;
1408 } 1397 }
1409 } 1398 }
1410 1399
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 74f70aadf9e4..c900ca7aaec4 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -29,6 +29,8 @@
29#include <linux/intel_mid_dma.h> 29#include <linux/intel_mid_dma.h>
30#include <linux/module.h> 30#include <linux/module.h>
31 31
32#include "dmaengine.h"
33
32#define MAX_CHAN 4 /*max ch across controllers*/ 34#define MAX_CHAN 4 /*max ch across controllers*/
33#include "intel_mid_dma_regs.h" 35#include "intel_mid_dma_regs.h"
34 36
@@ -288,7 +290,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
288 struct intel_mid_dma_lli *llitem; 290 struct intel_mid_dma_lli *llitem;
289 void *param_txd = NULL; 291 void *param_txd = NULL;
290 292
291 midc->completed = txd->cookie; 293 dma_cookie_complete(txd);
292 callback_txd = txd->callback; 294 callback_txd = txd->callback;
293 param_txd = txd->callback_param; 295 param_txd = txd->callback_param;
294 296
@@ -434,14 +436,7 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
434 dma_cookie_t cookie; 436 dma_cookie_t cookie;
435 437
436 spin_lock_bh(&midc->lock); 438 spin_lock_bh(&midc->lock);
437 cookie = midc->chan.cookie; 439 cookie = dma_cookie_assign(tx);
438
439 if (++cookie < 0)
440 cookie = 1;
441
442 midc->chan.cookie = cookie;
443 desc->txd.cookie = cookie;
444
445 440
446 if (list_empty(&midc->active_list)) 441 if (list_empty(&midc->active_list))
447 list_add_tail(&desc->desc_node, &midc->active_list); 442 list_add_tail(&desc->desc_node, &midc->active_list);
@@ -482,31 +477,18 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
482 dma_cookie_t cookie, 477 dma_cookie_t cookie,
483 struct dma_tx_state *txstate) 478 struct dma_tx_state *txstate)
484{ 479{
485 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 480 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
486 dma_cookie_t last_used; 481 enum dma_status ret;
487 dma_cookie_t last_complete;
488 int ret;
489 482
490 last_complete = midc->completed; 483 ret = dma_cookie_status(chan, cookie, txstate);
491 last_used = chan->cookie;
492
493 ret = dma_async_is_complete(cookie, last_complete, last_used);
494 if (ret != DMA_SUCCESS) { 484 if (ret != DMA_SUCCESS) {
495 spin_lock_bh(&midc->lock); 485 spin_lock_bh(&midc->lock);
496 midc_scan_descriptors(to_middma_device(chan->device), midc); 486 midc_scan_descriptors(to_middma_device(chan->device), midc);
497 spin_unlock_bh(&midc->lock); 487 spin_unlock_bh(&midc->lock);
498 488
499 last_complete = midc->completed; 489 ret = dma_cookie_status(chan, cookie, txstate);
500 last_used = chan->cookie;
501
502 ret = dma_async_is_complete(cookie, last_complete, last_used);
503 } 490 }
504 491
505 if (txstate) {
506 txstate->last = last_complete;
507 txstate->used = last_used;
508 txstate->residue = 0;
509 }
510 return ret; 492 return ret;
511} 493}
512 494
@@ -732,13 +714,14 @@ err_desc_get:
732 * @sg_len: length of sg txn 714 * @sg_len: length of sg txn
733 * @direction: DMA transfer dirtn 715 * @direction: DMA transfer dirtn
734 * @flags: DMA flags 716 * @flags: DMA flags
717 * @context: transfer context (ignored)
735 * 718 *
736 * Prepares LLI based periphral transfer 719 * Prepares LLI based periphral transfer
737 */ 720 */
738static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 721static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
739 struct dma_chan *chan, struct scatterlist *sgl, 722 struct dma_chan *chan, struct scatterlist *sgl,
740 unsigned int sg_len, enum dma_transfer_direction direction, 723 unsigned int sg_len, enum dma_transfer_direction direction,
741 unsigned long flags) 724 unsigned long flags, void *context)
742{ 725{
743 struct intel_mid_dma_chan *midc = NULL; 726 struct intel_mid_dma_chan *midc = NULL;
744 struct intel_mid_dma_slave *mids = NULL; 727 struct intel_mid_dma_slave *mids = NULL;
@@ -832,7 +815,6 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
832 /*trying to free ch in use!!!!!*/ 815 /*trying to free ch in use!!!!!*/
833 pr_err("ERR_MDMA: trying to free ch in use\n"); 816 pr_err("ERR_MDMA: trying to free ch in use\n");
834 } 817 }
835 pm_runtime_put(&mid->pdev->dev);
836 spin_lock_bh(&midc->lock); 818 spin_lock_bh(&midc->lock);
837 midc->descs_allocated = 0; 819 midc->descs_allocated = 0;
838 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 820 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
@@ -853,6 +835,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
853 /* Disable CH interrupts */ 835 /* Disable CH interrupts */
854 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); 836 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
855 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); 837 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
838 pm_runtime_put(&mid->pdev->dev);
856} 839}
857 840
858/** 841/**
@@ -886,7 +869,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
886 pm_runtime_put(&mid->pdev->dev); 869 pm_runtime_put(&mid->pdev->dev);
887 return -EIO; 870 return -EIO;
888 } 871 }
889 midc->completed = chan->cookie = 1; 872 dma_cookie_init(chan);
890 873
891 spin_lock_bh(&midc->lock); 874 spin_lock_bh(&midc->lock);
892 while (midc->descs_allocated < DESCS_PER_CHANNEL) { 875 while (midc->descs_allocated < DESCS_PER_CHANNEL) {
@@ -1056,7 +1039,8 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
1056 } 1039 }
1057 err_status &= mid->intr_mask; 1040 err_status &= mid->intr_mask;
1058 if (err_status) { 1041 if (err_status) {
1059 iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR); 1042 iowrite32((err_status << INT_MASK_WE),
1043 mid->dma_base + MASK_ERR);
1060 call_tasklet = 1; 1044 call_tasklet = 1;
1061 } 1045 }
1062 if (call_tasklet) 1046 if (call_tasklet)
@@ -1118,7 +1102,7 @@ static int mid_setup_dma(struct pci_dev *pdev)
1118 struct intel_mid_dma_chan *midch = &dma->ch[i]; 1102 struct intel_mid_dma_chan *midch = &dma->ch[i];
1119 1103
1120 midch->chan.device = &dma->common; 1104 midch->chan.device = &dma->common;
1121 midch->chan.cookie = 1; 1105 dma_cookie_init(&midch->chan);
1122 midch->ch_id = dma->chan_base + i; 1106 midch->ch_id = dma->chan_base + i;
1123 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); 1107 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
1124 1108
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index c83d35b97bd8..1bfa9268feaf 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -165,7 +165,6 @@ union intel_mid_dma_cfg_hi {
165 * @dma_base: MMIO register space DMA engine base pointer 165 * @dma_base: MMIO register space DMA engine base pointer
166 * @ch_id: DMA channel id 166 * @ch_id: DMA channel id
167 * @lock: channel spinlock 167 * @lock: channel spinlock
168 * @completed: DMA cookie
169 * @active_list: current active descriptors 168 * @active_list: current active descriptors
170 * @queue: current queued up descriptors 169 * @queue: current queued up descriptors
171 * @free_list: current free descriptors 170 * @free_list: current free descriptors
@@ -183,7 +182,6 @@ struct intel_mid_dma_chan {
183 void __iomem *dma_base; 182 void __iomem *dma_base;
184 int ch_id; 183 int ch_id;
185 spinlock_t lock; 184 spinlock_t lock;
186 dma_cookie_t completed;
187 struct list_head active_list; 185 struct list_head active_list;
188 struct list_head queue; 186 struct list_head queue;
189 struct list_head free_list; 187 struct list_head free_list;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index a4d6cb0c0343..31493d80e0e9 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -40,6 +40,8 @@
40#include "registers.h" 40#include "registers.h"
41#include "hw.h" 41#include "hw.h"
42 42
43#include "../dmaengine.h"
44
43int ioat_pending_level = 4; 45int ioat_pending_level = 4;
44module_param(ioat_pending_level, int, 0644); 46module_param(ioat_pending_level, int, 0644);
45MODULE_PARM_DESC(ioat_pending_level, 47MODULE_PARM_DESC(ioat_pending_level,
@@ -107,6 +109,7 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
107 chan->reg_base = device->reg_base + (0x80 * (idx + 1)); 109 chan->reg_base = device->reg_base + (0x80 * (idx + 1));
108 spin_lock_init(&chan->cleanup_lock); 110 spin_lock_init(&chan->cleanup_lock);
109 chan->common.device = dma; 111 chan->common.device = dma;
112 dma_cookie_init(&chan->common);
110 list_add_tail(&chan->common.device_node, &dma->channels); 113 list_add_tail(&chan->common.device_node, &dma->channels);
111 device->idx[idx] = chan; 114 device->idx[idx] = chan;
112 init_timer(&chan->timer); 115 init_timer(&chan->timer);
@@ -235,12 +238,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
235 238
236 spin_lock_bh(&ioat->desc_lock); 239 spin_lock_bh(&ioat->desc_lock);
237 /* cookie incr and addition to used_list must be atomic */ 240 /* cookie incr and addition to used_list must be atomic */
238 cookie = c->cookie; 241 cookie = dma_cookie_assign(tx);
239 cookie++;
240 if (cookie < 0)
241 cookie = 1;
242 c->cookie = cookie;
243 tx->cookie = cookie;
244 dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); 242 dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
245 243
246 /* write address into NextDescriptor field of last desc in chain */ 244 /* write address into NextDescriptor field of last desc in chain */
@@ -603,8 +601,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
603 */ 601 */
604 dump_desc_dbg(ioat, desc); 602 dump_desc_dbg(ioat, desc);
605 if (tx->cookie) { 603 if (tx->cookie) {
606 chan->completed_cookie = tx->cookie; 604 dma_cookie_complete(tx);
607 tx->cookie = 0;
608 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); 605 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
609 ioat->active -= desc->hw->tx_cnt; 606 ioat->active -= desc->hw->tx_cnt;
610 if (tx->callback) { 607 if (tx->callback) {
@@ -733,13 +730,15 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
733{ 730{
734 struct ioat_chan_common *chan = to_chan_common(c); 731 struct ioat_chan_common *chan = to_chan_common(c);
735 struct ioatdma_device *device = chan->device; 732 struct ioatdma_device *device = chan->device;
733 enum dma_status ret;
736 734
737 if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS) 735 ret = dma_cookie_status(c, cookie, txstate);
738 return DMA_SUCCESS; 736 if (ret == DMA_SUCCESS)
737 return ret;
739 738
740 device->cleanup_fn((unsigned long) c); 739 device->cleanup_fn((unsigned long) c);
741 740
742 return ioat_tx_status(c, cookie, txstate); 741 return dma_cookie_status(c, cookie, txstate);
743} 742}
744 743
745static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) 744static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 5216c8a92a21..c7888bccd974 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -90,7 +90,6 @@ struct ioat_chan_common {
90 void __iomem *reg_base; 90 void __iomem *reg_base;
91 unsigned long last_completion; 91 unsigned long last_completion;
92 spinlock_t cleanup_lock; 92 spinlock_t cleanup_lock;
93 dma_cookie_t completed_cookie;
94 unsigned long state; 93 unsigned long state;
95 #define IOAT_COMPLETION_PENDING 0 94 #define IOAT_COMPLETION_PENDING 0
96 #define IOAT_COMPLETION_ACK 1 95 #define IOAT_COMPLETION_ACK 1
@@ -143,28 +142,6 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
143 return container_of(chan, struct ioat_dma_chan, base); 142 return container_of(chan, struct ioat_dma_chan, base);
144} 143}
145 144
146/**
147 * ioat_tx_status - poll the status of an ioat transaction
148 * @c: channel handle
149 * @cookie: transaction identifier
150 * @txstate: if set, updated with the transaction state
151 */
152static inline enum dma_status
153ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
154 struct dma_tx_state *txstate)
155{
156 struct ioat_chan_common *chan = to_chan_common(c);
157 dma_cookie_t last_used;
158 dma_cookie_t last_complete;
159
160 last_used = c->cookie;
161 last_complete = chan->completed_cookie;
162
163 dma_set_tx_state(txstate, last_complete, last_used, 0);
164
165 return dma_async_is_complete(cookie, last_complete, last_used);
166}
167
168/* wrapper around hardware descriptor format + additional software fields */ 145/* wrapper around hardware descriptor format + additional software fields */
169 146
170/** 147/**
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5d65f8377971..e8e110ff3d96 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -41,6 +41,8 @@
41#include "registers.h" 41#include "registers.h"
42#include "hw.h" 42#include "hw.h"
43 43
44#include "../dmaengine.h"
45
44int ioat_ring_alloc_order = 8; 46int ioat_ring_alloc_order = 8;
45module_param(ioat_ring_alloc_order, int, 0644); 47module_param(ioat_ring_alloc_order, int, 0644);
46MODULE_PARM_DESC(ioat_ring_alloc_order, 48MODULE_PARM_DESC(ioat_ring_alloc_order,
@@ -147,8 +149,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
147 dump_desc_dbg(ioat, desc); 149 dump_desc_dbg(ioat, desc);
148 if (tx->cookie) { 150 if (tx->cookie) {
149 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); 151 ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
150 chan->completed_cookie = tx->cookie; 152 dma_cookie_complete(tx);
151 tx->cookie = 0;
152 if (tx->callback) { 153 if (tx->callback) {
153 tx->callback(tx->callback_param); 154 tx->callback(tx->callback_param);
154 tx->callback = NULL; 155 tx->callback = NULL;
@@ -398,13 +399,9 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
398 struct dma_chan *c = tx->chan; 399 struct dma_chan *c = tx->chan;
399 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 400 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
400 struct ioat_chan_common *chan = &ioat->base; 401 struct ioat_chan_common *chan = &ioat->base;
401 dma_cookie_t cookie = c->cookie; 402 dma_cookie_t cookie;
402 403
403 cookie++; 404 cookie = dma_cookie_assign(tx);
404 if (cookie < 0)
405 cookie = 1;
406 tx->cookie = cookie;
407 c->cookie = cookie;
408 dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); 405 dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
409 406
410 if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) 407 if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index f519c93a61e7..2c4476c0e405 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -61,6 +61,7 @@
61#include <linux/dmaengine.h> 61#include <linux/dmaengine.h>
62#include <linux/dma-mapping.h> 62#include <linux/dma-mapping.h>
63#include <linux/prefetch.h> 63#include <linux/prefetch.h>
64#include "../dmaengine.h"
64#include "registers.h" 65#include "registers.h"
65#include "hw.h" 66#include "hw.h"
66#include "dma.h" 67#include "dma.h"
@@ -277,9 +278,8 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
277 dump_desc_dbg(ioat, desc); 278 dump_desc_dbg(ioat, desc);
278 tx = &desc->txd; 279 tx = &desc->txd;
279 if (tx->cookie) { 280 if (tx->cookie) {
280 chan->completed_cookie = tx->cookie; 281 dma_cookie_complete(tx);
281 ioat3_dma_unmap(ioat, desc, idx + i); 282 ioat3_dma_unmap(ioat, desc, idx + i);
282 tx->cookie = 0;
283 if (tx->callback) { 283 if (tx->callback) {
284 tx->callback(tx->callback_param); 284 tx->callback(tx->callback_param);
285 tx->callback = NULL; 285 tx->callback = NULL;
@@ -411,13 +411,15 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
411 struct dma_tx_state *txstate) 411 struct dma_tx_state *txstate)
412{ 412{
413 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 413 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
414 enum dma_status ret;
414 415
415 if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS) 416 ret = dma_cookie_status(c, cookie, txstate);
416 return DMA_SUCCESS; 417 if (ret == DMA_SUCCESS)
418 return ret;
417 419
418 ioat3_cleanup(ioat); 420 ioat3_cleanup(ioat);
419 421
420 return ioat_tx_status(c, cookie, txstate); 422 return dma_cookie_status(c, cookie, txstate);
421} 423}
422 424
423static struct dma_async_tx_descriptor * 425static struct dma_async_tx_descriptor *
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index faf88b7e1e71..da6c4c2c066a 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -36,6 +36,8 @@
36 36
37#include <mach/adma.h> 37#include <mach/adma.h>
38 38
39#include "dmaengine.h"
40
39#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common) 41#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
40#define to_iop_adma_device(dev) \ 42#define to_iop_adma_device(dev) \
41 container_of(dev, struct iop_adma_device, common) 43 container_of(dev, struct iop_adma_device, common)
@@ -317,7 +319,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
317 } 319 }
318 320
319 if (cookie > 0) { 321 if (cookie > 0) {
320 iop_chan->completed_cookie = cookie; 322 iop_chan->common.completed_cookie = cookie;
321 pr_debug("\tcompleted cookie %d\n", cookie); 323 pr_debug("\tcompleted cookie %d\n", cookie);
322 } 324 }
323} 325}
@@ -438,18 +440,6 @@ retry:
438 return NULL; 440 return NULL;
439} 441}
440 442
441static dma_cookie_t
442iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
443 struct iop_adma_desc_slot *desc)
444{
445 dma_cookie_t cookie = iop_chan->common.cookie;
446 cookie++;
447 if (cookie < 0)
448 cookie = 1;
449 iop_chan->common.cookie = desc->async_tx.cookie = cookie;
450 return cookie;
451}
452
453static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan) 443static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
454{ 444{
455 dev_dbg(iop_chan->device->common.dev, "pending: %d\n", 445 dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
@@ -477,7 +467,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
477 slots_per_op = grp_start->slots_per_op; 467 slots_per_op = grp_start->slots_per_op;
478 468
479 spin_lock_bh(&iop_chan->lock); 469 spin_lock_bh(&iop_chan->lock);
480 cookie = iop_desc_assign_cookie(iop_chan, sw_desc); 470 cookie = dma_cookie_assign(tx);
481 471
482 old_chain_tail = list_entry(iop_chan->chain.prev, 472 old_chain_tail = list_entry(iop_chan->chain.prev,
483 struct iop_adma_desc_slot, chain_node); 473 struct iop_adma_desc_slot, chain_node);
@@ -904,24 +894,15 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
904 struct dma_tx_state *txstate) 894 struct dma_tx_state *txstate)
905{ 895{
906 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 896 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
907 dma_cookie_t last_used; 897 int ret;
908 dma_cookie_t last_complete; 898
909 enum dma_status ret; 899 ret = dma_cookie_status(chan, cookie, txstate);
910
911 last_used = chan->cookie;
912 last_complete = iop_chan->completed_cookie;
913 dma_set_tx_state(txstate, last_complete, last_used, 0);
914 ret = dma_async_is_complete(cookie, last_complete, last_used);
915 if (ret == DMA_SUCCESS) 900 if (ret == DMA_SUCCESS)
916 return ret; 901 return ret;
917 902
918 iop_adma_slot_cleanup(iop_chan); 903 iop_adma_slot_cleanup(iop_chan);
919 904
920 last_used = chan->cookie; 905 return dma_cookie_status(chan, cookie, txstate);
921 last_complete = iop_chan->completed_cookie;
922 dma_set_tx_state(txstate, last_complete, last_used, 0);
923
924 return dma_async_is_complete(cookie, last_complete, last_used);
925} 906}
926 907
927static irqreturn_t iop_adma_eot_handler(int irq, void *data) 908static irqreturn_t iop_adma_eot_handler(int irq, void *data)
@@ -1565,6 +1546,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
1565 INIT_LIST_HEAD(&iop_chan->chain); 1546 INIT_LIST_HEAD(&iop_chan->chain);
1566 INIT_LIST_HEAD(&iop_chan->all_slots); 1547 INIT_LIST_HEAD(&iop_chan->all_slots);
1567 iop_chan->common.device = dma_dev; 1548 iop_chan->common.device = dma_dev;
1549 dma_cookie_init(&iop_chan->common);
1568 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels); 1550 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1569 1551
1570 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { 1552 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
@@ -1642,16 +1624,12 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1642 iop_desc_set_dest_addr(grp_start, iop_chan, 0); 1624 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1643 iop_desc_set_memcpy_src_addr(grp_start, 0); 1625 iop_desc_set_memcpy_src_addr(grp_start, 0);
1644 1626
1645 cookie = iop_chan->common.cookie; 1627 cookie = dma_cookie_assign(&sw_desc->async_tx);
1646 cookie++;
1647 if (cookie <= 1)
1648 cookie = 2;
1649 1628
1650 /* initialize the completed cookie to be less than 1629 /* initialize the completed cookie to be less than
1651 * the most recently used cookie 1630 * the most recently used cookie
1652 */ 1631 */
1653 iop_chan->completed_cookie = cookie - 1; 1632 iop_chan->common.completed_cookie = cookie - 1;
1654 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1655 1633
1656 /* channel should not be busy */ 1634 /* channel should not be busy */
1657 BUG_ON(iop_chan_is_busy(iop_chan)); 1635 BUG_ON(iop_chan_is_busy(iop_chan));
@@ -1699,16 +1677,12 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1699 iop_desc_set_xor_src_addr(grp_start, 0, 0); 1677 iop_desc_set_xor_src_addr(grp_start, 0, 0);
1700 iop_desc_set_xor_src_addr(grp_start, 1, 0); 1678 iop_desc_set_xor_src_addr(grp_start, 1, 0);
1701 1679
1702 cookie = iop_chan->common.cookie; 1680 cookie = dma_cookie_assign(&sw_desc->async_tx);
1703 cookie++;
1704 if (cookie <= 1)
1705 cookie = 2;
1706 1681
1707 /* initialize the completed cookie to be less than 1682 /* initialize the completed cookie to be less than
1708 * the most recently used cookie 1683 * the most recently used cookie
1709 */ 1684 */
1710 iop_chan->completed_cookie = cookie - 1; 1685 iop_chan->common.completed_cookie = cookie - 1;
1711 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1712 1686
1713 /* channel should not be busy */ 1687 /* channel should not be busy */
1714 BUG_ON(iop_chan_is_busy(iop_chan)); 1688 BUG_ON(iop_chan_is_busy(iop_chan));
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 6212b16e8cf2..62e3f8ec2461 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -25,6 +25,7 @@
25 25
26#include <mach/ipu.h> 26#include <mach/ipu.h>
27 27
28#include "../dmaengine.h"
28#include "ipu_intern.h" 29#include "ipu_intern.h"
29 30
30#define FS_VF_IN_VALID 0x00000002 31#define FS_VF_IN_VALID 0x00000002
@@ -866,14 +867,7 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
866 867
867 dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); 868 dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
868 869
869 cookie = ichan->dma_chan.cookie; 870 cookie = dma_cookie_assign(tx);
870
871 if (++cookie < 0)
872 cookie = 1;
873
874 /* from dmaengine.h: "last cookie value returned to client" */
875 ichan->dma_chan.cookie = cookie;
876 tx->cookie = cookie;
877 871
878 /* ipu->lock can be taken under ichan->lock, but not v.v. */ 872 /* ipu->lock can be taken under ichan->lock, but not v.v. */
879 spin_lock_irqsave(&ichan->lock, flags); 873 spin_lock_irqsave(&ichan->lock, flags);
@@ -1295,7 +1289,7 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1295 /* Flip the active buffer - even if update above failed */ 1289 /* Flip the active buffer - even if update above failed */
1296 ichan->active_buffer = !ichan->active_buffer; 1290 ichan->active_buffer = !ichan->active_buffer;
1297 if (done) 1291 if (done)
1298 ichan->completed = desc->txd.cookie; 1292 dma_cookie_complete(&desc->txd);
1299 1293
1300 callback = desc->txd.callback; 1294 callback = desc->txd.callback;
1301 callback_param = desc->txd.callback_param; 1295 callback_param = desc->txd.callback_param;
@@ -1341,7 +1335,8 @@ static void ipu_gc_tasklet(unsigned long arg)
1341/* Allocate and initialise a transfer descriptor. */ 1335/* Allocate and initialise a transfer descriptor. */
1342static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, 1336static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
1343 struct scatterlist *sgl, unsigned int sg_len, 1337 struct scatterlist *sgl, unsigned int sg_len,
1344 enum dma_transfer_direction direction, unsigned long tx_flags) 1338 enum dma_transfer_direction direction, unsigned long tx_flags,
1339 void *context)
1345{ 1340{
1346 struct idmac_channel *ichan = to_idmac_chan(chan); 1341 struct idmac_channel *ichan = to_idmac_chan(chan);
1347 struct idmac_tx_desc *desc = NULL; 1342 struct idmac_tx_desc *desc = NULL;
@@ -1510,8 +1505,7 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan)
1510 BUG_ON(chan->client_count > 1); 1505 BUG_ON(chan->client_count > 1);
1511 WARN_ON(ichan->status != IPU_CHANNEL_FREE); 1506 WARN_ON(ichan->status != IPU_CHANNEL_FREE);
1512 1507
1513 chan->cookie = 1; 1508 dma_cookie_init(chan);
1514 ichan->completed = -ENXIO;
1515 1509
1516 ret = ipu_irq_map(chan->chan_id); 1510 ret = ipu_irq_map(chan->chan_id);
1517 if (ret < 0) 1511 if (ret < 0)
@@ -1600,9 +1594,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
1600static enum dma_status idmac_tx_status(struct dma_chan *chan, 1594static enum dma_status idmac_tx_status(struct dma_chan *chan,
1601 dma_cookie_t cookie, struct dma_tx_state *txstate) 1595 dma_cookie_t cookie, struct dma_tx_state *txstate)
1602{ 1596{
1603 struct idmac_channel *ichan = to_idmac_chan(chan); 1597 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
1604
1605 dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0);
1606 if (cookie != chan->cookie) 1598 if (cookie != chan->cookie)
1607 return DMA_ERROR; 1599 return DMA_ERROR;
1608 return DMA_SUCCESS; 1600 return DMA_SUCCESS;
@@ -1638,11 +1630,10 @@ static int __init ipu_idmac_init(struct ipu *ipu)
1638 1630
1639 ichan->status = IPU_CHANNEL_FREE; 1631 ichan->status = IPU_CHANNEL_FREE;
1640 ichan->sec_chan_en = false; 1632 ichan->sec_chan_en = false;
1641 ichan->completed = -ENXIO;
1642 snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i); 1633 snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i);
1643 1634
1644 dma_chan->device = &idmac->dma; 1635 dma_chan->device = &idmac->dma;
1645 dma_chan->cookie = 1; 1636 dma_cookie_init(dma_chan);
1646 dma_chan->chan_id = i; 1637 dma_chan->chan_id = i;
1647 list_add_tail(&dma_chan->device_node, &dma->channels); 1638 list_add_tail(&dma_chan->device_node, &dma->channels);
1648 } 1639 }
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4d6d4cf66949..2ab0a3d0eed5 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -44,6 +44,8 @@
44 44
45#include <linux/random.h> 45#include <linux/random.h>
46 46
47#include "dmaengine.h"
48
47/* Number of DMA Transfer descriptors allocated per channel */ 49/* Number of DMA Transfer descriptors allocated per channel */
48#define MPC_DMA_DESCRIPTORS 64 50#define MPC_DMA_DESCRIPTORS 64
49 51
@@ -188,7 +190,6 @@ struct mpc_dma_chan {
188 struct list_head completed; 190 struct list_head completed;
189 struct mpc_dma_tcd *tcd; 191 struct mpc_dma_tcd *tcd;
190 dma_addr_t tcd_paddr; 192 dma_addr_t tcd_paddr;
191 dma_cookie_t completed_cookie;
192 193
193 /* Lock for this structure */ 194 /* Lock for this structure */
194 spinlock_t lock; 195 spinlock_t lock;
@@ -365,7 +366,7 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma)
365 /* Free descriptors */ 366 /* Free descriptors */
366 spin_lock_irqsave(&mchan->lock, flags); 367 spin_lock_irqsave(&mchan->lock, flags);
367 list_splice_tail_init(&list, &mchan->free); 368 list_splice_tail_init(&list, &mchan->free);
368 mchan->completed_cookie = last_cookie; 369 mchan->chan.completed_cookie = last_cookie;
369 spin_unlock_irqrestore(&mchan->lock, flags); 370 spin_unlock_irqrestore(&mchan->lock, flags);
370 } 371 }
371} 372}
@@ -438,13 +439,7 @@ static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
438 mpc_dma_execute(mchan); 439 mpc_dma_execute(mchan);
439 440
440 /* Update cookie */ 441 /* Update cookie */
441 cookie = mchan->chan.cookie + 1; 442 cookie = dma_cookie_assign(txd);
442 if (cookie <= 0)
443 cookie = 1;
444
445 mchan->chan.cookie = cookie;
446 mdesc->desc.cookie = cookie;
447
448 spin_unlock_irqrestore(&mchan->lock, flags); 443 spin_unlock_irqrestore(&mchan->lock, flags);
449 444
450 return cookie; 445 return cookie;
@@ -562,17 +557,14 @@ mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
562 struct dma_tx_state *txstate) 557 struct dma_tx_state *txstate)
563{ 558{
564 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); 559 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
560 enum dma_status ret;
565 unsigned long flags; 561 unsigned long flags;
566 dma_cookie_t last_used;
567 dma_cookie_t last_complete;
568 562
569 spin_lock_irqsave(&mchan->lock, flags); 563 spin_lock_irqsave(&mchan->lock, flags);
570 last_used = mchan->chan.cookie; 564 ret = dma_cookie_status(chan, cookie, txstate);
571 last_complete = mchan->completed_cookie;
572 spin_unlock_irqrestore(&mchan->lock, flags); 565 spin_unlock_irqrestore(&mchan->lock, flags);
573 566
574 dma_set_tx_state(txstate, last_complete, last_used, 0); 567 return ret;
575 return dma_async_is_complete(cookie, last_complete, last_used);
576} 568}
577 569
578/* Prepare descriptor for memory to memory copy */ 570/* Prepare descriptor for memory to memory copy */
@@ -741,8 +733,7 @@ static int __devinit mpc_dma_probe(struct platform_device *op)
741 mchan = &mdma->channels[i]; 733 mchan = &mdma->channels[i];
742 734
743 mchan->chan.device = dma; 735 mchan->chan.device = dma;
744 mchan->chan.cookie = 1; 736 dma_cookie_init(&mchan->chan);
745 mchan->completed_cookie = mchan->chan.cookie;
746 737
747 INIT_LIST_HEAD(&mchan->free); 738 INIT_LIST_HEAD(&mchan->free);
748 INIT_LIST_HEAD(&mchan->prepared); 739 INIT_LIST_HEAD(&mchan->prepared);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e779b434af45..fa5d55fea46c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -26,6 +26,8 @@
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/memory.h> 27#include <linux/memory.h>
28#include <plat/mv_xor.h> 28#include <plat/mv_xor.h>
29
30#include "dmaengine.h"
29#include "mv_xor.h" 31#include "mv_xor.h"
30 32
31static void mv_xor_issue_pending(struct dma_chan *chan); 33static void mv_xor_issue_pending(struct dma_chan *chan);
@@ -435,7 +437,7 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
435 } 437 }
436 438
437 if (cookie > 0) 439 if (cookie > 0)
438 mv_chan->completed_cookie = cookie; 440 mv_chan->common.completed_cookie = cookie;
439} 441}
440 442
441static void 443static void
@@ -534,18 +536,6 @@ retry:
534 return NULL; 536 return NULL;
535} 537}
536 538
537static dma_cookie_t
538mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
539 struct mv_xor_desc_slot *desc)
540{
541 dma_cookie_t cookie = mv_chan->common.cookie;
542
543 if (++cookie < 0)
544 cookie = 1;
545 mv_chan->common.cookie = desc->async_tx.cookie = cookie;
546 return cookie;
547}
548
549/************************ DMA engine API functions ****************************/ 539/************************ DMA engine API functions ****************************/
550static dma_cookie_t 540static dma_cookie_t
551mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) 541mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -563,7 +553,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
563 grp_start = sw_desc->group_head; 553 grp_start = sw_desc->group_head;
564 554
565 spin_lock_bh(&mv_chan->lock); 555 spin_lock_bh(&mv_chan->lock);
566 cookie = mv_desc_assign_cookie(mv_chan, sw_desc); 556 cookie = dma_cookie_assign(tx);
567 557
568 if (list_empty(&mv_chan->chain)) 558 if (list_empty(&mv_chan->chain))
569 list_splice_init(&sw_desc->tx_list, &mv_chan->chain); 559 list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
@@ -820,27 +810,16 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
820 struct dma_tx_state *txstate) 810 struct dma_tx_state *txstate)
821{ 811{
822 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 812 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
823 dma_cookie_t last_used;
824 dma_cookie_t last_complete;
825 enum dma_status ret; 813 enum dma_status ret;
826 814
827 last_used = chan->cookie; 815 ret = dma_cookie_status(chan, cookie, txstate);
828 last_complete = mv_chan->completed_cookie;
829 mv_chan->is_complete_cookie = cookie;
830 dma_set_tx_state(txstate, last_complete, last_used, 0);
831
832 ret = dma_async_is_complete(cookie, last_complete, last_used);
833 if (ret == DMA_SUCCESS) { 816 if (ret == DMA_SUCCESS) {
834 mv_xor_clean_completed_slots(mv_chan); 817 mv_xor_clean_completed_slots(mv_chan);
835 return ret; 818 return ret;
836 } 819 }
837 mv_xor_slot_cleanup(mv_chan); 820 mv_xor_slot_cleanup(mv_chan);
838 821
839 last_used = chan->cookie; 822 return dma_cookie_status(chan, cookie, txstate);
840 last_complete = mv_chan->completed_cookie;
841
842 dma_set_tx_state(txstate, last_complete, last_used, 0);
843 return dma_async_is_complete(cookie, last_complete, last_used);
844} 823}
845 824
846static void mv_dump_xor_regs(struct mv_xor_chan *chan) 825static void mv_dump_xor_regs(struct mv_xor_chan *chan)
@@ -1214,6 +1193,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
1214 INIT_LIST_HEAD(&mv_chan->completed_slots); 1193 INIT_LIST_HEAD(&mv_chan->completed_slots);
1215 INIT_LIST_HEAD(&mv_chan->all_slots); 1194 INIT_LIST_HEAD(&mv_chan->all_slots);
1216 mv_chan->common.device = dma_dev; 1195 mv_chan->common.device = dma_dev;
1196 dma_cookie_init(&mv_chan->common);
1217 1197
1218 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels); 1198 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
1219 1199
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 977b592e976b..654876b7ba1d 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -78,7 +78,6 @@ struct mv_xor_device {
78/** 78/**
79 * struct mv_xor_chan - internal representation of a XOR channel 79 * struct mv_xor_chan - internal representation of a XOR channel
80 * @pending: allows batching of hardware operations 80 * @pending: allows batching of hardware operations
81 * @completed_cookie: identifier for the most recently completed operation
82 * @lock: serializes enqueue/dequeue operations to the descriptors pool 81 * @lock: serializes enqueue/dequeue operations to the descriptors pool
83 * @mmr_base: memory mapped register base 82 * @mmr_base: memory mapped register base
84 * @idx: the index of the xor channel 83 * @idx: the index of the xor channel
@@ -93,7 +92,6 @@ struct mv_xor_device {
93 */ 92 */
94struct mv_xor_chan { 93struct mv_xor_chan {
95 int pending; 94 int pending;
96 dma_cookie_t completed_cookie;
97 spinlock_t lock; /* protects the descriptor slot pool */ 95 spinlock_t lock; /* protects the descriptor slot pool */
98 void __iomem *mmr_base; 96 void __iomem *mmr_base;
99 unsigned int idx; 97 unsigned int idx;
@@ -109,7 +107,6 @@ struct mv_xor_chan {
109#ifdef USE_TIMER 107#ifdef USE_TIMER
110 unsigned long cleanup_time; 108 unsigned long cleanup_time;
111 u32 current_on_last_cleanup; 109 u32 current_on_last_cleanup;
112 dma_cookie_t is_complete_cookie;
113#endif 110#endif
114}; 111};
115 112
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index b06cd4ca626f..c81ef7e10e08 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -22,12 +22,14 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/dmaengine.h> 23#include <linux/dmaengine.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/fsl/mxs-dma.h>
25 26
26#include <asm/irq.h> 27#include <asm/irq.h>
27#include <mach/mxs.h> 28#include <mach/mxs.h>
28#include <mach/dma.h>
29#include <mach/common.h> 29#include <mach/common.h>
30 30
31#include "dmaengine.h"
32
31/* 33/*
32 * NOTE: The term "PIO" throughout the mxs-dma implementation means 34 * NOTE: The term "PIO" throughout the mxs-dma implementation means
33 * PIO mode of mxs apbh-dma and apbx-dma. With this working mode, 35 * PIO mode of mxs apbh-dma and apbx-dma. With this working mode,
@@ -111,7 +113,6 @@ struct mxs_dma_chan {
111 struct mxs_dma_ccw *ccw; 113 struct mxs_dma_ccw *ccw;
112 dma_addr_t ccw_phys; 114 dma_addr_t ccw_phys;
113 int desc_count; 115 int desc_count;
114 dma_cookie_t last_completed;
115 enum dma_status status; 116 enum dma_status status;
116 unsigned int flags; 117 unsigned int flags;
117#define MXS_DMA_SG_LOOP (1 << 0) 118#define MXS_DMA_SG_LOOP (1 << 0)
@@ -193,19 +194,6 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
193 mxs_chan->status = DMA_IN_PROGRESS; 194 mxs_chan->status = DMA_IN_PROGRESS;
194} 195}
195 196
196static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan)
197{
198 dma_cookie_t cookie = mxs_chan->chan.cookie;
199
200 if (++cookie < 0)
201 cookie = 1;
202
203 mxs_chan->chan.cookie = cookie;
204 mxs_chan->desc.cookie = cookie;
205
206 return cookie;
207}
208
209static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) 197static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
210{ 198{
211 return container_of(chan, struct mxs_dma_chan, chan); 199 return container_of(chan, struct mxs_dma_chan, chan);
@@ -217,7 +205,7 @@ static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
217 205
218 mxs_dma_enable_chan(mxs_chan); 206 mxs_dma_enable_chan(mxs_chan);
219 207
220 return mxs_dma_assign_cookie(mxs_chan); 208 return dma_cookie_assign(tx);
221} 209}
222 210
223static void mxs_dma_tasklet(unsigned long data) 211static void mxs_dma_tasklet(unsigned long data)
@@ -274,7 +262,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
274 stat1 &= ~(1 << channel); 262 stat1 &= ~(1 << channel);
275 263
276 if (mxs_chan->status == DMA_SUCCESS) 264 if (mxs_chan->status == DMA_SUCCESS)
277 mxs_chan->last_completed = mxs_chan->desc.cookie; 265 dma_cookie_complete(&mxs_chan->desc);
278 266
279 /* schedule tasklet on this channel */ 267 /* schedule tasklet on this channel */
280 tasklet_schedule(&mxs_chan->tasklet); 268 tasklet_schedule(&mxs_chan->tasklet);
@@ -349,10 +337,32 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
349 clk_disable_unprepare(mxs_dma->clk); 337 clk_disable_unprepare(mxs_dma->clk);
350} 338}
351 339
340/*
341 * How to use the flags for ->device_prep_slave_sg() :
342 * [1] If there is only one DMA command in the DMA chain, the code should be:
343 * ......
344 * ->device_prep_slave_sg(DMA_CTRL_ACK);
345 * ......
346 * [2] If there are two DMA commands in the DMA chain, the code should be
347 * ......
348 * ->device_prep_slave_sg(0);
349 * ......
350 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
351 * ......
352 * [3] If there are more than two DMA commands in the DMA chain, the code
353 * should be:
354 * ......
355 * ->device_prep_slave_sg(0); // First
356 * ......
357 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
358 * ......
359 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
360 * ......
361 */
352static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( 362static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
353 struct dma_chan *chan, struct scatterlist *sgl, 363 struct dma_chan *chan, struct scatterlist *sgl,
354 unsigned int sg_len, enum dma_transfer_direction direction, 364 unsigned int sg_len, enum dma_transfer_direction direction,
355 unsigned long append) 365 unsigned long flags, void *context)
356{ 366{
357 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 367 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
358 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 368 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -360,6 +370,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
360 struct scatterlist *sg; 370 struct scatterlist *sg;
361 int i, j; 371 int i, j;
362 u32 *pio; 372 u32 *pio;
373 bool append = flags & DMA_PREP_INTERRUPT;
363 int idx = append ? mxs_chan->desc_count : 0; 374 int idx = append ? mxs_chan->desc_count : 0;
364 375
365 if (mxs_chan->status == DMA_IN_PROGRESS && !append) 376 if (mxs_chan->status == DMA_IN_PROGRESS && !append)
@@ -386,7 +397,6 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
386 ccw->bits |= CCW_CHAIN; 397 ccw->bits |= CCW_CHAIN;
387 ccw->bits &= ~CCW_IRQ; 398 ccw->bits &= ~CCW_IRQ;
388 ccw->bits &= ~CCW_DEC_SEM; 399 ccw->bits &= ~CCW_DEC_SEM;
389 ccw->bits &= ~CCW_WAIT4END;
390 } else { 400 } else {
391 idx = 0; 401 idx = 0;
392 } 402 }
@@ -401,7 +411,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
401 ccw->bits = 0; 411 ccw->bits = 0;
402 ccw->bits |= CCW_IRQ; 412 ccw->bits |= CCW_IRQ;
403 ccw->bits |= CCW_DEC_SEM; 413 ccw->bits |= CCW_DEC_SEM;
404 ccw->bits |= CCW_WAIT4END; 414 if (flags & DMA_CTRL_ACK)
415 ccw->bits |= CCW_WAIT4END;
405 ccw->bits |= CCW_HALT_ON_TERM; 416 ccw->bits |= CCW_HALT_ON_TERM;
406 ccw->bits |= CCW_TERM_FLUSH; 417 ccw->bits |= CCW_TERM_FLUSH;
407 ccw->bits |= BF_CCW(sg_len, PIO_NUM); 418 ccw->bits |= BF_CCW(sg_len, PIO_NUM);
@@ -432,7 +443,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
432 ccw->bits &= ~CCW_CHAIN; 443 ccw->bits &= ~CCW_CHAIN;
433 ccw->bits |= CCW_IRQ; 444 ccw->bits |= CCW_IRQ;
434 ccw->bits |= CCW_DEC_SEM; 445 ccw->bits |= CCW_DEC_SEM;
435 ccw->bits |= CCW_WAIT4END; 446 if (flags & DMA_CTRL_ACK)
447 ccw->bits |= CCW_WAIT4END;
436 } 448 }
437 } 449 }
438 } 450 }
@@ -447,7 +459,8 @@ err_out:
447 459
448static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( 460static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
449 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 461 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
450 size_t period_len, enum dma_transfer_direction direction) 462 size_t period_len, enum dma_transfer_direction direction,
463 void *context)
451{ 464{
452 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 465 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
453 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 466 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -538,7 +551,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
538 dma_cookie_t last_used; 551 dma_cookie_t last_used;
539 552
540 last_used = chan->cookie; 553 last_used = chan->cookie;
541 dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0); 554 dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
542 555
543 return mxs_chan->status; 556 return mxs_chan->status;
544} 557}
@@ -630,6 +643,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
630 643
631 mxs_chan->mxs_dma = mxs_dma; 644 mxs_chan->mxs_dma = mxs_dma;
632 mxs_chan->chan.device = &mxs_dma->dma_device; 645 mxs_chan->chan.device = &mxs_dma->dma_device;
646 dma_cookie_init(&mxs_chan->chan);
633 647
634 tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet, 648 tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
635 (unsigned long) mxs_chan); 649 (unsigned long) mxs_chan);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 823f58179f9d..65c0495a6d40 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -25,6 +25,8 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/pch_dma.h> 26#include <linux/pch_dma.h>
27 27
28#include "dmaengine.h"
29
28#define DRV_NAME "pch-dma" 30#define DRV_NAME "pch-dma"
29 31
30#define DMA_CTL0_DISABLE 0x0 32#define DMA_CTL0_DISABLE 0x0
@@ -105,7 +107,6 @@ struct pch_dma_chan {
105 107
106 spinlock_t lock; 108 spinlock_t lock;
107 109
108 dma_cookie_t completed_cookie;
109 struct list_head active_list; 110 struct list_head active_list;
110 struct list_head queue; 111 struct list_head queue;
111 struct list_head free_list; 112 struct list_head free_list;
@@ -416,20 +417,6 @@ static void pdc_advance_work(struct pch_dma_chan *pd_chan)
416 } 417 }
417} 418}
418 419
419static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
420 struct pch_dma_desc *desc)
421{
422 dma_cookie_t cookie = pd_chan->chan.cookie;
423
424 if (++cookie < 0)
425 cookie = 1;
426
427 pd_chan->chan.cookie = cookie;
428 desc->txd.cookie = cookie;
429
430 return cookie;
431}
432
433static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd) 420static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
434{ 421{
435 struct pch_dma_desc *desc = to_pd_desc(txd); 422 struct pch_dma_desc *desc = to_pd_desc(txd);
@@ -437,7 +424,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
437 dma_cookie_t cookie; 424 dma_cookie_t cookie;
438 425
439 spin_lock(&pd_chan->lock); 426 spin_lock(&pd_chan->lock);
440 cookie = pdc_assign_cookie(pd_chan, desc); 427 cookie = dma_cookie_assign(txd);
441 428
442 if (list_empty(&pd_chan->active_list)) { 429 if (list_empty(&pd_chan->active_list)) {
443 list_add_tail(&desc->desc_node, &pd_chan->active_list); 430 list_add_tail(&desc->desc_node, &pd_chan->active_list);
@@ -544,7 +531,7 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
544 spin_lock_irq(&pd_chan->lock); 531 spin_lock_irq(&pd_chan->lock);
545 list_splice(&tmp_list, &pd_chan->free_list); 532 list_splice(&tmp_list, &pd_chan->free_list);
546 pd_chan->descs_allocated = i; 533 pd_chan->descs_allocated = i;
547 pd_chan->completed_cookie = chan->cookie = 1; 534 dma_cookie_init(chan);
548 spin_unlock_irq(&pd_chan->lock); 535 spin_unlock_irq(&pd_chan->lock);
549 536
550 pdc_enable_irq(chan, 1); 537 pdc_enable_irq(chan, 1);
@@ -578,19 +565,12 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
578 struct dma_tx_state *txstate) 565 struct dma_tx_state *txstate)
579{ 566{
580 struct pch_dma_chan *pd_chan = to_pd_chan(chan); 567 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
581 dma_cookie_t last_used; 568 enum dma_status ret;
582 dma_cookie_t last_completed;
583 int ret;
584 569
585 spin_lock_irq(&pd_chan->lock); 570 spin_lock_irq(&pd_chan->lock);
586 last_completed = pd_chan->completed_cookie; 571 ret = dma_cookie_status(chan, cookie, txstate);
587 last_used = chan->cookie;
588 spin_unlock_irq(&pd_chan->lock); 572 spin_unlock_irq(&pd_chan->lock);
589 573
590 ret = dma_async_is_complete(cookie, last_completed, last_used);
591
592 dma_set_tx_state(txstate, last_completed, last_used, 0);
593
594 return ret; 574 return ret;
595} 575}
596 576
@@ -607,7 +587,8 @@ static void pd_issue_pending(struct dma_chan *chan)
607 587
608static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, 588static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
609 struct scatterlist *sgl, unsigned int sg_len, 589 struct scatterlist *sgl, unsigned int sg_len,
610 enum dma_transfer_direction direction, unsigned long flags) 590 enum dma_transfer_direction direction, unsigned long flags,
591 void *context)
611{ 592{
612 struct pch_dma_chan *pd_chan = to_pd_chan(chan); 593 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
613 struct pch_dma_slave *pd_slave = chan->private; 594 struct pch_dma_slave *pd_slave = chan->private;
@@ -932,7 +913,7 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
932 struct pch_dma_chan *pd_chan = &pd->channels[i]; 913 struct pch_dma_chan *pd_chan = &pd->channels[i];
933 914
934 pd_chan->chan.device = &pd->dma; 915 pd_chan->chan.device = &pd->dma;
935 pd_chan->chan.cookie = 1; 916 dma_cookie_init(&pd_chan->chan);
936 917
937 pd_chan->membase = &regs->desc[i]; 918 pd_chan->membase = &regs->desc[i];
938 919
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 16b66c827f19..282caf118be8 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1,4 +1,6 @@
1/* linux/drivers/dma/pl330.c 1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 * http://www.samsung.com
2 * 4 *
3 * Copyright (C) 2010 Samsung Electronics Co. Ltd. 5 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com> 6 * Jaswinder Singh <jassi.brar@samsung.com>
@@ -9,10 +11,15 @@
9 * (at your option) any later version. 11 * (at your option) any later version.
10 */ 12 */
11 13
14#include <linux/kernel.h>
12#include <linux/io.h> 15#include <linux/io.h>
13#include <linux/init.h> 16#include <linux/init.h>
14#include <linux/slab.h> 17#include <linux/slab.h>
15#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/string.h>
20#include <linux/delay.h>
21#include <linux/interrupt.h>
22#include <linux/dma-mapping.h>
16#include <linux/dmaengine.h> 23#include <linux/dmaengine.h>
17#include <linux/interrupt.h> 24#include <linux/interrupt.h>
18#include <linux/amba/bus.h> 25#include <linux/amba/bus.h>
@@ -21,8 +28,497 @@
21#include <linux/scatterlist.h> 28#include <linux/scatterlist.h>
22#include <linux/of.h> 29#include <linux/of.h>
23 30
31#include "dmaengine.h"
32#define PL330_MAX_CHAN 8
33#define PL330_MAX_IRQS 32
34#define PL330_MAX_PERI 32
35
36enum pl330_srccachectrl {
37 SCCTRL0, /* Noncacheable and nonbufferable */
38 SCCTRL1, /* Bufferable only */
39 SCCTRL2, /* Cacheable, but do not allocate */
40 SCCTRL3, /* Cacheable and bufferable, but do not allocate */
41 SINVALID1,
42 SINVALID2,
43 SCCTRL6, /* Cacheable write-through, allocate on reads only */
44 SCCTRL7, /* Cacheable write-back, allocate on reads only */
45};
46
47enum pl330_dstcachectrl {
48 DCCTRL0, /* Noncacheable and nonbufferable */
49 DCCTRL1, /* Bufferable only */
50 DCCTRL2, /* Cacheable, but do not allocate */
51 DCCTRL3, /* Cacheable and bufferable, but do not allocate */
52 DINVALID1, /* AWCACHE = 0x1000 */
53 DINVALID2,
54 DCCTRL6, /* Cacheable write-through, allocate on writes only */
55 DCCTRL7, /* Cacheable write-back, allocate on writes only */
56};
57
58enum pl330_byteswap {
59 SWAP_NO,
60 SWAP_2,
61 SWAP_4,
62 SWAP_8,
63 SWAP_16,
64};
65
66enum pl330_reqtype {
67 MEMTOMEM,
68 MEMTODEV,
69 DEVTOMEM,
70 DEVTODEV,
71};
72
73/* Register and Bit field Definitions */
74#define DS 0x0
75#define DS_ST_STOP 0x0
76#define DS_ST_EXEC 0x1
77#define DS_ST_CMISS 0x2
78#define DS_ST_UPDTPC 0x3
79#define DS_ST_WFE 0x4
80#define DS_ST_ATBRR 0x5
81#define DS_ST_QBUSY 0x6
82#define DS_ST_WFP 0x7
83#define DS_ST_KILL 0x8
84#define DS_ST_CMPLT 0x9
85#define DS_ST_FLTCMP 0xe
86#define DS_ST_FAULT 0xf
87
88#define DPC 0x4
89#define INTEN 0x20
90#define ES 0x24
91#define INTSTATUS 0x28
92#define INTCLR 0x2c
93#define FSM 0x30
94#define FSC 0x34
95#define FTM 0x38
96
97#define _FTC 0x40
98#define FTC(n) (_FTC + (n)*0x4)
99
100#define _CS 0x100
101#define CS(n) (_CS + (n)*0x8)
102#define CS_CNS (1 << 21)
103
104#define _CPC 0x104
105#define CPC(n) (_CPC + (n)*0x8)
106
107#define _SA 0x400
108#define SA(n) (_SA + (n)*0x20)
109
110#define _DA 0x404
111#define DA(n) (_DA + (n)*0x20)
112
113#define _CC 0x408
114#define CC(n) (_CC + (n)*0x20)
115
116#define CC_SRCINC (1 << 0)
117#define CC_DSTINC (1 << 14)
118#define CC_SRCPRI (1 << 8)
119#define CC_DSTPRI (1 << 22)
120#define CC_SRCNS (1 << 9)
121#define CC_DSTNS (1 << 23)
122#define CC_SRCIA (1 << 10)
123#define CC_DSTIA (1 << 24)
124#define CC_SRCBRSTLEN_SHFT 4
125#define CC_DSTBRSTLEN_SHFT 18
126#define CC_SRCBRSTSIZE_SHFT 1
127#define CC_DSTBRSTSIZE_SHFT 15
128#define CC_SRCCCTRL_SHFT 11
129#define CC_SRCCCTRL_MASK 0x7
130#define CC_DSTCCTRL_SHFT 25
131#define CC_DRCCCTRL_MASK 0x7
132#define CC_SWAP_SHFT 28
133
134#define _LC0 0x40c
135#define LC0(n) (_LC0 + (n)*0x20)
136
137#define _LC1 0x410
138#define LC1(n) (_LC1 + (n)*0x20)
139
140#define DBGSTATUS 0xd00
141#define DBG_BUSY (1 << 0)
142
143#define DBGCMD 0xd04
144#define DBGINST0 0xd08
145#define DBGINST1 0xd0c
146
147#define CR0 0xe00
148#define CR1 0xe04
149#define CR2 0xe08
150#define CR3 0xe0c
151#define CR4 0xe10
152#define CRD 0xe14
153
154#define PERIPH_ID 0xfe0
155#define PERIPH_REV_SHIFT 20
156#define PERIPH_REV_MASK 0xf
157#define PERIPH_REV_R0P0 0
158#define PERIPH_REV_R1P0 1
159#define PERIPH_REV_R1P1 2
160#define PCELL_ID 0xff0
161
162#define CR0_PERIPH_REQ_SET (1 << 0)
163#define CR0_BOOT_EN_SET (1 << 1)
164#define CR0_BOOT_MAN_NS (1 << 2)
165#define CR0_NUM_CHANS_SHIFT 4
166#define CR0_NUM_CHANS_MASK 0x7
167#define CR0_NUM_PERIPH_SHIFT 12
168#define CR0_NUM_PERIPH_MASK 0x1f
169#define CR0_NUM_EVENTS_SHIFT 17
170#define CR0_NUM_EVENTS_MASK 0x1f
171
172#define CR1_ICACHE_LEN_SHIFT 0
173#define CR1_ICACHE_LEN_MASK 0x7
174#define CR1_NUM_ICACHELINES_SHIFT 4
175#define CR1_NUM_ICACHELINES_MASK 0xf
176
177#define CRD_DATA_WIDTH_SHIFT 0
178#define CRD_DATA_WIDTH_MASK 0x7
179#define CRD_WR_CAP_SHIFT 4
180#define CRD_WR_CAP_MASK 0x7
181#define CRD_WR_Q_DEP_SHIFT 8
182#define CRD_WR_Q_DEP_MASK 0xf
183#define CRD_RD_CAP_SHIFT 12
184#define CRD_RD_CAP_MASK 0x7
185#define CRD_RD_Q_DEP_SHIFT 16
186#define CRD_RD_Q_DEP_MASK 0xf
187#define CRD_DATA_BUFF_SHIFT 20
188#define CRD_DATA_BUFF_MASK 0x3ff
189
190#define PART 0x330
191#define DESIGNER 0x41
192#define REVISION 0x0
193#define INTEG_CFG 0x0
194#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))
195
196#define PCELL_ID_VAL 0xb105f00d
197
198#define PL330_STATE_STOPPED (1 << 0)
199#define PL330_STATE_EXECUTING (1 << 1)
200#define PL330_STATE_WFE (1 << 2)
201#define PL330_STATE_FAULTING (1 << 3)
202#define PL330_STATE_COMPLETING (1 << 4)
203#define PL330_STATE_WFP (1 << 5)
204#define PL330_STATE_KILLING (1 << 6)
205#define PL330_STATE_FAULT_COMPLETING (1 << 7)
206#define PL330_STATE_CACHEMISS (1 << 8)
207#define PL330_STATE_UPDTPC (1 << 9)
208#define PL330_STATE_ATBARRIER (1 << 10)
209#define PL330_STATE_QUEUEBUSY (1 << 11)
210#define PL330_STATE_INVALID (1 << 15)
211
212#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
213 | PL330_STATE_WFE | PL330_STATE_FAULTING)
214
215#define CMD_DMAADDH 0x54
216#define CMD_DMAEND 0x00
217#define CMD_DMAFLUSHP 0x35
218#define CMD_DMAGO 0xa0
219#define CMD_DMALD 0x04
220#define CMD_DMALDP 0x25
221#define CMD_DMALP 0x20
222#define CMD_DMALPEND 0x28
223#define CMD_DMAKILL 0x01
224#define CMD_DMAMOV 0xbc
225#define CMD_DMANOP 0x18
226#define CMD_DMARMB 0x12
227#define CMD_DMASEV 0x34
228#define CMD_DMAST 0x08
229#define CMD_DMASTP 0x29
230#define CMD_DMASTZ 0x0c
231#define CMD_DMAWFE 0x36
232#define CMD_DMAWFP 0x30
233#define CMD_DMAWMB 0x13
234
235#define SZ_DMAADDH 3
236#define SZ_DMAEND 1
237#define SZ_DMAFLUSHP 2
238#define SZ_DMALD 1
239#define SZ_DMALDP 2
240#define SZ_DMALP 2
241#define SZ_DMALPEND 2
242#define SZ_DMAKILL 1
243#define SZ_DMAMOV 6
244#define SZ_DMANOP 1
245#define SZ_DMARMB 1
246#define SZ_DMASEV 2
247#define SZ_DMAST 1
248#define SZ_DMASTP 2
249#define SZ_DMASTZ 1
250#define SZ_DMAWFE 2
251#define SZ_DMAWFP 2
252#define SZ_DMAWMB 1
253#define SZ_DMAGO 6
254
255#define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
256#define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))
257
258#define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
259#define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
260
261/*
262 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
263 * at 1byte/burst for P<->M and M<->M respectively.
264 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
265 * should be enough for P<->M and M<->M respectively.
266 */
267#define MCODE_BUFF_PER_REQ 256
268
269/* If the _pl330_req is available to the client */
270#define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
271
272/* Use this _only_ to wait on transient states */
273#define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax();
274
275#ifdef PL330_DEBUG_MCGEN
276static unsigned cmd_line;
277#define PL330_DBGCMD_DUMP(off, x...) do { \
278 printk("%x:", cmd_line); \
279 printk(x); \
280 cmd_line += off; \
281 } while (0)
282#define PL330_DBGMC_START(addr) (cmd_line = addr)
283#else
284#define PL330_DBGCMD_DUMP(off, x...) do {} while (0)
285#define PL330_DBGMC_START(addr) do {} while (0)
286#endif
287
288/* The number of default descriptors */
289
24#define NR_DEFAULT_DESC 16 290#define NR_DEFAULT_DESC 16
25 291
292/* Populated by the PL330 core driver for DMA API driver's info */
293struct pl330_config {
294 u32 periph_id;
295 u32 pcell_id;
296#define DMAC_MODE_NS (1 << 0)
297 unsigned int mode;
298 unsigned int data_bus_width:10; /* In number of bits */
299 unsigned int data_buf_dep:10;
300 unsigned int num_chan:4;
301 unsigned int num_peri:6;
302 u32 peri_ns;
303 unsigned int num_events:6;
304 u32 irq_ns;
305};
306
307/* Handle to the DMAC provided to the PL330 core */
308struct pl330_info {
309 /* Owning device */
310 struct device *dev;
311 /* Size of MicroCode buffers for each channel. */
312 unsigned mcbufsz;
313 /* ioremap'ed address of PL330 registers. */
314 void __iomem *base;
315 /* Client can freely use it. */
316 void *client_data;
317 /* PL330 core data, Client must not touch it. */
318 void *pl330_data;
319 /* Populated by the PL330 core driver during pl330_add */
320 struct pl330_config pcfg;
321 /*
322 * If the DMAC has some reset mechanism, then the
323 * client may want to provide pointer to the method.
324 */
325 void (*dmac_reset)(struct pl330_info *pi);
326};
327
328/**
329 * Request Configuration.
330 * The PL330 core does not modify this and uses the last
331 * working configuration if the request doesn't provide any.
332 *
333 * The Client may want to provide this info only for the
334 * first request and a request with new settings.
335 */
336struct pl330_reqcfg {
337 /* Address Incrementing */
338 unsigned dst_inc:1;
339 unsigned src_inc:1;
340
341 /*
342 * For now, the SRC & DST protection levels
343 * and burst size/length are assumed same.
344 */
345 bool nonsecure;
346 bool privileged;
347 bool insnaccess;
348 unsigned brst_len:5;
349 unsigned brst_size:3; /* in power of 2 */
350
351 enum pl330_dstcachectrl dcctl;
352 enum pl330_srccachectrl scctl;
353 enum pl330_byteswap swap;
354 struct pl330_config *pcfg;
355};
356
357/*
358 * One cycle of DMAC operation.
359 * There may be more than one xfer in a request.
360 */
361struct pl330_xfer {
362 u32 src_addr;
363 u32 dst_addr;
364 /* Size to xfer */
365 u32 bytes;
366 /*
367 * Pointer to next xfer in the list.
368 * The last xfer in the req must point to NULL.
369 */
370 struct pl330_xfer *next;
371};
372
373/* The xfer callbacks are made with one of these arguments. */
374enum pl330_op_err {
375 /* The all xfers in the request were success. */
376 PL330_ERR_NONE,
377 /* If req aborted due to global error. */
378 PL330_ERR_ABORT,
379 /* If req failed due to problem with Channel. */
380 PL330_ERR_FAIL,
381};
382
383/* A request defining Scatter-Gather List ending with NULL xfer. */
384struct pl330_req {
385 enum pl330_reqtype rqtype;
386 /* Index of peripheral for the xfer. */
387 unsigned peri:5;
388 /* Unique token for this xfer, set by the client. */
389 void *token;
390 /* Callback to be called after xfer. */
391 void (*xfer_cb)(void *token, enum pl330_op_err err);
392 /* If NULL, req will be done at last set parameters. */
393 struct pl330_reqcfg *cfg;
394 /* Pointer to first xfer in the request. */
395 struct pl330_xfer *x;
396};
397
398/*
399 * To know the status of the channel and DMAC, the client
400 * provides a pointer to this structure. The PL330 core
401 * fills it with current information.
402 */
403struct pl330_chanstatus {
404 /*
405 * If the DMAC engine halted due to some error,
406 * the client should remove-add DMAC.
407 */
408 bool dmac_halted;
409 /*
410 * If channel is halted due to some error,
411 * the client should ABORT/FLUSH and START the channel.
412 */
413 bool faulting;
414 /* Location of last load */
415 u32 src_addr;
416 /* Location of last store */
417 u32 dst_addr;
418 /*
419 * Pointer to the currently active req, NULL if channel is
420 * inactive, even though the requests may be present.
421 */
422 struct pl330_req *top_req;
423 /* Pointer to req waiting second in the queue if any. */
424 struct pl330_req *wait_req;
425};
426
427enum pl330_chan_op {
428 /* Start the channel */
429 PL330_OP_START,
430 /* Abort the active xfer */
431 PL330_OP_ABORT,
432 /* Stop xfer and flush queue */
433 PL330_OP_FLUSH,
434};
435
436struct _xfer_spec {
437 u32 ccr;
438 struct pl330_req *r;
439 struct pl330_xfer *x;
440};
441
442enum dmamov_dst {
443 SAR = 0,
444 CCR,
445 DAR,
446};
447
448enum pl330_dst {
449 SRC = 0,
450 DST,
451};
452
453enum pl330_cond {
454 SINGLE,
455 BURST,
456 ALWAYS,
457};
458
459struct _pl330_req {
460 u32 mc_bus;
461 void *mc_cpu;
462 /* Number of bytes taken to setup MC for the req */
463 u32 mc_len;
464 struct pl330_req *r;
465 /* Hook to attach to DMAC's list of reqs with due callback */
466 struct list_head rqd;
467};
468
469/* ToBeDone for tasklet */
470struct _pl330_tbd {
471 bool reset_dmac;
472 bool reset_mngr;
473 u8 reset_chan;
474};
475
476/* A DMAC Thread */
477struct pl330_thread {
478 u8 id;
479 int ev;
480 /* If the channel is not yet acquired by any client */
481 bool free;
482 /* Parent DMAC */
483 struct pl330_dmac *dmac;
484 /* Only two at a time */
485 struct _pl330_req req[2];
486 /* Index of the last enqueued request */
487 unsigned lstenq;
488 /* Index of the last submitted request or -1 if the DMA is stopped */
489 int req_running;
490};
491
492enum pl330_dmac_state {
493 UNINIT,
494 INIT,
495 DYING,
496};
497
498/* A DMAC */
499struct pl330_dmac {
500 spinlock_t lock;
501 /* Holds list of reqs with due callbacks */
502 struct list_head req_done;
503 /* Pointer to platform specific stuff */
504 struct pl330_info *pinfo;
505 /* Maximum possible events/irqs */
506 int events[32];
507 /* BUS address of MicroCode buffer */
508 u32 mcode_bus;
509 /* CPU address of MicroCode buffer */
510 void *mcode_cpu;
511 /* List of all Channel threads */
512 struct pl330_thread *channels;
513 /* Pointer to the MANAGER thread */
514 struct pl330_thread *manager;
515 /* To handle bad news in interrupt */
516 struct tasklet_struct tasks;
517 struct _pl330_tbd dmac_tbd;
518 /* State of DMAC operation */
519 enum pl330_dmac_state state;
520};
521
26enum desc_status { 522enum desc_status {
27 /* In the DMAC pool */ 523 /* In the DMAC pool */
28 FREE, 524 FREE,
@@ -51,9 +547,6 @@ struct dma_pl330_chan {
51 /* DMA-Engine Channel */ 547 /* DMA-Engine Channel */
52 struct dma_chan chan; 548 struct dma_chan chan;
53 549
54 /* Last completed cookie */
55 dma_cookie_t completed;
56
57 /* List of to be xfered descriptors */ 550 /* List of to be xfered descriptors */
58 struct list_head work_list; 551 struct list_head work_list;
59 552
@@ -117,6 +610,1599 @@ struct dma_pl330_desc {
117 struct dma_pl330_chan *pchan; 610 struct dma_pl330_chan *pchan;
118}; 611};
119 612
613static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
614{
615 if (r && r->xfer_cb)
616 r->xfer_cb(r->token, err);
617}
618
619static inline bool _queue_empty(struct pl330_thread *thrd)
620{
621 return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
622 ? true : false;
623}
624
625static inline bool _queue_full(struct pl330_thread *thrd)
626{
627 return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
628 ? false : true;
629}
630
631static inline bool is_manager(struct pl330_thread *thrd)
632{
633 struct pl330_dmac *pl330 = thrd->dmac;
634
635 /* MANAGER is indexed at the end */
636 if (thrd->id == pl330->pinfo->pcfg.num_chan)
637 return true;
638 else
639 return false;
640}
641
642/* If manager of the thread is in Non-Secure mode */
643static inline bool _manager_ns(struct pl330_thread *thrd)
644{
645 struct pl330_dmac *pl330 = thrd->dmac;
646
647 return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
648}
649
650static inline u32 get_id(struct pl330_info *pi, u32 off)
651{
652 void __iomem *regs = pi->base;
653 u32 id = 0;
654
655 id |= (readb(regs + off + 0x0) << 0);
656 id |= (readb(regs + off + 0x4) << 8);
657 id |= (readb(regs + off + 0x8) << 16);
658 id |= (readb(regs + off + 0xc) << 24);
659
660 return id;
661}
662
663static inline u32 get_revision(u32 periph_id)
664{
665 return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
666}
667
668static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
669 enum pl330_dst da, u16 val)
670{
671 if (dry_run)
672 return SZ_DMAADDH;
673
674 buf[0] = CMD_DMAADDH;
675 buf[0] |= (da << 1);
676 *((u16 *)&buf[1]) = val;
677
678 PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
679 da == 1 ? "DA" : "SA", val);
680
681 return SZ_DMAADDH;
682}
683
684static inline u32 _emit_END(unsigned dry_run, u8 buf[])
685{
686 if (dry_run)
687 return SZ_DMAEND;
688
689 buf[0] = CMD_DMAEND;
690
691 PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");
692
693 return SZ_DMAEND;
694}
695
696static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
697{
698 if (dry_run)
699 return SZ_DMAFLUSHP;
700
701 buf[0] = CMD_DMAFLUSHP;
702
703 peri &= 0x1f;
704 peri <<= 3;
705 buf[1] = peri;
706
707 PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);
708
709 return SZ_DMAFLUSHP;
710}
711
712static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
713{
714 if (dry_run)
715 return SZ_DMALD;
716
717 buf[0] = CMD_DMALD;
718
719 if (cond == SINGLE)
720 buf[0] |= (0 << 1) | (1 << 0);
721 else if (cond == BURST)
722 buf[0] |= (1 << 1) | (1 << 0);
723
724 PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
725 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
726
727 return SZ_DMALD;
728}
729
730static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
731 enum pl330_cond cond, u8 peri)
732{
733 if (dry_run)
734 return SZ_DMALDP;
735
736 buf[0] = CMD_DMALDP;
737
738 if (cond == BURST)
739 buf[0] |= (1 << 1);
740
741 peri &= 0x1f;
742 peri <<= 3;
743 buf[1] = peri;
744
745 PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
746 cond == SINGLE ? 'S' : 'B', peri >> 3);
747
748 return SZ_DMALDP;
749}
750
751static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
752 unsigned loop, u8 cnt)
753{
754 if (dry_run)
755 return SZ_DMALP;
756
757 buf[0] = CMD_DMALP;
758
759 if (loop)
760 buf[0] |= (1 << 1);
761
762 cnt--; /* DMAC increments by 1 internally */
763 buf[1] = cnt;
764
765 PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);
766
767 return SZ_DMALP;
768}
769
770struct _arg_LPEND {
771 enum pl330_cond cond;
772 bool forever;
773 unsigned loop;
774 u8 bjump;
775};
776
777static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
778 const struct _arg_LPEND *arg)
779{
780 enum pl330_cond cond = arg->cond;
781 bool forever = arg->forever;
782 unsigned loop = arg->loop;
783 u8 bjump = arg->bjump;
784
785 if (dry_run)
786 return SZ_DMALPEND;
787
788 buf[0] = CMD_DMALPEND;
789
790 if (loop)
791 buf[0] |= (1 << 2);
792
793 if (!forever)
794 buf[0] |= (1 << 4);
795
796 if (cond == SINGLE)
797 buf[0] |= (0 << 1) | (1 << 0);
798 else if (cond == BURST)
799 buf[0] |= (1 << 1) | (1 << 0);
800
801 buf[1] = bjump;
802
803 PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
804 forever ? "FE" : "END",
805 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
806 loop ? '1' : '0',
807 bjump);
808
809 return SZ_DMALPEND;
810}
811
812static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
813{
814 if (dry_run)
815 return SZ_DMAKILL;
816
817 buf[0] = CMD_DMAKILL;
818
819 return SZ_DMAKILL;
820}
821
822static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
823 enum dmamov_dst dst, u32 val)
824{
825 if (dry_run)
826 return SZ_DMAMOV;
827
828 buf[0] = CMD_DMAMOV;
829 buf[1] = dst;
830 *((u32 *)&buf[2]) = val;
831
832 PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
833 dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
834
835 return SZ_DMAMOV;
836}
837
838static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
839{
840 if (dry_run)
841 return SZ_DMANOP;
842
843 buf[0] = CMD_DMANOP;
844
845 PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");
846
847 return SZ_DMANOP;
848}
849
850static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
851{
852 if (dry_run)
853 return SZ_DMARMB;
854
855 buf[0] = CMD_DMARMB;
856
857 PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");
858
859 return SZ_DMARMB;
860}
861
862static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
863{
864 if (dry_run)
865 return SZ_DMASEV;
866
867 buf[0] = CMD_DMASEV;
868
869 ev &= 0x1f;
870 ev <<= 3;
871 buf[1] = ev;
872
873 PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);
874
875 return SZ_DMASEV;
876}
877
878static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
879{
880 if (dry_run)
881 return SZ_DMAST;
882
883 buf[0] = CMD_DMAST;
884
885 if (cond == SINGLE)
886 buf[0] |= (0 << 1) | (1 << 0);
887 else if (cond == BURST)
888 buf[0] |= (1 << 1) | (1 << 0);
889
890 PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
891 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
892
893 return SZ_DMAST;
894}
895
896static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
897 enum pl330_cond cond, u8 peri)
898{
899 if (dry_run)
900 return SZ_DMASTP;
901
902 buf[0] = CMD_DMASTP;
903
904 if (cond == BURST)
905 buf[0] |= (1 << 1);
906
907 peri &= 0x1f;
908 peri <<= 3;
909 buf[1] = peri;
910
911 PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
912 cond == SINGLE ? 'S' : 'B', peri >> 3);
913
914 return SZ_DMASTP;
915}
916
917static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
918{
919 if (dry_run)
920 return SZ_DMASTZ;
921
922 buf[0] = CMD_DMASTZ;
923
924 PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");
925
926 return SZ_DMASTZ;
927}
928
929static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
930 unsigned invalidate)
931{
932 if (dry_run)
933 return SZ_DMAWFE;
934
935 buf[0] = CMD_DMAWFE;
936
937 ev &= 0x1f;
938 ev <<= 3;
939 buf[1] = ev;
940
941 if (invalidate)
942 buf[1] |= (1 << 1);
943
944 PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
945 ev >> 3, invalidate ? ", I" : "");
946
947 return SZ_DMAWFE;
948}
949
950static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
951 enum pl330_cond cond, u8 peri)
952{
953 if (dry_run)
954 return SZ_DMAWFP;
955
956 buf[0] = CMD_DMAWFP;
957
958 if (cond == SINGLE)
959 buf[0] |= (0 << 1) | (0 << 0);
960 else if (cond == BURST)
961 buf[0] |= (1 << 1) | (0 << 0);
962 else
963 buf[0] |= (0 << 1) | (1 << 0);
964
965 peri &= 0x1f;
966 peri <<= 3;
967 buf[1] = peri;
968
969 PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
970 cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);
971
972 return SZ_DMAWFP;
973}
974
975static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
976{
977 if (dry_run)
978 return SZ_DMAWMB;
979
980 buf[0] = CMD_DMAWMB;
981
982 PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");
983
984 return SZ_DMAWMB;
985}
986
/* Arguments for the DMAGO instruction */
struct _arg_GO {
	u8 chan;	/* channel thread the microcode should run on */
	u32 addr;	/* bus address of the microcode to execute */
	unsigned ns;	/* non-zero => execute in non-secure state */
};
992
/* Emit a DMAGO insn from *arg; returns the insn size in bytes. */
static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);	/* bit 1 selects non-secure execution */

	buf[1] = chan & 0x7;

	/* 32-bit immediate at buf[2]: buf is only byte-aligned, so this
	 * is an unaligned store — assumed tolerated by the CPUs this
	 * driver targets; TODO confirm for new ports */
	*((u32 *)&buf[2]) = addr;

	return SZ_DMAGO;
}
1012
/* Rough msec -> busy-wait loop count conversion. 't' is parenthesized
 * so that expression arguments (e.g. msecs_to_loops(a + b)) expand
 * correctly — the original macro multiplied only the last term. */
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * (t))
1014
1015/* Returns Time-Out */
1016static bool _until_dmac_idle(struct pl330_thread *thrd)
1017{
1018 void __iomem *regs = thrd->dmac->pinfo->base;
1019 unsigned long loops = msecs_to_loops(5);
1020
1021 do {
1022 /* Until Manager is Idle */
1023 if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
1024 break;
1025
1026 cpu_relax();
1027 } while (--loops);
1028
1029 if (!loops)
1030 return true;
1031
1032 return false;
1033}
1034
/*
 * Feed a 6-byte insn to the DMAC through its debug registers.
 * Targets the manager thread when 'as_manager', else channel thrd->id.
 */
static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	/* First two insn bytes go into DBGINST0 bits [31:16] */
	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);	/* debug-thread bit: channel, not manager */
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	/* Remaining four insn bytes; unaligned read of insn[2..5] */
	val = *((u32 *)&insn[2]);
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
		return;
	}

	/* Get going */
	writel(0, regs + DBGCMD);
}
1060
1061/*
1062 * Mark a _pl330_req as free.
1063 * We do it by writing DMAEND as the first instruction
1064 * because no valid request is going to have DMAEND as
1065 * its first instruction to execute.
1066 */
1067static void mark_free(struct pl330_thread *thrd, int idx)
1068{
1069 struct _pl330_req *req = &thrd->req[idx];
1070
1071 _emit_END(0, req->mc_cpu);
1072 req->mc_len = 0;
1073
1074 thrd->req_running = -1;
1075}
1076
1077static inline u32 _state(struct pl330_thread *thrd)
1078{
1079 void __iomem *regs = thrd->dmac->pinfo->base;
1080 u32 val;
1081
1082 if (is_manager(thrd))
1083 val = readl(regs + DS) & 0xf;
1084 else
1085 val = readl(regs + CS(thrd->id)) & 0xf;
1086
1087 switch (val) {
1088 case DS_ST_STOP:
1089 return PL330_STATE_STOPPED;
1090 case DS_ST_EXEC:
1091 return PL330_STATE_EXECUTING;
1092 case DS_ST_CMISS:
1093 return PL330_STATE_CACHEMISS;
1094 case DS_ST_UPDTPC:
1095 return PL330_STATE_UPDTPC;
1096 case DS_ST_WFE:
1097 return PL330_STATE_WFE;
1098 case DS_ST_FAULT:
1099 return PL330_STATE_FAULTING;
1100 case DS_ST_ATBRR:
1101 if (is_manager(thrd))
1102 return PL330_STATE_INVALID;
1103 else
1104 return PL330_STATE_ATBARRIER;
1105 case DS_ST_QBUSY:
1106 if (is_manager(thrd))
1107 return PL330_STATE_INVALID;
1108 else
1109 return PL330_STATE_QUEUEBUSY;
1110 case DS_ST_WFP:
1111 if (is_manager(thrd))
1112 return PL330_STATE_INVALID;
1113 else
1114 return PL330_STATE_WFP;
1115 case DS_ST_KILL:
1116 if (is_manager(thrd))
1117 return PL330_STATE_INVALID;
1118 else
1119 return PL330_STATE_KILLING;
1120 case DS_ST_CMPLT:
1121 if (is_manager(thrd))
1122 return PL330_STATE_INVALID;
1123 else
1124 return PL330_STATE_COMPLETING;
1125 case DS_ST_FLTCMP:
1126 if (is_manager(thrd))
1127 return PL330_STATE_INVALID;
1128 else
1129 return PL330_STATE_FAULT_COMPLETING;
1130 default:
1131 return PL330_STATE_INVALID;
1132 }
1133}
1134
/* Forcibly stop 'thrd' by issuing DMAKILL through the debug port. */
static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	/* Let a fault-completing thread settle before acting on it */
	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
}
1156
/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	struct _pl330_req *req;
	struct pl330_req *r;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	/* Prefer the slot enqueued before the most recent one (FIFO order) */
	idx = 1 - thrd->lstenq;
	if (!IS_FREE(&thrd->req[idx]))
		req = &thrd->req[idx];
	else {
		idx = thrd->lstenq;
		if (!IS_FREE(&thrd->req[idx]))
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req || !req->r)
		return true;

	r = req->r;

	/* Security state: from the request cfg if given, else from
	 * the channel's current status register */
	if (r->cfg)
		ns = r->cfg->nonsecure ? 1 : 0;
	else if (readl(regs + CS(thrd->id)) & CS_CNS)
		ns = 1;
	else
		ns = 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}
1216
/*
 * Walk 'thrd' down from whatever state it is in to STOPPED, then
 * trigger the next queued request. The case fallthroughs below are
 * intentional — each state funnels into the handling of the next.
 */
static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)

		/* fall through */
	case PL330_STATE_FAULTING:
		_stop(thrd);

		/* fall through */
	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)

		/* fall through */
	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	/* Already busy doing useful work — nothing to start */
	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}
1249
1250static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
1251 const struct _xfer_spec *pxs, int cyc)
1252{
1253 int off = 0;
1254 struct pl330_config *pcfg = pxs->r->cfg->pcfg;
1255
1256 /* check lock-up free version */
1257 if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
1258 while (cyc--) {
1259 off += _emit_LD(dry_run, &buf[off], ALWAYS);
1260 off += _emit_ST(dry_run, &buf[off], ALWAYS);
1261 }
1262 } else {
1263 while (cyc--) {
1264 off += _emit_LD(dry_run, &buf[off], ALWAYS);
1265 off += _emit_RMB(dry_run, &buf[off]);
1266 off += _emit_ST(dry_run, &buf[off], ALWAYS);
1267 off += _emit_WMB(dry_run, &buf[off]);
1268 }
1269 }
1270
1271 return off;
1272}
1273
1274static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
1275 const struct _xfer_spec *pxs, int cyc)
1276{
1277 int off = 0;
1278
1279 while (cyc--) {
1280 off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
1281 off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
1282 off += _emit_ST(dry_run, &buf[off], ALWAYS);
1283 off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
1284 }
1285
1286 return off;
1287}
1288
1289static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
1290 const struct _xfer_spec *pxs, int cyc)
1291{
1292 int off = 0;
1293
1294 while (cyc--) {
1295 off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
1296 off += _emit_LD(dry_run, &buf[off], ALWAYS);
1297 off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
1298 off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
1299 }
1300
1301 return off;
1302}
1303
1304static int _bursts(unsigned dry_run, u8 buf[],
1305 const struct _xfer_spec *pxs, int cyc)
1306{
1307 int off = 0;
1308
1309 switch (pxs->r->rqtype) {
1310 case MEMTODEV:
1311 off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
1312 break;
1313 case DEVTOMEM:
1314 off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
1315 break;
1316 case MEMTOMEM:
1317 off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
1318 break;
1319 default:
1320 off += 0x40000000; /* Scare off the Client */
1321 break;
1322 }
1323
1324 return off;
1325}
1326
/* Returns bytes consumed and updates bursts */
static inline int _loop(unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		/* Both loop counters saturated; unroll the remainder */
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		/* Inner counter full, outer takes the quotient */
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		/* Single loop suffices */
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	/* Dry-emit once to learn insn sizes for the jump math below */
	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	/* Using the outer loop doubles the LP/LPEND overhead */
	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	/* Outer loop, only when more than 256 bursts are left */
	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;	/* backward-jump target for outer DMALPEND */
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;	/* backward-jump target for inner DMALPEND */

	off += _bursts(dry_run, &buf[off], pxs, cyc);

	/* Close the inner loop */
	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	/* Close the outer loop, if used */
	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	/* Report how many bursts this loop construct actually covers */
	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
1405
1406static inline int _setup_loops(unsigned dry_run, u8 buf[],
1407 const struct _xfer_spec *pxs)
1408{
1409 struct pl330_xfer *x = pxs->x;
1410 u32 ccr = pxs->ccr;
1411 unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
1412 int off = 0;
1413
1414 while (bursts) {
1415 c = bursts;
1416 off += _loop(dry_run, &buf[off], &c, pxs);
1417 bursts -= c;
1418 }
1419
1420 return off;
1421}
1422
1423static inline int _setup_xfer(unsigned dry_run, u8 buf[],
1424 const struct _xfer_spec *pxs)
1425{
1426 struct pl330_xfer *x = pxs->x;
1427 int off = 0;
1428
1429 /* DMAMOV SAR, x->src_addr */
1430 off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
1431 /* DMAMOV DAR, x->dst_addr */
1432 off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
1433
1434 /* Setup Loop(s) */
1435 off += _setup_loops(dry_run, &buf[off], pxs);
1436
1437 return off;
1438}
1439
/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req,
 * or -EINVAL if an xfer length isn't a multiple of the burst size.
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
		unsigned index, struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	/* Emit address setup plus loops for every xfer unit in the chain */
	x = pxs->r->x;
	do {
		/* Error if xfer length is not aligned at burst size */
		if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
			return -EINVAL;

		pxs->x = x;
		off += _setup_xfer(dry_run, &buf[off], pxs);

		x = x->next;
	} while (x);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}
1476
1477static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
1478{
1479 u32 ccr = 0;
1480
1481 if (rqc->src_inc)
1482 ccr |= CC_SRCINC;
1483
1484 if (rqc->dst_inc)
1485 ccr |= CC_DSTINC;
1486
1487 /* We set same protection levels for Src and DST for now */
1488 if (rqc->privileged)
1489 ccr |= CC_SRCPRI | CC_DSTPRI;
1490 if (rqc->nonsecure)
1491 ccr |= CC_SRCNS | CC_DSTNS;
1492 if (rqc->insnaccess)
1493 ccr |= CC_SRCIA | CC_DSTIA;
1494
1495 ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
1496 ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);
1497
1498 ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
1499 ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);
1500
1501 ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
1502 ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);
1503
1504 ccr |= (rqc->swap << CC_SWAP_SHFT);
1505
1506 return ccr;
1507}
1508
1509static inline bool _is_valid(u32 ccr)
1510{
1511 enum pl330_dstcachectrl dcctl;
1512 enum pl330_srccachectrl scctl;
1513
1514 dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
1515 scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;
1516
1517 if (dcctl == DINVALID1 || dcctl == DINVALID2
1518 || scctl == SINVALID1 || scctl == SINVALID2)
1519 return false;
1520 else
1521 return true;
1522}
1523
1524/*
1525 * Submit a list of xfers after which the client wants notification.
1526 * Client is not notified after each xfer unit, just once after all
1527 * xfer units are done or some error occurs.
1528 */
1529static int pl330_submit_req(void *ch_id, struct pl330_req *r)
1530{
1531 struct pl330_thread *thrd = ch_id;
1532 struct pl330_dmac *pl330;
1533 struct pl330_info *pi;
1534 struct _xfer_spec xs;
1535 unsigned long flags;
1536 void __iomem *regs;
1537 unsigned idx;
1538 u32 ccr;
1539 int ret = 0;
1540
1541 /* No Req or Unacquired Channel or DMAC */
1542 if (!r || !thrd || thrd->free)
1543 return -EINVAL;
1544
1545 pl330 = thrd->dmac;
1546 pi = pl330->pinfo;
1547 regs = pi->base;
1548
1549 if (pl330->state == DYING
1550 || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
1551 dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
1552 __func__, __LINE__);
1553 return -EAGAIN;
1554 }
1555
1556 /* If request for non-existing peripheral */
1557 if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
1558 dev_info(thrd->dmac->pinfo->dev,
1559 "%s:%d Invalid peripheral(%u)!\n",
1560 __func__, __LINE__, r->peri);
1561 return -EINVAL;
1562 }
1563
1564 spin_lock_irqsave(&pl330->lock, flags);
1565
1566 if (_queue_full(thrd)) {
1567 ret = -EAGAIN;
1568 goto xfer_exit;
1569 }
1570
1571 /* Prefer Secure Channel */
1572 if (!_manager_ns(thrd))
1573 r->cfg->nonsecure = 0;
1574 else
1575 r->cfg->nonsecure = 1;
1576
1577 /* Use last settings, if not provided */
1578 if (r->cfg)
1579 ccr = _prepare_ccr(r->cfg);
1580 else
1581 ccr = readl(regs + CC(thrd->id));
1582
1583 /* If this req doesn't have valid xfer settings */
1584 if (!_is_valid(ccr)) {
1585 ret = -EINVAL;
1586 dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
1587 __func__, __LINE__, ccr);
1588 goto xfer_exit;
1589 }
1590
1591 idx = IS_FREE(&thrd->req[0]) ? 0 : 1;
1592
1593 xs.ccr = ccr;
1594 xs.r = r;
1595
1596 /* First dry run to check if req is acceptable */
1597 ret = _setup_req(1, thrd, idx, &xs);
1598 if (ret < 0)
1599 goto xfer_exit;
1600
1601 if (ret > pi->mcbufsz / 2) {
1602 dev_info(thrd->dmac->pinfo->dev,
1603 "%s:%d Trying increasing mcbufsz\n",
1604 __func__, __LINE__);
1605 ret = -ENOMEM;
1606 goto xfer_exit;
1607 }
1608
1609 /* Hook the request */
1610 thrd->lstenq = idx;
1611 thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
1612 thrd->req[idx].r = r;
1613
1614 ret = 0;
1615
1616xfer_exit:
1617 spin_unlock_irqrestore(&pl330->lock, flags);
1618
1619 return ret;
1620}
1621
/* Tasklet: handle DMAC/manager/channel resets flagged by pl330_update(). */
static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	struct pl330_info *pi = pl330->pinfo;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pi->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pi->base;
			enum pl330_op_err err;

			_stop(thrd);

			/* FSC bit set => the channel actually faulted */
			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			/* Drop the lock: client callbacks may re-enter us */
			spin_unlock_irqrestore(&pl330->lock, flags);

			_callback(thrd->req[1 - thrd->lstenq].r, err);
			_callback(thrd->req[thrd->lstenq].r, err);

			spin_lock_irqsave(&pl330->lock, flags);

			/* Both request slots are now dead — recycle them */
			thrd->req[0].r = NULL;
			thrd->req[1].r = NULL;
			mark_free(thrd, 0);
			mark_free(thrd, 1);

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return;
}
1683
/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(const struct pl330_info *pi)
{
	struct _pl330_req *rqdone;
	struct pl330_dmac *pl330;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	if (!pi || !pi->pl330_data)
		return 0;

	regs = pi->base;
	pl330 = pi->pl330_data;

	spin_lock_irqsave(&pl330->lock, flags);

	/* Manager fault? Flag a manager reset for the tasklet */
	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	/* Collect faulting channels, stop each, flag them for reset */
	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pi->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pi->dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened i.e, thread notified */
	val = readl(regs + ES);
	if (pi->pcfg.num_events < 32
			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
		/* An event outside the valid range fired: DMAC gone bad */
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			/* Map event back to the channel that owns it */
			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the completed request from its slot */
			rqdone = &thrd->req[active];
			mark_free(thrd, active);

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&rqdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	while (!list_empty(&pl330->req_done)) {
		struct pl330_req *r;

		rqdone = container_of(pl330->req_done.next,
					struct _pl330_req, rqd);

		list_del_init(&rqdone->rqd);

		/* Detach the req */
		r = rqdone->r;
		rqdone->r = NULL;

		/* Drop the lock: the client callback may re-enter the API */
		spin_unlock_irqrestore(&pl330->lock, flags);
		_callback(r, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		/* Defer the heavy reset work to the tasklet */
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}
1795
/* Perform a control operation (flush/abort/start) on channel 'ch_id'. */
static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int ret = 0, active;

	if (!thrd || thrd->free || thrd->dmac->state == DYING)
		return -EINVAL;

	pl330 = thrd->dmac;
	active = thrd->req_running;

	spin_lock_irqsave(&pl330->lock, flags);

	switch (op) {
	case PL330_OP_FLUSH:
		/* Make sure the channel is stopped */
		_stop(thrd);

		/* Discard both queued requests */
		thrd->req[0].r = NULL;
		thrd->req[1].r = NULL;
		mark_free(thrd, 0);
		mark_free(thrd, 1);
		break;

	case PL330_OP_ABORT:
		/* Make sure the channel is stopped */
		_stop(thrd);

		/* ABORT is only for the active req */
		if (active == -1)
			break;

		thrd->req[active].r = NULL;
		mark_free(thrd, active);

		/* Start the next */
		/* fall through */
	case PL330_OP_START:
		if ((active == -1) && !_start(thrd))
			ret = -EIO;
		break;

	default:
		ret = -EINVAL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
	return ret;
}
1846
1847/* Reserve an event */
1848static inline int _alloc_event(struct pl330_thread *thrd)
1849{
1850 struct pl330_dmac *pl330 = thrd->dmac;
1851 struct pl330_info *pi = pl330->pinfo;
1852 int ev;
1853
1854 for (ev = 0; ev < pi->pcfg.num_events; ev++)
1855 if (pl330->events[ev] == -1) {
1856 pl330->events[ev] = thrd->id;
1857 return ev;
1858 }
1859
1860 return -1;
1861}
1862
/* True if channel 'i' has a non-secure irq per the CR3 snapshot (irq_ns) */
static bool _chan_ns(const struct pl330_info *pi, int i)
{
	return pi->pcfg.irq_ns & (1 << i);
}
1867
/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static void *pl330_request_channel(const struct pl330_info *pi)
{
	struct pl330_thread *thrd = NULL;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int chans, i;

	if (!pi || !pi->pl330_data)
		return NULL;

	pl330 = pi->pl330_data;

	if (pl330->state == DYING)
		return NULL;

	chans = pi->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		/* Take a free thread; a non-secure manager may only use
		 * channels whose irq is non-secure too */
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pi, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].r = NULL;
				mark_free(thrd, 0);
				thrd->req[1].r = NULL;
				mark_free(thrd, 1);
				break;	/* 'thrd' holds the claimed channel */
			}
		}
		/* Not claimed — keep NULL unless the break above fired */
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}
1912
1913/* Release an event */
1914static inline void _free_event(struct pl330_thread *thrd, int ev)
1915{
1916 struct pl330_dmac *pl330 = thrd->dmac;
1917 struct pl330_info *pi = pl330->pinfo;
1918
1919 /* If the event is valid and was held by the thread */
1920 if (ev >= 0 && ev < pi->pcfg.num_events
1921 && pl330->events[ev] == thrd->id)
1922 pl330->events[ev] = -1;
1923}
1924
/* Return a channel to the pool, aborting anything still queued on it. */
static void pl330_release_channel(void *ch_id)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	/* Notify the owners of both request slots */
	_callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
	_callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}
1946
/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver the make best use of the DMAC
 */
static void read_dmac_config(struct pl330_info *pi)
{
	void __iomem *regs = pi->base;
	u32 val;

	/* Data bus width in bits (register encodes log2 of bytes) */
	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pi->pcfg.data_bus_width = 8 * (1 << val);

	/* Data buffer depth (register encodes depth - 1) */
	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pi->pcfg.data_buf_dep = val + 1;

	/* Number of channel threads (encoded as count - 1) */
	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pi->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		/* Peripheral interface present; CR4 holds its NS map */
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pi->pcfg.num_peri = val;
		pi->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pi->pcfg.num_peri = 0;
	}

	/* Manager thread security state at boot */
	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pi->pcfg.mode |= DMAC_MODE_NS;
	else
		pi->pcfg.mode &= ~DMAC_MODE_NS;

	/* Number of events/interrupts (encoded as count - 1) */
	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pi->pcfg.num_events = val;

	/* Per-event interrupt security map */
	pi->pcfg.irq_ns = readl(regs + CR3);

	pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
	pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
}
1994
/*
 * Point the two request slots of 'thrd' at the two halves of its
 * per-channel microcode buffer and mark both slots free.
 */
static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	/* Slot 0: first half of this channel's mcbufsz chunk */
	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].r = NULL;
	mark_free(thrd, 0);

	/* Slot 1: second half */
	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pi->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pi->mcbufsz / 2;
	thrd->req[1].r = NULL;
	mark_free(thrd, 1);
}
2014
2015static int dmac_alloc_threads(struct pl330_dmac *pl330)
2016{
2017 struct pl330_info *pi = pl330->pinfo;
2018 int chans = pi->pcfg.num_chan;
2019 struct pl330_thread *thrd;
2020 int i;
2021
2022 /* Allocate 1 Manager and 'chans' Channel threads */
2023 pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
2024 GFP_KERNEL);
2025 if (!pl330->channels)
2026 return -ENOMEM;
2027
2028 /* Init Channel threads */
2029 for (i = 0; i < chans; i++) {
2030 thrd = &pl330->channels[i];
2031 thrd->id = i;
2032 thrd->dmac = pl330;
2033 _reset_thread(thrd);
2034 thrd->free = true;
2035 }
2036
2037 /* MANAGER is indexed at the end */
2038 thrd = &pl330->channels[chans];
2039 thrd->id = chans;
2040 thrd->dmac = pl330;
2041 thrd->free = false;
2042 pl330->manager = thrd;
2043
2044 return 0;
2045}
2046
2047static int dmac_alloc_resources(struct pl330_dmac *pl330)
2048{
2049 struct pl330_info *pi = pl330->pinfo;
2050 int chans = pi->pcfg.num_chan;
2051 int ret;
2052
2053 /*
2054 * Alloc MicroCode buffer for 'chans' Channel threads.
2055 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
2056 */
2057 pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
2058 chans * pi->mcbufsz,
2059 &pl330->mcode_bus, GFP_KERNEL);
2060 if (!pl330->mcode_cpu) {
2061 dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
2062 __func__, __LINE__);
2063 return -ENOMEM;
2064 }
2065
2066 ret = dmac_alloc_threads(pl330);
2067 if (ret) {
2068 dev_err(pi->dev, "%s:%d Can't to create channels for DMAC!\n",
2069 __func__, __LINE__);
2070 dma_free_coherent(pi->dev,
2071 chans * pi->mcbufsz,
2072 pl330->mcode_cpu, pl330->mcode_bus);
2073 return ret;
2074 }
2075
2076 return 0;
2077}
2078
/*
 * Probe-time setup: validate the HW, read its configuration and
 * allocate/initialize the pl330_dmac bound to 'pi'.
 * Returns 0 or a negative errno.
 */
static int pl330_add(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;
	void __iomem *regs;
	int i, ret;

	if (!pi || !pi->dev)
		return -EINVAL;

	/* If already added */
	if (pi->pl330_data)
		return -EINVAL;

	/*
	 * If the SoC can perform reset on the DMAC, then do it
	 * before reading its configuration.
	 */
	if (pi->dmac_reset)
		pi->dmac_reset(pi);

	regs = pi->base;

	/* Check if we can handle this DMAC */
	if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
	   || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
		dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
			get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pi);

	/* Completion signalling relies on events — none means unusable */
	if (pi->pcfg.num_events == 0) {
		dev_err(pi->dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
	if (!pl330) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	/* Assign the info structure and private data */
	pl330->pinfo = pi;
	pi->pl330_data = pl330;

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pi->mcbufsz)
		pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pi->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pi->dev, "Unable to create channels for DMAC\n");
		kfree(pl330);
		return ret;
	}

	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

	return 0;
}
2155
2156static int dmac_free_threads(struct pl330_dmac *pl330)
2157{
2158 struct pl330_info *pi = pl330->pinfo;
2159 int chans = pi->pcfg.num_chan;
2160 struct pl330_thread *thrd;
2161 int i;
2162
2163 /* Release Channel threads */
2164 for (i = 0; i < chans; i++) {
2165 thrd = &pl330->channels[i];
2166 pl330_release_channel((void *)thrd);
2167 }
2168
2169 /* Free memory */
2170 kfree(pl330->channels);
2171
2172 return 0;
2173}
2174
/* Undo dmac_alloc_resources(): threads first, then the mcode buffer. */
static void dmac_free_resources(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;

	dmac_free_threads(pl330);

	dma_free_coherent(pi->dev, chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
}
2185
/* Tear down everything pl330_add() set up for 'pi'. */
static void pl330_del(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;

	if (!pi || !pi->pl330_data)
		return;

	pl330 = pi->pl330_data;

	pl330->state = UNINIT;

	/* Ensure the reset tasklet can't run anymore */
	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_resources(pl330);

	kfree(pl330);
	pi->pl330_data = NULL;
}
2205
120/* forward declaration */ 2206/* forward declaration */
121static struct amba_driver pl330_driver; 2207static struct amba_driver pl330_driver;
122 2208
@@ -234,7 +2320,7 @@ static void pl330_tasklet(unsigned long data)
234 /* Pick up ripe tomatoes */ 2320 /* Pick up ripe tomatoes */
235 list_for_each_entry_safe(desc, _dt, &pch->work_list, node) 2321 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
236 if (desc->status == DONE) { 2322 if (desc->status == DONE) {
237 pch->completed = desc->txd.cookie; 2323 dma_cookie_complete(&desc->txd);
238 list_move_tail(&desc->node, &list); 2324 list_move_tail(&desc->node, &list);
239 } 2325 }
240 2326
@@ -305,7 +2391,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
305 2391
306 spin_lock_irqsave(&pch->lock, flags); 2392 spin_lock_irqsave(&pch->lock, flags);
307 2393
308 pch->completed = chan->cookie = 1; 2394 dma_cookie_init(chan);
309 pch->cyclic = false; 2395 pch->cyclic = false;
310 2396
311 pch->pl330_chid = pl330_request_channel(&pdmac->pif); 2397 pch->pl330_chid = pl330_request_channel(&pdmac->pif);
@@ -340,7 +2426,6 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
340 /* Mark all desc done */ 2426 /* Mark all desc done */
341 list_for_each_entry_safe(desc, _dt, &pch->work_list , node) { 2427 list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
342 desc->status = DONE; 2428 desc->status = DONE;
343 pch->completed = desc->txd.cookie;
344 list_move_tail(&desc->node, &list); 2429 list_move_tail(&desc->node, &list);
345 } 2430 }
346 2431
@@ -396,18 +2481,7 @@ static enum dma_status
396pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 2481pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
397 struct dma_tx_state *txstate) 2482 struct dma_tx_state *txstate)
398{ 2483{
399 struct dma_pl330_chan *pch = to_pchan(chan); 2484 return dma_cookie_status(chan, cookie, txstate);
400 dma_cookie_t last_done, last_used;
401 int ret;
402
403 last_done = pch->completed;
404 last_used = chan->cookie;
405
406 ret = dma_async_is_complete(cookie, last_done, last_used);
407
408 dma_set_tx_state(txstate, last_done, last_used, 0);
409
410 return ret;
411} 2485}
412 2486
413static void pl330_issue_pending(struct dma_chan *chan) 2487static void pl330_issue_pending(struct dma_chan *chan)
@@ -430,26 +2504,16 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
430 spin_lock_irqsave(&pch->lock, flags); 2504 spin_lock_irqsave(&pch->lock, flags);
431 2505
432 /* Assign cookies to all nodes */ 2506 /* Assign cookies to all nodes */
433 cookie = tx->chan->cookie;
434
435 while (!list_empty(&last->node)) { 2507 while (!list_empty(&last->node)) {
436 desc = list_entry(last->node.next, struct dma_pl330_desc, node); 2508 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
437 2509
438 if (++cookie < 0) 2510 dma_cookie_assign(&desc->txd);
439 cookie = 1;
440 desc->txd.cookie = cookie;
441 2511
442 list_move_tail(&desc->node, &pch->work_list); 2512 list_move_tail(&desc->node, &pch->work_list);
443 } 2513 }
444 2514
445 if (++cookie < 0) 2515 cookie = dma_cookie_assign(&last->txd);
446 cookie = 1;
447 last->txd.cookie = cookie;
448
449 list_add_tail(&last->node, &pch->work_list); 2516 list_add_tail(&last->node, &pch->work_list);
450
451 tx->chan->cookie = cookie;
452
453 spin_unlock_irqrestore(&pch->lock, flags); 2517 spin_unlock_irqrestore(&pch->lock, flags);
454 2518
455 return cookie; 2519 return cookie;
@@ -553,6 +2617,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
553 async_tx_ack(&desc->txd); 2617 async_tx_ack(&desc->txd);
554 2618
555 desc->req.peri = peri_id ? pch->chan.chan_id : 0; 2619 desc->req.peri = peri_id ? pch->chan.chan_id : 0;
2620 desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;
556 2621
557 dma_async_tx_descriptor_init(&desc->txd, &pch->chan); 2622 dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
558 2623
@@ -621,7 +2686,8 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
621 2686
622static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( 2687static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
623 struct dma_chan *chan, dma_addr_t dma_addr, size_t len, 2688 struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
624 size_t period_len, enum dma_transfer_direction direction) 2689 size_t period_len, enum dma_transfer_direction direction,
2690 void *context)
625{ 2691{
626 struct dma_pl330_desc *desc; 2692 struct dma_pl330_desc *desc;
627 struct dma_pl330_chan *pch = to_pchan(chan); 2693 struct dma_pl330_chan *pch = to_pchan(chan);
@@ -711,7 +2777,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
711static struct dma_async_tx_descriptor * 2777static struct dma_async_tx_descriptor *
712pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 2778pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
713 unsigned int sg_len, enum dma_transfer_direction direction, 2779 unsigned int sg_len, enum dma_transfer_direction direction,
714 unsigned long flg) 2780 unsigned long flg, void *context)
715{ 2781{
716 struct dma_pl330_desc *first, *desc = NULL; 2782 struct dma_pl330_desc *first, *desc = NULL;
717 struct dma_pl330_chan *pch = to_pchan(chan); 2783 struct dma_pl330_chan *pch = to_pchan(chan);
@@ -829,7 +2895,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
829 if (IS_ERR(pdmac->clk)) { 2895 if (IS_ERR(pdmac->clk)) {
830 dev_err(&adev->dev, "Cannot get operation clock.\n"); 2896 dev_err(&adev->dev, "Cannot get operation clock.\n");
831 ret = -EINVAL; 2897 ret = -EINVAL;
832 goto probe_err1; 2898 goto probe_err2;
833 } 2899 }
834 2900
835 amba_set_drvdata(adev, pdmac); 2901 amba_set_drvdata(adev, pdmac);
@@ -843,11 +2909,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
843 ret = request_irq(irq, pl330_irq_handler, 0, 2909 ret = request_irq(irq, pl330_irq_handler, 0,
844 dev_name(&adev->dev), pi); 2910 dev_name(&adev->dev), pi);
845 if (ret) 2911 if (ret)
846 goto probe_err2; 2912 goto probe_err3;
847 2913
848 ret = pl330_add(pi); 2914 ret = pl330_add(pi);
849 if (ret) 2915 if (ret)
850 goto probe_err3; 2916 goto probe_err4;
851 2917
852 INIT_LIST_HEAD(&pdmac->desc_pool); 2918 INIT_LIST_HEAD(&pdmac->desc_pool);
853 spin_lock_init(&pdmac->pool_lock); 2919 spin_lock_init(&pdmac->pool_lock);
@@ -904,7 +2970,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
904 ret = dma_async_device_register(pd); 2970 ret = dma_async_device_register(pd);
905 if (ret) { 2971 if (ret) {
906 dev_err(&adev->dev, "unable to register DMAC\n"); 2972 dev_err(&adev->dev, "unable to register DMAC\n");
907 goto probe_err4; 2973 goto probe_err5;
908 } 2974 }
909 2975
910 dev_info(&adev->dev, 2976 dev_info(&adev->dev,
@@ -917,10 +2983,15 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
917 2983
918 return 0; 2984 return 0;
919 2985
920probe_err4: 2986probe_err5:
921 pl330_del(pi); 2987 pl330_del(pi);
922probe_err3: 2988probe_err4:
923 free_irq(irq, pi); 2989 free_irq(irq, pi);
2990probe_err3:
2991#ifndef CONFIG_PM_RUNTIME
2992 clk_disable(pdmac->clk);
2993#endif
2994 clk_put(pdmac->clk);
924probe_err2: 2995probe_err2:
925 iounmap(pi->base); 2996 iounmap(pi->base);
926probe_err1: 2997probe_err1:
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index fc457a7e8832..ced98826684a 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -46,6 +46,7 @@
46#include <asm/dcr.h> 46#include <asm/dcr.h>
47#include <asm/dcr-regs.h> 47#include <asm/dcr-regs.h>
48#include "adma.h" 48#include "adma.h"
49#include "../dmaengine.h"
49 50
50enum ppc_adma_init_code { 51enum ppc_adma_init_code {
51 PPC_ADMA_INIT_OK = 0, 52 PPC_ADMA_INIT_OK = 0,
@@ -1930,7 +1931,7 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
1930 if (end_of_chain && slot_cnt) { 1931 if (end_of_chain && slot_cnt) {
1931 /* Should wait for ZeroSum completion */ 1932 /* Should wait for ZeroSum completion */
1932 if (cookie > 0) 1933 if (cookie > 0)
1933 chan->completed_cookie = cookie; 1934 chan->common.completed_cookie = cookie;
1934 return; 1935 return;
1935 } 1936 }
1936 1937
@@ -1960,7 +1961,7 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
1960 BUG_ON(!seen_current); 1961 BUG_ON(!seen_current);
1961 1962
1962 if (cookie > 0) { 1963 if (cookie > 0) {
1963 chan->completed_cookie = cookie; 1964 chan->common.completed_cookie = cookie;
1964 pr_debug("\tcompleted cookie %d\n", cookie); 1965 pr_debug("\tcompleted cookie %d\n", cookie);
1965 } 1966 }
1966 1967
@@ -2150,22 +2151,6 @@ static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan)
2150} 2151}
2151 2152
2152/** 2153/**
2153 * ppc440spe_desc_assign_cookie - assign a cookie
2154 */
2155static dma_cookie_t ppc440spe_desc_assign_cookie(
2156 struct ppc440spe_adma_chan *chan,
2157 struct ppc440spe_adma_desc_slot *desc)
2158{
2159 dma_cookie_t cookie = chan->common.cookie;
2160
2161 cookie++;
2162 if (cookie < 0)
2163 cookie = 1;
2164 chan->common.cookie = desc->async_tx.cookie = cookie;
2165 return cookie;
2166}
2167
2168/**
2169 * ppc440spe_rxor_set_region_data - 2154 * ppc440spe_rxor_set_region_data -
2170 */ 2155 */
2171static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc, 2156static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc,
@@ -2235,8 +2220,7 @@ static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx)
2235 slots_per_op = group_start->slots_per_op; 2220 slots_per_op = group_start->slots_per_op;
2236 2221
2237 spin_lock_bh(&chan->lock); 2222 spin_lock_bh(&chan->lock);
2238 2223 cookie = dma_cookie_assign(tx);
2239 cookie = ppc440spe_desc_assign_cookie(chan, sw_desc);
2240 2224
2241 if (unlikely(list_empty(&chan->chain))) { 2225 if (unlikely(list_empty(&chan->chain))) {
2242 /* first peer */ 2226 /* first peer */
@@ -3944,28 +3928,16 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
3944 dma_cookie_t cookie, struct dma_tx_state *txstate) 3928 dma_cookie_t cookie, struct dma_tx_state *txstate)
3945{ 3929{
3946 struct ppc440spe_adma_chan *ppc440spe_chan; 3930 struct ppc440spe_adma_chan *ppc440spe_chan;
3947 dma_cookie_t last_used;
3948 dma_cookie_t last_complete;
3949 enum dma_status ret; 3931 enum dma_status ret;
3950 3932
3951 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 3933 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3952 last_used = chan->cookie; 3934 ret = dma_cookie_status(chan, cookie, txstate);
3953 last_complete = ppc440spe_chan->completed_cookie;
3954
3955 dma_set_tx_state(txstate, last_complete, last_used, 0);
3956
3957 ret = dma_async_is_complete(cookie, last_complete, last_used);
3958 if (ret == DMA_SUCCESS) 3935 if (ret == DMA_SUCCESS)
3959 return ret; 3936 return ret;
3960 3937
3961 ppc440spe_adma_slot_cleanup(ppc440spe_chan); 3938 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3962 3939
3963 last_used = chan->cookie; 3940 return dma_cookie_status(chan, cookie, txstate);
3964 last_complete = ppc440spe_chan->completed_cookie;
3965
3966 dma_set_tx_state(txstate, last_complete, last_used, 0);
3967
3968 return dma_async_is_complete(cookie, last_complete, last_used);
3969} 3941}
3970 3942
3971/** 3943/**
@@ -4050,16 +4022,12 @@ static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan)
4050 async_tx_ack(&sw_desc->async_tx); 4022 async_tx_ack(&sw_desc->async_tx);
4051 ppc440spe_desc_init_null_xor(group_start); 4023 ppc440spe_desc_init_null_xor(group_start);
4052 4024
4053 cookie = chan->common.cookie; 4025 cookie = dma_cookie_assign(&sw_desc->async_tx);
4054 cookie++;
4055 if (cookie <= 1)
4056 cookie = 2;
4057 4026
4058 /* initialize the completed cookie to be less than 4027 /* initialize the completed cookie to be less than
4059 * the most recently used cookie 4028 * the most recently used cookie
4060 */ 4029 */
4061 chan->completed_cookie = cookie - 1; 4030 chan->common.completed_cookie = cookie - 1;
4062 chan->common.cookie = sw_desc->async_tx.cookie = cookie;
4063 4031
4064 /* channel should not be busy */ 4032 /* channel should not be busy */
4065 BUG_ON(ppc440spe_chan_is_busy(chan)); 4033 BUG_ON(ppc440spe_chan_is_busy(chan));
@@ -4529,6 +4497,7 @@ static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev)
4529 INIT_LIST_HEAD(&chan->all_slots); 4497 INIT_LIST_HEAD(&chan->all_slots);
4530 chan->device = adev; 4498 chan->device = adev;
4531 chan->common.device = &adev->common; 4499 chan->common.device = &adev->common;
4500 dma_cookie_init(&chan->common);
4532 list_add_tail(&chan->common.device_node, &adev->common.channels); 4501 list_add_tail(&chan->common.device_node, &adev->common.channels);
4533 tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet, 4502 tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet,
4534 (unsigned long)chan); 4503 (unsigned long)chan);
diff --git a/drivers/dma/ppc4xx/adma.h b/drivers/dma/ppc4xx/adma.h
index 8ada5a812e3b..26b7a5ed9ac7 100644
--- a/drivers/dma/ppc4xx/adma.h
+++ b/drivers/dma/ppc4xx/adma.h
@@ -81,7 +81,6 @@ struct ppc440spe_adma_device {
81 * @common: common dmaengine channel object members 81 * @common: common dmaengine channel object members
82 * @all_slots: complete domain of slots usable by the channel 82 * @all_slots: complete domain of slots usable by the channel
83 * @pending: allows batching of hardware operations 83 * @pending: allows batching of hardware operations
84 * @completed_cookie: identifier for the most recently completed operation
85 * @slots_allocated: records the actual size of the descriptor slot pool 84 * @slots_allocated: records the actual size of the descriptor slot pool
86 * @hw_chain_inited: h/w descriptor chain initialization flag 85 * @hw_chain_inited: h/w descriptor chain initialization flag
87 * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs 86 * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs
@@ -99,7 +98,6 @@ struct ppc440spe_adma_chan {
99 struct list_head all_slots; 98 struct list_head all_slots;
100 struct ppc440spe_adma_desc_slot *last_used; 99 struct ppc440spe_adma_desc_slot *last_used;
101 int pending; 100 int pending;
102 dma_cookie_t completed_cookie;
103 int slots_allocated; 101 int slots_allocated;
104 int hw_chain_inited; 102 int hw_chain_inited;
105 struct tasklet_struct irq_tasklet; 103 struct tasklet_struct irq_tasklet;
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 812fd76e9c18..19d7a8d3975d 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -30,6 +30,8 @@
30#include <linux/kdebug.h> 30#include <linux/kdebug.h>
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <linux/rculist.h> 32#include <linux/rculist.h>
33
34#include "dmaengine.h"
33#include "shdma.h" 35#include "shdma.h"
34 36
35/* DMA descriptor control */ 37/* DMA descriptor control */
@@ -296,13 +298,7 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
296 else 298 else
297 power_up = false; 299 power_up = false;
298 300
299 cookie = sh_chan->common.cookie; 301 cookie = dma_cookie_assign(tx);
300 cookie++;
301 if (cookie < 0)
302 cookie = 1;
303
304 sh_chan->common.cookie = cookie;
305 tx->cookie = cookie;
306 302
307 /* Mark all chunks of this descriptor as submitted, move to the queue */ 303 /* Mark all chunks of this descriptor as submitted, move to the queue */
308 list_for_each_entry_safe(chunk, c, desc->node.prev, node) { 304 list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
@@ -673,7 +669,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
673 669
674static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( 670static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
675 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, 671 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
676 enum dma_transfer_direction direction, unsigned long flags) 672 enum dma_transfer_direction direction, unsigned long flags,
673 void *context)
677{ 674{
678 struct sh_dmae_slave *param; 675 struct sh_dmae_slave *param;
679 struct sh_dmae_chan *sh_chan; 676 struct sh_dmae_chan *sh_chan;
@@ -764,12 +761,12 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
764 cookie = tx->cookie; 761 cookie = tx->cookie;
765 762
766 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { 763 if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
767 if (sh_chan->completed_cookie != desc->cookie - 1) 764 if (sh_chan->common.completed_cookie != desc->cookie - 1)
768 dev_dbg(sh_chan->dev, 765 dev_dbg(sh_chan->dev,
769 "Completing cookie %d, expected %d\n", 766 "Completing cookie %d, expected %d\n",
770 desc->cookie, 767 desc->cookie,
771 sh_chan->completed_cookie + 1); 768 sh_chan->common.completed_cookie + 1);
772 sh_chan->completed_cookie = desc->cookie; 769 sh_chan->common.completed_cookie = desc->cookie;
773 } 770 }
774 771
775 /* Call callback on the last chunk */ 772 /* Call callback on the last chunk */
@@ -823,7 +820,7 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
823 * Terminating and the loop completed normally: forgive 820 * Terminating and the loop completed normally: forgive
824 * uncompleted cookies 821 * uncompleted cookies
825 */ 822 */
826 sh_chan->completed_cookie = sh_chan->common.cookie; 823 sh_chan->common.completed_cookie = sh_chan->common.cookie;
827 824
828 spin_unlock_irqrestore(&sh_chan->desc_lock, flags); 825 spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
829 826
@@ -883,23 +880,14 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
883 struct dma_tx_state *txstate) 880 struct dma_tx_state *txstate)
884{ 881{
885 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 882 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
886 dma_cookie_t last_used;
887 dma_cookie_t last_complete;
888 enum dma_status status; 883 enum dma_status status;
889 unsigned long flags; 884 unsigned long flags;
890 885
891 sh_dmae_chan_ld_cleanup(sh_chan, false); 886 sh_dmae_chan_ld_cleanup(sh_chan, false);
892 887
893 /* First read completed cookie to avoid a skew */
894 last_complete = sh_chan->completed_cookie;
895 rmb();
896 last_used = chan->cookie;
897 BUG_ON(last_complete < 0);
898 dma_set_tx_state(txstate, last_complete, last_used, 0);
899
900 spin_lock_irqsave(&sh_chan->desc_lock, flags); 888 spin_lock_irqsave(&sh_chan->desc_lock, flags);
901 889
902 status = dma_async_is_complete(cookie, last_complete, last_used); 890 status = dma_cookie_status(chan, cookie, txstate);
903 891
904 /* 892 /*
905 * If we don't find cookie on the queue, it has been aborted and we have 893 * If we don't find cookie on the queue, it has been aborted and we have
@@ -1102,6 +1090,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
1102 1090
1103 /* reference struct dma_device */ 1091 /* reference struct dma_device */
1104 new_sh_chan->common.device = &shdev->common; 1092 new_sh_chan->common.device = &shdev->common;
1093 dma_cookie_init(&new_sh_chan->common);
1105 1094
1106 new_sh_chan->dev = shdev->common.dev; 1095 new_sh_chan->dev = shdev->common.dev;
1107 new_sh_chan->id = id; 1096 new_sh_chan->id = id;
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 2b55a276dc5b..0b1d2c105f02 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -30,7 +30,6 @@ enum dmae_pm_state {
30}; 30};
31 31
32struct sh_dmae_chan { 32struct sh_dmae_chan {
33 dma_cookie_t completed_cookie; /* The maximum cookie completed */
34 spinlock_t desc_lock; /* Descriptor operation lock */ 33 spinlock_t desc_lock; /* Descriptor operation lock */
35 struct list_head ld_queue; /* Link descriptors queue */ 34 struct list_head ld_queue; /* Link descriptors queue */
36 struct list_head ld_free; /* Link descriptors free */ 35 struct list_head ld_free; /* Link descriptors free */
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 2333810d1688..434ad31174f2 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -18,6 +18,8 @@
18#include <linux/of_platform.h> 18#include <linux/of_platform.h>
19#include <linux/sirfsoc_dma.h> 19#include <linux/sirfsoc_dma.h>
20 20
21#include "dmaengine.h"
22
21#define SIRFSOC_DMA_DESCRIPTORS 16 23#define SIRFSOC_DMA_DESCRIPTORS 16
22#define SIRFSOC_DMA_CHANNELS 16 24#define SIRFSOC_DMA_CHANNELS 16
23 25
@@ -59,7 +61,6 @@ struct sirfsoc_dma_chan {
59 struct list_head queued; 61 struct list_head queued;
60 struct list_head active; 62 struct list_head active;
61 struct list_head completed; 63 struct list_head completed;
62 dma_cookie_t completed_cookie;
63 unsigned long happened_cyclic; 64 unsigned long happened_cyclic;
64 unsigned long completed_cyclic; 65 unsigned long completed_cyclic;
65 66
@@ -208,7 +209,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
208 /* Free descriptors */ 209 /* Free descriptors */
209 spin_lock_irqsave(&schan->lock, flags); 210 spin_lock_irqsave(&schan->lock, flags);
210 list_splice_tail_init(&list, &schan->free); 211 list_splice_tail_init(&list, &schan->free);
211 schan->completed_cookie = last_cookie; 212 schan->chan.completed_cookie = last_cookie;
212 spin_unlock_irqrestore(&schan->lock, flags); 213 spin_unlock_irqrestore(&schan->lock, flags);
213 } else { 214 } else {
214 /* for cyclic channel, desc is always in active list */ 215 /* for cyclic channel, desc is always in active list */
@@ -258,13 +259,7 @@ static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
258 /* Move descriptor to queue */ 259 /* Move descriptor to queue */
259 list_move_tail(&sdesc->node, &schan->queued); 260 list_move_tail(&sdesc->node, &schan->queued);
260 261
261 /* Update cookie */ 262 cookie = dma_cookie_assign(txd);
262 cookie = schan->chan.cookie + 1;
263 if (cookie <= 0)
264 cookie = 1;
265
266 schan->chan.cookie = cookie;
267 sdesc->desc.cookie = cookie;
268 263
269 spin_unlock_irqrestore(&schan->lock, flags); 264 spin_unlock_irqrestore(&schan->lock, flags);
270 265
@@ -414,16 +409,13 @@ sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
414{ 409{
415 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); 410 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
416 unsigned long flags; 411 unsigned long flags;
417 dma_cookie_t last_used; 412 enum dma_status ret;
418 dma_cookie_t last_complete;
419 413
420 spin_lock_irqsave(&schan->lock, flags); 414 spin_lock_irqsave(&schan->lock, flags);
421 last_used = schan->chan.cookie; 415 ret = dma_cookie_status(chan, cookie, txstate);
422 last_complete = schan->completed_cookie;
423 spin_unlock_irqrestore(&schan->lock, flags); 416 spin_unlock_irqrestore(&schan->lock, flags);
424 417
425 dma_set_tx_state(txstate, last_complete, last_used, 0); 418 return ret;
426 return dma_async_is_complete(cookie, last_complete, last_used);
427} 419}
428 420
429static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved( 421static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
@@ -497,7 +489,7 @@ err_dir:
497static struct dma_async_tx_descriptor * 489static struct dma_async_tx_descriptor *
498sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr, 490sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
499 size_t buf_len, size_t period_len, 491 size_t buf_len, size_t period_len,
500 enum dma_transfer_direction direction) 492 enum dma_transfer_direction direction, void *context)
501{ 493{
502 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); 494 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
503 struct sirfsoc_dma_desc *sdesc = NULL; 495 struct sirfsoc_dma_desc *sdesc = NULL;
@@ -635,8 +627,7 @@ static int __devinit sirfsoc_dma_probe(struct platform_device *op)
635 schan = &sdma->channels[i]; 627 schan = &sdma->channels[i];
636 628
637 schan->chan.device = dma; 629 schan->chan.device = dma;
638 schan->chan.cookie = 1; 630 dma_cookie_init(&schan->chan);
639 schan->completed_cookie = schan->chan.cookie;
640 631
641 INIT_LIST_HEAD(&schan->free); 632 INIT_LIST_HEAD(&schan->free);
642 INIT_LIST_HEAD(&schan->prepared); 633 INIT_LIST_HEAD(&schan->prepared);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index cc5ecbc067a3..bdd41d4bfa8d 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -21,6 +21,7 @@
21 21
22#include <plat/ste_dma40.h> 22#include <plat/ste_dma40.h>
23 23
24#include "dmaengine.h"
24#include "ste_dma40_ll.h" 25#include "ste_dma40_ll.h"
25 26
26#define D40_NAME "dma40" 27#define D40_NAME "dma40"
@@ -220,8 +221,6 @@ struct d40_base;
220 * 221 *
221 * @lock: A spinlock to protect this struct. 222 * @lock: A spinlock to protect this struct.
222 * @log_num: The logical number, if any of this channel. 223 * @log_num: The logical number, if any of this channel.
223 * @completed: Starts with 1, after first interrupt it is set to dma engine's
224 * current cookie.
225 * @pending_tx: The number of pending transfers. Used between interrupt handler 224 * @pending_tx: The number of pending transfers. Used between interrupt handler
226 * and tasklet. 225 * and tasklet.
227 * @busy: Set to true when transfer is ongoing on this channel. 226 * @busy: Set to true when transfer is ongoing on this channel.
@@ -250,8 +249,6 @@ struct d40_base;
250struct d40_chan { 249struct d40_chan {
251 spinlock_t lock; 250 spinlock_t lock;
252 int log_num; 251 int log_num;
253 /* ID of the most recent completed transfer */
254 int completed;
255 int pending_tx; 252 int pending_tx;
256 bool busy; 253 bool busy;
257 struct d40_phy_res *phy_chan; 254 struct d40_phy_res *phy_chan;
@@ -1223,21 +1220,14 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
1223 chan); 1220 chan);
1224 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); 1221 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
1225 unsigned long flags; 1222 unsigned long flags;
1223 dma_cookie_t cookie;
1226 1224
1227 spin_lock_irqsave(&d40c->lock, flags); 1225 spin_lock_irqsave(&d40c->lock, flags);
1228 1226 cookie = dma_cookie_assign(tx);
1229 d40c->chan.cookie++;
1230
1231 if (d40c->chan.cookie < 0)
1232 d40c->chan.cookie = 1;
1233
1234 d40d->txd.cookie = d40c->chan.cookie;
1235
1236 d40_desc_queue(d40c, d40d); 1227 d40_desc_queue(d40c, d40d);
1237
1238 spin_unlock_irqrestore(&d40c->lock, flags); 1228 spin_unlock_irqrestore(&d40c->lock, flags);
1239 1229
1240 return tx->cookie; 1230 return cookie;
1241} 1231}
1242 1232
1243static int d40_start(struct d40_chan *d40c) 1233static int d40_start(struct d40_chan *d40c)
@@ -1357,7 +1347,7 @@ static void dma_tasklet(unsigned long data)
1357 goto err; 1347 goto err;
1358 1348
1359 if (!d40d->cyclic) 1349 if (!d40d->cyclic)
1360 d40c->completed = d40d->txd.cookie; 1350 dma_cookie_complete(&d40d->txd);
1361 1351
1362 /* 1352 /*
1363 * If terminating a channel pending_tx is set to zero. 1353 * If terminating a channel pending_tx is set to zero.
@@ -2182,7 +2172,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
2182 bool is_free_phy; 2172 bool is_free_phy;
2183 spin_lock_irqsave(&d40c->lock, flags); 2173 spin_lock_irqsave(&d40c->lock, flags);
2184 2174
2185 d40c->completed = chan->cookie = 1; 2175 dma_cookie_init(chan);
2186 2176
2187 /* If no dma configuration is set use default configuration (memcpy) */ 2177 /* If no dma configuration is set use default configuration (memcpy) */
2188 if (!d40c->configured) { 2178 if (!d40c->configured) {
@@ -2299,7 +2289,8 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2299 struct scatterlist *sgl, 2289 struct scatterlist *sgl,
2300 unsigned int sg_len, 2290 unsigned int sg_len,
2301 enum dma_transfer_direction direction, 2291 enum dma_transfer_direction direction,
2302 unsigned long dma_flags) 2292 unsigned long dma_flags,
2293 void *context)
2303{ 2294{
2304 if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) 2295 if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
2305 return NULL; 2296 return NULL;
@@ -2310,7 +2301,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
2310static struct dma_async_tx_descriptor * 2301static struct dma_async_tx_descriptor *
2311dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, 2302dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2312 size_t buf_len, size_t period_len, 2303 size_t buf_len, size_t period_len,
2313 enum dma_transfer_direction direction) 2304 enum dma_transfer_direction direction, void *context)
2314{ 2305{
2315 unsigned int periods = buf_len / period_len; 2306 unsigned int periods = buf_len / period_len;
2316 struct dma_async_tx_descriptor *txd; 2307 struct dma_async_tx_descriptor *txd;
@@ -2342,25 +2333,19 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
2342 struct dma_tx_state *txstate) 2333 struct dma_tx_state *txstate)
2343{ 2334{
2344 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2335 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2345 dma_cookie_t last_used; 2336 enum dma_status ret;
2346 dma_cookie_t last_complete;
2347 int ret;
2348 2337
2349 if (d40c->phy_chan == NULL) { 2338 if (d40c->phy_chan == NULL) {
2350 chan_err(d40c, "Cannot read status of unallocated channel\n"); 2339 chan_err(d40c, "Cannot read status of unallocated channel\n");
2351 return -EINVAL; 2340 return -EINVAL;
2352 } 2341 }
2353 2342
2354 last_complete = d40c->completed; 2343 ret = dma_cookie_status(chan, cookie, txstate);
2355 last_used = chan->cookie; 2344 if (ret != DMA_SUCCESS)
2345 dma_set_residue(txstate, stedma40_residue(chan));
2356 2346
2357 if (d40_is_paused(d40c)) 2347 if (d40_is_paused(d40c))
2358 ret = DMA_PAUSED; 2348 ret = DMA_PAUSED;
2359 else
2360 ret = dma_async_is_complete(cookie, last_complete, last_used);
2361
2362 dma_set_tx_state(txstate, last_complete, last_used,
2363 stedma40_residue(chan));
2364 2349
2365 return ret; 2350 return ret;
2366} 2351}
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index a6f9c1684a0f..4e0dff59901d 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -31,6 +31,8 @@
31 31
32#include <linux/timb_dma.h> 32#include <linux/timb_dma.h>
33 33
34#include "dmaengine.h"
35
34#define DRIVER_NAME "timb-dma" 36#define DRIVER_NAME "timb-dma"
35 37
36/* Global DMA registers */ 38/* Global DMA registers */
@@ -84,7 +86,6 @@ struct timb_dma_chan {
84 especially the lists and descriptors, 86 especially the lists and descriptors,
85 from races between the tasklet and calls 87 from races between the tasklet and calls
86 from above */ 88 from above */
87 dma_cookie_t last_completed_cookie;
88 bool ongoing; 89 bool ongoing;
89 struct list_head active_list; 90 struct list_head active_list;
90 struct list_head queue; 91 struct list_head queue;
@@ -284,7 +285,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
284 else 285 else
285 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); 286 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
286*/ 287*/
287 td_chan->last_completed_cookie = txd->cookie; 288 dma_cookie_complete(txd);
288 td_chan->ongoing = false; 289 td_chan->ongoing = false;
289 290
290 callback = txd->callback; 291 callback = txd->callback;
@@ -349,12 +350,7 @@ static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
349 dma_cookie_t cookie; 350 dma_cookie_t cookie;
350 351
351 spin_lock_bh(&td_chan->lock); 352 spin_lock_bh(&td_chan->lock);
352 353 cookie = dma_cookie_assign(txd);
353 cookie = txd->chan->cookie;
354 if (++cookie < 0)
355 cookie = 1;
356 txd->chan->cookie = cookie;
357 txd->cookie = cookie;
358 354
359 if (list_empty(&td_chan->active_list)) { 355 if (list_empty(&td_chan->active_list)) {
360 dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__, 356 dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
@@ -481,8 +477,7 @@ static int td_alloc_chan_resources(struct dma_chan *chan)
481 } 477 }
482 478
483 spin_lock_bh(&td_chan->lock); 479 spin_lock_bh(&td_chan->lock);
484 td_chan->last_completed_cookie = 1; 480 dma_cookie_init(chan);
485 chan->cookie = 1;
486 spin_unlock_bh(&td_chan->lock); 481 spin_unlock_bh(&td_chan->lock);
487 482
488 return 0; 483 return 0;
@@ -515,24 +510,13 @@ static void td_free_chan_resources(struct dma_chan *chan)
515static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 510static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
516 struct dma_tx_state *txstate) 511 struct dma_tx_state *txstate)
517{ 512{
518 struct timb_dma_chan *td_chan = 513 enum dma_status ret;
519 container_of(chan, struct timb_dma_chan, chan);
520 dma_cookie_t last_used;
521 dma_cookie_t last_complete;
522 int ret;
523 514
524 dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); 515 dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
525 516
526 last_complete = td_chan->last_completed_cookie; 517 ret = dma_cookie_status(chan, cookie, txstate);
527 last_used = chan->cookie;
528
529 ret = dma_async_is_complete(cookie, last_complete, last_used);
530
531 dma_set_tx_state(txstate, last_complete, last_used, 0);
532 518
533 dev_dbg(chan2dev(chan), 519 dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);
534 "%s: exit, ret: %d, last_complete: %d, last_used: %d\n",
535 __func__, ret, last_complete, last_used);
536 520
537 return ret; 521 return ret;
538} 522}
@@ -558,7 +542,8 @@ static void td_issue_pending(struct dma_chan *chan)
558 542
559static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, 543static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
560 struct scatterlist *sgl, unsigned int sg_len, 544 struct scatterlist *sgl, unsigned int sg_len,
561 enum dma_transfer_direction direction, unsigned long flags) 545 enum dma_transfer_direction direction, unsigned long flags,
546 void *context)
562{ 547{
563 struct timb_dma_chan *td_chan = 548 struct timb_dma_chan *td_chan =
564 container_of(chan, struct timb_dma_chan, chan); 549 container_of(chan, struct timb_dma_chan, chan);
@@ -766,7 +751,7 @@ static int __devinit td_probe(struct platform_device *pdev)
766 } 751 }
767 752
768 td_chan->chan.device = &td->dma; 753 td_chan->chan.device = &td->dma;
769 td_chan->chan.cookie = 1; 754 dma_cookie_init(&td_chan->chan);
770 spin_lock_init(&td_chan->lock); 755 spin_lock_init(&td_chan->lock);
771 INIT_LIST_HEAD(&td_chan->active_list); 756 INIT_LIST_HEAD(&td_chan->active_list);
772 INIT_LIST_HEAD(&td_chan->queue); 757 INIT_LIST_HEAD(&td_chan->queue);
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 6122c364cf11..913f55c76c99 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -15,6 +15,8 @@
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/scatterlist.h> 17#include <linux/scatterlist.h>
18
19#include "dmaengine.h"
18#include "txx9dmac.h" 20#include "txx9dmac.h"
19 21
20static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan) 22static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
@@ -279,21 +281,6 @@ static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
279 } 281 }
280} 282}
281 283
282/* Called with dc->lock held and bh disabled */
283static dma_cookie_t
284txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc)
285{
286 dma_cookie_t cookie = dc->chan.cookie;
287
288 if (++cookie < 0)
289 cookie = 1;
290
291 dc->chan.cookie = cookie;
292 desc->txd.cookie = cookie;
293
294 return cookie;
295}
296
297/*----------------------------------------------------------------------*/ 284/*----------------------------------------------------------------------*/
298 285
299static void txx9dmac_dump_regs(struct txx9dmac_chan *dc) 286static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
@@ -424,7 +411,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
424 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", 411 dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
425 txd->cookie, desc); 412 txd->cookie, desc);
426 413
427 dc->completed = txd->cookie; 414 dma_cookie_complete(txd);
428 callback = txd->callback; 415 callback = txd->callback;
429 param = txd->callback_param; 416 param = txd->callback_param;
430 417
@@ -738,7 +725,7 @@ static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
738 dma_cookie_t cookie; 725 dma_cookie_t cookie;
739 726
740 spin_lock_bh(&dc->lock); 727 spin_lock_bh(&dc->lock);
741 cookie = txx9dmac_assign_cookie(dc, desc); 728 cookie = dma_cookie_assign(tx);
742 729
743 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n", 730 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
744 desc->txd.cookie, desc); 731 desc->txd.cookie, desc);
@@ -846,7 +833,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
846static struct dma_async_tx_descriptor * 833static struct dma_async_tx_descriptor *
847txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 834txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
848 unsigned int sg_len, enum dma_transfer_direction direction, 835 unsigned int sg_len, enum dma_transfer_direction direction,
849 unsigned long flags) 836 unsigned long flags, void *context)
850{ 837{
851 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 838 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
852 struct txx9dmac_dev *ddev = dc->ddev; 839 struct txx9dmac_dev *ddev = dc->ddev;
@@ -972,27 +959,17 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
972 struct dma_tx_state *txstate) 959 struct dma_tx_state *txstate)
973{ 960{
974 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 961 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
975 dma_cookie_t last_used; 962 enum dma_status ret;
976 dma_cookie_t last_complete;
977 int ret;
978 963
979 last_complete = dc->completed; 964 ret = dma_cookie_status(chan, cookie, txstate);
980 last_used = chan->cookie;
981
982 ret = dma_async_is_complete(cookie, last_complete, last_used);
983 if (ret != DMA_SUCCESS) { 965 if (ret != DMA_SUCCESS) {
984 spin_lock_bh(&dc->lock); 966 spin_lock_bh(&dc->lock);
985 txx9dmac_scan_descriptors(dc); 967 txx9dmac_scan_descriptors(dc);
986 spin_unlock_bh(&dc->lock); 968 spin_unlock_bh(&dc->lock);
987 969
988 last_complete = dc->completed; 970 ret = dma_cookie_status(chan, cookie, txstate);
989 last_used = chan->cookie;
990
991 ret = dma_async_is_complete(cookie, last_complete, last_used);
992 } 971 }
993 972
994 dma_set_tx_state(txstate, last_complete, last_used, 0);
995
996 return ret; 973 return ret;
997} 974}
998 975
@@ -1057,7 +1034,7 @@ static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
1057 return -EIO; 1034 return -EIO;
1058 } 1035 }
1059 1036
1060 dc->completed = chan->cookie = 1; 1037 dma_cookie_init(chan);
1061 1038
1062 dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE; 1039 dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
1063 txx9dmac_chan_set_SMPCHN(dc); 1040 txx9dmac_chan_set_SMPCHN(dc);
@@ -1186,7 +1163,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
1186 dc->ddev->chan[ch] = dc; 1163 dc->ddev->chan[ch] = dc;
1187 dc->chan.device = &dc->dma; 1164 dc->chan.device = &dc->dma;
1188 list_add_tail(&dc->chan.device_node, &dc->chan.device->channels); 1165 list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
1189 dc->chan.cookie = dc->completed = 1; 1166 dma_cookie_init(&dc->chan);
1190 1167
1191 if (is_dmac64(dc)) 1168 if (is_dmac64(dc))
1192 dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch]; 1169 dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h
index 365d42366b9f..f5a760598882 100644
--- a/drivers/dma/txx9dmac.h
+++ b/drivers/dma/txx9dmac.h
@@ -172,7 +172,6 @@ struct txx9dmac_chan {
172 spinlock_t lock; 172 spinlock_t lock;
173 173
174 /* these other elements are all protected by lock */ 174 /* these other elements are all protected by lock */
175 dma_cookie_t completed;
176 struct list_head active_list; 175 struct list_head active_list;
177 struct list_head queue; 176 struct list_head queue;
178 struct list_head free_list; 177 struct list_head free_list;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index b2d3ee1d183a..5689ce62fd81 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -22,6 +22,8 @@
22#include <linux/syscore_ops.h> 22#include <linux/syscore_ops.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24 24
25#include <mach/irqs.h>
26
25/* 27/*
26 * We handle the GPIOs by banks, each bank covers up to 32 GPIOs with 28 * We handle the GPIOs by banks, each bank covers up to 32 GPIOs with
27 * one set of registers. The register offsets are organized below: 29 * one set of registers. The register offsets are organized below:
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0694e170a338..1a7559b59997 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -467,6 +467,10 @@ static int i915_drm_freeze(struct drm_device *dev)
467 /* Modeset on resume, not lid events */ 467 /* Modeset on resume, not lid events */
468 dev_priv->modeset_on_lid = 0; 468 dev_priv->modeset_on_lid = 0;
469 469
470 console_lock();
471 intel_fbdev_set_suspend(dev, 1);
472 console_unlock();
473
470 return 0; 474 return 0;
471} 475}
472 476
@@ -539,6 +543,9 @@ static int i915_drm_thaw(struct drm_device *dev)
539 543
540 dev_priv->modeset_on_lid = 0; 544 dev_priv->modeset_on_lid = 0;
541 545
546 console_lock();
547 intel_fbdev_set_suspend(dev, 0);
548 console_unlock();
542 return error; 549 return error;
543} 550}
544 551
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9cec6c3937fa..5a14149b3794 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -382,7 +382,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
382 struct drm_i915_gem_object *obj); 382 struct drm_i915_gem_object *obj);
383extern int intel_fbdev_init(struct drm_device *dev); 383extern int intel_fbdev_init(struct drm_device *dev);
384extern void intel_fbdev_fini(struct drm_device *dev); 384extern void intel_fbdev_fini(struct drm_device *dev);
385 385extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
386extern void intel_prepare_page_flip(struct drm_device *dev, int plane); 386extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
387extern void intel_finish_page_flip(struct drm_device *dev, int pipe); 387extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
388extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); 388extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 2d8766978388..19ecd78b8a2c 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -254,6 +254,16 @@ void intel_fbdev_fini(struct drm_device *dev)
254 kfree(dev_priv->fbdev); 254 kfree(dev_priv->fbdev);
255 dev_priv->fbdev = NULL; 255 dev_priv->fbdev = NULL;
256} 256}
257
258void intel_fbdev_set_suspend(struct drm_device *dev, int state)
259{
260 drm_i915_private_t *dev_priv = dev->dev_private;
261 if (!dev_priv->fbdev)
262 return;
263
264 fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
265}
266
257MODULE_LICENSE("GPL and additional rights"); 267MODULE_LICENSE("GPL and additional rights");
258 268
259void intel_fb_output_poll_changed(struct drm_device *dev) 269void intel_fb_output_poll_changed(struct drm_device *dev)
diff --git a/drivers/input/input-compat.c b/drivers/input/input-compat.c
index e46a86776a6b..64ca7113ff28 100644
--- a/drivers/input/input-compat.c
+++ b/drivers/input/input-compat.c
@@ -17,7 +17,7 @@
17int input_event_from_user(const char __user *buffer, 17int input_event_from_user(const char __user *buffer,
18 struct input_event *event) 18 struct input_event *event)
19{ 19{
20 if (INPUT_COMPAT_TEST) { 20 if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) {
21 struct input_event_compat compat_event; 21 struct input_event_compat compat_event;
22 22
23 if (copy_from_user(&compat_event, buffer, 23 if (copy_from_user(&compat_event, buffer,
@@ -41,7 +41,7 @@ int input_event_from_user(const char __user *buffer,
41int input_event_to_user(char __user *buffer, 41int input_event_to_user(char __user *buffer,
42 const struct input_event *event) 42 const struct input_event *event)
43{ 43{
44 if (INPUT_COMPAT_TEST) { 44 if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) {
45 struct input_event_compat compat_event; 45 struct input_event_compat compat_event;
46 46
47 compat_event.time.tv_sec = event->time.tv_sec; 47 compat_event.time.tv_sec = event->time.tv_sec;
diff --git a/drivers/input/input-compat.h b/drivers/input/input-compat.h
index 22be27b424de..148f66fe3205 100644
--- a/drivers/input/input-compat.h
+++ b/drivers/input/input-compat.h
@@ -67,7 +67,7 @@ struct ff_effect_compat {
67 67
68static inline size_t input_event_size(void) 68static inline size_t input_event_size(void)
69{ 69{
70 return INPUT_COMPAT_TEST ? 70 return (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) ?
71 sizeof(struct input_event_compat) : sizeof(struct input_event); 71 sizeof(struct input_event_compat) : sizeof(struct input_event);
72} 72}
73 73
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index 24044dacbf70..c65b5fa69f1e 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -107,6 +107,9 @@ static int __init amijoy_init(void)
107 int i, j; 107 int i, j;
108 int err; 108 int err;
109 109
110 if (!MACH_IS_AMIGA)
111 return -ENODEV;
112
110 for (i = 0; i < 2; i++) { 113 for (i = 0; i < 2; i++) {
111 if (!amijoy[i]) 114 if (!amijoy[i])
112 continue; 115 continue;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index ed1ed469d085..62bfce468f9f 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -28,14 +28,18 @@
28#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/of_platform.h> 29#include <linux/of_platform.h>
30#include <linux/of_gpio.h> 30#include <linux/of_gpio.h>
31#include <linux/spinlock.h>
31 32
32struct gpio_button_data { 33struct gpio_button_data {
33 struct gpio_keys_button *button; 34 const struct gpio_keys_button *button;
34 struct input_dev *input; 35 struct input_dev *input;
35 struct timer_list timer; 36 struct timer_list timer;
36 struct work_struct work; 37 struct work_struct work;
37 int timer_debounce; /* in msecs */ 38 unsigned int timer_debounce; /* in msecs */
39 unsigned int irq;
40 spinlock_t lock;
38 bool disabled; 41 bool disabled;
42 bool key_pressed;
39}; 43};
40 44
41struct gpio_keys_drvdata { 45struct gpio_keys_drvdata {
@@ -114,7 +118,7 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
114 /* 118 /*
115 * Disable IRQ and possible debouncing timer. 119 * Disable IRQ and possible debouncing timer.
116 */ 120 */
117 disable_irq(gpio_to_irq(bdata->button->gpio)); 121 disable_irq(bdata->irq);
118 if (bdata->timer_debounce) 122 if (bdata->timer_debounce)
119 del_timer_sync(&bdata->timer); 123 del_timer_sync(&bdata->timer);
120 124
@@ -135,7 +139,7 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
135static void gpio_keys_enable_button(struct gpio_button_data *bdata) 139static void gpio_keys_enable_button(struct gpio_button_data *bdata)
136{ 140{
137 if (bdata->disabled) { 141 if (bdata->disabled) {
138 enable_irq(gpio_to_irq(bdata->button->gpio)); 142 enable_irq(bdata->irq);
139 bdata->disabled = false; 143 bdata->disabled = false;
140 } 144 }
141} 145}
@@ -195,7 +199,7 @@ static ssize_t gpio_keys_attr_show_helper(struct gpio_keys_drvdata *ddata,
195 * @type: button type (%EV_KEY, %EV_SW) 199 * @type: button type (%EV_KEY, %EV_SW)
196 * 200 *
197 * This function parses stringified bitmap from @buf and disables/enables 201 * This function parses stringified bitmap from @buf and disables/enables
198 * GPIO buttons accordinly. Returns 0 on success and negative error 202 * GPIO buttons accordingly. Returns 0 on success and negative error
199 * on failure. 203 * on failure.
200 */ 204 */
201static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata, 205static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
@@ -320,9 +324,9 @@ static struct attribute_group gpio_keys_attr_group = {
320 .attrs = gpio_keys_attrs, 324 .attrs = gpio_keys_attrs,
321}; 325};
322 326
323static void gpio_keys_report_event(struct gpio_button_data *bdata) 327static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
324{ 328{
325 struct gpio_keys_button *button = bdata->button; 329 const struct gpio_keys_button *button = bdata->button;
326 struct input_dev *input = bdata->input; 330 struct input_dev *input = bdata->input;
327 unsigned int type = button->type ?: EV_KEY; 331 unsigned int type = button->type ?: EV_KEY;
328 int state = (gpio_get_value_cansleep(button->gpio) ? 1 : 0) ^ button->active_low; 332 int state = (gpio_get_value_cansleep(button->gpio) ? 1 : 0) ^ button->active_low;
@@ -336,27 +340,26 @@ static void gpio_keys_report_event(struct gpio_button_data *bdata)
336 input_sync(input); 340 input_sync(input);
337} 341}
338 342
339static void gpio_keys_work_func(struct work_struct *work) 343static void gpio_keys_gpio_work_func(struct work_struct *work)
340{ 344{
341 struct gpio_button_data *bdata = 345 struct gpio_button_data *bdata =
342 container_of(work, struct gpio_button_data, work); 346 container_of(work, struct gpio_button_data, work);
343 347
344 gpio_keys_report_event(bdata); 348 gpio_keys_gpio_report_event(bdata);
345} 349}
346 350
347static void gpio_keys_timer(unsigned long _data) 351static void gpio_keys_gpio_timer(unsigned long _data)
348{ 352{
349 struct gpio_button_data *data = (struct gpio_button_data *)_data; 353 struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
350 354
351 schedule_work(&data->work); 355 schedule_work(&bdata->work);
352} 356}
353 357
354static irqreturn_t gpio_keys_isr(int irq, void *dev_id) 358static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
355{ 359{
356 struct gpio_button_data *bdata = dev_id; 360 struct gpio_button_data *bdata = dev_id;
357 struct gpio_keys_button *button = bdata->button;
358 361
359 BUG_ON(irq != gpio_to_irq(button->gpio)); 362 BUG_ON(irq != bdata->irq);
360 363
361 if (bdata->timer_debounce) 364 if (bdata->timer_debounce)
362 mod_timer(&bdata->timer, 365 mod_timer(&bdata->timer,
@@ -367,50 +370,133 @@ static irqreturn_t gpio_keys_isr(int irq, void *dev_id)
367 return IRQ_HANDLED; 370 return IRQ_HANDLED;
368} 371}
369 372
373static void gpio_keys_irq_timer(unsigned long _data)
374{
375 struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
376 struct input_dev *input = bdata->input;
377 unsigned long flags;
378
379 spin_lock_irqsave(&bdata->lock, flags);
380 if (bdata->key_pressed) {
381 input_event(input, EV_KEY, bdata->button->code, 0);
382 input_sync(input);
383 bdata->key_pressed = false;
384 }
385 spin_unlock_irqrestore(&bdata->lock, flags);
386}
387
388static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
389{
390 struct gpio_button_data *bdata = dev_id;
391 const struct gpio_keys_button *button = bdata->button;
392 struct input_dev *input = bdata->input;
393 unsigned long flags;
394
395 BUG_ON(irq != bdata->irq);
396
397 spin_lock_irqsave(&bdata->lock, flags);
398
399 if (!bdata->key_pressed) {
400 input_event(input, EV_KEY, button->code, 1);
401 input_sync(input);
402
403 if (!bdata->timer_debounce) {
404 input_event(input, EV_KEY, button->code, 0);
405 input_sync(input);
406 goto out;
407 }
408
409 bdata->key_pressed = true;
410 }
411
412 if (bdata->timer_debounce)
413 mod_timer(&bdata->timer,
414 jiffies + msecs_to_jiffies(bdata->timer_debounce));
415out:
416 spin_unlock_irqrestore(&bdata->lock, flags);
417 return IRQ_HANDLED;
418}
419
370static int __devinit gpio_keys_setup_key(struct platform_device *pdev, 420static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
421 struct input_dev *input,
371 struct gpio_button_data *bdata, 422 struct gpio_button_data *bdata,
372 struct gpio_keys_button *button) 423 const struct gpio_keys_button *button)
373{ 424{
374 const char *desc = button->desc ? button->desc : "gpio_keys"; 425 const char *desc = button->desc ? button->desc : "gpio_keys";
375 struct device *dev = &pdev->dev; 426 struct device *dev = &pdev->dev;
427 irq_handler_t isr;
376 unsigned long irqflags; 428 unsigned long irqflags;
377 int irq, error; 429 int irq, error;
378 430
379 setup_timer(&bdata->timer, gpio_keys_timer, (unsigned long)bdata); 431 bdata->input = input;
380 INIT_WORK(&bdata->work, gpio_keys_work_func); 432 bdata->button = button;
433 spin_lock_init(&bdata->lock);
381 434
382 error = gpio_request(button->gpio, desc); 435 if (gpio_is_valid(button->gpio)) {
383 if (error < 0) {
384 dev_err(dev, "failed to request GPIO %d, error %d\n",
385 button->gpio, error);
386 goto fail2;
387 }
388 436
389 error = gpio_direction_input(button->gpio); 437 error = gpio_request(button->gpio, desc);
390 if (error < 0) { 438 if (error < 0) {
391 dev_err(dev, "failed to configure" 439 dev_err(dev, "Failed to request GPIO %d, error %d\n",
392 " direction for GPIO %d, error %d\n", 440 button->gpio, error);
393 button->gpio, error); 441 return error;
394 goto fail3; 442 }
395 }
396 443
397 if (button->debounce_interval) { 444 error = gpio_direction_input(button->gpio);
398 error = gpio_set_debounce(button->gpio, 445 if (error < 0) {
399 button->debounce_interval * 1000); 446 dev_err(dev,
400 /* use timer if gpiolib doesn't provide debounce */ 447 "Failed to configure direction for GPIO %d, error %d\n",
401 if (error < 0) 448 button->gpio, error);
402 bdata->timer_debounce = button->debounce_interval; 449 goto fail;
403 } 450 }
404 451
405 irq = gpio_to_irq(button->gpio); 452 if (button->debounce_interval) {
406 if (irq < 0) { 453 error = gpio_set_debounce(button->gpio,
407 error = irq; 454 button->debounce_interval * 1000);
408 dev_err(dev, "Unable to get irq number for GPIO %d, error %d\n", 455 /* use timer if gpiolib doesn't provide debounce */
409 button->gpio, error); 456 if (error < 0)
410 goto fail3; 457 bdata->timer_debounce =
458 button->debounce_interval;
459 }
460
461 irq = gpio_to_irq(button->gpio);
462 if (irq < 0) {
463 error = irq;
464 dev_err(dev,
465 "Unable to get irq number for GPIO %d, error %d\n",
466 button->gpio, error);
467 goto fail;
468 }
469 bdata->irq = irq;
470
471 INIT_WORK(&bdata->work, gpio_keys_gpio_work_func);
472 setup_timer(&bdata->timer,
473 gpio_keys_gpio_timer, (unsigned long)bdata);
474
475 isr = gpio_keys_gpio_isr;
476 irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
477
478 } else {
479 if (!button->irq) {
480 dev_err(dev, "No IRQ specified\n");
481 return -EINVAL;
482 }
483 bdata->irq = button->irq;
484
485 if (button->type && button->type != EV_KEY) {
486 dev_err(dev, "Only EV_KEY allowed for IRQ buttons.\n");
487 return -EINVAL;
488 }
489
490 bdata->timer_debounce = button->debounce_interval;
491 setup_timer(&bdata->timer,
492 gpio_keys_irq_timer, (unsigned long)bdata);
493
494 isr = gpio_keys_irq_isr;
495 irqflags = 0;
411 } 496 }
412 497
413 irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; 498 input_set_capability(input, button->type ?: EV_KEY, button->code);
499
414 /* 500 /*
415 * If platform has specified that the button can be disabled, 501 * If platform has specified that the button can be disabled,
416 * we don't want it to share the interrupt line. 502 * we don't want it to share the interrupt line.
@@ -418,18 +504,19 @@ static int __devinit gpio_keys_setup_key(struct platform_device *pdev,
418 if (!button->can_disable) 504 if (!button->can_disable)
419 irqflags |= IRQF_SHARED; 505 irqflags |= IRQF_SHARED;
420 506
421 error = request_threaded_irq(irq, NULL, gpio_keys_isr, irqflags, desc, bdata); 507 error = request_any_context_irq(bdata->irq, isr, irqflags, desc, bdata);
422 if (error < 0) { 508 if (error < 0) {
423 dev_err(dev, "Unable to claim irq %d; error %d\n", 509 dev_err(dev, "Unable to claim irq %d; error %d\n",
424 irq, error); 510 bdata->irq, error);
425 goto fail3; 511 goto fail;
426 } 512 }
427 513
428 return 0; 514 return 0;
429 515
430fail3: 516fail:
431 gpio_free(button->gpio); 517 if (gpio_is_valid(button->gpio))
432fail2: 518 gpio_free(button->gpio);
519
433 return error; 520 return error;
434} 521}
435 522
@@ -547,9 +634,19 @@ static int gpio_keys_get_devtree_pdata(struct device *dev,
547 634
548#endif 635#endif
549 636
637static void gpio_remove_key(struct gpio_button_data *bdata)
638{
639 free_irq(bdata->irq, bdata);
640 if (bdata->timer_debounce)
641 del_timer_sync(&bdata->timer);
642 cancel_work_sync(&bdata->work);
643 if (gpio_is_valid(bdata->button->gpio))
644 gpio_free(bdata->button->gpio);
645}
646
550static int __devinit gpio_keys_probe(struct platform_device *pdev) 647static int __devinit gpio_keys_probe(struct platform_device *pdev)
551{ 648{
552 struct gpio_keys_platform_data *pdata = pdev->dev.platform_data; 649 const struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
553 struct gpio_keys_drvdata *ddata; 650 struct gpio_keys_drvdata *ddata;
554 struct device *dev = &pdev->dev; 651 struct device *dev = &pdev->dev;
555 struct gpio_keys_platform_data alt_pdata; 652 struct gpio_keys_platform_data alt_pdata;
@@ -599,21 +696,15 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
599 __set_bit(EV_REP, input->evbit); 696 __set_bit(EV_REP, input->evbit);
600 697
601 for (i = 0; i < pdata->nbuttons; i++) { 698 for (i = 0; i < pdata->nbuttons; i++) {
602 struct gpio_keys_button *button = &pdata->buttons[i]; 699 const struct gpio_keys_button *button = &pdata->buttons[i];
603 struct gpio_button_data *bdata = &ddata->data[i]; 700 struct gpio_button_data *bdata = &ddata->data[i];
604 unsigned int type = button->type ?: EV_KEY;
605
606 bdata->input = input;
607 bdata->button = button;
608 701
609 error = gpio_keys_setup_key(pdev, bdata, button); 702 error = gpio_keys_setup_key(pdev, input, bdata, button);
610 if (error) 703 if (error)
611 goto fail2; 704 goto fail2;
612 705
613 if (button->wakeup) 706 if (button->wakeup)
614 wakeup = 1; 707 wakeup = 1;
615
616 input_set_capability(input, type, button->code);
617 } 708 }
618 709
619 error = sysfs_create_group(&pdev->dev.kobj, &gpio_keys_attr_group); 710 error = sysfs_create_group(&pdev->dev.kobj, &gpio_keys_attr_group);
@@ -630,9 +721,12 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
630 goto fail3; 721 goto fail3;
631 } 722 }
632 723
633 /* get current state of buttons */ 724 /* get current state of buttons that are connected to GPIOs */
634 for (i = 0; i < pdata->nbuttons; i++) 725 for (i = 0; i < pdata->nbuttons; i++) {
635 gpio_keys_report_event(&ddata->data[i]); 726 struct gpio_button_data *bdata = &ddata->data[i];
727 if (gpio_is_valid(bdata->button->gpio))
728 gpio_keys_gpio_report_event(bdata);
729 }
636 input_sync(input); 730 input_sync(input);
637 731
638 device_init_wakeup(&pdev->dev, wakeup); 732 device_init_wakeup(&pdev->dev, wakeup);
@@ -642,13 +736,8 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
642 fail3: 736 fail3:
643 sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group); 737 sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group);
644 fail2: 738 fail2:
645 while (--i >= 0) { 739 while (--i >= 0)
646 free_irq(gpio_to_irq(pdata->buttons[i].gpio), &ddata->data[i]); 740 gpio_remove_key(&ddata->data[i]);
647 if (ddata->data[i].timer_debounce)
648 del_timer_sync(&ddata->data[i].timer);
649 cancel_work_sync(&ddata->data[i].work);
650 gpio_free(pdata->buttons[i].gpio);
651 }
652 741
653 platform_set_drvdata(pdev, NULL); 742 platform_set_drvdata(pdev, NULL);
654 fail1: 743 fail1:
@@ -671,14 +760,8 @@ static int __devexit gpio_keys_remove(struct platform_device *pdev)
671 760
672 device_init_wakeup(&pdev->dev, 0); 761 device_init_wakeup(&pdev->dev, 0);
673 762
674 for (i = 0; i < ddata->n_buttons; i++) { 763 for (i = 0; i < ddata->n_buttons; i++)
675 int irq = gpio_to_irq(ddata->data[i].button->gpio); 764 gpio_remove_key(&ddata->data[i]);
676 free_irq(irq, &ddata->data[i]);
677 if (ddata->data[i].timer_debounce)
678 del_timer_sync(&ddata->data[i].timer);
679 cancel_work_sync(&ddata->data[i].work);
680 gpio_free(ddata->data[i].button->gpio);
681 }
682 765
683 input_unregister_device(input); 766 input_unregister_device(input);
684 767
@@ -703,11 +786,9 @@ static int gpio_keys_suspend(struct device *dev)
703 786
704 if (device_may_wakeup(dev)) { 787 if (device_may_wakeup(dev)) {
705 for (i = 0; i < ddata->n_buttons; i++) { 788 for (i = 0; i < ddata->n_buttons; i++) {
706 struct gpio_keys_button *button = ddata->data[i].button; 789 struct gpio_button_data *bdata = &ddata->data[i];
707 if (button->wakeup) { 790 if (bdata->button->wakeup)
708 int irq = gpio_to_irq(button->gpio); 791 enable_irq_wake(bdata->irq);
709 enable_irq_wake(irq);
710 }
711 } 792 }
712 } 793 }
713 794
@@ -720,14 +801,12 @@ static int gpio_keys_resume(struct device *dev)
720 int i; 801 int i;
721 802
722 for (i = 0; i < ddata->n_buttons; i++) { 803 for (i = 0; i < ddata->n_buttons; i++) {
804 struct gpio_button_data *bdata = &ddata->data[i];
805 if (bdata->button->wakeup && device_may_wakeup(dev))
806 disable_irq_wake(bdata->irq);
723 807
724 struct gpio_keys_button *button = ddata->data[i].button; 808 if (gpio_is_valid(bdata->button->gpio))
725 if (button->wakeup && device_may_wakeup(dev)) { 809 gpio_keys_gpio_report_event(bdata);
726 int irq = gpio_to_irq(button->gpio);
727 disable_irq_wake(irq);
728 }
729
730 gpio_keys_report_event(&ddata->data[i]);
731 } 810 }
732 input_sync(ddata->input); 811 input_sync(ddata->input);
733 812
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 21c42f852343..fe4ac95ca6c8 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -630,6 +630,7 @@ tegra_kbc_dt_parse_pdata(struct platform_device *pdev)
630 if (!np) 630 if (!np)
631 return NULL; 631 return NULL;
632 632
633 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
633 if (!pdata) 634 if (!pdata)
634 return NULL; 635 return NULL;
635 636
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index 2a77a52d2e62..a977bfaa6821 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -2,7 +2,7 @@
2 * Finger Sensing Pad PS/2 mouse driver. 2 * Finger Sensing Pad PS/2 mouse driver.
3 * 3 *
4 * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd. 4 * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
5 * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation. 5 * Copyright (C) 2005-2012 Tai-hwa Liang, Sentelic Corporation.
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -21,6 +21,7 @@
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/input.h> 23#include <linux/input.h>
24#include <linux/input/mt.h>
24#include <linux/ctype.h> 25#include <linux/ctype.h>
25#include <linux/libps2.h> 26#include <linux/libps2.h>
26#include <linux/serio.h> 27#include <linux/serio.h>
@@ -36,6 +37,9 @@
36#define FSP_CMD_TIMEOUT 200 37#define FSP_CMD_TIMEOUT 200
37#define FSP_CMD_TIMEOUT2 30 38#define FSP_CMD_TIMEOUT2 30
38 39
40#define GET_ABS_X(packet) ((packet[1] << 2) | ((packet[3] >> 2) & 0x03))
41#define GET_ABS_Y(packet) ((packet[2] << 2) | (packet[3] & 0x03))
42
39/** Driver version. */ 43/** Driver version. */
40static const char fsp_drv_ver[] = "1.0.0-K"; 44static const char fsp_drv_ver[] = "1.0.0-K";
41 45
@@ -128,8 +132,9 @@ static int fsp_reg_read(struct psmouse *psmouse, int reg_addr, int *reg_val)
128 out: 132 out:
129 ps2_end_command(ps2dev); 133 ps2_end_command(ps2dev);
130 psmouse_activate(psmouse); 134 psmouse_activate(psmouse);
131 dev_dbg(&ps2dev->serio->dev, "READ REG: 0x%02x is 0x%02x (rc = %d)\n", 135 psmouse_dbg(psmouse,
132 reg_addr, *reg_val, rc); 136 "READ REG: 0x%02x is 0x%02x (rc = %d)\n",
137 reg_addr, *reg_val, rc);
133 return rc; 138 return rc;
134} 139}
135 140
@@ -179,8 +184,9 @@ static int fsp_reg_write(struct psmouse *psmouse, int reg_addr, int reg_val)
179 184
180 out: 185 out:
181 ps2_end_command(ps2dev); 186 ps2_end_command(ps2dev);
182 dev_dbg(&ps2dev->serio->dev, "WRITE REG: 0x%02x to 0x%02x (rc = %d)\n", 187 psmouse_dbg(psmouse,
183 reg_addr, reg_val, rc); 188 "WRITE REG: 0x%02x to 0x%02x (rc = %d)\n",
189 reg_addr, reg_val, rc);
184 return rc; 190 return rc;
185} 191}
186 192
@@ -237,8 +243,9 @@ static int fsp_page_reg_read(struct psmouse *psmouse, int *reg_val)
237 out: 243 out:
238 ps2_end_command(ps2dev); 244 ps2_end_command(ps2dev);
239 psmouse_activate(psmouse); 245 psmouse_activate(psmouse);
240 dev_dbg(&ps2dev->serio->dev, "READ PAGE REG: 0x%02x (rc = %d)\n", 246 psmouse_dbg(psmouse,
241 *reg_val, rc); 247 "READ PAGE REG: 0x%02x (rc = %d)\n",
248 *reg_val, rc);
242 return rc; 249 return rc;
243} 250}
244 251
@@ -274,8 +281,9 @@ static int fsp_page_reg_write(struct psmouse *psmouse, int reg_val)
274 281
275 out: 282 out:
276 ps2_end_command(ps2dev); 283 ps2_end_command(ps2dev);
277 dev_dbg(&ps2dev->serio->dev, "WRITE PAGE REG: to 0x%02x (rc = %d)\n", 284 psmouse_dbg(psmouse,
278 reg_val, rc); 285 "WRITE PAGE REG: to 0x%02x (rc = %d)\n",
286 reg_val, rc);
279 return rc; 287 return rc;
280} 288}
281 289
@@ -319,7 +327,7 @@ static int fsp_opc_tag_enable(struct psmouse *psmouse, bool enable)
319 int res = 0; 327 int res = 0;
320 328
321 if (fsp_reg_read(psmouse, FSP_REG_OPC_QDOWN, &v) == -1) { 329 if (fsp_reg_read(psmouse, FSP_REG_OPC_QDOWN, &v) == -1) {
322 dev_err(&psmouse->ps2dev.serio->dev, "Unable get OPC state.\n"); 330 psmouse_err(psmouse, "Unable get OPC state.\n");
323 return -EIO; 331 return -EIO;
324 } 332 }
325 333
@@ -336,8 +344,7 @@ static int fsp_opc_tag_enable(struct psmouse *psmouse, bool enable)
336 } 344 }
337 345
338 if (res != 0) { 346 if (res != 0) {
339 dev_err(&psmouse->ps2dev.serio->dev, 347 psmouse_err(psmouse, "Unable to enable OPC tag.\n");
340 "Unable to enable OPC tag.\n");
341 res = -EIO; 348 res = -EIO;
342 } 349 }
343 350
@@ -615,18 +622,40 @@ static struct attribute_group fsp_attribute_group = {
615 .attrs = fsp_attributes, 622 .attrs = fsp_attributes,
616}; 623};
617 624
618#ifdef FSP_DEBUG 625#ifdef FSP_DEBUG
619static void fsp_packet_debug(unsigned char packet[]) 626static void fsp_packet_debug(struct psmouse *psmouse, unsigned char packet[])
620{ 627{
621 static unsigned int ps2_packet_cnt; 628 static unsigned int ps2_packet_cnt;
622 static unsigned int ps2_last_second; 629 static unsigned int ps2_last_second;
623 unsigned int jiffies_msec; 630 unsigned int jiffies_msec;
631 const char *packet_type = "UNKNOWN";
632 unsigned short abs_x = 0, abs_y = 0;
633
634 /* Interpret & dump the packet data. */
635 switch (packet[0] >> FSP_PKT_TYPE_SHIFT) {
636 case FSP_PKT_TYPE_ABS:
637 packet_type = "Absolute";
638 abs_x = GET_ABS_X(packet);
639 abs_y = GET_ABS_Y(packet);
640 break;
641 case FSP_PKT_TYPE_NORMAL:
642 packet_type = "Normal";
643 break;
644 case FSP_PKT_TYPE_NOTIFY:
645 packet_type = "Notify";
646 break;
647 case FSP_PKT_TYPE_NORMAL_OPC:
648 packet_type = "Normal-OPC";
649 break;
650 }
624 651
625 ps2_packet_cnt++; 652 ps2_packet_cnt++;
626 jiffies_msec = jiffies_to_msecs(jiffies); 653 jiffies_msec = jiffies_to_msecs(jiffies);
627 psmouse_dbg(psmouse, 654 psmouse_dbg(psmouse,
628 "%08dms PS/2 packets: %02x, %02x, %02x, %02x\n", 655 "%08dms %s packets: %02x, %02x, %02x, %02x; "
629 jiffies_msec, packet[0], packet[1], packet[2], packet[3]); 656 "abs_x: %d, abs_y: %d\n",
657 jiffies_msec, packet_type,
658 packet[0], packet[1], packet[2], packet[3], abs_x, abs_y);
630 659
631 if (jiffies_msec - ps2_last_second > 1000) { 660 if (jiffies_msec - ps2_last_second > 1000) {
632 psmouse_dbg(psmouse, "PS/2 packets/sec = %d\n", ps2_packet_cnt); 661 psmouse_dbg(psmouse, "PS/2 packets/sec = %d\n", ps2_packet_cnt);
@@ -635,17 +664,29 @@ static void fsp_packet_debug(unsigned char packet[])
635 } 664 }
636} 665}
637#else 666#else
638static void fsp_packet_debug(unsigned char packet[]) 667static void fsp_packet_debug(struct psmouse *psmouse, unsigned char packet[])
639{ 668{
640} 669}
641#endif 670#endif
642 671
672static void fsp_set_slot(struct input_dev *dev, int slot, bool active,
673 unsigned int x, unsigned int y)
674{
675 input_mt_slot(dev, slot);
676 input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
677 if (active) {
678 input_report_abs(dev, ABS_MT_POSITION_X, x);
679 input_report_abs(dev, ABS_MT_POSITION_Y, y);
680 }
681}
682
643static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse) 683static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
644{ 684{
645 struct input_dev *dev = psmouse->dev; 685 struct input_dev *dev = psmouse->dev;
646 struct fsp_data *ad = psmouse->private; 686 struct fsp_data *ad = psmouse->private;
647 unsigned char *packet = psmouse->packet; 687 unsigned char *packet = psmouse->packet;
648 unsigned char button_status = 0, lscroll = 0, rscroll = 0; 688 unsigned char button_status = 0, lscroll = 0, rscroll = 0;
689 unsigned short abs_x, abs_y, fgrs = 0;
649 int rel_x, rel_y; 690 int rel_x, rel_y;
650 691
651 if (psmouse->pktcnt < 4) 692 if (psmouse->pktcnt < 4)
@@ -655,16 +696,76 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
655 * Full packet accumulated, process it 696 * Full packet accumulated, process it
656 */ 697 */
657 698
699 fsp_packet_debug(psmouse, packet);
700
658 switch (psmouse->packet[0] >> FSP_PKT_TYPE_SHIFT) { 701 switch (psmouse->packet[0] >> FSP_PKT_TYPE_SHIFT) {
659 case FSP_PKT_TYPE_ABS: 702 case FSP_PKT_TYPE_ABS:
660 dev_warn(&psmouse->ps2dev.serio->dev, 703 abs_x = GET_ABS_X(packet);
661 "Unexpected absolute mode packet, ignored.\n"); 704 abs_y = GET_ABS_Y(packet);
705
706 if (packet[0] & FSP_PB0_MFMC) {
707 /*
708 * MFMC packet: assume that there are two fingers on
709 * pad
710 */
711 fgrs = 2;
712
713 /* MFMC packet */
714 if (packet[0] & FSP_PB0_MFMC_FGR2) {
715 /* 2nd finger */
716 if (ad->last_mt_fgr == 2) {
717 /*
718 * workaround for buggy firmware
719 * which doesn't clear MFMC bit if
720 * the 1st finger is up
721 */
722 fgrs = 1;
723 fsp_set_slot(dev, 0, false, 0, 0);
724 }
725 ad->last_mt_fgr = 2;
726
727 fsp_set_slot(dev, 1, fgrs == 2, abs_x, abs_y);
728 } else {
729 /* 1st finger */
730 if (ad->last_mt_fgr == 1) {
731 /*
732 * workaround for buggy firmware
733 * which doesn't clear MFMC bit if
734 * the 2nd finger is up
735 */
736 fgrs = 1;
737 fsp_set_slot(dev, 1, false, 0, 0);
738 }
739 ad->last_mt_fgr = 1;
740 fsp_set_slot(dev, 0, fgrs != 0, abs_x, abs_y);
741 }
742 } else {
743 /* SFAC packet */
744
745 /* no multi-finger information */
746 ad->last_mt_fgr = 0;
747
748 if (abs_x != 0 && abs_y != 0)
749 fgrs = 1;
750
751 fsp_set_slot(dev, 0, fgrs > 0, abs_x, abs_y);
752 fsp_set_slot(dev, 1, false, 0, 0);
753 }
754 if (fgrs > 0) {
755 input_report_abs(dev, ABS_X, abs_x);
756 input_report_abs(dev, ABS_Y, abs_y);
757 }
758 input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
759 input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
760 input_report_key(dev, BTN_TOUCH, fgrs);
761 input_report_key(dev, BTN_TOOL_FINGER, fgrs == 1);
762 input_report_key(dev, BTN_TOOL_DOUBLETAP, fgrs == 2);
662 break; 763 break;
663 764
664 case FSP_PKT_TYPE_NORMAL_OPC: 765 case FSP_PKT_TYPE_NORMAL_OPC:
665 /* on-pad click, filter it if necessary */ 766 /* on-pad click, filter it if necessary */
666 if ((ad->flags & FSPDRV_FLAG_EN_OPC) != FSPDRV_FLAG_EN_OPC) 767 if ((ad->flags & FSPDRV_FLAG_EN_OPC) != FSPDRV_FLAG_EN_OPC)
667 packet[0] &= ~BIT(0); 768 packet[0] &= ~FSP_PB0_LBTN;
668 /* fall through */ 769 /* fall through */
669 770
670 case FSP_PKT_TYPE_NORMAL: 771 case FSP_PKT_TYPE_NORMAL:
@@ -711,8 +812,6 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
711 812
712 input_sync(dev); 813 input_sync(dev);
713 814
714 fsp_packet_debug(packet);
715
716 return PSMOUSE_FULL_PACKET; 815 return PSMOUSE_FULL_PACKET;
717} 816}
718 817
@@ -736,42 +835,106 @@ static int fsp_activate_protocol(struct psmouse *psmouse)
736 835
737 ps2_command(ps2dev, param, PSMOUSE_CMD_GETID); 836 ps2_command(ps2dev, param, PSMOUSE_CMD_GETID);
738 if (param[0] != 0x04) { 837 if (param[0] != 0x04) {
739 dev_err(&psmouse->ps2dev.serio->dev, 838 psmouse_err(psmouse,
740 "Unable to enable 4 bytes packet format.\n"); 839 "Unable to enable 4 bytes packet format.\n");
741 return -EIO; 840 return -EIO;
742 } 841 }
743 842
744 if (fsp_reg_read(psmouse, FSP_REG_SYSCTL5, &val)) { 843 if (pad->ver < FSP_VER_STL3888_C0) {
745 dev_err(&psmouse->ps2dev.serio->dev, 844 /* Preparing relative coordinates output for older hardware */
746 "Unable to read SYSCTL5 register.\n"); 845 if (fsp_reg_read(psmouse, FSP_REG_SYSCTL5, &val)) {
747 return -EIO; 846 psmouse_err(psmouse,
748 } 847 "Unable to read SYSCTL5 register.\n");
848 return -EIO;
849 }
749 850
750 val &= ~(FSP_BIT_EN_MSID7 | FSP_BIT_EN_MSID8 | FSP_BIT_EN_AUTO_MSID8); 851 if (fsp_get_buttons(psmouse, &pad->buttons)) {
751 /* Ensure we are not in absolute mode */ 852 psmouse_err(psmouse,
752 val &= ~FSP_BIT_EN_PKT_G0; 853 "Unable to retrieve number of buttons.\n");
753 if (pad->buttons == 0x06) { 854 return -EIO;
754 /* Left/Middle/Right & Scroll Up/Down/Right/Left */ 855 }
755 val |= FSP_BIT_EN_MSID6;
756 }
757 856
758 if (fsp_reg_write(psmouse, FSP_REG_SYSCTL5, val)) { 857 val &= ~(FSP_BIT_EN_MSID7 | FSP_BIT_EN_MSID8 | FSP_BIT_EN_AUTO_MSID8);
759 dev_err(&psmouse->ps2dev.serio->dev, 858 /* Ensure we are not in absolute mode */
760 "Unable to set up required mode bits.\n"); 859 val &= ~FSP_BIT_EN_PKT_G0;
761 return -EIO; 860 if (pad->buttons == 0x06) {
861 /* Left/Middle/Right & Scroll Up/Down/Right/Left */
862 val |= FSP_BIT_EN_MSID6;
863 }
864
865 if (fsp_reg_write(psmouse, FSP_REG_SYSCTL5, val)) {
866 psmouse_err(psmouse,
867 "Unable to set up required mode bits.\n");
868 return -EIO;
869 }
870
871 /*
872 * Enable OPC tags such that driver can tell the difference
873 * between on-pad and real button click
874 */
875 if (fsp_opc_tag_enable(psmouse, true))
876 psmouse_warn(psmouse,
877 "Failed to enable OPC tag mode.\n");
878 /* enable on-pad click by default */
879 pad->flags |= FSPDRV_FLAG_EN_OPC;
880
881 /* Enable on-pad vertical and horizontal scrolling */
882 fsp_onpad_vscr(psmouse, true);
883 fsp_onpad_hscr(psmouse, true);
884 } else {
885 /* Enable absolute coordinates output for Cx/Dx hardware */
886 if (fsp_reg_write(psmouse, FSP_REG_SWC1,
887 FSP_BIT_SWC1_EN_ABS_1F |
888 FSP_BIT_SWC1_EN_ABS_2F |
889 FSP_BIT_SWC1_EN_FUP_OUT |
890 FSP_BIT_SWC1_EN_ABS_CON)) {
891 psmouse_err(psmouse,
892 "Unable to enable absolute coordinates output.\n");
893 return -EIO;
894 }
762 } 895 }
763 896
764 /* 897 return 0;
765 * Enable OPC tags such that driver can tell the difference between 898}
766 * on-pad and real button click
767 */
768 if (fsp_opc_tag_enable(psmouse, true))
769 dev_warn(&psmouse->ps2dev.serio->dev,
770 "Failed to enable OPC tag mode.\n");
771 899
772 /* Enable on-pad vertical and horizontal scrolling */ 900static int fsp_set_input_params(struct psmouse *psmouse)
773 fsp_onpad_vscr(psmouse, true); 901{
774 fsp_onpad_hscr(psmouse, true); 902 struct input_dev *dev = psmouse->dev;
903 struct fsp_data *pad = psmouse->private;
904
905 if (pad->ver < FSP_VER_STL3888_C0) {
906 __set_bit(BTN_MIDDLE, dev->keybit);
907 __set_bit(BTN_BACK, dev->keybit);
908 __set_bit(BTN_FORWARD, dev->keybit);
909 __set_bit(REL_WHEEL, dev->relbit);
910 __set_bit(REL_HWHEEL, dev->relbit);
911 } else {
912 /*
913 * Hardware prior to Cx performs much better in relative mode;
914 * hence, only enable absolute coordinates output as well as
915 * multi-touch output for the newer hardware.
916 *
917 * Maximum coordinates can be computed as:
918 *
919 * number of scanlines * 64 - 57
920 *
921 * where number of X/Y scanline lines are 16/12.
922 */
923 int abs_x = 967, abs_y = 711;
924
925 __set_bit(EV_ABS, dev->evbit);
926 __clear_bit(EV_REL, dev->evbit);
927 __set_bit(BTN_TOUCH, dev->keybit);
928 __set_bit(BTN_TOOL_FINGER, dev->keybit);
929 __set_bit(BTN_TOOL_DOUBLETAP, dev->keybit);
930 __set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
931
932 input_set_abs_params(dev, ABS_X, 0, abs_x, 0, 0);
933 input_set_abs_params(dev, ABS_Y, 0, abs_y, 0, 0);
934 input_mt_init_slots(dev, 2);
935 input_set_abs_params(dev, ABS_MT_POSITION_X, 0, abs_x, 0, 0);
936 input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, abs_y, 0, 0);
937 }
775 938
776 return 0; 939 return 0;
777} 940}
@@ -829,18 +992,16 @@ static int fsp_reconnect(struct psmouse *psmouse)
829int fsp_init(struct psmouse *psmouse) 992int fsp_init(struct psmouse *psmouse)
830{ 993{
831 struct fsp_data *priv; 994 struct fsp_data *priv;
832 int ver, rev, buttons; 995 int ver, rev;
833 int error; 996 int error;
834 997
835 if (fsp_get_version(psmouse, &ver) || 998 if (fsp_get_version(psmouse, &ver) ||
836 fsp_get_revision(psmouse, &rev) || 999 fsp_get_revision(psmouse, &rev)) {
837 fsp_get_buttons(psmouse, &buttons)) {
838 return -ENODEV; 1000 return -ENODEV;
839 } 1001 }
840 1002
841 psmouse_info(psmouse, 1003 psmouse_info(psmouse, "Finger Sensing Pad, hw: %d.%d.%d, sw: %s\n",
842 "Finger Sensing Pad, hw: %d.%d.%d, sw: %s, buttons: %d\n", 1004 ver >> 4, ver & 0x0F, rev, fsp_drv_ver);
843 ver >> 4, ver & 0x0F, rev, fsp_drv_ver, buttons & 7);
844 1005
845 psmouse->private = priv = kzalloc(sizeof(struct fsp_data), GFP_KERNEL); 1006 psmouse->private = priv = kzalloc(sizeof(struct fsp_data), GFP_KERNEL);
846 if (!priv) 1007 if (!priv)
@@ -848,17 +1009,6 @@ int fsp_init(struct psmouse *psmouse)
848 1009
849 priv->ver = ver; 1010 priv->ver = ver;
850 priv->rev = rev; 1011 priv->rev = rev;
851 priv->buttons = buttons;
852
853 /* enable on-pad click by default */
854 priv->flags |= FSPDRV_FLAG_EN_OPC;
855
856 /* Set up various supported input event bits */
857 __set_bit(BTN_MIDDLE, psmouse->dev->keybit);
858 __set_bit(BTN_BACK, psmouse->dev->keybit);
859 __set_bit(BTN_FORWARD, psmouse->dev->keybit);
860 __set_bit(REL_WHEEL, psmouse->dev->relbit);
861 __set_bit(REL_HWHEEL, psmouse->dev->relbit);
862 1012
863 psmouse->protocol_handler = fsp_process_byte; 1013 psmouse->protocol_handler = fsp_process_byte;
864 psmouse->disconnect = fsp_disconnect; 1014 psmouse->disconnect = fsp_disconnect;
@@ -866,16 +1016,20 @@ int fsp_init(struct psmouse *psmouse)
866 psmouse->cleanup = fsp_reset; 1016 psmouse->cleanup = fsp_reset;
867 psmouse->pktsize = 4; 1017 psmouse->pktsize = 4;
868 1018
869 /* set default packet output based on number of buttons we found */
870 error = fsp_activate_protocol(psmouse); 1019 error = fsp_activate_protocol(psmouse);
871 if (error) 1020 if (error)
872 goto err_out; 1021 goto err_out;
873 1022
1023 /* Set up various supported input event bits */
1024 error = fsp_set_input_params(psmouse);
1025 if (error)
1026 goto err_out;
1027
874 error = sysfs_create_group(&psmouse->ps2dev.serio->dev.kobj, 1028 error = sysfs_create_group(&psmouse->ps2dev.serio->dev.kobj,
875 &fsp_attribute_group); 1029 &fsp_attribute_group);
876 if (error) { 1030 if (error) {
877 dev_err(&psmouse->ps2dev.serio->dev, 1031 psmouse_err(psmouse,
878 "Failed to create sysfs attributes (%d)", error); 1032 "Failed to create sysfs attributes (%d)", error);
879 goto err_out; 1033 goto err_out;
880 } 1034 }
881 1035
diff --git a/drivers/input/mouse/sentelic.h b/drivers/input/mouse/sentelic.h
index 2e4af24f8c15..334de19e5ddb 100644
--- a/drivers/input/mouse/sentelic.h
+++ b/drivers/input/mouse/sentelic.h
@@ -2,7 +2,7 @@
2 * Finger Sensing Pad PS/2 mouse driver. 2 * Finger Sensing Pad PS/2 mouse driver.
3 * 3 *
4 * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd. 4 * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
5 * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation. 5 * Copyright (C) 2005-2012 Tai-hwa Liang, Sentelic Corporation.
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
@@ -55,6 +55,16 @@
55#define FSP_BIT_FIX_HSCR BIT(5) 55#define FSP_BIT_FIX_HSCR BIT(5)
56#define FSP_BIT_DRAG_LOCK BIT(6) 56#define FSP_BIT_DRAG_LOCK BIT(6)
57 57
58#define FSP_REG_SWC1 (0x90)
59#define FSP_BIT_SWC1_EN_ABS_1F BIT(0)
60#define FSP_BIT_SWC1_EN_GID BIT(1)
61#define FSP_BIT_SWC1_EN_ABS_2F BIT(2)
62#define FSP_BIT_SWC1_EN_FUP_OUT BIT(3)
63#define FSP_BIT_SWC1_EN_ABS_CON BIT(4)
64#define FSP_BIT_SWC1_GST_GRP0 BIT(5)
65#define FSP_BIT_SWC1_GST_GRP1 BIT(6)
66#define FSP_BIT_SWC1_BX_COMPAT BIT(7)
67
58/* Finger-sensing Pad packet formating related definitions */ 68/* Finger-sensing Pad packet formating related definitions */
59 69
60/* absolute packet type */ 70/* absolute packet type */
@@ -64,12 +74,32 @@
64#define FSP_PKT_TYPE_NORMAL_OPC (0x03) 74#define FSP_PKT_TYPE_NORMAL_OPC (0x03)
65#define FSP_PKT_TYPE_SHIFT (6) 75#define FSP_PKT_TYPE_SHIFT (6)
66 76
77/* bit definitions for the first byte of report packet */
78#define FSP_PB0_LBTN BIT(0)
79#define FSP_PB0_RBTN BIT(1)
80#define FSP_PB0_MBTN BIT(2)
81#define FSP_PB0_MFMC_FGR2 FSP_PB0_MBTN
82#define FSP_PB0_MUST_SET BIT(3)
83#define FSP_PB0_PHY_BTN BIT(4)
84#define FSP_PB0_MFMC BIT(5)
85
86/* hardware revisions */
87#define FSP_VER_STL3888_A4 (0xC1)
88#define FSP_VER_STL3888_B0 (0xD0)
89#define FSP_VER_STL3888_B1 (0xD1)
90#define FSP_VER_STL3888_B2 (0xD2)
91#define FSP_VER_STL3888_C0 (0xE0)
92#define FSP_VER_STL3888_C1 (0xE1)
93#define FSP_VER_STL3888_D0 (0xE2)
94#define FSP_VER_STL3888_D1 (0xE3)
95#define FSP_VER_STL3888_E0 (0xE4)
96
67#ifdef __KERNEL__ 97#ifdef __KERNEL__
68 98
69struct fsp_data { 99struct fsp_data {
70 unsigned char ver; /* hardware version */ 100 unsigned char ver; /* hardware version */
71 unsigned char rev; /* hardware revison */ 101 unsigned char rev; /* hardware revison */
72 unsigned char buttons; /* Number of buttons */ 102 unsigned int buttons; /* Number of buttons */
73 unsigned int flags; 103 unsigned int flags;
74#define FSPDRV_FLAG_EN_OPC (0x001) /* enable on-pad clicking */ 104#define FSPDRV_FLAG_EN_OPC (0x001) /* enable on-pad clicking */
75 105
@@ -78,6 +108,7 @@ struct fsp_data {
78 108
79 unsigned char last_reg; /* Last register we requested read from */ 109 unsigned char last_reg; /* Last register we requested read from */
80 unsigned char last_val; 110 unsigned char last_val;
111 unsigned int last_mt_fgr; /* Last seen finger(multitouch) */
81}; 112};
82 113
83#ifdef CONFIG_MOUSE_PS2_SENTELIC 114#ifdef CONFIG_MOUSE_PS2_SENTELIC
diff --git a/drivers/input/serio/ams_delta_serio.c b/drivers/input/serio/ams_delta_serio.c
index bd5b10eeeb40..f5fbdf94de3b 100644
--- a/drivers/input/serio/ams_delta_serio.c
+++ b/drivers/input/serio/ams_delta_serio.c
@@ -184,7 +184,7 @@ module_init(ams_delta_serio_init);
184static void __exit ams_delta_serio_exit(void) 184static void __exit ams_delta_serio_exit(void)
185{ 185{
186 serio_unregister_port(ams_delta_serio); 186 serio_unregister_port(ams_delta_serio);
187 free_irq(OMAP_GPIO_IRQ(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 0); 187 free_irq(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK), 0);
188 gpio_free_array(ams_delta_gpios, 188 gpio_free_array(ams_delta_gpios,
189 ARRAY_SIZE(ams_delta_gpios)); 189 ARRAY_SIZE(ams_delta_gpios));
190} 190}
diff --git a/drivers/input/tablet/Kconfig b/drivers/input/tablet/Kconfig
index e53f4081a586..bed7cbf84cfd 100644
--- a/drivers/input/tablet/Kconfig
+++ b/drivers/input/tablet/Kconfig
@@ -76,6 +76,7 @@ config TABLET_USB_KBTAB
76config TABLET_USB_WACOM 76config TABLET_USB_WACOM
77 tristate "Wacom Intuos/Graphire tablet support (USB)" 77 tristate "Wacom Intuos/Graphire tablet support (USB)"
78 depends on USB_ARCH_HAS_HCD 78 depends on USB_ARCH_HAS_HCD
79 select POWER_SUPPLY
79 select USB 80 select USB
80 select NEW_LEDS 81 select NEW_LEDS
81 select LEDS_CLASS 82 select LEDS_CLASS
diff --git a/drivers/input/tablet/wacom.h b/drivers/input/tablet/wacom.h
index 0783864a7dc2..b4842d0e61dd 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/input/tablet/wacom.h
@@ -88,6 +88,7 @@
88#include <linux/mod_devicetable.h> 88#include <linux/mod_devicetable.h>
89#include <linux/init.h> 89#include <linux/init.h>
90#include <linux/usb/input.h> 90#include <linux/usb/input.h>
91#include <linux/power_supply.h>
91#include <asm/unaligned.h> 92#include <asm/unaligned.h>
92 93
93/* 94/*
@@ -112,6 +113,7 @@ struct wacom {
112 struct urb *irq; 113 struct urb *irq;
113 struct wacom_wac wacom_wac; 114 struct wacom_wac wacom_wac;
114 struct mutex lock; 115 struct mutex lock;
116 struct work_struct work;
115 bool open; 117 bool open;
116 char phys[32]; 118 char phys[32];
117 struct wacom_led { 119 struct wacom_led {
@@ -120,8 +122,15 @@ struct wacom {
120 u8 hlv; /* status led brightness button pressed (1..127) */ 122 u8 hlv; /* status led brightness button pressed (1..127) */
121 u8 img_lum; /* OLED matrix display brightness */ 123 u8 img_lum; /* OLED matrix display brightness */
122 } led; 124 } led;
125 struct power_supply battery;
123}; 126};
124 127
128static inline void wacom_schedule_work(struct wacom_wac *wacom_wac)
129{
130 struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac);
131 schedule_work(&wacom->work);
132}
133
125extern const struct usb_device_id wacom_ids[]; 134extern const struct usb_device_id wacom_ids[];
126 135
127void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len); 136void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index ca28066dc81e..0d269212931e 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -167,6 +167,19 @@ static void wacom_close(struct input_dev *dev)
167 usb_autopm_put_interface(wacom->intf); 167 usb_autopm_put_interface(wacom->intf);
168} 168}
169 169
170/*
171 * Static values for max X/Y and resolution of Pen interface is stored in
172 * features. This mean physical size of active area can be computed.
173 * This is useful to do when Pen and Touch have same active area of tablet.
174 * This means for Touch device, we only need to find max X/Y value and we
175 * have enough information to compute resolution of touch.
176 */
177static void wacom_set_phy_from_res(struct wacom_features *features)
178{
179 features->x_phy = (features->x_max * 100) / features->x_resolution;
180 features->y_phy = (features->y_max * 100) / features->y_resolution;
181}
182
170static int wacom_parse_logical_collection(unsigned char *report, 183static int wacom_parse_logical_collection(unsigned char *report,
171 struct wacom_features *features) 184 struct wacom_features *features)
172{ 185{
@@ -178,15 +191,7 @@ static int wacom_parse_logical_collection(unsigned char *report,
178 features->pktlen = WACOM_PKGLEN_BBTOUCH3; 191 features->pktlen = WACOM_PKGLEN_BBTOUCH3;
179 features->device_type = BTN_TOOL_FINGER; 192 features->device_type = BTN_TOOL_FINGER;
180 193
181 /* 194 wacom_set_phy_from_res(features);
182 * Stylus and Touch have same active area
183 * so compute physical size based on stylus
184 * data before its overwritten.
185 */
186 features->x_phy =
187 (features->x_max * 100) / features->x_resolution;
188 features->y_phy =
189 (features->y_max * 100) / features->y_resolution;
190 195
191 features->x_max = features->y_max = 196 features->x_max = features->y_max =
192 get_unaligned_le16(&report[10]); 197 get_unaligned_le16(&report[10]);
@@ -422,6 +427,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
422 report_id, rep_data, 4, 1); 427 report_id, rep_data, 4, 1);
423 } while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES); 428 } while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES);
424 } else if (features->type != TABLETPC && 429 } else if (features->type != TABLETPC &&
430 features->type != WIRELESS &&
425 features->device_type == BTN_TOOL_PEN) { 431 features->device_type == BTN_TOOL_PEN) {
426 do { 432 do {
427 rep_data[0] = 2; 433 rep_data[0] = 2;
@@ -454,6 +460,21 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
454 features->pressure_fuzz = 0; 460 features->pressure_fuzz = 0;
455 features->distance_fuzz = 0; 461 features->distance_fuzz = 0;
456 462
463 /*
464 * The wireless device HID is basic and layout conflicts with
465 * other tablets (monitor and touch interface can look like pen).
466 * Skip the query for this type and modify defaults based on
467 * interface number.
468 */
469 if (features->type == WIRELESS) {
470 if (intf->cur_altsetting->desc.bInterfaceNumber == 0) {
471 features->device_type = 0;
472 } else if (intf->cur_altsetting->desc.bInterfaceNumber == 2) {
473 features->device_type = BTN_TOOL_DOUBLETAP;
474 features->pktlen = WACOM_PKGLEN_BBTOUCH3;
475 }
476 }
477
457 /* only Tablet PCs and Bamboo P&T need to retrieve the info */ 478 /* only Tablet PCs and Bamboo P&T need to retrieve the info */
458 if ((features->type != TABLETPC) && (features->type != TABLETPC2FG) && 479 if ((features->type != TABLETPC) && (features->type != TABLETPC2FG) &&
459 (features->type != BAMBOO_PT)) 480 (features->type != BAMBOO_PT))
@@ -822,6 +843,152 @@ static void wacom_destroy_leds(struct wacom *wacom)
822 } 843 }
823} 844}
824 845
846static enum power_supply_property wacom_battery_props[] = {
847 POWER_SUPPLY_PROP_CAPACITY
848};
849
850static int wacom_battery_get_property(struct power_supply *psy,
851 enum power_supply_property psp,
852 union power_supply_propval *val)
853{
854 struct wacom *wacom = container_of(psy, struct wacom, battery);
855 int ret = 0;
856
857 switch (psp) {
858 case POWER_SUPPLY_PROP_CAPACITY:
859 val->intval =
860 wacom->wacom_wac.battery_capacity * 100 / 31;
861 break;
862 default:
863 ret = -EINVAL;
864 break;
865 }
866
867 return ret;
868}
869
870static int wacom_initialize_battery(struct wacom *wacom)
871{
872 int error = 0;
873
874 if (wacom->wacom_wac.features.quirks & WACOM_QUIRK_MONITOR) {
875 wacom->battery.properties = wacom_battery_props;
876 wacom->battery.num_properties = ARRAY_SIZE(wacom_battery_props);
877 wacom->battery.get_property = wacom_battery_get_property;
878 wacom->battery.name = "wacom_battery";
879 wacom->battery.type = POWER_SUPPLY_TYPE_BATTERY;
880 wacom->battery.use_for_apm = 0;
881
882 error = power_supply_register(&wacom->usbdev->dev,
883 &wacom->battery);
884 }
885
886 return error;
887}
888
889static void wacom_destroy_battery(struct wacom *wacom)
890{
891 if (wacom->wacom_wac.features.quirks & WACOM_QUIRK_MONITOR)
892 power_supply_unregister(&wacom->battery);
893}
894
895static int wacom_register_input(struct wacom *wacom)
896{
897 struct input_dev *input_dev;
898 struct usb_interface *intf = wacom->intf;
899 struct usb_device *dev = interface_to_usbdev(intf);
900 struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
901 int error;
902
903 input_dev = input_allocate_device();
904 if (!input_dev)
905 return -ENOMEM;
906
907 input_dev->name = wacom_wac->name;
908 input_dev->dev.parent = &intf->dev;
909 input_dev->open = wacom_open;
910 input_dev->close = wacom_close;
911 usb_to_input_id(dev, &input_dev->id);
912 input_set_drvdata(input_dev, wacom);
913
914 wacom_wac->input = input_dev;
915 wacom_setup_input_capabilities(input_dev, wacom_wac);
916
917 error = input_register_device(input_dev);
918 if (error) {
919 input_free_device(input_dev);
920 wacom_wac->input = NULL;
921 }
922
923 return error;
924}
925
926static void wacom_wireless_work(struct work_struct *work)
927{
928 struct wacom *wacom = container_of(work, struct wacom, work);
929 struct usb_device *usbdev = wacom->usbdev;
930 struct wacom_wac *wacom_wac = &wacom->wacom_wac;
931
932 /*
933 * Regardless if this is a disconnect or a new tablet,
934 * remove any existing input devices.
935 */
936
937 /* Stylus interface */
938 wacom = usb_get_intfdata(usbdev->config->interface[1]);
939 if (wacom->wacom_wac.input)
940 input_unregister_device(wacom->wacom_wac.input);
941 wacom->wacom_wac.input = 0;
942
943 /* Touch interface */
944 wacom = usb_get_intfdata(usbdev->config->interface[2]);
945 if (wacom->wacom_wac.input)
946 input_unregister_device(wacom->wacom_wac.input);
947 wacom->wacom_wac.input = 0;
948
949 if (wacom_wac->pid == 0) {
950 printk(KERN_INFO "wacom: wireless tablet disconnected\n");
951 } else {
952 const struct usb_device_id *id = wacom_ids;
953
954 printk(KERN_INFO
955 "wacom: wireless tablet connected with PID %x\n",
956 wacom_wac->pid);
957
958 while (id->match_flags) {
959 if (id->idVendor == USB_VENDOR_ID_WACOM &&
960 id->idProduct == wacom_wac->pid)
961 break;
962 id++;
963 }
964
965 if (!id->match_flags) {
966 printk(KERN_INFO
967 "wacom: ignorning unknown PID.\n");
968 return;
969 }
970
971 /* Stylus interface */
972 wacom = usb_get_intfdata(usbdev->config->interface[1]);
973 wacom_wac = &wacom->wacom_wac;
974 wacom_wac->features =
975 *((struct wacom_features *)id->driver_info);
976 wacom_wac->features.device_type = BTN_TOOL_PEN;
977 wacom_register_input(wacom);
978
979 /* Touch interface */
980 wacom = usb_get_intfdata(usbdev->config->interface[2]);
981 wacom_wac = &wacom->wacom_wac;
982 wacom_wac->features =
983 *((struct wacom_features *)id->driver_info);
984 wacom_wac->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
985 wacom_wac->features.device_type = BTN_TOOL_FINGER;
986 wacom_set_phy_from_res(&wacom_wac->features);
987 wacom_wac->features.x_max = wacom_wac->features.y_max = 4096;
988 wacom_register_input(wacom);
989 }
990}
991
825static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id) 992static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
826{ 993{
827 struct usb_device *dev = interface_to_usbdev(intf); 994 struct usb_device *dev = interface_to_usbdev(intf);
@@ -829,18 +996,14 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
829 struct wacom *wacom; 996 struct wacom *wacom;
830 struct wacom_wac *wacom_wac; 997 struct wacom_wac *wacom_wac;
831 struct wacom_features *features; 998 struct wacom_features *features;
832 struct input_dev *input_dev;
833 int error; 999 int error;
834 1000
835 if (!id->driver_info) 1001 if (!id->driver_info)
836 return -EINVAL; 1002 return -EINVAL;
837 1003
838 wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL); 1004 wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
839 input_dev = input_allocate_device(); 1005 if (!wacom)
840 if (!wacom || !input_dev) { 1006 return -ENOMEM;
841 error = -ENOMEM;
842 goto fail1;
843 }
844 1007
845 wacom_wac = &wacom->wacom_wac; 1008 wacom_wac = &wacom->wacom_wac;
846 wacom_wac->features = *((struct wacom_features *)id->driver_info); 1009 wacom_wac->features = *((struct wacom_features *)id->driver_info);
@@ -866,11 +1029,10 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
866 wacom->usbdev = dev; 1029 wacom->usbdev = dev;
867 wacom->intf = intf; 1030 wacom->intf = intf;
868 mutex_init(&wacom->lock); 1031 mutex_init(&wacom->lock);
1032 INIT_WORK(&wacom->work, wacom_wireless_work);
869 usb_make_path(dev, wacom->phys, sizeof(wacom->phys)); 1033 usb_make_path(dev, wacom->phys, sizeof(wacom->phys));
870 strlcat(wacom->phys, "/input0", sizeof(wacom->phys)); 1034 strlcat(wacom->phys, "/input0", sizeof(wacom->phys));
871 1035
872 wacom_wac->input = input_dev;
873
874 endpoint = &intf->cur_altsetting->endpoint[0].desc; 1036 endpoint = &intf->cur_altsetting->endpoint[0].desc;
875 1037
876 /* Retrieve the physical and logical size for OEM devices */ 1038 /* Retrieve the physical and logical size for OEM devices */
@@ -894,15 +1056,6 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
894 goto fail3; 1056 goto fail3;
895 } 1057 }
896 1058
897 input_dev->name = wacom_wac->name;
898 input_dev->dev.parent = &intf->dev;
899 input_dev->open = wacom_open;
900 input_dev->close = wacom_close;
901 usb_to_input_id(dev, &input_dev->id);
902 input_set_drvdata(input_dev, wacom);
903
904 wacom_setup_input_capabilities(input_dev, wacom_wac);
905
906 usb_fill_int_urb(wacom->irq, dev, 1059 usb_fill_int_urb(wacom->irq, dev,
907 usb_rcvintpipe(dev, endpoint->bEndpointAddress), 1060 usb_rcvintpipe(dev, endpoint->bEndpointAddress),
908 wacom_wac->data, features->pktlen, 1061 wacom_wac->data, features->pktlen,
@@ -914,22 +1067,34 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
914 if (error) 1067 if (error)
915 goto fail4; 1068 goto fail4;
916 1069
917 error = input_register_device(input_dev); 1070 error = wacom_initialize_battery(wacom);
918 if (error) 1071 if (error)
919 goto fail5; 1072 goto fail5;
920 1073
1074 if (!(features->quirks & WACOM_QUIRK_NO_INPUT)) {
1075 error = wacom_register_input(wacom);
1076 if (error)
1077 goto fail6;
1078 }
1079
921 /* Note that if query fails it is not a hard failure */ 1080 /* Note that if query fails it is not a hard failure */
922 wacom_query_tablet_data(intf, features); 1081 wacom_query_tablet_data(intf, features);
923 1082
924 usb_set_intfdata(intf, wacom); 1083 usb_set_intfdata(intf, wacom);
1084
1085 if (features->quirks & WACOM_QUIRK_MONITOR) {
1086 if (usb_submit_urb(wacom->irq, GFP_KERNEL))
1087 goto fail5;
1088 }
1089
925 return 0; 1090 return 0;
926 1091
1092 fail6: wacom_destroy_battery(wacom);
927 fail5: wacom_destroy_leds(wacom); 1093 fail5: wacom_destroy_leds(wacom);
928 fail4: wacom_remove_shared_data(wacom_wac); 1094 fail4: wacom_remove_shared_data(wacom_wac);
929 fail3: usb_free_urb(wacom->irq); 1095 fail3: usb_free_urb(wacom->irq);
930 fail2: usb_free_coherent(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma); 1096 fail2: usb_free_coherent(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma);
931 fail1: input_free_device(input_dev); 1097 fail1: kfree(wacom);
932 kfree(wacom);
933 return error; 1098 return error;
934} 1099}
935 1100
@@ -940,7 +1105,10 @@ static void wacom_disconnect(struct usb_interface *intf)
940 usb_set_intfdata(intf, NULL); 1105 usb_set_intfdata(intf, NULL);
941 1106
942 usb_kill_urb(wacom->irq); 1107 usb_kill_urb(wacom->irq);
943 input_unregister_device(wacom->wacom_wac.input); 1108 cancel_work_sync(&wacom->work);
1109 if (wacom->wacom_wac.input)
1110 input_unregister_device(wacom->wacom_wac.input);
1111 wacom_destroy_battery(wacom);
944 wacom_destroy_leds(wacom); 1112 wacom_destroy_leds(wacom);
945 usb_free_urb(wacom->irq); 1113 usb_free_urb(wacom->irq);
946 usb_free_coherent(interface_to_usbdev(intf), WACOM_PKGLEN_MAX, 1114 usb_free_coherent(interface_to_usbdev(intf), WACOM_PKGLEN_MAX,
@@ -972,7 +1140,8 @@ static int wacom_resume(struct usb_interface *intf)
972 wacom_query_tablet_data(intf, features); 1140 wacom_query_tablet_data(intf, features);
973 wacom_led_control(wacom); 1141 wacom_led_control(wacom);
974 1142
975 if (wacom->open && usb_submit_urb(wacom->irq, GFP_NOIO) < 0) 1143 if ((wacom->open || features->quirks & WACOM_QUIRK_MONITOR)
1144 && usb_submit_urb(wacom->irq, GFP_NOIO) < 0)
976 rv = -EIO; 1145 rv = -EIO;
977 1146
978 mutex_unlock(&wacom->lock); 1147 mutex_unlock(&wacom->lock);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 89a96427faa0..cecd35c8f0b3 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1044,6 +1044,35 @@ static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
1044 return 0; 1044 return 0;
1045} 1045}
1046 1046
1047static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len)
1048{
1049 unsigned char *data = wacom->data;
1050 int connected;
1051
1052 if (len != WACOM_PKGLEN_WIRELESS || data[0] != 0x80)
1053 return 0;
1054
1055 connected = data[1] & 0x01;
1056 if (connected) {
1057 int pid, battery;
1058
1059 pid = get_unaligned_be16(&data[6]);
1060 battery = data[5] & 0x3f;
1061 if (wacom->pid != pid) {
1062 wacom->pid = pid;
1063 wacom_schedule_work(wacom);
1064 }
1065 wacom->battery_capacity = battery;
1066 } else if (wacom->pid != 0) {
1067 /* disconnected while previously connected */
1068 wacom->pid = 0;
1069 wacom_schedule_work(wacom);
1070 wacom->battery_capacity = 0;
1071 }
1072
1073 return 0;
1074}
1075
1047void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len) 1076void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
1048{ 1077{
1049 bool sync; 1078 bool sync;
@@ -1094,6 +1123,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
1094 sync = wacom_bpt_irq(wacom_wac, len); 1123 sync = wacom_bpt_irq(wacom_wac, len);
1095 break; 1124 break;
1096 1125
1126 case WIRELESS:
1127 sync = wacom_wireless_irq(wacom_wac, len);
1128 break;
1129
1097 default: 1130 default:
1098 sync = false; 1131 sync = false;
1099 break; 1132 break;
@@ -1155,7 +1188,7 @@ void wacom_setup_device_quirks(struct wacom_features *features)
1155 1188
1156 /* these device have multiple inputs */ 1189 /* these device have multiple inputs */
1157 if (features->type == TABLETPC || features->type == TABLETPC2FG || 1190 if (features->type == TABLETPC || features->type == TABLETPC2FG ||
1158 features->type == BAMBOO_PT) 1191 features->type == BAMBOO_PT || features->type == WIRELESS)
1159 features->quirks |= WACOM_QUIRK_MULTI_INPUT; 1192 features->quirks |= WACOM_QUIRK_MULTI_INPUT;
1160 1193
1161 /* quirk for bamboo touch with 2 low res touches */ 1194 /* quirk for bamboo touch with 2 low res touches */
@@ -1167,6 +1200,16 @@ void wacom_setup_device_quirks(struct wacom_features *features)
1167 features->y_fuzz <<= 5; 1200 features->y_fuzz <<= 5;
1168 features->quirks |= WACOM_QUIRK_BBTOUCH_LOWRES; 1201 features->quirks |= WACOM_QUIRK_BBTOUCH_LOWRES;
1169 } 1202 }
1203
1204 if (features->type == WIRELESS) {
1205
1206 /* monitor never has input and pen/touch have delayed create */
1207 features->quirks |= WACOM_QUIRK_NO_INPUT;
1208
1209 /* must be monitor interface if no device_type set */
1210 if (!features->device_type)
1211 features->quirks |= WACOM_QUIRK_MONITOR;
1212 }
1170} 1213}
1171 1214
1172static unsigned int wacom_calculate_touch_res(unsigned int logical_max, 1215static unsigned int wacom_calculate_touch_res(unsigned int logical_max,
@@ -1640,6 +1683,9 @@ static const struct wacom_features wacom_features_0xEC =
1640static const struct wacom_features wacom_features_0x47 = 1683static const struct wacom_features wacom_features_0x47 =
1641 { "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, 1684 { "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
1642 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1685 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1686static const struct wacom_features wacom_features_0x84 =
1687 { "Wacom Wireless Receiver", WACOM_PKGLEN_WIRELESS, 0, 0, 0,
1688 0, WIRELESS, 0, 0 };
1643static const struct wacom_features wacom_features_0xD0 = 1689static const struct wacom_features wacom_features_0xD0 =
1644 { "Wacom Bamboo 2FG", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 1690 { "Wacom Bamboo 2FG", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
1645 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1691 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1766,6 +1812,7 @@ const struct usb_device_id wacom_ids[] = {
1766 { USB_DEVICE_DETAILED(0xCE, USB_CLASS_HID, 1812 { USB_DEVICE_DETAILED(0xCE, USB_CLASS_HID,
1767 USB_INTERFACE_SUBCLASS_BOOT, 1813 USB_INTERFACE_SUBCLASS_BOOT,
1768 USB_INTERFACE_PROTOCOL_MOUSE) }, 1814 USB_INTERFACE_PROTOCOL_MOUSE) },
1815 { USB_DEVICE_WACOM(0x84) },
1769 { USB_DEVICE_WACOM(0xD0) }, 1816 { USB_DEVICE_WACOM(0xD0) },
1770 { USB_DEVICE_WACOM(0xD1) }, 1817 { USB_DEVICE_WACOM(0xD1) },
1771 { USB_DEVICE_WACOM(0xD2) }, 1818 { USB_DEVICE_WACOM(0xD2) },
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 4f0ba21b0196..ba5a334e54d6 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -24,6 +24,7 @@
24#define WACOM_PKGLEN_BBTOUCH 20 24#define WACOM_PKGLEN_BBTOUCH 20
25#define WACOM_PKGLEN_BBTOUCH3 64 25#define WACOM_PKGLEN_BBTOUCH3 64
26#define WACOM_PKGLEN_BBPEN 10 26#define WACOM_PKGLEN_BBPEN 10
27#define WACOM_PKGLEN_WIRELESS 32
27 28
28/* device IDs */ 29/* device IDs */
29#define STYLUS_DEVICE_ID 0x02 30#define STYLUS_DEVICE_ID 0x02
@@ -45,6 +46,8 @@
45/* device quirks */ 46/* device quirks */
46#define WACOM_QUIRK_MULTI_INPUT 0x0001 47#define WACOM_QUIRK_MULTI_INPUT 0x0001
47#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0002 48#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0002
49#define WACOM_QUIRK_NO_INPUT 0x0004
50#define WACOM_QUIRK_MONITOR 0x0008
48 51
49enum { 52enum {
50 PENPARTNER = 0, 53 PENPARTNER = 0,
@@ -54,6 +57,7 @@ enum {
54 PL, 57 PL,
55 DTU, 58 DTU,
56 BAMBOO_PT, 59 BAMBOO_PT,
60 WIRELESS,
57 INTUOS, 61 INTUOS,
58 INTUOS3S, 62 INTUOS3S,
59 INTUOS3, 63 INTUOS3,
@@ -107,6 +111,8 @@ struct wacom_wac {
107 struct wacom_features features; 111 struct wacom_features features;
108 struct wacom_shared *shared; 112 struct wacom_shared *shared;
109 struct input_dev *input; 113 struct input_dev *input;
114 int pid;
115 int battery_capacity;
110}; 116};
111 117
112#endif 118#endif
diff --git a/drivers/media/video/davinci/vpbe_osd.c b/drivers/media/video/davinci/vpbe_osd.c
index d6488b79ae3b..bba299dbf396 100644
--- a/drivers/media/video/davinci/vpbe_osd.c
+++ b/drivers/media/video/davinci/vpbe_osd.c
@@ -28,7 +28,6 @@
28#include <linux/clk.h> 28#include <linux/clk.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include <mach/io.h>
32#include <mach/cputype.h> 31#include <mach/cputype.h>
33#include <mach/hardware.h> 32#include <mach/hardware.h>
34 33
diff --git a/drivers/media/video/davinci/vpbe_venc.c b/drivers/media/video/davinci/vpbe_venc.c
index 00e80f59d5d5..b21ecc8d134d 100644
--- a/drivers/media/video/davinci/vpbe_venc.c
+++ b/drivers/media/video/davinci/vpbe_venc.c
@@ -27,7 +27,6 @@
27 27
28#include <mach/hardware.h> 28#include <mach/hardware.h>
29#include <mach/mux.h> 29#include <mach/mux.h>
30#include <mach/io.h>
31#include <mach/i2c.h> 30#include <mach/i2c.h>
32 31
33#include <linux/io.h> 32#include <linux/io.h>
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 74522773e934..93c35ef5f0ad 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -286,7 +286,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
286 sg_dma_address(sg) = vb2_dma_contig_plane_dma_addr(vb, 0); 286 sg_dma_address(sg) = vb2_dma_contig_plane_dma_addr(vb, 0);
287 sg_dma_len(sg) = new_size; 287 sg_dma_len(sg) = new_size;
288 288
289 txd = ichan->dma_chan.device->device_prep_slave_sg( 289 txd = dmaengine_prep_slave_sg(
290 &ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM, 290 &ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM,
291 DMA_PREP_INTERRUPT); 291 DMA_PREP_INTERRUPT);
292 if (!txd) 292 if (!txd)
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
index 4ed1c7c28ae7..02194c056b00 100644
--- a/drivers/media/video/timblogiw.c
+++ b/drivers/media/video/timblogiw.c
@@ -564,7 +564,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
564 564
565 spin_unlock_irq(&fh->queue_lock); 565 spin_unlock_irq(&fh->queue_lock);
566 566
567 desc = fh->chan->device->device_prep_slave_sg(fh->chan, 567 desc = dmaengine_prep_slave_sg(fh->chan,
568 buf->sg, sg_elems, DMA_DEV_TO_MEM, 568 buf->sg, sg_elems, DMA_DEV_TO_MEM,
569 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); 569 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
570 if (!desc) { 570 if (!desc) {
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 390863e7efbd..9819dc09ce08 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -24,6 +24,7 @@
24#include <linux/seq_file.h> 24#include <linux/seq_file.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/stat.h> 26#include <linux/stat.h>
27#include <linux/types.h>
27 28
28#include <linux/mmc/host.h> 29#include <linux/mmc/host.h>
29#include <linux/mmc/sdio.h> 30#include <linux/mmc/sdio.h>
@@ -173,6 +174,7 @@ struct atmel_mci {
173 174
174 struct atmel_mci_dma dma; 175 struct atmel_mci_dma dma;
175 struct dma_chan *data_chan; 176 struct dma_chan *data_chan;
177 struct dma_slave_config dma_conf;
176 178
177 u32 cmd_status; 179 u32 cmd_status;
178 u32 data_status; 180 u32 data_status;
@@ -863,16 +865,17 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
863 865
864 if (data->flags & MMC_DATA_READ) { 866 if (data->flags & MMC_DATA_READ) {
865 direction = DMA_FROM_DEVICE; 867 direction = DMA_FROM_DEVICE;
866 slave_dirn = DMA_DEV_TO_MEM; 868 host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
867 } else { 869 } else {
868 direction = DMA_TO_DEVICE; 870 direction = DMA_TO_DEVICE;
869 slave_dirn = DMA_MEM_TO_DEV; 871 host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
870 } 872 }
871 873
872 sglen = dma_map_sg(chan->device->dev, data->sg, 874 sglen = dma_map_sg(chan->device->dev, data->sg,
873 data->sg_len, direction); 875 data->sg_len, direction);
874 876
875 desc = chan->device->device_prep_slave_sg(chan, 877 dmaengine_slave_config(chan, &host->dma_conf);
878 desc = dmaengine_prep_slave_sg(chan,
876 data->sg, sglen, slave_dirn, 879 data->sg, sglen, slave_dirn,
877 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 880 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
878 if (!desc) 881 if (!desc)
@@ -1960,10 +1963,6 @@ static bool atmci_configure_dma(struct atmel_mci *host)
1960 if (pdata && find_slave_dev(pdata->dma_slave)) { 1963 if (pdata && find_slave_dev(pdata->dma_slave)) {
1961 dma_cap_mask_t mask; 1964 dma_cap_mask_t mask;
1962 1965
1963 setup_dma_addr(pdata->dma_slave,
1964 host->mapbase + ATMCI_TDR,
1965 host->mapbase + ATMCI_RDR);
1966
1967 /* Try to grab a DMA channel */ 1966 /* Try to grab a DMA channel */
1968 dma_cap_zero(mask); 1967 dma_cap_zero(mask);
1969 dma_cap_set(DMA_SLAVE, mask); 1968 dma_cap_set(DMA_SLAVE, mask);
@@ -1977,6 +1976,14 @@ static bool atmci_configure_dma(struct atmel_mci *host)
1977 dev_info(&host->pdev->dev, 1976 dev_info(&host->pdev->dev,
1978 "using %s for DMA transfers\n", 1977 "using %s for DMA transfers\n",
1979 dma_chan_name(host->dma.chan)); 1978 dma_chan_name(host->dma.chan));
1979
1980 host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
1981 host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1982 host->dma_conf.src_maxburst = 1;
1983 host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
1984 host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1985 host->dma_conf.dst_maxburst = 1;
1986 host->dma_conf.device_fc = false;
1980 return true; 1987 return true;
1981 } 1988 }
1982} 1989}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 983e244eca76..032b84791a16 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -30,6 +30,7 @@
30#include <linux/dma-mapping.h> 30#include <linux/dma-mapping.h>
31#include <linux/amba/mmci.h> 31#include <linux/amba/mmci.h>
32#include <linux/pm_runtime.h> 32#include <linux/pm_runtime.h>
33#include <linux/types.h>
33 34
34#include <asm/div64.h> 35#include <asm/div64.h>
35#include <asm/io.h> 36#include <asm/io.h>
@@ -400,6 +401,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
400 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 401 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
401 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ 402 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
402 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ 403 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
404 .device_fc = false,
403 }; 405 };
404 struct dma_chan *chan; 406 struct dma_chan *chan;
405 struct dma_device *device; 407 struct dma_device *device;
@@ -441,7 +443,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
441 return -EINVAL; 443 return -EINVAL;
442 444
443 dmaengine_slave_config(chan, &conf); 445 dmaengine_slave_config(chan, &conf);
444 desc = device->device_prep_slave_sg(chan, data->sg, nr_sg, 446 desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
445 conf.direction, DMA_CTRL_ACK); 447 conf.direction, DMA_CTRL_ACK);
446 if (!desc) 448 if (!desc)
447 goto unmap_exit; 449 goto unmap_exit;
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 4184b7946bbf..b2058b432320 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -33,6 +33,7 @@
33#include <linux/gpio.h> 33#include <linux/gpio.h>
34#include <linux/regulator/consumer.h> 34#include <linux/regulator/consumer.h>
35#include <linux/dmaengine.h> 35#include <linux/dmaengine.h>
36#include <linux/types.h>
36 37
37#include <asm/dma.h> 38#include <asm/dma.h>
38#include <asm/irq.h> 39#include <asm/irq.h>
@@ -254,7 +255,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
254 if (nents != data->sg_len) 255 if (nents != data->sg_len)
255 return -EINVAL; 256 return -EINVAL;
256 257
257 host->desc = host->dma->device->device_prep_slave_sg(host->dma, 258 host->desc = dmaengine_prep_slave_sg(host->dma,
258 data->sg, data->sg_len, slave_dirn, 259 data->sg, data->sg_len, slave_dirn,
259 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 260 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
260 261
@@ -267,6 +268,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
267 wmb(); 268 wmb();
268 269
269 dmaengine_submit(host->desc); 270 dmaengine_submit(host->desc);
271 dma_async_issue_pending(host->dma);
270 272
271 return 0; 273 return 0;
272} 274}
@@ -710,6 +712,7 @@ static int mxcmci_setup_dma(struct mmc_host *mmc)
710 config->src_addr_width = 4; 712 config->src_addr_width = 4;
711 config->dst_maxburst = host->burstlen; 713 config->dst_maxburst = host->burstlen;
712 config->src_maxburst = host->burstlen; 714 config->src_maxburst = host->burstlen;
715 config->device_fc = false;
713 716
714 return dmaengine_slave_config(host->dma, config); 717 return dmaengine_slave_config(host->dma, config);
715} 718}
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 382c835d217c..b0f2ef988188 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -38,10 +38,10 @@
38#include <linux/gpio.h> 38#include <linux/gpio.h>
39#include <linux/regulator/consumer.h> 39#include <linux/regulator/consumer.h>
40#include <linux/module.h> 40#include <linux/module.h>
41#include <linux/fsl/mxs-dma.h>
41 42
42#include <mach/mxs.h> 43#include <mach/mxs.h>
43#include <mach/common.h> 44#include <mach/common.h>
44#include <mach/dma.h>
45#include <mach/mmc.h> 45#include <mach/mmc.h>
46 46
47#define DRIVER_NAME "mxs-mmc" 47#define DRIVER_NAME "mxs-mmc"
@@ -305,7 +305,7 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
305} 305}
306 306
307static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( 307static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
308 struct mxs_mmc_host *host, unsigned int append) 308 struct mxs_mmc_host *host, unsigned long flags)
309{ 309{
310 struct dma_async_tx_descriptor *desc; 310 struct dma_async_tx_descriptor *desc;
311 struct mmc_data *data = host->data; 311 struct mmc_data *data = host->data;
@@ -324,8 +324,8 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
324 sg_len = SSP_PIO_NUM; 324 sg_len = SSP_PIO_NUM;
325 } 325 }
326 326
327 desc = host->dmach->device->device_prep_slave_sg(host->dmach, 327 desc = dmaengine_prep_slave_sg(host->dmach,
328 sgl, sg_len, host->slave_dirn, append); 328 sgl, sg_len, host->slave_dirn, flags);
329 if (desc) { 329 if (desc) {
330 desc->callback = mxs_mmc_dma_irq_callback; 330 desc->callback = mxs_mmc_dma_irq_callback;
331 desc->callback_param = host; 331 desc->callback_param = host;
@@ -358,7 +358,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
358 host->ssp_pio_words[2] = cmd1; 358 host->ssp_pio_words[2] = cmd1;
359 host->dma_dir = DMA_NONE; 359 host->dma_dir = DMA_NONE;
360 host->slave_dirn = DMA_TRANS_NONE; 360 host->slave_dirn = DMA_TRANS_NONE;
361 desc = mxs_mmc_prep_dma(host, 0); 361 desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
362 if (!desc) 362 if (!desc)
363 goto out; 363 goto out;
364 364
@@ -398,7 +398,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
398 host->ssp_pio_words[2] = cmd1; 398 host->ssp_pio_words[2] = cmd1;
399 host->dma_dir = DMA_NONE; 399 host->dma_dir = DMA_NONE;
400 host->slave_dirn = DMA_TRANS_NONE; 400 host->slave_dirn = DMA_TRANS_NONE;
401 desc = mxs_mmc_prep_dma(host, 0); 401 desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
402 if (!desc) 402 if (!desc)
403 goto out; 403 goto out;
404 404
@@ -526,7 +526,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
526 host->data = data; 526 host->data = data;
527 host->dma_dir = dma_data_dir; 527 host->dma_dir = dma_data_dir;
528 host->slave_dirn = slave_dirn; 528 host->slave_dirn = slave_dirn;
529 desc = mxs_mmc_prep_dma(host, 1); 529 desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
530 if (!desc) 530 if (!desc)
531 goto out; 531 goto out;
532 532
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 60f205708f54..aafaf0b6eb1c 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -286,7 +286,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
286 DMA_FROM_DEVICE); 286 DMA_FROM_DEVICE);
287 if (ret > 0) { 287 if (ret > 0) {
288 host->dma_active = true; 288 host->dma_active = true;
289 desc = chan->device->device_prep_slave_sg(chan, sg, ret, 289 desc = dmaengine_prep_slave_sg(chan, sg, ret,
290 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 290 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
291 } 291 }
292 292
@@ -335,7 +335,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
335 DMA_TO_DEVICE); 335 DMA_TO_DEVICE);
336 if (ret > 0) { 336 if (ret > 0) {
337 host->dma_active = true; 337 host->dma_active = true;
338 desc = chan->device->device_prep_slave_sg(chan, sg, ret, 338 desc = dmaengine_prep_slave_sg(chan, sg, ret,
339 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 339 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
340 } 340 }
341 341
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 8253ec12003e..fff928604859 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -88,7 +88,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
88 88
89 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); 89 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
90 if (ret > 0) 90 if (ret > 0)
91 desc = chan->device->device_prep_slave_sg(chan, sg, ret, 91 desc = dmaengine_prep_slave_sg(chan, sg, ret,
92 DMA_DEV_TO_MEM, DMA_CTRL_ACK); 92 DMA_DEV_TO_MEM, DMA_CTRL_ACK);
93 93
94 if (desc) { 94 if (desc) {
@@ -169,7 +169,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
169 169
170 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); 170 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
171 if (ret > 0) 171 if (ret > 0)
172 desc = chan->device->device_prep_slave_sg(chan, sg, ret, 172 desc = dmaengine_prep_slave_sg(chan, sg, ret,
173 DMA_MEM_TO_DEV, DMA_CTRL_ACK); 173 DMA_MEM_TO_DEV, DMA_CTRL_ACK);
174 174
175 if (desc) { 175 if (desc) {
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 284cf3433720..5760c1a4b3f6 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -304,9 +304,6 @@ config MTD_OOPS
304 buffer in a flash partition where it can be read back at some 304 buffer in a flash partition where it can be read back at some
305 later point. 305 later point.
306 306
307 To use, add console=ttyMTDx to the kernel command line,
308 where x is the MTD device number to use.
309
310config MTD_SWAP 307config MTD_SWAP
311 tristate "Swap on MTD device support" 308 tristate "Swap on MTD device support"
312 depends on MTD && SWAP 309 depends on MTD && SWAP
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 9bcd1f415f43..dbbd2edfb812 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -87,7 +87,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **
87 87
88static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, 88static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
89 size_t *retlen, void **virt, resource_size_t *phys); 89 size_t *retlen, void **virt, resource_size_t *phys);
90static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len); 90static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
91 91
92static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 92static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
93static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 93static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
@@ -262,9 +262,9 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd)
262static void fixup_use_point(struct mtd_info *mtd) 262static void fixup_use_point(struct mtd_info *mtd)
263{ 263{
264 struct map_info *map = mtd->priv; 264 struct map_info *map = mtd->priv;
265 if (!mtd->point && map_is_linear(map)) { 265 if (!mtd->_point && map_is_linear(map)) {
266 mtd->point = cfi_intelext_point; 266 mtd->_point = cfi_intelext_point;
267 mtd->unpoint = cfi_intelext_unpoint; 267 mtd->_unpoint = cfi_intelext_unpoint;
268 } 268 }
269} 269}
270 270
@@ -274,8 +274,8 @@ static void fixup_use_write_buffers(struct mtd_info *mtd)
274 struct cfi_private *cfi = map->fldrv_priv; 274 struct cfi_private *cfi = map->fldrv_priv;
275 if (cfi->cfiq->BufWriteTimeoutTyp) { 275 if (cfi->cfiq->BufWriteTimeoutTyp) {
276 printk(KERN_INFO "Using buffer write method\n" ); 276 printk(KERN_INFO "Using buffer write method\n" );
277 mtd->write = cfi_intelext_write_buffers; 277 mtd->_write = cfi_intelext_write_buffers;
278 mtd->writev = cfi_intelext_writev; 278 mtd->_writev = cfi_intelext_writev;
279 } 279 }
280} 280}
281 281
@@ -443,15 +443,15 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
443 mtd->type = MTD_NORFLASH; 443 mtd->type = MTD_NORFLASH;
444 444
445 /* Fill in the default mtd operations */ 445 /* Fill in the default mtd operations */
446 mtd->erase = cfi_intelext_erase_varsize; 446 mtd->_erase = cfi_intelext_erase_varsize;
447 mtd->read = cfi_intelext_read; 447 mtd->_read = cfi_intelext_read;
448 mtd->write = cfi_intelext_write_words; 448 mtd->_write = cfi_intelext_write_words;
449 mtd->sync = cfi_intelext_sync; 449 mtd->_sync = cfi_intelext_sync;
450 mtd->lock = cfi_intelext_lock; 450 mtd->_lock = cfi_intelext_lock;
451 mtd->unlock = cfi_intelext_unlock; 451 mtd->_unlock = cfi_intelext_unlock;
452 mtd->is_locked = cfi_intelext_is_locked; 452 mtd->_is_locked = cfi_intelext_is_locked;
453 mtd->suspend = cfi_intelext_suspend; 453 mtd->_suspend = cfi_intelext_suspend;
454 mtd->resume = cfi_intelext_resume; 454 mtd->_resume = cfi_intelext_resume;
455 mtd->flags = MTD_CAP_NORFLASH; 455 mtd->flags = MTD_CAP_NORFLASH;
456 mtd->name = map->name; 456 mtd->name = map->name;
457 mtd->writesize = 1; 457 mtd->writesize = 1;
@@ -600,12 +600,12 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
600 } 600 }
601 601
602#ifdef CONFIG_MTD_OTP 602#ifdef CONFIG_MTD_OTP
603 mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg; 603 mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
604 mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg; 604 mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
605 mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg; 605 mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
606 mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg; 606 mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
607 mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info; 607 mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
608 mtd->get_user_prot_info = cfi_intelext_get_user_prot_info; 608 mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
609#endif 609#endif
610 610
611 /* This function has the potential to distort the reality 611 /* This function has the potential to distort the reality
@@ -1017,8 +1017,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
1017 case FL_READY: 1017 case FL_READY:
1018 case FL_STATUS: 1018 case FL_STATUS:
1019 case FL_JEDEC_QUERY: 1019 case FL_JEDEC_QUERY:
1020 /* We should really make set_vpp() count, rather than doing this */
1021 DISABLE_VPP(map);
1022 break; 1020 break;
1023 default: 1021 default:
1024 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate); 1022 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
@@ -1324,7 +1322,7 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1324 int chipnum; 1322 int chipnum;
1325 int ret = 0; 1323 int ret = 0;
1326 1324
1327 if (!map->virt || (from + len > mtd->size)) 1325 if (!map->virt)
1328 return -EINVAL; 1326 return -EINVAL;
1329 1327
1330 /* Now lock the chip(s) to POINT state */ 1328 /* Now lock the chip(s) to POINT state */
@@ -1334,7 +1332,6 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1334 ofs = from - (chipnum << cfi->chipshift); 1332 ofs = from - (chipnum << cfi->chipshift);
1335 1333
1336 *virt = map->virt + cfi->chips[chipnum].start + ofs; 1334 *virt = map->virt + cfi->chips[chipnum].start + ofs;
1337 *retlen = 0;
1338 if (phys) 1335 if (phys)
1339 *phys = map->phys + cfi->chips[chipnum].start + ofs; 1336 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1340 1337
@@ -1369,12 +1366,12 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1369 return 0; 1366 return 0;
1370} 1367}
1371 1368
1372static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 1369static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1373{ 1370{
1374 struct map_info *map = mtd->priv; 1371 struct map_info *map = mtd->priv;
1375 struct cfi_private *cfi = map->fldrv_priv; 1372 struct cfi_private *cfi = map->fldrv_priv;
1376 unsigned long ofs; 1373 unsigned long ofs;
1377 int chipnum; 1374 int chipnum, err = 0;
1378 1375
1379 /* Now unlock the chip(s) POINT state */ 1376 /* Now unlock the chip(s) POINT state */
1380 1377
@@ -1382,7 +1379,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1382 chipnum = (from >> cfi->chipshift); 1379 chipnum = (from >> cfi->chipshift);
1383 ofs = from - (chipnum << cfi->chipshift); 1380 ofs = from - (chipnum << cfi->chipshift);
1384 1381
1385 while (len) { 1382 while (len && !err) {
1386 unsigned long thislen; 1383 unsigned long thislen;
1387 struct flchip *chip; 1384 struct flchip *chip;
1388 1385
@@ -1400,8 +1397,10 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1400 chip->ref_point_counter--; 1397 chip->ref_point_counter--;
1401 if(chip->ref_point_counter == 0) 1398 if(chip->ref_point_counter == 0)
1402 chip->state = FL_READY; 1399 chip->state = FL_READY;
1403 } else 1400 } else {
1404 printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */ 1401 printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
1402 err = -EINVAL;
1403 }
1405 1404
1406 put_chip(map, chip, chip->start); 1405 put_chip(map, chip, chip->start);
1407 mutex_unlock(&chip->mutex); 1406 mutex_unlock(&chip->mutex);
@@ -1410,6 +1409,8 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1410 ofs = 0; 1409 ofs = 0;
1411 chipnum++; 1410 chipnum++;
1412 } 1411 }
1412
1413 return err;
1413} 1414}
1414 1415
1415static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 1416static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
@@ -1456,8 +1457,6 @@ static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, siz
1456 chipnum = (from >> cfi->chipshift); 1457 chipnum = (from >> cfi->chipshift);
1457 ofs = from - (chipnum << cfi->chipshift); 1458 ofs = from - (chipnum << cfi->chipshift);
1458 1459
1459 *retlen = 0;
1460
1461 while (len) { 1460 while (len) {
1462 unsigned long thislen; 1461 unsigned long thislen;
1463 1462
@@ -1551,7 +1550,8 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1551 } 1550 }
1552 1551
1553 xip_enable(map, chip, adr); 1552 xip_enable(map, chip, adr);
1554 out: put_chip(map, chip, adr); 1553 out: DISABLE_VPP(map);
1554 put_chip(map, chip, adr);
1555 mutex_unlock(&chip->mutex); 1555 mutex_unlock(&chip->mutex);
1556 return ret; 1556 return ret;
1557} 1557}
@@ -1565,10 +1565,6 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
1565 int chipnum; 1565 int chipnum;
1566 unsigned long ofs; 1566 unsigned long ofs;
1567 1567
1568 *retlen = 0;
1569 if (!len)
1570 return 0;
1571
1572 chipnum = to >> cfi->chipshift; 1568 chipnum = to >> cfi->chipshift;
1573 ofs = to - (chipnum << cfi->chipshift); 1569 ofs = to - (chipnum << cfi->chipshift);
1574 1570
@@ -1794,7 +1790,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1794 } 1790 }
1795 1791
1796 xip_enable(map, chip, cmd_adr); 1792 xip_enable(map, chip, cmd_adr);
1797 out: put_chip(map, chip, cmd_adr); 1793 out: DISABLE_VPP(map);
1794 put_chip(map, chip, cmd_adr);
1798 mutex_unlock(&chip->mutex); 1795 mutex_unlock(&chip->mutex);
1799 return ret; 1796 return ret;
1800} 1797}
@@ -1813,7 +1810,6 @@ static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1813 for (i = 0; i < count; i++) 1810 for (i = 0; i < count; i++)
1814 len += vecs[i].iov_len; 1811 len += vecs[i].iov_len;
1815 1812
1816 *retlen = 0;
1817 if (!len) 1813 if (!len)
1818 return 0; 1814 return 0;
1819 1815
@@ -1932,6 +1928,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1932 ret = -EIO; 1928 ret = -EIO;
1933 } else if (chipstatus & 0x20 && retries--) { 1929 } else if (chipstatus & 0x20 && retries--) {
1934 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); 1930 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1931 DISABLE_VPP(map);
1935 put_chip(map, chip, adr); 1932 put_chip(map, chip, adr);
1936 mutex_unlock(&chip->mutex); 1933 mutex_unlock(&chip->mutex);
1937 goto retry; 1934 goto retry;
@@ -1944,7 +1941,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1944 } 1941 }
1945 1942
1946 xip_enable(map, chip, adr); 1943 xip_enable(map, chip, adr);
1947 out: put_chip(map, chip, adr); 1944 out: DISABLE_VPP(map);
1945 put_chip(map, chip, adr);
1948 mutex_unlock(&chip->mutex); 1946 mutex_unlock(&chip->mutex);
1949 return ret; 1947 return ret;
1950} 1948}
@@ -2086,7 +2084,8 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
2086 } 2084 }
2087 2085
2088 xip_enable(map, chip, adr); 2086 xip_enable(map, chip, adr);
2089out: put_chip(map, chip, adr); 2087 out: DISABLE_VPP(map);
2088 put_chip(map, chip, adr);
2090 mutex_unlock(&chip->mutex); 2089 mutex_unlock(&chip->mutex);
2091 return ret; 2090 return ret;
2092} 2091}
@@ -2483,7 +2482,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
2483 allowed to. Or should we return -EAGAIN, because the upper layers 2482 allowed to. Or should we return -EAGAIN, because the upper layers
2484 ought to have already shut down anything which was using the device 2483 ought to have already shut down anything which was using the device
2485 anyway? The latter for now. */ 2484 anyway? The latter for now. */
2486 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate); 2485 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2487 ret = -EAGAIN; 2486 ret = -EAGAIN;
2488 case FL_PM_SUSPENDED: 2487 case FL_PM_SUSPENDED:
2489 break; 2488 break;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 8d70895a58d6..d02592e6a0f0 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -59,6 +59,9 @@ static void cfi_amdstd_resume (struct mtd_info *);
59static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); 59static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
60static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 60static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
61 61
62static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
63 size_t *retlen, const u_char *buf);
64
62static void cfi_amdstd_destroy(struct mtd_info *); 65static void cfi_amdstd_destroy(struct mtd_info *);
63 66
64struct mtd_info *cfi_cmdset_0002(struct map_info *, int); 67struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
@@ -189,7 +192,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd)
189 struct cfi_private *cfi = map->fldrv_priv; 192 struct cfi_private *cfi = map->fldrv_priv;
190 if (cfi->cfiq->BufWriteTimeoutTyp) { 193 if (cfi->cfiq->BufWriteTimeoutTyp) {
191 pr_debug("Using buffer write method\n" ); 194 pr_debug("Using buffer write method\n" );
192 mtd->write = cfi_amdstd_write_buffers; 195 mtd->_write = cfi_amdstd_write_buffers;
193 } 196 }
194} 197}
195 198
@@ -228,8 +231,8 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd)
228static void fixup_use_secsi(struct mtd_info *mtd) 231static void fixup_use_secsi(struct mtd_info *mtd)
229{ 232{
230 /* Setup for chips with a secsi area */ 233 /* Setup for chips with a secsi area */
231 mtd->read_user_prot_reg = cfi_amdstd_secsi_read; 234 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
232 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read; 235 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
233} 236}
234 237
235static void fixup_use_erase_chip(struct mtd_info *mtd) 238static void fixup_use_erase_chip(struct mtd_info *mtd)
@@ -238,7 +241,7 @@ static void fixup_use_erase_chip(struct mtd_info *mtd)
238 struct cfi_private *cfi = map->fldrv_priv; 241 struct cfi_private *cfi = map->fldrv_priv;
239 if ((cfi->cfiq->NumEraseRegions == 1) && 242 if ((cfi->cfiq->NumEraseRegions == 1) &&
240 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 243 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
241 mtd->erase = cfi_amdstd_erase_chip; 244 mtd->_erase = cfi_amdstd_erase_chip;
242 } 245 }
243 246
244} 247}
@@ -249,8 +252,8 @@ static void fixup_use_erase_chip(struct mtd_info *mtd)
249 */ 252 */
250static void fixup_use_atmel_lock(struct mtd_info *mtd) 253static void fixup_use_atmel_lock(struct mtd_info *mtd)
251{ 254{
252 mtd->lock = cfi_atmel_lock; 255 mtd->_lock = cfi_atmel_lock;
253 mtd->unlock = cfi_atmel_unlock; 256 mtd->_unlock = cfi_atmel_unlock;
254 mtd->flags |= MTD_POWERUP_LOCK; 257 mtd->flags |= MTD_POWERUP_LOCK;
255} 258}
256 259
@@ -429,12 +432,12 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
429 mtd->type = MTD_NORFLASH; 432 mtd->type = MTD_NORFLASH;
430 433
431 /* Fill in the default mtd operations */ 434 /* Fill in the default mtd operations */
432 mtd->erase = cfi_amdstd_erase_varsize; 435 mtd->_erase = cfi_amdstd_erase_varsize;
433 mtd->write = cfi_amdstd_write_words; 436 mtd->_write = cfi_amdstd_write_words;
434 mtd->read = cfi_amdstd_read; 437 mtd->_read = cfi_amdstd_read;
435 mtd->sync = cfi_amdstd_sync; 438 mtd->_sync = cfi_amdstd_sync;
436 mtd->suspend = cfi_amdstd_suspend; 439 mtd->_suspend = cfi_amdstd_suspend;
437 mtd->resume = cfi_amdstd_resume; 440 mtd->_resume = cfi_amdstd_resume;
438 mtd->flags = MTD_CAP_NORFLASH; 441 mtd->flags = MTD_CAP_NORFLASH;
439 mtd->name = map->name; 442 mtd->name = map->name;
440 mtd->writesize = 1; 443 mtd->writesize = 1;
@@ -443,6 +446,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
443 pr_debug("MTD %s(): write buffer size %d\n", __func__, 446 pr_debug("MTD %s(): write buffer size %d\n", __func__,
444 mtd->writebufsize); 447 mtd->writebufsize);
445 448
449 mtd->_panic_write = cfi_amdstd_panic_write;
446 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 450 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
447 451
448 if (cfi->cfi_mode==CFI_MODE_CFI){ 452 if (cfi->cfi_mode==CFI_MODE_CFI){
@@ -770,8 +774,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
770 774
771 case FL_READY: 775 case FL_READY:
772 case FL_STATUS: 776 case FL_STATUS:
773 /* We should really make set_vpp() count, rather than doing this */
774 DISABLE_VPP(map);
775 break; 777 break;
776 default: 778 default:
777 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); 779 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
@@ -1013,13 +1015,9 @@ static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_
1013 int ret = 0; 1015 int ret = 0;
1014 1016
1015 /* ofs: offset within the first chip that the first read should start */ 1017 /* ofs: offset within the first chip that the first read should start */
1016
1017 chipnum = (from >> cfi->chipshift); 1018 chipnum = (from >> cfi->chipshift);
1018 ofs = from - (chipnum << cfi->chipshift); 1019 ofs = from - (chipnum << cfi->chipshift);
1019 1020
1020
1021 *retlen = 0;
1022
1023 while (len) { 1021 while (len) {
1024 unsigned long thislen; 1022 unsigned long thislen;
1025 1023
@@ -1097,16 +1095,11 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len,
1097 int chipnum; 1095 int chipnum;
1098 int ret = 0; 1096 int ret = 0;
1099 1097
1100
1101 /* ofs: offset within the first chip that the first read should start */ 1098 /* ofs: offset within the first chip that the first read should start */
1102
1103 /* 8 secsi bytes per chip */ 1099 /* 8 secsi bytes per chip */
1104 chipnum=from>>3; 1100 chipnum=from>>3;
1105 ofs=from & 7; 1101 ofs=from & 7;
1106 1102
1107
1108 *retlen = 0;
1109
1110 while (len) { 1103 while (len) {
1111 unsigned long thislen; 1104 unsigned long thislen;
1112 1105
@@ -1234,6 +1227,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1234 xip_enable(map, chip, adr); 1227 xip_enable(map, chip, adr);
1235 op_done: 1228 op_done:
1236 chip->state = FL_READY; 1229 chip->state = FL_READY;
1230 DISABLE_VPP(map);
1237 put_chip(map, chip, adr); 1231 put_chip(map, chip, adr);
1238 mutex_unlock(&chip->mutex); 1232 mutex_unlock(&chip->mutex);
1239 1233
@@ -1251,10 +1245,6 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1251 unsigned long ofs, chipstart; 1245 unsigned long ofs, chipstart;
1252 DECLARE_WAITQUEUE(wait, current); 1246 DECLARE_WAITQUEUE(wait, current);
1253 1247
1254 *retlen = 0;
1255 if (!len)
1256 return 0;
1257
1258 chipnum = to >> cfi->chipshift; 1248 chipnum = to >> cfi->chipshift;
1259 ofs = to - (chipnum << cfi->chipshift); 1249 ofs = to - (chipnum << cfi->chipshift);
1260 chipstart = cfi->chips[chipnum].start; 1250 chipstart = cfi->chips[chipnum].start;
@@ -1476,6 +1466,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1476 ret = -EIO; 1466 ret = -EIO;
1477 op_done: 1467 op_done:
1478 chip->state = FL_READY; 1468 chip->state = FL_READY;
1469 DISABLE_VPP(map);
1479 put_chip(map, chip, adr); 1470 put_chip(map, chip, adr);
1480 mutex_unlock(&chip->mutex); 1471 mutex_unlock(&chip->mutex);
1481 1472
@@ -1493,10 +1484,6 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1493 int chipnum; 1484 int chipnum;
1494 unsigned long ofs; 1485 unsigned long ofs;
1495 1486
1496 *retlen = 0;
1497 if (!len)
1498 return 0;
1499
1500 chipnum = to >> cfi->chipshift; 1487 chipnum = to >> cfi->chipshift;
1501 ofs = to - (chipnum << cfi->chipshift); 1488 ofs = to - (chipnum << cfi->chipshift);
1502 1489
@@ -1562,6 +1549,238 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1562 return 0; 1549 return 0;
1563} 1550}
1564 1551
1552/*
1553 * Wait for the flash chip to become ready to write data
1554 *
1555 * This is only called during the panic_write() path. When panic_write()
1556 * is called, the kernel is in the process of a panic, and will soon be
1557 * dead. Therefore we don't take any locks, and attempt to get access
1558 * to the chip as soon as possible.
1559 */
1560static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
1561 unsigned long adr)
1562{
1563 struct cfi_private *cfi = map->fldrv_priv;
1564 int retries = 10;
1565 int i;
1566
1567 /*
1568 * If the driver thinks the chip is idle, and no toggle bits
1569 * are changing, then the chip is actually idle for sure.
1570 */
1571 if (chip->state == FL_READY && chip_ready(map, adr))
1572 return 0;
1573
1574 /*
1575 * Try several times to reset the chip and then wait for it
1576 * to become idle. The upper limit of a few milliseconds of
1577 * delay isn't a big problem: the kernel is dying anyway. It
1578 * is more important to save the messages.
1579 */
1580 while (retries > 0) {
1581 const unsigned long timeo = (HZ / 1000) + 1;
1582
1583 /* send the reset command */
1584 map_write(map, CMD(0xF0), chip->start);
1585
1586 /* wait for the chip to become ready */
1587 for (i = 0; i < jiffies_to_usecs(timeo); i++) {
1588 if (chip_ready(map, adr))
1589 return 0;
1590
1591 udelay(1);
1592 }
1593 }
1594
1595 /* the chip never became ready */
1596 return -EBUSY;
1597}
1598
1599/*
1600 * Write out one word of data to a single flash chip during a kernel panic
1601 *
1602 * This is only called during the panic_write() path. When panic_write()
1603 * is called, the kernel is in the process of a panic, and will soon be
1604 * dead. Therefore we don't take any locks, and attempt to get access
1605 * to the chip as soon as possible.
1606 *
1607 * The implementation of this routine is intentionally similar to
1608 * do_write_oneword(), in order to ease code maintenance.
1609 */
1610static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
1611 unsigned long adr, map_word datum)
1612{
1613 const unsigned long uWriteTimeout = (HZ / 1000) + 1;
1614 struct cfi_private *cfi = map->fldrv_priv;
1615 int retry_cnt = 0;
1616 map_word oldd;
1617 int ret = 0;
1618 int i;
1619
1620 adr += chip->start;
1621
1622 ret = cfi_amdstd_panic_wait(map, chip, adr);
1623 if (ret)
1624 return ret;
1625
1626 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
1627 __func__, adr, datum.x[0]);
1628
1629 /*
1630 * Check for a NOP for the case when the datum to write is already
1631 * present - it saves time and works around buggy chips that corrupt
1632 * data at other locations when 0xff is written to a location that
1633 * already contains 0xff.
1634 */
1635 oldd = map_read(map, adr);
1636 if (map_word_equal(map, oldd, datum)) {
1637 pr_debug("MTD %s(): NOP\n", __func__);
1638 goto op_done;
1639 }
1640
1641 ENABLE_VPP(map);
1642
1643retry:
1644 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1645 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1646 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1647 map_write(map, datum, adr);
1648
1649 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
1650 if (chip_ready(map, adr))
1651 break;
1652
1653 udelay(1);
1654 }
1655
1656 if (!chip_good(map, adr, datum)) {
1657 /* reset on all failures. */
1658 map_write(map, CMD(0xF0), chip->start);
1659 /* FIXME - should have reset delay before continuing */
1660
1661 if (++retry_cnt <= MAX_WORD_RETRIES)
1662 goto retry;
1663
1664 ret = -EIO;
1665 }
1666
1667op_done:
1668 DISABLE_VPP(map);
1669 return ret;
1670}
1671
1672/*
1673 * Write out some data during a kernel panic
1674 *
1675 * This is used by the mtdoops driver to save the dying messages from a
1676 * kernel which has panic'd.
1677 *
1678 * This routine ignores all of the locking used throughout the rest of the
1679 * driver, in order to ensure that the data gets written out no matter what
1680 * state this driver (and the flash chip itself) was in when the kernel crashed.
1681 *
1682 * The implementation of this routine is intentionally similar to
1683 * cfi_amdstd_write_words(), in order to ease code maintenance.
1684 */
1685static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1686 size_t *retlen, const u_char *buf)
1687{
1688 struct map_info *map = mtd->priv;
1689 struct cfi_private *cfi = map->fldrv_priv;
1690 unsigned long ofs, chipstart;
1691 int ret = 0;
1692 int chipnum;
1693
1694 chipnum = to >> cfi->chipshift;
1695 ofs = to - (chipnum << cfi->chipshift);
1696 chipstart = cfi->chips[chipnum].start;
1697
1698 /* If it's not bus aligned, do the first byte write */
1699 if (ofs & (map_bankwidth(map) - 1)) {
1700 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
1701 int i = ofs - bus_ofs;
1702 int n = 0;
1703 map_word tmp_buf;
1704
1705 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
1706 if (ret)
1707 return ret;
1708
1709 /* Load 'tmp_buf' with old contents of flash */
1710 tmp_buf = map_read(map, bus_ofs + chipstart);
1711
1712 /* Number of bytes to copy from buffer */
1713 n = min_t(int, len, map_bankwidth(map) - i);
1714
1715 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1716
1717 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1718 bus_ofs, tmp_buf);
1719 if (ret)
1720 return ret;
1721
1722 ofs += n;
1723 buf += n;
1724 (*retlen) += n;
1725 len -= n;
1726
1727 if (ofs >> cfi->chipshift) {
1728 chipnum++;
1729 ofs = 0;
1730 if (chipnum == cfi->numchips)
1731 return 0;
1732 }
1733 }
1734
1735 /* We are now aligned, write as much as possible */
1736 while (len >= map_bankwidth(map)) {
1737 map_word datum;
1738
1739 datum = map_word_load(map, buf);
1740
1741 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1742 ofs, datum);
1743 if (ret)
1744 return ret;
1745
1746 ofs += map_bankwidth(map);
1747 buf += map_bankwidth(map);
1748 (*retlen) += map_bankwidth(map);
1749 len -= map_bankwidth(map);
1750
1751 if (ofs >> cfi->chipshift) {
1752 chipnum++;
1753 ofs = 0;
1754 if (chipnum == cfi->numchips)
1755 return 0;
1756
1757 chipstart = cfi->chips[chipnum].start;
1758 }
1759 }
1760
1761 /* Write the trailing bytes if any */
1762 if (len & (map_bankwidth(map) - 1)) {
1763 map_word tmp_buf;
1764
1765 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
1766 if (ret)
1767 return ret;
1768
1769 tmp_buf = map_read(map, ofs + chipstart);
1770
1771 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1772
1773 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1774 ofs, tmp_buf);
1775 if (ret)
1776 return ret;
1777
1778 (*retlen) += len;
1779 }
1780
1781 return 0;
1782}
1783
1565 1784
1566/* 1785/*
1567 * Handle devices with one erase region, that only implement 1786 * Handle devices with one erase region, that only implement
@@ -1649,6 +1868,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1649 1868
1650 chip->state = FL_READY; 1869 chip->state = FL_READY;
1651 xip_enable(map, chip, adr); 1870 xip_enable(map, chip, adr);
1871 DISABLE_VPP(map);
1652 put_chip(map, chip, adr); 1872 put_chip(map, chip, adr);
1653 mutex_unlock(&chip->mutex); 1873 mutex_unlock(&chip->mutex);
1654 1874
@@ -1739,6 +1959,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1739 } 1959 }
1740 1960
1741 chip->state = FL_READY; 1961 chip->state = FL_READY;
1962 DISABLE_VPP(map);
1742 put_chip(map, chip, adr); 1963 put_chip(map, chip, adr);
1743 mutex_unlock(&chip->mutex); 1964 mutex_unlock(&chip->mutex);
1744 return ret; 1965 return ret;
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 85e80180b65b..096993f9711e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -228,15 +228,15 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
228 } 228 }
229 229
230 /* Also select the correct geometry setup too */ 230 /* Also select the correct geometry setup too */
231 mtd->erase = cfi_staa_erase_varsize; 231 mtd->_erase = cfi_staa_erase_varsize;
232 mtd->read = cfi_staa_read; 232 mtd->_read = cfi_staa_read;
233 mtd->write = cfi_staa_write_buffers; 233 mtd->_write = cfi_staa_write_buffers;
234 mtd->writev = cfi_staa_writev; 234 mtd->_writev = cfi_staa_writev;
235 mtd->sync = cfi_staa_sync; 235 mtd->_sync = cfi_staa_sync;
236 mtd->lock = cfi_staa_lock; 236 mtd->_lock = cfi_staa_lock;
237 mtd->unlock = cfi_staa_unlock; 237 mtd->_unlock = cfi_staa_unlock;
238 mtd->suspend = cfi_staa_suspend; 238 mtd->_suspend = cfi_staa_suspend;
239 mtd->resume = cfi_staa_resume; 239 mtd->_resume = cfi_staa_resume;
240 mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; 240 mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
241 mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ 241 mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
242 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 242 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
@@ -394,8 +394,6 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t
394 chipnum = (from >> cfi->chipshift); 394 chipnum = (from >> cfi->chipshift);
395 ofs = from - (chipnum << cfi->chipshift); 395 ofs = from - (chipnum << cfi->chipshift);
396 396
397 *retlen = 0;
398
399 while (len) { 397 while (len) {
400 unsigned long thislen; 398 unsigned long thislen;
401 399
@@ -617,10 +615,6 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
617 int chipnum; 615 int chipnum;
618 unsigned long ofs; 616 unsigned long ofs;
619 617
620 *retlen = 0;
621 if (!len)
622 return 0;
623
624 chipnum = to >> cfi->chipshift; 618 chipnum = to >> cfi->chipshift;
625 ofs = to - (chipnum << cfi->chipshift); 619 ofs = to - (chipnum << cfi->chipshift);
626 620
@@ -904,12 +898,6 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd,
904 int i, first; 898 int i, first;
905 struct mtd_erase_region_info *regions = mtd->eraseregions; 899 struct mtd_erase_region_info *regions = mtd->eraseregions;
906 900
907 if (instr->addr > mtd->size)
908 return -EINVAL;
909
910 if ((instr->len + instr->addr) > mtd->size)
911 return -EINVAL;
912
913 /* Check that both start and end of the requested erase are 901 /* Check that both start and end of the requested erase are
914 * aligned with the erasesize at the appropriate addresses. 902 * aligned with the erasesize at the appropriate addresses.
915 */ 903 */
@@ -1155,9 +1143,6 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1155 if (len & (mtd->erasesize -1)) 1143 if (len & (mtd->erasesize -1))
1156 return -EINVAL; 1144 return -EINVAL;
1157 1145
1158 if ((len + ofs) > mtd->size)
1159 return -EINVAL;
1160
1161 chipnum = ofs >> cfi->chipshift; 1146 chipnum = ofs >> cfi->chipshift;
1162 adr = ofs - (chipnum << cfi->chipshift); 1147 adr = ofs - (chipnum << cfi->chipshift);
1163 1148
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 8e464054a631..f992418f40a8 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -173,12 +173,6 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
173 int i, first; 173 int i, first;
174 struct mtd_erase_region_info *regions = mtd->eraseregions; 174 struct mtd_erase_region_info *regions = mtd->eraseregions;
175 175
176 if (ofs > mtd->size)
177 return -EINVAL;
178
179 if ((len + ofs) > mtd->size)
180 return -EINVAL;
181
182 /* Check that both start and end of the requested erase are 176 /* Check that both start and end of the requested erase are
183 * aligned with the erasesize at the appropriate addresses. 177 * aligned with the erasesize at the appropriate addresses.
184 */ 178 */
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 89c6595454a5..800b0e853e86 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -101,7 +101,7 @@ static void fixup_use_fwh_lock(struct mtd_info *mtd)
101{ 101{
102 printk(KERN_NOTICE "using fwh lock/unlock method\n"); 102 printk(KERN_NOTICE "using fwh lock/unlock method\n");
103 /* Setup for the chips with the fwh lock method */ 103 /* Setup for the chips with the fwh lock method */
104 mtd->lock = fwh_lock_varsize; 104 mtd->_lock = fwh_lock_varsize;
105 mtd->unlock = fwh_unlock_varsize; 105 mtd->_unlock = fwh_unlock_varsize;
106} 106}
107#endif /* FWH_LOCK_H */ 107#endif /* FWH_LOCK_H */
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
index f2b872946871..f7a5bca92aef 100644
--- a/drivers/mtd/chips/map_absent.c
+++ b/drivers/mtd/chips/map_absent.c
@@ -55,10 +55,10 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
55 mtd->name = map->name; 55 mtd->name = map->name;
56 mtd->type = MTD_ABSENT; 56 mtd->type = MTD_ABSENT;
57 mtd->size = map->size; 57 mtd->size = map->size;
58 mtd->erase = map_absent_erase; 58 mtd->_erase = map_absent_erase;
59 mtd->read = map_absent_read; 59 mtd->_read = map_absent_read;
60 mtd->write = map_absent_write; 60 mtd->_write = map_absent_write;
61 mtd->sync = map_absent_sync; 61 mtd->_sync = map_absent_sync;
62 mtd->flags = 0; 62 mtd->flags = 0;
63 mtd->erasesize = PAGE_SIZE; 63 mtd->erasesize = PAGE_SIZE;
64 mtd->writesize = 1; 64 mtd->writesize = 1;
@@ -70,13 +70,11 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
70 70
71static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 71static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
72{ 72{
73 *retlen = 0;
74 return -ENODEV; 73 return -ENODEV;
75} 74}
76 75
77static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) 76static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
78{ 77{
79 *retlen = 0;
80 return -ENODEV; 78 return -ENODEV;
81} 79}
82 80
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index 67640ccb2d41..991c2a1c05d3 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -64,11 +64,11 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
64 mtd->name = map->name; 64 mtd->name = map->name;
65 mtd->type = MTD_RAM; 65 mtd->type = MTD_RAM;
66 mtd->size = map->size; 66 mtd->size = map->size;
67 mtd->erase = mapram_erase; 67 mtd->_erase = mapram_erase;
68 mtd->get_unmapped_area = mapram_unmapped_area; 68 mtd->_get_unmapped_area = mapram_unmapped_area;
69 mtd->read = mapram_read; 69 mtd->_read = mapram_read;
70 mtd->write = mapram_write; 70 mtd->_write = mapram_write;
71 mtd->sync = mapram_nop; 71 mtd->_sync = mapram_nop;
72 mtd->flags = MTD_CAP_RAM; 72 mtd->flags = MTD_CAP_RAM;
73 mtd->writesize = 1; 73 mtd->writesize = 1;
74 74
@@ -122,14 +122,10 @@ static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr)
122 unsigned long i; 122 unsigned long i;
123 123
124 allff = map_word_ff(map); 124 allff = map_word_ff(map);
125
126 for (i=0; i<instr->len; i += map_bankwidth(map)) 125 for (i=0; i<instr->len; i += map_bankwidth(map))
127 map_write(map, allff, instr->addr + i); 126 map_write(map, allff, instr->addr + i);
128
129 instr->state = MTD_ERASE_DONE; 127 instr->state = MTD_ERASE_DONE;
130
131 mtd_erase_callback(instr); 128 mtd_erase_callback(instr);
132
133 return 0; 129 return 0;
134} 130}
135 131
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index 593f73d480d2..47a43cf7e5c6 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -41,11 +41,11 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
41 mtd->name = map->name; 41 mtd->name = map->name;
42 mtd->type = MTD_ROM; 42 mtd->type = MTD_ROM;
43 mtd->size = map->size; 43 mtd->size = map->size;
44 mtd->get_unmapped_area = maprom_unmapped_area; 44 mtd->_get_unmapped_area = maprom_unmapped_area;
45 mtd->read = maprom_read; 45 mtd->_read = maprom_read;
46 mtd->write = maprom_write; 46 mtd->_write = maprom_write;
47 mtd->sync = maprom_nop; 47 mtd->_sync = maprom_nop;
48 mtd->erase = maprom_erase; 48 mtd->_erase = maprom_erase;
49 mtd->flags = MTD_CAP_ROM; 49 mtd->flags = MTD_CAP_ROM;
50 mtd->erasesize = map->size; 50 mtd->erasesize = map->size;
51 mtd->writesize = 1; 51 mtd->writesize = 1;
@@ -85,8 +85,7 @@ static void maprom_nop(struct mtd_info *mtd)
85 85
86static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) 86static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
87{ 87{
88 printk(KERN_NOTICE "maprom_write called\n"); 88 return -EROFS;
89 return -EIO;
90} 89}
91 90
92static int maprom_erase (struct mtd_info *mtd, struct erase_info *info) 91static int maprom_erase (struct mtd_info *mtd, struct erase_info *info)
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 8d3dac40d7e6..4cdb2af7bf44 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -103,6 +103,13 @@ config M25PXX_USE_FAST_READ
103 help 103 help
104 This option enables FAST_READ access supported by ST M25Pxx. 104 This option enables FAST_READ access supported by ST M25Pxx.
105 105
106config MTD_SPEAR_SMI
107 tristate "SPEAR MTD NOR Support through SMI controller"
108 depends on PLAT_SPEAR
109 default y
110 help
111 This enable SNOR support on SPEAR platforms using SMI controller
112
106config MTD_SST25L 113config MTD_SST25L
107 tristate "Support SST25L (non JEDEC) SPI Flash chips" 114 tristate "Support SST25L (non JEDEC) SPI Flash chips"
108 depends on SPI_MASTER 115 depends on SPI_MASTER
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 56c7cd462f11..a4dd1d822b6c 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_MTD_LART) += lart.o
17obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o 17obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
18obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o 18obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
19obj-$(CONFIG_MTD_M25P80) += m25p80.o 19obj-$(CONFIG_MTD_M25P80) += m25p80.o
20obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
20obj-$(CONFIG_MTD_SST25L) += sst25l.o 21obj-$(CONFIG_MTD_SST25L) += sst25l.o
21 22
22CFLAGS_docg3.o += -I$(src) \ No newline at end of file 23CFLAGS_docg3.o += -I$(src) \ No newline at end of file
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index e7e46d1e7463..a4a80b742e65 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -104,14 +104,6 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
104 int offset = from & (PAGE_SIZE-1); 104 int offset = from & (PAGE_SIZE-1);
105 int cpylen; 105 int cpylen;
106 106
107 if (from > mtd->size)
108 return -EINVAL;
109 if (from + len > mtd->size)
110 len = mtd->size - from;
111
112 if (retlen)
113 *retlen = 0;
114
115 while (len) { 107 while (len) {
116 if ((offset + len) > PAGE_SIZE) 108 if ((offset + len) > PAGE_SIZE)
117 cpylen = PAGE_SIZE - offset; // multiple pages 109 cpylen = PAGE_SIZE - offset; // multiple pages
@@ -148,8 +140,6 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
148 int offset = to & ~PAGE_MASK; // page offset 140 int offset = to & ~PAGE_MASK; // page offset
149 int cpylen; 141 int cpylen;
150 142
151 if (retlen)
152 *retlen = 0;
153 while (len) { 143 while (len) {
154 if ((offset+len) > PAGE_SIZE) 144 if ((offset+len) > PAGE_SIZE)
155 cpylen = PAGE_SIZE - offset; // multiple pages 145 cpylen = PAGE_SIZE - offset; // multiple pages
@@ -188,13 +178,6 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
188 struct block2mtd_dev *dev = mtd->priv; 178 struct block2mtd_dev *dev = mtd->priv;
189 int err; 179 int err;
190 180
191 if (!len)
192 return 0;
193 if (to >= mtd->size)
194 return -ENOSPC;
195 if (to + len > mtd->size)
196 len = mtd->size - to;
197
198 mutex_lock(&dev->write_mutex); 181 mutex_lock(&dev->write_mutex);
199 err = _block2mtd_write(dev, buf, to, len, retlen); 182 err = _block2mtd_write(dev, buf, to, len, retlen);
200 mutex_unlock(&dev->write_mutex); 183 mutex_unlock(&dev->write_mutex);
@@ -283,13 +266,14 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
283 dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; 266 dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
284 dev->mtd.erasesize = erase_size; 267 dev->mtd.erasesize = erase_size;
285 dev->mtd.writesize = 1; 268 dev->mtd.writesize = 1;
269 dev->mtd.writebufsize = PAGE_SIZE;
286 dev->mtd.type = MTD_RAM; 270 dev->mtd.type = MTD_RAM;
287 dev->mtd.flags = MTD_CAP_RAM; 271 dev->mtd.flags = MTD_CAP_RAM;
288 dev->mtd.erase = block2mtd_erase; 272 dev->mtd._erase = block2mtd_erase;
289 dev->mtd.write = block2mtd_write; 273 dev->mtd._write = block2mtd_write;
290 dev->mtd.writev = mtd_writev; 274 dev->mtd._writev = mtd_writev;
291 dev->mtd.sync = block2mtd_sync; 275 dev->mtd._sync = block2mtd_sync;
292 dev->mtd.read = block2mtd_read; 276 dev->mtd._read = block2mtd_read;
293 dev->mtd.priv = dev; 277 dev->mtd.priv = dev;
294 dev->mtd.owner = THIS_MODULE; 278 dev->mtd.owner = THIS_MODULE;
295 279
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index b1cdf6479019..a4eb8b5b85ec 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -562,14 +562,15 @@ void DoC2k_init(struct mtd_info *mtd)
562 562
563 mtd->type = MTD_NANDFLASH; 563 mtd->type = MTD_NANDFLASH;
564 mtd->flags = MTD_CAP_NANDFLASH; 564 mtd->flags = MTD_CAP_NANDFLASH;
565 mtd->writesize = 512; 565 mtd->writebufsize = mtd->writesize = 512;
566 mtd->oobsize = 16; 566 mtd->oobsize = 16;
567 mtd->ecc_strength = 2;
567 mtd->owner = THIS_MODULE; 568 mtd->owner = THIS_MODULE;
568 mtd->erase = doc_erase; 569 mtd->_erase = doc_erase;
569 mtd->read = doc_read; 570 mtd->_read = doc_read;
570 mtd->write = doc_write; 571 mtd->_write = doc_write;
571 mtd->read_oob = doc_read_oob; 572 mtd->_read_oob = doc_read_oob;
572 mtd->write_oob = doc_write_oob; 573 mtd->_write_oob = doc_write_oob;
573 this->curfloor = -1; 574 this->curfloor = -1;
574 this->curchip = -1; 575 this->curchip = -1;
575 mutex_init(&this->lock); 576 mutex_init(&this->lock);
@@ -602,13 +603,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
602 int i, len256 = 0, ret=0; 603 int i, len256 = 0, ret=0;
603 size_t left = len; 604 size_t left = len;
604 605
605 /* Don't allow read past end of device */
606 if (from >= this->totlen)
607 return -EINVAL;
608
609 mutex_lock(&this->lock); 606 mutex_lock(&this->lock);
610
611 *retlen = 0;
612 while (left) { 607 while (left) {
613 len = left; 608 len = left;
614 609
@@ -748,13 +743,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
748 size_t left = len; 743 size_t left = len;
749 int status; 744 int status;
750 745
751 /* Don't allow write past end of device */
752 if (to >= this->totlen)
753 return -EINVAL;
754
755 mutex_lock(&this->lock); 746 mutex_lock(&this->lock);
756
757 *retlen = 0;
758 while (left) { 747 while (left) {
759 len = left; 748 len = left;
760 749
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 7543b98f46c4..f6927955dab0 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -346,14 +346,15 @@ void DoCMil_init(struct mtd_info *mtd)
346 346
347 /* FIXME: erase size is not always 8KiB */ 347 /* FIXME: erase size is not always 8KiB */
348 mtd->erasesize = 0x2000; 348 mtd->erasesize = 0x2000;
349 mtd->writesize = 512; 349 mtd->writebufsize = mtd->writesize = 512;
350 mtd->oobsize = 16; 350 mtd->oobsize = 16;
351 mtd->ecc_strength = 2;
351 mtd->owner = THIS_MODULE; 352 mtd->owner = THIS_MODULE;
352 mtd->erase = doc_erase; 353 mtd->_erase = doc_erase;
353 mtd->read = doc_read; 354 mtd->_read = doc_read;
354 mtd->write = doc_write; 355 mtd->_write = doc_write;
355 mtd->read_oob = doc_read_oob; 356 mtd->_read_oob = doc_read_oob;
356 mtd->write_oob = doc_write_oob; 357 mtd->_write_oob = doc_write_oob;
357 this->curfloor = -1; 358 this->curfloor = -1;
358 this->curchip = -1; 359 this->curchip = -1;
359 360
@@ -383,10 +384,6 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
383 void __iomem *docptr = this->virtadr; 384 void __iomem *docptr = this->virtadr;
384 struct Nand *mychip = &this->chips[from >> (this->chipshift)]; 385 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
385 386
386 /* Don't allow read past end of device */
387 if (from >= this->totlen)
388 return -EINVAL;
389
390 /* Don't allow a single read to cross a 512-byte block boundary */ 387 /* Don't allow a single read to cross a 512-byte block boundary */
391 if (from + len > ((from | 0x1ff) + 1)) 388 if (from + len > ((from | 0x1ff) + 1))
392 len = ((from | 0x1ff) + 1) - from; 389 len = ((from | 0x1ff) + 1) - from;
@@ -494,10 +491,6 @@ static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
494 void __iomem *docptr = this->virtadr; 491 void __iomem *docptr = this->virtadr;
495 struct Nand *mychip = &this->chips[to >> (this->chipshift)]; 492 struct Nand *mychip = &this->chips[to >> (this->chipshift)];
496 493
497 /* Don't allow write past end of device */
498 if (to >= this->totlen)
499 return -EINVAL;
500
501#if 0 494#if 0
502 /* Don't allow a single write to cross a 512-byte block boundary */ 495 /* Don't allow a single write to cross a 512-byte block boundary */
503 if (to + len > ( (to | 0x1ff) + 1)) 496 if (to + len > ( (to | 0x1ff) + 1))
@@ -599,7 +592,6 @@ static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
599 printk("Error programming flash\n"); 592 printk("Error programming flash\n");
600 /* Error in programming 593 /* Error in programming
601 FIXME: implement Bad Block Replacement (in nftl.c ??) */ 594 FIXME: implement Bad Block Replacement (in nftl.c ??) */
602 *retlen = 0;
603 ret = -EIO; 595 ret = -EIO;
604 } 596 }
605 dummy = ReadDOC(docptr, LastDataRead); 597 dummy = ReadDOC(docptr, LastDataRead);
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 177510d0e7ee..04eb2e4aa50f 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -467,14 +467,15 @@ void DoCMilPlus_init(struct mtd_info *mtd)
467 467
468 mtd->type = MTD_NANDFLASH; 468 mtd->type = MTD_NANDFLASH;
469 mtd->flags = MTD_CAP_NANDFLASH; 469 mtd->flags = MTD_CAP_NANDFLASH;
470 mtd->writesize = 512; 470 mtd->writebufsize = mtd->writesize = 512;
471 mtd->oobsize = 16; 471 mtd->oobsize = 16;
472 mtd->ecc_strength = 2;
472 mtd->owner = THIS_MODULE; 473 mtd->owner = THIS_MODULE;
473 mtd->erase = doc_erase; 474 mtd->_erase = doc_erase;
474 mtd->read = doc_read; 475 mtd->_read = doc_read;
475 mtd->write = doc_write; 476 mtd->_write = doc_write;
476 mtd->read_oob = doc_read_oob; 477 mtd->_read_oob = doc_read_oob;
477 mtd->write_oob = doc_write_oob; 478 mtd->_write_oob = doc_write_oob;
478 this->curfloor = -1; 479 this->curfloor = -1;
479 this->curchip = -1; 480 this->curchip = -1;
480 481
@@ -581,10 +582,6 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
581 void __iomem * docptr = this->virtadr; 582 void __iomem * docptr = this->virtadr;
582 struct Nand *mychip = &this->chips[from >> (this->chipshift)]; 583 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
583 584
584 /* Don't allow read past end of device */
585 if (from >= this->totlen)
586 return -EINVAL;
587
588 /* Don't allow a single read to cross a 512-byte block boundary */ 585 /* Don't allow a single read to cross a 512-byte block boundary */
589 if (from + len > ((from | 0x1ff) + 1)) 586 if (from + len > ((from | 0x1ff) + 1))
590 len = ((from | 0x1ff) + 1) - from; 587 len = ((from | 0x1ff) + 1) - from;
@@ -700,10 +697,6 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
700 void __iomem * docptr = this->virtadr; 697 void __iomem * docptr = this->virtadr;
701 struct Nand *mychip = &this->chips[to >> (this->chipshift)]; 698 struct Nand *mychip = &this->chips[to >> (this->chipshift)];
702 699
703 /* Don't allow write past end of device */
704 if (to >= this->totlen)
705 return -EINVAL;
706
707 /* Don't allow writes which aren't exactly one block (512 bytes) */ 700 /* Don't allow writes which aren't exactly one block (512 bytes) */
708 if ((to & 0x1ff) || (len != 0x200)) 701 if ((to & 0x1ff) || (len != 0x200))
709 return -EINVAL; 702 return -EINVAL;
@@ -800,7 +793,6 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
800 printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to); 793 printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to);
801 /* Error in programming 794 /* Error in programming
802 FIXME: implement Bad Block Replacement (in nftl.c ??) */ 795 FIXME: implement Bad Block Replacement (in nftl.c ??) */
803 *retlen = 0;
804 ret = -EIO; 796 ret = -EIO;
805 } 797 }
806 dummy = ReadDOC(docptr, Mplus_LastDataRead); 798 dummy = ReadDOC(docptr, Mplus_LastDataRead);
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index ad11ef0a81f4..8272c02668d6 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -80,14 +80,9 @@ static struct nand_ecclayout docg3_oobinfo = {
80 .oobavail = 8, 80 .oobavail = 8,
81}; 81};
82 82
83/**
84 * struct docg3_bch - BCH engine
85 */
86static struct bch_control *docg3_bch;
87
88static inline u8 doc_readb(struct docg3 *docg3, u16 reg) 83static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
89{ 84{
90 u8 val = readb(docg3->base + reg); 85 u8 val = readb(docg3->cascade->base + reg);
91 86
92 trace_docg3_io(0, 8, reg, (int)val); 87 trace_docg3_io(0, 8, reg, (int)val);
93 return val; 88 return val;
@@ -95,7 +90,7 @@ static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
95 90
96static inline u16 doc_readw(struct docg3 *docg3, u16 reg) 91static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
97{ 92{
98 u16 val = readw(docg3->base + reg); 93 u16 val = readw(docg3->cascade->base + reg);
99 94
100 trace_docg3_io(0, 16, reg, (int)val); 95 trace_docg3_io(0, 16, reg, (int)val);
101 return val; 96 return val;
@@ -103,13 +98,13 @@ static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
103 98
104static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg) 99static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
105{ 100{
106 writeb(val, docg3->base + reg); 101 writeb(val, docg3->cascade->base + reg);
107 trace_docg3_io(1, 8, reg, val); 102 trace_docg3_io(1, 8, reg, val);
108} 103}
109 104
110static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg) 105static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
111{ 106{
112 writew(val, docg3->base + reg); 107 writew(val, docg3->cascade->base + reg);
113 trace_docg3_io(1, 16, reg, val); 108 trace_docg3_io(1, 16, reg, val);
114} 109}
115 110
@@ -643,7 +638,8 @@ static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
643 638
644 for (i = 0; i < DOC_ECC_BCH_SIZE; i++) 639 for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
645 ecc[i] = bitrev8(hwecc[i]); 640 ecc[i] = bitrev8(hwecc[i]);
646 numerrs = decode_bch(docg3_bch, NULL, DOC_ECC_BCH_COVERED_BYTES, 641 numerrs = decode_bch(docg3->cascade->bch, NULL,
642 DOC_ECC_BCH_COVERED_BYTES,
647 NULL, ecc, NULL, errorpos); 643 NULL, ecc, NULL, errorpos);
648 BUG_ON(numerrs == -EINVAL); 644 BUG_ON(numerrs == -EINVAL);
649 if (numerrs < 0) 645 if (numerrs < 0)
@@ -734,7 +730,7 @@ err:
734 * doc_read_page_getbytes - Reads bytes from a prepared page 730 * doc_read_page_getbytes - Reads bytes from a prepared page
735 * @docg3: the device 731 * @docg3: the device
736 * @len: the number of bytes to be read (must be a multiple of 4) 732 * @len: the number of bytes to be read (must be a multiple of 4)
737 * @buf: the buffer to be filled in 733 * @buf: the buffer to be filled in (or NULL is forget bytes)
738 * @first: 1 if first time read, DOC_READADDRESS should be set 734 * @first: 1 if first time read, DOC_READADDRESS should be set
739 * 735 *
740 */ 736 */
@@ -849,7 +845,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
849 struct mtd_oob_ops *ops) 845 struct mtd_oob_ops *ops)
850{ 846{
851 struct docg3 *docg3 = mtd->priv; 847 struct docg3 *docg3 = mtd->priv;
852 int block0, block1, page, ret, ofs = 0; 848 int block0, block1, page, ret, skip, ofs = 0;
853 u8 *oobbuf = ops->oobbuf; 849 u8 *oobbuf = ops->oobbuf;
854 u8 *buf = ops->datbuf; 850 u8 *buf = ops->datbuf;
855 size_t len, ooblen, nbdata, nboob; 851 size_t len, ooblen, nbdata, nboob;
@@ -869,34 +865,36 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
869 865
870 doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n", 866 doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
871 from, ops->mode, buf, len, oobbuf, ooblen); 867 from, ops->mode, buf, len, oobbuf, ooblen);
872 if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % DOC_LAYOUT_OOB_SIZE) || 868 if (ooblen % DOC_LAYOUT_OOB_SIZE)
873 (from % DOC_LAYOUT_PAGE_SIZE))
874 return -EINVAL; 869 return -EINVAL;
875 870
876 ret = -EINVAL; 871 if (from + len > mtd->size)
877 calc_block_sector(from + len, &block0, &block1, &page, &ofs, 872 return -EINVAL;
878 docg3->reliable);
879 if (block1 > docg3->max_block)
880 goto err;
881 873
882 ops->oobretlen = 0; 874 ops->oobretlen = 0;
883 ops->retlen = 0; 875 ops->retlen = 0;
884 ret = 0; 876 ret = 0;
877 skip = from % DOC_LAYOUT_PAGE_SIZE;
878 mutex_lock(&docg3->cascade->lock);
885 while (!ret && (len > 0 || ooblen > 0)) { 879 while (!ret && (len > 0 || ooblen > 0)) {
886 calc_block_sector(from, &block0, &block1, &page, &ofs, 880 calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
887 docg3->reliable); 881 docg3->reliable);
888 nbdata = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE); 882 nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
889 nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE); 883 nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
890 ret = doc_read_page_prepare(docg3, block0, block1, page, ofs); 884 ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
891 if (ret < 0) 885 if (ret < 0)
892 goto err; 886 goto out;
893 ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES); 887 ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
894 if (ret < 0) 888 if (ret < 0)
895 goto err_in_read; 889 goto err_in_read;
896 ret = doc_read_page_getbytes(docg3, nbdata, buf, 1); 890 ret = doc_read_page_getbytes(docg3, skip, NULL, 1);
891 if (ret < skip)
892 goto err_in_read;
893 ret = doc_read_page_getbytes(docg3, nbdata, buf, 0);
897 if (ret < nbdata) 894 if (ret < nbdata)
898 goto err_in_read; 895 goto err_in_read;
899 doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE - nbdata, 896 doc_read_page_getbytes(docg3,
897 DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
900 NULL, 0); 898 NULL, 0);
901 ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0); 899 ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
902 if (ret < nboob) 900 if (ret < nboob)
@@ -950,13 +948,15 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
950 len -= nbdata; 948 len -= nbdata;
951 ooblen -= nboob; 949 ooblen -= nboob;
952 from += DOC_LAYOUT_PAGE_SIZE; 950 from += DOC_LAYOUT_PAGE_SIZE;
951 skip = 0;
953 } 952 }
954 953
954out:
955 mutex_unlock(&docg3->cascade->lock);
955 return ret; 956 return ret;
956err_in_read: 957err_in_read:
957 doc_read_page_finish(docg3); 958 doc_read_page_finish(docg3);
958err: 959 goto out;
959 return ret;
960} 960}
961 961
962/** 962/**
@@ -1114,10 +1114,10 @@ static int doc_get_op_status(struct docg3 *docg3)
1114 */ 1114 */
1115static int doc_write_erase_wait_status(struct docg3 *docg3) 1115static int doc_write_erase_wait_status(struct docg3 *docg3)
1116{ 1116{
1117 int status, ret = 0; 1117 int i, status, ret = 0;
1118 1118
1119 if (!doc_is_ready(docg3)) 1119 for (i = 0; !doc_is_ready(docg3) && i < 5; i++)
1120 usleep_range(3000, 3000); 1120 msleep(20);
1121 if (!doc_is_ready(docg3)) { 1121 if (!doc_is_ready(docg3)) {
1122 doc_dbg("Timeout reached and the chip is still not ready\n"); 1122 doc_dbg("Timeout reached and the chip is still not ready\n");
1123 ret = -EAGAIN; 1123 ret = -EAGAIN;
@@ -1196,18 +1196,19 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
1196 int block0, block1, page, ret, ofs = 0; 1196 int block0, block1, page, ret, ofs = 0;
1197 1197
1198 doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len); 1198 doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len);
1199 doc_set_device_id(docg3, docg3->device_id);
1200 1199
1201 info->state = MTD_ERASE_PENDING; 1200 info->state = MTD_ERASE_PENDING;
1202 calc_block_sector(info->addr + info->len, &block0, &block1, &page, 1201 calc_block_sector(info->addr + info->len, &block0, &block1, &page,
1203 &ofs, docg3->reliable); 1202 &ofs, docg3->reliable);
1204 ret = -EINVAL; 1203 ret = -EINVAL;
1205 if (block1 > docg3->max_block || page || ofs) 1204 if (info->addr + info->len > mtd->size || page || ofs)
1206 goto reset_err; 1205 goto reset_err;
1207 1206
1208 ret = 0; 1207 ret = 0;
1209 calc_block_sector(info->addr, &block0, &block1, &page, &ofs, 1208 calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
1210 docg3->reliable); 1209 docg3->reliable);
1210 mutex_lock(&docg3->cascade->lock);
1211 doc_set_device_id(docg3, docg3->device_id);
1211 doc_set_reliable_mode(docg3); 1212 doc_set_reliable_mode(docg3);
1212 for (len = info->len; !ret && len > 0; len -= mtd->erasesize) { 1213 for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
1213 info->state = MTD_ERASING; 1214 info->state = MTD_ERASING;
@@ -1215,6 +1216,7 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
1215 block0 += 2; 1216 block0 += 2;
1216 block1 += 2; 1217 block1 += 2;
1217 } 1218 }
1219 mutex_unlock(&docg3->cascade->lock);
1218 1220
1219 if (ret) 1221 if (ret)
1220 goto reset_err; 1222 goto reset_err;
@@ -1401,7 +1403,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
1401 struct mtd_oob_ops *ops) 1403 struct mtd_oob_ops *ops)
1402{ 1404{
1403 struct docg3 *docg3 = mtd->priv; 1405 struct docg3 *docg3 = mtd->priv;
1404 int block0, block1, page, ret, pofs = 0, autoecc, oobdelta; 1406 int ret, autoecc, oobdelta;
1405 u8 *oobbuf = ops->oobbuf; 1407 u8 *oobbuf = ops->oobbuf;
1406 u8 *buf = ops->datbuf; 1408 u8 *buf = ops->datbuf;
1407 size_t len, ooblen; 1409 size_t len, ooblen;
@@ -1438,12 +1440,8 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
1438 if (len && ooblen && 1440 if (len && ooblen &&
1439 (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta)) 1441 (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
1440 return -EINVAL; 1442 return -EINVAL;
1441 1443 if (ofs + len > mtd->size)
1442 ret = -EINVAL; 1444 return -EINVAL;
1443 calc_block_sector(ofs + len, &block0, &block1, &page, &pofs,
1444 docg3->reliable);
1445 if (block1 > docg3->max_block)
1446 goto err;
1447 1445
1448 ops->oobretlen = 0; 1446 ops->oobretlen = 0;
1449 ops->retlen = 0; 1447 ops->retlen = 0;
@@ -1457,6 +1455,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
1457 if (autoecc < 0) 1455 if (autoecc < 0)
1458 return autoecc; 1456 return autoecc;
1459 1457
1458 mutex_lock(&docg3->cascade->lock);
1460 while (!ret && len > 0) { 1459 while (!ret && len > 0) {
1461 memset(oob, 0, sizeof(oob)); 1460 memset(oob, 0, sizeof(oob));
1462 if (ofs == docg3->oob_write_ofs) 1461 if (ofs == docg3->oob_write_ofs)
@@ -1477,8 +1476,9 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
1477 } 1476 }
1478 ops->retlen += DOC_LAYOUT_PAGE_SIZE; 1477 ops->retlen += DOC_LAYOUT_PAGE_SIZE;
1479 } 1478 }
1480err: 1479
1481 doc_set_device_id(docg3, 0); 1480 doc_set_device_id(docg3, 0);
1481 mutex_unlock(&docg3->cascade->lock);
1482 return ret; 1482 return ret;
1483} 1483}
1484 1484
@@ -1535,9 +1535,11 @@ static ssize_t dps0_is_key_locked(struct device *dev,
1535 struct docg3 *docg3 = sysfs_dev2docg3(dev, attr); 1535 struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
1536 int dps0; 1536 int dps0;
1537 1537
1538 mutex_lock(&docg3->cascade->lock);
1538 doc_set_device_id(docg3, docg3->device_id); 1539 doc_set_device_id(docg3, docg3->device_id);
1539 dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS); 1540 dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
1540 doc_set_device_id(docg3, 0); 1541 doc_set_device_id(docg3, 0);
1542 mutex_unlock(&docg3->cascade->lock);
1541 1543
1542 return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK)); 1544 return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
1543} 1545}
@@ -1548,9 +1550,11 @@ static ssize_t dps1_is_key_locked(struct device *dev,
1548 struct docg3 *docg3 = sysfs_dev2docg3(dev, attr); 1550 struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
1549 int dps1; 1551 int dps1;
1550 1552
1553 mutex_lock(&docg3->cascade->lock);
1551 doc_set_device_id(docg3, docg3->device_id); 1554 doc_set_device_id(docg3, docg3->device_id);
1552 dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS); 1555 dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
1553 doc_set_device_id(docg3, 0); 1556 doc_set_device_id(docg3, 0);
1557 mutex_unlock(&docg3->cascade->lock);
1554 1558
1555 return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK)); 1559 return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
1556} 1560}
@@ -1565,10 +1569,12 @@ static ssize_t dps0_insert_key(struct device *dev,
1565 if (count != DOC_LAYOUT_DPS_KEY_LENGTH) 1569 if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
1566 return -EINVAL; 1570 return -EINVAL;
1567 1571
1572 mutex_lock(&docg3->cascade->lock);
1568 doc_set_device_id(docg3, docg3->device_id); 1573 doc_set_device_id(docg3, docg3->device_id);
1569 for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++) 1574 for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
1570 doc_writeb(docg3, buf[i], DOC_DPS0_KEY); 1575 doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
1571 doc_set_device_id(docg3, 0); 1576 doc_set_device_id(docg3, 0);
1577 mutex_unlock(&docg3->cascade->lock);
1572 return count; 1578 return count;
1573} 1579}
1574 1580
@@ -1582,10 +1588,12 @@ static ssize_t dps1_insert_key(struct device *dev,
1582 if (count != DOC_LAYOUT_DPS_KEY_LENGTH) 1588 if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
1583 return -EINVAL; 1589 return -EINVAL;
1584 1590
1591 mutex_lock(&docg3->cascade->lock);
1585 doc_set_device_id(docg3, docg3->device_id); 1592 doc_set_device_id(docg3, docg3->device_id);
1586 for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++) 1593 for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
1587 doc_writeb(docg3, buf[i], DOC_DPS1_KEY); 1594 doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
1588 doc_set_device_id(docg3, 0); 1595 doc_set_device_id(docg3, 0);
1596 mutex_unlock(&docg3->cascade->lock);
1589 return count; 1597 return count;
1590} 1598}
1591 1599
@@ -1601,13 +1609,13 @@ static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
1601}; 1609};
1602 1610
1603static int doc_register_sysfs(struct platform_device *pdev, 1611static int doc_register_sysfs(struct platform_device *pdev,
1604 struct mtd_info **floors) 1612 struct docg3_cascade *cascade)
1605{ 1613{
1606 int ret = 0, floor, i = 0; 1614 int ret = 0, floor, i = 0;
1607 struct device *dev = &pdev->dev; 1615 struct device *dev = &pdev->dev;
1608 1616
1609 for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS && floors[floor]; 1617 for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS &&
1610 floor++) 1618 cascade->floors[floor]; floor++)
1611 for (i = 0; !ret && i < 4; i++) 1619 for (i = 0; !ret && i < 4; i++)
1612 ret = device_create_file(dev, &doc_sys_attrs[floor][i]); 1620 ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
1613 if (!ret) 1621 if (!ret)
@@ -1621,12 +1629,12 @@ static int doc_register_sysfs(struct platform_device *pdev,
1621} 1629}
1622 1630
1623static void doc_unregister_sysfs(struct platform_device *pdev, 1631static void doc_unregister_sysfs(struct platform_device *pdev,
1624 struct mtd_info **floors) 1632 struct docg3_cascade *cascade)
1625{ 1633{
1626 struct device *dev = &pdev->dev; 1634 struct device *dev = &pdev->dev;
1627 int floor, i; 1635 int floor, i;
1628 1636
1629 for (floor = 0; floor < DOC_MAX_NBFLOORS && floors[floor]; 1637 for (floor = 0; floor < DOC_MAX_NBFLOORS && cascade->floors[floor];
1630 floor++) 1638 floor++)
1631 for (i = 0; i < 4; i++) 1639 for (i = 0; i < 4; i++)
1632 device_remove_file(dev, &doc_sys_attrs[floor][i]); 1640 device_remove_file(dev, &doc_sys_attrs[floor][i]);
@@ -1640,7 +1648,11 @@ static int dbg_flashctrl_show(struct seq_file *s, void *p)
1640 struct docg3 *docg3 = (struct docg3 *)s->private; 1648 struct docg3 *docg3 = (struct docg3 *)s->private;
1641 1649
1642 int pos = 0; 1650 int pos = 0;
1643 u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); 1651 u8 fctrl;
1652
1653 mutex_lock(&docg3->cascade->lock);
1654 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
1655 mutex_unlock(&docg3->cascade->lock);
1644 1656
1645 pos += seq_printf(s, 1657 pos += seq_printf(s,
1646 "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n", 1658 "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
@@ -1658,9 +1670,12 @@ static int dbg_asicmode_show(struct seq_file *s, void *p)
1658{ 1670{
1659 struct docg3 *docg3 = (struct docg3 *)s->private; 1671 struct docg3 *docg3 = (struct docg3 *)s->private;
1660 1672
1661 int pos = 0; 1673 int pos = 0, pctrl, mode;
1662 int pctrl = doc_register_readb(docg3, DOC_ASICMODE); 1674
1663 int mode = pctrl & 0x03; 1675 mutex_lock(&docg3->cascade->lock);
1676 pctrl = doc_register_readb(docg3, DOC_ASICMODE);
1677 mode = pctrl & 0x03;
1678 mutex_unlock(&docg3->cascade->lock);
1664 1679
1665 pos += seq_printf(s, 1680 pos += seq_printf(s,
1666 "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (", 1681 "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
@@ -1692,7 +1707,11 @@ static int dbg_device_id_show(struct seq_file *s, void *p)
1692{ 1707{
1693 struct docg3 *docg3 = (struct docg3 *)s->private; 1708 struct docg3 *docg3 = (struct docg3 *)s->private;
1694 int pos = 0; 1709 int pos = 0;
1695 int id = doc_register_readb(docg3, DOC_DEVICESELECT); 1710 int id;
1711
1712 mutex_lock(&docg3->cascade->lock);
1713 id = doc_register_readb(docg3, DOC_DEVICESELECT);
1714 mutex_unlock(&docg3->cascade->lock);
1696 1715
1697 pos += seq_printf(s, "DeviceId = %d\n", id); 1716 pos += seq_printf(s, "DeviceId = %d\n", id);
1698 return pos; 1717 return pos;
@@ -1705,6 +1724,7 @@ static int dbg_protection_show(struct seq_file *s, void *p)
1705 int pos = 0; 1724 int pos = 0;
1706 int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high; 1725 int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
1707 1726
1727 mutex_lock(&docg3->cascade->lock);
1708 protect = doc_register_readb(docg3, DOC_PROTECTION); 1728 protect = doc_register_readb(docg3, DOC_PROTECTION);
1709 dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS); 1729 dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
1710 dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW); 1730 dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
@@ -1712,6 +1732,7 @@ static int dbg_protection_show(struct seq_file *s, void *p)
1712 dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS); 1732 dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
1713 dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW); 1733 dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
1714 dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH); 1734 dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
1735 mutex_unlock(&docg3->cascade->lock);
1715 1736
1716 pos += seq_printf(s, "Protection = 0x%02x (", 1737 pos += seq_printf(s, "Protection = 0x%02x (",
1717 protect); 1738 protect);
@@ -1804,7 +1825,7 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
1804 1825
1805 switch (chip_id) { 1826 switch (chip_id) {
1806 case DOC_CHIPID_G3: 1827 case DOC_CHIPID_G3:
1807 mtd->name = kasprintf(GFP_KERNEL, "DiskOnChip G3 floor %d", 1828 mtd->name = kasprintf(GFP_KERNEL, "docg3.%d",
1808 docg3->device_id); 1829 docg3->device_id);
1809 docg3->max_block = 2047; 1830 docg3->max_block = 2047;
1810 break; 1831 break;
@@ -1817,16 +1838,17 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
1817 mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES; 1838 mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
1818 if (docg3->reliable == 2) 1839 if (docg3->reliable == 2)
1819 mtd->erasesize /= 2; 1840 mtd->erasesize /= 2;
1820 mtd->writesize = DOC_LAYOUT_PAGE_SIZE; 1841 mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
1821 mtd->oobsize = DOC_LAYOUT_OOB_SIZE; 1842 mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
1822 mtd->owner = THIS_MODULE; 1843 mtd->owner = THIS_MODULE;
1823 mtd->erase = doc_erase; 1844 mtd->_erase = doc_erase;
1824 mtd->read = doc_read; 1845 mtd->_read = doc_read;
1825 mtd->write = doc_write; 1846 mtd->_write = doc_write;
1826 mtd->read_oob = doc_read_oob; 1847 mtd->_read_oob = doc_read_oob;
1827 mtd->write_oob = doc_write_oob; 1848 mtd->_write_oob = doc_write_oob;
1828 mtd->block_isbad = doc_block_isbad; 1849 mtd->_block_isbad = doc_block_isbad;
1829 mtd->ecclayout = &docg3_oobinfo; 1850 mtd->ecclayout = &docg3_oobinfo;
1851 mtd->ecc_strength = DOC_ECC_BCH_T;
1830} 1852}
1831 1853
1832/** 1854/**
@@ -1834,6 +1856,7 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
1834 * @base: the io space where the device is probed 1856 * @base: the io space where the device is probed
1835 * @floor: the floor of the probed device 1857 * @floor: the floor of the probed device
1836 * @dev: the device 1858 * @dev: the device
1859 * @cascade: the cascade of chips this devices will belong to
1837 * 1860 *
1838 * Checks whether a device at the specified IO range, and floor is available. 1861 * Checks whether a device at the specified IO range, and floor is available.
1839 * 1862 *
@@ -1841,8 +1864,8 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
1841 * if a memory allocation failed. If floor 0 is checked, a reset of the ASIC is 1864 * if a memory allocation failed. If floor 0 is checked, a reset of the ASIC is
1842 * launched. 1865 * launched.
1843 */ 1866 */
1844static struct mtd_info *doc_probe_device(void __iomem *base, int floor, 1867static struct mtd_info * __init
1845 struct device *dev) 1868doc_probe_device(struct docg3_cascade *cascade, int floor, struct device *dev)
1846{ 1869{
1847 int ret, bbt_nbpages; 1870 int ret, bbt_nbpages;
1848 u16 chip_id, chip_id_inv; 1871 u16 chip_id, chip_id_inv;
@@ -1865,7 +1888,7 @@ static struct mtd_info *doc_probe_device(void __iomem *base, int floor,
1865 1888
1866 docg3->dev = dev; 1889 docg3->dev = dev;
1867 docg3->device_id = floor; 1890 docg3->device_id = floor;
1868 docg3->base = base; 1891 docg3->cascade = cascade;
1869 doc_set_device_id(docg3, docg3->device_id); 1892 doc_set_device_id(docg3, docg3->device_id);
1870 if (!floor) 1893 if (!floor)
1871 doc_set_asic_mode(docg3, DOC_ASICMODE_RESET); 1894 doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
@@ -1882,7 +1905,7 @@ static struct mtd_info *doc_probe_device(void __iomem *base, int floor,
1882 switch (chip_id) { 1905 switch (chip_id) {
1883 case DOC_CHIPID_G3: 1906 case DOC_CHIPID_G3:
1884 doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n", 1907 doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
1885 base, floor); 1908 docg3->cascade->base, floor);
1886 break; 1909 break;
1887 default: 1910 default:
1888 doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id); 1911 doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
@@ -1927,10 +1950,12 @@ static void doc_release_device(struct mtd_info *mtd)
1927static int docg3_resume(struct platform_device *pdev) 1950static int docg3_resume(struct platform_device *pdev)
1928{ 1951{
1929 int i; 1952 int i;
1953 struct docg3_cascade *cascade;
1930 struct mtd_info **docg3_floors, *mtd; 1954 struct mtd_info **docg3_floors, *mtd;
1931 struct docg3 *docg3; 1955 struct docg3 *docg3;
1932 1956
1933 docg3_floors = platform_get_drvdata(pdev); 1957 cascade = platform_get_drvdata(pdev);
1958 docg3_floors = cascade->floors;
1934 mtd = docg3_floors[0]; 1959 mtd = docg3_floors[0];
1935 docg3 = mtd->priv; 1960 docg3 = mtd->priv;
1936 1961
@@ -1952,11 +1977,13 @@ static int docg3_resume(struct platform_device *pdev)
1952static int docg3_suspend(struct platform_device *pdev, pm_message_t state) 1977static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
1953{ 1978{
1954 int floor, i; 1979 int floor, i;
1980 struct docg3_cascade *cascade;
1955 struct mtd_info **docg3_floors, *mtd; 1981 struct mtd_info **docg3_floors, *mtd;
1956 struct docg3 *docg3; 1982 struct docg3 *docg3;
1957 u8 ctrl, pwr_down; 1983 u8 ctrl, pwr_down;
1958 1984
1959 docg3_floors = platform_get_drvdata(pdev); 1985 cascade = platform_get_drvdata(pdev);
1986 docg3_floors = cascade->floors;
1960 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) { 1987 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
1961 mtd = docg3_floors[floor]; 1988 mtd = docg3_floors[floor];
1962 if (!mtd) 1989 if (!mtd)
@@ -2006,7 +2033,7 @@ static int __init docg3_probe(struct platform_device *pdev)
2006 struct resource *ress; 2033 struct resource *ress;
2007 void __iomem *base; 2034 void __iomem *base;
2008 int ret, floor, found = 0; 2035 int ret, floor, found = 0;
2009 struct mtd_info **docg3_floors; 2036 struct docg3_cascade *cascade;
2010 2037
2011 ret = -ENXIO; 2038 ret = -ENXIO;
2012 ress = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2039 ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2017,17 +2044,19 @@ static int __init docg3_probe(struct platform_device *pdev)
2017 base = ioremap(ress->start, DOC_IOSPACE_SIZE); 2044 base = ioremap(ress->start, DOC_IOSPACE_SIZE);
2018 2045
2019 ret = -ENOMEM; 2046 ret = -ENOMEM;
2020 docg3_floors = kzalloc(sizeof(*docg3_floors) * DOC_MAX_NBFLOORS, 2047 cascade = kzalloc(sizeof(*cascade) * DOC_MAX_NBFLOORS,
2021 GFP_KERNEL); 2048 GFP_KERNEL);
2022 if (!docg3_floors) 2049 if (!cascade)
2023 goto nomem1; 2050 goto nomem1;
2024 docg3_bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T, 2051 cascade->base = base;
2052 mutex_init(&cascade->lock);
2053 cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
2025 DOC_ECC_BCH_PRIMPOLY); 2054 DOC_ECC_BCH_PRIMPOLY);
2026 if (!docg3_bch) 2055 if (!cascade->bch)
2027 goto nomem2; 2056 goto nomem2;
2028 2057
2029 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) { 2058 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
2030 mtd = doc_probe_device(base, floor, dev); 2059 mtd = doc_probe_device(cascade, floor, dev);
2031 if (IS_ERR(mtd)) { 2060 if (IS_ERR(mtd)) {
2032 ret = PTR_ERR(mtd); 2061 ret = PTR_ERR(mtd);
2033 goto err_probe; 2062 goto err_probe;
@@ -2038,7 +2067,7 @@ static int __init docg3_probe(struct platform_device *pdev)
2038 else 2067 else
2039 continue; 2068 continue;
2040 } 2069 }
2041 docg3_floors[floor] = mtd; 2070 cascade->floors[floor] = mtd;
2042 ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 2071 ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
2043 0); 2072 0);
2044 if (ret) 2073 if (ret)
@@ -2046,26 +2075,26 @@ static int __init docg3_probe(struct platform_device *pdev)
2046 found++; 2075 found++;
2047 } 2076 }
2048 2077
2049 ret = doc_register_sysfs(pdev, docg3_floors); 2078 ret = doc_register_sysfs(pdev, cascade);
2050 if (ret) 2079 if (ret)
2051 goto err_probe; 2080 goto err_probe;
2052 if (!found) 2081 if (!found)
2053 goto notfound; 2082 goto notfound;
2054 2083
2055 platform_set_drvdata(pdev, docg3_floors); 2084 platform_set_drvdata(pdev, cascade);
2056 doc_dbg_register(docg3_floors[0]->priv); 2085 doc_dbg_register(cascade->floors[0]->priv);
2057 return 0; 2086 return 0;
2058 2087
2059notfound: 2088notfound:
2060 ret = -ENODEV; 2089 ret = -ENODEV;
2061 dev_info(dev, "No supported DiskOnChip found\n"); 2090 dev_info(dev, "No supported DiskOnChip found\n");
2062err_probe: 2091err_probe:
2063 free_bch(docg3_bch); 2092 kfree(cascade->bch);
2064 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) 2093 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
2065 if (docg3_floors[floor]) 2094 if (cascade->floors[floor])
2066 doc_release_device(docg3_floors[floor]); 2095 doc_release_device(cascade->floors[floor]);
2067nomem2: 2096nomem2:
2068 kfree(docg3_floors); 2097 kfree(cascade);
2069nomem1: 2098nomem1:
2070 iounmap(base); 2099 iounmap(base);
2071noress: 2100noress:
@@ -2080,19 +2109,19 @@ noress:
2080 */ 2109 */
2081static int __exit docg3_release(struct platform_device *pdev) 2110static int __exit docg3_release(struct platform_device *pdev)
2082{ 2111{
2083 struct mtd_info **docg3_floors = platform_get_drvdata(pdev); 2112 struct docg3_cascade *cascade = platform_get_drvdata(pdev);
2084 struct docg3 *docg3 = docg3_floors[0]->priv; 2113 struct docg3 *docg3 = cascade->floors[0]->priv;
2085 void __iomem *base = docg3->base; 2114 void __iomem *base = cascade->base;
2086 int floor; 2115 int floor;
2087 2116
2088 doc_unregister_sysfs(pdev, docg3_floors); 2117 doc_unregister_sysfs(pdev, cascade);
2089 doc_dbg_unregister(docg3); 2118 doc_dbg_unregister(docg3);
2090 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) 2119 for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
2091 if (docg3_floors[floor]) 2120 if (cascade->floors[floor])
2092 doc_release_device(docg3_floors[floor]); 2121 doc_release_device(cascade->floors[floor]);
2093 2122
2094 kfree(docg3_floors); 2123 free_bch(docg3->cascade->bch);
2095 free_bch(docg3_bch); 2124 kfree(cascade);
2096 iounmap(base); 2125 iounmap(base);
2097 return 0; 2126 return 0;
2098} 2127}
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
index db0da436b493..19fb93f96a3a 100644
--- a/drivers/mtd/devices/docg3.h
+++ b/drivers/mtd/devices/docg3.h
@@ -22,6 +22,8 @@
22#ifndef _MTD_DOCG3_H 22#ifndef _MTD_DOCG3_H
23#define _MTD_DOCG3_H 23#define _MTD_DOCG3_H
24 24
25#include <linux/mtd/mtd.h>
26
25/* 27/*
26 * Flash memory areas : 28 * Flash memory areas :
27 * - 0x0000 .. 0x07ff : IPL 29 * - 0x0000 .. 0x07ff : IPL
@@ -267,9 +269,23 @@
267#define DOC_LAYOUT_DPS_KEY_LENGTH 8 269#define DOC_LAYOUT_DPS_KEY_LENGTH 8
268 270
269/** 271/**
272 * struct docg3_cascade - Cascade of 1 to 4 docg3 chips
273 * @floors: floors (ie. one physical docg3 chip is one floor)
274 * @base: IO space to access all chips in the cascade
275 * @bch: the BCH correcting control structure
276 * @lock: lock to protect docg3 IO space from concurrent accesses
277 */
278struct docg3_cascade {
279 struct mtd_info *floors[DOC_MAX_NBFLOORS];
280 void __iomem *base;
281 struct bch_control *bch;
282 struct mutex lock;
283};
284
285/**
270 * struct docg3 - DiskOnChip driver private data 286 * struct docg3 - DiskOnChip driver private data
271 * @dev: the device currently under control 287 * @dev: the device currently under control
272 * @base: mapped IO space 288 * @cascade: the cascade this device belongs to
273 * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3) 289 * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
274 * @if_cfg: if true, reads are on 16bits, else reads are on 8bits 290 * @if_cfg: if true, reads are on 16bits, else reads are on 8bits
275 291
@@ -287,7 +303,7 @@
287 */ 303 */
288struct docg3 { 304struct docg3 {
289 struct device *dev; 305 struct device *dev;
290 void __iomem *base; 306 struct docg3_cascade *cascade;
291 unsigned int device_id:4; 307 unsigned int device_id:4;
292 unsigned int if_cfg:1; 308 unsigned int if_cfg:1;
293 unsigned int reliable:2; 309 unsigned int reliable:2;
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 3a11ea628e58..82bd00af5cc3 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -367,9 +367,6 @@ static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
367 printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n", __func__, instr->addr, instr->len); 367 printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n", __func__, instr->addr, instr->len);
368#endif 368#endif
369 369
370 /* sanity checks */
371 if (instr->addr + instr->len > mtd->size) return (-EINVAL);
372
373 /* 370 /*
374 * check that both start and end of the requested erase are 371 * check that both start and end of the requested erase are
375 * aligned with the erasesize at the appropriate addresses. 372 * aligned with the erasesize at the appropriate addresses.
@@ -440,10 +437,6 @@ static int flash_read (struct mtd_info *mtd,loff_t from,size_t len,size_t *retle
440 printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len); 437 printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len);
441#endif 438#endif
442 439
443 /* sanity checks */
444 if (!len) return (0);
445 if (from + len > mtd->size) return (-EINVAL);
446
447 /* we always read len bytes */ 440 /* we always read len bytes */
448 *retlen = len; 441 *retlen = len;
449 442
@@ -522,11 +515,8 @@ static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen
522 printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len); 515 printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len);
523#endif 516#endif
524 517
525 *retlen = 0;
526
527 /* sanity checks */ 518 /* sanity checks */
528 if (!len) return (0); 519 if (!len) return (0);
529 if (to + len > mtd->size) return (-EINVAL);
530 520
531 /* first, we write a 0xFF.... padded byte until we reach a dword boundary */ 521 /* first, we write a 0xFF.... padded byte until we reach a dword boundary */
532 if (to & (BUSWIDTH - 1)) 522 if (to & (BUSWIDTH - 1))
@@ -630,14 +620,15 @@ static int __init lart_flash_init (void)
630 mtd.name = module_name; 620 mtd.name = module_name;
631 mtd.type = MTD_NORFLASH; 621 mtd.type = MTD_NORFLASH;
632 mtd.writesize = 1; 622 mtd.writesize = 1;
623 mtd.writebufsize = 4;
633 mtd.flags = MTD_CAP_NORFLASH; 624 mtd.flags = MTD_CAP_NORFLASH;
634 mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN; 625 mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
635 mtd.erasesize = FLASH_BLOCKSIZE_MAIN; 626 mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
636 mtd.numeraseregions = ARRAY_SIZE(erase_regions); 627 mtd.numeraseregions = ARRAY_SIZE(erase_regions);
637 mtd.eraseregions = erase_regions; 628 mtd.eraseregions = erase_regions;
638 mtd.erase = flash_erase; 629 mtd._erase = flash_erase;
639 mtd.read = flash_read; 630 mtd._read = flash_read;
640 mtd.write = flash_write; 631 mtd._write = flash_write;
641 mtd.owner = THIS_MODULE; 632 mtd.owner = THIS_MODULE;
642 633
643#ifdef LART_DEBUG 634#ifdef LART_DEBUG
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 7c60dddbefc0..1924d247c1cb 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -288,9 +288,6 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
288 __func__, (long long)instr->addr, 288 __func__, (long long)instr->addr,
289 (long long)instr->len); 289 (long long)instr->len);
290 290
291 /* sanity checks */
292 if (instr->addr + instr->len > flash->mtd.size)
293 return -EINVAL;
294 div_u64_rem(instr->len, mtd->erasesize, &rem); 291 div_u64_rem(instr->len, mtd->erasesize, &rem);
295 if (rem) 292 if (rem)
296 return -EINVAL; 293 return -EINVAL;
@@ -349,13 +346,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
349 pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 346 pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
350 __func__, (u32)from, len); 347 __func__, (u32)from, len);
351 348
352 /* sanity checks */
353 if (!len)
354 return 0;
355
356 if (from + len > flash->mtd.size)
357 return -EINVAL;
358
359 spi_message_init(&m); 349 spi_message_init(&m);
360 memset(t, 0, (sizeof t)); 350 memset(t, 0, (sizeof t));
361 351
@@ -371,9 +361,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
371 t[1].len = len; 361 t[1].len = len;
372 spi_message_add_tail(&t[1], &m); 362 spi_message_add_tail(&t[1], &m);
373 363
374 /* Byte count starts at zero. */
375 *retlen = 0;
376
377 mutex_lock(&flash->lock); 364 mutex_lock(&flash->lock);
378 365
379 /* Wait till previous write/erase is done. */ 366 /* Wait till previous write/erase is done. */
@@ -417,15 +404,6 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
417 pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 404 pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
418 __func__, (u32)to, len); 405 __func__, (u32)to, len);
419 406
420 *retlen = 0;
421
422 /* sanity checks */
423 if (!len)
424 return(0);
425
426 if (to + len > flash->mtd.size)
427 return -EINVAL;
428
429 spi_message_init(&m); 407 spi_message_init(&m);
430 memset(t, 0, (sizeof t)); 408 memset(t, 0, (sizeof t));
431 409
@@ -509,15 +487,6 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
509 pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), 487 pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
510 __func__, (u32)to, len); 488 __func__, (u32)to, len);
511 489
512 *retlen = 0;
513
514 /* sanity checks */
515 if (!len)
516 return 0;
517
518 if (to + len > flash->mtd.size)
519 return -EINVAL;
520
521 spi_message_init(&m); 490 spi_message_init(&m);
522 memset(t, 0, (sizeof t)); 491 memset(t, 0, (sizeof t));
523 492
@@ -908,14 +877,14 @@ static int __devinit m25p_probe(struct spi_device *spi)
908 flash->mtd.writesize = 1; 877 flash->mtd.writesize = 1;
909 flash->mtd.flags = MTD_CAP_NORFLASH; 878 flash->mtd.flags = MTD_CAP_NORFLASH;
910 flash->mtd.size = info->sector_size * info->n_sectors; 879 flash->mtd.size = info->sector_size * info->n_sectors;
911 flash->mtd.erase = m25p80_erase; 880 flash->mtd._erase = m25p80_erase;
912 flash->mtd.read = m25p80_read; 881 flash->mtd._read = m25p80_read;
913 882
914 /* sst flash chips use AAI word program */ 883 /* sst flash chips use AAI word program */
915 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) 884 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
916 flash->mtd.write = sst_write; 885 flash->mtd._write = sst_write;
917 else 886 else
918 flash->mtd.write = m25p80_write; 887 flash->mtd._write = m25p80_write;
919 888
920 /* prefer "small sector" erase if possible */ 889 /* prefer "small sector" erase if possible */
921 if (info->flags & SECT_4K) { 890 if (info->flags & SECT_4K) {
@@ -932,6 +901,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
932 ppdata.of_node = spi->dev.of_node; 901 ppdata.of_node = spi->dev.of_node;
933 flash->mtd.dev.parent = &spi->dev; 902 flash->mtd.dev.parent = &spi->dev;
934 flash->page_size = info->page_size; 903 flash->page_size = info->page_size;
904 flash->mtd.writebufsize = flash->page_size;
935 905
936 if (info->addr_width) 906 if (info->addr_width)
937 flash->addr_width = info->addr_width; 907 flash->addr_width = info->addr_width;
@@ -1004,21 +974,7 @@ static struct spi_driver m25p80_driver = {
1004 */ 974 */
1005}; 975};
1006 976
1007 977module_spi_driver(m25p80_driver);
1008static int __init m25p80_init(void)
1009{
1010 return spi_register_driver(&m25p80_driver);
1011}
1012
1013
1014static void __exit m25p80_exit(void)
1015{
1016 spi_unregister_driver(&m25p80_driver);
1017}
1018
1019
1020module_init(m25p80_init);
1021module_exit(m25p80_exit);
1022 978
1023MODULE_LICENSE("GPL"); 979MODULE_LICENSE("GPL");
1024MODULE_AUTHOR("Mike Lavender"); 980MODULE_AUTHOR("Mike Lavender");
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 8423fb6d4f26..182849d39c61 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -59,12 +59,8 @@ static int ms02nv_read(struct mtd_info *mtd, loff_t from,
59{ 59{
60 struct ms02nv_private *mp = mtd->priv; 60 struct ms02nv_private *mp = mtd->priv;
61 61
62 if (from + len > mtd->size)
63 return -EINVAL;
64
65 memcpy(buf, mp->uaddr + from, len); 62 memcpy(buf, mp->uaddr + from, len);
66 *retlen = len; 63 *retlen = len;
67
68 return 0; 64 return 0;
69} 65}
70 66
@@ -73,12 +69,8 @@ static int ms02nv_write(struct mtd_info *mtd, loff_t to,
73{ 69{
74 struct ms02nv_private *mp = mtd->priv; 70 struct ms02nv_private *mp = mtd->priv;
75 71
76 if (to + len > mtd->size)
77 return -EINVAL;
78
79 memcpy(mp->uaddr + to, buf, len); 72 memcpy(mp->uaddr + to, buf, len);
80 *retlen = len; 73 *retlen = len;
81
82 return 0; 74 return 0;
83} 75}
84 76
@@ -215,8 +207,8 @@ static int __init ms02nv_init_one(ulong addr)
215 mtd->size = fixsize; 207 mtd->size = fixsize;
216 mtd->name = (char *)ms02nv_name; 208 mtd->name = (char *)ms02nv_name;
217 mtd->owner = THIS_MODULE; 209 mtd->owner = THIS_MODULE;
218 mtd->read = ms02nv_read; 210 mtd->_read = ms02nv_read;
219 mtd->write = ms02nv_write; 211 mtd->_write = ms02nv_write;
220 mtd->writesize = 1; 212 mtd->writesize = 1;
221 213
222 ret = -EIO; 214 ret = -EIO;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 236057ead0d2..928fb0e6d73a 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -164,9 +164,6 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
164 dev_name(&spi->dev), (long long)instr->addr, 164 dev_name(&spi->dev), (long long)instr->addr,
165 (long long)instr->len); 165 (long long)instr->len);
166 166
167 /* Sanity checks */
168 if (instr->addr + instr->len > mtd->size)
169 return -EINVAL;
170 div_u64_rem(instr->len, priv->page_size, &rem); 167 div_u64_rem(instr->len, priv->page_size, &rem);
171 if (rem) 168 if (rem)
172 return -EINVAL; 169 return -EINVAL;
@@ -252,14 +249,6 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
252 pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev), 249 pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev),
253 (unsigned)from, (unsigned)(from + len)); 250 (unsigned)from, (unsigned)(from + len));
254 251
255 *retlen = 0;
256
257 /* Sanity checks */
258 if (!len)
259 return 0;
260 if (from + len > mtd->size)
261 return -EINVAL;
262
263 /* Calculate flash page/byte address */ 252 /* Calculate flash page/byte address */
264 addr = (((unsigned)from / priv->page_size) << priv->page_offset) 253 addr = (((unsigned)from / priv->page_size) << priv->page_offset)
265 + ((unsigned)from % priv->page_size); 254 + ((unsigned)from % priv->page_size);
@@ -328,14 +317,6 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
328 pr_debug("%s: write 0x%x..0x%x\n", 317 pr_debug("%s: write 0x%x..0x%x\n",
329 dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len)); 318 dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
330 319
331 *retlen = 0;
332
333 /* Sanity checks */
334 if (!len)
335 return 0;
336 if ((to + len) > mtd->size)
337 return -EINVAL;
338
339 spi_message_init(&msg); 320 spi_message_init(&msg);
340 321
341 x[0].tx_buf = command = priv->command; 322 x[0].tx_buf = command = priv->command;
@@ -490,8 +471,6 @@ static ssize_t otp_read(struct spi_device *spi, unsigned base,
490 471
491 if ((off + len) > 64) 472 if ((off + len) > 64)
492 len = 64 - off; 473 len = 64 - off;
493 if (len == 0)
494 return len;
495 474
496 spi_message_init(&m); 475 spi_message_init(&m);
497 476
@@ -611,16 +590,16 @@ static int dataflash_write_user_otp(struct mtd_info *mtd,
611 590
612static char *otp_setup(struct mtd_info *device, char revision) 591static char *otp_setup(struct mtd_info *device, char revision)
613{ 592{
614 device->get_fact_prot_info = dataflash_get_otp_info; 593 device->_get_fact_prot_info = dataflash_get_otp_info;
615 device->read_fact_prot_reg = dataflash_read_fact_otp; 594 device->_read_fact_prot_reg = dataflash_read_fact_otp;
616 device->get_user_prot_info = dataflash_get_otp_info; 595 device->_get_user_prot_info = dataflash_get_otp_info;
617 device->read_user_prot_reg = dataflash_read_user_otp; 596 device->_read_user_prot_reg = dataflash_read_user_otp;
618 597
619 /* rev c parts (at45db321c and at45db1281 only!) use a 598 /* rev c parts (at45db321c and at45db1281 only!) use a
620 * different write procedure; not (yet?) implemented. 599 * different write procedure; not (yet?) implemented.
621 */ 600 */
622 if (revision > 'c') 601 if (revision > 'c')
623 device->write_user_prot_reg = dataflash_write_user_otp; 602 device->_write_user_prot_reg = dataflash_write_user_otp;
624 603
625 return ", OTP"; 604 return ", OTP";
626} 605}
@@ -672,9 +651,9 @@ add_dataflash_otp(struct spi_device *spi, char *name,
672 device->owner = THIS_MODULE; 651 device->owner = THIS_MODULE;
673 device->type = MTD_DATAFLASH; 652 device->type = MTD_DATAFLASH;
674 device->flags = MTD_WRITEABLE; 653 device->flags = MTD_WRITEABLE;
675 device->erase = dataflash_erase; 654 device->_erase = dataflash_erase;
676 device->read = dataflash_read; 655 device->_read = dataflash_read;
677 device->write = dataflash_write; 656 device->_write = dataflash_write;
678 device->priv = priv; 657 device->priv = priv;
679 658
680 device->dev.parent = &spi->dev; 659 device->dev.parent = &spi->dev;
@@ -946,18 +925,7 @@ static struct spi_driver dataflash_driver = {
946 /* FIXME: investigate suspend and resume... */ 925 /* FIXME: investigate suspend and resume... */
947}; 926};
948 927
949static int __init dataflash_init(void) 928module_spi_driver(dataflash_driver);
950{
951 return spi_register_driver(&dataflash_driver);
952}
953module_init(dataflash_init);
954
955static void __exit dataflash_exit(void)
956{
957 spi_unregister_driver(&dataflash_driver);
958}
959module_exit(dataflash_exit);
960
961 929
962MODULE_LICENSE("GPL"); 930MODULE_LICENSE("GPL");
963MODULE_AUTHOR("Andrew Victor, David Brownell"); 931MODULE_AUTHOR("Andrew Victor, David Brownell");
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 2562689ba6b4..ec59d65897fb 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -34,34 +34,23 @@ static struct mtd_info *mtd_info;
34 34
35static int ram_erase(struct mtd_info *mtd, struct erase_info *instr) 35static int ram_erase(struct mtd_info *mtd, struct erase_info *instr)
36{ 36{
37 if (instr->addr + instr->len > mtd->size)
38 return -EINVAL;
39
40 memset((char *)mtd->priv + instr->addr, 0xff, instr->len); 37 memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
41
42 instr->state = MTD_ERASE_DONE; 38 instr->state = MTD_ERASE_DONE;
43 mtd_erase_callback(instr); 39 mtd_erase_callback(instr);
44
45 return 0; 40 return 0;
46} 41}
47 42
48static int ram_point(struct mtd_info *mtd, loff_t from, size_t len, 43static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
49 size_t *retlen, void **virt, resource_size_t *phys) 44 size_t *retlen, void **virt, resource_size_t *phys)
50{ 45{
51 if (from + len > mtd->size)
52 return -EINVAL;
53
54 /* can we return a physical address with this driver? */
55 if (phys)
56 return -EINVAL;
57
58 *virt = mtd->priv + from; 46 *virt = mtd->priv + from;
59 *retlen = len; 47 *retlen = len;
60 return 0; 48 return 0;
61} 49}
62 50
63static void ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 51static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
64{ 52{
53 return 0;
65} 54}
66 55
67/* 56/*
@@ -80,11 +69,7 @@ static unsigned long ram_get_unmapped_area(struct mtd_info *mtd,
80static int ram_read(struct mtd_info *mtd, loff_t from, size_t len, 69static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
81 size_t *retlen, u_char *buf) 70 size_t *retlen, u_char *buf)
82{ 71{
83 if (from + len > mtd->size)
84 return -EINVAL;
85
86 memcpy(buf, mtd->priv + from, len); 72 memcpy(buf, mtd->priv + from, len);
87
88 *retlen = len; 73 *retlen = len;
89 return 0; 74 return 0;
90} 75}
@@ -92,11 +77,7 @@ static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
92static int ram_write(struct mtd_info *mtd, loff_t to, size_t len, 77static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
93 size_t *retlen, const u_char *buf) 78 size_t *retlen, const u_char *buf)
94{ 79{
95 if (to + len > mtd->size)
96 return -EINVAL;
97
98 memcpy((char *)mtd->priv + to, buf, len); 80 memcpy((char *)mtd->priv + to, buf, len);
99
100 *retlen = len; 81 *retlen = len;
101 return 0; 82 return 0;
102} 83}
@@ -126,12 +107,12 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
126 mtd->priv = mapped_address; 107 mtd->priv = mapped_address;
127 108
128 mtd->owner = THIS_MODULE; 109 mtd->owner = THIS_MODULE;
129 mtd->erase = ram_erase; 110 mtd->_erase = ram_erase;
130 mtd->point = ram_point; 111 mtd->_point = ram_point;
131 mtd->unpoint = ram_unpoint; 112 mtd->_unpoint = ram_unpoint;
132 mtd->get_unmapped_area = ram_get_unmapped_area; 113 mtd->_get_unmapped_area = ram_get_unmapped_area;
133 mtd->read = ram_read; 114 mtd->_read = ram_read;
134 mtd->write = ram_write; 115 mtd->_write = ram_write;
135 116
136 if (mtd_device_register(mtd, NULL, 0)) 117 if (mtd_device_register(mtd, NULL, 0))
137 return -EIO; 118 return -EIO;
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 23423bd00b06..67823de68db6 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -33,45 +33,33 @@ struct phram_mtd_list {
33 33
34static LIST_HEAD(phram_list); 34static LIST_HEAD(phram_list);
35 35
36
37static int phram_erase(struct mtd_info *mtd, struct erase_info *instr) 36static int phram_erase(struct mtd_info *mtd, struct erase_info *instr)
38{ 37{
39 u_char *start = mtd->priv; 38 u_char *start = mtd->priv;
40 39
41 if (instr->addr + instr->len > mtd->size)
42 return -EINVAL;
43
44 memset(start + instr->addr, 0xff, instr->len); 40 memset(start + instr->addr, 0xff, instr->len);
45 41
46 /* This'll catch a few races. Free the thing before returning :) 42 /*
43 * This'll catch a few races. Free the thing before returning :)
47 * I don't feel at all ashamed. This kind of thing is possible anyway 44 * I don't feel at all ashamed. This kind of thing is possible anyway
48 * with flash, but unlikely. 45 * with flash, but unlikely.
49 */ 46 */
50
51 instr->state = MTD_ERASE_DONE; 47 instr->state = MTD_ERASE_DONE;
52
53 mtd_erase_callback(instr); 48 mtd_erase_callback(instr);
54
55 return 0; 49 return 0;
56} 50}
57 51
58static int phram_point(struct mtd_info *mtd, loff_t from, size_t len, 52static int phram_point(struct mtd_info *mtd, loff_t from, size_t len,
59 size_t *retlen, void **virt, resource_size_t *phys) 53 size_t *retlen, void **virt, resource_size_t *phys)
60{ 54{
61 if (from + len > mtd->size)
62 return -EINVAL;
63
64 /* can we return a physical address with this driver? */
65 if (phys)
66 return -EINVAL;
67
68 *virt = mtd->priv + from; 55 *virt = mtd->priv + from;
69 *retlen = len; 56 *retlen = len;
70 return 0; 57 return 0;
71} 58}
72 59
73static void phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 60static int phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
74{ 61{
62 return 0;
75} 63}
76 64
77static int phram_read(struct mtd_info *mtd, loff_t from, size_t len, 65static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -79,14 +67,7 @@ static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
79{ 67{
80 u_char *start = mtd->priv; 68 u_char *start = mtd->priv;
81 69
82 if (from >= mtd->size)
83 return -EINVAL;
84
85 if (len > mtd->size - from)
86 len = mtd->size - from;
87
88 memcpy(buf, start + from, len); 70 memcpy(buf, start + from, len);
89
90 *retlen = len; 71 *retlen = len;
91 return 0; 72 return 0;
92} 73}
@@ -96,20 +77,11 @@ static int phram_write(struct mtd_info *mtd, loff_t to, size_t len,
96{ 77{
97 u_char *start = mtd->priv; 78 u_char *start = mtd->priv;
98 79
99 if (to >= mtd->size)
100 return -EINVAL;
101
102 if (len > mtd->size - to)
103 len = mtd->size - to;
104
105 memcpy(start + to, buf, len); 80 memcpy(start + to, buf, len);
106
107 *retlen = len; 81 *retlen = len;
108 return 0; 82 return 0;
109} 83}
110 84
111
112
113static void unregister_devices(void) 85static void unregister_devices(void)
114{ 86{
115 struct phram_mtd_list *this, *safe; 87 struct phram_mtd_list *this, *safe;
@@ -142,11 +114,11 @@ static int register_device(char *name, unsigned long start, unsigned long len)
142 new->mtd.name = name; 114 new->mtd.name = name;
143 new->mtd.size = len; 115 new->mtd.size = len;
144 new->mtd.flags = MTD_CAP_RAM; 116 new->mtd.flags = MTD_CAP_RAM;
145 new->mtd.erase = phram_erase; 117 new->mtd._erase = phram_erase;
146 new->mtd.point = phram_point; 118 new->mtd._point = phram_point;
147 new->mtd.unpoint = phram_unpoint; 119 new->mtd._unpoint = phram_unpoint;
148 new->mtd.read = phram_read; 120 new->mtd._read = phram_read;
149 new->mtd.write = phram_write; 121 new->mtd._write = phram_write;
150 new->mtd.owner = THIS_MODULE; 122 new->mtd.owner = THIS_MODULE;
151 new->mtd.type = MTD_RAM; 123 new->mtd.type = MTD_RAM;
152 new->mtd.erasesize = PAGE_SIZE; 124 new->mtd.erasesize = PAGE_SIZE;
@@ -233,7 +205,17 @@ static inline void kill_final_newline(char *str)
233 return 1; \ 205 return 1; \
234} while (0) 206} while (0)
235 207
236static int phram_setup(const char *val, struct kernel_param *kp) 208/*
209 * This shall contain the module parameter if any. It is of the form:
210 * - phram=<device>,<address>,<size> for module case
211 * - phram.phram=<device>,<address>,<size> for built-in case
212 * We leave 64 bytes for the device name, 12 for the address and 12 for the
213 * size.
214 * Example: phram.phram=rootfs,0xa0000000,512Mi
215 */
216static __initdata char phram_paramline[64+12+12];
217
218static int __init phram_setup(const char *val)
237{ 219{
238 char buf[64+12+12], *str = buf; 220 char buf[64+12+12], *str = buf;
239 char *token[3]; 221 char *token[3];
@@ -282,12 +264,28 @@ static int phram_setup(const char *val, struct kernel_param *kp)
282 return ret; 264 return ret;
283} 265}
284 266
285module_param_call(phram, phram_setup, NULL, NULL, 000); 267static int __init phram_param_call(const char *val, struct kernel_param *kp)
268{
269 /*
270 * This function is always called before 'init_phram()', whether
271 * built-in or module.
272 */
273 if (strlen(val) >= sizeof(phram_paramline))
274 return -ENOSPC;
275 strcpy(phram_paramline, val);
276
277 return 0;
278}
279
280module_param_call(phram, phram_param_call, NULL, NULL, 000);
286MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\""); 281MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\"");
287 282
288 283
289static int __init init_phram(void) 284static int __init init_phram(void)
290{ 285{
286 if (phram_paramline[0])
287 return phram_setup(phram_paramline);
288
291 return 0; 289 return 0;
292} 290}
293 291
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 5d53c5760a6c..0c51b988e1f8 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -94,12 +94,48 @@
94#include <linux/ioctl.h> 94#include <linux/ioctl.h>
95#include <asm/io.h> 95#include <asm/io.h>
96#include <linux/pci.h> 96#include <linux/pci.h>
97
98#include <linux/mtd/mtd.h> 97#include <linux/mtd/mtd.h>
99#include <linux/mtd/pmc551.h> 98
99#define PMC551_VERSION \
100 "Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n"
101
102#define PCI_VENDOR_ID_V3_SEMI 0x11b0
103#define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200
104
105#define PMC551_PCI_MEM_MAP0 0x50
106#define PMC551_PCI_MEM_MAP1 0x54
107#define PMC551_PCI_MEM_MAP_MAP_ADDR_MASK 0x3ff00000
108#define PMC551_PCI_MEM_MAP_APERTURE_MASK 0x000000f0
109#define PMC551_PCI_MEM_MAP_REG_EN 0x00000002
110#define PMC551_PCI_MEM_MAP_ENABLE 0x00000001
111
112#define PMC551_SDRAM_MA 0x60
113#define PMC551_SDRAM_CMD 0x62
114#define PMC551_DRAM_CFG 0x64
115#define PMC551_SYS_CTRL_REG 0x78
116
117#define PMC551_DRAM_BLK0 0x68
118#define PMC551_DRAM_BLK1 0x6c
119#define PMC551_DRAM_BLK2 0x70
120#define PMC551_DRAM_BLK3 0x74
121#define PMC551_DRAM_BLK_GET_SIZE(x) (524288 << ((x >> 4) & 0x0f))
122#define PMC551_DRAM_BLK_SET_COL_MUX(x, v) (((x) & ~0x00007000) | (((v) & 0x7) << 12))
123#define PMC551_DRAM_BLK_SET_ROW_MUX(x, v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8))
124
125struct mypriv {
126 struct pci_dev *dev;
127 u_char *start;
128 u32 base_map0;
129 u32 curr_map0;
130 u32 asize;
131 struct mtd_info *nextpmc551;
132};
100 133
101static struct mtd_info *pmc551list; 134static struct mtd_info *pmc551list;
102 135
136static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
137 size_t *retlen, void **virt, resource_size_t *phys);
138
103static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr) 139static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
104{ 140{
105 struct mypriv *priv = mtd->priv; 141 struct mypriv *priv = mtd->priv;
@@ -115,16 +151,6 @@ static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr)
115#endif 151#endif
116 152
117 end = instr->addr + instr->len - 1; 153 end = instr->addr + instr->len - 1;
118
119 /* Is it past the end? */
120 if (end > mtd->size) {
121#ifdef CONFIG_MTD_PMC551_DEBUG
122 printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n",
123 (long)end, (long)mtd->size);
124#endif
125 return -EINVAL;
126 }
127
128 eoff_hi = end & ~(priv->asize - 1); 154 eoff_hi = end & ~(priv->asize - 1);
129 soff_hi = instr->addr & ~(priv->asize - 1); 155 soff_hi = instr->addr & ~(priv->asize - 1);
130 eoff_lo = end & (priv->asize - 1); 156 eoff_lo = end & (priv->asize - 1);
@@ -178,18 +204,6 @@ static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
178 printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len); 204 printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len);
179#endif 205#endif
180 206
181 if (from + len > mtd->size) {
182#ifdef CONFIG_MTD_PMC551_DEBUG
183 printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n",
184 (long)from + len, (long)mtd->size);
185#endif
186 return -EINVAL;
187 }
188
189 /* can we return a physical address with this driver? */
190 if (phys)
191 return -EINVAL;
192
193 soff_hi = from & ~(priv->asize - 1); 207 soff_hi = from & ~(priv->asize - 1);
194 soff_lo = from & (priv->asize - 1); 208 soff_lo = from & (priv->asize - 1);
195 209
@@ -205,11 +219,12 @@ static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
205 return 0; 219 return 0;
206} 220}
207 221
208static void pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 222static int pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
209{ 223{
210#ifdef CONFIG_MTD_PMC551_DEBUG 224#ifdef CONFIG_MTD_PMC551_DEBUG
211 printk(KERN_DEBUG "pmc551_unpoint()\n"); 225 printk(KERN_DEBUG "pmc551_unpoint()\n");
212#endif 226#endif
227 return 0;
213} 228}
214 229
215static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len, 230static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -228,16 +243,6 @@ static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len,
228#endif 243#endif
229 244
230 end = from + len - 1; 245 end = from + len - 1;
231
232 /* Is it past the end? */
233 if (end > mtd->size) {
234#ifdef CONFIG_MTD_PMC551_DEBUG
235 printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n",
236 (long)end, (long)mtd->size);
237#endif
238 return -EINVAL;
239 }
240
241 soff_hi = from & ~(priv->asize - 1); 246 soff_hi = from & ~(priv->asize - 1);
242 eoff_hi = end & ~(priv->asize - 1); 247 eoff_hi = end & ~(priv->asize - 1);
243 soff_lo = from & (priv->asize - 1); 248 soff_lo = from & (priv->asize - 1);
@@ -295,16 +300,6 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
295#endif 300#endif
296 301
297 end = to + len - 1; 302 end = to + len - 1;
298 /* Is it past the end? or did the u32 wrap? */
299 if (end > mtd->size) {
300#ifdef CONFIG_MTD_PMC551_DEBUG
301 printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, "
302 "size: %ld, to: %ld)\n", (long)end, (long)mtd->size,
303 (long)to);
304#endif
305 return -EINVAL;
306 }
307
308 soff_hi = to & ~(priv->asize - 1); 303 soff_hi = to & ~(priv->asize - 1);
309 eoff_hi = end & ~(priv->asize - 1); 304 eoff_hi = end & ~(priv->asize - 1);
310 soff_lo = to & (priv->asize - 1); 305 soff_lo = to & (priv->asize - 1);
@@ -358,7 +353,7 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
358 * mechanism 353 * mechanism
359 * returns the size of the memory region found. 354 * returns the size of the memory region found.
360 */ 355 */
361static u32 fixup_pmc551(struct pci_dev *dev) 356static int fixup_pmc551(struct pci_dev *dev)
362{ 357{
363#ifdef CONFIG_MTD_PMC551_BUGFIX 358#ifdef CONFIG_MTD_PMC551_BUGFIX
364 u32 dram_data; 359 u32 dram_data;
@@ -668,7 +663,7 @@ static int __init init_pmc551(void)
668 struct mypriv *priv; 663 struct mypriv *priv;
669 int found = 0; 664 int found = 0;
670 struct mtd_info *mtd; 665 struct mtd_info *mtd;
671 u32 length = 0; 666 int length = 0;
672 667
673 if (msize) { 668 if (msize) {
674 msize = (1 << (ffs(msize) - 1)) << 20; 669 msize = (1 << (ffs(msize) - 1)) << 20;
@@ -786,11 +781,11 @@ static int __init init_pmc551(void)
786 781
787 mtd->size = msize; 782 mtd->size = msize;
788 mtd->flags = MTD_CAP_RAM; 783 mtd->flags = MTD_CAP_RAM;
789 mtd->erase = pmc551_erase; 784 mtd->_erase = pmc551_erase;
790 mtd->read = pmc551_read; 785 mtd->_read = pmc551_read;
791 mtd->write = pmc551_write; 786 mtd->_write = pmc551_write;
792 mtd->point = pmc551_point; 787 mtd->_point = pmc551_point;
793 mtd->unpoint = pmc551_unpoint; 788 mtd->_unpoint = pmc551_unpoint;
794 mtd->type = MTD_RAM; 789 mtd->type = MTD_RAM;
795 mtd->name = "PMC551 RAM board"; 790 mtd->name = "PMC551 RAM board";
796 mtd->erasesize = 0x10000; 791 mtd->erasesize = 0x10000;
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 288594163c22..8f52fc858e48 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -75,7 +75,7 @@ static slram_mtd_list_t *slram_mtdlist = NULL;
75static int slram_erase(struct mtd_info *, struct erase_info *); 75static int slram_erase(struct mtd_info *, struct erase_info *);
76static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **, 76static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **,
77 resource_size_t *); 77 resource_size_t *);
78static void slram_unpoint(struct mtd_info *, loff_t, size_t); 78static int slram_unpoint(struct mtd_info *, loff_t, size_t);
79static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *); 79static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
80static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 80static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
81 81
@@ -83,21 +83,13 @@ static int slram_erase(struct mtd_info *mtd, struct erase_info *instr)
83{ 83{
84 slram_priv_t *priv = mtd->priv; 84 slram_priv_t *priv = mtd->priv;
85 85
86 if (instr->addr + instr->len > mtd->size) {
87 return(-EINVAL);
88 }
89
90 memset(priv->start + instr->addr, 0xff, instr->len); 86 memset(priv->start + instr->addr, 0xff, instr->len);
91
92 /* This'll catch a few races. Free the thing before returning :) 87 /* This'll catch a few races. Free the thing before returning :)
93 * I don't feel at all ashamed. This kind of thing is possible anyway 88 * I don't feel at all ashamed. This kind of thing is possible anyway
94 * with flash, but unlikely. 89 * with flash, but unlikely.
95 */ 90 */
96
97 instr->state = MTD_ERASE_DONE; 91 instr->state = MTD_ERASE_DONE;
98
99 mtd_erase_callback(instr); 92 mtd_erase_callback(instr);
100
101 return(0); 93 return(0);
102} 94}
103 95
@@ -106,20 +98,14 @@ static int slram_point(struct mtd_info *mtd, loff_t from, size_t len,
106{ 98{
107 slram_priv_t *priv = mtd->priv; 99 slram_priv_t *priv = mtd->priv;
108 100
109 /* can we return a physical address with this driver? */
110 if (phys)
111 return -EINVAL;
112
113 if (from + len > mtd->size)
114 return -EINVAL;
115
116 *virt = priv->start + from; 101 *virt = priv->start + from;
117 *retlen = len; 102 *retlen = len;
118 return(0); 103 return(0);
119} 104}
120 105
121static void slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 106static int slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
122{ 107{
108 return 0;
123} 109}
124 110
125static int slram_read(struct mtd_info *mtd, loff_t from, size_t len, 111static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -127,14 +113,7 @@ static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
127{ 113{
128 slram_priv_t *priv = mtd->priv; 114 slram_priv_t *priv = mtd->priv;
129 115
130 if (from > mtd->size)
131 return -EINVAL;
132
133 if (from + len > mtd->size)
134 len = mtd->size - from;
135
136 memcpy(buf, priv->start + from, len); 116 memcpy(buf, priv->start + from, len);
137
138 *retlen = len; 117 *retlen = len;
139 return(0); 118 return(0);
140} 119}
@@ -144,11 +123,7 @@ static int slram_write(struct mtd_info *mtd, loff_t to, size_t len,
144{ 123{
145 slram_priv_t *priv = mtd->priv; 124 slram_priv_t *priv = mtd->priv;
146 125
147 if (to + len > mtd->size)
148 return -EINVAL;
149
150 memcpy(priv->start + to, buf, len); 126 memcpy(priv->start + to, buf, len);
151
152 *retlen = len; 127 *retlen = len;
153 return(0); 128 return(0);
154} 129}
@@ -199,11 +174,11 @@ static int register_device(char *name, unsigned long start, unsigned long length
199 (*curmtd)->mtdinfo->name = name; 174 (*curmtd)->mtdinfo->name = name;
200 (*curmtd)->mtdinfo->size = length; 175 (*curmtd)->mtdinfo->size = length;
201 (*curmtd)->mtdinfo->flags = MTD_CAP_RAM; 176 (*curmtd)->mtdinfo->flags = MTD_CAP_RAM;
202 (*curmtd)->mtdinfo->erase = slram_erase; 177 (*curmtd)->mtdinfo->_erase = slram_erase;
203 (*curmtd)->mtdinfo->point = slram_point; 178 (*curmtd)->mtdinfo->_point = slram_point;
204 (*curmtd)->mtdinfo->unpoint = slram_unpoint; 179 (*curmtd)->mtdinfo->_unpoint = slram_unpoint;
205 (*curmtd)->mtdinfo->read = slram_read; 180 (*curmtd)->mtdinfo->_read = slram_read;
206 (*curmtd)->mtdinfo->write = slram_write; 181 (*curmtd)->mtdinfo->_write = slram_write;
207 (*curmtd)->mtdinfo->owner = THIS_MODULE; 182 (*curmtd)->mtdinfo->owner = THIS_MODULE;
208 (*curmtd)->mtdinfo->type = MTD_RAM; 183 (*curmtd)->mtdinfo->type = MTD_RAM;
209 (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ; 184 (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
new file mode 100644
index 000000000000..797d43cd3550
--- /dev/null
+++ b/drivers/mtd/devices/spear_smi.c
@@ -0,0 +1,1147 @@
1/*
2 * SMI (Serial Memory Controller) device driver for Serial NOR Flash on
3 * SPEAr platform
4 * The serial nor interface is largely based on drivers/mtd/m25p80.c,
5 * however the SPI interface has been replaced by SMI.
6 *
7 * Copyright © 2010 STMicroelectronics.
8 * Ashish Priyadarshi
9 * Shiraz Hashim <shiraz.hashim@st.com>
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <linux/clk.h>
17#include <linux/delay.h>
18#include <linux/device.h>
19#include <linux/err.h>
20#include <linux/errno.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/ioport.h>
24#include <linux/jiffies.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/param.h>
28#include <linux/platform_device.h>
29#include <linux/mtd/mtd.h>
30#include <linux/mtd/partitions.h>
31#include <linux/mtd/spear_smi.h>
32#include <linux/mutex.h>
33#include <linux/sched.h>
34#include <linux/slab.h>
35#include <linux/wait.h>
36#include <linux/of.h>
37#include <linux/of_address.h>
38
39/* SMI clock rate */
40#define SMI_MAX_CLOCK_FREQ 50000000 /* 50 MHz */
41
42/* MAX time out to safely come out of a erase or write busy conditions */
43#define SMI_PROBE_TIMEOUT (HZ / 10)
44#define SMI_MAX_TIME_OUT (3 * HZ)
45
46/* timeout for command completion */
47#define SMI_CMD_TIMEOUT (HZ / 10)
48
49/* registers of smi */
50#define SMI_CR1 0x0 /* SMI control register 1 */
51#define SMI_CR2 0x4 /* SMI control register 2 */
52#define SMI_SR 0x8 /* SMI status register */
53#define SMI_TR 0xC /* SMI transmit register */
54#define SMI_RR 0x10 /* SMI receive register */
55
56/* defines for control_reg 1 */
57#define BANK_EN (0xF << 0) /* enables all banks */
58#define DSEL_TIME (0x6 << 4) /* Deselect time 6 + 1 SMI_CK periods */
59#define SW_MODE (0x1 << 28) /* enables SW Mode */
60#define WB_MODE (0x1 << 29) /* Write Burst Mode */
61#define FAST_MODE (0x1 << 15) /* Fast Mode */
62#define HOLD1 (0x1 << 16) /* Clock Hold period selection */
63
64/* defines for control_reg 2 */
65#define SEND (0x1 << 7) /* Send data */
66#define TFIE (0x1 << 8) /* Transmission Flag Interrupt Enable */
67#define WCIE (0x1 << 9) /* Write Complete Interrupt Enable */
68#define RD_STATUS_REG (0x1 << 10) /* reads status reg */
69#define WE (0x1 << 11) /* Write Enable */
70
71#define TX_LEN_SHIFT 0
72#define RX_LEN_SHIFT 4
73#define BANK_SHIFT 12
74
75/* defines for status register */
76#define SR_WIP 0x1 /* Write in progress */
77#define SR_WEL 0x2 /* Write enable latch */
78#define SR_BP0 0x4 /* Block protect 0 */
79#define SR_BP1 0x8 /* Block protect 1 */
80#define SR_BP2 0x10 /* Block protect 2 */
81#define SR_SRWD 0x80 /* SR write protect */
82#define TFF 0x100 /* Transfer Finished Flag */
83#define WCF 0x200 /* Transfer Finished Flag */
84#define ERF1 0x400 /* Forbidden Write Request */
85#define ERF2 0x800 /* Forbidden Access */
86
87#define WM_SHIFT 12
88
89/* flash opcodes */
90#define OPCODE_RDID 0x9f /* Read JEDEC ID */
91
92/* Flash Device Ids maintenance section */
93
94/* data structure to maintain flash ids from different vendors */
95struct flash_device {
96 char *name;
97 u8 erase_cmd;
98 u32 device_id;
99 u32 pagesize;
100 unsigned long sectorsize;
101 unsigned long size_in_bytes;
102};
103
104#define FLASH_ID(n, es, id, psize, ssize, size) \
105{ \
106 .name = n, \
107 .erase_cmd = es, \
108 .device_id = id, \
109 .pagesize = psize, \
110 .sectorsize = ssize, \
111 .size_in_bytes = size \
112}
113
114static struct flash_device flash_devices[] = {
115 FLASH_ID("st m25p16" , 0xd8, 0x00152020, 0x100, 0x10000, 0x200000),
116 FLASH_ID("st m25p32" , 0xd8, 0x00162020, 0x100, 0x10000, 0x400000),
117 FLASH_ID("st m25p64" , 0xd8, 0x00172020, 0x100, 0x10000, 0x800000),
118 FLASH_ID("st m25p128" , 0xd8, 0x00182020, 0x100, 0x40000, 0x1000000),
119 FLASH_ID("st m25p05" , 0xd8, 0x00102020, 0x80 , 0x8000 , 0x10000),
120 FLASH_ID("st m25p10" , 0xd8, 0x00112020, 0x80 , 0x8000 , 0x20000),
121 FLASH_ID("st m25p20" , 0xd8, 0x00122020, 0x100, 0x10000, 0x40000),
122 FLASH_ID("st m25p40" , 0xd8, 0x00132020, 0x100, 0x10000, 0x80000),
123 FLASH_ID("st m25p80" , 0xd8, 0x00142020, 0x100, 0x10000, 0x100000),
124 FLASH_ID("st m45pe10" , 0xd8, 0x00114020, 0x100, 0x10000, 0x20000),
125 FLASH_ID("st m45pe20" , 0xd8, 0x00124020, 0x100, 0x10000, 0x40000),
126 FLASH_ID("st m45pe40" , 0xd8, 0x00134020, 0x100, 0x10000, 0x80000),
127 FLASH_ID("st m45pe80" , 0xd8, 0x00144020, 0x100, 0x10000, 0x100000),
128 FLASH_ID("sp s25fl004" , 0xd8, 0x00120201, 0x100, 0x10000, 0x80000),
129 FLASH_ID("sp s25fl008" , 0xd8, 0x00130201, 0x100, 0x10000, 0x100000),
130 FLASH_ID("sp s25fl016" , 0xd8, 0x00140201, 0x100, 0x10000, 0x200000),
131 FLASH_ID("sp s25fl032" , 0xd8, 0x00150201, 0x100, 0x10000, 0x400000),
132 FLASH_ID("sp s25fl064" , 0xd8, 0x00160201, 0x100, 0x10000, 0x800000),
133 FLASH_ID("atmel 25f512" , 0x52, 0x0065001F, 0x80 , 0x8000 , 0x10000),
134 FLASH_ID("atmel 25f1024" , 0x52, 0x0060001F, 0x100, 0x8000 , 0x20000),
135 FLASH_ID("atmel 25f2048" , 0x52, 0x0063001F, 0x100, 0x10000, 0x40000),
136 FLASH_ID("atmel 25f4096" , 0x52, 0x0064001F, 0x100, 0x10000, 0x80000),
137 FLASH_ID("atmel 25fs040" , 0xd7, 0x0004661F, 0x100, 0x10000, 0x80000),
138 FLASH_ID("mac 25l512" , 0xd8, 0x001020C2, 0x010, 0x10000, 0x10000),
139 FLASH_ID("mac 25l1005" , 0xd8, 0x001120C2, 0x010, 0x10000, 0x20000),
140 FLASH_ID("mac 25l2005" , 0xd8, 0x001220C2, 0x010, 0x10000, 0x40000),
141 FLASH_ID("mac 25l4005" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
142 FLASH_ID("mac 25l4005a" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
143 FLASH_ID("mac 25l8005" , 0xd8, 0x001420C2, 0x010, 0x10000, 0x100000),
144 FLASH_ID("mac 25l1605" , 0xd8, 0x001520C2, 0x100, 0x10000, 0x200000),
145 FLASH_ID("mac 25l1605a" , 0xd8, 0x001520C2, 0x010, 0x10000, 0x200000),
146 FLASH_ID("mac 25l3205" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
147 FLASH_ID("mac 25l3205a" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
148 FLASH_ID("mac 25l6405" , 0xd8, 0x001720C2, 0x100, 0x10000, 0x800000),
149};
150
151/* Define spear specific structures */
152
153struct spear_snor_flash;
154
155/**
156 * struct spear_smi - Structure for SMI Device
157 *
158 * @clk: functional clock
159 * @status: current status register of SMI.
160 * @clk_rate: functional clock rate of SMI (default: SMI_MAX_CLOCK_FREQ)
161 * @lock: lock to prevent parallel access of SMI.
162 * @io_base: base address for registers of SMI.
163 * @pdev: platform device
164 * @cmd_complete: queue to wait for command completion of NOR-flash.
165 * @num_flashes: number of flashes actually present on board.
166 * @flash: separate structure for each Serial NOR-flash attached to SMI.
167 */
168struct spear_smi {
169 struct clk *clk;
170 u32 status;
171 unsigned long clk_rate;
172 struct mutex lock;
173 void __iomem *io_base;
174 struct platform_device *pdev;
175 wait_queue_head_t cmd_complete;
176 u32 num_flashes;
177 struct spear_snor_flash *flash[MAX_NUM_FLASH_CHIP];
178};
179
180/**
181 * struct spear_snor_flash - Structure for Serial NOR Flash
182 *
183 * @bank: Bank number(0, 1, 2, 3) for each NOR-flash.
184 * @dev_id: Device ID of NOR-flash.
185 * @lock: lock to manage flash read, write and erase operations
186 * @mtd: MTD info for each NOR-flash.
187 * @num_parts: Total number of partition in each bank of NOR-flash.
188 * @parts: Partition info for each bank of NOR-flash.
189 * @page_size: Page size of NOR-flash.
190 * @base_addr: Base address of NOR-flash.
191 * @erase_cmd: erase command may vary on different flash types
192 * @fast_mode: flash supports read in fast mode
193 */
194struct spear_snor_flash {
195 u32 bank;
196 u32 dev_id;
197 struct mutex lock;
198 struct mtd_info mtd;
199 u32 num_parts;
200 struct mtd_partition *parts;
201 u32 page_size;
202 void __iomem *base_addr;
203 u8 erase_cmd;
204 u8 fast_mode;
205};
206
207static inline struct spear_snor_flash *get_flash_data(struct mtd_info *mtd)
208{
209 return container_of(mtd, struct spear_snor_flash, mtd);
210}
211
212/**
213 * spear_smi_read_sr - Read status register of flash through SMI
214 * @dev: structure of SMI information.
215 * @bank: bank to which flash is connected
216 *
217 * This routine will return the status register of the flash chip present at the
218 * given bank.
219 */
220static int spear_smi_read_sr(struct spear_smi *dev, u32 bank)
221{
222 int ret;
223 u32 ctrlreg1;
224
225 mutex_lock(&dev->lock);
226 dev->status = 0; /* Will be set in interrupt handler */
227
228 ctrlreg1 = readl(dev->io_base + SMI_CR1);
229 /* program smi in hw mode */
230 writel(ctrlreg1 & ~(SW_MODE | WB_MODE), dev->io_base + SMI_CR1);
231
232 /* performing a rsr instruction in hw mode */
233 writel((bank << BANK_SHIFT) | RD_STATUS_REG | TFIE,
234 dev->io_base + SMI_CR2);
235
236 /* wait for tff */
237 ret = wait_event_interruptible_timeout(dev->cmd_complete,
238 dev->status & TFF, SMI_CMD_TIMEOUT);
239
240 /* copy dev->status (lower 16 bits) in order to release lock */
241 if (ret > 0)
242 ret = dev->status & 0xffff;
243 else
244 ret = -EIO;
245
246 /* restore the ctrl regs state */
247 writel(ctrlreg1, dev->io_base + SMI_CR1);
248 writel(0, dev->io_base + SMI_CR2);
249 mutex_unlock(&dev->lock);
250
251 return ret;
252}
253
254/**
255 * spear_smi_wait_till_ready - wait till flash is ready
256 * @dev: structure of SMI information.
257 * @bank: flash corresponding to this bank
258 * @timeout: timeout for busy wait condition
259 *
260 * This routine checks for WIP (write in progress) bit in Status register
261 * If successful the routine returns 0 else -EBUSY
262 */
263static int spear_smi_wait_till_ready(struct spear_smi *dev, u32 bank,
264 unsigned long timeout)
265{
266 unsigned long finish;
267 int status;
268
269 finish = jiffies + timeout;
270 do {
271 status = spear_smi_read_sr(dev, bank);
272 if (status < 0)
273 continue; /* try till timeout */
274 else if (!(status & SR_WIP))
275 return 0;
276
277 cond_resched();
278 } while (!time_after_eq(jiffies, finish));
279
280 dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n");
281 return status;
282}
283
284/**
285 * spear_smi_int_handler - SMI Interrupt Handler.
286 * @irq: irq number
287 * @dev_id: structure of SMI device, embedded in dev_id.
288 *
289 * The handler clears all interrupt conditions and records the status in
290 * dev->status which is used by the driver later.
291 */
292static irqreturn_t spear_smi_int_handler(int irq, void *dev_id)
293{
294 u32 status = 0;
295 struct spear_smi *dev = dev_id;
296
297 status = readl(dev->io_base + SMI_SR);
298
299 if (unlikely(!status))
300 return IRQ_NONE;
301
302 /* clear all interrupt conditions */
303 writel(0, dev->io_base + SMI_SR);
304
305 /* copy the status register in dev->status */
306 dev->status |= status;
307
308 /* send the completion */
309 wake_up_interruptible(&dev->cmd_complete);
310
311 return IRQ_HANDLED;
312}
313
314/**
315 * spear_smi_hw_init - initializes the smi controller.
316 * @dev: structure of smi device
317 *
318 * this routine initializes the smi controller wit the default values
319 */
320static void spear_smi_hw_init(struct spear_smi *dev)
321{
322 unsigned long rate = 0;
323 u32 prescale = 0;
324 u32 val;
325
326 rate = clk_get_rate(dev->clk);
327
328 /* functional clock of smi */
329 prescale = DIV_ROUND_UP(rate, dev->clk_rate);
330
331 /*
332 * setting the standard values, fast mode, prescaler for
333 * SMI_MAX_CLOCK_FREQ (50MHz) operation and bank enable
334 */
335 val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8);
336
337 mutex_lock(&dev->lock);
338 writel(val, dev->io_base + SMI_CR1);
339 mutex_unlock(&dev->lock);
340}
341
342/**
343 * get_flash_index - match chip id from a flash list.
344 * @flash_id: a valid nor flash chip id obtained from board.
345 *
346 * try to validate the chip id by matching from a list, if not found then simply
347 * returns negative. In case of success returns index in to the flash devices
348 * array.
349 */
350static int get_flash_index(u32 flash_id)
351{
352 int index;
353
354 /* Matches chip-id to entire list of 'serial-nor flash' ids */
355 for (index = 0; index < ARRAY_SIZE(flash_devices); index++) {
356 if (flash_devices[index].device_id == flash_id)
357 return index;
358 }
359
360 /* Memory chip is not listed and not supported */
361 return -ENODEV;
362}
363
364/**
365 * spear_smi_write_enable - Enable the flash to do write operation
366 * @dev: structure of SMI device
367 * @bank: enable write for flash connected to this bank
368 *
369 * Set write enable latch with Write Enable command.
370 * Returns 0 on success.
371 */
372static int spear_smi_write_enable(struct spear_smi *dev, u32 bank)
373{
374 int ret;
375 u32 ctrlreg1;
376
377 mutex_lock(&dev->lock);
378 dev->status = 0; /* Will be set in interrupt handler */
379
380 ctrlreg1 = readl(dev->io_base + SMI_CR1);
381 /* program smi in h/w mode */
382 writel(ctrlreg1 & ~SW_MODE, dev->io_base + SMI_CR1);
383
384 /* give the flash, write enable command */
385 writel((bank << BANK_SHIFT) | WE | TFIE, dev->io_base + SMI_CR2);
386
387 ret = wait_event_interruptible_timeout(dev->cmd_complete,
388 dev->status & TFF, SMI_CMD_TIMEOUT);
389
390 /* restore the ctrl regs state */
391 writel(ctrlreg1, dev->io_base + SMI_CR1);
392 writel(0, dev->io_base + SMI_CR2);
393
394 if (ret <= 0) {
395 ret = -EIO;
396 dev_err(&dev->pdev->dev,
397 "smi controller failed on write enable\n");
398 } else {
399 /* check whether write mode status is set for required bank */
400 if (dev->status & (1 << (bank + WM_SHIFT)))
401 ret = 0;
402 else {
403 dev_err(&dev->pdev->dev, "couldn't enable write\n");
404 ret = -EIO;
405 }
406 }
407
408 mutex_unlock(&dev->lock);
409 return ret;
410}
411
412static inline u32
413get_sector_erase_cmd(struct spear_snor_flash *flash, u32 offset)
414{
415 u32 cmd;
416 u8 *x = (u8 *)&cmd;
417
418 x[0] = flash->erase_cmd;
419 x[1] = offset >> 16;
420 x[2] = offset >> 8;
421 x[3] = offset;
422
423 return cmd;
424}
425
426/**
427 * spear_smi_erase_sector - erase one sector of flash
428 * @dev: structure of SMI information
429 * @command: erase command to be send
430 * @bank: bank to which this command needs to be send
431 * @bytes: size of command
432 *
433 * Erase one sector of flash memory at offset ``offset'' which is any
434 * address within the sector which should be erased.
435 * Returns 0 if successful, non-zero otherwise.
436 */
437static int spear_smi_erase_sector(struct spear_smi *dev,
438 u32 bank, u32 command, u32 bytes)
439{
440 u32 ctrlreg1 = 0;
441 int ret;
442
443 ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
444 if (ret)
445 return ret;
446
447 ret = spear_smi_write_enable(dev, bank);
448 if (ret)
449 return ret;
450
451 mutex_lock(&dev->lock);
452
453 ctrlreg1 = readl(dev->io_base + SMI_CR1);
454 writel((ctrlreg1 | SW_MODE) & ~WB_MODE, dev->io_base + SMI_CR1);
455
456 /* send command in sw mode */
457 writel(command, dev->io_base + SMI_TR);
458
459 writel((bank << BANK_SHIFT) | SEND | TFIE | (bytes << TX_LEN_SHIFT),
460 dev->io_base + SMI_CR2);
461
462 ret = wait_event_interruptible_timeout(dev->cmd_complete,
463 dev->status & TFF, SMI_CMD_TIMEOUT);
464
465 if (ret <= 0) {
466 ret = -EIO;
467 dev_err(&dev->pdev->dev, "sector erase failed\n");
468 } else
469 ret = 0; /* success */
470
471 /* restore ctrl regs */
472 writel(ctrlreg1, dev->io_base + SMI_CR1);
473 writel(0, dev->io_base + SMI_CR2);
474
475 mutex_unlock(&dev->lock);
476 return ret;
477}
478
479/**
480 * spear_mtd_erase - perform flash erase operation as requested by user
481 * @mtd: Provides the memory characteristics
482 * @e_info: Provides the erase information
483 *
484 * Erase an address range on the flash chip. The address range may extend
485 * one or more erase sectors. Return an error is there is a problem erasing.
486 */
487static int spear_mtd_erase(struct mtd_info *mtd, struct erase_info *e_info)
488{
489 struct spear_snor_flash *flash = get_flash_data(mtd);
490 struct spear_smi *dev = mtd->priv;
491 u32 addr, command, bank;
492 int len, ret;
493
494 if (!flash || !dev)
495 return -ENODEV;
496
497 bank = flash->bank;
498 if (bank > dev->num_flashes - 1) {
499 dev_err(&dev->pdev->dev, "Invalid Bank Num");
500 return -EINVAL;
501 }
502
503 addr = e_info->addr;
504 len = e_info->len;
505
506 mutex_lock(&flash->lock);
507
508 /* now erase sectors in loop */
509 while (len) {
510 command = get_sector_erase_cmd(flash, addr);
511 /* preparing the command for flash */
512 ret = spear_smi_erase_sector(dev, bank, command, 4);
513 if (ret) {
514 e_info->state = MTD_ERASE_FAILED;
515 mutex_unlock(&flash->lock);
516 return ret;
517 }
518 addr += mtd->erasesize;
519 len -= mtd->erasesize;
520 }
521
522 mutex_unlock(&flash->lock);
523 e_info->state = MTD_ERASE_DONE;
524 mtd_erase_callback(e_info);
525
526 return 0;
527}
528
529/**
530 * spear_mtd_read - performs flash read operation as requested by the user
531 * @mtd: MTD information of the memory bank
532 * @from: Address from which to start read
533 * @len: Number of bytes to be read
534 * @retlen: Fills the Number of bytes actually read
535 * @buf: Fills this after reading
536 *
537 * Read an address range from the flash chip. The address range
538 * may be any size provided it is within the physical boundaries.
539 * Returns 0 on success, non zero otherwise
540 */
541static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
542 size_t *retlen, u8 *buf)
543{
544 struct spear_snor_flash *flash = get_flash_data(mtd);
545 struct spear_smi *dev = mtd->priv;
546 void *src;
547 u32 ctrlreg1, val;
548 int ret;
549
550 if (!flash || !dev)
551 return -ENODEV;
552
553 if (flash->bank > dev->num_flashes - 1) {
554 dev_err(&dev->pdev->dev, "Invalid Bank Num");
555 return -EINVAL;
556 }
557
558 /* select address as per bank number */
559 src = flash->base_addr + from;
560
561 mutex_lock(&flash->lock);
562
563 /* wait till previous write/erase is done. */
564 ret = spear_smi_wait_till_ready(dev, flash->bank, SMI_MAX_TIME_OUT);
565 if (ret) {
566 mutex_unlock(&flash->lock);
567 return ret;
568 }
569
570 mutex_lock(&dev->lock);
571 /* put smi in hw mode not wbt mode */
572 ctrlreg1 = val = readl(dev->io_base + SMI_CR1);
573 val &= ~(SW_MODE | WB_MODE);
574 if (flash->fast_mode)
575 val |= FAST_MODE;
576
577 writel(val, dev->io_base + SMI_CR1);
578
579 memcpy_fromio(buf, (u8 *)src, len);
580
581 /* restore ctrl reg1 */
582 writel(ctrlreg1, dev->io_base + SMI_CR1);
583 mutex_unlock(&dev->lock);
584
585 *retlen = len;
586 mutex_unlock(&flash->lock);
587
588 return 0;
589}
590
591static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank,
592 void *dest, const void *src, size_t len)
593{
594 int ret;
595 u32 ctrlreg1;
596
597 /* wait until finished previous write command. */
598 ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT);
599 if (ret)
600 return ret;
601
602 /* put smi in write enable */
603 ret = spear_smi_write_enable(dev, bank);
604 if (ret)
605 return ret;
606
607 /* put smi in hw, write burst mode */
608 mutex_lock(&dev->lock);
609
610 ctrlreg1 = readl(dev->io_base + SMI_CR1);
611 writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1);
612
613 memcpy_toio(dest, src, len);
614
615 writel(ctrlreg1, dev->io_base + SMI_CR1);
616
617 mutex_unlock(&dev->lock);
618 return 0;
619}
620
621/**
622 * spear_mtd_write - performs write operation as requested by the user.
623 * @mtd: MTD information of the memory bank.
624 * @to: Address to write.
625 * @len: Number of bytes to be written.
626 * @retlen: Number of bytes actually wrote.
627 * @buf: Buffer from which the data to be taken.
628 *
629 * Write an address range to the flash chip. Data must be written in
630 * flash_page_size chunks. The address range may be any size provided
631 * it is within the physical boundaries.
632 * Returns 0 on success, non zero otherwise
633 */
634static int spear_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
635 size_t *retlen, const u8 *buf)
636{
637 struct spear_snor_flash *flash = get_flash_data(mtd);
638 struct spear_smi *dev = mtd->priv;
639 void *dest;
640 u32 page_offset, page_size;
641 int ret;
642
643 if (!flash || !dev)
644 return -ENODEV;
645
646 if (flash->bank > dev->num_flashes - 1) {
647 dev_err(&dev->pdev->dev, "Invalid Bank Num");
648 return -EINVAL;
649 }
650
651 /* select address as per bank number */
652 dest = flash->base_addr + to;
653 mutex_lock(&flash->lock);
654
655 page_offset = (u32)to % flash->page_size;
656
657 /* do if all the bytes fit onto one page */
658 if (page_offset + len <= flash->page_size) {
659 ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf, len);
660 if (!ret)
661 *retlen += len;
662 } else {
663 u32 i;
664
665 /* the size of data remaining on the first page */
666 page_size = flash->page_size - page_offset;
667
668 ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf,
669 page_size);
670 if (ret)
671 goto err_write;
672 else
673 *retlen += page_size;
674
675 /* write everything in pagesize chunks */
676 for (i = page_size; i < len; i += page_size) {
677 page_size = len - i;
678 if (page_size > flash->page_size)
679 page_size = flash->page_size;
680
681 ret = spear_smi_cpy_toio(dev, flash->bank, dest + i,
682 buf + i, page_size);
683 if (ret)
684 break;
685 else
686 *retlen += page_size;
687 }
688 }
689
690err_write:
691 mutex_unlock(&flash->lock);
692
693 return ret;
694}
695
/**
 * spear_smi_probe_flash - Detects the NOR Flash chip.
 * @dev: structure of SMI information.
 * @bank: bank on which flash must be probed
 *
 * This routine will check whether there exists a flash chip on a given memory
 * bank ID. It issues a READ-ID command in software mode and looks the
 * returned id up in the flash_devices table.
 * Return index of the probed flash in flash devices structure
 */
static int spear_smi_probe_flash(struct spear_smi *dev, u32 bank)
{
	int ret;
	u32 val = 0;

	/* the controller must be idle before starting a probe cycle */
	ret = spear_smi_wait_till_ready(dev, bank, SMI_PROBE_TIMEOUT);
	if (ret)
		return ret;

	mutex_lock(&dev->lock);

	dev->status = 0; /* Will be set in interrupt handler */
	/* put smi in sw mode */
	val = readl(dev->io_base + SMI_CR1);
	writel(val | SW_MODE, dev->io_base + SMI_CR1);

	/* send readid command in sw mode */
	writel(OPCODE_RDID, dev->io_base + SMI_TR);

	/* 1 tx byte (the opcode), 3 rx bytes (device id), irq on finish */
	val = (bank << BANK_SHIFT) | SEND | (1 << TX_LEN_SHIFT) |
		(3 << RX_LEN_SHIFT) | TFIE;
	writel(val, dev->io_base + SMI_CR2);

	/* wait for TFF; the irq handler sets dev->status and wakes us */
	ret = wait_event_interruptible_timeout(dev->cmd_complete,
			dev->status & TFF, SMI_CMD_TIMEOUT);
	if (ret <= 0) {
		/* both a timeout (0) and a signal (<0) are folded to -ENODEV */
		ret = -ENODEV;
		goto err_probe;
	}

	/* get memory chip id; only the low 3 bytes carry it */
	val = readl(dev->io_base + SMI_RR);
	val &= 0x00ffffff;
	ret = get_flash_index(val);

err_probe:
	/* clear sw mode */
	val = readl(dev->io_base + SMI_CR1);
	writel(val & ~SW_MODE, dev->io_base + SMI_CR1);

	mutex_unlock(&dev->lock);
	return ret;
}
749
750
751#ifdef CONFIG_OF
752static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
753 struct device_node *np)
754{
755 struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev);
756 struct device_node *pp = NULL;
757 const __be32 *addr;
758 u32 val;
759 int len;
760 int i = 0;
761
762 if (!np)
763 return -ENODEV;
764
765 of_property_read_u32(np, "clock-rate", &val);
766 pdata->clk_rate = val;
767
768 pdata->board_flash_info = devm_kzalloc(&pdev->dev,
769 sizeof(*pdata->board_flash_info),
770 GFP_KERNEL);
771
772 /* Fill structs for each subnode (flash device) */
773 while ((pp = of_get_next_child(np, pp))) {
774 struct spear_smi_flash_info *flash_info;
775
776 flash_info = &pdata->board_flash_info[i];
777 pdata->np[i] = pp;
778
779 /* Read base-addr and size from DT */
780 addr = of_get_property(pp, "reg", &len);
781 pdata->board_flash_info->mem_base = be32_to_cpup(&addr[0]);
782 pdata->board_flash_info->size = be32_to_cpup(&addr[1]);
783
784 if (of_get_property(pp, "st,smi-fast-mode", NULL))
785 pdata->board_flash_info->fast_mode = 1;
786
787 i++;
788 }
789
790 pdata->num_flashes = i;
791
792 return 0;
793}
794#else
795static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev,
796 struct device_node *np)
797{
798 return -ENOSYS;
799}
800#endif
801
802static int spear_smi_setup_banks(struct platform_device *pdev,
803 u32 bank, struct device_node *np)
804{
805 struct spear_smi *dev = platform_get_drvdata(pdev);
806 struct mtd_part_parser_data ppdata = {};
807 struct spear_smi_flash_info *flash_info;
808 struct spear_smi_plat_data *pdata;
809 struct spear_snor_flash *flash;
810 struct mtd_partition *parts = NULL;
811 int count = 0;
812 int flash_index;
813 int ret = 0;
814
815 pdata = dev_get_platdata(&pdev->dev);
816 if (bank > pdata->num_flashes - 1)
817 return -EINVAL;
818
819 flash_info = &pdata->board_flash_info[bank];
820 if (!flash_info)
821 return -ENODEV;
822
823 flash = kzalloc(sizeof(*flash), GFP_ATOMIC);
824 if (!flash)
825 return -ENOMEM;
826 flash->bank = bank;
827 flash->fast_mode = flash_info->fast_mode ? 1 : 0;
828 mutex_init(&flash->lock);
829
830 /* verify whether nor flash is really present on board */
831 flash_index = spear_smi_probe_flash(dev, bank);
832 if (flash_index < 0) {
833 dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank);
834 ret = flash_index;
835 goto err_probe;
836 }
837 /* map the memory for nor flash chip */
838 flash->base_addr = ioremap(flash_info->mem_base, flash_info->size);
839 if (!flash->base_addr) {
840 ret = -EIO;
841 goto err_probe;
842 }
843
844 dev->flash[bank] = flash;
845 flash->mtd.priv = dev;
846
847 if (flash_info->name)
848 flash->mtd.name = flash_info->name;
849 else
850 flash->mtd.name = flash_devices[flash_index].name;
851
852 flash->mtd.type = MTD_NORFLASH;
853 flash->mtd.writesize = 1;
854 flash->mtd.flags = MTD_CAP_NORFLASH;
855 flash->mtd.size = flash_info->size;
856 flash->mtd.erasesize = flash_devices[flash_index].sectorsize;
857 flash->page_size = flash_devices[flash_index].pagesize;
858 flash->mtd.writebufsize = flash->page_size;
859 flash->erase_cmd = flash_devices[flash_index].erase_cmd;
860 flash->mtd._erase = spear_mtd_erase;
861 flash->mtd._read = spear_mtd_read;
862 flash->mtd._write = spear_mtd_write;
863 flash->dev_id = flash_devices[flash_index].device_id;
864
865 dev_info(&dev->pdev->dev, "mtd .name=%s .size=%llx(%lluM)\n",
866 flash->mtd.name, flash->mtd.size,
867 flash->mtd.size / (1024 * 1024));
868
869 dev_info(&dev->pdev->dev, ".erasesize = 0x%x(%uK)\n",
870 flash->mtd.erasesize, flash->mtd.erasesize / 1024);
871
872#ifndef CONFIG_OF
873 if (flash_info->partitions) {
874 parts = flash_info->partitions;
875 count = flash_info->nr_partitions;
876 }
877#endif
878 ppdata.of_node = np;
879
880 ret = mtd_device_parse_register(&flash->mtd, NULL, &ppdata, parts,
881 count);
882 if (ret) {
883 dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret);
884 goto err_map;
885 }
886
887 return 0;
888
889err_map:
890 iounmap(flash->base_addr);
891
892err_probe:
893 kfree(flash);
894 return ret;
895}
896
897/**
898 * spear_smi_probe - Entry routine
899 * @pdev: platform device structure
900 *
901 * This is the first routine which gets invoked during booting and does all
902 * initialization/allocation work. The routine looks for available memory banks,
903 * and do proper init for any found one.
904 * Returns 0 on success, non zero otherwise
905 */
906static int __devinit spear_smi_probe(struct platform_device *pdev)
907{
908 struct device_node *np = pdev->dev.of_node;
909 struct spear_smi_plat_data *pdata = NULL;
910 struct spear_smi *dev;
911 struct resource *smi_base;
912 int irq, ret = 0;
913 int i;
914
915 if (np) {
916 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
917 if (!pdata) {
918 pr_err("%s: ERROR: no memory", __func__);
919 ret = -ENOMEM;
920 goto err;
921 }
922 pdev->dev.platform_data = pdata;
923 ret = spear_smi_probe_config_dt(pdev, np);
924 if (ret) {
925 ret = -ENODEV;
926 dev_err(&pdev->dev, "no platform data\n");
927 goto err;
928 }
929 } else {
930 pdata = dev_get_platdata(&pdev->dev);
931 if (pdata < 0) {
932 ret = -ENODEV;
933 dev_err(&pdev->dev, "no platform data\n");
934 goto err;
935 }
936 }
937
938 smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
939 if (!smi_base) {
940 ret = -ENODEV;
941 dev_err(&pdev->dev, "invalid smi base address\n");
942 goto err;
943 }
944
945 irq = platform_get_irq(pdev, 0);
946 if (irq < 0) {
947 ret = -ENODEV;
948 dev_err(&pdev->dev, "invalid smi irq\n");
949 goto err;
950 }
951
952 dev = kzalloc(sizeof(*dev), GFP_ATOMIC);
953 if (!dev) {
954 ret = -ENOMEM;
955 dev_err(&pdev->dev, "mem alloc fail\n");
956 goto err;
957 }
958
959 smi_base = request_mem_region(smi_base->start, resource_size(smi_base),
960 pdev->name);
961 if (!smi_base) {
962 ret = -EBUSY;
963 dev_err(&pdev->dev, "request mem region fail\n");
964 goto err_mem;
965 }
966
967 dev->io_base = ioremap(smi_base->start, resource_size(smi_base));
968 if (!dev->io_base) {
969 ret = -EIO;
970 dev_err(&pdev->dev, "ioremap fail\n");
971 goto err_ioremap;
972 }
973
974 dev->pdev = pdev;
975 dev->clk_rate = pdata->clk_rate;
976
977 if (dev->clk_rate < 0 || dev->clk_rate > SMI_MAX_CLOCK_FREQ)
978 dev->clk_rate = SMI_MAX_CLOCK_FREQ;
979
980 dev->num_flashes = pdata->num_flashes;
981
982 if (dev->num_flashes > MAX_NUM_FLASH_CHIP) {
983 dev_err(&pdev->dev, "exceeding max number of flashes\n");
984 dev->num_flashes = MAX_NUM_FLASH_CHIP;
985 }
986
987 dev->clk = clk_get(&pdev->dev, NULL);
988 if (IS_ERR(dev->clk)) {
989 ret = PTR_ERR(dev->clk);
990 goto err_clk;
991 }
992
993 ret = clk_enable(dev->clk);
994 if (ret)
995 goto err_clk_enable;
996
997 ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev);
998 if (ret) {
999 dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n");
1000 goto err_irq;
1001 }
1002
1003 mutex_init(&dev->lock);
1004 init_waitqueue_head(&dev->cmd_complete);
1005 spear_smi_hw_init(dev);
1006 platform_set_drvdata(pdev, dev);
1007
1008 /* loop for each serial nor-flash which is connected to smi */
1009 for (i = 0; i < dev->num_flashes; i++) {
1010 ret = spear_smi_setup_banks(pdev, i, pdata->np[i]);
1011 if (ret) {
1012 dev_err(&dev->pdev->dev, "bank setup failed\n");
1013 goto err_bank_setup;
1014 }
1015 }
1016
1017 return 0;
1018
1019err_bank_setup:
1020 free_irq(irq, dev);
1021 platform_set_drvdata(pdev, NULL);
1022err_irq:
1023 clk_disable(dev->clk);
1024err_clk_enable:
1025 clk_put(dev->clk);
1026err_clk:
1027 iounmap(dev->io_base);
1028err_ioremap:
1029 release_mem_region(smi_base->start, resource_size(smi_base));
1030err_mem:
1031 kfree(dev);
1032err:
1033 return ret;
1034}
1035
/**
 * spear_smi_remove - Exit routine
 * @pdev: platform device structure
 *
 * free all allocations and delete the partitions.
 */
static int __devexit spear_smi_remove(struct platform_device *pdev)
{
	struct spear_smi *dev;
	struct spear_smi_plat_data *pdata;
	struct spear_snor_flash *flash;
	struct resource *smi_base;
	int ret;
	int i, irq;

	dev = platform_get_drvdata(pdev);
	if (!dev) {
		dev_err(&pdev->dev, "dev is null\n");
		return -ENODEV;
	}

	/* NOTE(review): pdata is fetched but never used below */
	pdata = dev_get_platdata(&pdev->dev);

	/* clean up for all nor flash */
	for (i = 0; i < dev->num_flashes; i++) {
		flash = dev->flash[i];
		if (!flash)
			continue;

		/* clean up mtd stuff; unregister failure is only logged,
		 * teardown of the remaining banks continues regardless */
		ret = mtd_device_unregister(&flash->mtd);
		if (ret)
			dev_err(&pdev->dev, "error removing mtd\n");

		iounmap(flash->base_addr);
		kfree(flash);
	}

	/* release the controller-level resources in reverse probe order */
	irq = platform_get_irq(pdev, 0);
	free_irq(irq, dev);

	clk_disable(dev->clk);
	clk_put(dev->clk);
	iounmap(dev->io_base);
	kfree(dev);

	smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(smi_base->start, resource_size(smi_base));
	platform_set_drvdata(pdev, NULL);

	return 0;
}
1088
1089int spear_smi_suspend(struct platform_device *pdev, pm_message_t state)
1090{
1091 struct spear_smi *dev = platform_get_drvdata(pdev);
1092
1093 if (dev && dev->clk)
1094 clk_disable(dev->clk);
1095
1096 return 0;
1097}
1098
1099int spear_smi_resume(struct platform_device *pdev)
1100{
1101 struct spear_smi *dev = platform_get_drvdata(pdev);
1102 int ret = -EPERM;
1103
1104 if (dev && dev->clk)
1105 ret = clk_enable(dev->clk);
1106
1107 if (!ret)
1108 spear_smi_hw_init(dev);
1109 return ret;
1110}
1111
#ifdef CONFIG_OF
/* Device tree match table; binds the driver to "st,spear600-smi" nodes */
static const struct of_device_id spear_smi_id_table[] = {
	{ .compatible = "st,spear600-smi" },
	{}
};
MODULE_DEVICE_TABLE(of, spear_smi_id_table);
#endif
1119
1120static struct platform_driver spear_smi_driver = {
1121 .driver = {
1122 .name = "smi",
1123 .bus = &platform_bus_type,
1124 .owner = THIS_MODULE,
1125 .of_match_table = of_match_ptr(spear_smi_id_table),
1126 },
1127 .probe = spear_smi_probe,
1128 .remove = __devexit_p(spear_smi_remove),
1129 .suspend = spear_smi_suspend,
1130 .resume = spear_smi_resume,
1131};
1132
/*
 * module_platform_driver() expands to the module init/exit pair that
 * simply registers/unregisters the driver, replacing the hand-written
 * spear_smi_init()/spear_smi_exit() boilerplate with the standard idiom.
 */
module_platform_driver(spear_smi_driver);
1144
1145MODULE_LICENSE("GPL");
1146MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.hashim@st.com>");
1147MODULE_DESCRIPTION("MTD SMI driver for serial nor flash chips");
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 5fc198350b94..ab8a2f4c8d60 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -175,9 +175,6 @@ static int sst25l_erase(struct mtd_info *mtd, struct erase_info *instr)
175 int err; 175 int err;
176 176
177 /* Sanity checks */ 177 /* Sanity checks */
178 if (instr->addr + instr->len > flash->mtd.size)
179 return -EINVAL;
180
181 if ((uint32_t)instr->len % mtd->erasesize) 178 if ((uint32_t)instr->len % mtd->erasesize)
182 return -EINVAL; 179 return -EINVAL;
183 180
@@ -223,16 +220,6 @@ static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len,
223 unsigned char command[4]; 220 unsigned char command[4];
224 int ret; 221 int ret;
225 222
226 /* Sanity checking */
227 if (len == 0)
228 return 0;
229
230 if (from + len > flash->mtd.size)
231 return -EINVAL;
232
233 if (retlen)
234 *retlen = 0;
235
236 spi_message_init(&message); 223 spi_message_init(&message);
237 memset(&transfer, 0, sizeof(transfer)); 224 memset(&transfer, 0, sizeof(transfer));
238 225
@@ -274,13 +261,6 @@ static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len,
274 int i, j, ret, bytes, copied = 0; 261 int i, j, ret, bytes, copied = 0;
275 unsigned char command[5]; 262 unsigned char command[5];
276 263
277 /* Sanity checks */
278 if (!len)
279 return 0;
280
281 if (to + len > flash->mtd.size)
282 return -EINVAL;
283
284 if ((uint32_t)to % mtd->writesize) 264 if ((uint32_t)to % mtd->writesize)
285 return -EINVAL; 265 return -EINVAL;
286 266
@@ -402,10 +382,11 @@ static int __devinit sst25l_probe(struct spi_device *spi)
402 flash->mtd.flags = MTD_CAP_NORFLASH; 382 flash->mtd.flags = MTD_CAP_NORFLASH;
403 flash->mtd.erasesize = flash_info->erase_size; 383 flash->mtd.erasesize = flash_info->erase_size;
404 flash->mtd.writesize = flash_info->page_size; 384 flash->mtd.writesize = flash_info->page_size;
385 flash->mtd.writebufsize = flash_info->page_size;
405 flash->mtd.size = flash_info->page_size * flash_info->nr_pages; 386 flash->mtd.size = flash_info->page_size * flash_info->nr_pages;
406 flash->mtd.erase = sst25l_erase; 387 flash->mtd._erase = sst25l_erase;
407 flash->mtd.read = sst25l_read; 388 flash->mtd._read = sst25l_read;
408 flash->mtd.write = sst25l_write; 389 flash->mtd._write = sst25l_write;
409 390
410 dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name, 391 dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
411 (long long)flash->mtd.size >> 10); 392 (long long)flash->mtd.size >> 10);
@@ -418,9 +399,9 @@ static int __devinit sst25l_probe(struct spi_device *spi)
418 flash->mtd.numeraseregions); 399 flash->mtd.numeraseregions);
419 400
420 401
421 ret = mtd_device_parse_register(&flash->mtd, NULL, 0, 402 ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,
422 data ? data->parts : NULL, 403 data ? data->parts : NULL,
423 data ? data->nr_parts : 0); 404 data ? data->nr_parts : 0);
424 if (ret) { 405 if (ret) {
425 kfree(flash); 406 kfree(flash);
426 dev_set_drvdata(&spi->dev, NULL); 407 dev_set_drvdata(&spi->dev, NULL);
@@ -450,18 +431,7 @@ static struct spi_driver sst25l_driver = {
450 .remove = __devexit_p(sst25l_remove), 431 .remove = __devexit_p(sst25l_remove),
451}; 432};
452 433
453static int __init sst25l_init(void) 434module_spi_driver(sst25l_driver);
454{
455 return spi_register_driver(&sst25l_driver);
456}
457
458static void __exit sst25l_exit(void)
459{
460 spi_unregister_driver(&sst25l_driver);
461}
462
463module_init(sst25l_init);
464module_exit(sst25l_exit);
465 435
466MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips"); 436MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips");
467MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, " 437MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, "
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 28646c95cfb8..3af351484098 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -56,7 +56,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
56 if (memcmp(mtd->name, "DiskOnChip", 10)) 56 if (memcmp(mtd->name, "DiskOnChip", 10))
57 return; 57 return;
58 58
59 if (!mtd->block_isbad) { 59 if (!mtd->_block_isbad) {
60 printk(KERN_ERR 60 printk(KERN_ERR
61"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n" 61"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
62"Please use the new diskonchip driver under the NAND subsystem.\n"); 62"Please use the new diskonchip driver under the NAND subsystem.\n");
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 536bbceaeaad..d3cfe26beeaa 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -40,7 +40,7 @@ static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
40static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 40static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
41static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, 41static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
42 size_t *retlen, void **mtdbuf, resource_size_t *phys); 42 size_t *retlen, void **mtdbuf, resource_size_t *phys);
43static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len); 43static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
44static int get_chip(struct map_info *map, struct flchip *chip, int mode); 44static int get_chip(struct map_info *map, struct flchip *chip, int mode);
45static int chip_ready(struct map_info *map, struct flchip *chip, int mode); 45static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
46static void put_chip(struct map_info *map, struct flchip *chip); 46static void put_chip(struct map_info *map, struct flchip *chip);
@@ -63,18 +63,18 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
63 mtd->type = MTD_NORFLASH; 63 mtd->type = MTD_NORFLASH;
64 64
65 /* Fill in the default mtd operations */ 65 /* Fill in the default mtd operations */
66 mtd->read = lpddr_read; 66 mtd->_read = lpddr_read;
67 mtd->type = MTD_NORFLASH; 67 mtd->type = MTD_NORFLASH;
68 mtd->flags = MTD_CAP_NORFLASH; 68 mtd->flags = MTD_CAP_NORFLASH;
69 mtd->flags &= ~MTD_BIT_WRITEABLE; 69 mtd->flags &= ~MTD_BIT_WRITEABLE;
70 mtd->erase = lpddr_erase; 70 mtd->_erase = lpddr_erase;
71 mtd->write = lpddr_write_buffers; 71 mtd->_write = lpddr_write_buffers;
72 mtd->writev = lpddr_writev; 72 mtd->_writev = lpddr_writev;
73 mtd->lock = lpddr_lock; 73 mtd->_lock = lpddr_lock;
74 mtd->unlock = lpddr_unlock; 74 mtd->_unlock = lpddr_unlock;
75 if (map_is_linear(map)) { 75 if (map_is_linear(map)) {
76 mtd->point = lpddr_point; 76 mtd->_point = lpddr_point;
77 mtd->unpoint = lpddr_unpoint; 77 mtd->_unpoint = lpddr_unpoint;
78 } 78 }
79 mtd->size = 1 << lpddr->qinfo->DevSizeShift; 79 mtd->size = 1 << lpddr->qinfo->DevSizeShift;
80 mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift; 80 mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
@@ -530,14 +530,12 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
530 struct flchip *chip = &lpddr->chips[chipnum]; 530 struct flchip *chip = &lpddr->chips[chipnum];
531 int ret = 0; 531 int ret = 0;
532 532
533 if (!map->virt || (adr + len > mtd->size)) 533 if (!map->virt)
534 return -EINVAL; 534 return -EINVAL;
535 535
536 /* ofs: offset within the first chip that the first read should start */ 536 /* ofs: offset within the first chip that the first read should start */
537 ofs = adr - (chipnum << lpddr->chipshift); 537 ofs = adr - (chipnum << lpddr->chipshift);
538
539 *mtdbuf = (void *)map->virt + chip->start + ofs; 538 *mtdbuf = (void *)map->virt + chip->start + ofs;
540 *retlen = 0;
541 539
542 while (len) { 540 while (len) {
543 unsigned long thislen; 541 unsigned long thislen;
@@ -575,11 +573,11 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
575 return 0; 573 return 0;
576} 574}
577 575
578static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len) 576static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
579{ 577{
580 struct map_info *map = mtd->priv; 578 struct map_info *map = mtd->priv;
581 struct lpddr_private *lpddr = map->fldrv_priv; 579 struct lpddr_private *lpddr = map->fldrv_priv;
582 int chipnum = adr >> lpddr->chipshift; 580 int chipnum = adr >> lpddr->chipshift, err = 0;
583 unsigned long ofs; 581 unsigned long ofs;
584 582
585 /* ofs: offset within the first chip that the first read should start */ 583 /* ofs: offset within the first chip that the first read should start */
@@ -603,9 +601,11 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
603 chip->ref_point_counter--; 601 chip->ref_point_counter--;
604 if (chip->ref_point_counter == 0) 602 if (chip->ref_point_counter == 0)
605 chip->state = FL_READY; 603 chip->state = FL_READY;
606 } else 604 } else {
607 printk(KERN_WARNING "%s: Warning: unpoint called on non" 605 printk(KERN_WARNING "%s: Warning: unpoint called on non"
608 "pointed region\n", map->name); 606 "pointed region\n", map->name);
607 err = -EINVAL;
608 }
609 609
610 put_chip(map, chip); 610 put_chip(map, chip);
611 mutex_unlock(&chip->mutex); 611 mutex_unlock(&chip->mutex);
@@ -614,6 +614,8 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
614 ofs = 0; 614 ofs = 0;
615 chipnum++; 615 chipnum++;
616 } 616 }
617
618 return err;
617} 619}
618 620
619static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, 621static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
@@ -637,13 +639,11 @@ static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
637 int chipnum; 639 int chipnum;
638 unsigned long ofs, vec_seek, i; 640 unsigned long ofs, vec_seek, i;
639 int wbufsize = 1 << lpddr->qinfo->BufSizeShift; 641 int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
640
641 size_t len = 0; 642 size_t len = 0;
642 643
643 for (i = 0; i < count; i++) 644 for (i = 0; i < count; i++)
644 len += vecs[i].iov_len; 645 len += vecs[i].iov_len;
645 646
646 *retlen = 0;
647 if (!len) 647 if (!len)
648 return 0; 648 return 0;
649 649
@@ -688,9 +688,6 @@ static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
688 ofs = instr->addr; 688 ofs = instr->addr;
689 len = instr->len; 689 len = instr->len;
690 690
691 if (ofs > mtd->size || (len + ofs) > mtd->size)
692 return -EINVAL;
693
694 while (len > 0) { 691 while (len > 0) {
695 ret = do_erase_oneblock(mtd, ofs); 692 ret = do_erase_oneblock(mtd, ofs);
696 if (ret) 693 if (ret)
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 650126c361f1..ef5cde84a8b3 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -164,8 +164,8 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
164 return -ENXIO; 164 return -ENXIO;
165 } 165 }
166 166
167 mtd_device_parse_register(state->mtd, part_probe_types, 0, 167 mtd_device_parse_register(state->mtd, part_probe_types, NULL,
168 pdata->parts, pdata->nr_parts); 168 pdata->parts, pdata->nr_parts);
169 169
170 platform_set_drvdata(pdev, state); 170 platform_set_drvdata(pdev, state);
171 171
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index f43b365b848c..080f06053bd4 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -196,7 +196,7 @@ static int __init init_dc21285(void)
196 196
197 dc21285_mtd->owner = THIS_MODULE; 197 dc21285_mtd->owner = THIS_MODULE;
198 198
199 mtd_device_parse_register(dc21285_mtd, probes, 0, NULL, 0); 199 mtd_device_parse_register(dc21285_mtd, probes, NULL, NULL, 0);
200 200
201 if(machine_is_ebsa285()) { 201 if(machine_is_ebsa285()) {
202 /* 202 /*
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 33cce895859f..e4de96ba52b3 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -252,8 +252,8 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev)
252 } 252 }
253 253
254 254
255 mtd_device_parse_register(state->mtd, part_probe_types, 0, 255 mtd_device_parse_register(state->mtd, part_probe_types, NULL,
256 pdata->parts, pdata->nr_parts); 256 pdata->parts, pdata->nr_parts);
257 257
258 return 0; 258 return 0;
259} 259}
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 49c14187fc66..8ed6cb4529d8 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -85,8 +85,8 @@ static int __init h720x_mtd_init(void)
85 if (mymtd) { 85 if (mymtd) {
86 mymtd->owner = THIS_MODULE; 86 mymtd->owner = THIS_MODULE;
87 87
88 mtd_device_parse_register(mymtd, NULL, 0, 88 mtd_device_parse_register(mymtd, NULL, NULL,
89 h720x_partitions, NUM_PARTITIONS); 89 h720x_partitions, NUM_PARTITIONS);
90 return 0; 90 return 0;
91 } 91 }
92 92
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index f47aedb24366..834a06c56f56 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -91,7 +91,7 @@ static int __init init_impa7(void)
91 if (impa7_mtd[i]) { 91 if (impa7_mtd[i]) {
92 impa7_mtd[i]->owner = THIS_MODULE; 92 impa7_mtd[i]->owner = THIS_MODULE;
93 devicesfound++; 93 devicesfound++;
94 mtd_device_parse_register(impa7_mtd[i], NULL, 0, 94 mtd_device_parse_register(impa7_mtd[i], NULL, NULL,
95 partitions, 95 partitions,
96 ARRAY_SIZE(partitions)); 96 ARRAY_SIZE(partitions));
97 } 97 }
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 08c239604ee4..92e1f41634c7 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -72,7 +72,7 @@ static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
72{ 72{
73 /* register the flash bank */ 73 /* register the flash bank */
74 /* partition the flash bank */ 74 /* partition the flash bank */
75 return mtd_device_parse_register(p->info, NULL, 0, NULL, 0); 75 return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
76} 76}
77 77
78static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) 78static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index fc7d4d0d9a4e..4a41ced0f710 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -226,7 +226,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
226 } 226 }
227 info->mtd->owner = THIS_MODULE; 227 info->mtd->owner = THIS_MODULE;
228 228
229 err = mtd_device_parse_register(info->mtd, probes, 0, NULL, 0); 229 err = mtd_device_parse_register(info->mtd, probes, NULL, NULL, 0);
230 if (err) 230 if (err)
231 goto Error; 231 goto Error;
232 232
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 8b5410162d70..e864fc6c58f9 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -182,6 +182,9 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
182{ 182{
183 struct flash_platform_data *plat = dev->dev.platform_data; 183 struct flash_platform_data *plat = dev->dev.platform_data;
184 struct ixp4xx_flash_info *info; 184 struct ixp4xx_flash_info *info;
185 struct mtd_part_parser_data ppdata = {
186 .origin = dev->resource->start,
187 };
185 int err = -1; 188 int err = -1;
186 189
187 if (!plat) 190 if (!plat)
@@ -247,7 +250,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
247 /* Use the fast version */ 250 /* Use the fast version */
248 info->map.write = ixp4xx_write16; 251 info->map.write = ixp4xx_write16;
249 252
250 err = mtd_device_parse_register(info->mtd, probes, dev->resource->start, 253 err = mtd_device_parse_register(info->mtd, probes, &ppdata,
251 plat->parts, plat->nr_parts); 254 plat->parts, plat->nr_parts);
252 if (err) { 255 if (err) {
253 printk(KERN_ERR "Could not parse partitions\n"); 256 printk(KERN_ERR "Could not parse partitions\n");
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index dd0360ba2412..74bd98ee635f 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -27,17 +27,21 @@ static struct mtd_info *mymtd;
27 27
28 28
29/* Is this really the vpp port? */ 29/* Is this really the vpp port? */
30static DEFINE_SPINLOCK(l440gx_vpp_lock);
31static int l440gx_vpp_refcnt;
30static void l440gx_set_vpp(struct map_info *map, int vpp) 32static void l440gx_set_vpp(struct map_info *map, int vpp)
31{ 33{
32 unsigned long l; 34 unsigned long flags;
33 35
34 l = inl(VPP_PORT); 36 spin_lock_irqsave(&l440gx_vpp_lock, flags);
35 if (vpp) { 37 if (vpp) {
36 l |= 1; 38 if (++l440gx_vpp_refcnt == 1) /* first nested 'on' */
39 outl(inl(VPP_PORT) | 1, VPP_PORT);
37 } else { 40 } else {
38 l &= ~1; 41 if (--l440gx_vpp_refcnt == 0) /* last nested 'off' */
42 outl(inl(VPP_PORT) & ~1, VPP_PORT);
39 } 43 }
40 outl(l, VPP_PORT); 44 spin_unlock_irqrestore(&l440gx_vpp_lock, flags);
41} 45}
42 46
43static struct map_info l440gx_map = { 47static struct map_info l440gx_map = {
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 7b889de9477b..b5401e355745 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -45,6 +45,7 @@ struct ltq_mtd {
45}; 45};
46 46
47static char ltq_map_name[] = "ltq_nor"; 47static char ltq_map_name[] = "ltq_nor";
48static const char *ltq_probe_types[] __devinitconst = { "cmdlinepart", NULL };
48 49
49static map_word 50static map_word
50ltq_read16(struct map_info *map, unsigned long adr) 51ltq_read16(struct map_info *map, unsigned long adr)
@@ -168,8 +169,9 @@ ltq_mtd_probe(struct platform_device *pdev)
168 cfi->addr_unlock1 ^= 1; 169 cfi->addr_unlock1 ^= 1;
169 cfi->addr_unlock2 ^= 1; 170 cfi->addr_unlock2 ^= 1;
170 171
171 err = mtd_device_parse_register(ltq_mtd->mtd, NULL, 0, 172 err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, NULL,
172 ltq_mtd_data->parts, ltq_mtd_data->nr_parts); 173 ltq_mtd_data->parts,
174 ltq_mtd_data->nr_parts);
173 if (err) { 175 if (err) {
174 dev_err(&pdev->dev, "failed to add partitions\n"); 176 dev_err(&pdev->dev, "failed to add partitions\n");
175 goto err_destroy; 177 goto err_destroy;
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 8fed58e3a4a8..3c7ad17fca78 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -199,8 +199,9 @@ static int __devinit latch_addr_flash_probe(struct platform_device *dev)
199 } 199 }
200 info->mtd->owner = THIS_MODULE; 200 info->mtd->owner = THIS_MODULE;
201 201
202 mtd_device_parse_register(info->mtd, NULL, 0, 202 mtd_device_parse_register(info->mtd, NULL, NULL,
203 latch_addr_data->parts, latch_addr_data->nr_parts); 203 latch_addr_data->parts,
204 latch_addr_data->nr_parts);
204 return 0; 205 return 0;
205 206
206iounmap: 207iounmap:
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 0259cf583022..a3cfad392ed6 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -294,13 +294,24 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f
294} 294}
295 295
296 296
297static DEFINE_SPINLOCK(pcmcia_vpp_lock);
298static int pcmcia_vpp_refcnt;
297static void pcmciamtd_set_vpp(struct map_info *map, int on) 299static void pcmciamtd_set_vpp(struct map_info *map, int on)
298{ 300{
299 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; 301 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
300 struct pcmcia_device *link = dev->p_dev; 302 struct pcmcia_device *link = dev->p_dev;
303 unsigned long flags;
301 304
302 pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp); 305 pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
303 pcmcia_fixup_vpp(link, on ? dev->vpp : 0); 306 spin_lock_irqsave(&pcmcia_vpp_lock, flags);
307 if (on) {
308 if (++pcmcia_vpp_refcnt == 1) /* first nested 'on' */
309 pcmcia_fixup_vpp(link, dev->vpp);
310 } else {
311 if (--pcmcia_vpp_refcnt == 0) /* last nested 'off' */
312 pcmcia_fixup_vpp(link, 0);
313 }
314 spin_unlock_irqrestore(&pcmcia_vpp_lock, flags);
304} 315}
305 316
306 317
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index abc562653b31..21b0b713cacb 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -27,6 +27,8 @@ struct physmap_flash_info {
27 struct mtd_info *mtd[MAX_RESOURCES]; 27 struct mtd_info *mtd[MAX_RESOURCES];
28 struct mtd_info *cmtd; 28 struct mtd_info *cmtd;
29 struct map_info map[MAX_RESOURCES]; 29 struct map_info map[MAX_RESOURCES];
30 spinlock_t vpp_lock;
31 int vpp_refcnt;
30}; 32};
31 33
32static int physmap_flash_remove(struct platform_device *dev) 34static int physmap_flash_remove(struct platform_device *dev)
@@ -63,12 +65,26 @@ static void physmap_set_vpp(struct map_info *map, int state)
63{ 65{
64 struct platform_device *pdev; 66 struct platform_device *pdev;
65 struct physmap_flash_data *physmap_data; 67 struct physmap_flash_data *physmap_data;
68 struct physmap_flash_info *info;
69 unsigned long flags;
66 70
67 pdev = (struct platform_device *)map->map_priv_1; 71 pdev = (struct platform_device *)map->map_priv_1;
68 physmap_data = pdev->dev.platform_data; 72 physmap_data = pdev->dev.platform_data;
69 73
70 if (physmap_data->set_vpp) 74 if (!physmap_data->set_vpp)
71 physmap_data->set_vpp(pdev, state); 75 return;
76
77 info = platform_get_drvdata(pdev);
78
79 spin_lock_irqsave(&info->vpp_lock, flags);
80 if (state) {
81 if (++info->vpp_refcnt == 1) /* first nested 'on' */
82 physmap_data->set_vpp(pdev, 1);
83 } else {
84 if (--info->vpp_refcnt == 0) /* last nested 'off' */
85 physmap_data->set_vpp(pdev, 0);
86 }
87 spin_unlock_irqrestore(&info->vpp_lock, flags);
72} 88}
73 89
74static const char *rom_probe_types[] = { 90static const char *rom_probe_types[] = {
@@ -172,9 +188,11 @@ static int physmap_flash_probe(struct platform_device *dev)
172 if (err) 188 if (err)
173 goto err_out; 189 goto err_out;
174 190
191 spin_lock_init(&info->vpp_lock);
192
175 part_types = physmap_data->part_probe_types ? : part_probe_types; 193 part_types = physmap_data->part_probe_types ? : part_probe_types;
176 194
177 mtd_device_parse_register(info->cmtd, part_types, 0, 195 mtd_device_parse_register(info->cmtd, part_types, NULL,
178 physmap_data->parts, physmap_data->nr_parts); 196 physmap_data->parts, physmap_data->nr_parts);
179 return 0; 197 return 0;
180 198
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 45876d0e5b8e..891558de3ec1 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -222,8 +222,9 @@ static int platram_probe(struct platform_device *pdev)
222 /* check to see if there are any available partitions, or wether 222 /* check to see if there are any available partitions, or wether
223 * to add this device whole */ 223 * to add this device whole */
224 224
225 err = mtd_device_parse_register(info->mtd, pdata->probes, 0, 225 err = mtd_device_parse_register(info->mtd, pdata->probes, NULL,
226 pdata->partitions, pdata->nr_partitions); 226 pdata->partitions,
227 pdata->nr_partitions);
227 if (!err) 228 if (!err)
228 dev_info(&pdev->dev, "registered mtd device\n"); 229 dev_info(&pdev->dev, "registered mtd device\n");
229 230
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 436d121185b1..81884c277405 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -98,7 +98,8 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
98 } 98 }
99 info->mtd->owner = THIS_MODULE; 99 info->mtd->owner = THIS_MODULE;
100 100
101 mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts); 101 mtd_device_parse_register(info->mtd, probes, NULL, flash->parts,
102 flash->nr_parts);
102 103
103 platform_set_drvdata(pdev, info); 104 platform_set_drvdata(pdev, info);
104 return 0; 105 return 0;
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 3da63fc6f16e..6f52e1f288b6 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -102,8 +102,8 @@ static int rbtx4939_flash_probe(struct platform_device *dev)
102 info->mtd->owner = THIS_MODULE; 102 info->mtd->owner = THIS_MODULE;
103 if (err) 103 if (err)
104 goto err_out; 104 goto err_out;
105 err = mtd_device_parse_register(info->mtd, NULL, 0, 105 err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts,
106 pdata->parts, pdata->nr_parts); 106 pdata->nr_parts);
107 107
108 if (err) 108 if (err)
109 goto err_out; 109 goto err_out;
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index cbc3b7867910..a675bdbcb0fe 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -36,10 +36,22 @@ struct sa_info {
36 struct sa_subdev_info subdev[0]; 36 struct sa_subdev_info subdev[0];
37}; 37};
38 38
39static DEFINE_SPINLOCK(sa1100_vpp_lock);
40static int sa1100_vpp_refcnt;
39static void sa1100_set_vpp(struct map_info *map, int on) 41static void sa1100_set_vpp(struct map_info *map, int on)
40{ 42{
41 struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map); 43 struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map);
42 subdev->plat->set_vpp(on); 44 unsigned long flags;
45
46 spin_lock_irqsave(&sa1100_vpp_lock, flags);
47 if (on) {
48 if (++sa1100_vpp_refcnt == 1) /* first nested 'on' */
49 subdev->plat->set_vpp(1);
50 } else {
51 if (--sa1100_vpp_refcnt == 0) /* last nested 'off' */
52 subdev->plat->set_vpp(0);
53 }
54 spin_unlock_irqrestore(&sa1100_vpp_lock, flags);
43} 55}
44 56
45static void sa1100_destroy_subdev(struct sa_subdev_info *subdev) 57static void sa1100_destroy_subdev(struct sa_subdev_info *subdev)
@@ -252,8 +264,8 @@ static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
252 /* 264 /*
253 * Partition selection stuff. 265 * Partition selection stuff.
254 */ 266 */
255 mtd_device_parse_register(info->mtd, part_probes, 0, 267 mtd_device_parse_register(info->mtd, part_probes, NULL, plat->parts,
256 plat->parts, plat->nr_parts); 268 plat->nr_parts);
257 269
258 platform_set_drvdata(pdev, info); 270 platform_set_drvdata(pdev, info);
259 err = 0; 271 err = 0;
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index 496c40704aff..9d900ada6708 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -92,8 +92,8 @@ static int __init init_soleng_maps(void)
92 mtd_device_register(eprom_mtd, NULL, 0); 92 mtd_device_register(eprom_mtd, NULL, 0);
93 } 93 }
94 94
95 mtd_device_parse_register(flash_mtd, probes, 0, 95 mtd_device_parse_register(flash_mtd, probes, NULL,
96 superh_se_partitions, NUM_PARTITIONS); 96 superh_se_partitions, NUM_PARTITIONS);
97 97
98 return 0; 98 return 0;
99} 99}
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 6793074f3f40..cfff454f628b 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -85,7 +85,7 @@ static int __init uclinux_mtd_init(void)
85 } 85 }
86 86
87 mtd->owner = THIS_MODULE; 87 mtd->owner = THIS_MODULE;
88 mtd->point = uclinux_point; 88 mtd->_point = uclinux_point;
89 mtd->priv = mapp; 89 mtd->priv = mapp;
90 90
91 uclinux_ram_mtdinfo = mtd; 91 uclinux_ram_mtdinfo = mtd;
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c
index 3a04b078576a..2e2b0945edc7 100644
--- a/drivers/mtd/maps/vmu-flash.c
+++ b/drivers/mtd/maps/vmu-flash.c
@@ -360,9 +360,6 @@ static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
360 int index = 0, retval, partition, leftover, numblocks; 360 int index = 0, retval, partition, leftover, numblocks;
361 unsigned char cx; 361 unsigned char cx;
362 362
363 if (len < 1)
364 return -EIO;
365
366 mpart = mtd->priv; 363 mpart = mtd->priv;
367 mdev = mpart->mdev; 364 mdev = mpart->mdev;
368 partition = mpart->partition; 365 partition = mpart->partition;
@@ -434,11 +431,6 @@ static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
434 partition = mpart->partition; 431 partition = mpart->partition;
435 card = maple_get_drvdata(mdev); 432 card = maple_get_drvdata(mdev);
436 433
437 /* simple sanity checks */
438 if (len < 1) {
439 error = -EIO;
440 goto failed;
441 }
442 numblocks = card->parts[partition].numblocks; 434 numblocks = card->parts[partition].numblocks;
443 if (to + len > numblocks * card->blocklen) 435 if (to + len > numblocks * card->blocklen)
444 len = numblocks * card->blocklen - to; 436 len = numblocks * card->blocklen - to;
@@ -544,9 +536,9 @@ static void vmu_queryblocks(struct mapleq *mq)
544 mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE; 536 mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE;
545 mtd_cur->size = part_cur->numblocks * card->blocklen; 537 mtd_cur->size = part_cur->numblocks * card->blocklen;
546 mtd_cur->erasesize = card->blocklen; 538 mtd_cur->erasesize = card->blocklen;
547 mtd_cur->write = vmu_flash_write; 539 mtd_cur->_write = vmu_flash_write;
548 mtd_cur->read = vmu_flash_read; 540 mtd_cur->_read = vmu_flash_read;
549 mtd_cur->sync = vmu_flash_sync; 541 mtd_cur->_sync = vmu_flash_sync;
550 mtd_cur->writesize = card->blocklen; 542 mtd_cur->writesize = card->blocklen;
551 543
552 mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL); 544 mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL);
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index aa7e0cb2893c..71b0ba797912 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -142,7 +142,7 @@ static int __init init_sbc82xx_flash(void)
142 nr_parts = ARRAY_SIZE(smallflash_parts); 142 nr_parts = ARRAY_SIZE(smallflash_parts);
143 } 143 }
144 144
145 mtd_device_parse_register(sbcmtd[i], part_probes, 0, 145 mtd_device_parse_register(sbcmtd[i], part_probes, NULL,
146 defparts, nr_parts); 146 defparts, nr_parts);
147 } 147 }
148 return 0; 148 return 0;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 424ca5f93c6c..f1f06715d4e0 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -233,6 +233,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
233 ret = __get_mtd_device(dev->mtd); 233 ret = __get_mtd_device(dev->mtd);
234 if (ret) 234 if (ret)
235 goto error_release; 235 goto error_release;
236 dev->file_mode = mode;
236 237
237unlock: 238unlock:
238 dev->open++; 239 dev->open++;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index af6591237b9b..6c6d80736fad 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -321,8 +321,12 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
321 mutex_unlock(&mtdblk->cache_mutex); 321 mutex_unlock(&mtdblk->cache_mutex);
322 322
323 if (!--mtdblk->count) { 323 if (!--mtdblk->count) {
324 /* It was the last usage. Free the cache */ 324 /*
325 mtd_sync(mbd->mtd); 325 * It was the last usage. Free the cache, but only sync if
326 * opened for writing.
327 */
328 if (mbd->file_mode & FMODE_WRITE)
329 mtd_sync(mbd->mtd);
326 vfree(mtdblk->cache_data); 330 vfree(mtdblk->cache_data);
327 } 331 }
328 332
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index c57ae92ebda4..94eb05b1afdf 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -39,7 +39,6 @@
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
40 40
41static DEFINE_MUTEX(mtd_mutex); 41static DEFINE_MUTEX(mtd_mutex);
42static struct vfsmount *mtd_inode_mnt __read_mostly;
43 42
44/* 43/*
45 * Data structure to hold the pointer to the mtd device as well 44 * Data structure to hold the pointer to the mtd device as well
@@ -75,7 +74,9 @@ static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
75 return -EINVAL; 74 return -EINVAL;
76} 75}
77 76
78 77static int count;
78static struct vfsmount *mnt;
79static struct file_system_type mtd_inodefs_type;
79 80
80static int mtdchar_open(struct inode *inode, struct file *file) 81static int mtdchar_open(struct inode *inode, struct file *file)
81{ 82{
@@ -92,6 +93,10 @@ static int mtdchar_open(struct inode *inode, struct file *file)
92 if ((file->f_mode & FMODE_WRITE) && (minor & 1)) 93 if ((file->f_mode & FMODE_WRITE) && (minor & 1))
93 return -EACCES; 94 return -EACCES;
94 95
96 ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count);
97 if (ret)
98 return ret;
99
95 mutex_lock(&mtd_mutex); 100 mutex_lock(&mtd_mutex);
96 mtd = get_mtd_device(NULL, devnum); 101 mtd = get_mtd_device(NULL, devnum);
97 102
@@ -106,7 +111,7 @@ static int mtdchar_open(struct inode *inode, struct file *file)
106 goto out; 111 goto out;
107 } 112 }
108 113
109 mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum); 114 mtd_ino = iget_locked(mnt->mnt_sb, devnum);
110 if (!mtd_ino) { 115 if (!mtd_ino) {
111 put_mtd_device(mtd); 116 put_mtd_device(mtd);
112 ret = -ENOMEM; 117 ret = -ENOMEM;
@@ -141,6 +146,7 @@ static int mtdchar_open(struct inode *inode, struct file *file)
141 146
142out: 147out:
143 mutex_unlock(&mtd_mutex); 148 mutex_unlock(&mtd_mutex);
149 simple_release_fs(&mnt, &count);
144 return ret; 150 return ret;
145} /* mtdchar_open */ 151} /* mtdchar_open */
146 152
@@ -162,6 +168,7 @@ static int mtdchar_close(struct inode *inode, struct file *file)
162 put_mtd_device(mtd); 168 put_mtd_device(mtd);
163 file->private_data = NULL; 169 file->private_data = NULL;
164 kfree(mfi); 170 kfree(mfi);
171 simple_release_fs(&mnt, &count);
165 172
166 return 0; 173 return 0;
167} /* mtdchar_close */ 174} /* mtdchar_close */
@@ -405,7 +412,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
405 if (length > 4096) 412 if (length > 4096)
406 return -EINVAL; 413 return -EINVAL;
407 414
408 if (!mtd->write_oob) 415 if (!mtd->_write_oob)
409 ret = -EOPNOTSUPP; 416 ret = -EOPNOTSUPP;
410 else 417 else
411 ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT; 418 ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
@@ -576,7 +583,7 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
576 !access_ok(VERIFY_READ, req.usr_data, req.len) || 583 !access_ok(VERIFY_READ, req.usr_data, req.len) ||
577 !access_ok(VERIFY_READ, req.usr_oob, req.ooblen)) 584 !access_ok(VERIFY_READ, req.usr_oob, req.ooblen))
578 return -EFAULT; 585 return -EFAULT;
579 if (!mtd->write_oob) 586 if (!mtd->_write_oob)
580 return -EOPNOTSUPP; 587 return -EOPNOTSUPP;
581 588
582 ops.mode = req.mode; 589 ops.mode = req.mode;
@@ -1175,10 +1182,15 @@ static const struct file_operations mtd_fops = {
1175#endif 1182#endif
1176}; 1183};
1177 1184
1185static const struct super_operations mtd_ops = {
1186 .drop_inode = generic_delete_inode,
1187 .statfs = simple_statfs,
1188};
1189
1178static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type, 1190static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
1179 int flags, const char *dev_name, void *data) 1191 int flags, const char *dev_name, void *data)
1180{ 1192{
1181 return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL, MTD_INODE_FS_MAGIC); 1193 return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL, MTD_INODE_FS_MAGIC);
1182} 1194}
1183 1195
1184static struct file_system_type mtd_inodefs_type = { 1196static struct file_system_type mtd_inodefs_type = {
@@ -1187,26 +1199,6 @@ static struct file_system_type mtd_inodefs_type = {
1187 .kill_sb = kill_anon_super, 1199 .kill_sb = kill_anon_super,
1188}; 1200};
1189 1201
1190static void mtdchar_notify_add(struct mtd_info *mtd)
1191{
1192}
1193
1194static void mtdchar_notify_remove(struct mtd_info *mtd)
1195{
1196 struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);
1197
1198 if (mtd_ino) {
1199 /* Destroy the inode if it exists */
1200 clear_nlink(mtd_ino);
1201 iput(mtd_ino);
1202 }
1203}
1204
1205static struct mtd_notifier mtdchar_notifier = {
1206 .add = mtdchar_notify_add,
1207 .remove = mtdchar_notify_remove,
1208};
1209
1210static int __init init_mtdchar(void) 1202static int __init init_mtdchar(void)
1211{ 1203{
1212 int ret; 1204 int ret;
@@ -1224,19 +1216,8 @@ static int __init init_mtdchar(void)
1224 pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret); 1216 pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
1225 goto err_unregister_chdev; 1217 goto err_unregister_chdev;
1226 } 1218 }
1227
1228 mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
1229 if (IS_ERR(mtd_inode_mnt)) {
1230 ret = PTR_ERR(mtd_inode_mnt);
1231 pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
1232 goto err_unregister_filesystem;
1233 }
1234 register_mtd_user(&mtdchar_notifier);
1235
1236 return ret; 1219 return ret;
1237 1220
1238err_unregister_filesystem:
1239 unregister_filesystem(&mtd_inodefs_type);
1240err_unregister_chdev: 1221err_unregister_chdev:
1241 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); 1222 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1242 return ret; 1223 return ret;
@@ -1244,8 +1225,6 @@ err_unregister_chdev:
1244 1225
1245static void __exit cleanup_mtdchar(void) 1226static void __exit cleanup_mtdchar(void)
1246{ 1227{
1247 unregister_mtd_user(&mtdchar_notifier);
1248 kern_unmount(mtd_inode_mnt);
1249 unregister_filesystem(&mtd_inodefs_type); 1228 unregister_filesystem(&mtd_inodefs_type);
1250 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); 1229 __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1251} 1230}
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 1ed5103b219b..b9000563b9f4 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -72,8 +72,6 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
72 int ret = 0, err; 72 int ret = 0, err;
73 int i; 73 int i;
74 74
75 *retlen = 0;
76
77 for (i = 0; i < concat->num_subdev; i++) { 75 for (i = 0; i < concat->num_subdev; i++) {
78 struct mtd_info *subdev = concat->subdev[i]; 76 struct mtd_info *subdev = concat->subdev[i];
79 size_t size, retsize; 77 size_t size, retsize;
@@ -126,11 +124,6 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
126 int err = -EINVAL; 124 int err = -EINVAL;
127 int i; 125 int i;
128 126
129 if (!(mtd->flags & MTD_WRITEABLE))
130 return -EROFS;
131
132 *retlen = 0;
133
134 for (i = 0; i < concat->num_subdev; i++) { 127 for (i = 0; i < concat->num_subdev; i++) {
135 struct mtd_info *subdev = concat->subdev[i]; 128 struct mtd_info *subdev = concat->subdev[i];
136 size_t size, retsize; 129 size_t size, retsize;
@@ -145,11 +138,7 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
145 else 138 else
146 size = len; 139 size = len;
147 140
148 if (!(subdev->flags & MTD_WRITEABLE)) 141 err = mtd_write(subdev, to, size, &retsize, buf);
149 err = -EROFS;
150 else
151 err = mtd_write(subdev, to, size, &retsize, buf);
152
153 if (err) 142 if (err)
154 break; 143 break;
155 144
@@ -176,19 +165,10 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
176 int i; 165 int i;
177 int err = -EINVAL; 166 int err = -EINVAL;
178 167
179 if (!(mtd->flags & MTD_WRITEABLE))
180 return -EROFS;
181
182 *retlen = 0;
183
184 /* Calculate total length of data */ 168 /* Calculate total length of data */
185 for (i = 0; i < count; i++) 169 for (i = 0; i < count; i++)
186 total_len += vecs[i].iov_len; 170 total_len += vecs[i].iov_len;
187 171
188 /* Do not allow write past end of device */
189 if ((to + total_len) > mtd->size)
190 return -EINVAL;
191
192 /* Check alignment */ 172 /* Check alignment */
193 if (mtd->writesize > 1) { 173 if (mtd->writesize > 1) {
194 uint64_t __to = to; 174 uint64_t __to = to;
@@ -224,12 +204,8 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
224 old_iov_len = vecs_copy[entry_high].iov_len; 204 old_iov_len = vecs_copy[entry_high].iov_len;
225 vecs_copy[entry_high].iov_len = size; 205 vecs_copy[entry_high].iov_len = size;
226 206
227 if (!(subdev->flags & MTD_WRITEABLE)) 207 err = mtd_writev(subdev, &vecs_copy[entry_low],
228 err = -EROFS; 208 entry_high - entry_low + 1, to, &retsize);
229 else
230 err = mtd_writev(subdev, &vecs_copy[entry_low],
231 entry_high - entry_low + 1, to,
232 &retsize);
233 209
234 vecs_copy[entry_high].iov_len = old_iov_len - size; 210 vecs_copy[entry_high].iov_len = old_iov_len - size;
235 vecs_copy[entry_high].iov_base += size; 211 vecs_copy[entry_high].iov_base += size;
@@ -403,15 +379,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
403 uint64_t length, offset = 0; 379 uint64_t length, offset = 0;
404 struct erase_info *erase; 380 struct erase_info *erase;
405 381
406 if (!(mtd->flags & MTD_WRITEABLE))
407 return -EROFS;
408
409 if (instr->addr > concat->mtd.size)
410 return -EINVAL;
411
412 if (instr->len + instr->addr > concat->mtd.size)
413 return -EINVAL;
414
415 /* 382 /*
416 * Check for proper erase block alignment of the to-be-erased area. 383 * Check for proper erase block alignment of the to-be-erased area.
417 * It is easier to do this based on the super device's erase 384 * It is easier to do this based on the super device's erase
@@ -459,8 +426,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
459 return -EINVAL; 426 return -EINVAL;
460 } 427 }
461 428
462 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
463
464 /* make a local copy of instr to avoid modifying the caller's struct */ 429 /* make a local copy of instr to avoid modifying the caller's struct */
465 erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL); 430 erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
466 431
@@ -499,10 +464,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
499 else 464 else
500 erase->len = length; 465 erase->len = length;
501 466
502 if (!(subdev->flags & MTD_WRITEABLE)) {
503 err = -EROFS;
504 break;
505 }
506 length -= erase->len; 467 length -= erase->len;
507 if ((err = concat_dev_erase(subdev, erase))) { 468 if ((err = concat_dev_erase(subdev, erase))) {
508 /* sanity check: should never happen since 469 /* sanity check: should never happen since
@@ -538,9 +499,6 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
538 struct mtd_concat *concat = CONCAT(mtd); 499 struct mtd_concat *concat = CONCAT(mtd);
539 int i, err = -EINVAL; 500 int i, err = -EINVAL;
540 501
541 if ((len + ofs) > mtd->size)
542 return -EINVAL;
543
544 for (i = 0; i < concat->num_subdev; i++) { 502 for (i = 0; i < concat->num_subdev; i++) {
545 struct mtd_info *subdev = concat->subdev[i]; 503 struct mtd_info *subdev = concat->subdev[i];
546 uint64_t size; 504 uint64_t size;
@@ -575,9 +533,6 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
575 struct mtd_concat *concat = CONCAT(mtd); 533 struct mtd_concat *concat = CONCAT(mtd);
576 int i, err = 0; 534 int i, err = 0;
577 535
578 if ((len + ofs) > mtd->size)
579 return -EINVAL;
580
581 for (i = 0; i < concat->num_subdev; i++) { 536 for (i = 0; i < concat->num_subdev; i++) {
582 struct mtd_info *subdev = concat->subdev[i]; 537 struct mtd_info *subdev = concat->subdev[i];
583 uint64_t size; 538 uint64_t size;
@@ -650,9 +605,6 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
650 if (!mtd_can_have_bb(concat->subdev[0])) 605 if (!mtd_can_have_bb(concat->subdev[0]))
651 return res; 606 return res;
652 607
653 if (ofs > mtd->size)
654 return -EINVAL;
655
656 for (i = 0; i < concat->num_subdev; i++) { 608 for (i = 0; i < concat->num_subdev; i++) {
657 struct mtd_info *subdev = concat->subdev[i]; 609 struct mtd_info *subdev = concat->subdev[i];
658 610
@@ -673,12 +625,6 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
673 struct mtd_concat *concat = CONCAT(mtd); 625 struct mtd_concat *concat = CONCAT(mtd);
674 int i, err = -EINVAL; 626 int i, err = -EINVAL;
675 627
676 if (!mtd_can_have_bb(concat->subdev[0]))
677 return 0;
678
679 if (ofs > mtd->size)
680 return -EINVAL;
681
682 for (i = 0; i < concat->num_subdev; i++) { 628 for (i = 0; i < concat->num_subdev; i++) {
683 struct mtd_info *subdev = concat->subdev[i]; 629 struct mtd_info *subdev = concat->subdev[i];
684 630
@@ -716,10 +662,6 @@ static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
716 continue; 662 continue;
717 } 663 }
718 664
719 /* we've found the subdev over which the mapping will reside */
720 if (offset + len > subdev->size)
721 return (unsigned long) -EINVAL;
722
723 return mtd_get_unmapped_area(subdev, len, offset, flags); 665 return mtd_get_unmapped_area(subdev, len, offset, flags);
724 } 666 }
725 667
@@ -777,16 +719,16 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
777 concat->mtd.subpage_sft = subdev[0]->subpage_sft; 719 concat->mtd.subpage_sft = subdev[0]->subpage_sft;
778 concat->mtd.oobsize = subdev[0]->oobsize; 720 concat->mtd.oobsize = subdev[0]->oobsize;
779 concat->mtd.oobavail = subdev[0]->oobavail; 721 concat->mtd.oobavail = subdev[0]->oobavail;
780 if (subdev[0]->writev) 722 if (subdev[0]->_writev)
781 concat->mtd.writev = concat_writev; 723 concat->mtd._writev = concat_writev;
782 if (subdev[0]->read_oob) 724 if (subdev[0]->_read_oob)
783 concat->mtd.read_oob = concat_read_oob; 725 concat->mtd._read_oob = concat_read_oob;
784 if (subdev[0]->write_oob) 726 if (subdev[0]->_write_oob)
785 concat->mtd.write_oob = concat_write_oob; 727 concat->mtd._write_oob = concat_write_oob;
786 if (subdev[0]->block_isbad) 728 if (subdev[0]->_block_isbad)
787 concat->mtd.block_isbad = concat_block_isbad; 729 concat->mtd._block_isbad = concat_block_isbad;
788 if (subdev[0]->block_markbad) 730 if (subdev[0]->_block_markbad)
789 concat->mtd.block_markbad = concat_block_markbad; 731 concat->mtd._block_markbad = concat_block_markbad;
790 732
791 concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks; 733 concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
792 734
@@ -833,8 +775,8 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
833 if (concat->mtd.writesize != subdev[i]->writesize || 775 if (concat->mtd.writesize != subdev[i]->writesize ||
834 concat->mtd.subpage_sft != subdev[i]->subpage_sft || 776 concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
835 concat->mtd.oobsize != subdev[i]->oobsize || 777 concat->mtd.oobsize != subdev[i]->oobsize ||
836 !concat->mtd.read_oob != !subdev[i]->read_oob || 778 !concat->mtd._read_oob != !subdev[i]->_read_oob ||
837 !concat->mtd.write_oob != !subdev[i]->write_oob) { 779 !concat->mtd._write_oob != !subdev[i]->_write_oob) {
838 kfree(concat); 780 kfree(concat);
839 printk("Incompatible OOB or ECC data on \"%s\"\n", 781 printk("Incompatible OOB or ECC data on \"%s\"\n",
840 subdev[i]->name); 782 subdev[i]->name);
@@ -849,15 +791,15 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
849 concat->num_subdev = num_devs; 791 concat->num_subdev = num_devs;
850 concat->mtd.name = name; 792 concat->mtd.name = name;
851 793
852 concat->mtd.erase = concat_erase; 794 concat->mtd._erase = concat_erase;
853 concat->mtd.read = concat_read; 795 concat->mtd._read = concat_read;
854 concat->mtd.write = concat_write; 796 concat->mtd._write = concat_write;
855 concat->mtd.sync = concat_sync; 797 concat->mtd._sync = concat_sync;
856 concat->mtd.lock = concat_lock; 798 concat->mtd._lock = concat_lock;
857 concat->mtd.unlock = concat_unlock; 799 concat->mtd._unlock = concat_unlock;
858 concat->mtd.suspend = concat_suspend; 800 concat->mtd._suspend = concat_suspend;
859 concat->mtd.resume = concat_resume; 801 concat->mtd._resume = concat_resume;
860 concat->mtd.get_unmapped_area = concat_get_unmapped_area; 802 concat->mtd._get_unmapped_area = concat_get_unmapped_area;
861 803
862 /* 804 /*
863 * Combine the erase block size info of the subdevices: 805 * Combine the erase block size info of the subdevices:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 9a9ce71a71fc..c837507dfb1c 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -107,7 +107,7 @@ static LIST_HEAD(mtd_notifiers);
107 */ 107 */
108static void mtd_release(struct device *dev) 108static void mtd_release(struct device *dev)
109{ 109{
110 struct mtd_info *mtd = dev_get_drvdata(dev); 110 struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev);
111 dev_t index = MTD_DEVT(mtd->index); 111 dev_t index = MTD_DEVT(mtd->index);
112 112
113 /* remove /dev/mtdXro node if needed */ 113 /* remove /dev/mtdXro node if needed */
@@ -126,7 +126,7 @@ static int mtd_cls_resume(struct device *dev)
126{ 126{
127 struct mtd_info *mtd = dev_get_drvdata(dev); 127 struct mtd_info *mtd = dev_get_drvdata(dev);
128 128
129 if (mtd && mtd->resume) 129 if (mtd)
130 mtd_resume(mtd); 130 mtd_resume(mtd);
131 return 0; 131 return 0;
132} 132}
@@ -610,8 +610,8 @@ int __get_mtd_device(struct mtd_info *mtd)
610 if (!try_module_get(mtd->owner)) 610 if (!try_module_get(mtd->owner))
611 return -ENODEV; 611 return -ENODEV;
612 612
613 if (mtd->get_device) { 613 if (mtd->_get_device) {
614 err = mtd->get_device(mtd); 614 err = mtd->_get_device(mtd);
615 615
616 if (err) { 616 if (err) {
617 module_put(mtd->owner); 617 module_put(mtd->owner);
@@ -675,14 +675,267 @@ void __put_mtd_device(struct mtd_info *mtd)
675 --mtd->usecount; 675 --mtd->usecount;
676 BUG_ON(mtd->usecount < 0); 676 BUG_ON(mtd->usecount < 0);
677 677
678 if (mtd->put_device) 678 if (mtd->_put_device)
679 mtd->put_device(mtd); 679 mtd->_put_device(mtd);
680 680
681 module_put(mtd->owner); 681 module_put(mtd->owner);
682} 682}
683EXPORT_SYMBOL_GPL(__put_mtd_device); 683EXPORT_SYMBOL_GPL(__put_mtd_device);
684 684
685/* 685/*
686 * Erase is an asynchronous operation. Device drivers are supposed
687 * to call instr->callback() whenever the operation completes, even
688 * if it completes with a failure.
689 * Callers are supposed to pass a callback function and wait for it
690 * to be called before writing to the block.
691 */
692int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
693{
694 if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
695 return -EINVAL;
696 if (!(mtd->flags & MTD_WRITEABLE))
697 return -EROFS;
698 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
699 if (!instr->len) {
700 instr->state = MTD_ERASE_DONE;
701 mtd_erase_callback(instr);
702 return 0;
703 }
704 return mtd->_erase(mtd, instr);
705}
706EXPORT_SYMBOL_GPL(mtd_erase);
707
708/*
709 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
710 */
711int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
712 void **virt, resource_size_t *phys)
713{
714 *retlen = 0;
715 *virt = NULL;
716 if (phys)
717 *phys = 0;
718 if (!mtd->_point)
719 return -EOPNOTSUPP;
720 if (from < 0 || from > mtd->size || len > mtd->size - from)
721 return -EINVAL;
722 if (!len)
723 return 0;
724 return mtd->_point(mtd, from, len, retlen, virt, phys);
725}
726EXPORT_SYMBOL_GPL(mtd_point);
727
728/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
729int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
730{
731 if (!mtd->_point)
732 return -EOPNOTSUPP;
733 if (from < 0 || from > mtd->size || len > mtd->size - from)
734 return -EINVAL;
735 if (!len)
736 return 0;
737 return mtd->_unpoint(mtd, from, len);
738}
739EXPORT_SYMBOL_GPL(mtd_unpoint);
740
741/*
742 * Allow NOMMU mmap() to directly map the device (if not NULL)
743 * - return the address to which the offset maps
744 * - return -ENOSYS to indicate refusal to do the mapping
745 */
746unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
747 unsigned long offset, unsigned long flags)
748{
749 if (!mtd->_get_unmapped_area)
750 return -EOPNOTSUPP;
751 if (offset > mtd->size || len > mtd->size - offset)
752 return -EINVAL;
753 return mtd->_get_unmapped_area(mtd, len, offset, flags);
754}
755EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
756
757int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
758 u_char *buf)
759{
760 *retlen = 0;
761 if (from < 0 || from > mtd->size || len > mtd->size - from)
762 return -EINVAL;
763 if (!len)
764 return 0;
765 return mtd->_read(mtd, from, len, retlen, buf);
766}
767EXPORT_SYMBOL_GPL(mtd_read);
768
769int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
770 const u_char *buf)
771{
772 *retlen = 0;
773 if (to < 0 || to > mtd->size || len > mtd->size - to)
774 return -EINVAL;
775 if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
776 return -EROFS;
777 if (!len)
778 return 0;
779 return mtd->_write(mtd, to, len, retlen, buf);
780}
781EXPORT_SYMBOL_GPL(mtd_write);
782
783/*
784 * In blackbox flight recorder like scenarios we want to make successful writes
785 * in interrupt context. panic_write() is only intended to be called when its
786 * known the kernel is about to panic and we need the write to succeed. Since
787 * the kernel is not going to be running for much longer, this function can
788 * break locks and delay to ensure the write succeeds (but not sleep).
789 */
790int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
791 const u_char *buf)
792{
793 *retlen = 0;
794 if (!mtd->_panic_write)
795 return -EOPNOTSUPP;
796 if (to < 0 || to > mtd->size || len > mtd->size - to)
797 return -EINVAL;
798 if (!(mtd->flags & MTD_WRITEABLE))
799 return -EROFS;
800 if (!len)
801 return 0;
802 return mtd->_panic_write(mtd, to, len, retlen, buf);
803}
804EXPORT_SYMBOL_GPL(mtd_panic_write);
805
806/*
807 * Method to access the protection register area, present in some flash
808 * devices. The user data is one time programmable but the factory data is read
809 * only.
810 */
811int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
812 size_t len)
813{
814 if (!mtd->_get_fact_prot_info)
815 return -EOPNOTSUPP;
816 if (!len)
817 return 0;
818 return mtd->_get_fact_prot_info(mtd, buf, len);
819}
820EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
821
822int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
823 size_t *retlen, u_char *buf)
824{
825 *retlen = 0;
826 if (!mtd->_read_fact_prot_reg)
827 return -EOPNOTSUPP;
828 if (!len)
829 return 0;
830 return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
831}
832EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
833
834int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
835 size_t len)
836{
837 if (!mtd->_get_user_prot_info)
838 return -EOPNOTSUPP;
839 if (!len)
840 return 0;
841 return mtd->_get_user_prot_info(mtd, buf, len);
842}
843EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
844
845int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
846 size_t *retlen, u_char *buf)
847{
848 *retlen = 0;
849 if (!mtd->_read_user_prot_reg)
850 return -EOPNOTSUPP;
851 if (!len)
852 return 0;
853 return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
854}
855EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
856
857int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
858 size_t *retlen, u_char *buf)
859{
860 *retlen = 0;
861 if (!mtd->_write_user_prot_reg)
862 return -EOPNOTSUPP;
863 if (!len)
864 return 0;
865 return mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
866}
867EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
868
869int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
870{
871 if (!mtd->_lock_user_prot_reg)
872 return -EOPNOTSUPP;
873 if (!len)
874 return 0;
875 return mtd->_lock_user_prot_reg(mtd, from, len);
876}
877EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
878
879/* Chip-supported device locking */
880int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
881{
882 if (!mtd->_lock)
883 return -EOPNOTSUPP;
884 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
885 return -EINVAL;
886 if (!len)
887 return 0;
888 return mtd->_lock(mtd, ofs, len);
889}
890EXPORT_SYMBOL_GPL(mtd_lock);
891
892int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
893{
894 if (!mtd->_unlock)
895 return -EOPNOTSUPP;
896 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
897 return -EINVAL;
898 if (!len)
899 return 0;
900 return mtd->_unlock(mtd, ofs, len);
901}
902EXPORT_SYMBOL_GPL(mtd_unlock);
903
904int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
905{
906 if (!mtd->_is_locked)
907 return -EOPNOTSUPP;
908 if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
909 return -EINVAL;
910 if (!len)
911 return 0;
912 return mtd->_is_locked(mtd, ofs, len);
913}
914EXPORT_SYMBOL_GPL(mtd_is_locked);
915
916int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
917{
918 if (!mtd->_block_isbad)
919 return 0;
920 if (ofs < 0 || ofs > mtd->size)
921 return -EINVAL;
922 return mtd->_block_isbad(mtd, ofs);
923}
924EXPORT_SYMBOL_GPL(mtd_block_isbad);
925
926int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
927{
928 if (!mtd->_block_markbad)
929 return -EOPNOTSUPP;
930 if (ofs < 0 || ofs > mtd->size)
931 return -EINVAL;
932 if (!(mtd->flags & MTD_WRITEABLE))
933 return -EROFS;
934 return mtd->_block_markbad(mtd, ofs);
935}
936EXPORT_SYMBOL_GPL(mtd_block_markbad);
937
938/*
686 * default_mtd_writev - the default writev method 939 * default_mtd_writev - the default writev method
687 * @mtd: mtd device description object pointer 940 * @mtd: mtd device description object pointer
688 * @vecs: the vectors to write 941 * @vecs: the vectors to write
@@ -729,9 +982,11 @@ int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
729 unsigned long count, loff_t to, size_t *retlen) 982 unsigned long count, loff_t to, size_t *retlen)
730{ 983{
731 *retlen = 0; 984 *retlen = 0;
732 if (!mtd->writev) 985 if (!(mtd->flags & MTD_WRITEABLE))
986 return -EROFS;
987 if (!mtd->_writev)
733 return default_mtd_writev(mtd, vecs, count, to, retlen); 988 return default_mtd_writev(mtd, vecs, count, to, retlen);
734 return mtd->writev(mtd, vecs, count, to, retlen); 989 return mtd->_writev(mtd, vecs, count, to, retlen);
735} 990}
736EXPORT_SYMBOL_GPL(mtd_writev); 991EXPORT_SYMBOL_GPL(mtd_writev);
737 992
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 3ce99e00a49e..ae36d7e1e913 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -169,7 +169,7 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
169 cxt->nextpage = 0; 169 cxt->nextpage = 0;
170 } 170 }
171 171
172 while (mtd_can_have_bb(mtd)) { 172 while (1) {
173 ret = mtd_block_isbad(mtd, cxt->nextpage * record_size); 173 ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
174 if (!ret) 174 if (!ret)
175 break; 175 break;
@@ -199,9 +199,9 @@ badblock:
199 return; 199 return;
200 } 200 }
201 201
202 if (mtd_can_have_bb(mtd) && ret == -EIO) { 202 if (ret == -EIO) {
203 ret = mtd_block_markbad(mtd, cxt->nextpage * record_size); 203 ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
204 if (ret < 0) { 204 if (ret < 0 && ret != -EOPNOTSUPP) {
205 printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n"); 205 printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
206 return; 206 return;
207 } 207 }
@@ -257,8 +257,7 @@ static void find_next_position(struct mtdoops_context *cxt)
257 size_t retlen; 257 size_t retlen;
258 258
259 for (page = 0; page < cxt->oops_pages; page++) { 259 for (page = 0; page < cxt->oops_pages; page++) {
260 if (mtd_can_have_bb(mtd) && 260 if (mtd_block_isbad(mtd, page * record_size))
261 mtd_block_isbad(mtd, page * record_size))
262 continue; 261 continue;
263 /* Assume the page is used */ 262 /* Assume the page is used */
264 mark_page_used(cxt, page); 263 mark_page_used(cxt, page);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a3d44c3416b4..9651c06de0a9 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -65,12 +65,8 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
65 int res; 65 int res;
66 66
67 stats = part->master->ecc_stats; 67 stats = part->master->ecc_stats;
68 68 res = part->master->_read(part->master, from + part->offset, len,
69 if (from >= mtd->size) 69 retlen, buf);
70 len = 0;
71 else if (from + len > mtd->size)
72 len = mtd->size - from;
73 res = mtd_read(part->master, from + part->offset, len, retlen, buf);
74 if (unlikely(res)) { 70 if (unlikely(res)) {
75 if (mtd_is_bitflip(res)) 71 if (mtd_is_bitflip(res))
76 mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; 72 mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
@@ -84,19 +80,16 @@ static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
84 size_t *retlen, void **virt, resource_size_t *phys) 80 size_t *retlen, void **virt, resource_size_t *phys)
85{ 81{
86 struct mtd_part *part = PART(mtd); 82 struct mtd_part *part = PART(mtd);
87 if (from >= mtd->size) 83
88 len = 0; 84 return part->master->_point(part->master, from + part->offset, len,
89 else if (from + len > mtd->size) 85 retlen, virt, phys);
90 len = mtd->size - from;
91 return mtd_point(part->master, from + part->offset, len, retlen,
92 virt, phys);
93} 86}
94 87
95static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len) 88static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
96{ 89{
97 struct mtd_part *part = PART(mtd); 90 struct mtd_part *part = PART(mtd);
98 91
99 mtd_unpoint(part->master, from + part->offset, len); 92 return part->master->_unpoint(part->master, from + part->offset, len);
100} 93}
101 94
102static unsigned long part_get_unmapped_area(struct mtd_info *mtd, 95static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
@@ -107,7 +100,8 @@ static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
107 struct mtd_part *part = PART(mtd); 100 struct mtd_part *part = PART(mtd);
108 101
109 offset += part->offset; 102 offset += part->offset;
110 return mtd_get_unmapped_area(part->master, len, offset, flags); 103 return part->master->_get_unmapped_area(part->master, len, offset,
104 flags);
111} 105}
112 106
113static int part_read_oob(struct mtd_info *mtd, loff_t from, 107static int part_read_oob(struct mtd_info *mtd, loff_t from,
@@ -138,7 +132,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
138 return -EINVAL; 132 return -EINVAL;
139 } 133 }
140 134
141 res = mtd_read_oob(part->master, from + part->offset, ops); 135 res = part->master->_read_oob(part->master, from + part->offset, ops);
142 if (unlikely(res)) { 136 if (unlikely(res)) {
143 if (mtd_is_bitflip(res)) 137 if (mtd_is_bitflip(res))
144 mtd->ecc_stats.corrected++; 138 mtd->ecc_stats.corrected++;
@@ -152,55 +146,46 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
152 size_t len, size_t *retlen, u_char *buf) 146 size_t len, size_t *retlen, u_char *buf)
153{ 147{
154 struct mtd_part *part = PART(mtd); 148 struct mtd_part *part = PART(mtd);
155 return mtd_read_user_prot_reg(part->master, from, len, retlen, buf); 149 return part->master->_read_user_prot_reg(part->master, from, len,
150 retlen, buf);
156} 151}
157 152
158static int part_get_user_prot_info(struct mtd_info *mtd, 153static int part_get_user_prot_info(struct mtd_info *mtd,
159 struct otp_info *buf, size_t len) 154 struct otp_info *buf, size_t len)
160{ 155{
161 struct mtd_part *part = PART(mtd); 156 struct mtd_part *part = PART(mtd);
162 return mtd_get_user_prot_info(part->master, buf, len); 157 return part->master->_get_user_prot_info(part->master, buf, len);
163} 158}
164 159
165static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, 160static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
166 size_t len, size_t *retlen, u_char *buf) 161 size_t len, size_t *retlen, u_char *buf)
167{ 162{
168 struct mtd_part *part = PART(mtd); 163 struct mtd_part *part = PART(mtd);
169 return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf); 164 return part->master->_read_fact_prot_reg(part->master, from, len,
165 retlen, buf);
170} 166}
171 167
172static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, 168static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
173 size_t len) 169 size_t len)
174{ 170{
175 struct mtd_part *part = PART(mtd); 171 struct mtd_part *part = PART(mtd);
176 return mtd_get_fact_prot_info(part->master, buf, len); 172 return part->master->_get_fact_prot_info(part->master, buf, len);
177} 173}
178 174
179static int part_write(struct mtd_info *mtd, loff_t to, size_t len, 175static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
180 size_t *retlen, const u_char *buf) 176 size_t *retlen, const u_char *buf)
181{ 177{
182 struct mtd_part *part = PART(mtd); 178 struct mtd_part *part = PART(mtd);
183 if (!(mtd->flags & MTD_WRITEABLE)) 179 return part->master->_write(part->master, to + part->offset, len,
184 return -EROFS; 180 retlen, buf);
185 if (to >= mtd->size)
186 len = 0;
187 else if (to + len > mtd->size)
188 len = mtd->size - to;
189 return mtd_write(part->master, to + part->offset, len, retlen, buf);
190} 181}
191 182
192static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 183static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
193 size_t *retlen, const u_char *buf) 184 size_t *retlen, const u_char *buf)
194{ 185{
195 struct mtd_part *part = PART(mtd); 186 struct mtd_part *part = PART(mtd);
196 if (!(mtd->flags & MTD_WRITEABLE)) 187 return part->master->_panic_write(part->master, to + part->offset, len,
197 return -EROFS; 188 retlen, buf);
198 if (to >= mtd->size)
199 len = 0;
200 else if (to + len > mtd->size)
201 len = mtd->size - to;
202 return mtd_panic_write(part->master, to + part->offset, len, retlen,
203 buf);
204} 189}
205 190
206static int part_write_oob(struct mtd_info *mtd, loff_t to, 191static int part_write_oob(struct mtd_info *mtd, loff_t to,
@@ -208,50 +193,43 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
208{ 193{
209 struct mtd_part *part = PART(mtd); 194 struct mtd_part *part = PART(mtd);
210 195
211 if (!(mtd->flags & MTD_WRITEABLE))
212 return -EROFS;
213
214 if (to >= mtd->size) 196 if (to >= mtd->size)
215 return -EINVAL; 197 return -EINVAL;
216 if (ops->datbuf && to + ops->len > mtd->size) 198 if (ops->datbuf && to + ops->len > mtd->size)
217 return -EINVAL; 199 return -EINVAL;
218 return mtd_write_oob(part->master, to + part->offset, ops); 200 return part->master->_write_oob(part->master, to + part->offset, ops);
219} 201}
220 202
221static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from, 203static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
222 size_t len, size_t *retlen, u_char *buf) 204 size_t len, size_t *retlen, u_char *buf)
223{ 205{
224 struct mtd_part *part = PART(mtd); 206 struct mtd_part *part = PART(mtd);
225 return mtd_write_user_prot_reg(part->master, from, len, retlen, buf); 207 return part->master->_write_user_prot_reg(part->master, from, len,
208 retlen, buf);
226} 209}
227 210
228static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, 211static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
229 size_t len) 212 size_t len)
230{ 213{
231 struct mtd_part *part = PART(mtd); 214 struct mtd_part *part = PART(mtd);
232 return mtd_lock_user_prot_reg(part->master, from, len); 215 return part->master->_lock_user_prot_reg(part->master, from, len);
233} 216}
234 217
235static int part_writev(struct mtd_info *mtd, const struct kvec *vecs, 218static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
236 unsigned long count, loff_t to, size_t *retlen) 219 unsigned long count, loff_t to, size_t *retlen)
237{ 220{
238 struct mtd_part *part = PART(mtd); 221 struct mtd_part *part = PART(mtd);
239 if (!(mtd->flags & MTD_WRITEABLE)) 222 return part->master->_writev(part->master, vecs, count,
240 return -EROFS; 223 to + part->offset, retlen);
241 return mtd_writev(part->master, vecs, count, to + part->offset,
242 retlen);
243} 224}
244 225
245static int part_erase(struct mtd_info *mtd, struct erase_info *instr) 226static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
246{ 227{
247 struct mtd_part *part = PART(mtd); 228 struct mtd_part *part = PART(mtd);
248 int ret; 229 int ret;
249 if (!(mtd->flags & MTD_WRITEABLE)) 230
250 return -EROFS;
251 if (instr->addr >= mtd->size)
252 return -EINVAL;
253 instr->addr += part->offset; 231 instr->addr += part->offset;
254 ret = mtd_erase(part->master, instr); 232 ret = part->master->_erase(part->master, instr);
255 if (ret) { 233 if (ret) {
256 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) 234 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
257 instr->fail_addr -= part->offset; 235 instr->fail_addr -= part->offset;
@@ -262,7 +240,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
262 240
263void mtd_erase_callback(struct erase_info *instr) 241void mtd_erase_callback(struct erase_info *instr)
264{ 242{
265 if (instr->mtd->erase == part_erase) { 243 if (instr->mtd->_erase == part_erase) {
266 struct mtd_part *part = PART(instr->mtd); 244 struct mtd_part *part = PART(instr->mtd);
267 245
268 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) 246 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
@@ -277,52 +255,44 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback);
277static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 255static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
278{ 256{
279 struct mtd_part *part = PART(mtd); 257 struct mtd_part *part = PART(mtd);
280 if ((len + ofs) > mtd->size) 258 return part->master->_lock(part->master, ofs + part->offset, len);
281 return -EINVAL;
282 return mtd_lock(part->master, ofs + part->offset, len);
283} 259}
284 260
285static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 261static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
286{ 262{
287 struct mtd_part *part = PART(mtd); 263 struct mtd_part *part = PART(mtd);
288 if ((len + ofs) > mtd->size) 264 return part->master->_unlock(part->master, ofs + part->offset, len);
289 return -EINVAL;
290 return mtd_unlock(part->master, ofs + part->offset, len);
291} 265}
292 266
293static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) 267static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
294{ 268{
295 struct mtd_part *part = PART(mtd); 269 struct mtd_part *part = PART(mtd);
296 if ((len + ofs) > mtd->size) 270 return part->master->_is_locked(part->master, ofs + part->offset, len);
297 return -EINVAL;
298 return mtd_is_locked(part->master, ofs + part->offset, len);
299} 271}
300 272
301static void part_sync(struct mtd_info *mtd) 273static void part_sync(struct mtd_info *mtd)
302{ 274{
303 struct mtd_part *part = PART(mtd); 275 struct mtd_part *part = PART(mtd);
304 mtd_sync(part->master); 276 part->master->_sync(part->master);
305} 277}
306 278
307static int part_suspend(struct mtd_info *mtd) 279static int part_suspend(struct mtd_info *mtd)
308{ 280{
309 struct mtd_part *part = PART(mtd); 281 struct mtd_part *part = PART(mtd);
310 return mtd_suspend(part->master); 282 return part->master->_suspend(part->master);
311} 283}
312 284
313static void part_resume(struct mtd_info *mtd) 285static void part_resume(struct mtd_info *mtd)
314{ 286{
315 struct mtd_part *part = PART(mtd); 287 struct mtd_part *part = PART(mtd);
316 mtd_resume(part->master); 288 part->master->_resume(part->master);
317} 289}
318 290
319static int part_block_isbad(struct mtd_info *mtd, loff_t ofs) 291static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
320{ 292{
321 struct mtd_part *part = PART(mtd); 293 struct mtd_part *part = PART(mtd);
322 if (ofs >= mtd->size)
323 return -EINVAL;
324 ofs += part->offset; 294 ofs += part->offset;
325 return mtd_block_isbad(part->master, ofs); 295 return part->master->_block_isbad(part->master, ofs);
326} 296}
327 297
328static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) 298static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -330,12 +300,8 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
330 struct mtd_part *part = PART(mtd); 300 struct mtd_part *part = PART(mtd);
331 int res; 301 int res;
332 302
333 if (!(mtd->flags & MTD_WRITEABLE))
334 return -EROFS;
335 if (ofs >= mtd->size)
336 return -EINVAL;
337 ofs += part->offset; 303 ofs += part->offset;
338 res = mtd_block_markbad(part->master, ofs); 304 res = part->master->_block_markbad(part->master, ofs);
339 if (!res) 305 if (!res)
340 mtd->ecc_stats.badblocks++; 306 mtd->ecc_stats.badblocks++;
341 return res; 307 return res;
@@ -410,54 +376,55 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
410 */ 376 */
411 slave->mtd.dev.parent = master->dev.parent; 377 slave->mtd.dev.parent = master->dev.parent;
412 378
413 slave->mtd.read = part_read; 379 slave->mtd._read = part_read;
414 slave->mtd.write = part_write; 380 slave->mtd._write = part_write;
415 381
416 if (master->panic_write) 382 if (master->_panic_write)
417 slave->mtd.panic_write = part_panic_write; 383 slave->mtd._panic_write = part_panic_write;
418 384
419 if (master->point && master->unpoint) { 385 if (master->_point && master->_unpoint) {
420 slave->mtd.point = part_point; 386 slave->mtd._point = part_point;
421 slave->mtd.unpoint = part_unpoint; 387 slave->mtd._unpoint = part_unpoint;
422 } 388 }
423 389
424 if (master->get_unmapped_area) 390 if (master->_get_unmapped_area)
425 slave->mtd.get_unmapped_area = part_get_unmapped_area; 391 slave->mtd._get_unmapped_area = part_get_unmapped_area;
426 if (master->read_oob) 392 if (master->_read_oob)
427 slave->mtd.read_oob = part_read_oob; 393 slave->mtd._read_oob = part_read_oob;
428 if (master->write_oob) 394 if (master->_write_oob)
429 slave->mtd.write_oob = part_write_oob; 395 slave->mtd._write_oob = part_write_oob;
430 if (master->read_user_prot_reg) 396 if (master->_read_user_prot_reg)
431 slave->mtd.read_user_prot_reg = part_read_user_prot_reg; 397 slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
432 if (master->read_fact_prot_reg) 398 if (master->_read_fact_prot_reg)
433 slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg; 399 slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
434 if (master->write_user_prot_reg) 400 if (master->_write_user_prot_reg)
435 slave->mtd.write_user_prot_reg = part_write_user_prot_reg; 401 slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
436 if (master->lock_user_prot_reg) 402 if (master->_lock_user_prot_reg)
437 slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg; 403 slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
438 if (master->get_user_prot_info) 404 if (master->_get_user_prot_info)
439 slave->mtd.get_user_prot_info = part_get_user_prot_info; 405 slave->mtd._get_user_prot_info = part_get_user_prot_info;
440 if (master->get_fact_prot_info) 406 if (master->_get_fact_prot_info)
441 slave->mtd.get_fact_prot_info = part_get_fact_prot_info; 407 slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
442 if (master->sync) 408 if (master->_sync)
443 slave->mtd.sync = part_sync; 409 slave->mtd._sync = part_sync;
444 if (!partno && !master->dev.class && master->suspend && master->resume) { 410 if (!partno && !master->dev.class && master->_suspend &&
445 slave->mtd.suspend = part_suspend; 411 master->_resume) {
446 slave->mtd.resume = part_resume; 412 slave->mtd._suspend = part_suspend;
413 slave->mtd._resume = part_resume;
447 } 414 }
448 if (master->writev) 415 if (master->_writev)
449 slave->mtd.writev = part_writev; 416 slave->mtd._writev = part_writev;
450 if (master->lock) 417 if (master->_lock)
451 slave->mtd.lock = part_lock; 418 slave->mtd._lock = part_lock;
452 if (master->unlock) 419 if (master->_unlock)
453 slave->mtd.unlock = part_unlock; 420 slave->mtd._unlock = part_unlock;
454 if (master->is_locked) 421 if (master->_is_locked)
455 slave->mtd.is_locked = part_is_locked; 422 slave->mtd._is_locked = part_is_locked;
456 if (master->block_isbad) 423 if (master->_block_isbad)
457 slave->mtd.block_isbad = part_block_isbad; 424 slave->mtd._block_isbad = part_block_isbad;
458 if (master->block_markbad) 425 if (master->_block_markbad)
459 slave->mtd.block_markbad = part_block_markbad; 426 slave->mtd._block_markbad = part_block_markbad;
460 slave->mtd.erase = part_erase; 427 slave->mtd._erase = part_erase;
461 slave->master = master; 428 slave->master = master;
462 slave->offset = part->offset; 429 slave->offset = part->offset;
463 430
@@ -549,7 +516,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
549 } 516 }
550 517
551 slave->mtd.ecclayout = master->ecclayout; 518 slave->mtd.ecclayout = master->ecclayout;
552 if (master->block_isbad) { 519 slave->mtd.ecc_strength = master->ecc_strength;
520 if (master->_block_isbad) {
553 uint64_t offs = 0; 521 uint64_t offs = 0;
554 522
555 while (offs < slave->mtd.size) { 523 while (offs < slave->mtd.size) {
@@ -761,7 +729,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
761 for ( ; ret <= 0 && *types; types++) { 729 for ( ; ret <= 0 && *types; types++) {
762 parser = get_partition_parser(*types); 730 parser = get_partition_parser(*types);
763 if (!parser && !request_module("%s", *types)) 731 if (!parser && !request_module("%s", *types))
764 parser = get_partition_parser(*types); 732 parser = get_partition_parser(*types);
765 if (!parser) 733 if (!parser)
766 continue; 734 continue;
767 ret = (*parser->parse_fn)(master, pparts, data); 735 ret = (*parser->parse_fn)(master, pparts, data);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index a3c4de551ebe..7d17cecad69d 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -314,6 +314,26 @@ config MTD_NAND_DISKONCHIP_BBTWRITE
314 load time (assuming you build diskonchip as a module) with the module 314 load time (assuming you build diskonchip as a module) with the module
315 parameter "inftl_bbt_write=1". 315 parameter "inftl_bbt_write=1".
316 316
317config MTD_NAND_DOCG4
318 tristate "Support for DiskOnChip G4 (EXPERIMENTAL)"
319 depends on EXPERIMENTAL
320 select BCH
321 select BITREVERSE
322 help
323 Support for diskonchip G4 nand flash, found in various smartphones and
324 PDAs, among them the Palm Treo680, HTC Prophet and Wizard, Toshiba
325 Portege G900, Asus P526, and O2 XDA Zinc.
326
327 With this driver you will be able to use UBI and create a ubifs on the
328 device, so you may wish to consider enabling UBI and UBIFS as well.
329
330 These devices ship with the Mys/Sandisk SAFTL formatting, for which
331 there is currently no mtd parser, so you may want to use command line
332 partitioning to segregate write-protected blocks. On the Treo680, the
333 first five erase blocks (256KiB each) are write-protected, followed
334 by the block containing the saftl partition table. This is probably
335 typical.
336
317config MTD_NAND_SHARPSL 337config MTD_NAND_SHARPSL
318 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)" 338 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
319 depends on ARCH_PXA 339 depends on ARCH_PXA
@@ -421,7 +441,6 @@ config MTD_NAND_NANDSIM
421config MTD_NAND_GPMI_NAND 441config MTD_NAND_GPMI_NAND
422 bool "GPMI NAND Flash Controller driver" 442 bool "GPMI NAND Flash Controller driver"
423 depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28) 443 depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
424 select MTD_CMDLINE_PARTS
425 help 444 help
426 Enables NAND Flash support for IMX23 or IMX28. 445 Enables NAND Flash support for IMX23 or IMX28.
427 The GPMI controller is very powerful, with the help of BCH 446 The GPMI controller is very powerful, with the help of BCH
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 19bc8cb1d187..d4b4d8739bd8 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
19obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o 19obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
20obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o 20obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
21obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o 21obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
22obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o
22obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o 23obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
23obj-$(CONFIG_MTD_NAND_H1900) += h1910.o 24obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
24obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o 25obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 6a5ff64a139e..4f20e1d8bef1 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -585,12 +585,13 @@ static int alauda_init_media(struct alauda *al)
585 mtd->writesize = 1<<card->pageshift; 585 mtd->writesize = 1<<card->pageshift;
586 mtd->type = MTD_NANDFLASH; 586 mtd->type = MTD_NANDFLASH;
587 mtd->flags = MTD_CAP_NANDFLASH; 587 mtd->flags = MTD_CAP_NANDFLASH;
588 mtd->read = alauda_read; 588 mtd->_read = alauda_read;
589 mtd->write = alauda_write; 589 mtd->_write = alauda_write;
590 mtd->erase = alauda_erase; 590 mtd->_erase = alauda_erase;
591 mtd->block_isbad = alauda_isbad; 591 mtd->_block_isbad = alauda_isbad;
592 mtd->priv = al; 592 mtd->priv = al;
593 mtd->owner = THIS_MODULE; 593 mtd->owner = THIS_MODULE;
594 mtd->ecc_strength = 1;
594 595
595 err = mtd_device_register(mtd, NULL, 0); 596 err = mtd_device_register(mtd, NULL, 0);
596 if (err) { 597 if (err) {
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index ae7e37d9ac17..2165576a1c67 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -603,6 +603,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
603 nand_chip->ecc.hwctl = atmel_nand_hwctl; 603 nand_chip->ecc.hwctl = atmel_nand_hwctl;
604 nand_chip->ecc.read_page = atmel_nand_read_page; 604 nand_chip->ecc.read_page = atmel_nand_read_page;
605 nand_chip->ecc.bytes = 4; 605 nand_chip->ecc.bytes = 4;
606 nand_chip->ecc.strength = 1;
606 } 607 }
607 608
608 nand_chip->chip_delay = 20; /* 20us command delay time */ 609 nand_chip->chip_delay = 20; /* 20us command delay time */
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 64c9cbaf86a1..6908cdde3065 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -475,6 +475,14 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
475 largepage_bbt.options = NAND_BBT_SCAN2NDPAGE; 475 largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
476 this->badblock_pattern = &largepage_bbt; 476 this->badblock_pattern = &largepage_bbt;
477 } 477 }
478
479 /*
480 * FIXME: ecc strength value of 6 bits per 512 bytes of data is a
481 * conservative guess, given 13 ecc bytes and using bch alg.
482 * (Assume Galois field order m=15 to allow a margin of error.)
483 */
484 this->ecc.strength = 6;
485
478#endif 486#endif
479 487
480 /* Now finish off the scan, now that ecc.layout has been initialized. */ 488 /* Now finish off the scan, now that ecc.layout has been initialized. */
@@ -487,7 +495,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
487 495
488 /* Register the partitions */ 496 /* Register the partitions */
489 board_mtd->name = "bcm_umi-nand"; 497 board_mtd->name = "bcm_umi-nand";
490 mtd_device_parse_register(board_mtd, NULL, 0, NULL, 0); 498 mtd_device_parse_register(board_mtd, NULL, NULL, NULL, 0);
491 499
492 /* Return happy */ 500 /* Return happy */
493 return 0; 501 return 0;
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index dd899cb5d366..d7b86b925de5 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -702,9 +702,11 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
702 if (likely(mtd->writesize >= 512)) { 702 if (likely(mtd->writesize >= 512)) {
703 chip->ecc.size = 512; 703 chip->ecc.size = 512;
704 chip->ecc.bytes = 6; 704 chip->ecc.bytes = 6;
705 chip->ecc.strength = 2;
705 } else { 706 } else {
706 chip->ecc.size = 256; 707 chip->ecc.size = 256;
707 chip->ecc.bytes = 3; 708 chip->ecc.bytes = 3;
709 chip->ecc.strength = 1;
708 bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET)); 710 bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET));
709 SSYNC(); 711 SSYNC();
710 } 712 }
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 72d3f23490c5..2a96e1a12062 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -783,6 +783,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
783 cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME; 783 cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
784 cafe->nand.ecc.size = mtd->writesize; 784 cafe->nand.ecc.size = mtd->writesize;
785 cafe->nand.ecc.bytes = 14; 785 cafe->nand.ecc.bytes = 14;
786 cafe->nand.ecc.strength = 4;
786 cafe->nand.ecc.hwctl = (void *)cafe_nand_bug; 787 cafe->nand.ecc.hwctl = (void *)cafe_nand_bug;
787 cafe->nand.ecc.calculate = (void *)cafe_nand_bug; 788 cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
788 cafe->nand.ecc.correct = (void *)cafe_nand_bug; 789 cafe->nand.ecc.correct = (void *)cafe_nand_bug;
@@ -799,7 +800,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
799 pci_set_drvdata(pdev, mtd); 800 pci_set_drvdata(pdev, mtd);
800 801
801 mtd->name = "cafe_nand"; 802 mtd->name = "cafe_nand";
802 mtd_device_parse_register(mtd, part_probes, 0, NULL, 0); 803 mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
803 804
804 goto out; 805 goto out;
805 806
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 737ef9a04fdb..1024bfc05c86 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -219,7 +219,7 @@ static int __init cmx270_init(void)
219 } 219 }
220 220
221 /* Register the partitions */ 221 /* Register the partitions */
222 ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, 0, 222 ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL,
223 partition_info, NUM_PARTITIONS); 223 partition_info, NUM_PARTITIONS);
224 if (ret) 224 if (ret)
225 goto err_scan; 225 goto err_scan;
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 414afa793563..821c34c62500 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -248,6 +248,8 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
248 goto out_ior; 248 goto out_ior;
249 } 249 }
250 250
251 this->ecc.strength = 1;
252
251 new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs); 253 new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
252 254
253 cs553x_mtd[cs] = new_mtd; 255 cs553x_mtd[cs] = new_mtd;
@@ -313,7 +315,7 @@ static int __init cs553x_init(void)
313 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { 315 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
314 if (cs553x_mtd[i]) { 316 if (cs553x_mtd[i]) {
315 /* If any devices registered, return success. Else the last error. */ 317 /* If any devices registered, return success. Else the last error. */
316 mtd_device_parse_register(cs553x_mtd[i], NULL, 0, 318 mtd_device_parse_register(cs553x_mtd[i], NULL, NULL,
317 NULL, 0); 319 NULL, 0);
318 err = 0; 320 err = 0;
319 } 321 }
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 6e566156956f..d94b03c207af 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -641,6 +641,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
641 info->chip.ecc.bytes = 3; 641 info->chip.ecc.bytes = 3;
642 } 642 }
643 info->chip.ecc.size = 512; 643 info->chip.ecc.size = 512;
644 info->chip.ecc.strength = pdata->ecc_bits;
644 break; 645 break;
645 default: 646 default:
646 ret = -EINVAL; 647 ret = -EINVAL;
@@ -752,8 +753,8 @@ syndrome_done:
752 if (ret < 0) 753 if (ret < 0)
753 goto err_scan; 754 goto err_scan;
754 755
755 ret = mtd_device_parse_register(&info->mtd, NULL, 0, 756 ret = mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
756 pdata->parts, pdata->nr_parts); 757 pdata->nr_parts);
757 758
758 if (ret < 0) 759 if (ret < 0)
759 goto err_scan; 760 goto err_scan;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 3984d488f9ab..a9e57d686297 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1590,6 +1590,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1590 ECC_15BITS * (denali->mtd.writesize / 1590 ECC_15BITS * (denali->mtd.writesize /
1591 ECC_SECTOR_SIZE)))) { 1591 ECC_SECTOR_SIZE)))) {
1592 /* if MLC OOB size is large enough, use 15bit ECC*/ 1592 /* if MLC OOB size is large enough, use 15bit ECC*/
1593 denali->nand.ecc.strength = 15;
1593 denali->nand.ecc.layout = &nand_15bit_oob; 1594 denali->nand.ecc.layout = &nand_15bit_oob;
1594 denali->nand.ecc.bytes = ECC_15BITS; 1595 denali->nand.ecc.bytes = ECC_15BITS;
1595 iowrite32(15, denali->flash_reg + ECC_CORRECTION); 1596 iowrite32(15, denali->flash_reg + ECC_CORRECTION);
@@ -1600,12 +1601,14 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1600 " contain 8bit ECC correction codes"); 1601 " contain 8bit ECC correction codes");
1601 goto failed_req_irq; 1602 goto failed_req_irq;
1602 } else { 1603 } else {
1604 denali->nand.ecc.strength = 8;
1603 denali->nand.ecc.layout = &nand_8bit_oob; 1605 denali->nand.ecc.layout = &nand_8bit_oob;
1604 denali->nand.ecc.bytes = ECC_8BITS; 1606 denali->nand.ecc.bytes = ECC_8BITS;
1605 iowrite32(8, denali->flash_reg + ECC_CORRECTION); 1607 iowrite32(8, denali->flash_reg + ECC_CORRECTION);
1606 } 1608 }
1607 1609
1608 denali->nand.ecc.bytes *= denali->devnum; 1610 denali->nand.ecc.bytes *= denali->devnum;
1611 denali->nand.ecc.strength *= denali->devnum;
1609 denali->nand.ecc.layout->eccbytes *= 1612 denali->nand.ecc.layout->eccbytes *=
1610 denali->mtd.writesize / ECC_SECTOR_SIZE; 1613 denali->mtd.writesize / ECC_SECTOR_SIZE;
1611 denali->nand.ecc.layout->oobfree[0].offset = 1614 denali->nand.ecc.layout->oobfree[0].offset =
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index df921e7a496c..e2ca067631cf 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1653,6 +1653,7 @@ static int __init doc_probe(unsigned long physadr)
1653 nand->ecc.mode = NAND_ECC_HW_SYNDROME; 1653 nand->ecc.mode = NAND_ECC_HW_SYNDROME;
1654 nand->ecc.size = 512; 1654 nand->ecc.size = 512;
1655 nand->ecc.bytes = 6; 1655 nand->ecc.bytes = 6;
1656 nand->ecc.strength = 2;
1656 nand->bbt_options = NAND_BBT_USE_FLASH; 1657 nand->bbt_options = NAND_BBT_USE_FLASH;
1657 1658
1658 doc->physadr = physadr; 1659 doc->physadr = physadr;
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
new file mode 100644
index 000000000000..b08202664543
--- /dev/null
+++ b/drivers/mtd/nand/docg4.c
@@ -0,0 +1,1377 @@
1/*
2 * Copyright © 2012 Mike Dunn <mikedunn@newsguy.com>
3 *
4 * mtd nand driver for M-Systems DiskOnChip G4
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * Tested on the Palm Treo 680. The G4 is also present on Toshiba Portege, Asus
12 * P526, some HTC smartphones (Wizard, Prophet, ...), O2 XDA Zinc, maybe others.
13 * Should work on these as well. Let me know!
14 *
15 * TODO:
16 *
17 * Mechanism for management of password-protected areas
18 *
19 * Hamming ecc when reading oob only
20 *
21 * According to the M-Sys documentation, this device is also available in a
22 * "dual-die" configuration having a 256MB capacity, but no mechanism for
23 * detecting this variant is documented. Currently this driver assumes 128MB
24 * capacity.
25 *
26 * Support for multiple cascaded devices ("floors"). Not sure which gadgets
27 * contain multiple G4s in a cascaded configuration, if any.
28 *
29 */
30
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34#include <linux/string.h>
35#include <linux/sched.h>
36#include <linux/delay.h>
37#include <linux/module.h>
38#include <linux/export.h>
39#include <linux/platform_device.h>
40#include <linux/io.h>
41#include <linux/bitops.h>
42#include <linux/mtd/partitions.h>
43#include <linux/mtd/mtd.h>
44#include <linux/mtd/nand.h>
45#include <linux/bch.h>
46#include <linux/bitrev.h>
47
48/*
49 * You'll want to ignore badblocks if you're reading a partition that contains
50 * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
51 * it does not use mtd nand's method for marking bad blocks (using oob area).
52 * This will also skip the check of the "page written" flag.
53 */
54static bool ignore_badblocks;
55module_param(ignore_badblocks, bool, 0);
56MODULE_PARM_DESC(ignore_badblocks, "no badblock checking performed");
57
58struct docg4_priv {
59 struct mtd_info *mtd;
60 struct device *dev;
61 void __iomem *virtadr;
62 int status;
63 struct {
64 unsigned int command;
65 int column;
66 int page;
67 } last_command;
68 uint8_t oob_buf[16];
69 uint8_t ecc_buf[7];
70 int oob_page;
71 struct bch_control *bch;
72};
73
74/*
75 * Defines prefixed with DOCG4 are unique to the diskonchip G4. All others are
76 * shared with other diskonchip devices (P3, G3 at least).
77 *
78 * Functions with names prefixed with docg4_ are mtd / nand interface functions
79 * (though they may also be called internally). All others are internal.
80 */
81
82#define DOC_IOSPACE_DATA 0x0800
83
84/* register offsets */
85#define DOC_CHIPID 0x1000
86#define DOC_DEVICESELECT 0x100a
87#define DOC_ASICMODE 0x100c
88#define DOC_DATAEND 0x101e
89#define DOC_NOP 0x103e
90
91#define DOC_FLASHSEQUENCE 0x1032
92#define DOC_FLASHCOMMAND 0x1034
93#define DOC_FLASHADDRESS 0x1036
94#define DOC_FLASHCONTROL 0x1038
95#define DOC_ECCCONF0 0x1040
96#define DOC_ECCCONF1 0x1042
97#define DOC_HAMMINGPARITY 0x1046
98#define DOC_BCH_SYNDROM(idx) (0x1048 + idx)
99
100#define DOC_ASICMODECONFIRM 0x1072
101#define DOC_CHIPID_INV 0x1074
102#define DOC_POWERMODE 0x107c
103
104#define DOCG4_MYSTERY_REG 0x1050
105
106/* apparently used only to write oob bytes 6 and 7 */
107#define DOCG4_OOB_6_7 0x1052
108
109/* DOC_FLASHSEQUENCE register commands */
110#define DOC_SEQ_RESET 0x00
111#define DOCG4_SEQ_PAGE_READ 0x03
112#define DOCG4_SEQ_FLUSH 0x29
113#define DOCG4_SEQ_PAGEWRITE 0x16
114#define DOCG4_SEQ_PAGEPROG 0x1e
115#define DOCG4_SEQ_BLOCKERASE 0x24
116
117/* DOC_FLASHCOMMAND register commands */
118#define DOCG4_CMD_PAGE_READ 0x00
119#define DOC_CMD_ERASECYCLE2 0xd0
120#define DOCG4_CMD_FLUSH 0x70
121#define DOCG4_CMD_READ2 0x30
122#define DOC_CMD_PROG_BLOCK_ADDR 0x60
123#define DOCG4_CMD_PAGEWRITE 0x80
124#define DOC_CMD_PROG_CYCLE2 0x10
125#define DOC_CMD_RESET 0xff
126
127/* DOC_POWERMODE register bits */
128#define DOC_POWERDOWN_READY 0x80
129
130/* DOC_FLASHCONTROL register bits */
131#define DOC_CTRL_CE 0x10
132#define DOC_CTRL_UNKNOWN 0x40
133#define DOC_CTRL_FLASHREADY 0x01
134
135/* DOC_ECCCONF0 register bits */
136#define DOC_ECCCONF0_READ_MODE 0x8000
137#define DOC_ECCCONF0_UNKNOWN 0x2000
138#define DOC_ECCCONF0_ECC_ENABLE 0x1000
139#define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff
140
141/* DOC_ECCCONF1 register bits */
142#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
143#define DOC_ECCCONF1_ECC_ENABLE 0x07
144#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
145
146/* DOC_ASICMODE register bits */
147#define DOC_ASICMODE_RESET 0x00
148#define DOC_ASICMODE_NORMAL 0x01
149#define DOC_ASICMODE_POWERDOWN 0x02
150#define DOC_ASICMODE_MDWREN 0x04
151#define DOC_ASICMODE_BDETCT_RESET 0x08
152#define DOC_ASICMODE_RSTIN_RESET 0x10
153#define DOC_ASICMODE_RAM_WE 0x20
154
155/* good status values read after read/write/erase operations */
156#define DOCG4_PROGSTATUS_GOOD 0x51
157#define DOCG4_PROGSTATUS_GOOD_2 0xe0
158
159/*
160 * On read operations (page and oob-only), the first byte read from I/O reg is a
161 * status. On error, it reads 0x73; otherwise, it reads either 0x71 (first read
162 * after reset only) or 0x51, so bit 1 is presumed to be an error indicator.
163 */
164#define DOCG4_READ_ERROR 0x02 /* bit 1 indicates read error */
165
166/* anatomy of the device */
167#define DOCG4_CHIP_SIZE 0x8000000
168#define DOCG4_PAGE_SIZE 0x200
169#define DOCG4_PAGES_PER_BLOCK 0x200
170#define DOCG4_BLOCK_SIZE (DOCG4_PAGES_PER_BLOCK * DOCG4_PAGE_SIZE)
171#define DOCG4_NUMBLOCKS (DOCG4_CHIP_SIZE / DOCG4_BLOCK_SIZE)
172#define DOCG4_OOB_SIZE 0x10
173#define DOCG4_CHIP_SHIFT 27 /* log_2(DOCG4_CHIP_SIZE) */
174#define DOCG4_PAGE_SHIFT 9 /* log_2(DOCG4_PAGE_SIZE) */
175#define DOCG4_ERASE_SHIFT 18 /* log_2(DOCG4_BLOCK_SIZE) */
176
177/* all but the last byte is included in ecc calculation */
178#define DOCG4_BCH_SIZE (DOCG4_PAGE_SIZE + DOCG4_OOB_SIZE - 1)
179
180#define DOCG4_USERDATA_LEN 520 /* 512 byte page plus 8 oob avail to user */
181
182/* expected values from the ID registers */
183#define DOCG4_IDREG1_VALUE 0x0400
184#define DOCG4_IDREG2_VALUE 0xfbff
185
186/* primitive polynomial used to build the Galois field used by hw ecc gen */
187#define DOCG4_PRIMITIVE_POLY 0x4443
188
189#define DOCG4_M 14 /* Galois field is of order 2^14 */
190#define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */
191
192#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
193
194/*
195 * Oob bytes 0 - 6 are available to the user.
196 * Byte 7 is hamming ecc for first 7 bytes. Bytes 8 - 14 are hw-generated ecc.
197 * Byte 15 (the last) is used by the driver as a "page written" flag.
198 */
199static struct nand_ecclayout docg4_oobinfo = {
200 .eccbytes = 9,
201 .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
202 .oobavail = 7,
203 .oobfree = { {0, 7} }
204};
205
206/*
207 * The device has a nop register which M-Sys claims is for the purpose of
208 * inserting precise delays. But beware; at least some operations fail if the
209 * nop writes are replaced with a generic delay!
210 */
211static inline void write_nop(void __iomem *docptr)
212{
213 writew(0, docptr + DOC_NOP);
214}
215
216static void docg4_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
217{
218 int i;
219 struct nand_chip *nand = mtd->priv;
220 uint16_t *p = (uint16_t *) buf;
221 len >>= 1;
222
223 for (i = 0; i < len; i++)
224 p[i] = readw(nand->IO_ADDR_R);
225}
226
227static void docg4_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
228{
229 int i;
230 struct nand_chip *nand = mtd->priv;
231 uint16_t *p = (uint16_t *) buf;
232 len >>= 1;
233
234 for (i = 0; i < len; i++)
235 writew(p[i], nand->IO_ADDR_W);
236}
237
238static int poll_status(struct docg4_priv *doc)
239{
240 /*
241 * Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL
242 * register. Operations known to take a long time (e.g., block erase)
243 * should sleep for a while before calling this.
244 */
245
246 uint16_t flash_status;
247 unsigned int timeo;
248 void __iomem *docptr = doc->virtadr;
249
250 dev_dbg(doc->dev, "%s...\n", __func__);
251
252 /* hardware quirk requires reading twice initially */
253 flash_status = readw(docptr + DOC_FLASHCONTROL);
254
255 timeo = 1000;
256 do {
257 cpu_relax();
258 flash_status = readb(docptr + DOC_FLASHCONTROL);
259 } while (!(flash_status & DOC_CTRL_FLASHREADY) && --timeo);
260
261
262 if (!timeo) {
263 dev_err(doc->dev, "%s: timed out!\n", __func__);
264 return NAND_STATUS_FAIL;
265 }
266
267 if (unlikely(timeo < 50))
268 dev_warn(doc->dev, "%s: nearly timed out; %d remaining\n",
269 __func__, timeo);
270
271 return 0;
272}
273
274
275static int docg4_wait(struct mtd_info *mtd, struct nand_chip *nand)
276{
277
278 struct docg4_priv *doc = nand->priv;
279 int status = NAND_STATUS_WP; /* inverse logic?? */
280 dev_dbg(doc->dev, "%s...\n", __func__);
281
282 /* report any previously unreported error */
283 if (doc->status) {
284 status |= doc->status;
285 doc->status = 0;
286 return status;
287 }
288
289 status |= poll_status(doc);
290 return status;
291}
292
293static void docg4_select_chip(struct mtd_info *mtd, int chip)
294{
295 /*
296 * Select among multiple cascaded chips ("floors"). Multiple floors are
297 * not yet supported, so the only valid non-negative value is 0.
298 */
299 struct nand_chip *nand = mtd->priv;
300 struct docg4_priv *doc = nand->priv;
301 void __iomem *docptr = doc->virtadr;
302
303 dev_dbg(doc->dev, "%s: chip %d\n", __func__, chip);
304
305 if (chip < 0)
306 return; /* deselected */
307
308 if (chip > 0)
309 dev_warn(doc->dev, "multiple floors currently unsupported\n");
310
311 writew(0, docptr + DOC_DEVICESELECT);
312}
313
314static void reset(struct mtd_info *mtd)
315{
316 /* full device reset */
317
318 struct nand_chip *nand = mtd->priv;
319 struct docg4_priv *doc = nand->priv;
320 void __iomem *docptr = doc->virtadr;
321
322 writew(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN,
323 docptr + DOC_ASICMODE);
324 writew(~(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN),
325 docptr + DOC_ASICMODECONFIRM);
326 write_nop(docptr);
327
328 writew(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN,
329 docptr + DOC_ASICMODE);
330 writew(~(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN),
331 docptr + DOC_ASICMODECONFIRM);
332
333 writew(DOC_ECCCONF1_ECC_ENABLE, docptr + DOC_ECCCONF1);
334
335 poll_status(doc);
336}
337
338static void read_hw_ecc(void __iomem *docptr, uint8_t *ecc_buf)
339{
340 /* read the 7 hw-generated ecc bytes */
341
342 int i;
343 for (i = 0; i < 7; i++) { /* hw quirk; read twice */
344 ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
345 ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
346 }
347}
348
349static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
350{
351 /*
352 * Called after a page read when hardware reports bitflips.
353 * Up to four bitflips can be corrected.
354 */
355
356 struct nand_chip *nand = mtd->priv;
357 struct docg4_priv *doc = nand->priv;
358 void __iomem *docptr = doc->virtadr;
359 int i, numerrs, errpos[4];
360 const uint8_t blank_read_hwecc[8] = {
361 0xcf, 0x72, 0xfc, 0x1b, 0xa9, 0xc7, 0xb9, 0 };
362
363 read_hw_ecc(docptr, doc->ecc_buf); /* read 7 hw-generated ecc bytes */
364
365 /* check if read error is due to a blank page */
366 if (!memcmp(doc->ecc_buf, blank_read_hwecc, 7))
367 return 0; /* yes */
368
369 /* skip additional check of "written flag" if ignore_badblocks */
370 if (ignore_badblocks == false) {
371
372 /*
373 * If the hw ecc bytes are not those of a blank page, there's
374 * still a chance that the page is blank, but was read with
375 * errors. Check the "written flag" in last oob byte, which
376 * is set to zero when a page is written. If more than half
377 * the bits are set, assume a blank page. Unfortunately, the
378 * bit flips(s) are not reported in stats.
379 */
380
381 if (doc->oob_buf[15]) {
382 int bit, numsetbits = 0;
383 unsigned long written_flag = doc->oob_buf[15];
384 for_each_set_bit(bit, &written_flag, 8)
385 numsetbits++;
386 if (numsetbits > 4) { /* assume blank */
387 dev_warn(doc->dev,
388 "error(s) in blank page "
389 "at offset %08x\n",
390 page * DOCG4_PAGE_SIZE);
391 return 0;
392 }
393 }
394 }
395
396 /*
397 * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
398 * algorithm is used to decode this. However the hw operates on page
399 * data in a bit order that is the reverse of that of the bch alg,
400 * requiring that the bits be reversed on the result. Thanks to Ivan
401 * Djelic for his analysis!
402 */
403 for (i = 0; i < 7; i++)
404 doc->ecc_buf[i] = bitrev8(doc->ecc_buf[i]);
405
406 numerrs = decode_bch(doc->bch, NULL, DOCG4_USERDATA_LEN, NULL,
407 doc->ecc_buf, NULL, errpos);
408
409 if (numerrs == -EBADMSG) {
410 dev_warn(doc->dev, "uncorrectable errors at offset %08x\n",
411 page * DOCG4_PAGE_SIZE);
412 return -EBADMSG;
413 }
414
415 BUG_ON(numerrs < 0); /* -EINVAL, or anything other than -EBADMSG */
416
417 /* undo last step in BCH alg (modulo mirroring not needed) */
418 for (i = 0; i < numerrs; i++)
419 errpos[i] = (errpos[i] & ~7)|(7-(errpos[i] & 7));
420
421 /* fix the errors */
422 for (i = 0; i < numerrs; i++) {
423
424 /* ignore if error within oob ecc bytes */
425 if (errpos[i] > DOCG4_USERDATA_LEN * 8)
426 continue;
427
428 /* if error within oob area preceeding ecc bytes... */
429 if (errpos[i] > DOCG4_PAGE_SIZE * 8)
430 change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8,
431 (unsigned long *)doc->oob_buf);
432
433 else /* error in page data */
434 change_bit(errpos[i], (unsigned long *)buf);
435 }
436
437 dev_notice(doc->dev, "%d error(s) corrected at offset %08x\n",
438 numerrs, page * DOCG4_PAGE_SIZE);
439
440 return numerrs;
441}
442
443static uint8_t docg4_read_byte(struct mtd_info *mtd)
444{
445 struct nand_chip *nand = mtd->priv;
446 struct docg4_priv *doc = nand->priv;
447
448 dev_dbg(doc->dev, "%s\n", __func__);
449
450 if (doc->last_command.command == NAND_CMD_STATUS) {
451 int status;
452
453 /*
454 * Previous nand command was status request, so nand
455 * infrastructure code expects to read the status here. If an
456 * error occurred in a previous operation, report it.
457 */
458 doc->last_command.command = 0;
459
460 if (doc->status) {
461 status = doc->status;
462 doc->status = 0;
463 }
464
465 /* why is NAND_STATUS_WP inverse logic?? */
466 else
467 status = NAND_STATUS_WP | NAND_STATUS_READY;
468
469 return status;
470 }
471
472 dev_warn(doc->dev, "unexpectd call to read_byte()\n");
473
474 return 0;
475}
476
477static void write_addr(struct docg4_priv *doc, uint32_t docg4_addr)
478{
479 /* write the four address bytes packed in docg4_addr to the device */
480
481 void __iomem *docptr = doc->virtadr;
482 writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
483 docg4_addr >>= 8;
484 writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
485 docg4_addr >>= 8;
486 writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
487 docg4_addr >>= 8;
488 writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
489}
490
491static int read_progstatus(struct docg4_priv *doc)
492{
493 /*
494 * This apparently checks the status of programming. Done after an
495 * erasure, and after page data is written. On error, the status is
496 * saved, to be later retrieved by the nand infrastructure code.
497 */
498 void __iomem *docptr = doc->virtadr;
499
500 /* status is read from the I/O reg */
501 uint16_t status1 = readw(docptr + DOC_IOSPACE_DATA);
502 uint16_t status2 = readw(docptr + DOC_IOSPACE_DATA);
503 uint16_t status3 = readw(docptr + DOCG4_MYSTERY_REG);
504
505 dev_dbg(doc->dev, "docg4: %s: %02x %02x %02x\n",
506 __func__, status1, status2, status3);
507
508 if (status1 != DOCG4_PROGSTATUS_GOOD
509 || status2 != DOCG4_PROGSTATUS_GOOD_2
510 || status3 != DOCG4_PROGSTATUS_GOOD_2) {
511 doc->status = NAND_STATUS_FAIL;
512 dev_warn(doc->dev, "read_progstatus failed: "
513 "%02x, %02x, %02x\n", status1, status2, status3);
514 return -EIO;
515 }
516 return 0;
517}
518
519static int pageprog(struct mtd_info *mtd)
520{
521 /*
522 * Final step in writing a page. Writes the contents of its
523 * internal buffer out to the flash array, or some such.
524 */
525
526 struct nand_chip *nand = mtd->priv;
527 struct docg4_priv *doc = nand->priv;
528 void __iomem *docptr = doc->virtadr;
529 int retval = 0;
530
531 dev_dbg(doc->dev, "docg4: %s\n", __func__);
532
533 writew(DOCG4_SEQ_PAGEPROG, docptr + DOC_FLASHSEQUENCE);
534 writew(DOC_CMD_PROG_CYCLE2, docptr + DOC_FLASHCOMMAND);
535 write_nop(docptr);
536 write_nop(docptr);
537
538 /* Just busy-wait; usleep_range() slows things down noticeably. */
539 poll_status(doc);
540
541 writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
542 writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
543 writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
544 write_nop(docptr);
545 write_nop(docptr);
546 write_nop(docptr);
547 write_nop(docptr);
548 write_nop(docptr);
549
550 retval = read_progstatus(doc);
551 writew(0, docptr + DOC_DATAEND);
552 write_nop(docptr);
553 poll_status(doc);
554 write_nop(docptr);
555
556 return retval;
557}
558
559static void sequence_reset(struct mtd_info *mtd)
560{
561 /* common starting sequence for all operations */
562
563 struct nand_chip *nand = mtd->priv;
564 struct docg4_priv *doc = nand->priv;
565 void __iomem *docptr = doc->virtadr;
566
567 writew(DOC_CTRL_UNKNOWN | DOC_CTRL_CE, docptr + DOC_FLASHCONTROL);
568 writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE);
569 writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND);
570 write_nop(docptr);
571 write_nop(docptr);
572 poll_status(doc);
573 write_nop(docptr);
574}
575
576static void read_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
577{
578 /* first step in reading a page */
579
580 struct nand_chip *nand = mtd->priv;
581 struct docg4_priv *doc = nand->priv;
582 void __iomem *docptr = doc->virtadr;
583
584 dev_dbg(doc->dev,
585 "docg4: %s: g4 page %08x\n", __func__, docg4_addr);
586
587 sequence_reset(mtd);
588
589 writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE);
590 writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND);
591 write_nop(docptr);
592
593 write_addr(doc, docg4_addr);
594
595 write_nop(docptr);
596 writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND);
597 write_nop(docptr);
598 write_nop(docptr);
599
600 poll_status(doc);
601}
602
603static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
604{
605 /* first step in writing a page */
606
607 struct nand_chip *nand = mtd->priv;
608 struct docg4_priv *doc = nand->priv;
609 void __iomem *docptr = doc->virtadr;
610
611 dev_dbg(doc->dev,
612 "docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
613 sequence_reset(mtd);
614 writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
615 writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
616 write_nop(docptr);
617 write_addr(doc, docg4_addr);
618 write_nop(docptr);
619 write_nop(docptr);
620 poll_status(doc);
621}
622
623static uint32_t mtd_to_docg4_address(int page, int column)
624{
625 /*
626 * Convert mtd address to format used by the device, 32 bit packed.
627 *
628 * Some notes on G4 addressing... The M-Sys documentation on this device
629 * claims that pages are 2K in length, and indeed, the format of the
630 * address used by the device reflects that. But within each page are
631 * four 512 byte "sub-pages", each with its own oob data that is
632 * read/written immediately after the 512 bytes of page data. This oob
633 * data contains the ecc bytes for the preceeding 512 bytes.
634 *
635 * Rather than tell the mtd nand infrastructure that page size is 2k,
636 * with four sub-pages each, we engage in a little subterfuge and tell
637 * the infrastructure code that pages are 512 bytes in size. This is
638 * done because during the course of reverse-engineering the device, I
639 * never observed an instance where an entire 2K "page" was read or
640 * written as a unit. Each "sub-page" is always addressed individually,
641 * its data read/written, and ecc handled before the next "sub-page" is
642 * addressed.
643 *
644 * This requires us to convert addresses passed by the mtd nand
645 * infrastructure code to those used by the device.
646 *
647 * The address that is written to the device consists of four bytes: the
648 * first two are the 2k page number, and the second is the index into
649 * the page. The index is in terms of 16-bit half-words and includes
650 * the preceeding oob data, so e.g., the index into the second
651 * "sub-page" is 0x108, and the full device address of the start of mtd
652 * page 0x201 is 0x00800108.
653 */
654 int g4_page = page / 4; /* device's 2K page */
655 int g4_index = (page % 4) * 0x108 + column/2; /* offset into page */
656 return (g4_page << 16) | g4_index; /* pack */
657}
658
/*
 * docg4_command - nand_chip cmdfunc method for this device.
 *
 * Translates the (page_addr, column) supplied by the mtd nand
 * infrastructure into the device's packed native address (see
 * mtd_to_docg4_address()), records the command for later emulation by
 * read_byte()/read_oob(), and dispatches the standard nand commands
 * that this device supports.
 */
static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
			  int page_addr)
{
	/* handle standard nand commands */

	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	uint32_t g4_addr = mtd_to_docg4_address(page_addr, column);

	dev_dbg(doc->dev, "%s %x, page_addr=%x, column=%x\n",
	       __func__, command, page_addr, column);

	/*
	 * Save the command and its arguments. This enables emulation of
	 * standard flash devices, and also some optimizations.
	 */
	doc->last_command.command = command;
	doc->last_command.column = column;
	doc->last_command.page = page_addr;

	switch (command) {

	case NAND_CMD_RESET:
		reset(mtd);
		break;

	case NAND_CMD_READ0:
		read_page_prologue(mtd, g4_addr);
		break;

	case NAND_CMD_STATUS:
		/* next call to read_byte() will expect a status */
		break;

	case NAND_CMD_SEQIN:
		write_page_prologue(mtd, g4_addr);

		/*
		 * Hack for deferred write of oob bytes: if a previous
		 * docg4_write_oob() stashed oob data for this same page,
		 * restore it now so it is written with the page data.
		 */
		if (doc->oob_page == page_addr)
			memcpy(nand->oob_poi, doc->oob_buf, 16);
		break;

	case NAND_CMD_PAGEPROG:
		pageprog(mtd);
		break;

	/* we don't expect these, based on review of nand_base.c */
	case NAND_CMD_READOOB:
	case NAND_CMD_READID:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
		dev_warn(doc->dev, "docg4_command: "
			 "unexpected nand command 0x%x\n", command);
		break;

	}
}
716
/*
 * read_page - read one 512-byte "sub-page" of data plus its oob bytes.
 *
 * Common implementation behind docg4_read_page() (with hardware ecc
 * correction) and docg4_read_page_raw() (without).  Returns 0 on
 * success, -EIO if the device reports a read error.  The oob bytes are
 * saved in doc->oob_buf so a subsequent docg4_read_oob() for the same
 * page can be satisfied from the cache.
 */
static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
		     uint8_t *buf, int page, bool use_ecc)
{
	struct docg4_priv *doc = nand->priv;
	void __iomem *docptr = doc->virtadr;
	uint16_t status, edc_err, *buf16;

	dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);

	/* configure the ecc engine for a read; exact sequence of writes
	 * and nops below follows the device's required protocol */
	writew(DOC_ECCCONF0_READ_MODE |
	       DOC_ECCCONF0_ECC_ENABLE |
	       DOC_ECCCONF0_UNKNOWN |
	       DOCG4_BCH_SIZE,
	       docptr + DOC_ECCCONF0);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);

	/* the 1st byte from the I/O reg is a status; the rest is page data */
	status = readw(docptr + DOC_IOSPACE_DATA);
	if (status & DOCG4_READ_ERROR) {
		dev_err(doc->dev,
			"docg4_read_page: bad status: 0x%02x\n", status);
		/* terminate the transfer before bailing out */
		writew(0, docptr + DOC_DATAEND);
		return -EIO;
	}

	dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);

	docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */

	/*
	 * Diskonchips read oob immediately after a page read. Mtd
	 * infrastructure issues a separate command for reading oob after the
	 * page is read. So we save the oob bytes in a local buffer and just
	 * copy it if the next command reads oob from the same page.
	 */

	/* first 14 oob bytes read from I/O reg */
	docg4_read_buf(mtd, doc->oob_buf, 14);

	/* last 2 read from another reg */
	buf16 = (uint16_t *)(doc->oob_buf + 14);
	*buf16 = readw(docptr + DOCG4_MYSTERY_REG);

	write_nop(docptr);

	if (likely(use_ecc == true)) {

		/* read the register that tells us if bitflip(s) detected */
		/* read twice; first read is discarded — device quirk,
		 * matching the double reads elsewhere in this driver */
		edc_err = readw(docptr + DOC_ECCCONF1);
		edc_err = readw(docptr + DOC_ECCCONF1);
		dev_dbg(doc->dev, "%s: edc_err = 0x%02x\n", __func__, edc_err);

		/* If bitflips are reported, attempt to correct with ecc */
		if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
			int bits_corrected = correct_data(mtd, buf, page);
			if (bits_corrected == -EBADMSG)
				mtd->ecc_stats.failed++;
			else
				mtd->ecc_stats.corrected += bits_corrected;
		}
	}

	writew(0, docptr + DOC_DATAEND);
	return 0;
}
786
787
/* nand_chip ecc.read_page_raw method: read a page without ecc correction */
static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
			       uint8_t *buf, int page)
{
	return read_page(mtd, nand, buf, page, false);
}
793
/* nand_chip ecc.read_page method: read a page with hardware ecc correction */
static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
			   uint8_t *buf, int page)
{
	return read_page(mtd, nand, buf, page, true);
}
799
800static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
801 int page, int sndcmd)
802{
803 struct docg4_priv *doc = nand->priv;
804 void __iomem *docptr = doc->virtadr;
805 uint16_t status;
806
807 dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
808
809 /*
810 * Oob bytes are read as part of a normal page read. If the previous
811 * nand command was a read of the page whose oob is now being read, just
812 * copy the oob bytes that we saved in a local buffer and avoid a
813 * separate oob read.
814 */
815 if (doc->last_command.command == NAND_CMD_READ0 &&
816 doc->last_command.page == page) {
817 memcpy(nand->oob_poi, doc->oob_buf, 16);
818 return 0;
819 }
820
821 /*
822 * Separate read of oob data only.
823 */
824 docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
825
826 writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
827 write_nop(docptr);
828 write_nop(docptr);
829 write_nop(docptr);
830 write_nop(docptr);
831 write_nop(docptr);
832
833 /* the 1st byte from the I/O reg is a status; the rest is oob data */
834 status = readw(docptr + DOC_IOSPACE_DATA);
835 if (status & DOCG4_READ_ERROR) {
836 dev_warn(doc->dev,
837 "docg4_read_oob failed: status = 0x%02x\n", status);
838 return -EIO;
839 }
840
841 dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
842
843 docg4_read_buf(mtd, nand->oob_poi, 16);
844
845 write_nop(docptr);
846 write_nop(docptr);
847 write_nop(docptr);
848 writew(0, docptr + DOC_DATAEND);
849 write_nop(docptr);
850
851 return 0;
852}
853
/*
 * docg4_erase_block - nand_chip erase_cmd method: erase the block
 * containing @page.
 *
 * The device addresses erase blocks by its native 2K page number, so
 * the mtd 512-byte page number is divided by 4; only the low 16 bits of
 * that address are written.  The register sequence below (including the
 * flush and progstatus reads after the erase cycle) follows the
 * device's required protocol.
 */
static void docg4_erase_block(struct mtd_info *mtd, int page)
{
	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	void __iomem *docptr = doc->virtadr;
	uint16_t g4_page;

	dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);

	sequence_reset(mtd);

	writew(DOCG4_SEQ_BLOCKERASE, docptr + DOC_FLASHSEQUENCE);
	writew(DOC_CMD_PROG_BLOCK_ADDR, docptr + DOC_FLASHCOMMAND);
	write_nop(docptr);

	/* only 2 bytes of address are written to specify erase block */
	g4_page = (uint16_t)(page / 4); /* to g4's 2k page addressing */
	writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
	g4_page >>= 8;
	writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
	write_nop(docptr);

	/* start the erasure */
	writew(DOC_CMD_ERASECYCLE2, docptr + DOC_FLASHCOMMAND);
	write_nop(docptr);
	write_nop(docptr);

	usleep_range(500, 1000); /* erasure is long; take a snooze */
	poll_status(doc);
	writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
	writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
	writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);
	write_nop(docptr);

	read_progstatus(doc);

	writew(0, docptr + DOC_DATAEND);
	write_nop(docptr);
	poll_status(doc);
	write_nop(docptr);
}
899
/*
 * write_page - write one 512-byte "sub-page" of data plus oob bytes.
 *
 * Common implementation behind docg4_write_page() (hardware-generated
 * ecc written to oob) and docg4_write_page_raw() (caller-supplied oob
 * bytes written verbatim).  The page address was already sent to the
 * device by a prior write_page_prologue() (NAND_CMD_SEQIN).
 */
static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
		       const uint8_t *buf, bool use_ecc)
{
	struct docg4_priv *doc = nand->priv;
	void __iomem *docptr = doc->virtadr;
	uint8_t ecc_buf[8];

	dev_dbg(doc->dev, "%s...\n", __func__);

	writew(DOC_ECCCONF0_ECC_ENABLE |
	       DOC_ECCCONF0_UNKNOWN |
	       DOCG4_BCH_SIZE,
	       docptr + DOC_ECCCONF0);
	write_nop(docptr);

	/* write the page data */
	docg4_write_buf16(mtd, buf, DOCG4_PAGE_SIZE);

	/* oob bytes 0 through 5 are written to I/O reg */
	docg4_write_buf16(mtd, nand->oob_poi, 6);

	/* oob byte 6 written to a separate reg */
	writew(nand->oob_poi[6], docptr + DOCG4_OOB_6_7);

	write_nop(docptr);
	write_nop(docptr);

	/* write hw-generated ecc bytes to oob */
	if (likely(use_ecc == true)) {
		/* oob byte 7 is hamming code */
		/* read twice; first read is discarded — device quirk */
		uint8_t hamming = readb(docptr + DOC_HAMMINGPARITY);
		hamming = readb(docptr + DOC_HAMMINGPARITY); /* 2nd read */
		writew(hamming, docptr + DOCG4_OOB_6_7);
		write_nop(docptr);

		/* read the 7 bch bytes from ecc regs */
		read_hw_ecc(docptr, ecc_buf);
		ecc_buf[7] = 0; /* clear the "page written" flag */
	}

	/* write user-supplied bytes to oob */
	else {
		writew(nand->oob_poi[7], docptr + DOCG4_OOB_6_7);
		write_nop(docptr);
		memcpy(ecc_buf, &nand->oob_poi[8], 8);
	}

	docg4_write_buf16(mtd, ecc_buf, 8);
	write_nop(docptr);
	write_nop(docptr);
	writew(0, docptr + DOC_DATAEND);
	write_nop(docptr);
}
953
/* nand_chip ecc.write_page_raw method: write page with caller-supplied oob */
static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
				 const uint8_t *buf)
{
	return write_page(mtd, nand, buf, false);
}
959
/* nand_chip ecc.write_page method: write page with hw-generated ecc in oob */
static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
			     const uint8_t *buf)
{
	return write_page(mtd, nand, buf, true);
}
965
966static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
967 int page)
968{
969 /*
970 * Writing oob-only is not really supported, because MLC nand must write
971 * oob bytes at the same time as page data. Nonetheless, we save the
972 * oob buffer contents here, and then write it along with the page data
973 * if the same page is subsequently written. This allows user space
974 * utilities that write the oob data prior to the page data to work
975 * (e.g., nandwrite). The disdvantage is that, if the intention was to
976 * write oob only, the operation is quietly ignored. Also, oob can get
977 * corrupted if two concurrent processes are running nandwrite.
978 */
979
980 /* note that bytes 7..14 are hw generated hamming/ecc and overwritten */
981 struct docg4_priv *doc = nand->priv;
982 doc->oob_page = page;
983 memcpy(doc->oob_buf, nand->oob_poi, 16);
984 return 0;
985}
986
/*
 * read_factory_bbt - read the device's read-only factory bad block
 * table and fold it into the memory-based bbt built by the nand
 * infrastructure.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from the page read.
 */
static int __init read_factory_bbt(struct mtd_info *mtd)
{
	/*
	 * The device contains a read-only factory bad block table. Read it and
	 * update the memory-based bbt accordingly.
	 */

	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
	uint8_t *buf;
	int i, block, status;

	buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	read_page_prologue(mtd, g4_addr);
	status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE);
	if (status)
		goto exit;

	/*
	 * If no memory-based bbt was created, exit. This will happen if module
	 * parameter ignore_badblocks is set. Then why even call this function?
	 * For an unknown reason, block erase always fails if it's the first
	 * operation after device power-up. The above read ensures it never is.
	 * Ugly, I know.
	 */
	if (nand->bbt == NULL)  /* no memory-based bbt */
		goto exit;

	/*
	 * Parse factory bbt and update memory-based bbt. Factory bbt format is
	 * simple: one bit per block, block numbers increase left to right (msb
	 * to lsb). Bit clear means bad block.
	 */
	for (i = block = 0; block < DOCG4_NUMBLOCKS; block += 8, i++) {
		int bitnum;
		unsigned long bits = ~buf[i];
		for_each_set_bit(bitnum, &bits, 8) {
			int badblock = block + 7 - bitnum;
			/* two bits per block in the in-memory bbt; 0x03
			 * marks the block factory-bad */
			nand->bbt[badblock / 4] |=
				0x03 << ((badblock % 4) * 2);
			mtd->ecc_stats.badblocks++;
			dev_notice(doc->dev, "factory-marked bad block: %d\n",
				   badblock);
		}
	}
 exit:
	kfree(buf);
	return status;
}
1040
/*
 * docg4_block_markbad - nand_chip block_markbad method.
 *
 * Marks the block containing @ofs bad by writing the inverted badblock
 * pattern to the oob of the block's first page (along with a blank data
 * page, since this device cannot write oob alone), and by setting the
 * block's entry in the in-memory bbt.  Returns 0 on success, -ENOMEM on
 * allocation failure, or the page-program status.
 */
static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	/*
	 * Mark a block as bad. Bad blocks are marked in the oob area of the
	 * first page of the block. The default scan_bbt() in the nand
	 * infrastructure code works fine for building the memory-based bbt
	 * during initialization, as does the nand infrastructure function that
	 * checks if a block is bad by reading the bbt. This function replaces
	 * the nand default because writes to oob-only are not supported.
	 */

	int ret, i;
	uint8_t *buf;
	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	struct nand_bbt_descr *bbtd = nand->badblock_pattern;
	int block = (int)(ofs >> nand->bbt_erase_shift);
	int page = (int)(ofs >> nand->page_shift);
	uint32_t g4_addr = mtd_to_docg4_address(page, 0);

	dev_dbg(doc->dev, "%s: %08llx\n", __func__, ofs);

	if (unlikely(ofs & (DOCG4_BLOCK_SIZE - 1)))
		dev_warn(doc->dev, "%s: ofs %llx not start of block!\n",
			 __func__, ofs);

	/* allocate blank buffer for page data */
	buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* update bbt in memory; 0x01 marks the block worn-bad */
	nand->bbt[block / 4] |= 0x01 << ((block & 0x03) * 2);

	/* write bit-wise negation of pattern to oob buffer */
	memset(nand->oob_poi, 0xff, mtd->oobsize);
	for (i = 0; i < bbtd->len; i++)
		nand->oob_poi[bbtd->offs + i] = ~bbtd->pattern[i];

	/* write first page of block */
	write_page_prologue(mtd, g4_addr);
	docg4_write_page(mtd, nand, buf);
	ret = pageprog(mtd);
	if (!ret)
		mtd->ecc_stats.badblocks++;

	kfree(buf);

	return ret;
}
1091
/* dummy block_bad method that reports every block good;
 * only called when module_param ignore_badblocks is set */
static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
	return 0;
}
1097
1098static int docg4_suspend(struct platform_device *pdev, pm_message_t state)
1099{
1100 /*
1101 * Put the device into "deep power-down" mode. Note that CE# must be
1102 * deasserted for this to take effect. The xscale, e.g., can be
1103 * configured to float this signal when the processor enters power-down,
1104 * and a suitable pull-up ensures its deassertion.
1105 */
1106
1107 int i;
1108 uint8_t pwr_down;
1109 struct docg4_priv *doc = platform_get_drvdata(pdev);
1110 void __iomem *docptr = doc->virtadr;
1111
1112 dev_dbg(doc->dev, "%s...\n", __func__);
1113
1114 /* poll the register that tells us we're ready to go to sleep */
1115 for (i = 0; i < 10; i++) {
1116 pwr_down = readb(docptr + DOC_POWERMODE);
1117 if (pwr_down & DOC_POWERDOWN_READY)
1118 break;
1119 usleep_range(1000, 4000);
1120 }
1121
1122 if (pwr_down & DOC_POWERDOWN_READY) {
1123 dev_err(doc->dev, "suspend failed; "
1124 "timeout polling DOC_POWERDOWN_READY\n");
1125 return -EIO;
1126 }
1127
1128 writew(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN,
1129 docptr + DOC_ASICMODE);
1130 writew(~(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN),
1131 docptr + DOC_ASICMODECONFIRM);
1132
1133 write_nop(docptr);
1134
1135 return 0;
1136}
1137
1138static int docg4_resume(struct platform_device *pdev)
1139{
1140
1141 /*
1142 * Exit power-down. Twelve consecutive reads of the address below
1143 * accomplishes this, assuming CE# has been asserted.
1144 */
1145
1146 struct docg4_priv *doc = platform_get_drvdata(pdev);
1147 void __iomem *docptr = doc->virtadr;
1148 int i;
1149
1150 dev_dbg(doc->dev, "%s...\n", __func__);
1151
1152 for (i = 0; i < 12; i++)
1153 readb(docptr + 0x1fff);
1154
1155 return 0;
1156}
1157
/*
 * init_mtd_structs - populate the mtd_info and nand_chip structures
 * with this device's geometry, options, and method pointers.
 */
static void __init init_mtd_structs(struct mtd_info *mtd)
{
	/* initialize mtd and nand data structures */

	/*
	 * Note that some of the following initializations are not usually
	 * required within a nand driver because they are performed by the nand
	 * infrastructure code as part of nand_scan(). In this case they need
	 * to be initialized here because we skip call to nand_scan_ident() (the
	 * first half of nand_scan()). The call to nand_scan_ident() is skipped
	 * because for this device the chip id is not read in the manner of a
	 * standard nand device. Unfortunately, nand_scan_ident() does other
	 * things as well, such as call nand_set_defaults().
	 */

	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;

	/* device geometry */
	mtd->size = DOCG4_CHIP_SIZE;
	mtd->name = "Msys_Diskonchip_G4";
	mtd->writesize = DOCG4_PAGE_SIZE;
	mtd->erasesize = DOCG4_BLOCK_SIZE;
	mtd->oobsize = DOCG4_OOB_SIZE;
	nand->chipsize = DOCG4_CHIP_SIZE;
	nand->chip_shift = DOCG4_CHIP_SHIFT;
	nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT;
	nand->chip_delay = 20;
	nand->page_shift = DOCG4_PAGE_SHIFT;
	nand->pagemask = 0x3ffff;
	nand->badblockpos = NAND_LARGE_BADBLOCK_POS;
	nand->badblockbits = 8;
	/* ecc configuration */
	nand->ecc.layout = &docg4_oobinfo;
	nand->ecc.mode = NAND_ECC_HW_SYNDROME;
	nand->ecc.size = DOCG4_PAGE_SIZE;
	nand->ecc.prepad = 8;
	nand->ecc.bytes = 8;
	nand->ecc.strength = DOCG4_T;
	nand->options =
		NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR;
	nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
	/* single-chip setup: the chip is its own controller */
	nand->controller = &nand->hwcontrol;
	spin_lock_init(&nand->controller->lock);
	init_waitqueue_head(&nand->controller->wq);

	/* methods */
	nand->cmdfunc = docg4_command;
	nand->waitfunc = docg4_wait;
	nand->select_chip = docg4_select_chip;
	nand->read_byte = docg4_read_byte;
	nand->block_markbad = docg4_block_markbad;
	nand->read_buf = docg4_read_buf;
	nand->write_buf = docg4_write_buf16;
	nand->scan_bbt = nand_default_bbt;
	nand->erase_cmd = docg4_erase_block;
	nand->ecc.read_page = docg4_read_page;
	nand->ecc.write_page = docg4_write_page;
	nand->ecc.read_page_raw = docg4_read_page_raw;
	nand->ecc.write_page_raw = docg4_write_page_raw;
	nand->ecc.read_oob = docg4_read_oob;
	nand->ecc.write_oob = docg4_write_oob;

	/*
	 * The way the nand infrastructure code is written, a memory-based bbt
	 * is not created if NAND_SKIP_BBTSCAN is set. With no memory bbt,
	 * nand->block_bad() is used. So when ignoring bad blocks, we skip the
	 * scan and define a dummy block_bad() which always returns 0.
	 */
	if (ignore_badblocks) {
		nand->options |= NAND_SKIP_BBTSCAN;
		nand->block_bad = docg4_block_neverbad;
	}

}
1231
/*
 * read_id_reg - detect the G4 chip by reading its id registers.
 *
 * Returns 0 if the expected id values are read, -ENODEV otherwise.
 * Each id read is paired with a read of DOCG4_MYSTERY_REG whose result
 * overwrites the first — this double-read pattern is a device quirk
 * used throughout this driver; do not "simplify" it away.
 */
static int __init read_id_reg(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd->priv;
	struct docg4_priv *doc = nand->priv;
	void __iomem *docptr = doc->virtadr;
	uint16_t id1, id2;

	/* check for presence of g4 chip by reading id registers */
	id1 = readw(docptr + DOC_CHIPID);
	id1 = readw(docptr + DOCG4_MYSTERY_REG);
	id2 = readw(docptr + DOC_CHIPID_INV);
	id2 = readw(docptr + DOCG4_MYSTERY_REG);

	if (id1 == DOCG4_IDREG1_VALUE && id2 == DOCG4_IDREG2_VALUE) {
		dev_info(doc->dev,
			 "NAND device: 128MiB Diskonchip G4 detected\n");
		return 0;
	}

	return -ENODEV;
}
1253
/* partition parsers tried, in order, by mtd_device_parse_register() */
static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
1255
/*
 * probe_docg4 - platform driver probe method.
 *
 * Maps the device's io memory, allocates the mtd/nand/private
 * structures in a single block, detects the chip, completes nand setup
 * via nand_scan_tail(), folds in the factory bbt, and registers the mtd
 * device.  Returns 0 on success or a negative errno.
 */
static int __init probe_docg4(struct platform_device *pdev)
{
	struct mtd_info *mtd;
	struct nand_chip *nand;
	void __iomem *virtadr;
	struct docg4_priv *doc;
	int len, retval;
	struct resource *r;
	struct device *dev = &pdev->dev;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		dev_err(dev, "no io memory resource defined!\n");
		return -ENODEV;
	}

	virtadr = ioremap(r->start, resource_size(r));
	if (!virtadr) {
		dev_err(dev, "Diskonchip ioremap failed: %pR\n", r);
		return -EIO;
	}

	/* mtd_info, nand_chip, and docg4_priv are carved from one
	 * allocation, laid out back-to-back */
	len = sizeof(struct mtd_info) + sizeof(struct nand_chip) +
		sizeof(struct docg4_priv);
	mtd = kzalloc(len, GFP_KERNEL);
	if (mtd == NULL) {
		retval = -ENOMEM;
		goto fail;
	}
	nand = (struct nand_chip *) (mtd + 1);
	doc = (struct docg4_priv *) (nand + 1);
	mtd->priv = nand;
	nand->priv = doc;
	mtd->owner = THIS_MODULE;
	doc->virtadr = virtadr;
	doc->dev = dev;

	init_mtd_structs(mtd);

	/* initialize kernel bch algorithm */
	doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
	if (doc->bch == NULL) {
		retval = -EINVAL;
		goto fail;
	}

	platform_set_drvdata(pdev, doc);

	reset(mtd);
	retval = read_id_reg(mtd);
	if (retval == -ENODEV) {
		dev_warn(dev, "No diskonchip G4 device found.\n");
		goto fail;
	}

	retval = nand_scan_tail(mtd);
	if (retval)
		goto fail;

	retval = read_factory_bbt(mtd);
	if (retval)
		goto fail;

	retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
	if (retval)
		goto fail;

	doc->mtd = mtd;
	return 0;

	/*
	 * NOTE(review): this single fail label runs nand_release() even for
	 * failures that occur before nand_scan_tail() completed, and
	 * free_bch() even when init_bch() failed (doc->bch == NULL) — verify
	 * both are safe in this kernel version, or split into granular
	 * error labels.
	 */
 fail:
	iounmap(virtadr);
	if (mtd) {
		/* re-declarations avoid compiler warning */
		struct nand_chip *nand = mtd->priv;
		struct docg4_priv *doc = nand->priv;
		nand_release(mtd); /* deletes partitions and mtd devices */
		platform_set_drvdata(pdev, NULL);
		free_bch(doc->bch);
		kfree(mtd);
	}

	return retval;
}
1340
/*
 * cleanup_docg4 - platform driver remove method: undo probe_docg4() in
 * reverse order (release the nand/mtd device before freeing its memory
 * and unmapping the io region).
 */
static int __exit cleanup_docg4(struct platform_device *pdev)
{
	struct docg4_priv *doc = platform_get_drvdata(pdev);
	nand_release(doc->mtd);
	platform_set_drvdata(pdev, NULL);
	free_bch(doc->bch);
	kfree(doc->mtd);
	iounmap(doc->virtadr);
	return 0;
}
1351
/* platform driver glue; probe is registered via platform_driver_probe() */
static struct platform_driver docg4_driver = {
	.driver		= {
		.name	= "docg4",
		.owner	= THIS_MODULE,
	},
	.suspend	= docg4_suspend,
	.resume		= docg4_resume,
	.remove		= __exit_p(cleanup_docg4),
};
1361
/* module init: register the driver and probe any matching device */
static int __init docg4_init(void)
{
	return platform_driver_probe(&docg4_driver, probe_docg4);
}
1366
/* module exit: unregister the driver (remove method handles teardown) */
static void __exit docg4_exit(void)
{
	platform_driver_unregister(&docg4_driver);
}
1371
1372module_init(docg4_init);
1373module_exit(docg4_exit);
1374
1375MODULE_LICENSE("GPL");
1376MODULE_AUTHOR("Mike Dunn");
1377MODULE_DESCRIPTION("M-Systems DiskOnChip G4 device driver");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 7195ee6efe12..80b5264f0a32 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -813,6 +813,12 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
813 &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0; 813 &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0;
814 chip->ecc.size = 512; 814 chip->ecc.size = 512;
815 chip->ecc.bytes = 3; 815 chip->ecc.bytes = 3;
816 chip->ecc.strength = 1;
817 /*
818 * FIXME: can hardware ecc correct 4 bitflips if page size is
819 * 2k? Then does hardware report number of corrections for this
820 * case? If so, ecc_stats reporting needs to be fixed as well.
821 */
816 } else { 822 } else {
817 /* otherwise fall back to default software ECC */ 823 /* otherwise fall back to default software ECC */
818 chip->ecc.mode = NAND_ECC_SOFT; 824 chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index e53b76064133..1b8330e1155a 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -17,6 +17,10 @@
17 */ 17 */
18 18
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/completion.h>
21#include <linux/dmaengine.h>
22#include <linux/dma-direction.h>
23#include <linux/dma-mapping.h>
20#include <linux/err.h> 24#include <linux/err.h>
21#include <linux/init.h> 25#include <linux/init.h>
22#include <linux/module.h> 26#include <linux/module.h>
@@ -27,6 +31,7 @@
27#include <linux/mtd/nand.h> 31#include <linux/mtd/nand.h>
28#include <linux/mtd/nand_ecc.h> 32#include <linux/mtd/nand_ecc.h>
29#include <linux/platform_device.h> 33#include <linux/platform_device.h>
34#include <linux/of.h>
30#include <linux/mtd/partitions.h> 35#include <linux/mtd/partitions.h>
31#include <linux/io.h> 36#include <linux/io.h>
32#include <linux/slab.h> 37#include <linux/slab.h>
@@ -34,7 +39,7 @@
34#include <linux/amba/bus.h> 39#include <linux/amba/bus.h>
35#include <mtd/mtd-abi.h> 40#include <mtd/mtd-abi.h>
36 41
37static struct nand_ecclayout fsmc_ecc1_layout = { 42static struct nand_ecclayout fsmc_ecc1_128_layout = {
38 .eccbytes = 24, 43 .eccbytes = 24,
39 .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52, 44 .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52,
40 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116}, 45 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116},
@@ -50,7 +55,127 @@ static struct nand_ecclayout fsmc_ecc1_layout = {
50 } 55 }
51}; 56};
52 57
53static struct nand_ecclayout fsmc_ecc4_lp_layout = { 58static struct nand_ecclayout fsmc_ecc1_64_layout = {
59 .eccbytes = 12,
60 .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52},
61 .oobfree = {
62 {.offset = 8, .length = 8},
63 {.offset = 24, .length = 8},
64 {.offset = 40, .length = 8},
65 {.offset = 56, .length = 8},
66 }
67};
68
69static struct nand_ecclayout fsmc_ecc1_16_layout = {
70 .eccbytes = 3,
71 .eccpos = {2, 3, 4},
72 .oobfree = {
73 {.offset = 8, .length = 8},
74 }
75};
76
77/*
78 * ECC4 layout for NAND of pagesize 8192 bytes & OOBsize 256 bytes. 13*16 bytes
79 * of OB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 46
80 * bytes are free for use.
81 */
82static struct nand_ecclayout fsmc_ecc4_256_layout = {
83 .eccbytes = 208,
84 .eccpos = { 2, 3, 4, 5, 6, 7, 8,
85 9, 10, 11, 12, 13, 14,
86 18, 19, 20, 21, 22, 23, 24,
87 25, 26, 27, 28, 29, 30,
88 34, 35, 36, 37, 38, 39, 40,
89 41, 42, 43, 44, 45, 46,
90 50, 51, 52, 53, 54, 55, 56,
91 57, 58, 59, 60, 61, 62,
92 66, 67, 68, 69, 70, 71, 72,
93 73, 74, 75, 76, 77, 78,
94 82, 83, 84, 85, 86, 87, 88,
95 89, 90, 91, 92, 93, 94,
96 98, 99, 100, 101, 102, 103, 104,
97 105, 106, 107, 108, 109, 110,
98 114, 115, 116, 117, 118, 119, 120,
99 121, 122, 123, 124, 125, 126,
100 130, 131, 132, 133, 134, 135, 136,
101 137, 138, 139, 140, 141, 142,
102 146, 147, 148, 149, 150, 151, 152,
103 153, 154, 155, 156, 157, 158,
104 162, 163, 164, 165, 166, 167, 168,
105 169, 170, 171, 172, 173, 174,
106 178, 179, 180, 181, 182, 183, 184,
107 185, 186, 187, 188, 189, 190,
108 194, 195, 196, 197, 198, 199, 200,
109 201, 202, 203, 204, 205, 206,
110 210, 211, 212, 213, 214, 215, 216,
111 217, 218, 219, 220, 221, 222,
112 226, 227, 228, 229, 230, 231, 232,
113 233, 234, 235, 236, 237, 238,
114 242, 243, 244, 245, 246, 247, 248,
115 249, 250, 251, 252, 253, 254
116 },
117 .oobfree = {
118 {.offset = 15, .length = 3},
119 {.offset = 31, .length = 3},
120 {.offset = 47, .length = 3},
121 {.offset = 63, .length = 3},
122 {.offset = 79, .length = 3},
123 {.offset = 95, .length = 3},
124 {.offset = 111, .length = 3},
125 {.offset = 127, .length = 3},
126 {.offset = 143, .length = 3},
127 {.offset = 159, .length = 3},
128 {.offset = 175, .length = 3},
129 {.offset = 191, .length = 3},
130 {.offset = 207, .length = 3},
131 {.offset = 223, .length = 3},
132 {.offset = 239, .length = 3},
133 {.offset = 255, .length = 1}
134 }
135};
136
137/*
138 * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes. 13*8 bytes
139 * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 118
140 * bytes are free for use.
141 */
142static struct nand_ecclayout fsmc_ecc4_224_layout = {
143 .eccbytes = 104,
144 .eccpos = { 2, 3, 4, 5, 6, 7, 8,
145 9, 10, 11, 12, 13, 14,
146 18, 19, 20, 21, 22, 23, 24,
147 25, 26, 27, 28, 29, 30,
148 34, 35, 36, 37, 38, 39, 40,
149 41, 42, 43, 44, 45, 46,
150 50, 51, 52, 53, 54, 55, 56,
151 57, 58, 59, 60, 61, 62,
152 66, 67, 68, 69, 70, 71, 72,
153 73, 74, 75, 76, 77, 78,
154 82, 83, 84, 85, 86, 87, 88,
155 89, 90, 91, 92, 93, 94,
156 98, 99, 100, 101, 102, 103, 104,
157 105, 106, 107, 108, 109, 110,
158 114, 115, 116, 117, 118, 119, 120,
159 121, 122, 123, 124, 125, 126
160 },
161 .oobfree = {
162 {.offset = 15, .length = 3},
163 {.offset = 31, .length = 3},
164 {.offset = 47, .length = 3},
165 {.offset = 63, .length = 3},
166 {.offset = 79, .length = 3},
167 {.offset = 95, .length = 3},
168 {.offset = 111, .length = 3},
169 {.offset = 127, .length = 97}
170 }
171};
172
173/*
174 * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 128 bytes. 13*8 bytes
175 * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 22
176 * bytes are free for use.
177 */
178static struct nand_ecclayout fsmc_ecc4_128_layout = {
54 .eccbytes = 104, 179 .eccbytes = 104,
55 .eccpos = { 2, 3, 4, 5, 6, 7, 8, 180 .eccpos = { 2, 3, 4, 5, 6, 7, 8,
56 9, 10, 11, 12, 13, 14, 181 9, 10, 11, 12, 13, 14,
@@ -82,6 +207,45 @@ static struct nand_ecclayout fsmc_ecc4_lp_layout = {
82}; 207};
83 208
84/* 209/*
210 * ECC4 layout for NAND of pagesize 2048 bytes & OOBsize 64 bytes. 13*4 bytes of
211 * OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 10
212 * bytes are free for use.
213 */
214static struct nand_ecclayout fsmc_ecc4_64_layout = {
215 .eccbytes = 52,
216 .eccpos = { 2, 3, 4, 5, 6, 7, 8,
217 9, 10, 11, 12, 13, 14,
218 18, 19, 20, 21, 22, 23, 24,
219 25, 26, 27, 28, 29, 30,
220 34, 35, 36, 37, 38, 39, 40,
221 41, 42, 43, 44, 45, 46,
222 50, 51, 52, 53, 54, 55, 56,
223 57, 58, 59, 60, 61, 62,
224 },
225 .oobfree = {
226 {.offset = 15, .length = 3},
227 {.offset = 31, .length = 3},
228 {.offset = 47, .length = 3},
229 {.offset = 63, .length = 1},
230 }
231};
232
233/*
234 * ECC4 layout for NAND of pagesize 512 bytes & OOBsize 16 bytes. 13 bytes of
235 * OOB size is reserved for ECC, Byte no. 4 & 5 reserved for bad block and One
236 * byte is free for use.
237 */
238static struct nand_ecclayout fsmc_ecc4_16_layout = {
239 .eccbytes = 13,
240 .eccpos = { 0, 1, 2, 3, 6, 7, 8,
241 9, 10, 11, 12, 13, 14
242 },
243 .oobfree = {
244 {.offset = 15, .length = 1},
245 }
246};
247
248/*
85 * ECC placement definitions in oobfree type format. 249 * ECC placement definitions in oobfree type format.
86 * There are 13 bytes of ecc for every 512 byte block and it has to be read 250 * There are 13 bytes of ecc for every 512 byte block and it has to be read
87 * consecutively and immediately after the 512 byte data block for hardware to 251 * consecutively and immediately after the 512 byte data block for hardware to
@@ -103,16 +267,6 @@ static struct fsmc_eccplace fsmc_ecc4_lp_place = {
103 } 267 }
104}; 268};
105 269
106static struct nand_ecclayout fsmc_ecc4_sp_layout = {
107 .eccbytes = 13,
108 .eccpos = { 0, 1, 2, 3, 6, 7, 8,
109 9, 10, 11, 12, 13, 14
110 },
111 .oobfree = {
112 {.offset = 15, .length = 1},
113 }
114};
115
116static struct fsmc_eccplace fsmc_ecc4_sp_place = { 270static struct fsmc_eccplace fsmc_ecc4_sp_place = {
117 .eccplace = { 271 .eccplace = {
118 {.offset = 0, .length = 4}, 272 {.offset = 0, .length = 4},
@@ -120,75 +274,24 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
120 } 274 }
121}; 275};
122 276
123/*
124 * Default partition tables to be used if the partition information not
125 * provided through platform data.
126 *
127 * Default partition layout for small page(= 512 bytes) devices
128 * Size for "Root file system" is updated in driver based on actual device size
129 */
130static struct mtd_partition partition_info_16KB_blk[] = {
131 {
132 .name = "X-loader",
133 .offset = 0,
134 .size = 4*0x4000,
135 },
136 {
137 .name = "U-Boot",
138 .offset = 0x10000,
139 .size = 20*0x4000,
140 },
141 {
142 .name = "Kernel",
143 .offset = 0x60000,
144 .size = 256*0x4000,
145 },
146 {
147 .name = "Root File System",
148 .offset = 0x460000,
149 .size = MTDPART_SIZ_FULL,
150 },
151};
152
153/*
154 * Default partition layout for large page(> 512 bytes) devices
155 * Size for "Root file system" is updated in driver based on actual device size
156 */
157static struct mtd_partition partition_info_128KB_blk[] = {
158 {
159 .name = "X-loader",
160 .offset = 0,
161 .size = 4*0x20000,
162 },
163 {
164 .name = "U-Boot",
165 .offset = 0x80000,
166 .size = 12*0x20000,
167 },
168 {
169 .name = "Kernel",
170 .offset = 0x200000,
171 .size = 48*0x20000,
172 },
173 {
174 .name = "Root File System",
175 .offset = 0x800000,
176 .size = MTDPART_SIZ_FULL,
177 },
178};
179
180
181/** 277/**
182 * struct fsmc_nand_data - structure for FSMC NAND device state 278 * struct fsmc_nand_data - structure for FSMC NAND device state
183 * 279 *
184 * @pid: Part ID on the AMBA PrimeCell format 280 * @pid: Part ID on the AMBA PrimeCell format
185 * @mtd: MTD info for a NAND flash. 281 * @mtd: MTD info for a NAND flash.
186 * @nand: Chip related info for a NAND flash. 282 * @nand: Chip related info for a NAND flash.
283 * @partitions: Partition info for a NAND Flash.
284 * @nr_partitions: Total number of partition of a NAND flash.
187 * 285 *
188 * @ecc_place: ECC placing locations in oobfree type format. 286 * @ecc_place: ECC placing locations in oobfree type format.
189 * @bank: Bank number for probed device. 287 * @bank: Bank number for probed device.
190 * @clk: Clock structure for FSMC. 288 * @clk: Clock structure for FSMC.
191 * 289 *
290 * @read_dma_chan: DMA channel for read access
291 * @write_dma_chan: DMA channel for write access to NAND
292 * @dma_access_complete: Completion structure
293 *
294 * @data_pa: NAND Physical port for Data.
192 * @data_va: NAND port for Data. 295 * @data_va: NAND port for Data.
193 * @cmd_va: NAND port for Command. 296 * @cmd_va: NAND port for Command.
194 * @addr_va: NAND port for Address. 297 * @addr_va: NAND port for Address.
@@ -198,16 +301,23 @@ struct fsmc_nand_data {
198 u32 pid; 301 u32 pid;
199 struct mtd_info mtd; 302 struct mtd_info mtd;
200 struct nand_chip nand; 303 struct nand_chip nand;
304 struct mtd_partition *partitions;
305 unsigned int nr_partitions;
201 306
202 struct fsmc_eccplace *ecc_place; 307 struct fsmc_eccplace *ecc_place;
203 unsigned int bank; 308 unsigned int bank;
309 struct device *dev;
310 enum access_mode mode;
204 struct clk *clk; 311 struct clk *clk;
205 312
206 struct resource *resregs; 313 /* DMA related objects */
207 struct resource *rescmd; 314 struct dma_chan *read_dma_chan;
208 struct resource *resaddr; 315 struct dma_chan *write_dma_chan;
209 struct resource *resdata; 316 struct completion dma_access_complete;
317
318 struct fsmc_nand_timings *dev_timings;
210 319
320 dma_addr_t data_pa;
211 void __iomem *data_va; 321 void __iomem *data_va;
212 void __iomem *cmd_va; 322 void __iomem *cmd_va;
213 void __iomem *addr_va; 323 void __iomem *addr_va;
@@ -251,28 +361,29 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
251 struct nand_chip *this = mtd->priv; 361 struct nand_chip *this = mtd->priv;
252 struct fsmc_nand_data *host = container_of(mtd, 362 struct fsmc_nand_data *host = container_of(mtd,
253 struct fsmc_nand_data, mtd); 363 struct fsmc_nand_data, mtd);
254 struct fsmc_regs *regs = host->regs_va; 364 void *__iomem *regs = host->regs_va;
255 unsigned int bank = host->bank; 365 unsigned int bank = host->bank;
256 366
257 if (ctrl & NAND_CTRL_CHANGE) { 367 if (ctrl & NAND_CTRL_CHANGE) {
368 u32 pc;
369
258 if (ctrl & NAND_CLE) { 370 if (ctrl & NAND_CLE) {
259 this->IO_ADDR_R = (void __iomem *)host->cmd_va; 371 this->IO_ADDR_R = host->cmd_va;
260 this->IO_ADDR_W = (void __iomem *)host->cmd_va; 372 this->IO_ADDR_W = host->cmd_va;
261 } else if (ctrl & NAND_ALE) { 373 } else if (ctrl & NAND_ALE) {
262 this->IO_ADDR_R = (void __iomem *)host->addr_va; 374 this->IO_ADDR_R = host->addr_va;
263 this->IO_ADDR_W = (void __iomem *)host->addr_va; 375 this->IO_ADDR_W = host->addr_va;
264 } else { 376 } else {
265 this->IO_ADDR_R = (void __iomem *)host->data_va; 377 this->IO_ADDR_R = host->data_va;
266 this->IO_ADDR_W = (void __iomem *)host->data_va; 378 this->IO_ADDR_W = host->data_va;
267 } 379 }
268 380
269 if (ctrl & NAND_NCE) { 381 pc = readl(FSMC_NAND_REG(regs, bank, PC));
270 writel(readl(&regs->bank_regs[bank].pc) | FSMC_ENABLE, 382 if (ctrl & NAND_NCE)
271 &regs->bank_regs[bank].pc); 383 pc |= FSMC_ENABLE;
272 } else { 384 else
273 writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ENABLE, 385 pc &= ~FSMC_ENABLE;
274 &regs->bank_regs[bank].pc); 386 writel(pc, FSMC_NAND_REG(regs, bank, PC));
275 }
276 } 387 }
277 388
278 mb(); 389 mb();
@@ -287,22 +398,42 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
287 * This routine initializes timing parameters related to NAND memory access in 398 * This routine initializes timing parameters related to NAND memory access in
288 * FSMC registers 399 * FSMC registers
289 */ 400 */
290static void __init fsmc_nand_setup(struct fsmc_regs *regs, uint32_t bank, 401static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
291 uint32_t busw) 402 uint32_t busw, struct fsmc_nand_timings *timings)
292{ 403{
293 uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON; 404 uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
405 uint32_t tclr, tar, thiz, thold, twait, tset;
406 struct fsmc_nand_timings *tims;
407 struct fsmc_nand_timings default_timings = {
408 .tclr = FSMC_TCLR_1,
409 .tar = FSMC_TAR_1,
410 .thiz = FSMC_THIZ_1,
411 .thold = FSMC_THOLD_4,
412 .twait = FSMC_TWAIT_6,
413 .tset = FSMC_TSET_0,
414 };
415
416 if (timings)
417 tims = timings;
418 else
419 tims = &default_timings;
420
421 tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
422 tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
423 thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
424 thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
425 twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
426 tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
294 427
295 if (busw) 428 if (busw)
296 writel(value | FSMC_DEVWID_16, &regs->bank_regs[bank].pc); 429 writel(value | FSMC_DEVWID_16, FSMC_NAND_REG(regs, bank, PC));
297 else 430 else
298 writel(value | FSMC_DEVWID_8, &regs->bank_regs[bank].pc); 431 writel(value | FSMC_DEVWID_8, FSMC_NAND_REG(regs, bank, PC));
299 432
300 writel(readl(&regs->bank_regs[bank].pc) | FSMC_TCLR_1 | FSMC_TAR_1, 433 writel(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
301 &regs->bank_regs[bank].pc); 434 FSMC_NAND_REG(regs, bank, PC));
302 writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0, 435 writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, COMM));
303 &regs->bank_regs[bank].comm); 436 writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, ATTRIB));
304 writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
305 &regs->bank_regs[bank].attrib);
306} 437}
307 438
308/* 439/*
@@ -312,15 +443,15 @@ static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
312{ 443{
313 struct fsmc_nand_data *host = container_of(mtd, 444 struct fsmc_nand_data *host = container_of(mtd,
314 struct fsmc_nand_data, mtd); 445 struct fsmc_nand_data, mtd);
315 struct fsmc_regs *regs = host->regs_va; 446 void __iomem *regs = host->regs_va;
316 uint32_t bank = host->bank; 447 uint32_t bank = host->bank;
317 448
318 writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCPLEN_256, 449 writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
319 &regs->bank_regs[bank].pc); 450 FSMC_NAND_REG(regs, bank, PC));
320 writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCEN, 451 writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
321 &regs->bank_regs[bank].pc); 452 FSMC_NAND_REG(regs, bank, PC));
322 writel(readl(&regs->bank_regs[bank].pc) | FSMC_ECCEN, 453 writel(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
323 &regs->bank_regs[bank].pc); 454 FSMC_NAND_REG(regs, bank, PC));
324} 455}
325 456
326/* 457/*
@@ -333,37 +464,42 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
333{ 464{
334 struct fsmc_nand_data *host = container_of(mtd, 465 struct fsmc_nand_data *host = container_of(mtd,
335 struct fsmc_nand_data, mtd); 466 struct fsmc_nand_data, mtd);
336 struct fsmc_regs *regs = host->regs_va; 467 void __iomem *regs = host->regs_va;
337 uint32_t bank = host->bank; 468 uint32_t bank = host->bank;
338 uint32_t ecc_tmp; 469 uint32_t ecc_tmp;
339 unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT; 470 unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
340 471
341 do { 472 do {
342 if (readl(&regs->bank_regs[bank].sts) & FSMC_CODE_RDY) 473 if (readl(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
343 break; 474 break;
344 else 475 else
345 cond_resched(); 476 cond_resched();
346 } while (!time_after_eq(jiffies, deadline)); 477 } while (!time_after_eq(jiffies, deadline));
347 478
348 ecc_tmp = readl(&regs->bank_regs[bank].ecc1); 479 if (time_after_eq(jiffies, deadline)) {
480 dev_err(host->dev, "calculate ecc timed out\n");
481 return -ETIMEDOUT;
482 }
483
484 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
349 ecc[0] = (uint8_t) (ecc_tmp >> 0); 485 ecc[0] = (uint8_t) (ecc_tmp >> 0);
350 ecc[1] = (uint8_t) (ecc_tmp >> 8); 486 ecc[1] = (uint8_t) (ecc_tmp >> 8);
351 ecc[2] = (uint8_t) (ecc_tmp >> 16); 487 ecc[2] = (uint8_t) (ecc_tmp >> 16);
352 ecc[3] = (uint8_t) (ecc_tmp >> 24); 488 ecc[3] = (uint8_t) (ecc_tmp >> 24);
353 489
354 ecc_tmp = readl(&regs->bank_regs[bank].ecc2); 490 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC2));
355 ecc[4] = (uint8_t) (ecc_tmp >> 0); 491 ecc[4] = (uint8_t) (ecc_tmp >> 0);
356 ecc[5] = (uint8_t) (ecc_tmp >> 8); 492 ecc[5] = (uint8_t) (ecc_tmp >> 8);
357 ecc[6] = (uint8_t) (ecc_tmp >> 16); 493 ecc[6] = (uint8_t) (ecc_tmp >> 16);
358 ecc[7] = (uint8_t) (ecc_tmp >> 24); 494 ecc[7] = (uint8_t) (ecc_tmp >> 24);
359 495
360 ecc_tmp = readl(&regs->bank_regs[bank].ecc3); 496 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC3));
361 ecc[8] = (uint8_t) (ecc_tmp >> 0); 497 ecc[8] = (uint8_t) (ecc_tmp >> 0);
362 ecc[9] = (uint8_t) (ecc_tmp >> 8); 498 ecc[9] = (uint8_t) (ecc_tmp >> 8);
363 ecc[10] = (uint8_t) (ecc_tmp >> 16); 499 ecc[10] = (uint8_t) (ecc_tmp >> 16);
364 ecc[11] = (uint8_t) (ecc_tmp >> 24); 500 ecc[11] = (uint8_t) (ecc_tmp >> 24);
365 501
366 ecc_tmp = readl(&regs->bank_regs[bank].sts); 502 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, STS));
367 ecc[12] = (uint8_t) (ecc_tmp >> 16); 503 ecc[12] = (uint8_t) (ecc_tmp >> 16);
368 504
369 return 0; 505 return 0;
@@ -379,11 +515,11 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
379{ 515{
380 struct fsmc_nand_data *host = container_of(mtd, 516 struct fsmc_nand_data *host = container_of(mtd,
381 struct fsmc_nand_data, mtd); 517 struct fsmc_nand_data, mtd);
382 struct fsmc_regs *regs = host->regs_va; 518 void __iomem *regs = host->regs_va;
383 uint32_t bank = host->bank; 519 uint32_t bank = host->bank;
384 uint32_t ecc_tmp; 520 uint32_t ecc_tmp;
385 521
386 ecc_tmp = readl(&regs->bank_regs[bank].ecc1); 522 ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
387 ecc[0] = (uint8_t) (ecc_tmp >> 0); 523 ecc[0] = (uint8_t) (ecc_tmp >> 0);
388 ecc[1] = (uint8_t) (ecc_tmp >> 8); 524 ecc[1] = (uint8_t) (ecc_tmp >> 8);
389 ecc[2] = (uint8_t) (ecc_tmp >> 16); 525 ecc[2] = (uint8_t) (ecc_tmp >> 16);
@@ -391,6 +527,166 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
391 return 0; 527 return 0;
392} 528}
393 529
530/* Count the number of 0's in buff upto a max of max_bits */
531static int count_written_bits(uint8_t *buff, int size, int max_bits)
532{
533 int k, written_bits = 0;
534
535 for (k = 0; k < size; k++) {
536 written_bits += hweight8(~buff[k]);
537 if (written_bits > max_bits)
538 break;
539 }
540
541 return written_bits;
542}
543
544static void dma_complete(void *param)
545{
546 struct fsmc_nand_data *host = param;
547
548 complete(&host->dma_access_complete);
549}
550
551static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
552 enum dma_data_direction direction)
553{
554 struct dma_chan *chan;
555 struct dma_device *dma_dev;
556 struct dma_async_tx_descriptor *tx;
557 dma_addr_t dma_dst, dma_src, dma_addr;
558 dma_cookie_t cookie;
559 unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
560 int ret;
561
562 if (direction == DMA_TO_DEVICE)
563 chan = host->write_dma_chan;
564 else if (direction == DMA_FROM_DEVICE)
565 chan = host->read_dma_chan;
566 else
567 return -EINVAL;
568
569 dma_dev = chan->device;
570 dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
571
572 if (direction == DMA_TO_DEVICE) {
573 dma_src = dma_addr;
574 dma_dst = host->data_pa;
575 flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP;
576 } else {
577 dma_src = host->data_pa;
578 dma_dst = dma_addr;
579 flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP;
580 }
581
582 tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
583 len, flags);
584
585 if (!tx) {
586 dev_err(host->dev, "device_prep_dma_memcpy error\n");
587 dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
588 return -EIO;
589 }
590
591 tx->callback = dma_complete;
592 tx->callback_param = host;
593 cookie = tx->tx_submit(tx);
594
595 ret = dma_submit_error(cookie);
596 if (ret) {
597 dev_err(host->dev, "dma_submit_error %d\n", cookie);
598 return ret;
599 }
600
601 dma_async_issue_pending(chan);
602
603 ret =
604 wait_for_completion_interruptible_timeout(&host->dma_access_complete,
605 msecs_to_jiffies(3000));
606 if (ret <= 0) {
607 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
608 dev_err(host->dev, "wait_for_completion_timeout\n");
609 return ret ? ret : -ETIMEDOUT;
610 }
611
612 return 0;
613}
614
615/*
616 * fsmc_write_buf - write buffer to chip
617 * @mtd: MTD device structure
618 * @buf: data buffer
619 * @len: number of bytes to write
620 */
621static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
622{
623 int i;
624 struct nand_chip *chip = mtd->priv;
625
626 if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
627 IS_ALIGNED(len, sizeof(uint32_t))) {
628 uint32_t *p = (uint32_t *)buf;
629 len = len >> 2;
630 for (i = 0; i < len; i++)
631 writel(p[i], chip->IO_ADDR_W);
632 } else {
633 for (i = 0; i < len; i++)
634 writeb(buf[i], chip->IO_ADDR_W);
635 }
636}
637
638/*
639 * fsmc_read_buf - read chip data into buffer
640 * @mtd: MTD device structure
641 * @buf: buffer to store date
642 * @len: number of bytes to read
643 */
644static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
645{
646 int i;
647 struct nand_chip *chip = mtd->priv;
648
649 if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
650 IS_ALIGNED(len, sizeof(uint32_t))) {
651 uint32_t *p = (uint32_t *)buf;
652 len = len >> 2;
653 for (i = 0; i < len; i++)
654 p[i] = readl(chip->IO_ADDR_R);
655 } else {
656 for (i = 0; i < len; i++)
657 buf[i] = readb(chip->IO_ADDR_R);
658 }
659}
660
661/*
662 * fsmc_read_buf_dma - read chip data into buffer
663 * @mtd: MTD device structure
664 * @buf: buffer to store date
665 * @len: number of bytes to read
666 */
667static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
668{
669 struct fsmc_nand_data *host;
670
671 host = container_of(mtd, struct fsmc_nand_data, mtd);
672 dma_xfer(host, buf, len, DMA_FROM_DEVICE);
673}
674
675/*
676 * fsmc_write_buf_dma - write buffer to chip
677 * @mtd: MTD device structure
678 * @buf: data buffer
679 * @len: number of bytes to write
680 */
681static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
682 int len)
683{
684 struct fsmc_nand_data *host;
685
686 host = container_of(mtd, struct fsmc_nand_data, mtd);
687 dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
688}
689
394/* 690/*
395 * fsmc_read_page_hwecc 691 * fsmc_read_page_hwecc
396 * @mtd: mtd info structure 692 * @mtd: mtd info structure
@@ -426,7 +722,6 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
426 uint8_t *oob = (uint8_t *)&ecc_oob[0]; 722 uint8_t *oob = (uint8_t *)&ecc_oob[0];
427 723
428 for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) { 724 for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
429
430 chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page); 725 chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
431 chip->ecc.hwctl(mtd, NAND_ECC_READ); 726 chip->ecc.hwctl(mtd, NAND_ECC_READ);
432 chip->read_buf(mtd, p, eccsize); 727 chip->read_buf(mtd, p, eccsize);
@@ -437,17 +732,19 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
437 group++; 732 group++;
438 733
439 /* 734 /*
440 * length is intentionally kept a higher multiple of 2 735 * length is intentionally kept a higher multiple of 2
441 * to read at least 13 bytes even in case of 16 bit NAND 736 * to read at least 13 bytes even in case of 16 bit NAND
442 * devices 737 * devices
443 */ 738 */
444 len = roundup(len, 2); 739 if (chip->options & NAND_BUSWIDTH_16)
740 len = roundup(len, 2);
741
445 chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page); 742 chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
446 chip->read_buf(mtd, oob + j, len); 743 chip->read_buf(mtd, oob + j, len);
447 j += len; 744 j += len;
448 } 745 }
449 746
450 memcpy(&ecc_code[i], oob, 13); 747 memcpy(&ecc_code[i], oob, chip->ecc.bytes);
451 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 748 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
452 749
453 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); 750 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
@@ -461,7 +758,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
461} 758}
462 759
463/* 760/*
464 * fsmc_correct_data 761 * fsmc_bch8_correct_data
465 * @mtd: mtd info structure 762 * @mtd: mtd info structure
466 * @dat: buffer of read data 763 * @dat: buffer of read data
467 * @read_ecc: ecc read from device spare area 764 * @read_ecc: ecc read from device spare area
@@ -470,19 +767,51 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
470 * calc_ecc is a 104 bit information containing maximum of 8 error 767 * calc_ecc is a 104 bit information containing maximum of 8 error
471 * offset informations of 13 bits each in 512 bytes of read data. 768 * offset informations of 13 bits each in 512 bytes of read data.
472 */ 769 */
473static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat, 770static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
474 uint8_t *read_ecc, uint8_t *calc_ecc) 771 uint8_t *read_ecc, uint8_t *calc_ecc)
475{ 772{
476 struct fsmc_nand_data *host = container_of(mtd, 773 struct fsmc_nand_data *host = container_of(mtd,
477 struct fsmc_nand_data, mtd); 774 struct fsmc_nand_data, mtd);
478 struct fsmc_regs *regs = host->regs_va; 775 struct nand_chip *chip = mtd->priv;
776 void __iomem *regs = host->regs_va;
479 unsigned int bank = host->bank; 777 unsigned int bank = host->bank;
480 uint16_t err_idx[8]; 778 uint32_t err_idx[8];
481 uint64_t ecc_data[2];
482 uint32_t num_err, i; 779 uint32_t num_err, i;
780 uint32_t ecc1, ecc2, ecc3, ecc4;
781
782 num_err = (readl(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
783
784 /* no bit flipping */
785 if (likely(num_err == 0))
786 return 0;
787
788 /* too many errors */
789 if (unlikely(num_err > 8)) {
790 /*
791 * This is a temporary erase check. A newly erased page read
792 * would result in an ecc error because the oob data is also
793 * erased to FF and the calculated ecc for an FF data is not
794 * FF..FF.
795 * This is a workaround to skip performing correction in case
796 * data is FF..FF
797 *
798 * Logic:
799 * For every page, each bit written as 0 is counted until these
800 * number of bits are greater than 8 (the maximum correction
801 * capability of FSMC for each 512 + 13 bytes)
802 */
803
804 int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
805 int bits_data = count_written_bits(dat, chip->ecc.size, 8);
806
807 if ((bits_ecc + bits_data) <= 8) {
808 if (bits_data)
809 memset(dat, 0xff, chip->ecc.size);
810 return bits_data;
811 }
483 812
484 /* The calculated ecc is actually the correction index in data */ 813 return -EBADMSG;
485 memcpy(ecc_data, calc_ecc, 13); 814 }
486 815
487 /* 816 /*
488 * ------------------- calc_ecc[] bit wise -----------|--13 bits--| 817 * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
@@ -493,27 +822,26 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
493 * uint64_t array and error offset indexes are populated in err_idx 822 * uint64_t array and error offset indexes are populated in err_idx
494 * array 823 * array
495 */ 824 */
496 for (i = 0; i < 8; i++) { 825 ecc1 = readl(FSMC_NAND_REG(regs, bank, ECC1));
497 if (i == 4) { 826 ecc2 = readl(FSMC_NAND_REG(regs, bank, ECC2));
498 err_idx[4] = ((ecc_data[1] & 0x1) << 12) | ecc_data[0]; 827 ecc3 = readl(FSMC_NAND_REG(regs, bank, ECC3));
499 ecc_data[1] >>= 1; 828 ecc4 = readl(FSMC_NAND_REG(regs, bank, STS));
500 continue; 829
501 } 830 err_idx[0] = (ecc1 >> 0) & 0x1FFF;
502 err_idx[i] = (ecc_data[i/4] & 0x1FFF); 831 err_idx[1] = (ecc1 >> 13) & 0x1FFF;
503 ecc_data[i/4] >>= 13; 832 err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
504 } 833 err_idx[3] = (ecc2 >> 7) & 0x1FFF;
505 834 err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
506 num_err = (readl(&regs->bank_regs[bank].sts) >> 10) & 0xF; 835 err_idx[5] = (ecc3 >> 1) & 0x1FFF;
507 836 err_idx[6] = (ecc3 >> 14) & 0x1FFF;
508 if (num_err == 0xF) 837 err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);
509 return -EBADMSG;
510 838
511 i = 0; 839 i = 0;
512 while (num_err--) { 840 while (num_err--) {
513 change_bit(0, (unsigned long *)&err_idx[i]); 841 change_bit(0, (unsigned long *)&err_idx[i]);
514 change_bit(1, (unsigned long *)&err_idx[i]); 842 change_bit(1, (unsigned long *)&err_idx[i]);
515 843
516 if (err_idx[i] <= 512 * 8) { 844 if (err_idx[i] < chip->ecc.size * 8) {
517 change_bit(err_idx[i], (unsigned long *)dat); 845 change_bit(err_idx[i], (unsigned long *)dat);
518 i++; 846 i++;
519 } 847 }
@@ -521,6 +849,44 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
521 return i; 849 return i;
522} 850}
523 851
852static bool filter(struct dma_chan *chan, void *slave)
853{
854 chan->private = slave;
855 return true;
856}
857
858#ifdef CONFIG_OF
859static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
860 struct device_node *np)
861{
862 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
863 u32 val;
864
865 /* Set default NAND width to 8 bits */
866 pdata->width = 8;
867 if (!of_property_read_u32(np, "bank-width", &val)) {
868 if (val == 2) {
869 pdata->width = 16;
870 } else if (val != 1) {
871 dev_err(&pdev->dev, "invalid bank-width %u\n", val);
872 return -EINVAL;
873 }
874 }
875 of_property_read_u32(np, "st,ale-off", &pdata->ale_off);
876 of_property_read_u32(np, "st,cle-off", &pdata->cle_off);
877 if (of_get_property(np, "nand-skip-bbtscan", NULL))
878 pdata->options = NAND_SKIP_BBTSCAN;
879
880 return 0;
881}
882#else
883static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev,
884 struct device_node *np)
885{
886 return -ENOSYS;
887}
888#endif
889
524/* 890/*
525 * fsmc_nand_probe - Probe function 891 * fsmc_nand_probe - Probe function
526 * @pdev: platform device structure 892 * @pdev: platform device structure
@@ -528,102 +894,109 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat,
528static int __init fsmc_nand_probe(struct platform_device *pdev) 894static int __init fsmc_nand_probe(struct platform_device *pdev)
529{ 895{
530 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); 896 struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
897 struct device_node __maybe_unused *np = pdev->dev.of_node;
898 struct mtd_part_parser_data ppdata = {};
531 struct fsmc_nand_data *host; 899 struct fsmc_nand_data *host;
532 struct mtd_info *mtd; 900 struct mtd_info *mtd;
533 struct nand_chip *nand; 901 struct nand_chip *nand;
534 struct fsmc_regs *regs;
535 struct resource *res; 902 struct resource *res;
903 dma_cap_mask_t mask;
536 int ret = 0; 904 int ret = 0;
537 u32 pid; 905 u32 pid;
538 int i; 906 int i;
539 907
908 if (np) {
909 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
910 pdev->dev.platform_data = pdata;
911 ret = fsmc_nand_probe_config_dt(pdev, np);
912 if (ret) {
913 dev_err(&pdev->dev, "no platform data\n");
914 return -ENODEV;
915 }
916 }
917
540 if (!pdata) { 918 if (!pdata) {
541 dev_err(&pdev->dev, "platform data is NULL\n"); 919 dev_err(&pdev->dev, "platform data is NULL\n");
542 return -EINVAL; 920 return -EINVAL;
543 } 921 }
544 922
545 /* Allocate memory for the device structure (and zero it) */ 923 /* Allocate memory for the device structure (and zero it) */
546 host = kzalloc(sizeof(*host), GFP_KERNEL); 924 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
547 if (!host) { 925 if (!host) {
548 dev_err(&pdev->dev, "failed to allocate device structure\n"); 926 dev_err(&pdev->dev, "failed to allocate device structure\n");
549 return -ENOMEM; 927 return -ENOMEM;
550 } 928 }
551 929
552 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); 930 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
553 if (!res) { 931 if (!res)
554 ret = -EIO; 932 return -EINVAL;
555 goto err_probe1;
556 }
557 933
558 host->resdata = request_mem_region(res->start, resource_size(res), 934 if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
559 pdev->name); 935 pdev->name)) {
560 if (!host->resdata) { 936 dev_err(&pdev->dev, "Failed to get memory data resourse\n");
561 ret = -EIO; 937 return -ENOENT;
562 goto err_probe1;
563 } 938 }
564 939
565 host->data_va = ioremap(res->start, resource_size(res)); 940 host->data_pa = (dma_addr_t)res->start;
941 host->data_va = devm_ioremap(&pdev->dev, res->start,
942 resource_size(res));
566 if (!host->data_va) { 943 if (!host->data_va) {
567 ret = -EIO; 944 dev_err(&pdev->dev, "data ioremap failed\n");
568 goto err_probe1; 945 return -ENOMEM;
569 } 946 }
570 947
571 host->resaddr = request_mem_region(res->start + PLAT_NAND_ALE, 948 if (!devm_request_mem_region(&pdev->dev, res->start + pdata->ale_off,
572 resource_size(res), pdev->name); 949 resource_size(res), pdev->name)) {
573 if (!host->resaddr) { 950 dev_err(&pdev->dev, "Failed to get memory ale resourse\n");
574 ret = -EIO; 951 return -ENOENT;
575 goto err_probe1;
576 } 952 }
577 953
578 host->addr_va = ioremap(res->start + PLAT_NAND_ALE, resource_size(res)); 954 host->addr_va = devm_ioremap(&pdev->dev, res->start + pdata->ale_off,
955 resource_size(res));
579 if (!host->addr_va) { 956 if (!host->addr_va) {
580 ret = -EIO; 957 dev_err(&pdev->dev, "ale ioremap failed\n");
581 goto err_probe1; 958 return -ENOMEM;
582 } 959 }
583 960
584 host->rescmd = request_mem_region(res->start + PLAT_NAND_CLE, 961 if (!devm_request_mem_region(&pdev->dev, res->start + pdata->cle_off,
585 resource_size(res), pdev->name); 962 resource_size(res), pdev->name)) {
586 if (!host->rescmd) { 963 dev_err(&pdev->dev, "Failed to get memory cle resourse\n");
587 ret = -EIO; 964 return -ENOENT;
588 goto err_probe1;
589 } 965 }
590 966
591 host->cmd_va = ioremap(res->start + PLAT_NAND_CLE, resource_size(res)); 967 host->cmd_va = devm_ioremap(&pdev->dev, res->start + pdata->cle_off,
968 resource_size(res));
592 if (!host->cmd_va) { 969 if (!host->cmd_va) {
593 ret = -EIO; 970 dev_err(&pdev->dev, "ale ioremap failed\n");
594 goto err_probe1; 971 return -ENOMEM;
595 } 972 }
596 973
597 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs"); 974 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
598 if (!res) { 975 if (!res)
599 ret = -EIO; 976 return -EINVAL;
600 goto err_probe1;
601 }
602 977
603 host->resregs = request_mem_region(res->start, resource_size(res), 978 if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
604 pdev->name); 979 pdev->name)) {
605 if (!host->resregs) { 980 dev_err(&pdev->dev, "Failed to get memory regs resourse\n");
606 ret = -EIO; 981 return -ENOENT;
607 goto err_probe1;
608 } 982 }
609 983
610 host->regs_va = ioremap(res->start, resource_size(res)); 984 host->regs_va = devm_ioremap(&pdev->dev, res->start,
985 resource_size(res));
611 if (!host->regs_va) { 986 if (!host->regs_va) {
612 ret = -EIO; 987 dev_err(&pdev->dev, "regs ioremap failed\n");
613 goto err_probe1; 988 return -ENOMEM;
614 } 989 }
615 990
616 host->clk = clk_get(&pdev->dev, NULL); 991 host->clk = clk_get(&pdev->dev, NULL);
617 if (IS_ERR(host->clk)) { 992 if (IS_ERR(host->clk)) {
618 dev_err(&pdev->dev, "failed to fetch block clock\n"); 993 dev_err(&pdev->dev, "failed to fetch block clock\n");
619 ret = PTR_ERR(host->clk); 994 return PTR_ERR(host->clk);
620 host->clk = NULL;
621 goto err_probe1;
622 } 995 }
623 996
624 ret = clk_enable(host->clk); 997 ret = clk_enable(host->clk);
625 if (ret) 998 if (ret)
626 goto err_probe1; 999 goto err_clk_enable;
627 1000
628 /* 1001 /*
629 * This device ID is actually a common AMBA ID as used on the 1002 * This device ID is actually a common AMBA ID as used on the
@@ -639,7 +1012,14 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
639 1012
640 host->bank = pdata->bank; 1013 host->bank = pdata->bank;
641 host->select_chip = pdata->select_bank; 1014 host->select_chip = pdata->select_bank;
642 regs = host->regs_va; 1015 host->partitions = pdata->partitions;
1016 host->nr_partitions = pdata->nr_partitions;
1017 host->dev = &pdev->dev;
1018 host->dev_timings = pdata->nand_timings;
1019 host->mode = pdata->mode;
1020
1021 if (host->mode == USE_DMA_ACCESS)
1022 init_completion(&host->dma_access_complete);
643 1023
644 /* Link all private pointers */ 1024 /* Link all private pointers */
645 mtd = &host->mtd; 1025 mtd = &host->mtd;
@@ -658,21 +1038,53 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
658 nand->ecc.size = 512; 1038 nand->ecc.size = 512;
659 nand->options = pdata->options; 1039 nand->options = pdata->options;
660 nand->select_chip = fsmc_select_chip; 1040 nand->select_chip = fsmc_select_chip;
1041 nand->badblockbits = 7;
661 1042
662 if (pdata->width == FSMC_NAND_BW16) 1043 if (pdata->width == FSMC_NAND_BW16)
663 nand->options |= NAND_BUSWIDTH_16; 1044 nand->options |= NAND_BUSWIDTH_16;
664 1045
665 fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16); 1046 switch (host->mode) {
1047 case USE_DMA_ACCESS:
1048 dma_cap_zero(mask);
1049 dma_cap_set(DMA_MEMCPY, mask);
1050 host->read_dma_chan = dma_request_channel(mask, filter,
1051 pdata->read_dma_priv);
1052 if (!host->read_dma_chan) {
1053 dev_err(&pdev->dev, "Unable to get read dma channel\n");
1054 goto err_req_read_chnl;
1055 }
1056 host->write_dma_chan = dma_request_channel(mask, filter,
1057 pdata->write_dma_priv);
1058 if (!host->write_dma_chan) {
1059 dev_err(&pdev->dev, "Unable to get write dma channel\n");
1060 goto err_req_write_chnl;
1061 }
1062 nand->read_buf = fsmc_read_buf_dma;
1063 nand->write_buf = fsmc_write_buf_dma;
1064 break;
1065
1066 default:
1067 case USE_WORD_ACCESS:
1068 nand->read_buf = fsmc_read_buf;
1069 nand->write_buf = fsmc_write_buf;
1070 break;
1071 }
1072
1073 fsmc_nand_setup(host->regs_va, host->bank,
1074 nand->options & NAND_BUSWIDTH_16,
1075 host->dev_timings);
666 1076
667 if (AMBA_REV_BITS(host->pid) >= 8) { 1077 if (AMBA_REV_BITS(host->pid) >= 8) {
668 nand->ecc.read_page = fsmc_read_page_hwecc; 1078 nand->ecc.read_page = fsmc_read_page_hwecc;
669 nand->ecc.calculate = fsmc_read_hwecc_ecc4; 1079 nand->ecc.calculate = fsmc_read_hwecc_ecc4;
670 nand->ecc.correct = fsmc_correct_data; 1080 nand->ecc.correct = fsmc_bch8_correct_data;
671 nand->ecc.bytes = 13; 1081 nand->ecc.bytes = 13;
1082 nand->ecc.strength = 8;
672 } else { 1083 } else {
673 nand->ecc.calculate = fsmc_read_hwecc_ecc1; 1084 nand->ecc.calculate = fsmc_read_hwecc_ecc1;
674 nand->ecc.correct = nand_correct_data; 1085 nand->ecc.correct = nand_correct_data;
675 nand->ecc.bytes = 3; 1086 nand->ecc.bytes = 3;
1087 nand->ecc.strength = 1;
676 } 1088 }
677 1089
678 /* 1090 /*
@@ -681,19 +1093,52 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
681 if (nand_scan_ident(&host->mtd, 1, NULL)) { 1093 if (nand_scan_ident(&host->mtd, 1, NULL)) {
682 ret = -ENXIO; 1094 ret = -ENXIO;
683 dev_err(&pdev->dev, "No NAND Device found!\n"); 1095 dev_err(&pdev->dev, "No NAND Device found!\n");
684 goto err_probe; 1096 goto err_scan_ident;
685 } 1097 }
686 1098
687 if (AMBA_REV_BITS(host->pid) >= 8) { 1099 if (AMBA_REV_BITS(host->pid) >= 8) {
688 if (host->mtd.writesize == 512) { 1100 switch (host->mtd.oobsize) {
689 nand->ecc.layout = &fsmc_ecc4_sp_layout; 1101 case 16:
1102 nand->ecc.layout = &fsmc_ecc4_16_layout;
690 host->ecc_place = &fsmc_ecc4_sp_place; 1103 host->ecc_place = &fsmc_ecc4_sp_place;
691 } else { 1104 break;
692 nand->ecc.layout = &fsmc_ecc4_lp_layout; 1105 case 64:
1106 nand->ecc.layout = &fsmc_ecc4_64_layout;
1107 host->ecc_place = &fsmc_ecc4_lp_place;
1108 break;
1109 case 128:
1110 nand->ecc.layout = &fsmc_ecc4_128_layout;
1111 host->ecc_place = &fsmc_ecc4_lp_place;
1112 break;
1113 case 224:
1114 nand->ecc.layout = &fsmc_ecc4_224_layout;
693 host->ecc_place = &fsmc_ecc4_lp_place; 1115 host->ecc_place = &fsmc_ecc4_lp_place;
1116 break;
1117 case 256:
1118 nand->ecc.layout = &fsmc_ecc4_256_layout;
1119 host->ecc_place = &fsmc_ecc4_lp_place;
1120 break;
1121 default:
1122 printk(KERN_WARNING "No oob scheme defined for "
1123 "oobsize %d\n", mtd->oobsize);
1124 BUG();
694 } 1125 }
695 } else { 1126 } else {
696 nand->ecc.layout = &fsmc_ecc1_layout; 1127 switch (host->mtd.oobsize) {
1128 case 16:
1129 nand->ecc.layout = &fsmc_ecc1_16_layout;
1130 break;
1131 case 64:
1132 nand->ecc.layout = &fsmc_ecc1_64_layout;
1133 break;
1134 case 128:
1135 nand->ecc.layout = &fsmc_ecc1_128_layout;
1136 break;
1137 default:
1138 printk(KERN_WARNING "No oob scheme defined for "
1139 "oobsize %d\n", mtd->oobsize);
1140 BUG();
1141 }
697 } 1142 }
698 1143
699 /* Second stage of scan to fill MTD data-structures */ 1144 /* Second stage of scan to fill MTD data-structures */
@@ -713,13 +1158,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
713 * Check for partition info passed 1158 * Check for partition info passed
714 */ 1159 */
715 host->mtd.name = "nand"; 1160 host->mtd.name = "nand";
716 ret = mtd_device_parse_register(&host->mtd, NULL, 0, 1161 ppdata.of_node = np;
717 host->mtd.size <= 0x04000000 ? 1162 ret = mtd_device_parse_register(&host->mtd, NULL, &ppdata,
718 partition_info_16KB_blk : 1163 host->partitions, host->nr_partitions);
719 partition_info_128KB_blk,
720 host->mtd.size <= 0x04000000 ?
721 ARRAY_SIZE(partition_info_16KB_blk) :
722 ARRAY_SIZE(partition_info_128KB_blk));
723 if (ret) 1164 if (ret)
724 goto err_probe; 1165 goto err_probe;
725 1166
@@ -728,32 +1169,16 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
728 return 0; 1169 return 0;
729 1170
730err_probe: 1171err_probe:
1172err_scan_ident:
1173 if (host->mode == USE_DMA_ACCESS)
1174 dma_release_channel(host->write_dma_chan);
1175err_req_write_chnl:
1176 if (host->mode == USE_DMA_ACCESS)
1177 dma_release_channel(host->read_dma_chan);
1178err_req_read_chnl:
731 clk_disable(host->clk); 1179 clk_disable(host->clk);
732err_probe1: 1180err_clk_enable:
733 if (host->clk) 1181 clk_put(host->clk);
734 clk_put(host->clk);
735 if (host->regs_va)
736 iounmap(host->regs_va);
737 if (host->resregs)
738 release_mem_region(host->resregs->start,
739 resource_size(host->resregs));
740 if (host->cmd_va)
741 iounmap(host->cmd_va);
742 if (host->rescmd)
743 release_mem_region(host->rescmd->start,
744 resource_size(host->rescmd));
745 if (host->addr_va)
746 iounmap(host->addr_va);
747 if (host->resaddr)
748 release_mem_region(host->resaddr->start,
749 resource_size(host->resaddr));
750 if (host->data_va)
751 iounmap(host->data_va);
752 if (host->resdata)
753 release_mem_region(host->resdata->start,
754 resource_size(host->resdata));
755
756 kfree(host);
757 return ret; 1182 return ret;
758} 1183}
759 1184
@@ -768,24 +1193,15 @@ static int fsmc_nand_remove(struct platform_device *pdev)
768 1193
769 if (host) { 1194 if (host) {
770 nand_release(&host->mtd); 1195 nand_release(&host->mtd);
1196
1197 if (host->mode == USE_DMA_ACCESS) {
1198 dma_release_channel(host->write_dma_chan);
1199 dma_release_channel(host->read_dma_chan);
1200 }
771 clk_disable(host->clk); 1201 clk_disable(host->clk);
772 clk_put(host->clk); 1202 clk_put(host->clk);
773
774 iounmap(host->regs_va);
775 release_mem_region(host->resregs->start,
776 resource_size(host->resregs));
777 iounmap(host->cmd_va);
778 release_mem_region(host->rescmd->start,
779 resource_size(host->rescmd));
780 iounmap(host->addr_va);
781 release_mem_region(host->resaddr->start,
782 resource_size(host->resaddr));
783 iounmap(host->data_va);
784 release_mem_region(host->resdata->start,
785 resource_size(host->resdata));
786
787 kfree(host);
788 } 1203 }
1204
789 return 0; 1205 return 0;
790} 1206}
791 1207
@@ -801,15 +1217,24 @@ static int fsmc_nand_suspend(struct device *dev)
801static int fsmc_nand_resume(struct device *dev) 1217static int fsmc_nand_resume(struct device *dev)
802{ 1218{
803 struct fsmc_nand_data *host = dev_get_drvdata(dev); 1219 struct fsmc_nand_data *host = dev_get_drvdata(dev);
804 if (host) 1220 if (host) {
805 clk_enable(host->clk); 1221 clk_enable(host->clk);
1222 fsmc_nand_setup(host->regs_va, host->bank,
1223 host->nand.options & NAND_BUSWIDTH_16,
1224 host->dev_timings);
1225 }
806 return 0; 1226 return 0;
807} 1227}
808 1228
809static const struct dev_pm_ops fsmc_nand_pm_ops = { 1229static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
810 .suspend = fsmc_nand_suspend, 1230#endif
811 .resume = fsmc_nand_resume, 1231
1232#ifdef CONFIG_OF
1233static const struct of_device_id fsmc_nand_id_table[] = {
1234 { .compatible = "st,spear600-fsmc-nand" },
1235 {}
812}; 1236};
1237MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
813#endif 1238#endif
814 1239
815static struct platform_driver fsmc_nand_driver = { 1240static struct platform_driver fsmc_nand_driver = {
@@ -817,6 +1242,7 @@ static struct platform_driver fsmc_nand_driver = {
817 .driver = { 1242 .driver = {
818 .owner = THIS_MODULE, 1243 .owner = THIS_MODULE,
819 .name = "fsmc-nand", 1244 .name = "fsmc-nand",
1245 .of_match_table = of_match_ptr(fsmc_nand_id_table),
820#ifdef CONFIG_PM 1246#ifdef CONFIG_PM
821 .pm = &fsmc_nand_pm_ops, 1247 .pm = &fsmc_nand_pm_ops,
822#endif 1248#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 7db6555ed3ba..e8ea7107932e 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -835,7 +835,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
835 | BM_GPMI_CTRL0_ADDRESS_INCREMENT 835 | BM_GPMI_CTRL0_ADDRESS_INCREMENT
836 | BF_GPMI_CTRL0_XFER_COUNT(this->command_length); 836 | BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
837 pio[1] = pio[2] = 0; 837 pio[1] = pio[2] = 0;
838 desc = channel->device->device_prep_slave_sg(channel, 838 desc = dmaengine_prep_slave_sg(channel,
839 (struct scatterlist *)pio, 839 (struct scatterlist *)pio,
840 ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); 840 ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
841 if (!desc) { 841 if (!desc) {
@@ -848,8 +848,10 @@ int gpmi_send_command(struct gpmi_nand_data *this)
848 848
849 sg_init_one(sgl, this->cmd_buffer, this->command_length); 849 sg_init_one(sgl, this->cmd_buffer, this->command_length);
850 dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); 850 dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
851 desc = channel->device->device_prep_slave_sg(channel, 851 desc = dmaengine_prep_slave_sg(channel,
852 sgl, 1, DMA_MEM_TO_DEV, 1); 852 sgl, 1, DMA_MEM_TO_DEV,
853 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
854
853 if (!desc) { 855 if (!desc) {
854 pr_err("step 2 error\n"); 856 pr_err("step 2 error\n");
855 return -1; 857 return -1;
@@ -880,8 +882,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
880 | BF_GPMI_CTRL0_ADDRESS(address) 882 | BF_GPMI_CTRL0_ADDRESS(address)
881 | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len); 883 | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
882 pio[1] = 0; 884 pio[1] = 0;
883 desc = channel->device->device_prep_slave_sg(channel, 885 desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
884 (struct scatterlist *)pio,
885 ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); 886 ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
886 if (!desc) { 887 if (!desc) {
887 pr_err("step 1 error\n"); 888 pr_err("step 1 error\n");
@@ -890,8 +891,9 @@ int gpmi_send_data(struct gpmi_nand_data *this)
890 891
891 /* [2] send DMA request */ 892 /* [2] send DMA request */
892 prepare_data_dma(this, DMA_TO_DEVICE); 893 prepare_data_dma(this, DMA_TO_DEVICE);
893 desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl, 894 desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
894 1, DMA_MEM_TO_DEV, 1); 895 1, DMA_MEM_TO_DEV,
896 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
895 if (!desc) { 897 if (!desc) {
896 pr_err("step 2 error\n"); 898 pr_err("step 2 error\n");
897 return -1; 899 return -1;
@@ -916,7 +918,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
916 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA) 918 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
917 | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len); 919 | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
918 pio[1] = 0; 920 pio[1] = 0;
919 desc = channel->device->device_prep_slave_sg(channel, 921 desc = dmaengine_prep_slave_sg(channel,
920 (struct scatterlist *)pio, 922 (struct scatterlist *)pio,
921 ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); 923 ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
922 if (!desc) { 924 if (!desc) {
@@ -926,8 +928,9 @@ int gpmi_read_data(struct gpmi_nand_data *this)
926 928
927 /* [2] : send DMA request */ 929 /* [2] : send DMA request */
928 prepare_data_dma(this, DMA_FROM_DEVICE); 930 prepare_data_dma(this, DMA_FROM_DEVICE);
929 desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl, 931 desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
930 1, DMA_DEV_TO_MEM, 1); 932 1, DMA_DEV_TO_MEM,
933 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
931 if (!desc) { 934 if (!desc) {
932 pr_err("step 2 error\n"); 935 pr_err("step 2 error\n");
933 return -1; 936 return -1;
@@ -972,9 +975,10 @@ int gpmi_send_page(struct gpmi_nand_data *this,
972 pio[4] = payload; 975 pio[4] = payload;
973 pio[5] = auxiliary; 976 pio[5] = auxiliary;
974 977
975 desc = channel->device->device_prep_slave_sg(channel, 978 desc = dmaengine_prep_slave_sg(channel,
976 (struct scatterlist *)pio, 979 (struct scatterlist *)pio,
977 ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); 980 ARRAY_SIZE(pio), DMA_TRANS_NONE,
981 DMA_CTRL_ACK);
978 if (!desc) { 982 if (!desc) {
979 pr_err("step 2 error\n"); 983 pr_err("step 2 error\n");
980 return -1; 984 return -1;
@@ -1007,7 +1011,7 @@ int gpmi_read_page(struct gpmi_nand_data *this,
1007 | BF_GPMI_CTRL0_ADDRESS(address) 1011 | BF_GPMI_CTRL0_ADDRESS(address)
1008 | BF_GPMI_CTRL0_XFER_COUNT(0); 1012 | BF_GPMI_CTRL0_XFER_COUNT(0);
1009 pio[1] = 0; 1013 pio[1] = 0;
1010 desc = channel->device->device_prep_slave_sg(channel, 1014 desc = dmaengine_prep_slave_sg(channel,
1011 (struct scatterlist *)pio, 2, 1015 (struct scatterlist *)pio, 2,
1012 DMA_TRANS_NONE, 0); 1016 DMA_TRANS_NONE, 0);
1013 if (!desc) { 1017 if (!desc) {
@@ -1036,9 +1040,10 @@ int gpmi_read_page(struct gpmi_nand_data *this,
1036 pio[3] = geo->page_size; 1040 pio[3] = geo->page_size;
1037 pio[4] = payload; 1041 pio[4] = payload;
1038 pio[5] = auxiliary; 1042 pio[5] = auxiliary;
1039 desc = channel->device->device_prep_slave_sg(channel, 1043 desc = dmaengine_prep_slave_sg(channel,
1040 (struct scatterlist *)pio, 1044 (struct scatterlist *)pio,
1041 ARRAY_SIZE(pio), DMA_TRANS_NONE, 1); 1045 ARRAY_SIZE(pio), DMA_TRANS_NONE,
1046 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1042 if (!desc) { 1047 if (!desc) {
1043 pr_err("step 2 error\n"); 1048 pr_err("step 2 error\n");
1044 return -1; 1049 return -1;
@@ -1055,9 +1060,11 @@ int gpmi_read_page(struct gpmi_nand_data *this,
1055 | BF_GPMI_CTRL0_ADDRESS(address) 1060 | BF_GPMI_CTRL0_ADDRESS(address)
1056 | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); 1061 | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
1057 pio[1] = 0; 1062 pio[1] = 0;
1058 desc = channel->device->device_prep_slave_sg(channel, 1063 pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
1059 (struct scatterlist *)pio, 2, 1064 desc = dmaengine_prep_slave_sg(channel,
1060 DMA_TRANS_NONE, 1); 1065 (struct scatterlist *)pio, 3,
1066 DMA_TRANS_NONE,
1067 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1061 if (!desc) { 1068 if (!desc) {
1062 pr_err("step 3 error\n"); 1069 pr_err("step 3 error\n");
1063 return -1; 1070 return -1;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 493ec2fcf97f..75b1dde16358 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1124,7 +1124,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
1124 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); 1124 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
1125 1125
1126 /* Do we have a flash based bad block table ? */ 1126 /* Do we have a flash based bad block table ? */
1127 if (chip->options & NAND_BBT_USE_FLASH) 1127 if (chip->bbt_options & NAND_BBT_USE_FLASH)
1128 ret = nand_update_bbt(mtd, ofs); 1128 ret = nand_update_bbt(mtd, ofs);
1129 else { 1129 else {
1130 chipnr = (int)(ofs >> chip->chip_shift); 1130 chipnr = (int)(ofs >> chip->chip_shift);
@@ -1155,7 +1155,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
1155 return ret; 1155 return ret;
1156} 1156}
1157 1157
1158static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this) 1158static int nand_boot_set_geometry(struct gpmi_nand_data *this)
1159{ 1159{
1160 struct boot_rom_geometry *geometry = &this->rom_geometry; 1160 struct boot_rom_geometry *geometry = &this->rom_geometry;
1161 1161
@@ -1182,7 +1182,7 @@ static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this)
1182} 1182}
1183 1183
1184static const char *fingerprint = "STMP"; 1184static const char *fingerprint = "STMP";
1185static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this) 1185static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1186{ 1186{
1187 struct boot_rom_geometry *rom_geo = &this->rom_geometry; 1187 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1188 struct device *dev = this->dev; 1188 struct device *dev = this->dev;
@@ -1239,7 +1239,7 @@ static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1239} 1239}
1240 1240
1241/* Writes a transcription stamp. */ 1241/* Writes a transcription stamp. */
1242static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this) 1242static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1243{ 1243{
1244 struct device *dev = this->dev; 1244 struct device *dev = this->dev;
1245 struct boot_rom_geometry *rom_geo = &this->rom_geometry; 1245 struct boot_rom_geometry *rom_geo = &this->rom_geometry;
@@ -1322,7 +1322,7 @@ static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1322 return 0; 1322 return 0;
1323} 1323}
1324 1324
1325static int __devinit mx23_boot_init(struct gpmi_nand_data *this) 1325static int mx23_boot_init(struct gpmi_nand_data *this)
1326{ 1326{
1327 struct device *dev = this->dev; 1327 struct device *dev = this->dev;
1328 struct nand_chip *chip = &this->nand; 1328 struct nand_chip *chip = &this->nand;
@@ -1391,7 +1391,7 @@ static int __devinit mx23_boot_init(struct gpmi_nand_data *this)
1391 return 0; 1391 return 0;
1392} 1392}
1393 1393
1394static int __devinit nand_boot_init(struct gpmi_nand_data *this) 1394static int nand_boot_init(struct gpmi_nand_data *this)
1395{ 1395{
1396 nand_boot_set_geometry(this); 1396 nand_boot_set_geometry(this);
1397 1397
@@ -1401,7 +1401,7 @@ static int __devinit nand_boot_init(struct gpmi_nand_data *this)
1401 return 0; 1401 return 0;
1402} 1402}
1403 1403
1404static int __devinit gpmi_set_geometry(struct gpmi_nand_data *this) 1404static int gpmi_set_geometry(struct gpmi_nand_data *this)
1405{ 1405{
1406 int ret; 1406 int ret;
1407 1407
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index e023bccb7781..ec6180d4ff8f 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -20,7 +20,7 @@
20#include <linux/mtd/nand.h> 20#include <linux/mtd/nand.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <mach/dma.h> 23#include <linux/fsl/mxs-dma.h>
24 24
25struct resources { 25struct resources {
26 void *gpmi_regs; 26 void *gpmi_regs;
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 5dc6f0d92f1a..11e487813428 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -135,8 +135,8 @@ static int __init h1910_init(void)
135 } 135 }
136 136
137 /* Register the partitions */ 137 /* Register the partitions */
138 mtd_device_parse_register(h1910_nand_mtd, NULL, 0, 138 mtd_device_parse_register(h1910_nand_mtd, NULL, NULL, partition_info,
139 partition_info, NUM_PARTITIONS); 139 NUM_PARTITIONS);
140 140
141 /* Return happy */ 141 /* Return happy */
142 return 0; 142 return 0;
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index ac3b9f255e00..e4147e8acb7c 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -332,6 +332,11 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
332 chip->ecc.mode = NAND_ECC_HW_OOB_FIRST; 332 chip->ecc.mode = NAND_ECC_HW_OOB_FIRST;
333 chip->ecc.size = 512; 333 chip->ecc.size = 512;
334 chip->ecc.bytes = 9; 334 chip->ecc.bytes = 9;
335 chip->ecc.strength = 2;
336 /*
337 * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a
338 * conservative guess, given 9 ecc bytes and reed-solomon alg.
339 */
335 340
336 if (pdata) 341 if (pdata)
337 chip->ecc.layout = pdata->ecc_layout; 342 chip->ecc.layout = pdata->ecc_layout;
@@ -367,9 +372,9 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
367 goto err_gpio_free; 372 goto err_gpio_free;
368 } 373 }
369 374
370 ret = mtd_device_parse_register(mtd, NULL, 0, 375 ret = mtd_device_parse_register(mtd, NULL, NULL,
371 pdata ? pdata->partitions : NULL, 376 pdata ? pdata->partitions : NULL,
372 pdata ? pdata->num_partitions : 0); 377 pdata ? pdata->num_partitions : 0);
373 378
374 if (ret) { 379 if (ret) {
375 dev_err(&pdev->dev, "Failed to add mtd device\n"); 380 dev_err(&pdev->dev, "Failed to add mtd device\n");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 74a43b818d0e..cc0678a967c1 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1225,9 +1225,16 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1225 goto escan; 1225 goto escan;
1226 } 1226 }
1227 1227
1228 if (this->ecc.mode == NAND_ECC_HW) {
1229 if (nfc_is_v1())
1230 this->ecc.strength = 1;
1231 else
1232 this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
1233 }
1234
1228 /* Register the partitions */ 1235 /* Register the partitions */
1229 mtd_device_parse_register(mtd, part_probes, 0, 1236 mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts,
1230 pdata->parts, pdata->nr_parts); 1237 pdata->nr_parts);
1231 1238
1232 platform_set_drvdata(pdev, host); 1239 platform_set_drvdata(pdev, host);
1233 1240
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 8a393f9e6027..47b19c0bb070 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -123,12 +123,6 @@ static int check_offs_len(struct mtd_info *mtd,
123 ret = -EINVAL; 123 ret = -EINVAL;
124 } 124 }
125 125
126 /* Do not allow past end of device */
127 if (ofs + len > mtd->size) {
128 pr_debug("%s: past end of device\n", __func__);
129 ret = -EINVAL;
130 }
131
132 return ret; 126 return ret;
133} 127}
134 128
@@ -338,7 +332,7 @@ static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
338 */ 332 */
339static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) 333static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
340{ 334{
341 int page, chipnr, res = 0; 335 int page, chipnr, res = 0, i = 0;
342 struct nand_chip *chip = mtd->priv; 336 struct nand_chip *chip = mtd->priv;
343 u16 bad; 337 u16 bad;
344 338
@@ -356,23 +350,29 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
356 chip->select_chip(mtd, chipnr); 350 chip->select_chip(mtd, chipnr);
357 } 351 }
358 352
359 if (chip->options & NAND_BUSWIDTH_16) { 353 do {
360 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE, 354 if (chip->options & NAND_BUSWIDTH_16) {
361 page); 355 chip->cmdfunc(mtd, NAND_CMD_READOOB,
362 bad = cpu_to_le16(chip->read_word(mtd)); 356 chip->badblockpos & 0xFE, page);
363 if (chip->badblockpos & 0x1) 357 bad = cpu_to_le16(chip->read_word(mtd));
364 bad >>= 8; 358 if (chip->badblockpos & 0x1)
365 else 359 bad >>= 8;
366 bad &= 0xFF; 360 else
367 } else { 361 bad &= 0xFF;
368 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page); 362 } else {
369 bad = chip->read_byte(mtd); 363 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
370 } 364 page);
365 bad = chip->read_byte(mtd);
366 }
371 367
372 if (likely(chip->badblockbits == 8)) 368 if (likely(chip->badblockbits == 8))
373 res = bad != 0xFF; 369 res = bad != 0xFF;
374 else 370 else
375 res = hweight8(bad) < chip->badblockbits; 371 res = hweight8(bad) < chip->badblockbits;
372 ofs += mtd->writesize;
373 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
374 i++;
375 } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
376 376
377 if (getchip) 377 if (getchip)
378 nand_release_device(mtd); 378 nand_release_device(mtd);
@@ -386,51 +386,79 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
386 * @ofs: offset from device start 386 * @ofs: offset from device start
387 * 387 *
388 * This is the default implementation, which can be overridden by a hardware 388 * This is the default implementation, which can be overridden by a hardware
389 * specific driver. 389 * specific driver. We try operations in the following order, according to our
390 * bbt_options (NAND_BBT_NO_OOB_BBM and NAND_BBT_USE_FLASH):
391 * (1) erase the affected block, to allow OOB marker to be written cleanly
392 * (2) update in-memory BBT
393 * (3) write bad block marker to OOB area of affected block
394 * (4) update flash-based BBT
395 * Note that we retain the first error encountered in (3) or (4), finish the
396 * procedures, and dump the error in the end.
390*/ 397*/
391static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) 398static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
392{ 399{
393 struct nand_chip *chip = mtd->priv; 400 struct nand_chip *chip = mtd->priv;
394 uint8_t buf[2] = { 0, 0 }; 401 uint8_t buf[2] = { 0, 0 };
395 int block, ret, i = 0; 402 int block, res, ret = 0, i = 0;
403 int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM);
396 404
397 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) 405 if (write_oob) {
398 ofs += mtd->erasesize - mtd->writesize; 406 struct erase_info einfo;
407
408 /* Attempt erase before marking OOB */
409 memset(&einfo, 0, sizeof(einfo));
410 einfo.mtd = mtd;
411 einfo.addr = ofs;
412 einfo.len = 1 << chip->phys_erase_shift;
413 nand_erase_nand(mtd, &einfo, 0);
414 }
399 415
400 /* Get block number */ 416 /* Get block number */
401 block = (int)(ofs >> chip->bbt_erase_shift); 417 block = (int)(ofs >> chip->bbt_erase_shift);
418 /* Mark block bad in memory-based BBT */
402 if (chip->bbt) 419 if (chip->bbt)
403 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); 420 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
404 421
405 /* Do we have a flash based bad block table? */ 422 /* Write bad block marker to OOB */
406 if (chip->bbt_options & NAND_BBT_USE_FLASH) 423 if (write_oob) {
407 ret = nand_update_bbt(mtd, ofs);
408 else {
409 struct mtd_oob_ops ops; 424 struct mtd_oob_ops ops;
425 loff_t wr_ofs = ofs;
410 426
411 nand_get_device(chip, mtd, FL_WRITING); 427 nand_get_device(chip, mtd, FL_WRITING);
412 428
413 /*
414 * Write to first two pages if necessary. If we write to more
415 * than one location, the first error encountered quits the
416 * procedure. We write two bytes per location, so we dont have
417 * to mess with 16 bit access.
418 */
419 ops.len = ops.ooblen = 2;
420 ops.datbuf = NULL; 429 ops.datbuf = NULL;
421 ops.oobbuf = buf; 430 ops.oobbuf = buf;
422 ops.ooboffs = chip->badblockpos & ~0x01; 431 ops.ooboffs = chip->badblockpos;
432 if (chip->options & NAND_BUSWIDTH_16) {
433 ops.ooboffs &= ~0x01;
434 ops.len = ops.ooblen = 2;
435 } else {
436 ops.len = ops.ooblen = 1;
437 }
423 ops.mode = MTD_OPS_PLACE_OOB; 438 ops.mode = MTD_OPS_PLACE_OOB;
439
440 /* Write to first/last page(s) if necessary */
441 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
442 wr_ofs += mtd->erasesize - mtd->writesize;
424 do { 443 do {
425 ret = nand_do_write_oob(mtd, ofs, &ops); 444 res = nand_do_write_oob(mtd, wr_ofs, &ops);
445 if (!ret)
446 ret = res;
426 447
427 i++; 448 i++;
428 ofs += mtd->writesize; 449 wr_ofs += mtd->writesize;
429 } while (!ret && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && 450 } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
430 i < 2);
431 451
432 nand_release_device(mtd); 452 nand_release_device(mtd);
433 } 453 }
454
455 /* Update flash-based bad block table */
456 if (chip->bbt_options & NAND_BBT_USE_FLASH) {
457 res = nand_update_bbt(mtd, ofs);
458 if (!ret)
459 ret = res;
460 }
461
434 if (!ret) 462 if (!ret)
435 mtd->ecc_stats.badblocks++; 463 mtd->ecc_stats.badblocks++;
436 464
@@ -1586,25 +1614,14 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1586 struct mtd_oob_ops ops; 1614 struct mtd_oob_ops ops;
1587 int ret; 1615 int ret;
1588 1616
1589 /* Do not allow reads past end of device */
1590 if ((from + len) > mtd->size)
1591 return -EINVAL;
1592 if (!len)
1593 return 0;
1594
1595 nand_get_device(chip, mtd, FL_READING); 1617 nand_get_device(chip, mtd, FL_READING);
1596
1597 ops.len = len; 1618 ops.len = len;
1598 ops.datbuf = buf; 1619 ops.datbuf = buf;
1599 ops.oobbuf = NULL; 1620 ops.oobbuf = NULL;
1600 ops.mode = 0; 1621 ops.mode = 0;
1601
1602 ret = nand_do_read_ops(mtd, from, &ops); 1622 ret = nand_do_read_ops(mtd, from, &ops);
1603
1604 *retlen = ops.retlen; 1623 *retlen = ops.retlen;
1605
1606 nand_release_device(mtd); 1624 nand_release_device(mtd);
1607
1608 return ret; 1625 return ret;
1609} 1626}
1610 1627
@@ -2293,12 +2310,6 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2293 struct mtd_oob_ops ops; 2310 struct mtd_oob_ops ops;
2294 int ret; 2311 int ret;
2295 2312
2296 /* Do not allow reads past end of device */
2297 if ((to + len) > mtd->size)
2298 return -EINVAL;
2299 if (!len)
2300 return 0;
2301
2302 /* Wait for the device to get ready */ 2313 /* Wait for the device to get ready */
2303 panic_nand_wait(mtd, chip, 400); 2314 panic_nand_wait(mtd, chip, 400);
2304 2315
@@ -2333,25 +2344,14 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2333 struct mtd_oob_ops ops; 2344 struct mtd_oob_ops ops;
2334 int ret; 2345 int ret;
2335 2346
2336 /* Do not allow reads past end of device */
2337 if ((to + len) > mtd->size)
2338 return -EINVAL;
2339 if (!len)
2340 return 0;
2341
2342 nand_get_device(chip, mtd, FL_WRITING); 2347 nand_get_device(chip, mtd, FL_WRITING);
2343
2344 ops.len = len; 2348 ops.len = len;
2345 ops.datbuf = (uint8_t *)buf; 2349 ops.datbuf = (uint8_t *)buf;
2346 ops.oobbuf = NULL; 2350 ops.oobbuf = NULL;
2347 ops.mode = 0; 2351 ops.mode = 0;
2348
2349 ret = nand_do_write_ops(mtd, to, &ops); 2352 ret = nand_do_write_ops(mtd, to, &ops);
2350
2351 *retlen = ops.retlen; 2353 *retlen = ops.retlen;
2352
2353 nand_release_device(mtd); 2354 nand_release_device(mtd);
2354
2355 return ret; 2355 return ret;
2356} 2356}
2357 2357
@@ -2550,8 +2550,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
2550 if (check_offs_len(mtd, instr->addr, instr->len)) 2550 if (check_offs_len(mtd, instr->addr, instr->len))
2551 return -EINVAL; 2551 return -EINVAL;
2552 2552
2553 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2554
2555 /* Grab the lock and see if the device is available */ 2553 /* Grab the lock and see if the device is available */
2556 nand_get_device(chip, mtd, FL_ERASING); 2554 nand_get_device(chip, mtd, FL_ERASING);
2557 2555
@@ -2715,10 +2713,6 @@ static void nand_sync(struct mtd_info *mtd)
2715 */ 2713 */
2716static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) 2714static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
2717{ 2715{
2718 /* Check for invalid offset */
2719 if (offs > mtd->size)
2720 return -EINVAL;
2721
2722 return nand_block_checkbad(mtd, offs, 1, 0); 2716 return nand_block_checkbad(mtd, offs, 1, 0);
2723} 2717}
2724 2718
@@ -2857,7 +2851,6 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2857 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I') 2851 chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
2858 return 0; 2852 return 0;
2859 2853
2860 pr_info("ONFI flash detected\n");
2861 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); 2854 chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
2862 for (i = 0; i < 3; i++) { 2855 for (i = 0; i < 3; i++) {
2863 chip->read_buf(mtd, (uint8_t *)p, sizeof(*p)); 2856 chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
@@ -2898,7 +2891,8 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2898 mtd->writesize = le32_to_cpu(p->byte_per_page); 2891 mtd->writesize = le32_to_cpu(p->byte_per_page);
2899 mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize; 2892 mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
2900 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page); 2893 mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
2901 chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize; 2894 chip->chipsize = le32_to_cpu(p->blocks_per_lun);
2895 chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
2902 *busw = 0; 2896 *busw = 0;
2903 if (le16_to_cpu(p->features) & 1) 2897 if (le16_to_cpu(p->features) & 1)
2904 *busw = NAND_BUSWIDTH_16; 2898 *busw = NAND_BUSWIDTH_16;
@@ -2907,6 +2901,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2907 chip->options |= (NAND_NO_READRDY | 2901 chip->options |= (NAND_NO_READRDY |
2908 NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK; 2902 NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
2909 2903
2904 pr_info("ONFI flash detected\n");
2910 return 1; 2905 return 1;
2911} 2906}
2912 2907
@@ -3238,6 +3233,10 @@ int nand_scan_tail(struct mtd_info *mtd)
3238 int i; 3233 int i;
3239 struct nand_chip *chip = mtd->priv; 3234 struct nand_chip *chip = mtd->priv;
3240 3235
3236 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
3237 BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
3238 !(chip->bbt_options & NAND_BBT_USE_FLASH));
3239
3241 if (!(chip->options & NAND_OWN_BUFFERS)) 3240 if (!(chip->options & NAND_OWN_BUFFERS))
3242 chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL); 3241 chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL);
3243 if (!chip->buffers) 3242 if (!chip->buffers)
@@ -3350,6 +3349,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3350 if (!chip->ecc.size) 3349 if (!chip->ecc.size)
3351 chip->ecc.size = 256; 3350 chip->ecc.size = 256;
3352 chip->ecc.bytes = 3; 3351 chip->ecc.bytes = 3;
3352 chip->ecc.strength = 1;
3353 break; 3353 break;
3354 3354
3355 case NAND_ECC_SOFT_BCH: 3355 case NAND_ECC_SOFT_BCH:
@@ -3384,6 +3384,8 @@ int nand_scan_tail(struct mtd_info *mtd)
3384 pr_warn("BCH ECC initialization failed!\n"); 3384 pr_warn("BCH ECC initialization failed!\n");
3385 BUG(); 3385 BUG();
3386 } 3386 }
3387 chip->ecc.strength =
3388 chip->ecc.bytes*8 / fls(8*chip->ecc.size);
3387 break; 3389 break;
3388 3390
3389 case NAND_ECC_NONE: 3391 case NAND_ECC_NONE:
@@ -3397,6 +3399,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3397 chip->ecc.write_oob = nand_write_oob_std; 3399 chip->ecc.write_oob = nand_write_oob_std;
3398 chip->ecc.size = mtd->writesize; 3400 chip->ecc.size = mtd->writesize;
3399 chip->ecc.bytes = 0; 3401 chip->ecc.bytes = 0;
3402 chip->ecc.strength = 0;
3400 break; 3403 break;
3401 3404
3402 default: 3405 default:
@@ -3461,25 +3464,26 @@ int nand_scan_tail(struct mtd_info *mtd)
3461 mtd->type = MTD_NANDFLASH; 3464 mtd->type = MTD_NANDFLASH;
3462 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM : 3465 mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
3463 MTD_CAP_NANDFLASH; 3466 MTD_CAP_NANDFLASH;
3464 mtd->erase = nand_erase; 3467 mtd->_erase = nand_erase;
3465 mtd->point = NULL; 3468 mtd->_point = NULL;
3466 mtd->unpoint = NULL; 3469 mtd->_unpoint = NULL;
3467 mtd->read = nand_read; 3470 mtd->_read = nand_read;
3468 mtd->write = nand_write; 3471 mtd->_write = nand_write;
3469 mtd->panic_write = panic_nand_write; 3472 mtd->_panic_write = panic_nand_write;
3470 mtd->read_oob = nand_read_oob; 3473 mtd->_read_oob = nand_read_oob;
3471 mtd->write_oob = nand_write_oob; 3474 mtd->_write_oob = nand_write_oob;
3472 mtd->sync = nand_sync; 3475 mtd->_sync = nand_sync;
3473 mtd->lock = NULL; 3476 mtd->_lock = NULL;
3474 mtd->unlock = NULL; 3477 mtd->_unlock = NULL;
3475 mtd->suspend = nand_suspend; 3478 mtd->_suspend = nand_suspend;
3476 mtd->resume = nand_resume; 3479 mtd->_resume = nand_resume;
3477 mtd->block_isbad = nand_block_isbad; 3480 mtd->_block_isbad = nand_block_isbad;
3478 mtd->block_markbad = nand_block_markbad; 3481 mtd->_block_markbad = nand_block_markbad;
3479 mtd->writebufsize = mtd->writesize; 3482 mtd->writebufsize = mtd->writesize;
3480 3483
3481 /* propagate ecc.layout to mtd_info */ 3484 /* propagate ecc info to mtd_info */
3482 mtd->ecclayout = chip->ecc.layout; 3485 mtd->ecclayout = chip->ecc.layout;
3486 mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps;
3483 3487
3484 /* Check, if we should skip the bad block table scan */ 3488 /* Check, if we should skip the bad block table scan */
3485 if (chip->options & NAND_SKIP_BBTSCAN) 3489 if (chip->options & NAND_SKIP_BBTSCAN)
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ec688548c880..2b6f632cf274 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -179,6 +179,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
179 chip->ecc.mode = NAND_ECC_HW; 179 chip->ecc.mode = NAND_ECC_HW;
180 chip->ecc.size = 256; 180 chip->ecc.size = 256;
181 chip->ecc.bytes = 3; 181 chip->ecc.bytes = 3;
182 chip->ecc.strength = 1;
182 chip->priv = ndfc; 183 chip->priv = ndfc;
183 184
184 ndfc->mtd.priv = chip; 185 ndfc->mtd.priv = chip;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index b3a883e2a22f..c2b0bba9d8b3 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1058,6 +1058,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1058 (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) { 1058 (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
1059 info->nand.ecc.bytes = 3; 1059 info->nand.ecc.bytes = 3;
1060 info->nand.ecc.size = 512; 1060 info->nand.ecc.size = 512;
1061 info->nand.ecc.strength = 1;
1061 info->nand.ecc.calculate = omap_calculate_ecc; 1062 info->nand.ecc.calculate = omap_calculate_ecc;
1062 info->nand.ecc.hwctl = omap_enable_hwecc; 1063 info->nand.ecc.hwctl = omap_enable_hwecc;
1063 info->nand.ecc.correct = omap_correct_data; 1064 info->nand.ecc.correct = omap_correct_data;
@@ -1101,8 +1102,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1101 goto out_release_mem_region; 1102 goto out_release_mem_region;
1102 } 1103 }
1103 1104
1104 mtd_device_parse_register(&info->mtd, NULL, 0, 1105 mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
1105 pdata->parts, pdata->nr_parts); 1106 pdata->nr_parts);
1106 1107
1107 platform_set_drvdata(pdev, &info->mtd); 1108 platform_set_drvdata(pdev, &info->mtd);
1108 1109
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 29f505adaf84..1d3bfb26080c 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -129,8 +129,8 @@ static int __init orion_nand_probe(struct platform_device *pdev)
129 } 129 }
130 130
131 mtd->name = "orion_nand"; 131 mtd->name = "orion_nand";
132 ret = mtd_device_parse_register(mtd, NULL, 0, 132 ret = mtd_device_parse_register(mtd, NULL, NULL, board->parts,
133 board->parts, board->nr_parts); 133 board->nr_parts);
134 if (ret) { 134 if (ret) {
135 nand_release(mtd); 135 nand_release(mtd);
136 goto no_dev; 136 goto no_dev;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 7f2da6953357..6404e6e81b10 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -99,8 +99,9 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
99 } 99 }
100 100
101 err = mtd_device_parse_register(&data->mtd, 101 err = mtd_device_parse_register(&data->mtd,
102 pdata->chip.part_probe_types, 0, 102 pdata->chip.part_probe_types, NULL,
103 pdata->chip.partitions, pdata->chip.nr_partitions); 103 pdata->chip.partitions,
104 pdata->chip.nr_partitions);
104 105
105 if (!err) 106 if (!err)
106 return err; 107 return err;
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index 7e52af51a198..0ddd90e5788f 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -275,11 +275,10 @@ static int __init ppchameleonevb_init(void)
275 ppchameleon_mtd->name = "ppchameleon-nand"; 275 ppchameleon_mtd->name = "ppchameleon-nand";
276 276
277 /* Register the partitions */ 277 /* Register the partitions */
278 mtd_device_parse_register(ppchameleon_mtd, NULL, 0, 278 mtd_device_parse_register(ppchameleon_mtd, NULL, NULL,
279 ppchameleon_mtd->size == NAND_SMALL_SIZE ? 279 ppchameleon_mtd->size == NAND_SMALL_SIZE ?
280 partition_info_me : 280 partition_info_me : partition_info_hi,
281 partition_info_hi, 281 NUM_PARTITIONS);
282 NUM_PARTITIONS);
283 282
284 nand_evb_init: 283 nand_evb_init:
285 /**************************** 284 /****************************
@@ -365,11 +364,10 @@ static int __init ppchameleonevb_init(void)
365 ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME; 364 ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
366 365
367 /* Register the partitions */ 366 /* Register the partitions */
368 mtd_device_parse_register(ppchameleonevb_mtd, NULL, 0, 367 mtd_device_parse_register(ppchameleonevb_mtd, NULL, NULL,
369 ppchameleon_mtd->size == NAND_SMALL_SIZE ? 368 ppchameleon_mtd->size == NAND_SMALL_SIZE ?
370 partition_info_me : 369 partition_info_me : partition_info_hi,
371 partition_info_hi, 370 NUM_PARTITIONS);
372 NUM_PARTITIONS);
373 371
374 /* Return happy */ 372 /* Return happy */
375 return 0; 373 return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5c3d719c37e6..def50caa6f84 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1002,6 +1002,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
1002KEEP_CONFIG: 1002KEEP_CONFIG:
1003 chip->ecc.mode = NAND_ECC_HW; 1003 chip->ecc.mode = NAND_ECC_HW;
1004 chip->ecc.size = host->page_size; 1004 chip->ecc.size = host->page_size;
1005 chip->ecc.strength = 1;
1005 1006
1006 chip->options = NAND_NO_AUTOINCR; 1007 chip->options = NAND_NO_AUTOINCR;
1007 chip->options |= NAND_NO_READRDY; 1008 chip->options |= NAND_NO_READRDY;
@@ -1228,8 +1229,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1228 continue; 1229 continue;
1229 } 1230 }
1230 1231
1231 ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, 0, 1232 ret = mtd_device_parse_register(info->host[cs]->mtd, NULL,
1232 pdata->parts[cs], pdata->nr_parts[cs]); 1233 NULL, pdata->parts[cs],
1234 pdata->nr_parts[cs]);
1233 if (!ret) 1235 if (!ret)
1234 probe_success = 1; 1236 probe_success = 1;
1235 } 1237 }
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 769a4e096b3c..c2040187c813 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -891,6 +891,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
891 chip->ecc.mode = NAND_ECC_HW_SYNDROME; 891 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
892 chip->ecc.size = R852_DMA_LEN; 892 chip->ecc.size = R852_DMA_LEN;
893 chip->ecc.bytes = SM_OOB_SIZE; 893 chip->ecc.bytes = SM_OOB_SIZE;
894 chip->ecc.strength = 2;
894 chip->ecc.hwctl = r852_ecc_hwctl; 895 chip->ecc.hwctl = r852_ecc_hwctl;
895 chip->ecc.calculate = r852_ecc_calculate; 896 chip->ecc.calculate = r852_ecc_calculate;
896 chip->ecc.correct = r852_ecc_correct; 897 chip->ecc.correct = r852_ecc_correct;
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index f309addc2fa0..e55b5cfbe145 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -527,6 +527,7 @@ static int __init rtc_from4_init(void)
527 this->ecc.mode = NAND_ECC_HW_SYNDROME; 527 this->ecc.mode = NAND_ECC_HW_SYNDROME;
528 this->ecc.size = 512; 528 this->ecc.size = 512;
529 this->ecc.bytes = 8; 529 this->ecc.bytes = 8;
530 this->ecc.strength = 3;
530 /* return the status of extra status and ECC checks */ 531 /* return the status of extra status and ECC checks */
531 this->errstat = rtc_from4_errstat; 532 this->errstat = rtc_from4_errstat;
532 /* set the nand_oobinfo to support FPGA H/W error detection */ 533 /* set the nand_oobinfo to support FPGA H/W error detection */
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 868685db6712..91121f33f743 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -751,8 +751,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
751 if (set) 751 if (set)
752 mtd->mtd.name = set->name; 752 mtd->mtd.name = set->name;
753 753
754 return mtd_device_parse_register(&mtd->mtd, NULL, 0, 754 return mtd_device_parse_register(&mtd->mtd, NULL, NULL,
755 set->partitions, set->nr_partitions); 755 set->partitions, set->nr_partitions);
756} 756}
757 757
758/** 758/**
@@ -823,6 +823,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
823 chip->ecc.calculate = s3c2410_nand_calculate_ecc; 823 chip->ecc.calculate = s3c2410_nand_calculate_ecc;
824 chip->ecc.correct = s3c2410_nand_correct_data; 824 chip->ecc.correct = s3c2410_nand_correct_data;
825 chip->ecc.mode = NAND_ECC_HW; 825 chip->ecc.mode = NAND_ECC_HW;
826 chip->ecc.strength = 1;
826 827
827 switch (info->cpu_type) { 828 switch (info->cpu_type) {
828 case TYPE_S3C2410: 829 case TYPE_S3C2410:
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 93b1f74321c2..e9b2b260de3a 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -26,6 +26,7 @@
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/io.h> 27#include <linux/io.h>
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/pm_runtime.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
30 31
31#include <linux/mtd/mtd.h> 32#include <linux/mtd/mtd.h>
@@ -283,7 +284,7 @@ static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
283static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val) 284static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
284{ 285{
285 struct sh_flctl *flctl = mtd_to_flctl(mtd); 286 struct sh_flctl *flctl = mtd_to_flctl(mtd);
286 uint32_t flcmncr_val = readl(FLCMNCR(flctl)) & ~SEL_16BIT; 287 uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
287 uint32_t flcmdcr_val, addr_len_bytes = 0; 288 uint32_t flcmdcr_val, addr_len_bytes = 0;
288 289
289 /* Set SNAND bit if page size is 2048byte */ 290 /* Set SNAND bit if page size is 2048byte */
@@ -303,6 +304,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
303 break; 304 break;
304 case NAND_CMD_READ0: 305 case NAND_CMD_READ0:
305 case NAND_CMD_READOOB: 306 case NAND_CMD_READOOB:
307 case NAND_CMD_RNDOUT:
306 addr_len_bytes = flctl->rw_ADRCNT; 308 addr_len_bytes = flctl->rw_ADRCNT;
307 flcmdcr_val |= CDSRC_E; 309 flcmdcr_val |= CDSRC_E;
308 if (flctl->chip.options & NAND_BUSWIDTH_16) 310 if (flctl->chip.options & NAND_BUSWIDTH_16)
@@ -320,6 +322,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
320 break; 322 break;
321 case NAND_CMD_READID: 323 case NAND_CMD_READID:
322 flcmncr_val &= ~SNAND_E; 324 flcmncr_val &= ~SNAND_E;
325 flcmdcr_val |= CDSRC_E;
323 addr_len_bytes = ADRCNT_1; 326 addr_len_bytes = ADRCNT_1;
324 break; 327 break;
325 case NAND_CMD_STATUS: 328 case NAND_CMD_STATUS:
@@ -513,6 +516,8 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
513 struct sh_flctl *flctl = mtd_to_flctl(mtd); 516 struct sh_flctl *flctl = mtd_to_flctl(mtd);
514 uint32_t read_cmd = 0; 517 uint32_t read_cmd = 0;
515 518
519 pm_runtime_get_sync(&flctl->pdev->dev);
520
516 flctl->read_bytes = 0; 521 flctl->read_bytes = 0;
517 if (command != NAND_CMD_PAGEPROG) 522 if (command != NAND_CMD_PAGEPROG)
518 flctl->index = 0; 523 flctl->index = 0;
@@ -525,7 +530,6 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
525 execmd_read_page_sector(mtd, page_addr); 530 execmd_read_page_sector(mtd, page_addr);
526 break; 531 break;
527 } 532 }
528 empty_fifo(flctl);
529 if (flctl->page_size) 533 if (flctl->page_size)
530 set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8) 534 set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
531 | command); 535 | command);
@@ -547,7 +551,6 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
547 break; 551 break;
548 } 552 }
549 553
550 empty_fifo(flctl);
551 if (flctl->page_size) { 554 if (flctl->page_size) {
552 set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8) 555 set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
553 | NAND_CMD_READ0); 556 | NAND_CMD_READ0);
@@ -559,15 +562,35 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
559 flctl->read_bytes = mtd->oobsize; 562 flctl->read_bytes = mtd->oobsize;
560 goto read_normal_exit; 563 goto read_normal_exit;
561 564
565 case NAND_CMD_RNDOUT:
566 if (flctl->hwecc)
567 break;
568
569 if (flctl->page_size)
570 set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
571 | command);
572 else
573 set_cmd_regs(mtd, command, command);
574
575 set_addr(mtd, column, 0);
576
577 flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
578 goto read_normal_exit;
579
562 case NAND_CMD_READID: 580 case NAND_CMD_READID:
563 empty_fifo(flctl);
564 set_cmd_regs(mtd, command, command); 581 set_cmd_regs(mtd, command, command);
565 set_addr(mtd, 0, 0);
566 582
567 flctl->read_bytes = 4; 583 /* READID is always performed using an 8-bit bus */
584 if (flctl->chip.options & NAND_BUSWIDTH_16)
585 column <<= 1;
586 set_addr(mtd, column, 0);
587
588 flctl->read_bytes = 8;
568 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */ 589 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
590 empty_fifo(flctl);
569 start_translation(flctl); 591 start_translation(flctl);
570 read_datareg(flctl, 0); /* read and end */ 592 read_fiforeg(flctl, flctl->read_bytes, 0);
593 wait_completion(flctl);
571 break; 594 break;
572 595
573 case NAND_CMD_ERASE1: 596 case NAND_CMD_ERASE1:
@@ -650,29 +673,55 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
650 default: 673 default:
651 break; 674 break;
652 } 675 }
653 return; 676 goto runtime_exit;
654 677
655read_normal_exit: 678read_normal_exit:
656 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */ 679 writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
680 empty_fifo(flctl);
657 start_translation(flctl); 681 start_translation(flctl);
658 read_fiforeg(flctl, flctl->read_bytes, 0); 682 read_fiforeg(flctl, flctl->read_bytes, 0);
659 wait_completion(flctl); 683 wait_completion(flctl);
684runtime_exit:
685 pm_runtime_put_sync(&flctl->pdev->dev);
660 return; 686 return;
661} 687}
662 688
663static void flctl_select_chip(struct mtd_info *mtd, int chipnr) 689static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
664{ 690{
665 struct sh_flctl *flctl = mtd_to_flctl(mtd); 691 struct sh_flctl *flctl = mtd_to_flctl(mtd);
666 uint32_t flcmncr_val = readl(FLCMNCR(flctl)); 692 int ret;
667 693
668 switch (chipnr) { 694 switch (chipnr) {
669 case -1: 695 case -1:
670 flcmncr_val &= ~CE0_ENABLE; 696 flctl->flcmncr_base &= ~CE0_ENABLE;
671 writel(flcmncr_val, FLCMNCR(flctl)); 697
698 pm_runtime_get_sync(&flctl->pdev->dev);
699 writel(flctl->flcmncr_base, FLCMNCR(flctl));
700
701 if (flctl->qos_request) {
702 dev_pm_qos_remove_request(&flctl->pm_qos);
703 flctl->qos_request = 0;
704 }
705
706 pm_runtime_put_sync(&flctl->pdev->dev);
672 break; 707 break;
673 case 0: 708 case 0:
674 flcmncr_val |= CE0_ENABLE; 709 flctl->flcmncr_base |= CE0_ENABLE;
675 writel(flcmncr_val, FLCMNCR(flctl)); 710
711 if (!flctl->qos_request) {
712 ret = dev_pm_qos_add_request(&flctl->pdev->dev,
713 &flctl->pm_qos, 100);
714 if (ret < 0)
715 dev_err(&flctl->pdev->dev,
716 "PM QoS request failed: %d\n", ret);
717 flctl->qos_request = 1;
718 }
719
720 if (flctl->holden) {
721 pm_runtime_get_sync(&flctl->pdev->dev);
722 writel(HOLDEN, FLHOLDCR(flctl));
723 pm_runtime_put_sync(&flctl->pdev->dev);
724 }
676 break; 725 break;
677 default: 726 default:
678 BUG(); 727 BUG();
@@ -730,11 +779,6 @@ static int flctl_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
730 return 0; 779 return 0;
731} 780}
732 781
733static void flctl_register_init(struct sh_flctl *flctl, unsigned long val)
734{
735 writel(val, FLCMNCR(flctl));
736}
737
738static int flctl_chip_init_tail(struct mtd_info *mtd) 782static int flctl_chip_init_tail(struct mtd_info *mtd)
739{ 783{
740 struct sh_flctl *flctl = mtd_to_flctl(mtd); 784 struct sh_flctl *flctl = mtd_to_flctl(mtd);
@@ -781,13 +825,13 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
781 825
782 chip->ecc.size = 512; 826 chip->ecc.size = 512;
783 chip->ecc.bytes = 10; 827 chip->ecc.bytes = 10;
828 chip->ecc.strength = 4;
784 chip->ecc.read_page = flctl_read_page_hwecc; 829 chip->ecc.read_page = flctl_read_page_hwecc;
785 chip->ecc.write_page = flctl_write_page_hwecc; 830 chip->ecc.write_page = flctl_write_page_hwecc;
786 chip->ecc.mode = NAND_ECC_HW; 831 chip->ecc.mode = NAND_ECC_HW;
787 832
788 /* 4 symbols ECC enabled */ 833 /* 4 symbols ECC enabled */
789 writel(readl(FLCMNCR(flctl)) | _4ECCEN | ECCPOS2 | ECCPOS_02, 834 flctl->flcmncr_base |= _4ECCEN | ECCPOS2 | ECCPOS_02;
790 FLCMNCR(flctl));
791 } else { 835 } else {
792 chip->ecc.mode = NAND_ECC_SOFT; 836 chip->ecc.mode = NAND_ECC_SOFT;
793 } 837 }
@@ -819,13 +863,13 @@ static int __devinit flctl_probe(struct platform_device *pdev)
819 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 863 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
820 if (!res) { 864 if (!res) {
821 dev_err(&pdev->dev, "failed to get I/O memory\n"); 865 dev_err(&pdev->dev, "failed to get I/O memory\n");
822 goto err; 866 goto err_iomap;
823 } 867 }
824 868
825 flctl->reg = ioremap(res->start, resource_size(res)); 869 flctl->reg = ioremap(res->start, resource_size(res));
826 if (flctl->reg == NULL) { 870 if (flctl->reg == NULL) {
827 dev_err(&pdev->dev, "failed to remap I/O memory\n"); 871 dev_err(&pdev->dev, "failed to remap I/O memory\n");
828 goto err; 872 goto err_iomap;
829 } 873 }
830 874
831 platform_set_drvdata(pdev, flctl); 875 platform_set_drvdata(pdev, flctl);
@@ -833,9 +877,9 @@ static int __devinit flctl_probe(struct platform_device *pdev)
833 nand = &flctl->chip; 877 nand = &flctl->chip;
834 flctl_mtd->priv = nand; 878 flctl_mtd->priv = nand;
835 flctl->pdev = pdev; 879 flctl->pdev = pdev;
880 flctl->flcmncr_base = pdata->flcmncr_val;
836 flctl->hwecc = pdata->has_hwecc; 881 flctl->hwecc = pdata->has_hwecc;
837 882 flctl->holden = pdata->use_holden;
838 flctl_register_init(flctl, pdata->flcmncr_val);
839 883
840 nand->options = NAND_NO_AUTOINCR; 884 nand->options = NAND_NO_AUTOINCR;
841 885
@@ -855,23 +899,28 @@ static int __devinit flctl_probe(struct platform_device *pdev)
855 nand->read_word = flctl_read_word; 899 nand->read_word = flctl_read_word;
856 } 900 }
857 901
902 pm_runtime_enable(&pdev->dev);
903 pm_runtime_resume(&pdev->dev);
904
858 ret = nand_scan_ident(flctl_mtd, 1, NULL); 905 ret = nand_scan_ident(flctl_mtd, 1, NULL);
859 if (ret) 906 if (ret)
860 goto err; 907 goto err_chip;
861 908
862 ret = flctl_chip_init_tail(flctl_mtd); 909 ret = flctl_chip_init_tail(flctl_mtd);
863 if (ret) 910 if (ret)
864 goto err; 911 goto err_chip;
865 912
866 ret = nand_scan_tail(flctl_mtd); 913 ret = nand_scan_tail(flctl_mtd);
867 if (ret) 914 if (ret)
868 goto err; 915 goto err_chip;
869 916
870 mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts); 917 mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
871 918
872 return 0; 919 return 0;
873 920
874err: 921err_chip:
922 pm_runtime_disable(&pdev->dev);
923err_iomap:
875 kfree(flctl); 924 kfree(flctl);
876 return ret; 925 return ret;
877} 926}
@@ -881,6 +930,7 @@ static int __devexit flctl_remove(struct platform_device *pdev)
881 struct sh_flctl *flctl = platform_get_drvdata(pdev); 930 struct sh_flctl *flctl = platform_get_drvdata(pdev);
882 931
883 nand_release(&flctl->mtd); 932 nand_release(&flctl->mtd);
933 pm_runtime_disable(&pdev->dev);
884 kfree(flctl); 934 kfree(flctl);
885 935
886 return 0; 936 return 0;
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index b175c0fd8b93..3421e3762a5a 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -167,6 +167,7 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
167 this->ecc.mode = NAND_ECC_HW; 167 this->ecc.mode = NAND_ECC_HW;
168 this->ecc.size = 256; 168 this->ecc.size = 256;
169 this->ecc.bytes = 3; 169 this->ecc.bytes = 3;
170 this->ecc.strength = 1;
170 this->badblock_pattern = data->badblock_pattern; 171 this->badblock_pattern = data->badblock_pattern;
171 this->ecc.layout = data->ecc_layout; 172 this->ecc.layout = data->ecc_layout;
172 this->ecc.hwctl = sharpsl_nand_enable_hwecc; 173 this->ecc.hwctl = sharpsl_nand_enable_hwecc;
@@ -181,8 +182,8 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
181 /* Register the partitions */ 182 /* Register the partitions */
182 sharpsl->mtd.name = "sharpsl-nand"; 183 sharpsl->mtd.name = "sharpsl-nand";
183 184
184 err = mtd_device_parse_register(&sharpsl->mtd, NULL, 0, 185 err = mtd_device_parse_register(&sharpsl->mtd, NULL, NULL,
185 data->partitions, data->nr_partitions); 186 data->partitions, data->nr_partitions);
186 if (err) 187 if (err)
187 goto err_add; 188 goto err_add;
188 189
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 6caa0cd9d6a7..5aa518081c51 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -430,6 +430,7 @@ static int tmio_probe(struct platform_device *dev)
430 nand_chip->ecc.mode = NAND_ECC_HW; 430 nand_chip->ecc.mode = NAND_ECC_HW;
431 nand_chip->ecc.size = 512; 431 nand_chip->ecc.size = 512;
432 nand_chip->ecc.bytes = 6; 432 nand_chip->ecc.bytes = 6;
433 nand_chip->ecc.strength = 2;
433 nand_chip->ecc.hwctl = tmio_nand_enable_hwecc; 434 nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
434 nand_chip->ecc.calculate = tmio_nand_calculate_ecc; 435 nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
435 nand_chip->ecc.correct = tmio_nand_correct_data; 436 nand_chip->ecc.correct = tmio_nand_correct_data;
@@ -456,9 +457,9 @@ static int tmio_probe(struct platform_device *dev)
456 goto err_scan; 457 goto err_scan;
457 } 458 }
458 /* Register the partitions */ 459 /* Register the partitions */
459 retval = mtd_device_parse_register(mtd, NULL, 0, 460 retval = mtd_device_parse_register(mtd, NULL, NULL,
460 data ? data->partition : NULL, 461 data ? data->partition : NULL,
461 data ? data->num_partitions : 0); 462 data ? data->num_partitions : 0);
462 if (!retval) 463 if (!retval)
463 return retval; 464 return retval;
464 465
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index c7c4f1d11c77..26398dcf21cf 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -356,6 +356,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
356 /* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */ 356 /* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */
357 chip->ecc.size = 256; 357 chip->ecc.size = 256;
358 chip->ecc.bytes = 3; 358 chip->ecc.bytes = 3;
359 chip->ecc.strength = 1;
359 chip->chip_delay = 100; 360 chip->chip_delay = 100;
360 chip->controller = &drvdata->hw_control; 361 chip->controller = &drvdata->hw_control;
361 362
@@ -386,7 +387,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
386 } 387 }
387 mtd->name = txx9_priv->mtdname; 388 mtd->name = txx9_priv->mtdname;
388 389
389 mtd_device_parse_register(mtd, NULL, 0, NULL, 0); 390 mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
390 drvdata->mtds[i] = mtd; 391 drvdata->mtds[i] = mtd;
391 } 392 }
392 393
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index a75382aff5f6..c5f4ebf4b384 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -56,13 +56,6 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
56 if (memcmp(mtd->name, "DiskOnChip", 10)) 56 if (memcmp(mtd->name, "DiskOnChip", 10))
57 return; 57 return;
58 58
59 if (!mtd_can_have_bb(mtd)) {
60 printk(KERN_ERR
61"NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
62"Please use the new diskonchip driver under the NAND subsystem.\n");
63 return;
64 }
65
66 pr_debug("NFTL: add_mtd for %s\n", mtd->name); 59 pr_debug("NFTL: add_mtd for %s\n", mtd->name);
67 60
68 nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL); 61 nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 0ccd5bff2544..1c4f97c63e62 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -70,9 +70,9 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev)
70 goto out_iounmap; 70 goto out_iounmap;
71 } 71 }
72 72
73 err = mtd_device_parse_register(&info->mtd, NULL, 0, 73 err = mtd_device_parse_register(&info->mtd, NULL, NULL,
74 pdata ? pdata->parts : NULL, 74 pdata ? pdata->parts : NULL,
75 pdata ? pdata->nr_parts : 0); 75 pdata ? pdata->nr_parts : 0);
76 76
77 platform_set_drvdata(pdev, info); 77 platform_set_drvdata(pdev, info);
78 78
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 7e9ea6852b67..398a82783848 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -751,9 +751,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
751 if ((r = onenand_scan(&c->mtd, 1)) < 0) 751 if ((r = onenand_scan(&c->mtd, 1)) < 0)
752 goto err_release_regulator; 752 goto err_release_regulator;
753 753
754 r = mtd_device_parse_register(&c->mtd, NULL, 0, 754 r = mtd_device_parse_register(&c->mtd, NULL, NULL,
755 pdata ? pdata->parts : NULL, 755 pdata ? pdata->parts : NULL,
756 pdata ? pdata->nr_parts : 0); 756 pdata ? pdata->nr_parts : 0);
757 if (r) 757 if (r)
758 goto err_release_onenand; 758 goto err_release_onenand;
759 759
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index a061bc163da2..b3ce12ef359e 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1753,16 +1753,6 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1753 pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, 1753 pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
1754 (int)len); 1754 (int)len);
1755 1755
1756 /* Initialize retlen, in case of early exit */
1757 *retlen = 0;
1758
1759 /* Do not allow writes past end of device */
1760 if (unlikely((to + len) > mtd->size)) {
1761 printk(KERN_ERR "%s: Attempt write to past end of device\n",
1762 __func__);
1763 return -EINVAL;
1764 }
1765
1766 /* Reject writes, which are not page aligned */ 1756 /* Reject writes, which are not page aligned */
1767 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { 1757 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
1768 printk(KERN_ERR "%s: Attempt to write not page aligned data\n", 1758 printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
@@ -1890,13 +1880,6 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
1890 ops->retlen = 0; 1880 ops->retlen = 0;
1891 ops->oobretlen = 0; 1881 ops->oobretlen = 0;
1892 1882
1893 /* Do not allow writes past end of device */
1894 if (unlikely((to + len) > mtd->size)) {
1895 printk(KERN_ERR "%s: Attempt write to past end of device\n",
1896 __func__);
1897 return -EINVAL;
1898 }
1899
1900 /* Reject writes, which are not page aligned */ 1883 /* Reject writes, which are not page aligned */
1901 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { 1884 if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
1902 printk(KERN_ERR "%s: Attempt to write not page aligned data\n", 1885 printk(KERN_ERR "%s: Attempt to write not page aligned data\n",
@@ -2493,12 +2476,6 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2493 (unsigned long long)instr->addr, 2476 (unsigned long long)instr->addr,
2494 (unsigned long long)instr->len); 2477 (unsigned long long)instr->len);
2495 2478
2496 /* Do not allow erase past end of device */
2497 if (unlikely((len + addr) > mtd->size)) {
2498 printk(KERN_ERR "%s: Erase past end of device\n", __func__);
2499 return -EINVAL;
2500 }
2501
2502 if (FLEXONENAND(this)) { 2479 if (FLEXONENAND(this)) {
2503 /* Find the eraseregion of this address */ 2480 /* Find the eraseregion of this address */
2504 int i = flexonenand_region(mtd, addr); 2481 int i = flexonenand_region(mtd, addr);
@@ -2525,8 +2502,6 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
2525 return -EINVAL; 2502 return -EINVAL;
2526 } 2503 }
2527 2504
2528 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
2529
2530 /* Grab the lock and see if the device is available */ 2505 /* Grab the lock and see if the device is available */
2531 onenand_get_device(mtd, FL_ERASING); 2506 onenand_get_device(mtd, FL_ERASING);
2532 2507
@@ -4103,33 +4078,34 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
4103 mtd->oobavail = this->ecclayout->oobavail; 4078 mtd->oobavail = this->ecclayout->oobavail;
4104 4079
4105 mtd->ecclayout = this->ecclayout; 4080 mtd->ecclayout = this->ecclayout;
4081 mtd->ecc_strength = 1;
4106 4082
4107 /* Fill in remaining MTD driver data */ 4083 /* Fill in remaining MTD driver data */
4108 mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH; 4084 mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH;
4109 mtd->flags = MTD_CAP_NANDFLASH; 4085 mtd->flags = MTD_CAP_NANDFLASH;
4110 mtd->erase = onenand_erase; 4086 mtd->_erase = onenand_erase;
4111 mtd->point = NULL; 4087 mtd->_point = NULL;
4112 mtd->unpoint = NULL; 4088 mtd->_unpoint = NULL;
4113 mtd->read = onenand_read; 4089 mtd->_read = onenand_read;
4114 mtd->write = onenand_write; 4090 mtd->_write = onenand_write;
4115 mtd->read_oob = onenand_read_oob; 4091 mtd->_read_oob = onenand_read_oob;
4116 mtd->write_oob = onenand_write_oob; 4092 mtd->_write_oob = onenand_write_oob;
4117 mtd->panic_write = onenand_panic_write; 4093 mtd->_panic_write = onenand_panic_write;
4118#ifdef CONFIG_MTD_ONENAND_OTP 4094#ifdef CONFIG_MTD_ONENAND_OTP
4119 mtd->get_fact_prot_info = onenand_get_fact_prot_info; 4095 mtd->_get_fact_prot_info = onenand_get_fact_prot_info;
4120 mtd->read_fact_prot_reg = onenand_read_fact_prot_reg; 4096 mtd->_read_fact_prot_reg = onenand_read_fact_prot_reg;
4121 mtd->get_user_prot_info = onenand_get_user_prot_info; 4097 mtd->_get_user_prot_info = onenand_get_user_prot_info;
4122 mtd->read_user_prot_reg = onenand_read_user_prot_reg; 4098 mtd->_read_user_prot_reg = onenand_read_user_prot_reg;
4123 mtd->write_user_prot_reg = onenand_write_user_prot_reg; 4099 mtd->_write_user_prot_reg = onenand_write_user_prot_reg;
4124 mtd->lock_user_prot_reg = onenand_lock_user_prot_reg; 4100 mtd->_lock_user_prot_reg = onenand_lock_user_prot_reg;
4125#endif 4101#endif
4126 mtd->sync = onenand_sync; 4102 mtd->_sync = onenand_sync;
4127 mtd->lock = onenand_lock; 4103 mtd->_lock = onenand_lock;
4128 mtd->unlock = onenand_unlock; 4104 mtd->_unlock = onenand_unlock;
4129 mtd->suspend = onenand_suspend; 4105 mtd->_suspend = onenand_suspend;
4130 mtd->resume = onenand_resume; 4106 mtd->_resume = onenand_resume;
4131 mtd->block_isbad = onenand_block_isbad; 4107 mtd->_block_isbad = onenand_block_isbad;
4132 mtd->block_markbad = onenand_block_markbad; 4108 mtd->_block_markbad = onenand_block_markbad;
4133 mtd->owner = THIS_MODULE; 4109 mtd->owner = THIS_MODULE;
4134 mtd->writebufsize = mtd->writesize; 4110 mtd->writebufsize = mtd->writesize;
4135 4111
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index fa1ee43f735b..8e4b3f2742ba 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -923,7 +923,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
923 r = platform_get_resource(pdev, IORESOURCE_MEM, 1); 923 r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
924 if (!r) { 924 if (!r) {
925 dev_err(&pdev->dev, "no buffer memory resource defined\n"); 925 dev_err(&pdev->dev, "no buffer memory resource defined\n");
926 return -ENOENT; 926 err = -ENOENT;
927 goto ahb_resource_failed; 927 goto ahb_resource_failed;
928 } 928 }
929 929
@@ -964,7 +964,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
964 r = platform_get_resource(pdev, IORESOURCE_MEM, 1); 964 r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
965 if (!r) { 965 if (!r) {
966 dev_err(&pdev->dev, "no dma memory resource defined\n"); 966 dev_err(&pdev->dev, "no dma memory resource defined\n");
967 return -ENOENT; 967 err = -ENOENT;
968 goto dma_resource_failed; 968 goto dma_resource_failed;
969 } 969 }
970 970
@@ -1014,7 +1014,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
1014 if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ) 1014 if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
1015 dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n"); 1015 dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
1016 1016
1017 err = mtd_device_parse_register(mtd, NULL, 0, 1017 err = mtd_device_parse_register(mtd, NULL, NULL,
1018 pdata ? pdata->parts : NULL, 1018 pdata ? pdata->parts : NULL,
1019 pdata ? pdata->nr_parts : 0); 1019 pdata ? pdata->nr_parts : 0);
1020 1020
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index 48970c14beff..580035c803d6 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -78,8 +78,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
78 78
79 if ( directory < 0 ) { 79 if ( directory < 0 ) {
80 offset = master->size + directory * master->erasesize; 80 offset = master->size + directory * master->erasesize;
81 while (mtd_can_have_bb(master) && 81 while (mtd_block_isbad(master, offset)) {
82 mtd_block_isbad(master, offset)) {
83 if (!offset) { 82 if (!offset) {
84 nogood: 83 nogood:
85 printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n"); 84 printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
@@ -89,8 +88,7 @@ static int parse_redboot_partitions(struct mtd_info *master,
89 } 88 }
90 } else { 89 } else {
91 offset = directory * master->erasesize; 90 offset = directory * master->erasesize;
92 while (mtd_can_have_bb(master) && 91 while (mtd_block_isbad(master, offset)) {
93 mtd_block_isbad(master, offset)) {
94 offset += master->erasesize; 92 offset += master->erasesize;
95 if (offset == master->size) 93 if (offset == master->size)
96 goto nogood; 94 goto nogood;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 072ed5970e2f..9e2dfd517aa5 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1256,7 +1256,7 @@ static void sm_remove_dev(struct mtd_blktrans_dev *dev)
1256 1256
1257static struct mtd_blktrans_ops sm_ftl_ops = { 1257static struct mtd_blktrans_ops sm_ftl_ops = {
1258 .name = "smblk", 1258 .name = "smblk",
1259 .major = -1, 1259 .major = 0,
1260 .part_bits = SM_FTL_PARTN_BITS, 1260 .part_bits = SM_FTL_PARTN_BITS,
1261 .blksize = SM_SECTOR_SIZE, 1261 .blksize = SM_SECTOR_SIZE,
1262 .getgeo = sm_getgeo, 1262 .getgeo = sm_getgeo,
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 941bc3c05d6e..90b98822d9a4 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -174,11 +174,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
174 int err = 0, lnum, offs, total_read; 174 int err = 0, lnum, offs, total_read;
175 struct gluebi_device *gluebi; 175 struct gluebi_device *gluebi;
176 176
177 if (len < 0 || from < 0 || from + len > mtd->size)
178 return -EINVAL;
179
180 gluebi = container_of(mtd, struct gluebi_device, mtd); 177 gluebi = container_of(mtd, struct gluebi_device, mtd);
181
182 lnum = div_u64_rem(from, mtd->erasesize, &offs); 178 lnum = div_u64_rem(from, mtd->erasesize, &offs);
183 total_read = len; 179 total_read = len;
184 while (total_read) { 180 while (total_read) {
@@ -218,14 +214,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
218 int err = 0, lnum, offs, total_written; 214 int err = 0, lnum, offs, total_written;
219 struct gluebi_device *gluebi; 215 struct gluebi_device *gluebi;
220 216
221 if (len < 0 || to < 0 || len + to > mtd->size)
222 return -EINVAL;
223
224 gluebi = container_of(mtd, struct gluebi_device, mtd); 217 gluebi = container_of(mtd, struct gluebi_device, mtd);
225
226 if (!(mtd->flags & MTD_WRITEABLE))
227 return -EROFS;
228
229 lnum = div_u64_rem(to, mtd->erasesize, &offs); 218 lnum = div_u64_rem(to, mtd->erasesize, &offs);
230 219
231 if (len % mtd->writesize || offs % mtd->writesize) 220 if (len % mtd->writesize || offs % mtd->writesize)
@@ -265,21 +254,13 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
265 int err, i, lnum, count; 254 int err, i, lnum, count;
266 struct gluebi_device *gluebi; 255 struct gluebi_device *gluebi;
267 256
268 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize)
269 return -EINVAL;
270 if (instr->len < 0 || instr->addr + instr->len > mtd->size)
271 return -EINVAL;
272 if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd)) 257 if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
273 return -EINVAL; 258 return -EINVAL;
274 259
275 lnum = mtd_div_by_eb(instr->addr, mtd); 260 lnum = mtd_div_by_eb(instr->addr, mtd);
276 count = mtd_div_by_eb(instr->len, mtd); 261 count = mtd_div_by_eb(instr->len, mtd);
277
278 gluebi = container_of(mtd, struct gluebi_device, mtd); 262 gluebi = container_of(mtd, struct gluebi_device, mtd);
279 263
280 if (!(mtd->flags & MTD_WRITEABLE))
281 return -EROFS;
282
283 for (i = 0; i < count - 1; i++) { 264 for (i = 0; i < count - 1; i++) {
284 err = ubi_leb_unmap(gluebi->desc, lnum + i); 265 err = ubi_leb_unmap(gluebi->desc, lnum + i);
285 if (err) 266 if (err)
@@ -340,11 +321,11 @@ static int gluebi_create(struct ubi_device_info *di,
340 mtd->owner = THIS_MODULE; 321 mtd->owner = THIS_MODULE;
341 mtd->writesize = di->min_io_size; 322 mtd->writesize = di->min_io_size;
342 mtd->erasesize = vi->usable_leb_size; 323 mtd->erasesize = vi->usable_leb_size;
343 mtd->read = gluebi_read; 324 mtd->_read = gluebi_read;
344 mtd->write = gluebi_write; 325 mtd->_write = gluebi_write;
345 mtd->erase = gluebi_erase; 326 mtd->_erase = gluebi_erase;
346 mtd->get_device = gluebi_get_device; 327 mtd->_get_device = gluebi_get_device;
347 mtd->put_device = gluebi_put_device; 328 mtd->_put_device = gluebi_put_device;
348 329
349 /* 330 /*
350 * In case of dynamic a volume, MTD device size is just volume size. In 331 * In case of dynamic a volume, MTD device size is just volume size. In
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 0686b93f1857..f84dd2dc82b6 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -458,7 +458,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
458 if (sg_dma_len(&ctl->sg) % 4) 458 if (sg_dma_len(&ctl->sg) % 4)
459 sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; 459 sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
460 460
461 ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, 461 ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
462 &ctl->sg, 1, DMA_MEM_TO_DEV, 462 &ctl->sg, 1, DMA_MEM_TO_DEV,
463 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); 463 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
464 if (!ctl->adesc) 464 if (!ctl->adesc)
@@ -570,7 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
570 570
571 sg_dma_len(sg) = DMA_BUFFER_SIZE; 571 sg_dma_len(sg) = DMA_BUFFER_SIZE;
572 572
573 ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, 573 ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
574 sg, 1, DMA_DEV_TO_MEM, 574 sg, 1, DMA_DEV_TO_MEM,
575 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); 575 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
576 576
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 26b3c23b0b6f..758148379b0e 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -193,7 +193,7 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
193 erase->state = MTD_ERASE_DONE; 193 erase->state = MTD_ERASE_DONE;
194 } else { 194 } else {
195 erase->state = MTD_ERASE_FAILED; 195 erase->state = MTD_ERASE_FAILED;
196 erase->fail_addr = 0xffffffff; 196 erase->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
197 } 197 }
198 mtd_erase_callback(erase); 198 mtd_erase_callback(erase);
199 return rc; 199 return rc;
@@ -263,10 +263,10 @@ static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
263 part->mtd.owner = THIS_MODULE; 263 part->mtd.owner = THIS_MODULE;
264 part->mtd.priv = efx_mtd; 264 part->mtd.priv = efx_mtd;
265 part->mtd.name = part->name; 265 part->mtd.name = part->name;
266 part->mtd.erase = efx_mtd_erase; 266 part->mtd._erase = efx_mtd_erase;
267 part->mtd.read = efx_mtd->ops->read; 267 part->mtd._read = efx_mtd->ops->read;
268 part->mtd.write = efx_mtd->ops->write; 268 part->mtd._write = efx_mtd->ops->write;
269 part->mtd.sync = efx_mtd_sync; 269 part->mtd._sync = efx_mtd_sync;
270 270
271 if (mtd_device_register(&part->mtd, NULL, 0)) 271 if (mtd_device_register(&part->mtd, NULL, 0))
272 goto fail; 272 goto fail;
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 423eb26386c8..d70ede7a7f96 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -290,8 +290,8 @@ config FARSYNC
290 Frame Relay or X.25/LAPB. 290 Frame Relay or X.25/LAPB.
291 291
292 If you want the module to be automatically loaded when the interface 292 If you want the module to be automatically loaded when the interface
293 is referenced then you should add "alias hdlcX farsync" to 293 is referenced then you should add "alias hdlcX farsync" to a file
294 /etc/modprobe.conf for each interface, where X is 0, 1, 2, ..., or 294 in /etc/modprobe.d/ for each interface, where X is 0, 1, 2, ..., or
295 simply use "alias hdlc* farsync" to indicate all of them. 295 simply use "alias hdlc* farsync" to indicate all of them.
296 296
297 To compile this driver as a module, choose M here: the 297 To compile this driver as a module, choose M here: the
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 060fd22a1103..0f150f271c2a 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -277,40 +277,6 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
277 return 0; 277 return 0;
278} 278}
279 279
280/**
281 * acpi_dev_run_wake - Enable/disable wake-up for given device.
282 * @phys_dev: Device to enable/disable the platform to wake-up the system for.
283 * @enable: Whether enable or disable the wake-up functionality.
284 *
285 * Find the ACPI device object corresponding to @pci_dev and try to
286 * enable/disable the GPE associated with it.
287 */
288static int acpi_dev_run_wake(struct device *phys_dev, bool enable)
289{
290 struct acpi_device *dev;
291 acpi_handle handle;
292
293 if (!device_run_wake(phys_dev))
294 return -EINVAL;
295
296 handle = DEVICE_ACPI_HANDLE(phys_dev);
297 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) {
298 dev_dbg(phys_dev, "ACPI handle has no context in %s!\n",
299 __func__);
300 return -ENODEV;
301 }
302
303 if (enable) {
304 acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
305 acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
306 } else {
307 acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
308 acpi_disable_wakeup_device_power(dev);
309 }
310
311 return 0;
312}
313
314static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable) 280static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
315{ 281{
316 while (bus->parent) { 282 while (bus->parent) {
@@ -318,14 +284,14 @@ static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
318 284
319 if (bridge->pme_interrupt) 285 if (bridge->pme_interrupt)
320 return; 286 return;
321 if (!acpi_dev_run_wake(&bridge->dev, enable)) 287 if (!acpi_pm_device_run_wake(&bridge->dev, enable))
322 return; 288 return;
323 bus = bus->parent; 289 bus = bus->parent;
324 } 290 }
325 291
326 /* We have reached the root bus. */ 292 /* We have reached the root bus. */
327 if (bus->bridge) 293 if (bus->bridge)
328 acpi_dev_run_wake(bus->bridge, enable); 294 acpi_pm_device_run_wake(bus->bridge, enable);
329} 295}
330 296
331static int acpi_pci_run_wake(struct pci_dev *dev, bool enable) 297static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
@@ -333,7 +299,7 @@ static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
333 if (dev->pme_interrupt) 299 if (dev->pme_interrupt)
334 return 0; 300 return 0;
335 301
336 if (!acpi_dev_run_wake(&dev->dev, enable)) 302 if (!acpi_pm_device_run_wake(&dev->dev, enable))
337 return 0; 303 return 0;
338 304
339 acpi_pci_propagate_run_wake(dev->bus, enable); 305 acpi_pci_propagate_run_wake(dev->bus, enable);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 4bdef24cd412..b500840a143b 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -508,9 +508,6 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
508 int pos; 508 int pos;
509 u32 reg32; 509 u32 reg32;
510 510
511 if (aspm_disabled)
512 return 0;
513
514 /* 511 /*
515 * Some functions in a slot might not all be PCIe functions, 512 * Some functions in a slot might not all be PCIe functions,
516 * very strange. Disable ASPM for the whole slot 513 * very strange. Disable ASPM for the whole slot
@@ -519,6 +516,16 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
519 pos = pci_pcie_cap(child); 516 pos = pci_pcie_cap(child);
520 if (!pos) 517 if (!pos)
521 return -EINVAL; 518 return -EINVAL;
519
520 /*
521 * If ASPM is disabled then we're not going to change
522 * the BIOS state. It's safe to continue even if it's a
523 * pre-1.1 device
524 */
525
526 if (aspm_disabled)
527 continue;
528
522 /* 529 /*
523 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use 530 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use
524 * RBER bit to determine if a function is 1.1 version device 531 * RBER bit to determine if a function is 1.1 version device
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 1dd68f502634..9694c1e783a5 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -16,13 +16,13 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/gpio.h>
19 20
20#include <pcmcia/ss.h> 21#include <pcmcia/ss.h>
21 22
22#include <mach/hardware.h> 23#include <mach/hardware.h>
23#include <asm/io.h> 24#include <asm/io.h>
24#include <asm/sizes.h> 25#include <asm/sizes.h>
25#include <asm/gpio.h>
26 26
27#include <mach/board.h> 27#include <mach/board.h>
28#include <mach/at91rm9200_mc.h> 28#include <mach/at91rm9200_mc.h>
@@ -70,7 +70,7 @@ static irqreturn_t at91_cf_irq(int irq, void *_cf)
70{ 70{
71 struct at91_cf_socket *cf = _cf; 71 struct at91_cf_socket *cf = _cf;
72 72
73 if (irq == cf->board->det_pin) { 73 if (irq == gpio_to_irq(cf->board->det_pin)) {
74 unsigned present = at91_cf_present(cf); 74 unsigned present = at91_cf_present(cf);
75 75
76 /* kick pccard as needed */ 76 /* kick pccard as needed */
@@ -96,8 +96,8 @@ static int at91_cf_get_status(struct pcmcia_socket *s, u_int *sp)
96 96
97 /* NOTE: CF is always 3VCARD */ 97 /* NOTE: CF is always 3VCARD */
98 if (at91_cf_present(cf)) { 98 if (at91_cf_present(cf)) {
99 int rdy = cf->board->irq_pin; /* RDY/nIRQ */ 99 int rdy = gpio_is_valid(cf->board->irq_pin); /* RDY/nIRQ */
100 int vcc = cf->board->vcc_pin; 100 int vcc = gpio_is_valid(cf->board->vcc_pin);
101 101
102 *sp = SS_DETECT | SS_3VCARD; 102 *sp = SS_DETECT | SS_3VCARD;
103 if (!rdy || gpio_get_value(rdy)) 103 if (!rdy || gpio_get_value(rdy))
@@ -118,7 +118,7 @@ at91_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
118 cf = container_of(sock, struct at91_cf_socket, socket); 118 cf = container_of(sock, struct at91_cf_socket, socket);
119 119
120 /* switch Vcc if needed and possible */ 120 /* switch Vcc if needed and possible */
121 if (cf->board->vcc_pin) { 121 if (gpio_is_valid(cf->board->vcc_pin)) {
122 switch (s->Vcc) { 122 switch (s->Vcc) {
123 case 0: 123 case 0:
124 gpio_set_value(cf->board->vcc_pin, 0); 124 gpio_set_value(cf->board->vcc_pin, 0);
@@ -222,7 +222,7 @@ static int __init at91_cf_probe(struct platform_device *pdev)
222 struct resource *io; 222 struct resource *io;
223 int status; 223 int status;
224 224
225 if (!board || !board->det_pin || !board->rst_pin) 225 if (!board || !gpio_is_valid(board->det_pin) || !gpio_is_valid(board->rst_pin))
226 return -ENODEV; 226 return -ENODEV;
227 227
228 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 228 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -242,7 +242,7 @@ static int __init at91_cf_probe(struct platform_device *pdev)
242 status = gpio_request(board->det_pin, "cf_det"); 242 status = gpio_request(board->det_pin, "cf_det");
243 if (status < 0) 243 if (status < 0)
244 goto fail0; 244 goto fail0;
245 status = request_irq(board->det_pin, at91_cf_irq, 0, driver_name, cf); 245 status = request_irq(gpio_to_irq(board->det_pin), at91_cf_irq, 0, driver_name, cf);
246 if (status < 0) 246 if (status < 0)
247 goto fail00; 247 goto fail00;
248 device_init_wakeup(&pdev->dev, 1); 248 device_init_wakeup(&pdev->dev, 1);
@@ -251,7 +251,7 @@ static int __init at91_cf_probe(struct platform_device *pdev)
251 if (status < 0) 251 if (status < 0)
252 goto fail0a; 252 goto fail0a;
253 253
254 if (board->vcc_pin) { 254 if (gpio_is_valid(board->vcc_pin)) {
255 status = gpio_request(board->vcc_pin, "cf_vcc"); 255 status = gpio_request(board->vcc_pin, "cf_vcc");
256 if (status < 0) 256 if (status < 0)
257 goto fail0b; 257 goto fail0b;
@@ -263,15 +263,15 @@ static int __init at91_cf_probe(struct platform_device *pdev)
263 * unless we report that we handle everything (sigh). 263 * unless we report that we handle everything (sigh).
264 * (Note: DK board doesn't wire the IRQ pin...) 264 * (Note: DK board doesn't wire the IRQ pin...)
265 */ 265 */
266 if (board->irq_pin) { 266 if (gpio_is_valid(board->irq_pin)) {
267 status = gpio_request(board->irq_pin, "cf_irq"); 267 status = gpio_request(board->irq_pin, "cf_irq");
268 if (status < 0) 268 if (status < 0)
269 goto fail0c; 269 goto fail0c;
270 status = request_irq(board->irq_pin, at91_cf_irq, 270 status = request_irq(gpio_to_irq(board->irq_pin), at91_cf_irq,
271 IRQF_SHARED, driver_name, cf); 271 IRQF_SHARED, driver_name, cf);
272 if (status < 0) 272 if (status < 0)
273 goto fail0d; 273 goto fail0d;
274 cf->socket.pci_irq = board->irq_pin; 274 cf->socket.pci_irq = gpio_to_irq(board->irq_pin);
275 } else 275 } else
276 cf->socket.pci_irq = nr_irqs + 1; 276 cf->socket.pci_irq = nr_irqs + 1;
277 277
@@ -290,7 +290,7 @@ static int __init at91_cf_probe(struct platform_device *pdev)
290 } 290 }
291 291
292 pr_info("%s: irqs det #%d, io #%d\n", driver_name, 292 pr_info("%s: irqs det #%d, io #%d\n", driver_name,
293 board->det_pin, board->irq_pin); 293 gpio_to_irq(board->det_pin), gpio_to_irq(board->irq_pin));
294 294
295 cf->socket.owner = THIS_MODULE; 295 cf->socket.owner = THIS_MODULE;
296 cf->socket.dev.parent = &pdev->dev; 296 cf->socket.dev.parent = &pdev->dev;
@@ -312,19 +312,19 @@ fail2:
312fail1: 312fail1:
313 if (cf->socket.io_offset) 313 if (cf->socket.io_offset)
314 iounmap((void __iomem *) cf->socket.io_offset); 314 iounmap((void __iomem *) cf->socket.io_offset);
315 if (board->irq_pin) { 315 if (gpio_is_valid(board->irq_pin)) {
316 free_irq(board->irq_pin, cf); 316 free_irq(gpio_to_irq(board->irq_pin), cf);
317fail0d: 317fail0d:
318 gpio_free(board->irq_pin); 318 gpio_free(board->irq_pin);
319 } 319 }
320fail0c: 320fail0c:
321 if (board->vcc_pin) 321 if (gpio_is_valid(board->vcc_pin))
322 gpio_free(board->vcc_pin); 322 gpio_free(board->vcc_pin);
323fail0b: 323fail0b:
324 gpio_free(board->rst_pin); 324 gpio_free(board->rst_pin);
325fail0a: 325fail0a:
326 device_init_wakeup(&pdev->dev, 0); 326 device_init_wakeup(&pdev->dev, 0);
327 free_irq(board->det_pin, cf); 327 free_irq(gpio_to_irq(board->det_pin), cf);
328fail00: 328fail00:
329 gpio_free(board->det_pin); 329 gpio_free(board->det_pin);
330fail0: 330fail0:
@@ -341,15 +341,15 @@ static int __exit at91_cf_remove(struct platform_device *pdev)
341 pcmcia_unregister_socket(&cf->socket); 341 pcmcia_unregister_socket(&cf->socket);
342 release_mem_region(io->start, resource_size(io)); 342 release_mem_region(io->start, resource_size(io));
343 iounmap((void __iomem *) cf->socket.io_offset); 343 iounmap((void __iomem *) cf->socket.io_offset);
344 if (board->irq_pin) { 344 if (gpio_is_valid(board->irq_pin)) {
345 free_irq(board->irq_pin, cf); 345 free_irq(gpio_to_irq(board->irq_pin), cf);
346 gpio_free(board->irq_pin); 346 gpio_free(board->irq_pin);
347 } 347 }
348 if (board->vcc_pin) 348 if (gpio_is_valid(board->vcc_pin))
349 gpio_free(board->vcc_pin); 349 gpio_free(board->vcc_pin);
350 gpio_free(board->rst_pin); 350 gpio_free(board->rst_pin);
351 device_init_wakeup(&pdev->dev, 0); 351 device_init_wakeup(&pdev->dev, 0);
352 free_irq(board->det_pin, cf); 352 free_irq(gpio_to_irq(board->det_pin), cf);
353 gpio_free(board->det_pin); 353 gpio_free(board->det_pin);
354 kfree(cf); 354 kfree(cf);
355 return 0; 355 return 0;
@@ -363,9 +363,9 @@ static int at91_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
363 struct at91_cf_data *board = cf->board; 363 struct at91_cf_data *board = cf->board;
364 364
365 if (device_may_wakeup(&pdev->dev)) { 365 if (device_may_wakeup(&pdev->dev)) {
366 enable_irq_wake(board->det_pin); 366 enable_irq_wake(gpio_to_irq(board->det_pin));
367 if (board->irq_pin) 367 if (gpio_is_valid(board->irq_pin))
368 enable_irq_wake(board->irq_pin); 368 enable_irq_wake(gpio_to_irq(board->irq_pin));
369 } 369 }
370 return 0; 370 return 0;
371} 371}
@@ -376,9 +376,9 @@ static int at91_cf_resume(struct platform_device *pdev)
376 struct at91_cf_data *board = cf->board; 376 struct at91_cf_data *board = cf->board;
377 377
378 if (device_may_wakeup(&pdev->dev)) { 378 if (device_may_wakeup(&pdev->dev)) {
379 disable_irq_wake(board->det_pin); 379 disable_irq_wake(gpio_to_irq(board->det_pin));
380 if (board->irq_pin) 380 if (gpio_is_valid(board->irq_pin))
381 disable_irq_wake(board->irq_pin); 381 disable_irq_wake(gpio_to_irq(board->irq_pin));
382 } 382 }
383 383
384 return 0; 384 return 0;
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c
index 693577e0fefc..c2e997a570bf 100644
--- a/drivers/pcmcia/bcm63xx_pcmcia.c
+++ b/drivers/pcmcia/bcm63xx_pcmcia.c
@@ -475,7 +475,7 @@ static void __devexit bcm63xx_cb_exit(struct pci_dev *dev)
475 bcm63xx_cb_dev = NULL; 475 bcm63xx_cb_dev = NULL;
476} 476}
477 477
478static struct pci_device_id bcm63xx_cb_table[] = { 478static DEFINE_PCI_DEVICE_TABLE(bcm63xx_cb_table) = {
479 { 479 {
480 .vendor = PCI_VENDOR_ID_BROADCOM, 480 .vendor = PCI_VENDOR_ID_BROADCOM,
481 .device = BCM6348_CPU_ID, 481 .device = BCM6348_CPU_ID,
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index 49221395101e..ac1a2232eab9 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -310,18 +310,7 @@ static struct platform_driver bfin_cf_driver = {
310 .remove = __devexit_p(bfin_cf_remove), 310 .remove = __devexit_p(bfin_cf_remove),
311}; 311};
312 312
313static int __init bfin_cf_init(void) 313module_platform_driver(bfin_cf_driver);
314{
315 return platform_driver_register(&bfin_cf_driver);
316}
317
318static void __exit bfin_cf_exit(void)
319{
320 platform_driver_unregister(&bfin_cf_driver);
321}
322
323module_init(bfin_cf_init);
324module_exit(bfin_cf_exit);
325 314
326MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); 315MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
327MODULE_DESCRIPTION("BFIN CF/PCMCIA Driver"); 316MODULE_DESCRIPTION("BFIN CF/PCMCIA Driver");
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 5b7c22784aff..a484b1fb3382 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -172,12 +172,12 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock)
172 if ((sock->board_type == BOARD_TYPE_DB1200) || 172 if ((sock->board_type == BOARD_TYPE_DB1200) ||
173 (sock->board_type == BOARD_TYPE_DB1300)) { 173 (sock->board_type == BOARD_TYPE_DB1300)) {
174 ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq, 174 ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq,
175 IRQF_DISABLED, "pcmcia_insert", sock); 175 0, "pcmcia_insert", sock);
176 if (ret) 176 if (ret)
177 goto out1; 177 goto out1;
178 178
179 ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq, 179 ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq,
180 IRQF_DISABLED, "pcmcia_eject", sock); 180 0, "pcmcia_eject", sock);
181 if (ret) { 181 if (ret) {
182 free_irq(sock->insert_irq, sock); 182 free_irq(sock->insert_irq, sock);
183 goto out1; 183 goto out1;
@@ -580,18 +580,7 @@ static struct platform_driver db1x_pcmcia_socket_driver = {
580 .remove = __devexit_p(db1x_pcmcia_socket_remove), 580 .remove = __devexit_p(db1x_pcmcia_socket_remove),
581}; 581};
582 582
583int __init db1x_pcmcia_socket_load(void) 583module_platform_driver(db1x_pcmcia_socket_driver);
584{
585 return platform_driver_register(&db1x_pcmcia_socket_driver);
586}
587
588void __exit db1x_pcmcia_socket_unload(void)
589{
590 platform_driver_unregister(&db1x_pcmcia_socket_driver);
591}
592
593module_init(db1x_pcmcia_socket_load);
594module_exit(db1x_pcmcia_socket_unload);
595 584
596MODULE_LICENSE("GPL"); 585MODULE_LICENSE("GPL");
597MODULE_DESCRIPTION("PCMCIA Socket Services for Alchemy Db/Pb1x00 boards"); 586MODULE_DESCRIPTION("PCMCIA Socket Services for Alchemy Db/Pb1x00 boards");
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 06ad3e5e7d3d..7647d232e9e2 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -365,17 +365,7 @@ static struct platform_driver electra_cf_driver = {
365 .remove = electra_cf_remove, 365 .remove = electra_cf_remove,
366}; 366};
367 367
368static int __init electra_cf_init(void) 368module_platform_driver(electra_cf_driver);
369{
370 return platform_driver_register(&electra_cf_driver);
371}
372module_init(electra_cf_init);
373
374static void __exit electra_cf_exit(void)
375{
376 platform_driver_unregister(&electra_cf_driver);
377}
378module_exit(electra_cf_exit);
379 369
380MODULE_LICENSE("GPL"); 370MODULE_LICENSE("GPL");
381MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>"); 371MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 0b66bfc0e148..4e8831bdb6ef 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -25,14 +25,9 @@
25MODULE_LICENSE("GPL"); 25MODULE_LICENSE("GPL");
26 26
27/* PCI core routines */ 27/* PCI core routines */
28static struct pci_device_id i82092aa_pci_ids[] = { 28static DEFINE_PCI_DEVICE_TABLE(i82092aa_pci_ids) = {
29 { 29 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82092AA_0) },
30 .vendor = PCI_VENDOR_ID_INTEL, 30 { }
31 .device = PCI_DEVICE_ID_INTEL_82092AA_0,
32 .subvendor = PCI_ANY_ID,
33 .subdevice = PCI_ANY_ID,
34 },
35 {}
36}; 31};
37MODULE_DEVICE_TABLE(pci, i82092aa_pci_ids); 32MODULE_DEVICE_TABLE(pci, i82092aa_pci_ids);
38 33
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index a317defd616d..a3a851e49321 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1303,15 +1303,4 @@ static struct platform_driver m8xx_pcmcia_driver = {
1303 .remove = m8xx_remove, 1303 .remove = m8xx_remove,
1304}; 1304};
1305 1305
1306static int __init m8xx_init(void) 1306module_platform_driver(m8xx_pcmcia_driver);
1307{
1308 return platform_driver_register(&m8xx_pcmcia_driver);
1309}
1310
1311static void __exit m8xx_exit(void)
1312{
1313 platform_driver_unregister(&m8xx_pcmcia_driver);
1314}
1315
1316module_init(m8xx_init);
1317module_exit(m8xx_exit);
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 0f8b70b27762..253e3867dec7 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -762,13 +762,8 @@ static void __devexit pd6729_pci_remove(struct pci_dev *dev)
762 kfree(socket); 762 kfree(socket);
763} 763}
764 764
765static struct pci_device_id pd6729_pci_ids[] = { 765static DEFINE_PCI_DEVICE_TABLE(pd6729_pci_ids) = {
766 { 766 { PCI_DEVICE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6729) },
767 .vendor = PCI_VENDOR_ID_CIRRUS,
768 .device = PCI_DEVICE_ID_CIRRUS_6729,
769 .subvendor = PCI_ANY_ID,
770 .subdevice = PCI_ANY_ID,
771 },
772 { } 767 { }
773}; 768};
774MODULE_DEVICE_TABLE(pci, pd6729_pci_ids); 769MODULE_DEVICE_TABLE(pci, pd6729_pci_ids);
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/drivers/pcmcia/pxa2xx_viper.c
index adfae4987a42..cb0c37ec7f24 100644
--- a/drivers/pcmcia/pxa2xx_viper.c
+++ b/drivers/pcmcia/pxa2xx_viper.c
@@ -177,18 +177,7 @@ static struct platform_driver viper_pcmcia_driver = {
177 .id_table = viper_pcmcia_id_table, 177 .id_table = viper_pcmcia_id_table,
178}; 178};
179 179
180static int __init viper_pcmcia_init(void) 180module_platform_driver(viper_pcmcia_driver);
181{
182 return platform_driver_register(&viper_pcmcia_driver);
183}
184
185static void __exit viper_pcmcia_exit(void)
186{
187 return platform_driver_unregister(&viper_pcmcia_driver);
188}
189
190module_init(viper_pcmcia_init);
191module_exit(viper_pcmcia_exit);
192 181
193MODULE_DEVICE_TABLE(platform, viper_pcmcia_id_table); 182MODULE_DEVICE_TABLE(platform, viper_pcmcia_id_table);
194MODULE_LICENSE("GPL"); 183MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/vrc4173_cardu.c b/drivers/pcmcia/vrc4173_cardu.c
index c6d36b3a6ce8..cd0a315d922b 100644
--- a/drivers/pcmcia/vrc4173_cardu.c
+++ b/drivers/pcmcia/vrc4173_cardu.c
@@ -563,11 +563,8 @@ static int __devinit vrc4173_cardu_setup(char *options)
563 563
564__setup("vrc4173_cardu=", vrc4173_cardu_setup); 564__setup("vrc4173_cardu=", vrc4173_cardu_setup);
565 565
566static struct pci_device_id vrc4173_cardu_id_table[] __devinitdata = { 566static DEFINE_PCI_DEVICE_TABLE(vrc4173_cardu_id_table) = {
567 { .vendor = PCI_VENDOR_ID_NEC, 567 { PCI_DEVICE(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_NAPCCARD) },
568 .device = PCI_DEVICE_ID_NEC_NAPCCARD,
569 .subvendor = PCI_ANY_ID,
570 .subdevice = PCI_ANY_ID, },
571 {0, } 568 {0, }
572}; 569};
573 570
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c
index 8f6698074f8e..fd5fbd10aad0 100644
--- a/drivers/pcmcia/xxs1500_ss.c
+++ b/drivers/pcmcia/xxs1500_ss.c
@@ -320,18 +320,7 @@ static struct platform_driver xxs1500_pcmcia_socket_driver = {
320 .remove = __devexit_p(xxs1500_pcmcia_remove), 320 .remove = __devexit_p(xxs1500_pcmcia_remove),
321}; 321};
322 322
323int __init xxs1500_pcmcia_socket_load(void) 323module_platform_driver(xxs1500_pcmcia_socket_driver);
324{
325 return platform_driver_register(&xxs1500_pcmcia_socket_driver);
326}
327
328void __exit xxs1500_pcmcia_socket_unload(void)
329{
330 platform_driver_unregister(&xxs1500_pcmcia_socket_driver);
331}
332
333module_init(xxs1500_pcmcia_socket_load);
334module_exit(xxs1500_pcmcia_socket_unload);
335 324
336MODULE_LICENSE("GPL"); 325MODULE_LICENSE("GPL");
337MODULE_DESCRIPTION("PCMCIA Socket Services for MyCable XXS1500 systems"); 326MODULE_DESCRIPTION("PCMCIA Socket Services for MyCable XXS1500 systems");
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 849c0c11d2af..d07f9ac8c41d 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1352,7 +1352,7 @@ static const struct dev_pm_ops yenta_pm_ops = {
1352 .driver_data = CARDBUS_TYPE_##type, \ 1352 .driver_data = CARDBUS_TYPE_##type, \
1353 } 1353 }
1354 1354
1355static struct pci_device_id yenta_table[] = { 1355static DEFINE_PCI_DEVICE_TABLE(yenta_table) = {
1356 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1031, TI), 1356 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1031, TI),
1357 1357
1358 /* 1358 /*
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 88a98cff5a44..f7ba316e0ed6 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -609,25 +609,16 @@ static bool mcp_exceeded(struct ips_driver *ips)
609 bool ret = false; 609 bool ret = false;
610 u32 temp_limit; 610 u32 temp_limit;
611 u32 avg_power; 611 u32 avg_power;
612 const char *msg = "MCP limit exceeded: ";
613 612
614 spin_lock_irqsave(&ips->turbo_status_lock, flags); 613 spin_lock_irqsave(&ips->turbo_status_lock, flags);
615 614
616 temp_limit = ips->mcp_temp_limit * 100; 615 temp_limit = ips->mcp_temp_limit * 100;
617 if (ips->mcp_avg_temp > temp_limit) { 616 if (ips->mcp_avg_temp > temp_limit)
618 dev_info(&ips->dev->dev,
619 "%sAvg temp %u, limit %u\n", msg, ips->mcp_avg_temp,
620 temp_limit);
621 ret = true; 617 ret = true;
622 }
623 618
624 avg_power = ips->cpu_avg_power + ips->mch_avg_power; 619 avg_power = ips->cpu_avg_power + ips->mch_avg_power;
625 if (avg_power > ips->mcp_power_limit) { 620 if (avg_power > ips->mcp_power_limit)
626 dev_info(&ips->dev->dev,
627 "%sAvg power %u, limit %u\n", msg, avg_power,
628 ips->mcp_power_limit);
629 ret = true; 621 ret = true;
630 }
631 622
632 spin_unlock_irqrestore(&ips->turbo_status_lock, flags); 623 spin_unlock_irqrestore(&ips->turbo_status_lock, flags);
633 624
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index b00c17612a89..d21e8f59c84e 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -321,9 +321,14 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
321{ 321{
322 struct acpi_device *acpi = to_acpi_device(dev); 322 struct acpi_device *acpi = to_acpi_device(dev);
323 struct pnp_dev *pnp = _pnp; 323 struct pnp_dev *pnp = _pnp;
324 struct device *physical_device;
325
326 physical_device = acpi_get_physical_device(acpi->handle);
327 if (physical_device)
328 put_device(physical_device);
324 329
325 /* true means it matched */ 330 /* true means it matched */
326 return !acpi_get_physical_device(acpi->handle) 331 return !physical_device
327 && compare_pnp_id(pnp->id, acpi_device_hid(acpi)); 332 && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
328} 333}
329 334
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 459f66437fe9..99dc29f2f2f2 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -249,7 +249,7 @@ config CHARGER_TWL4030
249 Say Y here to enable support for TWL4030 Battery Charge Interface. 249 Say Y here to enable support for TWL4030 Battery Charge Interface.
250 250
251config CHARGER_LP8727 251config CHARGER_LP8727
252 tristate "National Semiconductor LP8727 charger driver" 252 tristate "TI/National Semiconductor LP8727 charger driver"
253 depends on I2C 253 depends on I2C
254 help 254 help
255 Say Y here to enable support for LP8727 Charger Driver. 255 Say Y here to enable support for LP8727 Charger Driver.
@@ -288,4 +288,23 @@ config CHARGER_MAX8998
288 Say Y to enable support for the battery charger control sysfs and 288 Say Y to enable support for the battery charger control sysfs and
289 platform data of MAX8998/LP3974 PMICs. 289 platform data of MAX8998/LP3974 PMICs.
290 290
291config CHARGER_SMB347
292 tristate "Summit Microelectronics SMB347 Battery Charger"
293 depends on I2C
294 help
295 Say Y to include support for Summit Microelectronics SMB347
296 Battery Charger.
297
298config AB8500_BM
299 bool "AB8500 Battery Management Driver"
300 depends on AB8500_CORE && AB8500_GPADC
301 help
302 Say Y to include support for AB5500 battery management.
303
304config AB8500_BATTERY_THERM_ON_BATCTRL
305 bool "Thermistor connected on BATCTRL ADC"
306 depends on AB8500_BM
307 help
308 Say Y to enable battery temperature measurements using
309 thermistor connected on BATCTRL ADC.
291endif # POWER_SUPPLY 310endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index c590fa533406..b6b243416c0e 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o
34obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o 34obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
35obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o 35obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o
36obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o 36obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
37obj-$(CONFIG_AB8500_BM) += ab8500_charger.o ab8500_btemp.o ab8500_fg.o abx500_chargalg.o
37obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o 38obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
38obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o 39obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
39obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o 40obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
@@ -42,3 +43,4 @@ obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
42obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o 43obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
43obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o 44obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
44obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o 45obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
46obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
new file mode 100644
index 000000000000..d8bb99394ac0
--- /dev/null
+++ b/drivers/power/ab8500_btemp.c
@@ -0,0 +1,1124 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2012
3 *
4 * Battery temperature driver for AB8500
5 *
6 * License Terms: GNU General Public License v2
7 * Author:
8 * Johan Palsson <johan.palsson@stericsson.com>
9 * Karl Komierowski <karl.komierowski@stericsson.com>
10 * Arun R Murthy <arun.murthy@stericsson.com>
11 */
12
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/device.h>
16#include <linux/interrupt.h>
17#include <linux/delay.h>
18#include <linux/slab.h>
19#include <linux/platform_device.h>
20#include <linux/power_supply.h>
21#include <linux/completion.h>
22#include <linux/workqueue.h>
23#include <linux/mfd/abx500/ab8500.h>
24#include <linux/mfd/abx500.h>
25#include <linux/mfd/abx500/ab8500-bm.h>
26#include <linux/mfd/abx500/ab8500-gpadc.h>
27#include <linux/jiffies.h>
28
29#define VTVOUT_V 1800
30
31#define BTEMP_THERMAL_LOW_LIMIT -10
32#define BTEMP_THERMAL_MED_LIMIT 0
33#define BTEMP_THERMAL_HIGH_LIMIT_52 52
34#define BTEMP_THERMAL_HIGH_LIMIT_57 57
35#define BTEMP_THERMAL_HIGH_LIMIT_62 62
36
37#define BTEMP_BATCTRL_CURR_SRC_7UA 7
38#define BTEMP_BATCTRL_CURR_SRC_20UA 20
39
40#define to_ab8500_btemp_device_info(x) container_of((x), \
41 struct ab8500_btemp, btemp_psy);
42
43/**
44 * struct ab8500_btemp_interrupts - ab8500 interrupts
45 * @name: name of the interrupt
46 * @isr function pointer to the isr
47 */
48struct ab8500_btemp_interrupts {
49 char *name;
50 irqreturn_t (*isr)(int irq, void *data);
51};
52
53struct ab8500_btemp_events {
54 bool batt_rem;
55 bool btemp_high;
56 bool btemp_medhigh;
57 bool btemp_lowmed;
58 bool btemp_low;
59 bool ac_conn;
60 bool usb_conn;
61};
62
63struct ab8500_btemp_ranges {
64 int btemp_high_limit;
65 int btemp_med_limit;
66 int btemp_low_limit;
67};
68
69/**
70 * struct ab8500_btemp - ab8500 BTEMP device information
71 * @dev: Pointer to the structure device
72 * @node: List of AB8500 BTEMPs, hence prepared for reentrance
73 * @curr_source: What current source we use, in uA
74 * @bat_temp: Battery temperature in degree Celcius
75 * @prev_bat_temp Last dispatched battery temperature
76 * @parent: Pointer to the struct ab8500
77 * @gpadc: Pointer to the struct gpadc
78 * @fg: Pointer to the struct fg
79 * @pdata: Pointer to the abx500_btemp platform data
80 * @bat: Pointer to the abx500_bm platform data
81 * @btemp_psy: Structure for BTEMP specific battery properties
82 * @events: Structure for information about events triggered
83 * @btemp_ranges: Battery temperature range structure
84 * @btemp_wq: Work queue for measuring the temperature periodically
85 * @btemp_periodic_work: Work for measuring the temperature periodically
86 */
87struct ab8500_btemp {
88 struct device *dev;
89 struct list_head node;
90 int curr_source;
91 int bat_temp;
92 int prev_bat_temp;
93 struct ab8500 *parent;
94 struct ab8500_gpadc *gpadc;
95 struct ab8500_fg *fg;
96 struct abx500_btemp_platform_data *pdata;
97 struct abx500_bm_data *bat;
98 struct power_supply btemp_psy;
99 struct ab8500_btemp_events events;
100 struct ab8500_btemp_ranges btemp_ranges;
101 struct workqueue_struct *btemp_wq;
102 struct delayed_work btemp_periodic_work;
103};
104
105/* BTEMP power supply properties */
106static enum power_supply_property ab8500_btemp_props[] = {
107 POWER_SUPPLY_PROP_PRESENT,
108 POWER_SUPPLY_PROP_ONLINE,
109 POWER_SUPPLY_PROP_TECHNOLOGY,
110 POWER_SUPPLY_PROP_TEMP,
111};
112
113static LIST_HEAD(ab8500_btemp_list);
114
115/**
116 * ab8500_btemp_get() - returns a reference to the primary AB8500 BTEMP
117 * (i.e. the first BTEMP in the instance list)
118 */
119struct ab8500_btemp *ab8500_btemp_get(void)
120{
121 struct ab8500_btemp *btemp;
122 btemp = list_first_entry(&ab8500_btemp_list, struct ab8500_btemp, node);
123
124 return btemp;
125}
126
127/**
128 * ab8500_btemp_batctrl_volt_to_res() - convert batctrl voltage to resistance
129 * @di: pointer to the ab8500_btemp structure
130 * @v_batctrl: measured batctrl voltage
131 * @inst_curr: measured instant current
132 *
133 * This function returns the battery resistance that is
134 * derived from the BATCTRL voltage.
135 * Returns value in Ohms.
136 */
137static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
138 int v_batctrl, int inst_curr)
139{
140 int rbs;
141
142 if (is_ab8500_1p1_or_earlier(di->parent)) {
143 /*
144 * For ABB cut1.0 and 1.1 BAT_CTRL is internally
145 * connected to 1.8V through a 450k resistor
146 */
147 return (450000 * (v_batctrl)) / (1800 - v_batctrl);
148 }
149
150 if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL) {
151 /*
152 * If the battery has internal NTC, we use the current
153 * source to calculate the resistance, 7uA or 20uA
154 */
155 rbs = (v_batctrl * 1000
156 - di->bat->gnd_lift_resistance * inst_curr)
157 / di->curr_source;
158 } else {
159 /*
160 * BAT_CTRL is internally
161 * connected to 1.8V through a 80k resistor
162 */
163 rbs = (80000 * (v_batctrl)) / (1800 - v_batctrl);
164 }
165
166 return rbs;
167}
168
169/**
170 * ab8500_btemp_read_batctrl_voltage() - measure batctrl voltage
171 * @di: pointer to the ab8500_btemp structure
172 *
173 * This function returns the voltage on BATCTRL. Returns value in mV.
174 */
175static int ab8500_btemp_read_batctrl_voltage(struct ab8500_btemp *di)
176{
177 int vbtemp;
178 static int prev;
179
180 vbtemp = ab8500_gpadc_convert(di->gpadc, BAT_CTRL);
181 if (vbtemp < 0) {
182 dev_err(di->dev,
183 "%s gpadc conversion failed, using previous value",
184 __func__);
185 return prev;
186 }
187 prev = vbtemp;
188 return vbtemp;
189}
190
191/**
192 * ab8500_btemp_curr_source_enable() - enable/disable batctrl current source
193 * @di: pointer to the ab8500_btemp structure
194 * @enable: enable or disable the current source
195 *
196 * Enable or disable the current sources for the BatCtrl AD channel
197 */
198static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
199 bool enable)
200{
201 int curr;
202 int ret = 0;
203
204 /*
205 * BATCTRL current sources are included on AB8500 cut2.0
206 * and future versions
207 */
208 if (is_ab8500_1p1_or_earlier(di->parent))
209 return 0;
210
211 /* Only do this for batteries with internal NTC */
212 if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
213 if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
214 curr = BAT_CTRL_7U_ENA;
215 else
216 curr = BAT_CTRL_20U_ENA;
217
218 dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source);
219
220 ret = abx500_mask_and_set_register_interruptible(di->dev,
221 AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
222 FORCE_BAT_CTRL_CMP_HIGH, FORCE_BAT_CTRL_CMP_HIGH);
223 if (ret) {
224 dev_err(di->dev, "%s failed setting cmp_force\n",
225 __func__);
226 return ret;
227 }
228
229 /*
230 * We have to wait one 32kHz cycle before enabling
231 * the current source, since ForceBatCtrlCmpHigh needs
232 * to be written in a separate cycle
233 */
234 udelay(32);
235
236 ret = abx500_set_register_interruptible(di->dev,
237 AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
238 FORCE_BAT_CTRL_CMP_HIGH | curr);
239 if (ret) {
240 dev_err(di->dev, "%s failed enabling current source\n",
241 __func__);
242 goto disable_curr_source;
243 }
244 } else if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
245 dev_dbg(di->dev, "Disable BATCTRL curr source\n");
246
247 /* Write 0 to the curr bits */
248 ret = abx500_mask_and_set_register_interruptible(di->dev,
249 AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
250 BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
251 ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
252 if (ret) {
253 dev_err(di->dev, "%s failed disabling current source\n",
254 __func__);
255 goto disable_curr_source;
256 }
257
258 /* Enable Pull-Up and comparator */
259 ret = abx500_mask_and_set_register_interruptible(di->dev,
260 AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
261 BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA,
262 BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA);
263 if (ret) {
264 dev_err(di->dev, "%s failed enabling PU and comp\n",
265 __func__);
266 goto enable_pu_comp;
267 }
268
269 /*
270 * We have to wait one 32kHz cycle before disabling
271 * ForceBatCtrlCmpHigh since this needs to be written
272 * in a separate cycle
273 */
274 udelay(32);
275
276 /* Disable 'force comparator' */
277 ret = abx500_mask_and_set_register_interruptible(di->dev,
278 AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
279 FORCE_BAT_CTRL_CMP_HIGH, ~FORCE_BAT_CTRL_CMP_HIGH);
280 if (ret) {
281 dev_err(di->dev, "%s failed disabling force comp\n",
282 __func__);
283 goto disable_force_comp;
284 }
285 }
286 return ret;
287
288 /*
289 * We have to try unsetting FORCE_BAT_CTRL_CMP_HIGH one more time
290 * if we got an error above
291 */
292disable_curr_source:
293 /* Write 0 to the curr bits */
294 ret = abx500_mask_and_set_register_interruptible(di->dev,
295 AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
296 BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
297 ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
298 if (ret) {
299 dev_err(di->dev, "%s failed disabling current source\n",
300 __func__);
301 return ret;
302 }
303enable_pu_comp:
304 /* Enable Pull-Up and comparator */
305 ret = abx500_mask_and_set_register_interruptible(di->dev,
306 AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
307 BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA,
308 BAT_CTRL_PULL_UP_ENA | BAT_CTRL_CMP_ENA);
309 if (ret) {
310 dev_err(di->dev, "%s failed enabling PU and comp\n",
311 __func__);
312 return ret;
313 }
314
315disable_force_comp:
316 /*
317 * We have to wait one 32kHz cycle before disabling
318 * ForceBatCtrlCmpHigh since this needs to be written
319 * in a separate cycle
320 */
321 udelay(32);
322
323 /* Disable 'force comparator' */
324 ret = abx500_mask_and_set_register_interruptible(di->dev,
325 AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
326 FORCE_BAT_CTRL_CMP_HIGH, ~FORCE_BAT_CTRL_CMP_HIGH);
327 if (ret) {
328 dev_err(di->dev, "%s failed disabling force comp\n",
329 __func__);
330 return ret;
331 }
332
333 return ret;
334}
335
336/**
337 * ab8500_btemp_get_batctrl_res() - get battery resistance
338 * @di: pointer to the ab8500_btemp structure
339 *
340 * This function returns the battery pack identification resistance.
341 * Returns value in Ohms.
342 */
343static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di)
344{
345 int ret;
346 int batctrl = 0;
347 int res;
348 int inst_curr;
349 int i;
350
351 /*
352 * BATCTRL current sources are included on AB8500 cut2.0
353 * and future versions
354 */
355 ret = ab8500_btemp_curr_source_enable(di, true);
356 if (ret) {
357 dev_err(di->dev, "%s curr source enabled failed\n", __func__);
358 return ret;
359 }
360
361 if (!di->fg)
362 di->fg = ab8500_fg_get();
363 if (!di->fg) {
364 dev_err(di->dev, "No fg found\n");
365 return -EINVAL;
366 }
367
368 ret = ab8500_fg_inst_curr_start(di->fg);
369
370 if (ret) {
371 dev_err(di->dev, "Failed to start current measurement\n");
372 return ret;
373 }
374
375 /*
376 * Since there is no interrupt when current measurement is done,
377 * loop for over 250ms (250ms is one sample conversion time
378 * with 32.768 Khz RTC clock). Note that a stop time must be set
379 * since the ab8500_btemp_read_batctrl_voltage call can block and
380 * take an unknown amount of time to complete.
381 */
382 i = 0;
383
384 do {
385 batctrl += ab8500_btemp_read_batctrl_voltage(di);
386 i++;
387 msleep(20);
388 } while (!ab8500_fg_inst_curr_done(di->fg));
389 batctrl /= i;
390
391 ret = ab8500_fg_inst_curr_finalize(di->fg, &inst_curr);
392 if (ret) {
393 dev_err(di->dev, "Failed to finalize current measurement\n");
394 return ret;
395 }
396
397 res = ab8500_btemp_batctrl_volt_to_res(di, batctrl, inst_curr);
398
399 ret = ab8500_btemp_curr_source_enable(di, false);
400 if (ret) {
401 dev_err(di->dev, "%s curr source disable failed\n", __func__);
402 return ret;
403 }
404
405 dev_dbg(di->dev, "%s batctrl: %d res: %d inst_curr: %d samples: %d\n",
406 __func__, batctrl, res, inst_curr, i);
407
408 return res;
409}
410
411/**
412 * ab8500_btemp_res_to_temp() - resistance to temperature
413 * @di: pointer to the ab8500_btemp structure
414 * @tbl: pointer to the resiatance to temperature table
415 * @tbl_size: size of the resistance to temperature table
416 * @res: resistance to calculate the temperature from
417 *
418 * This function returns the battery temperature in degrees Celcius
419 * based on the NTC resistance.
420 */
421static int ab8500_btemp_res_to_temp(struct ab8500_btemp *di,
422 const struct abx500_res_to_temp *tbl, int tbl_size, int res)
423{
424 int i, temp;
425 /*
426 * Calculate the formula for the straight line
427 * Simple interpolation if we are within
428 * the resistance table limits, extrapolate
429 * if resistance is outside the limits.
430 */
431 if (res > tbl[0].resist)
432 i = 0;
433 else if (res <= tbl[tbl_size - 1].resist)
434 i = tbl_size - 2;
435 else {
436 i = 0;
437 while (!(res <= tbl[i].resist &&
438 res > tbl[i + 1].resist))
439 i++;
440 }
441
442 temp = tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) *
443 (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
444 return temp;
445}
446
447/**
448 * ab8500_btemp_measure_temp() - measure battery temperature
449 * @di: pointer to the ab8500_btemp structure
450 *
451 * Returns battery temperature (on success) else the previous temperature
452 */
453static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
454{
455 int temp;
456 static int prev;
457 int rbat, rntc, vntc;
458 u8 id;
459
460 id = di->bat->batt_id;
461
462 if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
463 id != BATTERY_UNKNOWN) {
464
465 rbat = ab8500_btemp_get_batctrl_res(di);
466 if (rbat < 0) {
467 dev_err(di->dev, "%s get batctrl res failed\n",
468 __func__);
469 /*
470 * Return out-of-range temperature so that
471 * charging is stopped
472 */
473 return BTEMP_THERMAL_LOW_LIMIT;
474 }
475
476 temp = ab8500_btemp_res_to_temp(di,
477 di->bat->bat_type[id].r_to_t_tbl,
478 di->bat->bat_type[id].n_temp_tbl_elements, rbat);
479 } else {
480 vntc = ab8500_gpadc_convert(di->gpadc, BTEMP_BALL);
481 if (vntc < 0) {
482 dev_err(di->dev,
483 "%s gpadc conversion failed,"
484 " using previous value\n", __func__);
485 return prev;
486 }
487 /*
488 * The PCB NTC is sourced from VTVOUT via a 230kOhm
489 * resistor.
490 */
491 rntc = 230000 * vntc / (VTVOUT_V - vntc);
492
493 temp = ab8500_btemp_res_to_temp(di,
494 di->bat->bat_type[id].r_to_t_tbl,
495 di->bat->bat_type[id].n_temp_tbl_elements, rntc);
496 prev = temp;
497 }
498 dev_dbg(di->dev, "Battery temperature is %d\n", temp);
499 return temp;
500}
501
502/**
503 * ab8500_btemp_id() - Identify the connected battery
504 * @di: pointer to the ab8500_btemp structure
505 *
506 * This function will try to identify the battery by reading the ID
507 * resistor. Some brands use a combined ID resistor with a NTC resistor to
508 * both be able to identify and to read the temperature of it.
509 */
510static int ab8500_btemp_id(struct ab8500_btemp *di)
511{
512 int res;
513 u8 i;
514
515 di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
516 di->bat->batt_id = BATTERY_UNKNOWN;
517
518 res = ab8500_btemp_get_batctrl_res(di);
519 if (res < 0) {
520 dev_err(di->dev, "%s get batctrl res failed\n", __func__);
521 return -ENXIO;
522 }
523
524 /* BATTERY_UNKNOWN is defined on position 0, skip it! */
525 for (i = BATTERY_UNKNOWN + 1; i < di->bat->n_btypes; i++) {
526 if ((res <= di->bat->bat_type[i].resis_high) &&
527 (res >= di->bat->bat_type[i].resis_low)) {
528 dev_dbg(di->dev, "Battery detected on %s"
529 " low %d < res %d < high: %d"
530 " index: %d\n",
531 di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL ?
532 "BATCTRL" : "BATTEMP",
533 di->bat->bat_type[i].resis_low, res,
534 di->bat->bat_type[i].resis_high, i);
535
536 di->bat->batt_id = i;
537 break;
538 }
539 }
540
541 if (di->bat->batt_id == BATTERY_UNKNOWN) {
542 dev_warn(di->dev, "Battery identified as unknown"
543 ", resistance %d Ohm\n", res);
544 return -ENXIO;
545 }
546
547 /*
548 * We only have to change current source if the
549 * detected type is Type 1, else we use the 7uA source
550 */
551 if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
552 di->bat->batt_id == 1) {
553 dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
554 di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
555 }
556
557 return di->bat->batt_id;
558}
559
560/**
561 * ab8500_btemp_periodic_work() - Measuring the temperature periodically
562 * @work: pointer to the work_struct structure
563 *
564 * Work function for measuring the temperature periodically
565 */
566static void ab8500_btemp_periodic_work(struct work_struct *work)
567{
568 int interval;
569 struct ab8500_btemp *di = container_of(work,
570 struct ab8500_btemp, btemp_periodic_work.work);
571
572 di->bat_temp = ab8500_btemp_measure_temp(di);
573
574 if (di->bat_temp != di->prev_bat_temp) {
575 di->prev_bat_temp = di->bat_temp;
576 power_supply_changed(&di->btemp_psy);
577 }
578
579 if (di->events.ac_conn || di->events.usb_conn)
580 interval = di->bat->temp_interval_chg;
581 else
582 interval = di->bat->temp_interval_nochg;
583
584 /* Schedule a new measurement */
585 queue_delayed_work(di->btemp_wq,
586 &di->btemp_periodic_work,
587 round_jiffies(interval * HZ));
588}
589
590/**
591 * ab8500_btemp_batctrlindb_handler() - battery removal detected
592 * @irq: interrupt number
593 * @_di: void pointer that has to address of ab8500_btemp
594 *
595 * Returns IRQ status(IRQ_HANDLED)
596 */
597static irqreturn_t ab8500_btemp_batctrlindb_handler(int irq, void *_di)
598{
599 struct ab8500_btemp *di = _di;
600 dev_err(di->dev, "Battery removal detected!\n");
601
602 di->events.batt_rem = true;
603 power_supply_changed(&di->btemp_psy);
604
605 return IRQ_HANDLED;
606}
607
608/**
609 * ab8500_btemp_templow_handler() - battery temp lower than 10 degrees
610 * @irq: interrupt number
611 * @_di: void pointer that has to address of ab8500_btemp
612 *
613 * Returns IRQ status(IRQ_HANDLED)
614 */
615static irqreturn_t ab8500_btemp_templow_handler(int irq, void *_di)
616{
617 struct ab8500_btemp *di = _di;
618
619 if (is_ab8500_2p0_or_earlier(di->parent)) {
620 dev_dbg(di->dev, "Ignore false btemp low irq"
621 " for ABB cut 1.0, 1.1 and 2.0\n");
622 } else {
623 dev_crit(di->dev, "Battery temperature lower than -10deg c\n");
624
625 di->events.btemp_low = true;
626 di->events.btemp_high = false;
627 di->events.btemp_medhigh = false;
628 di->events.btemp_lowmed = false;
629 power_supply_changed(&di->btemp_psy);
630 }
631
632 return IRQ_HANDLED;
633}
634
635/**
636 * ab8500_btemp_temphigh_handler() - battery temp higher than max temp
637 * @irq: interrupt number
638 * @_di: void pointer that has to address of ab8500_btemp
639 *
640 * Returns IRQ status(IRQ_HANDLED)
641 */
642static irqreturn_t ab8500_btemp_temphigh_handler(int irq, void *_di)
643{
644 struct ab8500_btemp *di = _di;
645
646 dev_crit(di->dev, "Battery temperature is higher than MAX temp\n");
647
648 di->events.btemp_high = true;
649 di->events.btemp_medhigh = false;
650 di->events.btemp_lowmed = false;
651 di->events.btemp_low = false;
652 power_supply_changed(&di->btemp_psy);
653
654 return IRQ_HANDLED;
655}
656
657/**
658 * ab8500_btemp_lowmed_handler() - battery temp between low and medium
659 * @irq: interrupt number
660 * @_di: void pointer that has to address of ab8500_btemp
661 *
662 * Returns IRQ status(IRQ_HANDLED)
663 */
664static irqreturn_t ab8500_btemp_lowmed_handler(int irq, void *_di)
665{
666 struct ab8500_btemp *di = _di;
667
668 dev_dbg(di->dev, "Battery temperature is between low and medium\n");
669
670 di->events.btemp_lowmed = true;
671 di->events.btemp_medhigh = false;
672 di->events.btemp_high = false;
673 di->events.btemp_low = false;
674 power_supply_changed(&di->btemp_psy);
675
676 return IRQ_HANDLED;
677}
678
679/**
680 * ab8500_btemp_medhigh_handler() - battery temp between medium and high
681 * @irq: interrupt number
682 * @_di: void pointer that has to address of ab8500_btemp
683 *
684 * Returns IRQ status(IRQ_HANDLED)
685 */
686static irqreturn_t ab8500_btemp_medhigh_handler(int irq, void *_di)
687{
688 struct ab8500_btemp *di = _di;
689
690 dev_dbg(di->dev, "Battery temperature is between medium and high\n");
691
692 di->events.btemp_medhigh = true;
693 di->events.btemp_lowmed = false;
694 di->events.btemp_high = false;
695 di->events.btemp_low = false;
696 power_supply_changed(&di->btemp_psy);
697
698 return IRQ_HANDLED;
699}
700
701/**
702 * ab8500_btemp_periodic() - Periodic temperature measurements
703 * @di: pointer to the ab8500_btemp structure
704 * @enable: enable or disable periodic temperature measurements
705 *
706 * Starts of stops periodic temperature measurements. Periodic measurements
707 * should only be done when a charger is connected.
708 */
709static void ab8500_btemp_periodic(struct ab8500_btemp *di,
710 bool enable)
711{
712 dev_dbg(di->dev, "Enable periodic temperature measurements: %d\n",
713 enable);
714 /*
715 * Make sure a new measurement is done directly by cancelling
716 * any pending work
717 */
718 cancel_delayed_work_sync(&di->btemp_periodic_work);
719
720 if (enable)
721 queue_delayed_work(di->btemp_wq, &di->btemp_periodic_work, 0);
722}
723
724/**
725 * ab8500_btemp_get_temp() - get battery temperature
726 * @di: pointer to the ab8500_btemp structure
727 *
728 * Returns battery temperature
729 */
730static int ab8500_btemp_get_temp(struct ab8500_btemp *di)
731{
732 int temp = 0;
733
734 /*
735 * The BTEMP events are not reliabe on AB8500 cut2.0
736 * and prior versions
737 */
738 if (is_ab8500_2p0_or_earlier(di->parent)) {
739 temp = di->bat_temp * 10;
740 } else {
741 if (di->events.btemp_low) {
742 if (temp > di->btemp_ranges.btemp_low_limit)
743 temp = di->btemp_ranges.btemp_low_limit;
744 else
745 temp = di->bat_temp * 10;
746 } else if (di->events.btemp_high) {
747 if (temp < di->btemp_ranges.btemp_high_limit)
748 temp = di->btemp_ranges.btemp_high_limit;
749 else
750 temp = di->bat_temp * 10;
751 } else if (di->events.btemp_lowmed) {
752 if (temp > di->btemp_ranges.btemp_med_limit)
753 temp = di->btemp_ranges.btemp_med_limit;
754 else
755 temp = di->bat_temp * 10;
756 } else if (di->events.btemp_medhigh) {
757 if (temp < di->btemp_ranges.btemp_med_limit)
758 temp = di->btemp_ranges.btemp_med_limit;
759 else
760 temp = di->bat_temp * 10;
761 } else
762 temp = di->bat_temp * 10;
763 }
764 return temp;
765}
766
767/**
768 * ab8500_btemp_get_batctrl_temp() - get the temperature
769 * @btemp: pointer to the btemp structure
770 *
771 * Returns the batctrl temperature in millidegrees
772 */
773int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp)
774{
775 return btemp->bat_temp * 1000;
776}
777
778/**
779 * ab8500_btemp_get_property() - get the btemp properties
780 * @psy: pointer to the power_supply structure
781 * @psp: pointer to the power_supply_property structure
782 * @val: pointer to the power_supply_propval union
783 *
784 * This function gets called when an application tries to get the btemp
785 * properties by reading the sysfs files.
786 * online: presence of the battery
787 * present: presence of the battery
788 * technology: battery technology
789 * temp: battery temperature
790 * Returns error code in case of failure else 0(on success)
791 */
792static int ab8500_btemp_get_property(struct power_supply *psy,
793 enum power_supply_property psp,
794 union power_supply_propval *val)
795{
796 struct ab8500_btemp *di;
797
798 di = to_ab8500_btemp_device_info(psy);
799
800 switch (psp) {
801 case POWER_SUPPLY_PROP_PRESENT:
802 case POWER_SUPPLY_PROP_ONLINE:
803 if (di->events.batt_rem)
804 val->intval = 0;
805 else
806 val->intval = 1;
807 break;
808 case POWER_SUPPLY_PROP_TECHNOLOGY:
809 val->intval = di->bat->bat_type[di->bat->batt_id].name;
810 break;
811 case POWER_SUPPLY_PROP_TEMP:
812 val->intval = ab8500_btemp_get_temp(di);
813 break;
814 default:
815 return -EINVAL;
816 }
817 return 0;
818}
819
820static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
821{
822 struct power_supply *psy;
823 struct power_supply *ext;
824 struct ab8500_btemp *di;
825 union power_supply_propval ret;
826 int i, j;
827 bool psy_found = false;
828
829 psy = (struct power_supply *)data;
830 ext = dev_get_drvdata(dev);
831 di = to_ab8500_btemp_device_info(psy);
832
833 /*
834 * For all psy where the name of your driver
835 * appears in any supplied_to
836 */
837 for (i = 0; i < ext->num_supplicants; i++) {
838 if (!strcmp(ext->supplied_to[i], psy->name))
839 psy_found = true;
840 }
841
842 if (!psy_found)
843 return 0;
844
845 /* Go through all properties for the psy */
846 for (j = 0; j < ext->num_properties; j++) {
847 enum power_supply_property prop;
848 prop = ext->properties[j];
849
850 if (ext->get_property(ext, prop, &ret))
851 continue;
852
853 switch (prop) {
854 case POWER_SUPPLY_PROP_PRESENT:
855 switch (ext->type) {
856 case POWER_SUPPLY_TYPE_MAINS:
857 /* AC disconnected */
858 if (!ret.intval && di->events.ac_conn) {
859 di->events.ac_conn = false;
860 }
861 /* AC connected */
862 else if (ret.intval && !di->events.ac_conn) {
863 di->events.ac_conn = true;
864 if (!di->events.usb_conn)
865 ab8500_btemp_periodic(di, true);
866 }
867 break;
868 case POWER_SUPPLY_TYPE_USB:
869 /* USB disconnected */
870 if (!ret.intval && di->events.usb_conn) {
871 di->events.usb_conn = false;
872 }
873 /* USB connected */
874 else if (ret.intval && !di->events.usb_conn) {
875 di->events.usb_conn = true;
876 if (!di->events.ac_conn)
877 ab8500_btemp_periodic(di, true);
878 }
879 break;
880 default:
881 break;
882 }
883 break;
884 default:
885 break;
886 }
887 }
888 return 0;
889}
890
891/**
892 * ab8500_btemp_external_power_changed() - callback for power supply changes
893 * @psy: pointer to the structure power_supply
894 *
895 * This function is pointing to the function pointer external_power_changed
896 * of the structure power_supply.
897 * This function gets executed when there is a change in the external power
898 * supply to the btemp.
899 */
900static void ab8500_btemp_external_power_changed(struct power_supply *psy)
901{
902 struct ab8500_btemp *di = to_ab8500_btemp_device_info(psy);
903
904 class_for_each_device(power_supply_class, NULL,
905 &di->btemp_psy, ab8500_btemp_get_ext_psy_data);
906}
907
908/* ab8500 btemp driver interrupts and their respective isr */
909static struct ab8500_btemp_interrupts ab8500_btemp_irq[] = {
910 {"BAT_CTRL_INDB", ab8500_btemp_batctrlindb_handler},
911 {"BTEMP_LOW", ab8500_btemp_templow_handler},
912 {"BTEMP_HIGH", ab8500_btemp_temphigh_handler},
913 {"BTEMP_LOW_MEDIUM", ab8500_btemp_lowmed_handler},
914 {"BTEMP_MEDIUM_HIGH", ab8500_btemp_medhigh_handler},
915};
916
#if defined(CONFIG_PM)
/* Restart the periodic battery temperature measurements on resume */
static int ab8500_btemp_resume(struct platform_device *pdev)
{
	struct ab8500_btemp *di = platform_get_drvdata(pdev);

	ab8500_btemp_periodic(di, true);

	return 0;
}

/* Stop the periodic battery temperature measurements while suspended */
static int ab8500_btemp_suspend(struct platform_device *pdev,
	pm_message_t state)
{
	struct ab8500_btemp *di = platform_get_drvdata(pdev);

	ab8500_btemp_periodic(di, false);

	return 0;
}
#else
#define ab8500_btemp_suspend      NULL
#define ab8500_btemp_resume       NULL
#endif
940
/*
 * Tear down the btemp device: release IRQs, stop and destroy the work
 * queue, unregister the power supply and free the driver data.
 */
static int __devexit ab8500_btemp_remove(struct platform_device *pdev)
{
	struct ab8500_btemp *di = platform_get_drvdata(pdev);
	int i, irq;

	/* Disable interrupts */
	for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
		/*
		 * NOTE(review): platform_get_irq_byname() may return a
		 * negative errno; probe requested these same IRQs so a
		 * failure here is unexpected — confirm it cannot happen.
		 */
		irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
		free_irq(irq, di);
	}

	/* Delete the work queue */
	destroy_workqueue(di->btemp_wq);

	flush_scheduled_work();
	power_supply_unregister(&di->btemp_psy);
	platform_set_drvdata(pdev, NULL);
	kfree(di);

	return 0;
}
962
963static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
964{
965 int irq, i, ret = 0;
966 u8 val;
967 struct abx500_bm_plat_data *plat_data;
968
969 struct ab8500_btemp *di =
970 kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
971 if (!di)
972 return -ENOMEM;
973
974 /* get parent data */
975 di->dev = &pdev->dev;
976 di->parent = dev_get_drvdata(pdev->dev.parent);
977 di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
978
979 /* get btemp specific platform data */
980 plat_data = pdev->dev.platform_data;
981 di->pdata = plat_data->btemp;
982 if (!di->pdata) {
983 dev_err(di->dev, "no btemp platform data supplied\n");
984 ret = -EINVAL;
985 goto free_device_info;
986 }
987
988 /* get battery specific platform data */
989 di->bat = plat_data->battery;
990 if (!di->bat) {
991 dev_err(di->dev, "no battery platform data supplied\n");
992 ret = -EINVAL;
993 goto free_device_info;
994 }
995
996 /* BTEMP supply */
997 di->btemp_psy.name = "ab8500_btemp";
998 di->btemp_psy.type = POWER_SUPPLY_TYPE_BATTERY;
999 di->btemp_psy.properties = ab8500_btemp_props;
1000 di->btemp_psy.num_properties = ARRAY_SIZE(ab8500_btemp_props);
1001 di->btemp_psy.get_property = ab8500_btemp_get_property;
1002 di->btemp_psy.supplied_to = di->pdata->supplied_to;
1003 di->btemp_psy.num_supplicants = di->pdata->num_supplicants;
1004 di->btemp_psy.external_power_changed =
1005 ab8500_btemp_external_power_changed;
1006
1007
1008 /* Create a work queue for the btemp */
1009 di->btemp_wq =
1010 create_singlethread_workqueue("ab8500_btemp_wq");
1011 if (di->btemp_wq == NULL) {
1012 dev_err(di->dev, "failed to create work queue\n");
1013 goto free_device_info;
1014 }
1015
1016 /* Init work for measuring temperature periodically */
1017 INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
1018 ab8500_btemp_periodic_work);
1019
1020 /* Identify the battery */
1021 if (ab8500_btemp_id(di) < 0)
1022 dev_warn(di->dev, "failed to identify the battery\n");
1023
1024 /* Set BTEMP thermal limits. Low and Med are fixed */
1025 di->btemp_ranges.btemp_low_limit = BTEMP_THERMAL_LOW_LIMIT;
1026 di->btemp_ranges.btemp_med_limit = BTEMP_THERMAL_MED_LIMIT;
1027
1028 ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
1029 AB8500_BTEMP_HIGH_TH, &val);
1030 if (ret < 0) {
1031 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
1032 goto free_btemp_wq;
1033 }
1034 switch (val) {
1035 case BTEMP_HIGH_TH_57_0:
1036 case BTEMP_HIGH_TH_57_1:
1037 di->btemp_ranges.btemp_high_limit =
1038 BTEMP_THERMAL_HIGH_LIMIT_57;
1039 break;
1040 case BTEMP_HIGH_TH_52:
1041 di->btemp_ranges.btemp_high_limit =
1042 BTEMP_THERMAL_HIGH_LIMIT_52;
1043 break;
1044 case BTEMP_HIGH_TH_62:
1045 di->btemp_ranges.btemp_high_limit =
1046 BTEMP_THERMAL_HIGH_LIMIT_62;
1047 break;
1048 }
1049
1050 /* Register BTEMP power supply class */
1051 ret = power_supply_register(di->dev, &di->btemp_psy);
1052 if (ret) {
1053 dev_err(di->dev, "failed to register BTEMP psy\n");
1054 goto free_btemp_wq;
1055 }
1056
1057 /* Register interrupts */
1058 for (i = 0; i < ARRAY_SIZE(ab8500_btemp_irq); i++) {
1059 irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
1060 ret = request_threaded_irq(irq, NULL, ab8500_btemp_irq[i].isr,
1061 IRQF_SHARED | IRQF_NO_SUSPEND,
1062 ab8500_btemp_irq[i].name, di);
1063
1064 if (ret) {
1065 dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
1066 , ab8500_btemp_irq[i].name, irq, ret);
1067 goto free_irq;
1068 }
1069 dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
1070 ab8500_btemp_irq[i].name, irq, ret);
1071 }
1072
1073 platform_set_drvdata(pdev, di);
1074
1075 /* Kick off periodic temperature measurements */
1076 ab8500_btemp_periodic(di, true);
1077 list_add_tail(&di->node, &ab8500_btemp_list);
1078
1079 return ret;
1080
1081free_irq:
1082 power_supply_unregister(&di->btemp_psy);
1083
1084 /* We also have to free all successfully registered irqs */
1085 for (i = i - 1; i >= 0; i--) {
1086 irq = platform_get_irq_byname(pdev, ab8500_btemp_irq[i].name);
1087 free_irq(irq, di);
1088 }
1089free_btemp_wq:
1090 destroy_workqueue(di->btemp_wq);
1091free_device_info:
1092 kfree(di);
1093
1094 return ret;
1095}
1096
/*
 * Platform driver glue. The suspend/resume callbacks compile to NULL
 * when CONFIG_PM is not set (see the #else branch above).
 */
static struct platform_driver ab8500_btemp_driver = {
	.probe = ab8500_btemp_probe,
	.remove = __devexit_p(ab8500_btemp_remove),
	.suspend = ab8500_btemp_suspend,
	.resume = ab8500_btemp_resume,
	.driver = {
		.name = "ab8500-btemp",
		.owner = THIS_MODULE,
	},
};
1107
/* Module init: register the btemp platform driver */
static int __init ab8500_btemp_init(void)
{
	return platform_driver_register(&ab8500_btemp_driver);
}
1112
/* Module exit: unregister the btemp platform driver */
static void __exit ab8500_btemp_exit(void)
{
	platform_driver_unregister(&ab8500_btemp_driver);
}
1117
1118subsys_initcall_sync(ab8500_btemp_init);
1119module_exit(ab8500_btemp_exit);
1120
1121MODULE_LICENSE("GPL v2");
1122MODULE_AUTHOR("Johan Palsson, Karl Komierowski, Arun R Murthy");
1123MODULE_ALIAS("platform:ab8500-btemp");
1124MODULE_DESCRIPTION("AB8500 battery temperature driver");
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
new file mode 100644
index 000000000000..e2b4accbec88
--- /dev/null
+++ b/drivers/power/ab8500_charger.c
@@ -0,0 +1,2789 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2012
3 *
4 * Charger driver for AB8500
5 *
6 * License Terms: GNU General Public License v2
7 * Author:
8 * Johan Palsson <johan.palsson@stericsson.com>
9 * Karl Komierowski <karl.komierowski@stericsson.com>
10 * Arun R Murthy <arun.murthy@stericsson.com>
11 */
12
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/device.h>
16#include <linux/interrupt.h>
17#include <linux/delay.h>
18#include <linux/slab.h>
19#include <linux/platform_device.h>
20#include <linux/power_supply.h>
21#include <linux/completion.h>
22#include <linux/regulator/consumer.h>
23#include <linux/err.h>
24#include <linux/workqueue.h>
25#include <linux/kobject.h>
26#include <linux/mfd/abx500/ab8500.h>
27#include <linux/mfd/abx500.h>
28#include <linux/mfd/abx500/ab8500-bm.h>
29#include <linux/mfd/abx500/ab8500-gpadc.h>
30#include <linux/mfd/abx500/ux500_chargalg.h>
31#include <linux/usb/otg.h>
32
33/* Charger constants */
34#define NO_PW_CONN 0
35#define AC_PW_CONN 1
36#define USB_PW_CONN 2
37
38#define MAIN_WDOG_ENA 0x01
39#define MAIN_WDOG_KICK 0x02
40#define MAIN_WDOG_DIS 0x00
41#define CHARG_WD_KICK 0x01
42#define MAIN_CH_ENA 0x01
43#define MAIN_CH_NO_OVERSHOOT_ENA_N 0x02
44#define USB_CH_ENA 0x01
45#define USB_CHG_NO_OVERSHOOT_ENA_N 0x02
46#define MAIN_CH_DET 0x01
47#define MAIN_CH_CV_ON 0x04
48#define USB_CH_CV_ON 0x08
49#define VBUS_DET_DBNC100 0x02
50#define VBUS_DET_DBNC1 0x01
51#define OTP_ENABLE_WD 0x01
52
53#define MAIN_CH_INPUT_CURR_SHIFT 4
54#define VBUS_IN_CURR_LIM_SHIFT 4
55
56#define LED_INDICATOR_PWM_ENA 0x01
57#define LED_INDICATOR_PWM_DIS 0x00
58#define LED_IND_CUR_5MA 0x04
59#define LED_INDICATOR_PWM_DUTY_252_256 0xBF
60
61/* HW failure constants */
62#define MAIN_CH_TH_PROT 0x02
63#define VBUS_CH_NOK 0x08
64#define USB_CH_TH_PROT 0x02
65#define VBUS_OVV_TH 0x01
66#define MAIN_CH_NOK 0x01
67#define VBUS_DET 0x80
68
69/* UsbLineStatus register bit masks */
70#define AB8500_USB_LINK_STATUS 0x78
71#define AB8500_STD_HOST_SUSP 0x18
72
73/* Watchdog timeout constant */
74#define WD_TIMER 0x30 /* 4min */
75#define WD_KICK_INTERVAL (60 * HZ)
76
77/* Lowest charger voltage is 3.39V -> 0x4E */
78#define LOW_VOLT_REG 0x4E
79
/*
 * UsbLineStatus register - usb types.
 * Values correspond to the AB8500_USB_LINK_STATUS field (mask 0x78,
 * shifted right by 3) of the UsbLineStatus register.
 */
enum ab8500_charger_link_status {
	USB_STAT_NOT_CONFIGURED,
	USB_STAT_STD_HOST_NC,
	USB_STAT_STD_HOST_C_NS,
	USB_STAT_STD_HOST_C_S,
	USB_STAT_HOST_CHG_NM,
	USB_STAT_HOST_CHG_HS,
	USB_STAT_HOST_CHG_HS_CHIRP,
	USB_STAT_DEDICATED_CHG,
	USB_STAT_ACA_RID_A,
	USB_STAT_ACA_RID_B,
	USB_STAT_ACA_RID_C_NM,
	USB_STAT_ACA_RID_C_HS,
	USB_STAT_ACA_RID_C_HS_CHIRP,
	USB_STAT_HM_IDGND,
	USB_STAT_RESERVED,
	USB_STAT_NOT_VALID_LINK,
};
99
/* USB stack states as tracked by this driver */
enum ab8500_usb_state {
	AB8500_BM_USB_STATE_RESET_HS,	/* HighSpeed Reset */
	AB8500_BM_USB_STATE_RESET_FS,	/* FullSpeed/LowSpeed Reset */
	AB8500_BM_USB_STATE_CONFIGURED,
	AB8500_BM_USB_STATE_SUSPEND,
	AB8500_BM_USB_STATE_RESUME,
	AB8500_BM_USB_STATE_MAX,
};
108
109/* VBUS input current limits supported in AB8500 in mA */
110#define USB_CH_IP_CUR_LVL_0P05 50
111#define USB_CH_IP_CUR_LVL_0P09 98
112#define USB_CH_IP_CUR_LVL_0P19 193
113#define USB_CH_IP_CUR_LVL_0P29 290
114#define USB_CH_IP_CUR_LVL_0P38 380
115#define USB_CH_IP_CUR_LVL_0P45 450
116#define USB_CH_IP_CUR_LVL_0P5 500
117#define USB_CH_IP_CUR_LVL_0P6 600
118#define USB_CH_IP_CUR_LVL_0P7 700
119#define USB_CH_IP_CUR_LVL_0P8 800
120#define USB_CH_IP_CUR_LVL_0P9 900
121#define USB_CH_IP_CUR_LVL_1P0 1000
122#define USB_CH_IP_CUR_LVL_1P1 1100
123#define USB_CH_IP_CUR_LVL_1P3 1300
124#define USB_CH_IP_CUR_LVL_1P4 1400
125#define USB_CH_IP_CUR_LVL_1P5 1500
126
127#define VBAT_TRESH_IP_CUR_RED 3800
128
129#define to_ab8500_charger_usb_device_info(x) container_of((x), \
130 struct ab8500_charger, usb_chg)
131#define to_ab8500_charger_ac_device_info(x) container_of((x), \
132 struct ab8500_charger, ac_chg)
133
134/**
135 * struct ab8500_charger_interrupts - ab8500 interupts
136 * @name: name of the interrupt
137 * @isr function pointer to the isr
138 */
139struct ab8500_charger_interrupts {
140 char *name;
141 irqreturn_t (*isr)(int irq, void *data);
142};
143
/* Per-charger (AC or USB) state as exposed through the power supply */
struct ab8500_charger_info {
	int charger_connected;	/* cable/charger physically attached */
	int charger_online;	/* charging path enabled */
	int charger_voltage;
	int cv_active;		/* constant-voltage mode active */
	bool wd_expired;	/* charger watchdog expired */
};
151
/* Latched HW failure/event indications, set from interrupt handlers */
struct ab8500_charger_event_flags {
	bool mainextchnotok;
	bool main_thermal_prot;
	bool usb_thermal_prot;
	bool vbus_ovv;
	bool usbchargernotok;
	bool chgwdexp;
	bool vbus_collapse;
};
161
/* State reported by the USB stack, guarded by @usb_lock */
struct ab8500_charger_usb_state {
	bool usb_changed;
	int usb_current;	/* max current the host allows, in mA */
	enum ab8500_usb_state state;
	spinlock_t usb_lock;
};
168
169/**
170 * struct ab8500_charger - ab8500 Charger device information
171 * @dev: Pointer to the structure device
172 * @max_usb_in_curr: Max USB charger input current
173 * @vbus_detected: VBUS detected
174 * @vbus_detected_start:
175 * VBUS detected during startup
176 * @ac_conn: This will be true when the AC charger has been plugged
177 * @vddadc_en_ac: Indicate if VDD ADC supply is enabled because AC
178 * charger is enabled
179 * @vddadc_en_usb: Indicate if VDD ADC supply is enabled because USB
180 * charger is enabled
181 * @vbat Battery voltage
182 * @old_vbat Previously measured battery voltage
183 * @autopower Indicate if we should have automatic pwron after pwrloss
184 * @parent: Pointer to the struct ab8500
185 * @gpadc: Pointer to the struct gpadc
186 * @pdata: Pointer to the abx500_charger platform data
187 * @bat: Pointer to the abx500_bm platform data
188 * @flags: Structure for information about events triggered
189 * @usb_state: Structure for usb stack information
190 * @ac_chg: AC charger power supply
191 * @usb_chg: USB charger power supply
192 * @ac: Structure that holds the AC charger properties
193 * @usb: Structure that holds the USB charger properties
194 * @regu: Pointer to the struct regulator
195 * @charger_wq: Work queue for the IRQs and checking HW state
196 * @check_vbat_work Work for checking vbat threshold to adjust vbus current
197 * @check_hw_failure_work: Work for checking HW state
198 * @check_usbchgnotok_work: Work for checking USB charger not ok status
199 * @kick_wd_work: Work for kicking the charger watchdog in case
200 * of ABB rev 1.* due to the watchog logic bug
201 * @ac_work: Work for checking AC charger connection
202 * @detect_usb_type_work: Work for detecting the USB type connected
203 * @usb_link_status_work: Work for checking the new USB link status
204 * @usb_state_changed_work: Work for checking USB state
205 * @check_main_thermal_prot_work:
206 * Work for checking Main thermal status
207 * @check_usb_thermal_prot_work:
208 * Work for checking USB thermal status
209 */
210struct ab8500_charger {
211 struct device *dev;
212 int max_usb_in_curr;
213 bool vbus_detected;
214 bool vbus_detected_start;
215 bool ac_conn;
216 bool vddadc_en_ac;
217 bool vddadc_en_usb;
218 int vbat;
219 int old_vbat;
220 bool autopower;
221 struct ab8500 *parent;
222 struct ab8500_gpadc *gpadc;
223 struct abx500_charger_platform_data *pdata;
224 struct abx500_bm_data *bat;
225 struct ab8500_charger_event_flags flags;
226 struct ab8500_charger_usb_state usb_state;
227 struct ux500_charger ac_chg;
228 struct ux500_charger usb_chg;
229 struct ab8500_charger_info ac;
230 struct ab8500_charger_info usb;
231 struct regulator *regu;
232 struct workqueue_struct *charger_wq;
233 struct delayed_work check_vbat_work;
234 struct delayed_work check_hw_failure_work;
235 struct delayed_work check_usbchgnotok_work;
236 struct delayed_work kick_wd_work;
237 struct work_struct ac_work;
238 struct work_struct detect_usb_type_work;
239 struct work_struct usb_link_status_work;
240 struct work_struct usb_state_changed_work;
241 struct work_struct check_main_thermal_prot_work;
242 struct work_struct check_usb_thermal_prot_work;
243 struct usb_phy *usb_phy;
244 struct notifier_block nb;
245};
246
/* AC properties exposed through the power supply framework */
static enum power_supply_property ab8500_charger_ac_props[] = {
	POWER_SUPPLY_PROP_HEALTH,
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_VOLTAGE_AVG,
	POWER_SUPPLY_PROP_CURRENT_NOW,
};
256
/* USB properties exposed through the power supply framework */
static enum power_supply_property ab8500_charger_usb_props[] = {
	POWER_SUPPLY_PROP_HEALTH,
	POWER_SUPPLY_PROP_CURRENT_AVG,
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_VOLTAGE_AVG,
	POWER_SUPPLY_PROP_CURRENT_NOW,
};
267
268/**
269 * ab8500_power_loss_handling - set how we handle powerloss.
270 * @di: pointer to the ab8500_charger structure
271 *
272 * Magic nummbers are from STE HW department.
273 */
274static void ab8500_power_loss_handling(struct ab8500_charger *di)
275{
276 u8 reg;
277 int ret;
278
279 dev_dbg(di->dev, "Autopower : %d\n", di->autopower);
280
281 /* read the autopower register */
282 ret = abx500_get_register_interruptible(di->dev, 0x15, 0x00, &reg);
283 if (ret) {
284 dev_err(di->dev, "%d write failed\n", __LINE__);
285 return;
286 }
287
288 /* enable the OPT emulation registers */
289 ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x2);
290 if (ret) {
291 dev_err(di->dev, "%d write failed\n", __LINE__);
292 return;
293 }
294
295 if (di->autopower)
296 reg |= 0x8;
297 else
298 reg &= ~0x8;
299
300 /* write back the changed value to autopower reg */
301 ret = abx500_set_register_interruptible(di->dev, 0x15, 0x00, reg);
302 if (ret) {
303 dev_err(di->dev, "%d write failed\n", __LINE__);
304 return;
305 }
306
307 /* disable the set OTP registers again */
308 ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x0);
309 if (ret) {
310 dev_err(di->dev, "%d write failed\n", __LINE__);
311 return;
312 }
313}
314
315/**
316 * ab8500_power_supply_changed - a wrapper with local extentions for
317 * power_supply_changed
318 * @di: pointer to the ab8500_charger structure
319 * @psy: pointer to power_supply_that have changed.
320 *
321 */
322static void ab8500_power_supply_changed(struct ab8500_charger *di,
323 struct power_supply *psy)
324{
325 if (di->pdata->autopower_cfg) {
326 if (!di->usb.charger_connected &&
327 !di->ac.charger_connected &&
328 di->autopower) {
329 di->autopower = false;
330 ab8500_power_loss_handling(di);
331 } else if (!di->autopower &&
332 (di->ac.charger_connected ||
333 di->usb.charger_connected)) {
334 di->autopower = true;
335 ab8500_power_loss_handling(di);
336 }
337 }
338 power_supply_changed(psy);
339}
340
341static void ab8500_charger_set_usb_connected(struct ab8500_charger *di,
342 bool connected)
343{
344 if (connected != di->usb.charger_connected) {
345 dev_dbg(di->dev, "USB connected:%i\n", connected);
346 di->usb.charger_connected = connected;
347 sysfs_notify(&di->usb_chg.psy.dev->kobj, NULL, "present");
348 }
349}
350
351/**
352 * ab8500_charger_get_ac_voltage() - get ac charger voltage
353 * @di: pointer to the ab8500_charger structure
354 *
355 * Returns ac charger voltage (on success)
356 */
357static int ab8500_charger_get_ac_voltage(struct ab8500_charger *di)
358{
359 int vch;
360
361 /* Only measure voltage if the charger is connected */
362 if (di->ac.charger_connected) {
363 vch = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_V);
364 if (vch < 0)
365 dev_err(di->dev, "%s gpadc conv failed,\n", __func__);
366 } else {
367 vch = 0;
368 }
369 return vch;
370}
371
372/**
373 * ab8500_charger_ac_cv() - check if the main charger is in CV mode
374 * @di: pointer to the ab8500_charger structure
375 *
376 * Returns ac charger CV mode (on success) else error code
377 */
378static int ab8500_charger_ac_cv(struct ab8500_charger *di)
379{
380 u8 val;
381 int ret = 0;
382
383 /* Only check CV mode if the charger is online */
384 if (di->ac.charger_online) {
385 ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
386 AB8500_CH_STATUS1_REG, &val);
387 if (ret < 0) {
388 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
389 return 0;
390 }
391
392 if (val & MAIN_CH_CV_ON)
393 ret = 1;
394 else
395 ret = 0;
396 }
397
398 return ret;
399}
400
401/**
402 * ab8500_charger_get_vbus_voltage() - get vbus voltage
403 * @di: pointer to the ab8500_charger structure
404 *
405 * This function returns the vbus voltage.
406 * Returns vbus voltage (on success)
407 */
408static int ab8500_charger_get_vbus_voltage(struct ab8500_charger *di)
409{
410 int vch;
411
412 /* Only measure voltage if the charger is connected */
413 if (di->usb.charger_connected) {
414 vch = ab8500_gpadc_convert(di->gpadc, VBUS_V);
415 if (vch < 0)
416 dev_err(di->dev, "%s gpadc conv failed\n", __func__);
417 } else {
418 vch = 0;
419 }
420 return vch;
421}
422
423/**
424 * ab8500_charger_get_usb_current() - get usb charger current
425 * @di: pointer to the ab8500_charger structure
426 *
427 * This function returns the usb charger current.
428 * Returns usb current (on success) and error code on failure
429 */
430static int ab8500_charger_get_usb_current(struct ab8500_charger *di)
431{
432 int ich;
433
434 /* Only measure current if the charger is online */
435 if (di->usb.charger_online) {
436 ich = ab8500_gpadc_convert(di->gpadc, USB_CHARGER_C);
437 if (ich < 0)
438 dev_err(di->dev, "%s gpadc conv failed\n", __func__);
439 } else {
440 ich = 0;
441 }
442 return ich;
443}
444
445/**
446 * ab8500_charger_get_ac_current() - get ac charger current
447 * @di: pointer to the ab8500_charger structure
448 *
449 * This function returns the ac charger current.
450 * Returns ac current (on success) and error code on failure.
451 */
452static int ab8500_charger_get_ac_current(struct ab8500_charger *di)
453{
454 int ich;
455
456 /* Only measure current if the charger is online */
457 if (di->ac.charger_online) {
458 ich = ab8500_gpadc_convert(di->gpadc, MAIN_CHARGER_C);
459 if (ich < 0)
460 dev_err(di->dev, "%s gpadc conv failed\n", __func__);
461 } else {
462 ich = 0;
463 }
464 return ich;
465}
466
467/**
468 * ab8500_charger_usb_cv() - check if the usb charger is in CV mode
469 * @di: pointer to the ab8500_charger structure
470 *
471 * Returns ac charger CV mode (on success) else error code
472 */
473static int ab8500_charger_usb_cv(struct ab8500_charger *di)
474{
475 int ret;
476 u8 val;
477
478 /* Only check CV mode if the charger is online */
479 if (di->usb.charger_online) {
480 ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
481 AB8500_CH_USBCH_STAT1_REG, &val);
482 if (ret < 0) {
483 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
484 return 0;
485 }
486
487 if (val & USB_CH_CV_ON)
488 ret = 1;
489 else
490 ret = 0;
491 } else {
492 ret = 0;
493 }
494
495 return ret;
496}
497
498/**
499 * ab8500_charger_detect_chargers() - Detect the connected chargers
500 * @di: pointer to the ab8500_charger structure
501 *
502 * Returns the type of charger connected.
503 * For USB it will not mean we can actually charge from it
504 * but that there is a USB cable connected that we have to
505 * identify. This is used during startup when we don't get
506 * interrupts of the charger detection
507 *
508 * Returns an integer value, that means,
509 * NO_PW_CONN no power supply is connected
510 * AC_PW_CONN if the AC power supply is connected
511 * USB_PW_CONN if the USB power supply is connected
512 * AC_PW_CONN + USB_PW_CONN if USB and AC power supplies are both connected
513 */
514static int ab8500_charger_detect_chargers(struct ab8500_charger *di)
515{
516 int result = NO_PW_CONN;
517 int ret;
518 u8 val;
519
520 /* Check for AC charger */
521 ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
522 AB8500_CH_STATUS1_REG, &val);
523 if (ret < 0) {
524 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
525 return ret;
526 }
527
528 if (val & MAIN_CH_DET)
529 result = AC_PW_CONN;
530
531 /* Check for USB charger */
532 ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
533 AB8500_CH_USBCH_STAT1_REG, &val);
534 if (ret < 0) {
535 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
536 return ret;
537 }
538
539 if ((val & VBUS_DET_DBNC1) && (val & VBUS_DET_DBNC100))
540 result |= USB_PW_CONN;
541
542 return result;
543}
544
545/**
546 * ab8500_charger_max_usb_curr() - get the max curr for the USB type
547 * @di: pointer to the ab8500_charger structure
548 * @link_status: the identified USB type
549 *
550 * Get the maximum current that is allowed to be drawn from the host
551 * based on the USB type.
552 * Returns error code in case of failure else 0 on success
553 */
554static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
555 enum ab8500_charger_link_status link_status)
556{
557 int ret = 0;
558
559 switch (link_status) {
560 case USB_STAT_STD_HOST_NC:
561 case USB_STAT_STD_HOST_C_NS:
562 case USB_STAT_STD_HOST_C_S:
563 dev_dbg(di->dev, "USB Type - Standard host is "
564 "detected through USB driver\n");
565 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
566 break;
567 case USB_STAT_HOST_CHG_HS_CHIRP:
568 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
569 break;
570 case USB_STAT_HOST_CHG_HS:
571 case USB_STAT_ACA_RID_C_HS:
572 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P9;
573 break;
574 case USB_STAT_ACA_RID_A:
575 /*
576 * Dedicated charger level minus maximum current accessory
577 * can consume (300mA). Closest level is 1100mA
578 */
579 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P1;
580 break;
581 case USB_STAT_ACA_RID_B:
582 /*
583 * Dedicated charger level minus 120mA (20mA for ACA and
584 * 100mA for potential accessory). Closest level is 1300mA
585 */
586 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P3;
587 break;
588 case USB_STAT_DEDICATED_CHG:
589 case USB_STAT_HOST_CHG_NM:
590 case USB_STAT_ACA_RID_C_HS_CHIRP:
591 case USB_STAT_ACA_RID_C_NM:
592 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
593 break;
594 case USB_STAT_RESERVED:
595 /*
596 * This state is used to indicate that VBUS has dropped below
597 * the detection level 4 times in a row. This is due to the
598 * charger output current is set to high making the charger
599 * voltage collapse. This have to be propagated through to
600 * chargalg. This is done using the property
601 * POWER_SUPPLY_PROP_CURRENT_AVG = 1
602 */
603 di->flags.vbus_collapse = true;
604 dev_dbg(di->dev, "USB Type - USB_STAT_RESERVED "
605 "VBUS has collapsed\n");
606 ret = -1;
607 break;
608 case USB_STAT_HM_IDGND:
609 case USB_STAT_NOT_CONFIGURED:
610 case USB_STAT_NOT_VALID_LINK:
611 dev_err(di->dev, "USB Type - Charging not allowed\n");
612 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
613 ret = -ENXIO;
614 break;
615 default:
616 dev_err(di->dev, "USB Type - Unknown\n");
617 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
618 ret = -ENXIO;
619 break;
620 };
621
622 dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
623 link_status, di->max_usb_in_curr);
624
625 return ret;
626}
627
628/**
629 * ab8500_charger_read_usb_type() - read the type of usb connected
630 * @di: pointer to the ab8500_charger structure
631 *
632 * Detect the type of the plugged USB
633 * Returns error code in case of failure else 0 on success
634 */
635static int ab8500_charger_read_usb_type(struct ab8500_charger *di)
636{
637 int ret;
638 u8 val;
639
640 ret = abx500_get_register_interruptible(di->dev,
641 AB8500_INTERRUPT, AB8500_IT_SOURCE21_REG, &val);
642 if (ret < 0) {
643 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
644 return ret;
645 }
646 ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
647 AB8500_USB_LINE_STAT_REG, &val);
648 if (ret < 0) {
649 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
650 return ret;
651 }
652
653 /* get the USB type */
654 val = (val & AB8500_USB_LINK_STATUS) >> 3;
655 ret = ab8500_charger_max_usb_curr(di,
656 (enum ab8500_charger_link_status) val);
657
658 return ret;
659}
660
661/**
662 * ab8500_charger_detect_usb_type() - get the type of usb connected
663 * @di: pointer to the ab8500_charger structure
664 *
665 * Detect the type of the plugged USB
666 * Returns error code in case of failure else 0 on success
667 */
668static int ab8500_charger_detect_usb_type(struct ab8500_charger *di)
669{
670 int i, ret;
671 u8 val;
672
673 /*
674 * On getting the VBUS rising edge detect interrupt there
675 * is a 250ms delay after which the register UsbLineStatus
676 * is filled with valid data.
677 */
678 for (i = 0; i < 10; i++) {
679 msleep(250);
680 ret = abx500_get_register_interruptible(di->dev,
681 AB8500_INTERRUPT, AB8500_IT_SOURCE21_REG,
682 &val);
683 if (ret < 0) {
684 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
685 return ret;
686 }
687 ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
688 AB8500_USB_LINE_STAT_REG, &val);
689 if (ret < 0) {
690 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
691 return ret;
692 }
693 /*
694 * Until the IT source register is read the UsbLineStatus
695 * register is not updated, hence doing the same
696 * Revisit this:
697 */
698
699 /* get the USB type */
700 val = (val & AB8500_USB_LINK_STATUS) >> 3;
701 if (val)
702 break;
703 }
704 ret = ab8500_charger_max_usb_curr(di,
705 (enum ab8500_charger_link_status) val);
706
707 return ret;
708}
709
710/*
711 * This array maps the raw hex value to charger voltage used by the AB8500
712 * Values taken from the UM0836
713 */
714static int ab8500_charger_voltage_map[] = {
715 3500 ,
716 3525 ,
717 3550 ,
718 3575 ,
719 3600 ,
720 3625 ,
721 3650 ,
722 3675 ,
723 3700 ,
724 3725 ,
725 3750 ,
726 3775 ,
727 3800 ,
728 3825 ,
729 3850 ,
730 3875 ,
731 3900 ,
732 3925 ,
733 3950 ,
734 3975 ,
735 4000 ,
736 4025 ,
737 4050 ,
738 4060 ,
739 4070 ,
740 4080 ,
741 4090 ,
742 4100 ,
743 4110 ,
744 4120 ,
745 4130 ,
746 4140 ,
747 4150 ,
748 4160 ,
749 4170 ,
750 4180 ,
751 4190 ,
752 4200 ,
753 4210 ,
754 4220 ,
755 4230 ,
756 4240 ,
757 4250 ,
758 4260 ,
759 4270 ,
760 4280 ,
761 4290 ,
762 4300 ,
763 4310 ,
764 4320 ,
765 4330 ,
766 4340 ,
767 4350 ,
768 4360 ,
769 4370 ,
770 4380 ,
771 4390 ,
772 4400 ,
773 4410 ,
774 4420 ,
775 4430 ,
776 4440 ,
777 4450 ,
778 4460 ,
779 4470 ,
780 4480 ,
781 4490 ,
782 4500 ,
783 4510 ,
784 4520 ,
785 4530 ,
786 4540 ,
787 4550 ,
788 4560 ,
789 4570 ,
790 4580 ,
791 4590 ,
792 4600 ,
793};
794
795/*
796 * This array maps the raw hex value to charger current used by the AB8500
797 * Values taken from the UM0836
798 */
799static int ab8500_charger_current_map[] = {
800 100 ,
801 200 ,
802 300 ,
803 400 ,
804 500 ,
805 600 ,
806 700 ,
807 800 ,
808 900 ,
809 1000 ,
810 1100 ,
811 1200 ,
812 1300 ,
813 1400 ,
814 1500 ,
815};
816
817/*
818 * This array maps the raw hex value to VBUS input current used by the AB8500
819 * Values taken from the UM0836
820 */
821static int ab8500_charger_vbus_in_curr_map[] = {
822 USB_CH_IP_CUR_LVL_0P05,
823 USB_CH_IP_CUR_LVL_0P09,
824 USB_CH_IP_CUR_LVL_0P19,
825 USB_CH_IP_CUR_LVL_0P29,
826 USB_CH_IP_CUR_LVL_0P38,
827 USB_CH_IP_CUR_LVL_0P45,
828 USB_CH_IP_CUR_LVL_0P5,
829 USB_CH_IP_CUR_LVL_0P6,
830 USB_CH_IP_CUR_LVL_0P7,
831 USB_CH_IP_CUR_LVL_0P8,
832 USB_CH_IP_CUR_LVL_0P9,
833 USB_CH_IP_CUR_LVL_1P0,
834 USB_CH_IP_CUR_LVL_1P1,
835 USB_CH_IP_CUR_LVL_1P3,
836 USB_CH_IP_CUR_LVL_1P4,
837 USB_CH_IP_CUR_LVL_1P5,
838};
839
840static int ab8500_voltage_to_regval(int voltage)
841{
842 int i;
843
844 /* Special case for voltage below 3.5V */
845 if (voltage < ab8500_charger_voltage_map[0])
846 return LOW_VOLT_REG;
847
848 for (i = 1; i < ARRAY_SIZE(ab8500_charger_voltage_map); i++) {
849 if (voltage < ab8500_charger_voltage_map[i])
850 return i - 1;
851 }
852
853 /* If not last element, return error */
854 i = ARRAY_SIZE(ab8500_charger_voltage_map) - 1;
855 if (voltage == ab8500_charger_voltage_map[i])
856 return i;
857 else
858 return -1;
859}
860
861static int ab8500_current_to_regval(int curr)
862{
863 int i;
864
865 if (curr < ab8500_charger_current_map[0])
866 return 0;
867
868 for (i = 0; i < ARRAY_SIZE(ab8500_charger_current_map); i++) {
869 if (curr < ab8500_charger_current_map[i])
870 return i - 1;
871 }
872
873 /* If not last element, return error */
874 i = ARRAY_SIZE(ab8500_charger_current_map) - 1;
875 if (curr == ab8500_charger_current_map[i])
876 return i;
877 else
878 return -1;
879}
880
881static int ab8500_vbus_in_curr_to_regval(int curr)
882{
883 int i;
884
885 if (curr < ab8500_charger_vbus_in_curr_map[0])
886 return 0;
887
888 for (i = 0; i < ARRAY_SIZE(ab8500_charger_vbus_in_curr_map); i++) {
889 if (curr < ab8500_charger_vbus_in_curr_map[i])
890 return i - 1;
891 }
892
893 /* If not last element, return error */
894 i = ARRAY_SIZE(ab8500_charger_vbus_in_curr_map) - 1;
895 if (curr == ab8500_charger_vbus_in_curr_map[i])
896 return i;
897 else
898 return -1;
899}
900
901/**
902 * ab8500_charger_get_usb_cur() - get usb current
903 * @di: pointer to the ab8500_charger structre
904 *
905 * The usb stack provides the maximum current that can be drawn from
906 * the standard usb host. This will be in mA.
907 * This function converts current in mA to a value that can be written
908 * to the register. Returns -1 if charging is not allowed
909 */
910static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
911{
912 switch (di->usb_state.usb_current) {
913 case 100:
914 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
915 break;
916 case 200:
917 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P19;
918 break;
919 case 300:
920 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P29;
921 break;
922 case 400:
923 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P38;
924 break;
925 case 500:
926 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
927 break;
928 default:
929 di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
930 return -1;
931 break;
932 };
933 return 0;
934}
935
936/**
937 * ab8500_charger_set_vbus_in_curr() - set VBUS input current limit
938 * @di: pointer to the ab8500_charger structure
939 * @ich_in: charger input current limit
940 *
941 * Sets the current that can be drawn from the USB host
942 * Returns error code in case of failure else 0(on success)
943 */
944static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
945 int ich_in)
946{
947 int ret;
948 int input_curr_index;
949 int min_value;
950
951 /* We should always use to lowest current limit */
952 min_value = min(di->bat->chg_params->usb_curr_max, ich_in);
953
954 switch (min_value) {
955 case 100:
956 if (di->vbat < VBAT_TRESH_IP_CUR_RED)
957 min_value = USB_CH_IP_CUR_LVL_0P05;
958 break;
959 case 500:
960 if (di->vbat < VBAT_TRESH_IP_CUR_RED)
961 min_value = USB_CH_IP_CUR_LVL_0P45;
962 break;
963 default:
964 break;
965 }
966
967 input_curr_index = ab8500_vbus_in_curr_to_regval(min_value);
968 if (input_curr_index < 0) {
969 dev_err(di->dev, "VBUS input current limit too high\n");
970 return -ENXIO;
971 }
972
973 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
974 AB8500_USBCH_IPT_CRNTLVL_REG,
975 input_curr_index << VBUS_IN_CURR_LIM_SHIFT);
976 if (ret)
977 dev_err(di->dev, "%s write failed\n", __func__);
978
979 return ret;
980}
981
982/**
983 * ab8500_charger_led_en() - turn on/off chargign led
984 * @di: pointer to the ab8500_charger structure
985 * @on: flag to turn on/off the chargign led
986 *
987 * Power ON/OFF charging LED indication
988 * Returns error code in case of failure else 0(on success)
989 */
990static int ab8500_charger_led_en(struct ab8500_charger *di, int on)
991{
992 int ret;
993
994 if (on) {
995 /* Power ON charging LED indicator, set LED current to 5mA */
996 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
997 AB8500_LED_INDICATOR_PWM_CTRL,
998 (LED_IND_CUR_5MA | LED_INDICATOR_PWM_ENA));
999 if (ret) {
1000 dev_err(di->dev, "Power ON LED failed\n");
1001 return ret;
1002 }
1003 /* LED indicator PWM duty cycle 252/256 */
1004 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
1005 AB8500_LED_INDICATOR_PWM_DUTY,
1006 LED_INDICATOR_PWM_DUTY_252_256);
1007 if (ret) {
1008 dev_err(di->dev, "Set LED PWM duty cycle failed\n");
1009 return ret;
1010 }
1011 } else {
1012 /* Power off charging LED indicator */
1013 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
1014 AB8500_LED_INDICATOR_PWM_CTRL,
1015 LED_INDICATOR_PWM_DIS);
1016 if (ret) {
1017 dev_err(di->dev, "Power-off LED failed\n");
1018 return ret;
1019 }
1020 }
1021
1022 return ret;
1023}
1024
/**
 * ab8500_charger_ac_en() - enable or disable ac charging
 * @charger:	pointer to the ux500_charger structure (AC instance)
 * @enable:	enable/disable flag
 * @vset:	charging voltage (mV)
 * @iset:	charging current (mA)
 *
 * Enable/Disable AC/Mains charging and turns on/off the charging led
 * respectively. Returns 0 on success, negative error code on failure.
 **/
static int ab8500_charger_ac_en(struct ux500_charger *charger,
	int enable, int vset, int iset)
{
	int ret;
	int volt_index;
	int curr_index;
	int input_curr_index;
	u8 overshoot = 0;

	struct ab8500_charger *di = to_ab8500_charger_ac_device_info(charger);

	if (enable) {
		/* Check if AC is connected */
		if (!di->ac.charger_connected) {
			dev_err(di->dev, "AC charger not connected\n");
			return -ENXIO;
		}

		/* Enable AC charging */
		dev_dbg(di->dev, "Enable AC: %dmV %dmA\n", vset, iset);

		/*
		 * Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
		 * will be triggered everytime we enable the VDD ADC supply.
		 * This will turn off charging for a short while.
		 * It can be avoided by having the supply on when
		 * there is a charger enabled. Normally the VDD ADC supply
		 * is enabled everytime a GPADC conversion is triggered. We will
		 * force it to be enabled from this driver to have
		 * the GPADC module independant of the AB8500 chargers
		 */
		if (!di->vddadc_en_ac) {
			/*
			 * NOTE(review): regulator_enable() return value is
			 * ignored here — confirm whether a failure should
			 * be reported instead of silently continuing.
			 */
			regulator_enable(di->regu);
			di->vddadc_en_ac = true;
		}

		/* Check if the requested voltage or current is valid */
		volt_index = ab8500_voltage_to_regval(vset);
		curr_index = ab8500_current_to_regval(iset);
		input_curr_index = ab8500_current_to_regval(
			di->bat->chg_params->ac_curr_max);
		if (volt_index < 0 || curr_index < 0 || input_curr_index < 0) {
			dev_err(di->dev,
				"Charger voltage or current too high, "
				"charging not started\n");
			return -ENXIO;
		}

		/* ChVoltLevel: maximum battery charging voltage */
		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
			AB8500_CH_VOLT_LVL_REG, (u8) volt_index);
		if (ret) {
			dev_err(di->dev, "%s write failed\n", __func__);
			return ret;
		}
		/* MainChInputCurr: current that can be drawn from the charger*/
		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
			AB8500_MCH_IPT_CURLVL_REG,
			input_curr_index << MAIN_CH_INPUT_CURR_SHIFT);
		if (ret) {
			dev_err(di->dev, "%s write failed\n", __func__);
			return ret;
		}
		/* ChOutputCurentLevel: protected output current */
		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
			AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
		if (ret) {
			dev_err(di->dev, "%s write failed\n", __func__);
			return ret;
		}

		/* Check if VBAT overshoot control should be enabled */
		if (!di->bat->enable_overshoot)
			overshoot = MAIN_CH_NO_OVERSHOOT_ENA_N;

		/* Enable Main Charger */
		ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
			AB8500_MCH_CTRL1, MAIN_CH_ENA | overshoot);
		if (ret) {
			dev_err(di->dev, "%s write failed\n", __func__);
			return ret;
		}

		/* Power on charging LED indication (failure is non-fatal) */
		ret = ab8500_charger_led_en(di, true);
		if (ret < 0)
			dev_err(di->dev, "failed to enable LED\n");

		di->ac.charger_online = 1;
	} else {
		/* Disable AC charging */
		if (is_ab8500_1p1_or_earlier(di->parent)) {
			/*
			 * For ABB revision 1.0 and 1.1 there is a bug in the
			 * watchdog logic. That means we have to continously
			 * kick the charger watchdog even when no charger is
			 * connected. This is only valid once the AC charger
			 * has been enabled. This is a bug that is not handled
			 * by the algorithm and the watchdog have to be kicked
			 * by the charger driver when the AC charger
			 * is disabled
			 */
			if (di->ac_conn) {
				queue_delayed_work(di->charger_wq,
					&di->kick_wd_work,
					round_jiffies(WD_KICK_INTERVAL));
			}

			/*
			 * We can't turn off charging completely
			 * due to a bug in AB8500 cut1.
			 * If we do, charging will not start again.
			 * That is why we set the lowest voltage
			 * and current possible
			 */
			ret = abx500_set_register_interruptible(di->dev,
				AB8500_CHARGER,
				AB8500_CH_VOLT_LVL_REG, CH_VOL_LVL_3P5);
			if (ret) {
				dev_err(di->dev,
					"%s write failed\n", __func__);
				return ret;
			}

			ret = abx500_set_register_interruptible(di->dev,
				AB8500_CHARGER,
				AB8500_CH_OPT_CRNTLVL_REG, CH_OP_CUR_LVL_0P1);
			if (ret) {
				dev_err(di->dev,
					"%s write failed\n", __func__);
				return ret;
			}
		} else {
			/* Later revisions: simply disable the main charger */
			ret = abx500_set_register_interruptible(di->dev,
				AB8500_CHARGER,
				AB8500_MCH_CTRL1, 0);
			if (ret) {
				dev_err(di->dev,
					"%s write failed\n", __func__);
				return ret;
			}
		}

		/* LED off failure is non-fatal */
		ret = ab8500_charger_led_en(di, false);
		if (ret < 0)
			dev_err(di->dev, "failed to disable LED\n");

		di->ac.charger_online = 0;
		di->ac.wd_expired = false;

		/* Disable regulator if enabled */
		if (di->vddadc_en_ac) {
			regulator_disable(di->regu);
			di->vddadc_en_ac = false;
		}

		dev_dbg(di->dev, "%s Disabled AC charging\n", __func__);
	}
	/* Notify the power-supply framework of the state change */
	ab8500_power_supply_changed(di, &di->ac_chg.psy);

	return ret;
}
1197
1198/**
1199 * ab8500_charger_usb_en() - enable usb charging
1200 * @di: pointer to the ab8500_charger structure
1201 * @enable: enable/disable flag
1202 * @vset: charging voltage
1203 * @ich_out: charger output current
1204 *
1205 * Enable/Disable USB charging and turns on/off the charging led respectively.
1206 * Returns error code in case of failure else 0(on success)
1207 */
1208static int ab8500_charger_usb_en(struct ux500_charger *charger,
1209 int enable, int vset, int ich_out)
1210{
1211 int ret;
1212 int volt_index;
1213 int curr_index;
1214 u8 overshoot = 0;
1215
1216 struct ab8500_charger *di = to_ab8500_charger_usb_device_info(charger);
1217
1218 if (enable) {
1219 /* Check if USB is connected */
1220 if (!di->usb.charger_connected) {
1221 dev_err(di->dev, "USB charger not connected\n");
1222 return -ENXIO;
1223 }
1224
1225 /*
1226 * Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
1227 * will be triggered everytime we enable the VDD ADC supply.
1228 * This will turn off charging for a short while.
1229 * It can be avoided by having the supply on when
1230 * there is a charger enabled. Normally the VDD ADC supply
1231 * is enabled everytime a GPADC conversion is triggered. We will
1232 * force it to be enabled from this driver to have
1233 * the GPADC module independant of the AB8500 chargers
1234 */
1235 if (!di->vddadc_en_usb) {
1236 regulator_enable(di->regu);
1237 di->vddadc_en_usb = true;
1238 }
1239
1240 /* Enable USB charging */
1241 dev_dbg(di->dev, "Enable USB: %dmV %dmA\n", vset, ich_out);
1242
1243 /* Check if the requested voltage or current is valid */
1244 volt_index = ab8500_voltage_to_regval(vset);
1245 curr_index = ab8500_current_to_regval(ich_out);
1246 if (volt_index < 0 || curr_index < 0) {
1247 dev_err(di->dev,
1248 "Charger voltage or current too high, "
1249 "charging not started\n");
1250 return -ENXIO;
1251 }
1252
1253 /* ChVoltLevel: max voltage upto which battery can be charged */
1254 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
1255 AB8500_CH_VOLT_LVL_REG, (u8) volt_index);
1256 if (ret) {
1257 dev_err(di->dev, "%s write failed\n", __func__);
1258 return ret;
1259 }
1260 /* USBChInputCurr: current that can be drawn from the usb */
1261 ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
1262 if (ret) {
1263 dev_err(di->dev, "setting USBChInputCurr failed\n");
1264 return ret;
1265 }
1266 /* ChOutputCurentLevel: protected output current */
1267 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
1268 AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
1269 if (ret) {
1270 dev_err(di->dev, "%s write failed\n", __func__);
1271 return ret;
1272 }
1273 /* Check if VBAT overshoot control should be enabled */
1274 if (!di->bat->enable_overshoot)
1275 overshoot = USB_CHG_NO_OVERSHOOT_ENA_N;
1276
1277 /* Enable USB Charger */
1278 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
1279 AB8500_USBCH_CTRL1_REG, USB_CH_ENA | overshoot);
1280 if (ret) {
1281 dev_err(di->dev, "%s write failed\n", __func__);
1282 return ret;
1283 }
1284
1285 /* If success power on charging LED indication */
1286 ret = ab8500_charger_led_en(di, true);
1287 if (ret < 0)
1288 dev_err(di->dev, "failed to enable LED\n");
1289
1290 queue_delayed_work(di->charger_wq, &di->check_vbat_work, HZ);
1291
1292 di->usb.charger_online = 1;
1293 } else {
1294 /* Disable USB charging */
1295 ret = abx500_set_register_interruptible(di->dev,
1296 AB8500_CHARGER,
1297 AB8500_USBCH_CTRL1_REG, 0);
1298 if (ret) {
1299 dev_err(di->dev,
1300 "%s write failed\n", __func__);
1301 return ret;
1302 }
1303
1304 ret = ab8500_charger_led_en(di, false);
1305 if (ret < 0)
1306 dev_err(di->dev, "failed to disable LED\n");
1307
1308 di->usb.charger_online = 0;
1309 di->usb.wd_expired = false;
1310
1311 /* Disable regulator if enabled */
1312 if (di->vddadc_en_usb) {
1313 regulator_disable(di->regu);
1314 di->vddadc_en_usb = false;
1315 }
1316
1317 dev_dbg(di->dev, "%s Disabled USB charging\n", __func__);
1318
1319 /* Cancel any pending Vbat check work */
1320 if (delayed_work_pending(&di->check_vbat_work))
1321 cancel_delayed_work(&di->check_vbat_work);
1322
1323 }
1324 ab8500_power_supply_changed(di, &di->usb_chg.psy);
1325
1326 return ret;
1327}
1328
1329/**
1330 * ab8500_charger_watchdog_kick() - kick charger watchdog
1331 * @di: pointer to the ab8500_charger structure
1332 *
1333 * Kick charger watchdog
1334 * Returns error code in case of failure else 0(on success)
1335 */
1336static int ab8500_charger_watchdog_kick(struct ux500_charger *charger)
1337{
1338 int ret;
1339 struct ab8500_charger *di;
1340
1341 if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
1342 di = to_ab8500_charger_ac_device_info(charger);
1343 else if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
1344 di = to_ab8500_charger_usb_device_info(charger);
1345 else
1346 return -ENXIO;
1347
1348 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
1349 AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
1350 if (ret)
1351 dev_err(di->dev, "Failed to kick WD!\n");
1352
1353 return ret;
1354}
1355
1356/**
1357 * ab8500_charger_update_charger_current() - update charger current
1358 * @di: pointer to the ab8500_charger structure
1359 *
1360 * Update the charger output current for the specified charger
1361 * Returns error code in case of failure else 0(on success)
1362 */
1363static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
1364 int ich_out)
1365{
1366 int ret;
1367 int curr_index;
1368 struct ab8500_charger *di;
1369
1370 if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
1371 di = to_ab8500_charger_ac_device_info(charger);
1372 else if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
1373 di = to_ab8500_charger_usb_device_info(charger);
1374 else
1375 return -ENXIO;
1376
1377 curr_index = ab8500_current_to_regval(ich_out);
1378 if (curr_index < 0) {
1379 dev_err(di->dev,
1380 "Charger current too high, "
1381 "charging not started\n");
1382 return -ENXIO;
1383 }
1384
1385 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
1386 AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
1387 if (ret) {
1388 dev_err(di->dev, "%s write failed\n", __func__);
1389 return ret;
1390 }
1391
1392 /* Reset the main and usb drop input current measurement counter */
1393 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
1394 AB8500_CHARGER_CTRL,
1395 0x1);
1396 if (ret) {
1397 dev_err(di->dev, "%s write failed\n", __func__);
1398 return ret;
1399 }
1400
1401 return ret;
1402}
1403
1404static int ab8500_charger_get_ext_psy_data(struct device *dev, void *data)
1405{
1406 struct power_supply *psy;
1407 struct power_supply *ext;
1408 struct ab8500_charger *di;
1409 union power_supply_propval ret;
1410 int i, j;
1411 bool psy_found = false;
1412 struct ux500_charger *usb_chg;
1413
1414 usb_chg = (struct ux500_charger *)data;
1415 psy = &usb_chg->psy;
1416
1417 di = to_ab8500_charger_usb_device_info(usb_chg);
1418
1419 ext = dev_get_drvdata(dev);
1420
1421 /* For all psy where the driver name appears in any supplied_to */
1422 for (i = 0; i < ext->num_supplicants; i++) {
1423 if (!strcmp(ext->supplied_to[i], psy->name))
1424 psy_found = true;
1425 }
1426
1427 if (!psy_found)
1428 return 0;
1429
1430 /* Go through all properties for the psy */
1431 for (j = 0; j < ext->num_properties; j++) {
1432 enum power_supply_property prop;
1433 prop = ext->properties[j];
1434
1435 if (ext->get_property(ext, prop, &ret))
1436 continue;
1437
1438 switch (prop) {
1439 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
1440 switch (ext->type) {
1441 case POWER_SUPPLY_TYPE_BATTERY:
1442 di->vbat = ret.intval / 1000;
1443 break;
1444 default:
1445 break;
1446 }
1447 break;
1448 default:
1449 break;
1450 }
1451 }
1452 return 0;
1453}
1454
/**
 * ab8500_charger_check_vbat_work() - keep vbus current within spec
 * @work:	pointer to the work_struct structure
 *
 * Due to a asic bug it is necessary to lower the input current to the vbus
 * charger when charging with at some specific levels. This issue is only valid
 * for below a certain battery voltage. This function makes sure that the
 * the allowed current limit isn't exceeded.
 */
static void ab8500_charger_check_vbat_work(struct work_struct *work)
{
	int t = 10;	/* re-check interval in seconds */
	struct ab8500_charger *di = container_of(work,
		struct ab8500_charger, check_vbat_work.work);

	/* Refresh di->vbat from the battery power supply */
	class_for_each_device(power_supply_class, NULL,
		&di->usb_chg.psy, ab8500_charger_get_ext_psy_data);

	/* First run old_vbat is 0. */
	if (di->old_vbat == 0)
		di->old_vbat = di->vbat;

	/*
	 * Re-program the VBUS input current only when Vbat has crossed
	 * the reduction threshold (old and new on opposite sides of it).
	 */
	if (!((di->old_vbat <= VBAT_TRESH_IP_CUR_RED &&
		di->vbat <= VBAT_TRESH_IP_CUR_RED) ||
		(di->old_vbat > VBAT_TRESH_IP_CUR_RED &&
		di->vbat > VBAT_TRESH_IP_CUR_RED))) {

		dev_dbg(di->dev, "Vbat did cross threshold, curr: %d, new: %d,"
			" old: %d\n", di->max_usb_in_curr, di->vbat,
			di->old_vbat);
		ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
		power_supply_changed(&di->usb_chg.psy);
	}

	di->old_vbat = di->vbat;

	/*
	 * No need to check the battery voltage every second when not close to
	 * the threshold.
	 */
	if (di->vbat < (VBAT_TRESH_IP_CUR_RED + 100) &&
		(di->vbat > (VBAT_TRESH_IP_CUR_RED - 100)))
		t = 1;

	/* Re-arm; cancelled when USB charging is disabled */
	queue_delayed_work(di->charger_wq, &di->check_vbat_work, t * HZ);
}
1501
1502/**
1503 * ab8500_charger_check_hw_failure_work() - check main charger failure
1504 * @work: pointer to the work_struct structure
1505 *
1506 * Work queue function for checking the main charger status
1507 */
1508static void ab8500_charger_check_hw_failure_work(struct work_struct *work)
1509{
1510 int ret;
1511 u8 reg_value;
1512
1513 struct ab8500_charger *di = container_of(work,
1514 struct ab8500_charger, check_hw_failure_work.work);
1515
1516 /* Check if the status bits for HW failure is still active */
1517 if (di->flags.mainextchnotok) {
1518 ret = abx500_get_register_interruptible(di->dev,
1519 AB8500_CHARGER, AB8500_CH_STATUS2_REG, &reg_value);
1520 if (ret < 0) {
1521 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
1522 return;
1523 }
1524 if (!(reg_value & MAIN_CH_NOK)) {
1525 di->flags.mainextchnotok = false;
1526 ab8500_power_supply_changed(di, &di->ac_chg.psy);
1527 }
1528 }
1529 if (di->flags.vbus_ovv) {
1530 ret = abx500_get_register_interruptible(di->dev,
1531 AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG,
1532 &reg_value);
1533 if (ret < 0) {
1534 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
1535 return;
1536 }
1537 if (!(reg_value & VBUS_OVV_TH)) {
1538 di->flags.vbus_ovv = false;
1539 ab8500_power_supply_changed(di, &di->usb_chg.psy);
1540 }
1541 }
1542 /* If we still have a failure, schedule a new check */
1543 if (di->flags.mainextchnotok || di->flags.vbus_ovv) {
1544 queue_delayed_work(di->charger_wq,
1545 &di->check_hw_failure_work, round_jiffies(HZ));
1546 }
1547}
1548
1549/**
1550 * ab8500_charger_kick_watchdog_work() - kick the watchdog
1551 * @work: pointer to the work_struct structure
1552 *
1553 * Work queue function for kicking the charger watchdog.
1554 *
1555 * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
1556 * logic. That means we have to continously kick the charger
1557 * watchdog even when no charger is connected. This is only
1558 * valid once the AC charger has been enabled. This is
1559 * a bug that is not handled by the algorithm and the
1560 * watchdog have to be kicked by the charger driver
1561 * when the AC charger is disabled
1562 */
1563static void ab8500_charger_kick_watchdog_work(struct work_struct *work)
1564{
1565 int ret;
1566
1567 struct ab8500_charger *di = container_of(work,
1568 struct ab8500_charger, kick_wd_work.work);
1569
1570 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
1571 AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
1572 if (ret)
1573 dev_err(di->dev, "Failed to kick WD!\n");
1574
1575 /* Schedule a new watchdog kick */
1576 queue_delayed_work(di->charger_wq,
1577 &di->kick_wd_work, round_jiffies(WD_KICK_INTERVAL));
1578}
1579
1580/**
1581 * ab8500_charger_ac_work() - work to get and set main charger status
1582 * @work: pointer to the work_struct structure
1583 *
1584 * Work queue function for checking the main charger status
1585 */
1586static void ab8500_charger_ac_work(struct work_struct *work)
1587{
1588 int ret;
1589
1590 struct ab8500_charger *di = container_of(work,
1591 struct ab8500_charger, ac_work);
1592
1593 /*
1594 * Since we can't be sure that the events are received
1595 * synchronously, we have the check if the main charger is
1596 * connected by reading the status register
1597 */
1598 ret = ab8500_charger_detect_chargers(di);
1599 if (ret < 0)
1600 return;
1601
1602 if (ret & AC_PW_CONN) {
1603 di->ac.charger_connected = 1;
1604 di->ac_conn = true;
1605 } else {
1606 di->ac.charger_connected = 0;
1607 }
1608
1609 ab8500_power_supply_changed(di, &di->ac_chg.psy);
1610 sysfs_notify(&di->ac_chg.psy.dev->kobj, NULL, "present");
1611}
1612
/**
 * ab8500_charger_detect_usb_type_work() - work to detect USB type
 * @work:	Pointer to the work_struct structure
 *
 * Detect the type of USB plugged. The detection path differs per
 * AB8500 revision: cut 1.x detects directly, cut 2.0+ normally relies
 * on the USB_LINK_STATUS IRQ except for boot-time connections.
 */
static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
{
	int ret;

	struct ab8500_charger *di = container_of(work,
		struct ab8500_charger, detect_usb_type_work);

	/*
	 * Since we can't be sure that the events are received
	 * synchronously, we have the check if is
	 * connected by reading the status register
	 */
	ret = ab8500_charger_detect_chargers(di);
	if (ret < 0)
		return;

	if (!(ret & USB_PW_CONN)) {
		/* VBUS gone: tear down the USB charger state */
		di->vbus_detected = 0;
		ab8500_charger_set_usb_connected(di, false);
		ab8500_power_supply_changed(di, &di->usb_chg.psy);
	} else {
		di->vbus_detected = 1;

		if (is_ab8500_1p1_or_earlier(di->parent)) {
			/* Cut 1.x: no link-status IRQ, detect here */
			ret = ab8500_charger_detect_usb_type(di);
			if (!ret) {
				ab8500_charger_set_usb_connected(di, true);
				ab8500_power_supply_changed(di,
					&di->usb_chg.psy);
			}
		} else {
			/* For ABB cut2.0 and onwards we have an IRQ,
			 * USB_LINK_STATUS that will be triggered when the USB
			 * link status changes. The exception is USB connected
			 * during startup. Then we don't get a
			 * USB_LINK_STATUS IRQ
			 */
			if (di->vbus_detected_start) {
				di->vbus_detected_start = false;
				ret = ab8500_charger_detect_usb_type(di);
				if (!ret) {
					ab8500_charger_set_usb_connected(di,
						true);
					ab8500_power_supply_changed(di,
						&di->usb_chg.psy);
				}
			}
		}
	}
}
1669
1670/**
1671 * ab8500_charger_usb_link_status_work() - work to detect USB type
1672 * @work: pointer to the work_struct structure
1673 *
1674 * Detect the type of USB plugged
1675 */
1676static void ab8500_charger_usb_link_status_work(struct work_struct *work)
1677{
1678 int ret;
1679
1680 struct ab8500_charger *di = container_of(work,
1681 struct ab8500_charger, usb_link_status_work);
1682
1683 /*
1684 * Since we can't be sure that the events are received
1685 * synchronously, we have the check if is
1686 * connected by reading the status register
1687 */
1688 ret = ab8500_charger_detect_chargers(di);
1689 if (ret < 0)
1690 return;
1691
1692 if (!(ret & USB_PW_CONN)) {
1693 di->vbus_detected = 0;
1694 ab8500_charger_set_usb_connected(di, false);
1695 ab8500_power_supply_changed(di, &di->usb_chg.psy);
1696 } else {
1697 di->vbus_detected = 1;
1698 ret = ab8500_charger_read_usb_type(di);
1699 if (!ret) {
1700 /* Update maximum input current */
1701 ret = ab8500_charger_set_vbus_in_curr(di,
1702 di->max_usb_in_curr);
1703 if (ret)
1704 return;
1705
1706 ab8500_charger_set_usb_connected(di, true);
1707 ab8500_power_supply_changed(di, &di->usb_chg.psy);
1708 } else if (ret == -ENXIO) {
1709 /* No valid charger type detected */
1710 ab8500_charger_set_usb_connected(di, false);
1711 ab8500_power_supply_changed(di, &di->usb_chg.psy);
1712 }
1713 }
1714}
1715
/**
 * ab8500_charger_usb_state_changed_work() - react to USB stack state change
 * @work:	pointer to the work_struct structure
 *
 * Debounces USB state notifications (waits for enumeration to settle),
 * then enables or disables USB charging according to the final state
 * and the current limit negotiated by the USB stack.
 */
static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;

	struct ab8500_charger *di = container_of(work,
		struct ab8500_charger, usb_state_changed_work);

	if (!di->vbus_detected)
		return;

	/* Clear the change marker under the lock shared with the notifier */
	spin_lock_irqsave(&di->usb_state.usb_lock, flags);
	di->usb_state.usb_changed = false;
	spin_unlock_irqrestore(&di->usb_state.usb_lock, flags);

	/*
	 * wait for some time until you get updates from the usb stack
	 * and negotiations are completed
	 */
	msleep(250);

	/* Another change arrived while sleeping: a newer work will handle it */
	if (di->usb_state.usb_changed)
		return;

	dev_dbg(di->dev, "%s USB state: 0x%02x mA: %d\n",
		__func__, di->usb_state.state, di->usb_state.usb_current);

	switch (di->usb_state.state) {
	case AB8500_BM_USB_STATE_RESET_HS:
	case AB8500_BM_USB_STATE_RESET_FS:
	case AB8500_BM_USB_STATE_SUSPEND:
	case AB8500_BM_USB_STATE_MAX:
		/* Charging is not allowed in these states */
		ab8500_charger_set_usb_connected(di, false);
		ab8500_power_supply_changed(di, &di->usb_chg.psy);
		break;

	case AB8500_BM_USB_STATE_RESUME:
		/*
		 * when suspend->resume there should be delay
		 * of 1sec for enabling charging
		 */
		msleep(1000);
		/* Intentional fall through */
	case AB8500_BM_USB_STATE_CONFIGURED:
		/*
		 * USB is configured, enable charging with the charging
		 * input current obtained from USB driver
		 */
		if (!ab8500_charger_get_usb_cur(di)) {
			/* Update maximum input current */
			ret = ab8500_charger_set_vbus_in_curr(di,
					di->max_usb_in_curr);
			if (ret)
				return;

			ab8500_charger_set_usb_connected(di, true);
			ab8500_power_supply_changed(di, &di->usb_chg.psy);
		}
		break;

	default:
		break;
	};
}
1780
1781/**
1782 * ab8500_charger_check_usbchargernotok_work() - check USB chg not ok status
1783 * @work: pointer to the work_struct structure
1784 *
1785 * Work queue function for checking the USB charger Not OK status
1786 */
1787static void ab8500_charger_check_usbchargernotok_work(struct work_struct *work)
1788{
1789 int ret;
1790 u8 reg_value;
1791 bool prev_status;
1792
1793 struct ab8500_charger *di = container_of(work,
1794 struct ab8500_charger, check_usbchgnotok_work.work);
1795
1796 /* Check if the status bit for usbchargernotok is still active */
1797 ret = abx500_get_register_interruptible(di->dev,
1798 AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG, &reg_value);
1799 if (ret < 0) {
1800 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
1801 return;
1802 }
1803 prev_status = di->flags.usbchargernotok;
1804
1805 if (reg_value & VBUS_CH_NOK) {
1806 di->flags.usbchargernotok = true;
1807 /* Check again in 1sec */
1808 queue_delayed_work(di->charger_wq,
1809 &di->check_usbchgnotok_work, HZ);
1810 } else {
1811 di->flags.usbchargernotok = false;
1812 di->flags.vbus_collapse = false;
1813 }
1814
1815 if (prev_status != di->flags.usbchargernotok)
1816 ab8500_power_supply_changed(di, &di->usb_chg.psy);
1817}
1818
1819/**
1820 * ab8500_charger_check_main_thermal_prot_work() - check main thermal status
1821 * @work: pointer to the work_struct structure
1822 *
1823 * Work queue function for checking the Main thermal prot status
1824 */
1825static void ab8500_charger_check_main_thermal_prot_work(
1826 struct work_struct *work)
1827{
1828 int ret;
1829 u8 reg_value;
1830
1831 struct ab8500_charger *di = container_of(work,
1832 struct ab8500_charger, check_main_thermal_prot_work);
1833
1834 /* Check if the status bit for main_thermal_prot is still active */
1835 ret = abx500_get_register_interruptible(di->dev,
1836 AB8500_CHARGER, AB8500_CH_STATUS2_REG, &reg_value);
1837 if (ret < 0) {
1838 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
1839 return;
1840 }
1841 if (reg_value & MAIN_CH_TH_PROT)
1842 di->flags.main_thermal_prot = true;
1843 else
1844 di->flags.main_thermal_prot = false;
1845
1846 ab8500_power_supply_changed(di, &di->ac_chg.psy);
1847}
1848
1849/**
1850 * ab8500_charger_check_usb_thermal_prot_work() - check usb thermal status
1851 * @work: pointer to the work_struct structure
1852 *
1853 * Work queue function for checking the USB thermal prot status
1854 */
1855static void ab8500_charger_check_usb_thermal_prot_work(
1856 struct work_struct *work)
1857{
1858 int ret;
1859 u8 reg_value;
1860
1861 struct ab8500_charger *di = container_of(work,
1862 struct ab8500_charger, check_usb_thermal_prot_work);
1863
1864 /* Check if the status bit for usb_thermal_prot is still active */
1865 ret = abx500_get_register_interruptible(di->dev,
1866 AB8500_CHARGER, AB8500_CH_USBCH_STAT2_REG, &reg_value);
1867 if (ret < 0) {
1868 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
1869 return;
1870 }
1871 if (reg_value & USB_CH_TH_PROT)
1872 di->flags.usb_thermal_prot = true;
1873 else
1874 di->flags.usb_thermal_prot = false;
1875
1876 ab8500_power_supply_changed(di, &di->usb_chg.psy);
1877}
1878
1879/**
1880 * ab8500_charger_mainchunplugdet_handler() - main charger unplugged
1881 * @irq: interrupt number
1882 * @_di: pointer to the ab8500_charger structure
1883 *
1884 * Returns IRQ status(IRQ_HANDLED)
1885 */
1886static irqreturn_t ab8500_charger_mainchunplugdet_handler(int irq, void *_di)
1887{
1888 struct ab8500_charger *di = _di;
1889
1890 dev_dbg(di->dev, "Main charger unplugged\n");
1891 queue_work(di->charger_wq, &di->ac_work);
1892
1893 return IRQ_HANDLED;
1894}
1895
1896/**
1897 * ab8500_charger_mainchplugdet_handler() - main charger plugged
1898 * @irq: interrupt number
1899 * @_di: pointer to the ab8500_charger structure
1900 *
1901 * Returns IRQ status(IRQ_HANDLED)
1902 */
1903static irqreturn_t ab8500_charger_mainchplugdet_handler(int irq, void *_di)
1904{
1905 struct ab8500_charger *di = _di;
1906
1907 dev_dbg(di->dev, "Main charger plugged\n");
1908 queue_work(di->charger_wq, &di->ac_work);
1909
1910 return IRQ_HANDLED;
1911}
1912
1913/**
1914 * ab8500_charger_mainextchnotok_handler() - main charger not ok
1915 * @irq: interrupt number
1916 * @_di: pointer to the ab8500_charger structure
1917 *
1918 * Returns IRQ status(IRQ_HANDLED)
1919 */
1920static irqreturn_t ab8500_charger_mainextchnotok_handler(int irq, void *_di)
1921{
1922 struct ab8500_charger *di = _di;
1923
1924 dev_dbg(di->dev, "Main charger not ok\n");
1925 di->flags.mainextchnotok = true;
1926 ab8500_power_supply_changed(di, &di->ac_chg.psy);
1927
1928 /* Schedule a new HW failure check */
1929 queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0);
1930
1931 return IRQ_HANDLED;
1932}
1933
1934/**
1935 * ab8500_charger_mainchthprotr_handler() - Die temp is above main charger
1936 * thermal protection threshold
1937 * @irq: interrupt number
1938 * @_di: pointer to the ab8500_charger structure
1939 *
1940 * Returns IRQ status(IRQ_HANDLED)
1941 */
1942static irqreturn_t ab8500_charger_mainchthprotr_handler(int irq, void *_di)
1943{
1944 struct ab8500_charger *di = _di;
1945
1946 dev_dbg(di->dev,
1947 "Die temp above Main charger thermal protection threshold\n");
1948 queue_work(di->charger_wq, &di->check_main_thermal_prot_work);
1949
1950 return IRQ_HANDLED;
1951}
1952
1953/**
1954 * ab8500_charger_mainchthprotf_handler() - Die temp is below main charger
1955 * thermal protection threshold
1956 * @irq: interrupt number
1957 * @_di: pointer to the ab8500_charger structure
1958 *
1959 * Returns IRQ status(IRQ_HANDLED)
1960 */
1961static irqreturn_t ab8500_charger_mainchthprotf_handler(int irq, void *_di)
1962{
1963 struct ab8500_charger *di = _di;
1964
1965 dev_dbg(di->dev,
1966 "Die temp ok for Main charger thermal protection threshold\n");
1967 queue_work(di->charger_wq, &di->check_main_thermal_prot_work);
1968
1969 return IRQ_HANDLED;
1970}
1971
1972/**
1973 * ab8500_charger_vbusdetf_handler() - VBUS falling detected
1974 * @irq: interrupt number
1975 * @_di: pointer to the ab8500_charger structure
1976 *
1977 * Returns IRQ status(IRQ_HANDLED)
1978 */
1979static irqreturn_t ab8500_charger_vbusdetf_handler(int irq, void *_di)
1980{
1981 struct ab8500_charger *di = _di;
1982
1983 dev_dbg(di->dev, "VBUS falling detected\n");
1984 queue_work(di->charger_wq, &di->detect_usb_type_work);
1985
1986 return IRQ_HANDLED;
1987}
1988
1989/**
1990 * ab8500_charger_vbusdetr_handler() - VBUS rising detected
1991 * @irq: interrupt number
1992 * @_di: pointer to the ab8500_charger structure
1993 *
1994 * Returns IRQ status(IRQ_HANDLED)
1995 */
1996static irqreturn_t ab8500_charger_vbusdetr_handler(int irq, void *_di)
1997{
1998 struct ab8500_charger *di = _di;
1999
2000 di->vbus_detected = true;
2001 dev_dbg(di->dev, "VBUS rising detected\n");
2002 queue_work(di->charger_wq, &di->detect_usb_type_work);
2003
2004 return IRQ_HANDLED;
2005}
2006
2007/**
2008 * ab8500_charger_usblinkstatus_handler() - USB link status has changed
2009 * @irq: interrupt number
2010 * @_di: pointer to the ab8500_charger structure
2011 *
2012 * Returns IRQ status(IRQ_HANDLED)
2013 */
2014static irqreturn_t ab8500_charger_usblinkstatus_handler(int irq, void *_di)
2015{
2016 struct ab8500_charger *di = _di;
2017
2018 dev_dbg(di->dev, "USB link status changed\n");
2019
2020 queue_work(di->charger_wq, &di->usb_link_status_work);
2021
2022 return IRQ_HANDLED;
2023}
2024
2025/**
2026 * ab8500_charger_usbchthprotr_handler() - Die temp is above usb charger
2027 * thermal protection threshold
2028 * @irq: interrupt number
2029 * @_di: pointer to the ab8500_charger structure
2030 *
2031 * Returns IRQ status(IRQ_HANDLED)
2032 */
2033static irqreturn_t ab8500_charger_usbchthprotr_handler(int irq, void *_di)
2034{
2035 struct ab8500_charger *di = _di;
2036
2037 dev_dbg(di->dev,
2038 "Die temp above USB charger thermal protection threshold\n");
2039 queue_work(di->charger_wq, &di->check_usb_thermal_prot_work);
2040
2041 return IRQ_HANDLED;
2042}
2043
2044/**
2045 * ab8500_charger_usbchthprotf_handler() - Die temp is below usb charger
2046 * thermal protection threshold
2047 * @irq: interrupt number
2048 * @_di: pointer to the ab8500_charger structure
2049 *
2050 * Returns IRQ status(IRQ_HANDLED)
2051 */
2052static irqreturn_t ab8500_charger_usbchthprotf_handler(int irq, void *_di)
2053{
2054 struct ab8500_charger *di = _di;
2055
2056 dev_dbg(di->dev,
2057 "Die temp ok for USB charger thermal protection threshold\n");
2058 queue_work(di->charger_wq, &di->check_usb_thermal_prot_work);
2059
2060 return IRQ_HANDLED;
2061}
2062
2063/**
2064 * ab8500_charger_usbchargernotokr_handler() - USB charger not ok detected
2065 * @irq: interrupt number
2066 * @_di: pointer to the ab8500_charger structure
2067 *
2068 * Returns IRQ status(IRQ_HANDLED)
2069 */
2070static irqreturn_t ab8500_charger_usbchargernotokr_handler(int irq, void *_di)
2071{
2072 struct ab8500_charger *di = _di;
2073
2074 dev_dbg(di->dev, "Not allowed USB charger detected\n");
2075 queue_delayed_work(di->charger_wq, &di->check_usbchgnotok_work, 0);
2076
2077 return IRQ_HANDLED;
2078}
2079
2080/**
2081 * ab8500_charger_chwdexp_handler() - Charger watchdog expired
2082 * @irq: interrupt number
2083 * @_di: pointer to the ab8500_charger structure
2084 *
2085 * Returns IRQ status(IRQ_HANDLED)
2086 */
2087static irqreturn_t ab8500_charger_chwdexp_handler(int irq, void *_di)
2088{
2089 struct ab8500_charger *di = _di;
2090
2091 dev_dbg(di->dev, "Charger watchdog expired\n");
2092
2093 /*
2094 * The charger that was online when the watchdog expired
2095 * needs to be restarted for charging to start again
2096 */
2097 if (di->ac.charger_online) {
2098 di->ac.wd_expired = true;
2099 ab8500_power_supply_changed(di, &di->ac_chg.psy);
2100 }
2101 if (di->usb.charger_online) {
2102 di->usb.wd_expired = true;
2103 ab8500_power_supply_changed(di, &di->usb_chg.psy);
2104 }
2105
2106 return IRQ_HANDLED;
2107}
2108
2109/**
2110 * ab8500_charger_vbusovv_handler() - VBUS overvoltage detected
2111 * @irq: interrupt number
2112 * @_di: pointer to the ab8500_charger structure
2113 *
2114 * Returns IRQ status(IRQ_HANDLED)
2115 */
2116static irqreturn_t ab8500_charger_vbusovv_handler(int irq, void *_di)
2117{
2118 struct ab8500_charger *di = _di;
2119
2120 dev_dbg(di->dev, "VBUS overvoltage detected\n");
2121 di->flags.vbus_ovv = true;
2122 ab8500_power_supply_changed(di, &di->usb_chg.psy);
2123
2124 /* Schedule a new HW failure check */
2125 queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0);
2126
2127 return IRQ_HANDLED;
2128}
2129
2130/**
2131 * ab8500_charger_ac_get_property() - get the ac/mains properties
2132 * @psy: pointer to the power_supply structure
2133 * @psp: pointer to the power_supply_property structure
2134 * @val: pointer to the power_supply_propval union
2135 *
2136 * This function gets called when an application tries to get the ac/mains
2137 * properties by reading the sysfs files.
2138 * AC/Mains properties are online, present and voltage.
2139 * online: ac/mains charging is in progress or not
2140 * present: presence of the ac/mains
2141 * voltage: AC/Mains voltage
2142 * Returns error code in case of failure else 0(on success)
2143 */
2144static int ab8500_charger_ac_get_property(struct power_supply *psy,
2145 enum power_supply_property psp,
2146 union power_supply_propval *val)
2147{
2148 struct ab8500_charger *di;
2149
2150 di = to_ab8500_charger_ac_device_info(psy_to_ux500_charger(psy));
2151
2152 switch (psp) {
2153 case POWER_SUPPLY_PROP_HEALTH:
2154 if (di->flags.mainextchnotok)
2155 val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
2156 else if (di->ac.wd_expired || di->usb.wd_expired)
2157 val->intval = POWER_SUPPLY_HEALTH_DEAD;
2158 else if (di->flags.main_thermal_prot)
2159 val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
2160 else
2161 val->intval = POWER_SUPPLY_HEALTH_GOOD;
2162 break;
2163 case POWER_SUPPLY_PROP_ONLINE:
2164 val->intval = di->ac.charger_online;
2165 break;
2166 case POWER_SUPPLY_PROP_PRESENT:
2167 val->intval = di->ac.charger_connected;
2168 break;
2169 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
2170 di->ac.charger_voltage = ab8500_charger_get_ac_voltage(di);
2171 val->intval = di->ac.charger_voltage * 1000;
2172 break;
2173 case POWER_SUPPLY_PROP_VOLTAGE_AVG:
2174 /*
2175 * This property is used to indicate when CV mode is entered
2176 * for the AC charger
2177 */
2178 di->ac.cv_active = ab8500_charger_ac_cv(di);
2179 val->intval = di->ac.cv_active;
2180 break;
2181 case POWER_SUPPLY_PROP_CURRENT_NOW:
2182 val->intval = ab8500_charger_get_ac_current(di) * 1000;
2183 break;
2184 default:
2185 return -EINVAL;
2186 }
2187 return 0;
2188}
2189
2190/**
2191 * ab8500_charger_usb_get_property() - get the usb properties
2192 * @psy: pointer to the power_supply structure
2193 * @psp: pointer to the power_supply_property structure
2194 * @val: pointer to the power_supply_propval union
2195 *
2196 * This function gets called when an application tries to get the usb
2197 * properties by reading the sysfs files.
2198 * USB properties are online, present and voltage.
2199 * online: usb charging is in progress or not
2200 * present: presence of the usb
2201 * voltage: vbus voltage
2202 * Returns error code in case of failure else 0(on success)
2203 */
2204static int ab8500_charger_usb_get_property(struct power_supply *psy,
2205 enum power_supply_property psp,
2206 union power_supply_propval *val)
2207{
2208 struct ab8500_charger *di;
2209
2210 di = to_ab8500_charger_usb_device_info(psy_to_ux500_charger(psy));
2211
2212 switch (psp) {
2213 case POWER_SUPPLY_PROP_HEALTH:
2214 if (di->flags.usbchargernotok)
2215 val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
2216 else if (di->ac.wd_expired || di->usb.wd_expired)
2217 val->intval = POWER_SUPPLY_HEALTH_DEAD;
2218 else if (di->flags.usb_thermal_prot)
2219 val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
2220 else if (di->flags.vbus_ovv)
2221 val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
2222 else
2223 val->intval = POWER_SUPPLY_HEALTH_GOOD;
2224 break;
2225 case POWER_SUPPLY_PROP_ONLINE:
2226 val->intval = di->usb.charger_online;
2227 break;
2228 case POWER_SUPPLY_PROP_PRESENT:
2229 val->intval = di->usb.charger_connected;
2230 break;
2231 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
2232 di->usb.charger_voltage = ab8500_charger_get_vbus_voltage(di);
2233 val->intval = di->usb.charger_voltage * 1000;
2234 break;
2235 case POWER_SUPPLY_PROP_VOLTAGE_AVG:
2236 /*
2237 * This property is used to indicate when CV mode is entered
2238 * for the USB charger
2239 */
2240 di->usb.cv_active = ab8500_charger_usb_cv(di);
2241 val->intval = di->usb.cv_active;
2242 break;
2243 case POWER_SUPPLY_PROP_CURRENT_NOW:
2244 val->intval = ab8500_charger_get_usb_current(di) * 1000;
2245 break;
2246 case POWER_SUPPLY_PROP_CURRENT_AVG:
2247 /*
2248 * This property is used to indicate when VBUS has collapsed
2249 * due to too high output current from the USB charger
2250 */
2251 if (di->flags.vbus_collapse)
2252 val->intval = 1;
2253 else
2254 val->intval = 0;
2255 break;
2256 default:
2257 return -EINVAL;
2258 }
2259 return 0;
2260}
2261
2262/**
2263 * ab8500_charger_init_hw_registers() - Set up charger related registers
2264 * @di: pointer to the ab8500_charger structure
2265 *
2266 * Set up charger OVV, watchdog and maximum voltage registers as well as
2267 * charging of the backup battery
2268 */
2269static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
2270{
2271 int ret = 0;
2272
2273 /* Setup maximum charger current and voltage for ABB cut2.0 */
2274 if (!is_ab8500_1p1_or_earlier(di->parent)) {
2275 ret = abx500_set_register_interruptible(di->dev,
2276 AB8500_CHARGER,
2277 AB8500_CH_VOLT_LVL_MAX_REG, CH_VOL_LVL_4P6);
2278 if (ret) {
2279 dev_err(di->dev,
2280 "failed to set CH_VOLT_LVL_MAX_REG\n");
2281 goto out;
2282 }
2283
2284 ret = abx500_set_register_interruptible(di->dev,
2285 AB8500_CHARGER,
2286 AB8500_CH_OPT_CRNTLVL_MAX_REG, CH_OP_CUR_LVL_1P6);
2287 if (ret) {
2288 dev_err(di->dev,
2289 "failed to set CH_OPT_CRNTLVL_MAX_REG\n");
2290 goto out;
2291 }
2292 }
2293
2294 /* VBUS OVV set to 6.3V and enable automatic current limitiation */
2295 ret = abx500_set_register_interruptible(di->dev,
2296 AB8500_CHARGER,
2297 AB8500_USBCH_CTRL2_REG,
2298 VBUS_OVV_SELECT_6P3V | VBUS_AUTO_IN_CURR_LIM_ENA);
2299 if (ret) {
2300 dev_err(di->dev, "failed to set VBUS OVV\n");
2301 goto out;
2302 }
2303
2304 /* Enable main watchdog in OTP */
2305 ret = abx500_set_register_interruptible(di->dev,
2306 AB8500_OTP_EMUL, AB8500_OTP_CONF_15, OTP_ENABLE_WD);
2307 if (ret) {
2308 dev_err(di->dev, "failed to enable main WD in OTP\n");
2309 goto out;
2310 }
2311
2312 /* Enable main watchdog */
2313 ret = abx500_set_register_interruptible(di->dev,
2314 AB8500_SYS_CTRL2_BLOCK,
2315 AB8500_MAIN_WDOG_CTRL_REG, MAIN_WDOG_ENA);
2316 if (ret) {
2317 dev_err(di->dev, "faile to enable main watchdog\n");
2318 goto out;
2319 }
2320
2321 /*
2322 * Due to internal synchronisation, Enable and Kick watchdog bits
2323 * cannot be enabled in a single write.
2324 * A minimum delay of 2*32 kHz period (62.5µs) must be inserted
2325 * between writing Enable then Kick bits.
2326 */
2327 udelay(63);
2328
2329 /* Kick main watchdog */
2330 ret = abx500_set_register_interruptible(di->dev,
2331 AB8500_SYS_CTRL2_BLOCK,
2332 AB8500_MAIN_WDOG_CTRL_REG,
2333 (MAIN_WDOG_ENA | MAIN_WDOG_KICK));
2334 if (ret) {
2335 dev_err(di->dev, "failed to kick main watchdog\n");
2336 goto out;
2337 }
2338
2339 /* Disable main watchdog */
2340 ret = abx500_set_register_interruptible(di->dev,
2341 AB8500_SYS_CTRL2_BLOCK,
2342 AB8500_MAIN_WDOG_CTRL_REG, MAIN_WDOG_DIS);
2343 if (ret) {
2344 dev_err(di->dev, "failed to disable main watchdog\n");
2345 goto out;
2346 }
2347
2348 /* Set watchdog timeout */
2349 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
2350 AB8500_CH_WD_TIMER_REG, WD_TIMER);
2351 if (ret) {
2352 dev_err(di->dev, "failed to set charger watchdog timeout\n");
2353 goto out;
2354 }
2355
2356 /* Backup battery voltage and current */
2357 ret = abx500_set_register_interruptible(di->dev,
2358 AB8500_RTC,
2359 AB8500_RTC_BACKUP_CHG_REG,
2360 di->bat->bkup_bat_v |
2361 di->bat->bkup_bat_i);
2362 if (ret) {
2363 dev_err(di->dev, "failed to setup backup battery charging\n");
2364 goto out;
2365 }
2366
2367 /* Enable backup battery charging */
2368 abx500_mask_and_set_register_interruptible(di->dev,
2369 AB8500_RTC, AB8500_RTC_CTRL_REG,
2370 RTC_BUP_CH_ENA, RTC_BUP_CH_ENA);
2371 if (ret < 0)
2372 dev_err(di->dev, "%s mask and set failed\n", __func__);
2373
2374out:
2375 return ret;
2376}
2377
/*
 * ab8500 charger driver interrupts and their respective isr.
 * Each entry maps an AB8500 interrupt name to its threaded handler;
 * probe() requests these with request_threaded_irq() and remove()
 * frees them again by iterating over this table.
 */
static struct ab8500_charger_interrupts ab8500_charger_irq[] = {
	{"MAIN_CH_UNPLUG_DET", ab8500_charger_mainchunplugdet_handler},
	{"MAIN_CHARGE_PLUG_DET", ab8500_charger_mainchplugdet_handler},
	{"MAIN_EXT_CH_NOT_OK", ab8500_charger_mainextchnotok_handler},
	{"MAIN_CH_TH_PROT_R", ab8500_charger_mainchthprotr_handler},
	{"MAIN_CH_TH_PROT_F", ab8500_charger_mainchthprotf_handler},
	{"VBUS_DET_F", ab8500_charger_vbusdetf_handler},
	{"VBUS_DET_R", ab8500_charger_vbusdetr_handler},
	{"USB_LINK_STATUS", ab8500_charger_usblinkstatus_handler},
	{"USB_CH_TH_PROT_R", ab8500_charger_usbchthprotr_handler},
	{"USB_CH_TH_PROT_F", ab8500_charger_usbchthprotf_handler},
	{"USB_CHARGER_NOT_OKR", ab8500_charger_usbchargernotokr_handler},
	{"VBUS_OVV", ab8500_charger_vbusovv_handler},
	{"CH_WD_EXP", ab8500_charger_chwdexp_handler},
};
2396
/*
 * ab8500_charger_usb_notifier_call() - USB transceiver notifier callback.
 * Translates the current budget (mA) reported with a USB_EVENT_VBUS event
 * into an ab8500_usb_state and schedules usb_state_changed_work to act
 * on it. All other events are ignored (NOTIFY_DONE).
 */
static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
	unsigned long event, void *power)
{
	struct ab8500_charger *di =
		container_of(nb, struct ab8500_charger, nb);
	enum ab8500_usb_state bm_usb_state;
	/* 'power' carries the allowed current in mA for USB_EVENT_VBUS */
	unsigned mA = *((unsigned *)power);

	if (event != USB_EVENT_VBUS) {
		dev_dbg(di->dev, "not a standard host, returning\n");
		return NOTIFY_DONE;
	}

	/* TODO: State is fabricate here. See if charger really needs USB
	 * state or if mA is enough
	 */
	if ((di->usb_state.usb_current == 2) && (mA > 2))
		bm_usb_state = AB8500_BM_USB_STATE_RESUME;
	else if (mA == 0)
		bm_usb_state = AB8500_BM_USB_STATE_RESET_HS;
	else if (mA == 2)
		bm_usb_state = AB8500_BM_USB_STATE_SUSPEND;
	else if (mA >= 8) /* 8, 100, 500 */
		bm_usb_state = AB8500_BM_USB_STATE_CONFIGURED;
	else /* Should never occur */
		bm_usb_state = AB8500_BM_USB_STATE_RESET_FS;

	dev_dbg(di->dev, "%s usb_state: 0x%02x mA: %d\n",
		__func__, bm_usb_state, mA);

	/*
	 * NOTE(review): only the usb_changed flag is written under
	 * usb_lock; state and usb_current are updated outside it just
	 * below. Confirm that the work item reading these tolerates a
	 * concurrent update, or widen the locked region.
	 */
	spin_lock(&di->usb_state.usb_lock);
	di->usb_state.usb_changed = true;
	spin_unlock(&di->usb_state.usb_lock);

	di->usb_state.state = bm_usb_state;
	di->usb_state.usb_current = mA;

	queue_work(di->charger_wq, &di->usb_state_changed_work);

	return NOTIFY_OK;
}
2438
2439#if defined(CONFIG_PM)
2440static int ab8500_charger_resume(struct platform_device *pdev)
2441{
2442 int ret;
2443 struct ab8500_charger *di = platform_get_drvdata(pdev);
2444
2445 /*
2446 * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
2447 * logic. That means we have to continously kick the charger
2448 * watchdog even when no charger is connected. This is only
2449 * valid once the AC charger has been enabled. This is
2450 * a bug that is not handled by the algorithm and the
2451 * watchdog have to be kicked by the charger driver
2452 * when the AC charger is disabled
2453 */
2454 if (di->ac_conn && is_ab8500_1p1_or_earlier(di->parent)) {
2455 ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
2456 AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
2457 if (ret)
2458 dev_err(di->dev, "Failed to kick WD!\n");
2459
2460 /* If not already pending start a new timer */
2461 if (!delayed_work_pending(
2462 &di->kick_wd_work)) {
2463 queue_delayed_work(di->charger_wq, &di->kick_wd_work,
2464 round_jiffies(WD_KICK_INTERVAL));
2465 }
2466 }
2467
2468 /* If we still have a HW failure, schedule a new check */
2469 if (di->flags.mainextchnotok || di->flags.vbus_ovv) {
2470 queue_delayed_work(di->charger_wq,
2471 &di->check_hw_failure_work, 0);
2472 }
2473
2474 return 0;
2475}
2476
2477static int ab8500_charger_suspend(struct platform_device *pdev,
2478 pm_message_t state)
2479{
2480 struct ab8500_charger *di = platform_get_drvdata(pdev);
2481
2482 /* Cancel any pending HW failure check */
2483 if (delayed_work_pending(&di->check_hw_failure_work))
2484 cancel_delayed_work(&di->check_hw_failure_work);
2485
2486 return 0;
2487}
2488#else
2489#define ab8500_charger_suspend NULL
2490#define ab8500_charger_resume NULL
2491#endif
2492
/*
 * ab8500_charger_remove() - driver teardown.
 * Reverses probe(): disables both chargers, frees the interrupts,
 * releases the vddadc regulator reference, disables backup battery
 * charging, detaches from the USB transceiver, destroys the work queue
 * and unregisters the power supplies.
 */
static int __devexit ab8500_charger_remove(struct platform_device *pdev)
{
	struct ab8500_charger *di = platform_get_drvdata(pdev);
	int i, irq, ret;

	/* Disable AC charging */
	ab8500_charger_ac_en(&di->ac_chg, false, 0, 0);

	/* Disable USB charging */
	ab8500_charger_usb_en(&di->usb_chg, false, 0, 0);

	/* Disable interrupts */
	for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
		irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
		free_irq(irq, di);
	}

	/* Release the vddadc regulator reference taken in probe() */
	regulator_put(di->regu);

	/* Backup battery voltage and current disable */
	ret = abx500_mask_and_set_register_interruptible(di->dev,
		AB8500_RTC, AB8500_RTC_CTRL_REG, RTC_BUP_CH_ENA, 0);
	if (ret < 0)
		dev_err(di->dev, "%s mask and set failed\n", __func__);

	usb_unregister_notifier(di->usb_phy, &di->nb);
	usb_put_transceiver(di->usb_phy);

	/* Delete the work queue */
	destroy_workqueue(di->charger_wq);

	flush_scheduled_work();
	power_supply_unregister(&di->usb_chg.psy);
	power_supply_unregister(&di->ac_chg.psy);
	platform_set_drvdata(pdev, NULL);
	kfree(di);

	return 0;
}
2533
2534static int __devinit ab8500_charger_probe(struct platform_device *pdev)
2535{
2536 int irq, i, charger_status, ret = 0;
2537 struct abx500_bm_plat_data *plat_data;
2538
2539 struct ab8500_charger *di =
2540 kzalloc(sizeof(struct ab8500_charger), GFP_KERNEL);
2541 if (!di)
2542 return -ENOMEM;
2543
2544 /* get parent data */
2545 di->dev = &pdev->dev;
2546 di->parent = dev_get_drvdata(pdev->dev.parent);
2547 di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
2548
2549 /* initialize lock */
2550 spin_lock_init(&di->usb_state.usb_lock);
2551
2552 /* get charger specific platform data */
2553 plat_data = pdev->dev.platform_data;
2554 di->pdata = plat_data->charger;
2555
2556 if (!di->pdata) {
2557 dev_err(di->dev, "no charger platform data supplied\n");
2558 ret = -EINVAL;
2559 goto free_device_info;
2560 }
2561
2562 /* get battery specific platform data */
2563 di->bat = plat_data->battery;
2564 if (!di->bat) {
2565 dev_err(di->dev, "no battery platform data supplied\n");
2566 ret = -EINVAL;
2567 goto free_device_info;
2568 }
2569
2570 di->autopower = false;
2571
2572 /* AC supply */
2573 /* power_supply base class */
2574 di->ac_chg.psy.name = "ab8500_ac";
2575 di->ac_chg.psy.type = POWER_SUPPLY_TYPE_MAINS;
2576 di->ac_chg.psy.properties = ab8500_charger_ac_props;
2577 di->ac_chg.psy.num_properties = ARRAY_SIZE(ab8500_charger_ac_props);
2578 di->ac_chg.psy.get_property = ab8500_charger_ac_get_property;
2579 di->ac_chg.psy.supplied_to = di->pdata->supplied_to;
2580 di->ac_chg.psy.num_supplicants = di->pdata->num_supplicants;
2581 /* ux500_charger sub-class */
2582 di->ac_chg.ops.enable = &ab8500_charger_ac_en;
2583 di->ac_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
2584 di->ac_chg.ops.update_curr = &ab8500_charger_update_charger_current;
2585 di->ac_chg.max_out_volt = ab8500_charger_voltage_map[
2586 ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
2587 di->ac_chg.max_out_curr = ab8500_charger_current_map[
2588 ARRAY_SIZE(ab8500_charger_current_map) - 1];
2589
2590 /* USB supply */
2591 /* power_supply base class */
2592 di->usb_chg.psy.name = "ab8500_usb";
2593 di->usb_chg.psy.type = POWER_SUPPLY_TYPE_USB;
2594 di->usb_chg.psy.properties = ab8500_charger_usb_props;
2595 di->usb_chg.psy.num_properties = ARRAY_SIZE(ab8500_charger_usb_props);
2596 di->usb_chg.psy.get_property = ab8500_charger_usb_get_property;
2597 di->usb_chg.psy.supplied_to = di->pdata->supplied_to;
2598 di->usb_chg.psy.num_supplicants = di->pdata->num_supplicants;
2599 /* ux500_charger sub-class */
2600 di->usb_chg.ops.enable = &ab8500_charger_usb_en;
2601 di->usb_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
2602 di->usb_chg.ops.update_curr = &ab8500_charger_update_charger_current;
2603 di->usb_chg.max_out_volt = ab8500_charger_voltage_map[
2604 ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
2605 di->usb_chg.max_out_curr = ab8500_charger_current_map[
2606 ARRAY_SIZE(ab8500_charger_current_map) - 1];
2607
2608
2609 /* Create a work queue for the charger */
2610 di->charger_wq =
2611 create_singlethread_workqueue("ab8500_charger_wq");
2612 if (di->charger_wq == NULL) {
2613 dev_err(di->dev, "failed to create work queue\n");
2614 goto free_device_info;
2615 }
2616
2617 /* Init work for HW failure check */
2618 INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work,
2619 ab8500_charger_check_hw_failure_work);
2620 INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work,
2621 ab8500_charger_check_usbchargernotok_work);
2622
2623 /*
2624 * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
2625 * logic. That means we have to continously kick the charger
2626 * watchdog even when no charger is connected. This is only
2627 * valid once the AC charger has been enabled. This is
2628 * a bug that is not handled by the algorithm and the
2629 * watchdog have to be kicked by the charger driver
2630 * when the AC charger is disabled
2631 */
2632 INIT_DELAYED_WORK_DEFERRABLE(&di->kick_wd_work,
2633 ab8500_charger_kick_watchdog_work);
2634
2635 INIT_DELAYED_WORK_DEFERRABLE(&di->check_vbat_work,
2636 ab8500_charger_check_vbat_work);
2637
2638 /* Init work for charger detection */
2639 INIT_WORK(&di->usb_link_status_work,
2640 ab8500_charger_usb_link_status_work);
2641 INIT_WORK(&di->ac_work, ab8500_charger_ac_work);
2642 INIT_WORK(&di->detect_usb_type_work,
2643 ab8500_charger_detect_usb_type_work);
2644
2645 INIT_WORK(&di->usb_state_changed_work,
2646 ab8500_charger_usb_state_changed_work);
2647
2648 /* Init work for checking HW status */
2649 INIT_WORK(&di->check_main_thermal_prot_work,
2650 ab8500_charger_check_main_thermal_prot_work);
2651 INIT_WORK(&di->check_usb_thermal_prot_work,
2652 ab8500_charger_check_usb_thermal_prot_work);
2653
2654 /*
2655 * VDD ADC supply needs to be enabled from this driver when there
2656 * is a charger connected to avoid erroneous BTEMP_HIGH/LOW
2657 * interrupts during charging
2658 */
2659 di->regu = regulator_get(di->dev, "vddadc");
2660 if (IS_ERR(di->regu)) {
2661 ret = PTR_ERR(di->regu);
2662 dev_err(di->dev, "failed to get vddadc regulator\n");
2663 goto free_charger_wq;
2664 }
2665
2666
2667 /* Initialize OVV, and other registers */
2668 ret = ab8500_charger_init_hw_registers(di);
2669 if (ret) {
2670 dev_err(di->dev, "failed to initialize ABB registers\n");
2671 goto free_regulator;
2672 }
2673
2674 /* Register AC charger class */
2675 ret = power_supply_register(di->dev, &di->ac_chg.psy);
2676 if (ret) {
2677 dev_err(di->dev, "failed to register AC charger\n");
2678 goto free_regulator;
2679 }
2680
2681 /* Register USB charger class */
2682 ret = power_supply_register(di->dev, &di->usb_chg.psy);
2683 if (ret) {
2684 dev_err(di->dev, "failed to register USB charger\n");
2685 goto free_ac;
2686 }
2687
2688 di->usb_phy = usb_get_transceiver();
2689 if (!di->usb_phy) {
2690 dev_err(di->dev, "failed to get usb transceiver\n");
2691 ret = -EINVAL;
2692 goto free_usb;
2693 }
2694 di->nb.notifier_call = ab8500_charger_usb_notifier_call;
2695 ret = usb_register_notifier(di->usb_phy, &di->nb);
2696 if (ret) {
2697 dev_err(di->dev, "failed to register usb notifier\n");
2698 goto put_usb_phy;
2699 }
2700
2701 /* Identify the connected charger types during startup */
2702 charger_status = ab8500_charger_detect_chargers(di);
2703 if (charger_status & AC_PW_CONN) {
2704 di->ac.charger_connected = 1;
2705 di->ac_conn = true;
2706 ab8500_power_supply_changed(di, &di->ac_chg.psy);
2707 sysfs_notify(&di->ac_chg.psy.dev->kobj, NULL, "present");
2708 }
2709
2710 if (charger_status & USB_PW_CONN) {
2711 dev_dbg(di->dev, "VBUS Detect during startup\n");
2712 di->vbus_detected = true;
2713 di->vbus_detected_start = true;
2714 queue_work(di->charger_wq,
2715 &di->detect_usb_type_work);
2716 }
2717
2718 /* Register interrupts */
2719 for (i = 0; i < ARRAY_SIZE(ab8500_charger_irq); i++) {
2720 irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
2721 ret = request_threaded_irq(irq, NULL, ab8500_charger_irq[i].isr,
2722 IRQF_SHARED | IRQF_NO_SUSPEND,
2723 ab8500_charger_irq[i].name, di);
2724
2725 if (ret != 0) {
2726 dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
2727 , ab8500_charger_irq[i].name, irq, ret);
2728 goto free_irq;
2729 }
2730 dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
2731 ab8500_charger_irq[i].name, irq, ret);
2732 }
2733
2734 platform_set_drvdata(pdev, di);
2735
2736 return ret;
2737
2738free_irq:
2739 usb_unregister_notifier(di->usb_phy, &di->nb);
2740
2741 /* We also have to free all successfully registered irqs */
2742 for (i = i - 1; i >= 0; i--) {
2743 irq = platform_get_irq_byname(pdev, ab8500_charger_irq[i].name);
2744 free_irq(irq, di);
2745 }
2746put_usb_phy:
2747 usb_put_transceiver(di->usb_phy);
2748free_usb:
2749 power_supply_unregister(&di->usb_chg.psy);
2750free_ac:
2751 power_supply_unregister(&di->ac_chg.psy);
2752free_regulator:
2753 regulator_put(di->regu);
2754free_charger_wq:
2755 destroy_workqueue(di->charger_wq);
2756free_device_info:
2757 kfree(di);
2758
2759 return ret;
2760}
2761
/*
 * Platform driver using the legacy suspend/resume callbacks; both are
 * compiled out (defined to NULL) when CONFIG_PM is not set.
 */
static struct platform_driver ab8500_charger_driver = {
	.probe = ab8500_charger_probe,
	.remove = __devexit_p(ab8500_charger_remove),
	.suspend = ab8500_charger_suspend,
	.resume = ab8500_charger_resume,
	.driver = {
		.name = "ab8500-charger",
		.owner = THIS_MODULE,
	},
};
2772
/*
 * Module init: registered via subsys_initcall_sync() (see below) so the
 * charger is available before ordinary module initcalls run.
 */
static int __init ab8500_charger_init(void)
{
	return platform_driver_register(&ab8500_charger_driver);
}
2777
/* Module exit: unregister the platform driver. */
static void __exit ab8500_charger_exit(void)
{
	platform_driver_unregister(&ab8500_charger_driver);
}
2782
2783subsys_initcall_sync(ab8500_charger_init);
2784module_exit(ab8500_charger_exit);
2785
2786MODULE_LICENSE("GPL v2");
2787MODULE_AUTHOR("Johan Palsson, Karl Komierowski, Arun R Murthy");
2788MODULE_ALIAS("platform:ab8500-charger");
2789MODULE_DESCRIPTION("AB8500 charger management driver");
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
new file mode 100644
index 000000000000..c22f2f05657e
--- /dev/null
+++ b/drivers/power/ab8500_fg.c
@@ -0,0 +1,2637 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2012
3 *
4 * Main and Back-up battery management driver.
5 *
6 * Note: Backup battery management is required in case of Li-Ion battery and not
7 * for capacitive battery. HREF boards have capacitive battery and hence backup
 * battery management is not used and the supporting code is available in this
9 * driver.
10 *
11 * License Terms: GNU General Public License v2
12 * Author:
13 * Johan Palsson <johan.palsson@stericsson.com>
14 * Karl Komierowski <karl.komierowski@stericsson.com>
15 * Arun R Murthy <arun.murthy@stericsson.com>
16 */
17
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/device.h>
21#include <linux/interrupt.h>
22#include <linux/platform_device.h>
23#include <linux/power_supply.h>
24#include <linux/kobject.h>
25#include <linux/mfd/abx500/ab8500.h>
26#include <linux/mfd/abx500.h>
27#include <linux/slab.h>
28#include <linux/mfd/abx500/ab8500-bm.h>
29#include <linux/delay.h>
30#include <linux/mfd/abx500/ab8500-gpadc.h>
31#include <linux/mfd/abx500.h>
32#include <linux/time.h>
33#include <linux/completion.h>
34
/* Scaling between milli- and micro-units (mV -> uV, mA -> uA) */
#define MILLI_TO_MICRO			1000
#define FG_LSB_IN_MA			1627
#define QLSB_NANO_AMP_HOURS_X10		1129
#define INS_CURR_TIMEOUT		(3 * HZ)

/*
 * Fix: the argument must be parenthesized so that expression arguments
 * such as SEC_TO_SAMPLE(a + b) expand correctly.
 */
#define SEC_TO_SAMPLE(S)		((S) * 4)

#define NBR_AVG_SAMPLES			20

#define LOW_BAT_CHECK_INTERVAL		(2 * HZ)

#define VALID_CAPACITY_SEC		(45 * 60) /* 45 minutes */
#define BATT_OK_MIN			2360 /* mV */
#define BATT_OK_INCREMENT		50 /* mV */
#define BATT_OK_MAX_NR_INCREMENTS	0xE

/* FG constants */
#define BATT_OVV			0x01

/*
 * Integer linear interpolation of (x) between (x1,y1) and (x2,y2).
 * Fix: dropped the stray trailing semicolon, which made the macro
 * unusable inside expressions.
 */
#define interpolate(x, x1, y1, x2, y2) \
	((y1) + ((((y2) - (y1)) * ((x) - (x1))) / ((x2) - (x1))))

/*
 * Resolve the enclosing struct ab8500_fg from its embedded fg_psy member.
 * Fix: dropped the stray trailing semicolon here as well.
 */
#define to_ab8500_fg_device_info(x) container_of((x), \
	struct ab8500_fg, fg_psy)
59
/**
 * struct ab8500_fg_interrupts - ab8500 fg interupts
 * @name:	name of the interrupt
 * @isr:	function pointer to the isr
 */
struct ab8500_fg_interrupts {
	char *name;
	irqreturn_t (*isr)(int irq, void *data);
};
69
/*
 * States of the discharge-side fuel gauge state machine.
 * discharge_state[] below holds the printable name for each state and
 * must be kept in the same order as this enum.
 */
enum ab8500_fg_discharge_state {
	AB8500_FG_DISCHARGE_INIT,
	AB8500_FG_DISCHARGE_INITMEASURING,
	AB8500_FG_DISCHARGE_INIT_RECOVERY,
	AB8500_FG_DISCHARGE_RECOVERY,
	AB8500_FG_DISCHARGE_READOUT_INIT,
	AB8500_FG_DISCHARGE_READOUT,
	AB8500_FG_DISCHARGE_WAKEUP,
};

/* Printable names, indexed by enum ab8500_fg_discharge_state */
static char *discharge_state[] = {
	"DISCHARGE_INIT",
	"DISCHARGE_INITMEASURING",
	"DISCHARGE_INIT_RECOVERY",
	"DISCHARGE_RECOVERY",
	"DISCHARGE_READOUT_INIT",
	"DISCHARGE_READOUT",
	"DISCHARGE_WAKEUP",
};

/*
 * States of the charge-side fuel gauge state machine; charge_state[]
 * below provides the matching printable names.
 */
enum ab8500_fg_charge_state {
	AB8500_FG_CHARGE_INIT,
	AB8500_FG_CHARGE_READOUT,
};

/* Printable names, indexed by enum ab8500_fg_charge_state */
static char *charge_state[] = {
	"CHARGE_INIT",
	"CHARGE_READOUT",
};

/* States of the coulomb counter offset calibration sequence */
enum ab8500_fg_calibration_state {
	AB8500_FG_CALIB_INIT,
	AB8500_FG_CALIB_WAIT,
	AB8500_FG_CALIB_END,
};
105
/*
 * struct ab8500_fg_avg_cap - averaging filter state for the capacity.
 * Holds up to NBR_AVG_SAMPLES samples with their timestamps; pos,
 * nbr_samples and sum suggest a ring-buffer style running average —
 * the filter implementation lives elsewhere in this file.
 */
struct ab8500_fg_avg_cap {
	int avg;			/* filtered (average) value */
	int samples[NBR_AVG_SAMPLES];	/* stored samples */
	__kernel_time_t time_stamps[NBR_AVG_SAMPLES]; /* per-sample times */
	int pos;			/* current buffer position */
	int nbr_samples;		/* number of valid samples */
	int sum;			/* running sum of the samples */
};

/*
 * struct ab8500_fg_battery_capacity - capacity bookkeeping.
 * Units implied by the names: *_mah in mAh, permille in 1/1000,
 * prev_percent in percent — TODO confirm against the users of these
 * fields elsewhere in the file.
 */
struct ab8500_fg_battery_capacity {
	int max_mah_design;	/* designed full capacity */
	int max_mah;		/* measured full capacity */
	int mah;		/* current capacity */
	int permille;
	int level;
	int prev_mah;		/* previously reported value */
	int prev_percent;
	int prev_level;
	int user_mah;		/* user-supplied capacity (see user_cap flag) */
};

/* Event/status flags used by the fuel gauge code */
struct ab8500_fg_flags {
	bool fg_enabled;
	bool conv_done;
	bool charging;
	bool fully_charged;
	bool force_full;
	bool low_bat_delay;
	bool low_bat;
	bool bat_ovv;
	bool batt_unknown;
	bool calibrate;
	bool user_cap;
	bool batt_id_received;
};

/* List node carrying a pointer to an instantaneous-current result */
struct inst_curr_result_list {
	struct list_head list;
	int *result;
};
146
/**
 * struct ab8500_fg - ab8500 FG device information
 * @dev:	Pointer to the structure device
 * @node:	a list of AB8500 FGs, hence prepared for reentrance
 * @irq:	holds the CCEOC interrupt number
 * @vbat:	Battery voltage in mV
 * @vbat_nom:	Nominal battery voltage in mV
 * @inst_curr:	Instantenous battery current in mA
 * @avg_curr:	Average battery current in mA
 * @bat_temp:	battery temperature
 * @fg_samples:	Number of samples used in the FG accumulation
 * @accu_charge:	Accumulated charge from the last conversion
 * @recovery_cnt:	Counter for recovery mode
 * @high_curr_cnt:	Counter for high current mode
 * @init_cnt:	Counter for init mode
 * @recovery_needed:	Indicate if recovery is needed
 * @high_curr_mode:	Indicate if we're in high current mode
 * @init_capacity:	Indicate if initial capacity measuring should be done
 * @turn_off_fg:	True if fg was off before current measurement
 * @calib_state:	State during offset calibration
 * @discharge_state:	Current discharge state
 * @charge_state:	Current charge state
 * @ab8500_fg_complete:	Completion struct used for the instant current reading
 * @flags:	Structure for information about events triggered
 * @bat_cap:	Structure for battery capacity specific parameters
 * @avg_cap:	Average capacity filter
 * @parent:	Pointer to the struct ab8500
 * @gpadc:	Pointer to the struct gpadc
 * @pdata:	Pointer to the abx500_fg platform data
 * @bat:	Pointer to the abx500_bm platform data
 * @fg_psy:	Structure that holds the FG specific battery properties
 * @fg_wq:	Work queue for running the FG algorithm
 * @fg_periodic_work:	Work to run the FG algorithm periodically
 * @fg_low_bat_work:	Work to check low bat condition
 * @fg_reinit_work:	Work used to reset and reinitialise the FG algorithm
 * @fg_work:	Work to run the FG algorithm instantly
 * @fg_acc_cur_work:	Work to read the FG accumulator
 * @fg_check_hw_failure_work:	Work for checking HW state
 * @cc_lock:	Mutex for locking the CC
 * @fg_kobject:	Structure of type kobject
 */
struct ab8500_fg {
	struct device *dev;
	struct list_head node;
	int irq;
	int vbat;
	int vbat_nom;
	int inst_curr;
	int avg_curr;
	int bat_temp;
	int fg_samples;
	int accu_charge;
	int recovery_cnt;
	int high_curr_cnt;
	int init_cnt;
	bool recovery_needed;
	bool high_curr_mode;
	bool init_capacity;
	bool turn_off_fg;
	enum ab8500_fg_calibration_state calib_state;
	enum ab8500_fg_discharge_state discharge_state;
	enum ab8500_fg_charge_state charge_state;
	struct completion ab8500_fg_complete;
	struct ab8500_fg_flags flags;
	struct ab8500_fg_battery_capacity bat_cap;
	struct ab8500_fg_avg_cap avg_cap;
	struct ab8500 *parent;
	struct ab8500_gpadc *gpadc;
	struct abx500_fg_platform_data *pdata;
	struct abx500_bm_data *bat;
	struct power_supply fg_psy;
	struct workqueue_struct *fg_wq;
	struct delayed_work fg_periodic_work;
	struct delayed_work fg_low_bat_work;
	struct delayed_work fg_reinit_work;
	struct work_struct fg_work;
	struct work_struct fg_acc_cur_work;
	struct delayed_work fg_check_hw_failure_work;
	struct mutex cc_lock;
	struct kobject fg_kobject;
};
228static LIST_HEAD(ab8500_fg_list);
229
230/**
231 * ab8500_fg_get() - returns a reference to the primary AB8500 fuel gauge
232 * (i.e. the first fuel gauge in the instance list)
233 */
234struct ab8500_fg *ab8500_fg_get(void)
235{
236 struct ab8500_fg *fg;
237
238 if (list_empty(&ab8500_fg_list))
239 return NULL;
240
241 fg = list_first_entry(&ab8500_fg_list, struct ab8500_fg, node);
242 return fg;
243}
244
/* Main battery properties reported by this driver via power_supply */
static enum power_supply_property ab8500_fg_props[] = {
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_NOW,
	POWER_SUPPLY_PROP_CURRENT_AVG,
	POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
	POWER_SUPPLY_PROP_ENERGY_FULL,
	POWER_SUPPLY_PROP_ENERGY_NOW,
	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
	POWER_SUPPLY_PROP_CHARGE_FULL,
	POWER_SUPPLY_PROP_CHARGE_NOW,
	POWER_SUPPLY_PROP_CAPACITY,
	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
};
259
/*
 * This table maps the raw hex register value to the lowbat voltage (mV)
 * used by the AB8500. Values taken from the UM0836. Note that the final
 * entry is deliberately repeated (3850 appears twice).
 */
static int ab8500_fg_lowbat_voltage_map[] = {
	2300, 2325, 2350, 2375, 2400, 2425, 2450, 2475,
	2500, 2525, 2550, 2575, 2600, 2625, 2650, 2675,
	2700, 2725, 2750, 2775, 2800, 2825, 2850, 2875,
	2900, 2925, 2950, 2975, 3000, 3025, 3050, 3075,
	3100, 3125, 3150, 3175, 3200, 3225, 3250, 3275,
	3300, 3325, 3350, 3375, 3400, 3425, 3450, 3475,
	3500, 3525, 3550, 3575, 3600, 3625, 3650, 3675,
	3700, 3725, 3750, 3775, 3800, 3825, 3850, 3850,
};
330
331static u8 ab8500_volt_to_regval(int voltage)
332{
333 int i;
334
335 if (voltage < ab8500_fg_lowbat_voltage_map[0])
336 return 0;
337
338 for (i = 0; i < ARRAY_SIZE(ab8500_fg_lowbat_voltage_map); i++) {
339 if (voltage < ab8500_fg_lowbat_voltage_map[i])
340 return (u8) i - 1;
341 }
342
343 /* If not captured above, return index of last element */
344 return (u8) ARRAY_SIZE(ab8500_fg_lowbat_voltage_map) - 1;
345}
346
347/**
348 * ab8500_fg_is_low_curr() - Low or high current mode
349 * @di: pointer to the ab8500_fg structure
350 * @curr: the current to base or our decision on
351 *
352 * Low current mode if the current consumption is below a certain threshold
353 */
354static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr)
355{
356 /*
357 * We want to know if we're in low current mode
358 */
359 if (curr > -di->bat->fg_params->high_curr_threshold)
360 return true;
361 else
362 return false;
363}
364
/**
 * ab8500_fg_add_cap_sample() - Add capacity to average filter
 * @di:		pointer to the ab8500_fg structure
 * @sample:	the capacity in mAh to add to the filter
 *
 * A capacity is added to the filter and a new mean capacity is calculated and
 * returned.  Each stored sample carries a timestamp; samples older than
 * VALID_CAPACITY_SEC are overwritten with the new sample before the mean
 * is recomputed.
 */
static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
{
	struct timespec ts;
	struct ab8500_fg_avg_cap *avg = &di->avg_cap;

	getnstimeofday(&ts);

	do {
		/* Replace the oldest slot and keep the running sum in step */
		avg->sum += sample - avg->samples[avg->pos];
		avg->samples[avg->pos] = sample;
		avg->time_stamps[avg->pos] = ts.tv_sec;
		avg->pos++;

		/* Circular buffer: wrap the write position */
		if (avg->pos == NBR_AVG_SAMPLES)
			avg->pos = 0;

		if (avg->nbr_samples < NBR_AVG_SAMPLES)
			avg->nbr_samples++;

		/*
		 * Check the time stamp for each sample. If too old,
		 * replace with latest sample
		 */
	} while (ts.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);

	avg->avg = avg->sum / avg->nbr_samples;

	return avg->avg;
}
402
403/**
404 * ab8500_fg_clear_cap_samples() - Clear average filter
405 * @di: pointer to the ab8500_fg structure
406 *
407 * The capacity filter is is reset to zero.
408 */
409static void ab8500_fg_clear_cap_samples(struct ab8500_fg *di)
410{
411 int i;
412 struct ab8500_fg_avg_cap *avg = &di->avg_cap;
413
414 avg->pos = 0;
415 avg->nbr_samples = 0;
416 avg->sum = 0;
417 avg->avg = 0;
418
419 for (i = 0; i < NBR_AVG_SAMPLES; i++) {
420 avg->samples[i] = 0;
421 avg->time_stamps[i] = 0;
422 }
423}
424
425/**
426 * ab8500_fg_fill_cap_sample() - Fill average filter
427 * @di: pointer to the ab8500_fg structure
428 * @sample: the capacity in mAh to fill the filter with
429 *
430 * The capacity filter is filled with a capacity in mAh
431 */
432static void ab8500_fg_fill_cap_sample(struct ab8500_fg *di, int sample)
433{
434 int i;
435 struct timespec ts;
436 struct ab8500_fg_avg_cap *avg = &di->avg_cap;
437
438 getnstimeofday(&ts);
439
440 for (i = 0; i < NBR_AVG_SAMPLES; i++) {
441 avg->samples[i] = sample;
442 avg->time_stamps[i] = ts.tv_sec;
443 }
444
445 avg->pos = 0;
446 avg->nbr_samples = NBR_AVG_SAMPLES;
447 avg->sum = sample * NBR_AVG_SAMPLES;
448 avg->avg = sample;
449}
450
451/**
452 * ab8500_fg_coulomb_counter() - enable coulomb counter
453 * @di: pointer to the ab8500_fg structure
454 * @enable: enable/disable
455 *
456 * Enable/Disable coulomb counter.
457 * On failure returns negative value.
458 */
459static int ab8500_fg_coulomb_counter(struct ab8500_fg *di, bool enable)
460{
461 int ret = 0;
462 mutex_lock(&di->cc_lock);
463 if (enable) {
464 /* To be able to reprogram the number of samples, we have to
465 * first stop the CC and then enable it again */
466 ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
467 AB8500_RTC_CC_CONF_REG, 0x00);
468 if (ret)
469 goto cc_err;
470
471 /* Program the samples */
472 ret = abx500_set_register_interruptible(di->dev,
473 AB8500_GAS_GAUGE, AB8500_GASG_CC_NCOV_ACCU,
474 di->fg_samples);
475 if (ret)
476 goto cc_err;
477
478 /* Start the CC */
479 ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
480 AB8500_RTC_CC_CONF_REG,
481 (CC_DEEP_SLEEP_ENA | CC_PWR_UP_ENA));
482 if (ret)
483 goto cc_err;
484
485 di->flags.fg_enabled = true;
486 } else {
487 /* Clear any pending read requests */
488 ret = abx500_set_register_interruptible(di->dev,
489 AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0);
490 if (ret)
491 goto cc_err;
492
493 ret = abx500_set_register_interruptible(di->dev,
494 AB8500_GAS_GAUGE, AB8500_GASG_CC_NCOV_ACCU_CTRL, 0);
495 if (ret)
496 goto cc_err;
497
498 /* Stop the CC */
499 ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
500 AB8500_RTC_CC_CONF_REG, 0);
501 if (ret)
502 goto cc_err;
503
504 di->flags.fg_enabled = false;
505
506 }
507 dev_dbg(di->dev, " CC enabled: %d Samples: %d\n",
508 enable, di->fg_samples);
509
510 mutex_unlock(&di->cc_lock);
511
512 return ret;
513cc_err:
514 dev_err(di->dev, "%s Enabling coulomb counter failed\n", __func__);
515 mutex_unlock(&di->cc_lock);
516 return ret;
517}
518
519/**
520 * ab8500_fg_inst_curr_start() - start battery instantaneous current
521 * @di: pointer to the ab8500_fg structure
522 *
523 * Returns 0 or error code
524 * Note: This is part "one" and has to be called before
525 * ab8500_fg_inst_curr_finalize()
526 */
527 int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
528{
529 u8 reg_val;
530 int ret;
531
532 mutex_lock(&di->cc_lock);
533
534 ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
535 AB8500_RTC_CC_CONF_REG, &reg_val);
536 if (ret < 0)
537 goto fail;
538
539 if (!(reg_val & CC_PWR_UP_ENA)) {
540 dev_dbg(di->dev, "%s Enable FG\n", __func__);
541 di->turn_off_fg = true;
542
543 /* Program the samples */
544 ret = abx500_set_register_interruptible(di->dev,
545 AB8500_GAS_GAUGE, AB8500_GASG_CC_NCOV_ACCU,
546 SEC_TO_SAMPLE(10));
547 if (ret)
548 goto fail;
549
550 /* Start the CC */
551 ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
552 AB8500_RTC_CC_CONF_REG,
553 (CC_DEEP_SLEEP_ENA | CC_PWR_UP_ENA));
554 if (ret)
555 goto fail;
556 } else {
557 di->turn_off_fg = false;
558 }
559
560 /* Return and WFI */
561 INIT_COMPLETION(di->ab8500_fg_complete);
562 enable_irq(di->irq);
563
564 /* Note: cc_lock is still locked */
565 return 0;
566fail:
567 mutex_unlock(&di->cc_lock);
568 return ret;
569}
570
/**
 * ab8500_fg_inst_curr_done() - check if fg conversion is done
 * @di:		pointer to the ab8500_fg structure
 *
 * Returns 1 if conversion done, 0 if still waiting.
 * The completion is signalled by the CCEOC interrupt armed by
 * ab8500_fg_inst_curr_start().
 */
int ab8500_fg_inst_curr_done(struct ab8500_fg *di)
{
	return completion_done(&di->ab8500_fg_complete);
}
581
582/**
583 * ab8500_fg_inst_curr_finalize() - battery instantaneous current
584 * @di: pointer to the ab8500_fg structure
585 * @res: battery instantenous current(on success)
586 *
587 * Returns 0 or an error code
588 * Note: This is part "two" and has to be called at earliest 250 ms
589 * after ab8500_fg_inst_curr_start()
590 */
591int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
592{
593 u8 low, high;
594 int val;
595 int ret;
596 int timeout;
597
598 if (!completion_done(&di->ab8500_fg_complete)) {
599 timeout = wait_for_completion_timeout(&di->ab8500_fg_complete,
600 INS_CURR_TIMEOUT);
601 dev_dbg(di->dev, "Finalize time: %d ms\n",
602 ((INS_CURR_TIMEOUT - timeout) * 1000) / HZ);
603 if (!timeout) {
604 ret = -ETIME;
605 disable_irq(di->irq);
606 dev_err(di->dev, "completion timed out [%d]\n",
607 __LINE__);
608 goto fail;
609 }
610 }
611
612 disable_irq(di->irq);
613
614 ret = abx500_mask_and_set_register_interruptible(di->dev,
615 AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
616 READ_REQ, READ_REQ);
617
618 /* 100uS between read request and read is needed */
619 usleep_range(100, 100);
620
621 /* Read CC Sample conversion value Low and high */
622 ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
623 AB8500_GASG_CC_SMPL_CNVL_REG, &low);
624 if (ret < 0)
625 goto fail;
626
627 ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
628 AB8500_GASG_CC_SMPL_CNVH_REG, &high);
629 if (ret < 0)
630 goto fail;
631
632 /*
633 * negative value for Discharging
634 * convert 2's compliment into decimal
635 */
636 if (high & 0x10)
637 val = (low | (high << 8) | 0xFFFFE000);
638 else
639 val = (low | (high << 8));
640
641 /*
642 * Convert to unit value in mA
643 * Full scale input voltage is
644 * 66.660mV => LSB = 66.660mV/(4096*res) = 1.627mA
645 * Given a 250ms conversion cycle time the LSB corresponds
646 * to 112.9 nAh. Convert to current by dividing by the conversion
647 * time in hours (250ms = 1 / (3600 * 4)h)
648 * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
649 */
650 val = (val * QLSB_NANO_AMP_HOURS_X10 * 36 * 4) /
651 (1000 * di->bat->fg_res);
652
653 if (di->turn_off_fg) {
654 dev_dbg(di->dev, "%s Disable FG\n", __func__);
655
656 /* Clear any pending read requests */
657 ret = abx500_set_register_interruptible(di->dev,
658 AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0);
659 if (ret)
660 goto fail;
661
662 /* Stop the CC */
663 ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
664 AB8500_RTC_CC_CONF_REG, 0);
665 if (ret)
666 goto fail;
667 }
668 mutex_unlock(&di->cc_lock);
669 (*res) = val;
670
671 return 0;
672fail:
673 mutex_unlock(&di->cc_lock);
674 return ret;
675}
676
/**
 * ab8500_fg_inst_curr_blocking() - battery instantaneous current
 * @di:		pointer to the ab8500_fg structure
 *
 * Starts and finalizes an instantaneous current measurement in one call.
 * Returns the measured battery current in mA, or 0 if starting or
 * finalizing the measurement failed.
 */
int ab8500_fg_inst_curr_blocking(struct ab8500_fg *di)
{
	int ret;
	int res = 0;

	ret = ab8500_fg_inst_curr_start(di);
	if (ret) {
		dev_err(di->dev, "Failed to initialize fg_inst\n");
		return 0;
	}

	ret = ab8500_fg_inst_curr_finalize(di, &res);
	if (ret) {
		dev_err(di->dev, "Failed to finalize fg_inst\n");
		return 0;
	}

	return res;
}
703
/**
 * ab8500_fg_acc_cur_work() - average battery current
 * @work:	pointer to the work_struct structure
 *
 * Reads the coulomb counter accumulator, updates the accumulated charge
 * (di->accu_charge, uAh) and the average battery current (di->avg_curr, mA),
 * then queues fg_work to run the FG algorithm.
 */
static void ab8500_fg_acc_cur_work(struct work_struct *work)
{
	int val;
	int ret;
	u8 low, med, high;

	struct ab8500_fg *di = container_of(work,
		struct ab8500_fg, fg_acc_cur_work);

	mutex_lock(&di->cc_lock);
	/* Latch the accumulator so the three byte reads are consistent */
	ret = abx500_set_register_interruptible(di->dev, AB8500_GAS_GAUGE,
		AB8500_GASG_CC_NCOV_ACCU_CTRL, RD_NCONV_ACCU_REQ);
	if (ret)
		goto exit;

	ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
		AB8500_GASG_CC_NCOV_ACCU_LOW,  &low);
	if (ret < 0)
		goto exit;

	ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
		AB8500_GASG_CC_NCOV_ACCU_MED,  &med);
	if (ret < 0)
		goto exit;

	ret = abx500_get_register_interruptible(di->dev, AB8500_GAS_GAUGE,
		AB8500_GASG_CC_NCOV_ACCU_HIGH, &high);
	if (ret < 0)
		goto exit;

	/* Check for sign bit in case of negative value, 2's compliment */
	if (high & 0x10)
		val = (low | (med << 8) | (high << 16) | 0xFFE00000);
	else
		val = (low | (med << 8) | (high << 16));

	/*
	 * Convert to uAh
	 * Given a 250ms conversion cycle time the LSB corresponds
	 * to 112.9 nAh.
	 * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
	 */
	di->accu_charge = (val * QLSB_NANO_AMP_HOURS_X10) /
		(100 * di->bat->fg_res);

	/*
	 * Convert to unit value in mA
	 * Full scale input voltage is
	 * 66.660mV => LSB = 66.660mV/(4096*res) = 1.627mA
	 * Given a 250ms conversion cycle time the LSB corresponds
	 * to 112.9 nAh. Convert to current by dividing by the conversion
	 * time in hours (= samples / (3600 * 4)h)
	 * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
	 */
	di->avg_curr = (val * QLSB_NANO_AMP_HOURS_X10 * 36) /
		(1000 * di->bat->fg_res * (di->fg_samples / 4));

	di->flags.conv_done = true;

	mutex_unlock(&di->cc_lock);

	/* Kick the FG algorithm with the fresh readings */
	queue_work(di->fg_wq, &di->fg_work);

	return;
exit:
	dev_err(di->dev,
		"Failed to read or write gas gauge registers\n");
	mutex_unlock(&di->cc_lock);
	queue_work(di->fg_wq, &di->fg_work);
}
781
782/**
783 * ab8500_fg_bat_voltage() - get battery voltage
784 * @di: pointer to the ab8500_fg structure
785 *
786 * Returns battery voltage(on success) else error code
787 */
788static int ab8500_fg_bat_voltage(struct ab8500_fg *di)
789{
790 int vbat;
791 static int prev;
792
793 vbat = ab8500_gpadc_convert(di->gpadc, MAIN_BAT_V);
794 if (vbat < 0) {
795 dev_err(di->dev,
796 "%s gpadc conversion failed, using previous value\n",
797 __func__);
798 return prev;
799 }
800
801 prev = vbat;
802 return vbat;
803}
804
805/**
806 * ab8500_fg_volt_to_capacity() - Voltage based capacity
807 * @di: pointer to the ab8500_fg structure
808 * @voltage: The voltage to convert to a capacity
809 *
810 * Returns battery capacity in per mille based on voltage
811 */
812static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
813{
814 int i, tbl_size;
815 struct abx500_v_to_cap *tbl;
816 int cap = 0;
817
818 tbl = di->bat->bat_type[di->bat->batt_id].v_to_cap_tbl,
819 tbl_size = di->bat->bat_type[di->bat->batt_id].n_v_cap_tbl_elements;
820
821 for (i = 0; i < tbl_size; ++i) {
822 if (voltage > tbl[i].voltage)
823 break;
824 }
825
826 if ((i > 0) && (i < tbl_size)) {
827 cap = interpolate(voltage,
828 tbl[i].voltage,
829 tbl[i].capacity * 10,
830 tbl[i-1].voltage,
831 tbl[i-1].capacity * 10);
832 } else if (i == 0) {
833 cap = 1000;
834 } else {
835 cap = 0;
836 }
837
838 dev_dbg(di->dev, "%s Vbat: %d, Cap: %d per mille",
839 __func__, voltage, cap);
840
841 return cap;
842}
843
844/**
845 * ab8500_fg_uncomp_volt_to_capacity() - Uncompensated voltage based capacity
846 * @di: pointer to the ab8500_fg structure
847 *
848 * Returns battery capacity based on battery voltage that is not compensated
849 * for the voltage drop due to the load
850 */
851static int ab8500_fg_uncomp_volt_to_capacity(struct ab8500_fg *di)
852{
853 di->vbat = ab8500_fg_bat_voltage(di);
854 return ab8500_fg_volt_to_capacity(di, di->vbat);
855}
856
857/**
858 * ab8500_fg_battery_resistance() - Returns the battery inner resistance
859 * @di: pointer to the ab8500_fg structure
860 *
861 * Returns battery inner resistance added with the fuel gauge resistor value
862 * to get the total resistance in the whole link from gnd to bat+ node.
863 */
864static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
865{
866 int i, tbl_size;
867 struct batres_vs_temp *tbl;
868 int resist = 0;
869
870 tbl = di->bat->bat_type[di->bat->batt_id].batres_tbl;
871 tbl_size = di->bat->bat_type[di->bat->batt_id].n_batres_tbl_elements;
872
873 for (i = 0; i < tbl_size; ++i) {
874 if (di->bat_temp / 10 > tbl[i].temp)
875 break;
876 }
877
878 if ((i > 0) && (i < tbl_size)) {
879 resist = interpolate(di->bat_temp / 10,
880 tbl[i].temp,
881 tbl[i].resist,
882 tbl[i-1].temp,
883 tbl[i-1].resist);
884 } else if (i == 0) {
885 resist = tbl[0].resist;
886 } else {
887 resist = tbl[tbl_size - 1].resist;
888 }
889
890 dev_dbg(di->dev, "%s Temp: %d battery internal resistance: %d"
891 " fg resistance %d, total: %d (mOhm)\n",
892 __func__, di->bat_temp, resist, di->bat->fg_res / 10,
893 (di->bat->fg_res / 10) + resist);
894
895 /* fg_res variable is in 0.1mOhm */
896 resist += di->bat->fg_res / 10;
897
898 return resist;
899}
900
901/**
902 * ab8500_fg_load_comp_volt_to_capacity() - Load compensated voltage based capacity
903 * @di: pointer to the ab8500_fg structure
904 *
905 * Returns battery capacity based on battery voltage that is load compensated
906 * for the voltage drop
907 */
908static int ab8500_fg_load_comp_volt_to_capacity(struct ab8500_fg *di)
909{
910 int vbat_comp, res;
911 int i = 0;
912 int vbat = 0;
913
914 ab8500_fg_inst_curr_start(di);
915
916 do {
917 vbat += ab8500_fg_bat_voltage(di);
918 i++;
919 msleep(5);
920 } while (!ab8500_fg_inst_curr_done(di));
921
922 ab8500_fg_inst_curr_finalize(di, &di->inst_curr);
923
924 di->vbat = vbat / i;
925 res = ab8500_fg_battery_resistance(di);
926
927 /* Use Ohms law to get the load compensated voltage */
928 vbat_comp = di->vbat - (di->inst_curr * res) / 1000;
929
930 dev_dbg(di->dev, "%s Measured Vbat: %dmV,Compensated Vbat %dmV, "
931 "R: %dmOhm, Current: %dmA Vbat Samples: %d\n",
932 __func__, di->vbat, vbat_comp, res, di->inst_curr, i);
933
934 return ab8500_fg_volt_to_capacity(di, vbat_comp);
935}
936
937/**
938 * ab8500_fg_convert_mah_to_permille() - Capacity in mAh to permille
939 * @di: pointer to the ab8500_fg structure
940 * @cap_mah: capacity in mAh
941 *
942 * Converts capacity in mAh to capacity in permille
943 */
944static int ab8500_fg_convert_mah_to_permille(struct ab8500_fg *di, int cap_mah)
945{
946 return (cap_mah * 1000) / di->bat_cap.max_mah_design;
947}
948
949/**
950 * ab8500_fg_convert_permille_to_mah() - Capacity in permille to mAh
951 * @di: pointer to the ab8500_fg structure
952 * @cap_pm: capacity in permille
953 *
954 * Converts capacity in permille to capacity in mAh
955 */
956static int ab8500_fg_convert_permille_to_mah(struct ab8500_fg *di, int cap_pm)
957{
958 return cap_pm * di->bat_cap.max_mah_design / 1000;
959}
960
/**
 * ab8500_fg_convert_mah_to_uwh() - Capacity in mAh to uWh
 * @di:		pointer to the ab8500_fg structure
 * @cap_mah:	capacity in mAh
 *
 * Converts capacity in mAh to energy in uWh using the nominal battery
 * voltage: uWh = mAh * mV / 1000, rounded to nearest.
 */
static int ab8500_fg_convert_mah_to_uwh(struct ab8500_fg *di, int cap_mah)
{
	u64 div_res;
	u32 div_rem;

	/* 64-bit math: mAh * mV can overflow 32 bits */
	div_res = ((u64) cap_mah) * ((u64) di->vbat_nom);
	/* do_div() divides in place and returns the remainder */
	div_rem = do_div(div_res, 1000);

	/* Make sure to round upwards if necessary */
	if (div_rem >= 1000 / 2)
		div_res++;

	return (int) div_res;
}
982
/**
 * ab8500_fg_calc_cap_charging() - Calculate remaining capacity while charging
 * @di:		pointer to the ab8500_fg structure
 *
 * Return the capacity in mAh based on previous calculated capcity and the FG
 * accumulator register value. The filter is filled with this capacity
 */
static int ab8500_fg_calc_cap_charging(struct ab8500_fg *di)
{
	dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
		__func__,
		di->bat_cap.mah,
		di->accu_charge);

	/* Capacity should not be less than 0 */
	if (di->bat_cap.mah + di->accu_charge > 0)
		di->bat_cap.mah += di->accu_charge;
	else
		di->bat_cap.mah = 0;
	/*
	 * We force capacity to 100% once when the algorithm
	 * reports that it's full.
	 */
	if (di->bat_cap.mah >= di->bat_cap.max_mah_design ||
		di->flags.force_full) {
		di->bat_cap.mah = di->bat_cap.max_mah_design;
	}

	/* Reseed the average filter with the new capacity */
	ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
	di->bat_cap.permille =
		ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);

	/* We need to update battery voltage and inst current when charging */
	di->vbat = ab8500_fg_bat_voltage(di);
	di->inst_curr = ab8500_fg_inst_curr_blocking(di);

	return di->bat_cap.mah;
}
1021
1022/**
1023 * ab8500_fg_calc_cap_discharge_voltage() - Capacity in discharge with voltage
1024 * @di: pointer to the ab8500_fg structure
1025 * @comp: if voltage should be load compensated before capacity calc
1026 *
1027 * Return the capacity in mAh based on the battery voltage. The voltage can
1028 * either be load compensated or not. This value is added to the filter and a
1029 * new mean value is calculated and returned.
1030 */
1031static int ab8500_fg_calc_cap_discharge_voltage(struct ab8500_fg *di, bool comp)
1032{
1033 int permille, mah;
1034
1035 if (comp)
1036 permille = ab8500_fg_load_comp_volt_to_capacity(di);
1037 else
1038 permille = ab8500_fg_uncomp_volt_to_capacity(di);
1039
1040 mah = ab8500_fg_convert_permille_to_mah(di, permille);
1041
1042 di->bat_cap.mah = ab8500_fg_add_cap_sample(di, mah);
1043 di->bat_cap.permille =
1044 ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
1045
1046 return di->bat_cap.mah;
1047}
1048
1049/**
1050 * ab8500_fg_calc_cap_discharge_fg() - Capacity in discharge with FG
1051 * @di: pointer to the ab8500_fg structure
1052 *
1053 * Return the capacity in mAh based on previous calculated capcity and the FG
1054 * accumulator register value. This value is added to the filter and a
1055 * new mean value is calculated and returned.
1056 */
1057static int ab8500_fg_calc_cap_discharge_fg(struct ab8500_fg *di)
1058{
1059 int permille_volt, permille;
1060
1061 dev_dbg(di->dev, "%s cap_mah %d accu_charge %d\n",
1062 __func__,
1063 di->bat_cap.mah,
1064 di->accu_charge);
1065
1066 /* Capacity should not be less than 0 */
1067 if (di->bat_cap.mah + di->accu_charge > 0)
1068 di->bat_cap.mah += di->accu_charge;
1069 else
1070 di->bat_cap.mah = 0;
1071
1072 if (di->bat_cap.mah >= di->bat_cap.max_mah_design)
1073 di->bat_cap.mah = di->bat_cap.max_mah_design;
1074
1075 /*
1076 * Check against voltage based capacity. It can not be lower
1077 * than what the uncompensated voltage says
1078 */
1079 permille = ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
1080 permille_volt = ab8500_fg_uncomp_volt_to_capacity(di);
1081
1082 if (permille < permille_volt) {
1083 di->bat_cap.permille = permille_volt;
1084 di->bat_cap.mah = ab8500_fg_convert_permille_to_mah(di,
1085 di->bat_cap.permille);
1086
1087 dev_dbg(di->dev, "%s voltage based: perm %d perm_volt %d\n",
1088 __func__,
1089 permille,
1090 permille_volt);
1091
1092 ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
1093 } else {
1094 ab8500_fg_fill_cap_sample(di, di->bat_cap.mah);
1095 di->bat_cap.permille =
1096 ab8500_fg_convert_mah_to_permille(di, di->bat_cap.mah);
1097 }
1098
1099 return di->bat_cap.mah;
1100}
1101
1102/**
1103 * ab8500_fg_capacity_level() - Get the battery capacity level
1104 * @di: pointer to the ab8500_fg structure
1105 *
1106 * Get the battery capacity level based on the capacity in percent
1107 */
1108static int ab8500_fg_capacity_level(struct ab8500_fg *di)
1109{
1110 int ret, percent;
1111
1112 percent = di->bat_cap.permille / 10;
1113
1114 if (percent <= di->bat->cap_levels->critical ||
1115 di->flags.low_bat)
1116 ret = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
1117 else if (percent <= di->bat->cap_levels->low)
1118 ret = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
1119 else if (percent <= di->bat->cap_levels->normal)
1120 ret = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
1121 else if (percent <= di->bat->cap_levels->high)
1122 ret = POWER_SUPPLY_CAPACITY_LEVEL_HIGH;
1123 else
1124 ret = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
1125
1126 return ret;
1127}
1128
/**
 * ab8500_fg_check_capacity_limits() - Check if capacity has changed
 * @di:		pointer to the ab8500_fg structure
 * @init:	capacity is allowed to go up in init mode
 *
 * Check if capacity or capacity limit has changed and notify the system
 * about it using the power_supply framework. Reported capacity only moves
 * upward while charging or during init, is pinned to 0 on the LOW_BAT IRQ
 * and held at >= 1% otherwise, and is held at full while the algorithm
 * reports fully charged (unless it drops below the maintenance threshold).
 */
static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
{
	bool changed = false;

	di->bat_cap.level = ab8500_fg_capacity_level(di);

	if (di->bat_cap.level != di->bat_cap.prev_level) {
		/*
		 * We do not allow reported capacity level to go up
		 * unless we're charging or if we're in init
		 */
		if (!(!di->flags.charging && di->bat_cap.level >
			di->bat_cap.prev_level) || init) {
			dev_dbg(di->dev, "level changed from %d to %d\n",
				di->bat_cap.prev_level,
				di->bat_cap.level);
			di->bat_cap.prev_level = di->bat_cap.level;
			changed = true;
		} else {
			dev_dbg(di->dev, "level not allowed to go up "
				"since no charger is connected: %d to %d\n",
				di->bat_cap.prev_level,
				di->bat_cap.level);
		}
	}

	/*
	 * If we have received the LOW_BAT IRQ, set capacity to 0 to initiate
	 * shutdown
	 */
	if (di->flags.low_bat) {
		dev_dbg(di->dev, "Battery low, set capacity to 0\n");
		di->bat_cap.prev_percent = 0;
		di->bat_cap.permille = 0;
		di->bat_cap.prev_mah = 0;
		di->bat_cap.mah = 0;
		changed = true;
	} else if (di->flags.fully_charged) {
		/*
		 * We report 100% if algorithm reported fully charged
		 * unless capacity drops too much
		 */
		if (di->flags.force_full) {
			di->bat_cap.prev_percent = di->bat_cap.permille / 10;
			di->bat_cap.prev_mah = di->bat_cap.mah;
		} else if (!di->flags.force_full &&
			di->bat_cap.prev_percent !=
			(di->bat_cap.permille) / 10 &&
			(di->bat_cap.permille / 10) <
			di->bat->fg_params->maint_thres) {
			dev_dbg(di->dev,
				"battery reported full "
				"but capacity dropping: %d\n",
				di->bat_cap.permille / 10);
			di->bat_cap.prev_percent = di->bat_cap.permille / 10;
			di->bat_cap.prev_mah = di->bat_cap.mah;

			changed = true;
		}
	} else if (di->bat_cap.prev_percent != di->bat_cap.permille / 10) {
		if (di->bat_cap.permille / 10 == 0) {
			/*
			 * We will not report 0% unless we've got
			 * the LOW_BAT IRQ, no matter what the FG
			 * algorithm says.
			 */
			di->bat_cap.prev_percent = 1;
			di->bat_cap.permille = 1;
			di->bat_cap.prev_mah = 1;
			di->bat_cap.mah = 1;

			changed = true;
		} else if (!(!di->flags.charging &&
			(di->bat_cap.permille / 10) >
			di->bat_cap.prev_percent) || init) {
			/*
			 * We do not allow reported capacity to go up
			 * unless we're charging or if we're in init
			 */
			dev_dbg(di->dev,
				"capacity changed from %d to %d (%d)\n",
				di->bat_cap.prev_percent,
				di->bat_cap.permille / 10,
				di->bat_cap.permille);
			di->bat_cap.prev_percent = di->bat_cap.permille / 10;
			di->bat_cap.prev_mah = di->bat_cap.mah;

			changed = true;
		} else {
			dev_dbg(di->dev, "capacity not allowed to go up since "
				"no charger is connected: %d to %d (%d)\n",
				di->bat_cap.prev_percent,
				di->bat_cap.permille / 10,
				di->bat_cap.permille);
		}
	}

	if (changed) {
		/* Tell user space and the charging algorithm about the change */
		power_supply_changed(&di->fg_psy);
		if (di->flags.fully_charged && di->flags.force_full) {
			dev_dbg(di->dev, "Battery full, notifying.\n");
			di->flags.force_full = false;
			sysfs_notify(&di->fg_kobject, NULL, "charge_full");
		}
		sysfs_notify(&di->fg_kobject, NULL, "charge_now");
	}
}
1244
1245static void ab8500_fg_charge_state_to(struct ab8500_fg *di,
1246 enum ab8500_fg_charge_state new_state)
1247{
1248 dev_dbg(di->dev, "Charge state from %d [%s] to %d [%s]\n",
1249 di->charge_state,
1250 charge_state[di->charge_state],
1251 new_state,
1252 charge_state[new_state]);
1253
1254 di->charge_state = new_state;
1255}
1256
1257static void ab8500_fg_discharge_state_to(struct ab8500_fg *di,
1258 enum ab8500_fg_discharge_state new_state)
1259{
1260 dev_dbg(di->dev, "Disharge state from %d [%s] to %d [%s]\n",
1261 di->discharge_state,
1262 discharge_state[di->discharge_state],
1263 new_state,
1264 discharge_state[new_state]);
1265
1266 di->discharge_state = new_state;
1267}
1268
/**
 * ab8500_fg_algorithm_charging() - FG algorithm for when charging
 * @di:		pointer to the ab8500_fg structure
 *
 * Battery capacity calculation state machine for when we're charging.
 * INIT programs the coulomb counter and moves to READOUT; READOUT
 * recalculates the capacity whenever a CC conversion has completed.
 */
static void ab8500_fg_algorithm_charging(struct ab8500_fg *di)
{
	/*
	 * If we change to discharge mode
	 * we should start with recovery
	 */
	if (di->discharge_state != AB8500_FG_DISCHARGE_INIT_RECOVERY)
		ab8500_fg_discharge_state_to(di,
			AB8500_FG_DISCHARGE_INIT_RECOVERY);

	switch (di->charge_state) {
	case AB8500_FG_CHARGE_INIT:
		di->fg_samples = SEC_TO_SAMPLE(
			di->bat->fg_params->accu_charging);

		ab8500_fg_coulomb_counter(di, true);
		ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_READOUT);

		break;

	case AB8500_FG_CHARGE_READOUT:
		/*
		 * Read the FG and calculate the new capacity
		 */
		mutex_lock(&di->cc_lock);
		if (!di->flags.conv_done) {
			/* Wasn't the CC IRQ that got us here */
			mutex_unlock(&di->cc_lock);
			dev_dbg(di->dev, "%s CC conv not done\n",
				__func__);

			break;
		}
		di->flags.conv_done = false;
		mutex_unlock(&di->cc_lock);

		ab8500_fg_calc_cap_charging(di);

		break;

	default:
		break;
	}

	/* Check capacity limits */
	ab8500_fg_check_capacity_limits(di, false);
}
1322
1323static void force_capacity(struct ab8500_fg *di)
1324{
1325 int cap;
1326
1327 ab8500_fg_clear_cap_samples(di);
1328 cap = di->bat_cap.user_mah;
1329 if (cap > di->bat_cap.max_mah_design) {
1330 dev_dbg(di->dev, "Remaining cap %d can't be bigger than total"
1331 " %d\n", cap, di->bat_cap.max_mah_design);
1332 cap = di->bat_cap.max_mah_design;
1333 }
1334 ab8500_fg_fill_cap_sample(di, di->bat_cap.user_mah);
1335 di->bat_cap.permille = ab8500_fg_convert_mah_to_permille(di, cap);
1336 di->bat_cap.mah = cap;
1337 ab8500_fg_check_capacity_limits(di, true);
1338}
1339
1340static bool check_sysfs_capacity(struct ab8500_fg *di)
1341{
1342 int cap, lower, upper;
1343 int cap_permille;
1344
1345 cap = di->bat_cap.user_mah;
1346
1347 cap_permille = ab8500_fg_convert_mah_to_permille(di,
1348 di->bat_cap.user_mah);
1349
1350 lower = di->bat_cap.permille - di->bat->fg_params->user_cap_limit * 10;
1351 upper = di->bat_cap.permille + di->bat->fg_params->user_cap_limit * 10;
1352
1353 if (lower < 0)
1354 lower = 0;
1355 /* 1000 is permille, -> 100 percent */
1356 if (upper > 1000)
1357 upper = 1000;
1358
1359 dev_dbg(di->dev, "Capacity limits:"
1360 " (Lower: %d User: %d Upper: %d) [user: %d, was: %d]\n",
1361 lower, cap_permille, upper, cap, di->bat_cap.mah);
1362
1363 /* If within limits, use the saved capacity and exit estimation...*/
1364 if (cap_permille > lower && cap_permille < upper) {
1365 dev_dbg(di->dev, "OK! Using users cap %d uAh now\n", cap);
1366 force_capacity(di);
1367 return true;
1368 }
1369 dev_dbg(di->dev, "Capacity from user out of limits, ignoring");
1370 return false;
1371}
1372
/**
 * ab8500_fg_algorithm_discharging() - FG algorithm for when discharging
 * @di:		pointer to the ab8500_fg structure
 *
 * Battery capacity calculation state machine for when we're discharging.
 * Alternates between coulomb-counter (FG) based readout at high current
 * and compensated-voltage based estimation at low current, with a
 * recovery phase in between so the voltage based estimate is taken on a
 * relaxed battery.
 */
static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
{
	int sleep_time;

	/* If we change to charge mode we should start with init */
	if (di->charge_state != AB8500_FG_CHARGE_INIT)
		ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);

	switch (di->discharge_state) {
	case AB8500_FG_DISCHARGE_INIT:
		/* We use the FG IRQ to work on */
		di->init_cnt = 0;
		di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
		ab8500_fg_coulomb_counter(di, true);
		ab8500_fg_discharge_state_to(di,
			AB8500_FG_DISCHARGE_INITMEASURING);

		/* Intentional fallthrough */
	case AB8500_FG_DISCHARGE_INITMEASURING:
		/*
		 * Discard a number of samples during startup.
		 * After that, use compensated voltage for a few
		 * samples to get an initial capacity.
		 * Then go to READOUT
		 */
		sleep_time = di->bat->fg_params->init_timer;

		/* Discard the first [x] seconds */
		if (di->init_cnt >
			di->bat->fg_params->init_discard_time) {
			ab8500_fg_calc_cap_discharge_voltage(di, true);

			ab8500_fg_check_capacity_limits(di, true);
		}

		di->init_cnt += sleep_time;
		if (di->init_cnt > di->bat->fg_params->init_total_time)
			ab8500_fg_discharge_state_to(di,
				AB8500_FG_DISCHARGE_READOUT_INIT);

		break;

	case AB8500_FG_DISCHARGE_INIT_RECOVERY:
		/* Start a fresh recovery period, then fall into RECOVERY */
		di->recovery_cnt = 0;
		di->recovery_needed = true;
		ab8500_fg_discharge_state_to(di,
			AB8500_FG_DISCHARGE_RECOVERY);

		/* Intentional fallthrough */

	case AB8500_FG_DISCHARGE_RECOVERY:
		sleep_time = di->bat->fg_params->recovery_sleep_timer;

		/*
		 * We should check the power consumption
		 * If low, go to READOUT (after x min) or
		 * RECOVERY_SLEEP if time left.
		 * If high, go to READOUT
		 */
		di->inst_curr = ab8500_fg_inst_curr_blocking(di);

		if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
			if (di->recovery_cnt >
				di->bat->fg_params->recovery_total_time) {
				di->fg_samples = SEC_TO_SAMPLE(
					di->bat->fg_params->accu_high_curr);
				ab8500_fg_coulomb_counter(di, true);
				ab8500_fg_discharge_state_to(di,
					AB8500_FG_DISCHARGE_READOUT);
				di->recovery_needed = false;
			} else {
				/* Sleep and re-check later in this state */
				queue_delayed_work(di->fg_wq,
					&di->fg_periodic_work,
					sleep_time * HZ);
			}
			di->recovery_cnt += sleep_time;
		} else {
			/* High current: abort recovery, go to FG readout */
			di->fg_samples = SEC_TO_SAMPLE(
				di->bat->fg_params->accu_high_curr);
			ab8500_fg_coulomb_counter(di, true);
			ab8500_fg_discharge_state_to(di,
				AB8500_FG_DISCHARGE_READOUT);
		}
		break;

	case AB8500_FG_DISCHARGE_READOUT_INIT:
		/* Arm the coulomb counter before entering READOUT */
		di->fg_samples = SEC_TO_SAMPLE(
			di->bat->fg_params->accu_high_curr);
		ab8500_fg_coulomb_counter(di, true);
		ab8500_fg_discharge_state_to(di,
			AB8500_FG_DISCHARGE_READOUT);
		break;

	case AB8500_FG_DISCHARGE_READOUT:
		di->inst_curr = ab8500_fg_inst_curr_blocking(di);

		if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
			/* Detect mode change */
			if (di->high_curr_mode) {
				di->high_curr_mode = false;
				di->high_curr_cnt = 0;
			}

			if (di->recovery_needed) {
				ab8500_fg_discharge_state_to(di,
					AB8500_FG_DISCHARGE_RECOVERY);

				queue_delayed_work(di->fg_wq,
					&di->fg_periodic_work, 0);

				break;
			}

			ab8500_fg_calc_cap_discharge_voltage(di, true);
		} else {
			mutex_lock(&di->cc_lock);
			if (!di->flags.conv_done) {
				/* Wasn't the CC IRQ that got us here */
				mutex_unlock(&di->cc_lock);
				dev_dbg(di->dev, "%s CC conv not done\n",
					__func__);

				break;
			}
			di->flags.conv_done = false;
			mutex_unlock(&di->cc_lock);

			/* Detect mode change */
			if (!di->high_curr_mode) {
				di->high_curr_mode = true;
				di->high_curr_cnt = 0;
			}

			/* After enough sustained high current, schedule a
			 * recovery phase so a relaxed voltage reading can
			 * be taken at the next low-current opportunity */
			di->high_curr_cnt +=
				di->bat->fg_params->accu_high_curr;
			if (di->high_curr_cnt >
				di->bat->fg_params->high_curr_time)
				di->recovery_needed = true;

			ab8500_fg_calc_cap_discharge_fg(di);
		}

		ab8500_fg_check_capacity_limits(di, false);

		break;

	case AB8500_FG_DISCHARGE_WAKEUP:
		/* Entered from resume: take a fresh voltage based estimate,
		 * then re-arm the CC and return to READOUT */
		ab8500_fg_coulomb_counter(di, true);
		di->inst_curr = ab8500_fg_inst_curr_blocking(di);

		ab8500_fg_calc_cap_discharge_voltage(di, true);

		di->fg_samples = SEC_TO_SAMPLE(
			di->bat->fg_params->accu_high_curr);
		ab8500_fg_coulomb_counter(di, true);
		ab8500_fg_discharge_state_to(di,
			AB8500_FG_DISCHARGE_READOUT);

		ab8500_fg_check_capacity_limits(di, false);

		break;

	default:
		break;
	}
}
1545
/**
 * ab8500_fg_algorithm_calibrate() - Internal coulomb counter offset calibration
 * @di:		pointer to the ab8500_fg structure
 *
 * Small state machine driving the CC internal offset calibration:
 * INIT programs the hardware and starts the calibration, WAIT idles until
 * the calibration-done interrupt arrives, END commits the offset and
 * clears the calibrate flag. On any register error, calibration is
 * abandoned and the periodic work is rescheduled.
 */
static void ab8500_fg_algorithm_calibrate(struct ab8500_fg *di)
{
	int ret;

	switch (di->calib_state) {
	case AB8500_FG_CALIB_INIT:
		dev_dbg(di->dev, "Calibration ongoing...\n");

		/* Average the internal calibration over 8 samples */
		ret = abx500_mask_and_set_register_interruptible(di->dev,
			AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
			CC_INT_CAL_N_AVG_MASK, CC_INT_CAL_SAMPLES_8);
		if (ret < 0)
			goto err;

		/* Start the internal offset calibration */
		ret = abx500_mask_and_set_register_interruptible(di->dev,
			AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
			CC_INTAVGOFFSET_ENA, CC_INTAVGOFFSET_ENA);
		if (ret < 0)
			goto err;
		di->calib_state = AB8500_FG_CALIB_WAIT;
		break;
	case AB8500_FG_CALIB_END:
		ret = abx500_mask_and_set_register_interruptible(di->dev,
			AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
			CC_MUXOFFSET, CC_MUXOFFSET);
		if (ret < 0)
			goto err;
		di->flags.calibrate = false;
		dev_dbg(di->dev, "Calibration done...\n");
		queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
		break;
	case AB8500_FG_CALIB_WAIT:
		dev_dbg(di->dev, "Calibration WFI\n");
		/* fall through - nothing to do until the calib IRQ fires */
	default:
		break;
	}
	return;
err:
	/* Something went wrong, don't calibrate then */
	dev_err(di->dev, "failed to calibrate the CC\n");
	di->flags.calibrate = false;
	di->calib_state = AB8500_FG_CALIB_INIT;
	queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
}
1595
1596/**
1597 * ab8500_fg_algorithm() - Entry point for the FG algorithm
1598 * @di: pointer to the ab8500_fg structure
1599 *
1600 * Entry point for the battery capacity calculation state machine
1601 */
1602static void ab8500_fg_algorithm(struct ab8500_fg *di)
1603{
1604 if (di->flags.calibrate)
1605 ab8500_fg_algorithm_calibrate(di);
1606 else {
1607 if (di->flags.charging)
1608 ab8500_fg_algorithm_charging(di);
1609 else
1610 ab8500_fg_algorithm_discharging(di);
1611 }
1612
1613 dev_dbg(di->dev, "[FG_DATA] %d %d %d %d %d %d %d %d %d "
1614 "%d %d %d %d %d %d %d\n",
1615 di->bat_cap.max_mah_design,
1616 di->bat_cap.mah,
1617 di->bat_cap.permille,
1618 di->bat_cap.level,
1619 di->bat_cap.prev_mah,
1620 di->bat_cap.prev_percent,
1621 di->bat_cap.prev_level,
1622 di->vbat,
1623 di->inst_curr,
1624 di->avg_curr,
1625 di->accu_charge,
1626 di->flags.charging,
1627 di->charge_state,
1628 di->discharge_state,
1629 di->high_curr_mode,
1630 di->recovery_needed);
1631}
1632
1633/**
1634 * ab8500_fg_periodic_work() - Run the FG state machine periodically
1635 * @work: pointer to the work_struct structure
1636 *
1637 * Work queue function for periodic work
1638 */
1639static void ab8500_fg_periodic_work(struct work_struct *work)
1640{
1641 struct ab8500_fg *di = container_of(work, struct ab8500_fg,
1642 fg_periodic_work.work);
1643
1644 if (di->init_capacity) {
1645 /* A dummy read that will return 0 */
1646 di->inst_curr = ab8500_fg_inst_curr_blocking(di);
1647 /* Get an initial capacity calculation */
1648 ab8500_fg_calc_cap_discharge_voltage(di, true);
1649 ab8500_fg_check_capacity_limits(di, true);
1650 di->init_capacity = false;
1651
1652 queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
1653 } else if (di->flags.user_cap) {
1654 if (check_sysfs_capacity(di)) {
1655 ab8500_fg_check_capacity_limits(di, true);
1656 if (di->flags.charging)
1657 ab8500_fg_charge_state_to(di,
1658 AB8500_FG_CHARGE_INIT);
1659 else
1660 ab8500_fg_discharge_state_to(di,
1661 AB8500_FG_DISCHARGE_READOUT_INIT);
1662 }
1663 di->flags.user_cap = false;
1664 queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
1665 } else
1666 ab8500_fg_algorithm(di);
1667
1668}
1669
1670/**
1671 * ab8500_fg_check_hw_failure_work() - Check OVV_BAT condition
1672 * @work: pointer to the work_struct structure
1673 *
1674 * Work queue function for checking the OVV_BAT condition
1675 */
1676static void ab8500_fg_check_hw_failure_work(struct work_struct *work)
1677{
1678 int ret;
1679 u8 reg_value;
1680
1681 struct ab8500_fg *di = container_of(work, struct ab8500_fg,
1682 fg_check_hw_failure_work.work);
1683
1684 /*
1685 * If we have had a battery over-voltage situation,
1686 * check ovv-bit to see if it should be reset.
1687 */
1688 if (di->flags.bat_ovv) {
1689 ret = abx500_get_register_interruptible(di->dev,
1690 AB8500_CHARGER, AB8500_CH_STAT_REG,
1691 &reg_value);
1692 if (ret < 0) {
1693 dev_err(di->dev, "%s ab8500 read failed\n", __func__);
1694 return;
1695 }
1696 if ((reg_value & BATT_OVV) != BATT_OVV) {
1697 dev_dbg(di->dev, "Battery recovered from OVV\n");
1698 di->flags.bat_ovv = false;
1699 power_supply_changed(&di->fg_psy);
1700 return;
1701 }
1702
1703 /* Not yet recovered from ovv, reschedule this test */
1704 queue_delayed_work(di->fg_wq, &di->fg_check_hw_failure_work,
1705 round_jiffies(HZ));
1706 }
1707}
1708
1709/**
1710 * ab8500_fg_low_bat_work() - Check LOW_BAT condition
1711 * @work: pointer to the work_struct structure
1712 *
1713 * Work queue function for checking the LOW_BAT condition
1714 */
1715static void ab8500_fg_low_bat_work(struct work_struct *work)
1716{
1717 int vbat;
1718
1719 struct ab8500_fg *di = container_of(work, struct ab8500_fg,
1720 fg_low_bat_work.work);
1721
1722 vbat = ab8500_fg_bat_voltage(di);
1723
1724 /* Check if LOW_BAT still fulfilled */
1725 if (vbat < di->bat->fg_params->lowbat_threshold) {
1726 di->flags.low_bat = true;
1727 dev_warn(di->dev, "Battery voltage still LOW\n");
1728
1729 /*
1730 * We need to re-schedule this check to be able to detect
1731 * if the voltage increases again during charging
1732 */
1733 queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
1734 round_jiffies(LOW_BAT_CHECK_INTERVAL));
1735 } else {
1736 di->flags.low_bat = false;
1737 dev_warn(di->dev, "Battery voltage OK again\n");
1738 }
1739
1740 /* This is needed to dispatch LOW_BAT */
1741 ab8500_fg_check_capacity_limits(di, false);
1742
1743 /* Set this flag to check if LOW_BAT IRQ still occurs */
1744 di->flags.low_bat_delay = false;
1745}
1746
1747/**
1748 * ab8500_fg_battok_calc - calculate the bit pattern corresponding
1749 * to the target voltage.
1750 * @di: pointer to the ab8500_fg structure
1751 * @target target voltage
1752 *
1753 * Returns bit pattern closest to the target voltage
1754 * valid return values are 0-14. (0-BATT_OK_MAX_NR_INCREMENTS)
1755 */
1756
1757static int ab8500_fg_battok_calc(struct ab8500_fg *di, int target)
1758{
1759 if (target > BATT_OK_MIN +
1760 (BATT_OK_INCREMENT * BATT_OK_MAX_NR_INCREMENTS))
1761 return BATT_OK_MAX_NR_INCREMENTS;
1762 if (target < BATT_OK_MIN)
1763 return 0;
1764 return (target - BATT_OK_MIN) / BATT_OK_INCREMENT;
1765}
1766
1767/**
1768 * ab8500_fg_battok_init_hw_register - init battok levels
1769 * @di: pointer to the ab8500_fg structure
1770 *
1771 */
1772
1773static int ab8500_fg_battok_init_hw_register(struct ab8500_fg *di)
1774{
1775 int selected;
1776 int sel0;
1777 int sel1;
1778 int cbp_sel0;
1779 int cbp_sel1;
1780 int ret;
1781 int new_val;
1782
1783 sel0 = di->bat->fg_params->battok_falling_th_sel0;
1784 sel1 = di->bat->fg_params->battok_raising_th_sel1;
1785
1786 cbp_sel0 = ab8500_fg_battok_calc(di, sel0);
1787 cbp_sel1 = ab8500_fg_battok_calc(di, sel1);
1788
1789 selected = BATT_OK_MIN + cbp_sel0 * BATT_OK_INCREMENT;
1790
1791 if (selected != sel0)
1792 dev_warn(di->dev, "Invalid voltage step:%d, using %d %d\n",
1793 sel0, selected, cbp_sel0);
1794
1795 selected = BATT_OK_MIN + cbp_sel1 * BATT_OK_INCREMENT;
1796
1797 if (selected != sel1)
1798 dev_warn(di->dev, "Invalid voltage step:%d, using %d %d\n",
1799 sel1, selected, cbp_sel1);
1800
1801 new_val = cbp_sel0 | (cbp_sel1 << 4);
1802
1803 dev_dbg(di->dev, "using: %x %d %d\n", new_val, cbp_sel0, cbp_sel1);
1804 ret = abx500_set_register_interruptible(di->dev, AB8500_SYS_CTRL2_BLOCK,
1805 AB8500_BATT_OK_REG, new_val);
1806 return ret;
1807}
1808
/**
 * ab8500_fg_instant_work() - Run the FG state machine instantly
 * @work:	pointer to the work_struct structure
 *
 * Work queue function for instant work; runs one iteration of the FG
 * algorithm outside the periodic schedule (e.g. on charge state changes).
 */
static void ab8500_fg_instant_work(struct work_struct *work)
{
	struct ab8500_fg *di = container_of(work, struct ab8500_fg, fg_work);

	ab8500_fg_algorithm(di);
}
1821
/**
 * ab8500_fg_cc_data_end_handler() - isr to get battery avg current.
 * @irq:	interrupt number
 * @_di:	pointer to the ab8500_fg structure
 *
 * Signals completion of a CC data conversion (CCEOC) to the waiter in
 * the instant-current read path.
 *
 * Returns IRQ status(IRQ_HANDLED)
 */
static irqreturn_t ab8500_fg_cc_data_end_handler(int irq, void *_di)
{
	struct ab8500_fg *di = _di;
	complete(&di->ab8500_fg_complete);
	return IRQ_HANDLED;
}
1835
/**
 * ab8500_fg_cc_int_calib_handler() - isr for CC internal calibration done.
 * @irq:	interrupt number
 * @_di:	pointer to the ab8500_fg structure
 *
 * Moves the calibration state machine to END and kicks the periodic work
 * so ab8500_fg_algorithm_calibrate() can commit the result.
 * (Kernel-doc previously named the wrong function.)
 *
 * Returns IRQ status(IRQ_HANDLED)
 */
static irqreturn_t ab8500_fg_cc_int_calib_handler(int irq, void *_di)
{
	struct ab8500_fg *di = _di;
	di->calib_state = AB8500_FG_CALIB_END;
	queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
	return IRQ_HANDLED;
}
1850
/**
 * ab8500_fg_cc_convend_handler() - isr to get battery avg current.
 * @irq:	interrupt number
 * @_di:	pointer to the ab8500_fg structure
 *
 * Defers processing of the accumulated-samples (NCONV_ACCU) interrupt to
 * the fg_acc_cur_work work item on the driver's workqueue.
 *
 * Returns IRQ status(IRQ_HANDLED)
 */
static irqreturn_t ab8500_fg_cc_convend_handler(int irq, void *_di)
{
	struct ab8500_fg *di = _di;

	queue_work(di->fg_wq, &di->fg_acc_cur_work);

	return IRQ_HANDLED;
}
1866
/**
 * ab8500_fg_batt_ovv_handler() - Battery OVV occurred
 * @irq:	interrupt number
 * @_di:	pointer to the ab8500_fg structure
 *
 * Flags the over-voltage condition, notifies the power supply framework
 * and schedules the HW failure work that polls for recovery.
 *
 * Returns IRQ status(IRQ_HANDLED)
 */
static irqreturn_t ab8500_fg_batt_ovv_handler(int irq, void *_di)
{
	struct ab8500_fg *di = _di;

	dev_dbg(di->dev, "Battery OVV\n");
	di->flags.bat_ovv = true;
	power_supply_changed(&di->fg_psy);

	/* Schedule a new HW failure check */
	queue_delayed_work(di->fg_wq, &di->fg_check_hw_failure_work, 0);

	return IRQ_HANDLED;
}
1887
1888/**
1889 * ab8500_fg_lowbatf_handler() - Battery voltage is below LOW threshold
1890 * @irq: interrupt number
1891 * @_di: pointer to the ab8500_fg structure
1892 *
1893 * Returns IRQ status(IRQ_HANDLED)
1894 */
1895static irqreturn_t ab8500_fg_lowbatf_handler(int irq, void *_di)
1896{
1897 struct ab8500_fg *di = _di;
1898
1899 if (!di->flags.low_bat_delay) {
1900 dev_warn(di->dev, "Battery voltage is below LOW threshold\n");
1901 di->flags.low_bat_delay = true;
1902 /*
1903 * Start a timer to check LOW_BAT again after some time
1904 * This is done to avoid shutdown on single voltage dips
1905 */
1906 queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
1907 round_jiffies(LOW_BAT_CHECK_INTERVAL));
1908 }
1909 return IRQ_HANDLED;
1910}
1911
/**
 * ab8500_fg_get_property() - get the fg properties
 * @psy:	pointer to the power_supply structure
 * @psp:	pointer to the power_supply_property structure
 * @val:	pointer to the power_supply_propval union
 *
 * This function gets called when an application tries to get the
 * fg properties by reading the sysfs files.
 * voltage_now:		battery voltage
 * current_now:		battery instant current
 * current_avg:		battery average current
 * charge_full_design:	capacity where battery is considered full
 * charge_now:		battery capacity in nAh
 * capacity:		capacity in percent
 * capacity_level:	capacity level
 *
 * Returns error code in case of failure else 0 on success
 */
static int ab8500_fg_get_property(struct power_supply *psy,
	enum power_supply_property psp,
	union power_supply_propval *val)
{
	struct ab8500_fg *di;

	di = to_ab8500_fg_device_info(psy);

	/*
	 * If battery is identified as unknown and charging of unknown
	 * batteries is disabled, we always report 100% capacity and
	 * capacity level UNKNOWN, since we can't calculate
	 * remaining capacity
	 */

	switch (psp) {
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		/* Report the OVV threshold while in over-voltage */
		if (di->flags.bat_ovv)
			val->intval = BATT_OVV_VALUE * 1000;
		else
			val->intval = di->vbat * 1000;
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		/* mA -> uA, as required by the power supply class */
		val->intval = di->inst_curr * 1000;
		break;
	case POWER_SUPPLY_PROP_CURRENT_AVG:
		val->intval = di->avg_curr * 1000;
		break;
	case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
		val->intval = ab8500_fg_convert_mah_to_uwh(di,
			di->bat_cap.max_mah_design);
		break;
	case POWER_SUPPLY_PROP_ENERGY_FULL:
		val->intval = ab8500_fg_convert_mah_to_uwh(di,
			di->bat_cap.max_mah);
		break;
	case POWER_SUPPLY_PROP_ENERGY_NOW:
		/* Unknown battery: fall back to the full capacity */
		if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
			di->flags.batt_id_received)
			val->intval = ab8500_fg_convert_mah_to_uwh(di,
				di->bat_cap.max_mah);
		else
			val->intval = ab8500_fg_convert_mah_to_uwh(di,
				di->bat_cap.prev_mah);
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
		val->intval = di->bat_cap.max_mah_design;
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL:
		val->intval = di->bat_cap.max_mah;
		break;
	case POWER_SUPPLY_PROP_CHARGE_NOW:
		if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
			di->flags.batt_id_received)
			val->intval = di->bat_cap.max_mah;
		else
			val->intval = di->bat_cap.prev_mah;
		break;
	case POWER_SUPPLY_PROP_CAPACITY:
		if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
			di->flags.batt_id_received)
			val->intval = 100;
		else
			val->intval = di->bat_cap.prev_percent;
		break;
	case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
		if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
			di->flags.batt_id_received)
			val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
		else
			val->intval = di->bat_cap.prev_level;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
2007
2008static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
2009{
2010 struct power_supply *psy;
2011 struct power_supply *ext;
2012 struct ab8500_fg *di;
2013 union power_supply_propval ret;
2014 int i, j;
2015 bool psy_found = false;
2016
2017 psy = (struct power_supply *)data;
2018 ext = dev_get_drvdata(dev);
2019 di = to_ab8500_fg_device_info(psy);
2020
2021 /*
2022 * For all psy where the name of your driver
2023 * appears in any supplied_to
2024 */
2025 for (i = 0; i < ext->num_supplicants; i++) {
2026 if (!strcmp(ext->supplied_to[i], psy->name))
2027 psy_found = true;
2028 }
2029
2030 if (!psy_found)
2031 return 0;
2032
2033 /* Go through all properties for the psy */
2034 for (j = 0; j < ext->num_properties; j++) {
2035 enum power_supply_property prop;
2036 prop = ext->properties[j];
2037
2038 if (ext->get_property(ext, prop, &ret))
2039 continue;
2040
2041 switch (prop) {
2042 case POWER_SUPPLY_PROP_STATUS:
2043 switch (ext->type) {
2044 case POWER_SUPPLY_TYPE_BATTERY:
2045 switch (ret.intval) {
2046 case POWER_SUPPLY_STATUS_UNKNOWN:
2047 case POWER_SUPPLY_STATUS_DISCHARGING:
2048 case POWER_SUPPLY_STATUS_NOT_CHARGING:
2049 if (!di->flags.charging)
2050 break;
2051 di->flags.charging = false;
2052 di->flags.fully_charged = false;
2053 queue_work(di->fg_wq, &di->fg_work);
2054 break;
2055 case POWER_SUPPLY_STATUS_FULL:
2056 if (di->flags.fully_charged)
2057 break;
2058 di->flags.fully_charged = true;
2059 di->flags.force_full = true;
2060 /* Save current capacity as maximum */
2061 di->bat_cap.max_mah = di->bat_cap.mah;
2062 queue_work(di->fg_wq, &di->fg_work);
2063 break;
2064 case POWER_SUPPLY_STATUS_CHARGING:
2065 if (di->flags.charging)
2066 break;
2067 di->flags.charging = true;
2068 di->flags.fully_charged = false;
2069 queue_work(di->fg_wq, &di->fg_work);
2070 break;
2071 };
2072 default:
2073 break;
2074 };
2075 break;
2076 case POWER_SUPPLY_PROP_TECHNOLOGY:
2077 switch (ext->type) {
2078 case POWER_SUPPLY_TYPE_BATTERY:
2079 if (!di->flags.batt_id_received) {
2080 const struct abx500_battery_type *b;
2081
2082 b = &(di->bat->bat_type[di->bat->batt_id]);
2083
2084 di->flags.batt_id_received = true;
2085
2086 di->bat_cap.max_mah_design =
2087 MILLI_TO_MICRO *
2088 b->charge_full_design;
2089
2090 di->bat_cap.max_mah =
2091 di->bat_cap.max_mah_design;
2092
2093 di->vbat_nom = b->nominal_voltage;
2094 }
2095
2096 if (ret.intval)
2097 di->flags.batt_unknown = false;
2098 else
2099 di->flags.batt_unknown = true;
2100 break;
2101 default:
2102 break;
2103 }
2104 break;
2105 case POWER_SUPPLY_PROP_TEMP:
2106 switch (ext->type) {
2107 case POWER_SUPPLY_TYPE_BATTERY:
2108 if (di->flags.batt_id_received)
2109 di->bat_temp = ret.intval;
2110 break;
2111 default:
2112 break;
2113 }
2114 break;
2115 default:
2116 break;
2117 }
2118 }
2119 return 0;
2120}
2121
/**
 * ab8500_fg_init_hw_registers() - Set up FG related registers
 * @di:		pointer to the ab8500_fg structure
 *
 * Set up battery OVV, low battery voltage registers and the BattOk
 * thresholds. Returns 0 on success or a negative error code from the
 * first register write that fails.
 */
static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
{
	int ret;

	/* Set VBAT OVV threshold */
	ret = abx500_mask_and_set_register_interruptible(di->dev,
		AB8500_CHARGER,
		AB8500_BATT_OVV,
		BATT_OVV_TH_4P75,
		BATT_OVV_TH_4P75);
	if (ret) {
		dev_err(di->dev, "failed to set BATT_OVV\n");
		goto out;
	}

	/* Enable VBAT OVV detection */
	ret = abx500_mask_and_set_register_interruptible(di->dev,
		AB8500_CHARGER,
		AB8500_BATT_OVV,
		BATT_OVV_ENA,
		BATT_OVV_ENA);
	if (ret) {
		dev_err(di->dev, "failed to enable BATT_OVV\n");
		goto out;
	}

	/* Low Battery Voltage: threshold code in bits 7:1, enable in bit 0 */
	ret = abx500_set_register_interruptible(di->dev,
		AB8500_SYS_CTRL2_BLOCK,
		AB8500_LOW_BAT_REG,
		ab8500_volt_to_regval(
			di->bat->fg_params->lowbat_threshold) << 1 |
		LOW_BAT_ENABLE);
	if (ret) {
		dev_err(di->dev, "%s write failed\n", __func__);
		goto out;
	}

	/* Battery OK threshold */
	ret = ab8500_fg_battok_init_hw_register(di);
	if (ret) {
		dev_err(di->dev, "BattOk init write failed.\n");
		goto out;
	}
out:
	return ret;
}
2175
/**
 * ab8500_fg_external_power_changed() - callback for power supply changes
 * @psy:	pointer to the structure power_supply
 *
 * This function is the entry point of the pointer external_power_changed
 * of the structure power_supply.
 * This function gets executed when there is a change in any external power
 * supply that this driver needs to be notified of. It iterates over all
 * registered power supplies and pulls their data via
 * ab8500_fg_get_ext_psy_data().
 */
static void ab8500_fg_external_power_changed(struct power_supply *psy)
{
	struct ab8500_fg *di = to_ab8500_fg_device_info(psy);

	class_for_each_device(power_supply_class, NULL,
		&di->fg_psy, ab8500_fg_get_ext_psy_data);
}
2192
2193/**
2194 * abab8500_fg_reinit_work() - work to reset the FG algorithm
2195 * @work: pointer to the work_struct structure
2196 *
2197 * Used to reset the current battery capacity to be able to
2198 * retrigger a new voltage base capacity calculation. For
2199 * test and verification purpose.
2200 */
2201static void ab8500_fg_reinit_work(struct work_struct *work)
2202{
2203 struct ab8500_fg *di = container_of(work, struct ab8500_fg,
2204 fg_reinit_work.work);
2205
2206 if (di->flags.calibrate == false) {
2207 dev_dbg(di->dev, "Resetting FG state machine to init.\n");
2208 ab8500_fg_clear_cap_samples(di);
2209 ab8500_fg_calc_cap_discharge_voltage(di, true);
2210 ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
2211 ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT);
2212 queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
2213
2214 } else {
2215 dev_err(di->dev, "Residual offset calibration ongoing "
2216 "retrying..\n");
2217 /* Wait one second until next try*/
2218 queue_delayed_work(di->fg_wq, &di->fg_reinit_work,
2219 round_jiffies(1));
2220 }
2221}
2222
/**
 * ab8500_fg_reinit() - forces FG algorithm to reinitialize with current values
 *
 * This function can be used to force the FG algorithm to recalculate a new
 * voltage based battery capacity. Safe to call before the driver is
 * probed; it is silently a no-op if no fg instance exists yet.
 */
void ab8500_fg_reinit(void)
{
	struct ab8500_fg *di = ab8500_fg_get();
	/* User won't be notified if a null pointer returned. */
	if (di != NULL)
		queue_delayed_work(di->fg_wq, &di->fg_reinit_work, 0);
}
2236
/* Exposure to the sysfs interface */

/*
 * One sysfs attribute plus its accessors, dispatched through
 * ab8500_fg_show()/ab8500_fg_store() below. A NULL show/store makes the
 * corresponding operation return -EIO.
 */
struct ab8500_fg_sysfs_entry {
	struct attribute attr;		/* sysfs attribute (name, mode) */
	ssize_t (*show)(struct ab8500_fg *, char *);
	ssize_t (*store)(struct ab8500_fg *, const char *, size_t);
};
2244
/* Show handler for the "charge_full" attribute: max capacity in mAh */
static ssize_t charge_full_show(struct ab8500_fg *di, char *buf)
{
	return sprintf(buf, "%d\n", di->bat_cap.max_mah);
}
2249
2250static ssize_t charge_full_store(struct ab8500_fg *di, const char *buf,
2251 size_t count)
2252{
2253 unsigned long charge_full;
2254 ssize_t ret = -EINVAL;
2255
2256 ret = strict_strtoul(buf, 10, &charge_full);
2257
2258 dev_dbg(di->dev, "Ret %zd charge_full %lu", ret, charge_full);
2259
2260 if (!ret) {
2261 di->bat_cap.max_mah = (int) charge_full;
2262 ret = count;
2263 }
2264 return ret;
2265}
2266
/* Show handler for the "charge_now" attribute: last reported capacity */
static ssize_t charge_now_show(struct ab8500_fg *di, char *buf)
{
	return sprintf(buf, "%d\n", di->bat_cap.prev_mah);
}
2271
2272static ssize_t charge_now_store(struct ab8500_fg *di, const char *buf,
2273 size_t count)
2274{
2275 unsigned long charge_now;
2276 ssize_t ret;
2277
2278 ret = strict_strtoul(buf, 10, &charge_now);
2279
2280 dev_dbg(di->dev, "Ret %zd charge_now %lu was %d",
2281 ret, charge_now, di->bat_cap.prev_mah);
2282
2283 if (!ret) {
2284 di->bat_cap.user_mah = (int) charge_now;
2285 di->flags.user_cap = true;
2286 ret = count;
2287 queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
2288 }
2289 return ret;
2290}
2291
/* sysfs attributes exposed under the "battery" kobject (mode 0644) */
static struct ab8500_fg_sysfs_entry charge_full_attr =
	__ATTR(charge_full, 0644, charge_full_show, charge_full_store);

static struct ab8500_fg_sysfs_entry charge_now_attr =
	__ATTR(charge_now, 0644, charge_now_show, charge_now_store);
2297
2298static ssize_t
2299ab8500_fg_show(struct kobject *kobj, struct attribute *attr, char *buf)
2300{
2301 struct ab8500_fg_sysfs_entry *entry;
2302 struct ab8500_fg *di;
2303
2304 entry = container_of(attr, struct ab8500_fg_sysfs_entry, attr);
2305 di = container_of(kobj, struct ab8500_fg, fg_kobject);
2306
2307 if (!entry->show)
2308 return -EIO;
2309
2310 return entry->show(di, buf);
2311}
2312static ssize_t
2313ab8500_fg_store(struct kobject *kobj, struct attribute *attr, const char *buf,
2314 size_t count)
2315{
2316 struct ab8500_fg_sysfs_entry *entry;
2317 struct ab8500_fg *di;
2318
2319 entry = container_of(attr, struct ab8500_fg_sysfs_entry, attr);
2320 di = container_of(kobj, struct ab8500_fg, fg_kobject);
2321
2322 if (!entry->store)
2323 return -EIO;
2324
2325 return entry->store(di, buf, count);
2326}
2327
/* Dispatch table routing sysfs reads/writes through the generic handlers */
static const struct sysfs_ops ab8500_fg_sysfs_ops = {
	.show = ab8500_fg_show,
	.store = ab8500_fg_store,
};

/* Attributes created under the fg kobject; NULL-terminated */
static struct attribute *ab8500_fg_attrs[] = {
	&charge_full_attr.attr,
	&charge_now_attr.attr,
	NULL,
};

/* kobject type for the "battery" sysfs directory */
static struct kobj_type ab8500_fg_ktype = {
	.sysfs_ops = &ab8500_fg_sysfs_ops,
	.default_attrs = ab8500_fg_attrs,
};
2343
/**
 * ab8500_fg_sysfs_exit() - de-init of sysfs entry
 * @di:		pointer to the struct ab8500_fg
 *
 * This function removes the entry in sysfs.
 * (Kernel-doc previously carried the chargalg function name.)
 */
static void ab8500_fg_sysfs_exit(struct ab8500_fg *di)
{
	kobject_del(&di->fg_kobject);
}
2354
2355/**
2356 * ab8500_chargalg_sysfs_init() - init of sysfs entry
2357 * @di: pointer to the struct ab8500_chargalg
2358 *
2359 * This function adds an entry in sysfs.
2360 * Returns error code in case of failure else 0(on success)
2361 */
2362static int ab8500_fg_sysfs_init(struct ab8500_fg *di)
2363{
2364 int ret = 0;
2365
2366 ret = kobject_init_and_add(&di->fg_kobject,
2367 &ab8500_fg_ktype,
2368 NULL, "battery");
2369 if (ret < 0)
2370 dev_err(di->dev, "failed to create sysfs entry\n");
2371
2372 return ret;
2373}
2374/* Exposure to the sysfs interface <<END>> */
2375
2376#if defined(CONFIG_PM)
2377static int ab8500_fg_resume(struct platform_device *pdev)
2378{
2379 struct ab8500_fg *di = platform_get_drvdata(pdev);
2380
2381 /*
2382 * Change state if we're not charging. If we're charging we will wake
2383 * up on the FG IRQ
2384 */
2385 if (!di->flags.charging) {
2386 ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_WAKEUP);
2387 queue_work(di->fg_wq, &di->fg_work);
2388 }
2389
2390 return 0;
2391}
2392
/*
 * ab8500_fg_suspend() - platform suspend hook
 *
 * Flushes any pending periodic work, then disables the coulomb counter
 * when it is enabled and the battery is not charging (while charging it
 * is left running so charge accumulation continues across suspend).
 */
static int ab8500_fg_suspend(struct platform_device *pdev,
	pm_message_t state)
{
	struct ab8500_fg *di = platform_get_drvdata(pdev);

	flush_delayed_work(&di->fg_periodic_work);

	/*
	 * If the FG is enabled we will disable it before going to suspend
	 * only if we're not charging
	 */
	if (di->flags.fg_enabled && !di->flags.charging)
		ab8500_fg_coulomb_counter(di, false);

	return 0;
}
2409#else
2410#define ab8500_fg_suspend NULL
2411#define ab8500_fg_resume NULL
2412#endif
2413
/*
 * ab8500_fg_remove() - driver removal hook
 *
 * Unlinks the instance from the global list, disables the coulomb
 * counter, tears down the workqueue and sysfs entry, and unregisters the
 * power supply before freeing the device data.
 */
static int __devexit ab8500_fg_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct ab8500_fg *di = platform_get_drvdata(pdev);

	list_del(&di->node);

	/* Disable coulomb counter */
	ret = ab8500_fg_coulomb_counter(di, false);
	if (ret)
		dev_err(di->dev, "failed to disable coulomb counter\n");

	destroy_workqueue(di->fg_wq);
	ab8500_fg_sysfs_exit(di);

	/* NOTE(review): flush_scheduled_work() flushes the system workqueue,
	 * not di->fg_wq (already destroyed above) — confirm intent */
	flush_scheduled_work();
	power_supply_unregister(&di->fg_psy);
	platform_set_drvdata(pdev, NULL);
	kfree(di);
	return ret;
}
2435
/*
 * ab8500 fg driver interrupts and their respective isr.
 * The name strings must match the IRQ resource names looked up with
 * platform_get_irq_byname() in probe.
 */
static struct ab8500_fg_interrupts ab8500_fg_irq[] = {
	{"NCONV_ACCU", ab8500_fg_cc_convend_handler},
	{"BATT_OVV", ab8500_fg_batt_ovv_handler},
	{"LOW_BAT_F", ab8500_fg_lowbatf_handler},
	{"CC_INT_CALIB", ab8500_fg_cc_int_calib_handler},
	{"CCEOC", ab8500_fg_cc_data_end_handler},
};
2444
2445static int __devinit ab8500_fg_probe(struct platform_device *pdev)
2446{
2447 int i, irq;
2448 int ret = 0;
2449 struct abx500_bm_plat_data *plat_data;
2450
2451 struct ab8500_fg *di =
2452 kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL);
2453 if (!di)
2454 return -ENOMEM;
2455
2456 mutex_init(&di->cc_lock);
2457
2458 /* get parent data */
2459 di->dev = &pdev->dev;
2460 di->parent = dev_get_drvdata(pdev->dev.parent);
2461 di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
2462
2463 /* get fg specific platform data */
2464 plat_data = pdev->dev.platform_data;
2465 di->pdata = plat_data->fg;
2466 if (!di->pdata) {
2467 dev_err(di->dev, "no fg platform data supplied\n");
2468 ret = -EINVAL;
2469 goto free_device_info;
2470 }
2471
2472 /* get battery specific platform data */
2473 di->bat = plat_data->battery;
2474 if (!di->bat) {
2475 dev_err(di->dev, "no battery platform data supplied\n");
2476 ret = -EINVAL;
2477 goto free_device_info;
2478 }
2479
2480 di->fg_psy.name = "ab8500_fg";
2481 di->fg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
2482 di->fg_psy.properties = ab8500_fg_props;
2483 di->fg_psy.num_properties = ARRAY_SIZE(ab8500_fg_props);
2484 di->fg_psy.get_property = ab8500_fg_get_property;
2485 di->fg_psy.supplied_to = di->pdata->supplied_to;
2486 di->fg_psy.num_supplicants = di->pdata->num_supplicants;
2487 di->fg_psy.external_power_changed = ab8500_fg_external_power_changed;
2488
2489 di->bat_cap.max_mah_design = MILLI_TO_MICRO *
2490 di->bat->bat_type[di->bat->batt_id].charge_full_design;
2491
2492 di->bat_cap.max_mah = di->bat_cap.max_mah_design;
2493
2494 di->vbat_nom = di->bat->bat_type[di->bat->batt_id].nominal_voltage;
2495
2496 di->init_capacity = true;
2497
2498 ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT);
2499 ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT);
2500
2501 /* Create a work queue for running the FG algorithm */
2502 di->fg_wq = create_singlethread_workqueue("ab8500_fg_wq");
2503 if (di->fg_wq == NULL) {
2504 dev_err(di->dev, "failed to create work queue\n");
2505 goto free_device_info;
2506 }
2507
2508 /* Init work for running the fg algorithm instantly */
2509 INIT_WORK(&di->fg_work, ab8500_fg_instant_work);
2510
2511 /* Init work for getting the battery accumulated current */
2512 INIT_WORK(&di->fg_acc_cur_work, ab8500_fg_acc_cur_work);
2513
2514 /* Init work for reinitialising the fg algorithm */
2515 INIT_DELAYED_WORK_DEFERRABLE(&di->fg_reinit_work,
2516 ab8500_fg_reinit_work);
2517
2518 /* Work delayed Queue to run the state machine */
2519 INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work,
2520 ab8500_fg_periodic_work);
2521
2522 /* Work to check low battery condition */
2523 INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work,
2524 ab8500_fg_low_bat_work);
2525
2526 /* Init work for HW failure check */
2527 INIT_DELAYED_WORK_DEFERRABLE(&di->fg_check_hw_failure_work,
2528 ab8500_fg_check_hw_failure_work);
2529
2530 /* Initialize OVV, and other registers */
2531 ret = ab8500_fg_init_hw_registers(di);
2532 if (ret) {
2533 dev_err(di->dev, "failed to initialize registers\n");
2534 goto free_inst_curr_wq;
2535 }
2536
2537 /* Consider battery unknown until we're informed otherwise */
2538 di->flags.batt_unknown = true;
2539 di->flags.batt_id_received = false;
2540
2541 /* Register FG power supply class */
2542 ret = power_supply_register(di->dev, &di->fg_psy);
2543 if (ret) {
2544 dev_err(di->dev, "failed to register FG psy\n");
2545 goto free_inst_curr_wq;
2546 }
2547
2548 di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
2549 ab8500_fg_coulomb_counter(di, true);
2550
2551 /* Initialize completion used to notify completion of inst current */
2552 init_completion(&di->ab8500_fg_complete);
2553
2554 /* Register interrupts */
2555 for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq); i++) {
2556 irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
2557 ret = request_threaded_irq(irq, NULL, ab8500_fg_irq[i].isr,
2558 IRQF_SHARED | IRQF_NO_SUSPEND,
2559 ab8500_fg_irq[i].name, di);
2560
2561 if (ret != 0) {
2562 dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
2563 , ab8500_fg_irq[i].name, irq, ret);
2564 goto free_irq;
2565 }
2566 dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
2567 ab8500_fg_irq[i].name, irq, ret);
2568 }
2569 di->irq = platform_get_irq_byname(pdev, "CCEOC");
2570 disable_irq(di->irq);
2571
2572 platform_set_drvdata(pdev, di);
2573
2574 ret = ab8500_fg_sysfs_init(di);
2575 if (ret) {
2576 dev_err(di->dev, "failed to create sysfs entry\n");
2577 goto free_irq;
2578 }
2579
2580 /* Calibrate the fg first time */
2581 di->flags.calibrate = true;
2582 di->calib_state = AB8500_FG_CALIB_INIT;
2583
2584 /* Use room temp as default value until we get an update from driver. */
2585 di->bat_temp = 210;
2586
2587 /* Run the FG algorithm */
2588 queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
2589
2590 list_add_tail(&di->node, &ab8500_fg_list);
2591
2592 return ret;
2593
2594free_irq:
2595 power_supply_unregister(&di->fg_psy);
2596
2597 /* We also have to free all successfully registered irqs */
2598 for (i = i - 1; i >= 0; i--) {
2599 irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
2600 free_irq(irq, di);
2601 }
2602free_inst_curr_wq:
2603 destroy_workqueue(di->fg_wq);
2604free_device_info:
2605 kfree(di);
2606
2607 return ret;
2608}
2609
/*
 * Platform driver glue. The legacy suspend/resume callbacks resolve to
 * NULL when CONFIG_PM is not set (see the #else branch above).
 */
static struct platform_driver ab8500_fg_driver = {
	.probe = ab8500_fg_probe,
	.remove = __devexit_p(ab8500_fg_remove),
	.suspend = ab8500_fg_suspend,
	.resume = ab8500_fg_resume,
	.driver = {
		.name = "ab8500-fg",
		.owner = THIS_MODULE,
	},
};
2620
/* Module init: register the platform driver */
static int __init ab8500_fg_init(void)
{
	return platform_driver_register(&ab8500_fg_driver);
}
2625
/* Module exit: unregister the platform driver */
static void __exit ab8500_fg_exit(void)
{
	platform_driver_unregister(&ab8500_fg_driver);
}
2630
/*
 * Registered at subsys init level, i.e. earlier than ordinary module
 * initcalls — presumably so dependent drivers see the FG; confirm the
 * ordering requirement before changing to module_init().
 */
subsys_initcall_sync(ab8500_fg_init);
module_exit(ab8500_fg_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
MODULE_ALIAS("platform:ab8500-fg");
MODULE_DESCRIPTION("AB8500 Fuel Gauge driver");
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
new file mode 100644
index 000000000000..804b88c760d6
--- /dev/null
+++ b/drivers/power/abx500_chargalg.c
@@ -0,0 +1,1921 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2012
3 *
4 * Charging algorithm driver for abx500 variants
5 *
6 * License Terms: GNU General Public License v2
7 * Authors:
8 * Johan Palsson <johan.palsson@stericsson.com>
9 * Karl Komierowski <karl.komierowski@stericsson.com>
10 * Arun R Murthy <arun.murthy@stericsson.com>
11 */
12
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/device.h>
16#include <linux/interrupt.h>
17#include <linux/delay.h>
18#include <linux/slab.h>
19#include <linux/platform_device.h>
20#include <linux/power_supply.h>
21#include <linux/completion.h>
22#include <linux/workqueue.h>
23#include <linux/kobject.h>
24#include <linux/mfd/abx500.h>
25#include <linux/mfd/abx500/ux500_chargalg.h>
26#include <linux/mfd/abx500/ab8500-bm.h>
27
/* Watchdog kick interval */
#define CHG_WD_INTERVAL			(6 * HZ)

/* End-of-charge criteria counter */
#define EOC_COND_CNT			10

/* Recharge criteria counter */
#define RCH_COND_CNT			3

/*
 * Map a power_supply pointer back to its enclosing abx500_chargalg.
 * Fix: the expansion previously ended in a stray ';', which breaks
 * any use of the macro in expression context (e.g. as a function
 * argument or on the right-hand side of an assignment inside an
 * expression).
 */
#define to_abx500_chargalg_device_info(x) container_of((x), \
	struct abx500_chargalg, chargalg_psy)
39
/*
 * Connected charger types. AC_CHG (1) and USB_CHG (2) are distinct
 * bits: conn_chg/online_chg are tested with bitwise '&' elsewhere in
 * this file, so both may be set simultaneously.
 */
enum abx500_chargers {
	NO_CHG,
	AC_CHG,
	USB_CHG,
};
45
/*
 * Snapshot of charger connection and electrical state. The *_vset and
 * *_iset fields hold the output voltage/current last requested from the
 * respective charger; *_chg_ok reflects the voltage-limit check.
 */
struct abx500_chargalg_charger_info {
	enum abx500_chargers conn_chg;		/* charger(s) now connected */
	enum abx500_chargers prev_conn_chg;	/* connection at last check */
	enum abx500_chargers online_chg;
	enum abx500_chargers prev_online_chg;
	enum abx500_chargers charger_type;	/* charger selected to use */
	bool usb_chg_ok;	/* USB charger voltage within limit */
	bool ac_chg_ok;		/* AC charger voltage within limit */
	int usb_volt;
	int usb_curr;
	int ac_volt;
	int ac_curr;
	int usb_vset;	/* requested USB charger output voltage */
	int usb_iset;	/* requested USB charger output current */
	int ac_vset;	/* requested AC charger output voltage */
	int ac_iset;	/* requested AC charger output current */
};
63
/*
 * Charger suspension state. suspended_change flags that one of the
 * suspension bits changed since the last connection check.
 */
struct abx500_chargalg_suspension_status {
	bool suspended_change;
	bool ac_suspended;
	bool usb_suspended;
};
69
/* Latest battery measurements consumed by the charging algorithm */
struct abx500_chargalg_battery_data {
	int temp;	/* battery temperature */
	int volt;	/* battery voltage */
	int avg_curr;	/* average battery current */
	int inst_curr;	/* instantaneous battery current */
	int percent;	/* capacity in percent */
};
77
/*
 * States of the charging state machine. Every steady state is paired
 * with an *_INIT state in which entry actions are performed. The order
 * of this enum must match the states[] name table below.
 */
enum abx500_chargalg_states {
	STATE_HANDHELD_INIT,
	STATE_HANDHELD,
	STATE_CHG_NOT_OK_INIT,
	STATE_CHG_NOT_OK,
	STATE_HW_TEMP_PROTECT_INIT,
	STATE_HW_TEMP_PROTECT,
	STATE_NORMAL_INIT,
	STATE_NORMAL,
	STATE_WAIT_FOR_RECHARGE_INIT,
	STATE_WAIT_FOR_RECHARGE,
	STATE_MAINTENANCE_A_INIT,
	STATE_MAINTENANCE_A,
	STATE_MAINTENANCE_B_INIT,
	STATE_MAINTENANCE_B,
	STATE_TEMP_UNDEROVER_INIT,
	STATE_TEMP_UNDEROVER,
	STATE_TEMP_LOWHIGH_INIT,
	STATE_TEMP_LOWHIGH,
	STATE_SUSPENDED_INIT,
	STATE_SUSPENDED,
	STATE_OVV_PROTECT_INIT,
	STATE_OVV_PROTECT,
	STATE_SAFETY_TIMER_EXPIRED_INIT,
	STATE_SAFETY_TIMER_EXPIRED,
	STATE_BATT_REMOVED_INIT,
	STATE_BATT_REMOVED,
	STATE_WD_EXPIRED_INIT,
	STATE_WD_EXPIRED,
};
108
/*
 * Printable state names, indexed by enum abx500_chargalg_states.
 * Keep strictly in sync with the enum above.
 */
static const char *states[] = {
	"HANDHELD_INIT",
	"HANDHELD",
	"CHG_NOT_OK_INIT",
	"CHG_NOT_OK",
	"HW_TEMP_PROTECT_INIT",
	"HW_TEMP_PROTECT",
	"NORMAL_INIT",
	"NORMAL",
	"WAIT_FOR_RECHARGE_INIT",
	"WAIT_FOR_RECHARGE",
	"MAINTENANCE_A_INIT",
	"MAINTENANCE_A",
	"MAINTENANCE_B_INIT",
	"MAINTENANCE_B",
	"TEMP_UNDEROVER_INIT",
	"TEMP_UNDEROVER",
	"TEMP_LOWHIGH_INIT",
	"TEMP_LOWHIGH",
	"SUSPENDED_INIT",
	"SUSPENDED",
	"OVV_PROTECT_INIT",
	"OVV_PROTECT",
	"SAFETY_TIMER_EXPIRED_INIT",
	"SAFETY_TIMER_EXPIRED",
	"BATT_REMOVED_INIT",
	"BATT_REMOVED",
	"WD_EXPIRED_INIT",
	"WD_EXPIRED",
};
139
/*
 * Event flags set elsewhere in the driver (timer callbacks, temperature
 * and voltage checks, ...) and consumed by the charging state machine.
 */
struct abx500_chargalg_events {
	bool batt_unknown;
	bool mainextchnotok;
	bool batt_ovv;
	bool batt_rem;
	bool btemp_underover;
	bool btemp_lowhigh;
	bool main_thermal_prot;
	bool usb_thermal_prot;
	bool main_ovv;
	bool vbus_ovv;
	bool usbchargernotok;
	bool safety_timer_expired;
	bool maintenance_timer_expired;
	bool ac_wd_expired;
	bool usb_wd_expired;
	bool ac_cv_active;
	bool usb_cv_active;
	bool vbus_collapsed;
};
160
161/**
162 * struct abx500_charge_curr_maximization - Charger maximization parameters
163 * @original_iset: the non optimized/maximised charger current
164 * @current_iset: the charging current used at this moment
165 * @test_delta_i: the delta between the current we want to charge and the
166 current that is really going into the battery
167 * @condition_cnt: number of iterations needed before a new charger current
168 is set
169 * @max_current: maximum charger current
170 * @wait_cnt: to avoid too fast current step down in case of charger
171 * voltage collapse, we insert this delay between step
172 * down
173 * @level: tells in how many steps the charging current has been
174 increased
175 */
176struct abx500_charge_curr_maximization {
177 int original_iset;
178 int current_iset;
179 int test_delta_i;
180 int condition_cnt;
181 int max_current;
182 int wait_cnt;
183 u8 level;
184};
185
/* Result of one charge-current maximization iteration */
enum maxim_ret {
	MAXIM_RET_NOACTION,	/* keep the present charger current */
	MAXIM_RET_CHANGE,	/* apply the new ccm.current_iset */
	MAXIM_RET_IBAT_TOO_HIGH, /* fall back to the normal current level */
};
191
192/**
193 * struct abx500_chargalg - abx500 Charging algorithm device information
194 * @dev: pointer to the structure device
195 * @charge_status: battery operating status
196 * @eoc_cnt: counter used to determine end-of_charge
197 * @rch_cnt: counter used to determine start of recharge
198 * @maintenance_chg: indicate if maintenance charge is active
199 * @t_hyst_norm temperature hysteresis when the temperature has been
200 * over or under normal limits
201 * @t_hyst_lowhigh temperature hysteresis when the temperature has been
202 * over or under the high or low limits
203 * @charge_state: current state of the charging algorithm
204 * @ccm charging current maximization parameters
205 * @chg_info: information about connected charger types
206 * @batt_data: data of the battery
207 * @susp_status: current charger suspension status
208 * @pdata: pointer to the abx500_chargalg platform data
209 * @bat: pointer to the abx500_bm platform data
210 * @chargalg_psy: structure that holds the battery properties exposed by
211 * the charging algorithm
212 * @events: structure for information about events triggered
213 * @chargalg_wq: work queue for running the charging algorithm
214 * @chargalg_periodic_work: work to run the charging algorithm periodically
215 * @chargalg_wd_work: work to kick the charger watchdog periodically
216 * @chargalg_work: work to run the charging algorithm instantly
217 * @safety_timer: charging safety timer
218 * @maintenance_timer: maintenance charging timer
219 * @chargalg_kobject: structure of type kobject
220 */
221struct abx500_chargalg {
222 struct device *dev;
223 int charge_status;
224 int eoc_cnt;
225 int rch_cnt;
226 bool maintenance_chg;
227 int t_hyst_norm;
228 int t_hyst_lowhigh;
229 enum abx500_chargalg_states charge_state;
230 struct abx500_charge_curr_maximization ccm;
231 struct abx500_chargalg_charger_info chg_info;
232 struct abx500_chargalg_battery_data batt_data;
233 struct abx500_chargalg_suspension_status susp_status;
234 struct abx500_chargalg_platform_data *pdata;
235 struct abx500_bm_data *bat;
236 struct power_supply chargalg_psy;
237 struct ux500_charger *ac_chg;
238 struct ux500_charger *usb_chg;
239 struct abx500_chargalg_events events;
240 struct workqueue_struct *chargalg_wq;
241 struct delayed_work chargalg_periodic_work;
242 struct delayed_work chargalg_wd_work;
243 struct work_struct chargalg_work;
244 struct timer_list safety_timer;
245 struct timer_list maintenance_timer;
246 struct kobject chargalg_kobject;
247};
248
/* Main battery properties exposed through the power supply class */
static enum power_supply_property abx500_chargalg_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_HEALTH,
};
254
255/**
256 * abx500_chargalg_safety_timer_expired() - Expiration of the safety timer
257 * @data: pointer to the abx500_chargalg structure
258 *
259 * This function gets called when the safety timer for the charger
260 * expires
261 */
262static void abx500_chargalg_safety_timer_expired(unsigned long data)
263{
264 struct abx500_chargalg *di = (struct abx500_chargalg *) data;
265 dev_err(di->dev, "Safety timer expired\n");
266 di->events.safety_timer_expired = true;
267
268 /* Trigger execution of the algorithm instantly */
269 queue_work(di->chargalg_wq, &di->chargalg_work);
270}
271
272/**
273 * abx500_chargalg_maintenance_timer_expired() - Expiration of
274 * the maintenance timer
275 * @i: pointer to the abx500_chargalg structure
276 *
277 * This function gets called when the maintenence timer
278 * expires
279 */
280static void abx500_chargalg_maintenance_timer_expired(unsigned long data)
281{
282
283 struct abx500_chargalg *di = (struct abx500_chargalg *) data;
284 dev_dbg(di->dev, "Maintenance timer expired\n");
285 di->events.maintenance_timer_expired = true;
286
287 /* Trigger execution of the algorithm instantly */
288 queue_work(di->chargalg_wq, &di->chargalg_work);
289}
290
291/**
292 * abx500_chargalg_state_to() - Change charge state
293 * @di: pointer to the abx500_chargalg structure
294 *
295 * This function gets called when a charge state change should occur
296 */
297static void abx500_chargalg_state_to(struct abx500_chargalg *di,
298 enum abx500_chargalg_states state)
299{
300 dev_dbg(di->dev,
301 "State changed: %s (From state: [%d] %s =to=> [%d] %s )\n",
302 di->charge_state == state ? "NO" : "YES",
303 di->charge_state,
304 states[di->charge_state],
305 state,
306 states[state]);
307
308 di->charge_state = state;
309}
310
311/**
312 * abx500_chargalg_check_charger_connection() - Check charger connection change
313 * @di: pointer to the abx500_chargalg structure
314 *
315 * This function will check if there is a change in the charger connection
316 * and change charge state accordingly. AC has precedence over USB.
317 */
318static int abx500_chargalg_check_charger_connection(struct abx500_chargalg *di)
319{
320 if (di->chg_info.conn_chg != di->chg_info.prev_conn_chg ||
321 di->susp_status.suspended_change) {
322 /*
323 * Charger state changed or suspension
324 * has changed since last update
325 */
326 if ((di->chg_info.conn_chg & AC_CHG) &&
327 !di->susp_status.ac_suspended) {
328 dev_dbg(di->dev, "Charging source is AC\n");
329 if (di->chg_info.charger_type != AC_CHG) {
330 di->chg_info.charger_type = AC_CHG;
331 abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
332 }
333 } else if ((di->chg_info.conn_chg & USB_CHG) &&
334 !di->susp_status.usb_suspended) {
335 dev_dbg(di->dev, "Charging source is USB\n");
336 di->chg_info.charger_type = USB_CHG;
337 abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
338 } else if (di->chg_info.conn_chg &&
339 (di->susp_status.ac_suspended ||
340 di->susp_status.usb_suspended)) {
341 dev_dbg(di->dev, "Charging is suspended\n");
342 di->chg_info.charger_type = NO_CHG;
343 abx500_chargalg_state_to(di, STATE_SUSPENDED_INIT);
344 } else {
345 dev_dbg(di->dev, "Charging source is OFF\n");
346 di->chg_info.charger_type = NO_CHG;
347 abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
348 }
349 di->chg_info.prev_conn_chg = di->chg_info.conn_chg;
350 di->susp_status.suspended_change = false;
351 }
352 return di->chg_info.conn_chg;
353}
354
355/**
356 * abx500_chargalg_start_safety_timer() - Start charging safety timer
357 * @di: pointer to the abx500_chargalg structure
358 *
359 * The safety timer is used to avoid overcharging of old or bad batteries.
360 * There are different timers for AC and USB
361 */
362static void abx500_chargalg_start_safety_timer(struct abx500_chargalg *di)
363{
364 unsigned long timer_expiration = 0;
365
366 switch (di->chg_info.charger_type) {
367 case AC_CHG:
368 timer_expiration =
369 round_jiffies(jiffies +
370 (di->bat->main_safety_tmr_h * 3600 * HZ));
371 break;
372
373 case USB_CHG:
374 timer_expiration =
375 round_jiffies(jiffies +
376 (di->bat->usb_safety_tmr_h * 3600 * HZ));
377 break;
378
379 default:
380 dev_err(di->dev, "Unknown charger to charge from\n");
381 break;
382 }
383
384 di->events.safety_timer_expired = false;
385 di->safety_timer.expires = timer_expiration;
386 if (!timer_pending(&di->safety_timer))
387 add_timer(&di->safety_timer);
388 else
389 mod_timer(&di->safety_timer, timer_expiration);
390}
391
392/**
393 * abx500_chargalg_stop_safety_timer() - Stop charging safety timer
394 * @di: pointer to the abx500_chargalg structure
395 *
396 * The safety timer is stopped whenever the NORMAL state is exited
397 */
398static void abx500_chargalg_stop_safety_timer(struct abx500_chargalg *di)
399{
400 di->events.safety_timer_expired = false;
401 del_timer(&di->safety_timer);
402}
403
404/**
405 * abx500_chargalg_start_maintenance_timer() - Start charging maintenance timer
406 * @di: pointer to the abx500_chargalg structure
407 * @duration: duration of ther maintenance timer in hours
408 *
409 * The maintenance timer is used to maintain the charge in the battery once
410 * the battery is considered full. These timers are chosen to match the
411 * discharge curve of the battery
412 */
413static void abx500_chargalg_start_maintenance_timer(struct abx500_chargalg *di,
414 int duration)
415{
416 unsigned long timer_expiration;
417
418 /* Convert from hours to jiffies */
419 timer_expiration = round_jiffies(jiffies + (duration * 3600 * HZ));
420
421 di->events.maintenance_timer_expired = false;
422 di->maintenance_timer.expires = timer_expiration;
423 if (!timer_pending(&di->maintenance_timer))
424 add_timer(&di->maintenance_timer);
425 else
426 mod_timer(&di->maintenance_timer, timer_expiration);
427}
428
429/**
430 * abx500_chargalg_stop_maintenance_timer() - Stop maintenance timer
431 * @di: pointer to the abx500_chargalg structure
432 *
433 * The maintenance timer is stopped whenever maintenance ends or when another
434 * state is entered
435 */
436static void abx500_chargalg_stop_maintenance_timer(struct abx500_chargalg *di)
437{
438 di->events.maintenance_timer_expired = false;
439 del_timer(&di->maintenance_timer);
440}
441
442/**
443 * abx500_chargalg_kick_watchdog() - Kick charger watchdog
444 * @di: pointer to the abx500_chargalg structure
445 *
446 * The charger watchdog have to be kicked periodically whenever the charger is
447 * on, else the ABB will reset the system
448 */
449static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
450{
451 /* Check if charger exists and kick watchdog if charging */
452 if (di->ac_chg && di->ac_chg->ops.kick_wd &&
453 di->chg_info.online_chg & AC_CHG)
454 return di->ac_chg->ops.kick_wd(di->ac_chg);
455 else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
456 di->chg_info.online_chg & USB_CHG)
457 return di->usb_chg->ops.kick_wd(di->usb_chg);
458
459 return -ENXIO;
460}
461
462/**
463 * abx500_chargalg_ac_en() - Turn on/off the AC charger
464 * @di: pointer to the abx500_chargalg structure
465 * @enable: charger on/off
466 * @vset: requested charger output voltage
467 * @iset: requested charger output current
468 *
469 * The AC charger will be turned on/off with the requested charge voltage and
470 * current
471 */
472static int abx500_chargalg_ac_en(struct abx500_chargalg *di, int enable,
473 int vset, int iset)
474{
475 if (!di->ac_chg || !di->ac_chg->ops.enable)
476 return -ENXIO;
477
478 /* Select maximum of what both the charger and the battery supports */
479 if (di->ac_chg->max_out_volt)
480 vset = min(vset, di->ac_chg->max_out_volt);
481 if (di->ac_chg->max_out_curr)
482 iset = min(iset, di->ac_chg->max_out_curr);
483
484 di->chg_info.ac_iset = iset;
485 di->chg_info.ac_vset = vset;
486
487 return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
488}
489
490/**
491 * abx500_chargalg_usb_en() - Turn on/off the USB charger
492 * @di: pointer to the abx500_chargalg structure
493 * @enable: charger on/off
494 * @vset: requested charger output voltage
495 * @iset: requested charger output current
496 *
497 * The USB charger will be turned on/off with the requested charge voltage and
498 * current
499 */
500static int abx500_chargalg_usb_en(struct abx500_chargalg *di, int enable,
501 int vset, int iset)
502{
503 if (!di->usb_chg || !di->usb_chg->ops.enable)
504 return -ENXIO;
505
506 /* Select maximum of what both the charger and the battery supports */
507 if (di->usb_chg->max_out_volt)
508 vset = min(vset, di->usb_chg->max_out_volt);
509 if (di->usb_chg->max_out_curr)
510 iset = min(iset, di->usb_chg->max_out_curr);
511
512 di->chg_info.usb_iset = iset;
513 di->chg_info.usb_vset = vset;
514
515 return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
516}
517
518/**
519 * abx500_chargalg_update_chg_curr() - Update charger current
520 * @di: pointer to the abx500_chargalg structure
521 * @iset: requested charger output current
522 *
523 * The charger output current will be updated for the charger
524 * that is currently in use
525 */
526static int abx500_chargalg_update_chg_curr(struct abx500_chargalg *di,
527 int iset)
528{
529 /* Check if charger exists and update current if charging */
530 if (di->ac_chg && di->ac_chg->ops.update_curr &&
531 di->chg_info.charger_type & AC_CHG) {
532 /*
533 * Select maximum of what both the charger
534 * and the battery supports
535 */
536 if (di->ac_chg->max_out_curr)
537 iset = min(iset, di->ac_chg->max_out_curr);
538
539 di->chg_info.ac_iset = iset;
540
541 return di->ac_chg->ops.update_curr(di->ac_chg, iset);
542 } else if (di->usb_chg && di->usb_chg->ops.update_curr &&
543 di->chg_info.charger_type & USB_CHG) {
544 /*
545 * Select maximum of what both the charger
546 * and the battery supports
547 */
548 if (di->usb_chg->max_out_curr)
549 iset = min(iset, di->usb_chg->max_out_curr);
550
551 di->chg_info.usb_iset = iset;
552
553 return di->usb_chg->ops.update_curr(di->usb_chg, iset);
554 }
555
556 return -ENXIO;
557}
558
559/**
560 * abx500_chargalg_stop_charging() - Stop charging
561 * @di: pointer to the abx500_chargalg structure
562 *
563 * This function is called from any state where charging should be stopped.
564 * All charging is disabled and all status parameters and timers are changed
565 * accordingly
566 */
567static void abx500_chargalg_stop_charging(struct abx500_chargalg *di)
568{
569 abx500_chargalg_ac_en(di, false, 0, 0);
570 abx500_chargalg_usb_en(di, false, 0, 0);
571 abx500_chargalg_stop_safety_timer(di);
572 abx500_chargalg_stop_maintenance_timer(di);
573 di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
574 di->maintenance_chg = false;
575 cancel_delayed_work(&di->chargalg_wd_work);
576 power_supply_changed(&di->chargalg_psy);
577}
578
579/**
580 * abx500_chargalg_hold_charging() - Pauses charging
581 * @di: pointer to the abx500_chargalg structure
582 *
583 * This function is called in the case where maintenance charging has been
584 * disabled and instead a battery voltage mode is entered to check when the
585 * battery voltage has reached a certain recharge voltage
586 */
587static void abx500_chargalg_hold_charging(struct abx500_chargalg *di)
588{
589 abx500_chargalg_ac_en(di, false, 0, 0);
590 abx500_chargalg_usb_en(di, false, 0, 0);
591 abx500_chargalg_stop_safety_timer(di);
592 abx500_chargalg_stop_maintenance_timer(di);
593 di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
594 di->maintenance_chg = false;
595 cancel_delayed_work(&di->chargalg_wd_work);
596 power_supply_changed(&di->chargalg_psy);
597}
598
599/**
600 * abx500_chargalg_start_charging() - Start the charger
601 * @di: pointer to the abx500_chargalg structure
602 * @vset: requested charger output voltage
603 * @iset: requested charger output current
604 *
605 * A charger will be enabled depending on the requested charger type that was
606 * detected previously.
607 */
608static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
609 int vset, int iset)
610{
611 switch (di->chg_info.charger_type) {
612 case AC_CHG:
613 dev_dbg(di->dev,
614 "AC parameters: Vset %d, Ich %d\n", vset, iset);
615 abx500_chargalg_usb_en(di, false, 0, 0);
616 abx500_chargalg_ac_en(di, true, vset, iset);
617 break;
618
619 case USB_CHG:
620 dev_dbg(di->dev,
621 "USB parameters: Vset %d, Ich %d\n", vset, iset);
622 abx500_chargalg_ac_en(di, false, 0, 0);
623 abx500_chargalg_usb_en(di, true, vset, iset);
624 break;
625
626 default:
627 dev_err(di->dev, "Unknown charger to charge from\n");
628 break;
629 }
630}
631
632/**
633 * abx500_chargalg_check_temp() - Check battery temperature ranges
634 * @di: pointer to the abx500_chargalg structure
635 *
636 * The battery temperature is checked against the predefined limits and the
637 * charge state is changed accordingly
638 */
639static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
640{
641 if (di->batt_data.temp > (di->bat->temp_low + di->t_hyst_norm) &&
642 di->batt_data.temp < (di->bat->temp_high - di->t_hyst_norm)) {
643 /* Temp OK! */
644 di->events.btemp_underover = false;
645 di->events.btemp_lowhigh = false;
646 di->t_hyst_norm = 0;
647 di->t_hyst_lowhigh = 0;
648 } else {
649 if (((di->batt_data.temp >= di->bat->temp_high) &&
650 (di->batt_data.temp <
651 (di->bat->temp_over - di->t_hyst_lowhigh))) ||
652 ((di->batt_data.temp >
653 (di->bat->temp_under + di->t_hyst_lowhigh)) &&
654 (di->batt_data.temp <= di->bat->temp_low))) {
655 /* TEMP minor!!!!! */
656 di->events.btemp_underover = false;
657 di->events.btemp_lowhigh = true;
658 di->t_hyst_norm = di->bat->temp_hysteresis;
659 di->t_hyst_lowhigh = 0;
660 } else if (di->batt_data.temp <= di->bat->temp_under ||
661 di->batt_data.temp >= di->bat->temp_over) {
662 /* TEMP major!!!!! */
663 di->events.btemp_underover = true;
664 di->events.btemp_lowhigh = false;
665 di->t_hyst_norm = 0;
666 di->t_hyst_lowhigh = di->bat->temp_hysteresis;
667 } else {
668 /* Within hysteresis */
669 dev_dbg(di->dev, "Within hysteresis limit temp: %d "
670 "hyst_lowhigh %d, hyst normal %d\n",
671 di->batt_data.temp, di->t_hyst_lowhigh,
672 di->t_hyst_norm);
673 }
674 }
675}
676
677/**
678 * abx500_chargalg_check_charger_voltage() - Check charger voltage
679 * @di: pointer to the abx500_chargalg structure
680 *
681 * Charger voltage is checked against maximum limit
682 */
683static void abx500_chargalg_check_charger_voltage(struct abx500_chargalg *di)
684{
685 if (di->chg_info.usb_volt > di->bat->chg_params->usb_volt_max)
686 di->chg_info.usb_chg_ok = false;
687 else
688 di->chg_info.usb_chg_ok = true;
689
690 if (di->chg_info.ac_volt > di->bat->chg_params->ac_volt_max)
691 di->chg_info.ac_chg_ok = false;
692 else
693 di->chg_info.ac_chg_ok = true;
694
695}
696
697/**
698 * abx500_chargalg_end_of_charge() - Check if end-of-charge criteria is fulfilled
699 * @di: pointer to the abx500_chargalg structure
700 *
701 * End-of-charge criteria is fulfilled when the battery voltage is above a
702 * certain limit and the battery current is below a certain limit for a
703 * predefined number of consecutive seconds. If true, the battery is full
704 */
705static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
706{
707 if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
708 di->charge_state == STATE_NORMAL &&
709 !di->maintenance_chg && (di->batt_data.volt >=
710 di->bat->bat_type[di->bat->batt_id].termination_vol ||
711 di->events.usb_cv_active || di->events.ac_cv_active) &&
712 di->batt_data.avg_curr <
713 di->bat->bat_type[di->bat->batt_id].termination_curr &&
714 di->batt_data.avg_curr > 0) {
715 if (++di->eoc_cnt >= EOC_COND_CNT) {
716 di->eoc_cnt = 0;
717 di->charge_status = POWER_SUPPLY_STATUS_FULL;
718 di->maintenance_chg = true;
719 dev_dbg(di->dev, "EOC reached!\n");
720 power_supply_changed(&di->chargalg_psy);
721 } else {
722 dev_dbg(di->dev,
723 " EOC limit reached for the %d"
724 " time, out of %d before EOC\n",
725 di->eoc_cnt,
726 EOC_COND_CNT);
727 }
728 } else {
729 di->eoc_cnt = 0;
730 }
731}
732
/* Reset the current-maximization state to the battery's normal level */
static void init_maxim_chg_curr(struct abx500_chargalg *di)
{
	di->ccm.original_iset =
		di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
	di->ccm.current_iset =
		di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
	di->ccm.test_delta_i = di->bat->maxi->charger_curr_step;
	di->ccm.max_current = di->bat->maxi->chg_curr;
	di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
	di->ccm.level = 0;
}
744
745/**
746 * abx500_chargalg_chg_curr_maxim - increases the charger current to
747 * compensate for the system load
748 * @di pointer to the abx500_chargalg structure
749 *
750 * This maximization function is used to raise the charger current to get the
751 * battery current as close to the optimal value as possible. The battery
752 * current during charging is affected by the system load
753 */
754static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
755{
756 int delta_i;
757
758 if (!di->bat->maxi->ena_maxi)
759 return MAXIM_RET_NOACTION;
760
761 delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
762
763 if (di->events.vbus_collapsed) {
764 dev_dbg(di->dev, "Charger voltage has collapsed %d\n",
765 di->ccm.wait_cnt);
766 if (di->ccm.wait_cnt == 0) {
767 dev_dbg(di->dev, "lowering current\n");
768 di->ccm.wait_cnt++;
769 di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
770 di->ccm.max_current =
771 di->ccm.current_iset - di->ccm.test_delta_i;
772 di->ccm.current_iset = di->ccm.max_current;
773 di->ccm.level--;
774 return MAXIM_RET_CHANGE;
775 } else {
776 dev_dbg(di->dev, "waiting\n");
777 /* Let's go in here twice before lowering curr again */
778 di->ccm.wait_cnt = (di->ccm.wait_cnt + 1) % 3;
779 return MAXIM_RET_NOACTION;
780 }
781 }
782
783 di->ccm.wait_cnt = 0;
784
785 if ((di->batt_data.inst_curr > di->ccm.original_iset)) {
786 dev_dbg(di->dev, " Maximization Ibat (%dmA) too high"
787 " (limit %dmA) (current iset: %dmA)!\n",
788 di->batt_data.inst_curr, di->ccm.original_iset,
789 di->ccm.current_iset);
790
791 if (di->ccm.current_iset == di->ccm.original_iset)
792 return MAXIM_RET_NOACTION;
793
794 di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
795 di->ccm.current_iset = di->ccm.original_iset;
796 di->ccm.level = 0;
797
798 return MAXIM_RET_IBAT_TOO_HIGH;
799 }
800
801 if (delta_i > di->ccm.test_delta_i &&
802 (di->ccm.current_iset + di->ccm.test_delta_i) <
803 di->ccm.max_current) {
804 if (di->ccm.condition_cnt-- == 0) {
805 /* Increse the iset with cco.test_delta_i */
806 di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
807 di->ccm.current_iset += di->ccm.test_delta_i;
808 di->ccm.level++;
809 dev_dbg(di->dev, " Maximization needed, increase"
810 " with %d mA to %dmA (Optimal ibat: %d)"
811 " Level %d\n",
812 di->ccm.test_delta_i,
813 di->ccm.current_iset,
814 di->ccm.original_iset,
815 di->ccm.level);
816 return MAXIM_RET_CHANGE;
817 } else {
818 return MAXIM_RET_NOACTION;
819 }
820 } else {
821 di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
822 return MAXIM_RET_NOACTION;
823 }
824}
825
826static void handle_maxim_chg_curr(struct abx500_chargalg *di)
827{
828 enum maxim_ret ret;
829 int result;
830
831 ret = abx500_chargalg_chg_curr_maxim(di);
832 switch (ret) {
833 case MAXIM_RET_CHANGE:
834 result = abx500_chargalg_update_chg_curr(di,
835 di->ccm.current_iset);
836 if (result)
837 dev_err(di->dev, "failed to set chg curr\n");
838 break;
839 case MAXIM_RET_IBAT_TOO_HIGH:
840 result = abx500_chargalg_update_chg_curr(di,
841 di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
842 if (result)
843 dev_err(di->dev, "failed to set chg curr\n");
844 break;
845
846 case MAXIM_RET_NOACTION:
847 default:
848 /* Do nothing..*/
849 break;
850 }
851}
852
853static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
854{
855 struct power_supply *psy;
856 struct power_supply *ext;
857 struct abx500_chargalg *di;
858 union power_supply_propval ret;
859 int i, j;
860 bool psy_found = false;
861
862 psy = (struct power_supply *)data;
863 ext = dev_get_drvdata(dev);
864 di = to_abx500_chargalg_device_info(psy);
865 /* For all psy where the driver name appears in any supplied_to */
866 for (i = 0; i < ext->num_supplicants; i++) {
867 if (!strcmp(ext->supplied_to[i], psy->name))
868 psy_found = true;
869 }
870 if (!psy_found)
871 return 0;
872
873 /* Go through all properties for the psy */
874 for (j = 0; j < ext->num_properties; j++) {
875 enum power_supply_property prop;
876 prop = ext->properties[j];
877
878 /* Initialize chargers if not already done */
879 if (!di->ac_chg &&
880 ext->type == POWER_SUPPLY_TYPE_MAINS)
881 di->ac_chg = psy_to_ux500_charger(ext);
882 else if (!di->usb_chg &&
883 ext->type == POWER_SUPPLY_TYPE_USB)
884 di->usb_chg = psy_to_ux500_charger(ext);
885
886 if (ext->get_property(ext, prop, &ret))
887 continue;
888 switch (prop) {
889 case POWER_SUPPLY_PROP_PRESENT:
890 switch (ext->type) {
891 case POWER_SUPPLY_TYPE_BATTERY:
892 /* Battery present */
893 if (ret.intval)
894 di->events.batt_rem = false;
895 /* Battery removed */
896 else
897 di->events.batt_rem = true;
898 break;
899 case POWER_SUPPLY_TYPE_MAINS:
900 /* AC disconnected */
901 if (!ret.intval &&
902 (di->chg_info.conn_chg & AC_CHG)) {
903 di->chg_info.prev_conn_chg =
904 di->chg_info.conn_chg;
905 di->chg_info.conn_chg &= ~AC_CHG;
906 }
907 /* AC connected */
908 else if (ret.intval &&
909 !(di->chg_info.conn_chg & AC_CHG)) {
910 di->chg_info.prev_conn_chg =
911 di->chg_info.conn_chg;
912 di->chg_info.conn_chg |= AC_CHG;
913 }
914 break;
915 case POWER_SUPPLY_TYPE_USB:
916 /* USB disconnected */
917 if (!ret.intval &&
918 (di->chg_info.conn_chg & USB_CHG)) {
919 di->chg_info.prev_conn_chg =
920 di->chg_info.conn_chg;
921 di->chg_info.conn_chg &= ~USB_CHG;
922 }
923 /* USB connected */
924 else if (ret.intval &&
925 !(di->chg_info.conn_chg & USB_CHG)) {
926 di->chg_info.prev_conn_chg =
927 di->chg_info.conn_chg;
928 di->chg_info.conn_chg |= USB_CHG;
929 }
930 break;
931 default:
932 break;
933 }
934 break;
935
936 case POWER_SUPPLY_PROP_ONLINE:
937 switch (ext->type) {
938 case POWER_SUPPLY_TYPE_BATTERY:
939 break;
940 case POWER_SUPPLY_TYPE_MAINS:
941 /* AC offline */
942 if (!ret.intval &&
943 (di->chg_info.online_chg & AC_CHG)) {
944 di->chg_info.prev_online_chg =
945 di->chg_info.online_chg;
946 di->chg_info.online_chg &= ~AC_CHG;
947 }
948 /* AC online */
949 else if (ret.intval &&
950 !(di->chg_info.online_chg & AC_CHG)) {
951 di->chg_info.prev_online_chg =
952 di->chg_info.online_chg;
953 di->chg_info.online_chg |= AC_CHG;
954 queue_delayed_work(di->chargalg_wq,
955 &di->chargalg_wd_work, 0);
956 }
957 break;
958 case POWER_SUPPLY_TYPE_USB:
959 /* USB offline */
960 if (!ret.intval &&
961 (di->chg_info.online_chg & USB_CHG)) {
962 di->chg_info.prev_online_chg =
963 di->chg_info.online_chg;
964 di->chg_info.online_chg &= ~USB_CHG;
965 }
966 /* USB online */
967 else if (ret.intval &&
968 !(di->chg_info.online_chg & USB_CHG)) {
969 di->chg_info.prev_online_chg =
970 di->chg_info.online_chg;
971 di->chg_info.online_chg |= USB_CHG;
972 queue_delayed_work(di->chargalg_wq,
973 &di->chargalg_wd_work, 0);
974 }
975 break;
976 default:
977 break;
978 }
979 break;
980
981 case POWER_SUPPLY_PROP_HEALTH:
982 switch (ext->type) {
983 case POWER_SUPPLY_TYPE_BATTERY:
984 break;
985 case POWER_SUPPLY_TYPE_MAINS:
986 switch (ret.intval) {
987 case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
988 di->events.mainextchnotok = true;
989 di->events.main_thermal_prot = false;
990 di->events.main_ovv = false;
991 di->events.ac_wd_expired = false;
992 break;
993 case POWER_SUPPLY_HEALTH_DEAD:
994 di->events.ac_wd_expired = true;
995 di->events.mainextchnotok = false;
996 di->events.main_ovv = false;
997 di->events.main_thermal_prot = false;
998 break;
999 case POWER_SUPPLY_HEALTH_COLD:
1000 case POWER_SUPPLY_HEALTH_OVERHEAT:
1001 di->events.main_thermal_prot = true;
1002 di->events.mainextchnotok = false;
1003 di->events.main_ovv = false;
1004 di->events.ac_wd_expired = false;
1005 break;
1006 case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
1007 di->events.main_ovv = true;
1008 di->events.mainextchnotok = false;
1009 di->events.main_thermal_prot = false;
1010 di->events.ac_wd_expired = false;
1011 break;
1012 case POWER_SUPPLY_HEALTH_GOOD:
1013 di->events.main_thermal_prot = false;
1014 di->events.mainextchnotok = false;
1015 di->events.main_ovv = false;
1016 di->events.ac_wd_expired = false;
1017 break;
1018 default:
1019 break;
1020 }
1021 break;
1022
1023 case POWER_SUPPLY_TYPE_USB:
1024 switch (ret.intval) {
1025 case POWER_SUPPLY_HEALTH_UNSPEC_FAILURE:
1026 di->events.usbchargernotok = true;
1027 di->events.usb_thermal_prot = false;
1028 di->events.vbus_ovv = false;
1029 di->events.usb_wd_expired = false;
1030 break;
1031 case POWER_SUPPLY_HEALTH_DEAD:
1032 di->events.usb_wd_expired = true;
1033 di->events.usbchargernotok = false;
1034 di->events.usb_thermal_prot = false;
1035 di->events.vbus_ovv = false;
1036 break;
1037 case POWER_SUPPLY_HEALTH_COLD:
1038 case POWER_SUPPLY_HEALTH_OVERHEAT:
1039 di->events.usb_thermal_prot = true;
1040 di->events.usbchargernotok = false;
1041 di->events.vbus_ovv = false;
1042 di->events.usb_wd_expired = false;
1043 break;
1044 case POWER_SUPPLY_HEALTH_OVERVOLTAGE:
1045 di->events.vbus_ovv = true;
1046 di->events.usbchargernotok = false;
1047 di->events.usb_thermal_prot = false;
1048 di->events.usb_wd_expired = false;
1049 break;
1050 case POWER_SUPPLY_HEALTH_GOOD:
1051 di->events.usbchargernotok = false;
1052 di->events.usb_thermal_prot = false;
1053 di->events.vbus_ovv = false;
1054 di->events.usb_wd_expired = false;
1055 break;
1056 default:
1057 break;
1058 }
1059 default:
1060 break;
1061 }
1062 break;
1063
1064 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
1065 switch (ext->type) {
1066 case POWER_SUPPLY_TYPE_BATTERY:
1067 di->batt_data.volt = ret.intval / 1000;
1068 break;
1069 case POWER_SUPPLY_TYPE_MAINS:
1070 di->chg_info.ac_volt = ret.intval / 1000;
1071 break;
1072 case POWER_SUPPLY_TYPE_USB:
1073 di->chg_info.usb_volt = ret.intval / 1000;
1074 break;
1075 default:
1076 break;
1077 }
1078 break;
1079
1080 case POWER_SUPPLY_PROP_VOLTAGE_AVG:
1081 switch (ext->type) {
1082 case POWER_SUPPLY_TYPE_MAINS:
1083 /* AVG is used to indicate when we are
1084 * in CV mode */
1085 if (ret.intval)
1086 di->events.ac_cv_active = true;
1087 else
1088 di->events.ac_cv_active = false;
1089
1090 break;
1091 case POWER_SUPPLY_TYPE_USB:
1092 /* AVG is used to indicate when we are
1093 * in CV mode */
1094 if (ret.intval)
1095 di->events.usb_cv_active = true;
1096 else
1097 di->events.usb_cv_active = false;
1098
1099 break;
1100 default:
1101 break;
1102 }
1103 break;
1104
1105 case POWER_SUPPLY_PROP_TECHNOLOGY:
1106 switch (ext->type) {
1107 case POWER_SUPPLY_TYPE_BATTERY:
1108 if (ret.intval)
1109 di->events.batt_unknown = false;
1110 else
1111 di->events.batt_unknown = true;
1112
1113 break;
1114 default:
1115 break;
1116 }
1117 break;
1118
1119 case POWER_SUPPLY_PROP_TEMP:
1120 di->batt_data.temp = ret.intval / 10;
1121 break;
1122
1123 case POWER_SUPPLY_PROP_CURRENT_NOW:
1124 switch (ext->type) {
1125 case POWER_SUPPLY_TYPE_MAINS:
1126 di->chg_info.ac_curr =
1127 ret.intval / 1000;
1128 break;
1129 case POWER_SUPPLY_TYPE_USB:
1130 di->chg_info.usb_curr =
1131 ret.intval / 1000;
1132 break;
1133 case POWER_SUPPLY_TYPE_BATTERY:
1134 di->batt_data.inst_curr = ret.intval / 1000;
1135 break;
1136 default:
1137 break;
1138 }
1139 break;
1140
1141 case POWER_SUPPLY_PROP_CURRENT_AVG:
1142 switch (ext->type) {
1143 case POWER_SUPPLY_TYPE_BATTERY:
1144 di->batt_data.avg_curr = ret.intval / 1000;
1145 break;
1146 case POWER_SUPPLY_TYPE_USB:
1147 if (ret.intval)
1148 di->events.vbus_collapsed = true;
1149 else
1150 di->events.vbus_collapsed = false;
1151 break;
1152 default:
1153 break;
1154 }
1155 break;
1156 case POWER_SUPPLY_PROP_CAPACITY:
1157 di->batt_data.percent = ret.intval;
1158 break;
1159 default:
1160 break;
1161 }
1162 }
1163 return 0;
1164}
1165
1166/**
1167 * abx500_chargalg_external_power_changed() - callback for power supply changes
1168 * @psy: pointer to the structure power_supply
1169 *
1170 * This function is the entry point of the pointer external_power_changed
1171 * of the structure power_supply.
1172 * This function gets executed when there is a change in any external power
1173 * supply that this driver needs to be notified of.
1174 */
1175static void abx500_chargalg_external_power_changed(struct power_supply *psy)
1176{
1177 struct abx500_chargalg *di = to_abx500_chargalg_device_info(psy);
1178
1179 /*
1180 * Trigger execution of the algorithm instantly and read
1181 * all power_supply properties there instead
1182 */
1183 queue_work(di->chargalg_wq, &di->chargalg_work);
1184}
1185
1186/**
1187 * abx500_chargalg_algorithm() - Main function for the algorithm
1188 * @di: pointer to the abx500_chargalg structure
1189 *
1190 * This is the main control function for the charging algorithm.
1191 * It is called periodically or when something happens that will
1192 * trigger a state change
1193 */
1194static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
1195{
1196 int charger_status;
1197
1198 /* Collect data from all power_supply class devices */
1199 class_for_each_device(power_supply_class, NULL,
1200 &di->chargalg_psy, abx500_chargalg_get_ext_psy_data);
1201
1202 abx500_chargalg_end_of_charge(di);
1203 abx500_chargalg_check_temp(di);
1204 abx500_chargalg_check_charger_voltage(di);
1205
1206 charger_status = abx500_chargalg_check_charger_connection(di);
1207 /*
1208 * First check if we have a charger connected.
1209 * Also we don't allow charging of unknown batteries if configured
1210 * this way
1211 */
1212 if (!charger_status ||
1213 (di->events.batt_unknown && !di->bat->chg_unknown_bat)) {
1214 if (di->charge_state != STATE_HANDHELD) {
1215 di->events.safety_timer_expired = false;
1216 abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
1217 }
1218 }
1219
1220 /* If suspended, we should not continue checking the flags */
1221 else if (di->charge_state == STATE_SUSPENDED_INIT ||
1222 di->charge_state == STATE_SUSPENDED) {
1223 /* We don't do anything here, just don,t continue */
1224 }
1225
1226 /* Safety timer expiration */
1227 else if (di->events.safety_timer_expired) {
1228 if (di->charge_state != STATE_SAFETY_TIMER_EXPIRED)
1229 abx500_chargalg_state_to(di,
1230 STATE_SAFETY_TIMER_EXPIRED_INIT);
1231 }
1232 /*
1233 * Check if any interrupts has occured
1234 * that will prevent us from charging
1235 */
1236
1237 /* Battery removed */
1238 else if (di->events.batt_rem) {
1239 if (di->charge_state != STATE_BATT_REMOVED)
1240 abx500_chargalg_state_to(di, STATE_BATT_REMOVED_INIT);
1241 }
1242 /* Main or USB charger not ok. */
1243 else if (di->events.mainextchnotok || di->events.usbchargernotok) {
1244 /*
1245 * If vbus_collapsed is set, we have to lower the charger
1246 * current, which is done in the normal state below
1247 */
1248 if (di->charge_state != STATE_CHG_NOT_OK &&
1249 !di->events.vbus_collapsed)
1250 abx500_chargalg_state_to(di, STATE_CHG_NOT_OK_INIT);
1251 }
1252 /* VBUS, Main or VBAT OVV. */
1253 else if (di->events.vbus_ovv ||
1254 di->events.main_ovv ||
1255 di->events.batt_ovv ||
1256 !di->chg_info.usb_chg_ok ||
1257 !di->chg_info.ac_chg_ok) {
1258 if (di->charge_state != STATE_OVV_PROTECT)
1259 abx500_chargalg_state_to(di, STATE_OVV_PROTECT_INIT);
1260 }
1261 /* USB Thermal, stop charging */
1262 else if (di->events.main_thermal_prot ||
1263 di->events.usb_thermal_prot) {
1264 if (di->charge_state != STATE_HW_TEMP_PROTECT)
1265 abx500_chargalg_state_to(di,
1266 STATE_HW_TEMP_PROTECT_INIT);
1267 }
1268 /* Battery temp over/under */
1269 else if (di->events.btemp_underover) {
1270 if (di->charge_state != STATE_TEMP_UNDEROVER)
1271 abx500_chargalg_state_to(di,
1272 STATE_TEMP_UNDEROVER_INIT);
1273 }
1274 /* Watchdog expired */
1275 else if (di->events.ac_wd_expired ||
1276 di->events.usb_wd_expired) {
1277 if (di->charge_state != STATE_WD_EXPIRED)
1278 abx500_chargalg_state_to(di, STATE_WD_EXPIRED_INIT);
1279 }
1280 /* Battery temp high/low */
1281 else if (di->events.btemp_lowhigh) {
1282 if (di->charge_state != STATE_TEMP_LOWHIGH)
1283 abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH_INIT);
1284 }
1285
1286 dev_dbg(di->dev,
1287 "[CHARGALG] Vb %d Ib_avg %d Ib_inst %d Tb %d Cap %d Maint %d "
1288 "State %s Active_chg %d Chg_status %d AC %d USB %d "
1289 "AC_online %d USB_online %d AC_CV %d USB_CV %d AC_I %d "
1290 "USB_I %d AC_Vset %d AC_Iset %d USB_Vset %d USB_Iset %d\n",
1291 di->batt_data.volt,
1292 di->batt_data.avg_curr,
1293 di->batt_data.inst_curr,
1294 di->batt_data.temp,
1295 di->batt_data.percent,
1296 di->maintenance_chg,
1297 states[di->charge_state],
1298 di->chg_info.charger_type,
1299 di->charge_status,
1300 di->chg_info.conn_chg & AC_CHG,
1301 di->chg_info.conn_chg & USB_CHG,
1302 di->chg_info.online_chg & AC_CHG,
1303 di->chg_info.online_chg & USB_CHG,
1304 di->events.ac_cv_active,
1305 di->events.usb_cv_active,
1306 di->chg_info.ac_curr,
1307 di->chg_info.usb_curr,
1308 di->chg_info.ac_vset,
1309 di->chg_info.ac_iset,
1310 di->chg_info.usb_vset,
1311 di->chg_info.usb_iset);
1312
1313 switch (di->charge_state) {
1314 case STATE_HANDHELD_INIT:
1315 abx500_chargalg_stop_charging(di);
1316 di->charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
1317 abx500_chargalg_state_to(di, STATE_HANDHELD);
1318 /* Intentional fallthrough */
1319
1320 case STATE_HANDHELD:
1321 break;
1322
1323 case STATE_SUSPENDED_INIT:
1324 if (di->susp_status.ac_suspended)
1325 abx500_chargalg_ac_en(di, false, 0, 0);
1326 if (di->susp_status.usb_suspended)
1327 abx500_chargalg_usb_en(di, false, 0, 0);
1328 abx500_chargalg_stop_safety_timer(di);
1329 abx500_chargalg_stop_maintenance_timer(di);
1330 di->charge_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
1331 di->maintenance_chg = false;
1332 abx500_chargalg_state_to(di, STATE_SUSPENDED);
1333 power_supply_changed(&di->chargalg_psy);
1334 /* Intentional fallthrough */
1335
1336 case STATE_SUSPENDED:
1337 /* CHARGING is suspended */
1338 break;
1339
1340 case STATE_BATT_REMOVED_INIT:
1341 abx500_chargalg_stop_charging(di);
1342 abx500_chargalg_state_to(di, STATE_BATT_REMOVED);
1343 /* Intentional fallthrough */
1344
1345 case STATE_BATT_REMOVED:
1346 if (!di->events.batt_rem)
1347 abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
1348 break;
1349
1350 case STATE_HW_TEMP_PROTECT_INIT:
1351 abx500_chargalg_stop_charging(di);
1352 abx500_chargalg_state_to(di, STATE_HW_TEMP_PROTECT);
1353 /* Intentional fallthrough */
1354
1355 case STATE_HW_TEMP_PROTECT:
1356 if (!di->events.main_thermal_prot &&
1357 !di->events.usb_thermal_prot)
1358 abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
1359 break;
1360
1361 case STATE_OVV_PROTECT_INIT:
1362 abx500_chargalg_stop_charging(di);
1363 abx500_chargalg_state_to(di, STATE_OVV_PROTECT);
1364 /* Intentional fallthrough */
1365
1366 case STATE_OVV_PROTECT:
1367 if (!di->events.vbus_ovv &&
1368 !di->events.main_ovv &&
1369 !di->events.batt_ovv &&
1370 di->chg_info.usb_chg_ok &&
1371 di->chg_info.ac_chg_ok)
1372 abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
1373 break;
1374
1375 case STATE_CHG_NOT_OK_INIT:
1376 abx500_chargalg_stop_charging(di);
1377 abx500_chargalg_state_to(di, STATE_CHG_NOT_OK);
1378 /* Intentional fallthrough */
1379
1380 case STATE_CHG_NOT_OK:
1381 if (!di->events.mainextchnotok &&
1382 !di->events.usbchargernotok)
1383 abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
1384 break;
1385
1386 case STATE_SAFETY_TIMER_EXPIRED_INIT:
1387 abx500_chargalg_stop_charging(di);
1388 abx500_chargalg_state_to(di, STATE_SAFETY_TIMER_EXPIRED);
1389 /* Intentional fallthrough */
1390
1391 case STATE_SAFETY_TIMER_EXPIRED:
1392 /* We exit this state when charger is removed */
1393 break;
1394
1395 case STATE_NORMAL_INIT:
1396 abx500_chargalg_start_charging(di,
1397 di->bat->bat_type[di->bat->batt_id].normal_vol_lvl,
1398 di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
1399 abx500_chargalg_state_to(di, STATE_NORMAL);
1400 abx500_chargalg_start_safety_timer(di);
1401 abx500_chargalg_stop_maintenance_timer(di);
1402 init_maxim_chg_curr(di);
1403 di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
1404 di->eoc_cnt = 0;
1405 di->maintenance_chg = false;
1406 power_supply_changed(&di->chargalg_psy);
1407
1408 break;
1409
1410 case STATE_NORMAL:
1411 handle_maxim_chg_curr(di);
1412 if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
1413 di->maintenance_chg) {
1414 if (di->bat->no_maintenance)
1415 abx500_chargalg_state_to(di,
1416 STATE_WAIT_FOR_RECHARGE_INIT);
1417 else
1418 abx500_chargalg_state_to(di,
1419 STATE_MAINTENANCE_A_INIT);
1420 }
1421 break;
1422
1423 /* This state will be used when the maintenance state is disabled */
1424 case STATE_WAIT_FOR_RECHARGE_INIT:
1425 abx500_chargalg_hold_charging(di);
1426 abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
1427 di->rch_cnt = RCH_COND_CNT;
1428 /* Intentional fallthrough */
1429
1430 case STATE_WAIT_FOR_RECHARGE:
1431 if (di->batt_data.volt <=
1432 di->bat->bat_type[di->bat->batt_id].recharge_vol) {
1433 if (di->rch_cnt-- == 0)
1434 abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
1435 } else
1436 di->rch_cnt = RCH_COND_CNT;
1437 break;
1438
1439 case STATE_MAINTENANCE_A_INIT:
1440 abx500_chargalg_stop_safety_timer(di);
1441 abx500_chargalg_start_maintenance_timer(di,
1442 di->bat->bat_type[
1443 di->bat->batt_id].maint_a_chg_timer_h);
1444 abx500_chargalg_start_charging(di,
1445 di->bat->bat_type[
1446 di->bat->batt_id].maint_a_vol_lvl,
1447 di->bat->bat_type[
1448 di->bat->batt_id].maint_a_cur_lvl);
1449 abx500_chargalg_state_to(di, STATE_MAINTENANCE_A);
1450 power_supply_changed(&di->chargalg_psy);
1451 /* Intentional fallthrough*/
1452
1453 case STATE_MAINTENANCE_A:
1454 if (di->events.maintenance_timer_expired) {
1455 abx500_chargalg_stop_maintenance_timer(di);
1456 abx500_chargalg_state_to(di, STATE_MAINTENANCE_B_INIT);
1457 }
1458 break;
1459
1460 case STATE_MAINTENANCE_B_INIT:
1461 abx500_chargalg_start_maintenance_timer(di,
1462 di->bat->bat_type[
1463 di->bat->batt_id].maint_b_chg_timer_h);
1464 abx500_chargalg_start_charging(di,
1465 di->bat->bat_type[
1466 di->bat->batt_id].maint_b_vol_lvl,
1467 di->bat->bat_type[
1468 di->bat->batt_id].maint_b_cur_lvl);
1469 abx500_chargalg_state_to(di, STATE_MAINTENANCE_B);
1470 power_supply_changed(&di->chargalg_psy);
1471 /* Intentional fallthrough*/
1472
1473 case STATE_MAINTENANCE_B:
1474 if (di->events.maintenance_timer_expired) {
1475 abx500_chargalg_stop_maintenance_timer(di);
1476 abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
1477 }
1478 break;
1479
1480 case STATE_TEMP_LOWHIGH_INIT:
1481 abx500_chargalg_start_charging(di,
1482 di->bat->bat_type[
1483 di->bat->batt_id].low_high_vol_lvl,
1484 di->bat->bat_type[
1485 di->bat->batt_id].low_high_cur_lvl);
1486 abx500_chargalg_stop_maintenance_timer(di);
1487 di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
1488 abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
1489 power_supply_changed(&di->chargalg_psy);
1490 /* Intentional fallthrough */
1491
1492 case STATE_TEMP_LOWHIGH:
1493 if (!di->events.btemp_lowhigh)
1494 abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
1495 break;
1496
1497 case STATE_WD_EXPIRED_INIT:
1498 abx500_chargalg_stop_charging(di);
1499 abx500_chargalg_state_to(di, STATE_WD_EXPIRED);
1500 /* Intentional fallthrough */
1501
1502 case STATE_WD_EXPIRED:
1503 if (!di->events.ac_wd_expired &&
1504 !di->events.usb_wd_expired)
1505 abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
1506 break;
1507
1508 case STATE_TEMP_UNDEROVER_INIT:
1509 abx500_chargalg_stop_charging(di);
1510 abx500_chargalg_state_to(di, STATE_TEMP_UNDEROVER);
1511 /* Intentional fallthrough */
1512
1513 case STATE_TEMP_UNDEROVER:
1514 if (!di->events.btemp_underover)
1515 abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
1516 break;
1517 }
1518
1519 /* Start charging directly if the new state is a charge state */
1520 if (di->charge_state == STATE_NORMAL_INIT ||
1521 di->charge_state == STATE_MAINTENANCE_A_INIT ||
1522 di->charge_state == STATE_MAINTENANCE_B_INIT)
1523 queue_work(di->chargalg_wq, &di->chargalg_work);
1524}
1525
1526/**
1527 * abx500_chargalg_periodic_work() - Periodic work for the algorithm
1528 * @work: pointer to the work_struct structure
1529 *
1530 * Work queue function for the charging algorithm
1531 */
1532static void abx500_chargalg_periodic_work(struct work_struct *work)
1533{
1534 struct abx500_chargalg *di = container_of(work,
1535 struct abx500_chargalg, chargalg_periodic_work.work);
1536
1537 abx500_chargalg_algorithm(di);
1538
1539 /*
1540 * If a charger is connected then the battery has to be monitored
1541 * frequently, else the work can be delayed.
1542 */
1543 if (di->chg_info.conn_chg)
1544 queue_delayed_work(di->chargalg_wq,
1545 &di->chargalg_periodic_work,
1546 di->bat->interval_charging * HZ);
1547 else
1548 queue_delayed_work(di->chargalg_wq,
1549 &di->chargalg_periodic_work,
1550 di->bat->interval_not_charging * HZ);
1551}
1552
1553/**
1554 * abx500_chargalg_wd_work() - periodic work to kick the charger watchdog
1555 * @work: pointer to the work_struct structure
1556 *
1557 * Work queue function for kicking the charger watchdog
1558 */
1559static void abx500_chargalg_wd_work(struct work_struct *work)
1560{
1561 int ret;
1562 struct abx500_chargalg *di = container_of(work,
1563 struct abx500_chargalg, chargalg_wd_work.work);
1564
1565 dev_dbg(di->dev, "abx500_chargalg_wd_work\n");
1566
1567 ret = abx500_chargalg_kick_watchdog(di);
1568 if (ret < 0)
1569 dev_err(di->dev, "failed to kick watchdog\n");
1570
1571 queue_delayed_work(di->chargalg_wq,
1572 &di->chargalg_wd_work, CHG_WD_INTERVAL);
1573}
1574
1575/**
1576 * abx500_chargalg_work() - Work to run the charging algorithm instantly
1577 * @work: pointer to the work_struct structure
1578 *
1579 * Work queue function for calling the charging algorithm
1580 */
1581static void abx500_chargalg_work(struct work_struct *work)
1582{
1583 struct abx500_chargalg *di = container_of(work,
1584 struct abx500_chargalg, chargalg_work);
1585
1586 abx500_chargalg_algorithm(di);
1587}
1588
1589/**
1590 * abx500_chargalg_get_property() - get the chargalg properties
1591 * @psy: pointer to the power_supply structure
1592 * @psp: pointer to the power_supply_property structure
1593 * @val: pointer to the power_supply_propval union
1594 *
1595 * This function gets called when an application tries to get the
1596 * chargalg properties by reading the sysfs files.
1597 * status: charging/discharging/full/unknown
1598 * health: health of the battery
1599 * Returns error code in case of failure else 0 on success
1600 */
1601static int abx500_chargalg_get_property(struct power_supply *psy,
1602 enum power_supply_property psp,
1603 union power_supply_propval *val)
1604{
1605 struct abx500_chargalg *di;
1606
1607 di = to_abx500_chargalg_device_info(psy);
1608
1609 switch (psp) {
1610 case POWER_SUPPLY_PROP_STATUS:
1611 val->intval = di->charge_status;
1612 break;
1613 case POWER_SUPPLY_PROP_HEALTH:
1614 if (di->events.batt_ovv) {
1615 val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
1616 } else if (di->events.btemp_underover) {
1617 if (di->batt_data.temp <= di->bat->temp_under)
1618 val->intval = POWER_SUPPLY_HEALTH_COLD;
1619 else
1620 val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
1621 } else {
1622 val->intval = POWER_SUPPLY_HEALTH_GOOD;
1623 }
1624 break;
1625 default:
1626 return -EINVAL;
1627 }
1628 return 0;
1629}
1630
1631/* Exposure to the sysfs interface */
1632
1633/**
1634 * abx500_chargalg_sysfs_charger() - sysfs store operations
1635 * @kobj: pointer to the struct kobject
1636 * @attr: pointer to the struct attribute
1637 * @buf: buffer that holds the parameter passed from userspace
1638 * @length: length of the parameter passed
1639 *
1640 * Returns length of the buffer(input taken from user space) on success
1641 * else error code on failure
1642 * The operation to be performed on passing the parameters from the user space.
1643 */
1644static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
1645 struct attribute *attr, const char *buf, size_t length)
1646{
1647 struct abx500_chargalg *di = container_of(kobj,
1648 struct abx500_chargalg, chargalg_kobject);
1649 long int param;
1650 int ac_usb;
1651 int ret;
1652 char entry = *attr->name;
1653
1654 switch (entry) {
1655 case 'c':
1656 ret = strict_strtol(buf, 10, &param);
1657 if (ret < 0)
1658 return ret;
1659
1660 ac_usb = param;
1661 switch (ac_usb) {
1662 case 0:
1663 /* Disable charging */
1664 di->susp_status.ac_suspended = true;
1665 di->susp_status.usb_suspended = true;
1666 di->susp_status.suspended_change = true;
1667 /* Trigger a state change */
1668 queue_work(di->chargalg_wq,
1669 &di->chargalg_work);
1670 break;
1671 case 1:
1672 /* Enable AC Charging */
1673 di->susp_status.ac_suspended = false;
1674 di->susp_status.suspended_change = true;
1675 /* Trigger a state change */
1676 queue_work(di->chargalg_wq,
1677 &di->chargalg_work);
1678 break;
1679 case 2:
1680 /* Enable USB charging */
1681 di->susp_status.usb_suspended = false;
1682 di->susp_status.suspended_change = true;
1683 /* Trigger a state change */
1684 queue_work(di->chargalg_wq,
1685 &di->chargalg_work);
1686 break;
1687 default:
1688 dev_info(di->dev, "Wrong input\n"
1689 "Enter 0. Disable AC/USB Charging\n"
1690 "1. Enable AC charging\n"
1691 "2. Enable USB Charging\n");
1692 };
1693 break;
1694 };
1695 return strlen(buf);
1696}
1697
1698static struct attribute abx500_chargalg_en_charger = \
1699{
1700 .name = "chargalg",
1701 .mode = S_IWUGO,
1702};
1703
/* NULL-terminated list of attributes shown in the "abx500_chargalg" kobject */
static struct attribute *abx500_chargalg_chg[] = {
	&abx500_chargalg_en_charger,
	NULL
};
1708
/* Only .store is provided; the attribute is write-only (no show method) */
static const struct sysfs_ops abx500_chargalg_sysfs_ops = {
	.store = abx500_chargalg_sysfs_charger,
};
1712
/*
 * NOTE(review): no .release is defined — the kobject is embedded in
 * struct abx500_chargalg and its memory is reclaimed by kfree(di) at
 * remove time; confirm this lifetime strategy is intentional.
 */
static struct kobj_type abx500_chargalg_ktype = {
	.sysfs_ops = &abx500_chargalg_sysfs_ops,
	.default_attrs = abx500_chargalg_chg,
};
1717
1718/**
1719 * abx500_chargalg_sysfs_exit() - de-init of sysfs entry
1720 * @di: pointer to the struct abx500_chargalg
1721 *
1722 * This function removes the entry in sysfs.
1723 */
1724static void abx500_chargalg_sysfs_exit(struct abx500_chargalg *di)
1725{
1726 kobject_del(&di->chargalg_kobject);
1727}
1728
1729/**
1730 * abx500_chargalg_sysfs_init() - init of sysfs entry
1731 * @di: pointer to the struct abx500_chargalg
1732 *
1733 * This function adds an entry in sysfs.
1734 * Returns error code in case of failure else 0(on success)
1735 */
1736static int abx500_chargalg_sysfs_init(struct abx500_chargalg *di)
1737{
1738 int ret = 0;
1739
1740 ret = kobject_init_and_add(&di->chargalg_kobject,
1741 &abx500_chargalg_ktype,
1742 NULL, "abx500_chargalg");
1743 if (ret < 0)
1744 dev_err(di->dev, "failed to create sysfs entry\n");
1745
1746 return ret;
1747}
1748/* Exposure to the sysfs interface <<END>> */
1749
#if defined(CONFIG_PM)
/* Legacy platform PM callback: re-arm the deferred work after resume */
static int abx500_chargalg_resume(struct platform_device *pdev)
{
	struct abx500_chargalg *di = platform_get_drvdata(pdev);

	/* Kick charger watchdog if charging (any charger online) */
	if (di->chg_info.online_chg)
		queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);

	/*
	 * Run the charging algorithm directly to be sure we don't
	 * do it too seldom
	 */
	queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);

	return 0;
}

/* Legacy platform PM callback: park all deferred work before suspend */
static int abx500_chargalg_suspend(struct platform_device *pdev,
	pm_message_t state)
{
	struct abx500_chargalg *di = platform_get_drvdata(pdev);

	/* The watchdog work is only scheduled while a charger is online */
	if (di->chg_info.online_chg)
		cancel_delayed_work_sync(&di->chargalg_wd_work);

	cancel_delayed_work_sync(&di->chargalg_periodic_work);

	return 0;
}
#else
#define abx500_chargalg_suspend NULL
#define abx500_chargalg_resume NULL
#endif
1784
1785static int __devexit abx500_chargalg_remove(struct platform_device *pdev)
1786{
1787 struct abx500_chargalg *di = platform_get_drvdata(pdev);
1788
1789 /* sysfs interface to enable/disbale charging from user space */
1790 abx500_chargalg_sysfs_exit(di);
1791
1792 /* Delete the work queue */
1793 destroy_workqueue(di->chargalg_wq);
1794
1795 flush_scheduled_work();
1796 power_supply_unregister(&di->chargalg_psy);
1797 platform_set_drvdata(pdev, NULL);
1798 kfree(di);
1799
1800 return 0;
1801}
1802
1803static int __devinit abx500_chargalg_probe(struct platform_device *pdev)
1804{
1805 struct abx500_bm_plat_data *plat_data;
1806 int ret = 0;
1807
1808 struct abx500_chargalg *di =
1809 kzalloc(sizeof(struct abx500_chargalg), GFP_KERNEL);
1810 if (!di)
1811 return -ENOMEM;
1812
1813 /* get device struct */
1814 di->dev = &pdev->dev;
1815
1816 plat_data = pdev->dev.platform_data;
1817 di->pdata = plat_data->chargalg;
1818 di->bat = plat_data->battery;
1819
1820 /* chargalg supply */
1821 di->chargalg_psy.name = "abx500_chargalg";
1822 di->chargalg_psy.type = POWER_SUPPLY_TYPE_BATTERY;
1823 di->chargalg_psy.properties = abx500_chargalg_props;
1824 di->chargalg_psy.num_properties = ARRAY_SIZE(abx500_chargalg_props);
1825 di->chargalg_psy.get_property = abx500_chargalg_get_property;
1826 di->chargalg_psy.supplied_to = di->pdata->supplied_to;
1827 di->chargalg_psy.num_supplicants = di->pdata->num_supplicants;
1828 di->chargalg_psy.external_power_changed =
1829 abx500_chargalg_external_power_changed;
1830
1831 /* Initilialize safety timer */
1832 init_timer(&di->safety_timer);
1833 di->safety_timer.function = abx500_chargalg_safety_timer_expired;
1834 di->safety_timer.data = (unsigned long) di;
1835
1836 /* Initilialize maintenance timer */
1837 init_timer(&di->maintenance_timer);
1838 di->maintenance_timer.function =
1839 abx500_chargalg_maintenance_timer_expired;
1840 di->maintenance_timer.data = (unsigned long) di;
1841
1842 /* Create a work queue for the chargalg */
1843 di->chargalg_wq =
1844 create_singlethread_workqueue("abx500_chargalg_wq");
1845 if (di->chargalg_wq == NULL) {
1846 dev_err(di->dev, "failed to create work queue\n");
1847 goto free_device_info;
1848 }
1849
1850 /* Init work for chargalg */
1851 INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_periodic_work,
1852 abx500_chargalg_periodic_work);
1853 INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_wd_work,
1854 abx500_chargalg_wd_work);
1855
1856 /* Init work for chargalg */
1857 INIT_WORK(&di->chargalg_work, abx500_chargalg_work);
1858
1859 /* To detect charger at startup */
1860 di->chg_info.prev_conn_chg = -1;
1861
1862 /* Register chargalg power supply class */
1863 ret = power_supply_register(di->dev, &di->chargalg_psy);
1864 if (ret) {
1865 dev_err(di->dev, "failed to register chargalg psy\n");
1866 goto free_chargalg_wq;
1867 }
1868
1869 platform_set_drvdata(pdev, di);
1870
1871 /* sysfs interface to enable/disable charging from user space */
1872 ret = abx500_chargalg_sysfs_init(di);
1873 if (ret) {
1874 dev_err(di->dev, "failed to create sysfs entry\n");
1875 goto free_psy;
1876 }
1877
1878 /* Run the charging algorithm */
1879 queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
1880
1881 dev_info(di->dev, "probe success\n");
1882 return ret;
1883
1884free_psy:
1885 power_supply_unregister(&di->chargalg_psy);
1886free_chargalg_wq:
1887 destroy_workqueue(di->chargalg_wq);
1888free_device_info:
1889 kfree(di);
1890
1891 return ret;
1892}
1893
/* Uses the legacy platform suspend/resume callbacks (not dev_pm_ops) */
static struct platform_driver abx500_chargalg_driver = {
	.probe = abx500_chargalg_probe,
	.remove = __devexit_p(abx500_chargalg_remove),
	.suspend = abx500_chargalg_suspend,
	.resume = abx500_chargalg_resume,
	.driver = {
		.name = "abx500-chargalg",
		.owner = THIS_MODULE,
	},
};
1904
/* Module entry point: register the platform driver */
static int __init abx500_chargalg_init(void)
{
	return platform_driver_register(&abx500_chargalg_driver);
}

/* Module exit point: unregister the platform driver */
static void __exit abx500_chargalg_exit(void)
{
	platform_driver_unregister(&abx500_chargalg_driver);
}

module_init(abx500_chargalg_init);
module_exit(abx500_chargalg_exit);
1917
1918MODULE_LICENSE("GPL v2");
1919MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
1920MODULE_ALIAS("platform:abx500-chargalg");
1921MODULE_DESCRIPTION("abx500 battery charging algorithm");
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 88fd9710bda2..9eca9f1ff0ea 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -134,12 +134,11 @@ static int get_batt_uV(struct charger_manager *cm, int *uV)
134 union power_supply_propval val; 134 union power_supply_propval val;
135 int ret; 135 int ret;
136 136
137 if (cm->fuel_gauge) 137 if (!cm->fuel_gauge)
138 ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
139 POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
140 else
141 return -ENODEV; 138 return -ENODEV;
142 139
140 ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
141 POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
143 if (ret) 142 if (ret)
144 return ret; 143 return ret;
145 144
@@ -245,9 +244,7 @@ static int try_charger_enable(struct charger_manager *cm, bool enable)
245 struct charger_desc *desc = cm->desc; 244 struct charger_desc *desc = cm->desc;
246 245
247 /* Ignore if it's redundent command */ 246 /* Ignore if it's redundent command */
248 if (enable && cm->charger_enabled) 247 if (enable == cm->charger_enabled)
249 return 0;
250 if (!enable && !cm->charger_enabled)
251 return 0; 248 return 0;
252 249
253 if (enable) { 250 if (enable) {
@@ -309,9 +306,7 @@ static void uevent_notify(struct charger_manager *cm, const char *event)
309 306
310 if (!strncmp(env_str_save, event, UEVENT_BUF_SIZE)) 307 if (!strncmp(env_str_save, event, UEVENT_BUF_SIZE))
311 return; /* Duplicated. */ 308 return; /* Duplicated. */
312 else 309 strncpy(env_str_save, event, UEVENT_BUF_SIZE);
313 strncpy(env_str_save, event, UEVENT_BUF_SIZE);
314
315 return; 310 return;
316 } 311 }
317 312
@@ -387,8 +382,10 @@ static bool cm_monitor(void)
387 382
388 mutex_lock(&cm_list_mtx); 383 mutex_lock(&cm_list_mtx);
389 384
390 list_for_each_entry(cm, &cm_list, entry) 385 list_for_each_entry(cm, &cm_list, entry) {
391 stop = stop || _cm_monitor(cm); 386 if (_cm_monitor(cm))
387 stop = true;
388 }
392 389
393 mutex_unlock(&cm_list_mtx); 390 mutex_unlock(&cm_list_mtx);
394 391
@@ -402,7 +399,8 @@ static int charger_get_property(struct power_supply *psy,
402 struct charger_manager *cm = container_of(psy, 399 struct charger_manager *cm = container_of(psy,
403 struct charger_manager, charger_psy); 400 struct charger_manager, charger_psy);
404 struct charger_desc *desc = cm->desc; 401 struct charger_desc *desc = cm->desc;
405 int i, ret = 0, uV; 402 int ret = 0;
403 int uV;
406 404
407 switch (psp) { 405 switch (psp) {
408 case POWER_SUPPLY_PROP_STATUS: 406 case POWER_SUPPLY_PROP_STATUS:
@@ -428,8 +426,7 @@ static int charger_get_property(struct power_supply *psy,
428 val->intval = 0; 426 val->intval = 0;
429 break; 427 break;
430 case POWER_SUPPLY_PROP_VOLTAGE_NOW: 428 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
431 ret = get_batt_uV(cm, &i); 429 ret = get_batt_uV(cm, &val->intval);
432 val->intval = i;
433 break; 430 break;
434 case POWER_SUPPLY_PROP_CURRENT_NOW: 431 case POWER_SUPPLY_PROP_CURRENT_NOW:
435 ret = cm->fuel_gauge->get_property(cm->fuel_gauge, 432 ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
@@ -697,8 +694,10 @@ bool cm_suspend_again(void)
697 mutex_lock(&cm_list_mtx); 694 mutex_lock(&cm_list_mtx);
698 list_for_each_entry(cm, &cm_list, entry) { 695 list_for_each_entry(cm, &cm_list, entry) {
699 if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) || 696 if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
700 cm->status_save_batt != is_batt_present(cm)) 697 cm->status_save_batt != is_batt_present(cm)) {
701 ret = false; 698 ret = false;
699 break;
700 }
702 } 701 }
703 mutex_unlock(&cm_list_mtx); 702 mutex_unlock(&cm_list_mtx);
704 703
@@ -855,11 +854,10 @@ static int charger_manager_probe(struct platform_device *pdev)
855 854
856 platform_set_drvdata(pdev, cm); 855 platform_set_drvdata(pdev, cm);
857 856
858 memcpy(&cm->charger_psy, &psy_default, 857 memcpy(&cm->charger_psy, &psy_default, sizeof(psy_default));
859 sizeof(psy_default)); 858
860 if (!desc->psy_name) { 859 if (!desc->psy_name) {
861 strncpy(cm->psy_name_buf, psy_default.name, 860 strncpy(cm->psy_name_buf, psy_default.name, PSY_NAME_MAX);
862 PSY_NAME_MAX);
863 } else { 861 } else {
864 strncpy(cm->psy_name_buf, desc->psy_name, PSY_NAME_MAX); 862 strncpy(cm->psy_name_buf, desc->psy_name, PSY_NAME_MAX);
865 } 863 }
@@ -894,15 +892,15 @@ static int charger_manager_probe(struct platform_device *pdev)
894 POWER_SUPPLY_PROP_CURRENT_NOW; 892 POWER_SUPPLY_PROP_CURRENT_NOW;
895 cm->charger_psy.num_properties++; 893 cm->charger_psy.num_properties++;
896 } 894 }
897 if (!desc->measure_battery_temp) { 895
898 cm->charger_psy.properties[cm->charger_psy.num_properties] =
899 POWER_SUPPLY_PROP_TEMP_AMBIENT;
900 cm->charger_psy.num_properties++;
901 }
902 if (desc->measure_battery_temp) { 896 if (desc->measure_battery_temp) {
903 cm->charger_psy.properties[cm->charger_psy.num_properties] = 897 cm->charger_psy.properties[cm->charger_psy.num_properties] =
904 POWER_SUPPLY_PROP_TEMP; 898 POWER_SUPPLY_PROP_TEMP;
905 cm->charger_psy.num_properties++; 899 cm->charger_psy.num_properties++;
900 } else {
901 cm->charger_psy.properties[cm->charger_psy.num_properties] =
902 POWER_SUPPLY_PROP_TEMP_AMBIENT;
903 cm->charger_psy.num_properties++;
906 } 904 }
907 905
908 ret = power_supply_register(NULL, &cm->charger_psy); 906 ret = power_supply_register(NULL, &cm->charger_psy);
@@ -933,9 +931,8 @@ static int charger_manager_probe(struct platform_device *pdev)
933 return 0; 931 return 0;
934 932
935err_chg_enable: 933err_chg_enable:
936 if (desc->charger_regulators) 934 regulator_bulk_free(desc->num_charger_regulators,
937 regulator_bulk_free(desc->num_charger_regulators, 935 desc->charger_regulators);
938 desc->charger_regulators);
939err_bulk_get: 936err_bulk_get:
940 power_supply_unregister(&cm->charger_psy); 937 power_supply_unregister(&cm->charger_psy);
941err_register: 938err_register:
@@ -961,10 +958,8 @@ static int __devexit charger_manager_remove(struct platform_device *pdev)
961 list_del(&cm->entry); 958 list_del(&cm->entry);
962 mutex_unlock(&cm_list_mtx); 959 mutex_unlock(&cm_list_mtx);
963 960
964 if (desc->charger_regulators) 961 regulator_bulk_free(desc->num_charger_regulators,
965 regulator_bulk_free(desc->num_charger_regulators, 962 desc->charger_regulators);
966 desc->charger_regulators);
967
968 power_supply_unregister(&cm->charger_psy); 963 power_supply_unregister(&cm->charger_psy);
969 kfree(cm->charger_psy.properties); 964 kfree(cm->charger_psy.properties);
970 kfree(cm->charger_stat); 965 kfree(cm->charger_stat);
@@ -982,9 +977,7 @@ MODULE_DEVICE_TABLE(platform, charger_manager_id);
982 977
983static int cm_suspend_prepare(struct device *dev) 978static int cm_suspend_prepare(struct device *dev)
984{ 979{
985 struct platform_device *pdev = container_of(dev, struct platform_device, 980 struct charger_manager *cm = dev_get_drvdata(dev);
986 dev);
987 struct charger_manager *cm = platform_get_drvdata(pdev);
988 981
989 if (!cm_suspended) { 982 if (!cm_suspended) {
990 if (rtc_dev) { 983 if (rtc_dev) {
@@ -1020,9 +1013,7 @@ static int cm_suspend_prepare(struct device *dev)
1020 1013
1021static void cm_suspend_complete(struct device *dev) 1014static void cm_suspend_complete(struct device *dev)
1022{ 1015{
1023 struct platform_device *pdev = container_of(dev, struct platform_device, 1016 struct charger_manager *cm = dev_get_drvdata(dev);
1024 dev);
1025 struct charger_manager *cm = platform_get_drvdata(pdev);
1026 1017
1027 if (cm_suspended) { 1018 if (cm_suspended) {
1028 if (rtc_dev) { 1019 if (rtc_dev) {
diff --git a/drivers/power/da9052-battery.c b/drivers/power/da9052-battery.c
index e8ea47a53dee..a5f6a0ec1572 100644
--- a/drivers/power/da9052-battery.c
+++ b/drivers/power/da9052-battery.c
@@ -612,6 +612,7 @@ static s32 __devinit da9052_bat_probe(struct platform_device *pdev)
612 if (ret) 612 if (ret)
613 goto err; 613 goto err;
614 614
615 platform_set_drvdata(pdev, bat);
615 return 0; 616 return 0;
616 617
617err: 618err:
@@ -633,6 +634,7 @@ static int __devexit da9052_bat_remove(struct platform_device *pdev)
633 free_irq(bat->da9052->irq_base + irq, bat); 634 free_irq(bat->da9052->irq_base + irq, bat);
634 } 635 }
635 power_supply_unregister(&bat->psy); 636 power_supply_unregister(&bat->psy);
637 kfree(bat);
636 638
637 return 0; 639 return 0;
638} 640}
@@ -645,18 +647,7 @@ static struct platform_driver da9052_bat_driver = {
645 .owner = THIS_MODULE, 647 .owner = THIS_MODULE,
646 }, 648 },
647}; 649};
648 650module_platform_driver(da9052_bat_driver);
649static int __init da9052_bat_init(void)
650{
651 return platform_driver_register(&da9052_bat_driver);
652}
653module_init(da9052_bat_init);
654
655static void __exit da9052_bat_exit(void)
656{
657 platform_driver_unregister(&da9052_bat_driver);
658}
659module_exit(da9052_bat_exit);
660 651
661MODULE_DESCRIPTION("DA9052 BAT Device Driver"); 652MODULE_DESCRIPTION("DA9052 BAT Device Driver");
662MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>"); 653MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index bfbce5de49da..6bb6e2f5ea81 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -403,18 +403,7 @@ static struct i2c_driver ds278x_battery_driver = {
403 .remove = ds278x_battery_remove, 403 .remove = ds278x_battery_remove,
404 .id_table = ds278x_id, 404 .id_table = ds278x_id,
405}; 405};
406 406module_i2c_driver(ds278x_battery_driver);
407static int __init ds278x_init(void)
408{
409 return i2c_add_driver(&ds278x_battery_driver);
410}
411module_init(ds278x_init);
412
413static void __exit ds278x_exit(void)
414{
415 i2c_del_driver(&ds278x_battery_driver);
416}
417module_exit(ds278x_exit);
418 407
419MODULE_AUTHOR("Ryan Mallon"); 408MODULE_AUTHOR("Ryan Mallon");
420MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauage IC driver"); 409MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauage IC driver");
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 1289a5f790a1..39eb50f35f09 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -480,6 +480,7 @@ fail0:
480 480
481 dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret); 481 dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
482 482
483 isp1704_charger_set_power(isp, 0);
483 return ret; 484 return ret;
484} 485}
485 486
diff --git a/drivers/power/lp8727_charger.c b/drivers/power/lp8727_charger.c
index c53dd1292f81..d8b75780bfef 100644
--- a/drivers/power/lp8727_charger.c
+++ b/drivers/power/lp8727_charger.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Driver for LP8727 Micro/Mini USB IC with intergrated charger 2 * Driver for LP8727 Micro/Mini USB IC with integrated charger
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments
4 * Copyright (C) 2011 National Semiconductor 5 * Copyright (C) 2011 National Semiconductor
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -25,7 +26,7 @@
25#define INT1 0x4 26#define INT1 0x4
26#define INT2 0x5 27#define INT2 0x5
27#define STATUS1 0x6 28#define STATUS1 0x6
28#define STATUS2 0x7 29#define STATUS2 0x7
29#define CHGCTRL2 0x9 30#define CHGCTRL2 0x9
30 31
31/* CTRL1 register */ 32/* CTRL1 register */
@@ -91,7 +92,7 @@ struct lp8727_chg {
91 enum lp8727_dev_id devid; 92 enum lp8727_dev_id devid;
92}; 93};
93 94
94static int lp8727_i2c_read(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len) 95static int lp8727_read_bytes(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len)
95{ 96{
96 s32 ret; 97 s32 ret;
97 98
@@ -102,29 +103,22 @@ static int lp8727_i2c_read(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len)
102 return (ret != len) ? -EIO : 0; 103 return (ret != len) ? -EIO : 0;
103} 104}
104 105
105static int lp8727_i2c_write(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len) 106static inline int lp8727_read_byte(struct lp8727_chg *pchg, u8 reg, u8 *data)
106{ 107{
107 s32 ret; 108 return lp8727_read_bytes(pchg, reg, data, 1);
109}
110
111static int lp8727_write_byte(struct lp8727_chg *pchg, u8 reg, u8 data)
112{
113 int ret;
108 114
109 mutex_lock(&pchg->xfer_lock); 115 mutex_lock(&pchg->xfer_lock);
110 ret = i2c_smbus_write_i2c_block_data(pchg->client, reg, len, data); 116 ret = i2c_smbus_write_byte_data(pchg->client, reg, data);
111 mutex_unlock(&pchg->xfer_lock); 117 mutex_unlock(&pchg->xfer_lock);
112 118
113 return ret; 119 return ret;
114} 120}
115 121
116static inline int lp8727_i2c_read_byte(struct lp8727_chg *pchg, u8 reg,
117 u8 *data)
118{
119 return lp8727_i2c_read(pchg, reg, data, 1);
120}
121
122static inline int lp8727_i2c_write_byte(struct lp8727_chg *pchg, u8 reg,
123 u8 *data)
124{
125 return lp8727_i2c_write(pchg, reg, data, 1);
126}
127
128static int lp8727_is_charger_attached(const char *name, int id) 122static int lp8727_is_charger_attached(const char *name, int id)
129{ 123{
130 if (name) { 124 if (name) {
@@ -137,37 +131,41 @@ static int lp8727_is_charger_attached(const char *name, int id)
137 return (id >= ID_TA && id <= ID_USB_CHG) ? 1 : 0; 131 return (id >= ID_TA && id <= ID_USB_CHG) ? 1 : 0;
138} 132}
139 133
140static void lp8727_init_device(struct lp8727_chg *pchg) 134static int lp8727_init_device(struct lp8727_chg *pchg)
141{ 135{
142 u8 val; 136 u8 val;
137 int ret;
143 138
144 val = ID200_EN | ADC_EN | CP_EN; 139 val = ID200_EN | ADC_EN | CP_EN;
145 if (lp8727_i2c_write_byte(pchg, CTRL1, &val)) 140 ret = lp8727_write_byte(pchg, CTRL1, val);
146 dev_err(pchg->dev, "i2c write err : addr=0x%.2x\n", CTRL1); 141 if (ret)
142 return ret;
147 143
148 val = INT_EN | CHGDET_EN; 144 val = INT_EN | CHGDET_EN;
149 if (lp8727_i2c_write_byte(pchg, CTRL2, &val)) 145 ret = lp8727_write_byte(pchg, CTRL2, val);
150 dev_err(pchg->dev, "i2c write err : addr=0x%.2x\n", CTRL2); 146 if (ret)
147 return ret;
148
149 return 0;
151} 150}
152 151
153static int lp8727_is_dedicated_charger(struct lp8727_chg *pchg) 152static int lp8727_is_dedicated_charger(struct lp8727_chg *pchg)
154{ 153{
155 u8 val; 154 u8 val;
156 lp8727_i2c_read_byte(pchg, STATUS1, &val); 155 lp8727_read_byte(pchg, STATUS1, &val);
157 return (val & DCPORT); 156 return val & DCPORT;
158} 157}
159 158
160static int lp8727_is_usb_charger(struct lp8727_chg *pchg) 159static int lp8727_is_usb_charger(struct lp8727_chg *pchg)
161{ 160{
162 u8 val; 161 u8 val;
163 lp8727_i2c_read_byte(pchg, STATUS1, &val); 162 lp8727_read_byte(pchg, STATUS1, &val);
164 return (val & CHPORT); 163 return val & CHPORT;
165} 164}
166 165
167static void lp8727_ctrl_switch(struct lp8727_chg *pchg, u8 sw) 166static void lp8727_ctrl_switch(struct lp8727_chg *pchg, u8 sw)
168{ 167{
169 u8 val = sw; 168 lp8727_write_byte(pchg, SWCTRL, sw);
170 lp8727_i2c_write_byte(pchg, SWCTRL, &val);
171} 169}
172 170
173static void lp8727_id_detection(struct lp8727_chg *pchg, u8 id, int vbusin) 171static void lp8727_id_detection(struct lp8727_chg *pchg, u8 id, int vbusin)
@@ -207,9 +205,9 @@ static void lp8727_enable_chgdet(struct lp8727_chg *pchg)
207{ 205{
208 u8 val; 206 u8 val;
209 207
210 lp8727_i2c_read_byte(pchg, CTRL2, &val); 208 lp8727_read_byte(pchg, CTRL2, &val);
211 val |= CHGDET_EN; 209 val |= CHGDET_EN;
212 lp8727_i2c_write_byte(pchg, CTRL2, &val); 210 lp8727_write_byte(pchg, CTRL2, val);
213} 211}
214 212
215static void lp8727_delayed_func(struct work_struct *_work) 213static void lp8727_delayed_func(struct work_struct *_work)
@@ -218,7 +216,7 @@ static void lp8727_delayed_func(struct work_struct *_work)
218 struct lp8727_chg *pchg = 216 struct lp8727_chg *pchg =
219 container_of(_work, struct lp8727_chg, work.work); 217 container_of(_work, struct lp8727_chg, work.work);
220 218
221 if (lp8727_i2c_read(pchg, INT1, intstat, 2)) { 219 if (lp8727_read_bytes(pchg, INT1, intstat, 2)) {
222 dev_err(pchg->dev, "can not read INT registers\n"); 220 dev_err(pchg->dev, "can not read INT registers\n");
223 return; 221 return;
224 } 222 }
@@ -244,20 +242,22 @@ static irqreturn_t lp8727_isr_func(int irq, void *ptr)
244 return IRQ_HANDLED; 242 return IRQ_HANDLED;
245} 243}
246 244
247static void lp8727_intr_config(struct lp8727_chg *pchg) 245static int lp8727_intr_config(struct lp8727_chg *pchg)
248{ 246{
249 INIT_DELAYED_WORK(&pchg->work, lp8727_delayed_func); 247 INIT_DELAYED_WORK(&pchg->work, lp8727_delayed_func);
250 248
251 pchg->irqthread = create_singlethread_workqueue("lp8727-irqthd"); 249 pchg->irqthread = create_singlethread_workqueue("lp8727-irqthd");
252 if (!pchg->irqthread) 250 if (!pchg->irqthread) {
253 dev_err(pchg->dev, "can not create thread for lp8727\n"); 251 dev_err(pchg->dev, "can not create thread for lp8727\n");
254 252 return -ENOMEM;
255 if (request_threaded_irq(pchg->client->irq,
256 NULL,
257 lp8727_isr_func,
258 IRQF_TRIGGER_FALLING, "lp8727_irq", pchg)) {
259 dev_err(pchg->dev, "lp8727 irq can not be registered\n");
260 } 253 }
254
255 return request_threaded_irq(pchg->client->irq,
256 NULL,
257 lp8727_isr_func,
258 IRQF_TRIGGER_FALLING,
259 "lp8727_irq",
260 pchg);
261} 261}
262 262
263static enum power_supply_property lp8727_charger_prop[] = { 263static enum power_supply_property lp8727_charger_prop[] = {
@@ -300,7 +300,7 @@ static int lp8727_battery_get_property(struct power_supply *psy,
300 switch (psp) { 300 switch (psp) {
301 case POWER_SUPPLY_PROP_STATUS: 301 case POWER_SUPPLY_PROP_STATUS:
302 if (lp8727_is_charger_attached(psy->name, pchg->devid)) { 302 if (lp8727_is_charger_attached(psy->name, pchg->devid)) {
303 lp8727_i2c_read_byte(pchg, STATUS1, &read); 303 lp8727_read_byte(pchg, STATUS1, &read);
304 if (((read & CHGSTAT) >> 4) == EOC) 304 if (((read & CHGSTAT) >> 4) == EOC)
305 val->intval = POWER_SUPPLY_STATUS_FULL; 305 val->intval = POWER_SUPPLY_STATUS_FULL;
306 else 306 else
@@ -310,7 +310,7 @@ static int lp8727_battery_get_property(struct power_supply *psy,
310 } 310 }
311 break; 311 break;
312 case POWER_SUPPLY_PROP_HEALTH: 312 case POWER_SUPPLY_PROP_HEALTH:
313 lp8727_i2c_read_byte(pchg, STATUS2, &read); 313 lp8727_read_byte(pchg, STATUS2, &read);
314 read = (read & TEMP_STAT) >> 5; 314 read = (read & TEMP_STAT) >> 5;
315 if (read >= 0x1 && read <= 0x3) 315 if (read >= 0x1 && read <= 0x3)
316 val->intval = POWER_SUPPLY_HEALTH_OVERHEAT; 316 val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
@@ -351,7 +351,7 @@ static void lp8727_charger_changed(struct power_supply *psy)
351 eoc_level = pchg->chg_parm->eoc_level; 351 eoc_level = pchg->chg_parm->eoc_level;
352 ichg = pchg->chg_parm->ichg; 352 ichg = pchg->chg_parm->ichg;
353 val = (ichg << 4) | eoc_level; 353 val = (ichg << 4) | eoc_level;
354 lp8727_i2c_write_byte(pchg, CHGCTRL2, &val); 354 lp8727_write_byte(pchg, CHGCTRL2, val);
355 } 355 }
356 } 356 }
357} 357}
@@ -439,15 +439,29 @@ static int lp8727_probe(struct i2c_client *cl, const struct i2c_device_id *id)
439 439
440 mutex_init(&pchg->xfer_lock); 440 mutex_init(&pchg->xfer_lock);
441 441
442 lp8727_init_device(pchg); 442 ret = lp8727_init_device(pchg);
443 lp8727_intr_config(pchg); 443 if (ret) {
444 dev_err(pchg->dev, "i2c communication err: %d", ret);
445 goto error;
446 }
447
448 ret = lp8727_intr_config(pchg);
449 if (ret) {
450 dev_err(pchg->dev, "irq handler err: %d", ret);
451 goto error;
452 }
444 453
445 ret = lp8727_register_psy(pchg); 454 ret = lp8727_register_psy(pchg);
446 if (ret) 455 if (ret) {
447 dev_err(pchg->dev, 456 dev_err(pchg->dev, "power supplies register err: %d", ret);
448 "can not register power supplies. err=%d", ret); 457 goto error;
458 }
449 459
450 return 0; 460 return 0;
461
462error:
463 kfree(pchg);
464 return ret;
451} 465}
452 466
453static int __devexit lp8727_remove(struct i2c_client *cl) 467static int __devexit lp8727_remove(struct i2c_client *cl)
@@ -466,6 +480,7 @@ static const struct i2c_device_id lp8727_ids[] = {
466 {"lp8727", 0}, 480 {"lp8727", 0},
467 { } 481 { }
468}; 482};
483MODULE_DEVICE_TABLE(i2c, lp8727_ids);
469 484
470static struct i2c_driver lp8727_driver = { 485static struct i2c_driver lp8727_driver = {
471 .driver = { 486 .driver = {
@@ -475,21 +490,9 @@ static struct i2c_driver lp8727_driver = {
475 .remove = __devexit_p(lp8727_remove), 490 .remove = __devexit_p(lp8727_remove),
476 .id_table = lp8727_ids, 491 .id_table = lp8727_ids,
477}; 492};
493module_i2c_driver(lp8727_driver);
478 494
479static int __init lp8727_init(void) 495MODULE_DESCRIPTION("TI/National Semiconductor LP8727 charger driver");
480{ 496MODULE_AUTHOR("Woogyom Kim <milo.kim@ti.com>, "
481 return i2c_add_driver(&lp8727_driver); 497 "Daniel Jeong <daniel.jeong@ti.com>");
482}
483
484static void __exit lp8727_exit(void)
485{
486 i2c_del_driver(&lp8727_driver);
487}
488
489module_init(lp8727_init);
490module_exit(lp8727_exit);
491
492MODULE_DESCRIPTION("National Semiconductor LP8727 charger driver");
493MODULE_AUTHOR
494 ("Woogyom Kim <milo.kim@ti.com>, Daniel Jeong <daniel.jeong@ti.com>");
495MODULE_LICENSE("GPL"); 498MODULE_LICENSE("GPL");
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index 2f2f9a6f54fa..c284143cfcd7 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -290,18 +290,7 @@ static struct i2c_driver max17040_i2c_driver = {
290 .resume = max17040_resume, 290 .resume = max17040_resume,
291 .id_table = max17040_id, 291 .id_table = max17040_id,
292}; 292};
293 293module_i2c_driver(max17040_i2c_driver);
294static int __init max17040_init(void)
295{
296 return i2c_add_driver(&max17040_i2c_driver);
297}
298module_init(max17040_init);
299
300static void __exit max17040_exit(void)
301{
302 i2c_del_driver(&max17040_i2c_driver);
303}
304module_exit(max17040_exit);
305 294
306MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>"); 295MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>");
307MODULE_DESCRIPTION("MAX17040 Fuel Gauge"); 296MODULE_DESCRIPTION("MAX17040 Fuel Gauge");
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 86acee2f9889..04620c2cb388 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -26,14 +26,47 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/delay.h>
30#include <linux/interrupt.h>
29#include <linux/mod_devicetable.h> 31#include <linux/mod_devicetable.h>
30#include <linux/power_supply.h> 32#include <linux/power_supply.h>
31#include <linux/power/max17042_battery.h> 33#include <linux/power/max17042_battery.h>
34#include <linux/of.h>
35
36/* Status register bits */
37#define STATUS_POR_BIT (1 << 1)
38#define STATUS_BST_BIT (1 << 3)
39#define STATUS_VMN_BIT (1 << 8)
40#define STATUS_TMN_BIT (1 << 9)
41#define STATUS_SMN_BIT (1 << 10)
42#define STATUS_BI_BIT (1 << 11)
43#define STATUS_VMX_BIT (1 << 12)
44#define STATUS_TMX_BIT (1 << 13)
45#define STATUS_SMX_BIT (1 << 14)
46#define STATUS_BR_BIT (1 << 15)
47
48/* Interrupt mask bits */
49#define CONFIG_ALRT_BIT_ENBL (1 << 2)
50#define STATUS_INTR_SOCMIN_BIT (1 << 10)
51#define STATUS_INTR_SOCMAX_BIT (1 << 14)
52
53#define VFSOC0_LOCK 0x0000
54#define VFSOC0_UNLOCK 0x0080
55#define MODEL_UNLOCK1 0X0059
56#define MODEL_UNLOCK2 0X00C4
57#define MODEL_LOCK1 0X0000
58#define MODEL_LOCK2 0X0000
59
60#define dQ_ACC_DIV 0x4
61#define dP_ACC_100 0x1900
62#define dP_ACC_200 0x3200
32 63
33struct max17042_chip { 64struct max17042_chip {
34 struct i2c_client *client; 65 struct i2c_client *client;
35 struct power_supply battery; 66 struct power_supply battery;
36 struct max17042_platform_data *pdata; 67 struct max17042_platform_data *pdata;
68 struct work_struct work;
69 int init_complete;
37}; 70};
38 71
39static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value) 72static int max17042_write_reg(struct i2c_client *client, u8 reg, u16 value)
@@ -87,6 +120,9 @@ static int max17042_get_property(struct power_supply *psy,
87 struct max17042_chip, battery); 120 struct max17042_chip, battery);
88 int ret; 121 int ret;
89 122
123 if (!chip->init_complete)
124 return -EAGAIN;
125
90 switch (psp) { 126 switch (psp) {
91 case POWER_SUPPLY_PROP_PRESENT: 127 case POWER_SUPPLY_PROP_PRESENT:
92 ret = max17042_read_reg(chip->client, MAX17042_STATUS); 128 ret = max17042_read_reg(chip->client, MAX17042_STATUS);
@@ -136,21 +172,18 @@ static int max17042_get_property(struct power_supply *psy,
136 val->intval = ret * 625 / 8; 172 val->intval = ret * 625 / 8;
137 break; 173 break;
138 case POWER_SUPPLY_PROP_CAPACITY: 174 case POWER_SUPPLY_PROP_CAPACITY:
139 ret = max17042_read_reg(chip->client, MAX17042_SOC); 175 ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
140 if (ret < 0) 176 if (ret < 0)
141 return ret; 177 return ret;
142 178
143 val->intval = ret >> 8; 179 val->intval = ret >> 8;
144 break; 180 break;
145 case POWER_SUPPLY_PROP_CHARGE_FULL: 181 case POWER_SUPPLY_PROP_CHARGE_FULL:
146 ret = max17042_read_reg(chip->client, MAX17042_RepSOC); 182 ret = max17042_read_reg(chip->client, MAX17042_FullCAP);
147 if (ret < 0) 183 if (ret < 0)
148 return ret; 184 return ret;
149 185
150 if ((ret >> 8) >= MAX17042_BATTERY_FULL) 186 val->intval = ret * 1000 / 2;
151 val->intval = 1;
152 else if (ret >= 0)
153 val->intval = 0;
154 break; 187 break;
155 case POWER_SUPPLY_PROP_TEMP: 188 case POWER_SUPPLY_PROP_TEMP:
156 ret = max17042_read_reg(chip->client, MAX17042_TEMP); 189 ret = max17042_read_reg(chip->client, MAX17042_TEMP);
@@ -210,22 +243,419 @@ static int max17042_get_property(struct power_supply *psy,
210 return 0; 243 return 0;
211} 244}
212 245
246static int max17042_write_verify_reg(struct i2c_client *client,
247 u8 reg, u16 value)
248{
249 int retries = 8;
250 int ret;
251 u16 read_value;
252
253 do {
254 ret = i2c_smbus_write_word_data(client, reg, value);
255 read_value = max17042_read_reg(client, reg);
256 if (read_value != value) {
257 ret = -EIO;
258 retries--;
259 }
260 } while (retries && read_value != value);
261
262 if (ret < 0)
263 dev_err(&client->dev, "%s: err %d\n", __func__, ret);
264
265 return ret;
266}
267
268static inline void max17042_override_por(
269 struct i2c_client *client, u8 reg, u16 value)
270{
271 if (value)
272 max17042_write_reg(client, reg, value);
273}
274
275static inline void max10742_unlock_model(struct max17042_chip *chip)
276{
277 struct i2c_client *client = chip->client;
278 max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_UNLOCK1);
279 max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_UNLOCK2);
280}
281
282static inline void max10742_lock_model(struct max17042_chip *chip)
283{
284 struct i2c_client *client = chip->client;
285 max17042_write_reg(client, MAX17042_MLOCKReg1, MODEL_LOCK1);
286 max17042_write_reg(client, MAX17042_MLOCKReg2, MODEL_LOCK2);
287}
288
289static inline void max17042_write_model_data(struct max17042_chip *chip,
290 u8 addr, int size)
291{
292 struct i2c_client *client = chip->client;
293 int i;
294 for (i = 0; i < size; i++)
295 max17042_write_reg(client, addr + i,
296 chip->pdata->config_data->cell_char_tbl[i]);
297}
298
299static inline void max17042_read_model_data(struct max17042_chip *chip,
300 u8 addr, u16 *data, int size)
301{
302 struct i2c_client *client = chip->client;
303 int i;
304
305 for (i = 0; i < size; i++)
306 data[i] = max17042_read_reg(client, addr + i);
307}
308
309static inline int max17042_model_data_compare(struct max17042_chip *chip,
310 u16 *data1, u16 *data2, int size)
311{
312 int i;
313
314 if (memcmp(data1, data2, size)) {
315 dev_err(&chip->client->dev, "%s compare failed\n", __func__);
316 for (i = 0; i < size; i++)
317 dev_info(&chip->client->dev, "0x%x, 0x%x",
318 data1[i], data2[i]);
319 dev_info(&chip->client->dev, "\n");
320 return -EINVAL;
321 }
322 return 0;
323}
324
325static int max17042_init_model(struct max17042_chip *chip)
326{
327 int ret;
328 int table_size =
329 sizeof(chip->pdata->config_data->cell_char_tbl)/sizeof(u16);
330 u16 *temp_data;
331
332 temp_data = kzalloc(table_size, GFP_KERNEL);
333 if (!temp_data)
334 return -ENOMEM;
335
336 max10742_unlock_model(chip);
337 max17042_write_model_data(chip, MAX17042_MODELChrTbl,
338 table_size);
339 max17042_read_model_data(chip, MAX17042_MODELChrTbl, temp_data,
340 table_size);
341
342 ret = max17042_model_data_compare(
343 chip,
344 chip->pdata->config_data->cell_char_tbl,
345 temp_data,
346 table_size);
347
348 max10742_lock_model(chip);
349 kfree(temp_data);
350
351 return ret;
352}
353
354static int max17042_verify_model_lock(struct max17042_chip *chip)
355{
356 int i;
357 int table_size =
358 sizeof(chip->pdata->config_data->cell_char_tbl);
359 u16 *temp_data;
360 int ret = 0;
361
362 temp_data = kzalloc(table_size, GFP_KERNEL);
363 if (!temp_data)
364 return -ENOMEM;
365
366 max17042_read_model_data(chip, MAX17042_MODELChrTbl, temp_data,
367 table_size);
368 for (i = 0; i < table_size; i++)
369 if (temp_data[i])
370 ret = -EINVAL;
371
372 kfree(temp_data);
373 return ret;
374}
375
376static void max17042_write_config_regs(struct max17042_chip *chip)
377{
378 struct max17042_config_data *config = chip->pdata->config_data;
379
380 max17042_write_reg(chip->client, MAX17042_CONFIG, config->config);
381 max17042_write_reg(chip->client, MAX17042_LearnCFG, config->learn_cfg);
382 max17042_write_reg(chip->client, MAX17042_FilterCFG,
383 config->filter_cfg);
384 max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg);
385}
386
387static void max17042_write_custom_regs(struct max17042_chip *chip)
388{
389 struct max17042_config_data *config = chip->pdata->config_data;
390
391 max17042_write_verify_reg(chip->client, MAX17042_RCOMP0,
392 config->rcomp0);
393 max17042_write_verify_reg(chip->client, MAX17042_TempCo,
394 config->tcompc0);
395 max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
396 config->empty_tempco);
397 max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
398 config->kempty0);
399 max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
400 config->ichgt_term);
401}
402
403static void max17042_update_capacity_regs(struct max17042_chip *chip)
404{
405 struct max17042_config_data *config = chip->pdata->config_data;
406
407 max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
408 config->fullcap);
409 max17042_write_reg(chip->client, MAX17042_DesignCap,
410 config->design_cap);
411 max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
412 config->fullcapnom);
413}
414
415static void max17042_reset_vfsoc0_reg(struct max17042_chip *chip)
416{
417 u16 vfSoc;
418
419 vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
420 max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK);
421 max17042_write_verify_reg(chip->client, MAX17042_VFSOC0, vfSoc);
422 max17042_write_reg(chip->client, MAX17042_VFSOC0Enable, VFSOC0_LOCK);
423}
424
425static void max17042_load_new_capacity_params(struct max17042_chip *chip)
426{
427 u16 full_cap0, rep_cap, dq_acc, vfSoc;
428 u32 rem_cap;
429
430 struct max17042_config_data *config = chip->pdata->config_data;
431
432 full_cap0 = max17042_read_reg(chip->client, MAX17042_FullCAP0);
433 vfSoc = max17042_read_reg(chip->client, MAX17042_VFSOC);
434
435 /* fg_vfSoc needs to shifted by 8 bits to get the
436 * perc in 1% accuracy, to get the right rem_cap multiply
437 * full_cap0, fg_vfSoc and devide by 100
438 */
439 rem_cap = ((vfSoc >> 8) * full_cap0) / 100;
440 max17042_write_verify_reg(chip->client, MAX17042_RemCap, (u16)rem_cap);
441
442 rep_cap = (u16)rem_cap;
443 max17042_write_verify_reg(chip->client, MAX17042_RepCap, rep_cap);
444
445 /* Write dQ_acc to 200% of Capacity and dP_acc to 200% */
446 dq_acc = config->fullcap / dQ_ACC_DIV;
447 max17042_write_verify_reg(chip->client, MAX17042_dQacc, dq_acc);
448 max17042_write_verify_reg(chip->client, MAX17042_dPacc, dP_ACC_200);
449
450 max17042_write_verify_reg(chip->client, MAX17042_FullCAP,
451 config->fullcap);
452 max17042_write_reg(chip->client, MAX17042_DesignCap,
453 config->design_cap);
454 max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
455 config->fullcapnom);
456}
457
/*
 * Block write all the override values coming from platform data.
 * This function MUST be called before the POR initialization procedure
 * specified by Maxim.  Each helper only writes the register when the
 * corresponding platform value is set (see max17042_override_por()).
 */
static inline void max17042_override_por_values(struct max17042_chip *chip)
{
	struct i2c_client *client = chip->client;
	struct max17042_config_data *config = chip->pdata->config_data;

	/* thermistor / current-sense gain and offset calibration */
	max17042_override_por(client, MAX17042_TGAIN, config->tgain);
	max17042_override_por(client, MAx17042_TOFF, config->toff);
	max17042_override_por(client, MAX17042_CGAIN, config->cgain);
	max17042_override_por(client, MAX17042_COFF, config->coff);

	/* alert thresholds and basic chip configuration */
	max17042_override_por(client, MAX17042_VALRT_Th, config->valrt_thresh);
	max17042_override_por(client, MAX17042_TALRT_Th, config->talrt_thresh);
	max17042_override_por(client, MAX17042_SALRT_Th,
			config->soc_alrt_thresh);
	max17042_override_por(client, MAX17042_CONFIG, config->config);
	max17042_override_por(client, MAX17042_SHDNTIMER, config->shdntimer);

	/* pack capacity and charge-termination current */
	max17042_override_por(client, MAX17042_DesignCap, config->design_cap);
	max17042_override_por(client, MAX17042_ICHGTerm, config->ichgt_term);

	/* fuel-gauge algorithm tuning */
	max17042_override_por(client, MAX17042_AtRate, config->at_rate);
	max17042_override_por(client, MAX17042_LearnCFG, config->learn_cfg);
	max17042_override_por(client, MAX17042_FilterCFG, config->filter_cfg);
	max17042_override_por(client, MAX17042_RelaxCFG, config->relax_cfg);
	max17042_override_por(client, MAX17042_MiscCFG, config->misc_cfg);
	max17042_override_por(client, MAX17042_MaskSOC, config->masksoc);

	/* capacity learning state */
	max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
	max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
	max17042_override_por(client, MAX17042_SOC_empty, config->socempty);
	max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
	max17042_override_por(client, MAX17042_dQacc, config->dqacc);
	max17042_override_por(client, MAX17042_dPacc, config->dpacc);

	/* cell characterization / temperature compensation */
	max17042_override_por(client, MAX17042_V_empty, config->vempty);
	max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
	max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
	max17042_override_por(client, MAX17042_FCTC, config->fctc);
	max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
	max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
	max17042_override_por(client, MAX17042_EmptyTempCo,
			config->empty_tempco);
	max17042_override_por(client, MAX17042_K_empty0, config->kempty0);
}
507
508static int max17042_init_chip(struct max17042_chip *chip)
509{
510 int ret;
511 int val;
512
513 max17042_override_por_values(chip);
514 /* After Power up, the MAX17042 requires 500mS in order
515 * to perform signal debouncing and initial SOC reporting
516 */
517 msleep(500);
518
519 /* Initialize configaration */
520 max17042_write_config_regs(chip);
521
522 /* write cell characterization data */
523 ret = max17042_init_model(chip);
524 if (ret) {
525 dev_err(&chip->client->dev, "%s init failed\n",
526 __func__);
527 return -EIO;
528 }
529 max17042_verify_model_lock(chip);
530 if (ret) {
531 dev_err(&chip->client->dev, "%s lock verify failed\n",
532 __func__);
533 return -EIO;
534 }
535 /* write custom parameters */
536 max17042_write_custom_regs(chip);
537
538 /* update capacity params */
539 max17042_update_capacity_regs(chip);
540
541 /* delay must be atleast 350mS to allow VFSOC
542 * to be calculated from the new configuration
543 */
544 msleep(350);
545
546 /* reset vfsoc0 reg */
547 max17042_reset_vfsoc0_reg(chip);
548
549 /* load new capacity params */
550 max17042_load_new_capacity_params(chip);
551
552 /* Init complete, Clear the POR bit */
553 val = max17042_read_reg(chip->client, MAX17042_STATUS);
554 max17042_write_reg(chip->client, MAX17042_STATUS,
555 val & (~STATUS_POR_BIT));
556 return 0;
557}
558
559static void max17042_set_soc_threshold(struct max17042_chip *chip, u16 off)
560{
561 u16 soc, soc_tr;
562
563 /* program interrupt thesholds such that we should
564 * get interrupt for every 'off' perc change in the soc
565 */
566 soc = max17042_read_reg(chip->client, MAX17042_RepSOC) >> 8;
567 soc_tr = (soc + off) << 8;
568 soc_tr |= (soc - off);
569 max17042_write_reg(chip->client, MAX17042_SALRT_Th, soc_tr);
570}
571
572static irqreturn_t max17042_thread_handler(int id, void *dev)
573{
574 struct max17042_chip *chip = dev;
575 u16 val;
576
577 val = max17042_read_reg(chip->client, MAX17042_STATUS);
578 if ((val & STATUS_INTR_SOCMIN_BIT) ||
579 (val & STATUS_INTR_SOCMAX_BIT)) {
580 dev_info(&chip->client->dev, "SOC threshold INTR\n");
581 max17042_set_soc_threshold(chip, 1);
582 }
583
584 power_supply_changed(&chip->battery);
585 return IRQ_HANDLED;
586}
587
588static void max17042_init_worker(struct work_struct *work)
589{
590 struct max17042_chip *chip = container_of(work,
591 struct max17042_chip, work);
592 int ret;
593
594 /* Initialize registers according to values from the platform data */
595 if (chip->pdata->enable_por_init && chip->pdata->config_data) {
596 ret = max17042_init_chip(chip);
597 if (ret)
598 return;
599 }
600
601 chip->init_complete = 1;
602}
603
604#ifdef CONFIG_OF
605static struct max17042_platform_data *
606max17042_get_pdata(struct device *dev)
607{
608 struct device_node *np = dev->of_node;
609 u32 prop;
610 struct max17042_platform_data *pdata;
611
612 if (!np)
613 return dev->platform_data;
614
615 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
616 if (!pdata)
617 return NULL;
618
619 /*
620 * Require current sense resistor value to be specified for
621 * current-sense functionality to be enabled at all.
622 */
623 if (of_property_read_u32(np, "maxim,rsns-microohm", &prop) == 0) {
624 pdata->r_sns = prop;
625 pdata->enable_current_sense = true;
626 }
627
628 return pdata;
629}
630#else
static struct max17042_platform_data *
max17042_get_pdata(struct device *dev)
{
	/* No OF support: only board-file supplied platform data is used */
	return dev->platform_data;
}
636#endif
637
213static int __devinit max17042_probe(struct i2c_client *client, 638static int __devinit max17042_probe(struct i2c_client *client,
214 const struct i2c_device_id *id) 639 const struct i2c_device_id *id)
215{ 640{
216 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); 641 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
217 struct max17042_chip *chip; 642 struct max17042_chip *chip;
218 int ret; 643 int ret;
644 int reg;
219 645
220 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) 646 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
221 return -EIO; 647 return -EIO;
222 648
223 chip = kzalloc(sizeof(*chip), GFP_KERNEL); 649 chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
224 if (!chip) 650 if (!chip)
225 return -ENOMEM; 651 return -ENOMEM;
226 652
227 chip->client = client; 653 chip->client = client;
228 chip->pdata = client->dev.platform_data; 654 chip->pdata = max17042_get_pdata(&client->dev);
655 if (!chip->pdata) {
656 dev_err(&client->dev, "no platform data provided\n");
657 return -EINVAL;
658 }
229 659
230 i2c_set_clientdata(client, chip); 660 i2c_set_clientdata(client, chip);
231 661
@@ -243,17 +673,9 @@ static int __devinit max17042_probe(struct i2c_client *client,
243 if (chip->pdata->r_sns == 0) 673 if (chip->pdata->r_sns == 0)
244 chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR; 674 chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;
245 675
246 ret = power_supply_register(&client->dev, &chip->battery);
247 if (ret) {
248 dev_err(&client->dev, "failed: power supply register\n");
249 kfree(chip);
250 return ret;
251 }
252
253 /* Initialize registers according to values from the platform data */
254 if (chip->pdata->init_data) 676 if (chip->pdata->init_data)
255 max17042_set_reg(client, chip->pdata->init_data, 677 max17042_set_reg(client, chip->pdata->init_data,
256 chip->pdata->num_init_data); 678 chip->pdata->num_init_data);
257 679
258 if (!chip->pdata->enable_current_sense) { 680 if (!chip->pdata->enable_current_sense) {
259 max17042_write_reg(client, MAX17042_CGAIN, 0x0000); 681 max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
@@ -261,7 +683,34 @@ static int __devinit max17042_probe(struct i2c_client *client,
261 max17042_write_reg(client, MAX17042_LearnCFG, 0x0007); 683 max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
262 } 684 }
263 685
264 return 0; 686 if (client->irq) {
687 ret = request_threaded_irq(client->irq, NULL,
688 max17042_thread_handler,
689 IRQF_TRIGGER_FALLING,
690 chip->battery.name, chip);
691 if (!ret) {
692 reg = max17042_read_reg(client, MAX17042_CONFIG);
693 reg |= CONFIG_ALRT_BIT_ENBL;
694 max17042_write_reg(client, MAX17042_CONFIG, reg);
695 max17042_set_soc_threshold(chip, 1);
696 } else
697 dev_err(&client->dev, "%s(): cannot get IRQ\n",
698 __func__);
699 }
700
701 reg = max17042_read_reg(chip->client, MAX17042_STATUS);
702
703 if (reg & STATUS_POR_BIT) {
704 INIT_WORK(&chip->work, max17042_init_worker);
705 schedule_work(&chip->work);
706 } else {
707 chip->init_complete = 1;
708 }
709
710 ret = power_supply_register(&client->dev, &chip->battery);
711 if (ret)
712 dev_err(&client->dev, "failed: power supply register\n");
713 return ret;
265} 714}
266 715
267static int __devexit max17042_remove(struct i2c_client *client) 716static int __devexit max17042_remove(struct i2c_client *client)
@@ -269,10 +718,17 @@ static int __devexit max17042_remove(struct i2c_client *client)
269 struct max17042_chip *chip = i2c_get_clientdata(client); 718 struct max17042_chip *chip = i2c_get_clientdata(client);
270 719
271 power_supply_unregister(&chip->battery); 720 power_supply_unregister(&chip->battery);
272 kfree(chip);
273 return 0; 721 return 0;
274} 722}
275 723
724#ifdef CONFIG_OF
725static const struct of_device_id max17042_dt_match[] = {
726 { .compatible = "maxim,max17042" },
727 { },
728};
729MODULE_DEVICE_TABLE(of, max17042_dt_match);
730#endif
731
276static const struct i2c_device_id max17042_id[] = { 732static const struct i2c_device_id max17042_id[] = {
277 { "max17042", 0 }, 733 { "max17042", 0 },
278 { } 734 { }
@@ -282,23 +738,13 @@ MODULE_DEVICE_TABLE(i2c, max17042_id);
282static struct i2c_driver max17042_i2c_driver = { 738static struct i2c_driver max17042_i2c_driver = {
283 .driver = { 739 .driver = {
284 .name = "max17042", 740 .name = "max17042",
741 .of_match_table = of_match_ptr(max17042_dt_match),
285 }, 742 },
286 .probe = max17042_probe, 743 .probe = max17042_probe,
287 .remove = __devexit_p(max17042_remove), 744 .remove = __devexit_p(max17042_remove),
288 .id_table = max17042_id, 745 .id_table = max17042_id,
289}; 746};
290 747module_i2c_driver(max17042_i2c_driver);
291static int __init max17042_init(void)
292{
293 return i2c_add_driver(&max17042_i2c_driver);
294}
295module_init(max17042_init);
296
297static void __exit max17042_exit(void)
298{
299 i2c_del_driver(&max17042_i2c_driver);
300}
301module_exit(max17042_exit);
302 748
303MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); 749MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
304MODULE_DESCRIPTION("MAX17042 Fuel Gauge"); 750MODULE_DESCRIPTION("MAX17042 Fuel Gauge");
diff --git a/drivers/power/sbs-battery.c b/drivers/power/sbs-battery.c
index 9ff8af069da6..06b659d91790 100644
--- a/drivers/power/sbs-battery.c
+++ b/drivers/power/sbs-battery.c
@@ -852,18 +852,7 @@ static struct i2c_driver sbs_battery_driver = {
852 .of_match_table = sbs_dt_ids, 852 .of_match_table = sbs_dt_ids,
853 }, 853 },
854}; 854};
855 855module_i2c_driver(sbs_battery_driver);
856static int __init sbs_battery_init(void)
857{
858 return i2c_add_driver(&sbs_battery_driver);
859}
860module_init(sbs_battery_init);
861
862static void __exit sbs_battery_exit(void)
863{
864 i2c_del_driver(&sbs_battery_driver);
865}
866module_exit(sbs_battery_exit);
867 856
868MODULE_DESCRIPTION("SBS battery monitor driver"); 857MODULE_DESCRIPTION("SBS battery monitor driver");
869MODULE_LICENSE("GPL"); 858MODULE_LICENSE("GPL");
diff --git a/drivers/power/smb347-charger.c b/drivers/power/smb347-charger.c
new file mode 100644
index 000000000000..ce1694d1a365
--- /dev/null
+++ b/drivers/power/smb347-charger.c
@@ -0,0 +1,1294 @@
1/*
2 * Summit Microelectronics SMB347 Battery Charger Driver
3 *
4 * Copyright (C) 2011, Intel Corporation
5 *
6 * Authors: Bruce E. Robertson <bruce.e.robertson@intel.com>
7 * Mika Westerberg <mika.westerberg@linux.intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/debugfs.h>
15#include <linux/gpio.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/i2c.h>
21#include <linux/mutex.h>
22#include <linux/power_supply.h>
23#include <linux/power/smb347-charger.h>
24#include <linux/seq_file.h>
25
26/*
27 * Configuration registers. These are mirrored to volatile RAM and can be
28 * written once %CMD_A_ALLOW_WRITE is set in %CMD_A register. They will be
29 * reloaded from non-volatile registers after POR.
30 */
31#define CFG_CHARGE_CURRENT 0x00
32#define CFG_CHARGE_CURRENT_FCC_MASK 0xe0
33#define CFG_CHARGE_CURRENT_FCC_SHIFT 5
34#define CFG_CHARGE_CURRENT_PCC_MASK 0x18
35#define CFG_CHARGE_CURRENT_PCC_SHIFT 3
36#define CFG_CHARGE_CURRENT_TC_MASK 0x07
37#define CFG_CURRENT_LIMIT 0x01
38#define CFG_CURRENT_LIMIT_DC_MASK 0xf0
39#define CFG_CURRENT_LIMIT_DC_SHIFT 4
40#define CFG_CURRENT_LIMIT_USB_MASK 0x0f
41#define CFG_FLOAT_VOLTAGE 0x03
42#define CFG_FLOAT_VOLTAGE_THRESHOLD_MASK 0xc0
43#define CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT 6
44#define CFG_STAT 0x05
45#define CFG_STAT_DISABLED BIT(5)
46#define CFG_STAT_ACTIVE_HIGH BIT(7)
47#define CFG_PIN 0x06
48#define CFG_PIN_EN_CTRL_MASK 0x60
49#define CFG_PIN_EN_CTRL_ACTIVE_HIGH 0x40
50#define CFG_PIN_EN_CTRL_ACTIVE_LOW 0x60
51#define CFG_PIN_EN_APSD_IRQ BIT(1)
52#define CFG_PIN_EN_CHARGER_ERROR BIT(2)
53#define CFG_THERM 0x07
54#define CFG_THERM_SOFT_HOT_COMPENSATION_MASK 0x03
55#define CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT 0
56#define CFG_THERM_SOFT_COLD_COMPENSATION_MASK 0x0c
57#define CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT 2
58#define CFG_THERM_MONITOR_DISABLED BIT(4)
59#define CFG_SYSOK 0x08
60#define CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED BIT(2)
61#define CFG_OTHER 0x09
62#define CFG_OTHER_RID_MASK 0xc0
63#define CFG_OTHER_RID_ENABLED_AUTO_OTG 0xc0
64#define CFG_OTG 0x0a
65#define CFG_OTG_TEMP_THRESHOLD_MASK 0x30
66#define CFG_OTG_TEMP_THRESHOLD_SHIFT 4
67#define CFG_OTG_CC_COMPENSATION_MASK 0xc0
68#define CFG_OTG_CC_COMPENSATION_SHIFT 6
69#define CFG_TEMP_LIMIT 0x0b
70#define CFG_TEMP_LIMIT_SOFT_HOT_MASK 0x03
71#define CFG_TEMP_LIMIT_SOFT_HOT_SHIFT 0
72#define CFG_TEMP_LIMIT_SOFT_COLD_MASK 0x0c
73#define CFG_TEMP_LIMIT_SOFT_COLD_SHIFT 2
74#define CFG_TEMP_LIMIT_HARD_HOT_MASK 0x30
75#define CFG_TEMP_LIMIT_HARD_HOT_SHIFT 4
76#define CFG_TEMP_LIMIT_HARD_COLD_MASK 0xc0
77#define CFG_TEMP_LIMIT_HARD_COLD_SHIFT 6
78#define CFG_FAULT_IRQ 0x0c
79#define CFG_FAULT_IRQ_DCIN_UV BIT(2)
80#define CFG_STATUS_IRQ 0x0d
81#define CFG_STATUS_IRQ_TERMINATION_OR_TAPER BIT(4)
82#define CFG_ADDRESS 0x0e
83
84/* Command registers */
85#define CMD_A 0x30
86#define CMD_A_CHG_ENABLED BIT(1)
87#define CMD_A_SUSPEND_ENABLED BIT(2)
88#define CMD_A_ALLOW_WRITE BIT(7)
89#define CMD_B 0x31
90#define CMD_C 0x33
91
92/* Interrupt Status registers */
93#define IRQSTAT_A 0x35
94#define IRQSTAT_C 0x37
95#define IRQSTAT_C_TERMINATION_STAT BIT(0)
96#define IRQSTAT_C_TERMINATION_IRQ BIT(1)
97#define IRQSTAT_C_TAPER_IRQ BIT(3)
98#define IRQSTAT_E 0x39
99#define IRQSTAT_E_USBIN_UV_STAT BIT(0)
100#define IRQSTAT_E_USBIN_UV_IRQ BIT(1)
101#define IRQSTAT_E_DCIN_UV_STAT BIT(4)
102#define IRQSTAT_E_DCIN_UV_IRQ BIT(5)
103#define IRQSTAT_F 0x3a
104
105/* Status registers */
106#define STAT_A 0x3b
107#define STAT_A_FLOAT_VOLTAGE_MASK 0x3f
108#define STAT_B 0x3c
109#define STAT_C 0x3d
110#define STAT_C_CHG_ENABLED BIT(0)
111#define STAT_C_CHG_MASK 0x06
112#define STAT_C_CHG_SHIFT 1
113#define STAT_C_CHARGER_ERROR BIT(6)
114#define STAT_E 0x3f
115
/**
 * struct smb347_charger - smb347 charger instance
 * @lock: protects concurrent access to @mains_online, @usb_online and
 *	  @charging_enabled
 * @client: pointer to i2c client
 * @mains: power_supply instance for AC/DC power
 * @usb: power_supply instance for USB power
 * @battery: power_supply instance for battery
 * @mains_online: is AC/DC input connected
 * @usb_online: is USB input connected
 * @charging_enabled: is charging enabled
 * @dentry: for debugfs
 * @pdata: pointer to platform data
 */
struct smb347_charger {
	struct mutex lock;
	struct i2c_client *client;
	struct power_supply mains;
	struct power_supply usb;
	struct power_supply battery;
	bool mains_online;
	bool usb_online;
	bool charging_enabled;
	struct dentry *dentry;
	const struct smb347_charger_platform_data *pdata;
};
141
/*
 * Register-value to current lookup tables.  Each table index is the raw
 * register field value; entries MUST stay in ascending order because
 * current_to_hw() relies on a linear scan to find the matching index.
 */

/* Fast charge current in uA */
static const unsigned int fcc_tbl[] = {
	700000,
	900000,
	1200000,
	1500000,
	1800000,
	2000000,
	2200000,
	2500000,
};

/* Pre-charge current in uA */
static const unsigned int pcc_tbl[] = {
	100000,
	150000,
	200000,
	250000,
};

/* Termination current in uA */
static const unsigned int tc_tbl[] = {
	37500,
	50000,
	100000,
	150000,
	200000,
	250000,
	500000,
	600000,
};

/* Input current limit in uA */
static const unsigned int icl_tbl[] = {
	300000,
	500000,
	700000,
	900000,
	1200000,
	1500000,
	1800000,
	2000000,
	2200000,
	2500000,
};

/* Charge current compensation in uA */
static const unsigned int ccc_tbl[] = {
	250000,
	700000,
	900000,
	1200000,
};
195
/*
 * Convert a raw register field value to a current (uA) via the lookup
 * table.  Returns -EINVAL when the value is out of range for the table.
 */
static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val)
{
	return val < size ? tbl[val] : -EINVAL;
}
203
/*
 * Convert a current (uA) to the raw register field value: the largest
 * table index whose entry does not exceed @val.  The table must be in
 * ascending order.  Returns -EINVAL when @val is below the first entry.
 */
static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
{
	size_t idx = 0;

	while (idx < size && val >= tbl[idx])
		idx++;

	return idx ? (int)(idx - 1) : -EINVAL;
}
214
215static int smb347_read(struct smb347_charger *smb, u8 reg)
216{
217 int ret;
218
219 ret = i2c_smbus_read_byte_data(smb->client, reg);
220 if (ret < 0)
221 dev_warn(&smb->client->dev, "failed to read reg 0x%x: %d\n",
222 reg, ret);
223 return ret;
224}
225
226static int smb347_write(struct smb347_charger *smb, u8 reg, u8 val)
227{
228 int ret;
229
230 ret = i2c_smbus_write_byte_data(smb->client, reg, val);
231 if (ret < 0)
232 dev_warn(&smb->client->dev, "failed to write reg 0x%x: %d\n",
233 reg, ret);
234 return ret;
235}
236
237/**
238 * smb347_update_status - updates the charging status
239 * @smb: pointer to smb347 charger instance
240 *
241 * Function checks status of the charging and updates internal state
242 * accordingly. Returns %0 if there is no change in status, %1 if the
243 * status has changed and negative errno in case of failure.
244 */
245static int smb347_update_status(struct smb347_charger *smb)
246{
247 bool usb = false;
248 bool dc = false;
249 int ret;
250
251 ret = smb347_read(smb, IRQSTAT_E);
252 if (ret < 0)
253 return ret;
254
255 /*
256 * Dc and usb are set depending on whether they are enabled in
257 * platform data _and_ whether corresponding undervoltage is set.
258 */
259 if (smb->pdata->use_mains)
260 dc = !(ret & IRQSTAT_E_DCIN_UV_STAT);
261 if (smb->pdata->use_usb)
262 usb = !(ret & IRQSTAT_E_USBIN_UV_STAT);
263
264 mutex_lock(&smb->lock);
265 ret = smb->mains_online != dc || smb->usb_online != usb;
266 smb->mains_online = dc;
267 smb->usb_online = usb;
268 mutex_unlock(&smb->lock);
269
270 return ret;
271}
272
273/*
274 * smb347_is_online - returns whether input power source is connected
275 * @smb: pointer to smb347 charger instance
276 *
277 * Returns %true if input power source is connected. Note that this is
278 * dependent on what platform has configured for usable power sources. For
279 * example if USB is disabled, this will return %false even if the USB
280 * cable is connected.
281 */
282static bool smb347_is_online(struct smb347_charger *smb)
283{
284 bool ret;
285
286 mutex_lock(&smb->lock);
287 ret = smb->usb_online || smb->mains_online;
288 mutex_unlock(&smb->lock);
289
290 return ret;
291}
292
293/**
294 * smb347_charging_status - returns status of charging
295 * @smb: pointer to smb347 charger instance
296 *
297 * Function returns charging status. %0 means no charging is in progress,
298 * %1 means pre-charging, %2 fast-charging and %3 taper-charging.
299 */
300static int smb347_charging_status(struct smb347_charger *smb)
301{
302 int ret;
303
304 if (!smb347_is_online(smb))
305 return 0;
306
307 ret = smb347_read(smb, STAT_C);
308 if (ret < 0)
309 return 0;
310
311 return (ret & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
312}
313
314static int smb347_charging_set(struct smb347_charger *smb, bool enable)
315{
316 int ret = 0;
317
318 if (smb->pdata->enable_control != SMB347_CHG_ENABLE_SW) {
319 dev_dbg(&smb->client->dev,
320 "charging enable/disable in SW disabled\n");
321 return 0;
322 }
323
324 mutex_lock(&smb->lock);
325 if (smb->charging_enabled != enable) {
326 ret = smb347_read(smb, CMD_A);
327 if (ret < 0)
328 goto out;
329
330 smb->charging_enabled = enable;
331
332 if (enable)
333 ret |= CMD_A_CHG_ENABLED;
334 else
335 ret &= ~CMD_A_CHG_ENABLED;
336
337 ret = smb347_write(smb, CMD_A, ret);
338 }
339out:
340 mutex_unlock(&smb->lock);
341 return ret;
342}
343
/* Convenience wrappers around smb347_charging_set() */
static inline int smb347_charging_enable(struct smb347_charger *smb)
{
	return smb347_charging_set(smb, true);
}

static inline int smb347_charging_disable(struct smb347_charger *smb)
{
	return smb347_charging_set(smb, false);
}
353
354static int smb347_update_online(struct smb347_charger *smb)
355{
356 int ret;
357
358 /*
359 * Depending on whether valid power source is connected or not, we
360 * disable or enable the charging. We do it manually because it
361 * depends on how the platform has configured the valid inputs.
362 */
363 if (smb347_is_online(smb)) {
364 ret = smb347_charging_enable(smb);
365 if (ret < 0)
366 dev_err(&smb->client->dev,
367 "failed to enable charging\n");
368 } else {
369 ret = smb347_charging_disable(smb);
370 if (ret < 0)
371 dev_err(&smb->client->dev,
372 "failed to disable charging\n");
373 }
374
375 return ret;
376}
377
/*
 * Program the fast-charge, pre-charge and termination current fields of
 * CFG_CHARGE_CURRENT from platform data.  Fields whose platform value
 * is zero are left at their current (POR) setting.  Returns 0 on
 * success or a negative errno.
 */
static int smb347_set_charge_current(struct smb347_charger *smb)
{
	int ret, val;

	/* read-modify-write so untouched fields keep their values */
	ret = smb347_read(smb, CFG_CHARGE_CURRENT);
	if (ret < 0)
		return ret;

	if (smb->pdata->max_charge_current) {
		/* fast charge current (FCC), bits 7:5 */
		val = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
				    smb->pdata->max_charge_current);
		if (val < 0)
			return val;

		ret &= ~CFG_CHARGE_CURRENT_FCC_MASK;
		ret |= val << CFG_CHARGE_CURRENT_FCC_SHIFT;
	}

	if (smb->pdata->pre_charge_current) {
		/* pre-charge current (PCC), bits 4:3 */
		val = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
				    smb->pdata->pre_charge_current);
		if (val < 0)
			return val;

		ret &= ~CFG_CHARGE_CURRENT_PCC_MASK;
		ret |= val << CFG_CHARGE_CURRENT_PCC_SHIFT;
	}

	if (smb->pdata->termination_current) {
		/* termination current (TC), bits 2:0 */
		val = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
				    smb->pdata->termination_current);
		if (val < 0)
			return val;

		ret &= ~CFG_CHARGE_CURRENT_TC_MASK;
		ret |= val;
	}

	return smb347_write(smb, CFG_CHARGE_CURRENT, ret);
}
418
/*
 * Program the DC and USB high-current input limit fields of
 * CFG_CURRENT_LIMIT from platform data.  Fields whose platform value is
 * zero are left unchanged.  Returns 0 on success or a negative errno.
 */
static int smb347_set_current_limits(struct smb347_charger *smb)
{
	int ret, val;

	/* read-modify-write so untouched fields keep their values */
	ret = smb347_read(smb, CFG_CURRENT_LIMIT);
	if (ret < 0)
		return ret;

	if (smb->pdata->mains_current_limit) {
		/* DC input current limit, bits 7:4 */
		val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
				    smb->pdata->mains_current_limit);
		if (val < 0)
			return val;

		ret &= ~CFG_CURRENT_LIMIT_DC_MASK;
		ret |= val << CFG_CURRENT_LIMIT_DC_SHIFT;
	}

	if (smb->pdata->usb_hc_current_limit) {
		/* USB high-current mode limit, bits 3:0 */
		val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
				    smb->pdata->usb_hc_current_limit);
		if (val < 0)
			return val;

		ret &= ~CFG_CURRENT_LIMIT_USB_MASK;
		ret |= val;
	}

	return smb347_write(smb, CFG_CURRENT_LIMIT, ret);
}
449
/*
 * Program the pre-to-fast threshold and float (maximum charge) voltage
 * fields of CFG_FLOAT_VOLTAGE from platform data.  Zero platform values
 * leave the corresponding field untouched.  Returns 0 on success or a
 * negative errno.
 */
static int smb347_set_voltage_limits(struct smb347_charger *smb)
{
	int ret, val;

	ret = smb347_read(smb, CFG_FLOAT_VOLTAGE);
	if (ret < 0)
		return ret;

	if (smb->pdata->pre_to_fast_voltage) {
		val = smb->pdata->pre_to_fast_voltage;

		/* uV; 2.4 V base, 200 mV per step, 2 bits */
		val = clamp_val(val, 2400000, 3000000) - 2400000;
		val /= 200000;

		ret &= ~CFG_FLOAT_VOLTAGE_THRESHOLD_MASK;
		ret |= val << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT;
	}

	if (smb->pdata->max_charge_voltage) {
		val = smb->pdata->max_charge_voltage;

		/* uV; 3.5 V base, 20 mV per step */
		val = clamp_val(val, 3500000, 4500000) - 3500000;
		val /= 20000;

		/*
		 * NOTE(review): unlike the threshold field above, the
		 * float voltage bits are OR-ed in without clearing them
		 * first - verify the POR value of these bits makes that
		 * safe, otherwise a mask clear is missing here.
		 */
		ret |= val;
	}

	return smb347_write(smb, CFG_FLOAT_VOLTAGE, ret);
}
481
/*
 * Program all temperature related limits from platform data: the chip
 * (die) temperature threshold, soft/hard hot and cold battery limits,
 * thermistor monitoring, hard-limit suspend behaviour, soft-limit
 * compensation mode and charge current compensation.  Limits left at
 * SMB347_TEMP_USE_DEFAULT keep their POR values.  Returns 0 on success
 * or a negative errno.
 */
static int smb347_set_temp_limits(struct smb347_charger *smb)
{
	bool enable_therm_monitor = false;
	int ret, val;

	if (smb->pdata->chip_temp_threshold) {
		val = smb->pdata->chip_temp_threshold;

		/* degree C; 100 C base, 10 C per step, 2 bits */
		val = clamp_val(val, 100, 130) - 100;
		val /= 10;

		ret = smb347_read(smb, CFG_OTG);
		if (ret < 0)
			return ret;

		ret &= ~CFG_OTG_TEMP_THRESHOLD_MASK;
		ret |= val << CFG_OTG_TEMP_THRESHOLD_SHIFT;

		ret = smb347_write(smb, CFG_OTG, ret);
		if (ret < 0)
			return ret;
	}

	/* the four battery limits below share the CFG_TEMP_LIMIT register */
	ret = smb347_read(smb, CFG_TEMP_LIMIT);
	if (ret < 0)
		return ret;

	if (smb->pdata->soft_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
		val = smb->pdata->soft_cold_temp_limit;

		/* degree C; 0-15 C range, 5 C per step */
		val = clamp_val(val, 0, 15);
		val /= 5;
		/* this goes from higher to lower so invert the value */
		val = ~val & 0x3;

		ret &= ~CFG_TEMP_LIMIT_SOFT_COLD_MASK;
		ret |= val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT;

		enable_therm_monitor = true;
	}

	if (smb->pdata->soft_hot_temp_limit != SMB347_TEMP_USE_DEFAULT) {
		val = smb->pdata->soft_hot_temp_limit;

		/* degree C; 40 C base, 5 C per step */
		val = clamp_val(val, 40, 55) - 40;
		val /= 5;

		ret &= ~CFG_TEMP_LIMIT_SOFT_HOT_MASK;
		ret |= val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT;

		enable_therm_monitor = true;
	}

	if (smb->pdata->hard_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
		val = smb->pdata->hard_cold_temp_limit;

		/* degree C; -5 C base, 5 C per step */
		val = clamp_val(val, -5, 10) + 5;
		val /= 5;
		/* this goes from higher to lower so invert the value */
		val = ~val & 0x3;

		ret &= ~CFG_TEMP_LIMIT_HARD_COLD_MASK;
		ret |= val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT;

		enable_therm_monitor = true;
	}

	if (smb->pdata->hard_hot_temp_limit != SMB347_TEMP_USE_DEFAULT) {
		val = smb->pdata->hard_hot_temp_limit;

		/* degree C; 50 C base, 5 C per step */
		val = clamp_val(val, 50, 65) - 50;
		val /= 5;

		ret &= ~CFG_TEMP_LIMIT_HARD_HOT_MASK;
		ret |= val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT;

		enable_therm_monitor = true;
	}

	ret = smb347_write(smb, CFG_TEMP_LIMIT, ret);
	if (ret < 0)
		return ret;

	/*
	 * If any of the temperature limits are set, we also enable the
	 * thermistor monitoring.
	 *
	 * When soft limits are hit, the device will start to compensate
	 * current and/or voltage depending on the configuration.
	 *
	 * When hard limit is hit, the device will suspend charging
	 * depending on the configuration.
	 */
	if (enable_therm_monitor) {
		ret = smb347_read(smb, CFG_THERM);
		if (ret < 0)
			return ret;

		ret &= ~CFG_THERM_MONITOR_DISABLED;

		ret = smb347_write(smb, CFG_THERM, ret);
		if (ret < 0)
			return ret;
	}

	if (smb->pdata->suspend_on_hard_temp_limit) {
		ret = smb347_read(smb, CFG_SYSOK);
		if (ret < 0)
			return ret;

		ret &= ~CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED;

		ret = smb347_write(smb, CFG_SYSOK, ret);
		if (ret < 0)
			return ret;
	}

	if (smb->pdata->soft_temp_limit_compensation !=
	    SMB347_SOFT_TEMP_COMPENSATE_DEFAULT) {
		/* same 2-bit compensation mode for both hot and cold */
		val = smb->pdata->soft_temp_limit_compensation & 0x3;

		ret = smb347_read(smb, CFG_THERM);
		if (ret < 0)
			return ret;

		ret &= ~CFG_THERM_SOFT_HOT_COMPENSATION_MASK;
		ret |= val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT;

		ret &= ~CFG_THERM_SOFT_COLD_COMPENSATION_MASK;
		ret |= val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT;

		ret = smb347_write(smb, CFG_THERM, ret);
		if (ret < 0)
			return ret;
	}

	if (smb->pdata->charge_current_compensation) {
		val = current_to_hw(ccc_tbl, ARRAY_SIZE(ccc_tbl),
				    smb->pdata->charge_current_compensation);
		if (val < 0)
			return val;

		ret = smb347_read(smb, CFG_OTG);
		if (ret < 0)
			return ret;

		ret &= ~CFG_OTG_CC_COMPENSATION_MASK;
		ret |= (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT;

		ret = smb347_write(smb, CFG_OTG, ret);
		if (ret < 0)
			return ret;
	}

	return ret;
}
639
640/*
641 * smb347_set_writable - enables/disables writing to non-volatile registers
642 * @smb: pointer to smb347 charger instance
643 *
644 * You can enable/disable writing to the non-volatile configuration
645 * registers by calling this function.
646 *
647 * Returns %0 on success and negative errno in case of failure.
648 */
649static int smb347_set_writable(struct smb347_charger *smb, bool writable)
650{
651 int ret;
652
653 ret = smb347_read(smb, CMD_A);
654 if (ret < 0)
655 return ret;
656
657 if (writable)
658 ret |= CMD_A_ALLOW_WRITE;
659 else
660 ret &= ~CMD_A_ALLOW_WRITE;
661
662 return smb347_write(smb, CMD_A, ret);
663}
664
/*
 * One-time hardware configuration: unlock the config registers, program
 * all platform supplied limits, set up USB suspend / Auto-OTG / charge
 * enable control, then re-lock the registers and sync the online and
 * charging state.  Returns 0 on success or a negative errno.
 */
static int smb347_hw_init(struct smb347_charger *smb)
{
	int ret;

	ret = smb347_set_writable(smb, true);
	if (ret < 0)
		return ret;

	/*
	 * Program the platform specific configuration values to the device
	 * first.
	 */
	ret = smb347_set_charge_current(smb);
	if (ret < 0)
		goto fail;

	ret = smb347_set_current_limits(smb);
	if (ret < 0)
		goto fail;

	ret = smb347_set_voltage_limits(smb);
	if (ret < 0)
		goto fail;

	ret = smb347_set_temp_limits(smb);
	if (ret < 0)
		goto fail;

	/* If USB charging is disabled we put the USB in suspend mode */
	if (!smb->pdata->use_usb) {
		ret = smb347_read(smb, CMD_A);
		if (ret < 0)
			goto fail;

		ret |= CMD_A_SUSPEND_ENABLED;

		ret = smb347_write(smb, CMD_A, ret);
		if (ret < 0)
			goto fail;
	}

	ret = smb347_read(smb, CFG_OTHER);
	if (ret < 0)
		goto fail;

	/*
	 * If configured by platform data, we enable hardware Auto-OTG
	 * support for driving VBUS. Otherwise we disable it.
	 */
	ret &= ~CFG_OTHER_RID_MASK;
	if (smb->pdata->use_usb_otg)
		ret |= CFG_OTHER_RID_ENABLED_AUTO_OTG;

	ret = smb347_write(smb, CFG_OTHER, ret);
	if (ret < 0)
		goto fail;

	ret = smb347_read(smb, CFG_PIN);
	if (ret < 0)
		goto fail;

	/*
	 * Make the charging functionality controllable by a write to the
	 * command register unless pin control is specified in the platform
	 * data.
	 */
	ret &= ~CFG_PIN_EN_CTRL_MASK;

	switch (smb->pdata->enable_control) {
	case SMB347_CHG_ENABLE_SW:
		/* Do nothing, 0 means i2c control */
		break;
	case SMB347_CHG_ENABLE_PIN_ACTIVE_LOW:
		ret |= CFG_PIN_EN_CTRL_ACTIVE_LOW;
		break;
	case SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH:
		ret |= CFG_PIN_EN_CTRL_ACTIVE_HIGH;
		break;
	}

	/* Disable Automatic Power Source Detection (APSD) interrupt. */
	ret &= ~CFG_PIN_EN_APSD_IRQ;

	ret = smb347_write(smb, CFG_PIN, ret);
	if (ret < 0)
		goto fail;

	ret = smb347_update_status(smb);
	if (ret < 0)
		goto fail;

	ret = smb347_update_online(smb);

fail:
	/* always re-lock the config registers, even on the error paths */
	smb347_set_writable(smb, false);
	return ret;
}
762
/*
 * Interrupt handler: reads the charger error, termination/taper and
 * undervoltage status registers and reacts to each event.  Returns
 * IRQ_HANDLED when at least one known event was serviced, IRQ_NONE
 * otherwise (including on any register read failure).
 */
static irqreturn_t smb347_interrupt(int irq, void *data)
{
	struct smb347_charger *smb = data;
	int stat_c, irqstat_e, irqstat_c;
	irqreturn_t ret = IRQ_NONE;

	stat_c = smb347_read(smb, STAT_C);
	if (stat_c < 0) {
		dev_warn(&smb->client->dev, "reading STAT_C failed\n");
		return IRQ_NONE;
	}

	irqstat_c = smb347_read(smb, IRQSTAT_C);
	if (irqstat_c < 0) {
		dev_warn(&smb->client->dev, "reading IRQSTAT_C failed\n");
		return IRQ_NONE;
	}

	irqstat_e = smb347_read(smb, IRQSTAT_E);
	if (irqstat_e < 0) {
		dev_warn(&smb->client->dev, "reading IRQSTAT_E failed\n");
		return IRQ_NONE;
	}

	/*
	 * If we get charger error we report the error back to user and
	 * disable charging.
	 */
	if (stat_c & STAT_C_CHARGER_ERROR) {
		dev_err(&smb->client->dev,
			"error in charger, disabling charging\n");

		smb347_charging_disable(smb);
		power_supply_changed(&smb->battery);

		ret = IRQ_HANDLED;
	}

	/*
	 * If we reached the termination current the battery is charged and
	 * we can update the status now. Charging is automatically
	 * disabled by the hardware.
	 */
	if (irqstat_c & (IRQSTAT_C_TERMINATION_IRQ | IRQSTAT_C_TAPER_IRQ)) {
		if (irqstat_c & IRQSTAT_C_TERMINATION_STAT)
			power_supply_changed(&smb->battery);
		ret = IRQ_HANDLED;
	}

	/*
	 * If we got an under voltage interrupt it means that AC/USB input
	 * was connected or disconnected.
	 */
	if (irqstat_e & (IRQSTAT_E_USBIN_UV_IRQ | IRQSTAT_E_DCIN_UV_IRQ)) {
		/* only notify when the online state actually changed */
		if (smb347_update_status(smb) > 0) {
			smb347_update_online(smb);
			power_supply_changed(&smb->mains);
			power_supply_changed(&smb->usb);
		}
		ret = IRQ_HANDLED;
	}

	return ret;
}
827
828static int smb347_irq_set(struct smb347_charger *smb, bool enable)
829{
830 int ret;
831
832 ret = smb347_set_writable(smb, true);
833 if (ret < 0)
834 return ret;
835
836 /*
837 * Enable/disable interrupts for:
838 * - under voltage
839 * - termination current reached
840 * - charger error
841 */
842 if (enable) {
843 ret = smb347_write(smb, CFG_FAULT_IRQ, CFG_FAULT_IRQ_DCIN_UV);
844 if (ret < 0)
845 goto fail;
846
847 ret = smb347_write(smb, CFG_STATUS_IRQ,
848 CFG_STATUS_IRQ_TERMINATION_OR_TAPER);
849 if (ret < 0)
850 goto fail;
851
852 ret = smb347_read(smb, CFG_PIN);
853 if (ret < 0)
854 goto fail;
855
856 ret |= CFG_PIN_EN_CHARGER_ERROR;
857
858 ret = smb347_write(smb, CFG_PIN, ret);
859 } else {
860 ret = smb347_write(smb, CFG_FAULT_IRQ, 0);
861 if (ret < 0)
862 goto fail;
863
864 ret = smb347_write(smb, CFG_STATUS_IRQ, 0);
865 if (ret < 0)
866 goto fail;
867
868 ret = smb347_read(smb, CFG_PIN);
869 if (ret < 0)
870 goto fail;
871
872 ret &= ~CFG_PIN_EN_CHARGER_ERROR;
873
874 ret = smb347_write(smb, CFG_PIN, ret);
875 }
876
877fail:
878 smb347_set_writable(smb, false);
879 return ret;
880}
881
/* Convenience wrapper: unmask the interrupt sources used by this driver. */
static inline int smb347_irq_enable(struct smb347_charger *smb)
{
	return smb347_irq_set(smb, true);
}
886
/* Convenience wrapper: mask the interrupt sources used by this driver. */
static inline int smb347_irq_disable(struct smb347_charger *smb)
{
	return smb347_irq_set(smb, false);
}
891
/*
 * smb347_irq_init - set up the optional STAT-pin interrupt
 * @smb: the charger instance
 *
 * Requests the GPIO given in platform data, binds a threaded handler to
 * its falling edge (STAT is configured active low below), configures the
 * chip's STAT output for interrupt use, and enables our interrupt
 * sources.  On success client->irq is set; on any failure everything is
 * unwound and client->irq is cleared so the rest of the driver runs in
 * polled/IRQ-less mode.  Returns 0 or a negative errno.
 */
static int smb347_irq_init(struct smb347_charger *smb)
{
	const struct smb347_charger_platform_data *pdata = smb->pdata;
	int ret, irq = gpio_to_irq(pdata->irq_gpio);

	ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, smb->client->name);
	if (ret < 0)
		goto fail;

	/* Threaded handler: the I2C reads in smb347_interrupt() may sleep. */
	ret = request_threaded_irq(irq, NULL, smb347_interrupt,
				   IRQF_TRIGGER_FALLING, smb->client->name,
				   smb);
	if (ret < 0)
		goto fail_gpio;

	/* Config registers are protected; open them for the writes below. */
	ret = smb347_set_writable(smb, true);
	if (ret < 0)
		goto fail_irq;

	/*
	 * Configure the STAT output to be suitable for interrupts: disable
	 * all other output (except interrupts) and make it active low.
	 */
	ret = smb347_read(smb, CFG_STAT);
	if (ret < 0)
		goto fail_readonly;

	ret &= ~CFG_STAT_ACTIVE_HIGH;
	ret |= CFG_STAT_DISABLED;

	ret = smb347_write(smb, CFG_STAT, ret);
	if (ret < 0)
		goto fail_readonly;

	ret = smb347_irq_enable(smb);
	if (ret < 0)
		goto fail_readonly;

	smb347_set_writable(smb, false);
	smb->client->irq = irq;
	return 0;

	/* Unwind in reverse order of acquisition. */
fail_readonly:
	smb347_set_writable(smb, false);
fail_irq:
	free_irq(irq, smb);
fail_gpio:
	gpio_free(pdata->irq_gpio);
fail:
	smb->client->irq = 0;
	return ret;
}
944
945static int smb347_mains_get_property(struct power_supply *psy,
946 enum power_supply_property prop,
947 union power_supply_propval *val)
948{
949 struct smb347_charger *smb =
950 container_of(psy, struct smb347_charger, mains);
951
952 if (prop == POWER_SUPPLY_PROP_ONLINE) {
953 val->intval = smb->mains_online;
954 return 0;
955 }
956 return -EINVAL;
957}
958
/* Properties exposed by the AC (mains) power supply. */
static enum power_supply_property smb347_mains_properties[] = {
	POWER_SUPPLY_PROP_ONLINE,
};
962
963static int smb347_usb_get_property(struct power_supply *psy,
964 enum power_supply_property prop,
965 union power_supply_propval *val)
966{
967 struct smb347_charger *smb =
968 container_of(psy, struct smb347_charger, usb);
969
970 if (prop == POWER_SUPPLY_PROP_ONLINE) {
971 val->intval = smb->usb_online;
972 return 0;
973 }
974 return -EINVAL;
975}
976
/* Properties exposed by the USB power supply. */
static enum power_supply_property smb347_usb_properties[] = {
	POWER_SUPPLY_PROP_ONLINE,
};
980
/*
 * power_supply .get_property callback for the battery supply.
 *
 * Static battery characteristics come from platform data; dynamic values
 * (status, charge type, voltage, current) are decoded from the chip's
 * STAT_A/STAT_B registers.  Properties that only make sense while a
 * charger input is connected return -ENODATA when offline.
 */
static int smb347_battery_get_property(struct power_supply *psy,
				       enum power_supply_property prop,
				       union power_supply_propval *val)
{
	struct smb347_charger *smb =
		container_of(psy, struct smb347_charger, battery);
	const struct smb347_charger_platform_data *pdata = smb->pdata;
	int ret;

	/* Refresh the cached online/charging state first. */
	ret = smb347_update_status(smb);
	if (ret < 0)
		return ret;

	switch (prop) {
	case POWER_SUPPLY_PROP_STATUS:
		if (!smb347_is_online(smb)) {
			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
			break;
		}
		/* Online but not charging means the charge cycle completed. */
		if (smb347_charging_status(smb))
			val->intval = POWER_SUPPLY_STATUS_CHARGING;
		else
			val->intval = POWER_SUPPLY_STATUS_FULL;
		break;

	case POWER_SUPPLY_PROP_CHARGE_TYPE:
		if (!smb347_is_online(smb))
			return -ENODATA;

		/*
		 * We handle trickle and pre-charging the same, and taper
		 * and none the same.
		 */
		switch (smb347_charging_status(smb)) {
		case 1:
			val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
			break;
		case 2:
			val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
			break;
		default:
			val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
			break;
		}
		break;

	case POWER_SUPPLY_PROP_TECHNOLOGY:
		val->intval = pdata->battery_info.technology;
		break;

	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
		val->intval = pdata->battery_info.voltage_min_design;
		break;

	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
		val->intval = pdata->battery_info.voltage_max_design;
		break;

	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		if (!smb347_is_online(smb))
			return -ENODATA;
		ret = smb347_read(smb, STAT_A);
		if (ret < 0)
			return ret;

		/*
		 * Decode the float-voltage field: 3.5 V base plus 20 mV
		 * per step, with the register code clamped at 0x3d.
		 */
		ret &= STAT_A_FLOAT_VOLTAGE_MASK;
		if (ret > 0x3d)
			ret = 0x3d;

		val->intval = 3500000 + ret * 20000;
		break;

	case POWER_SUPPLY_PROP_CURRENT_NOW:
		if (!smb347_is_online(smb))
			return -ENODATA;

		ret = smb347_read(smb, STAT_B);
		if (ret < 0)
			return ret;

		/*
		 * The current value is composition of FCC and PCC values
		 * and we can detect which table to use from bit 5.
		 */
		if (ret & 0x20) {
			/* Fast-charge: low 3 bits index the FCC table. */
			val->intval = hw_to_current(fcc_tbl,
						    ARRAY_SIZE(fcc_tbl),
						    ret & 7);
		} else {
			/* Pre-charge: bits 5:3 index the PCC table. */
			ret >>= 3;
			val->intval = hw_to_current(pcc_tbl,
						    ARRAY_SIZE(pcc_tbl),
						    ret & 7);
		}
		break;

	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
		val->intval = pdata->battery_info.charge_full_design;
		break;

	case POWER_SUPPLY_PROP_MODEL_NAME:
		val->strval = pdata->battery_info.name;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
1091
/* Properties exposed by the battery power supply. */
static enum power_supply_property smb347_battery_properties[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_CHARGE_TYPE,
	POWER_SUPPLY_PROP_TECHNOLOGY,
	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_NOW,
	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
	POWER_SUPPLY_PROP_MODEL_NAME,
};
1103
1104static int smb347_debugfs_show(struct seq_file *s, void *data)
1105{
1106 struct smb347_charger *smb = s->private;
1107 int ret;
1108 u8 reg;
1109
1110 seq_printf(s, "Control registers:\n");
1111 seq_printf(s, "==================\n");
1112 for (reg = CFG_CHARGE_CURRENT; reg <= CFG_ADDRESS; reg++) {
1113 ret = smb347_read(smb, reg);
1114 seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
1115 }
1116 seq_printf(s, "\n");
1117
1118 seq_printf(s, "Command registers:\n");
1119 seq_printf(s, "==================\n");
1120 ret = smb347_read(smb, CMD_A);
1121 seq_printf(s, "0x%02x:\t0x%02x\n", CMD_A, ret);
1122 ret = smb347_read(smb, CMD_B);
1123 seq_printf(s, "0x%02x:\t0x%02x\n", CMD_B, ret);
1124 ret = smb347_read(smb, CMD_C);
1125 seq_printf(s, "0x%02x:\t0x%02x\n", CMD_C, ret);
1126 seq_printf(s, "\n");
1127
1128 seq_printf(s, "Interrupt status registers:\n");
1129 seq_printf(s, "===========================\n");
1130 for (reg = IRQSTAT_A; reg <= IRQSTAT_F; reg++) {
1131 ret = smb347_read(smb, reg);
1132 seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
1133 }
1134 seq_printf(s, "\n");
1135
1136 seq_printf(s, "Status registers:\n");
1137 seq_printf(s, "=================\n");
1138 for (reg = STAT_A; reg <= STAT_E; reg++) {
1139 ret = smb347_read(smb, reg);
1140 seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
1141 }
1142
1143 return 0;
1144}
1145
/* debugfs open: bind the seq_file show routine to this charger instance. */
static int smb347_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, smb347_debugfs_show, inode->i_private);
}
1150
/* File operations for the read-only "smb347-regs" debugfs entry. */
static const struct file_operations smb347_debugfs_fops = {
	.open		= smb347_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1157
/*
 * smb347_probe - I2C probe: initialize the chip and register supplies
 * @client: the I2C client
 * @id: matched device ID (unused)
 *
 * Validates platform data, programs the charger hardware, then registers
 * three power supplies (mains, usb, battery).  IRQ support and the
 * debugfs register dump are both optional: failure to set them up is
 * reported but does not fail the probe.  Returns 0 or a negative errno.
 */
static int smb347_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	/* supplied_to list: both inputs feed the battery supply below. */
	static char *battery[] = { "smb347-battery" };
	const struct smb347_charger_platform_data *pdata;
	struct device *dev = &client->dev;
	struct smb347_charger *smb;
	int ret;

	pdata = dev->platform_data;
	if (!pdata)
		return -EINVAL;

	/* At least one charging input must be enabled in platform data. */
	if (!pdata->use_mains && !pdata->use_usb)
		return -EINVAL;

	/* devm_ allocation: freed automatically on probe failure/remove. */
	smb = devm_kzalloc(dev, sizeof(*smb), GFP_KERNEL);
	if (!smb)
		return -ENOMEM;

	i2c_set_clientdata(client, smb);

	mutex_init(&smb->lock);
	smb->client = client;
	smb->pdata = pdata;

	/* Program charge currents, voltages and limits into the chip. */
	ret = smb347_hw_init(smb);
	if (ret < 0)
		return ret;

	smb->mains.name = "smb347-mains";
	smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
	smb->mains.get_property = smb347_mains_get_property;
	smb->mains.properties = smb347_mains_properties;
	smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
	smb->mains.supplied_to = battery;
	smb->mains.num_supplicants = ARRAY_SIZE(battery);

	smb->usb.name = "smb347-usb";
	smb->usb.type = POWER_SUPPLY_TYPE_USB;
	smb->usb.get_property = smb347_usb_get_property;
	smb->usb.properties = smb347_usb_properties;
	smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
	smb->usb.supplied_to = battery;
	smb->usb.num_supplicants = ARRAY_SIZE(battery);

	smb->battery.name = "smb347-battery";
	smb->battery.type = POWER_SUPPLY_TYPE_BATTERY;
	smb->battery.get_property = smb347_battery_get_property;
	smb->battery.properties = smb347_battery_properties;
	smb->battery.num_properties = ARRAY_SIZE(smb347_battery_properties);

	/* Register the supplies; unwind earlier ones on failure. */
	ret = power_supply_register(dev, &smb->mains);
	if (ret < 0)
		return ret;

	ret = power_supply_register(dev, &smb->usb);
	if (ret < 0) {
		power_supply_unregister(&smb->mains);
		return ret;
	}

	ret = power_supply_register(dev, &smb->battery);
	if (ret < 0) {
		power_supply_unregister(&smb->usb);
		power_supply_unregister(&smb->mains);
		return ret;
	}

	/*
	 * Interrupt pin is optional. If it is connected, we setup the
	 * interrupt support here.
	 */
	if (pdata->irq_gpio >= 0) {
		ret = smb347_irq_init(smb);
		if (ret < 0) {
			dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
			dev_warn(dev, "disabling IRQ support\n");
		}
	}

	/* Best-effort: smb->dentry may be an error pointer, checked in remove. */
	smb->dentry = debugfs_create_file("smb347-regs", S_IRUSR, NULL, smb,
					  &smb347_debugfs_fops);
	return 0;
}
1243
/*
 * smb347_remove - I2C remove: tear down in reverse order of probe
 * @client: the I2C client
 *
 * Removes the debugfs entry, releases the optional IRQ/GPIO (client->irq
 * is non-zero only if smb347_irq_init() succeeded), then unregisters the
 * three power supplies.  The smb struct itself is devm-managed.
 */
static int smb347_remove(struct i2c_client *client)
{
	struct smb347_charger *smb = i2c_get_clientdata(client);

	if (!IS_ERR_OR_NULL(smb->dentry))
		debugfs_remove(smb->dentry);

	if (client->irq) {
		/* Mask chip interrupt sources before freeing the handler. */
		smb347_irq_disable(smb);
		free_irq(client->irq, smb);
		gpio_free(smb->pdata->irq_gpio);
	}

	power_supply_unregister(&smb->battery);
	power_supply_unregister(&smb->usb);
	power_supply_unregister(&smb->mains);
	return 0;
}
1262
/* I2C device IDs handled by this driver; sentinel-terminated. */
static const struct i2c_device_id smb347_id[] = {
	{ "smb347", 0 },
	{ }
};
/* Exported so userspace can autoload the module on device match. */
MODULE_DEVICE_TABLE(i2c, smb347_id);
1268
/* I2C driver glue for the SMB347 charger. */
static struct i2c_driver smb347_driver = {
	.driver = {
		.name = "smb347",
	},
	.probe = smb347_probe,
	.remove = __devexit_p(smb347_remove),
	.id_table = smb347_id,
};
1277
/*
 * Register/unregister the I2C driver at module init/exit.  The
 * hand-rolled init/exit pair was pure boilerplate; module_i2c_driver()
 * expands to exactly that (i2c_add_driver()/i2c_del_driver()), matching
 * the same conversion applied to other power-supply drivers in this
 * series (e.g. z2_battery).
 */
module_i2c_driver(smb347_driver);
1289
1290MODULE_AUTHOR("Bruce E. Robertson <bruce.e.robertson@intel.com>");
1291MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
1292MODULE_DESCRIPTION("SMB347 battery charger driver");
1293MODULE_LICENSE("GPL");
1294MODULE_ALIAS("i2c:smb347");
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index 636ebb2a0e80..8c9a607ea77a 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -316,19 +316,7 @@ static struct i2c_driver z2_batt_driver = {
316 .remove = __devexit_p(z2_batt_remove), 316 .remove = __devexit_p(z2_batt_remove),
317 .id_table = z2_batt_id, 317 .id_table = z2_batt_id,
318}; 318};
319 319module_i2c_driver(z2_batt_driver);
320static int __init z2_batt_init(void)
321{
322 return i2c_add_driver(&z2_batt_driver);
323}
324
325static void __exit z2_batt_exit(void)
326{
327 i2c_del_driver(&z2_batt_driver);
328}
329
330module_init(z2_batt_init);
331module_exit(z2_batt_exit);
332 320
333MODULE_LICENSE("GPL"); 321MODULE_LICENSE("GPL");
334MODULE_AUTHOR("Peter Edwards <sweetlilmre@gmail.com>"); 322MODULE_AUTHOR("Peter Edwards <sweetlilmre@gmail.com>");
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index dc87eda65814..eb415bd76494 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -458,6 +458,11 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
458 if (rtc->uie_rtctimer.enabled == enabled) 458 if (rtc->uie_rtctimer.enabled == enabled)
459 goto out; 459 goto out;
460 460
461 if (rtc->uie_unsupported) {
462 err = -EINVAL;
463 goto out;
464 }
465
461 if (enabled) { 466 if (enabled) {
462 struct rtc_time tm; 467 struct rtc_time tm;
463 ktime_t now, onesec; 468 ktime_t now, onesec;
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index e954a759ba85..42f5f829b3ee 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -360,6 +360,8 @@ static int __devinit mpc5121_rtc_probe(struct platform_device *op)
360 &mpc5200_rtc_ops, THIS_MODULE); 360 &mpc5200_rtc_ops, THIS_MODULE);
361 } 361 }
362 362
363 rtc->rtc->uie_unsupported = 1;
364
363 if (IS_ERR(rtc->rtc)) { 365 if (IS_ERR(rtc->rtc)) {
364 err = PTR_ERR(rtc->rtc); 366 err = PTR_ERR(rtc->rtc);
365 goto out_free_irq; 367 goto out_free_irq;
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 4940fa8c4e10..50a5c4adee48 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -33,6 +33,7 @@
33#include <linux/of.h> 33#include <linux/of.h>
34#include <linux/pm.h> 34#include <linux/pm.h>
35#include <linux/bitops.h> 35#include <linux/bitops.h>
36#include <linux/io.h>
36 37
37#include <mach/hardware.h> 38#include <mach/hardware.h>
38#include <mach/irqs.h> 39#include <mach/irqs.h>
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a06e608789e3..29684c8142b0 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -619,6 +619,7 @@ config SCSI_ARCMSR
619 619
620source "drivers/scsi/megaraid/Kconfig.megaraid" 620source "drivers/scsi/megaraid/Kconfig.megaraid"
621source "drivers/scsi/mpt2sas/Kconfig" 621source "drivers/scsi/mpt2sas/Kconfig"
622source "drivers/scsi/ufs/Kconfig"
622 623
623config SCSI_HPTIOP 624config SCSI_HPTIOP
624 tristate "HighPoint RocketRAID 3xxx/4xxx Controller support" 625 tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index ad24e065b1e5..8deedeaf5608 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -108,6 +108,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
108obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ 108obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
109obj-$(CONFIG_MEGARAID_SAS) += megaraid/ 109obj-$(CONFIG_MEGARAID_SAS) += megaraid/
110obj-$(CONFIG_SCSI_MPT2SAS) += mpt2sas/ 110obj-$(CONFIG_SCSI_MPT2SAS) += mpt2sas/
111obj-$(CONFIG_SCSI_UFSHCD) += ufs/
111obj-$(CONFIG_SCSI_ACARD) += atp870u.o 112obj-$(CONFIG_SCSI_ACARD) += atp870u.o
112obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o 113obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
113obj-$(CONFIG_SCSI_GDTH) += gdth.o 114obj-$(CONFIG_SCSI_GDTH) += gdth.o
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 7d48700257a7..9328121804bb 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -341,10 +341,10 @@ MODULE_PARM_DESC(aic79xx,
341" (0/256ms,1/128ms,2/64ms,3/32ms)\n" 341" (0/256ms,1/128ms,2/64ms,3/32ms)\n"
342" slowcrc Turn on the SLOWCRC bit (Rev B only)\n" 342" slowcrc Turn on the SLOWCRC bit (Rev B only)\n"
343"\n" 343"\n"
344" Sample /etc/modprobe.conf line:\n" 344" Sample modprobe configuration file:\n"
345" Enable verbose logging\n" 345" # Enable verbose logging\n"
346" Set tag depth on Controller 2/Target 2 to 10 tags\n" 346" # Set tag depth on Controller 2/Target 2 to 10 tags\n"
347" Shorten the selection timeout to 128ms\n" 347" # Shorten the selection timeout to 128ms\n"
348"\n" 348"\n"
349" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n" 349" options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
350); 350);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index c6251bb4f438..5a477cdc780d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -360,10 +360,10 @@ MODULE_PARM_DESC(aic7xxx,
360" seltime:<int> Selection Timeout\n" 360" seltime:<int> Selection Timeout\n"
361" (0/256ms,1/128ms,2/64ms,3/32ms)\n" 361" (0/256ms,1/128ms,2/64ms,3/32ms)\n"
362"\n" 362"\n"
363" Sample /etc/modprobe.conf line:\n" 363" Sample modprobe configuration file:\n"
364" Toggle EISA/VLB probing\n" 364" # Toggle EISA/VLB probing\n"
365" Set tag depth on Controller 1/Target 1 to 10 tags\n" 365" # Set tag depth on Controller 1/Target 1 to 10 tags\n"
366" Shorten the selection timeout to 128ms\n" 366" # Shorten the selection timeout to 128ms\n"
367"\n" 367"\n"
368" options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n" 368" options aic7xxx 'aic7xxx=probe_eisa_vl.tag_info:{{}.{.10}}.seltime:1'\n"
369); 369);
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index f29d5121d5ed..68ce08552f69 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -2582,7 +2582,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2582 * this than via the PCI device table 2582 * this than via the PCI device table
2583 */ 2583 */
2584 if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) { 2584 if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610) {
2585 error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver); 2585 atpdev->chip_ver = pdev->revision;
2586 if (atpdev->chip_ver < 2) 2586 if (atpdev->chip_ver < 2)
2587 goto err_eio; 2587 goto err_eio;
2588 } 2588 }
@@ -2601,7 +2601,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2601 base_io &= 0xfffffff8; 2601 base_io &= 0xfffffff8;
2602 2602
2603 if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) { 2603 if ((ent->device == ATP880_DEVID1)||(ent->device == ATP880_DEVID2)) {
2604 error = pci_read_config_byte(pdev, PCI_CLASS_REVISION, &atpdev->chip_ver); 2604 atpdev->chip_ver = pdev->revision;
2605 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803 2605 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);//JCC082803
2606 2606
2607 host_id = inb(base_io + 0x39); 2607 host_id = inb(base_io + 0x39);
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index a796de935054..4ad7e368bbc2 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -225,9 +225,9 @@ struct bfa_faa_args_s {
225}; 225};
226 226
227struct bfa_iocfc_s { 227struct bfa_iocfc_s {
228 bfa_fsm_t fsm;
228 struct bfa_s *bfa; 229 struct bfa_s *bfa;
229 struct bfa_iocfc_cfg_s cfg; 230 struct bfa_iocfc_cfg_s cfg;
230 int action;
231 u32 req_cq_pi[BFI_IOC_MAX_CQS]; 231 u32 req_cq_pi[BFI_IOC_MAX_CQS];
232 u32 rsp_cq_ci[BFI_IOC_MAX_CQS]; 232 u32 rsp_cq_ci[BFI_IOC_MAX_CQS];
233 u8 hw_qid[BFI_IOC_MAX_CQS]; 233 u8 hw_qid[BFI_IOC_MAX_CQS];
@@ -236,7 +236,9 @@ struct bfa_iocfc_s {
236 struct bfa_cb_qe_s dis_hcb_qe; 236 struct bfa_cb_qe_s dis_hcb_qe;
237 struct bfa_cb_qe_s en_hcb_qe; 237 struct bfa_cb_qe_s en_hcb_qe;
238 struct bfa_cb_qe_s stats_hcb_qe; 238 struct bfa_cb_qe_s stats_hcb_qe;
239 bfa_boolean_t cfgdone; 239 bfa_boolean_t submod_enabled;
240 bfa_boolean_t cb_reqd; /* Driver call back reqd */
241 bfa_status_t op_status; /* Status of bfa iocfc op */
240 242
241 struct bfa_dma_s cfg_info; 243 struct bfa_dma_s cfg_info;
242 struct bfi_iocfc_cfg_s *cfginfo; 244 struct bfi_iocfc_cfg_s *cfginfo;
@@ -341,8 +343,6 @@ void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
341void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, 343void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
342 u32 *end); 344 u32 *end);
343void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns); 345void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
344wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa);
345wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa);
346int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, 346int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
347 struct bfi_pbc_vport_s *pbc_vport); 347 struct bfi_pbc_vport_s *pbc_vport);
348 348
@@ -428,7 +428,6 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
428 428
429void bfa_iocfc_enable(struct bfa_s *bfa); 429void bfa_iocfc_enable(struct bfa_s *bfa);
430void bfa_iocfc_disable(struct bfa_s *bfa); 430void bfa_iocfc_disable(struct bfa_s *bfa);
431void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status);
432#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ 431#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
433 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout) 432 bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
434 433
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 4bd546bcc240..456e5762977d 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -200,13 +200,431 @@ enum {
200#define DEF_CFG_NUM_SBOOT_LUNS 16 200#define DEF_CFG_NUM_SBOOT_LUNS 16
201 201
202/* 202/*
203 * IOCFC state machine definitions/declarations
204 */
205bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
206bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
207bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
208bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
209 struct bfa_iocfc_s, enum iocfc_event);
210bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
211 struct bfa_iocfc_s, enum iocfc_event);
212bfa_fsm_state_decl(bfa_iocfc, operational,
213 struct bfa_iocfc_s, enum iocfc_event);
214bfa_fsm_state_decl(bfa_iocfc, dconf_write,
215 struct bfa_iocfc_s, enum iocfc_event);
216bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
217bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
218bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
219bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
220bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
221bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
222bfa_fsm_state_decl(bfa_iocfc, init_failed,
223 struct bfa_iocfc_s, enum iocfc_event);
224
225/*
203 * forward declaration for IOC FC functions 226 * forward declaration for IOC FC functions
204 */ 227 */
228static void bfa_iocfc_start_submod(struct bfa_s *bfa);
229static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
230static void bfa_iocfc_send_cfg(void *bfa_arg);
205static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); 231static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
206static void bfa_iocfc_disable_cbfn(void *bfa_arg); 232static void bfa_iocfc_disable_cbfn(void *bfa_arg);
207static void bfa_iocfc_hbfail_cbfn(void *bfa_arg); 233static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
208static void bfa_iocfc_reset_cbfn(void *bfa_arg); 234static void bfa_iocfc_reset_cbfn(void *bfa_arg);
209static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; 235static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
236static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
237static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
238static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
239static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);
240
241static void
242bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
243{
244}
245
246static void
247bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
248{
249 bfa_trc(iocfc->bfa, event);
250
251 switch (event) {
252 case IOCFC_E_INIT:
253 case IOCFC_E_ENABLE:
254 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
255 break;
256 default:
257 bfa_sm_fault(iocfc->bfa, event);
258 break;
259 }
260}
261
262static void
263bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
264{
265 bfa_ioc_enable(&iocfc->bfa->ioc);
266}
267
268static void
269bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
270{
271 bfa_trc(iocfc->bfa, event);
272
273 switch (event) {
274 case IOCFC_E_IOC_ENABLED:
275 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
276 break;
277 case IOCFC_E_IOC_FAILED:
278 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
279 break;
280 default:
281 bfa_sm_fault(iocfc->bfa, event);
282 break;
283 }
284}
285
286static void
287bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
288{
289 bfa_dconf_modinit(iocfc->bfa);
290}
291
292static void
293bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
294{
295 bfa_trc(iocfc->bfa, event);
296
297 switch (event) {
298 case IOCFC_E_DCONF_DONE:
299 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
300 break;
301 case IOCFC_E_IOC_FAILED:
302 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
303 break;
304 default:
305 bfa_sm_fault(iocfc->bfa, event);
306 break;
307 }
308}
309
310static void
311bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
312{
313 bfa_iocfc_send_cfg(iocfc->bfa);
314}
315
316static void
317bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
318{
319 bfa_trc(iocfc->bfa, event);
320
321 switch (event) {
322 case IOCFC_E_CFG_DONE:
323 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
324 break;
325 case IOCFC_E_IOC_FAILED:
326 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
327 break;
328 default:
329 bfa_sm_fault(iocfc->bfa, event);
330 break;
331 }
332}
333
334static void
335bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
336{
337 iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
338 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
339 bfa_iocfc_init_cb, iocfc->bfa);
340}
341
342static void
343bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
344{
345 bfa_trc(iocfc->bfa, event);
346
347 switch (event) {
348 case IOCFC_E_START:
349 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
350 break;
351 case IOCFC_E_STOP:
352 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
353 break;
354 case IOCFC_E_DISABLE:
355 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
356 break;
357 case IOCFC_E_IOC_FAILED:
358 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
359 break;
360 default:
361 bfa_sm_fault(iocfc->bfa, event);
362 break;
363 }
364}
365
366static void
367bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
368{
369 bfa_fcport_init(iocfc->bfa);
370 bfa_iocfc_start_submod(iocfc->bfa);
371}
372
373static void
374bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
375{
376 bfa_trc(iocfc->bfa, event);
377
378 switch (event) {
379 case IOCFC_E_STOP:
380 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
381 break;
382 case IOCFC_E_DISABLE:
383 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
384 break;
385 case IOCFC_E_IOC_FAILED:
386 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
387 break;
388 default:
389 bfa_sm_fault(iocfc->bfa, event);
390 break;
391 }
392}
393
394static void
395bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
396{
397 bfa_dconf_modexit(iocfc->bfa);
398}
399
400static void
401bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
402{
403 bfa_trc(iocfc->bfa, event);
404
405 switch (event) {
406 case IOCFC_E_DCONF_DONE:
407 case IOCFC_E_IOC_FAILED:
408 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
409 break;
410 default:
411 bfa_sm_fault(iocfc->bfa, event);
412 break;
413 }
414}
415
416static void
417bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
418{
419 bfa_ioc_disable(&iocfc->bfa->ioc);
420}
421
422static void
423bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
424{
425 bfa_trc(iocfc->bfa, event);
426
427 switch (event) {
428 case IOCFC_E_IOC_DISABLED:
429 bfa_isr_disable(iocfc->bfa);
430 bfa_iocfc_disable_submod(iocfc->bfa);
431 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
432 iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
433 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
434 bfa_iocfc_stop_cb, iocfc->bfa);
435 break;
436 default:
437 bfa_sm_fault(iocfc->bfa, event);
438 break;
439 }
440}
441
442static void
443bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
444{
445 bfa_ioc_enable(&iocfc->bfa->ioc);
446}
447
448static void
449bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
450{
451 bfa_trc(iocfc->bfa, event);
452
453 switch (event) {
454 case IOCFC_E_IOC_ENABLED:
455 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
456 break;
457 case IOCFC_E_IOC_FAILED:
458 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
459
460 if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
461 break;
462
463 iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
464 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
465 bfa_iocfc_enable_cb, iocfc->bfa);
466 iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
467 break;
468 default:
469 bfa_sm_fault(iocfc->bfa, event);
470 break;
471 }
472}
473
/*
 * Entry action for the cfg-wait state: send the IOCFC configuration to
 * firmware. The outcome arrives later as IOCFC_E_CFG_DONE or
 * IOCFC_E_IOC_FAILED (handled in bfa_iocfc_sm_cfg_wait below).
 */
static void
bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_iocfc_send_cfg(iocfc->bfa);
}
479
480static void
481bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
482{
483 bfa_trc(iocfc->bfa, event);
484
485 switch (event) {
486 case IOCFC_E_CFG_DONE:
487 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
488 if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
489 break;
490
491 iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
492 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
493 bfa_iocfc_enable_cb, iocfc->bfa);
494 iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
495 break;
496 case IOCFC_E_IOC_FAILED:
497 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
498 if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
499 break;
500
501 iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
502 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
503 bfa_iocfc_enable_cb, iocfc->bfa);
504 iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
505 break;
506 default:
507 bfa_sm_fault(iocfc->bfa, event);
508 break;
509 }
510}
511
/*
 * Entry action for the disabling state: request IOC disable. Completion
 * arrives later as IOCFC_E_IOC_DISABLED (handled in
 * bfa_iocfc_sm_disabling below).
 */
static void
bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_disable(&iocfc->bfa->ioc);
}
517
518static void
519bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
520{
521 bfa_trc(iocfc->bfa, event);
522
523 switch (event) {
524 case IOCFC_E_IOC_DISABLED:
525 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
526 break;
527 default:
528 bfa_sm_fault(iocfc->bfa, event);
529 break;
530 }
531}
532
/*
 * Entry action for the disabled state: quiesce interrupts and all
 * sub-modules now that the IOC is down, record a successful disable and
 * notify the driver through the disable callback queue.
 */
static void
bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	bfa_iocfc_disable_submod(iocfc->bfa);
	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
		     bfa_iocfc_disable_cb, iocfc->bfa);
}
542
543static void
544bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
545{
546 bfa_trc(iocfc->bfa, event);
547
548 switch (event) {
549 case IOCFC_E_STOP:
550 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
551 break;
552 case IOCFC_E_ENABLE:
553 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
554 break;
555 default:
556 bfa_sm_fault(iocfc->bfa, event);
557 break;
558 }
559}
560
/*
 * Entry action for the failed state: quiesce interrupts and all
 * sub-modules while the IOC is unusable. Recovery (IOCFC_E_IOC_ENABLED)
 * or teardown is driven by the events handled in bfa_iocfc_sm_failed.
 */
static void
bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	bfa_iocfc_disable_submod(iocfc->bfa);
}
567
568static void
569bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
570{
571 bfa_trc(iocfc->bfa, event);
572
573 switch (event) {
574 case IOCFC_E_STOP:
575 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
576 break;
577 case IOCFC_E_DISABLE:
578 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
579 break;
580 case IOCFC_E_IOC_ENABLED:
581 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
582 break;
583 case IOCFC_E_IOC_FAILED:
584 break;
585 default:
586 bfa_sm_fault(iocfc->bfa, event);
587 break;
588 }
589}
590
/*
 * Entry action for the init-failed state: quiesce interrupts, record the
 * failure and report it to the driver through the init callback queue.
 */
static void
bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
		     bfa_iocfc_init_cb, iocfc->bfa);
}
599
600static void
601bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
602{
603 bfa_trc(iocfc->bfa, event);
604
605 switch (event) {
606 case IOCFC_E_STOP:
607 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
608 break;
609 case IOCFC_E_DISABLE:
610 bfa_ioc_disable(&iocfc->bfa->ioc);
611 break;
612 case IOCFC_E_IOC_ENABLED:
613 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
614 break;
615 case IOCFC_E_IOC_DISABLED:
616 bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
617 iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
618 bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
619 bfa_iocfc_disable_cb, iocfc->bfa);
620 break;
621 case IOCFC_E_IOC_FAILED:
622 break;
623 default:
624 bfa_sm_fault(iocfc->bfa, event);
625 break;
626 }
627}
210 628
211/* 629/*
212 * BFA Interrupt handling functions 630 * BFA Interrupt handling functions
@@ -231,16 +649,19 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
231 } 649 }
232} 650}
233 651
234static inline void 652bfa_boolean_t
235bfa_isr_rspq(struct bfa_s *bfa, int qid) 653bfa_isr_rspq(struct bfa_s *bfa, int qid)
236{ 654{
237 struct bfi_msg_s *m; 655 struct bfi_msg_s *m;
238 u32 pi, ci; 656 u32 pi, ci;
239 struct list_head *waitq; 657 struct list_head *waitq;
658 bfa_boolean_t ret;
240 659
241 ci = bfa_rspq_ci(bfa, qid); 660 ci = bfa_rspq_ci(bfa, qid);
242 pi = bfa_rspq_pi(bfa, qid); 661 pi = bfa_rspq_pi(bfa, qid);
243 662
663 ret = (ci != pi);
664
244 while (ci != pi) { 665 while (ci != pi) {
245 m = bfa_rspq_elem(bfa, qid, ci); 666 m = bfa_rspq_elem(bfa, qid, ci);
246 WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX); 667 WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);
@@ -260,6 +681,8 @@ bfa_isr_rspq(struct bfa_s *bfa, int qid)
260 waitq = bfa_reqq(bfa, qid); 681 waitq = bfa_reqq(bfa, qid);
261 if (!list_empty(waitq)) 682 if (!list_empty(waitq))
262 bfa_reqq_resume(bfa, qid); 683 bfa_reqq_resume(bfa, qid);
684
685 return ret;
263} 686}
264 687
265static inline void 688static inline void
@@ -320,6 +743,7 @@ bfa_intx(struct bfa_s *bfa)
320{ 743{
321 u32 intr, qintr; 744 u32 intr, qintr;
322 int queue; 745 int queue;
746 bfa_boolean_t rspq_comp = BFA_FALSE;
323 747
324 intr = readl(bfa->iocfc.bfa_regs.intr_status); 748 intr = readl(bfa->iocfc.bfa_regs.intr_status);
325 749
@@ -332,11 +756,12 @@ bfa_intx(struct bfa_s *bfa)
332 */ 756 */
333 if (bfa->queue_process) { 757 if (bfa->queue_process) {
334 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++) 758 for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
335 bfa_isr_rspq(bfa, queue); 759 if (bfa_isr_rspq(bfa, queue))
760 rspq_comp = BFA_TRUE;
336 } 761 }
337 762
338 if (!intr) 763 if (!intr)
339 return BFA_TRUE; 764 return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;
340 765
341 /* 766 /*
342 * CPE completion queue interrupt 767 * CPE completion queue interrupt
@@ -525,11 +950,9 @@ bfa_iocfc_send_cfg(void *bfa_arg)
525 * Enable interrupt coalescing if it is driver init path 950 * Enable interrupt coalescing if it is driver init path
526 * and not ioc disable/enable path. 951 * and not ioc disable/enable path.
527 */ 952 */
528 if (!iocfc->cfgdone) 953 if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
529 cfg_info->intr_attr.coalesce = BFA_TRUE; 954 cfg_info->intr_attr.coalesce = BFA_TRUE;
530 955
531 iocfc->cfgdone = BFA_FALSE;
532
533 /* 956 /*
534 * dma map IOC configuration itself 957 * dma map IOC configuration itself
535 */ 958 */
@@ -549,8 +972,6 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
549 972
550 bfa->bfad = bfad; 973 bfa->bfad = bfad;
551 iocfc->bfa = bfa; 974 iocfc->bfa = bfa;
552 iocfc->action = BFA_IOCFC_ACT_NONE;
553
554 iocfc->cfg = *cfg; 975 iocfc->cfg = *cfg;
555 976
556 /* 977 /*
@@ -683,6 +1104,8 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
683 1104
684 for (i = 0; hal_mods[i]; i++) 1105 for (i = 0; hal_mods[i]; i++)
685 hal_mods[i]->start(bfa); 1106 hal_mods[i]->start(bfa);
1107
1108 bfa->iocfc.submod_enabled = BFA_TRUE;
686} 1109}
687 1110
688/* 1111/*
@@ -693,8 +1116,13 @@ bfa_iocfc_disable_submod(struct bfa_s *bfa)
693{ 1116{
694 int i; 1117 int i;
695 1118
1119 if (bfa->iocfc.submod_enabled == BFA_FALSE)
1120 return;
1121
696 for (i = 0; hal_mods[i]; i++) 1122 for (i = 0; hal_mods[i]; i++)
697 hal_mods[i]->iocdisable(bfa); 1123 hal_mods[i]->iocdisable(bfa);
1124
1125 bfa->iocfc.submod_enabled = BFA_FALSE;
698} 1126}
699 1127
700static void 1128static void
@@ -702,15 +1130,8 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
702{ 1130{
703 struct bfa_s *bfa = bfa_arg; 1131 struct bfa_s *bfa = bfa_arg;
704 1132
705 if (complete) { 1133 if (complete)
706 if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone) 1134 bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
707 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
708 else
709 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
710 } else {
711 if (bfa->iocfc.cfgdone)
712 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
713 }
714} 1135}
715 1136
716static void 1137static void
@@ -721,8 +1142,6 @@ bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
721 1142
722 if (compl) 1143 if (compl)
723 complete(&bfad->comp); 1144 complete(&bfad->comp);
724 else
725 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
726} 1145}
727 1146
728static void 1147static void
@@ -794,8 +1213,6 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
794 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs); 1213 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
795 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports); 1214 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
796 1215
797 iocfc->cfgdone = BFA_TRUE;
798
799 /* 1216 /*
800 * configure queue register offsets as learnt from firmware 1217 * configure queue register offsets as learnt from firmware
801 */ 1218 */
@@ -811,22 +1228,13 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
811 */ 1228 */
812 bfa_msix_queue_install(bfa); 1229 bfa_msix_queue_install(bfa);
813 1230
814 /* 1231 if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
815 * Configuration is complete - initialize/start submodules 1232 bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
816 */ 1233 bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
817 bfa_fcport_init(bfa); 1234 bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
818
819 if (iocfc->action == BFA_IOCFC_ACT_INIT) {
820 if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
821 bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
822 bfa_iocfc_init_cb, bfa);
823 } else {
824 if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
825 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
826 bfa_iocfc_enable_cb, bfa);
827 bfa_iocfc_start_submod(bfa);
828 } 1235 }
829} 1236}
1237
830void 1238void
831bfa_iocfc_reset_queues(struct bfa_s *bfa) 1239bfa_iocfc_reset_queues(struct bfa_s *bfa)
832{ 1240{
@@ -840,6 +1248,23 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
840 } 1248 }
841} 1249}
842 1250
1251/*
1252 * Process FAA pwwn msg from fw.
1253 */
1254static void
1255bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
1256{
1257 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1258 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1259
1260 cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
1261 cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;
1262
1263 bfa->ioc.attr->pwwn = msg->pwwn;
1264 bfa->ioc.attr->nwwn = msg->nwwn;
1265 bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
1266}
1267
843/* Fabric Assigned Address specific functions */ 1268/* Fabric Assigned Address specific functions */
844 1269
845/* 1270/*
@@ -855,84 +1280,13 @@ bfa_faa_validate_request(struct bfa_s *bfa)
855 if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type)) 1280 if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
856 return BFA_STATUS_FEATURE_NOT_SUPPORTED; 1281 return BFA_STATUS_FEATURE_NOT_SUPPORTED;
857 } else { 1282 } else {
858 if (!bfa_ioc_is_acq_addr(&bfa->ioc)) 1283 return BFA_STATUS_IOC_NON_OP;
859 return BFA_STATUS_IOC_NON_OP;
860 } 1284 }
861 1285
862 return BFA_STATUS_OK; 1286 return BFA_STATUS_OK;
863} 1287}
864 1288
865bfa_status_t 1289bfa_status_t
866bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
867{
868 struct bfi_faa_en_dis_s faa_enable_req;
869 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
870 bfa_status_t status;
871
872 iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
873 iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
874
875 status = bfa_faa_validate_request(bfa);
876 if (status != BFA_STATUS_OK)
877 return status;
878
879 if (iocfc->faa_args.busy == BFA_TRUE)
880 return BFA_STATUS_DEVBUSY;
881
882 if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
883 return BFA_STATUS_FAA_ENABLED;
884
885 if (bfa_fcport_is_trunk_enabled(bfa))
886 return BFA_STATUS_ERROR_TRUNK_ENABLED;
887
888 bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
889 iocfc->faa_args.busy = BFA_TRUE;
890
891 memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
892 bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
893 BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));
894
895 bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
896 sizeof(struct bfi_faa_en_dis_s));
897
898 return BFA_STATUS_OK;
899}
900
901bfa_status_t
902bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
903 void *cbarg)
904{
905 struct bfi_faa_en_dis_s faa_disable_req;
906 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
907 bfa_status_t status;
908
909 iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
910 iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
911
912 status = bfa_faa_validate_request(bfa);
913 if (status != BFA_STATUS_OK)
914 return status;
915
916 if (iocfc->faa_args.busy == BFA_TRUE)
917 return BFA_STATUS_DEVBUSY;
918
919 if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
920 return BFA_STATUS_FAA_DISABLED;
921
922 bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
923 iocfc->faa_args.busy = BFA_TRUE;
924
925 memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
926 bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
927 BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));
928
929 bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
930 sizeof(struct bfi_faa_en_dis_s));
931
932 return BFA_STATUS_OK;
933}
934
935bfa_status_t
936bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, 1290bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
937 bfa_cb_iocfc_t cbfn, void *cbarg) 1291 bfa_cb_iocfc_t cbfn, void *cbarg)
938{ 1292{
@@ -963,38 +1317,6 @@ bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
963} 1317}
964 1318
965/* 1319/*
966 * FAA enable response
967 */
968static void
969bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
970 struct bfi_faa_en_dis_rsp_s *rsp)
971{
972 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
973 bfa_status_t status = rsp->status;
974
975 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
976
977 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
978 iocfc->faa_args.busy = BFA_FALSE;
979}
980
981/*
982 * FAA disable response
983 */
984static void
985bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
986 struct bfi_faa_en_dis_rsp_s *rsp)
987{
988 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
989 bfa_status_t status = rsp->status;
990
991 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
992
993 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
994 iocfc->faa_args.busy = BFA_FALSE;
995}
996
997/*
998 * FAA query response 1320 * FAA query response
999 */ 1321 */
1000static void 1322static void
@@ -1023,25 +1345,10 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
1023{ 1345{
1024 struct bfa_s *bfa = bfa_arg; 1346 struct bfa_s *bfa = bfa_arg;
1025 1347
1026 if (status == BFA_STATUS_FAA_ACQ_ADDR) { 1348 if (status == BFA_STATUS_OK)
1027 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, 1349 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
1028 bfa_iocfc_init_cb, bfa); 1350 else
1029 return; 1351 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
1030 }
1031
1032 if (status != BFA_STATUS_OK) {
1033 bfa_isr_disable(bfa);
1034 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
1035 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
1036 bfa_iocfc_init_cb, bfa);
1037 else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
1038 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
1039 bfa_iocfc_enable_cb, bfa);
1040 return;
1041 }
1042
1043 bfa_iocfc_send_cfg(bfa);
1044 bfa_dconf_modinit(bfa);
1045} 1352}
1046 1353
1047/* 1354/*
@@ -1052,17 +1359,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg)
1052{ 1359{
1053 struct bfa_s *bfa = bfa_arg; 1360 struct bfa_s *bfa = bfa_arg;
1054 1361
1055 bfa_isr_disable(bfa); 1362 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
1056 bfa_iocfc_disable_submod(bfa);
1057
1058 if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
1059 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
1060 bfa);
1061 else {
1062 WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
1063 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
1064 bfa);
1065 }
1066} 1363}
1067 1364
1068/* 1365/*
@@ -1074,13 +1371,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
1074 struct bfa_s *bfa = bfa_arg; 1371 struct bfa_s *bfa = bfa_arg;
1075 1372
1076 bfa->queue_process = BFA_FALSE; 1373 bfa->queue_process = BFA_FALSE;
1077 1374 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
1078 bfa_isr_disable(bfa);
1079 bfa_iocfc_disable_submod(bfa);
1080
1081 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
1082 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
1083 bfa);
1084} 1375}
1085 1376
1086/* 1377/*
@@ -1095,7 +1386,6 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
1095 bfa_isr_enable(bfa); 1386 bfa_isr_enable(bfa);
1096} 1387}
1097 1388
1098
1099/* 1389/*
1100 * Query IOC memory requirement information. 1390 * Query IOC memory requirement information.
1101 */ 1391 */
@@ -1171,6 +1461,12 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1171 INIT_LIST_HEAD(&bfa->comp_q); 1461 INIT_LIST_HEAD(&bfa->comp_q);
1172 for (i = 0; i < BFI_IOC_MAX_CQS; i++) 1462 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
1173 INIT_LIST_HEAD(&bfa->reqq_waitq[i]); 1463 INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
1464
1465 bfa->iocfc.cb_reqd = BFA_FALSE;
1466 bfa->iocfc.op_status = BFA_STATUS_OK;
1467 bfa->iocfc.submod_enabled = BFA_FALSE;
1468
1469 bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
1174} 1470}
1175 1471
1176/* 1472/*
@@ -1179,8 +1475,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1179void 1475void
1180bfa_iocfc_init(struct bfa_s *bfa) 1476bfa_iocfc_init(struct bfa_s *bfa)
1181{ 1477{
1182 bfa->iocfc.action = BFA_IOCFC_ACT_INIT; 1478 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
1183 bfa_ioc_enable(&bfa->ioc);
1184} 1479}
1185 1480
1186/* 1481/*
@@ -1190,8 +1485,7 @@ bfa_iocfc_init(struct bfa_s *bfa)
1190void 1485void
1191bfa_iocfc_start(struct bfa_s *bfa) 1486bfa_iocfc_start(struct bfa_s *bfa)
1192{ 1487{
1193 if (bfa->iocfc.cfgdone) 1488 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
1194 bfa_iocfc_start_submod(bfa);
1195} 1489}
1196 1490
1197/* 1491/*
@@ -1201,12 +1495,8 @@ bfa_iocfc_start(struct bfa_s *bfa)
1201void 1495void
1202bfa_iocfc_stop(struct bfa_s *bfa) 1496bfa_iocfc_stop(struct bfa_s *bfa)
1203{ 1497{
1204 bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
1205
1206 bfa->queue_process = BFA_FALSE; 1498 bfa->queue_process = BFA_FALSE;
1207 bfa_dconf_modexit(bfa); 1499 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
1208 if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
1209 bfa_ioc_disable(&bfa->ioc);
1210} 1500}
1211 1501
1212void 1502void
@@ -1226,13 +1516,9 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
1226 case BFI_IOCFC_I2H_UPDATEQ_RSP: 1516 case BFI_IOCFC_I2H_UPDATEQ_RSP:
1227 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); 1517 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
1228 break; 1518 break;
1229 case BFI_IOCFC_I2H_FAA_ENABLE_RSP: 1519 case BFI_IOCFC_I2H_ADDR_MSG:
1230 bfa_faa_enable_reply(iocfc, 1520 bfa_iocfc_process_faa_addr(bfa,
1231 (struct bfi_faa_en_dis_rsp_s *)msg); 1521 (struct bfi_faa_addr_msg_s *)msg);
1232 break;
1233 case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
1234 bfa_faa_disable_reply(iocfc,
1235 (struct bfi_faa_en_dis_rsp_s *)msg);
1236 break; 1522 break;
1237 case BFI_IOCFC_I2H_FAA_QUERY_RSP: 1523 case BFI_IOCFC_I2H_FAA_QUERY_RSP:
1238 bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg); 1524 bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
@@ -1306,8 +1592,8 @@ bfa_iocfc_enable(struct bfa_s *bfa)
1306{ 1592{
1307 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, 1593 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
1308 "IOC Enable"); 1594 "IOC Enable");
1309 bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE; 1595 bfa->iocfc.cb_reqd = BFA_TRUE;
1310 bfa_ioc_enable(&bfa->ioc); 1596 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
1311} 1597}
1312 1598
1313void 1599void
@@ -1315,17 +1601,16 @@ bfa_iocfc_disable(struct bfa_s *bfa)
1315{ 1601{
1316 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0, 1602 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
1317 "IOC Disable"); 1603 "IOC Disable");
1318 bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
1319 1604
1320 bfa->queue_process = BFA_FALSE; 1605 bfa->queue_process = BFA_FALSE;
1321 bfa_ioc_disable(&bfa->ioc); 1606 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
1322} 1607}
1323 1608
1324
1325bfa_boolean_t 1609bfa_boolean_t
1326bfa_iocfc_is_operational(struct bfa_s *bfa) 1610bfa_iocfc_is_operational(struct bfa_s *bfa)
1327{ 1611{
1328 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; 1612 return bfa_ioc_is_operational(&bfa->ioc) &&
1613 bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
1329} 1614}
1330 1615
1331/* 1616/*
@@ -1567,16 +1852,6 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
1567 } 1852 }
1568} 1853}
1569 1854
1570void
1571bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
1572{
1573 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
1574 if (bfa->iocfc.cfgdone == BFA_TRUE)
1575 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
1576 bfa_iocfc_init_cb, bfa);
1577 }
1578}
1579
1580/* 1855/*
1581 * Return the list of PCI vendor/device id lists supported by this 1856 * Return the list of PCI vendor/device id lists supported by this
1582 * BFA instance. 1857 * BFA instance.
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index cb07c628b2f1..36756ce0e58f 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -52,7 +52,7 @@ struct bfa_iocfc_fwcfg_s {
52 u16 num_uf_bufs; /* unsolicited recv buffers */ 52 u16 num_uf_bufs; /* unsolicited recv buffers */
53 u8 num_cqs; 53 u8 num_cqs;
54 u8 fw_tick_res; /* FW clock resolution in ms */ 54 u8 fw_tick_res; /* FW clock resolution in ms */
55 u8 rsvd[2]; 55 u8 rsvd[6];
56}; 56};
57#pragma pack() 57#pragma pack()
58 58
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index d4f951fe753e..5d2a1307e5ce 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -5717,6 +5717,8 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
5717 5717
5718 if (vport_drv->comp_del) 5718 if (vport_drv->comp_del)
5719 complete(vport_drv->comp_del); 5719 complete(vport_drv->comp_del);
5720 else
5721 kfree(vport_drv);
5720 5722
5721 bfa_lps_delete(vport->lps); 5723 bfa_lps_delete(vport->lps);
5722} 5724}
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 52628d5d3c9b..fe0463a1db04 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -2169,7 +2169,10 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
2169 * - MAX receive frame size 2169 * - MAX receive frame size
2170 */ 2170 */
2171 rport->cisc = plogi->csp.cisc; 2171 rport->cisc = plogi->csp.cisc;
2172 rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz); 2172 if (be16_to_cpu(plogi->class3.rxsz) < be16_to_cpu(plogi->csp.rxsz))
2173 rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
2174 else
2175 rport->maxfrsize = be16_to_cpu(plogi->csp.rxsz);
2173 2176
2174 bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); 2177 bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
2175 bfa_trc(port->fcs, port->fabric->bb_credit); 2178 bfa_trc(port->fcs, port->fabric->bb_credit);
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index eca7ab78085b..14e6284e48e4 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -88,7 +88,6 @@ static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
88static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); 88static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
89static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc); 89static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
90static void bfa_ioc_recover(struct bfa_ioc_s *ioc); 90static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
91static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
92static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc , 91static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
93 enum bfa_ioc_event_e event); 92 enum bfa_ioc_event_e event);
94static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); 93static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
@@ -97,7 +96,6 @@ static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
97static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc); 96static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
98static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); 97static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
99 98
100
101/* 99/*
102 * IOC state machine definitions/declarations 100 * IOC state machine definitions/declarations
103 */ 101 */
@@ -114,7 +112,6 @@ enum ioc_event {
114 IOC_E_HWERROR = 10, /* hardware error interrupt */ 112 IOC_E_HWERROR = 10, /* hardware error interrupt */
115 IOC_E_TIMEOUT = 11, /* timeout */ 113 IOC_E_TIMEOUT = 11, /* timeout */
116 IOC_E_HWFAILED = 12, /* PCI mapping failure notice */ 114 IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
117 IOC_E_FWRSP_ACQ_ADDR = 13, /* Acquiring address */
118}; 115};
119 116
120bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event); 117bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
@@ -127,7 +124,6 @@ bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
127bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); 124bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
128bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); 125bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
129bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event); 126bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
130bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);
131 127
132static struct bfa_sm_table_s ioc_sm_table[] = { 128static struct bfa_sm_table_s ioc_sm_table[] = {
133 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, 129 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
@@ -140,7 +136,6 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
140 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, 136 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
141 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 137 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
142 {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL}, 138 {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
143 {BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
144}; 139};
145 140
146/* 141/*
@@ -371,17 +366,9 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
371 switch (event) { 366 switch (event) {
372 case IOC_E_FWRSP_GETATTR: 367 case IOC_E_FWRSP_GETATTR:
373 bfa_ioc_timer_stop(ioc); 368 bfa_ioc_timer_stop(ioc);
374 bfa_ioc_check_attr_wwns(ioc);
375 bfa_ioc_hb_monitor(ioc);
376 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 369 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
377 break; 370 break;
378 371
379 case IOC_E_FWRSP_ACQ_ADDR:
380 bfa_ioc_timer_stop(ioc);
381 bfa_ioc_hb_monitor(ioc);
382 bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
383 break;
384
385 case IOC_E_PFFAILED: 372 case IOC_E_PFFAILED:
386 case IOC_E_HWERROR: 373 case IOC_E_HWERROR:
387 bfa_ioc_timer_stop(ioc); 374 bfa_ioc_timer_stop(ioc);
@@ -406,51 +393,6 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
406 } 393 }
407} 394}
408 395
409/*
410 * Acquiring address from fabric (entry function)
411 */
412static void
413bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
414{
415}
416
417/*
418 * Acquiring address from the fabric
419 */
420static void
421bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
422{
423 bfa_trc(ioc, event);
424
425 switch (event) {
426 case IOC_E_FWRSP_GETATTR:
427 bfa_ioc_check_attr_wwns(ioc);
428 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
429 break;
430
431 case IOC_E_PFFAILED:
432 case IOC_E_HWERROR:
433 bfa_hb_timer_stop(ioc);
434 case IOC_E_HBFAIL:
435 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
436 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
437 if (event != IOC_E_PFFAILED)
438 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
439 break;
440
441 case IOC_E_DISABLE:
442 bfa_hb_timer_stop(ioc);
443 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
444 break;
445
446 case IOC_E_ENABLE:
447 break;
448
449 default:
450 bfa_sm_fault(ioc, event);
451 }
452}
453
454static void 396static void
455bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc) 397bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
456{ 398{
@@ -458,6 +400,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
458 400
459 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 401 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
460 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); 402 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
403 bfa_ioc_hb_monitor(ioc);
461 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n"); 404 BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
462 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE); 405 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
463} 406}
@@ -738,26 +681,60 @@ static void
738bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) 681bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
739{ 682{
740 struct bfi_ioc_image_hdr_s fwhdr; 683 struct bfi_ioc_image_hdr_s fwhdr;
741 u32 fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate); 684 u32 r32, fwstate, pgnum, pgoff, loff = 0;
685 int i;
686
687 /*
688 * Spin on init semaphore to serialize.
689 */
690 r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
691 while (r32 & 0x1) {
692 udelay(20);
693 r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
694 }
742 695
743 /* h/w sem init */ 696 /* h/w sem init */
744 if (fwstate == BFI_IOC_UNINIT) 697 fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
698 if (fwstate == BFI_IOC_UNINIT) {
699 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
745 goto sem_get; 700 goto sem_get;
701 }
746 702
747 bfa_ioc_fwver_get(iocpf->ioc, &fwhdr); 703 bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
748 704
749 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) 705 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
706 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
750 goto sem_get; 707 goto sem_get;
708 }
709
710 /*
711 * Clear fwver hdr
712 */
713 pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
714 pgoff = PSS_SMEM_PGOFF(loff);
715 writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
716
717 for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
718 bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
719 loff += sizeof(u32);
720 }
751 721
752 bfa_trc(iocpf->ioc, fwstate); 722 bfa_trc(iocpf->ioc, fwstate);
753 bfa_trc(iocpf->ioc, fwhdr.exec); 723 bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
754 writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate); 724 writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
725 writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);
755 726
756 /* 727 /*
757 * Try to lock and then unlock the semaphore. 728 * Unlock the hw semaphore. Should be here only once per boot.
758 */ 729 */
759 readl(iocpf->ioc->ioc_regs.ioc_sem_reg); 730 readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
760 writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg); 731 writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
732
733 /*
734 * unlock init semaphore.
735 */
736 writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
737
761sem_get: 738sem_get:
762 bfa_ioc_hw_sem_get(iocpf->ioc); 739 bfa_ioc_hw_sem_get(iocpf->ioc);
763} 740}
@@ -1707,11 +1684,6 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1707 u32 i; 1684 u32 i;
1708 u32 asicmode; 1685 u32 asicmode;
1709 1686
1710 /*
1711 * Initialize LMEM first before code download
1712 */
1713 bfa_ioc_lmem_init(ioc);
1714
1715 bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc))); 1687 bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
1716 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno); 1688 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1717 1689
@@ -1999,6 +1971,12 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1999 bfa_ioc_pll_init_asic(ioc); 1971 bfa_ioc_pll_init_asic(ioc);
2000 1972
2001 ioc->pllinit = BFA_TRUE; 1973 ioc->pllinit = BFA_TRUE;
1974
1975 /*
1976 * Initialize LMEM
1977 */
1978 bfa_ioc_lmem_init(ioc);
1979
2002 /* 1980 /*
2003 * release semaphore. 1981 * release semaphore.
2004 */ 1982 */
@@ -2122,10 +2100,6 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2122 bfa_ioc_getattr_reply(ioc); 2100 bfa_ioc_getattr_reply(ioc);
2123 break; 2101 break;
2124 2102
2125 case BFI_IOC_I2H_ACQ_ADDR_REPLY:
2126 bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
2127 break;
2128
2129 default: 2103 default:
2130 bfa_trc(ioc, msg->mh.msg_id); 2104 bfa_trc(ioc, msg->mh.msg_id);
2131 WARN_ON(1); 2105 WARN_ON(1);
@@ -2416,15 +2390,6 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2416} 2390}
2417 2391
2418/* 2392/*
2419 * Return TRUE if IOC is in acquiring address state
2420 */
2421bfa_boolean_t
2422bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
2423{
2424 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
2425}
2426
2427/*
2428 * return true if IOC firmware is different. 2393 * return true if IOC firmware is different.
2429 */ 2394 */
2430bfa_boolean_t 2395bfa_boolean_t
@@ -2916,17 +2881,6 @@ bfa_ioc_recover(struct bfa_ioc_s *ioc)
2916 bfa_fsm_send_event(ioc, IOC_E_HBFAIL); 2881 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2917} 2882}
2918 2883
2919static void
2920bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
2921{
2922 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2923 return;
2924 if (ioc->attr->nwwn == 0)
2925 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
2926 if (ioc->attr->pwwn == 0)
2927 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
2928}
2929
2930/* 2884/*
2931 * BFA IOC PF private functions 2885 * BFA IOC PF private functions
2932 */ 2886 */
@@ -4495,7 +4449,7 @@ bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4495 */ 4449 */
4496 4450
4497#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */ 4451#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
4498#define BFA_DIAG_FWPING_TOV 1000 /* msec */ 4452#define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */
4499 4453
4500/* IOC event handler */ 4454/* IOC event handler */
4501static void 4455static void
@@ -4772,7 +4726,7 @@ diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4772} 4726}
4773 4727
4774static void 4728static void
4775diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s * msg) 4729diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4776{ 4730{
4777 bfa_trc(diag, diag->ledtest.lock); 4731 bfa_trc(diag, diag->ledtest.lock);
4778 diag->ledtest.lock = BFA_FALSE; 4732 diag->ledtest.lock = BFA_FALSE;
@@ -4850,6 +4804,8 @@ bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4850 u32 pattern, struct bfa_diag_memtest_result *result, 4804 u32 pattern, struct bfa_diag_memtest_result *result,
4851 bfa_cb_diag_t cbfn, void *cbarg) 4805 bfa_cb_diag_t cbfn, void *cbarg)
4852{ 4806{
4807 u32 memtest_tov;
4808
4853 bfa_trc(diag, pattern); 4809 bfa_trc(diag, pattern);
4854 4810
4855 if (!bfa_ioc_adapter_is_disabled(diag->ioc)) 4811 if (!bfa_ioc_adapter_is_disabled(diag->ioc))
@@ -4869,8 +4825,10 @@ bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4869 /* download memtest code and take LPU0 out of reset */ 4825 /* download memtest code and take LPU0 out of reset */
4870 bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS); 4826 bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
4871 4827
4828 memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
4829 CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
4872 bfa_timer_begin(diag->ioc->timer_mod, &diag->timer, 4830 bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
4873 bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV); 4831 bfa_diag_memtest_done, diag, memtest_tov);
4874 diag->timer_active = 1; 4832 diag->timer_active = 1;
4875 return BFA_STATUS_OK; 4833 return BFA_STATUS_OK;
4876} 4834}
@@ -5641,24 +5599,27 @@ bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5641 case BFA_DCONF_SM_INIT: 5599 case BFA_DCONF_SM_INIT:
5642 if (dconf->min_cfg) { 5600 if (dconf->min_cfg) {
5643 bfa_trc(dconf->bfa, dconf->min_cfg); 5601 bfa_trc(dconf->bfa, dconf->min_cfg);
5602 bfa_fsm_send_event(&dconf->bfa->iocfc,
5603 IOCFC_E_DCONF_DONE);
5644 return; 5604 return;
5645 } 5605 }
5646 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read); 5606 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5647 dconf->flashdone = BFA_FALSE; 5607 bfa_timer_start(dconf->bfa, &dconf->timer,
5648 bfa_trc(dconf->bfa, dconf->flashdone); 5608 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5649 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa), 5609 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5650 BFA_FLASH_PART_DRV, dconf->instance, 5610 BFA_FLASH_PART_DRV, dconf->instance,
5651 dconf->dconf, 5611 dconf->dconf,
5652 sizeof(struct bfa_dconf_s), 0, 5612 sizeof(struct bfa_dconf_s), 0,
5653 bfa_dconf_init_cb, dconf->bfa); 5613 bfa_dconf_init_cb, dconf->bfa);
5654 if (bfa_status != BFA_STATUS_OK) { 5614 if (bfa_status != BFA_STATUS_OK) {
5615 bfa_timer_stop(&dconf->timer);
5655 bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED); 5616 bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5656 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); 5617 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5657 return; 5618 return;
5658 } 5619 }
5659 break; 5620 break;
5660 case BFA_DCONF_SM_EXIT: 5621 case BFA_DCONF_SM_EXIT:
5661 dconf->flashdone = BFA_TRUE; 5622 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5662 case BFA_DCONF_SM_IOCDISABLE: 5623 case BFA_DCONF_SM_IOCDISABLE:
5663 case BFA_DCONF_SM_WR: 5624 case BFA_DCONF_SM_WR:
5664 case BFA_DCONF_SM_FLASH_COMP: 5625 case BFA_DCONF_SM_FLASH_COMP:
@@ -5679,15 +5640,20 @@ bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5679 5640
5680 switch (event) { 5641 switch (event) {
5681 case BFA_DCONF_SM_FLASH_COMP: 5642 case BFA_DCONF_SM_FLASH_COMP:
5643 bfa_timer_stop(&dconf->timer);
5682 bfa_sm_set_state(dconf, bfa_dconf_sm_ready); 5644 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5683 break; 5645 break;
5684 case BFA_DCONF_SM_TIMEOUT: 5646 case BFA_DCONF_SM_TIMEOUT:
5685 bfa_sm_set_state(dconf, bfa_dconf_sm_ready); 5647 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5648 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED);
5686 break; 5649 break;
5687 case BFA_DCONF_SM_EXIT: 5650 case BFA_DCONF_SM_EXIT:
5688 dconf->flashdone = BFA_TRUE; 5651 bfa_timer_stop(&dconf->timer);
5689 bfa_trc(dconf->bfa, dconf->flashdone); 5652 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5653 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5654 break;
5690 case BFA_DCONF_SM_IOCDISABLE: 5655 case BFA_DCONF_SM_IOCDISABLE:
5656 bfa_timer_stop(&dconf->timer);
5691 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); 5657 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5692 break; 5658 break;
5693 default: 5659 default:
@@ -5710,9 +5676,8 @@ bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5710 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); 5676 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5711 break; 5677 break;
5712 case BFA_DCONF_SM_EXIT: 5678 case BFA_DCONF_SM_EXIT:
5713 dconf->flashdone = BFA_TRUE;
5714 bfa_trc(dconf->bfa, dconf->flashdone);
5715 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); 5679 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5680 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5716 break; 5681 break;
5717 case BFA_DCONF_SM_INIT: 5682 case BFA_DCONF_SM_INIT:
5718 case BFA_DCONF_SM_IOCDISABLE: 5683 case BFA_DCONF_SM_IOCDISABLE:
@@ -5774,9 +5739,7 @@ bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5774 bfa_timer_stop(&dconf->timer); 5739 bfa_timer_stop(&dconf->timer);
5775 case BFA_DCONF_SM_TIMEOUT: 5740 case BFA_DCONF_SM_TIMEOUT:
5776 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); 5741 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5777 dconf->flashdone = BFA_TRUE; 5742 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5778 bfa_trc(dconf->bfa, dconf->flashdone);
5779 bfa_ioc_disable(&dconf->bfa->ioc);
5780 break; 5743 break;
5781 default: 5744 default:
5782 bfa_sm_fault(dconf->bfa, event); 5745 bfa_sm_fault(dconf->bfa, event);
@@ -5823,8 +5786,8 @@ bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5823 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty); 5786 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5824 break; 5787 break;
5825 case BFA_DCONF_SM_EXIT: 5788 case BFA_DCONF_SM_EXIT:
5826 dconf->flashdone = BFA_TRUE;
5827 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); 5789 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5790 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5828 break; 5791 break;
5829 case BFA_DCONF_SM_IOCDISABLE: 5792 case BFA_DCONF_SM_IOCDISABLE:
5830 break; 5793 break;
@@ -5865,11 +5828,6 @@ bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5865 if (cfg->drvcfg.min_cfg) { 5828 if (cfg->drvcfg.min_cfg) {
5866 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s); 5829 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
5867 dconf->min_cfg = BFA_TRUE; 5830 dconf->min_cfg = BFA_TRUE;
5868 /*
5869 * Set the flashdone flag to TRUE explicitly as no flash
5870 * write will happen in min_cfg mode.
5871 */
5872 dconf->flashdone = BFA_TRUE;
5873 } else { 5831 } else {
5874 dconf->min_cfg = BFA_FALSE; 5832 dconf->min_cfg = BFA_FALSE;
5875 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s); 5833 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
@@ -5885,9 +5843,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
5885 struct bfa_s *bfa = arg; 5843 struct bfa_s *bfa = arg;
5886 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); 5844 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5887 5845
5888 dconf->flashdone = BFA_TRUE; 5846 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5889 bfa_trc(bfa, dconf->flashdone);
5890 bfa_iocfc_cb_dconf_modinit(bfa, status);
5891 if (status == BFA_STATUS_OK) { 5847 if (status == BFA_STATUS_OK) {
5892 bfa_dconf_read_data_valid(bfa) = BFA_TRUE; 5848 bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
5893 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE) 5849 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
@@ -5895,7 +5851,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
5895 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION) 5851 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
5896 dconf->dconf->hdr.version = BFI_DCONF_VERSION; 5852 dconf->dconf->hdr.version = BFI_DCONF_VERSION;
5897 } 5853 }
5898 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP); 5854 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
5899} 5855}
5900 5856
5901void 5857void
@@ -5977,7 +5933,5 @@ void
5977bfa_dconf_modexit(struct bfa_s *bfa) 5933bfa_dconf_modexit(struct bfa_s *bfa)
5978{ 5934{
5979 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); 5935 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5980 BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
5981 bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
5982 bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT); 5936 bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
5983} 5937}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 546d46b37101..1a99d4b5b50f 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -373,6 +373,22 @@ struct bfa_cb_qe_s {
373}; 373};
374 374
375/* 375/*
376 * IOCFC state machine definitions/declarations
377 */
378enum iocfc_event {
379 IOCFC_E_INIT = 1, /* IOCFC init request */
380 IOCFC_E_START = 2, /* IOCFC mod start request */
381 IOCFC_E_STOP = 3, /* IOCFC stop request */
382 IOCFC_E_ENABLE = 4, /* IOCFC enable request */
383 IOCFC_E_DISABLE = 5, /* IOCFC disable request */
384 IOCFC_E_IOC_ENABLED = 6, /* IOC enabled message */
385 IOCFC_E_IOC_DISABLED = 7, /* IOC disabled message */
386 IOCFC_E_IOC_FAILED = 8, /* failure notice by IOC sm */
387 IOCFC_E_DCONF_DONE = 9, /* dconf read/write done */
388 IOCFC_E_CFG_DONE = 10, /* IOCFC config complete */
389};
390
391/*
376 * ASIC block configurtion related 392 * ASIC block configurtion related
377 */ 393 */
378 394
@@ -706,7 +722,6 @@ struct bfa_dconf_s {
706struct bfa_dconf_mod_s { 722struct bfa_dconf_mod_s {
707 bfa_sm_t sm; 723 bfa_sm_t sm;
708 u8 instance; 724 u8 instance;
709 bfa_boolean_t flashdone;
710 bfa_boolean_t read_data_valid; 725 bfa_boolean_t read_data_valid;
711 bfa_boolean_t min_cfg; 726 bfa_boolean_t min_cfg;
712 struct bfa_timer_s timer; 727 struct bfa_timer_s timer;
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index d1b8f0caaa79..2eb0c6a2938d 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -786,17 +786,73 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb)
786} 786}
787 787
788#define CT2_NFC_MAX_DELAY 1000 788#define CT2_NFC_MAX_DELAY 1000
789#define CT2_NFC_VER_VALID 0x143
790#define BFA_IOC_PLL_POLL 1000000
791
792static bfa_boolean_t
793bfa_ioc_ct2_nfc_halted(void __iomem *rb)
794{
795 u32 r32;
796
797 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
798 if (r32 & __NFC_CONTROLLER_HALTED)
799 return BFA_TRUE;
800
801 return BFA_FALSE;
802}
803
804static void
805bfa_ioc_ct2_nfc_resume(void __iomem *rb)
806{
807 u32 r32;
808 int i;
809
810 writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
811 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
812 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
813 if (!(r32 & __NFC_CONTROLLER_HALTED))
814 return;
815 udelay(1000);
816 }
817 WARN_ON(1);
818}
819
789bfa_status_t 820bfa_status_t
790bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode) 821bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
791{ 822{
792 u32 wgn, r32; 823 u32 wgn, r32, nfc_ver, i;
793 int i;
794 824
795 /*
796 * Initialize PLL if not already done by NFC
797 */
798 wgn = readl(rb + CT2_WGN_STATUS); 825 wgn = readl(rb + CT2_WGN_STATUS);
799 if (!(wgn & __GLBL_PF_VF_CFG_RDY)) { 826 nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
827
828 if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
829 (nfc_ver >= CT2_NFC_VER_VALID)) {
830 if (bfa_ioc_ct2_nfc_halted(rb))
831 bfa_ioc_ct2_nfc_resume(rb);
832
833 writel(__RESET_AND_START_SCLK_LCLK_PLLS,
834 rb + CT2_CSI_FW_CTL_SET_REG);
835
836 for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
837 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
838 if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
839 break;
840 }
841
842 WARN_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
843
844 for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
845 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
846 if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
847 break;
848 }
849
850 WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
851 udelay(1000);
852
853 r32 = readl(rb + CT2_CSI_FW_CTL_REG);
854 WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
855 } else {
800 writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG); 856 writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
801 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { 857 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
802 r32 = readl(rb + CT2_NFC_CSR_SET_REG); 858 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
@@ -804,57 +860,62 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
804 break; 860 break;
805 udelay(1000); 861 udelay(1000);
806 } 862 }
807 }
808 863
809 /* 864 bfa_ioc_ct2_mac_reset(rb);
810 * Mask the interrupts and clear any 865 bfa_ioc_ct2_sclk_init(rb);
811 * pending interrupts. 866 bfa_ioc_ct2_lclk_init(rb);
812 */ 867
813 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); 868 /*
814 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); 869 * release soft reset on s_clk & l_clk
815 870 */
816 r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); 871 r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
817 if (r32 == 1) { 872 writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
818 writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); 873 (rb + CT2_APP_PLL_SCLK_CTL_REG));
819 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); 874
875 /*
876 * release soft reset on s_clk & l_clk
877 */
878 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
879 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
880 (rb + CT2_APP_PLL_LCLK_CTL_REG));
820 } 881 }
821 r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
822 if (r32 == 1) {
823 writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
824 readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
825 }
826
827 bfa_ioc_ct2_mac_reset(rb);
828 bfa_ioc_ct2_sclk_init(rb);
829 bfa_ioc_ct2_lclk_init(rb);
830
831 /*
832 * release soft reset on s_clk & l_clk
833 */
834 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
835 writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
836 (rb + CT2_APP_PLL_SCLK_CTL_REG));
837
838 /*
839 * release soft reset on s_clk & l_clk
840 */
841 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
842 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
843 (rb + CT2_APP_PLL_LCLK_CTL_REG));
844 882
845 /* 883 /*
846 * Announce flash device presence, if flash was corrupted. 884 * Announce flash device presence, if flash was corrupted.
847 */ 885 */
848 if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { 886 if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
849 r32 = readl((rb + PSS_GPIO_OUT_REG)); 887 r32 = readl(rb + PSS_GPIO_OUT_REG);
850 writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG)); 888 writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
851 r32 = readl((rb + PSS_GPIO_OE_REG)); 889 r32 = readl(rb + PSS_GPIO_OE_REG);
852 writel(r32 | 1, (rb + PSS_GPIO_OE_REG)); 890 writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
853 } 891 }
854 892
893 /*
894 * Mask the interrupts and clear any
895 * pending interrupts.
896 */
897 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
898 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
899
900 /* For first time initialization, no need to clear interrupts */
901 r32 = readl(rb + HOST_SEM5_REG);
902 if (r32 & 0x1) {
903 r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
904 if (r32 == 1) {
905 writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
906 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
907 }
908 r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
909 if (r32 == 1) {
910 writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
911 readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
912 }
913 }
914
855 bfa_ioc_ct2_mem_init(rb); 915 bfa_ioc_ct2_mem_init(rb);
856 916
857 writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG)); 917 writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
858 writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG)); 918 writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
919
859 return BFA_STATUS_OK; 920 return BFA_STATUS_OK;
860} 921}
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index aa8a0eaf91f9..2e856e6710f7 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -1280,6 +1280,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1280 switch (event) { 1280 switch (event) {
1281 case BFA_LPS_SM_RESUME: 1281 case BFA_LPS_SM_RESUME:
1282 bfa_sm_set_state(lps, bfa_lps_sm_login); 1282 bfa_sm_set_state(lps, bfa_lps_sm_login);
1283 bfa_lps_send_login(lps);
1283 break; 1284 break;
1284 1285
1285 case BFA_LPS_SM_OFFLINE: 1286 case BFA_LPS_SM_OFFLINE:
@@ -1578,7 +1579,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1578 break; 1579 break;
1579 1580
1580 case BFA_STATUS_VPORT_MAX: 1581 case BFA_STATUS_VPORT_MAX:
1581 if (!rsp->ext_status) 1582 if (rsp->ext_status)
1582 bfa_lps_no_res(lps, rsp->ext_status); 1583 bfa_lps_no_res(lps, rsp->ext_status);
1583 break; 1584 break;
1584 1585
@@ -3084,33 +3085,6 @@ bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3084} 3085}
3085 3086
3086static void 3087static void
3087bfa_fcport_send_txcredit(void *port_cbarg)
3088{
3089
3090 struct bfa_fcport_s *fcport = port_cbarg;
3091 struct bfi_fcport_set_svc_params_req_s *m;
3092
3093 /*
3094 * check for room in queue to send request now
3095 */
3096 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3097 if (!m) {
3098 bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
3099 return;
3100 }
3101
3102 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
3103 bfa_fn_lpu(fcport->bfa));
3104 m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
3105 m->bb_scn = fcport->cfg.bb_scn;
3106
3107 /*
3108 * queue I/O message to firmware
3109 */
3110 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3111}
3112
3113static void
3114bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d, 3088bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3115 struct bfa_qos_stats_s *s) 3089 struct bfa_qos_stats_s *s)
3116{ 3090{
@@ -3602,26 +3576,24 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3602 return BFA_STATUS_UNSUPP_SPEED; 3576 return BFA_STATUS_UNSUPP_SPEED;
3603 } 3577 }
3604 3578
3605 /* For Mezz card, port speed entered needs to be checked */ 3579 /* Port speed entered needs to be checked */
3606 if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) { 3580 if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3607 if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) { 3581 /* For CT2, 1G is not supported */
3608 /* For CT2, 1G is not supported */ 3582 if ((speed == BFA_PORT_SPEED_1GBPS) &&
3609 if ((speed == BFA_PORT_SPEED_1GBPS) && 3583 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3610 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) 3584 return BFA_STATUS_UNSUPP_SPEED;
3611 return BFA_STATUS_UNSUPP_SPEED;
3612 3585
3613 /* Already checked for Auto Speed and Max Speed supp */ 3586 /* Already checked for Auto Speed and Max Speed supp */
3614 if (!(speed == BFA_PORT_SPEED_1GBPS || 3587 if (!(speed == BFA_PORT_SPEED_1GBPS ||
3615 speed == BFA_PORT_SPEED_2GBPS || 3588 speed == BFA_PORT_SPEED_2GBPS ||
3616 speed == BFA_PORT_SPEED_4GBPS || 3589 speed == BFA_PORT_SPEED_4GBPS ||
3617 speed == BFA_PORT_SPEED_8GBPS || 3590 speed == BFA_PORT_SPEED_8GBPS ||
3618 speed == BFA_PORT_SPEED_16GBPS || 3591 speed == BFA_PORT_SPEED_16GBPS ||
3619 speed == BFA_PORT_SPEED_AUTO)) 3592 speed == BFA_PORT_SPEED_AUTO))
3620 return BFA_STATUS_UNSUPP_SPEED; 3593 return BFA_STATUS_UNSUPP_SPEED;
3621 } else { 3594 } else {
3622 if (speed != BFA_PORT_SPEED_10GBPS) 3595 if (speed != BFA_PORT_SPEED_10GBPS)
3623 return BFA_STATUS_UNSUPP_SPEED; 3596 return BFA_STATUS_UNSUPP_SPEED;
3624 }
3625 } 3597 }
3626 3598
3627 fcport->cfg.speed = speed; 3599 fcport->cfg.speed = speed;
@@ -3765,7 +3737,6 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
3765 fcport->cfg.bb_scn = bb_scn; 3737 fcport->cfg.bb_scn = bb_scn;
3766 if (bb_scn) 3738 if (bb_scn)
3767 fcport->bbsc_op_state = BFA_TRUE; 3739 fcport->bbsc_op_state = BFA_TRUE;
3768 bfa_fcport_send_txcredit(fcport);
3769} 3740}
3770 3741
3771/* 3742/*
@@ -3825,8 +3796,6 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3825 attr->port_state = BFA_PORT_ST_IOCDIS; 3796 attr->port_state = BFA_PORT_ST_IOCDIS;
3826 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc)) 3797 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
3827 attr->port_state = BFA_PORT_ST_FWMISMATCH; 3798 attr->port_state = BFA_PORT_ST_FWMISMATCH;
3828 else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc))
3829 attr->port_state = BFA_PORT_ST_ACQ_ADDR;
3830 } 3799 }
3831 3800
3832 /* FCoE vlan */ 3801 /* FCoE vlan */
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index b52cbb6bcd5a..f30067564639 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -663,10 +663,6 @@ void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
663void bfa_cb_lps_cvl_event(void *bfad, void *uarg); 663void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
664 664
665/* FAA specific APIs */ 665/* FAA specific APIs */
666bfa_status_t bfa_faa_enable(struct bfa_s *bfa,
667 bfa_cb_iocfc_t cbfn, void *cbarg);
668bfa_status_t bfa_faa_disable(struct bfa_s *bfa,
669 bfa_cb_iocfc_t cbfn, void *cbarg);
670bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr, 666bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
671 bfa_cb_iocfc_t cbfn, void *cbarg); 667 bfa_cb_iocfc_t cbfn, void *cbarg);
672 668
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 1938fe0473e9..7b1ecd2b3ffe 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -442,6 +442,43 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
442 return status; 442 return status;
443} 443}
444 444
445int
446bfad_im_issue_fc_host_lip(struct Scsi_Host *shost)
447{
448 struct bfad_im_port_s *im_port =
449 (struct bfad_im_port_s *) shost->hostdata[0];
450 struct bfad_s *bfad = im_port->bfad;
451 struct bfad_hal_comp fcomp;
452 unsigned long flags;
453 uint32_t status;
454
455 init_completion(&fcomp.comp);
456 spin_lock_irqsave(&bfad->bfad_lock, flags);
457 status = bfa_port_disable(&bfad->bfa.modules.port,
458 bfad_hcb_comp, &fcomp);
459 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
460
461 if (status != BFA_STATUS_OK)
462 return -EIO;
463
464 wait_for_completion(&fcomp.comp);
465 if (fcomp.status != BFA_STATUS_OK)
466 return -EIO;
467
468 spin_lock_irqsave(&bfad->bfad_lock, flags);
469 status = bfa_port_enable(&bfad->bfa.modules.port,
470 bfad_hcb_comp, &fcomp);
471 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
472 if (status != BFA_STATUS_OK)
473 return -EIO;
474
475 wait_for_completion(&fcomp.comp);
476 if (fcomp.status != BFA_STATUS_OK)
477 return -EIO;
478
479 return 0;
480}
481
445static int 482static int
446bfad_im_vport_delete(struct fc_vport *fc_vport) 483bfad_im_vport_delete(struct fc_vport *fc_vport)
447{ 484{
@@ -457,8 +494,11 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
457 unsigned long flags; 494 unsigned long flags;
458 struct completion fcomp; 495 struct completion fcomp;
459 496
460 if (im_port->flags & BFAD_PORT_DELETE) 497 if (im_port->flags & BFAD_PORT_DELETE) {
461 goto free_scsi_host; 498 bfad_scsi_host_free(bfad, im_port);
499 list_del(&vport->list_entry);
500 return 0;
501 }
462 502
463 port = im_port->port; 503 port = im_port->port;
464 504
@@ -489,7 +529,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
489 529
490 wait_for_completion(vport->comp_del); 530 wait_for_completion(vport->comp_del);
491 531
492free_scsi_host:
493 bfad_scsi_host_free(bfad, im_port); 532 bfad_scsi_host_free(bfad, im_port);
494 list_del(&vport->list_entry); 533 list_del(&vport->list_entry);
495 kfree(vport); 534 kfree(vport);
@@ -579,7 +618,7 @@ struct fc_function_template bfad_im_fc_function_template = {
579 .show_rport_dev_loss_tmo = 1, 618 .show_rport_dev_loss_tmo = 1,
580 .get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo, 619 .get_rport_dev_loss_tmo = bfad_im_get_rport_loss_tmo,
581 .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, 620 .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
582 621 .issue_fc_host_lip = bfad_im_issue_fc_host_lip,
583 .vport_create = bfad_im_vport_create, 622 .vport_create = bfad_im_vport_create,
584 .vport_delete = bfad_im_vport_delete, 623 .vport_delete = bfad_im_vport_delete,
585 .vport_disable = bfad_im_vport_disable, 624 .vport_disable = bfad_im_vport_disable,
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 8005c6c5a080..e1f4b10df42a 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -1288,50 +1288,6 @@ out:
1288} 1288}
1289 1289
1290int 1290int
1291bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd)
1292{
1293 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1294 unsigned long flags;
1295 struct bfad_hal_comp fcomp;
1296
1297 init_completion(&fcomp.comp);
1298 iocmd->status = BFA_STATUS_OK;
1299 spin_lock_irqsave(&bfad->bfad_lock, flags);
1300 iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp);
1301 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1302
1303 if (iocmd->status != BFA_STATUS_OK)
1304 goto out;
1305
1306 wait_for_completion(&fcomp.comp);
1307 iocmd->status = fcomp.status;
1308out:
1309 return 0;
1310}
1311
1312int
1313bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd)
1314{
1315 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1316 unsigned long flags;
1317 struct bfad_hal_comp fcomp;
1318
1319 init_completion(&fcomp.comp);
1320 iocmd->status = BFA_STATUS_OK;
1321 spin_lock_irqsave(&bfad->bfad_lock, flags);
1322 iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
1323 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1324
1325 if (iocmd->status != BFA_STATUS_OK)
1326 goto out;
1327
1328 wait_for_completion(&fcomp.comp);
1329 iocmd->status = fcomp.status;
1330out:
1331 return 0;
1332}
1333
1334int
1335bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd) 1291bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
1336{ 1292{
1337 struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd; 1293 struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
@@ -1918,6 +1874,7 @@ bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
1918 struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; 1874 struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
1919 void *iocmd_bufptr; 1875 void *iocmd_bufptr;
1920 unsigned long flags; 1876 unsigned long flags;
1877 u32 offset;
1921 1878
1922 if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s), 1879 if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
1923 BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) { 1880 BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
@@ -1935,8 +1892,10 @@ bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
1935 1892
1936 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); 1893 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
1937 spin_lock_irqsave(&bfad->bfad_lock, flags); 1894 spin_lock_irqsave(&bfad->bfad_lock, flags);
1895 offset = iocmd->offset;
1938 iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr, 1896 iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
1939 (u32 *)&iocmd->offset, &iocmd->bufsz); 1897 &offset, &iocmd->bufsz);
1898 iocmd->offset = offset;
1940 spin_unlock_irqrestore(&bfad->bfad_lock, flags); 1899 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1941out: 1900out:
1942 return 0; 1901 return 0;
@@ -2633,12 +2592,6 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2633 case IOCMD_FLASH_DISABLE_OPTROM: 2592 case IOCMD_FLASH_DISABLE_OPTROM:
2634 rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd); 2593 rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
2635 break; 2594 break;
2636 case IOCMD_FAA_ENABLE:
2637 rc = bfad_iocmd_faa_enable(bfad, iocmd);
2638 break;
2639 case IOCMD_FAA_DISABLE:
2640 rc = bfad_iocmd_faa_disable(bfad, iocmd);
2641 break;
2642 case IOCMD_FAA_QUERY: 2595 case IOCMD_FAA_QUERY:
2643 rc = bfad_iocmd_faa_query(bfad, iocmd); 2596 rc = bfad_iocmd_faa_query(bfad, iocmd);
2644 break; 2597 break;
@@ -2809,9 +2762,16 @@ bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
2809 struct bfad_im_port_s *im_port = 2762 struct bfad_im_port_s *im_port =
2810 (struct bfad_im_port_s *) job->shost->hostdata[0]; 2763 (struct bfad_im_port_s *) job->shost->hostdata[0];
2811 struct bfad_s *bfad = im_port->bfad; 2764 struct bfad_s *bfad = im_port->bfad;
2765 struct request_queue *request_q = job->req->q;
2812 void *payload_kbuf; 2766 void *payload_kbuf;
2813 int rc = -EINVAL; 2767 int rc = -EINVAL;
2814 2768
2769 /*
2770 * Set the BSG device request_queue size to 256 to support
2771 * payloads larger than 512*1024K bytes.
2772 */
2773 blk_queue_max_segments(request_q, 256);
2774
2815 /* Allocate a temp buffer to hold the passed in user space command */ 2775 /* Allocate a temp buffer to hold the passed in user space command */
2816 payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); 2776 payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
2817 if (!payload_kbuf) { 2777 if (!payload_kbuf) {
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
index e859adb9aa9e..17ad67283130 100644
--- a/drivers/scsi/bfa/bfad_bsg.h
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -83,8 +83,6 @@ enum {
83 IOCMD_PORT_CFG_MODE, 83 IOCMD_PORT_CFG_MODE,
84 IOCMD_FLASH_ENABLE_OPTROM, 84 IOCMD_FLASH_ENABLE_OPTROM,
85 IOCMD_FLASH_DISABLE_OPTROM, 85 IOCMD_FLASH_DISABLE_OPTROM,
86 IOCMD_FAA_ENABLE,
87 IOCMD_FAA_DISABLE,
88 IOCMD_FAA_QUERY, 86 IOCMD_FAA_QUERY,
89 IOCMD_CEE_GET_ATTR, 87 IOCMD_CEE_GET_ATTR,
90 IOCMD_CEE_GET_STATS, 88 IOCMD_CEE_GET_STATS,
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index dc5b9d99c450..7f74f1d19124 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -56,7 +56,7 @@
56#ifdef BFA_DRIVER_VERSION 56#ifdef BFA_DRIVER_VERSION
57#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 57#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
58#else 58#else
59#define BFAD_DRIVER_VERSION "3.0.2.2" 59#define BFAD_DRIVER_VERSION "3.0.23.0"
60#endif 60#endif
61 61
62#define BFAD_PROTO_NAME FCPI_NAME 62#define BFAD_PROTO_NAME FCPI_NAME
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index 0d9f1fb50db0..d4220e13cafa 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -28,17 +28,15 @@ enum bfi_iocfc_h2i_msgs {
28 BFI_IOCFC_H2I_CFG_REQ = 1, 28 BFI_IOCFC_H2I_CFG_REQ = 1,
29 BFI_IOCFC_H2I_SET_INTR_REQ = 2, 29 BFI_IOCFC_H2I_SET_INTR_REQ = 2,
30 BFI_IOCFC_H2I_UPDATEQ_REQ = 3, 30 BFI_IOCFC_H2I_UPDATEQ_REQ = 3,
31 BFI_IOCFC_H2I_FAA_ENABLE_REQ = 4, 31 BFI_IOCFC_H2I_FAA_QUERY_REQ = 4,
32 BFI_IOCFC_H2I_FAA_DISABLE_REQ = 5, 32 BFI_IOCFC_H2I_ADDR_REQ = 5,
33 BFI_IOCFC_H2I_FAA_QUERY_REQ = 6,
34}; 33};
35 34
36enum bfi_iocfc_i2h_msgs { 35enum bfi_iocfc_i2h_msgs {
37 BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1), 36 BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1),
38 BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3), 37 BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3),
39 BFI_IOCFC_I2H_FAA_ENABLE_RSP = BFA_I2HM(4), 38 BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(4),
40 BFI_IOCFC_I2H_FAA_DISABLE_RSP = BFA_I2HM(5), 39 BFI_IOCFC_I2H_ADDR_MSG = BFA_I2HM(5),
41 BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(6),
42}; 40};
43 41
44struct bfi_iocfc_cfg_s { 42struct bfi_iocfc_cfg_s {
@@ -184,6 +182,13 @@ struct bfi_faa_en_dis_s {
184 struct bfi_mhdr_s mh; /* common msg header */ 182 struct bfi_mhdr_s mh; /* common msg header */
185}; 183};
186 184
185struct bfi_faa_addr_msg_s {
186 struct bfi_mhdr_s mh; /* common msg header */
187 u8 rsvd[4];
188 wwn_t pwwn; /* Fabric acquired PWWN */
189 wwn_t nwwn; /* Fabric acquired PWWN */
190};
191
187/* 192/*
188 * BFI_IOCFC_H2I_FAA_QUERY_REQ message 193 * BFI_IOCFC_H2I_FAA_QUERY_REQ message
189 */ 194 */
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
index d892064b64a8..ed5f159e1867 100644
--- a/drivers/scsi/bfa/bfi_reg.h
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -335,11 +335,17 @@ enum {
335#define __PMM_1T_PNDB_P 0x00000002 335#define __PMM_1T_PNDB_P 0x00000002
336#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c 336#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c
337#define CT2_WGN_STATUS 0x00014990 337#define CT2_WGN_STATUS 0x00014990
338#define __A2T_AHB_LOAD 0x00000800
338#define __WGN_READY 0x00000400 339#define __WGN_READY 0x00000400
339#define __GLBL_PF_VF_CFG_RDY 0x00000200 340#define __GLBL_PF_VF_CFG_RDY 0x00000200
341#define CT2_NFC_CSR_CLR_REG 0x00027420
340#define CT2_NFC_CSR_SET_REG 0x00027424 342#define CT2_NFC_CSR_SET_REG 0x00027424
341#define __HALT_NFC_CONTROLLER 0x00000002 343#define __HALT_NFC_CONTROLLER 0x00000002
342#define __NFC_CONTROLLER_HALTED 0x00001000 344#define __NFC_CONTROLLER_HALTED 0x00001000
345#define CT2_RSC_GPR15_REG 0x0002765c
346#define CT2_CSI_FW_CTL_REG 0x00027080
347#define CT2_CSI_FW_CTL_SET_REG 0x00027088
348#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000
343 349
344#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0 350#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0
345#define __CSI_MAC_RESET 0x00000010 351#define __CSI_MAC_RESET 0x00000010
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index abd72a01856d..c1c6a92a0b98 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -439,13 +439,13 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
439 fr->fr_dev = lport; 439 fr->fr_dev = lport;
440 440
441 bg = &bnx2fc_global; 441 bg = &bnx2fc_global;
442 spin_lock_bh(&bg->fcoe_rx_list.lock); 442 spin_lock(&bg->fcoe_rx_list.lock);
443 443
444 __skb_queue_tail(&bg->fcoe_rx_list, skb); 444 __skb_queue_tail(&bg->fcoe_rx_list, skb);
445 if (bg->fcoe_rx_list.qlen == 1) 445 if (bg->fcoe_rx_list.qlen == 1)
446 wake_up_process(bg->thread); 446 wake_up_process(bg->thread);
447 447
448 spin_unlock_bh(&bg->fcoe_rx_list.lock); 448 spin_unlock(&bg->fcoe_rx_list.lock);
449 449
450 return 0; 450 return 0;
451err: 451err:
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index ae7d15c44e2a..335e85192807 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1436,7 +1436,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1436 goto err; 1436 goto err;
1437 1437
1438 fps = &per_cpu(fcoe_percpu, cpu); 1438 fps = &per_cpu(fcoe_percpu, cpu);
1439 spin_lock_bh(&fps->fcoe_rx_list.lock); 1439 spin_lock(&fps->fcoe_rx_list.lock);
1440 if (unlikely(!fps->thread)) { 1440 if (unlikely(!fps->thread)) {
1441 /* 1441 /*
1442 * The targeted CPU is not ready, let's target 1442 * The targeted CPU is not ready, let's target
@@ -1447,12 +1447,12 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1447 "ready for incoming skb- using first online " 1447 "ready for incoming skb- using first online "
1448 "CPU.\n"); 1448 "CPU.\n");
1449 1449
1450 spin_unlock_bh(&fps->fcoe_rx_list.lock); 1450 spin_unlock(&fps->fcoe_rx_list.lock);
1451 cpu = cpumask_first(cpu_online_mask); 1451 cpu = cpumask_first(cpu_online_mask);
1452 fps = &per_cpu(fcoe_percpu, cpu); 1452 fps = &per_cpu(fcoe_percpu, cpu);
1453 spin_lock_bh(&fps->fcoe_rx_list.lock); 1453 spin_lock(&fps->fcoe_rx_list.lock);
1454 if (!fps->thread) { 1454 if (!fps->thread) {
1455 spin_unlock_bh(&fps->fcoe_rx_list.lock); 1455 spin_unlock(&fps->fcoe_rx_list.lock);
1456 goto err; 1456 goto err;
1457 } 1457 }
1458 } 1458 }
@@ -1463,24 +1463,17 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1463 * so we're free to queue skbs into it's queue. 1463 * so we're free to queue skbs into it's queue.
1464 */ 1464 */
1465 1465
1466 /* If this is a SCSI-FCP frame, and this is already executing on the 1466 /*
1467 * correct CPU, and the queue for this CPU is empty, then go ahead 1467 * Note: We used to have a set of conditions under which we would
1468 * and process the frame directly in the softirq context. 1468 * call fcoe_recv_frame directly, rather than queuing to the rx list
1469 * This lets us process completions without context switching from the 1469 * as it could save a few cycles, but doing so is prohibited, as
1470 * NET_RX softirq, to our receive processing thread, and then back to 1470 * fcoe_recv_frame has several paths that may sleep, which is forbidden
1471 * BLOCK softirq context. 1471 * in softirq context.
1472 */ 1472 */
1473 if (fh->fh_type == FC_TYPE_FCP && 1473 __skb_queue_tail(&fps->fcoe_rx_list, skb);
1474 cpu == smp_processor_id() && 1474 if (fps->thread->state == TASK_INTERRUPTIBLE)
1475 skb_queue_empty(&fps->fcoe_rx_list)) { 1475 wake_up_process(fps->thread);
1476 spin_unlock_bh(&fps->fcoe_rx_list.lock); 1476 spin_unlock(&fps->fcoe_rx_list.lock);
1477 fcoe_recv_frame(skb);
1478 } else {
1479 __skb_queue_tail(&fps->fcoe_rx_list, skb);
1480 if (fps->fcoe_rx_list.qlen == 1)
1481 wake_up_process(fps->thread);
1482 spin_unlock_bh(&fps->fcoe_rx_list.lock);
1483 }
1484 1477
1485 return 0; 1478 return 0;
1486err: 1479err:
@@ -1797,23 +1790,29 @@ static int fcoe_percpu_receive_thread(void *arg)
1797{ 1790{
1798 struct fcoe_percpu_s *p = arg; 1791 struct fcoe_percpu_s *p = arg;
1799 struct sk_buff *skb; 1792 struct sk_buff *skb;
1793 struct sk_buff_head tmp;
1794
1795 skb_queue_head_init(&tmp);
1800 1796
1801 set_user_nice(current, -20); 1797 set_user_nice(current, -20);
1802 1798
1803 while (!kthread_should_stop()) { 1799 while (!kthread_should_stop()) {
1804 1800
1805 spin_lock_bh(&p->fcoe_rx_list.lock); 1801 spin_lock_bh(&p->fcoe_rx_list.lock);
1806 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) { 1802 skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
1803 spin_unlock_bh(&p->fcoe_rx_list.lock);
1804
1805 while ((skb = __skb_dequeue(&tmp)) != NULL)
1806 fcoe_recv_frame(skb);
1807
1808 spin_lock_bh(&p->fcoe_rx_list.lock);
1809 if (!skb_queue_len(&p->fcoe_rx_list)) {
1807 set_current_state(TASK_INTERRUPTIBLE); 1810 set_current_state(TASK_INTERRUPTIBLE);
1808 spin_unlock_bh(&p->fcoe_rx_list.lock); 1811 spin_unlock_bh(&p->fcoe_rx_list.lock);
1809 schedule(); 1812 schedule();
1810 set_current_state(TASK_RUNNING); 1813 set_current_state(TASK_RUNNING);
1811 if (kthread_should_stop()) 1814 } else
1812 return 0; 1815 spin_unlock_bh(&p->fcoe_rx_list.lock);
1813 spin_lock_bh(&p->fcoe_rx_list.lock);
1814 }
1815 spin_unlock_bh(&p->fcoe_rx_list.lock);
1816 fcoe_recv_frame(skb);
1817 } 1816 }
1818 return 0; 1817 return 0;
1819} 1818}
@@ -2187,8 +2186,12 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2187 /* start FIP Discovery and FLOGI */ 2186 /* start FIP Discovery and FLOGI */
2188 lport->boot_time = jiffies; 2187 lport->boot_time = jiffies;
2189 fc_fabric_login(lport); 2188 fc_fabric_login(lport);
2190 if (!fcoe_link_ok(lport)) 2189 if (!fcoe_link_ok(lport)) {
2190 rtnl_unlock();
2191 fcoe_ctlr_link_up(&fcoe->ctlr); 2191 fcoe_ctlr_link_up(&fcoe->ctlr);
2192 mutex_unlock(&fcoe_config_mutex);
2193 return rc;
2194 }
2192 2195
2193out_nodev: 2196out_nodev:
2194 rtnl_unlock(); 2197 rtnl_unlock();
@@ -2261,31 +2264,14 @@ static int fcoe_link_ok(struct fc_lport *lport)
2261static void fcoe_percpu_clean(struct fc_lport *lport) 2264static void fcoe_percpu_clean(struct fc_lport *lport)
2262{ 2265{
2263 struct fcoe_percpu_s *pp; 2266 struct fcoe_percpu_s *pp;
2264 struct fcoe_rcv_info *fr; 2267 struct sk_buff *skb;
2265 struct sk_buff_head *list;
2266 struct sk_buff *skb, *next;
2267 struct sk_buff *head;
2268 unsigned int cpu; 2268 unsigned int cpu;
2269 2269
2270 for_each_possible_cpu(cpu) { 2270 for_each_possible_cpu(cpu) {
2271 pp = &per_cpu(fcoe_percpu, cpu); 2271 pp = &per_cpu(fcoe_percpu, cpu);
2272 spin_lock_bh(&pp->fcoe_rx_list.lock);
2273 list = &pp->fcoe_rx_list;
2274 head = list->next;
2275 for (skb = head; skb != (struct sk_buff *)list;
2276 skb = next) {
2277 next = skb->next;
2278 fr = fcoe_dev_from_skb(skb);
2279 if (fr->fr_dev == lport) {
2280 __skb_unlink(skb, list);
2281 kfree_skb(skb);
2282 }
2283 }
2284 2272
2285 if (!pp->thread || !cpu_online(cpu)) { 2273 if (!pp->thread || !cpu_online(cpu))
2286 spin_unlock_bh(&pp->fcoe_rx_list.lock);
2287 continue; 2274 continue;
2288 }
2289 2275
2290 skb = dev_alloc_skb(0); 2276 skb = dev_alloc_skb(0);
2291 if (!skb) { 2277 if (!skb) {
@@ -2294,6 +2280,7 @@ static void fcoe_percpu_clean(struct fc_lport *lport)
2294 } 2280 }
2295 skb->destructor = fcoe_percpu_flush_done; 2281 skb->destructor = fcoe_percpu_flush_done;
2296 2282
2283 spin_lock_bh(&pp->fcoe_rx_list.lock);
2297 __skb_queue_tail(&pp->fcoe_rx_list, skb); 2284 __skb_queue_tail(&pp->fcoe_rx_list, skb);
2298 if (pp->fcoe_rx_list.qlen == 1) 2285 if (pp->fcoe_rx_list.qlen == 1)
2299 wake_up_process(pp->thread); 2286 wake_up_process(pp->thread);
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index e7522dcc296e..249a106888d9 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -242,7 +242,7 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
242 printk(KERN_INFO "libfcoe: host%d: FIP selected " 242 printk(KERN_INFO "libfcoe: host%d: FIP selected "
243 "Fibre-Channel Forwarder MAC %pM\n", 243 "Fibre-Channel Forwarder MAC %pM\n",
244 fip->lp->host->host_no, sel->fcf_mac); 244 fip->lp->host->host_no, sel->fcf_mac);
245 memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN); 245 memcpy(fip->dest_addr, sel->fcoe_mac, ETH_ALEN);
246 fip->map_dest = 0; 246 fip->map_dest = 0;
247 } 247 }
248unlock: 248unlock:
@@ -824,6 +824,7 @@ static int fcoe_ctlr_parse_adv(struct fcoe_ctlr *fip,
824 memcpy(fcf->fcf_mac, 824 memcpy(fcf->fcf_mac,
825 ((struct fip_mac_desc *)desc)->fd_mac, 825 ((struct fip_mac_desc *)desc)->fd_mac,
826 ETH_ALEN); 826 ETH_ALEN);
827 memcpy(fcf->fcoe_mac, fcf->fcf_mac, ETH_ALEN);
827 if (!is_valid_ether_addr(fcf->fcf_mac)) { 828 if (!is_valid_ether_addr(fcf->fcf_mac)) {
828 LIBFCOE_FIP_DBG(fip, 829 LIBFCOE_FIP_DBG(fip,
829 "Invalid MAC addr %pM in FIP adv\n", 830 "Invalid MAC addr %pM in FIP adv\n",
@@ -1013,6 +1014,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
1013 struct fip_desc *desc; 1014 struct fip_desc *desc;
1014 struct fip_encaps *els; 1015 struct fip_encaps *els;
1015 struct fcoe_dev_stats *stats; 1016 struct fcoe_dev_stats *stats;
1017 struct fcoe_fcf *sel;
1016 enum fip_desc_type els_dtype = 0; 1018 enum fip_desc_type els_dtype = 0;
1017 u8 els_op; 1019 u8 els_op;
1018 u8 sub; 1020 u8 sub;
@@ -1040,7 +1042,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
1040 goto drop; 1042 goto drop;
1041 /* Drop ELS if there are duplicate critical descriptors */ 1043 /* Drop ELS if there are duplicate critical descriptors */
1042 if (desc->fip_dtype < 32) { 1044 if (desc->fip_dtype < 32) {
1043 if (desc_mask & 1U << desc->fip_dtype) { 1045 if ((desc->fip_dtype != FIP_DT_MAC) &&
1046 (desc_mask & 1U << desc->fip_dtype)) {
1044 LIBFCOE_FIP_DBG(fip, "Duplicate Critical " 1047 LIBFCOE_FIP_DBG(fip, "Duplicate Critical "
1045 "Descriptors in FIP ELS\n"); 1048 "Descriptors in FIP ELS\n");
1046 goto drop; 1049 goto drop;
@@ -1049,17 +1052,32 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
1049 } 1052 }
1050 switch (desc->fip_dtype) { 1053 switch (desc->fip_dtype) {
1051 case FIP_DT_MAC: 1054 case FIP_DT_MAC:
1055 sel = fip->sel_fcf;
1052 if (desc_cnt == 1) { 1056 if (desc_cnt == 1) {
1053 LIBFCOE_FIP_DBG(fip, "FIP descriptors " 1057 LIBFCOE_FIP_DBG(fip, "FIP descriptors "
1054 "received out of order\n"); 1058 "received out of order\n");
1055 goto drop; 1059 goto drop;
1056 } 1060 }
1061 /*
1062 * Some switch implementations send two MAC descriptors,
1063 * with first MAC(granted_mac) being the FPMA, and the
1064 * second one(fcoe_mac) is used as destination address
1065 * for sending/receiving FCoE packets. FIP traffic is
1066 * sent using fip_mac. For regular switches, both
1067 * fip_mac and fcoe_mac would be the same.
1068 */
1069 if (desc_cnt == 2)
1070 memcpy(granted_mac,
1071 ((struct fip_mac_desc *)desc)->fd_mac,
1072 ETH_ALEN);
1057 1073
1058 if (dlen != sizeof(struct fip_mac_desc)) 1074 if (dlen != sizeof(struct fip_mac_desc))
1059 goto len_err; 1075 goto len_err;
1060 memcpy(granted_mac, 1076
1061 ((struct fip_mac_desc *)desc)->fd_mac, 1077 if ((desc_cnt == 3) && (sel))
1062 ETH_ALEN); 1078 memcpy(sel->fcoe_mac,
1079 ((struct fip_mac_desc *)desc)->fd_mac,
1080 ETH_ALEN);
1063 break; 1081 break;
1064 case FIP_DT_FLOGI: 1082 case FIP_DT_FLOGI:
1065 case FIP_DT_FDISC: 1083 case FIP_DT_FDISC:
@@ -1273,11 +1291,6 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
1273 * No Vx_Port description. Clear all NPIV ports, 1291 * No Vx_Port description. Clear all NPIV ports,
1274 * followed by physical port 1292 * followed by physical port
1275 */ 1293 */
1276 mutex_lock(&lport->lp_mutex);
1277 list_for_each_entry(vn_port, &lport->vports, list)
1278 fc_lport_reset(vn_port);
1279 mutex_unlock(&lport->lp_mutex);
1280
1281 mutex_lock(&fip->ctlr_mutex); 1294 mutex_lock(&fip->ctlr_mutex);
1282 per_cpu_ptr(lport->dev_stats, 1295 per_cpu_ptr(lport->dev_stats,
1283 get_cpu())->VLinkFailureCount++; 1296 get_cpu())->VLinkFailureCount++;
@@ -1285,6 +1298,11 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
1285 fcoe_ctlr_reset(fip); 1298 fcoe_ctlr_reset(fip);
1286 mutex_unlock(&fip->ctlr_mutex); 1299 mutex_unlock(&fip->ctlr_mutex);
1287 1300
1301 mutex_lock(&lport->lp_mutex);
1302 list_for_each_entry(vn_port, &lport->vports, list)
1303 fc_lport_reset(vn_port);
1304 mutex_unlock(&lport->lp_mutex);
1305
1288 fc_lport_reset(fip->lp); 1306 fc_lport_reset(fip->lp);
1289 fcoe_ctlr_solicit(fip, NULL); 1307 fcoe_ctlr_solicit(fip, NULL);
1290 } else { 1308 } else {
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index cdfe5a16de2a..e002cd466e9a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -104,7 +104,9 @@ static DEFINE_SPINLOCK(ipr_driver_lock);
104static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { 104static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
105 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */ 105 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
106 .mailbox = 0x0042C, 106 .mailbox = 0x0042C,
107 .max_cmds = 100,
107 .cache_line_size = 0x20, 108 .cache_line_size = 0x20,
109 .clear_isr = 1,
108 { 110 {
109 .set_interrupt_mask_reg = 0x0022C, 111 .set_interrupt_mask_reg = 0x0022C,
110 .clr_interrupt_mask_reg = 0x00230, 112 .clr_interrupt_mask_reg = 0x00230,
@@ -126,7 +128,9 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
126 }, 128 },
127 { /* Snipe and Scamp */ 129 { /* Snipe and Scamp */
128 .mailbox = 0x0052C, 130 .mailbox = 0x0052C,
131 .max_cmds = 100,
129 .cache_line_size = 0x20, 132 .cache_line_size = 0x20,
133 .clear_isr = 1,
130 { 134 {
131 .set_interrupt_mask_reg = 0x00288, 135 .set_interrupt_mask_reg = 0x00288,
132 .clr_interrupt_mask_reg = 0x0028C, 136 .clr_interrupt_mask_reg = 0x0028C,
@@ -148,7 +152,9 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
148 }, 152 },
149 { /* CRoC */ 153 { /* CRoC */
150 .mailbox = 0x00044, 154 .mailbox = 0x00044,
155 .max_cmds = 1000,
151 .cache_line_size = 0x20, 156 .cache_line_size = 0x20,
157 .clear_isr = 0,
152 { 158 {
153 .set_interrupt_mask_reg = 0x00010, 159 .set_interrupt_mask_reg = 0x00010,
154 .clr_interrupt_mask_reg = 0x00018, 160 .clr_interrupt_mask_reg = 0x00018,
@@ -847,8 +853,6 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
847 853
848 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0); 854 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
849 855
850 mb();
851
852 ipr_send_command(ipr_cmd); 856 ipr_send_command(ipr_cmd);
853} 857}
854 858
@@ -982,8 +986,6 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
982 986
983 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR); 987 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
984 988
985 mb();
986
987 ipr_send_command(ipr_cmd); 989 ipr_send_command(ipr_cmd);
988 } else { 990 } else {
989 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); 991 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
@@ -4339,8 +4341,7 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4339 4341
4340 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 4342 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4341 if ((res->bus == starget->channel) && 4343 if ((res->bus == starget->channel) &&
4342 (res->target == starget->id) && 4344 (res->target == starget->id)) {
4343 (res->lun == 0)) {
4344 return res; 4345 return res;
4345 } 4346 }
4346 } 4347 }
@@ -4414,12 +4415,14 @@ static void ipr_target_destroy(struct scsi_target *starget)
4414 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; 4415 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4415 4416
4416 if (ioa_cfg->sis64) { 4417 if (ioa_cfg->sis64) {
4417 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) 4418 if (!ipr_find_starget(starget)) {
4418 clear_bit(starget->id, ioa_cfg->array_ids); 4419 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4419 else if (starget->channel == IPR_VSET_VIRTUAL_BUS) 4420 clear_bit(starget->id, ioa_cfg->array_ids);
4420 clear_bit(starget->id, ioa_cfg->vset_ids); 4421 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4421 else if (starget->channel == 0) 4422 clear_bit(starget->id, ioa_cfg->vset_ids);
4422 clear_bit(starget->id, ioa_cfg->target_ids); 4423 else if (starget->channel == 0)
4424 clear_bit(starget->id, ioa_cfg->target_ids);
4425 }
4423 } 4426 }
4424 4427
4425 if (sata_port) { 4428 if (sata_port) {
@@ -5048,12 +5051,14 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5048 del_timer(&ioa_cfg->reset_cmd->timer); 5051 del_timer(&ioa_cfg->reset_cmd->timer);
5049 ipr_reset_ioa_job(ioa_cfg->reset_cmd); 5052 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5050 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) { 5053 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5051 if (ipr_debug && printk_ratelimit()) 5054 if (ioa_cfg->clear_isr) {
5052 dev_err(&ioa_cfg->pdev->dev, 5055 if (ipr_debug && printk_ratelimit())
5053 "Spurious interrupt detected. 0x%08X\n", int_reg); 5056 dev_err(&ioa_cfg->pdev->dev,
5054 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); 5057 "Spurious interrupt detected. 0x%08X\n", int_reg);
5055 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); 5058 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5056 return IRQ_NONE; 5059 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5060 return IRQ_NONE;
5061 }
5057 } else { 5062 } else {
5058 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) 5063 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5059 ioa_cfg->ioa_unit_checked = 1; 5064 ioa_cfg->ioa_unit_checked = 1;
@@ -5153,6 +5158,9 @@ static irqreturn_t ipr_isr(int irq, void *devp)
5153 } 5158 }
5154 } 5159 }
5155 5160
5161 if (ipr_cmd && !ioa_cfg->clear_isr)
5162 break;
5163
5156 if (ipr_cmd != NULL) { 5164 if (ipr_cmd != NULL) {
5157 /* Clear the PCI interrupt */ 5165 /* Clear the PCI interrupt */
5158 num_hrrq = 0; 5166 num_hrrq = 0;
@@ -5854,14 +5862,12 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
5854 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); 5862 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5855 } 5863 }
5856 5864
5857 if (likely(rc == 0)) { 5865 if (unlikely(rc != 0)) {
5858 mb(); 5866 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5859 ipr_send_command(ipr_cmd); 5867 return SCSI_MLQUEUE_HOST_BUSY;
5860 } else {
5861 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5862 return SCSI_MLQUEUE_HOST_BUSY;
5863 } 5868 }
5864 5869
5870 ipr_send_command(ipr_cmd);
5865 return 0; 5871 return 0;
5866} 5872}
5867 5873
@@ -6239,8 +6245,6 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6239 return AC_ERR_INVALID; 6245 return AC_ERR_INVALID;
6240 } 6246 }
6241 6247
6242 mb();
6243
6244 ipr_send_command(ipr_cmd); 6248 ipr_send_command(ipr_cmd);
6245 6249
6246 return 0; 6250 return 0;
@@ -8277,6 +8281,10 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8277 if (ioa_cfg->ipr_cmd_pool) 8281 if (ioa_cfg->ipr_cmd_pool)
8278 pci_pool_destroy (ioa_cfg->ipr_cmd_pool); 8282 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
8279 8283
8284 kfree(ioa_cfg->ipr_cmnd_list);
8285 kfree(ioa_cfg->ipr_cmnd_list_dma);
8286 ioa_cfg->ipr_cmnd_list = NULL;
8287 ioa_cfg->ipr_cmnd_list_dma = NULL;
8280 ioa_cfg->ipr_cmd_pool = NULL; 8288 ioa_cfg->ipr_cmd_pool = NULL;
8281} 8289}
8282 8290
@@ -8352,11 +8360,19 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8352 int i; 8360 int i;
8353 8361
8354 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, 8362 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
8355 sizeof(struct ipr_cmnd), 16, 0); 8363 sizeof(struct ipr_cmnd), 512, 0);
8356 8364
8357 if (!ioa_cfg->ipr_cmd_pool) 8365 if (!ioa_cfg->ipr_cmd_pool)
8358 return -ENOMEM; 8366 return -ENOMEM;
8359 8367
8368 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8369 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8370
8371 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8372 ipr_free_cmd_blks(ioa_cfg);
8373 return -ENOMEM;
8374 }
8375
8360 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { 8376 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8361 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); 8377 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8362 8378
@@ -8584,6 +8600,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8584 host->max_channel = IPR_MAX_BUS_TO_SCAN; 8600 host->max_channel = IPR_MAX_BUS_TO_SCAN;
8585 host->unique_id = host->host_no; 8601 host->unique_id = host->host_no;
8586 host->max_cmd_len = IPR_MAX_CDB_LEN; 8602 host->max_cmd_len = IPR_MAX_CDB_LEN;
8603 host->can_queue = ioa_cfg->max_cmds;
8587 pci_set_drvdata(pdev, ioa_cfg); 8604 pci_set_drvdata(pdev, ioa_cfg);
8588 8605
8589 p = &ioa_cfg->chip_cfg->regs; 8606 p = &ioa_cfg->chip_cfg->regs;
@@ -8768,6 +8785,8 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8768 /* set SIS 32 or SIS 64 */ 8785 /* set SIS 32 or SIS 64 */
8769 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; 8786 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8770 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; 8787 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8788 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
8789 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
8771 8790
8772 if (ipr_transop_timeout) 8791 if (ipr_transop_timeout)
8773 ioa_cfg->transop_timeout = ipr_transop_timeout; 8792 ioa_cfg->transop_timeout = ipr_transop_timeout;
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index f94eaee2ff16..153b8bd91d1e 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -38,8 +38,8 @@
38/* 38/*
39 * Literals 39 * Literals
40 */ 40 */
41#define IPR_DRIVER_VERSION "2.5.2" 41#define IPR_DRIVER_VERSION "2.5.3"
42#define IPR_DRIVER_DATE "(April 27, 2011)" 42#define IPR_DRIVER_DATE "(March 10, 2012)"
43 43
44/* 44/*
45 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 45 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -53,7 +53,7 @@
53 * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of 53 * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of
54 * ops the mid-layer can send to the adapter. 54 * ops the mid-layer can send to the adapter.
55 */ 55 */
56#define IPR_NUM_BASE_CMD_BLKS 100 56#define IPR_NUM_BASE_CMD_BLKS (ioa_cfg->max_cmds)
57 57
58#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339 58#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
59 59
@@ -153,7 +153,7 @@
153#define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \ 153#define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \
154 ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4) 154 ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4)
155 155
156#define IPR_MAX_COMMANDS IPR_NUM_BASE_CMD_BLKS 156#define IPR_MAX_COMMANDS 100
157#define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \ 157#define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \
158 IPR_NUM_INTERNAL_CMD_BLKS) 158 IPR_NUM_INTERNAL_CMD_BLKS)
159 159
@@ -1305,7 +1305,9 @@ struct ipr_interrupts {
1305 1305
1306struct ipr_chip_cfg_t { 1306struct ipr_chip_cfg_t {
1307 u32 mailbox; 1307 u32 mailbox;
1308 u16 max_cmds;
1308 u8 cache_line_size; 1309 u8 cache_line_size;
1310 u8 clear_isr;
1309 struct ipr_interrupt_offsets regs; 1311 struct ipr_interrupt_offsets regs;
1310}; 1312};
1311 1313
@@ -1388,6 +1390,7 @@ struct ipr_ioa_cfg {
1388 u8 sis64:1; 1390 u8 sis64:1;
1389 u8 dump_timeout:1; 1391 u8 dump_timeout:1;
1390 u8 cfg_locked:1; 1392 u8 cfg_locked:1;
1393 u8 clear_isr:1;
1391 1394
1392 u8 revid; 1395 u8 revid;
1393 1396
@@ -1501,8 +1504,9 @@ struct ipr_ioa_cfg {
1501 struct ata_host ata_host; 1504 struct ata_host ata_host;
1502 char ipr_cmd_label[8]; 1505 char ipr_cmd_label[8];
1503#define IPR_CMD_LABEL "ipr_cmd" 1506#define IPR_CMD_LABEL "ipr_cmd"
1504 struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; 1507 u32 max_cmds;
1505 dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; 1508 struct ipr_cmnd **ipr_cmnd_list;
1509 dma_addr_t *ipr_cmnd_list_dma;
1506}; /* struct ipr_ioa_cfg */ 1510}; /* struct ipr_ioa_cfg */
1507 1511
1508struct ipr_cmnd { 1512struct ipr_cmnd {
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 630291f01826..aceffadb21c7 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -2263,7 +2263,18 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
2263 mp->class = class; 2263 mp->class = class;
2264 /* adjust em exch xid range for offload */ 2264 /* adjust em exch xid range for offload */
2265 mp->min_xid = min_xid; 2265 mp->min_xid = min_xid;
2266 mp->max_xid = max_xid; 2266
2267 /* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */
2268 pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
2269 sizeof(struct fc_exch *);
2270 if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
2271 mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
2272 min_xid - 1;
2273 } else {
2274 mp->max_xid = max_xid;
2275 pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
2276 (fc_cpu_mask + 1);
2277 }
2267 2278
2268 mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep); 2279 mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
2269 if (!mp->ep_pool) 2280 if (!mp->ep_pool)
@@ -2274,7 +2285,6 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
2274 * divided across all cpus. The exch pointers array memory is 2285 * divided across all cpus. The exch pointers array memory is
2275 * allocated for exch range per pool. 2286 * allocated for exch range per pool.
2276 */ 2287 */
2277 pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1);
2278 mp->pool_max_index = pool_exch_range - 1; 2288 mp->pool_max_index = pool_exch_range - 1;
2279 2289
2280 /* 2290 /*
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index bd5d31d022d9..ef9560dff295 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1743,8 +1743,16 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
1743 mfs = ntohs(flp->fl_csp.sp_bb_data) & 1743 mfs = ntohs(flp->fl_csp.sp_bb_data) &
1744 FC_SP_BB_DATA_MASK; 1744 FC_SP_BB_DATA_MASK;
1745 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && 1745 if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
1746 mfs < lport->mfs) 1746 mfs <= lport->mfs) {
1747 lport->mfs = mfs; 1747 lport->mfs = mfs;
1748 fc_host_maxframe_size(lport->host) = mfs;
1749 } else {
1750 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
1751 "lport->mfs:%hu\n", mfs, lport->mfs);
1752 fc_lport_error(lport, fp);
1753 goto err;
1754 }
1755
1748 csp_flags = ntohs(flp->fl_csp.sp_features); 1756 csp_flags = ntohs(flp->fl_csp.sp_features);
1749 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); 1757 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
1750 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); 1758 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 88928f00aa2d..fe5d396aca73 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,7 +1,7 @@
1#/******************************************************************* 1#/*******************************************************************
2# * This file is part of the Emulex Linux Device Driver for * 2# * This file is part of the Emulex Linux Device Driver for *
3# * Fibre Channel Host Bus Adapters. * 3# * Fibre Channel Host Bus Adapters. *
4# * Copyright (C) 2004-2011 Emulex. All rights reserved. * 4# * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5# * EMULEX and SLI are trademarks of Emulex. * 5# * EMULEX and SLI are trademarks of Emulex. *
6# * www.emulex.com * 6# * www.emulex.com *
7# * * 7# * *
@@ -22,6 +22,8 @@
22ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage 22ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
23ccflags-$(GCOV) += -O0 23ccflags-$(GCOV) += -O0
24 24
25ccflags-y += -Werror
26
25obj-$(CONFIG_SCSI_LPFC) := lpfc.o 27obj-$(CONFIG_SCSI_LPFC) := lpfc.o
26 28
27lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ 29lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 5fc044ff656e..3a1ffdd6d831 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2011 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -840,6 +840,8 @@ struct lpfc_hba {
840 struct dentry *debug_dumpData; /* BlockGuard BPL */ 840 struct dentry *debug_dumpData; /* BlockGuard BPL */
841 struct dentry *debug_dumpDif; /* BlockGuard BPL */ 841 struct dentry *debug_dumpDif; /* BlockGuard BPL */
842 struct dentry *debug_InjErrLBA; /* LBA to inject errors at */ 842 struct dentry *debug_InjErrLBA; /* LBA to inject errors at */
843 struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */
844 struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */
843 struct dentry *debug_writeGuard; /* inject write guard_tag errors */ 845 struct dentry *debug_writeGuard; /* inject write guard_tag errors */
844 struct dentry *debug_writeApp; /* inject write app_tag errors */ 846 struct dentry *debug_writeApp; /* inject write app_tag errors */
845 struct dentry *debug_writeRef; /* inject write ref_tag errors */ 847 struct dentry *debug_writeRef; /* inject write ref_tag errors */
@@ -854,6 +856,8 @@ struct lpfc_hba {
854 uint32_t lpfc_injerr_rgrd_cnt; 856 uint32_t lpfc_injerr_rgrd_cnt;
855 uint32_t lpfc_injerr_rapp_cnt; 857 uint32_t lpfc_injerr_rapp_cnt;
856 uint32_t lpfc_injerr_rref_cnt; 858 uint32_t lpfc_injerr_rref_cnt;
859 uint32_t lpfc_injerr_nportid;
860 struct lpfc_name lpfc_injerr_wwpn;
857 sector_t lpfc_injerr_lba; 861 sector_t lpfc_injerr_lba;
858#define LPFC_INJERR_LBA_OFF (sector_t)(-1) 862#define LPFC_INJERR_LBA_OFF (sector_t)(-1)
859 863
@@ -908,6 +912,8 @@ struct lpfc_hba {
908 atomic_t fast_event_count; 912 atomic_t fast_event_count;
909 uint32_t fcoe_eventtag; 913 uint32_t fcoe_eventtag;
910 uint32_t fcoe_eventtag_at_fcf_scan; 914 uint32_t fcoe_eventtag_at_fcf_scan;
915 uint32_t fcoe_cvl_eventtag;
916 uint32_t fcoe_cvl_eventtag_attn;
911 struct lpfc_fcf fcf; 917 struct lpfc_fcf fcf;
912 uint8_t fc_map[3]; 918 uint8_t fc_map[3];
913 uint8_t valid_vlan; 919 uint8_t valid_vlan;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 296ad5bc4240..5eb2bc116183 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2011 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -2575,7 +2575,7 @@ LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
2575# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters 2575# lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
2576# objects that have been registered with the nameserver after login. 2576# objects that have been registered with the nameserver after login.
2577*/ 2577*/
2578LPFC_VPORT_ATTR_R(enable_da_id, 0, 0, 1, 2578LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
2579 "Deregister nameserver objects before LOGO"); 2579 "Deregister nameserver objects before LOGO");
2580 2580
2581/* 2581/*
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 22e17be04d8a..5bdf2eecb178 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2007-2011 Emulex. All rights reserved. * 4 * Copyright (C) 2007-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -1010,25 +1010,35 @@ lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
1010{ 1010{
1011 struct dentry *dent = file->f_dentry; 1011 struct dentry *dent = file->f_dentry;
1012 struct lpfc_hba *phba = file->private_data; 1012 struct lpfc_hba *phba = file->private_data;
1013 char cbuf[16]; 1013 char cbuf[32];
1014 uint64_t tmp = 0;
1014 int cnt = 0; 1015 int cnt = 0;
1015 1016
1016 if (dent == phba->debug_writeGuard) 1017 if (dent == phba->debug_writeGuard)
1017 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wgrd_cnt); 1018 cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
1018 else if (dent == phba->debug_writeApp) 1019 else if (dent == phba->debug_writeApp)
1019 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt); 1020 cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
1020 else if (dent == phba->debug_writeRef) 1021 else if (dent == phba->debug_writeRef)
1021 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt); 1022 cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
1022 else if (dent == phba->debug_readGuard) 1023 else if (dent == phba->debug_readGuard)
1023 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rgrd_cnt); 1024 cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
1024 else if (dent == phba->debug_readApp) 1025 else if (dent == phba->debug_readApp)
1025 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt); 1026 cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
1026 else if (dent == phba->debug_readRef) 1027 else if (dent == phba->debug_readRef)
1027 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rref_cnt); 1028 cnt = snprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
1028 else if (dent == phba->debug_InjErrLBA) 1029 else if (dent == phba->debug_InjErrNPortID)
1029 cnt = snprintf(cbuf, 16, "0x%lx\n", 1030 cnt = snprintf(cbuf, 32, "0x%06x\n", phba->lpfc_injerr_nportid);
1030 (unsigned long) phba->lpfc_injerr_lba); 1031 else if (dent == phba->debug_InjErrWWPN) {
1031 else 1032 memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
1033 tmp = cpu_to_be64(tmp);
1034 cnt = snprintf(cbuf, 32, "0x%016llx\n", tmp);
1035 } else if (dent == phba->debug_InjErrLBA) {
1036 if (phba->lpfc_injerr_lba == (sector_t)(-1))
1037 cnt = snprintf(cbuf, 32, "off\n");
1038 else
1039 cnt = snprintf(cbuf, 32, "0x%llx\n",
1040 (uint64_t) phba->lpfc_injerr_lba);
1041 } else
1032 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1042 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1033 "0547 Unknown debugfs error injection entry\n"); 1043 "0547 Unknown debugfs error injection entry\n");
1034 1044
@@ -1042,7 +1052,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
1042 struct dentry *dent = file->f_dentry; 1052 struct dentry *dent = file->f_dentry;
1043 struct lpfc_hba *phba = file->private_data; 1053 struct lpfc_hba *phba = file->private_data;
1044 char dstbuf[32]; 1054 char dstbuf[32];
1045 unsigned long tmp; 1055 uint64_t tmp = 0;
1046 int size; 1056 int size;
1047 1057
1048 memset(dstbuf, 0, 32); 1058 memset(dstbuf, 0, 32);
@@ -1050,7 +1060,12 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
1050 if (copy_from_user(dstbuf, buf, size)) 1060 if (copy_from_user(dstbuf, buf, size))
1051 return 0; 1061 return 0;
1052 1062
1053 if (strict_strtoul(dstbuf, 0, &tmp)) 1063 if (dent == phba->debug_InjErrLBA) {
1064 if ((buf[0] == 'o') && (buf[1] == 'f') && (buf[2] == 'f'))
1065 tmp = (uint64_t)(-1);
1066 }
1067
1068 if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
1054 return 0; 1069 return 0;
1055 1070
1056 if (dent == phba->debug_writeGuard) 1071 if (dent == phba->debug_writeGuard)
@@ -1067,7 +1082,12 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
1067 phba->lpfc_injerr_rref_cnt = (uint32_t)tmp; 1082 phba->lpfc_injerr_rref_cnt = (uint32_t)tmp;
1068 else if (dent == phba->debug_InjErrLBA) 1083 else if (dent == phba->debug_InjErrLBA)
1069 phba->lpfc_injerr_lba = (sector_t)tmp; 1084 phba->lpfc_injerr_lba = (sector_t)tmp;
1070 else 1085 else if (dent == phba->debug_InjErrNPortID)
1086 phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID);
1087 else if (dent == phba->debug_InjErrWWPN) {
1088 tmp = cpu_to_be64(tmp);
1089 memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name));
1090 } else
1071 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1091 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1072 "0548 Unknown debugfs error injection entry\n"); 1092 "0548 Unknown debugfs error injection entry\n");
1073 1093
@@ -3949,6 +3969,28 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
3949 } 3969 }
3950 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; 3970 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
3951 3971
3972 snprintf(name, sizeof(name), "InjErrNPortID");
3973 phba->debug_InjErrNPortID =
3974 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3975 phba->hba_debugfs_root,
3976 phba, &lpfc_debugfs_op_dif_err);
3977 if (!phba->debug_InjErrNPortID) {
3978 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3979 "0809 Cannot create debugfs InjErrNPortID\n");
3980 goto debug_failed;
3981 }
3982
3983 snprintf(name, sizeof(name), "InjErrWWPN");
3984 phba->debug_InjErrWWPN =
3985 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3986 phba->hba_debugfs_root,
3987 phba, &lpfc_debugfs_op_dif_err);
3988 if (!phba->debug_InjErrWWPN) {
3989 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3990 "0810 Cannot create debugfs InjErrWWPN\n");
3991 goto debug_failed;
3992 }
3993
3952 snprintf(name, sizeof(name), "writeGuardInjErr"); 3994 snprintf(name, sizeof(name), "writeGuardInjErr");
3953 phba->debug_writeGuard = 3995 phba->debug_writeGuard =
3954 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, 3996 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
@@ -4321,6 +4363,14 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
4321 debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */ 4363 debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
4322 phba->debug_InjErrLBA = NULL; 4364 phba->debug_InjErrLBA = NULL;
4323 } 4365 }
4366 if (phba->debug_InjErrNPortID) { /* InjErrNPortID */
4367 debugfs_remove(phba->debug_InjErrNPortID);
4368 phba->debug_InjErrNPortID = NULL;
4369 }
4370 if (phba->debug_InjErrWWPN) {
4371 debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */
4372 phba->debug_InjErrWWPN = NULL;
4373 }
4324 if (phba->debug_writeGuard) { 4374 if (phba->debug_writeGuard) {
4325 debugfs_remove(phba->debug_writeGuard); /* writeGuard */ 4375 debugfs_remove(phba->debug_writeGuard); /* writeGuard */
4326 phba->debug_writeGuard = NULL; 4376 phba->debug_writeGuard = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8db2fb3b45ec..3407b39e0a3f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2011 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -925,9 +925,17 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
925 * due to new FCF discovery 925 * due to new FCF discovery
926 */ 926 */
927 if ((phba->hba_flag & HBA_FIP_SUPPORT) && 927 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
928 (phba->fcf.fcf_flag & FCF_DISCOVERY) && 928 (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
929 !((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 929 if (phba->link_state < LPFC_LINK_UP)
930 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) { 930 goto stop_rr_fcf_flogi;
931 if ((phba->fcoe_cvl_eventtag_attn ==
932 phba->fcoe_cvl_eventtag) &&
933 (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
934 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))
935 goto stop_rr_fcf_flogi;
936 else
937 phba->fcoe_cvl_eventtag_attn =
938 phba->fcoe_cvl_eventtag;
931 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 939 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
932 "2611 FLOGI failed on FCF (x%x), " 940 "2611 FLOGI failed on FCF (x%x), "
933 "status:x%x/x%x, tmo:x%x, perform " 941 "status:x%x/x%x, tmo:x%x, perform "
@@ -943,6 +951,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
943 goto out; 951 goto out;
944 } 952 }
945 953
954stop_rr_fcf_flogi:
946 /* FLOGI failure */ 955 /* FLOGI failure */
947 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 956 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
948 "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n", 957 "2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 343d87ba4df8..b507536dc5b5 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2011 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -2843,7 +2843,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2843 struct lpfc_vport *vport = mboxq->vport; 2843 struct lpfc_vport *vport = mboxq->vport;
2844 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2844 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2845 2845
2846 if (mboxq->u.mb.mbxStatus) { 2846 /*
2847 * VFI not supported for interface type 0, so ignore any mailbox
2848 * error (except VFI in use) and continue with the discovery.
2849 */
2850 if (mboxq->u.mb.mbxStatus &&
2851 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2852 LPFC_SLI_INTF_IF_TYPE_0) &&
2853 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2847 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 2854 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2848 "2018 REG_VFI mbxStatus error x%x " 2855 "2018 REG_VFI mbxStatus error x%x "
2849 "HBA state x%x\n", 2856 "HBA state x%x\n",
@@ -5673,14 +5680,13 @@ lpfc_fcf_inuse(struct lpfc_hba *phba)
5673 ret = 1; 5680 ret = 1;
5674 spin_unlock_irq(shost->host_lock); 5681 spin_unlock_irq(shost->host_lock);
5675 goto out; 5682 goto out;
5676 } else { 5683 } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
5684 ret = 1;
5677 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 5685 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
5678 "2624 RPI %x DID %x flg %x still " 5686 "2624 RPI %x DID %x flag %x "
5679 "logged in\n", 5687 "still logged in\n",
5680 ndlp->nlp_rpi, ndlp->nlp_DID, 5688 ndlp->nlp_rpi, ndlp->nlp_DID,
5681 ndlp->nlp_flag); 5689 ndlp->nlp_flag);
5682 if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
5683 ret = 1;
5684 } 5690 }
5685 } 5691 }
5686 spin_unlock_irq(shost->host_lock); 5692 spin_unlock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 9e2b9b227e1a..91f09761bd32 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009 Emulex. All rights reserved. * 4 * Copyright (C) 2009-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -338,6 +338,12 @@ struct lpfc_cqe {
338#define CQE_CODE_XRI_ABORTED 0x5 338#define CQE_CODE_XRI_ABORTED 0x5
339#define CQE_CODE_RECEIVE_V1 0x9 339#define CQE_CODE_RECEIVE_V1 0x9
340 340
341/*
342 * Define mask value for xri_aborted and wcqe completed CQE extended status.
343 * Currently, extended status is limited to 9 bits (0x0 -> 0x103) .
344 */
345#define WCQE_PARAM_MASK 0x1FF;
346
341/* completion queue entry for wqe completions */ 347/* completion queue entry for wqe completions */
342struct lpfc_wcqe_complete { 348struct lpfc_wcqe_complete {
343 uint32_t word0; 349 uint32_t word0;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index b38f99f3be32..9598fdcb08ab 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2011 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -2704,16 +2704,14 @@ lpfc_offline_prep(struct lpfc_hba * phba)
2704 } 2704 }
2705 spin_lock_irq(shost->host_lock); 2705 spin_lock_irq(shost->host_lock);
2706 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2706 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2707 2707 spin_unlock_irq(shost->host_lock);
2708 /* 2708 /*
2709 * Whenever an SLI4 port goes offline, free the 2709 * Whenever an SLI4 port goes offline, free the
2710 * RPI. A new RPI when the adapter port comes 2710 * RPI. Get a new RPI when the adapter port
2711 * back online. 2711 * comes back online.
2712 */ 2712 */
2713 if (phba->sli_rev == LPFC_SLI_REV4) 2713 if (phba->sli_rev == LPFC_SLI_REV4)
2714 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 2714 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
2715
2716 spin_unlock_irq(shost->host_lock);
2717 lpfc_unreg_rpi(vports[i], ndlp); 2715 lpfc_unreg_rpi(vports[i], ndlp);
2718 } 2716 }
2719 } 2717 }
@@ -2786,9 +2784,13 @@ lpfc_scsi_buf_update(struct lpfc_hba *phba)
2786 2784
2787 spin_lock_irq(&phba->hbalock); 2785 spin_lock_irq(&phba->hbalock);
2788 spin_lock(&phba->scsi_buf_list_lock); 2786 spin_lock(&phba->scsi_buf_list_lock);
2789 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) 2787 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2790 sb->cur_iocbq.sli4_xritag = 2788 sb->cur_iocbq.sli4_xritag =
2791 phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag]; 2789 phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
2790 set_bit(sb->cur_iocbq.sli4_lxritag, phba->sli4_hba.xri_bmask);
2791 phba->sli4_hba.max_cfg_param.xri_used++;
2792 phba->sli4_hba.xri_count++;
2793 }
2792 spin_unlock(&phba->scsi_buf_list_lock); 2794 spin_unlock(&phba->scsi_buf_list_lock);
2793 spin_unlock_irq(&phba->hbalock); 2795 spin_unlock_irq(&phba->hbalock);
2794 return 0; 2796 return 0;
@@ -3723,6 +3725,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3723 break; 3725 break;
3724 3726
3725 case LPFC_FIP_EVENT_TYPE_FCF_DEAD: 3727 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
3728 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
3726 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3729 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3727 "2549 FCF (x%x) disconnected from network, " 3730 "2549 FCF (x%x) disconnected from network, "
3728 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); 3731 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
@@ -3784,6 +3787,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3784 } 3787 }
3785 break; 3788 break;
3786 case LPFC_FIP_EVENT_TYPE_CVL: 3789 case LPFC_FIP_EVENT_TYPE_CVL:
3790 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
3787 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, 3791 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3788 "2718 Clear Virtual Link Received for VPI 0x%x" 3792 "2718 Clear Virtual Link Received for VPI 0x%x"
3789 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); 3793 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
@@ -5226,8 +5230,7 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5226 * rpi is normalized to a zero base because the physical rpi is 5230 * rpi is normalized to a zero base because the physical rpi is
5227 * port based. 5231 * port based.
5228 */ 5232 */
5229 curr_rpi_range = phba->sli4_hba.next_rpi - 5233 curr_rpi_range = phba->sli4_hba.next_rpi;
5230 phba->sli4_hba.max_cfg_param.rpi_base;
5231 spin_unlock_irq(&phba->hbalock); 5234 spin_unlock_irq(&phba->hbalock);
5232 5235
5233 /* 5236 /*
@@ -5818,10 +5821,9 @@ lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5818 readl(phba->sli4_hba.u.if_type2. 5821 readl(phba->sli4_hba.u.if_type2.
5819 ERR2regaddr); 5822 ERR2regaddr);
5820 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5823 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5821 "2888 Port Error Detected " 5824 "2888 Unrecoverable port error "
5822 "during POST: " 5825 "following POST: port status reg "
5823 "port status reg 0x%x, " 5826 "0x%x, port_smphr reg 0x%x, "
5824 "port_smphr reg 0x%x, "
5825 "error 1=0x%x, error 2=0x%x\n", 5827 "error 1=0x%x, error 2=0x%x\n",
5826 reg_data.word0, 5828 reg_data.word0,
5827 portsmphr_reg.word0, 5829 portsmphr_reg.word0,
@@ -6142,7 +6144,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
6142 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; 6144 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
6143 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; 6145 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
6144 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; 6146 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
6145 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
6146 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? 6147 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
6147 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; 6148 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
6148 phba->max_vports = phba->max_vpi; 6149 phba->max_vports = phba->max_vpi;
@@ -7231,6 +7232,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
7231 uint32_t rdy_chk, num_resets = 0, reset_again = 0; 7232 uint32_t rdy_chk, num_resets = 0, reset_again = 0;
7232 union lpfc_sli4_cfg_shdr *shdr; 7233 union lpfc_sli4_cfg_shdr *shdr;
7233 struct lpfc_register reg_data; 7234 struct lpfc_register reg_data;
7235 uint16_t devid;
7234 7236
7235 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 7237 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7236 switch (if_type) { 7238 switch (if_type) {
@@ -7277,7 +7279,9 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
7277 LPFC_SLIPORT_INIT_PORT); 7279 LPFC_SLIPORT_INIT_PORT);
7278 writel(reg_data.word0, phba->sli4_hba.u.if_type2. 7280 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
7279 CTRLregaddr); 7281 CTRLregaddr);
7280 7282 /* flush */
7283 pci_read_config_word(phba->pcidev,
7284 PCI_DEVICE_ID, &devid);
7281 /* 7285 /*
7282 * Poll the Port Status Register and wait for RDY for 7286 * Poll the Port Status Register and wait for RDY for
7283 * up to 10 seconds. If the port doesn't respond, treat 7287 * up to 10 seconds. If the port doesn't respond, treat
@@ -7315,11 +7319,10 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
7315 phba->work_status[1] = readl( 7319 phba->work_status[1] = readl(
7316 phba->sli4_hba.u.if_type2.ERR2regaddr); 7320 phba->sli4_hba.u.if_type2.ERR2regaddr);
7317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7321 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7318 "2890 Port Error Detected " 7322 "2890 Port error detected during port "
7319 "during Port Reset: " 7323 "reset(%d): port status reg 0x%x, "
7320 "port status reg 0x%x, "
7321 "error 1=0x%x, error 2=0x%x\n", 7324 "error 1=0x%x, error 2=0x%x\n",
7322 reg_data.word0, 7325 num_resets, reg_data.word0,
7323 phba->work_status[0], 7326 phba->work_status[0],
7324 phba->work_status[1]); 7327 phba->work_status[1]);
7325 rc = -ENODEV; 7328 rc = -ENODEV;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 7b6b2aa5795a..15ca2a9a0cdd 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
1 /******************************************************************* 1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -440,11 +440,15 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
440 spin_unlock_irq(shost->host_lock); 440 spin_unlock_irq(shost->host_lock);
441 stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; 441 stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
442 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 442 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
443 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, 443 rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
444 ndlp, mbox); 444 ndlp, mbox);
445 if (rc)
446 mempool_free(mbox, phba->mbox_mem_pool);
445 return 1; 447 return 1;
446 } 448 }
447 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); 449 rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
450 if (rc)
451 mempool_free(mbox, phba->mbox_mem_pool);
448 return 1; 452 return 1;
449out: 453out:
450 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 454 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index efc055b6bac4..88f3a83dbd2e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -39,8 +39,8 @@
39#include "lpfc_sli4.h" 39#include "lpfc_sli4.h"
40#include "lpfc_nl.h" 40#include "lpfc_nl.h"
41#include "lpfc_disc.h" 41#include "lpfc_disc.h"
42#include "lpfc_scsi.h"
43#include "lpfc.h" 42#include "lpfc.h"
43#include "lpfc_scsi.h"
44#include "lpfc_logmsg.h" 44#include "lpfc_logmsg.h"
45#include "lpfc_crtn.h" 45#include "lpfc_crtn.h"
46#include "lpfc_vport.h" 46#include "lpfc_vport.h"
@@ -51,13 +51,19 @@
51int _dump_buf_done; 51int _dump_buf_done;
52 52
53static char *dif_op_str[] = { 53static char *dif_op_str[] = {
54 "SCSI_PROT_NORMAL", 54 "PROT_NORMAL",
55 "SCSI_PROT_READ_INSERT", 55 "PROT_READ_INSERT",
56 "SCSI_PROT_WRITE_STRIP", 56 "PROT_WRITE_STRIP",
57 "SCSI_PROT_READ_STRIP", 57 "PROT_READ_STRIP",
58 "SCSI_PROT_WRITE_INSERT", 58 "PROT_WRITE_INSERT",
59 "SCSI_PROT_READ_PASS", 59 "PROT_READ_PASS",
60 "SCSI_PROT_WRITE_PASS", 60 "PROT_WRITE_PASS",
61};
62
63static char *dif_grd_str[] = {
64 "NO_GUARD",
65 "DIF_CRC",
66 "DIX_IP",
61}; 67};
62 68
63struct scsi_dif_tuple { 69struct scsi_dif_tuple {
@@ -1281,10 +1287,14 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
1281 1287
1282#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1288#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1283 1289
1284#define BG_ERR_INIT 1 1290/* Return if if error injection is detected by Initiator */
1285#define BG_ERR_TGT 2 1291#define BG_ERR_INIT 0x1
1286#define BG_ERR_SWAP 3 1292/* Return if if error injection is detected by Target */
1287#define BG_ERR_CHECK 4 1293#define BG_ERR_TGT 0x2
1294/* Return if if swapping CSUM<-->CRC is required for error injection */
1295#define BG_ERR_SWAP 0x10
1296/* Return if disabling Guard/Ref/App checking is required for error injection */
1297#define BG_ERR_CHECK 0x20
1288 1298
1289/** 1299/**
1290 * lpfc_bg_err_inject - Determine if we should inject an error 1300 * lpfc_bg_err_inject - Determine if we should inject an error
@@ -1294,10 +1304,7 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
1294 * @apptag: (out) BlockGuard application tag for transmitted data 1304 * @apptag: (out) BlockGuard application tag for transmitted data
1295 * @new_guard (in) Value to replace CRC with if needed 1305 * @new_guard (in) Value to replace CRC with if needed
1296 * 1306 *
1297 * Returns (1) if error injection is detected by Initiator 1307 * Returns BG_ERR_* bit mask or 0 if request ignored
1298 * Returns (2) if error injection is detected by Target
1299 * Returns (3) if swapping CSUM->CRC is required for error injection
1300 * Returns (4) disabling Guard/Ref/App checking is required for error injection
1301 **/ 1308 **/
1302static int 1309static int
1303lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1310lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
@@ -1305,7 +1312,10 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1305{ 1312{
1306 struct scatterlist *sgpe; /* s/g prot entry */ 1313 struct scatterlist *sgpe; /* s/g prot entry */
1307 struct scatterlist *sgde; /* s/g data entry */ 1314 struct scatterlist *sgde; /* s/g data entry */
1315 struct lpfc_scsi_buf *lpfc_cmd = NULL;
1308 struct scsi_dif_tuple *src = NULL; 1316 struct scsi_dif_tuple *src = NULL;
1317 struct lpfc_nodelist *ndlp;
1318 struct lpfc_rport_data *rdata;
1309 uint32_t op = scsi_get_prot_op(sc); 1319 uint32_t op = scsi_get_prot_op(sc);
1310 uint32_t blksize; 1320 uint32_t blksize;
1311 uint32_t numblks; 1321 uint32_t numblks;
@@ -1318,8 +1328,9 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1318 1328
1319 sgpe = scsi_prot_sglist(sc); 1329 sgpe = scsi_prot_sglist(sc);
1320 sgde = scsi_sglist(sc); 1330 sgde = scsi_sglist(sc);
1321
1322 lba = scsi_get_lba(sc); 1331 lba = scsi_get_lba(sc);
1332
1333 /* First check if we need to match the LBA */
1323 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) { 1334 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1324 blksize = lpfc_cmd_blksize(sc); 1335 blksize = lpfc_cmd_blksize(sc);
1325 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize; 1336 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
@@ -1334,66 +1345,123 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1334 sizeof(struct scsi_dif_tuple); 1345 sizeof(struct scsi_dif_tuple);
1335 if (numblks < blockoff) 1346 if (numblks < blockoff)
1336 blockoff = numblks; 1347 blockoff = numblks;
1337 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1338 src += blockoff;
1339 } 1348 }
1340 } 1349 }
1341 1350
1351 /* Next check if we need to match the remote NPortID or WWPN */
1352 rdata = sc->device->hostdata;
1353 if (rdata && rdata->pnode) {
1354 ndlp = rdata->pnode;
1355
1356 /* Make sure we have the right NPortID if one is specified */
1357 if (phba->lpfc_injerr_nportid &&
1358 (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1359 return 0;
1360
1361 /*
1362 * Make sure we have the right WWPN if one is specified.
1363 * wwn[0] should be a non-zero NAA in a good WWPN.
1364 */
1365 if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
1366 (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1367 sizeof(struct lpfc_name)) != 0))
1368 return 0;
1369 }
1370
1371 /* Setup a ptr to the protection data if the SCSI host provides it */
1372 if (sgpe) {
1373 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1374 src += blockoff;
1375 lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
1376 }
1377
1342 /* Should we change the Reference Tag */ 1378 /* Should we change the Reference Tag */
1343 if (reftag) { 1379 if (reftag) {
1344 if (phba->lpfc_injerr_wref_cnt) { 1380 if (phba->lpfc_injerr_wref_cnt) {
1345 switch (op) { 1381 switch (op) {
1346 case SCSI_PROT_WRITE_PASS: 1382 case SCSI_PROT_WRITE_PASS:
1347 if (blockoff && src) { 1383 if (src) {
1348 /* Insert error in middle of the IO */ 1384 /*
1385 * For WRITE_PASS, force the error
1386 * to be sent on the wire. It should
1387 * be detected by the Target.
1388 * If blockoff != 0 error will be
1389 * inserted in middle of the IO.
1390 */
1349 1391
1350 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1392 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1351 "9076 BLKGRD: Injecting reftag error: " 1393 "9076 BLKGRD: Injecting reftag error: "
1352 "write lba x%lx + x%x oldrefTag x%x\n", 1394 "write lba x%lx + x%x oldrefTag x%x\n",
1353 (unsigned long)lba, blockoff, 1395 (unsigned long)lba, blockoff,
1354 src->ref_tag); 1396 be32_to_cpu(src->ref_tag));
1355 1397
1356 /* 1398 /*
1357 * NOTE, this will change ref tag in 1399 * Save the old ref_tag so we can
1358 * the memory location forever! 1400 * restore it on completion.
1359 */ 1401 */
1360 src->ref_tag = 0xDEADBEEF; 1402 if (lpfc_cmd) {
1403 lpfc_cmd->prot_data_type =
1404 LPFC_INJERR_REFTAG;
1405 lpfc_cmd->prot_data_segment =
1406 src;
1407 lpfc_cmd->prot_data =
1408 src->ref_tag;
1409 }
1410 src->ref_tag = cpu_to_be32(0xDEADBEEF);
1361 phba->lpfc_injerr_wref_cnt--; 1411 phba->lpfc_injerr_wref_cnt--;
1362 phba->lpfc_injerr_lba = 1412 if (phba->lpfc_injerr_wref_cnt == 0) {
1363 LPFC_INJERR_LBA_OFF; 1413 phba->lpfc_injerr_nportid = 0;
1364 rc = BG_ERR_CHECK; 1414 phba->lpfc_injerr_lba =
1415 LPFC_INJERR_LBA_OFF;
1416 memset(&phba->lpfc_injerr_wwpn,
1417 0, sizeof(struct lpfc_name));
1418 }
1419 rc = BG_ERR_TGT | BG_ERR_CHECK;
1420
1365 break; 1421 break;
1366 } 1422 }
1367 /* Drop thru */ 1423 /* Drop thru */
1368 case SCSI_PROT_WRITE_STRIP: 1424 case SCSI_PROT_WRITE_INSERT:
1369 /* 1425 /*
1370 * For WRITE_STRIP and WRITE_PASS, 1426 * For WRITE_INSERT, force the error
1371 * force the error on data 1427 * to be sent on the wire. It should be
1372 * being copied from SLI-Host to SLI-Port. 1428 * detected by the Target.
1373 */ 1429 */
1430 /* DEADBEEF will be the reftag on the wire */
1374 *reftag = 0xDEADBEEF; 1431 *reftag = 0xDEADBEEF;
1375 phba->lpfc_injerr_wref_cnt--; 1432 phba->lpfc_injerr_wref_cnt--;
1376 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; 1433 if (phba->lpfc_injerr_wref_cnt == 0) {
1377 rc = BG_ERR_INIT; 1434 phba->lpfc_injerr_nportid = 0;
1435 phba->lpfc_injerr_lba =
1436 LPFC_INJERR_LBA_OFF;
1437 memset(&phba->lpfc_injerr_wwpn,
1438 0, sizeof(struct lpfc_name));
1439 }
1440 rc = BG_ERR_TGT | BG_ERR_CHECK;
1378 1441
1379 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1442 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1380 "9077 BLKGRD: Injecting reftag error: " 1443 "9078 BLKGRD: Injecting reftag error: "
1381 "write lba x%lx\n", (unsigned long)lba); 1444 "write lba x%lx\n", (unsigned long)lba);
1382 break; 1445 break;
1383 case SCSI_PROT_WRITE_INSERT: 1446 case SCSI_PROT_WRITE_STRIP:
1384 /* 1447 /*
1385 * For WRITE_INSERT, force the 1448 * For WRITE_STRIP and WRITE_PASS,
1386 * error to be sent on the wire. It should be 1449 * force the error on data
1387 * detected by the Target. 1450 * being copied from SLI-Host to SLI-Port.
1388 */ 1451 */
1389 /* DEADBEEF will be the reftag on the wire */
1390 *reftag = 0xDEADBEEF; 1452 *reftag = 0xDEADBEEF;
1391 phba->lpfc_injerr_wref_cnt--; 1453 phba->lpfc_injerr_wref_cnt--;
1392 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; 1454 if (phba->lpfc_injerr_wref_cnt == 0) {
1393 rc = BG_ERR_TGT; 1455 phba->lpfc_injerr_nportid = 0;
1456 phba->lpfc_injerr_lba =
1457 LPFC_INJERR_LBA_OFF;
1458 memset(&phba->lpfc_injerr_wwpn,
1459 0, sizeof(struct lpfc_name));
1460 }
1461 rc = BG_ERR_INIT;
1394 1462
1395 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1463 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1396 "9078 BLKGRD: Injecting reftag error: " 1464 "9077 BLKGRD: Injecting reftag error: "
1397 "write lba x%lx\n", (unsigned long)lba); 1465 "write lba x%lx\n", (unsigned long)lba);
1398 break; 1466 break;
1399 } 1467 }
@@ -1401,11 +1469,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1401 if (phba->lpfc_injerr_rref_cnt) { 1469 if (phba->lpfc_injerr_rref_cnt) {
1402 switch (op) { 1470 switch (op) {
1403 case SCSI_PROT_READ_INSERT: 1471 case SCSI_PROT_READ_INSERT:
1404 /*
1405 * For READ_INSERT, it doesn't make sense
1406 * to change the reftag.
1407 */
1408 break;
1409 case SCSI_PROT_READ_STRIP: 1472 case SCSI_PROT_READ_STRIP:
1410 case SCSI_PROT_READ_PASS: 1473 case SCSI_PROT_READ_PASS:
1411 /* 1474 /*
@@ -1415,7 +1478,13 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1415 */ 1478 */
1416 *reftag = 0xDEADBEEF; 1479 *reftag = 0xDEADBEEF;
1417 phba->lpfc_injerr_rref_cnt--; 1480 phba->lpfc_injerr_rref_cnt--;
1418 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; 1481 if (phba->lpfc_injerr_rref_cnt == 0) {
1482 phba->lpfc_injerr_nportid = 0;
1483 phba->lpfc_injerr_lba =
1484 LPFC_INJERR_LBA_OFF;
1485 memset(&phba->lpfc_injerr_wwpn,
1486 0, sizeof(struct lpfc_name));
1487 }
1419 rc = BG_ERR_INIT; 1488 rc = BG_ERR_INIT;
1420 1489
1421 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1490 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
@@ -1431,56 +1500,87 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1431 if (phba->lpfc_injerr_wapp_cnt) { 1500 if (phba->lpfc_injerr_wapp_cnt) {
1432 switch (op) { 1501 switch (op) {
1433 case SCSI_PROT_WRITE_PASS: 1502 case SCSI_PROT_WRITE_PASS:
1434 if (blockoff && src) { 1503 if (src) {
1435 /* Insert error in middle of the IO */ 1504 /*
1505 * For WRITE_PASS, force the error
1506 * to be sent on the wire. It should
1507 * be detected by the Target.
1508 * If blockoff != 0 error will be
1509 * inserted in middle of the IO.
1510 */
1436 1511
1437 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1512 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1438 "9080 BLKGRD: Injecting apptag error: " 1513 "9080 BLKGRD: Injecting apptag error: "
1439 "write lba x%lx + x%x oldappTag x%x\n", 1514 "write lba x%lx + x%x oldappTag x%x\n",
1440 (unsigned long)lba, blockoff, 1515 (unsigned long)lba, blockoff,
1441 src->app_tag); 1516 be16_to_cpu(src->app_tag));
1442 1517
1443 /* 1518 /*
1444 * NOTE, this will change app tag in 1519 * Save the old app_tag so we can
1445 * the memory location forever! 1520 * restore it on completion.
1446 */ 1521 */
1447 src->app_tag = 0xDEAD; 1522 if (lpfc_cmd) {
1523 lpfc_cmd->prot_data_type =
1524 LPFC_INJERR_APPTAG;
1525 lpfc_cmd->prot_data_segment =
1526 src;
1527 lpfc_cmd->prot_data =
1528 src->app_tag;
1529 }
1530 src->app_tag = cpu_to_be16(0xDEAD);
1448 phba->lpfc_injerr_wapp_cnt--; 1531 phba->lpfc_injerr_wapp_cnt--;
1449 phba->lpfc_injerr_lba = 1532 if (phba->lpfc_injerr_wapp_cnt == 0) {
1450 LPFC_INJERR_LBA_OFF; 1533 phba->lpfc_injerr_nportid = 0;
1451 rc = BG_ERR_CHECK; 1534 phba->lpfc_injerr_lba =
1535 LPFC_INJERR_LBA_OFF;
1536 memset(&phba->lpfc_injerr_wwpn,
1537 0, sizeof(struct lpfc_name));
1538 }
1539 rc = BG_ERR_TGT | BG_ERR_CHECK;
1452 break; 1540 break;
1453 } 1541 }
1454 /* Drop thru */ 1542 /* Drop thru */
1455 case SCSI_PROT_WRITE_STRIP: 1543 case SCSI_PROT_WRITE_INSERT:
1456 /* 1544 /*
1457 * For WRITE_STRIP and WRITE_PASS, 1545 * For WRITE_INSERT, force the
1458 * force the error on data 1546 * error to be sent on the wire. It should be
1459 * being copied from SLI-Host to SLI-Port. 1547 * detected by the Target.
1460 */ 1548 */
1549 /* DEAD will be the apptag on the wire */
1461 *apptag = 0xDEAD; 1550 *apptag = 0xDEAD;
1462 phba->lpfc_injerr_wapp_cnt--; 1551 phba->lpfc_injerr_wapp_cnt--;
1463 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; 1552 if (phba->lpfc_injerr_wapp_cnt == 0) {
1464 rc = BG_ERR_INIT; 1553 phba->lpfc_injerr_nportid = 0;
1554 phba->lpfc_injerr_lba =
1555 LPFC_INJERR_LBA_OFF;
1556 memset(&phba->lpfc_injerr_wwpn,
1557 0, sizeof(struct lpfc_name));
1558 }
1559 rc = BG_ERR_TGT | BG_ERR_CHECK;
1465 1560
1466 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1561 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1467 "0812 BLKGRD: Injecting apptag error: " 1562 "0813 BLKGRD: Injecting apptag error: "
1468 "write lba x%lx\n", (unsigned long)lba); 1563 "write lba x%lx\n", (unsigned long)lba);
1469 break; 1564 break;
1470 case SCSI_PROT_WRITE_INSERT: 1565 case SCSI_PROT_WRITE_STRIP:
1471 /* 1566 /*
1472 * For WRITE_INSERT, force the 1567 * For WRITE_STRIP and WRITE_PASS,
1473 * error to be sent on the wire. It should be 1568 * force the error on data
1474 * detected by the Target. 1569 * being copied from SLI-Host to SLI-Port.
1475 */ 1570 */
1476 /* DEAD will be the apptag on the wire */
1477 *apptag = 0xDEAD; 1571 *apptag = 0xDEAD;
1478 phba->lpfc_injerr_wapp_cnt--; 1572 phba->lpfc_injerr_wapp_cnt--;
1479 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; 1573 if (phba->lpfc_injerr_wapp_cnt == 0) {
1480 rc = BG_ERR_TGT; 1574 phba->lpfc_injerr_nportid = 0;
1575 phba->lpfc_injerr_lba =
1576 LPFC_INJERR_LBA_OFF;
1577 memset(&phba->lpfc_injerr_wwpn,
1578 0, sizeof(struct lpfc_name));
1579 }
1580 rc = BG_ERR_INIT;
1481 1581
1482 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1582 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1483 "0813 BLKGRD: Injecting apptag error: " 1583 "0812 BLKGRD: Injecting apptag error: "
1484 "write lba x%lx\n", (unsigned long)lba); 1584 "write lba x%lx\n", (unsigned long)lba);
1485 break; 1585 break;
1486 } 1586 }
@@ -1488,11 +1588,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1488 if (phba->lpfc_injerr_rapp_cnt) { 1588 if (phba->lpfc_injerr_rapp_cnt) {
1489 switch (op) { 1589 switch (op) {
1490 case SCSI_PROT_READ_INSERT: 1590 case SCSI_PROT_READ_INSERT:
1491 /*
1492 * For READ_INSERT, it doesn't make sense
1493 * to change the apptag.
1494 */
1495 break;
1496 case SCSI_PROT_READ_STRIP: 1591 case SCSI_PROT_READ_STRIP:
1497 case SCSI_PROT_READ_PASS: 1592 case SCSI_PROT_READ_PASS:
1498 /* 1593 /*
@@ -1502,7 +1597,13 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1502 */ 1597 */
1503 *apptag = 0xDEAD; 1598 *apptag = 0xDEAD;
1504 phba->lpfc_injerr_rapp_cnt--; 1599 phba->lpfc_injerr_rapp_cnt--;
1505 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; 1600 if (phba->lpfc_injerr_rapp_cnt == 0) {
1601 phba->lpfc_injerr_nportid = 0;
1602 phba->lpfc_injerr_lba =
1603 LPFC_INJERR_LBA_OFF;
1604 memset(&phba->lpfc_injerr_wwpn,
1605 0, sizeof(struct lpfc_name));
1606 }
1506 rc = BG_ERR_INIT; 1607 rc = BG_ERR_INIT;
1507 1608
1508 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1609 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
@@ -1519,57 +1620,51 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1519 if (phba->lpfc_injerr_wgrd_cnt) { 1620 if (phba->lpfc_injerr_wgrd_cnt) {
1520 switch (op) { 1621 switch (op) {
1521 case SCSI_PROT_WRITE_PASS: 1622 case SCSI_PROT_WRITE_PASS:
1522 if (blockoff && src) { 1623 rc = BG_ERR_CHECK;
1523 /* Insert error in middle of the IO */
1524
1525 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1526 "0815 BLKGRD: Injecting guard error: "
1527 "write lba x%lx + x%x oldgrdTag x%x\n",
1528 (unsigned long)lba, blockoff,
1529 src->guard_tag);
1530
1531 /*
1532 * NOTE, this will change guard tag in
1533 * the memory location forever!
1534 */
1535 src->guard_tag = 0xDEAD;
1536 phba->lpfc_injerr_wgrd_cnt--;
1537 phba->lpfc_injerr_lba =
1538 LPFC_INJERR_LBA_OFF;
1539 rc = BG_ERR_CHECK;
1540 break;
1541 }
1542 /* Drop thru */ 1624 /* Drop thru */
1543 case SCSI_PROT_WRITE_STRIP: 1625
1626 case SCSI_PROT_WRITE_INSERT:
1544 /* 1627 /*
1545 * For WRITE_STRIP and WRITE_PASS, 1628 * For WRITE_INSERT, force the
1546 * force the error on data 1629 * error to be sent on the wire. It should be
1547 * being copied from SLI-Host to SLI-Port. 1630 * detected by the Target.
1548 */ 1631 */
1549 phba->lpfc_injerr_wgrd_cnt--; 1632 phba->lpfc_injerr_wgrd_cnt--;
1550 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; 1633 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1634 phba->lpfc_injerr_nportid = 0;
1635 phba->lpfc_injerr_lba =
1636 LPFC_INJERR_LBA_OFF;
1637 memset(&phba->lpfc_injerr_wwpn,
1638 0, sizeof(struct lpfc_name));
1639 }
1551 1640
1552 rc = BG_ERR_SWAP; 1641 rc |= BG_ERR_TGT | BG_ERR_SWAP;
1553 /* Signals the caller to swap CRC->CSUM */ 1642 /* Signals the caller to swap CRC->CSUM */
1554 1643
1555 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1644 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1556 "0816 BLKGRD: Injecting guard error: " 1645 "0817 BLKGRD: Injecting guard error: "
1557 "write lba x%lx\n", (unsigned long)lba); 1646 "write lba x%lx\n", (unsigned long)lba);
1558 break; 1647 break;
1559 case SCSI_PROT_WRITE_INSERT: 1648 case SCSI_PROT_WRITE_STRIP:
1560 /* 1649 /*
1561 * For WRITE_INSERT, force the 1650 * For WRITE_STRIP and WRITE_PASS,
1562 * error to be sent on the wire. It should be 1651 * force the error on data
1563 * detected by the Target. 1652 * being copied from SLI-Host to SLI-Port.
1564 */ 1653 */
1565 phba->lpfc_injerr_wgrd_cnt--; 1654 phba->lpfc_injerr_wgrd_cnt--;
1566 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; 1655 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1656 phba->lpfc_injerr_nportid = 0;
1657 phba->lpfc_injerr_lba =
1658 LPFC_INJERR_LBA_OFF;
1659 memset(&phba->lpfc_injerr_wwpn,
1660 0, sizeof(struct lpfc_name));
1661 }
1567 1662
1568 rc = BG_ERR_SWAP; 1663 rc = BG_ERR_INIT | BG_ERR_SWAP;
1569 /* Signals the caller to swap CRC->CSUM */ 1664 /* Signals the caller to swap CRC->CSUM */
1570 1665
1571 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1666 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1572 "0817 BLKGRD: Injecting guard error: " 1667 "0816 BLKGRD: Injecting guard error: "
1573 "write lba x%lx\n", (unsigned long)lba); 1668 "write lba x%lx\n", (unsigned long)lba);
1574 break; 1669 break;
1575 } 1670 }
@@ -1577,11 +1672,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1577 if (phba->lpfc_injerr_rgrd_cnt) { 1672 if (phba->lpfc_injerr_rgrd_cnt) {
1578 switch (op) { 1673 switch (op) {
1579 case SCSI_PROT_READ_INSERT: 1674 case SCSI_PROT_READ_INSERT:
1580 /*
1581 * For READ_INSERT, it doesn't make sense
1582 * to change the guard tag.
1583 */
1584 break;
1585 case SCSI_PROT_READ_STRIP: 1675 case SCSI_PROT_READ_STRIP:
1586 case SCSI_PROT_READ_PASS: 1676 case SCSI_PROT_READ_PASS:
1587 /* 1677 /*
@@ -1589,11 +1679,16 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1589 * error on data being read off the wire. It 1679 * error on data being read off the wire. It
1590 * should force an IO error to the driver. 1680 * should force an IO error to the driver.
1591 */ 1681 */
1592 *apptag = 0xDEAD;
1593 phba->lpfc_injerr_rgrd_cnt--; 1682 phba->lpfc_injerr_rgrd_cnt--;
1594 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; 1683 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1684 phba->lpfc_injerr_nportid = 0;
1685 phba->lpfc_injerr_lba =
1686 LPFC_INJERR_LBA_OFF;
1687 memset(&phba->lpfc_injerr_wwpn,
1688 0, sizeof(struct lpfc_name));
1689 }
1595 1690
1596 rc = BG_ERR_SWAP; 1691 rc = BG_ERR_INIT | BG_ERR_SWAP;
1597 /* Signals the caller to swap CRC->CSUM */ 1692 /* Signals the caller to swap CRC->CSUM */
1598 1693
1599 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1694 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
@@ -1629,20 +1724,20 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1629 switch (scsi_get_prot_op(sc)) { 1724 switch (scsi_get_prot_op(sc)) {
1630 case SCSI_PROT_READ_INSERT: 1725 case SCSI_PROT_READ_INSERT:
1631 case SCSI_PROT_WRITE_STRIP: 1726 case SCSI_PROT_WRITE_STRIP:
1632 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1633 *rxop = BG_OP_IN_NODIF_OUT_CSUM; 1727 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1728 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1634 break; 1729 break;
1635 1730
1636 case SCSI_PROT_READ_STRIP: 1731 case SCSI_PROT_READ_STRIP:
1637 case SCSI_PROT_WRITE_INSERT: 1732 case SCSI_PROT_WRITE_INSERT:
1638 *txop = BG_OP_IN_NODIF_OUT_CRC;
1639 *rxop = BG_OP_IN_CRC_OUT_NODIF; 1733 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1734 *txop = BG_OP_IN_NODIF_OUT_CRC;
1640 break; 1735 break;
1641 1736
1642 case SCSI_PROT_READ_PASS: 1737 case SCSI_PROT_READ_PASS:
1643 case SCSI_PROT_WRITE_PASS: 1738 case SCSI_PROT_WRITE_PASS:
1644 *txop = BG_OP_IN_CSUM_OUT_CRC;
1645 *rxop = BG_OP_IN_CRC_OUT_CSUM; 1739 *rxop = BG_OP_IN_CRC_OUT_CSUM;
1740 *txop = BG_OP_IN_CSUM_OUT_CRC;
1646 break; 1741 break;
1647 1742
1648 case SCSI_PROT_NORMAL: 1743 case SCSI_PROT_NORMAL:
@@ -1658,20 +1753,20 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1658 switch (scsi_get_prot_op(sc)) { 1753 switch (scsi_get_prot_op(sc)) {
1659 case SCSI_PROT_READ_STRIP: 1754 case SCSI_PROT_READ_STRIP:
1660 case SCSI_PROT_WRITE_INSERT: 1755 case SCSI_PROT_WRITE_INSERT:
1661 *txop = BG_OP_IN_NODIF_OUT_CRC;
1662 *rxop = BG_OP_IN_CRC_OUT_NODIF; 1756 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1757 *txop = BG_OP_IN_NODIF_OUT_CRC;
1663 break; 1758 break;
1664 1759
1665 case SCSI_PROT_READ_PASS: 1760 case SCSI_PROT_READ_PASS:
1666 case SCSI_PROT_WRITE_PASS: 1761 case SCSI_PROT_WRITE_PASS:
1667 *txop = BG_OP_IN_CRC_OUT_CRC;
1668 *rxop = BG_OP_IN_CRC_OUT_CRC; 1762 *rxop = BG_OP_IN_CRC_OUT_CRC;
1763 *txop = BG_OP_IN_CRC_OUT_CRC;
1669 break; 1764 break;
1670 1765
1671 case SCSI_PROT_READ_INSERT: 1766 case SCSI_PROT_READ_INSERT:
1672 case SCSI_PROT_WRITE_STRIP: 1767 case SCSI_PROT_WRITE_STRIP:
1673 *txop = BG_OP_IN_CRC_OUT_NODIF;
1674 *rxop = BG_OP_IN_NODIF_OUT_CRC; 1768 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1769 *txop = BG_OP_IN_CRC_OUT_NODIF;
1675 break; 1770 break;
1676 1771
1677 case SCSI_PROT_NORMAL: 1772 case SCSI_PROT_NORMAL:
@@ -1710,20 +1805,20 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1710 switch (scsi_get_prot_op(sc)) { 1805 switch (scsi_get_prot_op(sc)) {
1711 case SCSI_PROT_READ_INSERT: 1806 case SCSI_PROT_READ_INSERT:
1712 case SCSI_PROT_WRITE_STRIP: 1807 case SCSI_PROT_WRITE_STRIP:
1713 *txop = BG_OP_IN_CRC_OUT_NODIF;
1714 *rxop = BG_OP_IN_NODIF_OUT_CRC; 1808 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1809 *txop = BG_OP_IN_CRC_OUT_NODIF;
1715 break; 1810 break;
1716 1811
1717 case SCSI_PROT_READ_STRIP: 1812 case SCSI_PROT_READ_STRIP:
1718 case SCSI_PROT_WRITE_INSERT: 1813 case SCSI_PROT_WRITE_INSERT:
1719 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1720 *rxop = BG_OP_IN_CSUM_OUT_NODIF; 1814 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1815 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1721 break; 1816 break;
1722 1817
1723 case SCSI_PROT_READ_PASS: 1818 case SCSI_PROT_READ_PASS:
1724 case SCSI_PROT_WRITE_PASS: 1819 case SCSI_PROT_WRITE_PASS:
1725 *txop = BG_OP_IN_CRC_OUT_CRC; 1820 *rxop = BG_OP_IN_CSUM_OUT_CRC;
1726 *rxop = BG_OP_IN_CRC_OUT_CRC; 1821 *txop = BG_OP_IN_CRC_OUT_CSUM;
1727 break; 1822 break;
1728 1823
1729 case SCSI_PROT_NORMAL: 1824 case SCSI_PROT_NORMAL:
@@ -1735,20 +1830,20 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1735 switch (scsi_get_prot_op(sc)) { 1830 switch (scsi_get_prot_op(sc)) {
1736 case SCSI_PROT_READ_STRIP: 1831 case SCSI_PROT_READ_STRIP:
1737 case SCSI_PROT_WRITE_INSERT: 1832 case SCSI_PROT_WRITE_INSERT:
1738 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1739 *rxop = BG_OP_IN_CSUM_OUT_NODIF; 1833 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1834 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1740 break; 1835 break;
1741 1836
1742 case SCSI_PROT_READ_PASS: 1837 case SCSI_PROT_READ_PASS:
1743 case SCSI_PROT_WRITE_PASS: 1838 case SCSI_PROT_WRITE_PASS:
1744 *txop = BG_OP_IN_CSUM_OUT_CRC; 1839 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1745 *rxop = BG_OP_IN_CRC_OUT_CSUM; 1840 *txop = BG_OP_IN_CSUM_OUT_CSUM;
1746 break; 1841 break;
1747 1842
1748 case SCSI_PROT_READ_INSERT: 1843 case SCSI_PROT_READ_INSERT:
1749 case SCSI_PROT_WRITE_STRIP: 1844 case SCSI_PROT_WRITE_STRIP:
1750 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1751 *rxop = BG_OP_IN_NODIF_OUT_CSUM; 1845 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1846 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1752 break; 1847 break;
1753 1848
1754 case SCSI_PROT_NORMAL: 1849 case SCSI_PROT_NORMAL:
@@ -1817,11 +1912,11 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1817 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ 1912 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1818 1913
1819#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1914#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1820 rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1); 1915 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1821 if (rc) { 1916 if (rc) {
1822 if (rc == BG_ERR_SWAP) 1917 if (rc & BG_ERR_SWAP)
1823 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 1918 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1824 if (rc == BG_ERR_CHECK) 1919 if (rc & BG_ERR_CHECK)
1825 checking = 0; 1920 checking = 0;
1826 } 1921 }
1827#endif 1922#endif
@@ -1964,11 +2059,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1964 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ 2059 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1965 2060
1966#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2061#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1967 rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1); 2062 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1968 if (rc) { 2063 if (rc) {
1969 if (rc == BG_ERR_SWAP) 2064 if (rc & BG_ERR_SWAP)
1970 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2065 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1971 if (rc == BG_ERR_CHECK) 2066 if (rc & BG_ERR_CHECK)
1972 checking = 0; 2067 checking = 0;
1973 } 2068 }
1974#endif 2069#endif
@@ -2172,11 +2267,11 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2172 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ 2267 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2173 2268
2174#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2269#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2175 rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1); 2270 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2176 if (rc) { 2271 if (rc) {
2177 if (rc == BG_ERR_SWAP) 2272 if (rc & BG_ERR_SWAP)
2178 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2273 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2179 if (rc == BG_ERR_CHECK) 2274 if (rc & BG_ERR_CHECK)
2180 checking = 0; 2275 checking = 0;
2181 } 2276 }
2182#endif 2277#endif
@@ -2312,11 +2407,11 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2312 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */ 2407 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2313 2408
2314#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 2409#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2315 rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1); 2410 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2316 if (rc) { 2411 if (rc) {
2317 if (rc == BG_ERR_SWAP) 2412 if (rc & BG_ERR_SWAP)
2318 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop); 2413 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2319 if (rc == BG_ERR_CHECK) 2414 if (rc & BG_ERR_CHECK)
2320 checking = 0; 2415 checking = 0;
2321 } 2416 }
2322#endif 2417#endif
@@ -2788,7 +2883,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
2788 /* No error was reported - problem in FW? */ 2883 /* No error was reported - problem in FW? */
2789 cmd->result = ScsiResult(DID_ERROR, 0); 2884 cmd->result = ScsiResult(DID_ERROR, 0);
2790 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 2885 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2791 "9057 BLKGRD: no errors reported!\n"); 2886 "9057 BLKGRD: Unknown error reported!\n");
2792 } 2887 }
2793 2888
2794out: 2889out:
@@ -3460,6 +3555,37 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3460 /* pick up SLI4 exhange busy status from HBA */ 3555 /* pick up SLI4 exhange busy status from HBA */
3461 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY; 3556 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
3462 3557
3558#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3559 if (lpfc_cmd->prot_data_type) {
3560 struct scsi_dif_tuple *src = NULL;
3561
3562 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
3563 /*
3564 * Used to restore any changes to protection
3565 * data for error injection.
3566 */
3567 switch (lpfc_cmd->prot_data_type) {
3568 case LPFC_INJERR_REFTAG:
3569 src->ref_tag =
3570 lpfc_cmd->prot_data;
3571 break;
3572 case LPFC_INJERR_APPTAG:
3573 src->app_tag =
3574 (uint16_t)lpfc_cmd->prot_data;
3575 break;
3576 case LPFC_INJERR_GUARD:
3577 src->guard_tag =
3578 (uint16_t)lpfc_cmd->prot_data;
3579 break;
3580 default:
3581 break;
3582 }
3583
3584 lpfc_cmd->prot_data = 0;
3585 lpfc_cmd->prot_data_type = 0;
3586 lpfc_cmd->prot_data_segment = NULL;
3587 }
3588#endif
3463 if (pnode && NLP_CHK_NODE_ACT(pnode)) 3589 if (pnode && NLP_CHK_NODE_ACT(pnode))
3464 atomic_dec(&pnode->cmd_pending); 3590 atomic_dec(&pnode->cmd_pending);
3465 3591
@@ -4061,15 +4187,6 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
4061 cmnd->result = err; 4187 cmnd->result = err;
4062 goto out_fail_command; 4188 goto out_fail_command;
4063 } 4189 }
4064 /*
4065 * Do not let the mid-layer retry I/O too fast. If an I/O is retried
4066 * without waiting a bit then indicate that the device is busy.
4067 */
4068 if (cmnd->retries &&
4069 time_before(jiffies, (cmnd->jiffies_at_alloc +
4070 msecs_to_jiffies(LPFC_RETRY_PAUSE *
4071 cmnd->retries))))
4072 return SCSI_MLQUEUE_DEVICE_BUSY;
4073 ndlp = rdata->pnode; 4190 ndlp = rdata->pnode;
4074 4191
4075 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && 4192 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
@@ -4119,63 +4236,48 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
4119 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 4236 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4120 if (vport->phba->cfg_enable_bg) { 4237 if (vport->phba->cfg_enable_bg) {
4121 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 4238 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4122 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x " 4239 "9033 BLKGRD: rcvd protected cmd:%02x op=%s "
4123 "str=%s\n", 4240 "guard=%s\n", cmnd->cmnd[0],
4124 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 4241 dif_op_str[scsi_get_prot_op(cmnd)],
4125 dif_op_str[scsi_get_prot_op(cmnd)]); 4242 dif_grd_str[scsi_host_get_guard(shost)]);
4126 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4127 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
4128 "%02x %02x %02x %02x %02x\n",
4129 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
4130 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
4131 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
4132 cmnd->cmnd[9]);
4133 if (cmnd->cmnd[0] == READ_10) 4243 if (cmnd->cmnd[0] == READ_10)
4134 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 4244 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4135 "9035 BLKGRD: READ @ sector %llu, " 4245 "9035 BLKGRD: READ @ sector %llu, "
4136 "count %u\n", 4246 "cnt %u, rpt %d\n",
4137 (unsigned long long)scsi_get_lba(cmnd), 4247 (unsigned long long)scsi_get_lba(cmnd),
4138 blk_rq_sectors(cmnd->request)); 4248 blk_rq_sectors(cmnd->request),
4249 (cmnd->cmnd[1]>>5));
4139 else if (cmnd->cmnd[0] == WRITE_10) 4250 else if (cmnd->cmnd[0] == WRITE_10)
4140 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 4251 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4141 "9036 BLKGRD: WRITE @ sector %llu, " 4252 "9036 BLKGRD: WRITE @ sector %llu, "
4142 "count %u cmd=%p\n", 4253 "cnt %u, wpt %d\n",
4143 (unsigned long long)scsi_get_lba(cmnd), 4254 (unsigned long long)scsi_get_lba(cmnd),
4144 blk_rq_sectors(cmnd->request), 4255 blk_rq_sectors(cmnd->request),
4145 cmnd); 4256 (cmnd->cmnd[1]>>5));
4146 } 4257 }
4147 4258
4148 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); 4259 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4149 } else { 4260 } else {
4150 if (vport->phba->cfg_enable_bg) { 4261 if (vport->phba->cfg_enable_bg) {
4151 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 4262 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4152 "9038 BLKGRD: rcvd unprotected cmd:" 4263 "9038 BLKGRD: rcvd unprotected cmd:"
4153 "%02x op:%02x str=%s\n", 4264 "%02x op=%s guard=%s\n", cmnd->cmnd[0],
4154 cmnd->cmnd[0], scsi_get_prot_op(cmnd), 4265 dif_op_str[scsi_get_prot_op(cmnd)],
4155 dif_op_str[scsi_get_prot_op(cmnd)]); 4266 dif_grd_str[scsi_host_get_guard(shost)]);
4156 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4157 "9039 BLKGRD: CDB: %02x %02x %02x "
4158 "%02x %02x %02x %02x %02x %02x %02x\n",
4159 cmnd->cmnd[0], cmnd->cmnd[1],
4160 cmnd->cmnd[2], cmnd->cmnd[3],
4161 cmnd->cmnd[4], cmnd->cmnd[5],
4162 cmnd->cmnd[6], cmnd->cmnd[7],
4163 cmnd->cmnd[8], cmnd->cmnd[9]);
4164 if (cmnd->cmnd[0] == READ_10) 4267 if (cmnd->cmnd[0] == READ_10)
4165 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 4268 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4166 "9040 dbg: READ @ sector %llu, " 4269 "9040 dbg: READ @ sector %llu, "
4167 "count %u\n", 4270 "cnt %u, rpt %d\n",
4168 (unsigned long long)scsi_get_lba(cmnd), 4271 (unsigned long long)scsi_get_lba(cmnd),
4169 blk_rq_sectors(cmnd->request)); 4272 blk_rq_sectors(cmnd->request),
4273 (cmnd->cmnd[1]>>5));
4170 else if (cmnd->cmnd[0] == WRITE_10) 4274 else if (cmnd->cmnd[0] == WRITE_10)
4171 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 4275 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4172 "9041 dbg: WRITE @ sector %llu, " 4276 "9041 dbg: WRITE @ sector %llu, "
4173 "count %u cmd=%p\n", 4277 "cnt %u, wpt %d\n",
4174 (unsigned long long)scsi_get_lba(cmnd), 4278 (unsigned long long)scsi_get_lba(cmnd),
4175 blk_rq_sectors(cmnd->request), cmnd); 4279 blk_rq_sectors(cmnd->request),
4176 else 4280 (cmnd->cmnd[1]>>5));
4177 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4178 "9042 dbg: parser not implemented\n");
4179 } 4281 }
4180 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); 4282 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4181 } 4283 }
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 9075a08cf781..21a2ffe67eac 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -150,9 +150,18 @@ struct lpfc_scsi_buf {
150 struct lpfc_iocbq cur_iocbq; 150 struct lpfc_iocbq cur_iocbq;
151 wait_queue_head_t *waitq; 151 wait_queue_head_t *waitq;
152 unsigned long start_time; 152 unsigned long start_time;
153
154#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
155 /* Used to restore any changes to protection data for error injection */
156 void *prot_data_segment;
157 uint32_t prot_data;
158 uint32_t prot_data_type;
159#define LPFC_INJERR_REFTAG 1
160#define LPFC_INJERR_APPTAG 2
161#define LPFC_INJERR_GUARD 3
162#endif
153}; 163};
154 164
155#define LPFC_SCSI_DMA_EXT_SIZE 264 165#define LPFC_SCSI_DMA_EXT_SIZE 264
156#define LPFC_BPL_SIZE 1024 166#define LPFC_BPL_SIZE 1024
157#define LPFC_RETRY_PAUSE 300
158#define MDAC_DIRECT_CMD 0x22 167#define MDAC_DIRECT_CMD 0x22
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e0e4d8d18244..dbaf5b963bff 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2011 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -5578,8 +5578,6 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5578 for (i = 0; i < count; i++) 5578 for (i = 0; i < count; i++)
5579 phba->sli4_hba.rpi_ids[i] = base + i; 5579 phba->sli4_hba.rpi_ids[i] = base + i;
5580 5580
5581 lpfc_sli4_node_prep(phba);
5582
5583 /* VPIs. */ 5581 /* VPIs. */
5584 count = phba->sli4_hba.max_cfg_param.max_vpi; 5582 count = phba->sli4_hba.max_cfg_param.max_vpi;
5585 base = phba->sli4_hba.max_cfg_param.vpi_base; 5583 base = phba->sli4_hba.max_cfg_param.vpi_base;
@@ -5613,6 +5611,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5613 rc = -ENOMEM; 5611 rc = -ENOMEM;
5614 goto free_vpi_ids; 5612 goto free_vpi_ids;
5615 } 5613 }
5614 phba->sli4_hba.max_cfg_param.xri_used = 0;
5615 phba->sli4_hba.xri_count = 0;
5616 phba->sli4_hba.xri_ids = kzalloc(count * 5616 phba->sli4_hba.xri_ids = kzalloc(count *
5617 sizeof(uint16_t), 5617 sizeof(uint16_t),
5618 GFP_KERNEL); 5618 GFP_KERNEL);
@@ -6147,6 +6147,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6147 rc = -ENODEV; 6147 rc = -ENODEV;
6148 goto out_free_mbox; 6148 goto out_free_mbox;
6149 } 6149 }
6150 lpfc_sli4_node_prep(phba);
6150 6151
6151 /* Create all the SLI4 queues */ 6152 /* Create all the SLI4 queues */
6152 rc = lpfc_sli4_queue_create(phba); 6153 rc = lpfc_sli4_queue_create(phba);
@@ -7251,11 +7252,13 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7251 7252
7252out_not_finished: 7253out_not_finished:
7253 spin_lock_irqsave(&phba->hbalock, iflags); 7254 spin_lock_irqsave(&phba->hbalock, iflags);
7254 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 7255 if (phba->sli.mbox_active) {
7255 __lpfc_mbox_cmpl_put(phba, mboxq); 7256 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7256 /* Release the token */ 7257 __lpfc_mbox_cmpl_put(phba, mboxq);
7257 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7258 /* Release the token */
7258 phba->sli.mbox_active = NULL; 7259 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7260 phba->sli.mbox_active = NULL;
7261 }
7259 spin_unlock_irqrestore(&phba->hbalock, iflags); 7262 spin_unlock_irqrestore(&phba->hbalock, iflags);
7260 7263
7261 return MBX_NOT_FINISHED; 7264 return MBX_NOT_FINISHED;
@@ -7743,6 +7746,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7743 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 7746 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
7744 *pcmd == ELS_CMD_SCR || 7747 *pcmd == ELS_CMD_SCR ||
7745 *pcmd == ELS_CMD_FDISC || 7748 *pcmd == ELS_CMD_FDISC ||
7749 *pcmd == ELS_CMD_LOGO ||
7746 *pcmd == ELS_CMD_PLOGI)) { 7750 *pcmd == ELS_CMD_PLOGI)) {
7747 bf_set(els_req64_sp, &wqe->els_req, 1); 7751 bf_set(els_req64_sp, &wqe->els_req, 1);
7748 bf_set(els_req64_sid, &wqe->els_req, 7752 bf_set(els_req64_sid, &wqe->els_req,
@@ -8385,6 +8389,7 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
8385 struct sli4_wcqe_xri_aborted *axri) 8389 struct sli4_wcqe_xri_aborted *axri)
8386{ 8390{
8387 struct lpfc_vport *vport; 8391 struct lpfc_vport *vport;
8392 uint32_t ext_status = 0;
8388 8393
8389 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 8394 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
8390 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8395 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -8396,12 +8401,20 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
8396 vport = ndlp->vport; 8401 vport = ndlp->vport;
8397 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8402 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8398 "3116 Port generated FCP XRI ABORT event on " 8403 "3116 Port generated FCP XRI ABORT event on "
8399 "vpi %d rpi %d xri x%x status 0x%x\n", 8404 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
8400 ndlp->vport->vpi, ndlp->nlp_rpi, 8405 ndlp->vport->vpi, ndlp->nlp_rpi,
8401 bf_get(lpfc_wcqe_xa_xri, axri), 8406 bf_get(lpfc_wcqe_xa_xri, axri),
8402 bf_get(lpfc_wcqe_xa_status, axri)); 8407 bf_get(lpfc_wcqe_xa_status, axri),
8408 axri->parameter);
8403 8409
8404 if (bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) 8410 /*
8411 * Catch the ABTS protocol failure case. Older OCe FW releases returned
8412 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
8413 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
8414 */
8415 ext_status = axri->parameter & WCQE_PARAM_MASK;
8416 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
8417 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
8405 lpfc_sli_abts_recover_port(vport, ndlp); 8418 lpfc_sli_abts_recover_port(vport, ndlp);
8406} 8419}
8407 8420
@@ -9807,12 +9820,11 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
9807 unsigned long timeout; 9820 unsigned long timeout;
9808 9821
9809 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 9822 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
9823
9810 spin_lock_irq(&phba->hbalock); 9824 spin_lock_irq(&phba->hbalock);
9811 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 9825 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9812 spin_unlock_irq(&phba->hbalock);
9813 9826
9814 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 9827 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9815 spin_lock_irq(&phba->hbalock);
9816 /* Determine how long we might wait for the active mailbox 9828 /* Determine how long we might wait for the active mailbox
9817 * command to be gracefully completed by firmware. 9829 * command to be gracefully completed by firmware.
9818 */ 9830 */
@@ -9831,7 +9843,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
9831 */ 9843 */
9832 break; 9844 break;
9833 } 9845 }
9834 } 9846 } else
9847 spin_unlock_irq(&phba->hbalock);
9848
9835 lpfc_sli_mbox_sys_flush(phba); 9849 lpfc_sli_mbox_sys_flush(phba);
9836} 9850}
9837 9851
@@ -13272,7 +13286,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
13272 LPFC_MBOXQ_t *mbox; 13286 LPFC_MBOXQ_t *mbox;
13273 uint32_t reqlen, alloclen, index; 13287 uint32_t reqlen, alloclen, index;
13274 uint32_t mbox_tmo; 13288 uint32_t mbox_tmo;
13275 uint16_t rsrc_start, rsrc_size, els_xri_cnt; 13289 uint16_t rsrc_start, rsrc_size, els_xri_cnt, post_els_xri_cnt;
13276 uint16_t xritag_start = 0, lxri = 0; 13290 uint16_t xritag_start = 0, lxri = 0;
13277 struct lpfc_rsrc_blks *rsrc_blk; 13291 struct lpfc_rsrc_blks *rsrc_blk;
13278 int cnt, ttl_cnt, rc = 0; 13292 int cnt, ttl_cnt, rc = 0;
@@ -13294,6 +13308,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
13294 13308
13295 cnt = 0; 13309 cnt = 0;
13296 ttl_cnt = 0; 13310 ttl_cnt = 0;
13311 post_els_xri_cnt = els_xri_cnt;
13297 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list, 13312 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
13298 list) { 13313 list) {
13299 rsrc_start = rsrc_blk->rsrc_start; 13314 rsrc_start = rsrc_blk->rsrc_start;
@@ -13303,11 +13318,12 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
13303 "3014 Working ELS Extent start %d, cnt %d\n", 13318 "3014 Working ELS Extent start %d, cnt %d\n",
13304 rsrc_start, rsrc_size); 13319 rsrc_start, rsrc_size);
13305 13320
13306 loop_cnt = min(els_xri_cnt, rsrc_size); 13321 loop_cnt = min(post_els_xri_cnt, rsrc_size);
13307 if (ttl_cnt + loop_cnt >= els_xri_cnt) { 13322 if (loop_cnt < post_els_xri_cnt) {
13308 loop_cnt = els_xri_cnt - ttl_cnt; 13323 post_els_xri_cnt -= loop_cnt;
13309 ttl_cnt = els_xri_cnt; 13324 ttl_cnt += loop_cnt;
13310 } 13325 } else
13326 ttl_cnt += post_els_xri_cnt;
13311 13327
13312 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13328 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13313 if (!mbox) 13329 if (!mbox)
@@ -14203,15 +14219,14 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
14203 * field and RX_ID from ABTS for RX_ID field. 14219 * field and RX_ID from ABTS for RX_ID field.
14204 */ 14220 */
14205 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 14221 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
14206 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
14207 } else { 14222 } else {
14208 /* ABTS sent by initiator to CT exchange, construction 14223 /* ABTS sent by initiator to CT exchange, construction
14209 * of BA_ACC will need to allocate a new XRI as for the 14224 * of BA_ACC will need to allocate a new XRI as for the
14210 * XRI_TAG and RX_ID fields. 14225 * XRI_TAG field.
14211 */ 14226 */
14212 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 14227 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
14213 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI);
14214 } 14228 }
14229 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
14215 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 14230 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
14216 14231
14217 /* Xmit CT abts response on exchange <xid> */ 14232 /* Xmit CT abts response on exchange <xid> */
@@ -15042,6 +15057,7 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
15042 LPFC_MBOXQ_t *mboxq; 15057 LPFC_MBOXQ_t *mboxq;
15043 15058
15044 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 15059 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
15060 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
15045 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15061 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15046 if (!mboxq) { 15062 if (!mboxq) {
15047 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15063 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f2a2602e5c35..25cefc254b76 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2011 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.29" 21#define LPFC_DRIVER_VERSION "8.3.30"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 5e69f468535f..8a59a772fdf2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -657,7 +657,7 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
657 return; 657 return;
658 658
659 /* eat the loginfos associated with task aborts */ 659 /* eat the loginfos associated with task aborts */
660 if (ioc->ignore_loginfos && (log_info == 30050000 || log_info == 660 if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
661 0x31140000 || log_info == 0x31130000)) 661 0x31140000 || log_info == 0x31130000))
662 return; 662 return;
663 663
@@ -2060,12 +2060,10 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
2060{ 2060{
2061 int i = 0; 2061 int i = 0;
2062 char desc[16]; 2062 char desc[16];
2063 u8 revision;
2064 u32 iounit_pg1_flags; 2063 u32 iounit_pg1_flags;
2065 u32 bios_version; 2064 u32 bios_version;
2066 2065
2067 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); 2066 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2068 pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
2069 strncpy(desc, ioc->manu_pg0.ChipName, 16); 2067 strncpy(desc, ioc->manu_pg0.ChipName, 16);
2070 printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), " 2068 printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
2071 "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n", 2069 "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
@@ -2074,7 +2072,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
2074 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, 2072 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2075 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, 2073 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2076 ioc->facts.FWVersion.Word & 0x000000FF, 2074 ioc->facts.FWVersion.Word & 0x000000FF,
2077 revision, 2075 ioc->pdev->revision,
2078 (bios_version & 0xFF000000) >> 24, 2076 (bios_version & 0xFF000000) >> 24,
2079 (bios_version & 0x00FF0000) >> 16, 2077 (bios_version & 0x00FF0000) >> 16,
2080 (bios_version & 0x0000FF00) >> 8, 2078 (bios_version & 0x0000FF00) >> 8,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 7fceb899029e..3b9a28efea82 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -1026,7 +1026,6 @@ _ctl_getiocinfo(void __user *arg)
1026{ 1026{
1027 struct mpt2_ioctl_iocinfo karg; 1027 struct mpt2_ioctl_iocinfo karg;
1028 struct MPT2SAS_ADAPTER *ioc; 1028 struct MPT2SAS_ADAPTER *ioc;
1029 u8 revision;
1030 1029
1031 if (copy_from_user(&karg, arg, sizeof(karg))) { 1030 if (copy_from_user(&karg, arg, sizeof(karg))) {
1032 printk(KERN_ERR "failure at %s:%d/%s()!\n", 1031 printk(KERN_ERR "failure at %s:%d/%s()!\n",
@@ -1046,8 +1045,7 @@ _ctl_getiocinfo(void __user *arg)
1046 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2; 1045 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
1047 if (ioc->pfacts) 1046 if (ioc->pfacts)
1048 karg.port_number = ioc->pfacts[0].PortNumber; 1047 karg.port_number = ioc->pfacts[0].PortNumber;
1049 pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); 1048 karg.hw_rev = ioc->pdev->revision;
1050 karg.hw_rev = revision;
1051 karg.pci_id = ioc->pdev->device; 1049 karg.pci_id = ioc->pdev->device;
1052 karg.subsystem_device = ioc->pdev->subsystem_device; 1050 karg.subsystem_device = ioc->pdev->subsystem_device;
1053 karg.subsystem_vendor = ioc->pdev->subsystem_vendor; 1051 karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 3619f6eeeeda..9d82ee5c10de 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -2093,6 +2093,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2093 struct ata_task_resp *resp ; 2093 struct ata_task_resp *resp ;
2094 u32 *sata_resp; 2094 u32 *sata_resp;
2095 struct pm8001_device *pm8001_dev; 2095 struct pm8001_device *pm8001_dev;
2096 unsigned long flags;
2096 2097
2097 psataPayload = (struct sata_completion_resp *)(piomb + 4); 2098 psataPayload = (struct sata_completion_resp *)(piomb + 4);
2098 status = le32_to_cpu(psataPayload->status); 2099 status = le32_to_cpu(psataPayload->status);
@@ -2382,26 +2383,26 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
2382 ts->stat = SAS_DEV_NO_RESPONSE; 2383 ts->stat = SAS_DEV_NO_RESPONSE;
2383 break; 2384 break;
2384 } 2385 }
2385 spin_lock_irq(&t->task_state_lock); 2386 spin_lock_irqsave(&t->task_state_lock, flags);
2386 t->task_state_flags &= ~SAS_TASK_STATE_PENDING; 2387 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2387 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 2388 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
2388 t->task_state_flags |= SAS_TASK_STATE_DONE; 2389 t->task_state_flags |= SAS_TASK_STATE_DONE;
2389 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { 2390 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
2390 spin_unlock_irq(&t->task_state_lock); 2391 spin_unlock_irqrestore(&t->task_state_lock, flags);
2391 PM8001_FAIL_DBG(pm8001_ha, 2392 PM8001_FAIL_DBG(pm8001_ha,
2392 pm8001_printk("task 0x%p done with io_status 0x%x" 2393 pm8001_printk("task 0x%p done with io_status 0x%x"
2393 " resp 0x%x stat 0x%x but aborted by upper layer!\n", 2394 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2394 t, status, ts->resp, ts->stat)); 2395 t, status, ts->resp, ts->stat));
2395 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2396 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2396 } else if (t->uldd_task) { 2397 } else if (t->uldd_task) {
2397 spin_unlock_irq(&t->task_state_lock); 2398 spin_unlock_irqrestore(&t->task_state_lock, flags);
2398 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2399 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2399 mb();/* ditto */ 2400 mb();/* ditto */
2400 spin_unlock_irq(&pm8001_ha->lock); 2401 spin_unlock_irq(&pm8001_ha->lock);
2401 t->task_done(t); 2402 t->task_done(t);
2402 spin_lock_irq(&pm8001_ha->lock); 2403 spin_lock_irq(&pm8001_ha->lock);
2403 } else if (!t->uldd_task) { 2404 } else if (!t->uldd_task) {
2404 spin_unlock_irq(&t->task_state_lock); 2405 spin_unlock_irqrestore(&t->task_state_lock, flags);
2405 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2406 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2406 mb();/*ditto*/ 2407 mb();/*ditto*/
2407 spin_unlock_irq(&pm8001_ha->lock); 2408 spin_unlock_irq(&pm8001_ha->lock);
@@ -2423,6 +2424,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2423 u32 tag = le32_to_cpu(psataPayload->tag); 2424 u32 tag = le32_to_cpu(psataPayload->tag);
2424 u32 port_id = le32_to_cpu(psataPayload->port_id); 2425 u32 port_id = le32_to_cpu(psataPayload->port_id);
2425 u32 dev_id = le32_to_cpu(psataPayload->device_id); 2426 u32 dev_id = le32_to_cpu(psataPayload->device_id);
2427 unsigned long flags;
2426 2428
2427 ccb = &pm8001_ha->ccb_info[tag]; 2429 ccb = &pm8001_ha->ccb_info[tag];
2428 t = ccb->task; 2430 t = ccb->task;
@@ -2593,26 +2595,26 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
2593 ts->stat = SAS_OPEN_TO; 2595 ts->stat = SAS_OPEN_TO;
2594 break; 2596 break;
2595 } 2597 }
2596 spin_lock_irq(&t->task_state_lock); 2598 spin_lock_irqsave(&t->task_state_lock, flags);
2597 t->task_state_flags &= ~SAS_TASK_STATE_PENDING; 2599 t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
2598 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 2600 t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
2599 t->task_state_flags |= SAS_TASK_STATE_DONE; 2601 t->task_state_flags |= SAS_TASK_STATE_DONE;
2600 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { 2602 if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
2601 spin_unlock_irq(&t->task_state_lock); 2603 spin_unlock_irqrestore(&t->task_state_lock, flags);
2602 PM8001_FAIL_DBG(pm8001_ha, 2604 PM8001_FAIL_DBG(pm8001_ha,
2603 pm8001_printk("task 0x%p done with io_status 0x%x" 2605 pm8001_printk("task 0x%p done with io_status 0x%x"
2604 " resp 0x%x stat 0x%x but aborted by upper layer!\n", 2606 " resp 0x%x stat 0x%x but aborted by upper layer!\n",
2605 t, event, ts->resp, ts->stat)); 2607 t, event, ts->resp, ts->stat));
2606 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2608 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2607 } else if (t->uldd_task) { 2609 } else if (t->uldd_task) {
2608 spin_unlock_irq(&t->task_state_lock); 2610 spin_unlock_irqrestore(&t->task_state_lock, flags);
2609 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2611 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2610 mb();/* ditto */ 2612 mb();/* ditto */
2611 spin_unlock_irq(&pm8001_ha->lock); 2613 spin_unlock_irq(&pm8001_ha->lock);
2612 t->task_done(t); 2614 t->task_done(t);
2613 spin_lock_irq(&pm8001_ha->lock); 2615 spin_lock_irq(&pm8001_ha->lock);
2614 } else if (!t->uldd_task) { 2616 } else if (!t->uldd_task) {
2615 spin_unlock_irq(&t->task_state_lock); 2617 spin_unlock_irqrestore(&t->task_state_lock, flags);
2616 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); 2618 pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
2617 mb();/*ditto*/ 2619 mb();/*ditto*/
2618 spin_unlock_irq(&pm8001_ha->lock); 2620 spin_unlock_irq(&pm8001_ha->lock);
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 7c9f28b7da72..fc542a9bb106 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -431,9 +431,9 @@ static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
431 mbox_sts_entry->out_mbox[6])); 431 mbox_sts_entry->out_mbox[6]));
432 432
433 if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE) 433 if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
434 status = QLA_SUCCESS; 434 status = ISCSI_PING_SUCCESS;
435 else 435 else
436 status = QLA_ERROR; 436 status = mbox_sts_entry->out_mbox[6];
437 437
438 data_size = sizeof(mbox_sts_entry->out_mbox); 438 data_size = sizeof(mbox_sts_entry->out_mbox);
439 439
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 3d9419460e0c..ee47820c30a6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -834,7 +834,7 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
834static void qla4xxx_set_port_speed(struct Scsi_Host *shost) 834static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
835{ 835{
836 struct scsi_qla_host *ha = to_qla_host(shost); 836 struct scsi_qla_host *ha = to_qla_host(shost);
837 struct iscsi_cls_host *ihost = shost_priv(shost); 837 struct iscsi_cls_host *ihost = shost->shost_data;
838 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN; 838 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
839 839
840 qla4xxx_get_firmware_state(ha); 840 qla4xxx_get_firmware_state(ha);
@@ -859,7 +859,7 @@ static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
859static void qla4xxx_set_port_state(struct Scsi_Host *shost) 859static void qla4xxx_set_port_state(struct Scsi_Host *shost)
860{ 860{
861 struct scsi_qla_host *ha = to_qla_host(shost); 861 struct scsi_qla_host *ha = to_qla_host(shost);
862 struct iscsi_cls_host *ihost = shost_priv(shost); 862 struct iscsi_cls_host *ihost = shost->shost_data;
863 uint32_t state = ISCSI_PORT_STATE_DOWN; 863 uint32_t state = ISCSI_PORT_STATE_DOWN;
864 864
865 if (test_bit(AF_LINK_UP, &ha->flags)) 865 if (test_bit(AF_LINK_UP, &ha->flags))
@@ -3445,7 +3445,6 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
3445int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) 3445int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
3446{ 3446{
3447 int status = 0; 3447 int status = 0;
3448 uint8_t revision_id;
3449 unsigned long mem_base, mem_len, db_base, db_len; 3448 unsigned long mem_base, mem_len, db_base, db_len;
3450 struct pci_dev *pdev = ha->pdev; 3449 struct pci_dev *pdev = ha->pdev;
3451 3450
@@ -3457,10 +3456,9 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
3457 goto iospace_error_exit; 3456 goto iospace_error_exit;
3458 } 3457 }
3459 3458
3460 pci_read_config_byte(pdev, PCI_REVISION_ID, &revision_id);
3461 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n", 3459 DEBUG2(printk(KERN_INFO "%s: revision-id=%d\n",
3462 __func__, revision_id)); 3460 __func__, pdev->revision));
3463 ha->revision_id = revision_id; 3461 ha->revision_id = pdev->revision;
3464 3462
3465 /* remap phys address */ 3463 /* remap phys address */
3466 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 3464 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index ede9af944141..97b30c108e36 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k15" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k16"
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 591856131c4e..182d5a57ab74 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -101,6 +101,7 @@ static const char * scsi_debug_version_date = "20100324";
101#define DEF_LBPU 0 101#define DEF_LBPU 0
102#define DEF_LBPWS 0 102#define DEF_LBPWS 0
103#define DEF_LBPWS10 0 103#define DEF_LBPWS10 0
104#define DEF_LBPRZ 1
104#define DEF_LOWEST_ALIGNED 0 105#define DEF_LOWEST_ALIGNED 0
105#define DEF_NO_LUN_0 0 106#define DEF_NO_LUN_0 0
106#define DEF_NUM_PARTS 0 107#define DEF_NUM_PARTS 0
@@ -186,6 +187,7 @@ static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
186static unsigned int scsi_debug_lbpu = DEF_LBPU; 187static unsigned int scsi_debug_lbpu = DEF_LBPU;
187static unsigned int scsi_debug_lbpws = DEF_LBPWS; 188static unsigned int scsi_debug_lbpws = DEF_LBPWS;
188static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10; 189static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
190static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
189static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; 191static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
190static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; 192static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
191static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; 193static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
@@ -775,10 +777,10 @@ static int inquiry_evpd_b1(unsigned char *arr)
775 return 0x3c; 777 return 0x3c;
776} 778}
777 779
778/* Thin provisioning VPD page (SBC-3) */ 780/* Logical block provisioning VPD page (SBC-3) */
779static int inquiry_evpd_b2(unsigned char *arr) 781static int inquiry_evpd_b2(unsigned char *arr)
780{ 782{
781 memset(arr, 0, 0x8); 783 memset(arr, 0, 0x4);
782 arr[0] = 0; /* threshold exponent */ 784 arr[0] = 0; /* threshold exponent */
783 785
784 if (scsi_debug_lbpu) 786 if (scsi_debug_lbpu)
@@ -790,7 +792,10 @@ static int inquiry_evpd_b2(unsigned char *arr)
790 if (scsi_debug_lbpws10) 792 if (scsi_debug_lbpws10)
791 arr[1] |= 1 << 5; 793 arr[1] |= 1 << 5;
792 794
793 return 0x8; 795 if (scsi_debug_lbprz)
796 arr[1] |= 1 << 2;
797
798 return 0x4;
794} 799}
795 800
796#define SDEBUG_LONG_INQ_SZ 96 801#define SDEBUG_LONG_INQ_SZ 96
@@ -1071,8 +1076,11 @@ static int resp_readcap16(struct scsi_cmnd * scp,
1071 arr[13] = scsi_debug_physblk_exp & 0xf; 1076 arr[13] = scsi_debug_physblk_exp & 0xf;
1072 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; 1077 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1073 1078
1074 if (scsi_debug_lbp()) 1079 if (scsi_debug_lbp()) {
1075 arr[14] |= 0x80; /* LBPME */ 1080 arr[14] |= 0x80; /* LBPME */
1081 if (scsi_debug_lbprz)
1082 arr[14] |= 0x40; /* LBPRZ */
1083 }
1076 1084
1077 arr[15] = scsi_debug_lowest_aligned & 0xff; 1085 arr[15] = scsi_debug_lowest_aligned & 0xff;
1078 1086
@@ -2046,10 +2054,13 @@ static void unmap_region(sector_t lba, unsigned int len)
2046 block = lba + alignment; 2054 block = lba + alignment;
2047 rem = do_div(block, granularity); 2055 rem = do_div(block, granularity);
2048 2056
2049 if (rem == 0 && lba + granularity <= end && 2057 if (rem == 0 && lba + granularity <= end && block < map_size) {
2050 block < map_size)
2051 clear_bit(block, map_storep); 2058 clear_bit(block, map_storep);
2052 2059 if (scsi_debug_lbprz)
2060 memset(fake_storep +
2061 block * scsi_debug_sector_size, 0,
2062 scsi_debug_sector_size);
2063 }
2053 lba += granularity - rem; 2064 lba += granularity - rem;
2054 } 2065 }
2055} 2066}
@@ -2731,6 +2742,7 @@ module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2731module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); 2742module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2732module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); 2743module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2733module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); 2744module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2745module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
2734module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); 2746module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2735module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); 2747module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2736module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR); 2748module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
@@ -2772,6 +2784,7 @@ MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2772MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); 2784MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2773MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); 2785MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2774MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); 2786MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2787MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
2775MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); 2788MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2776MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); 2789MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2777MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))"); 2790MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fac31730addf..1cf640e575da 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1486,7 +1486,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
1486 struct iscsi_uevent *ev; 1486 struct iscsi_uevent *ev;
1487 int len = NLMSG_SPACE(sizeof(*ev) + data_size); 1487 int len = NLMSG_SPACE(sizeof(*ev) + data_size);
1488 1488
1489 skb = alloc_skb(len, GFP_KERNEL); 1489 skb = alloc_skb(len, GFP_NOIO);
1490 if (!skb) { 1490 if (!skb) {
1491 printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n", 1491 printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n",
1492 host_no, code); 1492 host_no, code);
@@ -1504,7 +1504,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
1504 if (data_size) 1504 if (data_size)
1505 memcpy((char *)ev + sizeof(*ev), data, data_size); 1505 memcpy((char *)ev + sizeof(*ev), data, data_size);
1506 1506
1507 iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); 1507 iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
1508} 1508}
1509EXPORT_SYMBOL_GPL(iscsi_post_host_event); 1509EXPORT_SYMBOL_GPL(iscsi_post_host_event);
1510 1510
@@ -1517,7 +1517,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
1517 struct iscsi_uevent *ev; 1517 struct iscsi_uevent *ev;
1518 int len = NLMSG_SPACE(sizeof(*ev) + data_size); 1518 int len = NLMSG_SPACE(sizeof(*ev) + data_size);
1519 1519
1520 skb = alloc_skb(len, GFP_KERNEL); 1520 skb = alloc_skb(len, GFP_NOIO);
1521 if (!skb) { 1521 if (!skb) {
1522 printk(KERN_ERR "gracefully ignored ping comp: OOM\n"); 1522 printk(KERN_ERR "gracefully ignored ping comp: OOM\n");
1523 return; 1523 return;
@@ -1533,7 +1533,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
1533 ev->r.ping_comp.data_size = data_size; 1533 ev->r.ping_comp.data_size = data_size;
1534 memcpy((char *)ev + sizeof(*ev), data, data_size); 1534 memcpy((char *)ev + sizeof(*ev), data, data_size);
1535 1535
1536 iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); 1536 iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_NOIO);
1537} 1537}
1538EXPORT_SYMBOL_GPL(iscsi_ping_comp_event); 1538EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
1539 1539
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 09e3df42a402..5ba5c2a9e8e9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -664,7 +664,7 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq)
664} 664}
665 665
666/** 666/**
667 * sd_init_command - build a scsi (read or write) command from 667 * sd_prep_fn - build a scsi (read or write) command from
668 * information in the request structure. 668 * information in the request structure.
669 * @SCpnt: pointer to mid-level's per scsi command structure that 669 * @SCpnt: pointer to mid-level's per scsi command structure that
670 * contains request and into which the scsi command is written 670 * contains request and into which the scsi command is written
@@ -711,7 +711,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
711 ret = BLKPREP_KILL; 711 ret = BLKPREP_KILL;
712 712
713 SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt, 713 SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
714 "sd_init_command: block=%llu, " 714 "sd_prep_fn: block=%llu, "
715 "count=%d\n", 715 "count=%d\n",
716 (unsigned long long)block, 716 (unsigned long long)block,
717 this_count)); 717 this_count));
@@ -1212,9 +1212,14 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
1212 retval = -ENODEV; 1212 retval = -ENODEV;
1213 1213
1214 if (scsi_block_when_processing_errors(sdp)) { 1214 if (scsi_block_when_processing_errors(sdp)) {
1215 retval = scsi_autopm_get_device(sdp);
1216 if (retval)
1217 goto out;
1218
1215 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); 1219 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
1216 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, 1220 retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
1217 sshdr); 1221 sshdr);
1222 scsi_autopm_put_device(sdp);
1218 } 1223 }
1219 1224
1220 /* failed to execute TUR, assume media not present */ 1225 /* failed to execute TUR, assume media not present */
@@ -2644,8 +2649,8 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
2644 * (e.g. /dev/sda). More precisely it is the block device major 2649 * (e.g. /dev/sda). More precisely it is the block device major
2645 * and minor number that is chosen here. 2650 * and minor number that is chosen here.
2646 * 2651 *
2647 * Assume sd_attach is not re-entrant (for time being) 2652 * Assume sd_probe is not re-entrant (for time being)
2648 * Also think about sd_attach() and sd_remove() running coincidentally. 2653 * Also think about sd_probe() and sd_remove() running coincidentally.
2649 **/ 2654 **/
2650static int sd_probe(struct device *dev) 2655static int sd_probe(struct device *dev)
2651{ 2656{
@@ -2660,7 +2665,7 @@ static int sd_probe(struct device *dev)
2660 goto out; 2665 goto out;
2661 2666
2662 SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp, 2667 SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
2663 "sd_attach\n")); 2668 "sd_probe\n"));
2664 2669
2665 error = -ENOMEM; 2670 error = -ENOMEM;
2666 sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL); 2671 sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a15f691f9d34..e41998cb098e 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1105,6 +1105,12 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
1105 STp->drv_buffer)); 1105 STp->drv_buffer));
1106 } 1106 }
1107 STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0; 1107 STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0;
1108 if (!STp->drv_buffer && STp->immediate_filemark) {
1109 printk(KERN_WARNING
1110 "%s: non-buffered tape: disabling writing immediate filemarks\n",
1111 name);
1112 STp->immediate_filemark = 0;
1113 }
1108 } 1114 }
1109 st_release_request(SRpnt); 1115 st_release_request(SRpnt);
1110 SRpnt = NULL; 1116 SRpnt = NULL;
@@ -1313,6 +1319,8 @@ static int st_flush(struct file *filp, fl_owner_t id)
1313 1319
1314 memset(cmd, 0, MAX_COMMAND_SIZE); 1320 memset(cmd, 0, MAX_COMMAND_SIZE);
1315 cmd[0] = WRITE_FILEMARKS; 1321 cmd[0] = WRITE_FILEMARKS;
1322 if (STp->immediate_filemark)
1323 cmd[1] = 1;
1316 cmd[4] = 1 + STp->two_fm; 1324 cmd[4] = 1 + STp->two_fm;
1317 1325
1318 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, 1326 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
@@ -2180,8 +2188,9 @@ static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm, char
2180 name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions, 2188 name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions,
2181 STp->scsi2_logical); 2189 STp->scsi2_logical);
2182 printk(KERN_INFO 2190 printk(KERN_INFO
2183 "%s: sysv: %d nowait: %d sili: %d\n", name, STm->sysv, STp->immediate, 2191 "%s: sysv: %d nowait: %d sili: %d nowait_filemark: %d\n",
2184 STp->sili); 2192 name, STm->sysv, STp->immediate, STp->sili,
2193 STp->immediate_filemark);
2185 printk(KERN_INFO "%s: debugging: %d\n", 2194 printk(KERN_INFO "%s: debugging: %d\n",
2186 name, debugging); 2195 name, debugging);
2187 } 2196 }
@@ -2223,6 +2232,7 @@ static int st_set_options(struct scsi_tape *STp, long options)
2223 STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0; 2232 STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0;
2224 STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0; 2233 STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0;
2225 STp->immediate = (options & MT_ST_NOWAIT) != 0; 2234 STp->immediate = (options & MT_ST_NOWAIT) != 0;
2235 STp->immediate_filemark = (options & MT_ST_NOWAIT_EOF) != 0;
2226 STm->sysv = (options & MT_ST_SYSV) != 0; 2236 STm->sysv = (options & MT_ST_SYSV) != 0;
2227 STp->sili = (options & MT_ST_SILI) != 0; 2237 STp->sili = (options & MT_ST_SILI) != 0;
2228 DEB( debugging = (options & MT_ST_DEBUGGING) != 0; 2238 DEB( debugging = (options & MT_ST_DEBUGGING) != 0;
@@ -2254,6 +2264,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
2254 STp->scsi2_logical = value; 2264 STp->scsi2_logical = value;
2255 if ((options & MT_ST_NOWAIT) != 0) 2265 if ((options & MT_ST_NOWAIT) != 0)
2256 STp->immediate = value; 2266 STp->immediate = value;
2267 if ((options & MT_ST_NOWAIT_EOF) != 0)
2268 STp->immediate_filemark = value;
2257 if ((options & MT_ST_SYSV) != 0) 2269 if ((options & MT_ST_SYSV) != 0)
2258 STm->sysv = value; 2270 STm->sysv = value;
2259 if ((options & MT_ST_SILI) != 0) 2271 if ((options & MT_ST_SILI) != 0)
@@ -2713,7 +2725,8 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
2713 cmd[0] = WRITE_FILEMARKS; 2725 cmd[0] = WRITE_FILEMARKS;
2714 if (cmd_in == MTWSM) 2726 if (cmd_in == MTWSM)
2715 cmd[1] = 2; 2727 cmd[1] = 2;
2716 if (cmd_in == MTWEOFI) 2728 if (cmd_in == MTWEOFI ||
2729 (cmd_in == MTWEOF && STp->immediate_filemark))
2717 cmd[1] |= 1; 2730 cmd[1] |= 1;
2718 cmd[2] = (arg >> 16); 2731 cmd[2] = (arg >> 16);
2719 cmd[3] = (arg >> 8); 2732 cmd[3] = (arg >> 8);
@@ -4092,6 +4105,7 @@ static int st_probe(struct device *dev)
4092 tpnt->scsi2_logical = ST_SCSI2LOGICAL; 4105 tpnt->scsi2_logical = ST_SCSI2LOGICAL;
4093 tpnt->sili = ST_SILI; 4106 tpnt->sili = ST_SILI;
4094 tpnt->immediate = ST_NOWAIT; 4107 tpnt->immediate = ST_NOWAIT;
4108 tpnt->immediate_filemark = 0;
4095 tpnt->default_drvbuffer = 0xff; /* No forced buffering */ 4109 tpnt->default_drvbuffer = 0xff; /* No forced buffering */
4096 tpnt->partition = 0; 4110 tpnt->partition = 0;
4097 tpnt->new_partition = 0; 4111 tpnt->new_partition = 0;
@@ -4477,6 +4491,7 @@ st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
4477 options |= STp->scsi2_logical ? MT_ST_SCSI2LOGICAL : 0; 4491 options |= STp->scsi2_logical ? MT_ST_SCSI2LOGICAL : 0;
4478 options |= STm->sysv ? MT_ST_SYSV : 0; 4492 options |= STm->sysv ? MT_ST_SYSV : 0;
4479 options |= STp->immediate ? MT_ST_NOWAIT : 0; 4493 options |= STp->immediate ? MT_ST_NOWAIT : 0;
4494 options |= STp->immediate_filemark ? MT_ST_NOWAIT_EOF : 0;
4480 options |= STp->sili ? MT_ST_SILI : 0; 4495 options |= STp->sili ? MT_ST_SILI : 0;
4481 4496
4482 l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options); 4497 l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options);
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index f91a67c6d968..ea35632b986c 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -120,6 +120,7 @@ struct scsi_tape {
120 unsigned char c_algo; /* compression algorithm */ 120 unsigned char c_algo; /* compression algorithm */
121 unsigned char pos_unknown; /* after reset position unknown */ 121 unsigned char pos_unknown; /* after reset position unknown */
122 unsigned char sili; /* use SILI when reading in variable b mode */ 122 unsigned char sili; /* use SILI when reading in variable b mode */
123 unsigned char immediate_filemark; /* write filemark immediately */
123 int tape_type; 124 int tape_type;
124 int long_timeout; /* timeout for commands known to take long time */ 125 int long_timeout; /* timeout for commands known to take long time */
125 126
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
new file mode 100644
index 000000000000..8f27f9d6f91d
--- /dev/null
+++ b/drivers/scsi/ufs/Kconfig
@@ -0,0 +1,49 @@
1#
2# Kernel configuration file for the UFS Host Controller
3#
4# This code is based on drivers/scsi/ufs/Kconfig
5# Copyright (C) 2011 Samsung Samsung India Software Operations
6#
7# Santosh Yaraganavi <santosh.sy@samsung.com>
8# Vinayak Holikatti <h.vinayak@samsung.com>
9
10# This program is free software; you can redistribute it and/or
11# modify it under the terms of the GNU General Public License
12# as published by the Free Software Foundation; either version 2
13# of the License, or (at your option) any later version.
14
15# This program is distributed in the hope that it will be useful,
16# but WITHOUT ANY WARRANTY; without even the implied warranty of
17# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18# GNU General Public License for more details.
19
20# NO WARRANTY
21# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25# solely responsible for determining the appropriateness of using and
26# distributing the Program and assumes all risks associated with its
27# exercise of rights under this Agreement, including but not limited to
28# the risks and costs of program errors, damage to or loss of data,
29# programs or equipment, and unavailability or interruption of operations.
30
31# DISCLAIMER OF LIABILITY
32# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39
40# You should have received a copy of the GNU General Public License
41# along with this program; if not, write to the Free Software
42# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
43# USA.
44
45config SCSI_UFSHCD
46 tristate "Universal Flash Storage host controller driver"
47 depends on PCI && SCSI
48 ---help---
49 This is a generic driver which supports PCIe UFS Host controllers.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
new file mode 100644
index 000000000000..adf7895a6a91
--- /dev/null
+++ b/drivers/scsi/ufs/Makefile
@@ -0,0 +1,2 @@
1# UFSHCD makefile
2obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
new file mode 100644
index 000000000000..b207529f8d54
--- /dev/null
+++ b/drivers/scsi/ufs/ufs.h
@@ -0,0 +1,207 @@
1/*
2 * Universal Flash Storage Host controller driver
3 *
4 * This code is based on drivers/scsi/ufs/ufs.h
5 * Copyright (C) 2011-2012 Samsung India Software Operations
6 *
7 * Santosh Yaraganavi <santosh.sy@samsung.com>
8 * Vinayak Holikatti <h.vinayak@samsung.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * NO WARRANTY
21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25 * solely responsible for determining the appropriateness of using and
26 * distributing the Program and assumes all risks associated with its
27 * exercise of rights under this Agreement, including but not limited to
28 * the risks and costs of program errors, damage to or loss of data,
29 * programs or equipment, and unavailability or interruption of operations.
30
31 * DISCLAIMER OF LIABILITY
32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
43 * USA.
44 */
45
46#ifndef _UFS_H
47#define _UFS_H
48
49#define MAX_CDB_SIZE 16
50
51#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
52 ((byte3 << 24) | (byte2 << 16) |\
53 (byte1 << 8) | (byte0))
54
55/*
56 * UFS Protocol Information Unit related definitions
57 */
58
59/* Task management functions */
60enum {
61 UFS_ABORT_TASK = 0x01,
62 UFS_ABORT_TASK_SET = 0x02,
63 UFS_CLEAR_TASK_SET = 0x04,
64 UFS_LOGICAL_RESET = 0x08,
65 UFS_QUERY_TASK = 0x80,
66 UFS_QUERY_TASK_SET = 0x81,
67};
68
69/* UTP UPIU Transaction Codes Initiator to Target */
70enum {
71 UPIU_TRANSACTION_NOP_OUT = 0x00,
72 UPIU_TRANSACTION_COMMAND = 0x01,
73 UPIU_TRANSACTION_DATA_OUT = 0x02,
74 UPIU_TRANSACTION_TASK_REQ = 0x04,
75 UPIU_TRANSACTION_QUERY_REQ = 0x26,
76};
77
78/* UTP UPIU Transaction Codes Target to Initiator */
79enum {
80 UPIU_TRANSACTION_NOP_IN = 0x20,
81 UPIU_TRANSACTION_RESPONSE = 0x21,
82 UPIU_TRANSACTION_DATA_IN = 0x22,
83 UPIU_TRANSACTION_TASK_RSP = 0x24,
84 UPIU_TRANSACTION_READY_XFER = 0x31,
85 UPIU_TRANSACTION_QUERY_RSP = 0x36,
86};
87
88/* UPIU Read/Write flags */
89enum {
90 UPIU_CMD_FLAGS_NONE = 0x00,
91 UPIU_CMD_FLAGS_WRITE = 0x20,
92 UPIU_CMD_FLAGS_READ = 0x40,
93};
94
95/* UPIU Task Attributes */
96enum {
97 UPIU_TASK_ATTR_SIMPLE = 0x00,
98 UPIU_TASK_ATTR_ORDERED = 0x01,
99 UPIU_TASK_ATTR_HEADQ = 0x02,
100 UPIU_TASK_ATTR_ACA = 0x03,
101};
102
103/* UTP QUERY Transaction Specific Fields OpCode */
104enum {
105 UPIU_QUERY_OPCODE_NOP = 0x0,
106 UPIU_QUERY_OPCODE_READ_DESC = 0x1,
107 UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
108 UPIU_QUERY_OPCODE_READ_ATTR = 0x3,
109 UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4,
110 UPIU_QUERY_OPCODE_READ_FLAG = 0x5,
111 UPIU_QUERY_OPCODE_SET_FLAG = 0x6,
112 UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7,
113 UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
114};
115
116/* UTP Transfer Request Command Type (CT) */
117enum {
118 UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
119 UPIU_COMMAND_SET_TYPE_UFS = 0x1,
120 UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
121};
122
123enum {
124 MASK_SCSI_STATUS = 0xFF,
125 MASK_TASK_RESPONSE = 0xFF00,
126 MASK_RSP_UPIU_RESULT = 0xFFFF,
127};
128
129/* Task management service response */
130enum {
131 UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0x00,
132 UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04,
133 UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 0x08,
134 UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05,
135 UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09,
136};
137/**
138 * struct utp_upiu_header - UPIU header structure
139 * @dword_0: UPIU header DW-0
140 * @dword_1: UPIU header DW-1
141 * @dword_2: UPIU header DW-2
142 */
143struct utp_upiu_header {
144 u32 dword_0;
145 u32 dword_1;
146 u32 dword_2;
147};
148
149/**
150 * struct utp_upiu_cmd - Command UPIU structure
151 * @header: UPIU header structure DW-0 to DW-2
152 * @data_transfer_len: Data Transfer Length DW-3
153 * @cdb: Command Descriptor Block CDB DW-4 to DW-7
154 */
155struct utp_upiu_cmd {
156 struct utp_upiu_header header;
157 u32 exp_data_transfer_len;
158 u8 cdb[MAX_CDB_SIZE];
159};
160
161/**
162 * struct utp_upiu_rsp - Response UPIU structure
163 * @header: UPIU header DW-0 to DW-2
164 * @residual_transfer_count: Residual transfer count DW-3
165 * @reserved: Reserved double words DW-4 to DW-7
166 * @sense_data_len: Sense data length DW-8 U16
167 * @sense_data: Sense data field DW-8 to DW-12
168 */
169struct utp_upiu_rsp {
170 struct utp_upiu_header header;
171 u32 residual_transfer_count;
172 u32 reserved[4];
173 u16 sense_data_len;
174 u8 sense_data[18];
175};
176
177/**
178 * struct utp_upiu_task_req - Task request UPIU structure
179 * @header - UPIU header structure DW0 to DW-2
180 * @input_param1: Input parameter 1 DW-3
181 * @input_param2: Input parameter 2 DW-4
182 * @input_param3: Input parameter 3 DW-5
183 * @reserved: Reserved double words DW-6 to DW-7
184 */
185struct utp_upiu_task_req {
186 struct utp_upiu_header header;
187 u32 input_param1;
188 u32 input_param2;
189 u32 input_param3;
190 u32 reserved[2];
191};
192
193/**
194 * struct utp_upiu_task_rsp - Task Management Response UPIU structure
195 * @header: UPIU header structure DW0-DW-2
196 * @output_param1: Ouput parameter 1 DW3
197 * @output_param2: Output parameter 2 DW4
198 * @reserved: Reserved double words DW-5 to DW-7
199 */
200struct utp_upiu_task_rsp {
201 struct utp_upiu_header header;
202 u32 output_param1;
203 u32 output_param2;
204 u32 reserved[3];
205};
206
207#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
new file mode 100644
index 000000000000..52b96e8bf92e
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -0,0 +1,1978 @@
1/*
2 * Universal Flash Storage Host controller driver
3 *
4 * This code is based on drivers/scsi/ufs/ufshcd.c
5 * Copyright (C) 2011-2012 Samsung India Software Operations
6 *
7 * Santosh Yaraganavi <santosh.sy@samsung.com>
8 * Vinayak Holikatti <h.vinayak@samsung.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * NO WARRANTY
21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25 * solely responsible for determining the appropriateness of using and
26 * distributing the Program and assumes all risks associated with its
27 * exercise of rights under this Agreement, including but not limited to
28 * the risks and costs of program errors, damage to or loss of data,
29 * programs or equipment, and unavailability or interruption of operations.
30
31 * DISCLAIMER OF LIABILITY
32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
43 * USA.
44 */
45
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/init.h>
49#include <linux/pci.h>
50#include <linux/interrupt.h>
51#include <linux/io.h>
52#include <linux/delay.h>
53#include <linux/slab.h>
54#include <linux/spinlock.h>
55#include <linux/workqueue.h>
56#include <linux/errno.h>
57#include <linux/types.h>
58#include <linux/wait.h>
59#include <linux/bitops.h>
60
61#include <asm/irq.h>
62#include <asm/byteorder.h>
63#include <scsi/scsi.h>
64#include <scsi/scsi_cmnd.h>
65#include <scsi/scsi_host.h>
66#include <scsi/scsi_tcq.h>
67#include <scsi/scsi_dbg.h>
68#include <scsi/scsi_eh.h>
69
70#include "ufs.h"
71#include "ufshci.h"
72
73#define UFSHCD "ufshcd"
74#define UFSHCD_DRIVER_VERSION "0.1"
75
76enum {
77 UFSHCD_MAX_CHANNEL = 0,
78 UFSHCD_MAX_ID = 1,
79 UFSHCD_MAX_LUNS = 8,
80 UFSHCD_CMD_PER_LUN = 32,
81 UFSHCD_CAN_QUEUE = 32,
82};
83
84/* UFSHCD states */
85enum {
86 UFSHCD_STATE_OPERATIONAL,
87 UFSHCD_STATE_RESET,
88 UFSHCD_STATE_ERROR,
89};
90
91/* Interrupt configuration options */
92enum {
93 UFSHCD_INT_DISABLE,
94 UFSHCD_INT_ENABLE,
95 UFSHCD_INT_CLEAR,
96};
97
98/* Interrupt aggregation options */
99enum {
100 INT_AGGR_RESET,
101 INT_AGGR_CONFIG,
102};
103
104/**
105 * struct uic_command - UIC command structure
106 * @command: UIC command
107 * @argument1: UIC command argument 1
108 * @argument2: UIC command argument 2
109 * @argument3: UIC command argument 3
110 * @cmd_active: Indicate if UIC command is outstanding
111 * @result: UIC command result
112 */
113struct uic_command {
114 u32 command;
115 u32 argument1;
116 u32 argument2;
117 u32 argument3;
118 int cmd_active;
119 int result;
120};
121
122/**
123 * struct ufs_hba - per adapter private structure
124 * @mmio_base: UFSHCI base register address
125 * @ucdl_base_addr: UFS Command Descriptor base address
126 * @utrdl_base_addr: UTP Transfer Request Descriptor base address
127 * @utmrdl_base_addr: UTP Task Management Descriptor base address
128 * @ucdl_dma_addr: UFS Command Descriptor DMA address
129 * @utrdl_dma_addr: UTRDL DMA address
130 * @utmrdl_dma_addr: UTMRDL DMA address
131 * @host: Scsi_Host instance of the driver
132 * @pdev: PCI device handle
133 * @lrb: local reference block
134 * @outstanding_tasks: Bits representing outstanding task requests
135 * @outstanding_reqs: Bits representing outstanding transfer requests
136 * @capabilities: UFS Controller Capabilities
137 * @nutrs: Transfer Request Queue depth supported by controller
138 * @nutmrs: Task Management Queue depth supported by controller
139 * @active_uic_cmd: handle of active UIC command
140 * @ufshcd_tm_wait_queue: wait queue for task management
141 * @tm_condition: condition variable for task management
142 * @ufshcd_state: UFSHCD states
143 * @int_enable_mask: Interrupt Mask Bits
144 * @uic_workq: Work queue for UIC completion handling
145 * @feh_workq: Work queue for fatal controller error handling
146 * @errors: HBA errors
147 */
148struct ufs_hba {
149 void __iomem *mmio_base;
150
151 /* Virtual memory reference */
152 struct utp_transfer_cmd_desc *ucdl_base_addr;
153 struct utp_transfer_req_desc *utrdl_base_addr;
154 struct utp_task_req_desc *utmrdl_base_addr;
155
156 /* DMA memory reference */
157 dma_addr_t ucdl_dma_addr;
158 dma_addr_t utrdl_dma_addr;
159 dma_addr_t utmrdl_dma_addr;
160
161 struct Scsi_Host *host;
162 struct pci_dev *pdev;
163
164 struct ufshcd_lrb *lrb;
165
166 unsigned long outstanding_tasks;
167 unsigned long outstanding_reqs;
168
169 u32 capabilities;
170 int nutrs;
171 int nutmrs;
172 u32 ufs_version;
173
174 struct uic_command active_uic_cmd;
175 wait_queue_head_t ufshcd_tm_wait_queue;
176 unsigned long tm_condition;
177
178 u32 ufshcd_state;
179 u32 int_enable_mask;
180
181 /* Work Queues */
182 struct work_struct uic_workq;
183 struct work_struct feh_workq;
184
185 /* HBA Errors */
186 u32 errors;
187};
188
189/**
190 * struct ufshcd_lrb - local reference block
191 * @utr_descriptor_ptr: UTRD address of the command
192 * @ucd_cmd_ptr: UCD address of the command
193 * @ucd_rsp_ptr: Response UPIU address for this command
194 * @ucd_prdt_ptr: PRDT address of the command
195 * @cmd: pointer to SCSI command
196 * @sense_buffer: pointer to sense buffer address of the SCSI command
197 * @sense_bufflen: Length of the sense buffer
198 * @scsi_status: SCSI status of the command
199 * @command_type: SCSI, UFS, Query.
200 * @task_tag: Task tag of the command
201 * @lun: LUN of the command
202 */
203struct ufshcd_lrb {
204 struct utp_transfer_req_desc *utr_descriptor_ptr;
205 struct utp_upiu_cmd *ucd_cmd_ptr;
206 struct utp_upiu_rsp *ucd_rsp_ptr;
207 struct ufshcd_sg_entry *ucd_prdt_ptr;
208
209 struct scsi_cmnd *cmd;
210 u8 *sense_buffer;
211 unsigned int sense_bufflen;
212 int scsi_status;
213
214 int command_type;
215 int task_tag;
216 unsigned int lun;
217};
218
219/**
220 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
221 * @hba - Pointer to adapter instance
222 *
223 * Returns UFSHCI version supported by the controller
224 */
225static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
226{
227 return readl(hba->mmio_base + REG_UFS_VERSION);
228}
229
230/**
231 * ufshcd_is_device_present - Check if any device connected to
232 * the host controller
233 * @reg_hcs - host controller status register value
234 *
235 * Returns 0 if device present, non-zero if no device detected
236 */
237static inline int ufshcd_is_device_present(u32 reg_hcs)
238{
239 return (DEVICE_PRESENT & reg_hcs) ? 0 : -1;
240}
241
242/**
243 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
244 * @lrb: pointer to local command reference block
245 *
246 * This function is used to get the OCS field from UTRD
247 * Returns the OCS field in the UTRD
248 */
249static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
250{
251 return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
252}
253
254/**
255 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
256 * @task_req_descp: pointer to utp_task_req_desc structure
257 *
258 * This function is used to get the OCS field from UTMRD
259 * Returns the OCS field in the UTMRD
260 */
261static inline int
262ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
263{
264 return task_req_descp->header.dword_2 & MASK_OCS;
265}
266
267/**
268 * ufshcd_get_tm_free_slot - get a free slot for task management request
269 * @hba: per adapter instance
270 *
271 * Returns maximum number of task management request slots in case of
272 * task management queue full or returns the free slot number
273 */
274static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
275{
276 return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
277}
278
279/**
280 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
281 * @hba: per adapter instance
282 * @pos: position of the bit to be cleared
283 */
284static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
285{
286 writel(~(1 << pos),
287 (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_CLEAR));
288}
289
290/**
291 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
292 * @reg: Register value of host controller status
293 *
294 * Returns integer, 0 on Success and positive value if failed
295 */
296static inline int ufshcd_get_lists_status(u32 reg)
297{
298 /*
299 * The mask 0xFF is for the following HCS register bits
300 * Bit Description
301 * 0 Device Present
302 * 1 UTRLRDY
303 * 2 UTMRLRDY
304 * 3 UCRDY
305 * 4 HEI
306 * 5 DEI
307 * 6-7 reserved
308 */
309 return (((reg) & (0xFF)) >> 1) ^ (0x07);
310}
311
312/**
313 * ufshcd_get_uic_cmd_result - Get the UIC command result
314 * @hba: Pointer to adapter instance
315 *
316 * This function gets the result of UIC command completion
317 * Returns 0 on success, non zero value on error
318 */
319static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
320{
321 return readl(hba->mmio_base + REG_UIC_COMMAND_ARG_2) &
322 MASK_UIC_COMMAND_RESULT;
323}
324
325/**
326 * ufshcd_free_hba_memory - Free allocated memory for LRB, request
327 * and task lists
328 * @hba: Pointer to adapter instance
329 */
330static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
331{
332 size_t utmrdl_size, utrdl_size, ucdl_size;
333
334 kfree(hba->lrb);
335
336 if (hba->utmrdl_base_addr) {
337 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
338 dma_free_coherent(&hba->pdev->dev, utmrdl_size,
339 hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
340 }
341
342 if (hba->utrdl_base_addr) {
343 utrdl_size =
344 (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
345 dma_free_coherent(&hba->pdev->dev, utrdl_size,
346 hba->utrdl_base_addr, hba->utrdl_dma_addr);
347 }
348
349 if (hba->ucdl_base_addr) {
350 ucdl_size =
351 (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
352 dma_free_coherent(&hba->pdev->dev, ucdl_size,
353 hba->ucdl_base_addr, hba->ucdl_dma_addr);
354 }
355}
356
357/**
358 * ufshcd_is_valid_req_rsp - checks if controller TR response is valid
359 * @ucd_rsp_ptr: pointer to response UPIU
360 *
361 * This function checks the response UPIU for valid transaction type in
362 * response field
363 * Returns 0 on success, non-zero on failure
364 */
365static inline int
366ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
367{
368 return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) ==
369 UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16;
370}
371
372/**
373 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
374 * @ucd_rsp_ptr: pointer to response UPIU
375 *
376 * This function gets the response status and scsi_status from response UPIU
377 * Returns the response result code.
378 */
379static inline int
380ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
381{
382 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
383}
384
385/**
386 * ufshcd_config_int_aggr - Configure interrupt aggregation values.
387 * Currently there is no use case where we want to configure
388 * interrupt aggregation dynamically. So to configure interrupt
389 * aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and
390 * INT_AGGR_TIMEOUT_VALUE are used.
391 * @hba: per adapter instance
392 * @option: Interrupt aggregation option
393 */
394static inline void
395ufshcd_config_int_aggr(struct ufs_hba *hba, int option)
396{
397 switch (option) {
398 case INT_AGGR_RESET:
399 writel((INT_AGGR_ENABLE |
400 INT_AGGR_COUNTER_AND_TIMER_RESET),
401 (hba->mmio_base +
402 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL));
403 break;
404 case INT_AGGR_CONFIG:
405 writel((INT_AGGR_ENABLE |
406 INT_AGGR_PARAM_WRITE |
407 INT_AGGR_COUNTER_THRESHOLD_VALUE |
408 INT_AGGR_TIMEOUT_VALUE),
409 (hba->mmio_base +
410 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL));
411 break;
412 }
413}
414
415/**
416 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
417 * When run-stop registers are set to 1, it indicates the
418 * host controller that it can process the requests
419 * @hba: per adapter instance
420 */
421static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
422{
423 writel(UTP_TASK_REQ_LIST_RUN_STOP_BIT,
424 (hba->mmio_base +
425 REG_UTP_TASK_REQ_LIST_RUN_STOP));
426 writel(UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
427 (hba->mmio_base +
428 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP));
429}
430
431/**
432 * ufshcd_hba_stop - Send controller to reset state
433 * @hba: per adapter instance
434 */
435static inline void ufshcd_hba_stop(struct ufs_hba *hba)
436{
437 writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
438}
439
440/**
441 * ufshcd_hba_start - Start controller initialization sequence
442 * @hba: per adapter instance
443 */
444static inline void ufshcd_hba_start(struct ufs_hba *hba)
445{
446 writel(CONTROLLER_ENABLE , (hba->mmio_base + REG_CONTROLLER_ENABLE));
447}
448
449/**
450 * ufshcd_is_hba_active - Get controller state
451 * @hba: per adapter instance
452 *
453 * Returns zero if controller is active, 1 otherwise
454 */
455static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
456{
457 return (readl(hba->mmio_base + REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
458}
459
460/**
461 * ufshcd_send_command - Send SCSI or device management commands
462 * @hba: per adapter instance
463 * @task_tag: Task tag of the command
464 */
465static inline
466void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
467{
468 __set_bit(task_tag, &hba->outstanding_reqs);
469 writel((1 << task_tag),
470 (hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL));
471}
472
473/**
474 * ufshcd_copy_sense_data - Copy sense data in case of check condition
475 * @lrb - pointer to local reference block
476 */
477static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
478{
479 int len;
480 if (lrbp->sense_buffer) {
481 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len);
482 memcpy(lrbp->sense_buffer,
483 lrbp->ucd_rsp_ptr->sense_data,
484 min_t(int, len, SCSI_SENSE_BUFFERSIZE));
485 }
486}
487
488/**
489 * ufshcd_hba_capabilities - Read controller capabilities
490 * @hba: per adapter instance
491 */
492static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
493{
494 hba->capabilities =
495 readl(hba->mmio_base + REG_CONTROLLER_CAPABILITIES);
496
497 /* nutrs and nutmrs are 0 based values */
498 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
499 hba->nutmrs =
500 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
501}
502
503/**
504 * ufshcd_send_uic_command - Send UIC commands to unipro layers
505 * @hba: per adapter instance
506 * @uic_command: UIC command
507 */
508static inline void
509ufshcd_send_uic_command(struct ufs_hba *hba, struct uic_command *uic_cmnd)
510{
511 /* Write Args */
512 writel(uic_cmnd->argument1,
513 (hba->mmio_base + REG_UIC_COMMAND_ARG_1));
514 writel(uic_cmnd->argument2,
515 (hba->mmio_base + REG_UIC_COMMAND_ARG_2));
516 writel(uic_cmnd->argument3,
517 (hba->mmio_base + REG_UIC_COMMAND_ARG_3));
518
519 /* Write UIC Cmd */
520 writel((uic_cmnd->command & COMMAND_OPCODE_MASK),
521 (hba->mmio_base + REG_UIC_COMMAND));
522}
523
524/**
525 * ufshcd_map_sg - Map scatter-gather list to prdt
526 * @lrbp - pointer to local reference block
527 *
528 * Returns 0 in case of success, non-zero value in case of failure
529 */
530static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
531{
532 struct ufshcd_sg_entry *prd_table;
533 struct scatterlist *sg;
534 struct scsi_cmnd *cmd;
535 int sg_segments;
536 int i;
537
538 cmd = lrbp->cmd;
539 sg_segments = scsi_dma_map(cmd);
540 if (sg_segments < 0)
541 return sg_segments;
542
543 if (sg_segments) {
544 lrbp->utr_descriptor_ptr->prd_table_length =
545 cpu_to_le16((u16) (sg_segments));
546
547 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
548
549 scsi_for_each_sg(cmd, sg, sg_segments, i) {
550 prd_table[i].size =
551 cpu_to_le32(((u32) sg_dma_len(sg))-1);
552 prd_table[i].base_addr =
553 cpu_to_le32(lower_32_bits(sg->dma_address));
554 prd_table[i].upper_addr =
555 cpu_to_le32(upper_32_bits(sg->dma_address));
556 }
557 } else {
558 lrbp->utr_descriptor_ptr->prd_table_length = 0;
559 }
560
561 return 0;
562}
563
564/**
565 * ufshcd_int_config - enable/disable interrupts
566 * @hba: per adapter instance
567 * @option: interrupt option
568 */
569static void ufshcd_int_config(struct ufs_hba *hba, u32 option)
570{
571 switch (option) {
572 case UFSHCD_INT_ENABLE:
573 writel(hba->int_enable_mask,
574 (hba->mmio_base + REG_INTERRUPT_ENABLE));
575 break;
576 case UFSHCD_INT_DISABLE:
577 if (hba->ufs_version == UFSHCI_VERSION_10)
578 writel(INTERRUPT_DISABLE_MASK_10,
579 (hba->mmio_base + REG_INTERRUPT_ENABLE));
580 else
581 writel(INTERRUPT_DISABLE_MASK_11,
582 (hba->mmio_base + REG_INTERRUPT_ENABLE));
583 break;
584 }
585}
586
587/**
588 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
589 * @lrb - pointer to local reference block
590 */
591static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
592{
593 struct utp_transfer_req_desc *req_desc;
594 struct utp_upiu_cmd *ucd_cmd_ptr;
595 u32 data_direction;
596 u32 upiu_flags;
597
598 ucd_cmd_ptr = lrbp->ucd_cmd_ptr;
599 req_desc = lrbp->utr_descriptor_ptr;
600
601 switch (lrbp->command_type) {
602 case UTP_CMD_TYPE_SCSI:
603 if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
604 data_direction = UTP_DEVICE_TO_HOST;
605 upiu_flags = UPIU_CMD_FLAGS_READ;
606 } else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) {
607 data_direction = UTP_HOST_TO_DEVICE;
608 upiu_flags = UPIU_CMD_FLAGS_WRITE;
609 } else {
610 data_direction = UTP_NO_DATA_TRANSFER;
611 upiu_flags = UPIU_CMD_FLAGS_NONE;
612 }
613
614 /* Transfer request descriptor header fields */
615 req_desc->header.dword_0 =
616 cpu_to_le32(data_direction | UTP_SCSI_COMMAND);
617
618 /*
619 * assigning invalid value for command status. Controller
620 * updates OCS on command completion, with the command
621 * status
622 */
623 req_desc->header.dword_2 =
624 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
625
626 /* command descriptor fields */
627 ucd_cmd_ptr->header.dword_0 =
628 cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
629 upiu_flags,
630 lrbp->lun,
631 lrbp->task_tag));
632 ucd_cmd_ptr->header.dword_1 =
633 cpu_to_be32(
634 UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
635 0,
636 0,
637 0));
638
639 /* Total EHS length and Data segment length will be zero */
640 ucd_cmd_ptr->header.dword_2 = 0;
641
642 ucd_cmd_ptr->exp_data_transfer_len =
643 cpu_to_be32(lrbp->cmd->transfersize);
644
645 memcpy(ucd_cmd_ptr->cdb,
646 lrbp->cmd->cmnd,
647 (min_t(unsigned short,
648 lrbp->cmd->cmd_len,
649 MAX_CDB_SIZE)));
650 break;
651 case UTP_CMD_TYPE_DEV_MANAGE:
652 /* For query function implementation */
653 break;
654 case UTP_CMD_TYPE_UFS:
655 /* For UFS native command implementation */
656 break;
657 } /* end of switch */
658}
659
660/**
661 * ufshcd_queuecommand - main entry point for SCSI requests
662 * @cmd: command from SCSI Midlayer
663 * @done: call back function
664 *
665 * Returns 0 for success, non-zero in case of failure
666 */
667static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
668{
669 struct ufshcd_lrb *lrbp;
670 struct ufs_hba *hba;
671 unsigned long flags;
672 int tag;
673 int err = 0;
674
675 hba = shost_priv(host);
676
677 tag = cmd->request->tag;
678
679 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
680 err = SCSI_MLQUEUE_HOST_BUSY;
681 goto out;
682 }
683
684 lrbp = &hba->lrb[tag];
685
686 lrbp->cmd = cmd;
687 lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
688 lrbp->sense_buffer = cmd->sense_buffer;
689 lrbp->task_tag = tag;
690 lrbp->lun = cmd->device->lun;
691
692 lrbp->command_type = UTP_CMD_TYPE_SCSI;
693
694 /* form UPIU before issuing the command */
695 ufshcd_compose_upiu(lrbp);
696 err = ufshcd_map_sg(lrbp);
697 if (err)
698 goto out;
699
700 /* issue command to the controller */
701 spin_lock_irqsave(hba->host->host_lock, flags);
702 ufshcd_send_command(hba, tag);
703 spin_unlock_irqrestore(hba->host->host_lock, flags);
704out:
705 return err;
706}
707
708/**
709 * ufshcd_memory_alloc - allocate memory for host memory space data structures
710 * @hba: per adapter instance
711 *
712 * 1. Allocate DMA memory for Command Descriptor array
713 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
714 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
715 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
716 * (UTMRDL)
717 * 4. Allocate memory for local reference block(lrb).
718 *
719 * Returns 0 for success, non-zero in case of failure
720 */
721static int ufshcd_memory_alloc(struct ufs_hba *hba)
722{
723 size_t utmrdl_size, utrdl_size, ucdl_size;
724
725 /* Allocate memory for UTP command descriptors */
726 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
727 hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
728 ucdl_size,
729 &hba->ucdl_dma_addr,
730 GFP_KERNEL);
731
732 /*
733 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
734 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
735 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
736 * be aligned to 128 bytes as well
737 */
738 if (!hba->ucdl_base_addr ||
739 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
740 dev_err(&hba->pdev->dev,
741 "Command Descriptor Memory allocation failed\n");
742 goto out;
743 }
744
745 /*
746 * Allocate memory for UTP Transfer descriptors
747 * UFSHCI requires 1024 byte alignment of UTRD
748 */
749 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
750 hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
751 utrdl_size,
752 &hba->utrdl_dma_addr,
753 GFP_KERNEL);
754 if (!hba->utrdl_base_addr ||
755 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
756 dev_err(&hba->pdev->dev,
757 "Transfer Descriptor Memory allocation failed\n");
758 goto out;
759 }
760
761 /*
762 * Allocate memory for UTP Task Management descriptors
763 * UFSHCI requires 1024 byte alignment of UTMRD
764 */
765 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
766 hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
767 utmrdl_size,
768 &hba->utmrdl_dma_addr,
769 GFP_KERNEL);
770 if (!hba->utmrdl_base_addr ||
771 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
772 dev_err(&hba->pdev->dev,
773 "Task Management Descriptor Memory allocation failed\n");
774 goto out;
775 }
776
777 /* Allocate memory for local reference block */
778 hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
779 if (!hba->lrb) {
780 dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n");
781 goto out;
782 }
783 return 0;
784out:
785 ufshcd_free_hba_memory(hba);
786 return -ENOMEM;
787}
788
789/**
790 * ufshcd_host_memory_configure - configure local reference block with
791 * memory offsets
792 * @hba: per adapter instance
793 *
794 * Configure Host memory space
795 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
796 * address.
797 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
798 * and PRDT offset.
799 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
800 * into local reference block.
801 */
802static void ufshcd_host_memory_configure(struct ufs_hba *hba)
803{
804 struct utp_transfer_cmd_desc *cmd_descp;
805 struct utp_transfer_req_desc *utrdlp;
806 dma_addr_t cmd_desc_dma_addr;
807 dma_addr_t cmd_desc_element_addr;
808 u16 response_offset;
809 u16 prdt_offset;
810 int cmd_desc_size;
811 int i;
812
813 utrdlp = hba->utrdl_base_addr;
814 cmd_descp = hba->ucdl_base_addr;
815
816 response_offset =
817 offsetof(struct utp_transfer_cmd_desc, response_upiu);
818 prdt_offset =
819 offsetof(struct utp_transfer_cmd_desc, prd_table);
820
821 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
822 cmd_desc_dma_addr = hba->ucdl_dma_addr;
823
824 for (i = 0; i < hba->nutrs; i++) {
825 /* Configure UTRD with command descriptor base address */
826 cmd_desc_element_addr =
827 (cmd_desc_dma_addr + (cmd_desc_size * i));
828 utrdlp[i].command_desc_base_addr_lo =
829 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
830 utrdlp[i].command_desc_base_addr_hi =
831 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
832
833 /* Response upiu and prdt offset should be in double words */
834 utrdlp[i].response_upiu_offset =
835 cpu_to_le16((response_offset >> 2));
836 utrdlp[i].prd_table_offset =
837 cpu_to_le16((prdt_offset >> 2));
838 utrdlp[i].response_upiu_length =
839 cpu_to_le16(ALIGNED_UPIU_SIZE);
840
841 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
842 hba->lrb[i].ucd_cmd_ptr =
843 (struct utp_upiu_cmd *)(cmd_descp + i);
844 hba->lrb[i].ucd_rsp_ptr =
845 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
846 hba->lrb[i].ucd_prdt_ptr =
847 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
848 }
849}
850
851/**
852 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
853 * @hba: per adapter instance
854 *
855 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
856 * in order to initialize the Unipro link startup procedure.
857 * Once the Unipro links are up, the device connected to the controller
858 * is detected.
859 *
860 * Returns 0 on success, non-zero value on failure
861 */
862static int ufshcd_dme_link_startup(struct ufs_hba *hba)
863{
864 struct uic_command *uic_cmd;
865 unsigned long flags;
866
867 /* check if controller is ready to accept UIC commands */
868 if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) &
869 UIC_COMMAND_READY) == 0x0) {
870 dev_err(&hba->pdev->dev,
871 "Controller not ready"
872 " to accept UIC commands\n");
873 return -EIO;
874 }
875
876 spin_lock_irqsave(hba->host->host_lock, flags);
877
878 /* form UIC command */
879 uic_cmd = &hba->active_uic_cmd;
880 uic_cmd->command = UIC_CMD_DME_LINK_STARTUP;
881 uic_cmd->argument1 = 0;
882 uic_cmd->argument2 = 0;
883 uic_cmd->argument3 = 0;
884
885 /* enable UIC related interrupts */
886 hba->int_enable_mask |= UIC_COMMAND_COMPL;
887 ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
888
889 /* sending UIC commands to controller */
890 ufshcd_send_uic_command(hba, uic_cmd);
891 spin_unlock_irqrestore(hba->host->host_lock, flags);
892 return 0;
893}
894
895/**
896 * ufshcd_make_hba_operational - Make UFS controller operational
897 * @hba: per adapter instance
898 *
899 * To bring UFS host controller to operational state,
900 * 1. Check if device is present
901 * 2. Configure run-stop-registers
902 * 3. Enable required interrupts
903 * 4. Configure interrupt aggregation
904 *
905 * Returns 0 on success, non-zero value on failure
906 */
907static int ufshcd_make_hba_operational(struct ufs_hba *hba)
908{
909 int err = 0;
910 u32 reg;
911
912 /* check if device present */
913 reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
914 if (ufshcd_is_device_present(reg)) {
915 dev_err(&hba->pdev->dev, "cc: Device not present\n");
916 err = -ENXIO;
917 goto out;
918 }
919
920 /*
921 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
922 * DEI, HEI bits must be 0
923 */
924 if (!(ufshcd_get_lists_status(reg))) {
925 ufshcd_enable_run_stop_reg(hba);
926 } else {
927 dev_err(&hba->pdev->dev,
928 "Host controller not ready to process requests");
929 err = -EIO;
930 goto out;
931 }
932
933 /* Enable required interrupts */
934 hba->int_enable_mask |= (UTP_TRANSFER_REQ_COMPL |
935 UIC_ERROR |
936 UTP_TASK_REQ_COMPL |
937 DEVICE_FATAL_ERROR |
938 CONTROLLER_FATAL_ERROR |
939 SYSTEM_BUS_FATAL_ERROR);
940 ufshcd_int_config(hba, UFSHCD_INT_ENABLE);
941
942 /* Configure interrupt aggregation */
943 ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG);
944
945 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
946 scsi_unblock_requests(hba->host);
947
948 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
949 scsi_scan_host(hba->host);
950out:
951 return err;
952}
953
954/**
955 * ufshcd_hba_enable - initialize the controller
956 * @hba: per adapter instance
957 *
958 * The controller resets itself and controller firmware initialization
959 * sequence kicks off. When controller is ready it will set
960 * the Host Controller Enable bit to 1.
961 *
962 * Returns 0 on success, non-zero value on failure
963 */
964static int ufshcd_hba_enable(struct ufs_hba *hba)
965{
966 int retry;
967
968 /*
969 * msleep of 1 and 5 used in this function might result in msleep(20),
970 * but it was necessary to send the UFS FPGA to reset mode during
971 * development and testing of this driver. msleep can be changed to
972 * mdelay and retry count can be reduced based on the controller.
973 */
974 if (!ufshcd_is_hba_active(hba)) {
975
976 /* change controller state to "reset state" */
977 ufshcd_hba_stop(hba);
978
979 /*
980 * This delay is based on the testing done with UFS host
981 * controller FPGA. The delay can be changed based on the
982 * host controller used.
983 */
984 msleep(5);
985 }
986
987 /* start controller initialization sequence */
988 ufshcd_hba_start(hba);
989
990 /*
991 * To initialize a UFS host controller HCE bit must be set to 1.
992 * During initialization the HCE bit value changes from 1->0->1.
993 * When the host controller completes initialization sequence
994 * it sets the value of HCE bit to 1. The same HCE bit is read back
995 * to check if the controller has completed initialization sequence.
996 * So without this delay the value HCE = 1, set in the previous
997 * instruction might be read back.
998 * This delay can be changed based on the controller.
999 */
1000 msleep(1);
1001
1002 /* wait for the host controller to complete initialization */
1003 retry = 10;
1004 while (ufshcd_is_hba_active(hba)) {
1005 if (retry) {
1006 retry--;
1007 } else {
1008 dev_err(&hba->pdev->dev,
1009 "Controller enable failed\n");
1010 return -EIO;
1011 }
1012 msleep(5);
1013 }
1014 return 0;
1015}
1016
1017/**
1018 * ufshcd_initialize_hba - start the initialization process
1019 * @hba: per adapter instance
1020 *
1021 * 1. Enable the controller via ufshcd_hba_enable.
1022 * 2. Program the Transfer Request List Address with the starting address of
1023 * UTRDL.
1024 * 3. Program the Task Management Request List Address with starting address
1025 * of UTMRDL.
1026 *
1027 * Returns 0 on success, non-zero value on failure.
1028 */
1029static int ufshcd_initialize_hba(struct ufs_hba *hba)
1030{
1031 if (ufshcd_hba_enable(hba))
1032 return -EIO;
1033
1034 /* Configure UTRL and UTMRL base address registers */
1035 writel(hba->utrdl_dma_addr,
1036 (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_L));
1037 writel(lower_32_bits(hba->utrdl_dma_addr),
1038 (hba->mmio_base + REG_UTP_TRANSFER_REQ_LIST_BASE_H));
1039 writel(hba->utmrdl_dma_addr,
1040 (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_L));
1041 writel(upper_32_bits(hba->utmrdl_dma_addr),
1042 (hba->mmio_base + REG_UTP_TASK_REQ_LIST_BASE_H));
1043
1044 /* Initialize unipro link startup procedure */
1045 return ufshcd_dme_link_startup(hba);
1046}
1047
1048/**
1049 * ufshcd_do_reset - reset the host controller
1050 * @hba: per adapter instance
1051 *
1052 * Returns SUCCESS/FAILED
1053 */
1054static int ufshcd_do_reset(struct ufs_hba *hba)
1055{
1056 struct ufshcd_lrb *lrbp;
1057 unsigned long flags;
1058 int tag;
1059
1060 /* block commands from midlayer */
1061 scsi_block_requests(hba->host);
1062
1063 spin_lock_irqsave(hba->host->host_lock, flags);
1064 hba->ufshcd_state = UFSHCD_STATE_RESET;
1065
1066 /* send controller to reset state */
1067 ufshcd_hba_stop(hba);
1068 spin_unlock_irqrestore(hba->host->host_lock, flags);
1069
1070 /* abort outstanding commands */
1071 for (tag = 0; tag < hba->nutrs; tag++) {
1072 if (test_bit(tag, &hba->outstanding_reqs)) {
1073 lrbp = &hba->lrb[tag];
1074 scsi_dma_unmap(lrbp->cmd);
1075 lrbp->cmd->result = DID_RESET << 16;
1076 lrbp->cmd->scsi_done(lrbp->cmd);
1077 lrbp->cmd = NULL;
1078 }
1079 }
1080
1081 /* clear outstanding request/task bit maps */
1082 hba->outstanding_reqs = 0;
1083 hba->outstanding_tasks = 0;
1084
1085 /* start the initialization process */
1086 if (ufshcd_initialize_hba(hba)) {
1087 dev_err(&hba->pdev->dev,
1088 "Reset: Controller initialization failed\n");
1089 return FAILED;
1090 }
1091 return SUCCESS;
1092}
1093
1094/**
1095 * ufshcd_slave_alloc - handle initial SCSI device configurations
1096 * @sdev: pointer to SCSI device
1097 *
1098 * Returns success
1099 */
1100static int ufshcd_slave_alloc(struct scsi_device *sdev)
1101{
1102 struct ufs_hba *hba;
1103
1104 hba = shost_priv(sdev->host);
1105 sdev->tagged_supported = 1;
1106
1107 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
1108 sdev->use_10_for_ms = 1;
1109 scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
1110
1111 /*
1112 * Inform SCSI Midlayer that the LUN queue depth is same as the
1113 * controller queue depth. If a LUN queue depth is less than the
1114 * controller queue depth and if the LUN reports
1115 * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
1116 * with scsi_adjust_queue_depth.
1117 */
1118 scsi_activate_tcq(sdev, hba->nutrs);
1119 return 0;
1120}
1121
1122/**
1123 * ufshcd_slave_destroy - remove SCSI device configurations
1124 * @sdev: pointer to SCSI device
1125 */
1126static void ufshcd_slave_destroy(struct scsi_device *sdev)
1127{
1128 struct ufs_hba *hba;
1129
1130 hba = shost_priv(sdev->host);
1131 scsi_deactivate_tcq(sdev, hba->nutrs);
1132}
1133
1134/**
1135 * ufshcd_task_req_compl - handle task management request completion
1136 * @hba: per adapter instance
1137 * @index: index of the completed request
1138 *
1139 * Returns SUCCESS/FAILED
1140 */
1141static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
1142{
1143 struct utp_task_req_desc *task_req_descp;
1144 struct utp_upiu_task_rsp *task_rsp_upiup;
1145 unsigned long flags;
1146 int ocs_value;
1147 int task_result;
1148
1149 spin_lock_irqsave(hba->host->host_lock, flags);
1150
1151 /* Clear completed tasks from outstanding_tasks */
1152 __clear_bit(index, &hba->outstanding_tasks);
1153
1154 task_req_descp = hba->utmrdl_base_addr;
1155 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
1156
1157 if (ocs_value == OCS_SUCCESS) {
1158 task_rsp_upiup = (struct utp_upiu_task_rsp *)
1159 task_req_descp[index].task_rsp_upiu;
1160 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
1161 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
1162
1163 if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL ||
1164 task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
1165 task_result = FAILED;
1166 } else {
1167 task_result = FAILED;
1168 dev_err(&hba->pdev->dev,
1169 "trc: Invalid ocs = %x\n", ocs_value);
1170 }
1171 spin_unlock_irqrestore(hba->host->host_lock, flags);
1172 return task_result;
1173}
1174
1175/**
1176 * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
1177 * SAM_STAT_TASK_SET_FULL SCSI command status.
1178 * @cmd: pointer to SCSI command
1179 */
1180static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
1181{
1182 struct ufs_hba *hba;
1183 int i;
1184 int lun_qdepth = 0;
1185
1186 hba = shost_priv(cmd->device->host);
1187
1188 /*
1189 * LUN queue depth can be obtained by counting outstanding commands
1190 * on the LUN.
1191 */
1192 for (i = 0; i < hba->nutrs; i++) {
1193 if (test_bit(i, &hba->outstanding_reqs)) {
1194
1195 /*
1196 * Check if the outstanding command belongs
1197 * to the LUN which reported SAM_STAT_TASK_SET_FULL.
1198 */
1199 if (cmd->device->lun == hba->lrb[i].lun)
1200 lun_qdepth++;
1201 }
1202 }
1203
1204 /*
1205 * LUN queue depth will be total outstanding commands, except the
1206 * command for which the LUN reported SAM_STAT_TASK_SET_FULL.
1207 */
1208 scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
1209}
1210
1211/**
1212 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
1213 * @lrb: pointer to local reference block of completed command
1214 * @scsi_status: SCSI command status
1215 *
1216 * Returns value base on SCSI command status
1217 */
1218static inline int
1219ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
1220{
1221 int result = 0;
1222
1223 switch (scsi_status) {
1224 case SAM_STAT_GOOD:
1225 result |= DID_OK << 16 |
1226 COMMAND_COMPLETE << 8 |
1227 SAM_STAT_GOOD;
1228 break;
1229 case SAM_STAT_CHECK_CONDITION:
1230 result |= DID_OK << 16 |
1231 COMMAND_COMPLETE << 8 |
1232 SAM_STAT_CHECK_CONDITION;
1233 ufshcd_copy_sense_data(lrbp);
1234 break;
1235 case SAM_STAT_BUSY:
1236 result |= SAM_STAT_BUSY;
1237 break;
1238 case SAM_STAT_TASK_SET_FULL:
1239
1240 /*
1241 * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
1242 * depth needs to be adjusted to the exact number of
1243 * outstanding commands the LUN can handle at any given time.
1244 */
1245 ufshcd_adjust_lun_qdepth(lrbp->cmd);
1246 result |= SAM_STAT_TASK_SET_FULL;
1247 break;
1248 case SAM_STAT_TASK_ABORTED:
1249 result |= SAM_STAT_TASK_ABORTED;
1250 break;
1251 default:
1252 result |= DID_ERROR << 16;
1253 break;
1254 } /* end of switch */
1255
1256 return result;
1257}
1258
1259/**
1260 * ufshcd_transfer_rsp_status - Get overall status of the response
1261 * @hba: per adapter instance
1262 * @lrb: pointer to local reference block of completed command
1263 *
1264 * Returns result of the command to notify SCSI midlayer
1265 */
1266static inline int
1267ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1268{
1269 int result = 0;
1270 int scsi_status;
1271 int ocs;
1272
1273 /* overall command status of utrd */
1274 ocs = ufshcd_get_tr_ocs(lrbp);
1275
1276 switch (ocs) {
1277 case OCS_SUCCESS:
1278
1279 /* check if the returned transfer response is valid */
1280 result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
1281 if (result) {
1282 dev_err(&hba->pdev->dev,
1283 "Invalid response = %x\n", result);
1284 break;
1285 }
1286
1287 /*
1288 * get the response UPIU result to extract
1289 * the SCSI command status
1290 */
1291 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
1292
1293 /*
1294 * get the result based on SCSI status response
1295 * to notify the SCSI midlayer of the command status
1296 */
1297 scsi_status = result & MASK_SCSI_STATUS;
1298 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
1299 break;
1300 case OCS_ABORTED:
1301 result |= DID_ABORT << 16;
1302 break;
1303 case OCS_INVALID_CMD_TABLE_ATTR:
1304 case OCS_INVALID_PRDT_ATTR:
1305 case OCS_MISMATCH_DATA_BUF_SIZE:
1306 case OCS_MISMATCH_RESP_UPIU_SIZE:
1307 case OCS_PEER_COMM_FAILURE:
1308 case OCS_FATAL_ERROR:
1309 default:
1310 result |= DID_ERROR << 16;
1311 dev_err(&hba->pdev->dev,
1312 "OCS error from controller = %x\n", ocs);
1313 break;
1314 } /* end of switch */
1315
1316 return result;
1317}
1318
1319/**
1320 * ufshcd_transfer_req_compl - handle SCSI and query command completion
1321 * @hba: per adapter instance
1322 */
1323static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
1324{
1325 struct ufshcd_lrb *lrb;
1326 unsigned long completed_reqs;
1327 u32 tr_doorbell;
1328 int result;
1329 int index;
1330
1331 lrb = hba->lrb;
1332 tr_doorbell =
1333 readl(hba->mmio_base + REG_UTP_TRANSFER_REQ_DOOR_BELL);
1334 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
1335
1336 for (index = 0; index < hba->nutrs; index++) {
1337 if (test_bit(index, &completed_reqs)) {
1338
1339 result = ufshcd_transfer_rsp_status(hba, &lrb[index]);
1340
1341 if (lrb[index].cmd) {
1342 scsi_dma_unmap(lrb[index].cmd);
1343 lrb[index].cmd->result = result;
1344 lrb[index].cmd->scsi_done(lrb[index].cmd);
1345
1346 /* Mark completed command as NULL in LRB */
1347 lrb[index].cmd = NULL;
1348 }
1349 } /* end of if */
1350 } /* end of for */
1351
1352 /* clear corresponding bits of completed commands */
1353 hba->outstanding_reqs ^= completed_reqs;
1354
1355 /* Reset interrupt aggregation counters */
1356 ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
1357}
1358
1359/**
1360 * ufshcd_uic_cc_handler - handle UIC command completion
1361 * @work: pointer to a work queue structure
1362 *
1363 * Returns 0 on success, non-zero value on failure
1364 */
1365static void ufshcd_uic_cc_handler (struct work_struct *work)
1366{
1367 struct ufs_hba *hba;
1368
1369 hba = container_of(work, struct ufs_hba, uic_workq);
1370
1371 if ((hba->active_uic_cmd.command == UIC_CMD_DME_LINK_STARTUP) &&
1372 !(ufshcd_get_uic_cmd_result(hba))) {
1373
1374 if (ufshcd_make_hba_operational(hba))
1375 dev_err(&hba->pdev->dev,
1376 "cc: hba not operational state\n");
1377 return;
1378 }
1379}
1380
1381/**
1382 * ufshcd_fatal_err_handler - handle fatal errors
1383 * @hba: per adapter instance
1384 */
1385static void ufshcd_fatal_err_handler(struct work_struct *work)
1386{
1387 struct ufs_hba *hba;
1388 hba = container_of(work, struct ufs_hba, feh_workq);
1389
1390 /* check if reset is already in progress */
1391 if (hba->ufshcd_state != UFSHCD_STATE_RESET)
1392 ufshcd_do_reset(hba);
1393}
1394
1395/**
1396 * ufshcd_err_handler - Check for fatal errors
1397 * @work: pointer to a work queue structure
1398 */
1399static void ufshcd_err_handler(struct ufs_hba *hba)
1400{
1401 u32 reg;
1402
1403 if (hba->errors & INT_FATAL_ERRORS)
1404 goto fatal_eh;
1405
1406 if (hba->errors & UIC_ERROR) {
1407
1408 reg = readl(hba->mmio_base +
1409 REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
1410 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
1411 goto fatal_eh;
1412 }
1413 return;
1414fatal_eh:
1415 hba->ufshcd_state = UFSHCD_STATE_ERROR;
1416 schedule_work(&hba->feh_workq);
1417}
1418
1419/**
1420 * ufshcd_tmc_handler - handle task management function completion
1421 * @hba: per adapter instance
1422 */
1423static void ufshcd_tmc_handler(struct ufs_hba *hba)
1424{
1425 u32 tm_doorbell;
1426
1427 tm_doorbell = readl(hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL);
1428 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
1429 wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
1430}
1431
1432/**
1433 * ufshcd_sl_intr - Interrupt service routine
1434 * @hba: per adapter instance
1435 * @intr_status: contains interrupts generated by the controller
1436 */
1437static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
1438{
1439 hba->errors = UFSHCD_ERROR_MASK & intr_status;
1440 if (hba->errors)
1441 ufshcd_err_handler(hba);
1442
1443 if (intr_status & UIC_COMMAND_COMPL)
1444 schedule_work(&hba->uic_workq);
1445
1446 if (intr_status & UTP_TASK_REQ_COMPL)
1447 ufshcd_tmc_handler(hba);
1448
1449 if (intr_status & UTP_TRANSFER_REQ_COMPL)
1450 ufshcd_transfer_req_compl(hba);
1451}
1452
1453/**
1454 * ufshcd_intr - Main interrupt service routine
1455 * @irq: irq number
1456 * @__hba: pointer to adapter instance
1457 *
1458 * Returns IRQ_HANDLED - If interrupt is valid
1459 * IRQ_NONE - If invalid interrupt
1460 */
1461static irqreturn_t ufshcd_intr(int irq, void *__hba)
1462{
1463 u32 intr_status;
1464 irqreturn_t retval = IRQ_NONE;
1465 struct ufs_hba *hba = __hba;
1466
1467 spin_lock(hba->host->host_lock);
1468 intr_status = readl(hba->mmio_base + REG_INTERRUPT_STATUS);
1469
1470 if (intr_status) {
1471 ufshcd_sl_intr(hba, intr_status);
1472
1473 /* If UFSHCI 1.0 then clear interrupt status register */
1474 if (hba->ufs_version == UFSHCI_VERSION_10)
1475 writel(intr_status,
1476 (hba->mmio_base + REG_INTERRUPT_STATUS));
1477 retval = IRQ_HANDLED;
1478 }
1479 spin_unlock(hba->host->host_lock);
1480 return retval;
1481}
1482
1483/**
1484 * ufshcd_issue_tm_cmd - issues task management commands to controller
1485 * @hba: per adapter instance
1486 * @lrbp: pointer to local reference block
1487 *
1488 * Returns SUCCESS/FAILED
1489 */
1490static int
1491ufshcd_issue_tm_cmd(struct ufs_hba *hba,
1492 struct ufshcd_lrb *lrbp,
1493 u8 tm_function)
1494{
1495 struct utp_task_req_desc *task_req_descp;
1496 struct utp_upiu_task_req *task_req_upiup;
1497 struct Scsi_Host *host;
1498 unsigned long flags;
1499 int free_slot = 0;
1500 int err;
1501
1502 host = hba->host;
1503
1504 spin_lock_irqsave(host->host_lock, flags);
1505
1506 /* If task management queue is full */
1507 free_slot = ufshcd_get_tm_free_slot(hba);
1508 if (free_slot >= hba->nutmrs) {
1509 spin_unlock_irqrestore(host->host_lock, flags);
1510 dev_err(&hba->pdev->dev, "Task management queue full\n");
1511 err = FAILED;
1512 goto out;
1513 }
1514
1515 task_req_descp = hba->utmrdl_base_addr;
1516 task_req_descp += free_slot;
1517
1518 /* Configure task request descriptor */
1519 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
1520 task_req_descp->header.dword_2 =
1521 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
1522
1523 /* Configure task request UPIU */
1524 task_req_upiup =
1525 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
1526 task_req_upiup->header.dword_0 =
1527 cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
1528 lrbp->lun, lrbp->task_tag));
1529 task_req_upiup->header.dword_1 =
1530 cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0));
1531
1532 task_req_upiup->input_param1 = lrbp->lun;
1533 task_req_upiup->input_param1 =
1534 cpu_to_be32(task_req_upiup->input_param1);
1535 task_req_upiup->input_param2 = lrbp->task_tag;
1536 task_req_upiup->input_param2 =
1537 cpu_to_be32(task_req_upiup->input_param2);
1538
1539 /* send command to the controller */
1540 __set_bit(free_slot, &hba->outstanding_tasks);
1541 writel((1 << free_slot),
1542 (hba->mmio_base + REG_UTP_TASK_REQ_DOOR_BELL));
1543
1544 spin_unlock_irqrestore(host->host_lock, flags);
1545
1546 /* wait until the task management command is completed */
1547 err =
1548 wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
1549 (test_bit(free_slot,
1550 &hba->tm_condition) != 0),
1551 60 * HZ);
1552 if (!err) {
1553 dev_err(&hba->pdev->dev,
1554 "Task management command timed-out\n");
1555 err = FAILED;
1556 goto out;
1557 }
1558 clear_bit(free_slot, &hba->tm_condition);
1559 return ufshcd_task_req_compl(hba, free_slot);
1560out:
1561 return err;
1562}
1563
1564/**
1565 * ufshcd_device_reset - reset device and abort all the pending commands
1566 * @cmd: SCSI command pointer
1567 *
1568 * Returns SUCCESS/FAILED
1569 */
1570static int ufshcd_device_reset(struct scsi_cmnd *cmd)
1571{
1572 struct Scsi_Host *host;
1573 struct ufs_hba *hba;
1574 unsigned int tag;
1575 u32 pos;
1576 int err;
1577
1578 host = cmd->device->host;
1579 hba = shost_priv(host);
1580 tag = cmd->request->tag;
1581
1582 err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
1583 if (err)
1584 goto out;
1585
1586 for (pos = 0; pos < hba->nutrs; pos++) {
1587 if (test_bit(pos, &hba->outstanding_reqs) &&
1588 (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
1589
1590 /* clear the respective UTRLCLR register bit */
1591 ufshcd_utrl_clear(hba, pos);
1592
1593 clear_bit(pos, &hba->outstanding_reqs);
1594
1595 if (hba->lrb[pos].cmd) {
1596 scsi_dma_unmap(hba->lrb[pos].cmd);
1597 hba->lrb[pos].cmd->result =
1598 DID_ABORT << 16;
1599 hba->lrb[pos].cmd->scsi_done(cmd);
1600 hba->lrb[pos].cmd = NULL;
1601 }
1602 }
1603 } /* end of for */
1604out:
1605 return err;
1606}
1607
1608/**
1609 * ufshcd_host_reset - Main reset function registered with scsi layer
1610 * @cmd: SCSI command pointer
1611 *
1612 * Returns SUCCESS/FAILED
1613 */
1614static int ufshcd_host_reset(struct scsi_cmnd *cmd)
1615{
1616 struct ufs_hba *hba;
1617
1618 hba = shost_priv(cmd->device->host);
1619
1620 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
1621 return SUCCESS;
1622
1623 return (ufshcd_do_reset(hba) == SUCCESS) ? SUCCESS : FAILED;
1624}
1625
/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned long flags;
	unsigned int tag;
	int err;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	spin_lock_irqsave(host->host_lock, flags);

	/* check if command is still pending */
	if (!(test_bit(tag, &hba->outstanding_reqs))) {
		/* Already completed: nothing to abort */
		err = FAILED;
		spin_unlock_irqrestore(host->host_lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(host->host_lock, flags);

	/*
	 * NOTE(review): the host lock is dropped before the TM command is
	 * issued, so the command could complete concurrently — confirm
	 * the completion path tolerates the clear below.
	 */
	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
	if (err)
		goto out;

	scsi_dma_unmap(cmd);

	spin_lock_irqsave(host->host_lock, flags);

	/* clear the respective UTRLCLR register bit */
	ufshcd_utrl_clear(hba, tag);

	__clear_bit(tag, &hba->outstanding_reqs);
	hba->lrb[tag].cmd = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);
out:
	return err;
}
1671
/* SCSI mid-layer host template; EH callbacks map to UFS TM functions */
static struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.queuecommand		= ufshcd_queuecommand,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_destroy		= ufshcd_slave_destroy,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_device_reset,
	.eh_host_reset_handler	= ufshcd_host_reset,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
};
1687
/**
 * ufshcd_shutdown - main function to put the controller in reset state
 * @pdev: pointer to PCI device handle
 */
static void ufshcd_shutdown(struct pci_dev *pdev)
{
	struct ufs_hba *hba = pci_get_drvdata(pdev);

	ufshcd_hba_stop(hba);
}
1696
1697#ifdef CONFIG_PM
/**
 * ufshcd_suspend - suspend power management function
 * @pdev: pointer to PCI device handle
 * @state: power state
 *
 * Not implemented yet; always reports the operation as unsupported.
 *
 * Returns -ENOSYS
 */
static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/*
	 * TODO:
	 * 1. Block SCSI requests from SCSI midlayer
	 * 2. Change the internal driver state to non operational
	 * 3. Set UTRLRSR and UTMRLRSR bits to zero
	 * 4. Wait until outstanding commands are completed
	 * 5. Set HCE to zero to send the UFS host controller to reset state
	 */

	return -ENOSYS;
}
1718
/**
 * ufshcd_resume - resume power management function
 * @pdev: pointer to PCI device handle
 *
 * Not implemented yet; always reports the operation as unsupported.
 *
 * Returns -ENOSYS
 */
static int ufshcd_resume(struct pci_dev *pdev)
{
	/*
	 * TODO:
	 * 1. Set HCE to 1, to start the UFS host controller
	 * initialization process
	 * 2. Set UTRLRSR and UTMRLRSR bits to 1
	 * 3. Change the internal driver state to operational
	 * 4. Unblock SCSI requests from SCSI midlayer
	 */

	return -ENOSYS;
}
1738#endif /* CONFIG_PM */
1739
/**
 * ufshcd_hba_free - free allocated memory for
 * host memory space data structures
 * @hba: per adapter instance
 *
 * Unmaps the register space, releases the descriptor memory, then
 * releases the PCI BAR regions.
 */
static void ufshcd_hba_free(struct ufs_hba *hba)
{
	iounmap(hba->mmio_base);
	ufshcd_free_hba_memory(hba);
	pci_release_regions(hba->pdev);
}
1751
/**
 * ufshcd_remove - de-allocate PCI/SCSI host and host memory space
 * data structure memory
 * @pdev - pointer to PCI handle
 */
static void ufshcd_remove(struct pci_dev *pdev)
{
	struct ufs_hba *hba = pci_get_drvdata(pdev);

	/* disable interrupts */
	ufshcd_int_config(hba, UFSHCD_INT_DISABLE);
	free_irq(pdev->irq, hba);

	ufshcd_hba_stop(hba);
	/*
	 * NOTE(review): descriptor memory is freed before
	 * scsi_remove_host(); confirm no requests can still be in flight
	 * here, otherwise scsi_remove_host() should come first.
	 */
	ufshcd_hba_free(hba);

	scsi_remove_host(hba->host);
	scsi_host_put(hba->host);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}
1774
/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 * addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	int err;
	u64 dma_mask;

	/*
	 * If controller supports 64 bit addressing mode, then set the DMA
	 * mask to 64-bit, else set the DMA mask to 32-bit
	 */
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT)
		dma_mask = DMA_BIT_MASK(64);
	else
		dma_mask = DMA_BIT_MASK(32);

	err = pci_set_dma_mask(hba->pdev, dma_mask);
	if (err)
		return err;

	/* Coherent (descriptor) allocations use the same mask */
	err = pci_set_consistent_dma_mask(hba->pdev, dma_mask);

	return err;
}
1804
1805/**
1806 * ufshcd_probe - probe routine of the driver
1807 * @pdev: pointer to PCI device handle
1808 * @id: PCI device id
1809 *
1810 * Returns 0 on success, non-zero value on failure
1811 */
1812static int __devinit
1813ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1814{
1815 struct Scsi_Host *host;
1816 struct ufs_hba *hba;
1817 int err;
1818
1819 err = pci_enable_device(pdev);
1820 if (err) {
1821 dev_err(&pdev->dev, "pci_enable_device failed\n");
1822 goto out_error;
1823 }
1824
1825 pci_set_master(pdev);
1826
1827 host = scsi_host_alloc(&ufshcd_driver_template,
1828 sizeof(struct ufs_hba));
1829 if (!host) {
1830 dev_err(&pdev->dev, "scsi_host_alloc failed\n");
1831 err = -ENOMEM;
1832 goto out_disable;
1833 }
1834 hba = shost_priv(host);
1835
1836 err = pci_request_regions(pdev, UFSHCD);
1837 if (err < 0) {
1838 dev_err(&pdev->dev, "request regions failed\n");
1839 goto out_disable;
1840 }
1841
1842 hba->mmio_base = pci_ioremap_bar(pdev, 0);
1843 if (!hba->mmio_base) {
1844 dev_err(&pdev->dev, "memory map failed\n");
1845 err = -ENOMEM;
1846 goto out_release_regions;
1847 }
1848
1849 hba->host = host;
1850 hba->pdev = pdev;
1851
1852 /* Read capabilities registers */
1853 ufshcd_hba_capabilities(hba);
1854
1855 /* Get UFS version supported by the controller */
1856 hba->ufs_version = ufshcd_get_ufs_version(hba);
1857
1858 err = ufshcd_set_dma_mask(hba);
1859 if (err) {
1860 dev_err(&pdev->dev, "set dma mask failed\n");
1861 goto out_iounmap;
1862 }
1863
1864 /* Allocate memory for host memory space */
1865 err = ufshcd_memory_alloc(hba);
1866 if (err) {
1867 dev_err(&pdev->dev, "Memory allocation failed\n");
1868 goto out_iounmap;
1869 }
1870
1871 /* Configure LRB */
1872 ufshcd_host_memory_configure(hba);
1873
1874 host->can_queue = hba->nutrs;
1875 host->cmd_per_lun = hba->nutrs;
1876 host->max_id = UFSHCD_MAX_ID;
1877 host->max_lun = UFSHCD_MAX_LUNS;
1878 host->max_channel = UFSHCD_MAX_CHANNEL;
1879 host->unique_id = host->host_no;
1880 host->max_cmd_len = MAX_CDB_SIZE;
1881
1882 /* Initailize wait queue for task management */
1883 init_waitqueue_head(&hba->ufshcd_tm_wait_queue);
1884
1885 /* Initialize work queues */
1886 INIT_WORK(&hba->uic_workq, ufshcd_uic_cc_handler);
1887 INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
1888
1889 /* IRQ registration */
1890 err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
1891 if (err) {
1892 dev_err(&pdev->dev, "request irq failed\n");
1893 goto out_lrb_free;
1894 }
1895
1896 /* Enable SCSI tag mapping */
1897 err = scsi_init_shared_tag_map(host, host->can_queue);
1898 if (err) {
1899 dev_err(&pdev->dev, "init shared queue failed\n");
1900 goto out_free_irq;
1901 }
1902
1903 pci_set_drvdata(pdev, hba);
1904
1905 err = scsi_add_host(host, &pdev->dev);
1906 if (err) {
1907 dev_err(&pdev->dev, "scsi_add_host failed\n");
1908 goto out_free_irq;
1909 }
1910
1911 /* Initialization routine */
1912 err = ufshcd_initialize_hba(hba);
1913 if (err) {
1914 dev_err(&pdev->dev, "Initialization failed\n");
1915 goto out_free_irq;
1916 }
1917
1918 return 0;
1919
1920out_free_irq:
1921 free_irq(pdev->irq, hba);
1922out_lrb_free:
1923 ufshcd_free_hba_memory(hba);
1924out_iounmap:
1925 iounmap(hba->mmio_base);
1926out_release_regions:
1927 pci_release_regions(pdev);
1928out_disable:
1929 scsi_host_put(host);
1930 pci_clear_master(pdev);
1931 pci_disable_device(pdev);
1932out_error:
1933 return err;
1934}
1935
/* PCI IDs this driver binds to (Samsung UFS host controller) */
static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
	{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
1942
/* PCI driver glue: probe/remove/shutdown plus optional PM hooks */
static struct pci_driver ufshcd_pci_driver = {
	.name = UFSHCD,
	.id_table = ufshcd_pci_tbl,
	.probe = ufshcd_probe,
	.remove = __devexit_p(ufshcd_remove),
	.shutdown = ufshcd_shutdown,
#ifdef CONFIG_PM
	.suspend = ufshcd_suspend,
	.resume = ufshcd_resume,
#endif
};
1954
/**
 * ufshcd_init - Driver registration routine
 *
 * Returns 0 on success, the negative errno from
 * pci_register_driver() on failure.
 */
static int __init ufshcd_init(void)
{
	return pci_register_driver(&ufshcd_pci_driver);
}
module_init(ufshcd_init);
1963
/**
 * ufshcd_exit - Driver exit clean-up routine
 *
 * Unregisters the PCI driver; remove() runs for each bound device.
 */
static void __exit ufshcd_exit(void)
{
	pci_unregister_driver(&ufshcd_pci_driver);
}
module_exit(ufshcd_exit);
1972
1973
1974MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
1975 "Vinayak Holikatti <h.vinayak@samsung.com>");
1976MODULE_DESCRIPTION("Generic UFS host controller driver");
1977MODULE_LICENSE("GPL");
1978MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
new file mode 100644
index 000000000000..6e3510f71167
--- /dev/null
+++ b/drivers/scsi/ufs/ufshci.h
@@ -0,0 +1,376 @@
1/*
2 * Universal Flash Storage Host controller driver
3 *
4 * This code is based on drivers/scsi/ufs/ufshci.h
5 * Copyright (C) 2011-2012 Samsung India Software Operations
6 *
7 * Santosh Yaraganavi <santosh.sy@samsung.com>
8 * Vinayak Holikatti <h.vinayak@samsung.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * NO WARRANTY
21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25 * solely responsible for determining the appropriateness of using and
26 * distributing the Program and assumes all risks associated with its
27 * exercise of rights under this Agreement, including but not limited to
28 * the risks and costs of program errors, damage to or loss of data,
29 * programs or equipment, and unavailability or interruption of operations.
30
31 * DISCLAIMER OF LIABILITY
32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
43 * USA.
44 */
45
46#ifndef _UFSHCI_H
47#define _UFSHCI_H
48
49enum {
50 TASK_REQ_UPIU_SIZE_DWORDS = 8,
51 TASK_RSP_UPIU_SIZE_DWORDS = 8,
52 ALIGNED_UPIU_SIZE = 128,
53};
54
55/* UFSHCI Registers */
56enum {
57 REG_CONTROLLER_CAPABILITIES = 0x00,
58 REG_UFS_VERSION = 0x08,
59 REG_CONTROLLER_DEV_ID = 0x10,
60 REG_CONTROLLER_PROD_ID = 0x14,
61 REG_INTERRUPT_STATUS = 0x20,
62 REG_INTERRUPT_ENABLE = 0x24,
63 REG_CONTROLLER_STATUS = 0x30,
64 REG_CONTROLLER_ENABLE = 0x34,
65 REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 0x38,
66 REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 0x3C,
67 REG_UIC_ERROR_CODE_NETWORK_LAYER = 0x40,
68 REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 0x44,
69 REG_UIC_ERROR_CODE_DME = 0x48,
70 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 0x4C,
71 REG_UTP_TRANSFER_REQ_LIST_BASE_L = 0x50,
72 REG_UTP_TRANSFER_REQ_LIST_BASE_H = 0x54,
73 REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
74 REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
75 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
76 REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
77 REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
78 REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
79 REG_UTP_TASK_REQ_LIST_CLEAR = 0x7C,
80 REG_UTP_TASK_REQ_LIST_RUN_STOP = 0x80,
81 REG_UIC_COMMAND = 0x90,
82 REG_UIC_COMMAND_ARG_1 = 0x94,
83 REG_UIC_COMMAND_ARG_2 = 0x98,
84 REG_UIC_COMMAND_ARG_3 = 0x9C,
85};
86
87/* Controller capability masks */
88enum {
89 MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
90 MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
91 MASK_64_ADDRESSING_SUPPORT = 0x01000000,
92 MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
93 MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
94};
95
96/* UFS Version 08h */
97#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
98#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
99
100/* Controller UFSHCI version */
101enum {
102 UFSHCI_VERSION_10 = 0x00010000,
103 UFSHCI_VERSION_11 = 0x00010100,
104};
105
106/*
107 * HCDDID - Host Controller Identification Descriptor
108 * - Device ID and Device Class 10h
109 */
110#define DEVICE_CLASS UFS_MASK(0xFFFF, 0)
111#define DEVICE_ID UFS_MASK(0xFF, 24)
112
113/*
114 * HCPMID - Host Controller Identification Descriptor
115 * - Product/Manufacturer ID 14h
116 */
117#define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0)
118#define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16)
119
120#define UFS_BIT(x) (1L << (x))
121
122#define UTP_TRANSFER_REQ_COMPL UFS_BIT(0)
123#define UIC_DME_END_PT_RESET UFS_BIT(1)
124#define UIC_ERROR UFS_BIT(2)
125#define UIC_TEST_MODE UFS_BIT(3)
126#define UIC_POWER_MODE UFS_BIT(4)
127#define UIC_HIBERNATE_EXIT UFS_BIT(5)
128#define UIC_HIBERNATE_ENTER UFS_BIT(6)
129#define UIC_LINK_LOST UFS_BIT(7)
130#define UIC_LINK_STARTUP UFS_BIT(8)
131#define UTP_TASK_REQ_COMPL UFS_BIT(9)
132#define UIC_COMMAND_COMPL UFS_BIT(10)
133#define DEVICE_FATAL_ERROR UFS_BIT(11)
134#define CONTROLLER_FATAL_ERROR UFS_BIT(16)
135#define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17)
136
137#define UFSHCD_ERROR_MASK (UIC_ERROR |\
138 DEVICE_FATAL_ERROR |\
139 CONTROLLER_FATAL_ERROR |\
140 SYSTEM_BUS_FATAL_ERROR)
141
142#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
143 CONTROLLER_FATAL_ERROR |\
144 SYSTEM_BUS_FATAL_ERROR)
145
146/* HCS - Host Controller Status 30h */
147#define DEVICE_PRESENT UFS_BIT(0)
148#define UTP_TRANSFER_REQ_LIST_READY UFS_BIT(1)
149#define UTP_TASK_REQ_LIST_READY UFS_BIT(2)
150#define UIC_COMMAND_READY UFS_BIT(3)
151#define HOST_ERROR_INDICATOR UFS_BIT(4)
152#define DEVICE_ERROR_INDICATOR UFS_BIT(5)
153#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
154
155/* HCE - Host Controller Enable 34h */
156#define CONTROLLER_ENABLE UFS_BIT(0)
157#define CONTROLLER_DISABLE 0x0
158
159/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
160#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
161#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
162
163/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
164#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
165#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
166#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
167
168/* UECN - Host UIC Error Code Network Layer 40h */
169#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31)
170#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
171
172/* UECT - Host UIC Error Code Transport Layer 44h */
173#define UIC_TRANSPORT_LAYER_ERROR UFS_BIT(31)
174#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
175
176/* UECDME - Host UIC Error Code DME 48h */
177#define UIC_DME_ERROR UFS_BIT(31)
178#define UIC_DME_ERROR_CODE_MASK 0x1
179
180#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF
181#define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8)
182#define INT_AGGR_COUNTER_AND_TIMER_RESET UFS_BIT(16)
183#define INT_AGGR_STATUS_BIT UFS_BIT(20)
184#define INT_AGGR_PARAM_WRITE UFS_BIT(24)
185#define INT_AGGR_ENABLE UFS_BIT(31)
186
187/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
188#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT UFS_BIT(0)
189
190/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
191#define UTP_TASK_REQ_LIST_RUN_STOP_BIT UFS_BIT(0)
192
193/* UICCMD - UIC Command */
194#define COMMAND_OPCODE_MASK 0xFF
195#define GEN_SELECTOR_INDEX_MASK 0xFFFF
196
197#define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16)
198#define RESET_LEVEL 0xFF
199
200#define ATTR_SET_TYPE_MASK UFS_MASK(0xFF, 16)
201#define CONFIG_RESULT_CODE_MASK 0xFF
202#define GENERIC_ERROR_CODE_MASK 0xFF
203
204/* UIC Commands */
205enum {
206 UIC_CMD_DME_GET = 0x01,
207 UIC_CMD_DME_SET = 0x02,
208 UIC_CMD_DME_PEER_GET = 0x03,
209 UIC_CMD_DME_PEER_SET = 0x04,
210 UIC_CMD_DME_POWERON = 0x10,
211 UIC_CMD_DME_POWEROFF = 0x11,
212 UIC_CMD_DME_ENABLE = 0x12,
213 UIC_CMD_DME_RESET = 0x14,
214 UIC_CMD_DME_END_PT_RST = 0x15,
215 UIC_CMD_DME_LINK_STARTUP = 0x16,
216 UIC_CMD_DME_HIBER_ENTER = 0x17,
217 UIC_CMD_DME_HIBER_EXIT = 0x18,
218 UIC_CMD_DME_TEST_MODE = 0x1A,
219};
220
221/* UIC Config result code / Generic error code */
222enum {
223 UIC_CMD_RESULT_SUCCESS = 0x00,
224 UIC_CMD_RESULT_INVALID_ATTR = 0x01,
225 UIC_CMD_RESULT_FAILURE = 0x01,
226 UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02,
227 UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03,
228 UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04,
229 UIC_CMD_RESULT_BAD_INDEX = 0x05,
230 UIC_CMD_RESULT_LOCKED_ATTR = 0x06,
231 UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07,
232 UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08,
233 UIC_CMD_RESULT_BUSY = 0x09,
234 UIC_CMD_RESULT_DME_FAILURE = 0x0A,
235};
236
237#define MASK_UIC_COMMAND_RESULT 0xFF
238
239#define INT_AGGR_COUNTER_THRESHOLD_VALUE (0x1F << 8)
240#define INT_AGGR_TIMEOUT_VALUE (0x02)
241
242/* Interrupt disable masks */
243enum {
244 /* Interrupt disable mask for UFSHCI v1.0 */
245 INTERRUPT_DISABLE_MASK_10 = 0xFFFF,
246
247 /* Interrupt disable mask for UFSHCI v1.1 */
248 INTERRUPT_DISABLE_MASK_11 = 0x0,
249};
250
251/*
252 * Request Descriptor Definitions
253 */
254
255/* Transfer request command type */
256enum {
257 UTP_CMD_TYPE_SCSI = 0x0,
258 UTP_CMD_TYPE_UFS = 0x1,
259 UTP_CMD_TYPE_DEV_MANAGE = 0x2,
260};
261
262enum {
263 UTP_SCSI_COMMAND = 0x00000000,
264 UTP_NATIVE_UFS_COMMAND = 0x10000000,
265 UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
266 UTP_REQ_DESC_INT_CMD = 0x01000000,
267};
268
269/* UTP Transfer Request Data Direction (DD) */
270enum {
271 UTP_NO_DATA_TRANSFER = 0x00000000,
272 UTP_HOST_TO_DEVICE = 0x02000000,
273 UTP_DEVICE_TO_HOST = 0x04000000,
274};
275
276/* Overall command status values */
277enum {
278 OCS_SUCCESS = 0x0,
279 OCS_INVALID_CMD_TABLE_ATTR = 0x1,
280 OCS_INVALID_PRDT_ATTR = 0x2,
281 OCS_MISMATCH_DATA_BUF_SIZE = 0x3,
282 OCS_MISMATCH_RESP_UPIU_SIZE = 0x4,
283 OCS_PEER_COMM_FAILURE = 0x5,
284 OCS_ABORTED = 0x6,
285 OCS_FATAL_ERROR = 0x7,
286 OCS_INVALID_COMMAND_STATUS = 0x0F,
287 MASK_OCS = 0x0F,
288};
289
/**
 * struct ufshcd_sg_entry - UFSHCI PRD Entry
 * @base_addr: Lower 32bit physical address		DW-0
 * @upper_addr: Upper 32bit physical address		DW-1
 * @reserved: Reserved for future use		DW-2
 * @size: size of physical segment		DW-3
 *
 * NOTE(review): these fields are device-visible; presumably they must
 * be little-endian (__le32) per UFSHCI — confirm against the spec.
 */
struct ufshcd_sg_entry {
	u32    base_addr;
	u32    upper_addr;
	u32    reserved;
	u32    size;
};
303
/**
 * struct utp_transfer_cmd_desc - UFS Command Descriptor structure
 * @command_upiu: Command UPIU Frame address
 * @response_upiu: Response UPIU Frame address
 * @prd_table: Physical Region Descriptor
 *
 * One entry per transfer request slot; the PRD table is sized for the
 * host template's SG_ALL scatter-gather limit.
 */
struct utp_transfer_cmd_desc {
	u8 command_upiu[ALIGNED_UPIU_SIZE];
	u8 response_upiu[ALIGNED_UPIU_SIZE];
	struct ufshcd_sg_entry    prd_table[SG_ALL];
};
315
/**
 * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
 * @dword_0: Descriptor Header DW0
 * @dword_1: Descriptor Header DW1
 * @dword_2: Descriptor Header DW2
 * @dword_3: Descriptor Header DW3
 */
struct request_desc_header {
	u32 dword_0;
	u32 dword_1;
	u32 dword_2;
	u32 dword_3;
};
329
/**
 * struct utp_transfer_req_desc - UTRD structure
 * @header: UTRD header DW-0 to DW-3
 * @command_desc_base_addr_lo: UCD base address low DW-4
 * @command_desc_base_addr_hi: UCD base address high DW-5
 * @response_upiu_length: response UPIU length DW-6
 * @response_upiu_offset: response UPIU offset DW-6
 * @prd_table_length: Physical region descriptor length DW-7
 * @prd_table_offset: Physical region descriptor offset DW-7
 */
struct utp_transfer_req_desc {

	/* DW 0-3 */
	struct request_desc_header header;

	/* DW 4-5*/
	u32  command_desc_base_addr_lo;
	u32  command_desc_base_addr_hi;

	/* DW 6 */
	u16  response_upiu_length;
	u16  response_upiu_offset;

	/* DW 7 */
	u16  prd_table_length;
	u16  prd_table_offset;
};
357
/**
 * struct utp_task_req_desc - UTMRD structure
 * @header: UTMRD header DW-0 to DW-3
 * @task_req_upiu: task request UPIU stored inline, DW-4 to DW-11
 * @task_rsp_upiu: task response UPIU stored inline, DW-12 to DW-19
 *
 * The UPIUs are embedded arrays, not pointers, so the whole descriptor
 * is one contiguous device-visible block.
 */
struct utp_task_req_desc {

	/* DW 0-3 */
	struct request_desc_header header;

	/* DW 4-11 */
	u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];

	/* DW 12-19 */
	u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
};
375
376#endif /* End of Header */
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 7264116185d5..4411d4224401 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -17,7 +17,7 @@
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 * 19 *
20 * Maintained by: Alok N Kataria <akataria@vmware.com> 20 * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
21 * 21 *
22 */ 22 */
23 23
@@ -1178,11 +1178,67 @@ static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
1178 return 0; 1178 return 0;
1179} 1179}
1180 1180
1181/*
1182 * Query the device, fetch the config info and return the
1183 * maximum number of targets on the adapter. In case of
1184 * failure due to any reason return default i.e. 16.
1185 */
1186static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
1187{
1188 struct PVSCSICmdDescConfigCmd cmd;
1189 struct PVSCSIConfigPageHeader *header;
1190 struct device *dev;
1191 dma_addr_t configPagePA;
1192 void *config_page;
1193 u32 numPhys = 16;
1194
1195 dev = pvscsi_dev(adapter);
1196 config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
1197 &configPagePA);
1198 if (!config_page) {
1199 dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
1200 goto exit;
1201 }
1202 BUG_ON(configPagePA & ~PAGE_MASK);
1203
1204 /* Fetch config info from the device. */
1205 cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32;
1206 cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
1207 cmd.cmpAddr = configPagePA;
1208 cmd._pad = 0;
1209
1210 /*
1211 * Mark the completion page header with error values. If the device
1212 * completes the command successfully, it sets the status values to
1213 * indicate success.
1214 */
1215 header = config_page;
1216 memset(header, 0, sizeof *header);
1217 header->hostStatus = BTSTAT_INVPARAM;
1218 header->scsiStatus = SDSTAT_CHECK;
1219
1220 pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd);
1221
1222 if (header->hostStatus == BTSTAT_SUCCESS &&
1223 header->scsiStatus == SDSTAT_GOOD) {
1224 struct PVSCSIConfigPageController *config;
1225
1226 config = config_page;
1227 numPhys = config->numPhys;
1228 } else
1229 dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
1230 header->hostStatus, header->scsiStatus);
1231 pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
1232exit:
1233 return numPhys;
1234}
1235
1181static int __devinit pvscsi_probe(struct pci_dev *pdev, 1236static int __devinit pvscsi_probe(struct pci_dev *pdev,
1182 const struct pci_device_id *id) 1237 const struct pci_device_id *id)
1183{ 1238{
1184 struct pvscsi_adapter *adapter; 1239 struct pvscsi_adapter *adapter;
1185 struct Scsi_Host *host; 1240 struct Scsi_Host *host;
1241 struct device *dev;
1186 unsigned int i; 1242 unsigned int i;
1187 unsigned long flags = 0; 1243 unsigned long flags = 0;
1188 int error; 1244 int error;
@@ -1272,6 +1328,13 @@ static int __devinit pvscsi_probe(struct pci_dev *pdev,
1272 } 1328 }
1273 1329
1274 /* 1330 /*
1331 * Ask the device for max number of targets.
1332 */
1333 host->max_id = pvscsi_get_max_targets(adapter);
1334 dev = pvscsi_dev(adapter);
1335 dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id);
1336
1337 /*
1275 * From this point on we should reset the adapter if anything goes 1338 * From this point on we should reset the adapter if anything goes
1276 * wrong. 1339 * wrong.
1277 */ 1340 */
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 62e36e75715e..3546e8662e30 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -17,7 +17,7 @@
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 * 19 *
20 * Maintained by: Alok N Kataria <akataria@vmware.com> 20 * Maintained by: Arvind Kumar <arvindkumar@vmware.com>
21 * 21 *
22 */ 22 */
23 23
@@ -26,7 +26,7 @@
26 26
27#include <linux/types.h> 27#include <linux/types.h>
28 28
29#define PVSCSI_DRIVER_VERSION_STRING "1.0.1.0-k" 29#define PVSCSI_DRIVER_VERSION_STRING "1.0.2.0-k"
30 30
31#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128 31#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
32 32
@@ -39,28 +39,45 @@
39 * host adapter status/error codes 39 * host adapter status/error codes
40 */ 40 */
41enum HostBusAdapterStatus { 41enum HostBusAdapterStatus {
42 BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */ 42 BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
43 BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a, 43 BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
44 BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b, 44 BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
45 BTSTAT_DATA_UNDERRUN = 0x0c, 45 BTSTAT_DATA_UNDERRUN = 0x0c,
46 BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */ 46 BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
47 BTSTAT_DATARUN = 0x12, /* data overrun/underrun */ 47 BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
48 BTSTAT_BUSFREE = 0x13, /* unexpected bus free */ 48 BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
49 BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */ 49 BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence
50 BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */ 50 * requested by target */
51 BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */ 51 BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from
52 BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */ 52 * first CCB */
53 BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */ 53 BTSTAT_INVPARAM = 0x1a, /* invalid parameter in CCB or segment
54 BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */ 54 * list */
55 BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */ 55 BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
56 BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */ 56 BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message
57 BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */ 57 * rejected by target */
58 BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */ 58 BTSTAT_BADMSG = 0x1d, /* unsupported message received by the
59 BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */ 59 * host adapter */
60 BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */ 60 BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
61 BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */ 61 BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN,
62 BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */ 62 * sent a SCSI RST */
63 BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */ 63 BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
64 BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI
65 * RST */
66 BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly
67 * (w/o tag) */
68 BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
69 BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
70 BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
71 BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
72 BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
73};
74
75/*
76 * SCSI device status values.
77 */
78enum ScsiDeviceStatus {
79 SDSTAT_GOOD = 0x00, /* No errors. */
80 SDSTAT_CHECK = 0x02, /* Check condition. */
64}; 81};
65 82
66/* 83/*
@@ -114,6 +131,29 @@ struct PVSCSICmdDescResetDevice {
114} __packed; 131} __packed;
115 132
116/* 133/*
134 * Command descriptor for PVSCSI_CMD_CONFIG --
135 */
136
137struct PVSCSICmdDescConfigCmd {
138 u64 cmpAddr;
139 u64 configPageAddress;
140 u32 configPageNum;
141 u32 _pad;
142} __packed;
143
144enum PVSCSIConfigPageType {
145 PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958,
146 PVSCSI_CONFIG_PAGE_PHY = 0x1959,
147 PVSCSI_CONFIG_PAGE_DEVICE = 0x195a,
148};
149
150enum PVSCSIConfigPageAddressType {
151 PVSCSI_CONFIG_CONTROLLER_ADDRESS = 0x2120,
152 PVSCSI_CONFIG_BUSTARGET_ADDRESS = 0x2121,
153 PVSCSI_CONFIG_PHY_ADDRESS = 0x2122,
154};
155
156/*
117 * Command descriptor for PVSCSI_CMD_ABORT_CMD -- 157 * Command descriptor for PVSCSI_CMD_ABORT_CMD --
118 * 158 *
119 * - currently does not support specifying the LUN. 159 * - currently does not support specifying the LUN.
@@ -332,6 +372,27 @@ struct PVSCSIRingCmpDesc {
332 u32 _pad[2]; 372 u32 _pad[2];
333} __packed; 373} __packed;
334 374
375struct PVSCSIConfigPageHeader {
376 u32 pageNum;
377 u16 numDwords;
378 u16 hostStatus;
379 u16 scsiStatus;
380 u16 reserved[3];
381} __packed;
382
383struct PVSCSIConfigPageController {
384 struct PVSCSIConfigPageHeader header;
385 u64 nodeWWN; /* Device name as defined in the SAS spec. */
386 u16 manufacturer[64];
387 u16 serialNumber[64];
388 u16 opromVersion[32];
389 u16 hwVersion[32];
390 u16 firmwareVersion[32];
391 u32 numPhys;
392 u8 useConsecutivePhyWWNs;
393 u8 reserved[3];
394} __packed;
395
335/* 396/*
336 * Interrupt status / IRQ bits. 397 * Interrupt status / IRQ bits.
337 */ 398 */
diff --git a/drivers/sh/intc/balancing.c b/drivers/sh/intc/balancing.c
index cec7a96f2c09..bc780807ccb0 100644
--- a/drivers/sh/intc/balancing.c
+++ b/drivers/sh/intc/balancing.c
@@ -9,7 +9,7 @@
9 */ 9 */
10#include "internals.h" 10#include "internals.h"
11 11
12static unsigned long dist_handle[NR_IRQS]; 12static unsigned long dist_handle[INTC_NR_IRQS];
13 13
14void intc_balancing_enable(unsigned int irq) 14void intc_balancing_enable(unsigned int irq)
15{ 15{
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index 7b246efa94ea..012df2676a26 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -2,13 +2,14 @@
2 * IRQ chip definitions for INTC IRQs. 2 * IRQ chip definitions for INTC IRQs.
3 * 3 *
4 * Copyright (C) 2007, 2008 Magnus Damm 4 * Copyright (C) 2007, 2008 Magnus Damm
5 * Copyright (C) 2009, 2010 Paul Mundt 5 * Copyright (C) 2009 - 2012 Paul Mundt
6 * 6 *
7 * This file is subject to the terms and conditions of the GNU General Public 7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive 8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details. 9 * for more details.
10 */ 10 */
11#include <linux/cpumask.h> 11#include <linux/cpumask.h>
12#include <linux/bsearch.h>
12#include <linux/io.h> 13#include <linux/io.h>
13#include "internals.h" 14#include "internals.h"
14 15
@@ -58,11 +59,6 @@ static void intc_disable(struct irq_data *data)
58 } 59 }
59} 60}
60 61
61static int intc_set_wake(struct irq_data *data, unsigned int on)
62{
63 return 0; /* allow wakeup, but setup hardware in intc_suspend() */
64}
65
66#ifdef CONFIG_SMP 62#ifdef CONFIG_SMP
67/* 63/*
68 * This is held with the irq desc lock held, so we don't require any 64 * This is held with the irq desc lock held, so we don't require any
@@ -78,7 +74,7 @@ static int intc_set_affinity(struct irq_data *data,
78 74
79 cpumask_copy(data->affinity, cpumask); 75 cpumask_copy(data->affinity, cpumask);
80 76
81 return 0; 77 return IRQ_SET_MASK_OK_NOCOPY;
82} 78}
83#endif 79#endif
84 80
@@ -122,28 +118,12 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
122 unsigned int nr_hp, 118 unsigned int nr_hp,
123 unsigned int irq) 119 unsigned int irq)
124{ 120{
125 int i; 121 struct intc_handle_int key;
126
127 /*
128 * this doesn't scale well, but...
129 *
130 * this function should only be used for cerain uncommon
131 * operations such as intc_set_priority() and intc_set_type()
132 * and in those rare cases performance doesn't matter that much.
133 * keeping the memory footprint low is more important.
134 *
135 * one rather simple way to speed this up and still keep the
136 * memory footprint down is to make sure the array is sorted
137 * and then perform a bisect to lookup the irq.
138 */
139 for (i = 0; i < nr_hp; i++) {
140 if ((hp + i)->irq != irq)
141 continue;
142 122
143 return hp + i; 123 key.irq = irq;
144 } 124 key.handle = 0;
145 125
146 return NULL; 126 return bsearch(&key, hp, nr_hp, sizeof(*hp), intc_handle_int_cmp);
147} 127}
148 128
149int intc_set_priority(unsigned int irq, unsigned int prio) 129int intc_set_priority(unsigned int irq, unsigned int prio)
@@ -223,10 +203,9 @@ struct irq_chip intc_irq_chip = {
223 .irq_mask_ack = intc_mask_ack, 203 .irq_mask_ack = intc_mask_ack,
224 .irq_enable = intc_enable, 204 .irq_enable = intc_enable,
225 .irq_disable = intc_disable, 205 .irq_disable = intc_disable,
226 .irq_shutdown = intc_disable,
227 .irq_set_type = intc_set_type, 206 .irq_set_type = intc_set_type,
228 .irq_set_wake = intc_set_wake,
229#ifdef CONFIG_SMP 207#ifdef CONFIG_SMP
230 .irq_set_affinity = intc_set_affinity, 208 .irq_set_affinity = intc_set_affinity,
231#endif 209#endif
210 .flags = IRQCHIP_SKIP_SET_WAKE,
232}; 211};
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index e53e449b4eca..7e562ccb6997 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -2,7 +2,7 @@
2 * Shared interrupt handling code for IPR and INTC2 types of IRQs. 2 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
3 * 3 *
4 * Copyright (C) 2007, 2008 Magnus Damm 4 * Copyright (C) 2007, 2008 Magnus Damm
5 * Copyright (C) 2009, 2010 Paul Mundt 5 * Copyright (C) 2009 - 2012 Paul Mundt
6 * 6 *
7 * Based on intc2.c and ipr.c 7 * Based on intc2.c and ipr.c
8 * 8 *
@@ -31,18 +31,19 @@
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <linux/radix-tree.h> 32#include <linux/radix-tree.h>
33#include <linux/export.h> 33#include <linux/export.h>
34#include <linux/sort.h>
34#include "internals.h" 35#include "internals.h"
35 36
36LIST_HEAD(intc_list); 37LIST_HEAD(intc_list);
37DEFINE_RAW_SPINLOCK(intc_big_lock); 38DEFINE_RAW_SPINLOCK(intc_big_lock);
38unsigned int nr_intc_controllers; 39static unsigned int nr_intc_controllers;
39 40
40/* 41/*
41 * Default priority level 42 * Default priority level
42 * - this needs to be at least 2 for 5-bit priorities on 7780 43 * - this needs to be at least 2 for 5-bit priorities on 7780
43 */ 44 */
44static unsigned int default_prio_level = 2; /* 2 - 16 */ 45static unsigned int default_prio_level = 2; /* 2 - 16 */
45static unsigned int intc_prio_level[NR_IRQS]; /* for now */ 46static unsigned int intc_prio_level[INTC_NR_IRQS]; /* for now */
46 47
47unsigned int intc_get_dfl_prio_level(void) 48unsigned int intc_get_dfl_prio_level(void)
48{ 49{
@@ -267,6 +268,9 @@ int __init register_intc_controller(struct intc_desc *desc)
267 k += save_reg(d, k, hw->prio_regs[i].set_reg, smp); 268 k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
268 k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp); 269 k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
269 } 270 }
271
272 sort(d->prio, hw->nr_prio_regs, sizeof(*d->prio),
273 intc_handle_int_cmp, NULL);
270 } 274 }
271 275
272 if (hw->sense_regs) { 276 if (hw->sense_regs) {
@@ -277,6 +281,9 @@ int __init register_intc_controller(struct intc_desc *desc)
277 281
278 for (i = 0; i < hw->nr_sense_regs; i++) 282 for (i = 0; i < hw->nr_sense_regs; i++)
279 k += save_reg(d, k, hw->sense_regs[i].reg, 0); 283 k += save_reg(d, k, hw->sense_regs[i].reg, 0);
284
285 sort(d->sense, hw->nr_sense_regs, sizeof(*d->sense),
286 intc_handle_int_cmp, NULL);
280 } 287 }
281 288
282 if (hw->subgroups) 289 if (hw->subgroups)
diff --git a/drivers/sh/intc/handle.c b/drivers/sh/intc/handle.c
index 057ce56829bf..7863a44918a2 100644
--- a/drivers/sh/intc/handle.c
+++ b/drivers/sh/intc/handle.c
@@ -13,7 +13,7 @@
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include "internals.h" 14#include "internals.h"
15 15
16static unsigned long ack_handle[NR_IRQS]; 16static unsigned long ack_handle[INTC_NR_IRQS];
17 17
18static intc_enum __init intc_grp_id(struct intc_desc *desc, 18static intc_enum __init intc_grp_id(struct intc_desc *desc,
19 intc_enum enum_id) 19 intc_enum enum_id)
@@ -172,9 +172,8 @@ intc_get_prio_handle(struct intc_desc *desc, struct intc_desc_int *d,
172 return 0; 172 return 0;
173} 173}
174 174
175static unsigned int __init intc_ack_data(struct intc_desc *desc, 175static unsigned int intc_ack_data(struct intc_desc *desc,
176 struct intc_desc_int *d, 176 struct intc_desc_int *d, intc_enum enum_id)
177 intc_enum enum_id)
178{ 177{
179 struct intc_mask_reg *mr = desc->hw.ack_regs; 178 struct intc_mask_reg *mr = desc->hw.ack_regs;
180 unsigned int i, j, fn, mode; 179 unsigned int i, j, fn, mode;
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index b0e9155ff739..f034a979a16f 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -108,6 +108,14 @@ static inline void activate_irq(int irq)
108#endif 108#endif
109} 109}
110 110
111static inline int intc_handle_int_cmp(const void *a, const void *b)
112{
113 const struct intc_handle_int *_a = a;
114 const struct intc_handle_int *_b = b;
115
116 return _a->irq - _b->irq;
117}
118
111/* access.c */ 119/* access.c */
112extern unsigned long 120extern unsigned long
113(*intc_reg_fns[])(unsigned long addr, unsigned long h, unsigned long data); 121(*intc_reg_fns[])(unsigned long addr, unsigned long h, unsigned long data);
@@ -157,7 +165,6 @@ void _intc_enable(struct irq_data *data, unsigned long handle);
157/* core.c */ 165/* core.c */
158extern struct list_head intc_list; 166extern struct list_head intc_list;
159extern raw_spinlock_t intc_big_lock; 167extern raw_spinlock_t intc_big_lock;
160extern unsigned int nr_intc_controllers;
161extern struct bus_type intc_subsys; 168extern struct bus_type intc_subsys;
162 169
163unsigned int intc_get_dfl_prio_level(void); 170unsigned int intc_get_dfl_prio_level(void);
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
index c7ec49ffd9f6..93cec21e788b 100644
--- a/drivers/sh/intc/virq.c
+++ b/drivers/sh/intc/virq.c
@@ -17,7 +17,7 @@
17#include <linux/export.h> 17#include <linux/export.h>
18#include "internals.h" 18#include "internals.h"
19 19
20static struct intc_map_entry intc_irq_xlate[NR_IRQS]; 20static struct intc_map_entry intc_irq_xlate[INTC_NR_IRQS];
21 21
22struct intc_virq_list { 22struct intc_virq_list {
23 unsigned int irq; 23 unsigned int irq;
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 8418eb036651..b9f0192758d6 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/spi/spi.h> 24#include <linux/spi/spi.h>
25#include <linux/types.h>
25 26
26#include "spi-dw.h" 27#include "spi-dw.h"
27 28
@@ -136,6 +137,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
136 txconf.dst_maxburst = LNW_DMA_MSIZE_16; 137 txconf.dst_maxburst = LNW_DMA_MSIZE_16;
137 txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 138 txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
138 txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 139 txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
140 txconf.device_fc = false;
139 141
140 txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, 142 txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
141 (unsigned long) &txconf); 143 (unsigned long) &txconf);
@@ -144,7 +146,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
144 dws->tx_sgl.dma_address = dws->tx_dma; 146 dws->tx_sgl.dma_address = dws->tx_dma;
145 dws->tx_sgl.length = dws->len; 147 dws->tx_sgl.length = dws->len;
146 148
147 txdesc = txchan->device->device_prep_slave_sg(txchan, 149 txdesc = dmaengine_prep_slave_sg(txchan,
148 &dws->tx_sgl, 150 &dws->tx_sgl,
149 1, 151 1,
150 DMA_MEM_TO_DEV, 152 DMA_MEM_TO_DEV,
@@ -158,6 +160,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
158 rxconf.src_maxburst = LNW_DMA_MSIZE_16; 160 rxconf.src_maxburst = LNW_DMA_MSIZE_16;
159 rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 161 rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
160 rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 162 rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
163 rxconf.device_fc = false;
161 164
162 rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, 165 rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
163 (unsigned long) &rxconf); 166 (unsigned long) &rxconf);
@@ -166,7 +169,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
166 dws->rx_sgl.dma_address = dws->rx_dma; 169 dws->rx_sgl.dma_address = dws->rx_dma;
167 dws->rx_sgl.length = dws->len; 170 dws->rx_sgl.length = dws->len;
168 171
169 rxdesc = rxchan->device->device_prep_slave_sg(rxchan, 172 rxdesc = dmaengine_prep_slave_sg(rxchan,
170 &dws->rx_sgl, 173 &dws->rx_sgl,
171 1, 174 1,
172 DMA_DEV_TO_MEM, 175 DMA_DEV_TO_MEM,
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index d46e55c720b7..6db2887852d6 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -633,8 +633,8 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
633 if (!nents) 633 if (!nents)
634 return ERR_PTR(-ENOMEM); 634 return ERR_PTR(-ENOMEM);
635 635
636 txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents, 636 txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents,
637 slave_dirn, DMA_CTRL_ACK); 637 slave_dirn, DMA_CTRL_ACK);
638 if (!txd) { 638 if (!txd) {
639 dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); 639 dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
640 return ERR_PTR(-ENOMEM); 640 return ERR_PTR(-ENOMEM);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index dc8485d1e883..96f0da66b185 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -880,10 +880,12 @@ static int configure_dma(struct pl022 *pl022)
880 struct dma_slave_config rx_conf = { 880 struct dma_slave_config rx_conf = {
881 .src_addr = SSP_DR(pl022->phybase), 881 .src_addr = SSP_DR(pl022->phybase),
882 .direction = DMA_DEV_TO_MEM, 882 .direction = DMA_DEV_TO_MEM,
883 .device_fc = false,
883 }; 884 };
884 struct dma_slave_config tx_conf = { 885 struct dma_slave_config tx_conf = {
885 .dst_addr = SSP_DR(pl022->phybase), 886 .dst_addr = SSP_DR(pl022->phybase),
886 .direction = DMA_MEM_TO_DEV, 887 .direction = DMA_MEM_TO_DEV,
888 .device_fc = false,
887 }; 889 };
888 unsigned int pages; 890 unsigned int pages;
889 int ret; 891 int ret;
@@ -1017,7 +1019,7 @@ static int configure_dma(struct pl022 *pl022)
1017 goto err_tx_sgmap; 1019 goto err_tx_sgmap;
1018 1020
1019 /* Send both scatterlists */ 1021 /* Send both scatterlists */
1020 rxdesc = rxchan->device->device_prep_slave_sg(rxchan, 1022 rxdesc = dmaengine_prep_slave_sg(rxchan,
1021 pl022->sgt_rx.sgl, 1023 pl022->sgt_rx.sgl,
1022 rx_sglen, 1024 rx_sglen,
1023 DMA_DEV_TO_MEM, 1025 DMA_DEV_TO_MEM,
@@ -1025,7 +1027,7 @@ static int configure_dma(struct pl022 *pl022)
1025 if (!rxdesc) 1027 if (!rxdesc)
1026 goto err_rxdesc; 1028 goto err_rxdesc;
1027 1029
1028 txdesc = txchan->device->device_prep_slave_sg(txchan, 1030 txdesc = dmaengine_prep_slave_sg(txchan,
1029 pl022->sgt_tx.sgl, 1031 pl022->sgt_tx.sgl,
1030 tx_sglen, 1032 tx_sglen,
1031 DMA_MEM_TO_DEV, 1033 DMA_MEM_TO_DEV,
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 5c6fa5ed3366..ec47d3bdfd13 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1099,7 +1099,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
1099 sg_dma_address(sg) = dma->rx_buf_dma + sg->offset; 1099 sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
1100 } 1100 }
1101 sg = dma->sg_rx_p; 1101 sg = dma->sg_rx_p;
1102 desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg, 1102 desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg,
1103 num, DMA_DEV_TO_MEM, 1103 num, DMA_DEV_TO_MEM,
1104 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1104 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1105 if (!desc_rx) { 1105 if (!desc_rx) {
@@ -1158,7 +1158,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
1158 sg_dma_address(sg) = dma->tx_buf_dma + sg->offset; 1158 sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
1159 } 1159 }
1160 sg = dma->sg_tx_p; 1160 sg = dma->sg_tx_p;
1161 desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx, 1161 desc_tx = dmaengine_prep_slave_sg(dma->chan_tx,
1162 sg, num, DMA_MEM_TO_DEV, 1162 sg, num, DMA_MEM_TO_DEV,
1163 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1163 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1164 if (!desc_tx) { 1164 if (!desc_tx) {
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 59e095362c81..c2832124bb3e 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -381,8 +381,7 @@ int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
381 381
382repeat: 382repeat:
383 fdt = files_fdtable(files); 383 fdt = files_fdtable(files);
384 fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds, 384 fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, files->next_fd);
385 files->next_fd);
386 385
387 /* 386 /*
388 * N.B. For clone tasks sharing a files structure, this test 387 * N.B. For clone tasks sharing a files structure, this test
@@ -410,11 +409,11 @@ repeat:
410 goto repeat; 409 goto repeat;
411 } 410 }
412 411
413 FD_SET(fd, fdt->open_fds); 412 __set_open_fd(fd, fdt);
414 if (flags & O_CLOEXEC) 413 if (flags & O_CLOEXEC)
415 FD_SET(fd, fdt->close_on_exec); 414 __set_close_on_exec(fd, fdt);
416 else 415 else
417 FD_CLR(fd, fdt->close_on_exec); 416 __clear_close_on_exec(fd, fdt);
418 files->next_fd = fd + 1; 417 files->next_fd = fd + 1;
419#if 1 418#if 1
420 /* Sanity check */ 419 /* Sanity check */
@@ -455,7 +454,7 @@ static void task_fd_install(
455static void __put_unused_fd(struct files_struct *files, unsigned int fd) 454static void __put_unused_fd(struct files_struct *files, unsigned int fd)
456{ 455{
457 struct fdtable *fdt = files_fdtable(files); 456 struct fdtable *fdt = files_fdtable(files);
458 __FD_CLR(fd, fdt->open_fds); 457 __clear_open_fd(fd, fdt);
459 if (fd < files->next_fd) 458 if (fd < files->next_fd)
460 files->next_fd = fd; 459 files->next_fd = fd;
461} 460}
@@ -481,7 +480,7 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
481 if (!filp) 480 if (!filp)
482 goto out_unlock; 481 goto out_unlock;
483 rcu_assign_pointer(fdt->fd[fd], NULL); 482 rcu_assign_pointer(fdt->fd[fd], NULL);
484 FD_CLR(fd, fdt->close_on_exec); 483 __clear_close_on_exec(fd, fdt);
485 __put_unused_fd(files, fd); 484 __put_unused_fd(files, fd);
486 spin_unlock(&files->file_lock); 485 spin_unlock(&files->file_lock);
487 retval = filp_close(filp, files); 486 retval = filp_close(filp, files);
diff --git a/drivers/staging/asus_oled/README b/drivers/staging/asus_oled/README
index 0d82a6d5fa58..2d721232467a 100644
--- a/drivers/staging/asus_oled/README
+++ b/drivers/staging/asus_oled/README
@@ -52,7 +52,7 @@ Configuration
52 52
53 There is only one option: start_off. 53 There is only one option: start_off.
54 You can use it by: 'modprobe asus_oled start_off=1', or by adding this 54 You can use it by: 'modprobe asus_oled start_off=1', or by adding this
55 line to /etc/modprobe.conf: 55 line to /etc/modprobe.d/asus_oled.conf:
56 options asus_oled start_off=1 56 options asus_oled start_off=1
57 57
58 With this option provided, asus_oled driver will switch off the display 58 With this option provided, asus_oled driver will switch off the display
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f7f71b2d3101..514a691abea0 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -18,3 +18,11 @@ config THERMAL_HWMON
18 depends on THERMAL 18 depends on THERMAL
19 depends on HWMON=y || HWMON=THERMAL 19 depends on HWMON=y || HWMON=THERMAL
20 default y 20 default y
21
22config SPEAR_THERMAL
23 bool "SPEAr thermal sensor driver"
24 depends on THERMAL
25 depends on PLAT_SPEAR
26 help
27 Enable this to plug the SPEAr thermal sensor driver into the Linux
28 thermal framework
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 31108a01c22e..a9fff0bf4b14 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -3,3 +3,4 @@
3# 3#
4 4
5obj-$(CONFIG_THERMAL) += thermal_sys.o 5obj-$(CONFIG_THERMAL) += thermal_sys.o
6obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o \ No newline at end of file
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
new file mode 100644
index 000000000000..c2e32df3b164
--- /dev/null
+++ b/drivers/thermal/spear_thermal.c
@@ -0,0 +1,206 @@
1/*
2 * SPEAr thermal driver.
3 *
4 * Copyright (C) 2011-2012 ST Microelectronics
5 * Author: Vincenzo Frascino <vincenzo.frascino@st.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/clk.h>
19#include <linux/device.h>
20#include <linux/err.h>
21#include <linux/io.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/platform_data/spear_thermal.h>
26#include <linux/thermal.h>
27
28#define MD_FACTOR 1000
29
30/* SPEAr Thermal Sensor Dev Structure */
31struct spear_thermal_dev {
32 /* pointer to base address of the thermal sensor */
33 void __iomem *thermal_base;
34 /* clk structure */
35 struct clk *clk;
36 /* pointer to thermal flags */
37 unsigned int flags;
38};
39
40static inline int thermal_get_temp(struct thermal_zone_device *thermal,
41 unsigned long *temp)
42{
43 struct spear_thermal_dev *stdev = thermal->devdata;
44
45 /*
46 * Data are ready to be read after 628 usec from POWERDOWN signal
47 * (PDN) = 1
48 */
49 *temp = (readl_relaxed(stdev->thermal_base) & 0x7F) * MD_FACTOR;
50 return 0;
51}
52
53static struct thermal_zone_device_ops ops = {
54 .get_temp = thermal_get_temp,
55};
56
57#ifdef CONFIG_PM
58static int spear_thermal_suspend(struct device *dev)
59{
60 struct platform_device *pdev = to_platform_device(dev);
61 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
62 struct spear_thermal_dev *stdev = spear_thermal->devdata;
63 unsigned int actual_mask = 0;
64
65 /* Disable SPEAr Thermal Sensor */
66 actual_mask = readl_relaxed(stdev->thermal_base);
67 writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);
68
69 clk_disable(stdev->clk);
70 dev_info(dev, "Suspended.\n");
71
72 return 0;
73}
74
75static int spear_thermal_resume(struct device *dev)
76{
77 struct platform_device *pdev = to_platform_device(dev);
78 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
79 struct spear_thermal_dev *stdev = spear_thermal->devdata;
80 unsigned int actual_mask = 0;
81 int ret = 0;
82
83 ret = clk_enable(stdev->clk);
84 if (ret) {
85 dev_err(&pdev->dev, "Can't enable clock\n");
86 return ret;
87 }
88
89 /* Enable SPEAr Thermal Sensor */
90 actual_mask = readl_relaxed(stdev->thermal_base);
91 writel_relaxed(actual_mask | stdev->flags, stdev->thermal_base);
92
93 dev_info(dev, "Resumed.\n");
94
95 return 0;
96}
97#endif
98
99static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
100 spear_thermal_resume);
101
102static int spear_thermal_probe(struct platform_device *pdev)
103{
104 struct thermal_zone_device *spear_thermal = NULL;
105 struct spear_thermal_dev *stdev;
106 struct spear_thermal_pdata *pdata;
107 int ret = 0;
108 struct resource *stres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
109
110 if (!stres) {
111 dev_err(&pdev->dev, "memory resource missing\n");
112 return -ENODEV;
113 }
114
115 pdata = dev_get_platdata(&pdev->dev);
116 if (!pdata) {
117 dev_err(&pdev->dev, "platform data is NULL\n");
118 return -EINVAL;
119 }
120
121 stdev = devm_kzalloc(&pdev->dev, sizeof(*stdev), GFP_KERNEL);
122 if (!stdev) {
123 dev_err(&pdev->dev, "kzalloc fail\n");
124 return -ENOMEM;
125 }
126
127 /* Enable thermal sensor */
128 stdev->thermal_base = devm_ioremap(&pdev->dev, stres->start,
129 resource_size(stres));
130 if (!stdev->thermal_base) {
131 dev_err(&pdev->dev, "ioremap failed\n");
132 return -ENOMEM;
133 }
134
135 stdev->clk = clk_get(&pdev->dev, NULL);
136 if (IS_ERR(stdev->clk)) {
137 dev_err(&pdev->dev, "Can't get clock\n");
138 return PTR_ERR(stdev->clk);
139 }
140
141 ret = clk_enable(stdev->clk);
142 if (ret) {
143 dev_err(&pdev->dev, "Can't enable clock\n");
144 goto put_clk;
145 }
146
147 stdev->flags = pdata->thermal_flags;
148 writel_relaxed(stdev->flags, stdev->thermal_base);
149
150 spear_thermal = thermal_zone_device_register("spear_thermal", 0,
151 stdev, &ops, 0, 0, 0, 0);
152 if (IS_ERR(spear_thermal)) {
153 dev_err(&pdev->dev, "thermal zone device is NULL\n");
154 ret = PTR_ERR(spear_thermal);
155 goto disable_clk;
156 }
157
158 platform_set_drvdata(pdev, spear_thermal);
159
160 dev_info(&spear_thermal->device, "Thermal Sensor Loaded at: 0x%p.\n",
161 stdev->thermal_base);
162
163 return 0;
164
165disable_clk:
166 clk_disable(stdev->clk);
167put_clk:
168 clk_put(stdev->clk);
169
170 return ret;
171}
172
173static int spear_thermal_exit(struct platform_device *pdev)
174{
175 unsigned int actual_mask = 0;
176 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
177 struct spear_thermal_dev *stdev = spear_thermal->devdata;
178
179 thermal_zone_device_unregister(spear_thermal);
180 platform_set_drvdata(pdev, NULL);
181
182 /* Disable SPEAr Thermal Sensor */
183 actual_mask = readl_relaxed(stdev->thermal_base);
184 writel_relaxed(actual_mask & ~stdev->flags, stdev->thermal_base);
185
186 clk_disable(stdev->clk);
187 clk_put(stdev->clk);
188
189 return 0;
190}
191
192static struct platform_driver spear_thermal_driver = {
193 .probe = spear_thermal_probe,
194 .remove = spear_thermal_exit,
195 .driver = {
196 .name = "spear_thermal",
197 .owner = THIS_MODULE,
198 .pm = &spear_thermal_pm_ops,
199 },
200};
201
202module_platform_driver(spear_thermal_driver);
203
204MODULE_AUTHOR("Vincenzo Frascino <vincenzo.frascino@st.com>");
205MODULE_DESCRIPTION("SPEAr thermal driver");
206MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 220ce7e31cf5..022bacb71a7e 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -23,6 +23,8 @@
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */ 24 */
25 25
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
26#include <linux/module.h> 28#include <linux/module.h>
27#include <linux/device.h> 29#include <linux/device.h>
28#include <linux/err.h> 30#include <linux/err.h>
@@ -39,8 +41,6 @@ MODULE_AUTHOR("Zhang Rui");
39MODULE_DESCRIPTION("Generic thermal management sysfs support"); 41MODULE_DESCRIPTION("Generic thermal management sysfs support");
40MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
41 43
42#define PREFIX "Thermal: "
43
44struct thermal_cooling_device_instance { 44struct thermal_cooling_device_instance {
45 int id; 45 int id;
46 char name[THERMAL_NAME_LENGTH]; 46 char name[THERMAL_NAME_LENGTH];
@@ -60,13 +60,11 @@ static LIST_HEAD(thermal_tz_list);
60static LIST_HEAD(thermal_cdev_list); 60static LIST_HEAD(thermal_cdev_list);
61static DEFINE_MUTEX(thermal_list_lock); 61static DEFINE_MUTEX(thermal_list_lock);
62 62
63static unsigned int thermal_event_seqnum;
64
65static int get_idr(struct idr *idr, struct mutex *lock, int *id) 63static int get_idr(struct idr *idr, struct mutex *lock, int *id)
66{ 64{
67 int err; 65 int err;
68 66
69 again: 67again:
70 if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0)) 68 if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
71 return -ENOMEM; 69 return -ENOMEM;
72 70
@@ -152,9 +150,9 @@ mode_store(struct device *dev, struct device_attribute *attr,
152 if (!tz->ops->set_mode) 150 if (!tz->ops->set_mode)
153 return -EPERM; 151 return -EPERM;
154 152
155 if (!strncmp(buf, "enabled", sizeof("enabled"))) 153 if (!strncmp(buf, "enabled", sizeof("enabled") - 1))
156 result = tz->ops->set_mode(tz, THERMAL_DEVICE_ENABLED); 154 result = tz->ops->set_mode(tz, THERMAL_DEVICE_ENABLED);
157 else if (!strncmp(buf, "disabled", sizeof("disabled"))) 155 else if (!strncmp(buf, "disabled", sizeof("disabled") - 1))
158 result = tz->ops->set_mode(tz, THERMAL_DEVICE_DISABLED); 156 result = tz->ops->set_mode(tz, THERMAL_DEVICE_DISABLED);
159 else 157 else
160 result = -EINVAL; 158 result = -EINVAL;
@@ -283,8 +281,7 @@ passive_show(struct device *dev, struct device_attribute *attr,
283static DEVICE_ATTR(type, 0444, type_show, NULL); 281static DEVICE_ATTR(type, 0444, type_show, NULL);
284static DEVICE_ATTR(temp, 0444, temp_show, NULL); 282static DEVICE_ATTR(temp, 0444, temp_show, NULL);
285static DEVICE_ATTR(mode, 0644, mode_show, mode_store); 283static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
286static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, \ 284static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
287 passive_store);
288 285
289static struct device_attribute trip_point_attrs[] = { 286static struct device_attribute trip_point_attrs[] = {
290 __ATTR(trip_point_0_type, 0444, trip_point_type_show, NULL), 287 __ATTR(trip_point_0_type, 0444, trip_point_type_show, NULL),
@@ -313,22 +310,6 @@ static struct device_attribute trip_point_attrs[] = {
313 __ATTR(trip_point_11_temp, 0444, trip_point_temp_show, NULL), 310 __ATTR(trip_point_11_temp, 0444, trip_point_temp_show, NULL),
314}; 311};
315 312
316#define TRIP_POINT_ATTR_ADD(_dev, _index, result) \
317do { \
318 result = device_create_file(_dev, \
319 &trip_point_attrs[_index * 2]); \
320 if (result) \
321 break; \
322 result = device_create_file(_dev, \
323 &trip_point_attrs[_index * 2 + 1]); \
324} while (0)
325
326#define TRIP_POINT_ATTR_REMOVE(_dev, _index) \
327do { \
328 device_remove_file(_dev, &trip_point_attrs[_index * 2]); \
329 device_remove_file(_dev, &trip_point_attrs[_index * 2 + 1]); \
330} while (0)
331
332/* sys I/F for cooling device */ 313/* sys I/F for cooling device */
333#define to_cooling_device(_dev) \ 314#define to_cooling_device(_dev) \
334 container_of(_dev, struct thermal_cooling_device, device) 315 container_of(_dev, struct thermal_cooling_device, device)
@@ -835,15 +816,14 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
835 return 0; 816 return 0;
836 817
837 device_remove_file(&tz->device, &dev->attr); 818 device_remove_file(&tz->device, &dev->attr);
838 remove_symbol_link: 819remove_symbol_link:
839 sysfs_remove_link(&tz->device.kobj, dev->name); 820 sysfs_remove_link(&tz->device.kobj, dev->name);
840 release_idr: 821release_idr:
841 release_idr(&tz->idr, &tz->lock, dev->id); 822 release_idr(&tz->idr, &tz->lock, dev->id);
842 free_mem: 823free_mem:
843 kfree(dev); 824 kfree(dev);
844 return result; 825 return result;
845} 826}
846
847EXPORT_SYMBOL(thermal_zone_bind_cooling_device); 827EXPORT_SYMBOL(thermal_zone_bind_cooling_device);
848 828
849/** 829/**
@@ -873,14 +853,13 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
873 853
874 return -ENODEV; 854 return -ENODEV;
875 855
876 unbind: 856unbind:
877 device_remove_file(&tz->device, &pos->attr); 857 device_remove_file(&tz->device, &pos->attr);
878 sysfs_remove_link(&tz->device.kobj, pos->name); 858 sysfs_remove_link(&tz->device.kobj, pos->name);
879 release_idr(&tz->idr, &tz->lock, pos->id); 859 release_idr(&tz->idr, &tz->lock, pos->id);
880 kfree(pos); 860 kfree(pos);
881 return 0; 861 return 0;
882} 862}
883
884EXPORT_SYMBOL(thermal_zone_unbind_cooling_device); 863EXPORT_SYMBOL(thermal_zone_unbind_cooling_device);
885 864
886static void thermal_release(struct device *dev) 865static void thermal_release(struct device *dev)
@@ -888,7 +867,8 @@ static void thermal_release(struct device *dev)
888 struct thermal_zone_device *tz; 867 struct thermal_zone_device *tz;
889 struct thermal_cooling_device *cdev; 868 struct thermal_cooling_device *cdev;
890 869
891 if (!strncmp(dev_name(dev), "thermal_zone", sizeof "thermal_zone" - 1)) { 870 if (!strncmp(dev_name(dev), "thermal_zone",
871 sizeof("thermal_zone") - 1)) {
892 tz = to_thermal_zone(dev); 872 tz = to_thermal_zone(dev);
893 kfree(tz); 873 kfree(tz);
894 } else { 874 } else {
@@ -908,8 +888,9 @@ static struct class thermal_class = {
908 * @devdata: device private data. 888 * @devdata: device private data.
909 * @ops: standard thermal cooling devices callbacks. 889 * @ops: standard thermal cooling devices callbacks.
910 */ 890 */
911struct thermal_cooling_device *thermal_cooling_device_register( 891struct thermal_cooling_device *
912 char *type, void *devdata, const struct thermal_cooling_device_ops *ops) 892thermal_cooling_device_register(char *type, void *devdata,
893 const struct thermal_cooling_device_ops *ops)
913{ 894{
914 struct thermal_cooling_device *cdev; 895 struct thermal_cooling_device *cdev;
915 struct thermal_zone_device *pos; 896 struct thermal_zone_device *pos;
@@ -974,12 +955,11 @@ struct thermal_cooling_device *thermal_cooling_device_register(
974 if (!result) 955 if (!result)
975 return cdev; 956 return cdev;
976 957
977 unregister: 958unregister:
978 release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); 959 release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
979 device_unregister(&cdev->device); 960 device_unregister(&cdev->device);
980 return ERR_PTR(result); 961 return ERR_PTR(result);
981} 962}
982
983EXPORT_SYMBOL(thermal_cooling_device_register); 963EXPORT_SYMBOL(thermal_cooling_device_register);
984 964
985/** 965/**
@@ -1024,7 +1004,6 @@ void thermal_cooling_device_unregister(struct
1024 device_unregister(&cdev->device); 1004 device_unregister(&cdev->device);
1025 return; 1005 return;
1026} 1006}
1027
1028EXPORT_SYMBOL(thermal_cooling_device_unregister); 1007EXPORT_SYMBOL(thermal_cooling_device_unregister);
1029 1008
1030/** 1009/**
@@ -1044,8 +1023,7 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
1044 1023
1045 if (tz->ops->get_temp(tz, &temp)) { 1024 if (tz->ops->get_temp(tz, &temp)) {
1046 /* get_temp failed - retry it later */ 1025 /* get_temp failed - retry it later */
1047 printk(KERN_WARNING PREFIX "failed to read out thermal zone " 1026 pr_warn("failed to read out thermal zone %d\n", tz->id);
1048 "%d\n", tz->id);
1049 goto leave; 1027 goto leave;
1050 } 1028 }
1051 1029
@@ -1060,9 +1038,8 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
1060 ret = tz->ops->notify(tz, count, 1038 ret = tz->ops->notify(tz, count,
1061 trip_type); 1039 trip_type);
1062 if (!ret) { 1040 if (!ret) {
1063 printk(KERN_EMERG 1041 pr_emerg("Critical temperature reached (%ld C), shutting down\n",
1064 "Critical temperature reached (%ld C), shutting down.\n", 1042 temp/1000);
1065 temp/1000);
1066 orderly_poweroff(true); 1043 orderly_poweroff(true);
1067 } 1044 }
1068 } 1045 }
@@ -1100,7 +1077,7 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
1100 1077
1101 tz->last_temperature = temp; 1078 tz->last_temperature = temp;
1102 1079
1103 leave: 1080leave:
1104 if (tz->passive) 1081 if (tz->passive)
1105 thermal_zone_device_set_polling(tz, tz->passive_delay); 1082 thermal_zone_device_set_polling(tz, tz->passive_delay);
1106 else if (tz->polling_delay) 1083 else if (tz->polling_delay)
@@ -1199,7 +1176,12 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
1199 } 1176 }
1200 1177
1201 for (count = 0; count < trips; count++) { 1178 for (count = 0; count < trips; count++) {
1202 TRIP_POINT_ATTR_ADD(&tz->device, count, result); 1179 result = device_create_file(&tz->device,
1180 &trip_point_attrs[count * 2]);
1181 if (result)
1182 break;
1183 result = device_create_file(&tz->device,
1184 &trip_point_attrs[count * 2 + 1]);
1203 if (result) 1185 if (result)
1204 goto unregister; 1186 goto unregister;
1205 tz->ops->get_trip_type(tz, count, &trip_type); 1187 tz->ops->get_trip_type(tz, count, &trip_type);
@@ -1235,12 +1217,11 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
1235 if (!result) 1217 if (!result)
1236 return tz; 1218 return tz;
1237 1219
1238 unregister: 1220unregister:
1239 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); 1221 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
1240 device_unregister(&tz->device); 1222 device_unregister(&tz->device);
1241 return ERR_PTR(result); 1223 return ERR_PTR(result);
1242} 1224}
1243
1244EXPORT_SYMBOL(thermal_zone_device_register); 1225EXPORT_SYMBOL(thermal_zone_device_register);
1245 1226
1246/** 1227/**
@@ -1279,9 +1260,12 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1279 if (tz->ops->get_mode) 1260 if (tz->ops->get_mode)
1280 device_remove_file(&tz->device, &dev_attr_mode); 1261 device_remove_file(&tz->device, &dev_attr_mode);
1281 1262
1282 for (count = 0; count < tz->trips; count++) 1263 for (count = 0; count < tz->trips; count++) {
1283 TRIP_POINT_ATTR_REMOVE(&tz->device, count); 1264 device_remove_file(&tz->device,
1284 1265 &trip_point_attrs[count * 2]);
1266 device_remove_file(&tz->device,
1267 &trip_point_attrs[count * 2 + 1]);
1268 }
1285 thermal_remove_hwmon_sysfs(tz); 1269 thermal_remove_hwmon_sysfs(tz);
1286 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); 1270 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
1287 idr_destroy(&tz->idr); 1271 idr_destroy(&tz->idr);
@@ -1289,7 +1273,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1289 device_unregister(&tz->device); 1273 device_unregister(&tz->device);
1290 return; 1274 return;
1291} 1275}
1292
1293EXPORT_SYMBOL(thermal_zone_device_unregister); 1276EXPORT_SYMBOL(thermal_zone_device_unregister);
1294 1277
1295#ifdef CONFIG_NET 1278#ifdef CONFIG_NET
@@ -1312,10 +1295,11 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
1312 void *msg_header; 1295 void *msg_header;
1313 int size; 1296 int size;
1314 int result; 1297 int result;
1298 static unsigned int thermal_event_seqnum;
1315 1299
1316 /* allocate memory */ 1300 /* allocate memory */
1317 size = nla_total_size(sizeof(struct thermal_genl_event)) + \ 1301 size = nla_total_size(sizeof(struct thermal_genl_event)) +
1318 nla_total_size(0); 1302 nla_total_size(0);
1319 1303
1320 skb = genlmsg_new(size, GFP_ATOMIC); 1304 skb = genlmsg_new(size, GFP_ATOMIC);
1321 if (!skb) 1305 if (!skb)
@@ -1331,8 +1315,8 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
1331 } 1315 }
1332 1316
1333 /* fill the data */ 1317 /* fill the data */
1334 attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, \ 1318 attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT,
1335 sizeof(struct thermal_genl_event)); 1319 sizeof(struct thermal_genl_event));
1336 1320
1337 if (!attr) { 1321 if (!attr) {
1338 nlmsg_free(skb); 1322 nlmsg_free(skb);
@@ -1359,7 +1343,7 @@ int thermal_generate_netlink_event(u32 orig, enum events event)
1359 1343
1360 result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC); 1344 result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC);
1361 if (result) 1345 if (result)
1362 printk(KERN_INFO "failed to send netlink event:%d", result); 1346 pr_info("failed to send netlink event:%d\n", result);
1363 1347
1364 return result; 1348 return result;
1365} 1349}
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index 794ecb40017c..e1235accab74 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -102,7 +102,7 @@
102 * You can find the original tools for this direct from Multitech 102 * You can find the original tools for this direct from Multitech
103 * ftp://ftp.multitech.com/ISI-Cards/ 103 * ftp://ftp.multitech.com/ISI-Cards/
104 * 104 *
105 * Having installed the cards the module options (/etc/modprobe.conf) 105 * Having installed the cards the module options (/etc/modprobe.d/)
106 * 106 *
107 * options isicom io=card1,card2,card3,card4 irq=card1,card2,card3,card4 107 * options isicom io=card1,card2,card3,card4 irq=card1,card2,card3,card4
108 * 108 *
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 20d795d9b591..0c65c9e66986 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -51,6 +51,7 @@
51#include <linux/dma-mapping.h> 51#include <linux/dma-mapping.h>
52#include <linux/scatterlist.h> 52#include <linux/scatterlist.h>
53#include <linux/delay.h> 53#include <linux/delay.h>
54#include <linux/types.h>
54 55
55#include <asm/io.h> 56#include <asm/io.h>
56#include <asm/sizes.h> 57#include <asm/sizes.h>
@@ -271,6 +272,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
271 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 272 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
272 .direction = DMA_MEM_TO_DEV, 273 .direction = DMA_MEM_TO_DEV,
273 .dst_maxburst = uap->fifosize >> 1, 274 .dst_maxburst = uap->fifosize >> 1,
275 .device_fc = false,
274 }; 276 };
275 struct dma_chan *chan; 277 struct dma_chan *chan;
276 dma_cap_mask_t mask; 278 dma_cap_mask_t mask;
@@ -304,6 +306,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
304 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 306 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
305 .direction = DMA_DEV_TO_MEM, 307 .direction = DMA_DEV_TO_MEM,
306 .src_maxburst = uap->fifosize >> 1, 308 .src_maxburst = uap->fifosize >> 1,
309 .device_fc = false,
307 }; 310 };
308 311
309 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); 312 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
@@ -481,7 +484,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
481 return -EBUSY; 484 return -EBUSY;
482 } 485 }
483 486
484 desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV, 487 desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
485 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 488 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
486 if (!desc) { 489 if (!desc) {
487 dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE); 490 dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
@@ -664,7 +667,6 @@ static void pl011_dma_rx_callback(void *data);
664static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) 667static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
665{ 668{
666 struct dma_chan *rxchan = uap->dmarx.chan; 669 struct dma_chan *rxchan = uap->dmarx.chan;
667 struct dma_device *dma_dev;
668 struct pl011_dmarx_data *dmarx = &uap->dmarx; 670 struct pl011_dmarx_data *dmarx = &uap->dmarx;
669 struct dma_async_tx_descriptor *desc; 671 struct dma_async_tx_descriptor *desc;
670 struct pl011_sgbuf *sgbuf; 672 struct pl011_sgbuf *sgbuf;
@@ -675,8 +677,7 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
675 /* Start the RX DMA job */ 677 /* Start the RX DMA job */
676 sgbuf = uap->dmarx.use_buf_b ? 678 sgbuf = uap->dmarx.use_buf_b ?
677 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; 679 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
678 dma_dev = rxchan->device; 680 desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
679 desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
680 DMA_DEV_TO_MEM, 681 DMA_DEV_TO_MEM,
681 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 682 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
682 /* 683 /*
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 332f2eb8abbc..e825460478be 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -844,7 +844,7 @@ static int dma_handle_rx(struct eg20t_port *priv)
844 844
845 sg_dma_address(sg) = priv->rx_buf_dma; 845 sg_dma_address(sg) = priv->rx_buf_dma;
846 846
847 desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx, 847 desc = dmaengine_prep_slave_sg(priv->chan_rx,
848 sg, 1, DMA_DEV_TO_MEM, 848 sg, 1, DMA_DEV_TO_MEM,
849 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 849 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
850 850
@@ -1003,7 +1003,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
1003 sg_dma_len(sg) = size; 1003 sg_dma_len(sg) = size;
1004 } 1004 }
1005 1005
1006 desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx, 1006 desc = dmaengine_prep_slave_sg(priv->chan_tx,
1007 priv->sg_tx_p, nent, DMA_MEM_TO_DEV, 1007 priv->sg_tx_p, nent, DMA_MEM_TO_DEV,
1008 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1008 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1009 if (!desc) { 1009 if (!desc) {
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 61b7fd2729cd..bf461cf99616 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1229,17 +1229,20 @@ static void sci_dma_tx_complete(void *arg)
1229 port->icount.tx += sg_dma_len(&s->sg_tx); 1229 port->icount.tx += sg_dma_len(&s->sg_tx);
1230 1230
1231 async_tx_ack(s->desc_tx); 1231 async_tx_ack(s->desc_tx);
1232 s->cookie_tx = -EINVAL;
1233 s->desc_tx = NULL; 1232 s->desc_tx = NULL;
1234 1233
1235 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 1234 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1236 uart_write_wakeup(port); 1235 uart_write_wakeup(port);
1237 1236
1238 if (!uart_circ_empty(xmit)) { 1237 if (!uart_circ_empty(xmit)) {
1238 s->cookie_tx = 0;
1239 schedule_work(&s->work_tx); 1239 schedule_work(&s->work_tx);
1240 } else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { 1240 } else {
1241 u16 ctrl = sci_in(port, SCSCR); 1241 s->cookie_tx = -EINVAL;
1242 sci_out(port, SCSCR, ctrl & ~SCSCR_TIE); 1242 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
1243 u16 ctrl = sci_in(port, SCSCR);
1244 sci_out(port, SCSCR, ctrl & ~SCSCR_TIE);
1245 }
1243 } 1246 }
1244 1247
1245 spin_unlock_irqrestore(&port->lock, flags); 1248 spin_unlock_irqrestore(&port->lock, flags);
@@ -1338,7 +1341,7 @@ static void sci_submit_rx(struct sci_port *s)
1338 struct scatterlist *sg = &s->sg_rx[i]; 1341 struct scatterlist *sg = &s->sg_rx[i];
1339 struct dma_async_tx_descriptor *desc; 1342 struct dma_async_tx_descriptor *desc;
1340 1343
1341 desc = chan->device->device_prep_slave_sg(chan, 1344 desc = dmaengine_prep_slave_sg(chan,
1342 sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); 1345 sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
1343 1346
1344 if (desc) { 1347 if (desc) {
@@ -1453,7 +1456,7 @@ static void work_fn_tx(struct work_struct *work)
1453 1456
1454 BUG_ON(!sg_dma_len(sg)); 1457 BUG_ON(!sg_dma_len(sg));
1455 1458
1456 desc = chan->device->device_prep_slave_sg(chan, 1459 desc = dmaengine_prep_slave_sg(chan,
1457 sg, s->sg_len_tx, DMA_MEM_TO_DEV, 1460 sg, s->sg_len_tx, DMA_MEM_TO_DEV,
1458 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1461 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1459 if (!desc) { 1462 if (!desc) {
@@ -1501,8 +1504,10 @@ static void sci_start_tx(struct uart_port *port)
1501 } 1504 }
1502 1505
1503 if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) && 1506 if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
1504 s->cookie_tx < 0) 1507 s->cookie_tx < 0) {
1508 s->cookie_tx = 0;
1505 schedule_work(&s->work_tx); 1509 schedule_work(&s->work_tx);
1510 }
1506#endif 1511#endif
1507 1512
1508 if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) { 1513 if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index 97cb45916c43..d05c7fbbb703 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -115,12 +115,12 @@ static bool ux500_configure_channel(struct dma_channel *channel,
115 slave_conf.dst_addr = usb_fifo_addr; 115 slave_conf.dst_addr = usb_fifo_addr;
116 slave_conf.dst_addr_width = addr_width; 116 slave_conf.dst_addr_width = addr_width;
117 slave_conf.dst_maxburst = 16; 117 slave_conf.dst_maxburst = 16;
118 slave_conf.device_fc = false;
118 119
119 dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG, 120 dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG,
120 (unsigned long) &slave_conf); 121 (unsigned long) &slave_conf);
121 122
122 dma_desc = dma_chan->device-> 123 dma_desc = dmaengine_prep_slave_sg(dma_chan, &sg, 1, direction,
123 device_prep_slave_sg(dma_chan, &sg, 1, direction,
124 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 124 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
125 if (!dma_desc) 125 if (!dma_desc)
126 return false; 126 return false;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 3648c82a17fe..6ec7f838d7fa 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -786,9 +786,8 @@ static void xfer_work(struct work_struct *work)
786 sg_dma_address(&sg) = pkt->dma + pkt->actual; 786 sg_dma_address(&sg) = pkt->dma + pkt->actual;
787 sg_dma_len(&sg) = pkt->trans; 787 sg_dma_len(&sg) = pkt->trans;
788 788
789 desc = chan->device->device_prep_slave_sg(chan, &sg, 1, dir, 789 desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir,
790 DMA_PREP_INTERRUPT | 790 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
791 DMA_CTRL_ACK);
792 if (!desc) 791 if (!desc)
793 return; 792 return;
794 793
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7c229d304684..ff8605b4b4be 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1724,7 +1724,8 @@ static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv)
1724 1724
1725/* 1725/*
1726 * Module parameter to control latency timer for NDI FTDI-based USB devices. 1726 * Module parameter to control latency timer for NDI FTDI-based USB devices.
1727 * If this value is not set in modprobe.conf.local its value will be set to 1ms. 1727 * If this value is not set in /etc/modprobe.d/ its value will be set
1728 * to 1ms.
1728 */ 1729 */
1729static int ndi_latency_timer = 1; 1730static int ndi_latency_timer = 1;
1730 1731
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index fe2d803a6347..7691c866637b 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -222,7 +222,7 @@ config USB_LIBUSUAL
222 for usb-storage and ub drivers, and allows to switch binding 222 for usb-storage and ub drivers, and allows to switch binding
223 of these devices without rebuilding modules. 223 of these devices without rebuilding modules.
224 224
225 Typical syntax of /etc/modprobe.conf is: 225 Typical syntax of /etc/modprobe.d/*conf is:
226 226
227 options libusual bias="ub" 227 options libusual bias="ub"
228 228
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 727a5149d818..eec0d7b748eb 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -337,7 +337,7 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
337 337
338 /* This enables the channel */ 338 /* This enables the channel */
339 if (mx3_fbi->cookie < 0) { 339 if (mx3_fbi->cookie < 0) {
340 mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan, 340 mx3_fbi->txd = dmaengine_prep_slave_sg(dma_chan,
341 &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); 341 &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
342 if (!mx3_fbi->txd) { 342 if (!mx3_fbi->txd) {
343 dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n", 343 dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
@@ -1091,7 +1091,7 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
1091 if (mx3_fbi->txd) 1091 if (mx3_fbi->txd)
1092 async_tx_ack(mx3_fbi->txd); 1092 async_tx_ack(mx3_fbi->txd);
1093 1093
1094 txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg + 1094 txd = dmaengine_prep_slave_sg(dma_chan, sg +
1095 mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); 1095 mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
1096 if (!txd) { 1096 if (!txd) {
1097 dev_err(fbi->device, 1097 dev_err(fbi->device,
diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c
index fd2271600370..4e5b960c32c8 100644
--- a/drivers/video/omap2/vrfb.c
+++ b/drivers/video/omap2/vrfb.c
@@ -27,7 +27,6 @@
27#include <linux/bitops.h> 27#include <linux/bitops.h>
28#include <linux/mutex.h> 28#include <linux/mutex.h>
29 29
30#include <mach/io.h>
31#include <plat/vrfb.h> 30#include <plat/vrfb.h>
32#include <plat/sdrc.h> 31#include <plat/sdrc.h>
33 32
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 958e5129c601..05f0a80818a2 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -398,21 +398,8 @@ static int restore_common(struct virtio_device *vdev)
398 return 0; 398 return 0;
399} 399}
400 400
401static int virtballoon_thaw(struct virtio_device *vdev)
402{
403 return restore_common(vdev);
404}
405
406static int virtballoon_restore(struct virtio_device *vdev) 401static int virtballoon_restore(struct virtio_device *vdev)
407{ 402{
408 struct virtio_balloon *vb = vdev->priv;
409
410 /*
411 * If a request wasn't complete at the time of freezing, this
412 * could have been set.
413 */
414 vb->need_stats_update = 0;
415
416 return restore_common(vdev); 403 return restore_common(vdev);
417} 404}
418#endif 405#endif
@@ -434,7 +421,6 @@ static struct virtio_driver virtio_balloon_driver = {
434#ifdef CONFIG_PM 421#ifdef CONFIG_PM
435 .freeze = virtballoon_freeze, 422 .freeze = virtballoon_freeze,
436 .restore = virtballoon_restore, 423 .restore = virtballoon_restore,
437 .thaw = virtballoon_thaw,
438#endif 424#endif
439}; 425};
440 426
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 635e1efb3792..2e03d416b9af 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -720,24 +720,6 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
720} 720}
721 721
722#ifdef CONFIG_PM 722#ifdef CONFIG_PM
723static int virtio_pci_suspend(struct device *dev)
724{
725 struct pci_dev *pci_dev = to_pci_dev(dev);
726
727 pci_save_state(pci_dev);
728 pci_set_power_state(pci_dev, PCI_D3hot);
729 return 0;
730}
731
732static int virtio_pci_resume(struct device *dev)
733{
734 struct pci_dev *pci_dev = to_pci_dev(dev);
735
736 pci_restore_state(pci_dev);
737 pci_set_power_state(pci_dev, PCI_D0);
738 return 0;
739}
740
741static int virtio_pci_freeze(struct device *dev) 723static int virtio_pci_freeze(struct device *dev)
742{ 724{
743 struct pci_dev *pci_dev = to_pci_dev(dev); 725 struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -758,59 +740,24 @@ static int virtio_pci_freeze(struct device *dev)
758 return ret; 740 return ret;
759} 741}
760 742
761static int restore_common(struct device *dev) 743static int virtio_pci_restore(struct device *dev)
762{
763 struct pci_dev *pci_dev = to_pci_dev(dev);
764 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
765 int ret;
766
767 ret = pci_enable_device(pci_dev);
768 if (ret)
769 return ret;
770 pci_set_master(pci_dev);
771 vp_finalize_features(&vp_dev->vdev);
772
773 return ret;
774}
775
776static int virtio_pci_thaw(struct device *dev)
777{ 744{
778 struct pci_dev *pci_dev = to_pci_dev(dev); 745 struct pci_dev *pci_dev = to_pci_dev(dev);
779 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); 746 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
780 struct virtio_driver *drv; 747 struct virtio_driver *drv;
781 int ret; 748 int ret;
782 749
783 ret = restore_common(dev);
784 if (ret)
785 return ret;
786
787 drv = container_of(vp_dev->vdev.dev.driver, 750 drv = container_of(vp_dev->vdev.dev.driver,
788 struct virtio_driver, driver); 751 struct virtio_driver, driver);
789 752
790 if (drv && drv->thaw) 753 ret = pci_enable_device(pci_dev);
791 ret = drv->thaw(&vp_dev->vdev); 754 if (ret)
792 else if (drv && drv->restore) 755 return ret;
793 ret = drv->restore(&vp_dev->vdev);
794
795 /* Finally, tell the device we're all set */
796 if (!ret)
797 vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
798
799 return ret;
800}
801
802static int virtio_pci_restore(struct device *dev)
803{
804 struct pci_dev *pci_dev = to_pci_dev(dev);
805 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
806 struct virtio_driver *drv;
807 int ret;
808 756
809 drv = container_of(vp_dev->vdev.dev.driver, 757 pci_set_master(pci_dev);
810 struct virtio_driver, driver); 758 vp_finalize_features(&vp_dev->vdev);
811 759
812 ret = restore_common(dev); 760 if (drv && drv->restore)
813 if (!ret && drv && drv->restore)
814 ret = drv->restore(&vp_dev->vdev); 761 ret = drv->restore(&vp_dev->vdev);
815 762
816 /* Finally, tell the device we're all set */ 763 /* Finally, tell the device we're all set */
@@ -821,12 +768,7 @@ static int virtio_pci_restore(struct device *dev)
821} 768}
822 769
823static const struct dev_pm_ops virtio_pci_pm_ops = { 770static const struct dev_pm_ops virtio_pci_pm_ops = {
824 .suspend = virtio_pci_suspend, 771 SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
825 .resume = virtio_pci_resume,
826 .freeze = virtio_pci_freeze,
827 .thaw = virtio_pci_thaw,
828 .restore = virtio_pci_restore,
829 .poweroff = virtio_pci_suspend,
830}; 772};
831#endif 773#endif
832 774
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index d54e04df45e4..54984deb8561 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -28,6 +28,7 @@
28#include <linux/miscdevice.h> 28#include <linux/miscdevice.h>
29#include <linux/watchdog.h> 29#include <linux/watchdog.h>
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/io.h>
31#include <linux/bitops.h> 32#include <linux/bitops.h>
32#include <linux/uaccess.h> 33#include <linux/uaccess.h>
33#include <linux/timex.h> 34#include <linux/timex.h>
diff --git a/fs/aio.c b/fs/aio.c
index 4f71627264fd..da887604dfc5 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -305,15 +305,18 @@ out_freectx:
305 return ERR_PTR(err); 305 return ERR_PTR(err);
306} 306}
307 307
308/* aio_cancel_all 308/* kill_ctx
309 * Cancels all outstanding aio requests on an aio context. Used 309 * Cancels all outstanding aio requests on an aio context. Used
310 * when the processes owning a context have all exited to encourage 310 * when the processes owning a context have all exited to encourage
311 * the rapid destruction of the kioctx. 311 * the rapid destruction of the kioctx.
312 */ 312 */
313static void aio_cancel_all(struct kioctx *ctx) 313static void kill_ctx(struct kioctx *ctx)
314{ 314{
315 int (*cancel)(struct kiocb *, struct io_event *); 315 int (*cancel)(struct kiocb *, struct io_event *);
316 struct task_struct *tsk = current;
317 DECLARE_WAITQUEUE(wait, tsk);
316 struct io_event res; 318 struct io_event res;
319
317 spin_lock_irq(&ctx->ctx_lock); 320 spin_lock_irq(&ctx->ctx_lock);
318 ctx->dead = 1; 321 ctx->dead = 1;
319 while (!list_empty(&ctx->active_reqs)) { 322 while (!list_empty(&ctx->active_reqs)) {
@@ -329,15 +332,7 @@ static void aio_cancel_all(struct kioctx *ctx)
329 spin_lock_irq(&ctx->ctx_lock); 332 spin_lock_irq(&ctx->ctx_lock);
330 } 333 }
331 } 334 }
332 spin_unlock_irq(&ctx->ctx_lock);
333}
334
335static void wait_for_all_aios(struct kioctx *ctx)
336{
337 struct task_struct *tsk = current;
338 DECLARE_WAITQUEUE(wait, tsk);
339 335
340 spin_lock_irq(&ctx->ctx_lock);
341 if (!ctx->reqs_active) 336 if (!ctx->reqs_active)
342 goto out; 337 goto out;
343 338
@@ -387,9 +382,7 @@ void exit_aio(struct mm_struct *mm)
387 ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list); 382 ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
388 hlist_del_rcu(&ctx->list); 383 hlist_del_rcu(&ctx->list);
389 384
390 aio_cancel_all(ctx); 385 kill_ctx(ctx);
391
392 wait_for_all_aios(ctx);
393 386
394 if (1 != atomic_read(&ctx->users)) 387 if (1 != atomic_read(&ctx->users))
395 printk(KERN_DEBUG 388 printk(KERN_DEBUG
@@ -1269,8 +1262,7 @@ static void io_destroy(struct kioctx *ioctx)
1269 if (likely(!was_dead)) 1262 if (likely(!was_dead))
1270 put_ioctx(ioctx); /* twice for the list */ 1263 put_ioctx(ioctx); /* twice for the list */
1271 1264
1272 aio_cancel_all(ioctx); 1265 kill_ctx(ioctx);
1273 wait_for_all_aios(ioctx);
1274 1266
1275 /* 1267 /*
1276 * Wake up any waiters. The setting of ctx->dead must be seen 1268 * Wake up any waiters. The setting of ctx->dead must be seen
@@ -1278,7 +1270,6 @@ static void io_destroy(struct kioctx *ioctx)
1278 * locking done by the above calls to ensure this consistency. 1270 * locking done by the above calls to ensure this consistency.
1279 */ 1271 */
1280 wake_up_all(&ioctx->wait); 1272 wake_up_all(&ioctx->wait);
1281 put_ioctx(ioctx); /* once for the lookup */
1282} 1273}
1283 1274
1284/* sys_io_setup: 1275/* sys_io_setup:
@@ -1315,11 +1306,9 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1315 ret = PTR_ERR(ioctx); 1306 ret = PTR_ERR(ioctx);
1316 if (!IS_ERR(ioctx)) { 1307 if (!IS_ERR(ioctx)) {
1317 ret = put_user(ioctx->user_id, ctxp); 1308 ret = put_user(ioctx->user_id, ctxp);
1318 if (!ret) { 1309 if (ret)
1319 put_ioctx(ioctx); 1310 io_destroy(ioctx);
1320 return 0; 1311 put_ioctx(ioctx);
1321 }
1322 io_destroy(ioctx);
1323 } 1312 }
1324 1313
1325out: 1314out:
@@ -1337,6 +1326,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1337 struct kioctx *ioctx = lookup_ioctx(ctx); 1326 struct kioctx *ioctx = lookup_ioctx(ctx);
1338 if (likely(NULL != ioctx)) { 1327 if (likely(NULL != ioctx)) {
1339 io_destroy(ioctx); 1328 io_destroy(ioctx);
1329 put_ioctx(ioctx);
1340 return 0; 1330 return 0;
1341 } 1331 }
1342 pr_debug("EINVAL: io_destroy: invalid context id\n"); 1332 pr_debug("EINVAL: io_destroy: invalid context id\n");
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 85f1fcdb30e7..9dacb8586701 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -230,7 +230,7 @@ static void autofs_dev_ioctl_fd_install(unsigned int fd, struct file *file)
230 fdt = files_fdtable(files); 230 fdt = files_fdtable(files);
231 BUG_ON(fdt->fd[fd] != NULL); 231 BUG_ON(fdt->fd[fd] != NULL);
232 rcu_assign_pointer(fdt->fd[fd], file); 232 rcu_assign_pointer(fdt->fd[fd], file);
233 FD_SET(fd, fdt->close_on_exec); 233 __set_close_on_exec(fd, fdt);
234 spin_unlock(&files->file_lock); 234 spin_unlock(&files->file_lock);
235} 235}
236 236
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 7d7ff206cdcb..48ffb3dc610a 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1415,6 +1415,22 @@ static void do_thread_regset_writeback(struct task_struct *task,
1415 regset->writeback(task, regset, 1); 1415 regset->writeback(task, regset, 1);
1416} 1416}
1417 1417
1418#ifndef PR_REG_SIZE
1419#define PR_REG_SIZE(S) sizeof(S)
1420#endif
1421
1422#ifndef PRSTATUS_SIZE
1423#define PRSTATUS_SIZE(S) sizeof(S)
1424#endif
1425
1426#ifndef PR_REG_PTR
1427#define PR_REG_PTR(S) (&((S)->pr_reg))
1428#endif
1429
1430#ifndef SET_PR_FPVALID
1431#define SET_PR_FPVALID(S, V) ((S)->pr_fpvalid = (V))
1432#endif
1433
1418static int fill_thread_core_info(struct elf_thread_core_info *t, 1434static int fill_thread_core_info(struct elf_thread_core_info *t,
1419 const struct user_regset_view *view, 1435 const struct user_regset_view *view,
1420 long signr, size_t *total) 1436 long signr, size_t *total)
@@ -1429,11 +1445,11 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
1429 */ 1445 */
1430 fill_prstatus(&t->prstatus, t->task, signr); 1446 fill_prstatus(&t->prstatus, t->task, signr);
1431 (void) view->regsets[0].get(t->task, &view->regsets[0], 1447 (void) view->regsets[0].get(t->task, &view->regsets[0],
1432 0, sizeof(t->prstatus.pr_reg), 1448 0, PR_REG_SIZE(t->prstatus.pr_reg),
1433 &t->prstatus.pr_reg, NULL); 1449 PR_REG_PTR(&t->prstatus), NULL);
1434 1450
1435 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, 1451 fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
1436 sizeof(t->prstatus), &t->prstatus); 1452 PRSTATUS_SIZE(t->prstatus), &t->prstatus);
1437 *total += notesize(&t->notes[0]); 1453 *total += notesize(&t->notes[0]);
1438 1454
1439 do_thread_regset_writeback(t->task, &view->regsets[0]); 1455 do_thread_regset_writeback(t->task, &view->regsets[0]);
@@ -1463,7 +1479,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
1463 regset->core_note_type, 1479 regset->core_note_type,
1464 size, data); 1480 size, data);
1465 else { 1481 else {
1466 t->prstatus.pr_fpvalid = 1; 1482 SET_PR_FPVALID(&t->prstatus, 1);
1467 fill_note(&t->notes[i], "CORE", 1483 fill_note(&t->notes[i], "CORE",
1468 NT_PRFPREG, size, data); 1484 NT_PRFPREG, size, data);
1469 } 1485 }
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 0cc20b35c1c4..42704149b723 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -171,11 +171,11 @@ out:
171 spin_unlock_irqrestore(&workers->lock, flags); 171 spin_unlock_irqrestore(&workers->lock, flags);
172} 172}
173 173
174static noinline int run_ordered_completions(struct btrfs_workers *workers, 174static noinline void run_ordered_completions(struct btrfs_workers *workers,
175 struct btrfs_work *work) 175 struct btrfs_work *work)
176{ 176{
177 if (!workers->ordered) 177 if (!workers->ordered)
178 return 0; 178 return;
179 179
180 set_bit(WORK_DONE_BIT, &work->flags); 180 set_bit(WORK_DONE_BIT, &work->flags);
181 181
@@ -213,7 +213,6 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
213 } 213 }
214 214
215 spin_unlock(&workers->order_lock); 215 spin_unlock(&workers->order_lock);
216 return 0;
217} 216}
218 217
219static void put_worker(struct btrfs_worker_thread *worker) 218static void put_worker(struct btrfs_worker_thread *worker)
@@ -399,7 +398,7 @@ again:
399/* 398/*
400 * this will wait for all the worker threads to shutdown 399 * this will wait for all the worker threads to shutdown
401 */ 400 */
402int btrfs_stop_workers(struct btrfs_workers *workers) 401void btrfs_stop_workers(struct btrfs_workers *workers)
403{ 402{
404 struct list_head *cur; 403 struct list_head *cur;
405 struct btrfs_worker_thread *worker; 404 struct btrfs_worker_thread *worker;
@@ -427,7 +426,6 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
427 put_worker(worker); 426 put_worker(worker);
428 } 427 }
429 spin_unlock_irq(&workers->lock); 428 spin_unlock_irq(&workers->lock);
430 return 0;
431} 429}
432 430
433/* 431/*
@@ -615,14 +613,14 @@ found:
615 * it was taken from. It is intended for use with long running work functions 613 * it was taken from. It is intended for use with long running work functions
616 * that make some progress and want to give the cpu up for others. 614 * that make some progress and want to give the cpu up for others.
617 */ 615 */
618int btrfs_requeue_work(struct btrfs_work *work) 616void btrfs_requeue_work(struct btrfs_work *work)
619{ 617{
620 struct btrfs_worker_thread *worker = work->worker; 618 struct btrfs_worker_thread *worker = work->worker;
621 unsigned long flags; 619 unsigned long flags;
622 int wake = 0; 620 int wake = 0;
623 621
624 if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags)) 622 if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
625 goto out; 623 return;
626 624
627 spin_lock_irqsave(&worker->lock, flags); 625 spin_lock_irqsave(&worker->lock, flags);
628 if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) 626 if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
@@ -649,9 +647,6 @@ int btrfs_requeue_work(struct btrfs_work *work)
649 if (wake) 647 if (wake)
650 wake_up_process(worker->task); 648 wake_up_process(worker->task);
651 spin_unlock_irqrestore(&worker->lock, flags); 649 spin_unlock_irqrestore(&worker->lock, flags);
652out:
653
654 return 0;
655} 650}
656 651
657void btrfs_set_work_high_prio(struct btrfs_work *work) 652void btrfs_set_work_high_prio(struct btrfs_work *work)
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index f34cc31fa3c9..063698b90ce2 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -111,9 +111,9 @@ struct btrfs_workers {
111 111
112void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work); 112void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
113int btrfs_start_workers(struct btrfs_workers *workers); 113int btrfs_start_workers(struct btrfs_workers *workers);
114int btrfs_stop_workers(struct btrfs_workers *workers); 114void btrfs_stop_workers(struct btrfs_workers *workers);
115void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max, 115void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
116 struct btrfs_workers *async_starter); 116 struct btrfs_workers *async_starter);
117int btrfs_requeue_work(struct btrfs_work *work); 117void btrfs_requeue_work(struct btrfs_work *work);
118void btrfs_set_work_high_prio(struct btrfs_work *work); 118void btrfs_set_work_high_prio(struct btrfs_work *work);
119#endif 119#endif
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 0436c12da8c2..f4e90748940a 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -116,6 +116,7 @@ add_parent:
116 * to a logical address 116 * to a logical address
117 */ 117 */
118static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info, 118static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
119 int search_commit_root,
119 struct __prelim_ref *ref, 120 struct __prelim_ref *ref,
120 struct ulist *parents) 121 struct ulist *parents)
121{ 122{
@@ -131,6 +132,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
131 path = btrfs_alloc_path(); 132 path = btrfs_alloc_path();
132 if (!path) 133 if (!path)
133 return -ENOMEM; 134 return -ENOMEM;
135 path->search_commit_root = !!search_commit_root;
134 136
135 root_key.objectid = ref->root_id; 137 root_key.objectid = ref->root_id;
136 root_key.type = BTRFS_ROOT_ITEM_KEY; 138 root_key.type = BTRFS_ROOT_ITEM_KEY;
@@ -188,6 +190,7 @@ out:
188 * resolve all indirect backrefs from the list 190 * resolve all indirect backrefs from the list
189 */ 191 */
190static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info, 192static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
193 int search_commit_root,
191 struct list_head *head) 194 struct list_head *head)
192{ 195{
193 int err; 196 int err;
@@ -212,7 +215,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
212 continue; 215 continue;
213 if (ref->count == 0) 216 if (ref->count == 0)
214 continue; 217 continue;
215 err = __resolve_indirect_ref(fs_info, ref, parents); 218 err = __resolve_indirect_ref(fs_info, search_commit_root,
219 ref, parents);
216 if (err) { 220 if (err) {
217 if (ret == 0) 221 if (ret == 0)
218 ret = err; 222 ret = err;
@@ -586,6 +590,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
586 struct btrfs_delayed_ref_head *head; 590 struct btrfs_delayed_ref_head *head;
587 int info_level = 0; 591 int info_level = 0;
588 int ret; 592 int ret;
593 int search_commit_root = (trans == BTRFS_BACKREF_SEARCH_COMMIT_ROOT);
589 struct list_head prefs_delayed; 594 struct list_head prefs_delayed;
590 struct list_head prefs; 595 struct list_head prefs;
591 struct __prelim_ref *ref; 596 struct __prelim_ref *ref;
@@ -600,6 +605,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
600 path = btrfs_alloc_path(); 605 path = btrfs_alloc_path();
601 if (!path) 606 if (!path)
602 return -ENOMEM; 607 return -ENOMEM;
608 path->search_commit_root = !!search_commit_root;
603 609
604 /* 610 /*
605 * grab both a lock on the path and a lock on the delayed ref head. 611 * grab both a lock on the path and a lock on the delayed ref head.
@@ -614,35 +620,39 @@ again:
614 goto out; 620 goto out;
615 BUG_ON(ret == 0); 621 BUG_ON(ret == 0);
616 622
617 /* 623 if (trans != BTRFS_BACKREF_SEARCH_COMMIT_ROOT) {
618 * look if there are updates for this ref queued and lock the head 624 /*
619 */ 625 * look if there are updates for this ref queued and lock the
620 delayed_refs = &trans->transaction->delayed_refs; 626 * head
621 spin_lock(&delayed_refs->lock); 627 */
622 head = btrfs_find_delayed_ref_head(trans, bytenr); 628 delayed_refs = &trans->transaction->delayed_refs;
623 if (head) { 629 spin_lock(&delayed_refs->lock);
624 if (!mutex_trylock(&head->mutex)) { 630 head = btrfs_find_delayed_ref_head(trans, bytenr);
625 atomic_inc(&head->node.refs); 631 if (head) {
626 spin_unlock(&delayed_refs->lock); 632 if (!mutex_trylock(&head->mutex)) {
627 633 atomic_inc(&head->node.refs);
628 btrfs_release_path(path); 634 spin_unlock(&delayed_refs->lock);
629 635
630 /* 636 btrfs_release_path(path);
631 * Mutex was contended, block until it's 637
632 * released and try again 638 /*
633 */ 639 * Mutex was contended, block until it's
634 mutex_lock(&head->mutex); 640 * released and try again
635 mutex_unlock(&head->mutex); 641 */
636 btrfs_put_delayed_ref(&head->node); 642 mutex_lock(&head->mutex);
637 goto again; 643 mutex_unlock(&head->mutex);
638 } 644 btrfs_put_delayed_ref(&head->node);
639 ret = __add_delayed_refs(head, seq, &info_key, &prefs_delayed); 645 goto again;
640 if (ret) { 646 }
641 spin_unlock(&delayed_refs->lock); 647 ret = __add_delayed_refs(head, seq, &info_key,
642 goto out; 648 &prefs_delayed);
649 if (ret) {
650 spin_unlock(&delayed_refs->lock);
651 goto out;
652 }
643 } 653 }
654 spin_unlock(&delayed_refs->lock);
644 } 655 }
645 spin_unlock(&delayed_refs->lock);
646 656
647 if (path->slots[0]) { 657 if (path->slots[0]) {
648 struct extent_buffer *leaf; 658 struct extent_buffer *leaf;
@@ -679,7 +689,7 @@ again:
679 if (ret) 689 if (ret)
680 goto out; 690 goto out;
681 691
682 ret = __resolve_indirect_refs(fs_info, &prefs); 692 ret = __resolve_indirect_refs(fs_info, search_commit_root, &prefs);
683 if (ret) 693 if (ret)
684 goto out; 694 goto out;
685 695
@@ -1074,8 +1084,7 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1074 return 0; 1084 return 0;
1075} 1085}
1076 1086
1077static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, 1087static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, u64 logical,
1078 struct btrfs_path *path, u64 logical,
1079 u64 orig_extent_item_objectid, 1088 u64 orig_extent_item_objectid,
1080 u64 extent_item_pos, u64 root, 1089 u64 extent_item_pos, u64 root,
1081 iterate_extent_inodes_t *iterate, void *ctx) 1090 iterate_extent_inodes_t *iterate, void *ctx)
@@ -1143,35 +1152,38 @@ static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1143 * calls iterate() for every inode that references the extent identified by 1152 * calls iterate() for every inode that references the extent identified by
1144 * the given parameters. 1153 * the given parameters.
1145 * when the iterator function returns a non-zero value, iteration stops. 1154 * when the iterator function returns a non-zero value, iteration stops.
1146 * path is guaranteed to be in released state when iterate() is called.
1147 */ 1155 */
1148int iterate_extent_inodes(struct btrfs_fs_info *fs_info, 1156int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1149 struct btrfs_path *path,
1150 u64 extent_item_objectid, u64 extent_item_pos, 1157 u64 extent_item_objectid, u64 extent_item_pos,
1158 int search_commit_root,
1151 iterate_extent_inodes_t *iterate, void *ctx) 1159 iterate_extent_inodes_t *iterate, void *ctx)
1152{ 1160{
1153 int ret; 1161 int ret;
1154 struct list_head data_refs = LIST_HEAD_INIT(data_refs); 1162 struct list_head data_refs = LIST_HEAD_INIT(data_refs);
1155 struct list_head shared_refs = LIST_HEAD_INIT(shared_refs); 1163 struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
1156 struct btrfs_trans_handle *trans; 1164 struct btrfs_trans_handle *trans;
1157 struct ulist *refs; 1165 struct ulist *refs = NULL;
1158 struct ulist *roots; 1166 struct ulist *roots = NULL;
1159 struct ulist_node *ref_node = NULL; 1167 struct ulist_node *ref_node = NULL;
1160 struct ulist_node *root_node = NULL; 1168 struct ulist_node *root_node = NULL;
1161 struct seq_list seq_elem; 1169 struct seq_list seq_elem;
1162 struct btrfs_delayed_ref_root *delayed_refs; 1170 struct btrfs_delayed_ref_root *delayed_refs = NULL;
1163
1164 trans = btrfs_join_transaction(fs_info->extent_root);
1165 if (IS_ERR(trans))
1166 return PTR_ERR(trans);
1167 1171
1168 pr_debug("resolving all inodes for extent %llu\n", 1172 pr_debug("resolving all inodes for extent %llu\n",
1169 extent_item_objectid); 1173 extent_item_objectid);
1170 1174
1171 delayed_refs = &trans->transaction->delayed_refs; 1175 if (search_commit_root) {
1172 spin_lock(&delayed_refs->lock); 1176 trans = BTRFS_BACKREF_SEARCH_COMMIT_ROOT;
1173 btrfs_get_delayed_seq(delayed_refs, &seq_elem); 1177 } else {
1174 spin_unlock(&delayed_refs->lock); 1178 trans = btrfs_join_transaction(fs_info->extent_root);
1179 if (IS_ERR(trans))
1180 return PTR_ERR(trans);
1181
1182 delayed_refs = &trans->transaction->delayed_refs;
1183 spin_lock(&delayed_refs->lock);
1184 btrfs_get_delayed_seq(delayed_refs, &seq_elem);
1185 spin_unlock(&delayed_refs->lock);
1186 }
1175 1187
1176 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid, 1188 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
1177 extent_item_pos, seq_elem.seq, 1189 extent_item_pos, seq_elem.seq,
@@ -1188,7 +1200,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1188 while (!ret && (root_node = ulist_next(roots, root_node))) { 1200 while (!ret && (root_node = ulist_next(roots, root_node))) {
1189 pr_debug("root %llu references leaf %llu\n", 1201 pr_debug("root %llu references leaf %llu\n",
1190 root_node->val, ref_node->val); 1202 root_node->val, ref_node->val);
1191 ret = iterate_leaf_refs(fs_info, path, ref_node->val, 1203 ret = iterate_leaf_refs(fs_info, ref_node->val,
1192 extent_item_objectid, 1204 extent_item_objectid,
1193 extent_item_pos, root_node->val, 1205 extent_item_pos, root_node->val,
1194 iterate, ctx); 1206 iterate, ctx);
@@ -1198,8 +1210,11 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1198 ulist_free(refs); 1210 ulist_free(refs);
1199 ulist_free(roots); 1211 ulist_free(roots);
1200out: 1212out:
1201 btrfs_put_delayed_seq(delayed_refs, &seq_elem); 1213 if (!search_commit_root) {
1202 btrfs_end_transaction(trans, fs_info->extent_root); 1214 btrfs_put_delayed_seq(delayed_refs, &seq_elem);
1215 btrfs_end_transaction(trans, fs_info->extent_root);
1216 }
1217
1203 return ret; 1218 return ret;
1204} 1219}
1205 1220
@@ -1210,6 +1225,7 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
1210 int ret; 1225 int ret;
1211 u64 extent_item_pos; 1226 u64 extent_item_pos;
1212 struct btrfs_key found_key; 1227 struct btrfs_key found_key;
1228 int search_commit_root = path->search_commit_root;
1213 1229
1214 ret = extent_from_logical(fs_info, logical, path, 1230 ret = extent_from_logical(fs_info, logical, path,
1215 &found_key); 1231 &found_key);
@@ -1220,8 +1236,9 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
1220 return ret; 1236 return ret;
1221 1237
1222 extent_item_pos = logical - found_key.objectid; 1238 extent_item_pos = logical - found_key.objectid;
1223 ret = iterate_extent_inodes(fs_info, path, found_key.objectid, 1239 ret = iterate_extent_inodes(fs_info, found_key.objectid,
1224 extent_item_pos, iterate, ctx); 1240 extent_item_pos, search_commit_root,
1241 iterate, ctx);
1225 1242
1226 return ret; 1243 return ret;
1227} 1244}
@@ -1342,12 +1359,6 @@ int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
1342 inode_to_path, ipath); 1359 inode_to_path, ipath);
1343} 1360}
1344 1361
1345/*
1346 * allocates space to return multiple file system paths for an inode.
1347 * total_bytes to allocate are passed, note that space usable for actual path
1348 * information will be total_bytes - sizeof(struct inode_fs_paths).
1349 * the returned pointer must be freed with free_ipath() in the end.
1350 */
1351struct btrfs_data_container *init_data_container(u32 total_bytes) 1362struct btrfs_data_container *init_data_container(u32 total_bytes)
1352{ 1363{
1353 struct btrfs_data_container *data; 1364 struct btrfs_data_container *data;
@@ -1403,5 +1414,6 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
1403 1414
1404void free_ipath(struct inode_fs_paths *ipath) 1415void free_ipath(struct inode_fs_paths *ipath)
1405{ 1416{
1417 kfree(ipath->fspath);
1406 kfree(ipath); 1418 kfree(ipath);
1407} 1419}
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index d00dfa9ca934..57ea2e959e4d 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -22,6 +22,8 @@
22#include "ioctl.h" 22#include "ioctl.h"
23#include "ulist.h" 23#include "ulist.h"
24 24
25#define BTRFS_BACKREF_SEARCH_COMMIT_ROOT ((struct btrfs_trans_handle *)0)
26
25struct inode_fs_paths { 27struct inode_fs_paths {
26 struct btrfs_path *btrfs_path; 28 struct btrfs_path *btrfs_path;
27 struct btrfs_root *fs_root; 29 struct btrfs_root *fs_root;
@@ -44,9 +46,8 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
44 u64 *out_root, u8 *out_level); 46 u64 *out_root, u8 *out_level);
45 47
46int iterate_extent_inodes(struct btrfs_fs_info *fs_info, 48int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
47 struct btrfs_path *path,
48 u64 extent_item_objectid, 49 u64 extent_item_objectid,
49 u64 extent_offset, 50 u64 extent_offset, int search_commit_root,
50 iterate_extent_inodes_t *iterate, void *ctx); 51 iterate_extent_inodes_t *iterate, void *ctx);
51 52
52int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, 53int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index b805afb37fa8..d286b40a5671 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -226,8 +226,8 @@ out:
226 * Clear the writeback bits on all of the file 226 * Clear the writeback bits on all of the file
227 * pages for a compressed write 227 * pages for a compressed write
228 */ 228 */
229static noinline int end_compressed_writeback(struct inode *inode, u64 start, 229static noinline void end_compressed_writeback(struct inode *inode, u64 start,
230 unsigned long ram_size) 230 unsigned long ram_size)
231{ 231{
232 unsigned long index = start >> PAGE_CACHE_SHIFT; 232 unsigned long index = start >> PAGE_CACHE_SHIFT;
233 unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT; 233 unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
@@ -253,7 +253,6 @@ static noinline int end_compressed_writeback(struct inode *inode, u64 start,
253 index += ret; 253 index += ret;
254 } 254 }
255 /* the inode may be gone now */ 255 /* the inode may be gone now */
256 return 0;
257} 256}
258 257
259/* 258/*
@@ -392,16 +391,16 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
392 */ 391 */
393 atomic_inc(&cb->pending_bios); 392 atomic_inc(&cb->pending_bios);
394 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); 393 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
395 BUG_ON(ret); 394 BUG_ON(ret); /* -ENOMEM */
396 395
397 if (!skip_sum) { 396 if (!skip_sum) {
398 ret = btrfs_csum_one_bio(root, inode, bio, 397 ret = btrfs_csum_one_bio(root, inode, bio,
399 start, 1); 398 start, 1);
400 BUG_ON(ret); 399 BUG_ON(ret); /* -ENOMEM */
401 } 400 }
402 401
403 ret = btrfs_map_bio(root, WRITE, bio, 0, 1); 402 ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
404 BUG_ON(ret); 403 BUG_ON(ret); /* -ENOMEM */
405 404
406 bio_put(bio); 405 bio_put(bio);
407 406
@@ -421,15 +420,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
421 bio_get(bio); 420 bio_get(bio);
422 421
423 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); 422 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
424 BUG_ON(ret); 423 BUG_ON(ret); /* -ENOMEM */
425 424
426 if (!skip_sum) { 425 if (!skip_sum) {
427 ret = btrfs_csum_one_bio(root, inode, bio, start, 1); 426 ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
428 BUG_ON(ret); 427 BUG_ON(ret); /* -ENOMEM */
429 } 428 }
430 429
431 ret = btrfs_map_bio(root, WRITE, bio, 0, 1); 430 ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
432 BUG_ON(ret); 431 BUG_ON(ret); /* -ENOMEM */
433 432
434 bio_put(bio); 433 bio_put(bio);
435 return 0; 434 return 0;
@@ -497,7 +496,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
497 * sure they map to this compressed extent on disk. 496 * sure they map to this compressed extent on disk.
498 */ 497 */
499 set_page_extent_mapped(page); 498 set_page_extent_mapped(page);
500 lock_extent(tree, last_offset, end, GFP_NOFS); 499 lock_extent(tree, last_offset, end);
501 read_lock(&em_tree->lock); 500 read_lock(&em_tree->lock);
502 em = lookup_extent_mapping(em_tree, last_offset, 501 em = lookup_extent_mapping(em_tree, last_offset,
503 PAGE_CACHE_SIZE); 502 PAGE_CACHE_SIZE);
@@ -507,7 +506,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
507 (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || 506 (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
508 (em->block_start >> 9) != cb->orig_bio->bi_sector) { 507 (em->block_start >> 9) != cb->orig_bio->bi_sector) {
509 free_extent_map(em); 508 free_extent_map(em);
510 unlock_extent(tree, last_offset, end, GFP_NOFS); 509 unlock_extent(tree, last_offset, end);
511 unlock_page(page); 510 unlock_page(page);
512 page_cache_release(page); 511 page_cache_release(page);
513 break; 512 break;
@@ -535,7 +534,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
535 nr_pages++; 534 nr_pages++;
536 page_cache_release(page); 535 page_cache_release(page);
537 } else { 536 } else {
538 unlock_extent(tree, last_offset, end, GFP_NOFS); 537 unlock_extent(tree, last_offset, end);
539 unlock_page(page); 538 unlock_page(page);
540 page_cache_release(page); 539 page_cache_release(page);
541 break; 540 break;
@@ -662,7 +661,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
662 bio_get(comp_bio); 661 bio_get(comp_bio);
663 662
664 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); 663 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
665 BUG_ON(ret); 664 BUG_ON(ret); /* -ENOMEM */
666 665
667 /* 666 /*
668 * inc the count before we submit the bio so 667 * inc the count before we submit the bio so
@@ -675,14 +674,14 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
675 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 674 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
676 ret = btrfs_lookup_bio_sums(root, inode, 675 ret = btrfs_lookup_bio_sums(root, inode,
677 comp_bio, sums); 676 comp_bio, sums);
678 BUG_ON(ret); 677 BUG_ON(ret); /* -ENOMEM */
679 } 678 }
680 sums += (comp_bio->bi_size + root->sectorsize - 1) / 679 sums += (comp_bio->bi_size + root->sectorsize - 1) /
681 root->sectorsize; 680 root->sectorsize;
682 681
683 ret = btrfs_map_bio(root, READ, comp_bio, 682 ret = btrfs_map_bio(root, READ, comp_bio,
684 mirror_num, 0); 683 mirror_num, 0);
685 BUG_ON(ret); 684 BUG_ON(ret); /* -ENOMEM */
686 685
687 bio_put(comp_bio); 686 bio_put(comp_bio);
688 687
@@ -698,15 +697,15 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
698 bio_get(comp_bio); 697 bio_get(comp_bio);
699 698
700 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); 699 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
701 BUG_ON(ret); 700 BUG_ON(ret); /* -ENOMEM */
702 701
703 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 702 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
704 ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums); 703 ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
705 BUG_ON(ret); 704 BUG_ON(ret); /* -ENOMEM */
706 } 705 }
707 706
708 ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); 707 ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
709 BUG_ON(ret); 708 BUG_ON(ret); /* -ENOMEM */
710 709
711 bio_put(comp_bio); 710 bio_put(comp_bio);
712 return 0; 711 return 0;
@@ -734,7 +733,7 @@ struct btrfs_compress_op *btrfs_compress_op[] = {
734 &btrfs_lzo_compress, 733 &btrfs_lzo_compress,
735}; 734};
736 735
737int __init btrfs_init_compress(void) 736void __init btrfs_init_compress(void)
738{ 737{
739 int i; 738 int i;
740 739
@@ -744,7 +743,6 @@ int __init btrfs_init_compress(void)
744 atomic_set(&comp_alloc_workspace[i], 0); 743 atomic_set(&comp_alloc_workspace[i], 0);
745 init_waitqueue_head(&comp_workspace_wait[i]); 744 init_waitqueue_head(&comp_workspace_wait[i]);
746 } 745 }
747 return 0;
748} 746}
749 747
750/* 748/*
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index a12059f4f0fd..9afb0a62ae82 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -19,7 +19,7 @@
19#ifndef __BTRFS_COMPRESSION_ 19#ifndef __BTRFS_COMPRESSION_
20#define __BTRFS_COMPRESSION_ 20#define __BTRFS_COMPRESSION_
21 21
22int btrfs_init_compress(void); 22void btrfs_init_compress(void);
23void btrfs_exit_compress(void); 23void btrfs_exit_compress(void);
24 24
25int btrfs_compress_pages(int type, struct address_space *mapping, 25int btrfs_compress_pages(int type, struct address_space *mapping,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0639a555e16e..e801f226d7e0 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -36,7 +36,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
36 struct btrfs_root *root, 36 struct btrfs_root *root,
37 struct extent_buffer *dst_buf, 37 struct extent_buffer *dst_buf,
38 struct extent_buffer *src_buf); 38 struct extent_buffer *src_buf);
39static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, 39static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
40 struct btrfs_path *path, int level, int slot); 40 struct btrfs_path *path, int level, int slot);
41 41
42struct btrfs_path *btrfs_alloc_path(void) 42struct btrfs_path *btrfs_alloc_path(void)
@@ -156,10 +156,23 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
156{ 156{
157 struct extent_buffer *eb; 157 struct extent_buffer *eb;
158 158
159 rcu_read_lock(); 159 while (1) {
160 eb = rcu_dereference(root->node); 160 rcu_read_lock();
161 extent_buffer_get(eb); 161 eb = rcu_dereference(root->node);
162 rcu_read_unlock(); 162
163 /*
164 * RCU really hurts here, we could free up the root node because
165 * it was cow'ed but we may not get the new root node yet so do
166 * the inc_not_zero dance and if it doesn't work then
167 * synchronize_rcu and try again.
168 */
169 if (atomic_inc_not_zero(&eb->refs)) {
170 rcu_read_unlock();
171 break;
172 }
173 rcu_read_unlock();
174 synchronize_rcu();
175 }
163 return eb; 176 return eb;
164} 177}
165 178
@@ -331,8 +344,13 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
331 if (btrfs_block_can_be_shared(root, buf)) { 344 if (btrfs_block_can_be_shared(root, buf)) {
332 ret = btrfs_lookup_extent_info(trans, root, buf->start, 345 ret = btrfs_lookup_extent_info(trans, root, buf->start,
333 buf->len, &refs, &flags); 346 buf->len, &refs, &flags);
334 BUG_ON(ret); 347 if (ret)
335 BUG_ON(refs == 0); 348 return ret;
349 if (refs == 0) {
350 ret = -EROFS;
351 btrfs_std_error(root->fs_info, ret);
352 return ret;
353 }
336 } else { 354 } else {
337 refs = 1; 355 refs = 1;
338 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || 356 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
@@ -351,14 +369,14 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
351 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && 369 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
352 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) { 370 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
353 ret = btrfs_inc_ref(trans, root, buf, 1, 1); 371 ret = btrfs_inc_ref(trans, root, buf, 1, 1);
354 BUG_ON(ret); 372 BUG_ON(ret); /* -ENOMEM */
355 373
356 if (root->root_key.objectid == 374 if (root->root_key.objectid ==
357 BTRFS_TREE_RELOC_OBJECTID) { 375 BTRFS_TREE_RELOC_OBJECTID) {
358 ret = btrfs_dec_ref(trans, root, buf, 0, 1); 376 ret = btrfs_dec_ref(trans, root, buf, 0, 1);
359 BUG_ON(ret); 377 BUG_ON(ret); /* -ENOMEM */
360 ret = btrfs_inc_ref(trans, root, cow, 1, 1); 378 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
361 BUG_ON(ret); 379 BUG_ON(ret); /* -ENOMEM */
362 } 380 }
363 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 381 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
364 } else { 382 } else {
@@ -368,14 +386,15 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
368 ret = btrfs_inc_ref(trans, root, cow, 1, 1); 386 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
369 else 387 else
370 ret = btrfs_inc_ref(trans, root, cow, 0, 1); 388 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
371 BUG_ON(ret); 389 BUG_ON(ret); /* -ENOMEM */
372 } 390 }
373 if (new_flags != 0) { 391 if (new_flags != 0) {
374 ret = btrfs_set_disk_extent_flags(trans, root, 392 ret = btrfs_set_disk_extent_flags(trans, root,
375 buf->start, 393 buf->start,
376 buf->len, 394 buf->len,
377 new_flags, 0); 395 new_flags, 0);
378 BUG_ON(ret); 396 if (ret)
397 return ret;
379 } 398 }
380 } else { 399 } else {
381 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) { 400 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
@@ -384,9 +403,9 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
384 ret = btrfs_inc_ref(trans, root, cow, 1, 1); 403 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
385 else 404 else
386 ret = btrfs_inc_ref(trans, root, cow, 0, 1); 405 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
387 BUG_ON(ret); 406 BUG_ON(ret); /* -ENOMEM */
388 ret = btrfs_dec_ref(trans, root, buf, 1, 1); 407 ret = btrfs_dec_ref(trans, root, buf, 1, 1);
389 BUG_ON(ret); 408 BUG_ON(ret); /* -ENOMEM */
390 } 409 }
391 clean_tree_block(trans, root, buf); 410 clean_tree_block(trans, root, buf);
392 *last_ref = 1; 411 *last_ref = 1;
@@ -415,7 +434,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
415{ 434{
416 struct btrfs_disk_key disk_key; 435 struct btrfs_disk_key disk_key;
417 struct extent_buffer *cow; 436 struct extent_buffer *cow;
418 int level; 437 int level, ret;
419 int last_ref = 0; 438 int last_ref = 0;
420 int unlock_orig = 0; 439 int unlock_orig = 0;
421 u64 parent_start; 440 u64 parent_start;
@@ -467,7 +486,11 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
467 (unsigned long)btrfs_header_fsid(cow), 486 (unsigned long)btrfs_header_fsid(cow),
468 BTRFS_FSID_SIZE); 487 BTRFS_FSID_SIZE);
469 488
470 update_ref_for_cow(trans, root, buf, cow, &last_ref); 489 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
490 if (ret) {
491 btrfs_abort_transaction(trans, root, ret);
492 return ret;
493 }
471 494
472 if (root->ref_cows) 495 if (root->ref_cows)
473 btrfs_reloc_cow_block(trans, root, buf, cow); 496 btrfs_reloc_cow_block(trans, root, buf, cow);
@@ -504,7 +527,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
504 } 527 }
505 if (unlock_orig) 528 if (unlock_orig)
506 btrfs_tree_unlock(buf); 529 btrfs_tree_unlock(buf);
507 free_extent_buffer(buf); 530 free_extent_buffer_stale(buf);
508 btrfs_mark_buffer_dirty(cow); 531 btrfs_mark_buffer_dirty(cow);
509 *cow_ret = cow; 532 *cow_ret = cow;
510 return 0; 533 return 0;
@@ -934,7 +957,12 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
934 957
935 /* promote the child to a root */ 958 /* promote the child to a root */
936 child = read_node_slot(root, mid, 0); 959 child = read_node_slot(root, mid, 0);
937 BUG_ON(!child); 960 if (!child) {
961 ret = -EROFS;
962 btrfs_std_error(root->fs_info, ret);
963 goto enospc;
964 }
965
938 btrfs_tree_lock(child); 966 btrfs_tree_lock(child);
939 btrfs_set_lock_blocking(child); 967 btrfs_set_lock_blocking(child);
940 ret = btrfs_cow_block(trans, root, child, mid, 0, &child); 968 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
@@ -959,7 +987,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
959 root_sub_used(root, mid->len); 987 root_sub_used(root, mid->len);
960 btrfs_free_tree_block(trans, root, mid, 0, 1, 0); 988 btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
961 /* once for the root ptr */ 989 /* once for the root ptr */
962 free_extent_buffer(mid); 990 free_extent_buffer_stale(mid);
963 return 0; 991 return 0;
964 } 992 }
965 if (btrfs_header_nritems(mid) > 993 if (btrfs_header_nritems(mid) >
@@ -1010,13 +1038,10 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1010 if (btrfs_header_nritems(right) == 0) { 1038 if (btrfs_header_nritems(right) == 0) {
1011 clean_tree_block(trans, root, right); 1039 clean_tree_block(trans, root, right);
1012 btrfs_tree_unlock(right); 1040 btrfs_tree_unlock(right);
1013 wret = del_ptr(trans, root, path, level + 1, pslot + 1041 del_ptr(trans, root, path, level + 1, pslot + 1);
1014 1);
1015 if (wret)
1016 ret = wret;
1017 root_sub_used(root, right->len); 1042 root_sub_used(root, right->len);
1018 btrfs_free_tree_block(trans, root, right, 0, 1, 0); 1043 btrfs_free_tree_block(trans, root, right, 0, 1, 0);
1019 free_extent_buffer(right); 1044 free_extent_buffer_stale(right);
1020 right = NULL; 1045 right = NULL;
1021 } else { 1046 } else {
1022 struct btrfs_disk_key right_key; 1047 struct btrfs_disk_key right_key;
@@ -1035,7 +1060,11 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1035 * otherwise we would have pulled some pointers from the 1060 * otherwise we would have pulled some pointers from the
1036 * right 1061 * right
1037 */ 1062 */
1038 BUG_ON(!left); 1063 if (!left) {
1064 ret = -EROFS;
1065 btrfs_std_error(root->fs_info, ret);
1066 goto enospc;
1067 }
1039 wret = balance_node_right(trans, root, mid, left); 1068 wret = balance_node_right(trans, root, mid, left);
1040 if (wret < 0) { 1069 if (wret < 0) {
1041 ret = wret; 1070 ret = wret;
@@ -1051,12 +1080,10 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1051 if (btrfs_header_nritems(mid) == 0) { 1080 if (btrfs_header_nritems(mid) == 0) {
1052 clean_tree_block(trans, root, mid); 1081 clean_tree_block(trans, root, mid);
1053 btrfs_tree_unlock(mid); 1082 btrfs_tree_unlock(mid);
1054 wret = del_ptr(trans, root, path, level + 1, pslot); 1083 del_ptr(trans, root, path, level + 1, pslot);
1055 if (wret)
1056 ret = wret;
1057 root_sub_used(root, mid->len); 1084 root_sub_used(root, mid->len);
1058 btrfs_free_tree_block(trans, root, mid, 0, 1, 0); 1085 btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
1059 free_extent_buffer(mid); 1086 free_extent_buffer_stale(mid);
1060 mid = NULL; 1087 mid = NULL;
1061 } else { 1088 } else {
1062 /* update the parent key to reflect our changes */ 1089 /* update the parent key to reflect our changes */
@@ -1382,7 +1409,8 @@ static noinline int reada_for_balance(struct btrfs_root *root,
1382 * if lowest_unlock is 1, level 0 won't be unlocked 1409 * if lowest_unlock is 1, level 0 won't be unlocked
1383 */ 1410 */
1384static noinline void unlock_up(struct btrfs_path *path, int level, 1411static noinline void unlock_up(struct btrfs_path *path, int level,
1385 int lowest_unlock) 1412 int lowest_unlock, int min_write_lock_level,
1413 int *write_lock_level)
1386{ 1414{
1387 int i; 1415 int i;
1388 int skip_level = level; 1416 int skip_level = level;
@@ -1414,6 +1442,11 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
1414 if (i >= lowest_unlock && i > skip_level && path->locks[i]) { 1442 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
1415 btrfs_tree_unlock_rw(t, path->locks[i]); 1443 btrfs_tree_unlock_rw(t, path->locks[i]);
1416 path->locks[i] = 0; 1444 path->locks[i] = 0;
1445 if (write_lock_level &&
1446 i > min_write_lock_level &&
1447 i <= *write_lock_level) {
1448 *write_lock_level = i - 1;
1449 }
1417 } 1450 }
1418 } 1451 }
1419} 1452}
@@ -1637,6 +1670,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1637 /* everything at write_lock_level or lower must be write locked */ 1670 /* everything at write_lock_level or lower must be write locked */
1638 int write_lock_level = 0; 1671 int write_lock_level = 0;
1639 u8 lowest_level = 0; 1672 u8 lowest_level = 0;
1673 int min_write_lock_level;
1640 1674
1641 lowest_level = p->lowest_level; 1675 lowest_level = p->lowest_level;
1642 WARN_ON(lowest_level && ins_len > 0); 1676 WARN_ON(lowest_level && ins_len > 0);
@@ -1664,6 +1698,8 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1664 if (cow && (p->keep_locks || p->lowest_level)) 1698 if (cow && (p->keep_locks || p->lowest_level))
1665 write_lock_level = BTRFS_MAX_LEVEL; 1699 write_lock_level = BTRFS_MAX_LEVEL;
1666 1700
1701 min_write_lock_level = write_lock_level;
1702
1667again: 1703again:
1668 /* 1704 /*
1669 * we try very hard to do read locks on the root 1705 * we try very hard to do read locks on the root
@@ -1795,7 +1831,8 @@ cow_done:
1795 goto again; 1831 goto again;
1796 } 1832 }
1797 1833
1798 unlock_up(p, level, lowest_unlock); 1834 unlock_up(p, level, lowest_unlock,
1835 min_write_lock_level, &write_lock_level);
1799 1836
1800 if (level == lowest_level) { 1837 if (level == lowest_level) {
1801 if (dec) 1838 if (dec)
@@ -1857,7 +1894,8 @@ cow_done:
1857 } 1894 }
1858 } 1895 }
1859 if (!p->search_for_split) 1896 if (!p->search_for_split)
1860 unlock_up(p, level, lowest_unlock); 1897 unlock_up(p, level, lowest_unlock,
1898 min_write_lock_level, &write_lock_level);
1861 goto done; 1899 goto done;
1862 } 1900 }
1863 } 1901 }
@@ -1881,15 +1919,12 @@ done:
1881 * fixing up pointers when a given leaf/node is not in slot 0 of the 1919 * fixing up pointers when a given leaf/node is not in slot 0 of the
1882 * higher levels 1920 * higher levels
1883 * 1921 *
1884 * If this fails to write a tree block, it returns -1, but continues
1885 * fixing up the blocks in ram so the tree is consistent.
1886 */ 1922 */
1887static int fixup_low_keys(struct btrfs_trans_handle *trans, 1923static void fixup_low_keys(struct btrfs_trans_handle *trans,
1888 struct btrfs_root *root, struct btrfs_path *path, 1924 struct btrfs_root *root, struct btrfs_path *path,
1889 struct btrfs_disk_key *key, int level) 1925 struct btrfs_disk_key *key, int level)
1890{ 1926{
1891 int i; 1927 int i;
1892 int ret = 0;
1893 struct extent_buffer *t; 1928 struct extent_buffer *t;
1894 1929
1895 for (i = level; i < BTRFS_MAX_LEVEL; i++) { 1930 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
@@ -1902,7 +1937,6 @@ static int fixup_low_keys(struct btrfs_trans_handle *trans,
1902 if (tslot != 0) 1937 if (tslot != 0)
1903 break; 1938 break;
1904 } 1939 }
1905 return ret;
1906} 1940}
1907 1941
1908/* 1942/*
@@ -1911,9 +1945,9 @@ static int fixup_low_keys(struct btrfs_trans_handle *trans,
1911 * This function isn't completely safe. It's the caller's responsibility 1945 * This function isn't completely safe. It's the caller's responsibility
1912 * that the new key won't break the order 1946 * that the new key won't break the order
1913 */ 1947 */
1914int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, 1948void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
1915 struct btrfs_root *root, struct btrfs_path *path, 1949 struct btrfs_root *root, struct btrfs_path *path,
1916 struct btrfs_key *new_key) 1950 struct btrfs_key *new_key)
1917{ 1951{
1918 struct btrfs_disk_key disk_key; 1952 struct btrfs_disk_key disk_key;
1919 struct extent_buffer *eb; 1953 struct extent_buffer *eb;
@@ -1923,13 +1957,11 @@ int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
1923 slot = path->slots[0]; 1957 slot = path->slots[0];
1924 if (slot > 0) { 1958 if (slot > 0) {
1925 btrfs_item_key(eb, &disk_key, slot - 1); 1959 btrfs_item_key(eb, &disk_key, slot - 1);
1926 if (comp_keys(&disk_key, new_key) >= 0) 1960 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
1927 return -1;
1928 } 1961 }
1929 if (slot < btrfs_header_nritems(eb) - 1) { 1962 if (slot < btrfs_header_nritems(eb) - 1) {
1930 btrfs_item_key(eb, &disk_key, slot + 1); 1963 btrfs_item_key(eb, &disk_key, slot + 1);
1931 if (comp_keys(&disk_key, new_key) <= 0) 1964 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
1932 return -1;
1933 } 1965 }
1934 1966
1935 btrfs_cpu_key_to_disk(&disk_key, new_key); 1967 btrfs_cpu_key_to_disk(&disk_key, new_key);
@@ -1937,7 +1969,6 @@ int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
1937 btrfs_mark_buffer_dirty(eb); 1969 btrfs_mark_buffer_dirty(eb);
1938 if (slot == 0) 1970 if (slot == 0)
1939 fixup_low_keys(trans, root, path, &disk_key, 1); 1971 fixup_low_keys(trans, root, path, &disk_key, 1);
1940 return 0;
1941} 1972}
1942 1973
1943/* 1974/*
@@ -2140,12 +2171,11 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2140 * 2171 *
2141 * slot and level indicate where you want the key to go, and 2172 * slot and level indicate where you want the key to go, and
2142 * blocknr is the block the key points to. 2173 * blocknr is the block the key points to.
2143 *
2144 * returns zero on success and < 0 on any error
2145 */ 2174 */
2146static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root 2175static void insert_ptr(struct btrfs_trans_handle *trans,
2147 *root, struct btrfs_path *path, struct btrfs_disk_key 2176 struct btrfs_root *root, struct btrfs_path *path,
2148 *key, u64 bytenr, int slot, int level) 2177 struct btrfs_disk_key *key, u64 bytenr,
2178 int slot, int level)
2149{ 2179{
2150 struct extent_buffer *lower; 2180 struct extent_buffer *lower;
2151 int nritems; 2181 int nritems;
@@ -2155,8 +2185,7 @@ static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
2155 lower = path->nodes[level]; 2185 lower = path->nodes[level];
2156 nritems = btrfs_header_nritems(lower); 2186 nritems = btrfs_header_nritems(lower);
2157 BUG_ON(slot > nritems); 2187 BUG_ON(slot > nritems);
2158 if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root)) 2188 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
2159 BUG();
2160 if (slot != nritems) { 2189 if (slot != nritems) {
2161 memmove_extent_buffer(lower, 2190 memmove_extent_buffer(lower,
2162 btrfs_node_key_ptr_offset(slot + 1), 2191 btrfs_node_key_ptr_offset(slot + 1),
@@ -2169,7 +2198,6 @@ static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
2169 btrfs_set_node_ptr_generation(lower, slot, trans->transid); 2198 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2170 btrfs_set_header_nritems(lower, nritems + 1); 2199 btrfs_set_header_nritems(lower, nritems + 1);
2171 btrfs_mark_buffer_dirty(lower); 2200 btrfs_mark_buffer_dirty(lower);
2172 return 0;
2173} 2201}
2174 2202
2175/* 2203/*
@@ -2190,7 +2218,6 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
2190 struct btrfs_disk_key disk_key; 2218 struct btrfs_disk_key disk_key;
2191 int mid; 2219 int mid;
2192 int ret; 2220 int ret;
2193 int wret;
2194 u32 c_nritems; 2221 u32 c_nritems;
2195 2222
2196 c = path->nodes[level]; 2223 c = path->nodes[level];
@@ -2247,11 +2274,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
2247 btrfs_mark_buffer_dirty(c); 2274 btrfs_mark_buffer_dirty(c);
2248 btrfs_mark_buffer_dirty(split); 2275 btrfs_mark_buffer_dirty(split);
2249 2276
2250 wret = insert_ptr(trans, root, path, &disk_key, split->start, 2277 insert_ptr(trans, root, path, &disk_key, split->start,
2251 path->slots[level + 1] + 1, 2278 path->slots[level + 1] + 1, level + 1);
2252 level + 1);
2253 if (wret)
2254 ret = wret;
2255 2279
2256 if (path->slots[level] >= mid) { 2280 if (path->slots[level] >= mid) {
2257 path->slots[level] -= mid; 2281 path->slots[level] -= mid;
@@ -2320,6 +2344,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
2320{ 2344{
2321 struct extent_buffer *left = path->nodes[0]; 2345 struct extent_buffer *left = path->nodes[0];
2322 struct extent_buffer *upper = path->nodes[1]; 2346 struct extent_buffer *upper = path->nodes[1];
2347 struct btrfs_map_token token;
2323 struct btrfs_disk_key disk_key; 2348 struct btrfs_disk_key disk_key;
2324 int slot; 2349 int slot;
2325 u32 i; 2350 u32 i;
@@ -2331,6 +2356,8 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
2331 u32 data_end; 2356 u32 data_end;
2332 u32 this_item_size; 2357 u32 this_item_size;
2333 2358
2359 btrfs_init_map_token(&token);
2360
2334 if (empty) 2361 if (empty)
2335 nr = 0; 2362 nr = 0;
2336 else 2363 else
@@ -2408,8 +2435,8 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
2408 push_space = BTRFS_LEAF_DATA_SIZE(root); 2435 push_space = BTRFS_LEAF_DATA_SIZE(root);
2409 for (i = 0; i < right_nritems; i++) { 2436 for (i = 0; i < right_nritems; i++) {
2410 item = btrfs_item_nr(right, i); 2437 item = btrfs_item_nr(right, i);
2411 push_space -= btrfs_item_size(right, item); 2438 push_space -= btrfs_token_item_size(right, item, &token);
2412 btrfs_set_item_offset(right, item, push_space); 2439 btrfs_set_token_item_offset(right, item, push_space, &token);
2413 } 2440 }
2414 2441
2415 left_nritems -= push_items; 2442 left_nritems -= push_items;
@@ -2537,9 +2564,11 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2537 u32 old_left_nritems; 2564 u32 old_left_nritems;
2538 u32 nr; 2565 u32 nr;
2539 int ret = 0; 2566 int ret = 0;
2540 int wret;
2541 u32 this_item_size; 2567 u32 this_item_size;
2542 u32 old_left_item_size; 2568 u32 old_left_item_size;
2569 struct btrfs_map_token token;
2570
2571 btrfs_init_map_token(&token);
2543 2572
2544 if (empty) 2573 if (empty)
2545 nr = min(right_nritems, max_slot); 2574 nr = min(right_nritems, max_slot);
@@ -2600,9 +2629,10 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2600 2629
2601 item = btrfs_item_nr(left, i); 2630 item = btrfs_item_nr(left, i);
2602 2631
2603 ioff = btrfs_item_offset(left, item); 2632 ioff = btrfs_token_item_offset(left, item, &token);
2604 btrfs_set_item_offset(left, item, 2633 btrfs_set_token_item_offset(left, item,
2605 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size)); 2634 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
2635 &token);
2606 } 2636 }
2607 btrfs_set_header_nritems(left, old_left_nritems + push_items); 2637 btrfs_set_header_nritems(left, old_left_nritems + push_items);
2608 2638
@@ -2632,8 +2662,9 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2632 for (i = 0; i < right_nritems; i++) { 2662 for (i = 0; i < right_nritems; i++) {
2633 item = btrfs_item_nr(right, i); 2663 item = btrfs_item_nr(right, i);
2634 2664
2635 push_space = push_space - btrfs_item_size(right, item); 2665 push_space = push_space - btrfs_token_item_size(right,
2636 btrfs_set_item_offset(right, item, push_space); 2666 item, &token);
2667 btrfs_set_token_item_offset(right, item, push_space, &token);
2637 } 2668 }
2638 2669
2639 btrfs_mark_buffer_dirty(left); 2670 btrfs_mark_buffer_dirty(left);
@@ -2643,9 +2674,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2643 clean_tree_block(trans, root, right); 2674 clean_tree_block(trans, root, right);
2644 2675
2645 btrfs_item_key(right, &disk_key, 0); 2676 btrfs_item_key(right, &disk_key, 0);
2646 wret = fixup_low_keys(trans, root, path, &disk_key, 1); 2677 fixup_low_keys(trans, root, path, &disk_key, 1);
2647 if (wret)
2648 ret = wret;
2649 2678
2650 /* then fixup the leaf pointer in the path */ 2679 /* then fixup the leaf pointer in the path */
2651 if (path->slots[0] < push_items) { 2680 if (path->slots[0] < push_items) {
@@ -2716,7 +2745,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
2716 path->nodes[1], slot - 1, &left); 2745 path->nodes[1], slot - 1, &left);
2717 if (ret) { 2746 if (ret) {
2718 /* we hit -ENOSPC, but it isn't fatal here */ 2747 /* we hit -ENOSPC, but it isn't fatal here */
2719 ret = 1; 2748 if (ret == -ENOSPC)
2749 ret = 1;
2720 goto out; 2750 goto out;
2721 } 2751 }
2722 2752
@@ -2738,22 +2768,21 @@ out:
2738/* 2768/*
2739 * split the path's leaf in two, making sure there is at least data_size 2769 * split the path's leaf in two, making sure there is at least data_size
2740 * available for the resulting leaf level of the path. 2770 * available for the resulting leaf level of the path.
2741 *
2742 * returns 0 if all went well and < 0 on failure.
2743 */ 2771 */
2744static noinline int copy_for_split(struct btrfs_trans_handle *trans, 2772static noinline void copy_for_split(struct btrfs_trans_handle *trans,
2745 struct btrfs_root *root, 2773 struct btrfs_root *root,
2746 struct btrfs_path *path, 2774 struct btrfs_path *path,
2747 struct extent_buffer *l, 2775 struct extent_buffer *l,
2748 struct extent_buffer *right, 2776 struct extent_buffer *right,
2749 int slot, int mid, int nritems) 2777 int slot, int mid, int nritems)
2750{ 2778{
2751 int data_copy_size; 2779 int data_copy_size;
2752 int rt_data_off; 2780 int rt_data_off;
2753 int i; 2781 int i;
2754 int ret = 0;
2755 int wret;
2756 struct btrfs_disk_key disk_key; 2782 struct btrfs_disk_key disk_key;
2783 struct btrfs_map_token token;
2784
2785 btrfs_init_map_token(&token);
2757 2786
2758 nritems = nritems - mid; 2787 nritems = nritems - mid;
2759 btrfs_set_header_nritems(right, nritems); 2788 btrfs_set_header_nritems(right, nritems);
@@ -2775,17 +2804,15 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
2775 struct btrfs_item *item = btrfs_item_nr(right, i); 2804 struct btrfs_item *item = btrfs_item_nr(right, i);
2776 u32 ioff; 2805 u32 ioff;
2777 2806
2778 ioff = btrfs_item_offset(right, item); 2807 ioff = btrfs_token_item_offset(right, item, &token);
2779 btrfs_set_item_offset(right, item, ioff + rt_data_off); 2808 btrfs_set_token_item_offset(right, item,
2809 ioff + rt_data_off, &token);
2780 } 2810 }
2781 2811
2782 btrfs_set_header_nritems(l, mid); 2812 btrfs_set_header_nritems(l, mid);
2783 ret = 0;
2784 btrfs_item_key(right, &disk_key, 0); 2813 btrfs_item_key(right, &disk_key, 0);
2785 wret = insert_ptr(trans, root, path, &disk_key, right->start, 2814 insert_ptr(trans, root, path, &disk_key, right->start,
2786 path->slots[1] + 1, 1); 2815 path->slots[1] + 1, 1);
2787 if (wret)
2788 ret = wret;
2789 2816
2790 btrfs_mark_buffer_dirty(right); 2817 btrfs_mark_buffer_dirty(right);
2791 btrfs_mark_buffer_dirty(l); 2818 btrfs_mark_buffer_dirty(l);
@@ -2803,8 +2830,6 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
2803 } 2830 }
2804 2831
2805 BUG_ON(path->slots[0] < 0); 2832 BUG_ON(path->slots[0] < 0);
2806
2807 return ret;
2808} 2833}
2809 2834
2810/* 2835/*
@@ -2993,12 +3018,8 @@ again:
2993 if (split == 0) { 3018 if (split == 0) {
2994 if (mid <= slot) { 3019 if (mid <= slot) {
2995 btrfs_set_header_nritems(right, 0); 3020 btrfs_set_header_nritems(right, 0);
2996 wret = insert_ptr(trans, root, path, 3021 insert_ptr(trans, root, path, &disk_key, right->start,
2997 &disk_key, right->start, 3022 path->slots[1] + 1, 1);
2998 path->slots[1] + 1, 1);
2999 if (wret)
3000 ret = wret;
3001
3002 btrfs_tree_unlock(path->nodes[0]); 3023 btrfs_tree_unlock(path->nodes[0]);
3003 free_extent_buffer(path->nodes[0]); 3024 free_extent_buffer(path->nodes[0]);
3004 path->nodes[0] = right; 3025 path->nodes[0] = right;
@@ -3006,29 +3027,21 @@ again:
3006 path->slots[1] += 1; 3027 path->slots[1] += 1;
3007 } else { 3028 } else {
3008 btrfs_set_header_nritems(right, 0); 3029 btrfs_set_header_nritems(right, 0);
3009 wret = insert_ptr(trans, root, path, 3030 insert_ptr(trans, root, path, &disk_key, right->start,
3010 &disk_key,
3011 right->start,
3012 path->slots[1], 1); 3031 path->slots[1], 1);
3013 if (wret)
3014 ret = wret;
3015 btrfs_tree_unlock(path->nodes[0]); 3032 btrfs_tree_unlock(path->nodes[0]);
3016 free_extent_buffer(path->nodes[0]); 3033 free_extent_buffer(path->nodes[0]);
3017 path->nodes[0] = right; 3034 path->nodes[0] = right;
3018 path->slots[0] = 0; 3035 path->slots[0] = 0;
3019 if (path->slots[1] == 0) { 3036 if (path->slots[1] == 0)
3020 wret = fixup_low_keys(trans, root, 3037 fixup_low_keys(trans, root, path,
3021 path, &disk_key, 1); 3038 &disk_key, 1);
3022 if (wret)
3023 ret = wret;
3024 }
3025 } 3039 }
3026 btrfs_mark_buffer_dirty(right); 3040 btrfs_mark_buffer_dirty(right);
3027 return ret; 3041 return ret;
3028 } 3042 }
3029 3043
3030 ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems); 3044 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
3031 BUG_ON(ret);
3032 3045
3033 if (split == 2) { 3046 if (split == 2) {
3034 BUG_ON(num_doubles != 0); 3047 BUG_ON(num_doubles != 0);
@@ -3036,7 +3049,7 @@ again:
3036 goto again; 3049 goto again;
3037 } 3050 }
3038 3051
3039 return ret; 3052 return 0;
3040 3053
3041push_for_double: 3054push_for_double:
3042 push_for_double_split(trans, root, path, data_size); 3055 push_for_double_split(trans, root, path, data_size);
@@ -3238,11 +3251,9 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3238 return ret; 3251 return ret;
3239 3252
3240 path->slots[0]++; 3253 path->slots[0]++;
3241 ret = setup_items_for_insert(trans, root, path, new_key, &item_size, 3254 setup_items_for_insert(trans, root, path, new_key, &item_size,
3242 item_size, item_size + 3255 item_size, item_size +
3243 sizeof(struct btrfs_item), 1); 3256 sizeof(struct btrfs_item), 1);
3244 BUG_ON(ret);
3245
3246 leaf = path->nodes[0]; 3257 leaf = path->nodes[0];
3247 memcpy_extent_buffer(leaf, 3258 memcpy_extent_buffer(leaf,
3248 btrfs_item_ptr_offset(leaf, path->slots[0]), 3259 btrfs_item_ptr_offset(leaf, path->slots[0]),
@@ -3257,10 +3268,10 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3257 * off the end of the item or if we shift the item to chop bytes off 3268 * off the end of the item or if we shift the item to chop bytes off
3258 * the front. 3269 * the front.
3259 */ 3270 */
3260int btrfs_truncate_item(struct btrfs_trans_handle *trans, 3271void btrfs_truncate_item(struct btrfs_trans_handle *trans,
3261 struct btrfs_root *root, 3272 struct btrfs_root *root,
3262 struct btrfs_path *path, 3273 struct btrfs_path *path,
3263 u32 new_size, int from_end) 3274 u32 new_size, int from_end)
3264{ 3275{
3265 int slot; 3276 int slot;
3266 struct extent_buffer *leaf; 3277 struct extent_buffer *leaf;
@@ -3271,13 +3282,16 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3271 unsigned int old_size; 3282 unsigned int old_size;
3272 unsigned int size_diff; 3283 unsigned int size_diff;
3273 int i; 3284 int i;
3285 struct btrfs_map_token token;
3286
3287 btrfs_init_map_token(&token);
3274 3288
3275 leaf = path->nodes[0]; 3289 leaf = path->nodes[0];
3276 slot = path->slots[0]; 3290 slot = path->slots[0];
3277 3291
3278 old_size = btrfs_item_size_nr(leaf, slot); 3292 old_size = btrfs_item_size_nr(leaf, slot);
3279 if (old_size == new_size) 3293 if (old_size == new_size)
3280 return 0; 3294 return;
3281 3295
3282 nritems = btrfs_header_nritems(leaf); 3296 nritems = btrfs_header_nritems(leaf);
3283 data_end = leaf_data_end(root, leaf); 3297 data_end = leaf_data_end(root, leaf);
@@ -3297,8 +3311,9 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3297 u32 ioff; 3311 u32 ioff;
3298 item = btrfs_item_nr(leaf, i); 3312 item = btrfs_item_nr(leaf, i);
3299 3313
3300 ioff = btrfs_item_offset(leaf, item); 3314 ioff = btrfs_token_item_offset(leaf, item, &token);
3301 btrfs_set_item_offset(leaf, item, ioff + size_diff); 3315 btrfs_set_token_item_offset(leaf, item,
3316 ioff + size_diff, &token);
3302 } 3317 }
3303 3318
3304 /* shift the data */ 3319 /* shift the data */
@@ -3350,15 +3365,14 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3350 btrfs_print_leaf(root, leaf); 3365 btrfs_print_leaf(root, leaf);
3351 BUG(); 3366 BUG();
3352 } 3367 }
3353 return 0;
3354} 3368}
3355 3369
3356/* 3370/*
3357 * make the item pointed to by the path bigger, data_size is the new size. 3371 * make the item pointed to by the path bigger, data_size is the new size.
3358 */ 3372 */
3359int btrfs_extend_item(struct btrfs_trans_handle *trans, 3373void btrfs_extend_item(struct btrfs_trans_handle *trans,
3360 struct btrfs_root *root, struct btrfs_path *path, 3374 struct btrfs_root *root, struct btrfs_path *path,
3361 u32 data_size) 3375 u32 data_size)
3362{ 3376{
3363 int slot; 3377 int slot;
3364 struct extent_buffer *leaf; 3378 struct extent_buffer *leaf;
@@ -3368,6 +3382,9 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
3368 unsigned int old_data; 3382 unsigned int old_data;
3369 unsigned int old_size; 3383 unsigned int old_size;
3370 int i; 3384 int i;
3385 struct btrfs_map_token token;
3386
3387 btrfs_init_map_token(&token);
3371 3388
3372 leaf = path->nodes[0]; 3389 leaf = path->nodes[0];
3373 3390
@@ -3397,8 +3414,9 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
3397 u32 ioff; 3414 u32 ioff;
3398 item = btrfs_item_nr(leaf, i); 3415 item = btrfs_item_nr(leaf, i);
3399 3416
3400 ioff = btrfs_item_offset(leaf, item); 3417 ioff = btrfs_token_item_offset(leaf, item, &token);
3401 btrfs_set_item_offset(leaf, item, ioff - data_size); 3418 btrfs_set_token_item_offset(leaf, item,
3419 ioff - data_size, &token);
3402 } 3420 }
3403 3421
3404 /* shift the data */ 3422 /* shift the data */
@@ -3416,7 +3434,6 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
3416 btrfs_print_leaf(root, leaf); 3434 btrfs_print_leaf(root, leaf);
3417 BUG(); 3435 BUG();
3418 } 3436 }
3419 return 0;
3420} 3437}
3421 3438
3422/* 3439/*
@@ -3441,6 +3458,9 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3441 unsigned int data_end; 3458 unsigned int data_end;
3442 struct btrfs_disk_key disk_key; 3459 struct btrfs_disk_key disk_key;
3443 struct btrfs_key found_key; 3460 struct btrfs_key found_key;
3461 struct btrfs_map_token token;
3462
3463 btrfs_init_map_token(&token);
3444 3464
3445 for (i = 0; i < nr; i++) { 3465 for (i = 0; i < nr; i++) {
3446 if (total_size + data_size[i] + sizeof(struct btrfs_item) > 3466 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
@@ -3506,8 +3526,9 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3506 u32 ioff; 3526 u32 ioff;
3507 3527
3508 item = btrfs_item_nr(leaf, i); 3528 item = btrfs_item_nr(leaf, i);
3509 ioff = btrfs_item_offset(leaf, item); 3529 ioff = btrfs_token_item_offset(leaf, item, &token);
3510 btrfs_set_item_offset(leaf, item, ioff - total_data); 3530 btrfs_set_token_item_offset(leaf, item,
3531 ioff - total_data, &token);
3511 } 3532 }
3512 /* shift the items */ 3533 /* shift the items */
3513 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr), 3534 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
@@ -3534,9 +3555,10 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3534 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i); 3555 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3535 btrfs_set_item_key(leaf, &disk_key, slot + i); 3556 btrfs_set_item_key(leaf, &disk_key, slot + i);
3536 item = btrfs_item_nr(leaf, slot + i); 3557 item = btrfs_item_nr(leaf, slot + i);
3537 btrfs_set_item_offset(leaf, item, data_end - data_size[i]); 3558 btrfs_set_token_item_offset(leaf, item,
3559 data_end - data_size[i], &token);
3538 data_end -= data_size[i]; 3560 data_end -= data_size[i];
3539 btrfs_set_item_size(leaf, item, data_size[i]); 3561 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
3540 } 3562 }
3541 btrfs_set_header_nritems(leaf, nritems + nr); 3563 btrfs_set_header_nritems(leaf, nritems + nr);
3542 btrfs_mark_buffer_dirty(leaf); 3564 btrfs_mark_buffer_dirty(leaf);
@@ -3544,7 +3566,7 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3544 ret = 0; 3566 ret = 0;
3545 if (slot == 0) { 3567 if (slot == 0) {
3546 btrfs_cpu_key_to_disk(&disk_key, cpu_key); 3568 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3547 ret = fixup_low_keys(trans, root, path, &disk_key, 1); 3569 fixup_low_keys(trans, root, path, &disk_key, 1);
3548 } 3570 }
3549 3571
3550 if (btrfs_leaf_free_space(root, leaf) < 0) { 3572 if (btrfs_leaf_free_space(root, leaf) < 0) {
@@ -3562,19 +3584,21 @@ out:
3562 * to save stack depth by doing the bulk of the work in a function 3584 * to save stack depth by doing the bulk of the work in a function
3563 * that doesn't call btrfs_search_slot 3585 * that doesn't call btrfs_search_slot
3564 */ 3586 */
3565int setup_items_for_insert(struct btrfs_trans_handle *trans, 3587void setup_items_for_insert(struct btrfs_trans_handle *trans,
3566 struct btrfs_root *root, struct btrfs_path *path, 3588 struct btrfs_root *root, struct btrfs_path *path,
3567 struct btrfs_key *cpu_key, u32 *data_size, 3589 struct btrfs_key *cpu_key, u32 *data_size,
3568 u32 total_data, u32 total_size, int nr) 3590 u32 total_data, u32 total_size, int nr)
3569{ 3591{
3570 struct btrfs_item *item; 3592 struct btrfs_item *item;
3571 int i; 3593 int i;
3572 u32 nritems; 3594 u32 nritems;
3573 unsigned int data_end; 3595 unsigned int data_end;
3574 struct btrfs_disk_key disk_key; 3596 struct btrfs_disk_key disk_key;
3575 int ret;
3576 struct extent_buffer *leaf; 3597 struct extent_buffer *leaf;
3577 int slot; 3598 int slot;
3599 struct btrfs_map_token token;
3600
3601 btrfs_init_map_token(&token);
3578 3602
3579 leaf = path->nodes[0]; 3603 leaf = path->nodes[0];
3580 slot = path->slots[0]; 3604 slot = path->slots[0];
@@ -3606,8 +3630,9 @@ int setup_items_for_insert(struct btrfs_trans_handle *trans,
3606 u32 ioff; 3630 u32 ioff;
3607 3631
3608 item = btrfs_item_nr(leaf, i); 3632 item = btrfs_item_nr(leaf, i);
3609 ioff = btrfs_item_offset(leaf, item); 3633 ioff = btrfs_token_item_offset(leaf, item, &token);
3610 btrfs_set_item_offset(leaf, item, ioff - total_data); 3634 btrfs_set_token_item_offset(leaf, item,
3635 ioff - total_data, &token);
3611 } 3636 }
3612 /* shift the items */ 3637 /* shift the items */
3613 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr), 3638 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
@@ -3626,17 +3651,17 @@ int setup_items_for_insert(struct btrfs_trans_handle *trans,
3626 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i); 3651 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3627 btrfs_set_item_key(leaf, &disk_key, slot + i); 3652 btrfs_set_item_key(leaf, &disk_key, slot + i);
3628 item = btrfs_item_nr(leaf, slot + i); 3653 item = btrfs_item_nr(leaf, slot + i);
3629 btrfs_set_item_offset(leaf, item, data_end - data_size[i]); 3654 btrfs_set_token_item_offset(leaf, item,
3655 data_end - data_size[i], &token);
3630 data_end -= data_size[i]; 3656 data_end -= data_size[i];
3631 btrfs_set_item_size(leaf, item, data_size[i]); 3657 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
3632 } 3658 }
3633 3659
3634 btrfs_set_header_nritems(leaf, nritems + nr); 3660 btrfs_set_header_nritems(leaf, nritems + nr);
3635 3661
3636 ret = 0;
3637 if (slot == 0) { 3662 if (slot == 0) {
3638 btrfs_cpu_key_to_disk(&disk_key, cpu_key); 3663 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3639 ret = fixup_low_keys(trans, root, path, &disk_key, 1); 3664 fixup_low_keys(trans, root, path, &disk_key, 1);
3640 } 3665 }
3641 btrfs_unlock_up_safe(path, 1); 3666 btrfs_unlock_up_safe(path, 1);
3642 btrfs_mark_buffer_dirty(leaf); 3667 btrfs_mark_buffer_dirty(leaf);
@@ -3645,7 +3670,6 @@ int setup_items_for_insert(struct btrfs_trans_handle *trans,
3645 btrfs_print_leaf(root, leaf); 3670 btrfs_print_leaf(root, leaf);
3646 BUG(); 3671 BUG();
3647 } 3672 }
3648 return ret;
3649} 3673}
3650 3674
3651/* 3675/*
@@ -3672,16 +3696,14 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3672 if (ret == 0) 3696 if (ret == 0)
3673 return -EEXIST; 3697 return -EEXIST;
3674 if (ret < 0) 3698 if (ret < 0)
3675 goto out; 3699 return ret;
3676 3700
3677 slot = path->slots[0]; 3701 slot = path->slots[0];
3678 BUG_ON(slot < 0); 3702 BUG_ON(slot < 0);
3679 3703
3680 ret = setup_items_for_insert(trans, root, path, cpu_key, data_size, 3704 setup_items_for_insert(trans, root, path, cpu_key, data_size,
3681 total_data, total_size, nr); 3705 total_data, total_size, nr);
3682 3706 return 0;
3683out:
3684 return ret;
3685} 3707}
3686 3708
3687/* 3709/*
@@ -3717,13 +3739,11 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3717 * the tree should have been previously balanced so the deletion does not 3739 * the tree should have been previously balanced so the deletion does not
3718 * empty a node. 3740 * empty a node.
3719 */ 3741 */
3720static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3742static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3721 struct btrfs_path *path, int level, int slot) 3743 struct btrfs_path *path, int level, int slot)
3722{ 3744{
3723 struct extent_buffer *parent = path->nodes[level]; 3745 struct extent_buffer *parent = path->nodes[level];
3724 u32 nritems; 3746 u32 nritems;
3725 int ret = 0;
3726 int wret;
3727 3747
3728 nritems = btrfs_header_nritems(parent); 3748 nritems = btrfs_header_nritems(parent);
3729 if (slot != nritems - 1) { 3749 if (slot != nritems - 1) {
@@ -3743,12 +3763,9 @@ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3743 struct btrfs_disk_key disk_key; 3763 struct btrfs_disk_key disk_key;
3744 3764
3745 btrfs_node_key(parent, &disk_key, 0); 3765 btrfs_node_key(parent, &disk_key, 0);
3746 wret = fixup_low_keys(trans, root, path, &disk_key, level + 1); 3766 fixup_low_keys(trans, root, path, &disk_key, level + 1);
3747 if (wret)
3748 ret = wret;
3749 } 3767 }
3750 btrfs_mark_buffer_dirty(parent); 3768 btrfs_mark_buffer_dirty(parent);
3751 return ret;
3752} 3769}
3753 3770
3754/* 3771/*
@@ -3761,17 +3778,13 @@ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3761 * The path must have already been setup for deleting the leaf, including 3778 * The path must have already been setup for deleting the leaf, including
3762 * all the proper balancing. path->nodes[1] must be locked. 3779 * all the proper balancing. path->nodes[1] must be locked.
3763 */ 3780 */
3764static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans, 3781static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
3765 struct btrfs_root *root, 3782 struct btrfs_root *root,
3766 struct btrfs_path *path, 3783 struct btrfs_path *path,
3767 struct extent_buffer *leaf) 3784 struct extent_buffer *leaf)
3768{ 3785{
3769 int ret;
3770
3771 WARN_ON(btrfs_header_generation(leaf) != trans->transid); 3786 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
3772 ret = del_ptr(trans, root, path, 1, path->slots[1]); 3787 del_ptr(trans, root, path, 1, path->slots[1]);
3773 if (ret)
3774 return ret;
3775 3788
3776 /* 3789 /*
3777 * btrfs_free_extent is expensive, we want to make sure we 3790 * btrfs_free_extent is expensive, we want to make sure we
@@ -3781,8 +3794,9 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3781 3794
3782 root_sub_used(root, leaf->len); 3795 root_sub_used(root, leaf->len);
3783 3796
3797 extent_buffer_get(leaf);
3784 btrfs_free_tree_block(trans, root, leaf, 0, 1, 0); 3798 btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
3785 return 0; 3799 free_extent_buffer_stale(leaf);
3786} 3800}
3787/* 3801/*
3788 * delete the item at the leaf level in path. If that empties 3802 * delete the item at the leaf level in path. If that empties
@@ -3799,6 +3813,9 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3799 int wret; 3813 int wret;
3800 int i; 3814 int i;
3801 u32 nritems; 3815 u32 nritems;
3816 struct btrfs_map_token token;
3817
3818 btrfs_init_map_token(&token);
3802 3819
3803 leaf = path->nodes[0]; 3820 leaf = path->nodes[0];
3804 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1); 3821 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
@@ -3820,8 +3837,9 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3820 u32 ioff; 3837 u32 ioff;
3821 3838
3822 item = btrfs_item_nr(leaf, i); 3839 item = btrfs_item_nr(leaf, i);
3823 ioff = btrfs_item_offset(leaf, item); 3840 ioff = btrfs_token_item_offset(leaf, item, &token);
3824 btrfs_set_item_offset(leaf, item, ioff + dsize); 3841 btrfs_set_token_item_offset(leaf, item,
3842 ioff + dsize, &token);
3825 } 3843 }
3826 3844
3827 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot), 3845 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
@@ -3839,8 +3857,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3839 } else { 3857 } else {
3840 btrfs_set_path_blocking(path); 3858 btrfs_set_path_blocking(path);
3841 clean_tree_block(trans, root, leaf); 3859 clean_tree_block(trans, root, leaf);
3842 ret = btrfs_del_leaf(trans, root, path, leaf); 3860 btrfs_del_leaf(trans, root, path, leaf);
3843 BUG_ON(ret);
3844 } 3861 }
3845 } else { 3862 } else {
3846 int used = leaf_space_used(leaf, 0, nritems); 3863 int used = leaf_space_used(leaf, 0, nritems);
@@ -3848,10 +3865,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3848 struct btrfs_disk_key disk_key; 3865 struct btrfs_disk_key disk_key;
3849 3866
3850 btrfs_item_key(leaf, &disk_key, 0); 3867 btrfs_item_key(leaf, &disk_key, 0);
3851 wret = fixup_low_keys(trans, root, path, 3868 fixup_low_keys(trans, root, path, &disk_key, 1);
3852 &disk_key, 1);
3853 if (wret)
3854 ret = wret;
3855 } 3869 }
3856 3870
3857 /* delete the leaf if it is mostly empty */ 3871 /* delete the leaf if it is mostly empty */
@@ -3879,9 +3893,9 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3879 3893
3880 if (btrfs_header_nritems(leaf) == 0) { 3894 if (btrfs_header_nritems(leaf) == 0) {
3881 path->slots[1] = slot; 3895 path->slots[1] = slot;
3882 ret = btrfs_del_leaf(trans, root, path, leaf); 3896 btrfs_del_leaf(trans, root, path, leaf);
3883 BUG_ON(ret);
3884 free_extent_buffer(leaf); 3897 free_extent_buffer(leaf);
3898 ret = 0;
3885 } else { 3899 } else {
3886 /* if we're still in the path, make sure 3900 /* if we're still in the path, make sure
3887 * we're dirty. Otherwise, one of the 3901 * we're dirty. Otherwise, one of the
@@ -4059,18 +4073,18 @@ find_next_key:
4059 path->slots[level] = slot; 4073 path->slots[level] = slot;
4060 if (level == path->lowest_level) { 4074 if (level == path->lowest_level) {
4061 ret = 0; 4075 ret = 0;
4062 unlock_up(path, level, 1); 4076 unlock_up(path, level, 1, 0, NULL);
4063 goto out; 4077 goto out;
4064 } 4078 }
4065 btrfs_set_path_blocking(path); 4079 btrfs_set_path_blocking(path);
4066 cur = read_node_slot(root, cur, slot); 4080 cur = read_node_slot(root, cur, slot);
4067 BUG_ON(!cur); 4081 BUG_ON(!cur); /* -ENOMEM */
4068 4082
4069 btrfs_tree_read_lock(cur); 4083 btrfs_tree_read_lock(cur);
4070 4084
4071 path->locks[level - 1] = BTRFS_READ_LOCK; 4085 path->locks[level - 1] = BTRFS_READ_LOCK;
4072 path->nodes[level - 1] = cur; 4086 path->nodes[level - 1] = cur;
4073 unlock_up(path, level, 1); 4087 unlock_up(path, level, 1, 0, NULL);
4074 btrfs_clear_path_blocking(path, NULL, 0); 4088 btrfs_clear_path_blocking(path, NULL, 0);
4075 } 4089 }
4076out: 4090out:
@@ -4306,7 +4320,7 @@ again:
4306 } 4320 }
4307 ret = 0; 4321 ret = 0;
4308done: 4322done:
4309 unlock_up(path, 0, 1); 4323 unlock_up(path, 0, 1, 0, NULL);
4310 path->leave_spinning = old_spinning; 4324 path->leave_spinning = old_spinning;
4311 if (!old_spinning) 4325 if (!old_spinning)
4312 btrfs_set_path_blocking(path); 4326 btrfs_set_path_blocking(path);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 80b6486fd5e6..5b8ef8eb3521 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -48,6 +48,8 @@ struct btrfs_ordered_sum;
48 48
49#define BTRFS_MAGIC "_BHRfS_M" 49#define BTRFS_MAGIC "_BHRfS_M"
50 50
51#define BTRFS_MAX_MIRRORS 2
52
51#define BTRFS_MAX_LEVEL 8 53#define BTRFS_MAX_LEVEL 8
52 54
53#define BTRFS_COMPAT_EXTENT_TREE_V0 55#define BTRFS_COMPAT_EXTENT_TREE_V0
@@ -138,6 +140,12 @@ struct btrfs_ordered_sum;
138#define BTRFS_EMPTY_SUBVOL_DIR_OBJECTID 2 140#define BTRFS_EMPTY_SUBVOL_DIR_OBJECTID 2
139 141
140/* 142/*
143 * the max metadata block size. This limit is somewhat artificial,
144 * but the memmove costs go through the roof for larger blocks.
145 */
146#define BTRFS_MAX_METADATA_BLOCKSIZE 65536
147
148/*
141 * we can actually store much bigger names, but lets not confuse the rest 149 * we can actually store much bigger names, but lets not confuse the rest
142 * of linux 150 * of linux
143 */ 151 */
@@ -461,6 +469,19 @@ struct btrfs_super_block {
461#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1) 469#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1)
462#define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2) 470#define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2)
463#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3) 471#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3)
472/*
473 * some patches floated around with a second compression method
474 * lets save that incompat here for when they do get in
475 * Note we don't actually support it, we're just reserving the
476 * number
477 */
478#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2 (1ULL << 4)
479
480/*
481 * older kernels tried to do bigger metadata blocks, but the
482 * code was pretty buggy. Lets not let them try anymore.
483 */
484#define BTRFS_FEATURE_INCOMPAT_BIG_METADATA (1ULL << 5)
464 485
465#define BTRFS_FEATURE_COMPAT_SUPP 0ULL 486#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
466#define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL 487#define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL
@@ -468,6 +489,7 @@ struct btrfs_super_block {
468 (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \ 489 (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \
469 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \ 490 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \
470 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \ 491 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
492 BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
471 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO) 493 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO)
472 494
473/* 495/*
@@ -829,6 +851,21 @@ struct btrfs_csum_item {
829 */ 851 */
830#define BTRFS_AVAIL_ALLOC_BIT_SINGLE (1ULL << 48) 852#define BTRFS_AVAIL_ALLOC_BIT_SINGLE (1ULL << 48)
831 853
854#define BTRFS_EXTENDED_PROFILE_MASK (BTRFS_BLOCK_GROUP_PROFILE_MASK | \
855 BTRFS_AVAIL_ALLOC_BIT_SINGLE)
856
857static inline u64 chunk_to_extended(u64 flags)
858{
859 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)
860 flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE;
861
862 return flags;
863}
864static inline u64 extended_to_chunk(u64 flags)
865{
866 return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
867}
868
832struct btrfs_block_group_item { 869struct btrfs_block_group_item {
833 __le64 used; 870 __le64 used;
834 __le64 chunk_objectid; 871 __le64 chunk_objectid;
@@ -1503,6 +1540,7 @@ struct btrfs_ioctl_defrag_range_args {
1503#define BTRFS_MOUNT_SKIP_BALANCE (1 << 19) 1540#define BTRFS_MOUNT_SKIP_BALANCE (1 << 19)
1504#define BTRFS_MOUNT_CHECK_INTEGRITY (1 << 20) 1541#define BTRFS_MOUNT_CHECK_INTEGRITY (1 << 20)
1505#define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21) 1542#define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21)
1543#define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 22)
1506 1544
1507#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) 1545#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
1508#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) 1546#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -1526,6 +1564,17 @@ struct btrfs_ioctl_defrag_range_args {
1526 1564
1527#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31) 1565#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31)
1528 1566
1567struct btrfs_map_token {
1568 struct extent_buffer *eb;
1569 char *kaddr;
1570 unsigned long offset;
1571};
1572
1573static inline void btrfs_init_map_token (struct btrfs_map_token *token)
1574{
1575 memset(token, 0, sizeof(*token));
1576}
1577
1529/* some macros to generate set/get funcs for the struct fields. This 1578/* some macros to generate set/get funcs for the struct fields. This
1530 * assumes there is a lefoo_to_cpu for every type, so lets make a simple 1579 * assumes there is a lefoo_to_cpu for every type, so lets make a simple
1531 * one for u8: 1580 * one for u8:
@@ -1549,20 +1598,22 @@ struct btrfs_ioctl_defrag_range_args {
1549#ifndef BTRFS_SETGET_FUNCS 1598#ifndef BTRFS_SETGET_FUNCS
1550#define BTRFS_SETGET_FUNCS(name, type, member, bits) \ 1599#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
1551u##bits btrfs_##name(struct extent_buffer *eb, type *s); \ 1600u##bits btrfs_##name(struct extent_buffer *eb, type *s); \
1601u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, struct btrfs_map_token *token); \
1602void btrfs_set_token_##name(struct extent_buffer *eb, type *s, u##bits val, struct btrfs_map_token *token);\
1552void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val); 1603void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);
1553#endif 1604#endif
1554 1605
1555#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ 1606#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
1556static inline u##bits btrfs_##name(struct extent_buffer *eb) \ 1607static inline u##bits btrfs_##name(struct extent_buffer *eb) \
1557{ \ 1608{ \
1558 type *p = page_address(eb->first_page); \ 1609 type *p = page_address(eb->pages[0]); \
1559 u##bits res = le##bits##_to_cpu(p->member); \ 1610 u##bits res = le##bits##_to_cpu(p->member); \
1560 return res; \ 1611 return res; \
1561} \ 1612} \
1562static inline void btrfs_set_##name(struct extent_buffer *eb, \ 1613static inline void btrfs_set_##name(struct extent_buffer *eb, \
1563 u##bits val) \ 1614 u##bits val) \
1564{ \ 1615{ \
1565 type *p = page_address(eb->first_page); \ 1616 type *p = page_address(eb->pages[0]); \
1566 p->member = cpu_to_le##bits(val); \ 1617 p->member = cpu_to_le##bits(val); \
1567} 1618}
1568 1619
@@ -2466,8 +2517,7 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2466 struct btrfs_root *root, 2517 struct btrfs_root *root,
2467 u64 num_bytes, u64 min_alloc_size, 2518 u64 num_bytes, u64 min_alloc_size,
2468 u64 empty_size, u64 hint_byte, 2519 u64 empty_size, u64 hint_byte,
2469 u64 search_end, struct btrfs_key *ins, 2520 struct btrfs_key *ins, u64 data);
2470 u64 data);
2471int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2521int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2472 struct extent_buffer *buf, int full_backref, int for_cow); 2522 struct extent_buffer *buf, int full_backref, int for_cow);
2473int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2523int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -2484,8 +2534,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
2484int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len); 2534int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
2485int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root, 2535int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
2486 u64 start, u64 len); 2536 u64 start, u64 len);
2487int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 2537void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
2488 struct btrfs_root *root); 2538 struct btrfs_root *root);
2489int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, 2539int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
2490 struct btrfs_root *root); 2540 struct btrfs_root *root);
2491int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, 2541int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
@@ -2548,8 +2598,8 @@ void btrfs_block_rsv_release(struct btrfs_root *root,
2548 u64 num_bytes); 2598 u64 num_bytes);
2549int btrfs_set_block_group_ro(struct btrfs_root *root, 2599int btrfs_set_block_group_ro(struct btrfs_root *root,
2550 struct btrfs_block_group_cache *cache); 2600 struct btrfs_block_group_cache *cache);
2551int btrfs_set_block_group_rw(struct btrfs_root *root, 2601void btrfs_set_block_group_rw(struct btrfs_root *root,
2552 struct btrfs_block_group_cache *cache); 2602 struct btrfs_block_group_cache *cache);
2553void btrfs_put_block_group_cache(struct btrfs_fs_info *info); 2603void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
2554u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); 2604u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
2555int btrfs_error_unpin_extent_range(struct btrfs_root *root, 2605int btrfs_error_unpin_extent_range(struct btrfs_root *root,
@@ -2568,9 +2618,9 @@ int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2);
2568int btrfs_previous_item(struct btrfs_root *root, 2618int btrfs_previous_item(struct btrfs_root *root,
2569 struct btrfs_path *path, u64 min_objectid, 2619 struct btrfs_path *path, u64 min_objectid,
2570 int type); 2620 int type);
2571int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, 2621void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2572 struct btrfs_root *root, struct btrfs_path *path, 2622 struct btrfs_root *root, struct btrfs_path *path,
2573 struct btrfs_key *new_key); 2623 struct btrfs_key *new_key);
2574struct extent_buffer *btrfs_root_node(struct btrfs_root *root); 2624struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
2575struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root); 2625struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
2576int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 2626int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
@@ -2590,12 +2640,13 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
2590 struct extent_buffer **cow_ret, u64 new_root_objectid); 2640 struct extent_buffer **cow_ret, u64 new_root_objectid);
2591int btrfs_block_can_be_shared(struct btrfs_root *root, 2641int btrfs_block_can_be_shared(struct btrfs_root *root,
2592 struct extent_buffer *buf); 2642 struct extent_buffer *buf);
2593int btrfs_extend_item(struct btrfs_trans_handle *trans, struct btrfs_root 2643void btrfs_extend_item(struct btrfs_trans_handle *trans,
2594 *root, struct btrfs_path *path, u32 data_size); 2644 struct btrfs_root *root, struct btrfs_path *path,
2595int btrfs_truncate_item(struct btrfs_trans_handle *trans, 2645 u32 data_size);
2596 struct btrfs_root *root, 2646void btrfs_truncate_item(struct btrfs_trans_handle *trans,
2597 struct btrfs_path *path, 2647 struct btrfs_root *root,
2598 u32 new_size, int from_end); 2648 struct btrfs_path *path,
2649 u32 new_size, int from_end);
2599int btrfs_split_item(struct btrfs_trans_handle *trans, 2650int btrfs_split_item(struct btrfs_trans_handle *trans,
2600 struct btrfs_root *root, 2651 struct btrfs_root *root,
2601 struct btrfs_path *path, 2652 struct btrfs_path *path,
@@ -2629,10 +2680,10 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
2629 return btrfs_del_items(trans, root, path, path->slots[0], 1); 2680 return btrfs_del_items(trans, root, path, path->slots[0], 1);
2630} 2681}
2631 2682
2632int setup_items_for_insert(struct btrfs_trans_handle *trans, 2683void setup_items_for_insert(struct btrfs_trans_handle *trans,
2633 struct btrfs_root *root, struct btrfs_path *path, 2684 struct btrfs_root *root, struct btrfs_path *path,
2634 struct btrfs_key *cpu_key, u32 *data_size, 2685 struct btrfs_key *cpu_key, u32 *data_size,
2635 u32 total_data, u32 total_size, int nr); 2686 u32 total_data, u32 total_size, int nr);
2636int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root 2687int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
2637 *root, struct btrfs_key *key, void *data, u32 data_size); 2688 *root, struct btrfs_key *key, void *data, u32 data_size);
2638int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 2689int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
@@ -2659,9 +2710,9 @@ static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
2659} 2710}
2660int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); 2711int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
2661int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); 2712int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
2662void btrfs_drop_snapshot(struct btrfs_root *root, 2713int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
2663 struct btrfs_block_rsv *block_rsv, int update_ref, 2714 struct btrfs_block_rsv *block_rsv,
2664 int for_reloc); 2715 int update_ref, int for_reloc);
2665int btrfs_drop_subtree(struct btrfs_trans_handle *trans, 2716int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
2666 struct btrfs_root *root, 2717 struct btrfs_root *root,
2667 struct extent_buffer *node, 2718 struct extent_buffer *node,
@@ -2687,24 +2738,6 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info)
2687 kfree(fs_info->super_for_commit); 2738 kfree(fs_info->super_for_commit);
2688 kfree(fs_info); 2739 kfree(fs_info);
2689} 2740}
2690/**
2691 * profile_is_valid - tests whether a given profile is valid and reduced
2692 * @flags: profile to validate
2693 * @extended: if true @flags is treated as an extended profile
2694 */
2695static inline int profile_is_valid(u64 flags, int extended)
2696{
2697 u64 mask = ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
2698
2699 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2700 if (extended)
2701 mask &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2702
2703 if (flags & mask)
2704 return 0;
2705 /* true if zero or exactly one bit set */
2706 return (flags & (~flags + 1)) == flags;
2707}
2708 2741
2709/* root-item.c */ 2742/* root-item.c */
2710int btrfs_find_root_ref(struct btrfs_root *tree_root, 2743int btrfs_find_root_ref(struct btrfs_root *tree_root,
@@ -2723,9 +2756,10 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2723int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root 2756int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
2724 *root, struct btrfs_key *key, struct btrfs_root_item 2757 *root, struct btrfs_key *key, struct btrfs_root_item
2725 *item); 2758 *item);
2726int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root 2759int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
2727 *root, struct btrfs_key *key, struct btrfs_root_item 2760 struct btrfs_root *root,
2728 *item); 2761 struct btrfs_key *key,
2762 struct btrfs_root_item *item);
2729int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct 2763int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
2730 btrfs_root_item *item, struct btrfs_key *key); 2764 btrfs_root_item *item, struct btrfs_key *key);
2731int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid); 2765int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
@@ -2909,7 +2943,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root);
2909void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, 2943void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2910 struct btrfs_root *root); 2944 struct btrfs_root *root);
2911int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size); 2945int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
2912int btrfs_invalidate_inodes(struct btrfs_root *root); 2946void btrfs_invalidate_inodes(struct btrfs_root *root);
2913void btrfs_add_delayed_iput(struct inode *inode); 2947void btrfs_add_delayed_iput(struct inode *inode);
2914void btrfs_run_delayed_iputs(struct btrfs_root *root); 2948void btrfs_run_delayed_iputs(struct btrfs_root *root);
2915int btrfs_prealloc_file_range(struct inode *inode, int mode, 2949int btrfs_prealloc_file_range(struct inode *inode, int mode,
@@ -2961,13 +2995,41 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
2961/* super.c */ 2995/* super.c */
2962int btrfs_parse_options(struct btrfs_root *root, char *options); 2996int btrfs_parse_options(struct btrfs_root *root, char *options);
2963int btrfs_sync_fs(struct super_block *sb, int wait); 2997int btrfs_sync_fs(struct super_block *sb, int wait);
2998void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...);
2964void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, 2999void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
2965 unsigned int line, int errno); 3000 unsigned int line, int errno, const char *fmt, ...);
3001
3002void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
3003 struct btrfs_root *root, const char *function,
3004 unsigned int line, int errno);
3005
3006#define btrfs_abort_transaction(trans, root, errno) \
3007do { \
3008 __btrfs_abort_transaction(trans, root, __func__, \
3009 __LINE__, errno); \
3010} while (0)
2966 3011
2967#define btrfs_std_error(fs_info, errno) \ 3012#define btrfs_std_error(fs_info, errno) \
2968do { \ 3013do { \
2969 if ((errno)) \ 3014 if ((errno)) \
2970 __btrfs_std_error((fs_info), __func__, __LINE__, (errno));\ 3015 __btrfs_std_error((fs_info), __func__, \
3016 __LINE__, (errno), NULL); \
3017} while (0)
3018
3019#define btrfs_error(fs_info, errno, fmt, args...) \
3020do { \
3021 __btrfs_std_error((fs_info), __func__, __LINE__, \
3022 (errno), fmt, ##args); \
3023} while (0)
3024
3025void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
3026 unsigned int line, int errno, const char *fmt, ...);
3027
3028#define btrfs_panic(fs_info, errno, fmt, args...) \
3029do { \
3030 struct btrfs_fs_info *_i = (fs_info); \
3031 __btrfs_panic(_i, __func__, __LINE__, errno, fmt, ##args); \
3032 BUG_ON(!(_i->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)); \
2971} while (0) 3033} while (0)
2972 3034
2973/* acl.c */ 3035/* acl.c */
@@ -3003,16 +3065,17 @@ void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
3003void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans, 3065void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
3004 struct btrfs_pending_snapshot *pending, 3066 struct btrfs_pending_snapshot *pending,
3005 u64 *bytes_to_reserve); 3067 u64 *bytes_to_reserve);
3006void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, 3068int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
3007 struct btrfs_pending_snapshot *pending); 3069 struct btrfs_pending_snapshot *pending);
3008 3070
3009/* scrub.c */ 3071/* scrub.c */
3010int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, 3072int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
3011 struct btrfs_scrub_progress *progress, int readonly); 3073 struct btrfs_scrub_progress *progress, int readonly);
3012int btrfs_scrub_pause(struct btrfs_root *root); 3074void btrfs_scrub_pause(struct btrfs_root *root);
3013int btrfs_scrub_pause_super(struct btrfs_root *root); 3075void btrfs_scrub_pause_super(struct btrfs_root *root);
3014int btrfs_scrub_continue(struct btrfs_root *root); 3076void btrfs_scrub_continue(struct btrfs_root *root);
3015int btrfs_scrub_continue_super(struct btrfs_root *root); 3077void btrfs_scrub_continue_super(struct btrfs_root *root);
3078int __btrfs_scrub_cancel(struct btrfs_fs_info *info);
3016int btrfs_scrub_cancel(struct btrfs_root *root); 3079int btrfs_scrub_cancel(struct btrfs_root *root);
3017int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev); 3080int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev);
3018int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid); 3081int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid);
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index fe4cd0f1cef1..03e3748d84d0 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -115,6 +115,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
115 return NULL; 115 return NULL;
116} 116}
117 117
118/* Will return either the node or PTR_ERR(-ENOMEM) */
118static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( 119static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
119 struct inode *inode) 120 struct inode *inode)
120{ 121{
@@ -836,10 +837,8 @@ static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
836 btrfs_clear_path_blocking(path, NULL, 0); 837 btrfs_clear_path_blocking(path, NULL, 0);
837 838
838 /* insert the keys of the items */ 839 /* insert the keys of the items */
839 ret = setup_items_for_insert(trans, root, path, keys, data_size, 840 setup_items_for_insert(trans, root, path, keys, data_size,
840 total_data_size, total_size, nitems); 841 total_data_size, total_size, nitems);
841 if (ret)
842 goto error;
843 842
844 /* insert the dir index items */ 843 /* insert the dir index items */
845 slot = path->slots[0]; 844 slot = path->slots[0];
@@ -1108,16 +1107,25 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1108 return 0; 1107 return 0;
1109} 1108}
1110 1109
1111/* Called when committing the transaction. */ 1110/*
1111 * Called when committing the transaction.
1112 * Returns 0 on success.
1113 * Returns < 0 on error and returns with an aborted transaction with any
1114 * outstanding delayed items cleaned up.
1115 */
1112int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, 1116int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1113 struct btrfs_root *root) 1117 struct btrfs_root *root)
1114{ 1118{
1119 struct btrfs_root *curr_root = root;
1115 struct btrfs_delayed_root *delayed_root; 1120 struct btrfs_delayed_root *delayed_root;
1116 struct btrfs_delayed_node *curr_node, *prev_node; 1121 struct btrfs_delayed_node *curr_node, *prev_node;
1117 struct btrfs_path *path; 1122 struct btrfs_path *path;
1118 struct btrfs_block_rsv *block_rsv; 1123 struct btrfs_block_rsv *block_rsv;
1119 int ret = 0; 1124 int ret = 0;
1120 1125
1126 if (trans->aborted)
1127 return -EIO;
1128
1121 path = btrfs_alloc_path(); 1129 path = btrfs_alloc_path();
1122 if (!path) 1130 if (!path)
1123 return -ENOMEM; 1131 return -ENOMEM;
@@ -1130,17 +1138,18 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1130 1138
1131 curr_node = btrfs_first_delayed_node(delayed_root); 1139 curr_node = btrfs_first_delayed_node(delayed_root);
1132 while (curr_node) { 1140 while (curr_node) {
1133 root = curr_node->root; 1141 curr_root = curr_node->root;
1134 ret = btrfs_insert_delayed_items(trans, path, root, 1142 ret = btrfs_insert_delayed_items(trans, path, curr_root,
1135 curr_node); 1143 curr_node);
1136 if (!ret) 1144 if (!ret)
1137 ret = btrfs_delete_delayed_items(trans, path, root, 1145 ret = btrfs_delete_delayed_items(trans, path,
1138 curr_node); 1146 curr_root, curr_node);
1139 if (!ret) 1147 if (!ret)
1140 ret = btrfs_update_delayed_inode(trans, root, path, 1148 ret = btrfs_update_delayed_inode(trans, curr_root,
1141 curr_node); 1149 path, curr_node);
1142 if (ret) { 1150 if (ret) {
1143 btrfs_release_delayed_node(curr_node); 1151 btrfs_release_delayed_node(curr_node);
1152 btrfs_abort_transaction(trans, root, ret);
1144 break; 1153 break;
1145 } 1154 }
1146 1155
@@ -1151,6 +1160,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1151 1160
1152 btrfs_free_path(path); 1161 btrfs_free_path(path);
1153 trans->block_rsv = block_rsv; 1162 trans->block_rsv = block_rsv;
1163
1154 return ret; 1164 return ret;
1155} 1165}
1156 1166
@@ -1371,6 +1381,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
1371 btrfs_wq_run_delayed_node(delayed_root, root, 0); 1381 btrfs_wq_run_delayed_node(delayed_root, root, 0);
1372} 1382}
1373 1383
1384/* Will return 0 or -ENOMEM */
1374int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, 1385int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1375 struct btrfs_root *root, const char *name, 1386 struct btrfs_root *root, const char *name,
1376 int name_len, struct inode *dir, 1387 int name_len, struct inode *dir,
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 66e4f29505a3..69f22e3ab3bc 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -420,7 +420,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
420 * this does all the dirty work in terms of maintaining the correct 420 * this does all the dirty work in terms of maintaining the correct
421 * overall modification count. 421 * overall modification count.
422 */ 422 */
423static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info, 423static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
424 struct btrfs_trans_handle *trans, 424 struct btrfs_trans_handle *trans,
425 struct btrfs_delayed_ref_node *ref, 425 struct btrfs_delayed_ref_node *ref,
426 u64 bytenr, u64 num_bytes, 426 u64 bytenr, u64 num_bytes,
@@ -487,20 +487,19 @@ static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,
487 * we've updated the existing ref, free the newly 487 * we've updated the existing ref, free the newly
488 * allocated ref 488 * allocated ref
489 */ 489 */
490 kfree(ref); 490 kfree(head_ref);
491 } else { 491 } else {
492 delayed_refs->num_heads++; 492 delayed_refs->num_heads++;
493 delayed_refs->num_heads_ready++; 493 delayed_refs->num_heads_ready++;
494 delayed_refs->num_entries++; 494 delayed_refs->num_entries++;
495 trans->delayed_ref_updates++; 495 trans->delayed_ref_updates++;
496 } 496 }
497 return 0;
498} 497}
499 498
500/* 499/*
501 * helper to insert a delayed tree ref into the rbtree. 500 * helper to insert a delayed tree ref into the rbtree.
502 */ 501 */
503static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info, 502static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
504 struct btrfs_trans_handle *trans, 503 struct btrfs_trans_handle *trans,
505 struct btrfs_delayed_ref_node *ref, 504 struct btrfs_delayed_ref_node *ref,
506 u64 bytenr, u64 num_bytes, u64 parent, 505 u64 bytenr, u64 num_bytes, u64 parent,
@@ -549,18 +548,17 @@ static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
549 * we've updated the existing ref, free the newly 548 * we've updated the existing ref, free the newly
550 * allocated ref 549 * allocated ref
551 */ 550 */
552 kfree(ref); 551 kfree(full_ref);
553 } else { 552 } else {
554 delayed_refs->num_entries++; 553 delayed_refs->num_entries++;
555 trans->delayed_ref_updates++; 554 trans->delayed_ref_updates++;
556 } 555 }
557 return 0;
558} 556}
559 557
560/* 558/*
561 * helper to insert a delayed data ref into the rbtree. 559 * helper to insert a delayed data ref into the rbtree.
562 */ 560 */
563static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info, 561static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
564 struct btrfs_trans_handle *trans, 562 struct btrfs_trans_handle *trans,
565 struct btrfs_delayed_ref_node *ref, 563 struct btrfs_delayed_ref_node *ref,
566 u64 bytenr, u64 num_bytes, u64 parent, 564 u64 bytenr, u64 num_bytes, u64 parent,
@@ -611,12 +609,11 @@ static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
611 * we've updated the existing ref, free the newly 609 * we've updated the existing ref, free the newly
612 * allocated ref 610 * allocated ref
613 */ 611 */
614 kfree(ref); 612 kfree(full_ref);
615 } else { 613 } else {
616 delayed_refs->num_entries++; 614 delayed_refs->num_entries++;
617 trans->delayed_ref_updates++; 615 trans->delayed_ref_updates++;
618 } 616 }
619 return 0;
620} 617}
621 618
622/* 619/*
@@ -634,7 +631,6 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
634 struct btrfs_delayed_tree_ref *ref; 631 struct btrfs_delayed_tree_ref *ref;
635 struct btrfs_delayed_ref_head *head_ref; 632 struct btrfs_delayed_ref_head *head_ref;
636 struct btrfs_delayed_ref_root *delayed_refs; 633 struct btrfs_delayed_ref_root *delayed_refs;
637 int ret;
638 634
639 BUG_ON(extent_op && extent_op->is_data); 635 BUG_ON(extent_op && extent_op->is_data);
640 ref = kmalloc(sizeof(*ref), GFP_NOFS); 636 ref = kmalloc(sizeof(*ref), GFP_NOFS);
@@ -656,14 +652,12 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
656 * insert both the head node and the new ref without dropping 652 * insert both the head node and the new ref without dropping
657 * the spin lock 653 * the spin lock
658 */ 654 */
659 ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr, 655 add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
660 num_bytes, action, 0); 656 num_bytes, action, 0);
661 BUG_ON(ret);
662 657
663 ret = add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr, 658 add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
664 num_bytes, parent, ref_root, level, action, 659 num_bytes, parent, ref_root, level, action,
665 for_cow); 660 for_cow);
666 BUG_ON(ret);
667 if (!need_ref_seq(for_cow, ref_root) && 661 if (!need_ref_seq(for_cow, ref_root) &&
668 waitqueue_active(&delayed_refs->seq_wait)) 662 waitqueue_active(&delayed_refs->seq_wait))
669 wake_up(&delayed_refs->seq_wait); 663 wake_up(&delayed_refs->seq_wait);
@@ -685,7 +679,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
685 struct btrfs_delayed_data_ref *ref; 679 struct btrfs_delayed_data_ref *ref;
686 struct btrfs_delayed_ref_head *head_ref; 680 struct btrfs_delayed_ref_head *head_ref;
687 struct btrfs_delayed_ref_root *delayed_refs; 681 struct btrfs_delayed_ref_root *delayed_refs;
688 int ret;
689 682
690 BUG_ON(extent_op && !extent_op->is_data); 683 BUG_ON(extent_op && !extent_op->is_data);
691 ref = kmalloc(sizeof(*ref), GFP_NOFS); 684 ref = kmalloc(sizeof(*ref), GFP_NOFS);
@@ -707,14 +700,12 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
707 * insert both the head node and the new ref without dropping 700 * insert both the head node and the new ref without dropping
708 * the spin lock 701 * the spin lock
709 */ 702 */
710 ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr, 703 add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
711 num_bytes, action, 1); 704 num_bytes, action, 1);
712 BUG_ON(ret);
713 705
714 ret = add_delayed_data_ref(fs_info, trans, &ref->node, bytenr, 706 add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
715 num_bytes, parent, ref_root, owner, offset, 707 num_bytes, parent, ref_root, owner, offset,
716 action, for_cow); 708 action, for_cow);
717 BUG_ON(ret);
718 if (!need_ref_seq(for_cow, ref_root) && 709 if (!need_ref_seq(for_cow, ref_root) &&
719 waitqueue_active(&delayed_refs->seq_wait)) 710 waitqueue_active(&delayed_refs->seq_wait))
720 wake_up(&delayed_refs->seq_wait); 711 wake_up(&delayed_refs->seq_wait);
@@ -729,7 +720,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
729{ 720{
730 struct btrfs_delayed_ref_head *head_ref; 721 struct btrfs_delayed_ref_head *head_ref;
731 struct btrfs_delayed_ref_root *delayed_refs; 722 struct btrfs_delayed_ref_root *delayed_refs;
732 int ret;
733 723
734 head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS); 724 head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
735 if (!head_ref) 725 if (!head_ref)
@@ -740,10 +730,9 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
740 delayed_refs = &trans->transaction->delayed_refs; 730 delayed_refs = &trans->transaction->delayed_refs;
741 spin_lock(&delayed_refs->lock); 731 spin_lock(&delayed_refs->lock);
742 732
743 ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr, 733 add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
744 num_bytes, BTRFS_UPDATE_DELAYED_HEAD, 734 num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
745 extent_op->is_data); 735 extent_op->is_data);
746 BUG_ON(ret);
747 736
748 if (waitqueue_active(&delayed_refs->seq_wait)) 737 if (waitqueue_active(&delayed_refs->seq_wait))
749 wake_up(&delayed_refs->seq_wait); 738 wake_up(&delayed_refs->seq_wait);
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 31d84e78129b..c1a074d0696f 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -49,9 +49,8 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
49 di = btrfs_match_dir_item_name(root, path, name, name_len); 49 di = btrfs_match_dir_item_name(root, path, name, name_len);
50 if (di) 50 if (di)
51 return ERR_PTR(-EEXIST); 51 return ERR_PTR(-EEXIST);
52 ret = btrfs_extend_item(trans, root, path, data_size); 52 btrfs_extend_item(trans, root, path, data_size);
53 } 53 } else if (ret < 0)
54 if (ret < 0)
55 return ERR_PTR(ret); 54 return ERR_PTR(ret);
56 WARN_ON(ret > 0); 55 WARN_ON(ret > 0);
57 leaf = path->nodes[0]; 56 leaf = path->nodes[0];
@@ -116,6 +115,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
116 * 'location' is the key to stuff into the directory item, 'type' is the 115 * 'location' is the key to stuff into the directory item, 'type' is the
117 * type of the inode we're pointing to, and 'index' is the sequence number 116 * type of the inode we're pointing to, and 'index' is the sequence number
118 * to use for the second index (if one is created). 117 * to use for the second index (if one is created).
118 * Will return 0 or -ENOMEM
119 */ 119 */
120int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root 120int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
121 *root, const char *name, int name_len, 121 *root, const char *name, int name_len,
@@ -383,8 +383,8 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
383 start = btrfs_item_ptr_offset(leaf, path->slots[0]); 383 start = btrfs_item_ptr_offset(leaf, path->slots[0]);
384 memmove_extent_buffer(leaf, ptr, ptr + sub_item_len, 384 memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
385 item_len - (ptr + sub_item_len - start)); 385 item_len - (ptr + sub_item_len - start));
386 ret = btrfs_truncate_item(trans, root, path, 386 btrfs_truncate_item(trans, root, path,
387 item_len - sub_item_len, 1); 387 item_len - sub_item_len, 1);
388 } 388 }
389 return ret; 389 return ret;
390} 390}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 534266fe505f..20196f411206 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -48,20 +48,19 @@
48static struct extent_io_ops btree_extent_io_ops; 48static struct extent_io_ops btree_extent_io_ops;
49static void end_workqueue_fn(struct btrfs_work *work); 49static void end_workqueue_fn(struct btrfs_work *work);
50static void free_fs_root(struct btrfs_root *root); 50static void free_fs_root(struct btrfs_root *root);
51static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info, 51static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
52 int read_only); 52 int read_only);
53static int btrfs_destroy_ordered_operations(struct btrfs_root *root); 53static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
54static int btrfs_destroy_ordered_extents(struct btrfs_root *root); 54static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
55static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, 55static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
56 struct btrfs_root *root); 56 struct btrfs_root *root);
57static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t); 57static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
58static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root); 58static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
59static int btrfs_destroy_marked_extents(struct btrfs_root *root, 59static int btrfs_destroy_marked_extents(struct btrfs_root *root,
60 struct extent_io_tree *dirty_pages, 60 struct extent_io_tree *dirty_pages,
61 int mark); 61 int mark);
62static int btrfs_destroy_pinned_extent(struct btrfs_root *root, 62static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
63 struct extent_io_tree *pinned_extents); 63 struct extent_io_tree *pinned_extents);
64static int btrfs_cleanup_transaction(struct btrfs_root *root);
65 64
66/* 65/*
67 * end_io_wq structs are used to do processing in task context when an IO is 66 * end_io_wq structs are used to do processing in task context when an IO is
@@ -99,6 +98,7 @@ struct async_submit_bio {
99 */ 98 */
100 u64 bio_offset; 99 u64 bio_offset;
101 struct btrfs_work work; 100 struct btrfs_work work;
101 int error;
102}; 102};
103 103
104/* 104/*
@@ -332,8 +332,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
332 return 0; 332 return 0;
333 333
334 lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, 334 lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
335 0, &cached_state, GFP_NOFS); 335 0, &cached_state);
336 if (extent_buffer_uptodate(io_tree, eb, cached_state) && 336 if (extent_buffer_uptodate(eb) &&
337 btrfs_header_generation(eb) == parent_transid) { 337 btrfs_header_generation(eb) == parent_transid) {
338 ret = 0; 338 ret = 0;
339 goto out; 339 goto out;
@@ -344,7 +344,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
344 (unsigned long long)parent_transid, 344 (unsigned long long)parent_transid,
345 (unsigned long long)btrfs_header_generation(eb)); 345 (unsigned long long)btrfs_header_generation(eb));
346 ret = 1; 346 ret = 1;
347 clear_extent_buffer_uptodate(io_tree, eb, &cached_state); 347 clear_extent_buffer_uptodate(eb);
348out: 348out:
349 unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, 349 unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
350 &cached_state, GFP_NOFS); 350 &cached_state, GFP_NOFS);
@@ -360,9 +360,11 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
360 u64 start, u64 parent_transid) 360 u64 start, u64 parent_transid)
361{ 361{
362 struct extent_io_tree *io_tree; 362 struct extent_io_tree *io_tree;
363 int failed = 0;
363 int ret; 364 int ret;
364 int num_copies = 0; 365 int num_copies = 0;
365 int mirror_num = 0; 366 int mirror_num = 0;
367 int failed_mirror = 0;
366 368
367 clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); 369 clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
368 io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; 370 io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
@@ -370,9 +372,8 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
370 ret = read_extent_buffer_pages(io_tree, eb, start, 372 ret = read_extent_buffer_pages(io_tree, eb, start,
371 WAIT_COMPLETE, 373 WAIT_COMPLETE,
372 btree_get_extent, mirror_num); 374 btree_get_extent, mirror_num);
373 if (!ret && 375 if (!ret && !verify_parent_transid(io_tree, eb, parent_transid))
374 !verify_parent_transid(io_tree, eb, parent_transid)) 376 break;
375 return ret;
376 377
377 /* 378 /*
378 * This buffer's crc is fine, but its contents are corrupted, so 379 * This buffer's crc is fine, but its contents are corrupted, so
@@ -380,18 +381,31 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
380 * any less wrong. 381 * any less wrong.
381 */ 382 */
382 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags)) 383 if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
383 return ret; 384 break;
385
386 if (!failed_mirror) {
387 failed = 1;
388 printk(KERN_ERR "failed mirror was %d\n", eb->failed_mirror);
389 failed_mirror = eb->failed_mirror;
390 }
384 391
385 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, 392 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
386 eb->start, eb->len); 393 eb->start, eb->len);
387 if (num_copies == 1) 394 if (num_copies == 1)
388 return ret; 395 break;
389 396
390 mirror_num++; 397 mirror_num++;
398 if (mirror_num == failed_mirror)
399 mirror_num++;
400
391 if (mirror_num > num_copies) 401 if (mirror_num > num_copies)
392 return ret; 402 break;
393 } 403 }
394 return -EIO; 404
405 if (failed && !ret)
406 repair_eb_io_failure(root, eb, failed_mirror);
407
408 return ret;
395} 409}
396 410
397/* 411/*
@@ -404,50 +418,27 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
404 struct extent_io_tree *tree; 418 struct extent_io_tree *tree;
405 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 419 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
406 u64 found_start; 420 u64 found_start;
407 unsigned long len;
408 struct extent_buffer *eb; 421 struct extent_buffer *eb;
409 int ret;
410 422
411 tree = &BTRFS_I(page->mapping->host)->io_tree; 423 tree = &BTRFS_I(page->mapping->host)->io_tree;
412 424
413 if (page->private == EXTENT_PAGE_PRIVATE) { 425 eb = (struct extent_buffer *)page->private;
414 WARN_ON(1); 426 if (page != eb->pages[0])
415 goto out; 427 return 0;
416 }
417 if (!page->private) {
418 WARN_ON(1);
419 goto out;
420 }
421 len = page->private >> 2;
422 WARN_ON(len == 0);
423
424 eb = alloc_extent_buffer(tree, start, len, page);
425 if (eb == NULL) {
426 WARN_ON(1);
427 goto out;
428 }
429 ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
430 btrfs_header_generation(eb));
431 BUG_ON(ret);
432 WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN));
433
434 found_start = btrfs_header_bytenr(eb); 428 found_start = btrfs_header_bytenr(eb);
435 if (found_start != start) { 429 if (found_start != start) {
436 WARN_ON(1); 430 WARN_ON(1);
437 goto err; 431 return 0;
438 } 432 }
439 if (eb->first_page != page) { 433 if (eb->pages[0] != page) {
440 WARN_ON(1); 434 WARN_ON(1);
441 goto err; 435 return 0;
442 } 436 }
443 if (!PageUptodate(page)) { 437 if (!PageUptodate(page)) {
444 WARN_ON(1); 438 WARN_ON(1);
445 goto err; 439 return 0;
446 } 440 }
447 csum_tree_block(root, eb, 0); 441 csum_tree_block(root, eb, 0);
448err:
449 free_extent_buffer(eb);
450out:
451 return 0; 442 return 0;
452} 443}
453 444
@@ -537,34 +528,74 @@ static noinline int check_leaf(struct btrfs_root *root,
537 return 0; 528 return 0;
538} 529}
539 530
531struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
532 struct page *page, int max_walk)
533{
534 struct extent_buffer *eb;
535 u64 start = page_offset(page);
536 u64 target = start;
537 u64 min_start;
538
539 if (start < max_walk)
540 min_start = 0;
541 else
542 min_start = start - max_walk;
543
544 while (start >= min_start) {
545 eb = find_extent_buffer(tree, start, 0);
546 if (eb) {
547 /*
548 * we found an extent buffer and it contains our page
549 * horray!
550 */
551 if (eb->start <= target &&
552 eb->start + eb->len > target)
553 return eb;
554
555 /* we found an extent buffer that wasn't for us */
556 free_extent_buffer(eb);
557 return NULL;
558 }
559 if (start == 0)
560 break;
561 start -= PAGE_CACHE_SIZE;
562 }
563 return NULL;
564}
565
540static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, 566static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
541 struct extent_state *state) 567 struct extent_state *state)
542{ 568{
543 struct extent_io_tree *tree; 569 struct extent_io_tree *tree;
544 u64 found_start; 570 u64 found_start;
545 int found_level; 571 int found_level;
546 unsigned long len;
547 struct extent_buffer *eb; 572 struct extent_buffer *eb;
548 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; 573 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
549 int ret = 0; 574 int ret = 0;
575 int reads_done;
550 576
551 tree = &BTRFS_I(page->mapping->host)->io_tree;
552 if (page->private == EXTENT_PAGE_PRIVATE)
553 goto out;
554 if (!page->private) 577 if (!page->private)
555 goto out; 578 goto out;
556 579
557 len = page->private >> 2; 580 tree = &BTRFS_I(page->mapping->host)->io_tree;
558 WARN_ON(len == 0); 581 eb = (struct extent_buffer *)page->private;
559 582
560 eb = alloc_extent_buffer(tree, start, len, page); 583 /* the pending IO might have been the only thing that kept this buffer
561 if (eb == NULL) { 584 * in memory. Make sure we have a ref for all this other checks
585 */
586 extent_buffer_get(eb);
587
588 reads_done = atomic_dec_and_test(&eb->io_pages);
589 if (!reads_done)
590 goto err;
591
592 if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
562 ret = -EIO; 593 ret = -EIO;
563 goto out; 594 goto err;
564 } 595 }
565 596
566 found_start = btrfs_header_bytenr(eb); 597 found_start = btrfs_header_bytenr(eb);
567 if (found_start != start) { 598 if (found_start != eb->start) {
568 printk_ratelimited(KERN_INFO "btrfs bad tree block start " 599 printk_ratelimited(KERN_INFO "btrfs bad tree block start "
569 "%llu %llu\n", 600 "%llu %llu\n",
570 (unsigned long long)found_start, 601 (unsigned long long)found_start,
@@ -572,13 +603,6 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
572 ret = -EIO; 603 ret = -EIO;
573 goto err; 604 goto err;
574 } 605 }
575 if (eb->first_page != page) {
576 printk(KERN_INFO "btrfs bad first page %lu %lu\n",
577 eb->first_page->index, page->index);
578 WARN_ON(1);
579 ret = -EIO;
580 goto err;
581 }
582 if (check_tree_block_fsid(root, eb)) { 606 if (check_tree_block_fsid(root, eb)) {
583 printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n", 607 printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
584 (unsigned long long)eb->start); 608 (unsigned long long)eb->start);
@@ -606,48 +630,31 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
606 ret = -EIO; 630 ret = -EIO;
607 } 631 }
608 632
609 end = min_t(u64, eb->len, PAGE_CACHE_SIZE); 633 if (!ret)
610 end = eb->start + end - 1; 634 set_extent_buffer_uptodate(eb);
611err: 635err:
612 if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) { 636 if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
613 clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags); 637 clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
614 btree_readahead_hook(root, eb, eb->start, ret); 638 btree_readahead_hook(root, eb, eb->start, ret);
615 } 639 }
616 640
641 if (ret)
642 clear_extent_buffer_uptodate(eb);
617 free_extent_buffer(eb); 643 free_extent_buffer(eb);
618out: 644out:
619 return ret; 645 return ret;
620} 646}
621 647
622static int btree_io_failed_hook(struct bio *failed_bio, 648static int btree_io_failed_hook(struct page *page, int failed_mirror)
623 struct page *page, u64 start, u64 end,
624 int mirror_num, struct extent_state *state)
625{ 649{
626 struct extent_io_tree *tree;
627 unsigned long len;
628 struct extent_buffer *eb; 650 struct extent_buffer *eb;
629 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; 651 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
630 652
631 tree = &BTRFS_I(page->mapping->host)->io_tree; 653 eb = (struct extent_buffer *)page->private;
632 if (page->private == EXTENT_PAGE_PRIVATE) 654 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
633 goto out; 655 eb->failed_mirror = failed_mirror;
634 if (!page->private) 656 if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
635 goto out;
636
637 len = page->private >> 2;
638 WARN_ON(len == 0);
639
640 eb = alloc_extent_buffer(tree, start, len, page);
641 if (eb == NULL)
642 goto out;
643
644 if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
645 clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
646 btree_readahead_hook(root, eb, eb->start, -EIO); 657 btree_readahead_hook(root, eb, eb->start, -EIO);
647 }
648 free_extent_buffer(eb);
649
650out:
651 return -EIO; /* we fixed nothing */ 658 return -EIO; /* we fixed nothing */
652} 659}
653 660
@@ -719,11 +726,14 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
719static void run_one_async_start(struct btrfs_work *work) 726static void run_one_async_start(struct btrfs_work *work)
720{ 727{
721 struct async_submit_bio *async; 728 struct async_submit_bio *async;
729 int ret;
722 730
723 async = container_of(work, struct async_submit_bio, work); 731 async = container_of(work, struct async_submit_bio, work);
724 async->submit_bio_start(async->inode, async->rw, async->bio, 732 ret = async->submit_bio_start(async->inode, async->rw, async->bio,
725 async->mirror_num, async->bio_flags, 733 async->mirror_num, async->bio_flags,
726 async->bio_offset); 734 async->bio_offset);
735 if (ret)
736 async->error = ret;
727} 737}
728 738
729static void run_one_async_done(struct btrfs_work *work) 739static void run_one_async_done(struct btrfs_work *work)
@@ -744,6 +754,12 @@ static void run_one_async_done(struct btrfs_work *work)
744 waitqueue_active(&fs_info->async_submit_wait)) 754 waitqueue_active(&fs_info->async_submit_wait))
745 wake_up(&fs_info->async_submit_wait); 755 wake_up(&fs_info->async_submit_wait);
746 756
757 /* If an error occured we just want to clean up the bio and move on */
758 if (async->error) {
759 bio_endio(async->bio, async->error);
760 return;
761 }
762
747 async->submit_bio_done(async->inode, async->rw, async->bio, 763 async->submit_bio_done(async->inode, async->rw, async->bio,
748 async->mirror_num, async->bio_flags, 764 async->mirror_num, async->bio_flags,
749 async->bio_offset); 765 async->bio_offset);
@@ -785,6 +801,8 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
785 async->bio_flags = bio_flags; 801 async->bio_flags = bio_flags;
786 async->bio_offset = bio_offset; 802 async->bio_offset = bio_offset;
787 803
804 async->error = 0;
805
788 atomic_inc(&fs_info->nr_async_submits); 806 atomic_inc(&fs_info->nr_async_submits);
789 807
790 if (rw & REQ_SYNC) 808 if (rw & REQ_SYNC)
@@ -806,15 +824,18 @@ static int btree_csum_one_bio(struct bio *bio)
806 struct bio_vec *bvec = bio->bi_io_vec; 824 struct bio_vec *bvec = bio->bi_io_vec;
807 int bio_index = 0; 825 int bio_index = 0;
808 struct btrfs_root *root; 826 struct btrfs_root *root;
827 int ret = 0;
809 828
810 WARN_ON(bio->bi_vcnt <= 0); 829 WARN_ON(bio->bi_vcnt <= 0);
811 while (bio_index < bio->bi_vcnt) { 830 while (bio_index < bio->bi_vcnt) {
812 root = BTRFS_I(bvec->bv_page->mapping->host)->root; 831 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
813 csum_dirty_buffer(root, bvec->bv_page); 832 ret = csum_dirty_buffer(root, bvec->bv_page);
833 if (ret)
834 break;
814 bio_index++; 835 bio_index++;
815 bvec++; 836 bvec++;
816 } 837 }
817 return 0; 838 return ret;
818} 839}
819 840
820static int __btree_submit_bio_start(struct inode *inode, int rw, 841static int __btree_submit_bio_start(struct inode *inode, int rw,
@@ -826,8 +847,7 @@ static int __btree_submit_bio_start(struct inode *inode, int rw,
826 * when we're called for a write, we're already in the async 847 * when we're called for a write, we're already in the async
827 * submission context. Just jump into btrfs_map_bio 848 * submission context. Just jump into btrfs_map_bio
828 */ 849 */
829 btree_csum_one_bio(bio); 850 return btree_csum_one_bio(bio);
830 return 0;
831} 851}
832 852
833static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, 853static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
@@ -847,15 +867,16 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
847{ 867{
848 int ret; 868 int ret;
849 869
850 ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
851 bio, 1);
852 BUG_ON(ret);
853
854 if (!(rw & REQ_WRITE)) { 870 if (!(rw & REQ_WRITE)) {
871
855 /* 872 /*
856 * called for a read, do the setup so that checksum validation 873 * called for a read, do the setup so that checksum validation
857 * can happen in the async kernel threads 874 * can happen in the async kernel threads
858 */ 875 */
876 ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
877 bio, 1);
878 if (ret)
879 return ret;
859 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, 880 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
860 mirror_num, 0); 881 mirror_num, 0);
861 } 882 }
@@ -893,34 +914,6 @@ static int btree_migratepage(struct address_space *mapping,
893} 914}
894#endif 915#endif
895 916
896static int btree_writepage(struct page *page, struct writeback_control *wbc)
897{
898 struct extent_io_tree *tree;
899 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
900 struct extent_buffer *eb;
901 int was_dirty;
902
903 tree = &BTRFS_I(page->mapping->host)->io_tree;
904 if (!(current->flags & PF_MEMALLOC)) {
905 return extent_write_full_page(tree, page,
906 btree_get_extent, wbc);
907 }
908
909 redirty_page_for_writepage(wbc, page);
910 eb = btrfs_find_tree_block(root, page_offset(page), PAGE_CACHE_SIZE);
911 WARN_ON(!eb);
912
913 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
914 if (!was_dirty) {
915 spin_lock(&root->fs_info->delalloc_lock);
916 root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
917 spin_unlock(&root->fs_info->delalloc_lock);
918 }
919 free_extent_buffer(eb);
920
921 unlock_page(page);
922 return 0;
923}
924 917
925static int btree_writepages(struct address_space *mapping, 918static int btree_writepages(struct address_space *mapping,
926 struct writeback_control *wbc) 919 struct writeback_control *wbc)
@@ -940,7 +933,7 @@ static int btree_writepages(struct address_space *mapping,
940 if (num_dirty < thresh) 933 if (num_dirty < thresh)
941 return 0; 934 return 0;
942 } 935 }
943 return extent_writepages(tree, mapping, btree_get_extent, wbc); 936 return btree_write_cache_pages(mapping, wbc);
944} 937}
945 938
946static int btree_readpage(struct file *file, struct page *page) 939static int btree_readpage(struct file *file, struct page *page)
@@ -952,16 +945,8 @@ static int btree_readpage(struct file *file, struct page *page)
952 945
953static int btree_releasepage(struct page *page, gfp_t gfp_flags) 946static int btree_releasepage(struct page *page, gfp_t gfp_flags)
954{ 947{
955 struct extent_io_tree *tree;
956 struct extent_map_tree *map;
957 int ret;
958
959 if (PageWriteback(page) || PageDirty(page)) 948 if (PageWriteback(page) || PageDirty(page))
960 return 0; 949 return 0;
961
962 tree = &BTRFS_I(page->mapping->host)->io_tree;
963 map = &BTRFS_I(page->mapping->host)->extent_tree;
964
965 /* 950 /*
966 * We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing 951 * We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
967 * slab allocation from alloc_extent_state down the callchain where 952 * slab allocation from alloc_extent_state down the callchain where
@@ -969,18 +954,7 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
969 */ 954 */
970 gfp_flags &= ~GFP_SLAB_BUG_MASK; 955 gfp_flags &= ~GFP_SLAB_BUG_MASK;
971 956
972 ret = try_release_extent_state(map, tree, page, gfp_flags); 957 return try_release_extent_buffer(page, gfp_flags);
973 if (!ret)
974 return 0;
975
976 ret = try_release_extent_buffer(tree, page);
977 if (ret == 1) {
978 ClearPagePrivate(page);
979 set_page_private(page, 0);
980 page_cache_release(page);
981 }
982
983 return ret;
984} 958}
985 959
986static void btree_invalidatepage(struct page *page, unsigned long offset) 960static void btree_invalidatepage(struct page *page, unsigned long offset)
@@ -998,15 +972,28 @@ static void btree_invalidatepage(struct page *page, unsigned long offset)
998 } 972 }
999} 973}
1000 974
975static int btree_set_page_dirty(struct page *page)
976{
977 struct extent_buffer *eb;
978
979 BUG_ON(!PagePrivate(page));
980 eb = (struct extent_buffer *)page->private;
981 BUG_ON(!eb);
982 BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
983 BUG_ON(!atomic_read(&eb->refs));
984 btrfs_assert_tree_locked(eb);
985 return __set_page_dirty_nobuffers(page);
986}
987
1001static const struct address_space_operations btree_aops = { 988static const struct address_space_operations btree_aops = {
1002 .readpage = btree_readpage, 989 .readpage = btree_readpage,
1003 .writepage = btree_writepage,
1004 .writepages = btree_writepages, 990 .writepages = btree_writepages,
1005 .releasepage = btree_releasepage, 991 .releasepage = btree_releasepage,
1006 .invalidatepage = btree_invalidatepage, 992 .invalidatepage = btree_invalidatepage,
1007#ifdef CONFIG_MIGRATION 993#ifdef CONFIG_MIGRATION
1008 .migratepage = btree_migratepage, 994 .migratepage = btree_migratepage,
1009#endif 995#endif
996 .set_page_dirty = btree_set_page_dirty,
1010}; 997};
1011 998
1012int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize, 999int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
@@ -1049,7 +1036,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
1049 if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) { 1036 if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
1050 free_extent_buffer(buf); 1037 free_extent_buffer(buf);
1051 return -EIO; 1038 return -EIO;
1052 } else if (extent_buffer_uptodate(io_tree, buf, NULL)) { 1039 } else if (extent_buffer_uptodate(buf)) {
1053 *eb = buf; 1040 *eb = buf;
1054 } else { 1041 } else {
1055 free_extent_buffer(buf); 1042 free_extent_buffer(buf);
@@ -1074,20 +1061,20 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
1074 struct extent_buffer *eb; 1061 struct extent_buffer *eb;
1075 1062
1076 eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree, 1063 eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
1077 bytenr, blocksize, NULL); 1064 bytenr, blocksize);
1078 return eb; 1065 return eb;
1079} 1066}
1080 1067
1081 1068
1082int btrfs_write_tree_block(struct extent_buffer *buf) 1069int btrfs_write_tree_block(struct extent_buffer *buf)
1083{ 1070{
1084 return filemap_fdatawrite_range(buf->first_page->mapping, buf->start, 1071 return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
1085 buf->start + buf->len - 1); 1072 buf->start + buf->len - 1);
1086} 1073}
1087 1074
1088int btrfs_wait_tree_block_writeback(struct extent_buffer *buf) 1075int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
1089{ 1076{
1090 return filemap_fdatawait_range(buf->first_page->mapping, 1077 return filemap_fdatawait_range(buf->pages[0]->mapping,
1091 buf->start, buf->start + buf->len - 1); 1078 buf->start, buf->start + buf->len - 1);
1092} 1079}
1093 1080
@@ -1102,17 +1089,13 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1102 return NULL; 1089 return NULL;
1103 1090
1104 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); 1091 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1105
1106 if (ret == 0)
1107 set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
1108 return buf; 1092 return buf;
1109 1093
1110} 1094}
1111 1095
1112int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, 1096void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1113 struct extent_buffer *buf) 1097 struct extent_buffer *buf)
1114{ 1098{
1115 struct inode *btree_inode = root->fs_info->btree_inode;
1116 if (btrfs_header_generation(buf) == 1099 if (btrfs_header_generation(buf) ==
1117 root->fs_info->running_transaction->transid) { 1100 root->fs_info->running_transaction->transid) {
1118 btrfs_assert_tree_locked(buf); 1101 btrfs_assert_tree_locked(buf);
@@ -1121,23 +1104,27 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1121 spin_lock(&root->fs_info->delalloc_lock); 1104 spin_lock(&root->fs_info->delalloc_lock);
1122 if (root->fs_info->dirty_metadata_bytes >= buf->len) 1105 if (root->fs_info->dirty_metadata_bytes >= buf->len)
1123 root->fs_info->dirty_metadata_bytes -= buf->len; 1106 root->fs_info->dirty_metadata_bytes -= buf->len;
1124 else 1107 else {
1125 WARN_ON(1); 1108 spin_unlock(&root->fs_info->delalloc_lock);
1109 btrfs_panic(root->fs_info, -EOVERFLOW,
1110 "Can't clear %lu bytes from "
1111 " dirty_mdatadata_bytes (%lu)",
1112 buf->len,
1113 root->fs_info->dirty_metadata_bytes);
1114 }
1126 spin_unlock(&root->fs_info->delalloc_lock); 1115 spin_unlock(&root->fs_info->delalloc_lock);
1127 } 1116 }
1128 1117
1129 /* ugh, clear_extent_buffer_dirty needs to lock the page */ 1118 /* ugh, clear_extent_buffer_dirty needs to lock the page */
1130 btrfs_set_lock_blocking(buf); 1119 btrfs_set_lock_blocking(buf);
1131 clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, 1120 clear_extent_buffer_dirty(buf);
1132 buf);
1133 } 1121 }
1134 return 0;
1135} 1122}
1136 1123
1137static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, 1124static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1138 u32 stripesize, struct btrfs_root *root, 1125 u32 stripesize, struct btrfs_root *root,
1139 struct btrfs_fs_info *fs_info, 1126 struct btrfs_fs_info *fs_info,
1140 u64 objectid) 1127 u64 objectid)
1141{ 1128{
1142 root->node = NULL; 1129 root->node = NULL;
1143 root->commit_root = NULL; 1130 root->commit_root = NULL;
@@ -1189,13 +1176,12 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1189 root->defrag_running = 0; 1176 root->defrag_running = 0;
1190 root->root_key.objectid = objectid; 1177 root->root_key.objectid = objectid;
1191 root->anon_dev = 0; 1178 root->anon_dev = 0;
1192 return 0;
1193} 1179}
1194 1180
1195static int find_and_setup_root(struct btrfs_root *tree_root, 1181static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
1196 struct btrfs_fs_info *fs_info, 1182 struct btrfs_fs_info *fs_info,
1197 u64 objectid, 1183 u64 objectid,
1198 struct btrfs_root *root) 1184 struct btrfs_root *root)
1199{ 1185{
1200 int ret; 1186 int ret;
1201 u32 blocksize; 1187 u32 blocksize;
@@ -1208,7 +1194,8 @@ static int find_and_setup_root(struct btrfs_root *tree_root,
1208 &root->root_item, &root->root_key); 1194 &root->root_item, &root->root_key);
1209 if (ret > 0) 1195 if (ret > 0)
1210 return -ENOENT; 1196 return -ENOENT;
1211 BUG_ON(ret); 1197 else if (ret < 0)
1198 return ret;
1212 1199
1213 generation = btrfs_root_generation(&root->root_item); 1200 generation = btrfs_root_generation(&root->root_item);
1214 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item)); 1201 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
@@ -1377,7 +1364,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1377 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item), 1364 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1378 blocksize, generation); 1365 blocksize, generation);
1379 root->commit_root = btrfs_root_node(root); 1366 root->commit_root = btrfs_root_node(root);
1380 BUG_ON(!root->node); 1367 BUG_ON(!root->node); /* -ENOMEM */
1381out: 1368out:
1382 if (location->objectid != BTRFS_TREE_LOG_OBJECTID) { 1369 if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
1383 root->ref_cows = 1; 1370 root->ref_cows = 1;
@@ -1513,41 +1500,6 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1513 return 0; 1500 return 0;
1514} 1501}
1515 1502
1516static int bio_ready_for_csum(struct bio *bio)
1517{
1518 u64 length = 0;
1519 u64 buf_len = 0;
1520 u64 start = 0;
1521 struct page *page;
1522 struct extent_io_tree *io_tree = NULL;
1523 struct bio_vec *bvec;
1524 int i;
1525 int ret;
1526
1527 bio_for_each_segment(bvec, bio, i) {
1528 page = bvec->bv_page;
1529 if (page->private == EXTENT_PAGE_PRIVATE) {
1530 length += bvec->bv_len;
1531 continue;
1532 }
1533 if (!page->private) {
1534 length += bvec->bv_len;
1535 continue;
1536 }
1537 length = bvec->bv_len;
1538 buf_len = page->private >> 2;
1539 start = page_offset(page) + bvec->bv_offset;
1540 io_tree = &BTRFS_I(page->mapping->host)->io_tree;
1541 }
1542 /* are we fully contained in this bio? */
1543 if (buf_len <= length)
1544 return 1;
1545
1546 ret = extent_range_uptodate(io_tree, start + length,
1547 start + buf_len - 1);
1548 return ret;
1549}
1550
1551/* 1503/*
1552 * called by the kthread helper functions to finally call the bio end_io 1504 * called by the kthread helper functions to finally call the bio end_io
1553 * functions. This is where read checksum verification actually happens 1505 * functions. This is where read checksum verification actually happens
@@ -1563,17 +1515,6 @@ static void end_workqueue_fn(struct btrfs_work *work)
1563 bio = end_io_wq->bio; 1515 bio = end_io_wq->bio;
1564 fs_info = end_io_wq->info; 1516 fs_info = end_io_wq->info;
1565 1517
1566 /* metadata bio reads are special because the whole tree block must
1567 * be checksummed at once. This makes sure the entire block is in
1568 * ram and up to date before trying to verify things. For
1569 * blocksize <= pagesize, it is basically a noop
1570 */
1571 if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
1572 !bio_ready_for_csum(bio)) {
1573 btrfs_queue_worker(&fs_info->endio_meta_workers,
1574 &end_io_wq->work);
1575 return;
1576 }
1577 error = end_io_wq->error; 1518 error = end_io_wq->error;
1578 bio->bi_private = end_io_wq->private; 1519 bio->bi_private = end_io_wq->private;
1579 bio->bi_end_io = end_io_wq->end_io; 1520 bio->bi_end_io = end_io_wq->end_io;
@@ -1614,9 +1555,10 @@ static int transaction_kthread(void *arg)
1614 u64 transid; 1555 u64 transid;
1615 unsigned long now; 1556 unsigned long now;
1616 unsigned long delay; 1557 unsigned long delay;
1617 int ret; 1558 bool cannot_commit;
1618 1559
1619 do { 1560 do {
1561 cannot_commit = false;
1620 delay = HZ * 30; 1562 delay = HZ * 30;
1621 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); 1563 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1622 mutex_lock(&root->fs_info->transaction_kthread_mutex); 1564 mutex_lock(&root->fs_info->transaction_kthread_mutex);
@@ -1638,11 +1580,14 @@ static int transaction_kthread(void *arg)
1638 transid = cur->transid; 1580 transid = cur->transid;
1639 spin_unlock(&root->fs_info->trans_lock); 1581 spin_unlock(&root->fs_info->trans_lock);
1640 1582
1583 /* If the file system is aborted, this will always fail. */
1641 trans = btrfs_join_transaction(root); 1584 trans = btrfs_join_transaction(root);
1642 BUG_ON(IS_ERR(trans)); 1585 if (IS_ERR(trans)) {
1586 cannot_commit = true;
1587 goto sleep;
1588 }
1643 if (transid == trans->transid) { 1589 if (transid == trans->transid) {
1644 ret = btrfs_commit_transaction(trans, root); 1590 btrfs_commit_transaction(trans, root);
1645 BUG_ON(ret);
1646 } else { 1591 } else {
1647 btrfs_end_transaction(trans, root); 1592 btrfs_end_transaction(trans, root);
1648 } 1593 }
@@ -1653,7 +1598,8 @@ sleep:
1653 if (!try_to_freeze()) { 1598 if (!try_to_freeze()) {
1654 set_current_state(TASK_INTERRUPTIBLE); 1599 set_current_state(TASK_INTERRUPTIBLE);
1655 if (!kthread_should_stop() && 1600 if (!kthread_should_stop() &&
1656 !btrfs_transaction_blocked(root->fs_info)) 1601 (!btrfs_transaction_blocked(root->fs_info) ||
1602 cannot_commit))
1657 schedule_timeout(delay); 1603 schedule_timeout(delay);
1658 __set_current_state(TASK_RUNNING); 1604 __set_current_state(TASK_RUNNING);
1659 } 1605 }
@@ -2042,6 +1988,7 @@ int open_ctree(struct super_block *sb,
2042 RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); 1988 RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2043 extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, 1989 extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2044 fs_info->btree_inode->i_mapping); 1990 fs_info->btree_inode->i_mapping);
1991 BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
2045 extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree); 1992 extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2046 1993
2047 BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops; 1994 BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
@@ -2084,6 +2031,7 @@ int open_ctree(struct super_block *sb,
2084 __setup_root(4096, 4096, 4096, 4096, tree_root, 2031 __setup_root(4096, 4096, 4096, 4096, tree_root,
2085 fs_info, BTRFS_ROOT_TREE_OBJECTID); 2032 fs_info, BTRFS_ROOT_TREE_OBJECTID);
2086 2033
2034 invalidate_bdev(fs_devices->latest_bdev);
2087 bh = btrfs_read_dev_super(fs_devices->latest_bdev); 2035 bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2088 if (!bh) { 2036 if (!bh) {
2089 err = -EINVAL; 2037 err = -EINVAL;
@@ -2104,7 +2052,12 @@ int open_ctree(struct super_block *sb,
2104 /* check FS state, whether FS is broken. */ 2052 /* check FS state, whether FS is broken. */
2105 fs_info->fs_state |= btrfs_super_flags(disk_super); 2053 fs_info->fs_state |= btrfs_super_flags(disk_super);
2106 2054
2107 btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); 2055 ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2056 if (ret) {
2057 printk(KERN_ERR "btrfs: superblock contains fatal errors\n");
2058 err = ret;
2059 goto fail_alloc;
2060 }
2108 2061
2109 /* 2062 /*
2110 * run through our array of backup supers and setup 2063 * run through our array of backup supers and setup
@@ -2135,10 +2088,55 @@ int open_ctree(struct super_block *sb,
2135 goto fail_alloc; 2088 goto fail_alloc;
2136 } 2089 }
2137 2090
2091 if (btrfs_super_leafsize(disk_super) !=
2092 btrfs_super_nodesize(disk_super)) {
2093 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2094 "blocksizes don't match. node %d leaf %d\n",
2095 btrfs_super_nodesize(disk_super),
2096 btrfs_super_leafsize(disk_super));
2097 err = -EINVAL;
2098 goto fail_alloc;
2099 }
2100 if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
2101 printk(KERN_ERR "BTRFS: couldn't mount because metadata "
2102 "blocksize (%d) was too large\n",
2103 btrfs_super_leafsize(disk_super));
2104 err = -EINVAL;
2105 goto fail_alloc;
2106 }
2107
2138 features = btrfs_super_incompat_flags(disk_super); 2108 features = btrfs_super_incompat_flags(disk_super);
2139 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; 2109 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2140 if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO) 2110 if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
2141 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; 2111 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2112
2113 /*
2114 * flag our filesystem as having big metadata blocks if
2115 * they are bigger than the page size
2116 */
2117 if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
2118 if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
2119 printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
2120 features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
2121 }
2122
2123 nodesize = btrfs_super_nodesize(disk_super);
2124 leafsize = btrfs_super_leafsize(disk_super);
2125 sectorsize = btrfs_super_sectorsize(disk_super);
2126 stripesize = btrfs_super_stripesize(disk_super);
2127
2128 /*
2129 * mixed block groups end up with duplicate but slightly offset
2130 * extent buffers for the same range. It leads to corruptions
2131 */
2132 if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
2133 (sectorsize != leafsize)) {
2134 printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
2135 "are not allowed for mixed block groups on %s\n",
2136 sb->s_id);
2137 goto fail_alloc;
2138 }
2139
2142 btrfs_set_super_incompat_flags(disk_super, features); 2140 btrfs_set_super_incompat_flags(disk_super, features);
2143 2141
2144 features = btrfs_super_compat_ro_flags(disk_super) & 2142 features = btrfs_super_compat_ro_flags(disk_super) &
@@ -2242,10 +2240,6 @@ int open_ctree(struct super_block *sb,
2242 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, 2240 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2243 4 * 1024 * 1024 / PAGE_CACHE_SIZE); 2241 4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2244 2242
2245 nodesize = btrfs_super_nodesize(disk_super);
2246 leafsize = btrfs_super_leafsize(disk_super);
2247 sectorsize = btrfs_super_sectorsize(disk_super);
2248 stripesize = btrfs_super_stripesize(disk_super);
2249 tree_root->nodesize = nodesize; 2243 tree_root->nodesize = nodesize;
2250 tree_root->leafsize = leafsize; 2244 tree_root->leafsize = leafsize;
2251 tree_root->sectorsize = sectorsize; 2245 tree_root->sectorsize = sectorsize;
@@ -2285,7 +2279,7 @@ int open_ctree(struct super_block *sb,
2285 chunk_root->node = read_tree_block(chunk_root, 2279 chunk_root->node = read_tree_block(chunk_root,
2286 btrfs_super_chunk_root(disk_super), 2280 btrfs_super_chunk_root(disk_super),
2287 blocksize, generation); 2281 blocksize, generation);
2288 BUG_ON(!chunk_root->node); 2282 BUG_ON(!chunk_root->node); /* -ENOMEM */
2289 if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) { 2283 if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2290 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n", 2284 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
2291 sb->s_id); 2285 sb->s_id);
@@ -2425,21 +2419,31 @@ retry_root_backup:
2425 log_tree_root->node = read_tree_block(tree_root, bytenr, 2419 log_tree_root->node = read_tree_block(tree_root, bytenr,
2426 blocksize, 2420 blocksize,
2427 generation + 1); 2421 generation + 1);
2422 /* returns with log_tree_root freed on success */
2428 ret = btrfs_recover_log_trees(log_tree_root); 2423 ret = btrfs_recover_log_trees(log_tree_root);
2429 BUG_ON(ret); 2424 if (ret) {
2425 btrfs_error(tree_root->fs_info, ret,
2426 "Failed to recover log tree");
2427 free_extent_buffer(log_tree_root->node);
2428 kfree(log_tree_root);
2429 goto fail_trans_kthread;
2430 }
2430 2431
2431 if (sb->s_flags & MS_RDONLY) { 2432 if (sb->s_flags & MS_RDONLY) {
2432 ret = btrfs_commit_super(tree_root); 2433 ret = btrfs_commit_super(tree_root);
2433 BUG_ON(ret); 2434 if (ret)
2435 goto fail_trans_kthread;
2434 } 2436 }
2435 } 2437 }
2436 2438
2437 ret = btrfs_find_orphan_roots(tree_root); 2439 ret = btrfs_find_orphan_roots(tree_root);
2438 BUG_ON(ret); 2440 if (ret)
2441 goto fail_trans_kthread;
2439 2442
2440 if (!(sb->s_flags & MS_RDONLY)) { 2443 if (!(sb->s_flags & MS_RDONLY)) {
2441 ret = btrfs_cleanup_fs_roots(fs_info); 2444 ret = btrfs_cleanup_fs_roots(fs_info);
2442 BUG_ON(ret); 2445 if (ret) {
2446 }
2443 2447
2444 ret = btrfs_recover_relocation(tree_root); 2448 ret = btrfs_recover_relocation(tree_root);
2445 if (ret < 0) { 2449 if (ret < 0) {
@@ -2859,6 +2863,8 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
2859 if (total_errors > max_errors) { 2863 if (total_errors > max_errors) {
2860 printk(KERN_ERR "btrfs: %d errors while writing supers\n", 2864 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2861 total_errors); 2865 total_errors);
2866
2867 /* This shouldn't happen. FUA is masked off if unsupported */
2862 BUG(); 2868 BUG();
2863 } 2869 }
2864 2870
@@ -2875,9 +2881,9 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
2875 } 2881 }
2876 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); 2882 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2877 if (total_errors > max_errors) { 2883 if (total_errors > max_errors) {
2878 printk(KERN_ERR "btrfs: %d errors while writing supers\n", 2884 btrfs_error(root->fs_info, -EIO,
2879 total_errors); 2885 "%d errors while writing supers", total_errors);
2880 BUG(); 2886 return -EIO;
2881 } 2887 }
2882 return 0; 2888 return 0;
2883} 2889}
@@ -2891,7 +2897,20 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
2891 return ret; 2897 return ret;
2892} 2898}
2893 2899
2894int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) 2900/* Kill all outstanding I/O */
2901void btrfs_abort_devices(struct btrfs_root *root)
2902{
2903 struct list_head *head;
2904 struct btrfs_device *dev;
2905 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2906 head = &root->fs_info->fs_devices->devices;
2907 list_for_each_entry_rcu(dev, head, dev_list) {
2908 blk_abort_queue(dev->bdev->bd_disk->queue);
2909 }
2910 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2911}
2912
2913void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2895{ 2914{
2896 spin_lock(&fs_info->fs_roots_radix_lock); 2915 spin_lock(&fs_info->fs_roots_radix_lock);
2897 radix_tree_delete(&fs_info->fs_roots_radix, 2916 radix_tree_delete(&fs_info->fs_roots_radix,
@@ -2904,7 +2923,6 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2904 __btrfs_remove_free_space_cache(root->free_ino_pinned); 2923 __btrfs_remove_free_space_cache(root->free_ino_pinned);
2905 __btrfs_remove_free_space_cache(root->free_ino_ctl); 2924 __btrfs_remove_free_space_cache(root->free_ino_ctl);
2906 free_fs_root(root); 2925 free_fs_root(root);
2907 return 0;
2908} 2926}
2909 2927
2910static void free_fs_root(struct btrfs_root *root) 2928static void free_fs_root(struct btrfs_root *root)
@@ -2921,7 +2939,7 @@ static void free_fs_root(struct btrfs_root *root)
2921 kfree(root); 2939 kfree(root);
2922} 2940}
2923 2941
2924static int del_fs_roots(struct btrfs_fs_info *fs_info) 2942static void del_fs_roots(struct btrfs_fs_info *fs_info)
2925{ 2943{
2926 int ret; 2944 int ret;
2927 struct btrfs_root *gang[8]; 2945 struct btrfs_root *gang[8];
@@ -2950,7 +2968,6 @@ static int del_fs_roots(struct btrfs_fs_info *fs_info)
2950 for (i = 0; i < ret; i++) 2968 for (i = 0; i < ret; i++)
2951 btrfs_free_fs_root(fs_info, gang[i]); 2969 btrfs_free_fs_root(fs_info, gang[i]);
2952 } 2970 }
2953 return 0;
2954} 2971}
2955 2972
2956int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) 2973int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
@@ -2999,14 +3016,21 @@ int btrfs_commit_super(struct btrfs_root *root)
2999 if (IS_ERR(trans)) 3016 if (IS_ERR(trans))
3000 return PTR_ERR(trans); 3017 return PTR_ERR(trans);
3001 ret = btrfs_commit_transaction(trans, root); 3018 ret = btrfs_commit_transaction(trans, root);
3002 BUG_ON(ret); 3019 if (ret)
3020 return ret;
3003 /* run commit again to drop the original snapshot */ 3021 /* run commit again to drop the original snapshot */
3004 trans = btrfs_join_transaction(root); 3022 trans = btrfs_join_transaction(root);
3005 if (IS_ERR(trans)) 3023 if (IS_ERR(trans))
3006 return PTR_ERR(trans); 3024 return PTR_ERR(trans);
3007 btrfs_commit_transaction(trans, root); 3025 ret = btrfs_commit_transaction(trans, root);
3026 if (ret)
3027 return ret;
3008 ret = btrfs_write_and_wait_transaction(NULL, root); 3028 ret = btrfs_write_and_wait_transaction(NULL, root);
3009 BUG_ON(ret); 3029 if (ret) {
3030 btrfs_error(root->fs_info, ret,
3031 "Failed to sync btree inode to disk.");
3032 return ret;
3033 }
3010 3034
3011 ret = write_ctree_super(NULL, root, 0); 3035 ret = write_ctree_super(NULL, root, 0);
3012 return ret; 3036 return ret;
@@ -3122,10 +3146,9 @@ int close_ctree(struct btrfs_root *root)
3122int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid) 3146int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
3123{ 3147{
3124 int ret; 3148 int ret;
3125 struct inode *btree_inode = buf->first_page->mapping->host; 3149 struct inode *btree_inode = buf->pages[0]->mapping->host;
3126 3150
3127 ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf, 3151 ret = extent_buffer_uptodate(buf);
3128 NULL);
3129 if (!ret) 3152 if (!ret)
3130 return ret; 3153 return ret;
3131 3154
@@ -3136,16 +3159,13 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
3136 3159
3137int btrfs_set_buffer_uptodate(struct extent_buffer *buf) 3160int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
3138{ 3161{
3139 struct inode *btree_inode = buf->first_page->mapping->host; 3162 return set_extent_buffer_uptodate(buf);
3140 return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
3141 buf);
3142} 3163}
3143 3164
3144void btrfs_mark_buffer_dirty(struct extent_buffer *buf) 3165void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3145{ 3166{
3146 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root; 3167 struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3147 u64 transid = btrfs_header_generation(buf); 3168 u64 transid = btrfs_header_generation(buf);
3148 struct inode *btree_inode = root->fs_info->btree_inode;
3149 int was_dirty; 3169 int was_dirty;
3150 3170
3151 btrfs_assert_tree_locked(buf); 3171 btrfs_assert_tree_locked(buf);
@@ -3157,8 +3177,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3157 (unsigned long long)root->fs_info->generation); 3177 (unsigned long long)root->fs_info->generation);
3158 WARN_ON(1); 3178 WARN_ON(1);
3159 } 3179 }
3160 was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, 3180 was_dirty = set_extent_buffer_dirty(buf);
3161 buf);
3162 if (!was_dirty) { 3181 if (!was_dirty) {
3163 spin_lock(&root->fs_info->delalloc_lock); 3182 spin_lock(&root->fs_info->delalloc_lock);
3164 root->fs_info->dirty_metadata_bytes += buf->len; 3183 root->fs_info->dirty_metadata_bytes += buf->len;
@@ -3212,12 +3231,8 @@ void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
3212 3231
3213int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid) 3232int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
3214{ 3233{
3215 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root; 3234 struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
3216 int ret; 3235 return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3217 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
3218 if (ret == 0)
3219 set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
3220 return ret;
3221} 3236}
3222 3237
3223static int btree_lock_page_hook(struct page *page, void *data, 3238static int btree_lock_page_hook(struct page *page, void *data,
@@ -3225,17 +3240,21 @@ static int btree_lock_page_hook(struct page *page, void *data,
3225{ 3240{
3226 struct inode *inode = page->mapping->host; 3241 struct inode *inode = page->mapping->host;
3227 struct btrfs_root *root = BTRFS_I(inode)->root; 3242 struct btrfs_root *root = BTRFS_I(inode)->root;
3228 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3229 struct extent_buffer *eb; 3243 struct extent_buffer *eb;
3230 unsigned long len;
3231 u64 bytenr = page_offset(page);
3232 3244
3233 if (page->private == EXTENT_PAGE_PRIVATE) 3245 /*
3246 * We culled this eb but the page is still hanging out on the mapping,
3247 * carry on.
3248 */
3249 if (!PagePrivate(page))
3234 goto out; 3250 goto out;
3235 3251
3236 len = page->private >> 2; 3252 eb = (struct extent_buffer *)page->private;
3237 eb = find_extent_buffer(io_tree, bytenr, len); 3253 if (!eb) {
3238 if (!eb) 3254 WARN_ON(1);
3255 goto out;
3256 }
3257 if (page != eb->pages[0])
3239 goto out; 3258 goto out;
3240 3259
3241 if (!btrfs_try_tree_write_lock(eb)) { 3260 if (!btrfs_try_tree_write_lock(eb)) {
@@ -3254,7 +3273,6 @@ static int btree_lock_page_hook(struct page *page, void *data,
3254 } 3273 }
3255 3274
3256 btrfs_tree_unlock(eb); 3275 btrfs_tree_unlock(eb);
3257 free_extent_buffer(eb);
3258out: 3276out:
3259 if (!trylock_page(page)) { 3277 if (!trylock_page(page)) {
3260 flush_fn(data); 3278 flush_fn(data);
@@ -3263,15 +3281,23 @@ out:
3263 return 0; 3281 return 0;
3264} 3282}
3265 3283
3266static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info, 3284static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3267 int read_only) 3285 int read_only)
3268{ 3286{
3287 if (btrfs_super_csum_type(fs_info->super_copy) >= ARRAY_SIZE(btrfs_csum_sizes)) {
3288 printk(KERN_ERR "btrfs: unsupported checksum algorithm\n");
3289 return -EINVAL;
3290 }
3291
3269 if (read_only) 3292 if (read_only)
3270 return; 3293 return 0;
3271 3294
3272 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) 3295 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
3273 printk(KERN_WARNING "warning: mount fs with errors, " 3296 printk(KERN_WARNING "warning: mount fs with errors, "
3274 "running btrfsck is recommended\n"); 3297 "running btrfsck is recommended\n");
3298 }
3299
3300 return 0;
3275} 3301}
3276 3302
3277int btrfs_error_commit_super(struct btrfs_root *root) 3303int btrfs_error_commit_super(struct btrfs_root *root)
@@ -3293,7 +3319,7 @@ int btrfs_error_commit_super(struct btrfs_root *root)
3293 return ret; 3319 return ret;
3294} 3320}
3295 3321
3296static int btrfs_destroy_ordered_operations(struct btrfs_root *root) 3322static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
3297{ 3323{
3298 struct btrfs_inode *btrfs_inode; 3324 struct btrfs_inode *btrfs_inode;
3299 struct list_head splice; 3325 struct list_head splice;
@@ -3315,11 +3341,9 @@ static int btrfs_destroy_ordered_operations(struct btrfs_root *root)
3315 3341
3316 spin_unlock(&root->fs_info->ordered_extent_lock); 3342 spin_unlock(&root->fs_info->ordered_extent_lock);
3317 mutex_unlock(&root->fs_info->ordered_operations_mutex); 3343 mutex_unlock(&root->fs_info->ordered_operations_mutex);
3318
3319 return 0;
3320} 3344}
3321 3345
3322static int btrfs_destroy_ordered_extents(struct btrfs_root *root) 3346static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3323{ 3347{
3324 struct list_head splice; 3348 struct list_head splice;
3325 struct btrfs_ordered_extent *ordered; 3349 struct btrfs_ordered_extent *ordered;
@@ -3351,12 +3375,10 @@ static int btrfs_destroy_ordered_extents(struct btrfs_root *root)
3351 } 3375 }
3352 3376
3353 spin_unlock(&root->fs_info->ordered_extent_lock); 3377 spin_unlock(&root->fs_info->ordered_extent_lock);
3354
3355 return 0;
3356} 3378}
3357 3379
3358static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, 3380int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3359 struct btrfs_root *root) 3381 struct btrfs_root *root)
3360{ 3382{
3361 struct rb_node *node; 3383 struct rb_node *node;
3362 struct btrfs_delayed_ref_root *delayed_refs; 3384 struct btrfs_delayed_ref_root *delayed_refs;
@@ -3365,6 +3387,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3365 3387
3366 delayed_refs = &trans->delayed_refs; 3388 delayed_refs = &trans->delayed_refs;
3367 3389
3390again:
3368 spin_lock(&delayed_refs->lock); 3391 spin_lock(&delayed_refs->lock);
3369 if (delayed_refs->num_entries == 0) { 3392 if (delayed_refs->num_entries == 0) {
3370 spin_unlock(&delayed_refs->lock); 3393 spin_unlock(&delayed_refs->lock);
@@ -3386,6 +3409,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3386 struct btrfs_delayed_ref_head *head; 3409 struct btrfs_delayed_ref_head *head;
3387 3410
3388 head = btrfs_delayed_node_to_head(ref); 3411 head = btrfs_delayed_node_to_head(ref);
3412 spin_unlock(&delayed_refs->lock);
3389 mutex_lock(&head->mutex); 3413 mutex_lock(&head->mutex);
3390 kfree(head->extent_op); 3414 kfree(head->extent_op);
3391 delayed_refs->num_heads--; 3415 delayed_refs->num_heads--;
@@ -3393,8 +3417,9 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3393 delayed_refs->num_heads_ready--; 3417 delayed_refs->num_heads_ready--;
3394 list_del_init(&head->cluster); 3418 list_del_init(&head->cluster);
3395 mutex_unlock(&head->mutex); 3419 mutex_unlock(&head->mutex);
3420 btrfs_put_delayed_ref(ref);
3421 goto again;
3396 } 3422 }
3397
3398 spin_unlock(&delayed_refs->lock); 3423 spin_unlock(&delayed_refs->lock);
3399 btrfs_put_delayed_ref(ref); 3424 btrfs_put_delayed_ref(ref);
3400 3425
@@ -3407,7 +3432,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3407 return ret; 3432 return ret;
3408} 3433}
3409 3434
3410static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t) 3435static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
3411{ 3436{
3412 struct btrfs_pending_snapshot *snapshot; 3437 struct btrfs_pending_snapshot *snapshot;
3413 struct list_head splice; 3438 struct list_head splice;
@@ -3425,11 +3450,9 @@ static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
3425 3450
3426 kfree(snapshot); 3451 kfree(snapshot);
3427 } 3452 }
3428
3429 return 0;
3430} 3453}
3431 3454
3432static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root) 3455static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3433{ 3456{
3434 struct btrfs_inode *btrfs_inode; 3457 struct btrfs_inode *btrfs_inode;
3435 struct list_head splice; 3458 struct list_head splice;
@@ -3449,8 +3472,6 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3449 } 3472 }
3450 3473
3451 spin_unlock(&root->fs_info->delalloc_lock); 3474 spin_unlock(&root->fs_info->delalloc_lock);
3452
3453 return 0;
3454} 3475}
3455 3476
3456static int btrfs_destroy_marked_extents(struct btrfs_root *root, 3477static int btrfs_destroy_marked_extents(struct btrfs_root *root,
@@ -3541,13 +3562,43 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3541 return 0; 3562 return 0;
3542} 3563}
3543 3564
3544static int btrfs_cleanup_transaction(struct btrfs_root *root) 3565void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
3566 struct btrfs_root *root)
3567{
3568 btrfs_destroy_delayed_refs(cur_trans, root);
3569 btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
3570 cur_trans->dirty_pages.dirty_bytes);
3571
3572 /* FIXME: cleanup wait for commit */
3573 cur_trans->in_commit = 1;
3574 cur_trans->blocked = 1;
3575 if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
3576 wake_up(&root->fs_info->transaction_blocked_wait);
3577
3578 cur_trans->blocked = 0;
3579 if (waitqueue_active(&root->fs_info->transaction_wait))
3580 wake_up(&root->fs_info->transaction_wait);
3581
3582 cur_trans->commit_done = 1;
3583 if (waitqueue_active(&cur_trans->commit_wait))
3584 wake_up(&cur_trans->commit_wait);
3585
3586 btrfs_destroy_pending_snapshots(cur_trans);
3587
3588 btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
3589 EXTENT_DIRTY);
3590
3591 /*
3592 memset(cur_trans, 0, sizeof(*cur_trans));
3593 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
3594 */
3595}
3596
3597int btrfs_cleanup_transaction(struct btrfs_root *root)
3545{ 3598{
3546 struct btrfs_transaction *t; 3599 struct btrfs_transaction *t;
3547 LIST_HEAD(list); 3600 LIST_HEAD(list);
3548 3601
3549 WARN_ON(1);
3550
3551 mutex_lock(&root->fs_info->transaction_kthread_mutex); 3602 mutex_lock(&root->fs_info->transaction_kthread_mutex);
3552 3603
3553 spin_lock(&root->fs_info->trans_lock); 3604 spin_lock(&root->fs_info->trans_lock);
@@ -3612,6 +3663,17 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
3612 return 0; 3663 return 0;
3613} 3664}
3614 3665
3666static int btree_writepage_io_failed_hook(struct bio *bio, struct page *page,
3667 u64 start, u64 end,
3668 struct extent_state *state)
3669{
3670 struct super_block *sb = page->mapping->host->i_sb;
3671 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
3672 btrfs_error(fs_info, -EIO,
3673 "Error occured while writing out btree at %llu", start);
3674 return -EIO;
3675}
3676
3615static struct extent_io_ops btree_extent_io_ops = { 3677static struct extent_io_ops btree_extent_io_ops = {
3616 .write_cache_pages_lock_hook = btree_lock_page_hook, 3678 .write_cache_pages_lock_hook = btree_lock_page_hook,
3617 .readpage_end_io_hook = btree_readpage_end_io_hook, 3679 .readpage_end_io_hook = btree_readpage_end_io_hook,
@@ -3619,4 +3681,5 @@ static struct extent_io_ops btree_extent_io_ops = {
3619 .submit_bio_hook = btree_submit_bio_hook, 3681 .submit_bio_hook = btree_submit_bio_hook,
3620 /* note we're sharing with inode.c for the merge bio hook */ 3682 /* note we're sharing with inode.c for the merge bio hook */
3621 .merge_bio_hook = btrfs_merge_bio_hook, 3683 .merge_bio_hook = btrfs_merge_bio_hook,
3684 .writepage_io_failed_hook = btree_writepage_io_failed_hook,
3622}; 3685};
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index e4bc4741319b..a7ace1a2dd12 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -44,8 +44,8 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
44 int mirror_num, struct extent_buffer **eb); 44 int mirror_num, struct extent_buffer **eb);
45struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, 45struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
46 u64 bytenr, u32 blocksize); 46 u64 bytenr, u32 blocksize);
47int clean_tree_block(struct btrfs_trans_handle *trans, 47void clean_tree_block(struct btrfs_trans_handle *trans,
48 struct btrfs_root *root, struct extent_buffer *buf); 48 struct btrfs_root *root, struct extent_buffer *buf);
49int open_ctree(struct super_block *sb, 49int open_ctree(struct super_block *sb,
50 struct btrfs_fs_devices *fs_devices, 50 struct btrfs_fs_devices *fs_devices,
51 char *options); 51 char *options);
@@ -64,7 +64,7 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
64int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info); 64int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
65void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); 65void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
66void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); 66void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
67int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); 67void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
68void btrfs_mark_buffer_dirty(struct extent_buffer *buf); 68void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
69int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid); 69int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
70int btrfs_set_buffer_uptodate(struct extent_buffer *buf); 70int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
@@ -85,6 +85,10 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
85 struct btrfs_fs_info *fs_info); 85 struct btrfs_fs_info *fs_info);
86int btrfs_add_log_tree(struct btrfs_trans_handle *trans, 86int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
87 struct btrfs_root *root); 87 struct btrfs_root *root);
88int btrfs_cleanup_transaction(struct btrfs_root *root);
89void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
90 struct btrfs_root *root);
91void btrfs_abort_devices(struct btrfs_root *root);
88 92
89#ifdef CONFIG_DEBUG_LOCK_ALLOC 93#ifdef CONFIG_DEBUG_LOCK_ALLOC
90void btrfs_init_lockdep(void); 94void btrfs_init_lockdep(void);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 5f77166fd01c..e887ee62b6d4 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -193,7 +193,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
193 if (ret < 0) 193 if (ret < 0)
194 goto fail; 194 goto fail;
195 195
196 BUG_ON(ret == 0); 196 BUG_ON(ret == 0); /* Key with offset of -1 found */
197 if (path->slots[0] == 0) { 197 if (path->slots[0] == 0) {
198 ret = -ENOENT; 198 ret = -ENOENT;
199 goto fail; 199 goto fail;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 37e0a800d34e..a84420491c11 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -245,7 +245,7 @@ static int exclude_super_stripes(struct btrfs_root *root,
245 cache->bytes_super += stripe_len; 245 cache->bytes_super += stripe_len;
246 ret = add_excluded_extent(root, cache->key.objectid, 246 ret = add_excluded_extent(root, cache->key.objectid,
247 stripe_len); 247 stripe_len);
248 BUG_ON(ret); 248 BUG_ON(ret); /* -ENOMEM */
249 } 249 }
250 250
251 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 251 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
@@ -253,13 +253,13 @@ static int exclude_super_stripes(struct btrfs_root *root,
253 ret = btrfs_rmap_block(&root->fs_info->mapping_tree, 253 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
254 cache->key.objectid, bytenr, 254 cache->key.objectid, bytenr,
255 0, &logical, &nr, &stripe_len); 255 0, &logical, &nr, &stripe_len);
256 BUG_ON(ret); 256 BUG_ON(ret); /* -ENOMEM */
257 257
258 while (nr--) { 258 while (nr--) {
259 cache->bytes_super += stripe_len; 259 cache->bytes_super += stripe_len;
260 ret = add_excluded_extent(root, logical[nr], 260 ret = add_excluded_extent(root, logical[nr],
261 stripe_len); 261 stripe_len);
262 BUG_ON(ret); 262 BUG_ON(ret); /* -ENOMEM */
263 } 263 }
264 264
265 kfree(logical); 265 kfree(logical);
@@ -321,7 +321,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
321 total_added += size; 321 total_added += size;
322 ret = btrfs_add_free_space(block_group, start, 322 ret = btrfs_add_free_space(block_group, start,
323 size); 323 size);
324 BUG_ON(ret); 324 BUG_ON(ret); /* -ENOMEM or logic error */
325 start = extent_end + 1; 325 start = extent_end + 1;
326 } else { 326 } else {
327 break; 327 break;
@@ -332,7 +332,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
332 size = end - start; 332 size = end - start;
333 total_added += size; 333 total_added += size;
334 ret = btrfs_add_free_space(block_group, start, size); 334 ret = btrfs_add_free_space(block_group, start, size);
335 BUG_ON(ret); 335 BUG_ON(ret); /* -ENOMEM or logic error */
336 } 336 }
337 337
338 return total_added; 338 return total_added;
@@ -474,7 +474,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
474 int ret = 0; 474 int ret = 0;
475 475
476 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS); 476 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
477 BUG_ON(!caching_ctl); 477 if (!caching_ctl)
478 return -ENOMEM;
478 479
479 INIT_LIST_HEAD(&caching_ctl->list); 480 INIT_LIST_HEAD(&caching_ctl->list);
480 mutex_init(&caching_ctl->mutex); 481 mutex_init(&caching_ctl->mutex);
@@ -982,7 +983,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
982 ret = btrfs_next_leaf(root, path); 983 ret = btrfs_next_leaf(root, path);
983 if (ret < 0) 984 if (ret < 0)
984 return ret; 985 return ret;
985 BUG_ON(ret > 0); 986 BUG_ON(ret > 0); /* Corruption */
986 leaf = path->nodes[0]; 987 leaf = path->nodes[0];
987 } 988 }
988 btrfs_item_key_to_cpu(leaf, &found_key, 989 btrfs_item_key_to_cpu(leaf, &found_key,
@@ -1008,9 +1009,9 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
1008 new_size + extra_size, 1); 1009 new_size + extra_size, 1);
1009 if (ret < 0) 1010 if (ret < 0)
1010 return ret; 1011 return ret;
1011 BUG_ON(ret); 1012 BUG_ON(ret); /* Corruption */
1012 1013
1013 ret = btrfs_extend_item(trans, root, path, new_size); 1014 btrfs_extend_item(trans, root, path, new_size);
1014 1015
1015 leaf = path->nodes[0]; 1016 leaf = path->nodes[0];
1016 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 1017 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -1478,7 +1479,11 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1478 err = ret; 1479 err = ret;
1479 goto out; 1480 goto out;
1480 } 1481 }
1481 BUG_ON(ret); 1482 if (ret && !insert) {
1483 err = -ENOENT;
1484 goto out;
1485 }
1486 BUG_ON(ret); /* Corruption */
1482 1487
1483 leaf = path->nodes[0]; 1488 leaf = path->nodes[0];
1484 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1489 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
@@ -1592,13 +1597,13 @@ out:
1592 * helper to add new inline back ref 1597 * helper to add new inline back ref
1593 */ 1598 */
1594static noinline_for_stack 1599static noinline_for_stack
1595int setup_inline_extent_backref(struct btrfs_trans_handle *trans, 1600void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1596 struct btrfs_root *root, 1601 struct btrfs_root *root,
1597 struct btrfs_path *path, 1602 struct btrfs_path *path,
1598 struct btrfs_extent_inline_ref *iref, 1603 struct btrfs_extent_inline_ref *iref,
1599 u64 parent, u64 root_objectid, 1604 u64 parent, u64 root_objectid,
1600 u64 owner, u64 offset, int refs_to_add, 1605 u64 owner, u64 offset, int refs_to_add,
1601 struct btrfs_delayed_extent_op *extent_op) 1606 struct btrfs_delayed_extent_op *extent_op)
1602{ 1607{
1603 struct extent_buffer *leaf; 1608 struct extent_buffer *leaf;
1604 struct btrfs_extent_item *ei; 1609 struct btrfs_extent_item *ei;
@@ -1608,7 +1613,6 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1608 u64 refs; 1613 u64 refs;
1609 int size; 1614 int size;
1610 int type; 1615 int type;
1611 int ret;
1612 1616
1613 leaf = path->nodes[0]; 1617 leaf = path->nodes[0];
1614 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 1618 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -1617,7 +1621,7 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1617 type = extent_ref_type(parent, owner); 1621 type = extent_ref_type(parent, owner);
1618 size = btrfs_extent_inline_ref_size(type); 1622 size = btrfs_extent_inline_ref_size(type);
1619 1623
1620 ret = btrfs_extend_item(trans, root, path, size); 1624 btrfs_extend_item(trans, root, path, size);
1621 1625
1622 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); 1626 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1623 refs = btrfs_extent_refs(leaf, ei); 1627 refs = btrfs_extent_refs(leaf, ei);
@@ -1652,7 +1656,6 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1652 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid); 1656 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1653 } 1657 }
1654 btrfs_mark_buffer_dirty(leaf); 1658 btrfs_mark_buffer_dirty(leaf);
1655 return 0;
1656} 1659}
1657 1660
1658static int lookup_extent_backref(struct btrfs_trans_handle *trans, 1661static int lookup_extent_backref(struct btrfs_trans_handle *trans,
@@ -1687,12 +1690,12 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1687 * helper to update/remove inline back ref 1690 * helper to update/remove inline back ref
1688 */ 1691 */
1689static noinline_for_stack 1692static noinline_for_stack
1690int update_inline_extent_backref(struct btrfs_trans_handle *trans, 1693void update_inline_extent_backref(struct btrfs_trans_handle *trans,
1691 struct btrfs_root *root, 1694 struct btrfs_root *root,
1692 struct btrfs_path *path, 1695 struct btrfs_path *path,
1693 struct btrfs_extent_inline_ref *iref, 1696 struct btrfs_extent_inline_ref *iref,
1694 int refs_to_mod, 1697 int refs_to_mod,
1695 struct btrfs_delayed_extent_op *extent_op) 1698 struct btrfs_delayed_extent_op *extent_op)
1696{ 1699{
1697 struct extent_buffer *leaf; 1700 struct extent_buffer *leaf;
1698 struct btrfs_extent_item *ei; 1701 struct btrfs_extent_item *ei;
@@ -1703,7 +1706,6 @@ int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1703 u32 item_size; 1706 u32 item_size;
1704 int size; 1707 int size;
1705 int type; 1708 int type;
1706 int ret;
1707 u64 refs; 1709 u64 refs;
1708 1710
1709 leaf = path->nodes[0]; 1711 leaf = path->nodes[0];
@@ -1745,10 +1747,9 @@ int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1745 memmove_extent_buffer(leaf, ptr, ptr + size, 1747 memmove_extent_buffer(leaf, ptr, ptr + size,
1746 end - ptr - size); 1748 end - ptr - size);
1747 item_size -= size; 1749 item_size -= size;
1748 ret = btrfs_truncate_item(trans, root, path, item_size, 1); 1750 btrfs_truncate_item(trans, root, path, item_size, 1);
1749 } 1751 }
1750 btrfs_mark_buffer_dirty(leaf); 1752 btrfs_mark_buffer_dirty(leaf);
1751 return 0;
1752} 1753}
1753 1754
1754static noinline_for_stack 1755static noinline_for_stack
@@ -1768,13 +1769,13 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1768 root_objectid, owner, offset, 1); 1769 root_objectid, owner, offset, 1);
1769 if (ret == 0) { 1770 if (ret == 0) {
1770 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID); 1771 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1771 ret = update_inline_extent_backref(trans, root, path, iref, 1772 update_inline_extent_backref(trans, root, path, iref,
1772 refs_to_add, extent_op); 1773 refs_to_add, extent_op);
1773 } else if (ret == -ENOENT) { 1774 } else if (ret == -ENOENT) {
1774 ret = setup_inline_extent_backref(trans, root, path, iref, 1775 setup_inline_extent_backref(trans, root, path, iref, parent,
1775 parent, root_objectid, 1776 root_objectid, owner, offset,
1776 owner, offset, refs_to_add, 1777 refs_to_add, extent_op);
1777 extent_op); 1778 ret = 0;
1778 } 1779 }
1779 return ret; 1780 return ret;
1780} 1781}
@@ -1804,12 +1805,12 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
1804 struct btrfs_extent_inline_ref *iref, 1805 struct btrfs_extent_inline_ref *iref,
1805 int refs_to_drop, int is_data) 1806 int refs_to_drop, int is_data)
1806{ 1807{
1807 int ret; 1808 int ret = 0;
1808 1809
1809 BUG_ON(!is_data && refs_to_drop != 1); 1810 BUG_ON(!is_data && refs_to_drop != 1);
1810 if (iref) { 1811 if (iref) {
1811 ret = update_inline_extent_backref(trans, root, path, iref, 1812 update_inline_extent_backref(trans, root, path, iref,
1812 -refs_to_drop, NULL); 1813 -refs_to_drop, NULL);
1813 } else if (is_data) { 1814 } else if (is_data) {
1814 ret = remove_extent_data_ref(trans, root, path, refs_to_drop); 1815 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1815 } else { 1816 } else {
@@ -1835,6 +1836,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1835 /* Tell the block device(s) that the sectors can be discarded */ 1836 /* Tell the block device(s) that the sectors can be discarded */
1836 ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD, 1837 ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
1837 bytenr, &num_bytes, &bbio, 0); 1838 bytenr, &num_bytes, &bbio, 0);
1839 /* Error condition is -ENOMEM */
1838 if (!ret) { 1840 if (!ret) {
1839 struct btrfs_bio_stripe *stripe = bbio->stripes; 1841 struct btrfs_bio_stripe *stripe = bbio->stripes;
1840 int i; 1842 int i;
@@ -1850,7 +1852,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1850 if (!ret) 1852 if (!ret)
1851 discarded_bytes += stripe->length; 1853 discarded_bytes += stripe->length;
1852 else if (ret != -EOPNOTSUPP) 1854 else if (ret != -EOPNOTSUPP)
1853 break; 1855 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
1854 1856
1855 /* 1857 /*
1856 * Just in case we get back EOPNOTSUPP for some reason, 1858 * Just in case we get back EOPNOTSUPP for some reason,
@@ -1869,6 +1871,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1869 return ret; 1871 return ret;
1870} 1872}
1871 1873
1874/* Can return -ENOMEM */
1872int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, 1875int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1873 struct btrfs_root *root, 1876 struct btrfs_root *root,
1874 u64 bytenr, u64 num_bytes, u64 parent, 1877 u64 bytenr, u64 num_bytes, u64 parent,
@@ -1944,7 +1947,8 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1944 ret = insert_extent_backref(trans, root->fs_info->extent_root, 1947 ret = insert_extent_backref(trans, root->fs_info->extent_root,
1945 path, bytenr, parent, root_objectid, 1948 path, bytenr, parent, root_objectid,
1946 owner, offset, refs_to_add); 1949 owner, offset, refs_to_add);
1947 BUG_ON(ret); 1950 if (ret)
1951 btrfs_abort_transaction(trans, root, ret);
1948out: 1952out:
1949 btrfs_free_path(path); 1953 btrfs_free_path(path);
1950 return err; 1954 return err;
@@ -2031,6 +2035,9 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2031 int ret; 2035 int ret;
2032 int err = 0; 2036 int err = 0;
2033 2037
2038 if (trans->aborted)
2039 return 0;
2040
2034 path = btrfs_alloc_path(); 2041 path = btrfs_alloc_path();
2035 if (!path) 2042 if (!path)
2036 return -ENOMEM; 2043 return -ENOMEM;
@@ -2128,7 +2135,11 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2128 struct btrfs_delayed_extent_op *extent_op, 2135 struct btrfs_delayed_extent_op *extent_op,
2129 int insert_reserved) 2136 int insert_reserved)
2130{ 2137{
2131 int ret; 2138 int ret = 0;
2139
2140 if (trans->aborted)
2141 return 0;
2142
2132 if (btrfs_delayed_ref_is_head(node)) { 2143 if (btrfs_delayed_ref_is_head(node)) {
2133 struct btrfs_delayed_ref_head *head; 2144 struct btrfs_delayed_ref_head *head;
2134 /* 2145 /*
@@ -2146,11 +2157,10 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2146 ret = btrfs_del_csums(trans, root, 2157 ret = btrfs_del_csums(trans, root,
2147 node->bytenr, 2158 node->bytenr,
2148 node->num_bytes); 2159 node->num_bytes);
2149 BUG_ON(ret);
2150 } 2160 }
2151 } 2161 }
2152 mutex_unlock(&head->mutex); 2162 mutex_unlock(&head->mutex);
2153 return 0; 2163 return ret;
2154 } 2164 }
2155 2165
2156 if (node->type == BTRFS_TREE_BLOCK_REF_KEY || 2166 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
@@ -2197,6 +2207,10 @@ again:
2197 return NULL; 2207 return NULL;
2198} 2208}
2199 2209
2210/*
2211 * Returns 0 on success or if called with an already aborted transaction.
2212 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2213 */
2200static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, 2214static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2201 struct btrfs_root *root, 2215 struct btrfs_root *root,
2202 struct list_head *cluster) 2216 struct list_head *cluster)
@@ -2285,9 +2299,13 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2285 2299
2286 ret = run_delayed_extent_op(trans, root, 2300 ret = run_delayed_extent_op(trans, root,
2287 ref, extent_op); 2301 ref, extent_op);
2288 BUG_ON(ret);
2289 kfree(extent_op); 2302 kfree(extent_op);
2290 2303
2304 if (ret) {
2305 printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
2306 return ret;
2307 }
2308
2291 goto next; 2309 goto next;
2292 } 2310 }
2293 2311
@@ -2308,11 +2326,16 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2308 2326
2309 ret = run_one_delayed_ref(trans, root, ref, extent_op, 2327 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2310 must_insert_reserved); 2328 must_insert_reserved);
2311 BUG_ON(ret);
2312 2329
2313 btrfs_put_delayed_ref(ref); 2330 btrfs_put_delayed_ref(ref);
2314 kfree(extent_op); 2331 kfree(extent_op);
2315 count++; 2332 count++;
2333
2334 if (ret) {
2335 printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
2336 return ret;
2337 }
2338
2316next: 2339next:
2317 do_chunk_alloc(trans, root->fs_info->extent_root, 2340 do_chunk_alloc(trans, root->fs_info->extent_root,
2318 2 * 1024 * 1024, 2341 2 * 1024 * 1024,
@@ -2347,6 +2370,9 @@ static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
2347 * 0, which means to process everything in the tree at the start 2370 * 0, which means to process everything in the tree at the start
2348 * of the run (but not newly added entries), or it can be some target 2371 * of the run (but not newly added entries), or it can be some target
2349 * number you'd like to process. 2372 * number you'd like to process.
2373 *
2374 * Returns 0 on success or if called with an aborted transaction
2375 * Returns <0 on error and aborts the transaction
2350 */ 2376 */
2351int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2377int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2352 struct btrfs_root *root, unsigned long count) 2378 struct btrfs_root *root, unsigned long count)
@@ -2362,6 +2388,10 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2362 unsigned long num_refs = 0; 2388 unsigned long num_refs = 0;
2363 int consider_waiting; 2389 int consider_waiting;
2364 2390
2391 /* We'll clean this up in btrfs_cleanup_transaction */
2392 if (trans->aborted)
2393 return 0;
2394
2365 if (root == root->fs_info->extent_root) 2395 if (root == root->fs_info->extent_root)
2366 root = root->fs_info->tree_root; 2396 root = root->fs_info->tree_root;
2367 2397
@@ -2419,7 +2449,11 @@ again:
2419 } 2449 }
2420 2450
2421 ret = run_clustered_refs(trans, root, &cluster); 2451 ret = run_clustered_refs(trans, root, &cluster);
2422 BUG_ON(ret < 0); 2452 if (ret < 0) {
2453 spin_unlock(&delayed_refs->lock);
2454 btrfs_abort_transaction(trans, root, ret);
2455 return ret;
2456 }
2423 2457
2424 count -= min_t(unsigned long, ret, count); 2458 count -= min_t(unsigned long, ret, count);
2425 2459
@@ -2584,7 +2618,7 @@ static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2584 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); 2618 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2585 if (ret < 0) 2619 if (ret < 0)
2586 goto out; 2620 goto out;
2587 BUG_ON(ret == 0); 2621 BUG_ON(ret == 0); /* Corruption */
2588 2622
2589 ret = -ENOENT; 2623 ret = -ENOENT;
2590 if (path->slots[0] == 0) 2624 if (path->slots[0] == 0)
@@ -2738,7 +2772,6 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2738 } 2772 }
2739 return 0; 2773 return 0;
2740fail: 2774fail:
2741 BUG();
2742 return ret; 2775 return ret;
2743} 2776}
2744 2777
@@ -2767,7 +2800,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
2767 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); 2800 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2768 if (ret < 0) 2801 if (ret < 0)
2769 goto fail; 2802 goto fail;
2770 BUG_ON(ret); 2803 BUG_ON(ret); /* Corruption */
2771 2804
2772 leaf = path->nodes[0]; 2805 leaf = path->nodes[0];
2773 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); 2806 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -2775,8 +2808,10 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
2775 btrfs_mark_buffer_dirty(leaf); 2808 btrfs_mark_buffer_dirty(leaf);
2776 btrfs_release_path(path); 2809 btrfs_release_path(path);
2777fail: 2810fail:
2778 if (ret) 2811 if (ret) {
2812 btrfs_abort_transaction(trans, root, ret);
2779 return ret; 2813 return ret;
2814 }
2780 return 0; 2815 return 0;
2781 2816
2782} 2817}
@@ -2949,7 +2984,8 @@ again:
2949 if (last == 0) { 2984 if (last == 0) {
2950 err = btrfs_run_delayed_refs(trans, root, 2985 err = btrfs_run_delayed_refs(trans, root,
2951 (unsigned long)-1); 2986 (unsigned long)-1);
2952 BUG_ON(err); 2987 if (err) /* File system offline */
2988 goto out;
2953 } 2989 }
2954 2990
2955 cache = btrfs_lookup_first_block_group(root->fs_info, last); 2991 cache = btrfs_lookup_first_block_group(root->fs_info, last);
@@ -2976,7 +3012,9 @@ again:
2976 last = cache->key.objectid + cache->key.offset; 3012 last = cache->key.objectid + cache->key.offset;
2977 3013
2978 err = write_one_cache_group(trans, root, path, cache); 3014 err = write_one_cache_group(trans, root, path, cache);
2979 BUG_ON(err); 3015 if (err) /* File system offline */
3016 goto out;
3017
2980 btrfs_put_block_group(cache); 3018 btrfs_put_block_group(cache);
2981 } 3019 }
2982 3020
@@ -2989,7 +3027,8 @@ again:
2989 if (last == 0) { 3027 if (last == 0) {
2990 err = btrfs_run_delayed_refs(trans, root, 3028 err = btrfs_run_delayed_refs(trans, root,
2991 (unsigned long)-1); 3029 (unsigned long)-1);
2992 BUG_ON(err); 3030 if (err) /* File system offline */
3031 goto out;
2993 } 3032 }
2994 3033
2995 cache = btrfs_lookup_first_block_group(root->fs_info, last); 3034 cache = btrfs_lookup_first_block_group(root->fs_info, last);
@@ -3014,20 +3053,21 @@ again:
3014 continue; 3053 continue;
3015 } 3054 }
3016 3055
3017 btrfs_write_out_cache(root, trans, cache, path); 3056 err = btrfs_write_out_cache(root, trans, cache, path);
3018 3057
3019 /* 3058 /*
3020 * If we didn't have an error then the cache state is still 3059 * If we didn't have an error then the cache state is still
3021 * NEED_WRITE, so we can set it to WRITTEN. 3060 * NEED_WRITE, so we can set it to WRITTEN.
3022 */ 3061 */
3023 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE) 3062 if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3024 cache->disk_cache_state = BTRFS_DC_WRITTEN; 3063 cache->disk_cache_state = BTRFS_DC_WRITTEN;
3025 last = cache->key.objectid + cache->key.offset; 3064 last = cache->key.objectid + cache->key.offset;
3026 btrfs_put_block_group(cache); 3065 btrfs_put_block_group(cache);
3027 } 3066 }
3067out:
3028 3068
3029 btrfs_free_path(path); 3069 btrfs_free_path(path);
3030 return 0; 3070 return err;
3031} 3071}
3032 3072
3033int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) 3073int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
@@ -3098,11 +3138,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3098 3138
3099static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 3139static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3100{ 3140{
3101 u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK; 3141 u64 extra_flags = chunk_to_extended(flags) &
3102 3142 BTRFS_EXTENDED_PROFILE_MASK;
3103 /* chunk -> extended profile */
3104 if (extra_flags == 0)
3105 extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3106 3143
3107 if (flags & BTRFS_BLOCK_GROUP_DATA) 3144 if (flags & BTRFS_BLOCK_GROUP_DATA)
3108 fs_info->avail_data_alloc_bits |= extra_flags; 3145 fs_info->avail_data_alloc_bits |= extra_flags;
@@ -3113,6 +3150,35 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3113} 3150}
3114 3151
3115/* 3152/*
3153 * returns target flags in extended format or 0 if restripe for this
3154 * chunk_type is not in progress
3155 */
3156static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3157{
3158 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3159 u64 target = 0;
3160
3161 BUG_ON(!mutex_is_locked(&fs_info->volume_mutex) &&
3162 !spin_is_locked(&fs_info->balance_lock));
3163
3164 if (!bctl)
3165 return 0;
3166
3167 if (flags & BTRFS_BLOCK_GROUP_DATA &&
3168 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3169 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3170 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3171 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3172 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3173 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3174 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3175 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3176 }
3177
3178 return target;
3179}
3180
3181/*
3116 * @flags: available profiles in extended format (see ctree.h) 3182 * @flags: available profiles in extended format (see ctree.h)
3117 * 3183 *
3118 * Returns reduced profile in chunk format. If profile changing is in 3184 * Returns reduced profile in chunk format. If profile changing is in
@@ -3128,31 +3194,19 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3128 */ 3194 */
3129 u64 num_devices = root->fs_info->fs_devices->rw_devices + 3195 u64 num_devices = root->fs_info->fs_devices->rw_devices +
3130 root->fs_info->fs_devices->missing_devices; 3196 root->fs_info->fs_devices->missing_devices;
3197 u64 target;
3131 3198
3132 /* pick restriper's target profile if it's available */ 3199 /*
3200 * see if restripe for this chunk_type is in progress, if so
3201 * try to reduce to the target profile
3202 */
3133 spin_lock(&root->fs_info->balance_lock); 3203 spin_lock(&root->fs_info->balance_lock);
3134 if (root->fs_info->balance_ctl) { 3204 target = get_restripe_target(root->fs_info, flags);
3135 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl; 3205 if (target) {
3136 u64 tgt = 0; 3206 /* pick target profile only if it's already available */
3137 3207 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3138 if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
3139 (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3140 (flags & bctl->data.target)) {
3141 tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3142 } else if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
3143 (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3144 (flags & bctl->sys.target)) {
3145 tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3146 } else if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
3147 (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3148 (flags & bctl->meta.target)) {
3149 tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3150 }
3151
3152 if (tgt) {
3153 spin_unlock(&root->fs_info->balance_lock); 3208 spin_unlock(&root->fs_info->balance_lock);
3154 flags = tgt; 3209 return extended_to_chunk(target);
3155 goto out;
3156 } 3210 }
3157 } 3211 }
3158 spin_unlock(&root->fs_info->balance_lock); 3212 spin_unlock(&root->fs_info->balance_lock);
@@ -3180,10 +3234,7 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3180 flags &= ~BTRFS_BLOCK_GROUP_RAID0; 3234 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3181 } 3235 }
3182 3236
3183out: 3237 return extended_to_chunk(flags);
3184 /* extended -> chunk profile */
3185 flags &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3186 return flags;
3187} 3238}
3188 3239
3189static u64 get_alloc_profile(struct btrfs_root *root, u64 flags) 3240static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
@@ -3312,8 +3363,7 @@ commit_trans:
3312 } 3363 }
3313 data_sinfo->bytes_may_use += bytes; 3364 data_sinfo->bytes_may_use += bytes;
3314 trace_btrfs_space_reservation(root->fs_info, "space_info", 3365 trace_btrfs_space_reservation(root->fs_info, "space_info",
3315 (u64)(unsigned long)data_sinfo, 3366 data_sinfo->flags, bytes, 1);
3316 bytes, 1);
3317 spin_unlock(&data_sinfo->lock); 3367 spin_unlock(&data_sinfo->lock);
3318 3368
3319 return 0; 3369 return 0;
@@ -3334,8 +3384,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3334 spin_lock(&data_sinfo->lock); 3384 spin_lock(&data_sinfo->lock);
3335 data_sinfo->bytes_may_use -= bytes; 3385 data_sinfo->bytes_may_use -= bytes;
3336 trace_btrfs_space_reservation(root->fs_info, "space_info", 3386 trace_btrfs_space_reservation(root->fs_info, "space_info",
3337 (u64)(unsigned long)data_sinfo, 3387 data_sinfo->flags, bytes, 0);
3338 bytes, 0);
3339 spin_unlock(&data_sinfo->lock); 3388 spin_unlock(&data_sinfo->lock);
3340} 3389}
3341 3390
@@ -3396,6 +3445,50 @@ static int should_alloc_chunk(struct btrfs_root *root,
3396 return 1; 3445 return 1;
3397} 3446}
3398 3447
3448static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
3449{
3450 u64 num_dev;
3451
3452 if (type & BTRFS_BLOCK_GROUP_RAID10 ||
3453 type & BTRFS_BLOCK_GROUP_RAID0)
3454 num_dev = root->fs_info->fs_devices->rw_devices;
3455 else if (type & BTRFS_BLOCK_GROUP_RAID1)
3456 num_dev = 2;
3457 else
3458 num_dev = 1; /* DUP or single */
3459
3460 /* metadata for updating devices and chunk tree */
3461 return btrfs_calc_trans_metadata_size(root, num_dev + 1);
3462}
3463
3464static void check_system_chunk(struct btrfs_trans_handle *trans,
3465 struct btrfs_root *root, u64 type)
3466{
3467 struct btrfs_space_info *info;
3468 u64 left;
3469 u64 thresh;
3470
3471 info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3472 spin_lock(&info->lock);
3473 left = info->total_bytes - info->bytes_used - info->bytes_pinned -
3474 info->bytes_reserved - info->bytes_readonly;
3475 spin_unlock(&info->lock);
3476
3477 thresh = get_system_chunk_thresh(root, type);
3478 if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
3479 printk(KERN_INFO "left=%llu, need=%llu, flags=%llu\n",
3480 left, thresh, type);
3481 dump_space_info(info, 0, 0);
3482 }
3483
3484 if (left < thresh) {
3485 u64 flags;
3486
3487 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
3488 btrfs_alloc_chunk(trans, root, flags);
3489 }
3490}
3491
3399static int do_chunk_alloc(struct btrfs_trans_handle *trans, 3492static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3400 struct btrfs_root *extent_root, u64 alloc_bytes, 3493 struct btrfs_root *extent_root, u64 alloc_bytes,
3401 u64 flags, int force) 3494 u64 flags, int force)
@@ -3405,15 +3498,13 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3405 int wait_for_alloc = 0; 3498 int wait_for_alloc = 0;
3406 int ret = 0; 3499 int ret = 0;
3407 3500
3408 BUG_ON(!profile_is_valid(flags, 0));
3409
3410 space_info = __find_space_info(extent_root->fs_info, flags); 3501 space_info = __find_space_info(extent_root->fs_info, flags);
3411 if (!space_info) { 3502 if (!space_info) {
3412 ret = update_space_info(extent_root->fs_info, flags, 3503 ret = update_space_info(extent_root->fs_info, flags,
3413 0, 0, &space_info); 3504 0, 0, &space_info);
3414 BUG_ON(ret); 3505 BUG_ON(ret); /* -ENOMEM */
3415 } 3506 }
3416 BUG_ON(!space_info); 3507 BUG_ON(!space_info); /* Logic error */
3417 3508
3418again: 3509again:
3419 spin_lock(&space_info->lock); 3510 spin_lock(&space_info->lock);
@@ -3468,6 +3559,12 @@ again:
3468 force_metadata_allocation(fs_info); 3559 force_metadata_allocation(fs_info);
3469 } 3560 }
3470 3561
3562 /*
3563 * Check if we have enough space in SYSTEM chunk because we may need
3564 * to update devices.
3565 */
3566 check_system_chunk(trans, extent_root, flags);
3567
3471 ret = btrfs_alloc_chunk(trans, extent_root, flags); 3568 ret = btrfs_alloc_chunk(trans, extent_root, flags);
3472 if (ret < 0 && ret != -ENOSPC) 3569 if (ret < 0 && ret != -ENOSPC)
3473 goto out; 3570 goto out;
@@ -3678,8 +3775,10 @@ again:
3678 ret = wait_event_interruptible(space_info->wait, 3775 ret = wait_event_interruptible(space_info->wait,
3679 !space_info->flush); 3776 !space_info->flush);
3680 /* Must have been interrupted, return */ 3777 /* Must have been interrupted, return */
3681 if (ret) 3778 if (ret) {
3779 printk(KERN_DEBUG "btrfs: %s returning -EINTR\n", __func__);
3682 return -EINTR; 3780 return -EINTR;
3781 }
3683 3782
3684 spin_lock(&space_info->lock); 3783 spin_lock(&space_info->lock);
3685 } 3784 }
@@ -3700,9 +3799,7 @@ again:
3700 if (used + orig_bytes <= space_info->total_bytes) { 3799 if (used + orig_bytes <= space_info->total_bytes) {
3701 space_info->bytes_may_use += orig_bytes; 3800 space_info->bytes_may_use += orig_bytes;
3702 trace_btrfs_space_reservation(root->fs_info, 3801 trace_btrfs_space_reservation(root->fs_info,
3703 "space_info", 3802 "space_info", space_info->flags, orig_bytes, 1);
3704 (u64)(unsigned long)space_info,
3705 orig_bytes, 1);
3706 ret = 0; 3803 ret = 0;
3707 } else { 3804 } else {
3708 /* 3805 /*
@@ -3771,9 +3868,7 @@ again:
3771 if (used + num_bytes < space_info->total_bytes + avail) { 3868 if (used + num_bytes < space_info->total_bytes + avail) {
3772 space_info->bytes_may_use += orig_bytes; 3869 space_info->bytes_may_use += orig_bytes;
3773 trace_btrfs_space_reservation(root->fs_info, 3870 trace_btrfs_space_reservation(root->fs_info,
3774 "space_info", 3871 "space_info", space_info->flags, orig_bytes, 1);
3775 (u64)(unsigned long)space_info,
3776 orig_bytes, 1);
3777 ret = 0; 3872 ret = 0;
3778 } else { 3873 } else {
3779 wait_ordered = true; 3874 wait_ordered = true;
@@ -3836,8 +3931,9 @@ out:
3836 return ret; 3931 return ret;
3837} 3932}
3838 3933
3839static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans, 3934static struct btrfs_block_rsv *get_block_rsv(
3840 struct btrfs_root *root) 3935 const struct btrfs_trans_handle *trans,
3936 const struct btrfs_root *root)
3841{ 3937{
3842 struct btrfs_block_rsv *block_rsv = NULL; 3938 struct btrfs_block_rsv *block_rsv = NULL;
3843 3939
@@ -3918,8 +4014,7 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
3918 spin_lock(&space_info->lock); 4014 spin_lock(&space_info->lock);
3919 space_info->bytes_may_use -= num_bytes; 4015 space_info->bytes_may_use -= num_bytes;
3920 trace_btrfs_space_reservation(fs_info, "space_info", 4016 trace_btrfs_space_reservation(fs_info, "space_info",
3921 (u64)(unsigned long)space_info, 4017 space_info->flags, num_bytes, 0);
3922 num_bytes, 0);
3923 space_info->reservation_progress++; 4018 space_info->reservation_progress++;
3924 spin_unlock(&space_info->lock); 4019 spin_unlock(&space_info->lock);
3925 } 4020 }
@@ -4137,14 +4232,14 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4137 block_rsv->reserved += num_bytes; 4232 block_rsv->reserved += num_bytes;
4138 sinfo->bytes_may_use += num_bytes; 4233 sinfo->bytes_may_use += num_bytes;
4139 trace_btrfs_space_reservation(fs_info, "space_info", 4234 trace_btrfs_space_reservation(fs_info, "space_info",
4140 (u64)(unsigned long)sinfo, num_bytes, 1); 4235 sinfo->flags, num_bytes, 1);
4141 } 4236 }
4142 4237
4143 if (block_rsv->reserved >= block_rsv->size) { 4238 if (block_rsv->reserved >= block_rsv->size) {
4144 num_bytes = block_rsv->reserved - block_rsv->size; 4239 num_bytes = block_rsv->reserved - block_rsv->size;
4145 sinfo->bytes_may_use -= num_bytes; 4240 sinfo->bytes_may_use -= num_bytes;
4146 trace_btrfs_space_reservation(fs_info, "space_info", 4241 trace_btrfs_space_reservation(fs_info, "space_info",
4147 (u64)(unsigned long)sinfo, num_bytes, 0); 4242 sinfo->flags, num_bytes, 0);
4148 sinfo->reservation_progress++; 4243 sinfo->reservation_progress++;
4149 block_rsv->reserved = block_rsv->size; 4244 block_rsv->reserved = block_rsv->size;
4150 block_rsv->full = 1; 4245 block_rsv->full = 1;
@@ -4198,12 +4293,12 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4198 return; 4293 return;
4199 4294
4200 trace_btrfs_space_reservation(root->fs_info, "transaction", 4295 trace_btrfs_space_reservation(root->fs_info, "transaction",
4201 (u64)(unsigned long)trans, 4296 trans->transid, trans->bytes_reserved, 0);
4202 trans->bytes_reserved, 0);
4203 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved); 4297 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4204 trans->bytes_reserved = 0; 4298 trans->bytes_reserved = 0;
4205} 4299}
4206 4300
4301/* Can only return 0 or -ENOSPC */
4207int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, 4302int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4208 struct inode *inode) 4303 struct inode *inode)
4209{ 4304{
@@ -4540,7 +4635,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
4540 while (total) { 4635 while (total) {
4541 cache = btrfs_lookup_block_group(info, bytenr); 4636 cache = btrfs_lookup_block_group(info, bytenr);
4542 if (!cache) 4637 if (!cache)
4543 return -1; 4638 return -ENOENT;
4544 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP | 4639 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4545 BTRFS_BLOCK_GROUP_RAID1 | 4640 BTRFS_BLOCK_GROUP_RAID1 |
4546 BTRFS_BLOCK_GROUP_RAID10)) 4641 BTRFS_BLOCK_GROUP_RAID10))
@@ -4643,7 +4738,7 @@ int btrfs_pin_extent(struct btrfs_root *root,
4643 struct btrfs_block_group_cache *cache; 4738 struct btrfs_block_group_cache *cache;
4644 4739
4645 cache = btrfs_lookup_block_group(root->fs_info, bytenr); 4740 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4646 BUG_ON(!cache); 4741 BUG_ON(!cache); /* Logic error */
4647 4742
4648 pin_down_extent(root, cache, bytenr, num_bytes, reserved); 4743 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4649 4744
@@ -4661,7 +4756,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
4661 struct btrfs_block_group_cache *cache; 4756 struct btrfs_block_group_cache *cache;
4662 4757
4663 cache = btrfs_lookup_block_group(root->fs_info, bytenr); 4758 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4664 BUG_ON(!cache); 4759 BUG_ON(!cache); /* Logic error */
4665 4760
4666 /* 4761 /*
4667 * pull in the free space cache (if any) so that our pin 4762 * pull in the free space cache (if any) so that our pin
@@ -4706,6 +4801,7 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4706{ 4801{
4707 struct btrfs_space_info *space_info = cache->space_info; 4802 struct btrfs_space_info *space_info = cache->space_info;
4708 int ret = 0; 4803 int ret = 0;
4804
4709 spin_lock(&space_info->lock); 4805 spin_lock(&space_info->lock);
4710 spin_lock(&cache->lock); 4806 spin_lock(&cache->lock);
4711 if (reserve != RESERVE_FREE) { 4807 if (reserve != RESERVE_FREE) {
@@ -4716,9 +4812,8 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4716 space_info->bytes_reserved += num_bytes; 4812 space_info->bytes_reserved += num_bytes;
4717 if (reserve == RESERVE_ALLOC) { 4813 if (reserve == RESERVE_ALLOC) {
4718 trace_btrfs_space_reservation(cache->fs_info, 4814 trace_btrfs_space_reservation(cache->fs_info,
4719 "space_info", 4815 "space_info", space_info->flags,
4720 (u64)(unsigned long)space_info, 4816 num_bytes, 0);
4721 num_bytes, 0);
4722 space_info->bytes_may_use -= num_bytes; 4817 space_info->bytes_may_use -= num_bytes;
4723 } 4818 }
4724 } 4819 }
@@ -4734,7 +4829,7 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4734 return ret; 4829 return ret;
4735} 4830}
4736 4831
4737int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 4832void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4738 struct btrfs_root *root) 4833 struct btrfs_root *root)
4739{ 4834{
4740 struct btrfs_fs_info *fs_info = root->fs_info; 4835 struct btrfs_fs_info *fs_info = root->fs_info;
@@ -4764,7 +4859,6 @@ int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4764 up_write(&fs_info->extent_commit_sem); 4859 up_write(&fs_info->extent_commit_sem);
4765 4860
4766 update_global_block_rsv(fs_info); 4861 update_global_block_rsv(fs_info);
4767 return 0;
4768} 4862}
4769 4863
4770static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) 4864static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
@@ -4779,7 +4873,7 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4779 if (cache) 4873 if (cache)
4780 btrfs_put_block_group(cache); 4874 btrfs_put_block_group(cache);
4781 cache = btrfs_lookup_block_group(fs_info, start); 4875 cache = btrfs_lookup_block_group(fs_info, start);
4782 BUG_ON(!cache); 4876 BUG_ON(!cache); /* Logic error */
4783 } 4877 }
4784 4878
4785 len = cache->key.objectid + cache->key.offset - start; 4879 len = cache->key.objectid + cache->key.offset - start;
@@ -4816,6 +4910,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4816 u64 end; 4910 u64 end;
4817 int ret; 4911 int ret;
4818 4912
4913 if (trans->aborted)
4914 return 0;
4915
4819 if (fs_info->pinned_extents == &fs_info->freed_extents[0]) 4916 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4820 unpin = &fs_info->freed_extents[1]; 4917 unpin = &fs_info->freed_extents[1];
4821 else 4918 else
@@ -4901,7 +4998,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4901 ret = remove_extent_backref(trans, extent_root, path, 4998 ret = remove_extent_backref(trans, extent_root, path,
4902 NULL, refs_to_drop, 4999 NULL, refs_to_drop,
4903 is_data); 5000 is_data);
4904 BUG_ON(ret); 5001 if (ret)
5002 goto abort;
4905 btrfs_release_path(path); 5003 btrfs_release_path(path);
4906 path->leave_spinning = 1; 5004 path->leave_spinning = 1;
4907 5005
@@ -4919,10 +5017,11 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4919 btrfs_print_leaf(extent_root, 5017 btrfs_print_leaf(extent_root,
4920 path->nodes[0]); 5018 path->nodes[0]);
4921 } 5019 }
4922 BUG_ON(ret); 5020 if (ret < 0)
5021 goto abort;
4923 extent_slot = path->slots[0]; 5022 extent_slot = path->slots[0];
4924 } 5023 }
4925 } else { 5024 } else if (ret == -ENOENT) {
4926 btrfs_print_leaf(extent_root, path->nodes[0]); 5025 btrfs_print_leaf(extent_root, path->nodes[0]);
4927 WARN_ON(1); 5026 WARN_ON(1);
4928 printk(KERN_ERR "btrfs unable to find ref byte nr %llu " 5027 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
@@ -4932,6 +5031,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4932 (unsigned long long)root_objectid, 5031 (unsigned long long)root_objectid,
4933 (unsigned long long)owner_objectid, 5032 (unsigned long long)owner_objectid,
4934 (unsigned long long)owner_offset); 5033 (unsigned long long)owner_offset);
5034 } else {
5035 goto abort;
4935 } 5036 }
4936 5037
4937 leaf = path->nodes[0]; 5038 leaf = path->nodes[0];
@@ -4941,7 +5042,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4941 BUG_ON(found_extent || extent_slot != path->slots[0]); 5042 BUG_ON(found_extent || extent_slot != path->slots[0]);
4942 ret = convert_extent_item_v0(trans, extent_root, path, 5043 ret = convert_extent_item_v0(trans, extent_root, path,
4943 owner_objectid, 0); 5044 owner_objectid, 0);
4944 BUG_ON(ret < 0); 5045 if (ret < 0)
5046 goto abort;
4945 5047
4946 btrfs_release_path(path); 5048 btrfs_release_path(path);
4947 path->leave_spinning = 1; 5049 path->leave_spinning = 1;
@@ -4958,7 +5060,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4958 (unsigned long long)bytenr); 5060 (unsigned long long)bytenr);
4959 btrfs_print_leaf(extent_root, path->nodes[0]); 5061 btrfs_print_leaf(extent_root, path->nodes[0]);
4960 } 5062 }
4961 BUG_ON(ret); 5063 if (ret < 0)
5064 goto abort;
4962 extent_slot = path->slots[0]; 5065 extent_slot = path->slots[0];
4963 leaf = path->nodes[0]; 5066 leaf = path->nodes[0];
4964 item_size = btrfs_item_size_nr(leaf, extent_slot); 5067 item_size = btrfs_item_size_nr(leaf, extent_slot);
@@ -4995,7 +5098,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4995 ret = remove_extent_backref(trans, extent_root, path, 5098 ret = remove_extent_backref(trans, extent_root, path,
4996 iref, refs_to_drop, 5099 iref, refs_to_drop,
4997 is_data); 5100 is_data);
4998 BUG_ON(ret); 5101 if (ret)
5102 goto abort;
4999 } 5103 }
5000 } else { 5104 } else {
5001 if (found_extent) { 5105 if (found_extent) {
@@ -5012,23 +5116,27 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
5012 5116
5013 ret = btrfs_del_items(trans, extent_root, path, path->slots[0], 5117 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5014 num_to_del); 5118 num_to_del);
5015 BUG_ON(ret); 5119 if (ret)
5120 goto abort;
5016 btrfs_release_path(path); 5121 btrfs_release_path(path);
5017 5122
5018 if (is_data) { 5123 if (is_data) {
5019 ret = btrfs_del_csums(trans, root, bytenr, num_bytes); 5124 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5020 BUG_ON(ret); 5125 if (ret)
5021 } else { 5126 goto abort;
5022 invalidate_mapping_pages(info->btree_inode->i_mapping,
5023 bytenr >> PAGE_CACHE_SHIFT,
5024 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
5025 } 5127 }
5026 5128
5027 ret = update_block_group(trans, root, bytenr, num_bytes, 0); 5129 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
5028 BUG_ON(ret); 5130 if (ret)
5131 goto abort;
5029 } 5132 }
5133out:
5030 btrfs_free_path(path); 5134 btrfs_free_path(path);
5031 return ret; 5135 return ret;
5136
5137abort:
5138 btrfs_abort_transaction(trans, extent_root, ret);
5139 goto out;
5032} 5140}
5033 5141
5034/* 5142/*
@@ -5124,7 +5232,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5124 parent, root->root_key.objectid, 5232 parent, root->root_key.objectid,
5125 btrfs_header_level(buf), 5233 btrfs_header_level(buf),
5126 BTRFS_DROP_DELAYED_REF, NULL, for_cow); 5234 BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5127 BUG_ON(ret); 5235 BUG_ON(ret); /* -ENOMEM */
5128 } 5236 }
5129 5237
5130 if (!last_ref) 5238 if (!last_ref)
@@ -5158,6 +5266,7 @@ out:
5158 btrfs_put_block_group(cache); 5266 btrfs_put_block_group(cache);
5159} 5267}
5160 5268
5269/* Can return -ENOMEM */
5161int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, 5270int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5162 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, 5271 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5163 u64 owner, u64 offset, int for_cow) 5272 u64 owner, u64 offset, int for_cow)
@@ -5179,14 +5288,12 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5179 num_bytes, 5288 num_bytes,
5180 parent, root_objectid, (int)owner, 5289 parent, root_objectid, (int)owner,
5181 BTRFS_DROP_DELAYED_REF, NULL, for_cow); 5290 BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5182 BUG_ON(ret);
5183 } else { 5291 } else {
5184 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr, 5292 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5185 num_bytes, 5293 num_bytes,
5186 parent, root_objectid, owner, 5294 parent, root_objectid, owner,
5187 offset, BTRFS_DROP_DELAYED_REF, 5295 offset, BTRFS_DROP_DELAYED_REF,
5188 NULL, for_cow); 5296 NULL, for_cow);
5189 BUG_ON(ret);
5190 } 5297 }
5191 return ret; 5298 return ret;
5192} 5299}
@@ -5243,28 +5350,34 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5243 return 0; 5350 return 0;
5244} 5351}
5245 5352
5246static int get_block_group_index(struct btrfs_block_group_cache *cache) 5353static int __get_block_group_index(u64 flags)
5247{ 5354{
5248 int index; 5355 int index;
5249 if (cache->flags & BTRFS_BLOCK_GROUP_RAID10) 5356
5357 if (flags & BTRFS_BLOCK_GROUP_RAID10)
5250 index = 0; 5358 index = 0;
5251 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1) 5359 else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5252 index = 1; 5360 index = 1;
5253 else if (cache->flags & BTRFS_BLOCK_GROUP_DUP) 5361 else if (flags & BTRFS_BLOCK_GROUP_DUP)
5254 index = 2; 5362 index = 2;
5255 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0) 5363 else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5256 index = 3; 5364 index = 3;
5257 else 5365 else
5258 index = 4; 5366 index = 4;
5367
5259 return index; 5368 return index;
5260} 5369}
5261 5370
5371static int get_block_group_index(struct btrfs_block_group_cache *cache)
5372{
5373 return __get_block_group_index(cache->flags);
5374}
5375
5262enum btrfs_loop_type { 5376enum btrfs_loop_type {
5263 LOOP_FIND_IDEAL = 0, 5377 LOOP_CACHING_NOWAIT = 0,
5264 LOOP_CACHING_NOWAIT = 1, 5378 LOOP_CACHING_WAIT = 1,
5265 LOOP_CACHING_WAIT = 2, 5379 LOOP_ALLOC_CHUNK = 2,
5266 LOOP_ALLOC_CHUNK = 3, 5380 LOOP_NO_EMPTY_SIZE = 3,
5267 LOOP_NO_EMPTY_SIZE = 4,
5268}; 5381};
5269 5382
5270/* 5383/*
@@ -5278,7 +5391,6 @@ enum btrfs_loop_type {
5278static noinline int find_free_extent(struct btrfs_trans_handle *trans, 5391static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5279 struct btrfs_root *orig_root, 5392 struct btrfs_root *orig_root,
5280 u64 num_bytes, u64 empty_size, 5393 u64 num_bytes, u64 empty_size,
5281 u64 search_start, u64 search_end,
5282 u64 hint_byte, struct btrfs_key *ins, 5394 u64 hint_byte, struct btrfs_key *ins,
5283 u64 data) 5395 u64 data)
5284{ 5396{
@@ -5287,6 +5399,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5287 struct btrfs_free_cluster *last_ptr = NULL; 5399 struct btrfs_free_cluster *last_ptr = NULL;
5288 struct btrfs_block_group_cache *block_group = NULL; 5400 struct btrfs_block_group_cache *block_group = NULL;
5289 struct btrfs_block_group_cache *used_block_group; 5401 struct btrfs_block_group_cache *used_block_group;
5402 u64 search_start = 0;
5290 int empty_cluster = 2 * 1024 * 1024; 5403 int empty_cluster = 2 * 1024 * 1024;
5291 int allowed_chunk_alloc = 0; 5404 int allowed_chunk_alloc = 0;
5292 int done_chunk_alloc = 0; 5405 int done_chunk_alloc = 0;
@@ -5300,8 +5413,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5300 bool failed_alloc = false; 5413 bool failed_alloc = false;
5301 bool use_cluster = true; 5414 bool use_cluster = true;
5302 bool have_caching_bg = false; 5415 bool have_caching_bg = false;
5303 u64 ideal_cache_percent = 0;
5304 u64 ideal_cache_offset = 0;
5305 5416
5306 WARN_ON(num_bytes < root->sectorsize); 5417 WARN_ON(num_bytes < root->sectorsize);
5307 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); 5418 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -5351,7 +5462,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5351 empty_cluster = 0; 5462 empty_cluster = 0;
5352 5463
5353 if (search_start == hint_byte) { 5464 if (search_start == hint_byte) {
5354ideal_cache:
5355 block_group = btrfs_lookup_block_group(root->fs_info, 5465 block_group = btrfs_lookup_block_group(root->fs_info,
5356 search_start); 5466 search_start);
5357 used_block_group = block_group; 5467 used_block_group = block_group;
@@ -5363,8 +5473,7 @@ ideal_cache:
5363 * picked out then we don't care that the block group is cached. 5473 * picked out then we don't care that the block group is cached.
5364 */ 5474 */
5365 if (block_group && block_group_bits(block_group, data) && 5475 if (block_group && block_group_bits(block_group, data) &&
5366 (block_group->cached != BTRFS_CACHE_NO || 5476 block_group->cached != BTRFS_CACHE_NO) {
5367 search_start == ideal_cache_offset)) {
5368 down_read(&space_info->groups_sem); 5477 down_read(&space_info->groups_sem);
5369 if (list_empty(&block_group->list) || 5478 if (list_empty(&block_group->list) ||
5370 block_group->ro) { 5479 block_group->ro) {
@@ -5418,44 +5527,13 @@ search:
5418have_block_group: 5527have_block_group:
5419 cached = block_group_cache_done(block_group); 5528 cached = block_group_cache_done(block_group);
5420 if (unlikely(!cached)) { 5529 if (unlikely(!cached)) {
5421 u64 free_percent;
5422
5423 found_uncached_bg = true; 5530 found_uncached_bg = true;
5424 ret = cache_block_group(block_group, trans, 5531 ret = cache_block_group(block_group, trans,
5425 orig_root, 1); 5532 orig_root, 0);
5426 if (block_group->cached == BTRFS_CACHE_FINISHED) 5533 BUG_ON(ret < 0);
5427 goto alloc; 5534 ret = 0;
5428
5429 free_percent = btrfs_block_group_used(&block_group->item);
5430 free_percent *= 100;
5431 free_percent = div64_u64(free_percent,
5432 block_group->key.offset);
5433 free_percent = 100 - free_percent;
5434 if (free_percent > ideal_cache_percent &&
5435 likely(!block_group->ro)) {
5436 ideal_cache_offset = block_group->key.objectid;
5437 ideal_cache_percent = free_percent;
5438 }
5439
5440 /*
5441 * The caching workers are limited to 2 threads, so we
5442 * can queue as much work as we care to.
5443 */
5444 if (loop > LOOP_FIND_IDEAL) {
5445 ret = cache_block_group(block_group, trans,
5446 orig_root, 0);
5447 BUG_ON(ret);
5448 }
5449
5450 /*
5451 * If loop is set for cached only, try the next block
5452 * group.
5453 */
5454 if (loop == LOOP_FIND_IDEAL)
5455 goto loop;
5456 } 5535 }
5457 5536
5458alloc:
5459 if (unlikely(block_group->ro)) 5537 if (unlikely(block_group->ro))
5460 goto loop; 5538 goto loop;
5461 5539
@@ -5606,11 +5684,6 @@ unclustered_alloc:
5606 } 5684 }
5607checks: 5685checks:
5608 search_start = stripe_align(root, offset); 5686 search_start = stripe_align(root, offset);
5609 /* move on to the next group */
5610 if (search_start + num_bytes >= search_end) {
5611 btrfs_add_free_space(used_block_group, offset, num_bytes);
5612 goto loop;
5613 }
5614 5687
5615 /* move on to the next group */ 5688 /* move on to the next group */
5616 if (search_start + num_bytes > 5689 if (search_start + num_bytes >
@@ -5661,9 +5734,7 @@ loop:
5661 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES) 5734 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5662 goto search; 5735 goto search;
5663 5736
5664 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for 5737 /*
5665 * for them to make caching progress. Also
5666 * determine the best possible bg to cache
5667 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking 5738 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5668 * caching kthreads as we move along 5739 * caching kthreads as we move along
5669 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching 5740 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
@@ -5673,50 +5744,17 @@ loop:
5673 */ 5744 */
5674 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) { 5745 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5675 index = 0; 5746 index = 0;
5676 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
5677 found_uncached_bg = false;
5678 loop++;
5679 if (!ideal_cache_percent)
5680 goto search;
5681
5682 /*
5683 * 1 of the following 2 things have happened so far
5684 *
5685 * 1) We found an ideal block group for caching that
5686 * is mostly full and will cache quickly, so we might
5687 * as well wait for it.
5688 *
5689 * 2) We searched for cached only and we didn't find
5690 * anything, and we didn't start any caching kthreads
5691 * either, so chances are we will loop through and
5692 * start a couple caching kthreads, and then come back
5693 * around and just wait for them. This will be slower
5694 * because we will have 2 caching kthreads reading at
5695 * the same time when we could have just started one
5696 * and waited for it to get far enough to give us an
5697 * allocation, so go ahead and go to the wait caching
5698 * loop.
5699 */
5700 loop = LOOP_CACHING_WAIT;
5701 search_start = ideal_cache_offset;
5702 ideal_cache_percent = 0;
5703 goto ideal_cache;
5704 } else if (loop == LOOP_FIND_IDEAL) {
5705 /*
5706 * Didn't find a uncached bg, wait on anything we find
5707 * next.
5708 */
5709 loop = LOOP_CACHING_WAIT;
5710 goto search;
5711 }
5712
5713 loop++; 5747 loop++;
5714
5715 if (loop == LOOP_ALLOC_CHUNK) { 5748 if (loop == LOOP_ALLOC_CHUNK) {
5716 if (allowed_chunk_alloc) { 5749 if (allowed_chunk_alloc) {
5717 ret = do_chunk_alloc(trans, root, num_bytes + 5750 ret = do_chunk_alloc(trans, root, num_bytes +
5718 2 * 1024 * 1024, data, 5751 2 * 1024 * 1024, data,
5719 CHUNK_ALLOC_LIMITED); 5752 CHUNK_ALLOC_LIMITED);
5753 if (ret < 0) {
5754 btrfs_abort_transaction(trans,
5755 root, ret);
5756 goto out;
5757 }
5720 allowed_chunk_alloc = 0; 5758 allowed_chunk_alloc = 0;
5721 if (ret == 1) 5759 if (ret == 1)
5722 done_chunk_alloc = 1; 5760 done_chunk_alloc = 1;
@@ -5745,6 +5783,7 @@ loop:
5745 } else if (ins->objectid) { 5783 } else if (ins->objectid) {
5746 ret = 0; 5784 ret = 0;
5747 } 5785 }
5786out:
5748 5787
5749 return ret; 5788 return ret;
5750} 5789}
@@ -5798,12 +5837,10 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5798 struct btrfs_root *root, 5837 struct btrfs_root *root,
5799 u64 num_bytes, u64 min_alloc_size, 5838 u64 num_bytes, u64 min_alloc_size,
5800 u64 empty_size, u64 hint_byte, 5839 u64 empty_size, u64 hint_byte,
5801 u64 search_end, struct btrfs_key *ins, 5840 struct btrfs_key *ins, u64 data)
5802 u64 data)
5803{ 5841{
5804 bool final_tried = false; 5842 bool final_tried = false;
5805 int ret; 5843 int ret;
5806 u64 search_start = 0;
5807 5844
5808 data = btrfs_get_alloc_profile(root, data); 5845 data = btrfs_get_alloc_profile(root, data);
5809again: 5846again:
@@ -5811,23 +5848,31 @@ again:
5811 * the only place that sets empty_size is btrfs_realloc_node, which 5848 * the only place that sets empty_size is btrfs_realloc_node, which
5812 * is not called recursively on allocations 5849 * is not called recursively on allocations
5813 */ 5850 */
5814 if (empty_size || root->ref_cows) 5851 if (empty_size || root->ref_cows) {
5815 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 5852 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5816 num_bytes + 2 * 1024 * 1024, data, 5853 num_bytes + 2 * 1024 * 1024, data,
5817 CHUNK_ALLOC_NO_FORCE); 5854 CHUNK_ALLOC_NO_FORCE);
5855 if (ret < 0 && ret != -ENOSPC) {
5856 btrfs_abort_transaction(trans, root, ret);
5857 return ret;
5858 }
5859 }
5818 5860
5819 WARN_ON(num_bytes < root->sectorsize); 5861 WARN_ON(num_bytes < root->sectorsize);
5820 ret = find_free_extent(trans, root, num_bytes, empty_size, 5862 ret = find_free_extent(trans, root, num_bytes, empty_size,
5821 search_start, search_end, hint_byte, 5863 hint_byte, ins, data);
5822 ins, data);
5823 5864
5824 if (ret == -ENOSPC) { 5865 if (ret == -ENOSPC) {
5825 if (!final_tried) { 5866 if (!final_tried) {
5826 num_bytes = num_bytes >> 1; 5867 num_bytes = num_bytes >> 1;
5827 num_bytes = num_bytes & ~(root->sectorsize - 1); 5868 num_bytes = num_bytes & ~(root->sectorsize - 1);
5828 num_bytes = max(num_bytes, min_alloc_size); 5869 num_bytes = max(num_bytes, min_alloc_size);
5829 do_chunk_alloc(trans, root->fs_info->extent_root, 5870 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5830 num_bytes, data, CHUNK_ALLOC_FORCE); 5871 num_bytes, data, CHUNK_ALLOC_FORCE);
5872 if (ret < 0 && ret != -ENOSPC) {
5873 btrfs_abort_transaction(trans, root, ret);
5874 return ret;
5875 }
5831 if (num_bytes == min_alloc_size) 5876 if (num_bytes == min_alloc_size)
5832 final_tried = true; 5877 final_tried = true;
5833 goto again; 5878 goto again;
@@ -5838,7 +5883,8 @@ again:
5838 printk(KERN_ERR "btrfs allocation failed flags %llu, " 5883 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5839 "wanted %llu\n", (unsigned long long)data, 5884 "wanted %llu\n", (unsigned long long)data,
5840 (unsigned long long)num_bytes); 5885 (unsigned long long)num_bytes);
5841 dump_space_info(sinfo, num_bytes, 1); 5886 if (sinfo)
5887 dump_space_info(sinfo, num_bytes, 1);
5842 } 5888 }
5843 } 5889 }
5844 5890
@@ -5917,7 +5963,10 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5917 path->leave_spinning = 1; 5963 path->leave_spinning = 1;
5918 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, 5964 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5919 ins, size); 5965 ins, size);
5920 BUG_ON(ret); 5966 if (ret) {
5967 btrfs_free_path(path);
5968 return ret;
5969 }
5921 5970
5922 leaf = path->nodes[0]; 5971 leaf = path->nodes[0];
5923 extent_item = btrfs_item_ptr(leaf, path->slots[0], 5972 extent_item = btrfs_item_ptr(leaf, path->slots[0],
@@ -5947,7 +5996,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5947 btrfs_free_path(path); 5996 btrfs_free_path(path);
5948 5997
5949 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); 5998 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5950 if (ret) { 5999 if (ret) { /* -ENOENT, logic error */
5951 printk(KERN_ERR "btrfs update block group failed for %llu " 6000 printk(KERN_ERR "btrfs update block group failed for %llu "
5952 "%llu\n", (unsigned long long)ins->objectid, 6001 "%llu\n", (unsigned long long)ins->objectid,
5953 (unsigned long long)ins->offset); 6002 (unsigned long long)ins->offset);
@@ -5978,7 +6027,10 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5978 path->leave_spinning = 1; 6027 path->leave_spinning = 1;
5979 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path, 6028 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5980 ins, size); 6029 ins, size);
5981 BUG_ON(ret); 6030 if (ret) {
6031 btrfs_free_path(path);
6032 return ret;
6033 }
5982 6034
5983 leaf = path->nodes[0]; 6035 leaf = path->nodes[0];
5984 extent_item = btrfs_item_ptr(leaf, path->slots[0], 6036 extent_item = btrfs_item_ptr(leaf, path->slots[0],
@@ -6008,7 +6060,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
6008 btrfs_free_path(path); 6060 btrfs_free_path(path);
6009 6061
6010 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); 6062 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
6011 if (ret) { 6063 if (ret) { /* -ENOENT, logic error */
6012 printk(KERN_ERR "btrfs update block group failed for %llu " 6064 printk(KERN_ERR "btrfs update block group failed for %llu "
6013 "%llu\n", (unsigned long long)ins->objectid, 6065 "%llu\n", (unsigned long long)ins->objectid,
6014 (unsigned long long)ins->offset); 6066 (unsigned long long)ins->offset);
@@ -6056,28 +6108,28 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6056 if (!caching_ctl) { 6108 if (!caching_ctl) {
6057 BUG_ON(!block_group_cache_done(block_group)); 6109 BUG_ON(!block_group_cache_done(block_group));
6058 ret = btrfs_remove_free_space(block_group, start, num_bytes); 6110 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6059 BUG_ON(ret); 6111 BUG_ON(ret); /* -ENOMEM */
6060 } else { 6112 } else {
6061 mutex_lock(&caching_ctl->mutex); 6113 mutex_lock(&caching_ctl->mutex);
6062 6114
6063 if (start >= caching_ctl->progress) { 6115 if (start >= caching_ctl->progress) {
6064 ret = add_excluded_extent(root, start, num_bytes); 6116 ret = add_excluded_extent(root, start, num_bytes);
6065 BUG_ON(ret); 6117 BUG_ON(ret); /* -ENOMEM */
6066 } else if (start + num_bytes <= caching_ctl->progress) { 6118 } else if (start + num_bytes <= caching_ctl->progress) {
6067 ret = btrfs_remove_free_space(block_group, 6119 ret = btrfs_remove_free_space(block_group,
6068 start, num_bytes); 6120 start, num_bytes);
6069 BUG_ON(ret); 6121 BUG_ON(ret); /* -ENOMEM */
6070 } else { 6122 } else {
6071 num_bytes = caching_ctl->progress - start; 6123 num_bytes = caching_ctl->progress - start;
6072 ret = btrfs_remove_free_space(block_group, 6124 ret = btrfs_remove_free_space(block_group,
6073 start, num_bytes); 6125 start, num_bytes);
6074 BUG_ON(ret); 6126 BUG_ON(ret); /* -ENOMEM */
6075 6127
6076 start = caching_ctl->progress; 6128 start = caching_ctl->progress;
6077 num_bytes = ins->objectid + ins->offset - 6129 num_bytes = ins->objectid + ins->offset -
6078 caching_ctl->progress; 6130 caching_ctl->progress;
6079 ret = add_excluded_extent(root, start, num_bytes); 6131 ret = add_excluded_extent(root, start, num_bytes);
6080 BUG_ON(ret); 6132 BUG_ON(ret); /* -ENOMEM */
6081 } 6133 }
6082 6134
6083 mutex_unlock(&caching_ctl->mutex); 6135 mutex_unlock(&caching_ctl->mutex);
@@ -6086,7 +6138,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
6086 6138
6087 ret = btrfs_update_reserved_bytes(block_group, ins->offset, 6139 ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6088 RESERVE_ALLOC_NO_ACCOUNT); 6140 RESERVE_ALLOC_NO_ACCOUNT);
6089 BUG_ON(ret); 6141 BUG_ON(ret); /* logic error */
6090 btrfs_put_block_group(block_group); 6142 btrfs_put_block_group(block_group);
6091 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, 6143 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6092 0, owner, offset, ins, 1); 6144 0, owner, offset, ins, 1);
@@ -6107,6 +6159,7 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6107 btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level); 6159 btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6108 btrfs_tree_lock(buf); 6160 btrfs_tree_lock(buf);
6109 clean_tree_block(trans, root, buf); 6161 clean_tree_block(trans, root, buf);
6162 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
6110 6163
6111 btrfs_set_lock_blocking(buf); 6164 btrfs_set_lock_blocking(buf);
6112 btrfs_set_buffer_uptodate(buf); 6165 btrfs_set_buffer_uptodate(buf);
@@ -6214,7 +6267,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6214 return ERR_CAST(block_rsv); 6267 return ERR_CAST(block_rsv);
6215 6268
6216 ret = btrfs_reserve_extent(trans, root, blocksize, blocksize, 6269 ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6217 empty_size, hint, (u64)-1, &ins, 0); 6270 empty_size, hint, &ins, 0);
6218 if (ret) { 6271 if (ret) {
6219 unuse_block_rsv(root->fs_info, block_rsv, blocksize); 6272 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
6220 return ERR_PTR(ret); 6273 return ERR_PTR(ret);
@@ -6222,7 +6275,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6222 6275
6223 buf = btrfs_init_new_buffer(trans, root, ins.objectid, 6276 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6224 blocksize, level); 6277 blocksize, level);
6225 BUG_ON(IS_ERR(buf)); 6278 BUG_ON(IS_ERR(buf)); /* -ENOMEM */
6226 6279
6227 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { 6280 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6228 if (parent == 0) 6281 if (parent == 0)
@@ -6234,7 +6287,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6234 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { 6287 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6235 struct btrfs_delayed_extent_op *extent_op; 6288 struct btrfs_delayed_extent_op *extent_op;
6236 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); 6289 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
6237 BUG_ON(!extent_op); 6290 BUG_ON(!extent_op); /* -ENOMEM */
6238 if (key) 6291 if (key)
6239 memcpy(&extent_op->key, key, sizeof(extent_op->key)); 6292 memcpy(&extent_op->key, key, sizeof(extent_op->key));
6240 else 6293 else
@@ -6249,7 +6302,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6249 ins.offset, parent, root_objectid, 6302 ins.offset, parent, root_objectid,
6250 level, BTRFS_ADD_DELAYED_EXTENT, 6303 level, BTRFS_ADD_DELAYED_EXTENT,
6251 extent_op, for_cow); 6304 extent_op, for_cow);
6252 BUG_ON(ret); 6305 BUG_ON(ret); /* -ENOMEM */
6253 } 6306 }
6254 return buf; 6307 return buf;
6255} 6308}
@@ -6319,7 +6372,9 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6319 /* We don't lock the tree block, it's OK to be racy here */ 6372 /* We don't lock the tree block, it's OK to be racy here */
6320 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize, 6373 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6321 &refs, &flags); 6374 &refs, &flags);
6322 BUG_ON(ret); 6375 /* We don't care about errors in readahead. */
6376 if (ret < 0)
6377 continue;
6323 BUG_ON(refs == 0); 6378 BUG_ON(refs == 0);
6324 6379
6325 if (wc->stage == DROP_REFERENCE) { 6380 if (wc->stage == DROP_REFERENCE) {
@@ -6386,7 +6441,9 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6386 eb->start, eb->len, 6441 eb->start, eb->len,
6387 &wc->refs[level], 6442 &wc->refs[level],
6388 &wc->flags[level]); 6443 &wc->flags[level]);
6389 BUG_ON(ret); 6444 BUG_ON(ret == -ENOMEM);
6445 if (ret)
6446 return ret;
6390 BUG_ON(wc->refs[level] == 0); 6447 BUG_ON(wc->refs[level] == 0);
6391 } 6448 }
6392 6449
@@ -6405,12 +6462,12 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6405 if (!(wc->flags[level] & flag)) { 6462 if (!(wc->flags[level] & flag)) {
6406 BUG_ON(!path->locks[level]); 6463 BUG_ON(!path->locks[level]);
6407 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc); 6464 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6408 BUG_ON(ret); 6465 BUG_ON(ret); /* -ENOMEM */
6409 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc); 6466 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6410 BUG_ON(ret); 6467 BUG_ON(ret); /* -ENOMEM */
6411 ret = btrfs_set_disk_extent_flags(trans, root, eb->start, 6468 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6412 eb->len, flag, 0); 6469 eb->len, flag, 0);
6413 BUG_ON(ret); 6470 BUG_ON(ret); /* -ENOMEM */
6414 wc->flags[level] |= flag; 6471 wc->flags[level] |= flag;
6415 } 6472 }
6416 6473
@@ -6482,7 +6539,11 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6482 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize, 6539 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6483 &wc->refs[level - 1], 6540 &wc->refs[level - 1],
6484 &wc->flags[level - 1]); 6541 &wc->flags[level - 1]);
6485 BUG_ON(ret); 6542 if (ret < 0) {
6543 btrfs_tree_unlock(next);
6544 return ret;
6545 }
6546
6486 BUG_ON(wc->refs[level - 1] == 0); 6547 BUG_ON(wc->refs[level - 1] == 0);
6487 *lookup_info = 0; 6548 *lookup_info = 0;
6488 6549
@@ -6551,7 +6612,7 @@ skip:
6551 6612
6552 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent, 6613 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6553 root->root_key.objectid, level - 1, 0, 0); 6614 root->root_key.objectid, level - 1, 0, 0);
6554 BUG_ON(ret); 6615 BUG_ON(ret); /* -ENOMEM */
6555 } 6616 }
6556 btrfs_tree_unlock(next); 6617 btrfs_tree_unlock(next);
6557 free_extent_buffer(next); 6618 free_extent_buffer(next);
@@ -6609,7 +6670,10 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6609 eb->start, eb->len, 6670 eb->start, eb->len,
6610 &wc->refs[level], 6671 &wc->refs[level],
6611 &wc->flags[level]); 6672 &wc->flags[level]);
6612 BUG_ON(ret); 6673 if (ret < 0) {
6674 btrfs_tree_unlock_rw(eb, path->locks[level]);
6675 return ret;
6676 }
6613 BUG_ON(wc->refs[level] == 0); 6677 BUG_ON(wc->refs[level] == 0);
6614 if (wc->refs[level] == 1) { 6678 if (wc->refs[level] == 1) {
6615 btrfs_tree_unlock_rw(eb, path->locks[level]); 6679 btrfs_tree_unlock_rw(eb, path->locks[level]);
@@ -6629,7 +6693,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6629 else 6693 else
6630 ret = btrfs_dec_ref(trans, root, eb, 0, 6694 ret = btrfs_dec_ref(trans, root, eb, 0,
6631 wc->for_reloc); 6695 wc->for_reloc);
6632 BUG_ON(ret); 6696 BUG_ON(ret); /* -ENOMEM */
6633 } 6697 }
6634 /* make block locked assertion in clean_tree_block happy */ 6698 /* make block locked assertion in clean_tree_block happy */
6635 if (!path->locks[level] && 6699 if (!path->locks[level] &&
@@ -6738,7 +6802,7 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6738 * also make sure backrefs for the shared block and all lower level 6802 * also make sure backrefs for the shared block and all lower level
6739 * blocks are properly updated. 6803 * blocks are properly updated.
6740 */ 6804 */
6741void btrfs_drop_snapshot(struct btrfs_root *root, 6805int btrfs_drop_snapshot(struct btrfs_root *root,
6742 struct btrfs_block_rsv *block_rsv, int update_ref, 6806 struct btrfs_block_rsv *block_rsv, int update_ref,
6743 int for_reloc) 6807 int for_reloc)
6744{ 6808{
@@ -6766,7 +6830,10 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
6766 } 6830 }
6767 6831
6768 trans = btrfs_start_transaction(tree_root, 0); 6832 trans = btrfs_start_transaction(tree_root, 0);
6769 BUG_ON(IS_ERR(trans)); 6833 if (IS_ERR(trans)) {
6834 err = PTR_ERR(trans);
6835 goto out_free;
6836 }
6770 6837
6771 if (block_rsv) 6838 if (block_rsv)
6772 trans->block_rsv = block_rsv; 6839 trans->block_rsv = block_rsv;
@@ -6791,7 +6858,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
6791 path->lowest_level = 0; 6858 path->lowest_level = 0;
6792 if (ret < 0) { 6859 if (ret < 0) {
6793 err = ret; 6860 err = ret;
6794 goto out_free; 6861 goto out_end_trans;
6795 } 6862 }
6796 WARN_ON(ret > 0); 6863 WARN_ON(ret > 0);
6797 6864
@@ -6811,7 +6878,10 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
6811 path->nodes[level]->len, 6878 path->nodes[level]->len,
6812 &wc->refs[level], 6879 &wc->refs[level],
6813 &wc->flags[level]); 6880 &wc->flags[level]);
6814 BUG_ON(ret); 6881 if (ret < 0) {
6882 err = ret;
6883 goto out_end_trans;
6884 }
6815 BUG_ON(wc->refs[level] == 0); 6885 BUG_ON(wc->refs[level] == 0);
6816 6886
6817 if (level == root_item->drop_level) 6887 if (level == root_item->drop_level)
@@ -6862,26 +6932,40 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
6862 ret = btrfs_update_root(trans, tree_root, 6932 ret = btrfs_update_root(trans, tree_root,
6863 &root->root_key, 6933 &root->root_key,
6864 root_item); 6934 root_item);
6865 BUG_ON(ret); 6935 if (ret) {
6936 btrfs_abort_transaction(trans, tree_root, ret);
6937 err = ret;
6938 goto out_end_trans;
6939 }
6866 6940
6867 btrfs_end_transaction_throttle(trans, tree_root); 6941 btrfs_end_transaction_throttle(trans, tree_root);
6868 trans = btrfs_start_transaction(tree_root, 0); 6942 trans = btrfs_start_transaction(tree_root, 0);
6869 BUG_ON(IS_ERR(trans)); 6943 if (IS_ERR(trans)) {
6944 err = PTR_ERR(trans);
6945 goto out_free;
6946 }
6870 if (block_rsv) 6947 if (block_rsv)
6871 trans->block_rsv = block_rsv; 6948 trans->block_rsv = block_rsv;
6872 } 6949 }
6873 } 6950 }
6874 btrfs_release_path(path); 6951 btrfs_release_path(path);
6875 BUG_ON(err); 6952 if (err)
6953 goto out_end_trans;
6876 6954
6877 ret = btrfs_del_root(trans, tree_root, &root->root_key); 6955 ret = btrfs_del_root(trans, tree_root, &root->root_key);
6878 BUG_ON(ret); 6956 if (ret) {
6957 btrfs_abort_transaction(trans, tree_root, ret);
6958 goto out_end_trans;
6959 }
6879 6960
6880 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { 6961 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6881 ret = btrfs_find_last_root(tree_root, root->root_key.objectid, 6962 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6882 NULL, NULL); 6963 NULL, NULL);
6883 BUG_ON(ret < 0); 6964 if (ret < 0) {
6884 if (ret > 0) { 6965 btrfs_abort_transaction(trans, tree_root, ret);
6966 err = ret;
6967 goto out_end_trans;
6968 } else if (ret > 0) {
6885 /* if we fail to delete the orphan item this time 6969 /* if we fail to delete the orphan item this time
6886 * around, it'll get picked up the next time. 6970 * around, it'll get picked up the next time.
6887 * 6971 *
@@ -6899,14 +6983,15 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
6899 free_extent_buffer(root->commit_root); 6983 free_extent_buffer(root->commit_root);
6900 kfree(root); 6984 kfree(root);
6901 } 6985 }
6902out_free: 6986out_end_trans:
6903 btrfs_end_transaction_throttle(trans, tree_root); 6987 btrfs_end_transaction_throttle(trans, tree_root);
6988out_free:
6904 kfree(wc); 6989 kfree(wc);
6905 btrfs_free_path(path); 6990 btrfs_free_path(path);
6906out: 6991out:
6907 if (err) 6992 if (err)
6908 btrfs_std_error(root->fs_info, err); 6993 btrfs_std_error(root->fs_info, err);
6909 return; 6994 return err;
6910} 6995}
6911 6996
6912/* 6997/*
@@ -6983,31 +7068,15 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6983static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) 7068static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
6984{ 7069{
6985 u64 num_devices; 7070 u64 num_devices;
6986 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 | 7071 u64 stripped;
6987 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
6988 7072
6989 if (root->fs_info->balance_ctl) { 7073 /*
6990 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl; 7074 * if restripe for this chunk_type is on pick target profile and
6991 u64 tgt = 0; 7075 * return, otherwise do the usual balance
6992 7076 */
6993 /* pick restriper's target profile and return */ 7077 stripped = get_restripe_target(root->fs_info, flags);
6994 if (flags & BTRFS_BLOCK_GROUP_DATA && 7078 if (stripped)
6995 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) { 7079 return extended_to_chunk(stripped);
6996 tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
6997 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
6998 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6999 tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
7000 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
7001 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
7002 tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
7003 }
7004
7005 if (tgt) {
7006 /* extended -> chunk profile */
7007 tgt &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
7008 return tgt;
7009 }
7010 }
7011 7080
7012 /* 7081 /*
7013 * we add in the count of missing devices because we want 7082 * we add in the count of missing devices because we want
@@ -7017,6 +7086,9 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7017 num_devices = root->fs_info->fs_devices->rw_devices + 7086 num_devices = root->fs_info->fs_devices->rw_devices +
7018 root->fs_info->fs_devices->missing_devices; 7087 root->fs_info->fs_devices->missing_devices;
7019 7088
7089 stripped = BTRFS_BLOCK_GROUP_RAID0 |
7090 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7091
7020 if (num_devices == 1) { 7092 if (num_devices == 1) {
7021 stripped |= BTRFS_BLOCK_GROUP_DUP; 7093 stripped |= BTRFS_BLOCK_GROUP_DUP;
7022 stripped = flags & ~stripped; 7094 stripped = flags & ~stripped;
@@ -7029,7 +7101,6 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7029 if (flags & (BTRFS_BLOCK_GROUP_RAID1 | 7101 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7030 BTRFS_BLOCK_GROUP_RAID10)) 7102 BTRFS_BLOCK_GROUP_RAID10))
7031 return stripped | BTRFS_BLOCK_GROUP_DUP; 7103 return stripped | BTRFS_BLOCK_GROUP_DUP;
7032 return flags;
7033 } else { 7104 } else {
7034 /* they already had raid on here, just return */ 7105 /* they already had raid on here, just return */
7035 if (flags & stripped) 7106 if (flags & stripped)
@@ -7042,9 +7113,9 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7042 if (flags & BTRFS_BLOCK_GROUP_DUP) 7113 if (flags & BTRFS_BLOCK_GROUP_DUP)
7043 return stripped | BTRFS_BLOCK_GROUP_RAID1; 7114 return stripped | BTRFS_BLOCK_GROUP_RAID1;
7044 7115
7045 /* turn single device chunks into raid0 */ 7116 /* this is drive concat, leave it alone */
7046 return stripped | BTRFS_BLOCK_GROUP_RAID0;
7047 } 7117 }
7118
7048 return flags; 7119 return flags;
7049} 7120}
7050 7121
@@ -7103,12 +7174,16 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
7103 BUG_ON(cache->ro); 7174 BUG_ON(cache->ro);
7104 7175
7105 trans = btrfs_join_transaction(root); 7176 trans = btrfs_join_transaction(root);
7106 BUG_ON(IS_ERR(trans)); 7177 if (IS_ERR(trans))
7178 return PTR_ERR(trans);
7107 7179
7108 alloc_flags = update_block_group_flags(root, cache->flags); 7180 alloc_flags = update_block_group_flags(root, cache->flags);
7109 if (alloc_flags != cache->flags) 7181 if (alloc_flags != cache->flags) {
7110 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 7182 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7111 CHUNK_ALLOC_FORCE); 7183 CHUNK_ALLOC_FORCE);
7184 if (ret < 0)
7185 goto out;
7186 }
7112 7187
7113 ret = set_block_group_ro(cache, 0); 7188 ret = set_block_group_ro(cache, 0);
7114 if (!ret) 7189 if (!ret)
@@ -7188,7 +7263,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7188 return free_bytes; 7263 return free_bytes;
7189} 7264}
7190 7265
7191int btrfs_set_block_group_rw(struct btrfs_root *root, 7266void btrfs_set_block_group_rw(struct btrfs_root *root,
7192 struct btrfs_block_group_cache *cache) 7267 struct btrfs_block_group_cache *cache)
7193{ 7268{
7194 struct btrfs_space_info *sinfo = cache->space_info; 7269 struct btrfs_space_info *sinfo = cache->space_info;
@@ -7204,7 +7279,6 @@ int btrfs_set_block_group_rw(struct btrfs_root *root,
7204 cache->ro = 0; 7279 cache->ro = 0;
7205 spin_unlock(&cache->lock); 7280 spin_unlock(&cache->lock);
7206 spin_unlock(&sinfo->lock); 7281 spin_unlock(&sinfo->lock);
7207 return 0;
7208} 7282}
7209 7283
7210/* 7284/*
@@ -7222,6 +7296,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7222 u64 min_free; 7296 u64 min_free;
7223 u64 dev_min = 1; 7297 u64 dev_min = 1;
7224 u64 dev_nr = 0; 7298 u64 dev_nr = 0;
7299 u64 target;
7225 int index; 7300 int index;
7226 int full = 0; 7301 int full = 0;
7227 int ret = 0; 7302 int ret = 0;
@@ -7262,13 +7337,11 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7262 /* 7337 /*
7263 * ok we don't have enough space, but maybe we have free space on our 7338 * ok we don't have enough space, but maybe we have free space on our
7264 * devices to allocate new chunks for relocation, so loop through our 7339 * devices to allocate new chunks for relocation, so loop through our
7265 * alloc devices and guess if we have enough space. However, if we 7340 * alloc devices and guess if we have enough space. if this block
7266 * were marked as full, then we know there aren't enough chunks, and we 7341 * group is going to be restriped, run checks against the target
7267 * can just return. 7342 * profile instead of the current one.
7268 */ 7343 */
7269 ret = -1; 7344 ret = -1;
7270 if (full)
7271 goto out;
7272 7345
7273 /* 7346 /*
7274 * index: 7347 * index:
@@ -7278,7 +7351,20 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7278 * 3: raid0 7351 * 3: raid0
7279 * 4: single 7352 * 4: single
7280 */ 7353 */
7281 index = get_block_group_index(block_group); 7354 target = get_restripe_target(root->fs_info, block_group->flags);
7355 if (target) {
7356 index = __get_block_group_index(extended_to_chunk(target));
7357 } else {
7358 /*
7359 * this is just a balance, so if we were marked as full
7360 * we know there is no space for a new chunk
7361 */
7362 if (full)
7363 goto out;
7364
7365 index = get_block_group_index(block_group);
7366 }
7367
7282 if (index == 0) { 7368 if (index == 0) {
7283 dev_min = 4; 7369 dev_min = 4;
7284 /* Divide by 2 */ 7370 /* Divide by 2 */
@@ -7572,7 +7658,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
7572 ret = update_space_info(info, cache->flags, found_key.offset, 7658 ret = update_space_info(info, cache->flags, found_key.offset,
7573 btrfs_block_group_used(&cache->item), 7659 btrfs_block_group_used(&cache->item),
7574 &space_info); 7660 &space_info);
7575 BUG_ON(ret); 7661 BUG_ON(ret); /* -ENOMEM */
7576 cache->space_info = space_info; 7662 cache->space_info = space_info;
7577 spin_lock(&cache->space_info->lock); 7663 spin_lock(&cache->space_info->lock);
7578 cache->space_info->bytes_readonly += cache->bytes_super; 7664 cache->space_info->bytes_readonly += cache->bytes_super;
@@ -7581,7 +7667,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
7581 __link_block_group(space_info, cache); 7667 __link_block_group(space_info, cache);
7582 7668
7583 ret = btrfs_add_block_group_cache(root->fs_info, cache); 7669 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7584 BUG_ON(ret); 7670 BUG_ON(ret); /* Logic error */
7585 7671
7586 set_avail_alloc_bits(root->fs_info, cache->flags); 7672 set_avail_alloc_bits(root->fs_info, cache->flags);
7587 if (btrfs_chunk_readonly(root, cache->key.objectid)) 7673 if (btrfs_chunk_readonly(root, cache->key.objectid))
@@ -7663,7 +7749,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7663 7749
7664 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, 7750 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7665 &cache->space_info); 7751 &cache->space_info);
7666 BUG_ON(ret); 7752 BUG_ON(ret); /* -ENOMEM */
7667 update_global_block_rsv(root->fs_info); 7753 update_global_block_rsv(root->fs_info);
7668 7754
7669 spin_lock(&cache->space_info->lock); 7755 spin_lock(&cache->space_info->lock);
@@ -7673,11 +7759,14 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7673 __link_block_group(cache->space_info, cache); 7759 __link_block_group(cache->space_info, cache);
7674 7760
7675 ret = btrfs_add_block_group_cache(root->fs_info, cache); 7761 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7676 BUG_ON(ret); 7762 BUG_ON(ret); /* Logic error */
7677 7763
7678 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item, 7764 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7679 sizeof(cache->item)); 7765 sizeof(cache->item));
7680 BUG_ON(ret); 7766 if (ret) {
7767 btrfs_abort_transaction(trans, extent_root, ret);
7768 return ret;
7769 }
7681 7770
7682 set_avail_alloc_bits(extent_root->fs_info, type); 7771 set_avail_alloc_bits(extent_root->fs_info, type);
7683 7772
@@ -7686,11 +7775,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7686 7775
7687static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) 7776static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
7688{ 7777{
7689 u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK; 7778 u64 extra_flags = chunk_to_extended(flags) &
7690 7779 BTRFS_EXTENDED_PROFILE_MASK;
7691 /* chunk -> extended profile */
7692 if (extra_flags == 0)
7693 extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
7694 7780
7695 if (flags & BTRFS_BLOCK_GROUP_DATA) 7781 if (flags & BTRFS_BLOCK_GROUP_DATA)
7696 fs_info->avail_data_alloc_bits &= ~extra_flags; 7782 fs_info->avail_data_alloc_bits &= ~extra_flags;
@@ -7758,7 +7844,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7758 inode = lookup_free_space_inode(tree_root, block_group, path); 7844 inode = lookup_free_space_inode(tree_root, block_group, path);
7759 if (!IS_ERR(inode)) { 7845 if (!IS_ERR(inode)) {
7760 ret = btrfs_orphan_add(trans, inode); 7846 ret = btrfs_orphan_add(trans, inode);
7761 BUG_ON(ret); 7847 if (ret) {
7848 btrfs_add_delayed_iput(inode);
7849 goto out;
7850 }
7762 clear_nlink(inode); 7851 clear_nlink(inode);
7763 /* One for the block groups ref */ 7852 /* One for the block groups ref */
7764 spin_lock(&block_group->lock); 7853 spin_lock(&block_group->lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2862454bcdb3..8d904dd7ea9f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -19,6 +19,7 @@
19#include "btrfs_inode.h" 19#include "btrfs_inode.h"
20#include "volumes.h" 20#include "volumes.h"
21#include "check-integrity.h" 21#include "check-integrity.h"
22#include "locking.h"
22 23
23static struct kmem_cache *extent_state_cache; 24static struct kmem_cache *extent_state_cache;
24static struct kmem_cache *extent_buffer_cache; 25static struct kmem_cache *extent_buffer_cache;
@@ -53,6 +54,13 @@ struct extent_page_data {
53 unsigned int sync_io:1; 54 unsigned int sync_io:1;
54}; 55};
55 56
57static noinline void flush_write_bio(void *data);
58static inline struct btrfs_fs_info *
59tree_fs_info(struct extent_io_tree *tree)
60{
61 return btrfs_sb(tree->mapping->host->i_sb);
62}
63
56int __init extent_io_init(void) 64int __init extent_io_init(void)
57{ 65{
58 extent_state_cache = kmem_cache_create("extent_state", 66 extent_state_cache = kmem_cache_create("extent_state",
@@ -136,6 +144,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
136#endif 144#endif
137 atomic_set(&state->refs, 1); 145 atomic_set(&state->refs, 1);
138 init_waitqueue_head(&state->wq); 146 init_waitqueue_head(&state->wq);
147 trace_alloc_extent_state(state, mask, _RET_IP_);
139 return state; 148 return state;
140} 149}
141 150
@@ -153,6 +162,7 @@ void free_extent_state(struct extent_state *state)
153 list_del(&state->leak_list); 162 list_del(&state->leak_list);
154 spin_unlock_irqrestore(&leak_lock, flags); 163 spin_unlock_irqrestore(&leak_lock, flags);
155#endif 164#endif
165 trace_free_extent_state(state, _RET_IP_);
156 kmem_cache_free(extent_state_cache, state); 166 kmem_cache_free(extent_state_cache, state);
157 } 167 }
158} 168}
@@ -439,6 +449,13 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
439 return prealloc; 449 return prealloc;
440} 450}
441 451
452void extent_io_tree_panic(struct extent_io_tree *tree, int err)
453{
454 btrfs_panic(tree_fs_info(tree), err, "Locking error: "
455 "Extent tree was modified by another "
456 "thread while locked.");
457}
458
442/* 459/*
443 * clear some bits on a range in the tree. This may require splitting 460 * clear some bits on a range in the tree. This may require splitting
444 * or inserting elements in the tree, so the gfp mask is used to 461 * or inserting elements in the tree, so the gfp mask is used to
@@ -449,8 +466,7 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
449 * 466 *
450 * the range [start, end] is inclusive. 467 * the range [start, end] is inclusive.
451 * 468 *
452 * This takes the tree lock, and returns < 0 on error, > 0 if any of the 469 * This takes the tree lock, and returns 0 on success and < 0 on error.
453 * bits were already set, or zero if none of the bits were already set.
454 */ 470 */
455int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 471int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
456 int bits, int wake, int delete, 472 int bits, int wake, int delete,
@@ -464,7 +480,6 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
464 struct rb_node *node; 480 struct rb_node *node;
465 u64 last_end; 481 u64 last_end;
466 int err; 482 int err;
467 int set = 0;
468 int clear = 0; 483 int clear = 0;
469 484
470 if (delete) 485 if (delete)
@@ -542,12 +557,14 @@ hit_next:
542 prealloc = alloc_extent_state_atomic(prealloc); 557 prealloc = alloc_extent_state_atomic(prealloc);
543 BUG_ON(!prealloc); 558 BUG_ON(!prealloc);
544 err = split_state(tree, state, prealloc, start); 559 err = split_state(tree, state, prealloc, start);
545 BUG_ON(err == -EEXIST); 560 if (err)
561 extent_io_tree_panic(tree, err);
562
546 prealloc = NULL; 563 prealloc = NULL;
547 if (err) 564 if (err)
548 goto out; 565 goto out;
549 if (state->end <= end) { 566 if (state->end <= end) {
550 set |= clear_state_bit(tree, state, &bits, wake); 567 clear_state_bit(tree, state, &bits, wake);
551 if (last_end == (u64)-1) 568 if (last_end == (u64)-1)
552 goto out; 569 goto out;
553 start = last_end + 1; 570 start = last_end + 1;
@@ -564,17 +581,19 @@ hit_next:
564 prealloc = alloc_extent_state_atomic(prealloc); 581 prealloc = alloc_extent_state_atomic(prealloc);
565 BUG_ON(!prealloc); 582 BUG_ON(!prealloc);
566 err = split_state(tree, state, prealloc, end + 1); 583 err = split_state(tree, state, prealloc, end + 1);
567 BUG_ON(err == -EEXIST); 584 if (err)
585 extent_io_tree_panic(tree, err);
586
568 if (wake) 587 if (wake)
569 wake_up(&state->wq); 588 wake_up(&state->wq);
570 589
571 set |= clear_state_bit(tree, prealloc, &bits, wake); 590 clear_state_bit(tree, prealloc, &bits, wake);
572 591
573 prealloc = NULL; 592 prealloc = NULL;
574 goto out; 593 goto out;
575 } 594 }
576 595
577 set |= clear_state_bit(tree, state, &bits, wake); 596 clear_state_bit(tree, state, &bits, wake);
578next: 597next:
579 if (last_end == (u64)-1) 598 if (last_end == (u64)-1)
580 goto out; 599 goto out;
@@ -591,7 +610,7 @@ out:
591 if (prealloc) 610 if (prealloc)
592 free_extent_state(prealloc); 611 free_extent_state(prealloc);
593 612
594 return set; 613 return 0;
595 614
596search_again: 615search_again:
597 if (start > end) 616 if (start > end)
@@ -602,8 +621,8 @@ search_again:
602 goto again; 621 goto again;
603} 622}
604 623
605static int wait_on_state(struct extent_io_tree *tree, 624static void wait_on_state(struct extent_io_tree *tree,
606 struct extent_state *state) 625 struct extent_state *state)
607 __releases(tree->lock) 626 __releases(tree->lock)
608 __acquires(tree->lock) 627 __acquires(tree->lock)
609{ 628{
@@ -613,7 +632,6 @@ static int wait_on_state(struct extent_io_tree *tree,
613 schedule(); 632 schedule();
614 spin_lock(&tree->lock); 633 spin_lock(&tree->lock);
615 finish_wait(&state->wq, &wait); 634 finish_wait(&state->wq, &wait);
616 return 0;
617} 635}
618 636
619/* 637/*
@@ -621,7 +639,7 @@ static int wait_on_state(struct extent_io_tree *tree,
621 * The range [start, end] is inclusive. 639 * The range [start, end] is inclusive.
622 * The tree lock is taken by this function 640 * The tree lock is taken by this function
623 */ 641 */
624int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits) 642void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
625{ 643{
626 struct extent_state *state; 644 struct extent_state *state;
627 struct rb_node *node; 645 struct rb_node *node;
@@ -658,7 +676,6 @@ again:
658 } 676 }
659out: 677out:
660 spin_unlock(&tree->lock); 678 spin_unlock(&tree->lock);
661 return 0;
662} 679}
663 680
664static void set_state_bits(struct extent_io_tree *tree, 681static void set_state_bits(struct extent_io_tree *tree,
@@ -706,9 +723,10 @@ static void uncache_state(struct extent_state **cached_ptr)
706 * [start, end] is inclusive This takes the tree lock. 723 * [start, end] is inclusive This takes the tree lock.
707 */ 724 */
708 725
709int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 726static int __must_check
710 int bits, int exclusive_bits, u64 *failed_start, 727__set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
711 struct extent_state **cached_state, gfp_t mask) 728 int bits, int exclusive_bits, u64 *failed_start,
729 struct extent_state **cached_state, gfp_t mask)
712{ 730{
713 struct extent_state *state; 731 struct extent_state *state;
714 struct extent_state *prealloc = NULL; 732 struct extent_state *prealloc = NULL;
@@ -742,8 +760,10 @@ again:
742 prealloc = alloc_extent_state_atomic(prealloc); 760 prealloc = alloc_extent_state_atomic(prealloc);
743 BUG_ON(!prealloc); 761 BUG_ON(!prealloc);
744 err = insert_state(tree, prealloc, start, end, &bits); 762 err = insert_state(tree, prealloc, start, end, &bits);
763 if (err)
764 extent_io_tree_panic(tree, err);
765
745 prealloc = NULL; 766 prealloc = NULL;
746 BUG_ON(err == -EEXIST);
747 goto out; 767 goto out;
748 } 768 }
749 state = rb_entry(node, struct extent_state, rb_node); 769 state = rb_entry(node, struct extent_state, rb_node);
@@ -809,7 +829,9 @@ hit_next:
809 prealloc = alloc_extent_state_atomic(prealloc); 829 prealloc = alloc_extent_state_atomic(prealloc);
810 BUG_ON(!prealloc); 830 BUG_ON(!prealloc);
811 err = split_state(tree, state, prealloc, start); 831 err = split_state(tree, state, prealloc, start);
812 BUG_ON(err == -EEXIST); 832 if (err)
833 extent_io_tree_panic(tree, err);
834
813 prealloc = NULL; 835 prealloc = NULL;
814 if (err) 836 if (err)
815 goto out; 837 goto out;
@@ -846,12 +868,9 @@ hit_next:
846 */ 868 */
847 err = insert_state(tree, prealloc, start, this_end, 869 err = insert_state(tree, prealloc, start, this_end,
848 &bits); 870 &bits);
849 BUG_ON(err == -EEXIST); 871 if (err)
850 if (err) { 872 extent_io_tree_panic(tree, err);
851 free_extent_state(prealloc); 873
852 prealloc = NULL;
853 goto out;
854 }
855 cache_state(prealloc, cached_state); 874 cache_state(prealloc, cached_state);
856 prealloc = NULL; 875 prealloc = NULL;
857 start = this_end + 1; 876 start = this_end + 1;
@@ -873,7 +892,8 @@ hit_next:
873 prealloc = alloc_extent_state_atomic(prealloc); 892 prealloc = alloc_extent_state_atomic(prealloc);
874 BUG_ON(!prealloc); 893 BUG_ON(!prealloc);
875 err = split_state(tree, state, prealloc, end + 1); 894 err = split_state(tree, state, prealloc, end + 1);
876 BUG_ON(err == -EEXIST); 895 if (err)
896 extent_io_tree_panic(tree, err);
877 897
878 set_state_bits(tree, prealloc, &bits); 898 set_state_bits(tree, prealloc, &bits);
879 cache_state(prealloc, cached_state); 899 cache_state(prealloc, cached_state);
@@ -900,6 +920,15 @@ search_again:
900 goto again; 920 goto again;
901} 921}
902 922
923int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
924 u64 *failed_start, struct extent_state **cached_state,
925 gfp_t mask)
926{
927 return __set_extent_bit(tree, start, end, bits, 0, failed_start,
928 cached_state, mask);
929}
930
931
903/** 932/**
904 * convert_extent - convert all bits in a given range from one bit to another 933 * convert_extent - convert all bits in a given range from one bit to another
905 * @tree: the io tree to search 934 * @tree: the io tree to search
@@ -946,7 +975,8 @@ again:
946 } 975 }
947 err = insert_state(tree, prealloc, start, end, &bits); 976 err = insert_state(tree, prealloc, start, end, &bits);
948 prealloc = NULL; 977 prealloc = NULL;
949 BUG_ON(err == -EEXIST); 978 if (err)
979 extent_io_tree_panic(tree, err);
950 goto out; 980 goto out;
951 } 981 }
952 state = rb_entry(node, struct extent_state, rb_node); 982 state = rb_entry(node, struct extent_state, rb_node);
@@ -1002,7 +1032,8 @@ hit_next:
1002 goto out; 1032 goto out;
1003 } 1033 }
1004 err = split_state(tree, state, prealloc, start); 1034 err = split_state(tree, state, prealloc, start);
1005 BUG_ON(err == -EEXIST); 1035 if (err)
1036 extent_io_tree_panic(tree, err);
1006 prealloc = NULL; 1037 prealloc = NULL;
1007 if (err) 1038 if (err)
1008 goto out; 1039 goto out;
@@ -1041,12 +1072,8 @@ hit_next:
1041 */ 1072 */
1042 err = insert_state(tree, prealloc, start, this_end, 1073 err = insert_state(tree, prealloc, start, this_end,
1043 &bits); 1074 &bits);
1044 BUG_ON(err == -EEXIST); 1075 if (err)
1045 if (err) { 1076 extent_io_tree_panic(tree, err);
1046 free_extent_state(prealloc);
1047 prealloc = NULL;
1048 goto out;
1049 }
1050 prealloc = NULL; 1077 prealloc = NULL;
1051 start = this_end + 1; 1078 start = this_end + 1;
1052 goto search_again; 1079 goto search_again;
@@ -1065,7 +1092,8 @@ hit_next:
1065 } 1092 }
1066 1093
1067 err = split_state(tree, state, prealloc, end + 1); 1094 err = split_state(tree, state, prealloc, end + 1);
1068 BUG_ON(err == -EEXIST); 1095 if (err)
1096 extent_io_tree_panic(tree, err);
1069 1097
1070 set_state_bits(tree, prealloc, &bits); 1098 set_state_bits(tree, prealloc, &bits);
1071 clear_state_bit(tree, prealloc, &clear_bits, 0); 1099 clear_state_bit(tree, prealloc, &clear_bits, 0);
@@ -1095,14 +1123,14 @@ search_again:
1095int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 1123int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1096 gfp_t mask) 1124 gfp_t mask)
1097{ 1125{
1098 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL, 1126 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
1099 NULL, mask); 1127 NULL, mask);
1100} 1128}
1101 1129
1102int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1130int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1103 int bits, gfp_t mask) 1131 int bits, gfp_t mask)
1104{ 1132{
1105 return set_extent_bit(tree, start, end, bits, 0, NULL, 1133 return set_extent_bit(tree, start, end, bits, NULL,
1106 NULL, mask); 1134 NULL, mask);
1107} 1135}
1108 1136
@@ -1117,7 +1145,7 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
1117{ 1145{
1118 return set_extent_bit(tree, start, end, 1146 return set_extent_bit(tree, start, end,
1119 EXTENT_DELALLOC | EXTENT_UPTODATE, 1147 EXTENT_DELALLOC | EXTENT_UPTODATE,
1120 0, NULL, cached_state, mask); 1148 NULL, cached_state, mask);
1121} 1149}
1122 1150
1123int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 1151int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
@@ -1131,7 +1159,7 @@ int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
1131int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, 1159int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
1132 gfp_t mask) 1160 gfp_t mask)
1133{ 1161{
1134 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL, 1162 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL,
1135 NULL, mask); 1163 NULL, mask);
1136} 1164}
1137 1165
@@ -1139,7 +1167,7 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1139 struct extent_state **cached_state, gfp_t mask) 1167 struct extent_state **cached_state, gfp_t mask)
1140{ 1168{
1141 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 1169 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
1142 NULL, cached_state, mask); 1170 cached_state, mask);
1143} 1171}
1144 1172
1145static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, 1173static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -1155,42 +1183,40 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
1155 * us if waiting is desired. 1183 * us if waiting is desired.
1156 */ 1184 */
1157int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 1185int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
1158 int bits, struct extent_state **cached_state, gfp_t mask) 1186 int bits, struct extent_state **cached_state)
1159{ 1187{
1160 int err; 1188 int err;
1161 u64 failed_start; 1189 u64 failed_start;
1162 while (1) { 1190 while (1) {
1163 err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, 1191 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
1164 EXTENT_LOCKED, &failed_start, 1192 EXTENT_LOCKED, &failed_start,
1165 cached_state, mask); 1193 cached_state, GFP_NOFS);
1166 if (err == -EEXIST && (mask & __GFP_WAIT)) { 1194 if (err == -EEXIST) {
1167 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); 1195 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
1168 start = failed_start; 1196 start = failed_start;
1169 } else { 1197 } else
1170 break; 1198 break;
1171 }
1172 WARN_ON(start > end); 1199 WARN_ON(start > end);
1173 } 1200 }
1174 return err; 1201 return err;
1175} 1202}
1176 1203
1177int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) 1204int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1178{ 1205{
1179 return lock_extent_bits(tree, start, end, 0, NULL, mask); 1206 return lock_extent_bits(tree, start, end, 0, NULL);
1180} 1207}
1181 1208
1182int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, 1209int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1183 gfp_t mask)
1184{ 1210{
1185 int err; 1211 int err;
1186 u64 failed_start; 1212 u64 failed_start;
1187 1213
1188 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, 1214 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
1189 &failed_start, NULL, mask); 1215 &failed_start, NULL, GFP_NOFS);
1190 if (err == -EEXIST) { 1216 if (err == -EEXIST) {
1191 if (failed_start > start) 1217 if (failed_start > start)
1192 clear_extent_bit(tree, start, failed_start - 1, 1218 clear_extent_bit(tree, start, failed_start - 1,
1193 EXTENT_LOCKED, 1, 0, NULL, mask); 1219 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
1194 return 0; 1220 return 0;
1195 } 1221 }
1196 return 1; 1222 return 1;
@@ -1203,10 +1229,10 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1203 mask); 1229 mask);
1204} 1230}
1205 1231
1206int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) 1232int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
1207{ 1233{
1208 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, 1234 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1209 mask); 1235 GFP_NOFS);
1210} 1236}
1211 1237
1212/* 1238/*
@@ -1220,7 +1246,7 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
1220 1246
1221 while (index <= end_index) { 1247 while (index <= end_index) {
1222 page = find_get_page(tree->mapping, index); 1248 page = find_get_page(tree->mapping, index);
1223 BUG_ON(!page); 1249 BUG_ON(!page); /* Pages should be in the extent_io_tree */
1224 set_page_writeback(page); 1250 set_page_writeback(page);
1225 page_cache_release(page); 1251 page_cache_release(page);
1226 index++; 1252 index++;
@@ -1343,9 +1369,9 @@ out:
1343 return found; 1369 return found;
1344} 1370}
1345 1371
1346static noinline int __unlock_for_delalloc(struct inode *inode, 1372static noinline void __unlock_for_delalloc(struct inode *inode,
1347 struct page *locked_page, 1373 struct page *locked_page,
1348 u64 start, u64 end) 1374 u64 start, u64 end)
1349{ 1375{
1350 int ret; 1376 int ret;
1351 struct page *pages[16]; 1377 struct page *pages[16];
@@ -1355,7 +1381,7 @@ static noinline int __unlock_for_delalloc(struct inode *inode,
1355 int i; 1381 int i;
1356 1382
1357 if (index == locked_page->index && end_index == index) 1383 if (index == locked_page->index && end_index == index)
1358 return 0; 1384 return;
1359 1385
1360 while (nr_pages > 0) { 1386 while (nr_pages > 0) {
1361 ret = find_get_pages_contig(inode->i_mapping, index, 1387 ret = find_get_pages_contig(inode->i_mapping, index,
@@ -1370,7 +1396,6 @@ static noinline int __unlock_for_delalloc(struct inode *inode,
1370 index += ret; 1396 index += ret;
1371 cond_resched(); 1397 cond_resched();
1372 } 1398 }
1373 return 0;
1374} 1399}
1375 1400
1376static noinline int lock_delalloc_pages(struct inode *inode, 1401static noinline int lock_delalloc_pages(struct inode *inode,
@@ -1500,11 +1525,10 @@ again:
1500 goto out_failed; 1525 goto out_failed;
1501 } 1526 }
1502 } 1527 }
1503 BUG_ON(ret); 1528 BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
1504 1529
1505 /* step three, lock the state bits for the whole range */ 1530 /* step three, lock the state bits for the whole range */
1506 lock_extent_bits(tree, delalloc_start, delalloc_end, 1531 lock_extent_bits(tree, delalloc_start, delalloc_end, 0, &cached_state);
1507 0, &cached_state, GFP_NOFS);
1508 1532
1509 /* then test to make sure it is all still delalloc */ 1533 /* then test to make sure it is all still delalloc */
1510 ret = test_range_bit(tree, delalloc_start, delalloc_end, 1534 ret = test_range_bit(tree, delalloc_start, delalloc_end,
@@ -1761,39 +1785,34 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1761 * helper function to set a given page up to date if all the 1785 * helper function to set a given page up to date if all the
1762 * extents in the tree for that page are up to date 1786 * extents in the tree for that page are up to date
1763 */ 1787 */
1764static int check_page_uptodate(struct extent_io_tree *tree, 1788static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1765 struct page *page)
1766{ 1789{
1767 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 1790 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1768 u64 end = start + PAGE_CACHE_SIZE - 1; 1791 u64 end = start + PAGE_CACHE_SIZE - 1;
1769 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) 1792 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1770 SetPageUptodate(page); 1793 SetPageUptodate(page);
1771 return 0;
1772} 1794}
1773 1795
1774/* 1796/*
1775 * helper function to unlock a page if all the extents in the tree 1797 * helper function to unlock a page if all the extents in the tree
1776 * for that page are unlocked 1798 * for that page are unlocked
1777 */ 1799 */
1778static int check_page_locked(struct extent_io_tree *tree, 1800static void check_page_locked(struct extent_io_tree *tree, struct page *page)
1779 struct page *page)
1780{ 1801{
1781 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 1802 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1782 u64 end = start + PAGE_CACHE_SIZE - 1; 1803 u64 end = start + PAGE_CACHE_SIZE - 1;
1783 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) 1804 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1784 unlock_page(page); 1805 unlock_page(page);
1785 return 0;
1786} 1806}
1787 1807
1788/* 1808/*
1789 * helper function to end page writeback if all the extents 1809 * helper function to end page writeback if all the extents
1790 * in the tree for that page are done with writeback 1810 * in the tree for that page are done with writeback
1791 */ 1811 */
1792static int check_page_writeback(struct extent_io_tree *tree, 1812static void check_page_writeback(struct extent_io_tree *tree,
1793 struct page *page) 1813 struct page *page)
1794{ 1814{
1795 end_page_writeback(page); 1815 end_page_writeback(page);
1796 return 0;
1797} 1816}
1798 1817
1799/* 1818/*
@@ -1912,6 +1931,26 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1912 return 0; 1931 return 0;
1913} 1932}
1914 1933
1934int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
1935 int mirror_num)
1936{
1937 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
1938 u64 start = eb->start;
1939 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
1940 int ret;
1941
1942 for (i = 0; i < num_pages; i++) {
1943 struct page *p = extent_buffer_page(eb, i);
1944 ret = repair_io_failure(map_tree, start, PAGE_CACHE_SIZE,
1945 start, p, mirror_num);
1946 if (ret)
1947 break;
1948 start += PAGE_CACHE_SIZE;
1949 }
1950
1951 return ret;
1952}
1953
1915/* 1954/*
1916 * each time an IO finishes, we do a fast check in the IO failure tree 1955 * each time an IO finishes, we do a fast check in the IO failure tree
1917 * to see if we need to process or clean up an io_failure_record 1956 * to see if we need to process or clean up an io_failure_record
@@ -2258,6 +2297,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2258 u64 start; 2297 u64 start;
2259 u64 end; 2298 u64 end;
2260 int whole_page; 2299 int whole_page;
2300 int failed_mirror;
2261 int ret; 2301 int ret;
2262 2302
2263 if (err) 2303 if (err)
@@ -2304,9 +2344,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2304 else 2344 else
2305 clean_io_failure(start, page); 2345 clean_io_failure(start, page);
2306 } 2346 }
2307 if (!uptodate) { 2347
2308 int failed_mirror; 2348 if (!uptodate)
2309 failed_mirror = (int)(unsigned long)bio->bi_bdev; 2349 failed_mirror = (int)(unsigned long)bio->bi_bdev;
2350
2351 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
2352 ret = tree->ops->readpage_io_failed_hook(page, failed_mirror);
2353 if (!ret && !err &&
2354 test_bit(BIO_UPTODATE, &bio->bi_flags))
2355 uptodate = 1;
2356 } else if (!uptodate) {
2310 /* 2357 /*
2311 * The generic bio_readpage_error handles errors the 2358 * The generic bio_readpage_error handles errors the
2312 * following way: If possible, new read requests are 2359 * following way: If possible, new read requests are
@@ -2320,7 +2367,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2320 ret = bio_readpage_error(bio, page, start, end, 2367 ret = bio_readpage_error(bio, page, start, end,
2321 failed_mirror, NULL); 2368 failed_mirror, NULL);
2322 if (ret == 0) { 2369 if (ret == 0) {
2323error_handled:
2324 uptodate = 2370 uptodate =
2325 test_bit(BIO_UPTODATE, &bio->bi_flags); 2371 test_bit(BIO_UPTODATE, &bio->bi_flags);
2326 if (err) 2372 if (err)
@@ -2328,16 +2374,9 @@ error_handled:
2328 uncache_state(&cached); 2374 uncache_state(&cached);
2329 continue; 2375 continue;
2330 } 2376 }
2331 if (tree->ops && tree->ops->readpage_io_failed_hook) {
2332 ret = tree->ops->readpage_io_failed_hook(
2333 bio, page, start, end,
2334 failed_mirror, state);
2335 if (ret == 0)
2336 goto error_handled;
2337 }
2338 } 2377 }
2339 2378
2340 if (uptodate) { 2379 if (uptodate && tree->track_uptodate) {
2341 set_extent_uptodate(tree, start, end, &cached, 2380 set_extent_uptodate(tree, start, end, &cached,
2342 GFP_ATOMIC); 2381 GFP_ATOMIC);
2343 } 2382 }
@@ -2386,8 +2425,12 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2386 return bio; 2425 return bio;
2387} 2426}
2388 2427
2389static int submit_one_bio(int rw, struct bio *bio, int mirror_num, 2428/*
2390 unsigned long bio_flags) 2429 * Since writes are async, they will only return -ENOMEM.
2430 * Reads can return the full range of I/O error conditions.
2431 */
2432static int __must_check submit_one_bio(int rw, struct bio *bio,
2433 int mirror_num, unsigned long bio_flags)
2391{ 2434{
2392 int ret = 0; 2435 int ret = 0;
2393 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 2436 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -2413,6 +2456,19 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
2413 return ret; 2456 return ret;
2414} 2457}
2415 2458
2459static int merge_bio(struct extent_io_tree *tree, struct page *page,
2460 unsigned long offset, size_t size, struct bio *bio,
2461 unsigned long bio_flags)
2462{
2463 int ret = 0;
2464 if (tree->ops && tree->ops->merge_bio_hook)
2465 ret = tree->ops->merge_bio_hook(page, offset, size, bio,
2466 bio_flags);
2467 BUG_ON(ret < 0);
2468 return ret;
2469
2470}
2471
2416static int submit_extent_page(int rw, struct extent_io_tree *tree, 2472static int submit_extent_page(int rw, struct extent_io_tree *tree,
2417 struct page *page, sector_t sector, 2473 struct page *page, sector_t sector,
2418 size_t size, unsigned long offset, 2474 size_t size, unsigned long offset,
@@ -2441,12 +2497,12 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
2441 sector; 2497 sector;
2442 2498
2443 if (prev_bio_flags != bio_flags || !contig || 2499 if (prev_bio_flags != bio_flags || !contig ||
2444 (tree->ops && tree->ops->merge_bio_hook && 2500 merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
2445 tree->ops->merge_bio_hook(page, offset, page_size, bio,
2446 bio_flags)) ||
2447 bio_add_page(bio, page, page_size, offset) < page_size) { 2501 bio_add_page(bio, page, page_size, offset) < page_size) {
2448 ret = submit_one_bio(rw, bio, mirror_num, 2502 ret = submit_one_bio(rw, bio, mirror_num,
2449 prev_bio_flags); 2503 prev_bio_flags);
2504 if (ret < 0)
2505 return ret;
2450 bio = NULL; 2506 bio = NULL;
2451 } else { 2507 } else {
2452 return 0; 2508 return 0;
@@ -2473,25 +2529,31 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
2473 return ret; 2529 return ret;
2474} 2530}
2475 2531
2476void set_page_extent_mapped(struct page *page) 2532void attach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
2477{ 2533{
2478 if (!PagePrivate(page)) { 2534 if (!PagePrivate(page)) {
2479 SetPagePrivate(page); 2535 SetPagePrivate(page);
2480 page_cache_get(page); 2536 page_cache_get(page);
2481 set_page_private(page, EXTENT_PAGE_PRIVATE); 2537 set_page_private(page, (unsigned long)eb);
2538 } else {
2539 WARN_ON(page->private != (unsigned long)eb);
2482 } 2540 }
2483} 2541}
2484 2542
2485static void set_page_extent_head(struct page *page, unsigned long len) 2543void set_page_extent_mapped(struct page *page)
2486{ 2544{
2487 WARN_ON(!PagePrivate(page)); 2545 if (!PagePrivate(page)) {
2488 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2); 2546 SetPagePrivate(page);
2547 page_cache_get(page);
2548 set_page_private(page, EXTENT_PAGE_PRIVATE);
2549 }
2489} 2550}
2490 2551
2491/* 2552/*
2492 * basic readpage implementation. Locked extent state structs are inserted 2553 * basic readpage implementation. Locked extent state structs are inserted
2493 * into the tree that are removed when the IO is done (by the end_io 2554 * into the tree that are removed when the IO is done (by the end_io
2494 * handlers) 2555 * handlers)
2556 * XXX JDM: This needs looking at to ensure proper page locking
2495 */ 2557 */
2496static int __extent_read_full_page(struct extent_io_tree *tree, 2558static int __extent_read_full_page(struct extent_io_tree *tree,
2497 struct page *page, 2559 struct page *page,
@@ -2531,11 +2593,11 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2531 2593
2532 end = page_end; 2594 end = page_end;
2533 while (1) { 2595 while (1) {
2534 lock_extent(tree, start, end, GFP_NOFS); 2596 lock_extent(tree, start, end);
2535 ordered = btrfs_lookup_ordered_extent(inode, start); 2597 ordered = btrfs_lookup_ordered_extent(inode, start);
2536 if (!ordered) 2598 if (!ordered)
2537 break; 2599 break;
2538 unlock_extent(tree, start, end, GFP_NOFS); 2600 unlock_extent(tree, start, end);
2539 btrfs_start_ordered_extent(inode, ordered, 1); 2601 btrfs_start_ordered_extent(inode, ordered, 1);
2540 btrfs_put_ordered_extent(ordered); 2602 btrfs_put_ordered_extent(ordered);
2541 } 2603 }
@@ -2572,7 +2634,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2572 end - cur + 1, 0); 2634 end - cur + 1, 0);
2573 if (IS_ERR_OR_NULL(em)) { 2635 if (IS_ERR_OR_NULL(em)) {
2574 SetPageError(page); 2636 SetPageError(page);
2575 unlock_extent(tree, cur, end, GFP_NOFS); 2637 unlock_extent(tree, cur, end);
2576 break; 2638 break;
2577 } 2639 }
2578 extent_offset = cur - em->start; 2640 extent_offset = cur - em->start;
@@ -2624,7 +2686,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2624 if (test_range_bit(tree, cur, cur_end, 2686 if (test_range_bit(tree, cur, cur_end,
2625 EXTENT_UPTODATE, 1, NULL)) { 2687 EXTENT_UPTODATE, 1, NULL)) {
2626 check_page_uptodate(tree, page); 2688 check_page_uptodate(tree, page);
2627 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 2689 unlock_extent(tree, cur, cur + iosize - 1);
2628 cur = cur + iosize; 2690 cur = cur + iosize;
2629 pg_offset += iosize; 2691 pg_offset += iosize;
2630 continue; 2692 continue;
@@ -2634,7 +2696,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2634 */ 2696 */
2635 if (block_start == EXTENT_MAP_INLINE) { 2697 if (block_start == EXTENT_MAP_INLINE) {
2636 SetPageError(page); 2698 SetPageError(page);
2637 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 2699 unlock_extent(tree, cur, cur + iosize - 1);
2638 cur = cur + iosize; 2700 cur = cur + iosize;
2639 pg_offset += iosize; 2701 pg_offset += iosize;
2640 continue; 2702 continue;
@@ -2654,6 +2716,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2654 end_bio_extent_readpage, mirror_num, 2716 end_bio_extent_readpage, mirror_num,
2655 *bio_flags, 2717 *bio_flags,
2656 this_bio_flag); 2718 this_bio_flag);
2719 BUG_ON(ret == -ENOMEM);
2657 nr++; 2720 nr++;
2658 *bio_flags = this_bio_flag; 2721 *bio_flags = this_bio_flag;
2659 } 2722 }
@@ -2795,7 +2858,11 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2795 delalloc_end, 2858 delalloc_end,
2796 &page_started, 2859 &page_started,
2797 &nr_written); 2860 &nr_written);
2798 BUG_ON(ret); 2861 /* File system has been set read-only */
2862 if (ret) {
2863 SetPageError(page);
2864 goto done;
2865 }
2799 /* 2866 /*
2800 * delalloc_end is already one less than the total 2867 * delalloc_end is already one less than the total
2801 * length, so we don't subtract one from 2868 * length, so we don't subtract one from
@@ -2968,6 +3035,275 @@ done_unlocked:
2968 return 0; 3035 return 0;
2969} 3036}
2970 3037
3038static int eb_wait(void *word)
3039{
3040 io_schedule();
3041 return 0;
3042}
3043
3044static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3045{
3046 wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
3047 TASK_UNINTERRUPTIBLE);
3048}
3049
3050static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3051 struct btrfs_fs_info *fs_info,
3052 struct extent_page_data *epd)
3053{
3054 unsigned long i, num_pages;
3055 int flush = 0;
3056 int ret = 0;
3057
3058 if (!btrfs_try_tree_write_lock(eb)) {
3059 flush = 1;
3060 flush_write_bio(epd);
3061 btrfs_tree_lock(eb);
3062 }
3063
3064 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
3065 btrfs_tree_unlock(eb);
3066 if (!epd->sync_io)
3067 return 0;
3068 if (!flush) {
3069 flush_write_bio(epd);
3070 flush = 1;
3071 }
3072 while (1) {
3073 wait_on_extent_buffer_writeback(eb);
3074 btrfs_tree_lock(eb);
3075 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags))
3076 break;
3077 btrfs_tree_unlock(eb);
3078 }
3079 }
3080
3081 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3082 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3083 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3084 spin_lock(&fs_info->delalloc_lock);
3085 if (fs_info->dirty_metadata_bytes >= eb->len)
3086 fs_info->dirty_metadata_bytes -= eb->len;
3087 else
3088 WARN_ON(1);
3089 spin_unlock(&fs_info->delalloc_lock);
3090 ret = 1;
3091 }
3092
3093 btrfs_tree_unlock(eb);
3094
3095 if (!ret)
3096 return ret;
3097
3098 num_pages = num_extent_pages(eb->start, eb->len);
3099 for (i = 0; i < num_pages; i++) {
3100 struct page *p = extent_buffer_page(eb, i);
3101
3102 if (!trylock_page(p)) {
3103 if (!flush) {
3104 flush_write_bio(epd);
3105 flush = 1;
3106 }
3107 lock_page(p);
3108 }
3109 }
3110
3111 return ret;
3112}
3113
3114static void end_extent_buffer_writeback(struct extent_buffer *eb)
3115{
3116 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3117 smp_mb__after_clear_bit();
3118 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3119}
3120
3121static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3122{
3123 int uptodate = err == 0;
3124 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3125 struct extent_buffer *eb;
3126 int done;
3127
3128 do {
3129 struct page *page = bvec->bv_page;
3130
3131 bvec--;
3132 eb = (struct extent_buffer *)page->private;
3133 BUG_ON(!eb);
3134 done = atomic_dec_and_test(&eb->io_pages);
3135
3136 if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3137 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3138 ClearPageUptodate(page);
3139 SetPageError(page);
3140 }
3141
3142 end_page_writeback(page);
3143
3144 if (!done)
3145 continue;
3146
3147 end_extent_buffer_writeback(eb);
3148 } while (bvec >= bio->bi_io_vec);
3149
3150 bio_put(bio);
3151
3152}
3153
3154static int write_one_eb(struct extent_buffer *eb,
3155 struct btrfs_fs_info *fs_info,
3156 struct writeback_control *wbc,
3157 struct extent_page_data *epd)
3158{
3159 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3160 u64 offset = eb->start;
3161 unsigned long i, num_pages;
3162 int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
3163 int ret;
3164
3165 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3166 num_pages = num_extent_pages(eb->start, eb->len);
3167 atomic_set(&eb->io_pages, num_pages);
3168 for (i = 0; i < num_pages; i++) {
3169 struct page *p = extent_buffer_page(eb, i);
3170
3171 clear_page_dirty_for_io(p);
3172 set_page_writeback(p);
3173 ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3174 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3175 -1, end_bio_extent_buffer_writepage,
3176 0, 0, 0);
3177 if (ret) {
3178 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3179 SetPageError(p);
3180 if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
3181 end_extent_buffer_writeback(eb);
3182 ret = -EIO;
3183 break;
3184 }
3185 offset += PAGE_CACHE_SIZE;
3186 update_nr_written(p, wbc, 1);
3187 unlock_page(p);
3188 }
3189
3190 if (unlikely(ret)) {
3191 for (; i < num_pages; i++) {
3192 struct page *p = extent_buffer_page(eb, i);
3193 unlock_page(p);
3194 }
3195 }
3196
3197 return ret;
3198}
3199
3200int btree_write_cache_pages(struct address_space *mapping,
3201 struct writeback_control *wbc)
3202{
3203 struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
3204 struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
3205 struct extent_buffer *eb, *prev_eb = NULL;
3206 struct extent_page_data epd = {
3207 .bio = NULL,
3208 .tree = tree,
3209 .extent_locked = 0,
3210 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3211 };
3212 int ret = 0;
3213 int done = 0;
3214 int nr_to_write_done = 0;
3215 struct pagevec pvec;
3216 int nr_pages;
3217 pgoff_t index;
3218 pgoff_t end; /* Inclusive */
3219 int scanned = 0;
3220 int tag;
3221
3222 pagevec_init(&pvec, 0);
3223 if (wbc->range_cyclic) {
3224 index = mapping->writeback_index; /* Start from prev offset */
3225 end = -1;
3226 } else {
3227 index = wbc->range_start >> PAGE_CACHE_SHIFT;
3228 end = wbc->range_end >> PAGE_CACHE_SHIFT;
3229 scanned = 1;
3230 }
3231 if (wbc->sync_mode == WB_SYNC_ALL)
3232 tag = PAGECACHE_TAG_TOWRITE;
3233 else
3234 tag = PAGECACHE_TAG_DIRTY;
3235retry:
3236 if (wbc->sync_mode == WB_SYNC_ALL)
3237 tag_pages_for_writeback(mapping, index, end);
3238 while (!done && !nr_to_write_done && (index <= end) &&
3239 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
3240 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
3241 unsigned i;
3242
3243 scanned = 1;
3244 for (i = 0; i < nr_pages; i++) {
3245 struct page *page = pvec.pages[i];
3246
3247 if (!PagePrivate(page))
3248 continue;
3249
3250 if (!wbc->range_cyclic && page->index > end) {
3251 done = 1;
3252 break;
3253 }
3254
3255 eb = (struct extent_buffer *)page->private;
3256 if (!eb) {
3257 WARN_ON(1);
3258 continue;
3259 }
3260
3261 if (eb == prev_eb)
3262 continue;
3263
3264 if (!atomic_inc_not_zero(&eb->refs)) {
3265 WARN_ON(1);
3266 continue;
3267 }
3268
3269 prev_eb = eb;
3270 ret = lock_extent_buffer_for_io(eb, fs_info, &epd);
3271 if (!ret) {
3272 free_extent_buffer(eb);
3273 continue;
3274 }
3275
3276 ret = write_one_eb(eb, fs_info, wbc, &epd);
3277 if (ret) {
3278 done = 1;
3279 free_extent_buffer(eb);
3280 break;
3281 }
3282 free_extent_buffer(eb);
3283
3284 /*
3285 * the filesystem may choose to bump up nr_to_write.
3286 * We have to make sure to honor the new nr_to_write
3287 * at any time
3288 */
3289 nr_to_write_done = wbc->nr_to_write <= 0;
3290 }
3291 pagevec_release(&pvec);
3292 cond_resched();
3293 }
3294 if (!scanned && !done) {
3295 /*
3296 * We hit the last page and there is more work to be done: wrap
3297 * back to the start of the file
3298 */
3299 scanned = 1;
3300 index = 0;
3301 goto retry;
3302 }
3303 flush_write_bio(&epd);
3304 return ret;
3305}
3306
2971/** 3307/**
2972 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them. 3308 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2973 * @mapping: address space structure to write 3309 * @mapping: address space structure to write
@@ -3099,10 +3435,14 @@ retry:
3099static void flush_epd_write_bio(struct extent_page_data *epd) 3435static void flush_epd_write_bio(struct extent_page_data *epd)
3100{ 3436{
3101 if (epd->bio) { 3437 if (epd->bio) {
3438 int rw = WRITE;
3439 int ret;
3440
3102 if (epd->sync_io) 3441 if (epd->sync_io)
3103 submit_one_bio(WRITE_SYNC, epd->bio, 0, 0); 3442 rw = WRITE_SYNC;
3104 else 3443
3105 submit_one_bio(WRITE, epd->bio, 0, 0); 3444 ret = submit_one_bio(rw, epd->bio, 0, 0);
3445 BUG_ON(ret < 0); /* -ENOMEM */
3106 epd->bio = NULL; 3446 epd->bio = NULL;
3107 } 3447 }
3108} 3448}
@@ -3219,7 +3559,7 @@ int extent_readpages(struct extent_io_tree *tree,
3219 } 3559 }
3220 BUG_ON(!list_empty(pages)); 3560 BUG_ON(!list_empty(pages));
3221 if (bio) 3561 if (bio)
3222 submit_one_bio(READ, bio, 0, bio_flags); 3562 return submit_one_bio(READ, bio, 0, bio_flags);
3223 return 0; 3563 return 0;
3224} 3564}
3225 3565
@@ -3240,7 +3580,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
3240 if (start > end) 3580 if (start > end)
3241 return 0; 3581 return 0;
3242 3582
3243 lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS); 3583 lock_extent_bits(tree, start, end, 0, &cached_state);
3244 wait_on_page_writeback(page); 3584 wait_on_page_writeback(page);
3245 clear_extent_bit(tree, start, end, 3585 clear_extent_bit(tree, start, end,
3246 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | 3586 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -3454,7 +3794,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3454 } 3794 }
3455 3795
3456 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, 3796 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
3457 &cached_state, GFP_NOFS); 3797 &cached_state);
3458 3798
3459 em = get_extent_skip_holes(inode, start, last_for_get_extent, 3799 em = get_extent_skip_holes(inode, start, last_for_get_extent,
3460 get_extent); 3800 get_extent);
@@ -3548,26 +3888,7 @@ out:
3548inline struct page *extent_buffer_page(struct extent_buffer *eb, 3888inline struct page *extent_buffer_page(struct extent_buffer *eb,
3549 unsigned long i) 3889 unsigned long i)
3550{ 3890{
3551 struct page *p; 3891 return eb->pages[i];
3552 struct address_space *mapping;
3553
3554 if (i == 0)
3555 return eb->first_page;
3556 i += eb->start >> PAGE_CACHE_SHIFT;
3557 mapping = eb->first_page->mapping;
3558 if (!mapping)
3559 return NULL;
3560
3561 /*
3562 * extent_buffer_page is only called after pinning the page
3563 * by increasing the reference count. So we know the page must
3564 * be in the radix tree.
3565 */
3566 rcu_read_lock();
3567 p = radix_tree_lookup(&mapping->page_tree, i);
3568 rcu_read_unlock();
3569
3570 return p;
3571} 3892}
3572 3893
3573inline unsigned long num_extent_pages(u64 start, u64 len) 3894inline unsigned long num_extent_pages(u64 start, u64 len)
@@ -3576,6 +3897,19 @@ inline unsigned long num_extent_pages(u64 start, u64 len)
3576 (start >> PAGE_CACHE_SHIFT); 3897 (start >> PAGE_CACHE_SHIFT);
3577} 3898}
3578 3899
3900static void __free_extent_buffer(struct extent_buffer *eb)
3901{
3902#if LEAK_DEBUG
3903 unsigned long flags;
3904 spin_lock_irqsave(&leak_lock, flags);
3905 list_del(&eb->leak_list);
3906 spin_unlock_irqrestore(&leak_lock, flags);
3907#endif
3908 if (eb->pages && eb->pages != eb->inline_pages)
3909 kfree(eb->pages);
3910 kmem_cache_free(extent_buffer_cache, eb);
3911}
3912
3579static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, 3913static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3580 u64 start, 3914 u64 start,
3581 unsigned long len, 3915 unsigned long len,
@@ -3591,6 +3925,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3591 return NULL; 3925 return NULL;
3592 eb->start = start; 3926 eb->start = start;
3593 eb->len = len; 3927 eb->len = len;
3928 eb->tree = tree;
3594 rwlock_init(&eb->lock); 3929 rwlock_init(&eb->lock);
3595 atomic_set(&eb->write_locks, 0); 3930 atomic_set(&eb->write_locks, 0);
3596 atomic_set(&eb->read_locks, 0); 3931 atomic_set(&eb->read_locks, 0);
@@ -3607,20 +3942,32 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3607 list_add(&eb->leak_list, &buffers); 3942 list_add(&eb->leak_list, &buffers);
3608 spin_unlock_irqrestore(&leak_lock, flags); 3943 spin_unlock_irqrestore(&leak_lock, flags);
3609#endif 3944#endif
3945 spin_lock_init(&eb->refs_lock);
3610 atomic_set(&eb->refs, 1); 3946 atomic_set(&eb->refs, 1);
3947 atomic_set(&eb->io_pages, 0);
3948
3949 if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
3950 struct page **pages;
3951 int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
3952 PAGE_CACHE_SHIFT;
3953 pages = kzalloc(num_pages, mask);
3954 if (!pages) {
3955 __free_extent_buffer(eb);
3956 return NULL;
3957 }
3958 eb->pages = pages;
3959 } else {
3960 eb->pages = eb->inline_pages;
3961 }
3611 3962
3612 return eb; 3963 return eb;
3613} 3964}
3614 3965
3615static void __free_extent_buffer(struct extent_buffer *eb) 3966static int extent_buffer_under_io(struct extent_buffer *eb)
3616{ 3967{
3617#if LEAK_DEBUG 3968 return (atomic_read(&eb->io_pages) ||
3618 unsigned long flags; 3969 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
3619 spin_lock_irqsave(&leak_lock, flags); 3970 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
3620 list_del(&eb->leak_list);
3621 spin_unlock_irqrestore(&leak_lock, flags);
3622#endif
3623 kmem_cache_free(extent_buffer_cache, eb);
3624} 3971}
3625 3972
3626/* 3973/*
@@ -3632,8 +3979,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3632 unsigned long index; 3979 unsigned long index;
3633 struct page *page; 3980 struct page *page;
3634 3981
3635 if (!eb->first_page) 3982 BUG_ON(extent_buffer_under_io(eb));
3636 return;
3637 3983
3638 index = num_extent_pages(eb->start, eb->len); 3984 index = num_extent_pages(eb->start, eb->len);
3639 if (start_idx >= index) 3985 if (start_idx >= index)
@@ -3642,8 +3988,34 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3642 do { 3988 do {
3643 index--; 3989 index--;
3644 page = extent_buffer_page(eb, index); 3990 page = extent_buffer_page(eb, index);
3645 if (page) 3991 if (page) {
3992 spin_lock(&page->mapping->private_lock);
3993 /*
3994 * We do this since we'll remove the pages after we've
3995 * removed the eb from the radix tree, so we could race
3996 * and have this page now attached to the new eb. So
3997 * only clear page_private if it's still connected to
3998 * this eb.
3999 */
4000 if (PagePrivate(page) &&
4001 page->private == (unsigned long)eb) {
4002 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
4003 BUG_ON(PageDirty(page));
4004 BUG_ON(PageWriteback(page));
4005 /*
4006 * We need to make sure we haven't be attached
4007 * to a new eb.
4008 */
4009 ClearPagePrivate(page);
4010 set_page_private(page, 0);
4011 /* One for the page private */
4012 page_cache_release(page);
4013 }
4014 spin_unlock(&page->mapping->private_lock);
4015
4016 /* One for when we alloced the page */
3646 page_cache_release(page); 4017 page_cache_release(page);
4018 }
3647 } while (index != start_idx); 4019 } while (index != start_idx);
3648} 4020}
3649 4021
@@ -3656,9 +4028,50 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
3656 __free_extent_buffer(eb); 4028 __free_extent_buffer(eb);
3657} 4029}
3658 4030
4031static void check_buffer_tree_ref(struct extent_buffer *eb)
4032{
4033 /* the ref bit is tricky. We have to make sure it is set
4034 * if we have the buffer dirty. Otherwise the
4035 * code to free a buffer can end up dropping a dirty
4036 * page
4037 *
4038 * Once the ref bit is set, it won't go away while the
4039 * buffer is dirty or in writeback, and it also won't
4040 * go away while we have the reference count on the
4041 * eb bumped.
4042 *
4043 * We can't just set the ref bit without bumping the
4044 * ref on the eb because free_extent_buffer might
4045 * see the ref bit and try to clear it. If this happens
4046 * free_extent_buffer might end up dropping our original
4047 * ref by mistake and freeing the page before we are able
4048 * to add one more ref.
4049 *
4050 * So bump the ref count first, then set the bit. If someone
4051 * beat us to it, drop the ref we added.
4052 */
4053 if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4054 atomic_inc(&eb->refs);
4055 if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4056 atomic_dec(&eb->refs);
4057 }
4058}
4059
4060static void mark_extent_buffer_accessed(struct extent_buffer *eb)
4061{
4062 unsigned long num_pages, i;
4063
4064 check_buffer_tree_ref(eb);
4065
4066 num_pages = num_extent_pages(eb->start, eb->len);
4067 for (i = 0; i < num_pages; i++) {
4068 struct page *p = extent_buffer_page(eb, i);
4069 mark_page_accessed(p);
4070 }
4071}
4072
3659struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, 4073struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3660 u64 start, unsigned long len, 4074 u64 start, unsigned long len)
3661 struct page *page0)
3662{ 4075{
3663 unsigned long num_pages = num_extent_pages(start, len); 4076 unsigned long num_pages = num_extent_pages(start, len);
3664 unsigned long i; 4077 unsigned long i;
@@ -3674,7 +4087,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3674 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT); 4087 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3675 if (eb && atomic_inc_not_zero(&eb->refs)) { 4088 if (eb && atomic_inc_not_zero(&eb->refs)) {
3676 rcu_read_unlock(); 4089 rcu_read_unlock();
3677 mark_page_accessed(eb->first_page); 4090 mark_extent_buffer_accessed(eb);
3678 return eb; 4091 return eb;
3679 } 4092 }
3680 rcu_read_unlock(); 4093 rcu_read_unlock();
@@ -3683,32 +4096,43 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3683 if (!eb) 4096 if (!eb)
3684 return NULL; 4097 return NULL;
3685 4098
3686 if (page0) { 4099 for (i = 0; i < num_pages; i++, index++) {
3687 eb->first_page = page0;
3688 i = 1;
3689 index++;
3690 page_cache_get(page0);
3691 mark_page_accessed(page0);
3692 set_page_extent_mapped(page0);
3693 set_page_extent_head(page0, len);
3694 uptodate = PageUptodate(page0);
3695 } else {
3696 i = 0;
3697 }
3698 for (; i < num_pages; i++, index++) {
3699 p = find_or_create_page(mapping, index, GFP_NOFS); 4100 p = find_or_create_page(mapping, index, GFP_NOFS);
3700 if (!p) { 4101 if (!p) {
3701 WARN_ON(1); 4102 WARN_ON(1);
3702 goto free_eb; 4103 goto free_eb;
3703 } 4104 }
3704 set_page_extent_mapped(p); 4105
3705 mark_page_accessed(p); 4106 spin_lock(&mapping->private_lock);
3706 if (i == 0) { 4107 if (PagePrivate(p)) {
3707 eb->first_page = p; 4108 /*
3708 set_page_extent_head(p, len); 4109 * We could have already allocated an eb for this page
3709 } else { 4110 * and attached one so lets see if we can get a ref on
3710 set_page_private(p, EXTENT_PAGE_PRIVATE); 4111 * the existing eb, and if we can we know it's good and
4112 * we can just return that one, else we know we can just
4113 * overwrite page->private.
4114 */
4115 exists = (struct extent_buffer *)p->private;
4116 if (atomic_inc_not_zero(&exists->refs)) {
4117 spin_unlock(&mapping->private_lock);
4118 unlock_page(p);
4119 mark_extent_buffer_accessed(exists);
4120 goto free_eb;
4121 }
4122
4123 /*
4124 * Do this so attach doesn't complain and we need to
4125 * drop the ref the old guy had.
4126 */
4127 ClearPagePrivate(p);
4128 WARN_ON(PageDirty(p));
4129 page_cache_release(p);
3711 } 4130 }
4131 attach_extent_buffer_page(eb, p);
4132 spin_unlock(&mapping->private_lock);
4133 WARN_ON(PageDirty(p));
4134 mark_page_accessed(p);
4135 eb->pages[i] = p;
3712 if (!PageUptodate(p)) 4136 if (!PageUptodate(p))
3713 uptodate = 0; 4137 uptodate = 0;
3714 4138
@@ -3716,12 +4140,10 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3716 * see below about how we avoid a nasty race with release page 4140 * see below about how we avoid a nasty race with release page
3717 * and why we unlock later 4141 * and why we unlock later
3718 */ 4142 */
3719 if (i != 0)
3720 unlock_page(p);
3721 } 4143 }
3722 if (uptodate) 4144 if (uptodate)
3723 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 4145 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3724 4146again:
3725 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); 4147 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
3726 if (ret) 4148 if (ret)
3727 goto free_eb; 4149 goto free_eb;
@@ -3731,14 +4153,21 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3731 if (ret == -EEXIST) { 4153 if (ret == -EEXIST) {
3732 exists = radix_tree_lookup(&tree->buffer, 4154 exists = radix_tree_lookup(&tree->buffer,
3733 start >> PAGE_CACHE_SHIFT); 4155 start >> PAGE_CACHE_SHIFT);
3734 /* add one reference for the caller */ 4156 if (!atomic_inc_not_zero(&exists->refs)) {
3735 atomic_inc(&exists->refs); 4157 spin_unlock(&tree->buffer_lock);
4158 radix_tree_preload_end();
4159 exists = NULL;
4160 goto again;
4161 }
3736 spin_unlock(&tree->buffer_lock); 4162 spin_unlock(&tree->buffer_lock);
3737 radix_tree_preload_end(); 4163 radix_tree_preload_end();
4164 mark_extent_buffer_accessed(exists);
3738 goto free_eb; 4165 goto free_eb;
3739 } 4166 }
3740 /* add one reference for the tree */ 4167 /* add one reference for the tree */
3741 atomic_inc(&eb->refs); 4168 spin_lock(&eb->refs_lock);
4169 check_buffer_tree_ref(eb);
4170 spin_unlock(&eb->refs_lock);
3742 spin_unlock(&tree->buffer_lock); 4171 spin_unlock(&tree->buffer_lock);
3743 radix_tree_preload_end(); 4172 radix_tree_preload_end();
3744 4173
@@ -3751,15 +4180,20 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3751 * after the extent buffer is in the radix tree so 4180 * after the extent buffer is in the radix tree so
3752 * it doesn't get lost 4181 * it doesn't get lost
3753 */ 4182 */
3754 set_page_extent_mapped(eb->first_page); 4183 SetPageChecked(eb->pages[0]);
3755 set_page_extent_head(eb->first_page, eb->len); 4184 for (i = 1; i < num_pages; i++) {
3756 if (!page0) 4185 p = extent_buffer_page(eb, i);
3757 unlock_page(eb->first_page); 4186 ClearPageChecked(p);
4187 unlock_page(p);
4188 }
4189 unlock_page(eb->pages[0]);
3758 return eb; 4190 return eb;
3759 4191
3760free_eb: 4192free_eb:
3761 if (eb->first_page && !page0) 4193 for (i = 0; i < num_pages; i++) {
3762 unlock_page(eb->first_page); 4194 if (eb->pages[i])
4195 unlock_page(eb->pages[i]);
4196 }
3763 4197
3764 if (!atomic_dec_and_test(&eb->refs)) 4198 if (!atomic_dec_and_test(&eb->refs))
3765 return exists; 4199 return exists;
@@ -3776,7 +4210,7 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3776 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT); 4210 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3777 if (eb && atomic_inc_not_zero(&eb->refs)) { 4211 if (eb && atomic_inc_not_zero(&eb->refs)) {
3778 rcu_read_unlock(); 4212 rcu_read_unlock();
3779 mark_page_accessed(eb->first_page); 4213 mark_extent_buffer_accessed(eb);
3780 return eb; 4214 return eb;
3781 } 4215 }
3782 rcu_read_unlock(); 4216 rcu_read_unlock();
@@ -3784,19 +4218,71 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3784 return NULL; 4218 return NULL;
3785} 4219}
3786 4220
4221static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
4222{
4223 struct extent_buffer *eb =
4224 container_of(head, struct extent_buffer, rcu_head);
4225
4226 __free_extent_buffer(eb);
4227}
4228
4229/* Expects to have eb->eb_lock already held */
4230static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
4231{
4232 WARN_ON(atomic_read(&eb->refs) == 0);
4233 if (atomic_dec_and_test(&eb->refs)) {
4234 struct extent_io_tree *tree = eb->tree;
4235
4236 spin_unlock(&eb->refs_lock);
4237
4238 spin_lock(&tree->buffer_lock);
4239 radix_tree_delete(&tree->buffer,
4240 eb->start >> PAGE_CACHE_SHIFT);
4241 spin_unlock(&tree->buffer_lock);
4242
4243 /* Should be safe to release our pages at this point */
4244 btrfs_release_extent_buffer_page(eb, 0);
4245
4246 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
4247 return;
4248 }
4249 spin_unlock(&eb->refs_lock);
4250}
4251
3787void free_extent_buffer(struct extent_buffer *eb) 4252void free_extent_buffer(struct extent_buffer *eb)
3788{ 4253{
3789 if (!eb) 4254 if (!eb)
3790 return; 4255 return;
3791 4256
3792 if (!atomic_dec_and_test(&eb->refs)) 4257 spin_lock(&eb->refs_lock);
4258 if (atomic_read(&eb->refs) == 2 &&
4259 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4260 !extent_buffer_under_io(eb) &&
4261 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4262 atomic_dec(&eb->refs);
4263
4264 /*
4265 * I know this is terrible, but it's temporary until we stop tracking
4266 * the uptodate bits and such for the extent buffers.
4267 */
4268 release_extent_buffer(eb, GFP_ATOMIC);
4269}
4270
4271void free_extent_buffer_stale(struct extent_buffer *eb)
4272{
4273 if (!eb)
3793 return; 4274 return;
3794 4275
3795 WARN_ON(1); 4276 spin_lock(&eb->refs_lock);
4277 set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
4278
4279 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
4280 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
4281 atomic_dec(&eb->refs);
4282 release_extent_buffer(eb, GFP_NOFS);
3796} 4283}
3797 4284
3798int clear_extent_buffer_dirty(struct extent_io_tree *tree, 4285void clear_extent_buffer_dirty(struct extent_buffer *eb)
3799 struct extent_buffer *eb)
3800{ 4286{
3801 unsigned long i; 4287 unsigned long i;
3802 unsigned long num_pages; 4288 unsigned long num_pages;
@@ -3812,10 +4298,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3812 lock_page(page); 4298 lock_page(page);
3813 WARN_ON(!PagePrivate(page)); 4299 WARN_ON(!PagePrivate(page));
3814 4300
3815 set_page_extent_mapped(page);
3816 if (i == 0)
3817 set_page_extent_head(page, eb->len);
3818
3819 clear_page_dirty_for_io(page); 4301 clear_page_dirty_for_io(page);
3820 spin_lock_irq(&page->mapping->tree_lock); 4302 spin_lock_irq(&page->mapping->tree_lock);
3821 if (!PageDirty(page)) { 4303 if (!PageDirty(page)) {
@@ -3827,24 +4309,29 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3827 ClearPageError(page); 4309 ClearPageError(page);
3828 unlock_page(page); 4310 unlock_page(page);
3829 } 4311 }
3830 return 0; 4312 WARN_ON(atomic_read(&eb->refs) == 0);
3831} 4313}
3832 4314
3833int set_extent_buffer_dirty(struct extent_io_tree *tree, 4315int set_extent_buffer_dirty(struct extent_buffer *eb)
3834 struct extent_buffer *eb)
3835{ 4316{
3836 unsigned long i; 4317 unsigned long i;
3837 unsigned long num_pages; 4318 unsigned long num_pages;
3838 int was_dirty = 0; 4319 int was_dirty = 0;
3839 4320
4321 check_buffer_tree_ref(eb);
4322
3840 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); 4323 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
4324
3841 num_pages = num_extent_pages(eb->start, eb->len); 4325 num_pages = num_extent_pages(eb->start, eb->len);
4326 WARN_ON(atomic_read(&eb->refs) == 0);
4327 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
4328
3842 for (i = 0; i < num_pages; i++) 4329 for (i = 0; i < num_pages; i++)
3843 __set_page_dirty_nobuffers(extent_buffer_page(eb, i)); 4330 set_page_dirty(extent_buffer_page(eb, i));
3844 return was_dirty; 4331 return was_dirty;
3845} 4332}
3846 4333
3847static int __eb_straddles_pages(u64 start, u64 len) 4334static int range_straddles_pages(u64 start, u64 len)
3848{ 4335{
3849 if (len < PAGE_CACHE_SIZE) 4336 if (len < PAGE_CACHE_SIZE)
3850 return 1; 4337 return 1;
@@ -3855,25 +4342,14 @@ static int __eb_straddles_pages(u64 start, u64 len)
3855 return 0; 4342 return 0;
3856} 4343}
3857 4344
3858static int eb_straddles_pages(struct extent_buffer *eb) 4345int clear_extent_buffer_uptodate(struct extent_buffer *eb)
3859{
3860 return __eb_straddles_pages(eb->start, eb->len);
3861}
3862
3863int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3864 struct extent_buffer *eb,
3865 struct extent_state **cached_state)
3866{ 4346{
3867 unsigned long i; 4347 unsigned long i;
3868 struct page *page; 4348 struct page *page;
3869 unsigned long num_pages; 4349 unsigned long num_pages;
3870 4350
3871 num_pages = num_extent_pages(eb->start, eb->len);
3872 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 4351 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3873 4352 num_pages = num_extent_pages(eb->start, eb->len);
3874 clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3875 cached_state, GFP_NOFS);
3876
3877 for (i = 0; i < num_pages; i++) { 4353 for (i = 0; i < num_pages; i++) {
3878 page = extent_buffer_page(eb, i); 4354 page = extent_buffer_page(eb, i);
3879 if (page) 4355 if (page)
@@ -3882,27 +4358,16 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3882 return 0; 4358 return 0;
3883} 4359}
3884 4360
3885int set_extent_buffer_uptodate(struct extent_io_tree *tree, 4361int set_extent_buffer_uptodate(struct extent_buffer *eb)
3886 struct extent_buffer *eb)
3887{ 4362{
3888 unsigned long i; 4363 unsigned long i;
3889 struct page *page; 4364 struct page *page;
3890 unsigned long num_pages; 4365 unsigned long num_pages;
3891 4366
4367 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3892 num_pages = num_extent_pages(eb->start, eb->len); 4368 num_pages = num_extent_pages(eb->start, eb->len);
3893
3894 if (eb_straddles_pages(eb)) {
3895 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3896 NULL, GFP_NOFS);
3897 }
3898 for (i = 0; i < num_pages; i++) { 4369 for (i = 0; i < num_pages; i++) {
3899 page = extent_buffer_page(eb, i); 4370 page = extent_buffer_page(eb, i);
3900 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3901 ((i == num_pages - 1) &&
3902 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3903 check_page_uptodate(tree, page);
3904 continue;
3905 }
3906 SetPageUptodate(page); 4371 SetPageUptodate(page);
3907 } 4372 }
3908 return 0; 4373 return 0;
@@ -3917,7 +4382,7 @@ int extent_range_uptodate(struct extent_io_tree *tree,
3917 int uptodate; 4382 int uptodate;
3918 unsigned long index; 4383 unsigned long index;
3919 4384
3920 if (__eb_straddles_pages(start, end - start + 1)) { 4385 if (range_straddles_pages(start, end - start + 1)) {
3921 ret = test_range_bit(tree, start, end, 4386 ret = test_range_bit(tree, start, end,
3922 EXTENT_UPTODATE, 1, NULL); 4387 EXTENT_UPTODATE, 1, NULL);
3923 if (ret) 4388 if (ret)
@@ -3939,35 +4404,9 @@ int extent_range_uptodate(struct extent_io_tree *tree,
3939 return pg_uptodate; 4404 return pg_uptodate;
3940} 4405}
3941 4406
3942int extent_buffer_uptodate(struct extent_io_tree *tree, 4407int extent_buffer_uptodate(struct extent_buffer *eb)
3943 struct extent_buffer *eb,
3944 struct extent_state *cached_state)
3945{ 4408{
3946 int ret = 0; 4409 return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3947 unsigned long num_pages;
3948 unsigned long i;
3949 struct page *page;
3950 int pg_uptodate = 1;
3951
3952 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3953 return 1;
3954
3955 if (eb_straddles_pages(eb)) {
3956 ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3957 EXTENT_UPTODATE, 1, cached_state);
3958 if (ret)
3959 return ret;
3960 }
3961
3962 num_pages = num_extent_pages(eb->start, eb->len);
3963 for (i = 0; i < num_pages; i++) {
3964 page = extent_buffer_page(eb, i);
3965 if (!PageUptodate(page)) {
3966 pg_uptodate = 0;
3967 break;
3968 }
3969 }
3970 return pg_uptodate;
3971} 4410}
3972 4411
3973int read_extent_buffer_pages(struct extent_io_tree *tree, 4412int read_extent_buffer_pages(struct extent_io_tree *tree,
@@ -3981,21 +4420,14 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
3981 int ret = 0; 4420 int ret = 0;
3982 int locked_pages = 0; 4421 int locked_pages = 0;
3983 int all_uptodate = 1; 4422 int all_uptodate = 1;
3984 int inc_all_pages = 0;
3985 unsigned long num_pages; 4423 unsigned long num_pages;
4424 unsigned long num_reads = 0;
3986 struct bio *bio = NULL; 4425 struct bio *bio = NULL;
3987 unsigned long bio_flags = 0; 4426 unsigned long bio_flags = 0;
3988 4427
3989 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) 4428 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3990 return 0; 4429 return 0;
3991 4430
3992 if (eb_straddles_pages(eb)) {
3993 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3994 EXTENT_UPTODATE, 1, NULL)) {
3995 return 0;
3996 }
3997 }
3998
3999 if (start) { 4431 if (start) {
4000 WARN_ON(start < eb->start); 4432 WARN_ON(start < eb->start);
4001 start_i = (start >> PAGE_CACHE_SHIFT) - 4433 start_i = (start >> PAGE_CACHE_SHIFT) -
@@ -4014,8 +4446,10 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
4014 lock_page(page); 4446 lock_page(page);
4015 } 4447 }
4016 locked_pages++; 4448 locked_pages++;
4017 if (!PageUptodate(page)) 4449 if (!PageUptodate(page)) {
4450 num_reads++;
4018 all_uptodate = 0; 4451 all_uptodate = 0;
4452 }
4019 } 4453 }
4020 if (all_uptodate) { 4454 if (all_uptodate) {
4021 if (start_i == 0) 4455 if (start_i == 0)
@@ -4023,20 +4457,12 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
4023 goto unlock_exit; 4457 goto unlock_exit;
4024 } 4458 }
4025 4459
4460 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
4461 eb->failed_mirror = 0;
4462 atomic_set(&eb->io_pages, num_reads);
4026 for (i = start_i; i < num_pages; i++) { 4463 for (i = start_i; i < num_pages; i++) {
4027 page = extent_buffer_page(eb, i); 4464 page = extent_buffer_page(eb, i);
4028
4029 WARN_ON(!PagePrivate(page));
4030
4031 set_page_extent_mapped(page);
4032 if (i == 0)
4033 set_page_extent_head(page, eb->len);
4034
4035 if (inc_all_pages)
4036 page_cache_get(page);
4037 if (!PageUptodate(page)) { 4465 if (!PageUptodate(page)) {
4038 if (start_i == 0)
4039 inc_all_pages = 1;
4040 ClearPageError(page); 4466 ClearPageError(page);
4041 err = __extent_read_full_page(tree, page, 4467 err = __extent_read_full_page(tree, page,
4042 get_extent, &bio, 4468 get_extent, &bio,
@@ -4048,8 +4474,11 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
4048 } 4474 }
4049 } 4475 }
4050 4476
4051 if (bio) 4477 if (bio) {
4052 submit_one_bio(READ, bio, mirror_num, bio_flags); 4478 err = submit_one_bio(READ, bio, mirror_num, bio_flags);
4479 if (err)
4480 return err;
4481 }
4053 4482
4054 if (ret || wait != WAIT_COMPLETE) 4483 if (ret || wait != WAIT_COMPLETE)
4055 return ret; 4484 return ret;
@@ -4061,8 +4490,6 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
4061 ret = -EIO; 4490 ret = -EIO;
4062 } 4491 }
4063 4492
4064 if (!ret)
4065 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
4066 return ret; 4493 return ret;
4067 4494
4068unlock_exit: 4495unlock_exit:
@@ -4304,15 +4731,20 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
4304{ 4731{
4305 char *dst_kaddr = page_address(dst_page); 4732 char *dst_kaddr = page_address(dst_page);
4306 char *src_kaddr; 4733 char *src_kaddr;
4734 int must_memmove = 0;
4307 4735
4308 if (dst_page != src_page) { 4736 if (dst_page != src_page) {
4309 src_kaddr = page_address(src_page); 4737 src_kaddr = page_address(src_page);
4310 } else { 4738 } else {
4311 src_kaddr = dst_kaddr; 4739 src_kaddr = dst_kaddr;
4312 BUG_ON(areas_overlap(src_off, dst_off, len)); 4740 if (areas_overlap(src_off, dst_off, len))
4741 must_memmove = 1;
4313 } 4742 }
4314 4743
4315 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); 4744 if (must_memmove)
4745 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
4746 else
4747 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
4316} 4748}
4317 4749
4318void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, 4750void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
@@ -4382,7 +4814,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4382 "len %lu len %lu\n", dst_offset, len, dst->len); 4814 "len %lu len %lu\n", dst_offset, len, dst->len);
4383 BUG_ON(1); 4815 BUG_ON(1);
4384 } 4816 }
4385 if (!areas_overlap(src_offset, dst_offset, len)) { 4817 if (dst_offset < src_offset) {
4386 memcpy_extent_buffer(dst, dst_offset, src_offset, len); 4818 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4387 return; 4819 return;
4388 } 4820 }
@@ -4408,47 +4840,48 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
4408 } 4840 }
4409} 4841}
4410 4842
4411static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head) 4843int try_release_extent_buffer(struct page *page, gfp_t mask)
4412{ 4844{
4413 struct extent_buffer *eb =
4414 container_of(head, struct extent_buffer, rcu_head);
4415
4416 btrfs_release_extent_buffer(eb);
4417}
4418
4419int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
4420{
4421 u64 start = page_offset(page);
4422 struct extent_buffer *eb; 4845 struct extent_buffer *eb;
4423 int ret = 1;
4424 4846
4425 spin_lock(&tree->buffer_lock); 4847 /*
4426 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT); 4848 * We need to make sure noboody is attaching this page to an eb right
4427 if (!eb) { 4849 * now.
4428 spin_unlock(&tree->buffer_lock); 4850 */
4429 return ret; 4851 spin_lock(&page->mapping->private_lock);
4852 if (!PagePrivate(page)) {
4853 spin_unlock(&page->mapping->private_lock);
4854 return 1;
4430 } 4855 }
4431 4856
4432 if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { 4857 eb = (struct extent_buffer *)page->private;
4433 ret = 0; 4858 BUG_ON(!eb);
4434 goto out;
4435 }
4436 4859
4437 /* 4860 /*
4438 * set @eb->refs to 0 if it is already 1, and then release the @eb. 4861 * This is a little awful but should be ok, we need to make sure that
4439 * Or go back. 4862 * the eb doesn't disappear out from under us while we're looking at
4863 * this page.
4440 */ 4864 */
4441 if (atomic_cmpxchg(&eb->refs, 1, 0) != 1) { 4865 spin_lock(&eb->refs_lock);
4442 ret = 0; 4866 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4443 goto out; 4867 spin_unlock(&eb->refs_lock);
4868 spin_unlock(&page->mapping->private_lock);
4869 return 0;
4444 } 4870 }
4871 spin_unlock(&page->mapping->private_lock);
4445 4872
4446 radix_tree_delete(&tree->buffer, start >> PAGE_CACHE_SHIFT); 4873 if ((mask & GFP_NOFS) == GFP_NOFS)
4447out: 4874 mask = GFP_NOFS;
4448 spin_unlock(&tree->buffer_lock);
4449 4875
4450 /* at this point we can safely release the extent buffer */ 4876 /*
4451 if (atomic_read(&eb->refs) == 0) 4877 * If tree ref isn't set then we know the ref on this eb is a real ref,
4452 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); 4878 * so just return, this page will likely be freed soon anyway.
4453 return ret; 4879 */
4880 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4881 spin_unlock(&eb->refs_lock);
4882 return 0;
4883 }
4884 release_extent_buffer(eb, mask);
4885
4886 return 1;
4454} 4887}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index cecc3518c121..faf10eb57f75 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -35,6 +35,10 @@
35#define EXTENT_BUFFER_DIRTY 2 35#define EXTENT_BUFFER_DIRTY 2
36#define EXTENT_BUFFER_CORRUPT 3 36#define EXTENT_BUFFER_CORRUPT 3
37#define EXTENT_BUFFER_READAHEAD 4 /* this got triggered by readahead */ 37#define EXTENT_BUFFER_READAHEAD 4 /* this got triggered by readahead */
38#define EXTENT_BUFFER_TREE_REF 5
39#define EXTENT_BUFFER_STALE 6
40#define EXTENT_BUFFER_WRITEBACK 7
41#define EXTENT_BUFFER_IOERR 8
38 42
39/* these are flags for extent_clear_unlock_delalloc */ 43/* these are flags for extent_clear_unlock_delalloc */
40#define EXTENT_CLEAR_UNLOCK_PAGE 0x1 44#define EXTENT_CLEAR_UNLOCK_PAGE 0x1
@@ -54,6 +58,7 @@
54#define EXTENT_PAGE_PRIVATE_FIRST_PAGE 3 58#define EXTENT_PAGE_PRIVATE_FIRST_PAGE 3
55 59
56struct extent_state; 60struct extent_state;
61struct btrfs_root;
57 62
58typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw, 63typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
59 struct bio *bio, int mirror_num, 64 struct bio *bio, int mirror_num,
@@ -69,9 +74,7 @@ struct extent_io_ops {
69 size_t size, struct bio *bio, 74 size_t size, struct bio *bio,
70 unsigned long bio_flags); 75 unsigned long bio_flags);
71 int (*readpage_io_hook)(struct page *page, u64 start, u64 end); 76 int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
72 int (*readpage_io_failed_hook)(struct bio *bio, struct page *page, 77 int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
73 u64 start, u64 end, int failed_mirror,
74 struct extent_state *state);
75 int (*writepage_io_failed_hook)(struct bio *bio, struct page *page, 78 int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
76 u64 start, u64 end, 79 u64 start, u64 end,
77 struct extent_state *state); 80 struct extent_state *state);
@@ -97,6 +100,7 @@ struct extent_io_tree {
97 struct radix_tree_root buffer; 100 struct radix_tree_root buffer;
98 struct address_space *mapping; 101 struct address_space *mapping;
99 u64 dirty_bytes; 102 u64 dirty_bytes;
103 int track_uptodate;
100 spinlock_t lock; 104 spinlock_t lock;
101 spinlock_t buffer_lock; 105 spinlock_t buffer_lock;
102 struct extent_io_ops *ops; 106 struct extent_io_ops *ops;
@@ -119,16 +123,21 @@ struct extent_state {
119 struct list_head leak_list; 123 struct list_head leak_list;
120}; 124};
121 125
126#define INLINE_EXTENT_BUFFER_PAGES 16
127#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE)
122struct extent_buffer { 128struct extent_buffer {
123 u64 start; 129 u64 start;
124 unsigned long len; 130 unsigned long len;
125 unsigned long map_start; 131 unsigned long map_start;
126 unsigned long map_len; 132 unsigned long map_len;
127 struct page *first_page;
128 unsigned long bflags; 133 unsigned long bflags;
134 struct extent_io_tree *tree;
135 spinlock_t refs_lock;
136 atomic_t refs;
137 atomic_t io_pages;
138 int failed_mirror;
129 struct list_head leak_list; 139 struct list_head leak_list;
130 struct rcu_head rcu_head; 140 struct rcu_head rcu_head;
131 atomic_t refs;
132 pid_t lock_owner; 141 pid_t lock_owner;
133 142
134 /* count of read lock holders on the extent buffer */ 143 /* count of read lock holders on the extent buffer */
@@ -152,6 +161,9 @@ struct extent_buffer {
152 * to unlock 161 * to unlock
153 */ 162 */
154 wait_queue_head_t read_lock_wq; 163 wait_queue_head_t read_lock_wq;
164 wait_queue_head_t lock_wq;
165 struct page *inline_pages[INLINE_EXTENT_BUFFER_PAGES];
166 struct page **pages;
155}; 167};
156 168
157static inline void extent_set_compress_type(unsigned long *bio_flags, 169static inline void extent_set_compress_type(unsigned long *bio_flags,
@@ -178,18 +190,17 @@ void extent_io_tree_init(struct extent_io_tree *tree,
178int try_release_extent_mapping(struct extent_map_tree *map, 190int try_release_extent_mapping(struct extent_map_tree *map,
179 struct extent_io_tree *tree, struct page *page, 191 struct extent_io_tree *tree, struct page *page,
180 gfp_t mask); 192 gfp_t mask);
181int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page); 193int try_release_extent_buffer(struct page *page, gfp_t mask);
182int try_release_extent_state(struct extent_map_tree *map, 194int try_release_extent_state(struct extent_map_tree *map,
183 struct extent_io_tree *tree, struct page *page, 195 struct extent_io_tree *tree, struct page *page,
184 gfp_t mask); 196 gfp_t mask);
185int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); 197int lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
186int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 198int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
187 int bits, struct extent_state **cached, gfp_t mask); 199 int bits, struct extent_state **cached);
188int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); 200int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);
189int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, 201int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
190 struct extent_state **cached, gfp_t mask); 202 struct extent_state **cached, gfp_t mask);
191int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, 203int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
192 gfp_t mask);
193int extent_read_full_page(struct extent_io_tree *tree, struct page *page, 204int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
194 get_extent_t *get_extent, int mirror_num); 205 get_extent_t *get_extent, int mirror_num);
195int __init extent_io_init(void); 206int __init extent_io_init(void);
@@ -210,7 +221,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
210int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 221int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
211 int bits, gfp_t mask); 222 int bits, gfp_t mask);
212int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 223int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
213 int bits, int exclusive_bits, u64 *failed_start, 224 int bits, u64 *failed_start,
214 struct extent_state **cached_state, gfp_t mask); 225 struct extent_state **cached_state, gfp_t mask);
215int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, 226int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
216 struct extent_state **cached_state, gfp_t mask); 227 struct extent_state **cached_state, gfp_t mask);
@@ -240,6 +251,8 @@ int extent_writepages(struct extent_io_tree *tree,
240 struct address_space *mapping, 251 struct address_space *mapping,
241 get_extent_t *get_extent, 252 get_extent_t *get_extent,
242 struct writeback_control *wbc); 253 struct writeback_control *wbc);
254int btree_write_cache_pages(struct address_space *mapping,
255 struct writeback_control *wbc);
243int extent_readpages(struct extent_io_tree *tree, 256int extent_readpages(struct extent_io_tree *tree,
244 struct address_space *mapping, 257 struct address_space *mapping,
245 struct list_head *pages, unsigned nr_pages, 258 struct list_head *pages, unsigned nr_pages,
@@ -251,11 +264,11 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
251void set_page_extent_mapped(struct page *page); 264void set_page_extent_mapped(struct page *page);
252 265
253struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, 266struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
254 u64 start, unsigned long len, 267 u64 start, unsigned long len);
255 struct page *page0);
256struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, 268struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
257 u64 start, unsigned long len); 269 u64 start, unsigned long len);
258void free_extent_buffer(struct extent_buffer *eb); 270void free_extent_buffer(struct extent_buffer *eb);
271void free_extent_buffer_stale(struct extent_buffer *eb);
259#define WAIT_NONE 0 272#define WAIT_NONE 0
260#define WAIT_COMPLETE 1 273#define WAIT_COMPLETE 1
261#define WAIT_PAGE_LOCK 2 274#define WAIT_PAGE_LOCK 2
@@ -287,19 +300,12 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
287 unsigned long src_offset, unsigned long len); 300 unsigned long src_offset, unsigned long len);
288void memset_extent_buffer(struct extent_buffer *eb, char c, 301void memset_extent_buffer(struct extent_buffer *eb, char c,
289 unsigned long start, unsigned long len); 302 unsigned long start, unsigned long len);
290int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); 303void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
291int clear_extent_buffer_dirty(struct extent_io_tree *tree, 304void clear_extent_buffer_dirty(struct extent_buffer *eb);
292 struct extent_buffer *eb); 305int set_extent_buffer_dirty(struct extent_buffer *eb);
293int set_extent_buffer_dirty(struct extent_io_tree *tree, 306int set_extent_buffer_uptodate(struct extent_buffer *eb);
294 struct extent_buffer *eb); 307int clear_extent_buffer_uptodate(struct extent_buffer *eb);
295int set_extent_buffer_uptodate(struct extent_io_tree *tree, 308int extent_buffer_uptodate(struct extent_buffer *eb);
296 struct extent_buffer *eb);
297int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
298 struct extent_buffer *eb,
299 struct extent_state **cached_state);
300int extent_buffer_uptodate(struct extent_io_tree *tree,
301 struct extent_buffer *eb,
302 struct extent_state *cached_state);
303int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset, 309int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
304 unsigned long min_len, char **map, 310 unsigned long min_len, char **map,
305 unsigned long *map_start, 311 unsigned long *map_start,
@@ -320,4 +326,6 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
320 u64 length, u64 logical, struct page *page, 326 u64 length, u64 logical, struct page *page,
321 int mirror_num); 327 int mirror_num);
322int end_extent_writepage(struct page *page, int err, u64 start, u64 end); 328int end_extent_writepage(struct page *page, int err, u64 start, u64 end);
329int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
330 int mirror_num);
323#endif 331#endif
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 078b4fd54500..5d158d320233 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -25,10 +25,12 @@
25#include "transaction.h" 25#include "transaction.h"
26#include "print-tree.h" 26#include "print-tree.h"
27 27
28#define MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \ 28#define __MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
29 sizeof(struct btrfs_item) * 2) / \ 29 sizeof(struct btrfs_item) * 2) / \
30 size) - 1)) 30 size) - 1))
31 31
32#define MAX_CSUM_ITEMS(r, size) (min(__MAX_CSUM_ITEMS(r, size), PAGE_CACHE_SIZE))
33
32#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \ 34#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
33 sizeof(struct btrfs_ordered_sum)) / \ 35 sizeof(struct btrfs_ordered_sum)) / \
34 sizeof(struct btrfs_sector_sum) * \ 36 sizeof(struct btrfs_sector_sum) * \
@@ -59,7 +61,7 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
59 sizeof(*item)); 61 sizeof(*item));
60 if (ret < 0) 62 if (ret < 0)
61 goto out; 63 goto out;
62 BUG_ON(ret); 64 BUG_ON(ret); /* Can't happen */
63 leaf = path->nodes[0]; 65 leaf = path->nodes[0];
64 item = btrfs_item_ptr(leaf, path->slots[0], 66 item = btrfs_item_ptr(leaf, path->slots[0],
65 struct btrfs_file_extent_item); 67 struct btrfs_file_extent_item);
@@ -284,6 +286,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
284 struct btrfs_ordered_sum *sums; 286 struct btrfs_ordered_sum *sums;
285 struct btrfs_sector_sum *sector_sum; 287 struct btrfs_sector_sum *sector_sum;
286 struct btrfs_csum_item *item; 288 struct btrfs_csum_item *item;
289 LIST_HEAD(tmplist);
287 unsigned long offset; 290 unsigned long offset;
288 int ret; 291 int ret;
289 size_t size; 292 size_t size;
@@ -358,7 +361,10 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
358 MAX_ORDERED_SUM_BYTES(root)); 361 MAX_ORDERED_SUM_BYTES(root));
359 sums = kzalloc(btrfs_ordered_sum_size(root, size), 362 sums = kzalloc(btrfs_ordered_sum_size(root, size),
360 GFP_NOFS); 363 GFP_NOFS);
361 BUG_ON(!sums); 364 if (!sums) {
365 ret = -ENOMEM;
366 goto fail;
367 }
362 368
363 sector_sum = sums->sums; 369 sector_sum = sums->sums;
364 sums->bytenr = start; 370 sums->bytenr = start;
@@ -380,12 +386,19 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
380 offset += csum_size; 386 offset += csum_size;
381 sector_sum++; 387 sector_sum++;
382 } 388 }
383 list_add_tail(&sums->list, list); 389 list_add_tail(&sums->list, &tmplist);
384 } 390 }
385 path->slots[0]++; 391 path->slots[0]++;
386 } 392 }
387 ret = 0; 393 ret = 0;
388fail: 394fail:
395 while (ret < 0 && !list_empty(&tmplist)) {
396 sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);
397 list_del(&sums->list);
398 kfree(sums);
399 }
400 list_splice_tail(&tmplist, list);
401
389 btrfs_free_path(path); 402 btrfs_free_path(path);
390 return ret; 403 return ret;
391} 404}
@@ -420,7 +433,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
420 offset = page_offset(bvec->bv_page) + bvec->bv_offset; 433 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
421 434
422 ordered = btrfs_lookup_ordered_extent(inode, offset); 435 ordered = btrfs_lookup_ordered_extent(inode, offset);
423 BUG_ON(!ordered); 436 BUG_ON(!ordered); /* Logic error */
424 sums->bytenr = ordered->start; 437 sums->bytenr = ordered->start;
425 438
426 while (bio_index < bio->bi_vcnt) { 439 while (bio_index < bio->bi_vcnt) {
@@ -439,11 +452,11 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
439 452
440 sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left), 453 sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
441 GFP_NOFS); 454 GFP_NOFS);
442 BUG_ON(!sums); 455 BUG_ON(!sums); /* -ENOMEM */
443 sector_sum = sums->sums; 456 sector_sum = sums->sums;
444 sums->len = bytes_left; 457 sums->len = bytes_left;
445 ordered = btrfs_lookup_ordered_extent(inode, offset); 458 ordered = btrfs_lookup_ordered_extent(inode, offset);
446 BUG_ON(!ordered); 459 BUG_ON(!ordered); /* Logic error */
447 sums->bytenr = ordered->start; 460 sums->bytenr = ordered->start;
448 } 461 }
449 462
@@ -483,18 +496,17 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
483 * This calls btrfs_truncate_item with the correct args based on the 496 * This calls btrfs_truncate_item with the correct args based on the
484 * overlap, and fixes up the key as required. 497 * overlap, and fixes up the key as required.
485 */ 498 */
486static noinline int truncate_one_csum(struct btrfs_trans_handle *trans, 499static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
487 struct btrfs_root *root, 500 struct btrfs_root *root,
488 struct btrfs_path *path, 501 struct btrfs_path *path,
489 struct btrfs_key *key, 502 struct btrfs_key *key,
490 u64 bytenr, u64 len) 503 u64 bytenr, u64 len)
491{ 504{
492 struct extent_buffer *leaf; 505 struct extent_buffer *leaf;
493 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); 506 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
494 u64 csum_end; 507 u64 csum_end;
495 u64 end_byte = bytenr + len; 508 u64 end_byte = bytenr + len;
496 u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits; 509 u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;
497 int ret;
498 510
499 leaf = path->nodes[0]; 511 leaf = path->nodes[0];
500 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size; 512 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
@@ -510,7 +522,7 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
510 */ 522 */
511 u32 new_size = (bytenr - key->offset) >> blocksize_bits; 523 u32 new_size = (bytenr - key->offset) >> blocksize_bits;
512 new_size *= csum_size; 524 new_size *= csum_size;
513 ret = btrfs_truncate_item(trans, root, path, new_size, 1); 525 btrfs_truncate_item(trans, root, path, new_size, 1);
514 } else if (key->offset >= bytenr && csum_end > end_byte && 526 } else if (key->offset >= bytenr && csum_end > end_byte &&
515 end_byte > key->offset) { 527 end_byte > key->offset) {
516 /* 528 /*
@@ -522,15 +534,13 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
522 u32 new_size = (csum_end - end_byte) >> blocksize_bits; 534 u32 new_size = (csum_end - end_byte) >> blocksize_bits;
523 new_size *= csum_size; 535 new_size *= csum_size;
524 536
525 ret = btrfs_truncate_item(trans, root, path, new_size, 0); 537 btrfs_truncate_item(trans, root, path, new_size, 0);
526 538
527 key->offset = end_byte; 539 key->offset = end_byte;
528 ret = btrfs_set_item_key_safe(trans, root, path, key); 540 btrfs_set_item_key_safe(trans, root, path, key);
529 BUG_ON(ret);
530 } else { 541 } else {
531 BUG(); 542 BUG();
532 } 543 }
533 return 0;
534} 544}
535 545
536/* 546/*
@@ -635,13 +645,14 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
635 * item changed size or key 645 * item changed size or key
636 */ 646 */
637 ret = btrfs_split_item(trans, root, path, &key, offset); 647 ret = btrfs_split_item(trans, root, path, &key, offset);
638 BUG_ON(ret && ret != -EAGAIN); 648 if (ret && ret != -EAGAIN) {
649 btrfs_abort_transaction(trans, root, ret);
650 goto out;
651 }
639 652
640 key.offset = end_byte - 1; 653 key.offset = end_byte - 1;
641 } else { 654 } else {
642 ret = truncate_one_csum(trans, root, path, 655 truncate_one_csum(trans, root, path, &key, bytenr, len);
643 &key, bytenr, len);
644 BUG_ON(ret);
645 if (key.offset < bytenr) 656 if (key.offset < bytenr)
646 break; 657 break;
647 } 658 }
@@ -772,7 +783,7 @@ again:
772 if (diff != csum_size) 783 if (diff != csum_size)
773 goto insert; 784 goto insert;
774 785
775 ret = btrfs_extend_item(trans, root, path, diff); 786 btrfs_extend_item(trans, root, path, diff);
776 goto csum; 787 goto csum;
777 } 788 }
778 789
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e8d06b6b9194..d83260d7498f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -452,7 +452,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
452 split = alloc_extent_map(); 452 split = alloc_extent_map();
453 if (!split2) 453 if (!split2)
454 split2 = alloc_extent_map(); 454 split2 = alloc_extent_map();
455 BUG_ON(!split || !split2); 455 BUG_ON(!split || !split2); /* -ENOMEM */
456 456
457 write_lock(&em_tree->lock); 457 write_lock(&em_tree->lock);
458 em = lookup_extent_mapping(em_tree, start, len); 458 em = lookup_extent_mapping(em_tree, start, len);
@@ -494,7 +494,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
494 split->flags = flags; 494 split->flags = flags;
495 split->compress_type = em->compress_type; 495 split->compress_type = em->compress_type;
496 ret = add_extent_mapping(em_tree, split); 496 ret = add_extent_mapping(em_tree, split);
497 BUG_ON(ret); 497 BUG_ON(ret); /* Logic error */
498 free_extent_map(split); 498 free_extent_map(split);
499 split = split2; 499 split = split2;
500 split2 = NULL; 500 split2 = NULL;
@@ -520,7 +520,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
520 } 520 }
521 521
522 ret = add_extent_mapping(em_tree, split); 522 ret = add_extent_mapping(em_tree, split);
523 BUG_ON(ret); 523 BUG_ON(ret); /* Logic error */
524 free_extent_map(split); 524 free_extent_map(split);
525 split = NULL; 525 split = NULL;
526 } 526 }
@@ -679,7 +679,7 @@ next_slot:
679 root->root_key.objectid, 679 root->root_key.objectid,
680 new_key.objectid, 680 new_key.objectid,
681 start - extent_offset, 0); 681 start - extent_offset, 0);
682 BUG_ON(ret); 682 BUG_ON(ret); /* -ENOMEM */
683 *hint_byte = disk_bytenr; 683 *hint_byte = disk_bytenr;
684 } 684 }
685 key.offset = start; 685 key.offset = start;
@@ -754,7 +754,7 @@ next_slot:
754 root->root_key.objectid, 754 root->root_key.objectid,
755 key.objectid, key.offset - 755 key.objectid, key.offset -
756 extent_offset, 0); 756 extent_offset, 0);
757 BUG_ON(ret); 757 BUG_ON(ret); /* -ENOMEM */
758 inode_sub_bytes(inode, 758 inode_sub_bytes(inode,
759 extent_end - key.offset); 759 extent_end - key.offset);
760 *hint_byte = disk_bytenr; 760 *hint_byte = disk_bytenr;
@@ -770,7 +770,10 @@ next_slot:
770 770
771 ret = btrfs_del_items(trans, root, path, del_slot, 771 ret = btrfs_del_items(trans, root, path, del_slot,
772 del_nr); 772 del_nr);
773 BUG_ON(ret); 773 if (ret) {
774 btrfs_abort_transaction(trans, root, ret);
775 goto out;
776 }
774 777
775 del_nr = 0; 778 del_nr = 0;
776 del_slot = 0; 779 del_slot = 0;
@@ -782,11 +785,13 @@ next_slot:
782 BUG_ON(1); 785 BUG_ON(1);
783 } 786 }
784 787
785 if (del_nr > 0) { 788 if (!ret && del_nr > 0) {
786 ret = btrfs_del_items(trans, root, path, del_slot, del_nr); 789 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
787 BUG_ON(ret); 790 if (ret)
791 btrfs_abort_transaction(trans, root, ret);
788 } 792 }
789 793
794out:
790 btrfs_free_path(path); 795 btrfs_free_path(path);
791 return ret; 796 return ret;
792} 797}
@@ -944,7 +949,10 @@ again:
944 btrfs_release_path(path); 949 btrfs_release_path(path);
945 goto again; 950 goto again;
946 } 951 }
947 BUG_ON(ret < 0); 952 if (ret < 0) {
953 btrfs_abort_transaction(trans, root, ret);
954 goto out;
955 }
948 956
949 leaf = path->nodes[0]; 957 leaf = path->nodes[0];
950 fi = btrfs_item_ptr(leaf, path->slots[0] - 1, 958 fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
@@ -963,7 +971,7 @@ again:
963 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, 971 ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
964 root->root_key.objectid, 972 root->root_key.objectid,
965 ino, orig_offset, 0); 973 ino, orig_offset, 0);
966 BUG_ON(ret); 974 BUG_ON(ret); /* -ENOMEM */
967 975
968 if (split == start) { 976 if (split == start) {
969 key.offset = start; 977 key.offset = start;
@@ -990,7 +998,7 @@ again:
990 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 998 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
991 0, root->root_key.objectid, 999 0, root->root_key.objectid,
992 ino, orig_offset, 0); 1000 ino, orig_offset, 0);
993 BUG_ON(ret); 1001 BUG_ON(ret); /* -ENOMEM */
994 } 1002 }
995 other_start = 0; 1003 other_start = 0;
996 other_end = start; 1004 other_end = start;
@@ -1007,7 +1015,7 @@ again:
1007 ret = btrfs_free_extent(trans, root, bytenr, num_bytes, 1015 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1008 0, root->root_key.objectid, 1016 0, root->root_key.objectid,
1009 ino, orig_offset, 0); 1017 ino, orig_offset, 0);
1010 BUG_ON(ret); 1018 BUG_ON(ret); /* -ENOMEM */
1011 } 1019 }
1012 if (del_nr == 0) { 1020 if (del_nr == 0) {
1013 fi = btrfs_item_ptr(leaf, path->slots[0], 1021 fi = btrfs_item_ptr(leaf, path->slots[0],
@@ -1025,7 +1033,10 @@ again:
1025 btrfs_mark_buffer_dirty(leaf); 1033 btrfs_mark_buffer_dirty(leaf);
1026 1034
1027 ret = btrfs_del_items(trans, root, path, del_slot, del_nr); 1035 ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
1028 BUG_ON(ret); 1036 if (ret < 0) {
1037 btrfs_abort_transaction(trans, root, ret);
1038 goto out;
1039 }
1029 } 1040 }
1030out: 1041out:
1031 btrfs_free_path(path); 1042 btrfs_free_path(path);
@@ -1105,8 +1116,7 @@ again:
1105 if (start_pos < inode->i_size) { 1116 if (start_pos < inode->i_size) {
1106 struct btrfs_ordered_extent *ordered; 1117 struct btrfs_ordered_extent *ordered;
1107 lock_extent_bits(&BTRFS_I(inode)->io_tree, 1118 lock_extent_bits(&BTRFS_I(inode)->io_tree,
1108 start_pos, last_pos - 1, 0, &cached_state, 1119 start_pos, last_pos - 1, 0, &cached_state);
1109 GFP_NOFS);
1110 ordered = btrfs_lookup_first_ordered_extent(inode, 1120 ordered = btrfs_lookup_first_ordered_extent(inode,
1111 last_pos - 1); 1121 last_pos - 1);
1112 if (ordered && 1122 if (ordered &&
@@ -1638,7 +1648,7 @@ static long btrfs_fallocate(struct file *file, int mode,
1638 * transaction 1648 * transaction
1639 */ 1649 */
1640 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, 1650 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
1641 locked_end, 0, &cached_state, GFP_NOFS); 1651 locked_end, 0, &cached_state);
1642 ordered = btrfs_lookup_first_ordered_extent(inode, 1652 ordered = btrfs_lookup_first_ordered_extent(inode,
1643 alloc_end - 1); 1653 alloc_end - 1);
1644 if (ordered && 1654 if (ordered &&
@@ -1667,7 +1677,13 @@ static long btrfs_fallocate(struct file *file, int mode,
1667 1677
1668 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 1678 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
1669 alloc_end - cur_offset, 0); 1679 alloc_end - cur_offset, 0);
1670 BUG_ON(IS_ERR_OR_NULL(em)); 1680 if (IS_ERR_OR_NULL(em)) {
1681 if (!em)
1682 ret = -ENOMEM;
1683 else
1684 ret = PTR_ERR(em);
1685 break;
1686 }
1671 last_byte = min(extent_map_end(em), alloc_end); 1687 last_byte = min(extent_map_end(em), alloc_end);
1672 actual_end = min_t(u64, extent_map_end(em), offset + len); 1688 actual_end = min_t(u64, extent_map_end(em), offset + len);
1673 last_byte = (last_byte + mask) & ~mask; 1689 last_byte = (last_byte + mask) & ~mask;
@@ -1737,7 +1753,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
1737 return -ENXIO; 1753 return -ENXIO;
1738 1754
1739 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0, 1755 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
1740 &cached_state, GFP_NOFS); 1756 &cached_state);
1741 1757
1742 /* 1758 /*
1743 * Delalloc is such a pain. If we have a hole and we have pending 1759 * Delalloc is such a pain. If we have a hole and we have pending
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index b02e379b14c7..e88330d3df52 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -230,11 +230,13 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
230 230
231 if (ret) { 231 if (ret) {
232 trans->block_rsv = rsv; 232 trans->block_rsv = rsv;
233 WARN_ON(1); 233 btrfs_abort_transaction(trans, root, ret);
234 return ret; 234 return ret;
235 } 235 }
236 236
237 ret = btrfs_update_inode(trans, root, inode); 237 ret = btrfs_update_inode(trans, root, inode);
238 if (ret)
239 btrfs_abort_transaction(trans, root, ret);
238 trans->block_rsv = rsv; 240 trans->block_rsv = rsv;
239 241
240 return ret; 242 return ret;
@@ -869,7 +871,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
869 io_ctl_prepare_pages(&io_ctl, inode, 0); 871 io_ctl_prepare_pages(&io_ctl, inode, 0);
870 872
871 lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, 873 lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
872 0, &cached_state, GFP_NOFS); 874 0, &cached_state);
873 875
874 node = rb_first(&ctl->free_space_offset); 876 node = rb_first(&ctl->free_space_offset);
875 if (!node && cluster) { 877 if (!node && cluster) {
@@ -1948,14 +1950,14 @@ again:
1948 */ 1950 */
1949 ret = btrfs_add_free_space(block_group, old_start, 1951 ret = btrfs_add_free_space(block_group, old_start,
1950 offset - old_start); 1952 offset - old_start);
1951 WARN_ON(ret); 1953 WARN_ON(ret); /* -ENOMEM */
1952 goto out; 1954 goto out;
1953 } 1955 }
1954 1956
1955 ret = remove_from_bitmap(ctl, info, &offset, &bytes); 1957 ret = remove_from_bitmap(ctl, info, &offset, &bytes);
1956 if (ret == -EAGAIN) 1958 if (ret == -EAGAIN)
1957 goto again; 1959 goto again;
1958 BUG_ON(ret); 1960 BUG_ON(ret); /* logic error */
1959out_lock: 1961out_lock:
1960 spin_unlock(&ctl->tree_lock); 1962 spin_unlock(&ctl->tree_lock);
1961out: 1963out:
@@ -2346,7 +2348,7 @@ again:
2346 rb_erase(&entry->offset_index, &ctl->free_space_offset); 2348 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2347 ret = tree_insert_offset(&cluster->root, entry->offset, 2349 ret = tree_insert_offset(&cluster->root, entry->offset,
2348 &entry->offset_index, 1); 2350 &entry->offset_index, 1);
2349 BUG_ON(ret); 2351 BUG_ON(ret); /* -EEXIST; Logic error */
2350 2352
2351 trace_btrfs_setup_cluster(block_group, cluster, 2353 trace_btrfs_setup_cluster(block_group, cluster,
2352 total_found * block_group->sectorsize, 1); 2354 total_found * block_group->sectorsize, 1);
@@ -2439,7 +2441,7 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2439 ret = tree_insert_offset(&cluster->root, entry->offset, 2441 ret = tree_insert_offset(&cluster->root, entry->offset,
2440 &entry->offset_index, 0); 2442 &entry->offset_index, 0);
2441 total_size += entry->bytes; 2443 total_size += entry->bytes;
2442 BUG_ON(ret); 2444 BUG_ON(ret); /* -EEXIST; Logic error */
2443 } while (node && entry != last); 2445 } while (node && entry != last);
2444 2446
2445 cluster->max_size = max_extent; 2447 cluster->max_size = max_extent;
@@ -2830,6 +2832,7 @@ u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
2830 int ret; 2832 int ret;
2831 2833
2832 ret = search_bitmap(ctl, entry, &offset, &count); 2834 ret = search_bitmap(ctl, entry, &offset, &count);
2835 /* Logic error; Should be empty if it can't find anything */
2833 BUG_ON(ret); 2836 BUG_ON(ret);
2834 2837
2835 ino = offset; 2838 ino = offset;
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index baa74f3db691..a13cf1a96c73 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -19,6 +19,7 @@
19#include "ctree.h" 19#include "ctree.h"
20#include "disk-io.h" 20#include "disk-io.h"
21#include "transaction.h" 21#include "transaction.h"
22#include "print-tree.h"
22 23
23static int find_name_in_backref(struct btrfs_path *path, const char *name, 24static int find_name_in_backref(struct btrfs_path *path, const char *name,
24 int name_len, struct btrfs_inode_ref **ref_ret) 25 int name_len, struct btrfs_inode_ref **ref_ret)
@@ -128,13 +129,14 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
128 item_start = btrfs_item_ptr_offset(leaf, path->slots[0]); 129 item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
129 memmove_extent_buffer(leaf, ptr, ptr + sub_item_len, 130 memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
130 item_size - (ptr + sub_item_len - item_start)); 131 item_size - (ptr + sub_item_len - item_start));
131 ret = btrfs_truncate_item(trans, root, path, 132 btrfs_truncate_item(trans, root, path,
132 item_size - sub_item_len, 1); 133 item_size - sub_item_len, 1);
133out: 134out:
134 btrfs_free_path(path); 135 btrfs_free_path(path);
135 return ret; 136 return ret;
136} 137}
137 138
139/* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */
138int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, 140int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
139 struct btrfs_root *root, 141 struct btrfs_root *root,
140 const char *name, int name_len, 142 const char *name, int name_len,
@@ -165,7 +167,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
165 goto out; 167 goto out;
166 168
167 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); 169 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
168 ret = btrfs_extend_item(trans, root, path, ins_len); 170 btrfs_extend_item(trans, root, path, ins_len);
169 ref = btrfs_item_ptr(path->nodes[0], path->slots[0], 171 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
170 struct btrfs_inode_ref); 172 struct btrfs_inode_ref);
171 ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size); 173 ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index ee15d88b33d2..b1a1c929ba80 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -178,7 +178,7 @@ static void start_caching(struct btrfs_root *root)
178 178
179 tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n", 179 tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
180 root->root_key.objectid); 180 root->root_key.objectid);
181 BUG_ON(IS_ERR(tsk)); 181 BUG_ON(IS_ERR(tsk)); /* -ENOMEM */
182} 182}
183 183
184int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) 184int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
@@ -271,7 +271,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
271 break; 271 break;
272 272
273 info = rb_entry(n, struct btrfs_free_space, offset_index); 273 info = rb_entry(n, struct btrfs_free_space, offset_index);
274 BUG_ON(info->bitmap); 274 BUG_ON(info->bitmap); /* Logic error */
275 275
276 if (info->offset > root->cache_progress) 276 if (info->offset > root->cache_progress)
277 goto free; 277 goto free;
@@ -439,17 +439,16 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
439 if (ret) 439 if (ret)
440 goto out; 440 goto out;
441 trace_btrfs_space_reservation(root->fs_info, "ino_cache", 441 trace_btrfs_space_reservation(root->fs_info, "ino_cache",
442 (u64)(unsigned long)trans, 442 trans->transid, trans->bytes_reserved, 1);
443 trans->bytes_reserved, 1);
444again: 443again:
445 inode = lookup_free_ino_inode(root, path); 444 inode = lookup_free_ino_inode(root, path);
446 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 445 if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
447 ret = PTR_ERR(inode); 446 ret = PTR_ERR(inode);
448 goto out_release; 447 goto out_release;
449 } 448 }
450 449
451 if (IS_ERR(inode)) { 450 if (IS_ERR(inode)) {
452 BUG_ON(retry); 451 BUG_ON(retry); /* Logic error */
453 retry = true; 452 retry = true;
454 453
455 ret = create_free_ino_inode(root, trans, path); 454 ret = create_free_ino_inode(root, trans, path);
@@ -460,12 +459,17 @@ again:
460 459
461 BTRFS_I(inode)->generation = 0; 460 BTRFS_I(inode)->generation = 0;
462 ret = btrfs_update_inode(trans, root, inode); 461 ret = btrfs_update_inode(trans, root, inode);
463 WARN_ON(ret); 462 if (ret) {
463 btrfs_abort_transaction(trans, root, ret);
464 goto out_put;
465 }
464 466
465 if (i_size_read(inode) > 0) { 467 if (i_size_read(inode) > 0) {
466 ret = btrfs_truncate_free_space_cache(root, trans, path, inode); 468 ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
467 if (ret) 469 if (ret) {
470 btrfs_abort_transaction(trans, root, ret);
468 goto out_put; 471 goto out_put;
472 }
469 } 473 }
470 474
471 spin_lock(&root->cache_lock); 475 spin_lock(&root->cache_lock);
@@ -502,8 +506,7 @@ out_put:
502 iput(inode); 506 iput(inode);
503out_release: 507out_release:
504 trace_btrfs_space_reservation(root->fs_info, "ino_cache", 508 trace_btrfs_space_reservation(root->fs_info, "ino_cache",
505 (u64)(unsigned long)trans, 509 trans->transid, trans->bytes_reserved, 0);
506 trans->bytes_reserved, 0);
507 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved); 510 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
508out: 511out:
509 trans->block_rsv = rsv; 512 trans->block_rsv = rsv;
@@ -532,7 +535,7 @@ static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
532 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 535 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
533 if (ret < 0) 536 if (ret < 0)
534 goto error; 537 goto error;
535 BUG_ON(ret == 0); 538 BUG_ON(ret == 0); /* Corruption */
536 if (path->slots[0] > 0) { 539 if (path->slots[0] > 0) {
537 slot = path->slots[0] - 1; 540 slot = path->slots[0] - 1;
538 l = path->nodes[0]; 541 l = path->nodes[0];
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3a0b5c1f9d31..115bc05e42b0 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -150,7 +150,6 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
150 inode_add_bytes(inode, size); 150 inode_add_bytes(inode, size);
151 ret = btrfs_insert_empty_item(trans, root, path, &key, 151 ret = btrfs_insert_empty_item(trans, root, path, &key,
152 datasize); 152 datasize);
153 BUG_ON(ret);
154 if (ret) { 153 if (ret) {
155 err = ret; 154 err = ret;
156 goto fail; 155 goto fail;
@@ -206,9 +205,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
206 * could end up racing with unlink. 205 * could end up racing with unlink.
207 */ 206 */
208 BTRFS_I(inode)->disk_i_size = inode->i_size; 207 BTRFS_I(inode)->disk_i_size = inode->i_size;
209 btrfs_update_inode(trans, root, inode); 208 ret = btrfs_update_inode(trans, root, inode);
210 209
211 return 0; 210 return ret;
212fail: 211fail:
213 btrfs_free_path(path); 212 btrfs_free_path(path);
214 return err; 213 return err;
@@ -250,14 +249,18 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
250 249
251 ret = btrfs_drop_extents(trans, inode, start, aligned_end, 250 ret = btrfs_drop_extents(trans, inode, start, aligned_end,
252 &hint_byte, 1); 251 &hint_byte, 1);
253 BUG_ON(ret); 252 if (ret)
253 return ret;
254 254
255 if (isize > actual_end) 255 if (isize > actual_end)
256 inline_len = min_t(u64, isize, actual_end); 256 inline_len = min_t(u64, isize, actual_end);
257 ret = insert_inline_extent(trans, root, inode, start, 257 ret = insert_inline_extent(trans, root, inode, start,
258 inline_len, compressed_size, 258 inline_len, compressed_size,
259 compress_type, compressed_pages); 259 compress_type, compressed_pages);
260 BUG_ON(ret); 260 if (ret) {
261 btrfs_abort_transaction(trans, root, ret);
262 return ret;
263 }
261 btrfs_delalloc_release_metadata(inode, end + 1 - start); 264 btrfs_delalloc_release_metadata(inode, end + 1 - start);
262 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); 265 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
263 return 0; 266 return 0;
@@ -293,7 +296,7 @@ static noinline int add_async_extent(struct async_cow *cow,
293 struct async_extent *async_extent; 296 struct async_extent *async_extent;
294 297
295 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); 298 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
296 BUG_ON(!async_extent); 299 BUG_ON(!async_extent); /* -ENOMEM */
297 async_extent->start = start; 300 async_extent->start = start;
298 async_extent->ram_size = ram_size; 301 async_extent->ram_size = ram_size;
299 async_extent->compressed_size = compressed_size; 302 async_extent->compressed_size = compressed_size;
@@ -344,8 +347,9 @@ static noinline int compress_file_range(struct inode *inode,
344 int will_compress; 347 int will_compress;
345 int compress_type = root->fs_info->compress_type; 348 int compress_type = root->fs_info->compress_type;
346 349
347 /* if this is a small write inside eof, kick off a defragbot */ 350 /* if this is a small write inside eof, kick off a defrag */
348 if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024) 351 if ((end - start + 1) < 16 * 1024 &&
352 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
349 btrfs_add_inode_defrag(NULL, inode); 353 btrfs_add_inode_defrag(NULL, inode);
350 354
351 actual_end = min_t(u64, isize, end + 1); 355 actual_end = min_t(u64, isize, end + 1);
@@ -433,7 +437,11 @@ again:
433cont: 437cont:
434 if (start == 0) { 438 if (start == 0) {
435 trans = btrfs_join_transaction(root); 439 trans = btrfs_join_transaction(root);
436 BUG_ON(IS_ERR(trans)); 440 if (IS_ERR(trans)) {
441 ret = PTR_ERR(trans);
442 trans = NULL;
443 goto cleanup_and_out;
444 }
437 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 445 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
438 446
439 /* lets try to make an inline extent */ 447 /* lets try to make an inline extent */
@@ -450,11 +458,11 @@ cont:
450 total_compressed, 458 total_compressed,
451 compress_type, pages); 459 compress_type, pages);
452 } 460 }
453 if (ret == 0) { 461 if (ret <= 0) {
454 /* 462 /*
455 * inline extent creation worked, we don't need 463 * inline extent creation worked or returned error,
456 * to create any more async work items. Unlock 464 * we don't need to create any more async work items.
457 * and free up our temp pages. 465 * Unlock and free up our temp pages.
458 */ 466 */
459 extent_clear_unlock_delalloc(inode, 467 extent_clear_unlock_delalloc(inode,
460 &BTRFS_I(inode)->io_tree, 468 &BTRFS_I(inode)->io_tree,
@@ -547,7 +555,7 @@ cleanup_and_bail_uncompressed:
547 } 555 }
548 556
549out: 557out:
550 return 0; 558 return ret;
551 559
552free_pages_out: 560free_pages_out:
553 for (i = 0; i < nr_pages_ret; i++) { 561 for (i = 0; i < nr_pages_ret; i++) {
@@ -557,6 +565,20 @@ free_pages_out:
557 kfree(pages); 565 kfree(pages);
558 566
559 goto out; 567 goto out;
568
569cleanup_and_out:
570 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
571 start, end, NULL,
572 EXTENT_CLEAR_UNLOCK_PAGE |
573 EXTENT_CLEAR_DIRTY |
574 EXTENT_CLEAR_DELALLOC |
575 EXTENT_SET_WRITEBACK |
576 EXTENT_END_WRITEBACK);
577 if (!trans || IS_ERR(trans))
578 btrfs_error(root->fs_info, ret, "Failed to join transaction");
579 else
580 btrfs_abort_transaction(trans, root, ret);
581 goto free_pages_out;
560} 582}
561 583
562/* 584/*
@@ -597,7 +619,7 @@ retry:
597 619
598 lock_extent(io_tree, async_extent->start, 620 lock_extent(io_tree, async_extent->start,
599 async_extent->start + 621 async_extent->start +
600 async_extent->ram_size - 1, GFP_NOFS); 622 async_extent->ram_size - 1);
601 623
602 /* allocate blocks */ 624 /* allocate blocks */
603 ret = cow_file_range(inode, async_cow->locked_page, 625 ret = cow_file_range(inode, async_cow->locked_page,
@@ -606,6 +628,8 @@ retry:
606 async_extent->ram_size - 1, 628 async_extent->ram_size - 1,
607 &page_started, &nr_written, 0); 629 &page_started, &nr_written, 0);
608 630
631 /* JDM XXX */
632
609 /* 633 /*
610 * if page_started, cow_file_range inserted an 634 * if page_started, cow_file_range inserted an
611 * inline extent and took care of all the unlocking 635 * inline extent and took care of all the unlocking
@@ -625,18 +649,21 @@ retry:
625 } 649 }
626 650
627 lock_extent(io_tree, async_extent->start, 651 lock_extent(io_tree, async_extent->start,
628 async_extent->start + async_extent->ram_size - 1, 652 async_extent->start + async_extent->ram_size - 1);
629 GFP_NOFS);
630 653
631 trans = btrfs_join_transaction(root); 654 trans = btrfs_join_transaction(root);
632 BUG_ON(IS_ERR(trans)); 655 if (IS_ERR(trans)) {
633 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 656 ret = PTR_ERR(trans);
634 ret = btrfs_reserve_extent(trans, root, 657 } else {
658 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
659 ret = btrfs_reserve_extent(trans, root,
635 async_extent->compressed_size, 660 async_extent->compressed_size,
636 async_extent->compressed_size, 661 async_extent->compressed_size,
637 0, alloc_hint, 662 0, alloc_hint, &ins, 1);
638 (u64)-1, &ins, 1); 663 if (ret)
639 btrfs_end_transaction(trans, root); 664 btrfs_abort_transaction(trans, root, ret);
665 btrfs_end_transaction(trans, root);
666 }
640 667
641 if (ret) { 668 if (ret) {
642 int i; 669 int i;
@@ -649,8 +676,10 @@ retry:
649 async_extent->pages = NULL; 676 async_extent->pages = NULL;
650 unlock_extent(io_tree, async_extent->start, 677 unlock_extent(io_tree, async_extent->start,
651 async_extent->start + 678 async_extent->start +
652 async_extent->ram_size - 1, GFP_NOFS); 679 async_extent->ram_size - 1);
653 goto retry; 680 if (ret == -ENOSPC)
681 goto retry;
682 goto out_free; /* JDM: Requeue? */
654 } 683 }
655 684
656 /* 685 /*
@@ -662,7 +691,7 @@ retry:
662 async_extent->ram_size - 1, 0); 691 async_extent->ram_size - 1, 0);
663 692
664 em = alloc_extent_map(); 693 em = alloc_extent_map();
665 BUG_ON(!em); 694 BUG_ON(!em); /* -ENOMEM */
666 em->start = async_extent->start; 695 em->start = async_extent->start;
667 em->len = async_extent->ram_size; 696 em->len = async_extent->ram_size;
668 em->orig_start = em->start; 697 em->orig_start = em->start;
@@ -694,7 +723,7 @@ retry:
694 ins.offset, 723 ins.offset,
695 BTRFS_ORDERED_COMPRESSED, 724 BTRFS_ORDERED_COMPRESSED,
696 async_extent->compress_type); 725 async_extent->compress_type);
697 BUG_ON(ret); 726 BUG_ON(ret); /* -ENOMEM */
698 727
699 /* 728 /*
700 * clear dirty, set writeback and unlock the pages. 729 * clear dirty, set writeback and unlock the pages.
@@ -716,13 +745,17 @@ retry:
716 ins.offset, async_extent->pages, 745 ins.offset, async_extent->pages,
717 async_extent->nr_pages); 746 async_extent->nr_pages);
718 747
719 BUG_ON(ret); 748 BUG_ON(ret); /* -ENOMEM */
720 alloc_hint = ins.objectid + ins.offset; 749 alloc_hint = ins.objectid + ins.offset;
721 kfree(async_extent); 750 kfree(async_extent);
722 cond_resched(); 751 cond_resched();
723 } 752 }
724 753 ret = 0;
725 return 0; 754out:
755 return ret;
756out_free:
757 kfree(async_extent);
758 goto out;
726} 759}
727 760
728static u64 get_extent_allocation_hint(struct inode *inode, u64 start, 761static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
@@ -791,7 +824,18 @@ static noinline int cow_file_range(struct inode *inode,
791 824
792 BUG_ON(btrfs_is_free_space_inode(root, inode)); 825 BUG_ON(btrfs_is_free_space_inode(root, inode));
793 trans = btrfs_join_transaction(root); 826 trans = btrfs_join_transaction(root);
794 BUG_ON(IS_ERR(trans)); 827 if (IS_ERR(trans)) {
828 extent_clear_unlock_delalloc(inode,
829 &BTRFS_I(inode)->io_tree,
830 start, end, NULL,
831 EXTENT_CLEAR_UNLOCK_PAGE |
832 EXTENT_CLEAR_UNLOCK |
833 EXTENT_CLEAR_DELALLOC |
834 EXTENT_CLEAR_DIRTY |
835 EXTENT_SET_WRITEBACK |
836 EXTENT_END_WRITEBACK);
837 return PTR_ERR(trans);
838 }
795 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 839 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
796 840
797 num_bytes = (end - start + blocksize) & ~(blocksize - 1); 841 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
@@ -800,7 +844,8 @@ static noinline int cow_file_range(struct inode *inode,
800 ret = 0; 844 ret = 0;
801 845
802 /* if this is a small write inside eof, kick off defrag */ 846 /* if this is a small write inside eof, kick off defrag */
803 if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024) 847 if (num_bytes < 64 * 1024 &&
848 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
804 btrfs_add_inode_defrag(trans, inode); 849 btrfs_add_inode_defrag(trans, inode);
805 850
806 if (start == 0) { 851 if (start == 0) {
@@ -821,8 +866,10 @@ static noinline int cow_file_range(struct inode *inode,
821 *nr_written = *nr_written + 866 *nr_written = *nr_written +
822 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; 867 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
823 *page_started = 1; 868 *page_started = 1;
824 ret = 0;
825 goto out; 869 goto out;
870 } else if (ret < 0) {
871 btrfs_abort_transaction(trans, root, ret);
872 goto out_unlock;
826 } 873 }
827 } 874 }
828 875
@@ -838,11 +885,14 @@ static noinline int cow_file_range(struct inode *inode,
838 cur_alloc_size = disk_num_bytes; 885 cur_alloc_size = disk_num_bytes;
839 ret = btrfs_reserve_extent(trans, root, cur_alloc_size, 886 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
840 root->sectorsize, 0, alloc_hint, 887 root->sectorsize, 0, alloc_hint,
841 (u64)-1, &ins, 1); 888 &ins, 1);
842 BUG_ON(ret); 889 if (ret < 0) {
890 btrfs_abort_transaction(trans, root, ret);
891 goto out_unlock;
892 }
843 893
844 em = alloc_extent_map(); 894 em = alloc_extent_map();
845 BUG_ON(!em); 895 BUG_ON(!em); /* -ENOMEM */
846 em->start = start; 896 em->start = start;
847 em->orig_start = em->start; 897 em->orig_start = em->start;
848 ram_size = ins.offset; 898 ram_size = ins.offset;
@@ -868,13 +918,16 @@ static noinline int cow_file_range(struct inode *inode,
868 cur_alloc_size = ins.offset; 918 cur_alloc_size = ins.offset;
869 ret = btrfs_add_ordered_extent(inode, start, ins.objectid, 919 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
870 ram_size, cur_alloc_size, 0); 920 ram_size, cur_alloc_size, 0);
871 BUG_ON(ret); 921 BUG_ON(ret); /* -ENOMEM */
872 922
873 if (root->root_key.objectid == 923 if (root->root_key.objectid ==
874 BTRFS_DATA_RELOC_TREE_OBJECTID) { 924 BTRFS_DATA_RELOC_TREE_OBJECTID) {
875 ret = btrfs_reloc_clone_csums(inode, start, 925 ret = btrfs_reloc_clone_csums(inode, start,
876 cur_alloc_size); 926 cur_alloc_size);
877 BUG_ON(ret); 927 if (ret) {
928 btrfs_abort_transaction(trans, root, ret);
929 goto out_unlock;
930 }
878 } 931 }
879 932
880 if (disk_num_bytes < cur_alloc_size) 933 if (disk_num_bytes < cur_alloc_size)
@@ -899,11 +952,23 @@ static noinline int cow_file_range(struct inode *inode,
899 alloc_hint = ins.objectid + ins.offset; 952 alloc_hint = ins.objectid + ins.offset;
900 start += cur_alloc_size; 953 start += cur_alloc_size;
901 } 954 }
902out:
903 ret = 0; 955 ret = 0;
956out:
904 btrfs_end_transaction(trans, root); 957 btrfs_end_transaction(trans, root);
905 958
906 return ret; 959 return ret;
960out_unlock:
961 extent_clear_unlock_delalloc(inode,
962 &BTRFS_I(inode)->io_tree,
963 start, end, NULL,
964 EXTENT_CLEAR_UNLOCK_PAGE |
965 EXTENT_CLEAR_UNLOCK |
966 EXTENT_CLEAR_DELALLOC |
967 EXTENT_CLEAR_DIRTY |
968 EXTENT_SET_WRITEBACK |
969 EXTENT_END_WRITEBACK);
970
971 goto out;
907} 972}
908 973
909/* 974/*
@@ -969,7 +1034,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
969 1, 0, NULL, GFP_NOFS); 1034 1, 0, NULL, GFP_NOFS);
970 while (start < end) { 1035 while (start < end) {
971 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); 1036 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
972 BUG_ON(!async_cow); 1037 BUG_ON(!async_cow); /* -ENOMEM */
973 async_cow->inode = inode; 1038 async_cow->inode = inode;
974 async_cow->root = root; 1039 async_cow->root = root;
975 async_cow->locked_page = locked_page; 1040 async_cow->locked_page = locked_page;
@@ -1060,7 +1125,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1060 u64 disk_bytenr; 1125 u64 disk_bytenr;
1061 u64 num_bytes; 1126 u64 num_bytes;
1062 int extent_type; 1127 int extent_type;
1063 int ret; 1128 int ret, err;
1064 int type; 1129 int type;
1065 int nocow; 1130 int nocow;
1066 int check_prev = 1; 1131 int check_prev = 1;
@@ -1078,7 +1143,11 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1078 else 1143 else
1079 trans = btrfs_join_transaction(root); 1144 trans = btrfs_join_transaction(root);
1080 1145
1081 BUG_ON(IS_ERR(trans)); 1146 if (IS_ERR(trans)) {
1147 btrfs_free_path(path);
1148 return PTR_ERR(trans);
1149 }
1150
1082 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1151 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1083 1152
1084 cow_start = (u64)-1; 1153 cow_start = (u64)-1;
@@ -1086,7 +1155,10 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1086 while (1) { 1155 while (1) {
1087 ret = btrfs_lookup_file_extent(trans, root, path, ino, 1156 ret = btrfs_lookup_file_extent(trans, root, path, ino,
1088 cur_offset, 0); 1157 cur_offset, 0);
1089 BUG_ON(ret < 0); 1158 if (ret < 0) {
1159 btrfs_abort_transaction(trans, root, ret);
1160 goto error;
1161 }
1090 if (ret > 0 && path->slots[0] > 0 && check_prev) { 1162 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1091 leaf = path->nodes[0]; 1163 leaf = path->nodes[0];
1092 btrfs_item_key_to_cpu(leaf, &found_key, 1164 btrfs_item_key_to_cpu(leaf, &found_key,
@@ -1100,8 +1172,10 @@ next_slot:
1100 leaf = path->nodes[0]; 1172 leaf = path->nodes[0];
1101 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 1173 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1102 ret = btrfs_next_leaf(root, path); 1174 ret = btrfs_next_leaf(root, path);
1103 if (ret < 0) 1175 if (ret < 0) {
1104 BUG_ON(1); 1176 btrfs_abort_transaction(trans, root, ret);
1177 goto error;
1178 }
1105 if (ret > 0) 1179 if (ret > 0)
1106 break; 1180 break;
1107 leaf = path->nodes[0]; 1181 leaf = path->nodes[0];
@@ -1189,7 +1263,10 @@ out_check:
1189 ret = cow_file_range(inode, locked_page, cow_start, 1263 ret = cow_file_range(inode, locked_page, cow_start,
1190 found_key.offset - 1, page_started, 1264 found_key.offset - 1, page_started,
1191 nr_written, 1); 1265 nr_written, 1);
1192 BUG_ON(ret); 1266 if (ret) {
1267 btrfs_abort_transaction(trans, root, ret);
1268 goto error;
1269 }
1193 cow_start = (u64)-1; 1270 cow_start = (u64)-1;
1194 } 1271 }
1195 1272
@@ -1198,7 +1275,7 @@ out_check:
1198 struct extent_map_tree *em_tree; 1275 struct extent_map_tree *em_tree;
1199 em_tree = &BTRFS_I(inode)->extent_tree; 1276 em_tree = &BTRFS_I(inode)->extent_tree;
1200 em = alloc_extent_map(); 1277 em = alloc_extent_map();
1201 BUG_ON(!em); 1278 BUG_ON(!em); /* -ENOMEM */
1202 em->start = cur_offset; 1279 em->start = cur_offset;
1203 em->orig_start = em->start; 1280 em->orig_start = em->start;
1204 em->len = num_bytes; 1281 em->len = num_bytes;
@@ -1224,13 +1301,16 @@ out_check:
1224 1301
1225 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, 1302 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1226 num_bytes, num_bytes, type); 1303 num_bytes, num_bytes, type);
1227 BUG_ON(ret); 1304 BUG_ON(ret); /* -ENOMEM */
1228 1305
1229 if (root->root_key.objectid == 1306 if (root->root_key.objectid ==
1230 BTRFS_DATA_RELOC_TREE_OBJECTID) { 1307 BTRFS_DATA_RELOC_TREE_OBJECTID) {
1231 ret = btrfs_reloc_clone_csums(inode, cur_offset, 1308 ret = btrfs_reloc_clone_csums(inode, cur_offset,
1232 num_bytes); 1309 num_bytes);
1233 BUG_ON(ret); 1310 if (ret) {
1311 btrfs_abort_transaction(trans, root, ret);
1312 goto error;
1313 }
1234 } 1314 }
1235 1315
1236 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, 1316 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
@@ -1249,18 +1329,23 @@ out_check:
1249 if (cow_start != (u64)-1) { 1329 if (cow_start != (u64)-1) {
1250 ret = cow_file_range(inode, locked_page, cow_start, end, 1330 ret = cow_file_range(inode, locked_page, cow_start, end,
1251 page_started, nr_written, 1); 1331 page_started, nr_written, 1);
1252 BUG_ON(ret); 1332 if (ret) {
1333 btrfs_abort_transaction(trans, root, ret);
1334 goto error;
1335 }
1253 } 1336 }
1254 1337
1338error:
1255 if (nolock) { 1339 if (nolock) {
1256 ret = btrfs_end_transaction_nolock(trans, root); 1340 err = btrfs_end_transaction_nolock(trans, root);
1257 BUG_ON(ret);
1258 } else { 1341 } else {
1259 ret = btrfs_end_transaction(trans, root); 1342 err = btrfs_end_transaction(trans, root);
1260 BUG_ON(ret);
1261 } 1343 }
1344 if (!ret)
1345 ret = err;
1346
1262 btrfs_free_path(path); 1347 btrfs_free_path(path);
1263 return 0; 1348 return ret;
1264} 1349}
1265 1350
1266/* 1351/*
@@ -1425,10 +1510,11 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1425 map_length = length; 1510 map_length = length;
1426 ret = btrfs_map_block(map_tree, READ, logical, 1511 ret = btrfs_map_block(map_tree, READ, logical,
1427 &map_length, NULL, 0); 1512 &map_length, NULL, 0);
1428 1513 /* Will always return 0 or 1 with map_multi == NULL */
1514 BUG_ON(ret < 0);
1429 if (map_length < length + size) 1515 if (map_length < length + size)
1430 return 1; 1516 return 1;
1431 return ret; 1517 return 0;
1432} 1518}
1433 1519
1434/* 1520/*
@@ -1448,7 +1534,7 @@ static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1448 int ret = 0; 1534 int ret = 0;
1449 1535
1450 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); 1536 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1451 BUG_ON(ret); 1537 BUG_ON(ret); /* -ENOMEM */
1452 return 0; 1538 return 0;
1453} 1539}
1454 1540
@@ -1479,14 +1565,16 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1479 struct btrfs_root *root = BTRFS_I(inode)->root; 1565 struct btrfs_root *root = BTRFS_I(inode)->root;
1480 int ret = 0; 1566 int ret = 0;
1481 int skip_sum; 1567 int skip_sum;
1568 int metadata = 0;
1482 1569
1483 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 1570 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1484 1571
1485 if (btrfs_is_free_space_inode(root, inode)) 1572 if (btrfs_is_free_space_inode(root, inode))
1486 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2); 1573 metadata = 2;
1487 else 1574
1488 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); 1575 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1489 BUG_ON(ret); 1576 if (ret)
1577 return ret;
1490 1578
1491 if (!(rw & REQ_WRITE)) { 1579 if (!(rw & REQ_WRITE)) {
1492 if (bio_flags & EXTENT_BIO_COMPRESSED) { 1580 if (bio_flags & EXTENT_BIO_COMPRESSED) {
@@ -1571,7 +1659,7 @@ again:
1571 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; 1659 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1572 1660
1573 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, 1661 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1574 &cached_state, GFP_NOFS); 1662 &cached_state);
1575 1663
1576 /* already ordered? We're done */ 1664 /* already ordered? We're done */
1577 if (PagePrivate2(page)) 1665 if (PagePrivate2(page))
@@ -1675,13 +1763,15 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1675 */ 1763 */
1676 ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes, 1764 ret = btrfs_drop_extents(trans, inode, file_pos, file_pos + num_bytes,
1677 &hint, 0); 1765 &hint, 0);
1678 BUG_ON(ret); 1766 if (ret)
1767 goto out;
1679 1768
1680 ins.objectid = btrfs_ino(inode); 1769 ins.objectid = btrfs_ino(inode);
1681 ins.offset = file_pos; 1770 ins.offset = file_pos;
1682 ins.type = BTRFS_EXTENT_DATA_KEY; 1771 ins.type = BTRFS_EXTENT_DATA_KEY;
1683 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); 1772 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1684 BUG_ON(ret); 1773 if (ret)
1774 goto out;
1685 leaf = path->nodes[0]; 1775 leaf = path->nodes[0];
1686 fi = btrfs_item_ptr(leaf, path->slots[0], 1776 fi = btrfs_item_ptr(leaf, path->slots[0],
1687 struct btrfs_file_extent_item); 1777 struct btrfs_file_extent_item);
@@ -1709,10 +1799,10 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1709 ret = btrfs_alloc_reserved_file_extent(trans, root, 1799 ret = btrfs_alloc_reserved_file_extent(trans, root,
1710 root->root_key.objectid, 1800 root->root_key.objectid,
1711 btrfs_ino(inode), file_pos, &ins); 1801 btrfs_ino(inode), file_pos, &ins);
1712 BUG_ON(ret); 1802out:
1713 btrfs_free_path(path); 1803 btrfs_free_path(path);
1714 1804
1715 return 0; 1805 return ret;
1716} 1806}
1717 1807
1718/* 1808/*
@@ -1740,35 +1830,41 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1740 end - start + 1); 1830 end - start + 1);
1741 if (!ret) 1831 if (!ret)
1742 return 0; 1832 return 0;
1743 BUG_ON(!ordered_extent); 1833 BUG_ON(!ordered_extent); /* Logic error */
1744 1834
1745 nolock = btrfs_is_free_space_inode(root, inode); 1835 nolock = btrfs_is_free_space_inode(root, inode);
1746 1836
1747 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 1837 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1748 BUG_ON(!list_empty(&ordered_extent->list)); 1838 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
1749 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1839 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1750 if (!ret) { 1840 if (!ret) {
1751 if (nolock) 1841 if (nolock)
1752 trans = btrfs_join_transaction_nolock(root); 1842 trans = btrfs_join_transaction_nolock(root);
1753 else 1843 else
1754 trans = btrfs_join_transaction(root); 1844 trans = btrfs_join_transaction(root);
1755 BUG_ON(IS_ERR(trans)); 1845 if (IS_ERR(trans))
1846 return PTR_ERR(trans);
1756 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1847 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1757 ret = btrfs_update_inode_fallback(trans, root, inode); 1848 ret = btrfs_update_inode_fallback(trans, root, inode);
1758 BUG_ON(ret); 1849 if (ret) /* -ENOMEM or corruption */
1850 btrfs_abort_transaction(trans, root, ret);
1759 } 1851 }
1760 goto out; 1852 goto out;
1761 } 1853 }
1762 1854
1763 lock_extent_bits(io_tree, ordered_extent->file_offset, 1855 lock_extent_bits(io_tree, ordered_extent->file_offset,
1764 ordered_extent->file_offset + ordered_extent->len - 1, 1856 ordered_extent->file_offset + ordered_extent->len - 1,
1765 0, &cached_state, GFP_NOFS); 1857 0, &cached_state);
1766 1858
1767 if (nolock) 1859 if (nolock)
1768 trans = btrfs_join_transaction_nolock(root); 1860 trans = btrfs_join_transaction_nolock(root);
1769 else 1861 else
1770 trans = btrfs_join_transaction(root); 1862 trans = btrfs_join_transaction(root);
1771 BUG_ON(IS_ERR(trans)); 1863 if (IS_ERR(trans)) {
1864 ret = PTR_ERR(trans);
1865 trans = NULL;
1866 goto out_unlock;
1867 }
1772 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1868 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1773 1869
1774 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 1870 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
@@ -1779,7 +1875,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1779 ordered_extent->file_offset, 1875 ordered_extent->file_offset,
1780 ordered_extent->file_offset + 1876 ordered_extent->file_offset +
1781 ordered_extent->len); 1877 ordered_extent->len);
1782 BUG_ON(ret);
1783 } else { 1878 } else {
1784 BUG_ON(root == root->fs_info->tree_root); 1879 BUG_ON(root == root->fs_info->tree_root);
1785 ret = insert_reserved_file_extent(trans, inode, 1880 ret = insert_reserved_file_extent(trans, inode,
@@ -1793,11 +1888,14 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1793 unpin_extent_cache(&BTRFS_I(inode)->extent_tree, 1888 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1794 ordered_extent->file_offset, 1889 ordered_extent->file_offset,
1795 ordered_extent->len); 1890 ordered_extent->len);
1796 BUG_ON(ret);
1797 } 1891 }
1798 unlock_extent_cached(io_tree, ordered_extent->file_offset, 1892 unlock_extent_cached(io_tree, ordered_extent->file_offset,
1799 ordered_extent->file_offset + 1893 ordered_extent->file_offset +
1800 ordered_extent->len - 1, &cached_state, GFP_NOFS); 1894 ordered_extent->len - 1, &cached_state, GFP_NOFS);
1895 if (ret < 0) {
1896 btrfs_abort_transaction(trans, root, ret);
1897 goto out;
1898 }
1801 1899
1802 add_pending_csums(trans, inode, ordered_extent->file_offset, 1900 add_pending_csums(trans, inode, ordered_extent->file_offset,
1803 &ordered_extent->list); 1901 &ordered_extent->list);
@@ -1805,7 +1903,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1805 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1903 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1806 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 1904 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1807 ret = btrfs_update_inode_fallback(trans, root, inode); 1905 ret = btrfs_update_inode_fallback(trans, root, inode);
1808 BUG_ON(ret); 1906 if (ret) { /* -ENOMEM or corruption */
1907 btrfs_abort_transaction(trans, root, ret);
1908 goto out;
1909 }
1809 } 1910 }
1810 ret = 0; 1911 ret = 0;
1811out: 1912out:
@@ -1824,6 +1925,11 @@ out:
1824 btrfs_put_ordered_extent(ordered_extent); 1925 btrfs_put_ordered_extent(ordered_extent);
1825 1926
1826 return 0; 1927 return 0;
1928out_unlock:
1929 unlock_extent_cached(io_tree, ordered_extent->file_offset,
1930 ordered_extent->file_offset +
1931 ordered_extent->len - 1, &cached_state, GFP_NOFS);
1932 goto out;
1827} 1933}
1828 1934
1829static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, 1935static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
@@ -1905,6 +2011,8 @@ struct delayed_iput {
1905 struct inode *inode; 2011 struct inode *inode;
1906}; 2012};
1907 2013
2014/* JDM: If this is fs-wide, why can't we add a pointer to
2015 * btrfs_inode instead and avoid the allocation? */
1908void btrfs_add_delayed_iput(struct inode *inode) 2016void btrfs_add_delayed_iput(struct inode *inode)
1909{ 2017{
1910 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; 2018 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
@@ -2051,20 +2159,27 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2051 /* grab metadata reservation from transaction handle */ 2159 /* grab metadata reservation from transaction handle */
2052 if (reserve) { 2160 if (reserve) {
2053 ret = btrfs_orphan_reserve_metadata(trans, inode); 2161 ret = btrfs_orphan_reserve_metadata(trans, inode);
2054 BUG_ON(ret); 2162 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
2055 } 2163 }
2056 2164
2057 /* insert an orphan item to track this unlinked/truncated file */ 2165 /* insert an orphan item to track this unlinked/truncated file */
2058 if (insert >= 1) { 2166 if (insert >= 1) {
2059 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); 2167 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2060 BUG_ON(ret && ret != -EEXIST); 2168 if (ret && ret != -EEXIST) {
2169 btrfs_abort_transaction(trans, root, ret);
2170 return ret;
2171 }
2172 ret = 0;
2061 } 2173 }
2062 2174
2063 /* insert an orphan item to track subvolume contains orphan files */ 2175 /* insert an orphan item to track subvolume contains orphan files */
2064 if (insert >= 2) { 2176 if (insert >= 2) {
2065 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root, 2177 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2066 root->root_key.objectid); 2178 root->root_key.objectid);
2067 BUG_ON(ret); 2179 if (ret && ret != -EEXIST) {
2180 btrfs_abort_transaction(trans, root, ret);
2181 return ret;
2182 }
2068 } 2183 }
2069 return 0; 2184 return 0;
2070} 2185}
@@ -2094,7 +2209,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2094 2209
2095 if (trans && delete_item) { 2210 if (trans && delete_item) {
2096 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode)); 2211 ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
2097 BUG_ON(ret); 2212 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
2098 } 2213 }
2099 2214
2100 if (release_rsv) 2215 if (release_rsv)
@@ -2228,7 +2343,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2228 } 2343 }
2229 ret = btrfs_del_orphan_item(trans, root, 2344 ret = btrfs_del_orphan_item(trans, root,
2230 found_key.objectid); 2345 found_key.objectid);
2231 BUG_ON(ret); 2346 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
2232 btrfs_end_transaction(trans, root); 2347 btrfs_end_transaction(trans, root);
2233 continue; 2348 continue;
2234 } 2349 }
@@ -2610,16 +2725,22 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2610 printk(KERN_INFO "btrfs failed to delete reference to %.*s, " 2725 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2611 "inode %llu parent %llu\n", name_len, name, 2726 "inode %llu parent %llu\n", name_len, name,
2612 (unsigned long long)ino, (unsigned long long)dir_ino); 2727 (unsigned long long)ino, (unsigned long long)dir_ino);
2728 btrfs_abort_transaction(trans, root, ret);
2613 goto err; 2729 goto err;
2614 } 2730 }
2615 2731
2616 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 2732 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
2617 if (ret) 2733 if (ret) {
2734 btrfs_abort_transaction(trans, root, ret);
2618 goto err; 2735 goto err;
2736 }
2619 2737
2620 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, 2738 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2621 inode, dir_ino); 2739 inode, dir_ino);
2622 BUG_ON(ret != 0 && ret != -ENOENT); 2740 if (ret != 0 && ret != -ENOENT) {
2741 btrfs_abort_transaction(trans, root, ret);
2742 goto err;
2743 }
2623 2744
2624 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, 2745 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2625 dir, index); 2746 dir, index);
@@ -2777,7 +2898,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2777 err = ret; 2898 err = ret;
2778 goto out; 2899 goto out;
2779 } 2900 }
2780 BUG_ON(ret == 0); 2901 BUG_ON(ret == 0); /* Corruption */
2781 if (check_path_shared(root, path)) 2902 if (check_path_shared(root, path))
2782 goto out; 2903 goto out;
2783 btrfs_release_path(path); 2904 btrfs_release_path(path);
@@ -2810,7 +2931,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2810 err = PTR_ERR(ref); 2931 err = PTR_ERR(ref);
2811 goto out; 2932 goto out;
2812 } 2933 }
2813 BUG_ON(!ref); 2934 BUG_ON(!ref); /* Logic error */
2814 if (check_path_shared(root, path)) 2935 if (check_path_shared(root, path))
2815 goto out; 2936 goto out;
2816 index = btrfs_inode_ref_index(path->nodes[0], ref); 2937 index = btrfs_inode_ref_index(path->nodes[0], ref);
@@ -2917,23 +3038,42 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2917 3038
2918 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, 3039 di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
2919 name, name_len, -1); 3040 name, name_len, -1);
2920 BUG_ON(IS_ERR_OR_NULL(di)); 3041 if (IS_ERR_OR_NULL(di)) {
3042 if (!di)
3043 ret = -ENOENT;
3044 else
3045 ret = PTR_ERR(di);
3046 goto out;
3047 }
2921 3048
2922 leaf = path->nodes[0]; 3049 leaf = path->nodes[0];
2923 btrfs_dir_item_key_to_cpu(leaf, di, &key); 3050 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2924 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); 3051 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2925 ret = btrfs_delete_one_dir_name(trans, root, path, di); 3052 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2926 BUG_ON(ret); 3053 if (ret) {
3054 btrfs_abort_transaction(trans, root, ret);
3055 goto out;
3056 }
2927 btrfs_release_path(path); 3057 btrfs_release_path(path);
2928 3058
2929 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, 3059 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
2930 objectid, root->root_key.objectid, 3060 objectid, root->root_key.objectid,
2931 dir_ino, &index, name, name_len); 3061 dir_ino, &index, name, name_len);
2932 if (ret < 0) { 3062 if (ret < 0) {
2933 BUG_ON(ret != -ENOENT); 3063 if (ret != -ENOENT) {
3064 btrfs_abort_transaction(trans, root, ret);
3065 goto out;
3066 }
2934 di = btrfs_search_dir_index_item(root, path, dir_ino, 3067 di = btrfs_search_dir_index_item(root, path, dir_ino,
2935 name, name_len); 3068 name, name_len);
2936 BUG_ON(IS_ERR_OR_NULL(di)); 3069 if (IS_ERR_OR_NULL(di)) {
3070 if (!di)
3071 ret = -ENOENT;
3072 else
3073 ret = PTR_ERR(di);
3074 btrfs_abort_transaction(trans, root, ret);
3075 goto out;
3076 }
2937 3077
2938 leaf = path->nodes[0]; 3078 leaf = path->nodes[0];
2939 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 3079 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
@@ -2943,15 +3083,19 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2943 btrfs_release_path(path); 3083 btrfs_release_path(path);
2944 3084
2945 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); 3085 ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
2946 BUG_ON(ret); 3086 if (ret) {
3087 btrfs_abort_transaction(trans, root, ret);
3088 goto out;
3089 }
2947 3090
2948 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 3091 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2949 dir->i_mtime = dir->i_ctime = CURRENT_TIME; 3092 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2950 ret = btrfs_update_inode(trans, root, dir); 3093 ret = btrfs_update_inode(trans, root, dir);
2951 BUG_ON(ret); 3094 if (ret)
2952 3095 btrfs_abort_transaction(trans, root, ret);
3096out:
2953 btrfs_free_path(path); 3097 btrfs_free_path(path);
2954 return 0; 3098 return ret;
2955} 3099}
2956 3100
2957static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 3101static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
@@ -3161,8 +3305,8 @@ search_again:
3161 } 3305 }
3162 size = 3306 size =
3163 btrfs_file_extent_calc_inline_size(size); 3307 btrfs_file_extent_calc_inline_size(size);
3164 ret = btrfs_truncate_item(trans, root, path, 3308 btrfs_truncate_item(trans, root, path,
3165 size, 1); 3309 size, 1);
3166 } else if (root->ref_cows) { 3310 } else if (root->ref_cows) {
3167 inode_sub_bytes(inode, item_end + 1 - 3311 inode_sub_bytes(inode, item_end + 1 -
3168 found_key.offset); 3312 found_key.offset);
@@ -3210,7 +3354,11 @@ delete:
3210 ret = btrfs_del_items(trans, root, path, 3354 ret = btrfs_del_items(trans, root, path,
3211 pending_del_slot, 3355 pending_del_slot,
3212 pending_del_nr); 3356 pending_del_nr);
3213 BUG_ON(ret); 3357 if (ret) {
3358 btrfs_abort_transaction(trans,
3359 root, ret);
3360 goto error;
3361 }
3214 pending_del_nr = 0; 3362 pending_del_nr = 0;
3215 } 3363 }
3216 btrfs_release_path(path); 3364 btrfs_release_path(path);
@@ -3223,8 +3371,10 @@ out:
3223 if (pending_del_nr) { 3371 if (pending_del_nr) {
3224 ret = btrfs_del_items(trans, root, path, pending_del_slot, 3372 ret = btrfs_del_items(trans, root, path, pending_del_slot,
3225 pending_del_nr); 3373 pending_del_nr);
3226 BUG_ON(ret); 3374 if (ret)
3375 btrfs_abort_transaction(trans, root, ret);
3227 } 3376 }
3377error:
3228 btrfs_free_path(path); 3378 btrfs_free_path(path);
3229 return err; 3379 return err;
3230} 3380}
@@ -3282,8 +3432,7 @@ again:
3282 } 3432 }
3283 wait_on_page_writeback(page); 3433 wait_on_page_writeback(page);
3284 3434
3285 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, 3435 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
3286 GFP_NOFS);
3287 set_page_extent_mapped(page); 3436 set_page_extent_mapped(page);
3288 3437
3289 ordered = btrfs_lookup_ordered_extent(inode, page_start); 3438 ordered = btrfs_lookup_ordered_extent(inode, page_start);
@@ -3359,7 +3508,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3359 btrfs_wait_ordered_range(inode, hole_start, 3508 btrfs_wait_ordered_range(inode, hole_start,
3360 block_end - hole_start); 3509 block_end - hole_start);
3361 lock_extent_bits(io_tree, hole_start, block_end - 1, 0, 3510 lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
3362 &cached_state, GFP_NOFS); 3511 &cached_state);
3363 ordered = btrfs_lookup_ordered_extent(inode, hole_start); 3512 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3364 if (!ordered) 3513 if (!ordered)
3365 break; 3514 break;
@@ -3372,7 +3521,10 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3372 while (1) { 3521 while (1) {
3373 em = btrfs_get_extent(inode, NULL, 0, cur_offset, 3522 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3374 block_end - cur_offset, 0); 3523 block_end - cur_offset, 0);
3375 BUG_ON(IS_ERR_OR_NULL(em)); 3524 if (IS_ERR(em)) {
3525 err = PTR_ERR(em);
3526 break;
3527 }
3376 last_byte = min(extent_map_end(em), block_end); 3528 last_byte = min(extent_map_end(em), block_end);
3377 last_byte = (last_byte + mask) & ~mask; 3529 last_byte = (last_byte + mask) & ~mask;
3378 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 3530 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
@@ -3389,7 +3541,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3389 cur_offset + hole_size, 3541 cur_offset + hole_size,
3390 &hint_byte, 1); 3542 &hint_byte, 1);
3391 if (err) { 3543 if (err) {
3392 btrfs_update_inode(trans, root, inode); 3544 btrfs_abort_transaction(trans, root, err);
3393 btrfs_end_transaction(trans, root); 3545 btrfs_end_transaction(trans, root);
3394 break; 3546 break;
3395 } 3547 }
@@ -3399,7 +3551,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3399 0, hole_size, 0, hole_size, 3551 0, hole_size, 0, hole_size,
3400 0, 0, 0); 3552 0, 0, 0);
3401 if (err) { 3553 if (err) {
3402 btrfs_update_inode(trans, root, inode); 3554 btrfs_abort_transaction(trans, root, err);
3403 btrfs_end_transaction(trans, root); 3555 btrfs_end_transaction(trans, root);
3404 break; 3556 break;
3405 } 3557 }
@@ -3779,7 +3931,7 @@ static void inode_tree_del(struct inode *inode)
3779 } 3931 }
3780} 3932}
3781 3933
3782int btrfs_invalidate_inodes(struct btrfs_root *root) 3934void btrfs_invalidate_inodes(struct btrfs_root *root)
3783{ 3935{
3784 struct rb_node *node; 3936 struct rb_node *node;
3785 struct rb_node *prev; 3937 struct rb_node *prev;
@@ -3839,7 +3991,6 @@ again:
3839 node = rb_next(node); 3991 node = rb_next(node);
3840 } 3992 }
3841 spin_unlock(&root->inode_lock); 3993 spin_unlock(&root->inode_lock);
3842 return 0;
3843} 3994}
3844 3995
3845static int btrfs_init_locked_inode(struct inode *inode, void *p) 3996static int btrfs_init_locked_inode(struct inode *inode, void *p)
@@ -4581,18 +4732,26 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
4581 parent_ino, index); 4732 parent_ino, index);
4582 } 4733 }
4583 4734
4584 if (ret == 0) { 4735 /* Nothing to clean up yet */
4585 ret = btrfs_insert_dir_item(trans, root, name, name_len, 4736 if (ret)
4586 parent_inode, &key, 4737 return ret;
4587 btrfs_inode_type(inode), index);
4588 if (ret)
4589 goto fail_dir_item;
4590 4738
4591 btrfs_i_size_write(parent_inode, parent_inode->i_size + 4739 ret = btrfs_insert_dir_item(trans, root, name, name_len,
4592 name_len * 2); 4740 parent_inode, &key,
4593 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; 4741 btrfs_inode_type(inode), index);
4594 ret = btrfs_update_inode(trans, root, parent_inode); 4742 if (ret == -EEXIST)
4743 goto fail_dir_item;
4744 else if (ret) {
4745 btrfs_abort_transaction(trans, root, ret);
4746 return ret;
4595 } 4747 }
4748
4749 btrfs_i_size_write(parent_inode, parent_inode->i_size +
4750 name_len * 2);
4751 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4752 ret = btrfs_update_inode(trans, root, parent_inode);
4753 if (ret)
4754 btrfs_abort_transaction(trans, root, ret);
4596 return ret; 4755 return ret;
4597 4756
4598fail_dir_item: 4757fail_dir_item:
@@ -4806,7 +4965,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4806 } else { 4965 } else {
4807 struct dentry *parent = dentry->d_parent; 4966 struct dentry *parent = dentry->d_parent;
4808 err = btrfs_update_inode(trans, root, inode); 4967 err = btrfs_update_inode(trans, root, inode);
4809 BUG_ON(err); 4968 if (err)
4969 goto fail;
4810 d_instantiate(dentry, inode); 4970 d_instantiate(dentry, inode);
4811 btrfs_log_new_name(trans, inode, NULL, parent); 4971 btrfs_log_new_name(trans, inode, NULL, parent);
4812 } 4972 }
@@ -5137,7 +5297,7 @@ again:
5137 ret = uncompress_inline(path, inode, page, 5297 ret = uncompress_inline(path, inode, page,
5138 pg_offset, 5298 pg_offset,
5139 extent_offset, item); 5299 extent_offset, item);
5140 BUG_ON(ret); 5300 BUG_ON(ret); /* -ENOMEM */
5141 } else { 5301 } else {
5142 map = kmap(page); 5302 map = kmap(page);
5143 read_extent_buffer(leaf, map + pg_offset, ptr, 5303 read_extent_buffer(leaf, map + pg_offset, ptr,
@@ -5252,6 +5412,7 @@ out:
5252 free_extent_map(em); 5412 free_extent_map(em);
5253 return ERR_PTR(err); 5413 return ERR_PTR(err);
5254 } 5414 }
5415 BUG_ON(!em); /* Error is always set */
5255 return em; 5416 return em;
5256} 5417}
5257 5418
@@ -5414,7 +5575,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5414 5575
5415 alloc_hint = get_extent_allocation_hint(inode, start, len); 5576 alloc_hint = get_extent_allocation_hint(inode, start, len);
5416 ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0, 5577 ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
5417 alloc_hint, (u64)-1, &ins, 1); 5578 alloc_hint, &ins, 1);
5418 if (ret) { 5579 if (ret) {
5419 em = ERR_PTR(ret); 5580 em = ERR_PTR(ret);
5420 goto out; 5581 goto out;
@@ -5602,7 +5763,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5602 free_extent_map(em); 5763 free_extent_map(em);
5603 /* DIO will do one hole at a time, so just unlock a sector */ 5764 /* DIO will do one hole at a time, so just unlock a sector */
5604 unlock_extent(&BTRFS_I(inode)->io_tree, start, 5765 unlock_extent(&BTRFS_I(inode)->io_tree, start,
5605 start + root->sectorsize - 1, GFP_NOFS); 5766 start + root->sectorsize - 1);
5606 return 0; 5767 return 0;
5607 } 5768 }
5608 5769
@@ -5743,7 +5904,7 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
5743 } while (bvec <= bvec_end); 5904 } while (bvec <= bvec_end);
5744 5905
5745 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, 5906 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
5746 dip->logical_offset + dip->bytes - 1, GFP_NOFS); 5907 dip->logical_offset + dip->bytes - 1);
5747 bio->bi_private = dip->private; 5908 bio->bi_private = dip->private;
5748 5909
5749 kfree(dip->csums); 5910 kfree(dip->csums);
@@ -5794,7 +5955,7 @@ again:
5794 5955
5795 lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset, 5956 lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
5796 ordered->file_offset + ordered->len - 1, 0, 5957 ordered->file_offset + ordered->len - 1, 0,
5797 &cached_state, GFP_NOFS); 5958 &cached_state);
5798 5959
5799 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) { 5960 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
5800 ret = btrfs_mark_extent_written(trans, inode, 5961 ret = btrfs_mark_extent_written(trans, inode,
@@ -5868,7 +6029,7 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
5868 int ret; 6029 int ret;
5869 struct btrfs_root *root = BTRFS_I(inode)->root; 6030 struct btrfs_root *root = BTRFS_I(inode)->root;
5870 ret = btrfs_csum_one_bio(root, inode, bio, offset, 1); 6031 ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
5871 BUG_ON(ret); 6032 BUG_ON(ret); /* -ENOMEM */
5872 return 0; 6033 return 0;
5873} 6034}
5874 6035
@@ -6209,7 +6370,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
6209 6370
6210 while (1) { 6371 while (1) {
6211 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 6372 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6212 0, &cached_state, GFP_NOFS); 6373 0, &cached_state);
6213 /* 6374 /*
6214 * We're concerned with the entire range that we're going to be 6375 * We're concerned with the entire range that we're going to be
6215 * doing DIO to, so we need to make sure theres no ordered 6376 * doing DIO to, so we need to make sure theres no ordered
@@ -6233,7 +6394,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
6233 if (writing) { 6394 if (writing) {
6234 write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING; 6395 write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
6235 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, 6396 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
6236 EXTENT_DELALLOC, 0, NULL, &cached_state, 6397 EXTENT_DELALLOC, NULL, &cached_state,
6237 GFP_NOFS); 6398 GFP_NOFS);
6238 if (ret) { 6399 if (ret) {
6239 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, 6400 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
@@ -6363,8 +6524,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
6363 btrfs_releasepage(page, GFP_NOFS); 6524 btrfs_releasepage(page, GFP_NOFS);
6364 return; 6525 return;
6365 } 6526 }
6366 lock_extent_bits(tree, page_start, page_end, 0, &cached_state, 6527 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
6367 GFP_NOFS);
6368 ordered = btrfs_lookup_ordered_extent(page->mapping->host, 6528 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
6369 page_offset(page)); 6529 page_offset(page));
6370 if (ordered) { 6530 if (ordered) {
@@ -6386,8 +6546,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
6386 } 6546 }
6387 btrfs_put_ordered_extent(ordered); 6547 btrfs_put_ordered_extent(ordered);
6388 cached_state = NULL; 6548 cached_state = NULL;
6389 lock_extent_bits(tree, page_start, page_end, 0, &cached_state, 6549 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
6390 GFP_NOFS);
6391 } 6550 }
6392 clear_extent_bit(tree, page_start, page_end, 6551 clear_extent_bit(tree, page_start, page_end,
6393 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | 6552 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
@@ -6462,8 +6621,7 @@ again:
6462 } 6621 }
6463 wait_on_page_writeback(page); 6622 wait_on_page_writeback(page);
6464 6623
6465 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, 6624 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
6466 GFP_NOFS);
6467 set_page_extent_mapped(page); 6625 set_page_extent_mapped(page);
6468 6626
6469 /* 6627 /*
@@ -6737,10 +6895,9 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
6737 btrfs_i_size_write(inode, 0); 6895 btrfs_i_size_write(inode, 0);
6738 6896
6739 err = btrfs_update_inode(trans, new_root, inode); 6897 err = btrfs_update_inode(trans, new_root, inode);
6740 BUG_ON(err);
6741 6898
6742 iput(inode); 6899 iput(inode);
6743 return 0; 6900 return err;
6744} 6901}
6745 6902
6746struct inode *btrfs_alloc_inode(struct super_block *sb) 6903struct inode *btrfs_alloc_inode(struct super_block *sb)
@@ -6783,6 +6940,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
6783 extent_map_tree_init(&ei->extent_tree); 6940 extent_map_tree_init(&ei->extent_tree);
6784 extent_io_tree_init(&ei->io_tree, &inode->i_data); 6941 extent_io_tree_init(&ei->io_tree, &inode->i_data);
6785 extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); 6942 extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
6943 ei->io_tree.track_uptodate = 1;
6944 ei->io_failure_tree.track_uptodate = 1;
6786 mutex_init(&ei->log_mutex); 6945 mutex_init(&ei->log_mutex);
6787 mutex_init(&ei->delalloc_mutex); 6946 mutex_init(&ei->delalloc_mutex);
6788 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 6947 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
@@ -7072,7 +7231,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7072 if (!ret) 7231 if (!ret)
7073 ret = btrfs_update_inode(trans, root, old_inode); 7232 ret = btrfs_update_inode(trans, root, old_inode);
7074 } 7233 }
7075 BUG_ON(ret); 7234 if (ret) {
7235 btrfs_abort_transaction(trans, root, ret);
7236 goto out_fail;
7237 }
7076 7238
7077 if (new_inode) { 7239 if (new_inode) {
7078 new_inode->i_ctime = CURRENT_TIME; 7240 new_inode->i_ctime = CURRENT_TIME;
@@ -7090,11 +7252,14 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7090 new_dentry->d_name.name, 7252 new_dentry->d_name.name,
7091 new_dentry->d_name.len); 7253 new_dentry->d_name.len);
7092 } 7254 }
7093 BUG_ON(ret); 7255 if (!ret && new_inode->i_nlink == 0) {
7094 if (new_inode->i_nlink == 0) {
7095 ret = btrfs_orphan_add(trans, new_dentry->d_inode); 7256 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
7096 BUG_ON(ret); 7257 BUG_ON(ret);
7097 } 7258 }
7259 if (ret) {
7260 btrfs_abort_transaction(trans, root, ret);
7261 goto out_fail;
7262 }
7098 } 7263 }
7099 7264
7100 fixup_inode_flags(new_dir, old_inode); 7265 fixup_inode_flags(new_dir, old_inode);
@@ -7102,7 +7267,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7102 ret = btrfs_add_link(trans, new_dir, old_inode, 7267 ret = btrfs_add_link(trans, new_dir, old_inode,
7103 new_dentry->d_name.name, 7268 new_dentry->d_name.name,
7104 new_dentry->d_name.len, 0, index); 7269 new_dentry->d_name.len, 0, index);
7105 BUG_ON(ret); 7270 if (ret) {
7271 btrfs_abort_transaction(trans, root, ret);
7272 goto out_fail;
7273 }
7106 7274
7107 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { 7275 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
7108 struct dentry *parent = new_dentry->d_parent; 7276 struct dentry *parent = new_dentry->d_parent;
@@ -7315,7 +7483,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
7315 } 7483 }
7316 7484
7317 ret = btrfs_reserve_extent(trans, root, num_bytes, min_size, 7485 ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
7318 0, *alloc_hint, (u64)-1, &ins, 1); 7486 0, *alloc_hint, &ins, 1);
7319 if (ret) { 7487 if (ret) {
7320 if (own_trans) 7488 if (own_trans)
7321 btrfs_end_transaction(trans, root); 7489 btrfs_end_transaction(trans, root);
@@ -7327,7 +7495,12 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
7327 ins.offset, ins.offset, 7495 ins.offset, ins.offset,
7328 ins.offset, 0, 0, 0, 7496 ins.offset, 0, 0, 0,
7329 BTRFS_FILE_EXTENT_PREALLOC); 7497 BTRFS_FILE_EXTENT_PREALLOC);
7330 BUG_ON(ret); 7498 if (ret) {
7499 btrfs_abort_transaction(trans, root, ret);
7500 if (own_trans)
7501 btrfs_end_transaction(trans, root);
7502 break;
7503 }
7331 btrfs_drop_extent_cache(inode, cur_offset, 7504 btrfs_drop_extent_cache(inode, cur_offset,
7332 cur_offset + ins.offset -1, 0); 7505 cur_offset + ins.offset -1, 0);
7333 7506
@@ -7349,7 +7522,13 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
7349 } 7522 }
7350 7523
7351 ret = btrfs_update_inode(trans, root, inode); 7524 ret = btrfs_update_inode(trans, root, inode);
7352 BUG_ON(ret); 7525
7526 if (ret) {
7527 btrfs_abort_transaction(trans, root, ret);
7528 if (own_trans)
7529 btrfs_end_transaction(trans, root);
7530 break;
7531 }
7353 7532
7354 if (own_trans) 7533 if (own_trans)
7355 btrfs_end_transaction(trans, root); 7534 btrfs_end_transaction(trans, root);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d8b54715c2de..18cc23d164a8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -425,22 +425,37 @@ static noinline int create_subvol(struct btrfs_root *root,
425 425
426 key.offset = (u64)-1; 426 key.offset = (u64)-1;
427 new_root = btrfs_read_fs_root_no_name(root->fs_info, &key); 427 new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
428 BUG_ON(IS_ERR(new_root)); 428 if (IS_ERR(new_root)) {
429 btrfs_abort_transaction(trans, root, PTR_ERR(new_root));
430 ret = PTR_ERR(new_root);
431 goto fail;
432 }
429 433
430 btrfs_record_root_in_trans(trans, new_root); 434 btrfs_record_root_in_trans(trans, new_root);
431 435
432 ret = btrfs_create_subvol_root(trans, new_root, new_dirid); 436 ret = btrfs_create_subvol_root(trans, new_root, new_dirid);
437 if (ret) {
438 /* We potentially lose an unused inode item here */
439 btrfs_abort_transaction(trans, root, ret);
440 goto fail;
441 }
442
433 /* 443 /*
434 * insert the directory item 444 * insert the directory item
435 */ 445 */
436 ret = btrfs_set_inode_index(dir, &index); 446 ret = btrfs_set_inode_index(dir, &index);
437 BUG_ON(ret); 447 if (ret) {
448 btrfs_abort_transaction(trans, root, ret);
449 goto fail;
450 }
438 451
439 ret = btrfs_insert_dir_item(trans, root, 452 ret = btrfs_insert_dir_item(trans, root,
440 name, namelen, dir, &key, 453 name, namelen, dir, &key,
441 BTRFS_FT_DIR, index); 454 BTRFS_FT_DIR, index);
442 if (ret) 455 if (ret) {
456 btrfs_abort_transaction(trans, root, ret);
443 goto fail; 457 goto fail;
458 }
444 459
445 btrfs_i_size_write(dir, dir->i_size + namelen * 2); 460 btrfs_i_size_write(dir, dir->i_size + namelen * 2);
446 ret = btrfs_update_inode(trans, root, dir); 461 ret = btrfs_update_inode(trans, root, dir);
@@ -769,6 +784,31 @@ none:
769 return -ENOENT; 784 return -ENOENT;
770} 785}
771 786
787/*
 788 * Validity check of prev em and next em:
789 * 1) no prev/next em
 790 * 2) prev/next em is a hole/inline extent
791 */
792static int check_adjacent_extents(struct inode *inode, struct extent_map *em)
793{
794 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
795 struct extent_map *prev = NULL, *next = NULL;
796 int ret = 0;
797
798 read_lock(&em_tree->lock);
799 prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1);
800 next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1);
801 read_unlock(&em_tree->lock);
802
803 if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) &&
804 (!next || next->block_start >= EXTENT_MAP_LAST_BYTE))
805 ret = 1;
806 free_extent_map(prev);
807 free_extent_map(next);
808
809 return ret;
810}
811
772static int should_defrag_range(struct inode *inode, u64 start, u64 len, 812static int should_defrag_range(struct inode *inode, u64 start, u64 len,
773 int thresh, u64 *last_len, u64 *skip, 813 int thresh, u64 *last_len, u64 *skip,
774 u64 *defrag_end) 814 u64 *defrag_end)
@@ -797,17 +837,25 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
797 837
798 if (!em) { 838 if (!em) {
799 /* get the big lock and read metadata off disk */ 839 /* get the big lock and read metadata off disk */
800 lock_extent(io_tree, start, start + len - 1, GFP_NOFS); 840 lock_extent(io_tree, start, start + len - 1);
801 em = btrfs_get_extent(inode, NULL, 0, start, len, 0); 841 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
802 unlock_extent(io_tree, start, start + len - 1, GFP_NOFS); 842 unlock_extent(io_tree, start, start + len - 1);
803 843
804 if (IS_ERR(em)) 844 if (IS_ERR(em))
805 return 0; 845 return 0;
806 } 846 }
807 847
808 /* this will cover holes, and inline extents */ 848 /* this will cover holes, and inline extents */
809 if (em->block_start >= EXTENT_MAP_LAST_BYTE) 849 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
810 ret = 0; 850 ret = 0;
851 goto out;
852 }
853
854 /* If we have nothing to merge with us, just skip. */
855 if (check_adjacent_extents(inode, em)) {
856 ret = 0;
857 goto out;
858 }
811 859
812 /* 860 /*
813 * we hit a real extent, if it is big don't bother defragging it again 861 * we hit a real extent, if it is big don't bother defragging it again
@@ -815,6 +863,7 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
815 if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh) 863 if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh)
816 ret = 0; 864 ret = 0;
817 865
866out:
818 /* 867 /*
819 * last_len ends up being a counter of how many bytes we've defragged. 868 * last_len ends up being a counter of how many bytes we've defragged.
820 * every time we choose not to defrag an extent, we reset *last_len 869 * every time we choose not to defrag an extent, we reset *last_len
@@ -856,6 +905,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
856 u64 isize = i_size_read(inode); 905 u64 isize = i_size_read(inode);
857 u64 page_start; 906 u64 page_start;
858 u64 page_end; 907 u64 page_end;
908 u64 page_cnt;
859 int ret; 909 int ret;
860 int i; 910 int i;
861 int i_done; 911 int i_done;
@@ -864,19 +914,21 @@ static int cluster_pages_for_defrag(struct inode *inode,
864 struct extent_io_tree *tree; 914 struct extent_io_tree *tree;
865 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); 915 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
866 916
867 if (isize == 0)
868 return 0;
869 file_end = (isize - 1) >> PAGE_CACHE_SHIFT; 917 file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
918 if (!isize || start_index > file_end)
919 return 0;
920
921 page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
870 922
871 ret = btrfs_delalloc_reserve_space(inode, 923 ret = btrfs_delalloc_reserve_space(inode,
872 num_pages << PAGE_CACHE_SHIFT); 924 page_cnt << PAGE_CACHE_SHIFT);
873 if (ret) 925 if (ret)
874 return ret; 926 return ret;
875 i_done = 0; 927 i_done = 0;
876 tree = &BTRFS_I(inode)->io_tree; 928 tree = &BTRFS_I(inode)->io_tree;
877 929
878 /* step one, lock all the pages */ 930 /* step one, lock all the pages */
879 for (i = 0; i < num_pages; i++) { 931 for (i = 0; i < page_cnt; i++) {
880 struct page *page; 932 struct page *page;
881again: 933again:
882 page = find_or_create_page(inode->i_mapping, 934 page = find_or_create_page(inode->i_mapping,
@@ -887,10 +939,10 @@ again:
887 page_start = page_offset(page); 939 page_start = page_offset(page);
888 page_end = page_start + PAGE_CACHE_SIZE - 1; 940 page_end = page_start + PAGE_CACHE_SIZE - 1;
889 while (1) { 941 while (1) {
890 lock_extent(tree, page_start, page_end, GFP_NOFS); 942 lock_extent(tree, page_start, page_end);
891 ordered = btrfs_lookup_ordered_extent(inode, 943 ordered = btrfs_lookup_ordered_extent(inode,
892 page_start); 944 page_start);
893 unlock_extent(tree, page_start, page_end, GFP_NOFS); 945 unlock_extent(tree, page_start, page_end);
894 if (!ordered) 946 if (!ordered)
895 break; 947 break;
896 948
@@ -898,6 +950,15 @@ again:
898 btrfs_start_ordered_extent(inode, ordered, 1); 950 btrfs_start_ordered_extent(inode, ordered, 1);
899 btrfs_put_ordered_extent(ordered); 951 btrfs_put_ordered_extent(ordered);
900 lock_page(page); 952 lock_page(page);
953 /*
 954 * we unlocked the page above, so we need to check if
955 * it was released or not.
956 */
957 if (page->mapping != inode->i_mapping) {
958 unlock_page(page);
959 page_cache_release(page);
960 goto again;
961 }
901 } 962 }
902 963
903 if (!PageUptodate(page)) { 964 if (!PageUptodate(page)) {
@@ -911,15 +972,6 @@ again:
911 } 972 }
912 } 973 }
913 974
914 isize = i_size_read(inode);
915 file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
916 if (!isize || page->index > file_end) {
917 /* whoops, we blew past eof, skip this page */
918 unlock_page(page);
919 page_cache_release(page);
920 break;
921 }
922
923 if (page->mapping != inode->i_mapping) { 975 if (page->mapping != inode->i_mapping) {
924 unlock_page(page); 976 unlock_page(page);
925 page_cache_release(page); 977 page_cache_release(page);
@@ -946,19 +998,18 @@ again:
946 page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; 998 page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
947 999
948 lock_extent_bits(&BTRFS_I(inode)->io_tree, 1000 lock_extent_bits(&BTRFS_I(inode)->io_tree,
949 page_start, page_end - 1, 0, &cached_state, 1001 page_start, page_end - 1, 0, &cached_state);
950 GFP_NOFS);
951 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, 1002 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
952 page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | 1003 page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
953 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, 1004 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
954 GFP_NOFS); 1005 GFP_NOFS);
955 1006
956 if (i_done != num_pages) { 1007 if (i_done != page_cnt) {
957 spin_lock(&BTRFS_I(inode)->lock); 1008 spin_lock(&BTRFS_I(inode)->lock);
958 BTRFS_I(inode)->outstanding_extents++; 1009 BTRFS_I(inode)->outstanding_extents++;
959 spin_unlock(&BTRFS_I(inode)->lock); 1010 spin_unlock(&BTRFS_I(inode)->lock);
960 btrfs_delalloc_release_space(inode, 1011 btrfs_delalloc_release_space(inode,
961 (num_pages - i_done) << PAGE_CACHE_SHIFT); 1012 (page_cnt - i_done) << PAGE_CACHE_SHIFT);
962 } 1013 }
963 1014
964 1015
@@ -983,7 +1034,7 @@ out:
983 unlock_page(pages[i]); 1034 unlock_page(pages[i]);
984 page_cache_release(pages[i]); 1035 page_cache_release(pages[i]);
985 } 1036 }
986 btrfs_delalloc_release_space(inode, num_pages << PAGE_CACHE_SHIFT); 1037 btrfs_delalloc_release_space(inode, page_cnt << PAGE_CACHE_SHIFT);
987 return ret; 1038 return ret;
988 1039
989} 1040}
@@ -1089,12 +1140,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1089 if (!(inode->i_sb->s_flags & MS_ACTIVE)) 1140 if (!(inode->i_sb->s_flags & MS_ACTIVE))
1090 break; 1141 break;
1091 1142
1092 if (!newer_than && 1143 if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
1093 !should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, 1144 PAGE_CACHE_SIZE, extent_thresh,
1094 PAGE_CACHE_SIZE, 1145 &last_len, &skip, &defrag_end)) {
1095 extent_thresh,
1096 &last_len, &skip,
1097 &defrag_end)) {
1098 unsigned long next; 1146 unsigned long next;
1099 /* 1147 /*
1100 * the should_defrag function tells us how much to skip 1148 * the should_defrag function tells us how much to skip
@@ -1123,17 +1171,24 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1123 ra_index += max_cluster; 1171 ra_index += max_cluster;
1124 } 1172 }
1125 1173
1174 mutex_lock(&inode->i_mutex);
1126 ret = cluster_pages_for_defrag(inode, pages, i, cluster); 1175 ret = cluster_pages_for_defrag(inode, pages, i, cluster);
1127 if (ret < 0) 1176 if (ret < 0) {
1177 mutex_unlock(&inode->i_mutex);
1128 goto out_ra; 1178 goto out_ra;
1179 }
1129 1180
1130 defrag_count += ret; 1181 defrag_count += ret;
1131 balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret); 1182 balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret);
1183 mutex_unlock(&inode->i_mutex);
1132 1184
1133 if (newer_than) { 1185 if (newer_than) {
1134 if (newer_off == (u64)-1) 1186 if (newer_off == (u64)-1)
1135 break; 1187 break;
1136 1188
1189 if (ret > 0)
1190 i += ret;
1191
1137 newer_off = max(newer_off + 1, 1192 newer_off = max(newer_off + 1,
1138 (u64)i << PAGE_CACHE_SHIFT); 1193 (u64)i << PAGE_CACHE_SHIFT);
1139 1194
@@ -1966,7 +2021,11 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
1966 dest->root_key.objectid, 2021 dest->root_key.objectid,
1967 dentry->d_name.name, 2022 dentry->d_name.name,
1968 dentry->d_name.len); 2023 dentry->d_name.len);
1969 BUG_ON(ret); 2024 if (ret) {
2025 err = ret;
2026 btrfs_abort_transaction(trans, root, ret);
2027 goto out_end_trans;
2028 }
1970 2029
1971 btrfs_record_root_in_trans(trans, dest); 2030 btrfs_record_root_in_trans(trans, dest);
1972 2031
@@ -1979,11 +2038,16 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
1979 ret = btrfs_insert_orphan_item(trans, 2038 ret = btrfs_insert_orphan_item(trans,
1980 root->fs_info->tree_root, 2039 root->fs_info->tree_root,
1981 dest->root_key.objectid); 2040 dest->root_key.objectid);
1982 BUG_ON(ret); 2041 if (ret) {
2042 btrfs_abort_transaction(trans, root, ret);
2043 err = ret;
2044 goto out_end_trans;
2045 }
1983 } 2046 }
1984 2047out_end_trans:
1985 ret = btrfs_end_transaction(trans, root); 2048 ret = btrfs_end_transaction(trans, root);
1986 BUG_ON(ret); 2049 if (ret && !err)
2050 err = ret;
1987 inode->i_flags |= S_DEAD; 2051 inode->i_flags |= S_DEAD;
1988out_up_write: 2052out_up_write:
1989 up_write(&root->fs_info->subvol_sem); 2053 up_write(&root->fs_info->subvol_sem);
@@ -2326,13 +2390,13 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2326 another, and lock file content */ 2390 another, and lock file content */
2327 while (1) { 2391 while (1) {
2328 struct btrfs_ordered_extent *ordered; 2392 struct btrfs_ordered_extent *ordered;
2329 lock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); 2393 lock_extent(&BTRFS_I(src)->io_tree, off, off+len);
2330 ordered = btrfs_lookup_first_ordered_extent(src, off+len); 2394 ordered = btrfs_lookup_first_ordered_extent(src, off+len);
2331 if (!ordered && 2395 if (!ordered &&
2332 !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len, 2396 !test_range_bit(&BTRFS_I(src)->io_tree, off, off+len,
2333 EXTENT_DELALLOC, 0, NULL)) 2397 EXTENT_DELALLOC, 0, NULL))
2334 break; 2398 break;
2335 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); 2399 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);
2336 if (ordered) 2400 if (ordered)
2337 btrfs_put_ordered_extent(ordered); 2401 btrfs_put_ordered_extent(ordered);
2338 btrfs_wait_ordered_range(src, off, len); 2402 btrfs_wait_ordered_range(src, off, len);
@@ -2447,11 +2511,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2447 new_key.offset, 2511 new_key.offset,
2448 new_key.offset + datal, 2512 new_key.offset + datal,
2449 &hint_byte, 1); 2513 &hint_byte, 1);
2450 BUG_ON(ret); 2514 if (ret) {
2515 btrfs_abort_transaction(trans, root,
2516 ret);
2517 btrfs_end_transaction(trans, root);
2518 goto out;
2519 }
2451 2520
2452 ret = btrfs_insert_empty_item(trans, root, path, 2521 ret = btrfs_insert_empty_item(trans, root, path,
2453 &new_key, size); 2522 &new_key, size);
2454 BUG_ON(ret); 2523 if (ret) {
2524 btrfs_abort_transaction(trans, root,
2525 ret);
2526 btrfs_end_transaction(trans, root);
2527 goto out;
2528 }
2455 2529
2456 leaf = path->nodes[0]; 2530 leaf = path->nodes[0];
2457 slot = path->slots[0]; 2531 slot = path->slots[0];
@@ -2478,7 +2552,15 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2478 btrfs_ino(inode), 2552 btrfs_ino(inode),
2479 new_key.offset - datao, 2553 new_key.offset - datao,
2480 0); 2554 0);
2481 BUG_ON(ret); 2555 if (ret) {
2556 btrfs_abort_transaction(trans,
2557 root,
2558 ret);
2559 btrfs_end_transaction(trans,
2560 root);
2561 goto out;
2562
2563 }
2482 } 2564 }
2483 } else if (type == BTRFS_FILE_EXTENT_INLINE) { 2565 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
2484 u64 skip = 0; 2566 u64 skip = 0;
@@ -2503,11 +2585,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2503 new_key.offset, 2585 new_key.offset,
2504 new_key.offset + datal, 2586 new_key.offset + datal,
2505 &hint_byte, 1); 2587 &hint_byte, 1);
2506 BUG_ON(ret); 2588 if (ret) {
2589 btrfs_abort_transaction(trans, root,
2590 ret);
2591 btrfs_end_transaction(trans, root);
2592 goto out;
2593 }
2507 2594
2508 ret = btrfs_insert_empty_item(trans, root, path, 2595 ret = btrfs_insert_empty_item(trans, root, path,
2509 &new_key, size); 2596 &new_key, size);
2510 BUG_ON(ret); 2597 if (ret) {
2598 btrfs_abort_transaction(trans, root,
2599 ret);
2600 btrfs_end_transaction(trans, root);
2601 goto out;
2602 }
2511 2603
2512 if (skip) { 2604 if (skip) {
2513 u32 start = 2605 u32 start =
@@ -2541,8 +2633,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2541 btrfs_i_size_write(inode, endoff); 2633 btrfs_i_size_write(inode, endoff);
2542 2634
2543 ret = btrfs_update_inode(trans, root, inode); 2635 ret = btrfs_update_inode(trans, root, inode);
2544 BUG_ON(ret); 2636 if (ret) {
2545 btrfs_end_transaction(trans, root); 2637 btrfs_abort_transaction(trans, root, ret);
2638 btrfs_end_transaction(trans, root);
2639 goto out;
2640 }
2641 ret = btrfs_end_transaction(trans, root);
2546 } 2642 }
2547next: 2643next:
2548 btrfs_release_path(path); 2644 btrfs_release_path(path);
@@ -2551,7 +2647,7 @@ next:
2551 ret = 0; 2647 ret = 0;
2552out: 2648out:
2553 btrfs_release_path(path); 2649 btrfs_release_path(path);
2554 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); 2650 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len);
2555out_unlock: 2651out_unlock:
2556 mutex_unlock(&src->i_mutex); 2652 mutex_unlock(&src->i_mutex);
2557 mutex_unlock(&inode->i_mutex); 2653 mutex_unlock(&inode->i_mutex);
@@ -3066,8 +3162,8 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
3066 goto out; 3162 goto out;
3067 3163
3068 extent_item_pos = loi->logical - key.objectid; 3164 extent_item_pos = loi->logical - key.objectid;
3069 ret = iterate_extent_inodes(root->fs_info, path, key.objectid, 3165 ret = iterate_extent_inodes(root->fs_info, key.objectid,
3070 extent_item_pos, build_ino_list, 3166 extent_item_pos, 0, build_ino_list,
3071 inodes); 3167 inodes);
3072 3168
3073 if (ret < 0) 3169 if (ret < 0)
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 5e178d8f7167..272f911203ff 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -208,7 +208,7 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
208 * take a spinning write lock. This will wait for both 208 * take a spinning write lock. This will wait for both
209 * blocking readers or writers 209 * blocking readers or writers
210 */ 210 */
211int btrfs_tree_lock(struct extent_buffer *eb) 211void btrfs_tree_lock(struct extent_buffer *eb)
212{ 212{
213again: 213again:
214 wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0); 214 wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
@@ -230,13 +230,12 @@ again:
230 atomic_inc(&eb->spinning_writers); 230 atomic_inc(&eb->spinning_writers);
231 atomic_inc(&eb->write_locks); 231 atomic_inc(&eb->write_locks);
232 eb->lock_owner = current->pid; 232 eb->lock_owner = current->pid;
233 return 0;
234} 233}
235 234
236/* 235/*
237 * drop a spinning or a blocking write lock. 236 * drop a spinning or a blocking write lock.
238 */ 237 */
239int btrfs_tree_unlock(struct extent_buffer *eb) 238void btrfs_tree_unlock(struct extent_buffer *eb)
240{ 239{
241 int blockers = atomic_read(&eb->blocking_writers); 240 int blockers = atomic_read(&eb->blocking_writers);
242 241
@@ -255,7 +254,6 @@ int btrfs_tree_unlock(struct extent_buffer *eb)
255 atomic_dec(&eb->spinning_writers); 254 atomic_dec(&eb->spinning_writers);
256 write_unlock(&eb->lock); 255 write_unlock(&eb->lock);
257 } 256 }
258 return 0;
259} 257}
260 258
261void btrfs_assert_tree_locked(struct extent_buffer *eb) 259void btrfs_assert_tree_locked(struct extent_buffer *eb)
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 17247ddb81a0..ca52681e5f40 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -24,8 +24,8 @@
24#define BTRFS_WRITE_LOCK_BLOCKING 3 24#define BTRFS_WRITE_LOCK_BLOCKING 3
25#define BTRFS_READ_LOCK_BLOCKING 4 25#define BTRFS_READ_LOCK_BLOCKING 4
26 26
27int btrfs_tree_lock(struct extent_buffer *eb); 27void btrfs_tree_lock(struct extent_buffer *eb);
28int btrfs_tree_unlock(struct extent_buffer *eb); 28void btrfs_tree_unlock(struct extent_buffer *eb);
29int btrfs_try_spin_lock(struct extent_buffer *eb); 29int btrfs_try_spin_lock(struct extent_buffer *eb);
30 30
31void btrfs_tree_read_lock(struct extent_buffer *eb); 31void btrfs_tree_read_lock(struct extent_buffer *eb);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index a1c940425307..bbf6d0d9aebe 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -59,6 +59,14 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
59 return NULL; 59 return NULL;
60} 60}
61 61
62static void ordered_data_tree_panic(struct inode *inode, int errno,
63 u64 offset)
64{
65 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
66 btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
67 "%llu\n", (unsigned long long)offset);
68}
69
62/* 70/*
63 * look for a given offset in the tree, and if it can't be found return the 71 * look for a given offset in the tree, and if it can't be found return the
64 * first lesser offset 72 * first lesser offset
@@ -207,7 +215,8 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
207 spin_lock(&tree->lock); 215 spin_lock(&tree->lock);
208 node = tree_insert(&tree->tree, file_offset, 216 node = tree_insert(&tree->tree, file_offset,
209 &entry->rb_node); 217 &entry->rb_node);
210 BUG_ON(node); 218 if (node)
219 ordered_data_tree_panic(inode, -EEXIST, file_offset);
211 spin_unlock(&tree->lock); 220 spin_unlock(&tree->lock);
212 221
213 spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); 222 spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
@@ -215,7 +224,6 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
215 &BTRFS_I(inode)->root->fs_info->ordered_extents); 224 &BTRFS_I(inode)->root->fs_info->ordered_extents);
216 spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); 225 spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
217 226
218 BUG_ON(node);
219 return 0; 227 return 0;
220} 228}
221 229
@@ -249,9 +257,9 @@ int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
249 * when an ordered extent is finished. If the list covers more than one 257 * when an ordered extent is finished. If the list covers more than one
250 * ordered extent, it is split across multiples. 258 * ordered extent, it is split across multiples.
251 */ 259 */
252int btrfs_add_ordered_sum(struct inode *inode, 260void btrfs_add_ordered_sum(struct inode *inode,
253 struct btrfs_ordered_extent *entry, 261 struct btrfs_ordered_extent *entry,
254 struct btrfs_ordered_sum *sum) 262 struct btrfs_ordered_sum *sum)
255{ 263{
256 struct btrfs_ordered_inode_tree *tree; 264 struct btrfs_ordered_inode_tree *tree;
257 265
@@ -259,7 +267,6 @@ int btrfs_add_ordered_sum(struct inode *inode,
259 spin_lock(&tree->lock); 267 spin_lock(&tree->lock);
260 list_add_tail(&sum->list, &entry->list); 268 list_add_tail(&sum->list, &entry->list);
261 spin_unlock(&tree->lock); 269 spin_unlock(&tree->lock);
262 return 0;
263} 270}
264 271
265/* 272/*
@@ -384,7 +391,7 @@ out:
384 * used to drop a reference on an ordered extent. This will free 391 * used to drop a reference on an ordered extent. This will free
385 * the extent if the last reference is dropped 392 * the extent if the last reference is dropped
386 */ 393 */
387int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) 394void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
388{ 395{
389 struct list_head *cur; 396 struct list_head *cur;
390 struct btrfs_ordered_sum *sum; 397 struct btrfs_ordered_sum *sum;
@@ -400,7 +407,6 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
400 } 407 }
401 kfree(entry); 408 kfree(entry);
402 } 409 }
403 return 0;
404} 410}
405 411
406/* 412/*
@@ -408,8 +414,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
408 * and you must wake_up entry->wait. You must hold the tree lock 414 * and you must wake_up entry->wait. You must hold the tree lock
409 * while you call this function. 415 * while you call this function.
410 */ 416 */
411static int __btrfs_remove_ordered_extent(struct inode *inode, 417static void __btrfs_remove_ordered_extent(struct inode *inode,
412 struct btrfs_ordered_extent *entry) 418 struct btrfs_ordered_extent *entry)
413{ 419{
414 struct btrfs_ordered_inode_tree *tree; 420 struct btrfs_ordered_inode_tree *tree;
415 struct btrfs_root *root = BTRFS_I(inode)->root; 421 struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -436,35 +442,30 @@ static int __btrfs_remove_ordered_extent(struct inode *inode,
436 list_del_init(&BTRFS_I(inode)->ordered_operations); 442 list_del_init(&BTRFS_I(inode)->ordered_operations);
437 } 443 }
438 spin_unlock(&root->fs_info->ordered_extent_lock); 444 spin_unlock(&root->fs_info->ordered_extent_lock);
439
440 return 0;
441} 445}
442 446
443/* 447/*
444 * remove an ordered extent from the tree. No references are dropped 448 * remove an ordered extent from the tree. No references are dropped
445 * but any waiters are woken. 449 * but any waiters are woken.
446 */ 450 */
447int btrfs_remove_ordered_extent(struct inode *inode, 451void btrfs_remove_ordered_extent(struct inode *inode,
448 struct btrfs_ordered_extent *entry) 452 struct btrfs_ordered_extent *entry)
449{ 453{
450 struct btrfs_ordered_inode_tree *tree; 454 struct btrfs_ordered_inode_tree *tree;
451 int ret;
452 455
453 tree = &BTRFS_I(inode)->ordered_tree; 456 tree = &BTRFS_I(inode)->ordered_tree;
454 spin_lock(&tree->lock); 457 spin_lock(&tree->lock);
455 ret = __btrfs_remove_ordered_extent(inode, entry); 458 __btrfs_remove_ordered_extent(inode, entry);
456 spin_unlock(&tree->lock); 459 spin_unlock(&tree->lock);
457 wake_up(&entry->wait); 460 wake_up(&entry->wait);
458
459 return ret;
460} 461}
461 462
462/* 463/*
463 * wait for all the ordered extents in a root. This is done when balancing 464 * wait for all the ordered extents in a root. This is done when balancing
464 * space between drives. 465 * space between drives.
465 */ 466 */
466int btrfs_wait_ordered_extents(struct btrfs_root *root, 467void btrfs_wait_ordered_extents(struct btrfs_root *root,
467 int nocow_only, int delay_iput) 468 int nocow_only, int delay_iput)
468{ 469{
469 struct list_head splice; 470 struct list_head splice;
470 struct list_head *cur; 471 struct list_head *cur;
@@ -512,7 +513,6 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root,
512 spin_lock(&root->fs_info->ordered_extent_lock); 513 spin_lock(&root->fs_info->ordered_extent_lock);
513 } 514 }
514 spin_unlock(&root->fs_info->ordered_extent_lock); 515 spin_unlock(&root->fs_info->ordered_extent_lock);
515 return 0;
516} 516}
517 517
518/* 518/*
@@ -525,7 +525,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root,
525 * extra check to make sure the ordered operation list really is empty 525 * extra check to make sure the ordered operation list really is empty
526 * before we return 526 * before we return
527 */ 527 */
528int btrfs_run_ordered_operations(struct btrfs_root *root, int wait) 528void btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
529{ 529{
530 struct btrfs_inode *btrfs_inode; 530 struct btrfs_inode *btrfs_inode;
531 struct inode *inode; 531 struct inode *inode;
@@ -573,8 +573,6 @@ again:
573 573
574 spin_unlock(&root->fs_info->ordered_extent_lock); 574 spin_unlock(&root->fs_info->ordered_extent_lock);
575 mutex_unlock(&root->fs_info->ordered_operations_mutex); 575 mutex_unlock(&root->fs_info->ordered_operations_mutex);
576
577 return 0;
578} 576}
579 577
580/* 578/*
@@ -609,7 +607,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
609/* 607/*
610 * Used to wait on ordered extents across a large range of bytes. 608 * Used to wait on ordered extents across a large range of bytes.
611 */ 609 */
612int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) 610void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
613{ 611{
614 u64 end; 612 u64 end;
615 u64 orig_end; 613 u64 orig_end;
@@ -664,7 +662,6 @@ again:
664 schedule_timeout(1); 662 schedule_timeout(1);
665 goto again; 663 goto again;
666 } 664 }
667 return 0;
668} 665}
669 666
670/* 667/*
@@ -948,9 +945,8 @@ out:
948 * If trans is not null, we'll do a friendly check for a transaction that 945 * If trans is not null, we'll do a friendly check for a transaction that
949 * is already flushing things and force the IO down ourselves. 946 * is already flushing things and force the IO down ourselves.
950 */ 947 */
951int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, 948void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
952 struct btrfs_root *root, 949 struct btrfs_root *root, struct inode *inode)
953 struct inode *inode)
954{ 950{
955 u64 last_mod; 951 u64 last_mod;
956 952
@@ -961,7 +957,7 @@ int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
961 * commit, we can safely return without doing anything 957 * commit, we can safely return without doing anything
962 */ 958 */
963 if (last_mod < root->fs_info->last_trans_committed) 959 if (last_mod < root->fs_info->last_trans_committed)
964 return 0; 960 return;
965 961
966 /* 962 /*
967 * the transaction is already committing. Just start the IO and 963 * the transaction is already committing. Just start the IO and
@@ -969,7 +965,7 @@ int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
969 */ 965 */
970 if (trans && root->fs_info->running_transaction->blocked) { 966 if (trans && root->fs_info->running_transaction->blocked) {
971 btrfs_wait_ordered_range(inode, 0, (u64)-1); 967 btrfs_wait_ordered_range(inode, 0, (u64)-1);
972 return 0; 968 return;
973 } 969 }
974 970
975 spin_lock(&root->fs_info->ordered_extent_lock); 971 spin_lock(&root->fs_info->ordered_extent_lock);
@@ -978,6 +974,4 @@ int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
978 &root->fs_info->ordered_operations); 974 &root->fs_info->ordered_operations);
979 } 975 }
980 spin_unlock(&root->fs_info->ordered_extent_lock); 976 spin_unlock(&root->fs_info->ordered_extent_lock);
981
982 return 0;
983} 977}
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index ff1f69aa1883..c355ad4dc1a6 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -138,8 +138,8 @@ btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
138 t->last = NULL; 138 t->last = NULL;
139} 139}
140 140
141int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry); 141void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
142int btrfs_remove_ordered_extent(struct inode *inode, 142void btrfs_remove_ordered_extent(struct inode *inode,
143 struct btrfs_ordered_extent *entry); 143 struct btrfs_ordered_extent *entry);
144int btrfs_dec_test_ordered_pending(struct inode *inode, 144int btrfs_dec_test_ordered_pending(struct inode *inode,
145 struct btrfs_ordered_extent **cached, 145 struct btrfs_ordered_extent **cached,
@@ -154,14 +154,14 @@ int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
154int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, 154int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
155 u64 start, u64 len, u64 disk_len, 155 u64 start, u64 len, u64 disk_len,
156 int type, int compress_type); 156 int type, int compress_type);
157int btrfs_add_ordered_sum(struct inode *inode, 157void btrfs_add_ordered_sum(struct inode *inode,
158 struct btrfs_ordered_extent *entry, 158 struct btrfs_ordered_extent *entry,
159 struct btrfs_ordered_sum *sum); 159 struct btrfs_ordered_sum *sum);
160struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, 160struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
161 u64 file_offset); 161 u64 file_offset);
162void btrfs_start_ordered_extent(struct inode *inode, 162void btrfs_start_ordered_extent(struct inode *inode,
163 struct btrfs_ordered_extent *entry, int wait); 163 struct btrfs_ordered_extent *entry, int wait);
164int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); 164void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
165struct btrfs_ordered_extent * 165struct btrfs_ordered_extent *
166btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); 166btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
167struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode, 167struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
@@ -170,10 +170,10 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
170int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, 170int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
171 struct btrfs_ordered_extent *ordered); 171 struct btrfs_ordered_extent *ordered);
172int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum); 172int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
173int btrfs_run_ordered_operations(struct btrfs_root *root, int wait); 173void btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
174int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, 174void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
175 struct btrfs_root *root, 175 struct btrfs_root *root,
176 struct inode *inode); 176 struct inode *inode);
177int btrfs_wait_ordered_extents(struct btrfs_root *root, 177void btrfs_wait_ordered_extents(struct btrfs_root *root,
178 int nocow_only, int delay_iput); 178 int nocow_only, int delay_iput);
179#endif 179#endif
diff --git a/fs/btrfs/orphan.c b/fs/btrfs/orphan.c
index f8be250963a0..24cad1695af7 100644
--- a/fs/btrfs/orphan.c
+++ b/fs/btrfs/orphan.c
@@ -58,7 +58,7 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
58 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 58 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
59 if (ret < 0) 59 if (ret < 0)
60 goto out; 60 goto out;
61 if (ret) { 61 if (ret) { /* JDM: Really? */
62 ret = -ENOENT; 62 ret = -ENOENT;
63 goto out; 63 goto out;
64 } 64 }
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 22db04550f6a..dc5d33146fdb 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -54,7 +54,6 @@
54 * than the 2 started one after another. 54 * than the 2 started one after another.
55 */ 55 */
56 56
57#define MAX_MIRRORS 2
58#define MAX_IN_FLIGHT 6 57#define MAX_IN_FLIGHT 6
59 58
60struct reada_extctl { 59struct reada_extctl {
@@ -71,7 +70,7 @@ struct reada_extent {
71 struct list_head extctl; 70 struct list_head extctl;
72 struct kref refcnt; 71 struct kref refcnt;
73 spinlock_t lock; 72 spinlock_t lock;
74 struct reada_zone *zones[MAX_MIRRORS]; 73 struct reada_zone *zones[BTRFS_MAX_MIRRORS];
75 int nzones; 74 int nzones;
76 struct btrfs_device *scheduled_for; 75 struct btrfs_device *scheduled_for;
77}; 76};
@@ -84,7 +83,8 @@ struct reada_zone {
84 spinlock_t lock; 83 spinlock_t lock;
85 int locked; 84 int locked;
86 struct btrfs_device *device; 85 struct btrfs_device *device;
87 struct btrfs_device *devs[MAX_MIRRORS]; /* full list, incl self */ 86 struct btrfs_device *devs[BTRFS_MAX_MIRRORS]; /* full list, incl
87 * self */
88 int ndevs; 88 int ndevs;
89 struct kref refcnt; 89 struct kref refcnt;
90}; 90};
@@ -365,9 +365,9 @@ again:
365 if (ret || !bbio || length < blocksize) 365 if (ret || !bbio || length < blocksize)
366 goto error; 366 goto error;
367 367
368 if (bbio->num_stripes > MAX_MIRRORS) { 368 if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
369 printk(KERN_ERR "btrfs readahead: more than %d copies not " 369 printk(KERN_ERR "btrfs readahead: more than %d copies not "
370 "supported", MAX_MIRRORS); 370 "supported", BTRFS_MAX_MIRRORS);
371 goto error; 371 goto error;
372 } 372 }
373 373
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 8c1aae2c845d..017281dbb2a7 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -326,6 +326,19 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
326 return NULL; 326 return NULL;
327} 327}
328 328
329void backref_tree_panic(struct rb_node *rb_node, int errno,
330 u64 bytenr)
331{
332
333 struct btrfs_fs_info *fs_info = NULL;
334 struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
335 rb_node);
336 if (bnode->root)
337 fs_info = bnode->root->fs_info;
338 btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
339 "found at offset %llu\n", (unsigned long long)bytenr);
340}
341
329/* 342/*
330 * walk up backref nodes until reach node presents tree root 343 * walk up backref nodes until reach node presents tree root
331 */ 344 */
@@ -452,7 +465,8 @@ static void update_backref_node(struct backref_cache *cache,
452 rb_erase(&node->rb_node, &cache->rb_root); 465 rb_erase(&node->rb_node, &cache->rb_root);
453 node->bytenr = bytenr; 466 node->bytenr = bytenr;
454 rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node); 467 rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
455 BUG_ON(rb_node); 468 if (rb_node)
469 backref_tree_panic(rb_node, -EEXIST, bytenr);
456} 470}
457 471
458/* 472/*
@@ -999,7 +1013,8 @@ next:
999 if (!cowonly) { 1013 if (!cowonly) {
1000 rb_node = tree_insert(&cache->rb_root, node->bytenr, 1014 rb_node = tree_insert(&cache->rb_root, node->bytenr,
1001 &node->rb_node); 1015 &node->rb_node);
1002 BUG_ON(rb_node); 1016 if (rb_node)
1017 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1003 list_add_tail(&node->lower, &cache->leaves); 1018 list_add_tail(&node->lower, &cache->leaves);
1004 } 1019 }
1005 1020
@@ -1034,7 +1049,9 @@ next:
1034 if (!cowonly) { 1049 if (!cowonly) {
1035 rb_node = tree_insert(&cache->rb_root, upper->bytenr, 1050 rb_node = tree_insert(&cache->rb_root, upper->bytenr,
1036 &upper->rb_node); 1051 &upper->rb_node);
1037 BUG_ON(rb_node); 1052 if (rb_node)
1053 backref_tree_panic(rb_node, -EEXIST,
1054 upper->bytenr);
1038 } 1055 }
1039 1056
1040 list_add_tail(&edge->list[UPPER], &upper->lower); 1057 list_add_tail(&edge->list[UPPER], &upper->lower);
@@ -1180,7 +1197,8 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
1180 1197
1181 rb_node = tree_insert(&cache->rb_root, new_node->bytenr, 1198 rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
1182 &new_node->rb_node); 1199 &new_node->rb_node);
1183 BUG_ON(rb_node); 1200 if (rb_node)
1201 backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
1184 1202
1185 if (!new_node->lowest) { 1203 if (!new_node->lowest) {
1186 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) { 1204 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
@@ -1203,14 +1221,15 @@ fail:
1203/* 1221/*
1204 * helper to add 'address of tree root -> reloc tree' mapping 1222 * helper to add 'address of tree root -> reloc tree' mapping
1205 */ 1223 */
1206static int __add_reloc_root(struct btrfs_root *root) 1224static int __must_check __add_reloc_root(struct btrfs_root *root)
1207{ 1225{
1208 struct rb_node *rb_node; 1226 struct rb_node *rb_node;
1209 struct mapping_node *node; 1227 struct mapping_node *node;
1210 struct reloc_control *rc = root->fs_info->reloc_ctl; 1228 struct reloc_control *rc = root->fs_info->reloc_ctl;
1211 1229
1212 node = kmalloc(sizeof(*node), GFP_NOFS); 1230 node = kmalloc(sizeof(*node), GFP_NOFS);
1213 BUG_ON(!node); 1231 if (!node)
1232 return -ENOMEM;
1214 1233
1215 node->bytenr = root->node->start; 1234 node->bytenr = root->node->start;
1216 node->data = root; 1235 node->data = root;
@@ -1219,7 +1238,12 @@ static int __add_reloc_root(struct btrfs_root *root)
1219 rb_node = tree_insert(&rc->reloc_root_tree.rb_root, 1238 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1220 node->bytenr, &node->rb_node); 1239 node->bytenr, &node->rb_node);
1221 spin_unlock(&rc->reloc_root_tree.lock); 1240 spin_unlock(&rc->reloc_root_tree.lock);
1222 BUG_ON(rb_node); 1241 if (rb_node) {
1242 kfree(node);
1243 btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
1244 "for start=%llu while inserting into relocation "
1245 "tree\n");
1246 }
1223 1247
1224 list_add_tail(&root->root_list, &rc->reloc_roots); 1248 list_add_tail(&root->root_list, &rc->reloc_roots);
1225 return 0; 1249 return 0;
@@ -1252,7 +1276,8 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
1252 rb_node = tree_insert(&rc->reloc_root_tree.rb_root, 1276 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1253 node->bytenr, &node->rb_node); 1277 node->bytenr, &node->rb_node);
1254 spin_unlock(&rc->reloc_root_tree.lock); 1278 spin_unlock(&rc->reloc_root_tree.lock);
1255 BUG_ON(rb_node); 1279 if (rb_node)
1280 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1256 } else { 1281 } else {
1257 list_del_init(&root->root_list); 1282 list_del_init(&root->root_list);
1258 kfree(node); 1283 kfree(node);
@@ -1334,6 +1359,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
1334 struct btrfs_root *reloc_root; 1359 struct btrfs_root *reloc_root;
1335 struct reloc_control *rc = root->fs_info->reloc_ctl; 1360 struct reloc_control *rc = root->fs_info->reloc_ctl;
1336 int clear_rsv = 0; 1361 int clear_rsv = 0;
1362 int ret;
1337 1363
1338 if (root->reloc_root) { 1364 if (root->reloc_root) {
1339 reloc_root = root->reloc_root; 1365 reloc_root = root->reloc_root;
@@ -1353,7 +1379,8 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
1353 if (clear_rsv) 1379 if (clear_rsv)
1354 trans->block_rsv = NULL; 1380 trans->block_rsv = NULL;
1355 1381
1356 __add_reloc_root(reloc_root); 1382 ret = __add_reloc_root(reloc_root);
1383 BUG_ON(ret < 0);
1357 root->reloc_root = reloc_root; 1384 root->reloc_root = reloc_root;
1358 return 0; 1385 return 0;
1359} 1386}
@@ -1577,15 +1604,14 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
1577 WARN_ON(!IS_ALIGNED(end, root->sectorsize)); 1604 WARN_ON(!IS_ALIGNED(end, root->sectorsize));
1578 end--; 1605 end--;
1579 ret = try_lock_extent(&BTRFS_I(inode)->io_tree, 1606 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
1580 key.offset, end, 1607 key.offset, end);
1581 GFP_NOFS);
1582 if (!ret) 1608 if (!ret)
1583 continue; 1609 continue;
1584 1610
1585 btrfs_drop_extent_cache(inode, key.offset, end, 1611 btrfs_drop_extent_cache(inode, key.offset, end,
1586 1); 1612 1);
1587 unlock_extent(&BTRFS_I(inode)->io_tree, 1613 unlock_extent(&BTRFS_I(inode)->io_tree,
1588 key.offset, end, GFP_NOFS); 1614 key.offset, end);
1589 } 1615 }
1590 } 1616 }
1591 1617
@@ -1956,9 +1982,9 @@ static int invalidate_extent_cache(struct btrfs_root *root,
1956 } 1982 }
1957 1983
1958 /* the lock_extent waits for readpage to complete */ 1984 /* the lock_extent waits for readpage to complete */
1959 lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 1985 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
1960 btrfs_drop_extent_cache(inode, start, end, 1); 1986 btrfs_drop_extent_cache(inode, start, end, 1);
1961 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 1987 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
1962 } 1988 }
1963 return 0; 1989 return 0;
1964} 1990}
@@ -2246,7 +2272,8 @@ again:
2246 } else { 2272 } else {
2247 list_del_init(&reloc_root->root_list); 2273 list_del_init(&reloc_root->root_list);
2248 } 2274 }
2249 btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1); 2275 ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
2276 BUG_ON(ret < 0);
2250 } 2277 }
2251 2278
2252 if (found) { 2279 if (found) {
@@ -2862,12 +2889,12 @@ int prealloc_file_extent_cluster(struct inode *inode,
2862 else 2889 else
2863 end = cluster->end - offset; 2890 end = cluster->end - offset;
2864 2891
2865 lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2892 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
2866 num_bytes = end + 1 - start; 2893 num_bytes = end + 1 - start;
2867 ret = btrfs_prealloc_file_range(inode, 0, start, 2894 ret = btrfs_prealloc_file_range(inode, 0, start,
2868 num_bytes, num_bytes, 2895 num_bytes, num_bytes,
2869 end + 1, &alloc_hint); 2896 end + 1, &alloc_hint);
2870 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2897 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
2871 if (ret) 2898 if (ret)
2872 break; 2899 break;
2873 nr++; 2900 nr++;
@@ -2899,7 +2926,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
2899 em->bdev = root->fs_info->fs_devices->latest_bdev; 2926 em->bdev = root->fs_info->fs_devices->latest_bdev;
2900 set_bit(EXTENT_FLAG_PINNED, &em->flags); 2927 set_bit(EXTENT_FLAG_PINNED, &em->flags);
2901 2928
2902 lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2929 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
2903 while (1) { 2930 while (1) {
2904 write_lock(&em_tree->lock); 2931 write_lock(&em_tree->lock);
2905 ret = add_extent_mapping(em_tree, em); 2932 ret = add_extent_mapping(em_tree, em);
@@ -2910,7 +2937,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
2910 } 2937 }
2911 btrfs_drop_extent_cache(inode, start, end, 0); 2938 btrfs_drop_extent_cache(inode, start, end, 0);
2912 } 2939 }
2913 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); 2940 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
2914 return ret; 2941 return ret;
2915} 2942}
2916 2943
@@ -2990,8 +3017,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
2990 page_start = (u64)page->index << PAGE_CACHE_SHIFT; 3017 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2991 page_end = page_start + PAGE_CACHE_SIZE - 1; 3018 page_end = page_start + PAGE_CACHE_SIZE - 1;
2992 3019
2993 lock_extent(&BTRFS_I(inode)->io_tree, 3020 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
2994 page_start, page_end, GFP_NOFS);
2995 3021
2996 set_page_extent_mapped(page); 3022 set_page_extent_mapped(page);
2997 3023
@@ -3007,7 +3033,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
3007 set_page_dirty(page); 3033 set_page_dirty(page);
3008 3034
3009 unlock_extent(&BTRFS_I(inode)->io_tree, 3035 unlock_extent(&BTRFS_I(inode)->io_tree,
3010 page_start, page_end, GFP_NOFS); 3036 page_start, page_end);
3011 unlock_page(page); 3037 unlock_page(page);
3012 page_cache_release(page); 3038 page_cache_release(page);
3013 3039
@@ -3154,7 +3180,8 @@ static int add_tree_block(struct reloc_control *rc,
3154 block->key_ready = 0; 3180 block->key_ready = 0;
3155 3181
3156 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node); 3182 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
3157 BUG_ON(rb_node); 3183 if (rb_node)
3184 backref_tree_panic(rb_node, -EEXIST, block->bytenr);
3158 3185
3159 return 0; 3186 return 0;
3160} 3187}
@@ -3426,7 +3453,9 @@ static int find_data_references(struct reloc_control *rc,
3426 block->key_ready = 1; 3453 block->key_ready = 1;
3427 rb_node = tree_insert(blocks, block->bytenr, 3454 rb_node = tree_insert(blocks, block->bytenr,
3428 &block->rb_node); 3455 &block->rb_node);
3429 BUG_ON(rb_node); 3456 if (rb_node)
3457 backref_tree_panic(rb_node, -EEXIST,
3458 block->bytenr);
3430 } 3459 }
3431 if (counted) 3460 if (counted)
3432 added = 1; 3461 added = 1;
@@ -4073,10 +4102,11 @@ out:
4073static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) 4102static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4074{ 4103{
4075 struct btrfs_trans_handle *trans; 4104 struct btrfs_trans_handle *trans;
4076 int ret; 4105 int ret, err;
4077 4106
4078 trans = btrfs_start_transaction(root->fs_info->tree_root, 0); 4107 trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
4079 BUG_ON(IS_ERR(trans)); 4108 if (IS_ERR(trans))
4109 return PTR_ERR(trans);
4080 4110
4081 memset(&root->root_item.drop_progress, 0, 4111 memset(&root->root_item.drop_progress, 0,
4082 sizeof(root->root_item.drop_progress)); 4112 sizeof(root->root_item.drop_progress));
@@ -4084,11 +4114,11 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4084 btrfs_set_root_refs(&root->root_item, 0); 4114 btrfs_set_root_refs(&root->root_item, 0);
4085 ret = btrfs_update_root(trans, root->fs_info->tree_root, 4115 ret = btrfs_update_root(trans, root->fs_info->tree_root,
4086 &root->root_key, &root->root_item); 4116 &root->root_key, &root->root_item);
4087 BUG_ON(ret);
4088 4117
4089 ret = btrfs_end_transaction(trans, root->fs_info->tree_root); 4118 err = btrfs_end_transaction(trans, root->fs_info->tree_root);
4090 BUG_ON(ret); 4119 if (err)
4091 return 0; 4120 return err;
4121 return ret;
4092} 4122}
4093 4123
4094/* 4124/*
@@ -4156,7 +4186,11 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4156 err = ret; 4186 err = ret;
4157 goto out; 4187 goto out;
4158 } 4188 }
4159 mark_garbage_root(reloc_root); 4189 ret = mark_garbage_root(reloc_root);
4190 if (ret < 0) {
4191 err = ret;
4192 goto out;
4193 }
4160 } 4194 }
4161 } 4195 }
4162 4196
@@ -4202,13 +4236,19 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4202 4236
4203 fs_root = read_fs_root(root->fs_info, 4237 fs_root = read_fs_root(root->fs_info,
4204 reloc_root->root_key.offset); 4238 reloc_root->root_key.offset);
4205 BUG_ON(IS_ERR(fs_root)); 4239 if (IS_ERR(fs_root)) {
4240 err = PTR_ERR(fs_root);
4241 goto out_free;
4242 }
4206 4243
4207 __add_reloc_root(reloc_root); 4244 err = __add_reloc_root(reloc_root);
4245 BUG_ON(err < 0); /* -ENOMEM or logic error */
4208 fs_root->reloc_root = reloc_root; 4246 fs_root->reloc_root = reloc_root;
4209 } 4247 }
4210 4248
4211 btrfs_commit_transaction(trans, rc->extent_root); 4249 err = btrfs_commit_transaction(trans, rc->extent_root);
4250 if (err)
4251 goto out_free;
4212 4252
4213 merge_reloc_roots(rc); 4253 merge_reloc_roots(rc);
4214 4254
@@ -4218,7 +4258,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4218 if (IS_ERR(trans)) 4258 if (IS_ERR(trans))
4219 err = PTR_ERR(trans); 4259 err = PTR_ERR(trans);
4220 else 4260 else
4221 btrfs_commit_transaction(trans, rc->extent_root); 4261 err = btrfs_commit_transaction(trans, rc->extent_root);
4222out_free: 4262out_free:
4223 kfree(rc); 4263 kfree(rc);
4224out: 4264out:
@@ -4267,6 +4307,8 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
4267 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt; 4307 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
4268 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr, 4308 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
4269 disk_bytenr + len - 1, &list, 0); 4309 disk_bytenr + len - 1, &list, 0);
4310 if (ret)
4311 goto out;
4270 4312
4271 while (!list_empty(&list)) { 4313 while (!list_empty(&list)) {
4272 sums = list_entry(list.next, struct btrfs_ordered_sum, list); 4314 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
@@ -4284,6 +4326,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
4284 4326
4285 btrfs_add_ordered_sum(inode, ordered, sums); 4327 btrfs_add_ordered_sum(inode, ordered, sums);
4286 } 4328 }
4329out:
4287 btrfs_put_ordered_extent(ordered); 4330 btrfs_put_ordered_extent(ordered);
4288 return ret; 4331 return ret;
4289} 4332}
@@ -4380,7 +4423,7 @@ void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
4380 * called after snapshot is created. migrate block reservation 4423 * called after snapshot is created. migrate block reservation
4381 * and create reloc root for the newly created snapshot 4424 * and create reloc root for the newly created snapshot
4382 */ 4425 */
4383void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, 4426int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4384 struct btrfs_pending_snapshot *pending) 4427 struct btrfs_pending_snapshot *pending)
4385{ 4428{
4386 struct btrfs_root *root = pending->root; 4429 struct btrfs_root *root = pending->root;
@@ -4390,7 +4433,7 @@ void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4390 int ret; 4433 int ret;
4391 4434
4392 if (!root->reloc_root) 4435 if (!root->reloc_root)
4393 return; 4436 return 0;
4394 4437
4395 rc = root->fs_info->reloc_ctl; 4438 rc = root->fs_info->reloc_ctl;
4396 rc->merging_rsv_size += rc->nodes_relocated; 4439 rc->merging_rsv_size += rc->nodes_relocated;
@@ -4399,18 +4442,21 @@ void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4399 ret = btrfs_block_rsv_migrate(&pending->block_rsv, 4442 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4400 rc->block_rsv, 4443 rc->block_rsv,
4401 rc->nodes_relocated); 4444 rc->nodes_relocated);
4402 BUG_ON(ret); 4445 if (ret)
4446 return ret;
4403 } 4447 }
4404 4448
4405 new_root = pending->snap; 4449 new_root = pending->snap;
4406 reloc_root = create_reloc_root(trans, root->reloc_root, 4450 reloc_root = create_reloc_root(trans, root->reloc_root,
4407 new_root->root_key.objectid); 4451 new_root->root_key.objectid);
4452 if (IS_ERR(reloc_root))
4453 return PTR_ERR(reloc_root);
4408 4454
4409 __add_reloc_root(reloc_root); 4455 ret = __add_reloc_root(reloc_root);
4456 BUG_ON(ret < 0);
4410 new_root->reloc_root = reloc_root; 4457 new_root->reloc_root = reloc_root;
4411 4458
4412 if (rc->create_reloc_tree) { 4459 if (rc->create_reloc_tree)
4413 ret = clone_backref_node(trans, rc, root, reloc_root); 4460 ret = clone_backref_node(trans, rc, root, reloc_root);
4414 BUG_ON(ret); 4461 return ret;
4415 }
4416} 4462}
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index f4099904565a..24fb8ce4e071 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -93,10 +93,14 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
93 unsigned long ptr; 93 unsigned long ptr;
94 94
95 path = btrfs_alloc_path(); 95 path = btrfs_alloc_path();
96 BUG_ON(!path); 96 if (!path)
97 return -ENOMEM;
98
97 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 99 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
98 if (ret < 0) 100 if (ret < 0) {
101 btrfs_abort_transaction(trans, root, ret);
99 goto out; 102 goto out;
103 }
100 104
101 if (ret != 0) { 105 if (ret != 0) {
102 btrfs_print_leaf(root, path->nodes[0]); 106 btrfs_print_leaf(root, path->nodes[0]);
@@ -116,13 +120,10 @@ out:
116 return ret; 120 return ret;
117} 121}
118 122
119int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root 123int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
120 *root, struct btrfs_key *key, struct btrfs_root_item 124 struct btrfs_key *key, struct btrfs_root_item *item)
121 *item)
122{ 125{
123 int ret; 126 return btrfs_insert_item(trans, root, key, item, sizeof(*item));
124 ret = btrfs_insert_item(trans, root, key, item, sizeof(*item));
125 return ret;
126} 127}
127 128
128/* 129/*
@@ -384,6 +385,8 @@ int btrfs_find_root_ref(struct btrfs_root *tree_root,
384 * 385 *
385 * For a back ref the root_id is the id of the subvol or snapshot and 386 * For a back ref the root_id is the id of the subvol or snapshot and
386 * ref_id is the id of the tree referencing it. 387 * ref_id is the id of the tree referencing it.
388 *
389 * Will return 0, -ENOMEM, or anything from the CoW path
387 */ 390 */
388int btrfs_add_root_ref(struct btrfs_trans_handle *trans, 391int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
389 struct btrfs_root *tree_root, 392 struct btrfs_root *tree_root,
@@ -407,7 +410,11 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
407again: 410again:
408 ret = btrfs_insert_empty_item(trans, tree_root, path, &key, 411 ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
409 sizeof(*ref) + name_len); 412 sizeof(*ref) + name_len);
410 BUG_ON(ret); 413 if (ret) {
414 btrfs_abort_transaction(trans, tree_root, ret);
415 btrfs_free_path(path);
416 return ret;
417 }
411 418
412 leaf = path->nodes[0]; 419 leaf = path->nodes[0];
413 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 420 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 390e7102b0ff..90acc82046c3 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -36,37 +36,30 @@
36 * Future enhancements: 36 * Future enhancements:
37 * - In case an unrepairable extent is encountered, track which files are 37 * - In case an unrepairable extent is encountered, track which files are
38 * affected and report them 38 * affected and report them
39 * - In case of a read error on files with nodatasum, map the file and read
40 * the extent to trigger a writeback of the good copy
41 * - track and record media errors, throw out bad devices 39 * - track and record media errors, throw out bad devices
42 * - add a mode to also read unallocated space 40 * - add a mode to also read unallocated space
43 */ 41 */
44 42
45struct scrub_bio; 43struct scrub_block;
46struct scrub_page;
47struct scrub_dev; 44struct scrub_dev;
48static void scrub_bio_end_io(struct bio *bio, int err);
49static void scrub_checksum(struct btrfs_work *work);
50static int scrub_checksum_data(struct scrub_dev *sdev,
51 struct scrub_page *spag, void *buffer);
52static int scrub_checksum_tree_block(struct scrub_dev *sdev,
53 struct scrub_page *spag, u64 logical,
54 void *buffer);
55static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer);
56static int scrub_fixup_check(struct scrub_bio *sbio, int ix);
57static void scrub_fixup_end_io(struct bio *bio, int err);
58static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
59 struct page *page);
60static void scrub_fixup(struct scrub_bio *sbio, int ix);
61 45
62#define SCRUB_PAGES_PER_BIO 16 /* 64k per bio */ 46#define SCRUB_PAGES_PER_BIO 16 /* 64k per bio */
63#define SCRUB_BIOS_PER_DEV 16 /* 1 MB per device in flight */ 47#define SCRUB_BIOS_PER_DEV 16 /* 1 MB per device in flight */
48#define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */
64 49
65struct scrub_page { 50struct scrub_page {
51 struct scrub_block *sblock;
52 struct page *page;
53 struct block_device *bdev;
66 u64 flags; /* extent flags */ 54 u64 flags; /* extent flags */
67 u64 generation; 55 u64 generation;
68 int mirror_num; 56 u64 logical;
69 int have_csum; 57 u64 physical;
58 struct {
59 unsigned int mirror_num:8;
60 unsigned int have_csum:1;
61 unsigned int io_error:1;
62 };
70 u8 csum[BTRFS_CSUM_SIZE]; 63 u8 csum[BTRFS_CSUM_SIZE];
71}; 64};
72 65
@@ -77,12 +70,25 @@ struct scrub_bio {
77 int err; 70 int err;
78 u64 logical; 71 u64 logical;
79 u64 physical; 72 u64 physical;
80 struct scrub_page spag[SCRUB_PAGES_PER_BIO]; 73 struct scrub_page *pagev[SCRUB_PAGES_PER_BIO];
81 u64 count; 74 int page_count;
82 int next_free; 75 int next_free;
83 struct btrfs_work work; 76 struct btrfs_work work;
84}; 77};
85 78
79struct scrub_block {
80 struct scrub_page pagev[SCRUB_MAX_PAGES_PER_BLOCK];
81 int page_count;
82 atomic_t outstanding_pages;
83 atomic_t ref_count; /* free mem on transition to zero */
84 struct scrub_dev *sdev;
85 struct {
86 unsigned int header_error:1;
87 unsigned int checksum_error:1;
88 unsigned int no_io_error_seen:1;
89 };
90};
91
86struct scrub_dev { 92struct scrub_dev {
87 struct scrub_bio *bios[SCRUB_BIOS_PER_DEV]; 93 struct scrub_bio *bios[SCRUB_BIOS_PER_DEV];
88 struct btrfs_device *dev; 94 struct btrfs_device *dev;
@@ -96,6 +102,10 @@ struct scrub_dev {
96 struct list_head csum_list; 102 struct list_head csum_list;
97 atomic_t cancel_req; 103 atomic_t cancel_req;
98 int readonly; 104 int readonly;
105 int pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
106 u32 sectorsize;
107 u32 nodesize;
108 u32 leafsize;
99 /* 109 /*
100 * statistics 110 * statistics
101 */ 111 */
@@ -124,6 +134,43 @@ struct scrub_warning {
124 int scratch_bufsize; 134 int scratch_bufsize;
125}; 135};
126 136
137
138static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
139static int scrub_setup_recheck_block(struct scrub_dev *sdev,
140 struct btrfs_mapping_tree *map_tree,
141 u64 length, u64 logical,
142 struct scrub_block *sblock);
143static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
144 struct scrub_block *sblock, int is_metadata,
145 int have_csum, u8 *csum, u64 generation,
146 u16 csum_size);
147static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
148 struct scrub_block *sblock,
149 int is_metadata, int have_csum,
150 const u8 *csum, u64 generation,
151 u16 csum_size);
152static void scrub_complete_bio_end_io(struct bio *bio, int err);
153static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
154 struct scrub_block *sblock_good,
155 int force_write);
156static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
157 struct scrub_block *sblock_good,
158 int page_num, int force_write);
159static int scrub_checksum_data(struct scrub_block *sblock);
160static int scrub_checksum_tree_block(struct scrub_block *sblock);
161static int scrub_checksum_super(struct scrub_block *sblock);
162static void scrub_block_get(struct scrub_block *sblock);
163static void scrub_block_put(struct scrub_block *sblock);
164static int scrub_add_page_to_bio(struct scrub_dev *sdev,
165 struct scrub_page *spage);
166static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
167 u64 physical, u64 flags, u64 gen, int mirror_num,
168 u8 *csum, int force);
169static void scrub_bio_end_io(struct bio *bio, int err);
170static void scrub_bio_end_io_worker(struct btrfs_work *work);
171static void scrub_block_complete(struct scrub_block *sblock);
172
173
127static void scrub_free_csums(struct scrub_dev *sdev) 174static void scrub_free_csums(struct scrub_dev *sdev)
128{ 175{
129 while (!list_empty(&sdev->csum_list)) { 176 while (!list_empty(&sdev->csum_list)) {
@@ -135,23 +182,6 @@ static void scrub_free_csums(struct scrub_dev *sdev)
135 } 182 }
136} 183}
137 184
138static void scrub_free_bio(struct bio *bio)
139{
140 int i;
141 struct page *last_page = NULL;
142
143 if (!bio)
144 return;
145
146 for (i = 0; i < bio->bi_vcnt; ++i) {
147 if (bio->bi_io_vec[i].bv_page == last_page)
148 continue;
149 last_page = bio->bi_io_vec[i].bv_page;
150 __free_page(last_page);
151 }
152 bio_put(bio);
153}
154
155static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev) 185static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
156{ 186{
157 int i; 187 int i;
@@ -159,13 +189,23 @@ static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
159 if (!sdev) 189 if (!sdev)
160 return; 190 return;
161 191
192 /* this can happen when scrub is cancelled */
193 if (sdev->curr != -1) {
194 struct scrub_bio *sbio = sdev->bios[sdev->curr];
195
196 for (i = 0; i < sbio->page_count; i++) {
197 BUG_ON(!sbio->pagev[i]);
198 BUG_ON(!sbio->pagev[i]->page);
199 scrub_block_put(sbio->pagev[i]->sblock);
200 }
201 bio_put(sbio->bio);
202 }
203
162 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { 204 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
163 struct scrub_bio *sbio = sdev->bios[i]; 205 struct scrub_bio *sbio = sdev->bios[i];
164 206
165 if (!sbio) 207 if (!sbio)
166 break; 208 break;
167
168 scrub_free_bio(sbio->bio);
169 kfree(sbio); 209 kfree(sbio);
170 } 210 }
171 211
@@ -179,11 +219,16 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
179 struct scrub_dev *sdev; 219 struct scrub_dev *sdev;
180 int i; 220 int i;
181 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; 221 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
222 int pages_per_bio;
182 223
224 pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
225 bio_get_nr_vecs(dev->bdev));
183 sdev = kzalloc(sizeof(*sdev), GFP_NOFS); 226 sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
184 if (!sdev) 227 if (!sdev)
185 goto nomem; 228 goto nomem;
186 sdev->dev = dev; 229 sdev->dev = dev;
230 sdev->pages_per_bio = pages_per_bio;
231 sdev->curr = -1;
187 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { 232 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
188 struct scrub_bio *sbio; 233 struct scrub_bio *sbio;
189 234
@@ -194,8 +239,8 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
194 239
195 sbio->index = i; 240 sbio->index = i;
196 sbio->sdev = sdev; 241 sbio->sdev = sdev;
197 sbio->count = 0; 242 sbio->page_count = 0;
198 sbio->work.func = scrub_checksum; 243 sbio->work.func = scrub_bio_end_io_worker;
199 244
200 if (i != SCRUB_BIOS_PER_DEV-1) 245 if (i != SCRUB_BIOS_PER_DEV-1)
201 sdev->bios[i]->next_free = i + 1; 246 sdev->bios[i]->next_free = i + 1;
@@ -203,7 +248,9 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
203 sdev->bios[i]->next_free = -1; 248 sdev->bios[i]->next_free = -1;
204 } 249 }
205 sdev->first_free = 0; 250 sdev->first_free = 0;
206 sdev->curr = -1; 251 sdev->nodesize = dev->dev_root->nodesize;
252 sdev->leafsize = dev->dev_root->leafsize;
253 sdev->sectorsize = dev->dev_root->sectorsize;
207 atomic_set(&sdev->in_flight, 0); 254 atomic_set(&sdev->in_flight, 0);
208 atomic_set(&sdev->fixup_cnt, 0); 255 atomic_set(&sdev->fixup_cnt, 0);
209 atomic_set(&sdev->cancel_req, 0); 256 atomic_set(&sdev->cancel_req, 0);
@@ -294,10 +341,9 @@ err:
294 return 0; 341 return 0;
295} 342}
296 343
297static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio, 344static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
298 int ix)
299{ 345{
300 struct btrfs_device *dev = sbio->sdev->dev; 346 struct btrfs_device *dev = sblock->sdev->dev;
301 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; 347 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
302 struct btrfs_path *path; 348 struct btrfs_path *path;
303 struct btrfs_key found_key; 349 struct btrfs_key found_key;
@@ -316,8 +362,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
316 362
317 swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS); 363 swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
318 swarn.msg_buf = kmalloc(bufsize, GFP_NOFS); 364 swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
319 swarn.sector = (sbio->physical + ix * PAGE_SIZE) >> 9; 365 BUG_ON(sblock->page_count < 1);
320 swarn.logical = sbio->logical + ix * PAGE_SIZE; 366 swarn.sector = (sblock->pagev[0].physical) >> 9;
367 swarn.logical = sblock->pagev[0].logical;
321 swarn.errstr = errstr; 368 swarn.errstr = errstr;
322 swarn.dev = dev; 369 swarn.dev = dev;
323 swarn.msg_bufsize = bufsize; 370 swarn.msg_bufsize = bufsize;
@@ -342,7 +389,8 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
342 do { 389 do {
343 ret = tree_backref_for_extent(&ptr, eb, ei, item_size, 390 ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
344 &ref_root, &ref_level); 391 &ref_root, &ref_level);
345 printk(KERN_WARNING "%s at logical %llu on dev %s, " 392 printk(KERN_WARNING
393 "btrfs: %s at logical %llu on dev %s, "
346 "sector %llu: metadata %s (level %d) in tree " 394 "sector %llu: metadata %s (level %d) in tree "
347 "%llu\n", errstr, swarn.logical, dev->name, 395 "%llu\n", errstr, swarn.logical, dev->name,
348 (unsigned long long)swarn.sector, 396 (unsigned long long)swarn.sector,
@@ -352,8 +400,8 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
352 } while (ret != 1); 400 } while (ret != 1);
353 } else { 401 } else {
354 swarn.path = path; 402 swarn.path = path;
355 iterate_extent_inodes(fs_info, path, found_key.objectid, 403 iterate_extent_inodes(fs_info, found_key.objectid,
356 extent_item_pos, 404 extent_item_pos, 1,
357 scrub_print_warning_inode, &swarn); 405 scrub_print_warning_inode, &swarn);
358 } 406 }
359 407
@@ -531,9 +579,9 @@ out:
531 spin_lock(&sdev->stat_lock); 579 spin_lock(&sdev->stat_lock);
532 ++sdev->stat.uncorrectable_errors; 580 ++sdev->stat.uncorrectable_errors;
533 spin_unlock(&sdev->stat_lock); 581 spin_unlock(&sdev->stat_lock);
534 printk_ratelimited(KERN_ERR "btrfs: unable to fixup " 582 printk_ratelimited(KERN_ERR
535 "(nodatasum) error at logical %llu\n", 583 "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
536 fixup->logical); 584 (unsigned long long)fixup->logical, sdev->dev->name);
537 } 585 }
538 586
539 btrfs_free_path(path); 587 btrfs_free_path(path);
@@ -550,91 +598,168 @@ out:
550} 598}
551 599
552/* 600/*
553 * scrub_recheck_error gets called when either verification of the page 601 * scrub_handle_errored_block gets called when either verification of the
554 * failed or the bio failed to read, e.g. with EIO. In the latter case, 602 * pages failed or the bio failed to read, e.g. with EIO. In the latter
555 * recheck_error gets called for every page in the bio, even though only 603 * case, this function handles all pages in the bio, even though only one
556 * one may be bad 604 * may be bad.
605 * The goal of this function is to repair the errored block by using the
606 * contents of one of the mirrors.
557 */ 607 */
558static int scrub_recheck_error(struct scrub_bio *sbio, int ix) 608static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
559{ 609{
560 struct scrub_dev *sdev = sbio->sdev; 610 struct scrub_dev *sdev = sblock_to_check->sdev;
561 u64 sector = (sbio->physical + ix * PAGE_SIZE) >> 9; 611 struct btrfs_fs_info *fs_info;
612 u64 length;
613 u64 logical;
614 u64 generation;
615 unsigned int failed_mirror_index;
616 unsigned int is_metadata;
617 unsigned int have_csum;
618 u8 *csum;
619 struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
620 struct scrub_block *sblock_bad;
621 int ret;
622 int mirror_index;
623 int page_num;
624 int success;
562 static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, 625 static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
563 DEFAULT_RATELIMIT_BURST); 626 DEFAULT_RATELIMIT_BURST);
627
628 BUG_ON(sblock_to_check->page_count < 1);
629 fs_info = sdev->dev->dev_root->fs_info;
630 length = sblock_to_check->page_count * PAGE_SIZE;
631 logical = sblock_to_check->pagev[0].logical;
632 generation = sblock_to_check->pagev[0].generation;
633 BUG_ON(sblock_to_check->pagev[0].mirror_num < 1);
634 failed_mirror_index = sblock_to_check->pagev[0].mirror_num - 1;
635 is_metadata = !(sblock_to_check->pagev[0].flags &
636 BTRFS_EXTENT_FLAG_DATA);
637 have_csum = sblock_to_check->pagev[0].have_csum;
638 csum = sblock_to_check->pagev[0].csum;
564 639
565 if (sbio->err) { 640 /*
566 if (scrub_fixup_io(READ, sbio->sdev->dev->bdev, sector, 641 * read all mirrors one after the other. This includes to
567 sbio->bio->bi_io_vec[ix].bv_page) == 0) { 642 * re-read the extent or metadata block that failed (that was
568 if (scrub_fixup_check(sbio, ix) == 0) 643 * the cause that this fixup code is called) another time,
569 return 0; 644 * page by page this time in order to know which pages
570 } 645 * caused I/O errors and which ones are good (for all mirrors).
571 if (__ratelimit(&_rs)) 646 * It is the goal to handle the situation when more than one
572 scrub_print_warning("i/o error", sbio, ix); 647 * mirror contains I/O errors, but the errors do not
573 } else { 648 * overlap, i.e. the data can be repaired by selecting the
574 if (__ratelimit(&_rs)) 649 * pages from those mirrors without I/O error on the
575 scrub_print_warning("checksum error", sbio, ix); 650 * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
651 * would be that mirror #1 has an I/O error on the first page,
652 * the second page is good, and mirror #2 has an I/O error on
653 * the second page, but the first page is good.
654 * Then the first page of the first mirror can be repaired by
655 * taking the first page of the second mirror, and the
656 * second page of the second mirror can be repaired by
657 * copying the contents of the 2nd page of the 1st mirror.
658 * One more note: if the pages of one mirror contain I/O
659 * errors, the checksum cannot be verified. In order to get
660 * the best data for repairing, the first attempt is to find
661 * a mirror without I/O errors and with a validated checksum.
662 * Only if this is not possible, the pages are picked from
663 * mirrors with I/O errors without considering the checksum.
664 * If the latter is the case, at the end, the checksum of the
665 * repaired area is verified in order to correctly maintain
666 * the statistics.
667 */
668
669 sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
670 sizeof(*sblocks_for_recheck),
671 GFP_NOFS);
672 if (!sblocks_for_recheck) {
673 spin_lock(&sdev->stat_lock);
674 sdev->stat.malloc_errors++;
675 sdev->stat.read_errors++;
676 sdev->stat.uncorrectable_errors++;
677 spin_unlock(&sdev->stat_lock);
678 goto out;
576 } 679 }
577 680
578 spin_lock(&sdev->stat_lock); 681 /* setup the context, map the logical blocks and alloc the pages */
579 ++sdev->stat.read_errors; 682 ret = scrub_setup_recheck_block(sdev, &fs_info->mapping_tree, length,
580 spin_unlock(&sdev->stat_lock); 683 logical, sblocks_for_recheck);
684 if (ret) {
685 spin_lock(&sdev->stat_lock);
686 sdev->stat.read_errors++;
687 sdev->stat.uncorrectable_errors++;
688 spin_unlock(&sdev->stat_lock);
689 goto out;
690 }
691 BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
692 sblock_bad = sblocks_for_recheck + failed_mirror_index;
581 693
582 scrub_fixup(sbio, ix); 694 /* build and submit the bios for the failed mirror, check checksums */
583 return 1; 695 ret = scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
584} 696 csum, generation, sdev->csum_size);
697 if (ret) {
698 spin_lock(&sdev->stat_lock);
699 sdev->stat.read_errors++;
700 sdev->stat.uncorrectable_errors++;
701 spin_unlock(&sdev->stat_lock);
702 goto out;
703 }
585 704
586static int scrub_fixup_check(struct scrub_bio *sbio, int ix) 705 if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
587{ 706 sblock_bad->no_io_error_seen) {
588 int ret = 1; 707 /*
589 struct page *page; 708 * the error disappeared after reading page by page, or
590 void *buffer; 709 * the area was part of a huge bio and other parts of the
591 u64 flags = sbio->spag[ix].flags; 710 * bio caused I/O errors, or the block layer merged several
711 * read requests into one and the error is caused by a
712 * different bio (usually one of the two latter cases is
713 * the cause)
714 */
715 spin_lock(&sdev->stat_lock);
716 sdev->stat.unverified_errors++;
717 spin_unlock(&sdev->stat_lock);
592 718
593 page = sbio->bio->bi_io_vec[ix].bv_page; 719 goto out;
594 buffer = kmap_atomic(page);
595 if (flags & BTRFS_EXTENT_FLAG_DATA) {
596 ret = scrub_checksum_data(sbio->sdev,
597 sbio->spag + ix, buffer);
598 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
599 ret = scrub_checksum_tree_block(sbio->sdev,
600 sbio->spag + ix,
601 sbio->logical + ix * PAGE_SIZE,
602 buffer);
603 } else {
604 WARN_ON(1);
605 } 720 }
606 kunmap_atomic(buffer);
607 721
608 return ret; 722 if (!sblock_bad->no_io_error_seen) {
609} 723 spin_lock(&sdev->stat_lock);
724 sdev->stat.read_errors++;
725 spin_unlock(&sdev->stat_lock);
726 if (__ratelimit(&_rs))
727 scrub_print_warning("i/o error", sblock_to_check);
728 } else if (sblock_bad->checksum_error) {
729 spin_lock(&sdev->stat_lock);
730 sdev->stat.csum_errors++;
731 spin_unlock(&sdev->stat_lock);
732 if (__ratelimit(&_rs))
733 scrub_print_warning("checksum error", sblock_to_check);
734 } else if (sblock_bad->header_error) {
735 spin_lock(&sdev->stat_lock);
736 sdev->stat.verify_errors++;
737 spin_unlock(&sdev->stat_lock);
738 if (__ratelimit(&_rs))
739 scrub_print_warning("checksum/header error",
740 sblock_to_check);
741 }
610 742
611static void scrub_fixup_end_io(struct bio *bio, int err) 743 if (sdev->readonly)
612{ 744 goto did_not_correct_error;
613 complete((struct completion *)bio->bi_private);
614}
615 745
616static void scrub_fixup(struct scrub_bio *sbio, int ix) 746 if (!is_metadata && !have_csum) {
617{ 747 struct scrub_fixup_nodatasum *fixup_nodatasum;
618 struct scrub_dev *sdev = sbio->sdev; 748
619 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; 749 /*
620 struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; 750 * !is_metadata and !have_csum, this means that the data
621 struct btrfs_bio *bbio = NULL; 751 * might not be COW'ed, that it might be modified
622 struct scrub_fixup_nodatasum *fixup; 752 * concurrently. The general strategy to work on the
623 u64 logical = sbio->logical + ix * PAGE_SIZE; 753 * commit root does not help in the case when COW is not
624 u64 length; 754 * used.
625 int i; 755 */
626 int ret; 756 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
627 DECLARE_COMPLETION_ONSTACK(complete); 757 if (!fixup_nodatasum)
628 758 goto did_not_correct_error;
629 if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) && 759 fixup_nodatasum->sdev = sdev;
630 (sbio->spag[ix].have_csum == 0)) { 760 fixup_nodatasum->logical = logical;
631 fixup = kzalloc(sizeof(*fixup), GFP_NOFS); 761 fixup_nodatasum->root = fs_info->extent_root;
632 if (!fixup) 762 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
633 goto uncorrectable;
634 fixup->sdev = sdev;
635 fixup->logical = logical;
636 fixup->root = fs_info->extent_root;
637 fixup->mirror_num = sbio->spag[ix].mirror_num;
638 /* 763 /*
639 * increment scrubs_running to prevent cancel requests from 764 * increment scrubs_running to prevent cancel requests from
640 * completing as long as a fixup worker is running. we must also 765 * completing as long as a fixup worker is running. we must also
@@ -649,235 +774,528 @@ static void scrub_fixup(struct scrub_bio *sbio, int ix)
649 atomic_inc(&fs_info->scrubs_paused); 774 atomic_inc(&fs_info->scrubs_paused);
650 mutex_unlock(&fs_info->scrub_lock); 775 mutex_unlock(&fs_info->scrub_lock);
651 atomic_inc(&sdev->fixup_cnt); 776 atomic_inc(&sdev->fixup_cnt);
652 fixup->work.func = scrub_fixup_nodatasum; 777 fixup_nodatasum->work.func = scrub_fixup_nodatasum;
653 btrfs_queue_worker(&fs_info->scrub_workers, &fixup->work); 778 btrfs_queue_worker(&fs_info->scrub_workers,
654 return; 779 &fixup_nodatasum->work);
780 goto out;
655 } 781 }
656 782
657 length = PAGE_SIZE; 783 /*
658 ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length, 784 * now build and submit the bios for the other mirrors, check
659 &bbio, 0); 785 * checksums
660 if (ret || !bbio || length < PAGE_SIZE) { 786 */
661 printk(KERN_ERR 787 for (mirror_index = 0;
662 "scrub_fixup: btrfs_map_block failed us for %llu\n", 788 mirror_index < BTRFS_MAX_MIRRORS &&
663 (unsigned long long)logical); 789 sblocks_for_recheck[mirror_index].page_count > 0;
664 WARN_ON(1); 790 mirror_index++) {
665 kfree(bbio); 791 if (mirror_index == failed_mirror_index)
666 return; 792 continue;
793
794 /* build and submit the bios, check checksums */
795 ret = scrub_recheck_block(fs_info,
796 sblocks_for_recheck + mirror_index,
797 is_metadata, have_csum, csum,
798 generation, sdev->csum_size);
799 if (ret)
800 goto did_not_correct_error;
667 } 801 }
668 802
669 if (bbio->num_stripes == 1) 803 /*
670 /* there aren't any replicas */ 804 * first try to pick the mirror which is completely without I/O
671 goto uncorrectable; 805 * errors and also does not have a checksum error.
806 * If one is found, and if a checksum is present, the full block
807 * that is known to contain an error is rewritten. Afterwards
808 * the block is known to be corrected.
809 * If a mirror is found which is completely correct, and no
810 * checksum is present, only those pages are rewritten that had
811 * an I/O error in the block to be repaired, since it cannot be
812 * determined, which copy of the other pages is better (and it
813 * could happen otherwise that a correct page would be
814 * overwritten by a bad one).
815 */
816 for (mirror_index = 0;
817 mirror_index < BTRFS_MAX_MIRRORS &&
818 sblocks_for_recheck[mirror_index].page_count > 0;
819 mirror_index++) {
820 struct scrub_block *sblock_other = sblocks_for_recheck +
821 mirror_index;
822
823 if (!sblock_other->header_error &&
824 !sblock_other->checksum_error &&
825 sblock_other->no_io_error_seen) {
826 int force_write = is_metadata || have_csum;
827
828 ret = scrub_repair_block_from_good_copy(sblock_bad,
829 sblock_other,
830 force_write);
831 if (0 == ret)
832 goto corrected_error;
833 }
834 }
672 835
673 /* 836 /*
674 * first find a good copy 837 * in case of I/O errors in the area that is supposed to be
838 * repaired, continue by picking good copies of those pages.
839 * Select the good pages from mirrors to rewrite bad pages from
840 * the area to fix. Afterwards verify the checksum of the block
841 * that is supposed to be repaired. This verification step is
842 * only done for the purpose of statistic counting and for the
843 * final scrub report, whether errors remain.
844 * A perfect algorithm could make use of the checksum and try
845 * all possible combinations of pages from the different mirrors
846 * until the checksum verification succeeds. For example, when
847 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
848 * of mirror #2 is readable but the final checksum test fails,
849 * then the 2nd page of mirror #3 could be tried, whether now
850 * the final checksum succeedes. But this would be a rare
851 * exception and is therefore not implemented. At least it is
852 * avoided that the good copy is overwritten.
853 * A more useful improvement would be to pick the sectors
854 * without I/O error based on sector sizes (512 bytes on legacy
855 * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one
856 * mirror could be repaired by taking 512 byte of a different
857 * mirror, even if other 512 byte sectors in the same PAGE_SIZE
858 * area are unreadable.
675 */ 859 */
676 for (i = 0; i < bbio->num_stripes; ++i) {
677 if (i + 1 == sbio->spag[ix].mirror_num)
678 continue;
679 860
680 if (scrub_fixup_io(READ, bbio->stripes[i].dev->bdev, 861 /* can only fix I/O errors from here on */
681 bbio->stripes[i].physical >> 9, 862 if (sblock_bad->no_io_error_seen)
682 sbio->bio->bi_io_vec[ix].bv_page)) { 863 goto did_not_correct_error;
683 /* I/O-error, this is not a good copy */ 864
865 success = 1;
866 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
867 struct scrub_page *page_bad = sblock_bad->pagev + page_num;
868
869 if (!page_bad->io_error)
684 continue; 870 continue;
871
872 for (mirror_index = 0;
873 mirror_index < BTRFS_MAX_MIRRORS &&
874 sblocks_for_recheck[mirror_index].page_count > 0;
875 mirror_index++) {
876 struct scrub_block *sblock_other = sblocks_for_recheck +
877 mirror_index;
878 struct scrub_page *page_other = sblock_other->pagev +
879 page_num;
880
881 if (!page_other->io_error) {
882 ret = scrub_repair_page_from_good_copy(
883 sblock_bad, sblock_other, page_num, 0);
884 if (0 == ret) {
885 page_bad->io_error = 0;
886 break; /* succeeded for this page */
887 }
888 }
685 } 889 }
686 890
687 if (scrub_fixup_check(sbio, ix) == 0) 891 if (page_bad->io_error) {
688 break; 892 /* did not find a mirror to copy the page from */
893 success = 0;
894 }
689 } 895 }
690 if (i == bbio->num_stripes)
691 goto uncorrectable;
692 896
693 if (!sdev->readonly) { 897 if (success) {
694 /* 898 if (is_metadata || have_csum) {
695 * bi_io_vec[ix].bv_page now contains good data, write it back 899 /*
696 */ 900 * need to verify the checksum now that all
697 if (scrub_fixup_io(WRITE, sdev->dev->bdev, 901 * sectors on disk are repaired (the write
698 (sbio->physical + ix * PAGE_SIZE) >> 9, 902 * request for data to be repaired is on its way).
699 sbio->bio->bi_io_vec[ix].bv_page)) { 903 * Just be lazy and use scrub_recheck_block()
700 /* I/O-error, writeback failed, give up */ 904 * which re-reads the data before the checksum
701 goto uncorrectable; 905 * is verified, but most likely the data comes out
906 * of the page cache.
907 */
908 ret = scrub_recheck_block(fs_info, sblock_bad,
909 is_metadata, have_csum, csum,
910 generation, sdev->csum_size);
911 if (!ret && !sblock_bad->header_error &&
912 !sblock_bad->checksum_error &&
913 sblock_bad->no_io_error_seen)
914 goto corrected_error;
915 else
916 goto did_not_correct_error;
917 } else {
918corrected_error:
919 spin_lock(&sdev->stat_lock);
920 sdev->stat.corrected_errors++;
921 spin_unlock(&sdev->stat_lock);
922 printk_ratelimited(KERN_ERR
923 "btrfs: fixed up error at logical %llu on dev %s\n",
924 (unsigned long long)logical, sdev->dev->name);
702 } 925 }
926 } else {
927did_not_correct_error:
928 spin_lock(&sdev->stat_lock);
929 sdev->stat.uncorrectable_errors++;
930 spin_unlock(&sdev->stat_lock);
931 printk_ratelimited(KERN_ERR
932 "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
933 (unsigned long long)logical, sdev->dev->name);
703 } 934 }
704 935
705 kfree(bbio); 936out:
706 spin_lock(&sdev->stat_lock); 937 if (sblocks_for_recheck) {
707 ++sdev->stat.corrected_errors; 938 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
708 spin_unlock(&sdev->stat_lock); 939 mirror_index++) {
940 struct scrub_block *sblock = sblocks_for_recheck +
941 mirror_index;
942 int page_index;
943
944 for (page_index = 0; page_index < SCRUB_PAGES_PER_BIO;
945 page_index++)
946 if (sblock->pagev[page_index].page)
947 __free_page(
948 sblock->pagev[page_index].page);
949 }
950 kfree(sblocks_for_recheck);
951 }
709 952
710 printk_ratelimited(KERN_ERR "btrfs: fixed up error at logical %llu\n", 953 return 0;
711 (unsigned long long)logical); 954}
712 return;
713 955
714uncorrectable: 956static int scrub_setup_recheck_block(struct scrub_dev *sdev,
715 kfree(bbio); 957 struct btrfs_mapping_tree *map_tree,
716 spin_lock(&sdev->stat_lock); 958 u64 length, u64 logical,
717 ++sdev->stat.uncorrectable_errors; 959 struct scrub_block *sblocks_for_recheck)
718 spin_unlock(&sdev->stat_lock); 960{
961 int page_index;
962 int mirror_index;
963 int ret;
964
965 /*
966 * note: the three members sdev, ref_count and outstanding_pages
967 * are not used (and not set) in the blocks that are used for
968 * the recheck procedure
969 */
970
971 page_index = 0;
972 while (length > 0) {
973 u64 sublen = min_t(u64, length, PAGE_SIZE);
974 u64 mapped_length = sublen;
975 struct btrfs_bio *bbio = NULL;
976
977 /*
978 * with a length of PAGE_SIZE, each returned stripe
979 * represents one mirror
980 */
981 ret = btrfs_map_block(map_tree, WRITE, logical, &mapped_length,
982 &bbio, 0);
983 if (ret || !bbio || mapped_length < sublen) {
984 kfree(bbio);
985 return -EIO;
986 }
719 987
720 printk_ratelimited(KERN_ERR "btrfs: unable to fixup (regular) error at " 988 BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
721 "logical %llu\n", (unsigned long long)logical); 989 for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
990 mirror_index++) {
991 struct scrub_block *sblock;
992 struct scrub_page *page;
993
994 if (mirror_index >= BTRFS_MAX_MIRRORS)
995 continue;
996
997 sblock = sblocks_for_recheck + mirror_index;
998 page = sblock->pagev + page_index;
999 page->logical = logical;
1000 page->physical = bbio->stripes[mirror_index].physical;
1001 page->bdev = bbio->stripes[mirror_index].dev->bdev;
1002 page->mirror_num = mirror_index + 1;
1003 page->page = alloc_page(GFP_NOFS);
1004 if (!page->page) {
1005 spin_lock(&sdev->stat_lock);
1006 sdev->stat.malloc_errors++;
1007 spin_unlock(&sdev->stat_lock);
1008 return -ENOMEM;
1009 }
1010 sblock->page_count++;
1011 }
1012 kfree(bbio);
1013 length -= sublen;
1014 logical += sublen;
1015 page_index++;
1016 }
1017
1018 return 0;
722} 1019}
723 1020
724static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector, 1021/*
725 struct page *page) 1022 * this function will check the on disk data for checksum errors, header
1023 * errors and read I/O errors. If any I/O errors happen, the exact pages
1024 * which are errored are marked as being bad. The goal is to enable scrub
1025 * to take those pages that are not errored from all the mirrors so that
1026 * the pages that are errored in the just handled mirror can be repaired.
1027 */
1028static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
1029 struct scrub_block *sblock, int is_metadata,
1030 int have_csum, u8 *csum, u64 generation,
1031 u16 csum_size)
726{ 1032{
727 struct bio *bio = NULL; 1033 int page_num;
728 int ret; 1034
729 DECLARE_COMPLETION_ONSTACK(complete); 1035 sblock->no_io_error_seen = 1;
1036 sblock->header_error = 0;
1037 sblock->checksum_error = 0;
1038
1039 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1040 struct bio *bio;
1041 int ret;
1042 struct scrub_page *page = sblock->pagev + page_num;
1043 DECLARE_COMPLETION_ONSTACK(complete);
1044
1045 BUG_ON(!page->page);
1046 bio = bio_alloc(GFP_NOFS, 1);
1047 bio->bi_bdev = page->bdev;
1048 bio->bi_sector = page->physical >> 9;
1049 bio->bi_end_io = scrub_complete_bio_end_io;
1050 bio->bi_private = &complete;
1051
1052 ret = bio_add_page(bio, page->page, PAGE_SIZE, 0);
1053 if (PAGE_SIZE != ret) {
1054 bio_put(bio);
1055 return -EIO;
1056 }
1057 btrfsic_submit_bio(READ, bio);
730 1058
731 bio = bio_alloc(GFP_NOFS, 1); 1059 /* this will also unplug the queue */
732 bio->bi_bdev = bdev; 1060 wait_for_completion(&complete);
733 bio->bi_sector = sector;
734 bio_add_page(bio, page, PAGE_SIZE, 0);
735 bio->bi_end_io = scrub_fixup_end_io;
736 bio->bi_private = &complete;
737 btrfsic_submit_bio(rw, bio);
738 1061
739 /* this will also unplug the queue */ 1062 page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
740 wait_for_completion(&complete); 1063 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1064 sblock->no_io_error_seen = 0;
1065 bio_put(bio);
1066 }
741 1067
742 ret = !test_bit(BIO_UPTODATE, &bio->bi_flags); 1068 if (sblock->no_io_error_seen)
743 bio_put(bio); 1069 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
744 return ret; 1070 have_csum, csum, generation,
1071 csum_size);
1072
1073 return 0;
745} 1074}
746 1075
747static void scrub_bio_end_io(struct bio *bio, int err) 1076static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1077 struct scrub_block *sblock,
1078 int is_metadata, int have_csum,
1079 const u8 *csum, u64 generation,
1080 u16 csum_size)
748{ 1081{
749 struct scrub_bio *sbio = bio->bi_private; 1082 int page_num;
750 struct scrub_dev *sdev = sbio->sdev; 1083 u8 calculated_csum[BTRFS_CSUM_SIZE];
751 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; 1084 u32 crc = ~(u32)0;
1085 struct btrfs_root *root = fs_info->extent_root;
1086 void *mapped_buffer;
1087
1088 BUG_ON(!sblock->pagev[0].page);
1089 if (is_metadata) {
1090 struct btrfs_header *h;
1091
1092 mapped_buffer = kmap_atomic(sblock->pagev[0].page);
1093 h = (struct btrfs_header *)mapped_buffer;
1094
1095 if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
1096 generation != le64_to_cpu(h->generation) ||
1097 memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
1098 memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1099 BTRFS_UUID_SIZE))
1100 sblock->header_error = 1;
1101 csum = h->csum;
1102 } else {
1103 if (!have_csum)
1104 return;
752 1105
753 sbio->err = err; 1106 mapped_buffer = kmap_atomic(sblock->pagev[0].page);
754 sbio->bio = bio; 1107 }
755 1108
756 btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work); 1109 for (page_num = 0;;) {
1110 if (page_num == 0 && is_metadata)
1111 crc = btrfs_csum_data(root,
1112 ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1113 crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1114 else
1115 crc = btrfs_csum_data(root, mapped_buffer, crc,
1116 PAGE_SIZE);
1117
1118 kunmap_atomic(mapped_buffer);
1119 page_num++;
1120 if (page_num >= sblock->page_count)
1121 break;
1122 BUG_ON(!sblock->pagev[page_num].page);
1123
1124 mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
1125 }
1126
1127 btrfs_csum_final(crc, calculated_csum);
1128 if (memcmp(calculated_csum, csum, csum_size))
1129 sblock->checksum_error = 1;
757} 1130}
758 1131
759static void scrub_checksum(struct btrfs_work *work) 1132static void scrub_complete_bio_end_io(struct bio *bio, int err)
760{ 1133{
761 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); 1134 complete((struct completion *)bio->bi_private);
762 struct scrub_dev *sdev = sbio->sdev; 1135}
763 struct page *page;
764 void *buffer;
765 int i;
766 u64 flags;
767 u64 logical;
768 int ret;
769 1136
770 if (sbio->err) { 1137static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
771 ret = 0; 1138 struct scrub_block *sblock_good,
772 for (i = 0; i < sbio->count; ++i) 1139 int force_write)
773 ret |= scrub_recheck_error(sbio, i); 1140{
774 if (!ret) { 1141 int page_num;
775 spin_lock(&sdev->stat_lock); 1142 int ret = 0;
776 ++sdev->stat.unverified_errors;
777 spin_unlock(&sdev->stat_lock);
778 }
779 1143
780 sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1); 1144 for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
781 sbio->bio->bi_flags |= 1 << BIO_UPTODATE; 1145 int ret_sub;
782 sbio->bio->bi_phys_segments = 0;
783 sbio->bio->bi_idx = 0;
784 1146
785 for (i = 0; i < sbio->count; i++) { 1147 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
786 struct bio_vec *bi; 1148 sblock_good,
787 bi = &sbio->bio->bi_io_vec[i]; 1149 page_num,
788 bi->bv_offset = 0; 1150 force_write);
789 bi->bv_len = PAGE_SIZE; 1151 if (ret_sub)
790 } 1152 ret = ret_sub;
791 goto out;
792 } 1153 }
793 for (i = 0; i < sbio->count; ++i) { 1154
794 page = sbio->bio->bi_io_vec[i].bv_page; 1155 return ret;
795 buffer = kmap_atomic(page); 1156}
796 flags = sbio->spag[i].flags; 1157
797 logical = sbio->logical + i * PAGE_SIZE; 1158static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
798 ret = 0; 1159 struct scrub_block *sblock_good,
799 if (flags & BTRFS_EXTENT_FLAG_DATA) { 1160 int page_num, int force_write)
800 ret = scrub_checksum_data(sdev, sbio->spag + i, buffer); 1161{
801 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 1162 struct scrub_page *page_bad = sblock_bad->pagev + page_num;
802 ret = scrub_checksum_tree_block(sdev, sbio->spag + i, 1163 struct scrub_page *page_good = sblock_good->pagev + page_num;
803 logical, buffer); 1164
804 } else if (flags & BTRFS_EXTENT_FLAG_SUPER) { 1165 BUG_ON(sblock_bad->pagev[page_num].page == NULL);
805 BUG_ON(i); 1166 BUG_ON(sblock_good->pagev[page_num].page == NULL);
806 (void)scrub_checksum_super(sbio, buffer); 1167 if (force_write || sblock_bad->header_error ||
807 } else { 1168 sblock_bad->checksum_error || page_bad->io_error) {
808 WARN_ON(1); 1169 struct bio *bio;
809 } 1170 int ret;
810 kunmap_atomic(buffer); 1171 DECLARE_COMPLETION_ONSTACK(complete);
811 if (ret) { 1172
812 ret = scrub_recheck_error(sbio, i); 1173 bio = bio_alloc(GFP_NOFS, 1);
813 if (!ret) { 1174 bio->bi_bdev = page_bad->bdev;
814 spin_lock(&sdev->stat_lock); 1175 bio->bi_sector = page_bad->physical >> 9;
815 ++sdev->stat.unverified_errors; 1176 bio->bi_end_io = scrub_complete_bio_end_io;
816 spin_unlock(&sdev->stat_lock); 1177 bio->bi_private = &complete;
817 } 1178
1179 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1180 if (PAGE_SIZE != ret) {
1181 bio_put(bio);
1182 return -EIO;
818 } 1183 }
1184 btrfsic_submit_bio(WRITE, bio);
1185
1186 /* this will also unplug the queue */
1187 wait_for_completion(&complete);
1188 bio_put(bio);
819 } 1189 }
820 1190
821out: 1191 return 0;
822 scrub_free_bio(sbio->bio); 1192}
823 sbio->bio = NULL; 1193
824 spin_lock(&sdev->list_lock); 1194static void scrub_checksum(struct scrub_block *sblock)
825 sbio->next_free = sdev->first_free; 1195{
826 sdev->first_free = sbio->index; 1196 u64 flags;
827 spin_unlock(&sdev->list_lock); 1197 int ret;
828 atomic_dec(&sdev->in_flight); 1198
829 wake_up(&sdev->list_wait); 1199 BUG_ON(sblock->page_count < 1);
1200 flags = sblock->pagev[0].flags;
1201 ret = 0;
1202 if (flags & BTRFS_EXTENT_FLAG_DATA)
1203 ret = scrub_checksum_data(sblock);
1204 else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1205 ret = scrub_checksum_tree_block(sblock);
1206 else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1207 (void)scrub_checksum_super(sblock);
1208 else
1209 WARN_ON(1);
1210 if (ret)
1211 scrub_handle_errored_block(sblock);
830} 1212}
831 1213
832static int scrub_checksum_data(struct scrub_dev *sdev, 1214static int scrub_checksum_data(struct scrub_block *sblock)
833 struct scrub_page *spag, void *buffer)
834{ 1215{
1216 struct scrub_dev *sdev = sblock->sdev;
835 u8 csum[BTRFS_CSUM_SIZE]; 1217 u8 csum[BTRFS_CSUM_SIZE];
1218 u8 *on_disk_csum;
1219 struct page *page;
1220 void *buffer;
836 u32 crc = ~(u32)0; 1221 u32 crc = ~(u32)0;
837 int fail = 0; 1222 int fail = 0;
838 struct btrfs_root *root = sdev->dev->dev_root; 1223 struct btrfs_root *root = sdev->dev->dev_root;
1224 u64 len;
1225 int index;
839 1226
840 if (!spag->have_csum) 1227 BUG_ON(sblock->page_count < 1);
1228 if (!sblock->pagev[0].have_csum)
841 return 0; 1229 return 0;
842 1230
843 crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE); 1231 on_disk_csum = sblock->pagev[0].csum;
1232 page = sblock->pagev[0].page;
1233 buffer = kmap_atomic(page);
1234
1235 len = sdev->sectorsize;
1236 index = 0;
1237 for (;;) {
1238 u64 l = min_t(u64, len, PAGE_SIZE);
1239
1240 crc = btrfs_csum_data(root, buffer, crc, l);
1241 kunmap_atomic(buffer);
1242 len -= l;
1243 if (len == 0)
1244 break;
1245 index++;
1246 BUG_ON(index >= sblock->page_count);
1247 BUG_ON(!sblock->pagev[index].page);
1248 page = sblock->pagev[index].page;
1249 buffer = kmap_atomic(page);
1250 }
1251
844 btrfs_csum_final(crc, csum); 1252 btrfs_csum_final(crc, csum);
845 if (memcmp(csum, spag->csum, sdev->csum_size)) 1253 if (memcmp(csum, on_disk_csum, sdev->csum_size))
846 fail = 1; 1254 fail = 1;
847 1255
848 spin_lock(&sdev->stat_lock); 1256 if (fail) {
849 ++sdev->stat.data_extents_scrubbed; 1257 spin_lock(&sdev->stat_lock);
850 sdev->stat.data_bytes_scrubbed += PAGE_SIZE;
851 if (fail)
852 ++sdev->stat.csum_errors; 1258 ++sdev->stat.csum_errors;
853 spin_unlock(&sdev->stat_lock); 1259 spin_unlock(&sdev->stat_lock);
1260 }
854 1261
855 return fail; 1262 return fail;
856} 1263}
857 1264
858static int scrub_checksum_tree_block(struct scrub_dev *sdev, 1265static int scrub_checksum_tree_block(struct scrub_block *sblock)
859 struct scrub_page *spag, u64 logical,
860 void *buffer)
861{ 1266{
1267 struct scrub_dev *sdev = sblock->sdev;
862 struct btrfs_header *h; 1268 struct btrfs_header *h;
863 struct btrfs_root *root = sdev->dev->dev_root; 1269 struct btrfs_root *root = sdev->dev->dev_root;
864 struct btrfs_fs_info *fs_info = root->fs_info; 1270 struct btrfs_fs_info *fs_info = root->fs_info;
865 u8 csum[BTRFS_CSUM_SIZE]; 1271 u8 calculated_csum[BTRFS_CSUM_SIZE];
1272 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1273 struct page *page;
1274 void *mapped_buffer;
1275 u64 mapped_size;
1276 void *p;
866 u32 crc = ~(u32)0; 1277 u32 crc = ~(u32)0;
867 int fail = 0; 1278 int fail = 0;
868 int crc_fail = 0; 1279 int crc_fail = 0;
1280 u64 len;
1281 int index;
1282
1283 BUG_ON(sblock->page_count < 1);
1284 page = sblock->pagev[0].page;
1285 mapped_buffer = kmap_atomic(page);
1286 h = (struct btrfs_header *)mapped_buffer;
1287 memcpy(on_disk_csum, h->csum, sdev->csum_size);
869 1288
870 /* 1289 /*
871 * we don't use the getter functions here, as we 1290 * we don't use the getter functions here, as we
872 * a) don't have an extent buffer and 1291 * a) don't have an extent buffer and
873 * b) the page is already kmapped 1292 * b) the page is already kmapped
874 */ 1293 */
875 h = (struct btrfs_header *)buffer;
876 1294
877 if (logical != le64_to_cpu(h->bytenr)) 1295 if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr))
878 ++fail; 1296 ++fail;
879 1297
880 if (spag->generation != le64_to_cpu(h->generation)) 1298 if (sblock->pagev[0].generation != le64_to_cpu(h->generation))
881 ++fail; 1299 ++fail;
882 1300
883 if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE)) 1301 if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
@@ -887,51 +1305,99 @@ static int scrub_checksum_tree_block(struct scrub_dev *sdev,
887 BTRFS_UUID_SIZE)) 1305 BTRFS_UUID_SIZE))
888 ++fail; 1306 ++fail;
889 1307
890 crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc, 1308 BUG_ON(sdev->nodesize != sdev->leafsize);
891 PAGE_SIZE - BTRFS_CSUM_SIZE); 1309 len = sdev->nodesize - BTRFS_CSUM_SIZE;
892 btrfs_csum_final(crc, csum); 1310 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
893 if (memcmp(csum, h->csum, sdev->csum_size)) 1311 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1312 index = 0;
1313 for (;;) {
1314 u64 l = min_t(u64, len, mapped_size);
1315
1316 crc = btrfs_csum_data(root, p, crc, l);
1317 kunmap_atomic(mapped_buffer);
1318 len -= l;
1319 if (len == 0)
1320 break;
1321 index++;
1322 BUG_ON(index >= sblock->page_count);
1323 BUG_ON(!sblock->pagev[index].page);
1324 page = sblock->pagev[index].page;
1325 mapped_buffer = kmap_atomic(page);
1326 mapped_size = PAGE_SIZE;
1327 p = mapped_buffer;
1328 }
1329
1330 btrfs_csum_final(crc, calculated_csum);
1331 if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
894 ++crc_fail; 1332 ++crc_fail;
895 1333
896 spin_lock(&sdev->stat_lock); 1334 if (crc_fail || fail) {
897 ++sdev->stat.tree_extents_scrubbed; 1335 spin_lock(&sdev->stat_lock);
898 sdev->stat.tree_bytes_scrubbed += PAGE_SIZE; 1336 if (crc_fail)
899 if (crc_fail) 1337 ++sdev->stat.csum_errors;
900 ++sdev->stat.csum_errors; 1338 if (fail)
901 if (fail) 1339 ++sdev->stat.verify_errors;
902 ++sdev->stat.verify_errors; 1340 spin_unlock(&sdev->stat_lock);
903 spin_unlock(&sdev->stat_lock); 1341 }
904 1342
905 return fail || crc_fail; 1343 return fail || crc_fail;
906} 1344}
907 1345
908static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer) 1346static int scrub_checksum_super(struct scrub_block *sblock)
909{ 1347{
910 struct btrfs_super_block *s; 1348 struct btrfs_super_block *s;
911 u64 logical; 1349 struct scrub_dev *sdev = sblock->sdev;
912 struct scrub_dev *sdev = sbio->sdev;
913 struct btrfs_root *root = sdev->dev->dev_root; 1350 struct btrfs_root *root = sdev->dev->dev_root;
914 struct btrfs_fs_info *fs_info = root->fs_info; 1351 struct btrfs_fs_info *fs_info = root->fs_info;
915 u8 csum[BTRFS_CSUM_SIZE]; 1352 u8 calculated_csum[BTRFS_CSUM_SIZE];
1353 u8 on_disk_csum[BTRFS_CSUM_SIZE];
1354 struct page *page;
1355 void *mapped_buffer;
1356 u64 mapped_size;
1357 void *p;
916 u32 crc = ~(u32)0; 1358 u32 crc = ~(u32)0;
917 int fail = 0; 1359 int fail = 0;
1360 u64 len;
1361 int index;
918 1362
919 s = (struct btrfs_super_block *)buffer; 1363 BUG_ON(sblock->page_count < 1);
920 logical = sbio->logical; 1364 page = sblock->pagev[0].page;
1365 mapped_buffer = kmap_atomic(page);
1366 s = (struct btrfs_super_block *)mapped_buffer;
1367 memcpy(on_disk_csum, s->csum, sdev->csum_size);
921 1368
922 if (logical != le64_to_cpu(s->bytenr)) 1369 if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
923 ++fail; 1370 ++fail;
924 1371
925 if (sbio->spag[0].generation != le64_to_cpu(s->generation)) 1372 if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
926 ++fail; 1373 ++fail;
927 1374
928 if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE)) 1375 if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
929 ++fail; 1376 ++fail;
930 1377
931 crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc, 1378 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
932 PAGE_SIZE - BTRFS_CSUM_SIZE); 1379 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
933 btrfs_csum_final(crc, csum); 1380 p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
934 if (memcmp(csum, s->csum, sbio->sdev->csum_size)) 1381 index = 0;
1382 for (;;) {
1383 u64 l = min_t(u64, len, mapped_size);
1384
1385 crc = btrfs_csum_data(root, p, crc, l);
1386 kunmap_atomic(mapped_buffer);
1387 len -= l;
1388 if (len == 0)
1389 break;
1390 index++;
1391 BUG_ON(index >= sblock->page_count);
1392 BUG_ON(!sblock->pagev[index].page);
1393 page = sblock->pagev[index].page;
1394 mapped_buffer = kmap_atomic(page);
1395 mapped_size = PAGE_SIZE;
1396 p = mapped_buffer;
1397 }
1398
1399 btrfs_csum_final(crc, calculated_csum);
1400 if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
935 ++fail; 1401 ++fail;
936 1402
937 if (fail) { 1403 if (fail) {
@@ -948,29 +1414,42 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
948 return fail; 1414 return fail;
949} 1415}
950 1416
951static int scrub_submit(struct scrub_dev *sdev) 1417static void scrub_block_get(struct scrub_block *sblock)
1418{
1419 atomic_inc(&sblock->ref_count);
1420}
1421
1422static void scrub_block_put(struct scrub_block *sblock)
1423{
1424 if (atomic_dec_and_test(&sblock->ref_count)) {
1425 int i;
1426
1427 for (i = 0; i < sblock->page_count; i++)
1428 if (sblock->pagev[i].page)
1429 __free_page(sblock->pagev[i].page);
1430 kfree(sblock);
1431 }
1432}
1433
1434static void scrub_submit(struct scrub_dev *sdev)
952{ 1435{
953 struct scrub_bio *sbio; 1436 struct scrub_bio *sbio;
954 1437
955 if (sdev->curr == -1) 1438 if (sdev->curr == -1)
956 return 0; 1439 return;
957 1440
958 sbio = sdev->bios[sdev->curr]; 1441 sbio = sdev->bios[sdev->curr];
959 sbio->err = 0;
960 sdev->curr = -1; 1442 sdev->curr = -1;
961 atomic_inc(&sdev->in_flight); 1443 atomic_inc(&sdev->in_flight);
962 1444
963 btrfsic_submit_bio(READ, sbio->bio); 1445 btrfsic_submit_bio(READ, sbio->bio);
964
965 return 0;
966} 1446}
967 1447
968static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, 1448static int scrub_add_page_to_bio(struct scrub_dev *sdev,
969 u64 physical, u64 flags, u64 gen, int mirror_num, 1449 struct scrub_page *spage)
970 u8 *csum, int force)
971{ 1450{
1451 struct scrub_block *sblock = spage->sblock;
972 struct scrub_bio *sbio; 1452 struct scrub_bio *sbio;
973 struct page *page;
974 int ret; 1453 int ret;
975 1454
976again: 1455again:
@@ -983,7 +1462,7 @@ again:
983 if (sdev->curr != -1) { 1462 if (sdev->curr != -1) {
984 sdev->first_free = sdev->bios[sdev->curr]->next_free; 1463 sdev->first_free = sdev->bios[sdev->curr]->next_free;
985 sdev->bios[sdev->curr]->next_free = -1; 1464 sdev->bios[sdev->curr]->next_free = -1;
986 sdev->bios[sdev->curr]->count = 0; 1465 sdev->bios[sdev->curr]->page_count = 0;
987 spin_unlock(&sdev->list_lock); 1466 spin_unlock(&sdev->list_lock);
988 } else { 1467 } else {
989 spin_unlock(&sdev->list_lock); 1468 spin_unlock(&sdev->list_lock);
@@ -991,62 +1470,200 @@ again:
991 } 1470 }
992 } 1471 }
993 sbio = sdev->bios[sdev->curr]; 1472 sbio = sdev->bios[sdev->curr];
994 if (sbio->count == 0) { 1473 if (sbio->page_count == 0) {
995 struct bio *bio; 1474 struct bio *bio;
996 1475
997 sbio->physical = physical; 1476 sbio->physical = spage->physical;
998 sbio->logical = logical; 1477 sbio->logical = spage->logical;
999 bio = bio_alloc(GFP_NOFS, SCRUB_PAGES_PER_BIO); 1478 bio = sbio->bio;
1000 if (!bio) 1479 if (!bio) {
1001 return -ENOMEM; 1480 bio = bio_alloc(GFP_NOFS, sdev->pages_per_bio);
1481 if (!bio)
1482 return -ENOMEM;
1483 sbio->bio = bio;
1484 }
1002 1485
1003 bio->bi_private = sbio; 1486 bio->bi_private = sbio;
1004 bio->bi_end_io = scrub_bio_end_io; 1487 bio->bi_end_io = scrub_bio_end_io;
1005 bio->bi_bdev = sdev->dev->bdev; 1488 bio->bi_bdev = sdev->dev->bdev;
1006 bio->bi_sector = sbio->physical >> 9; 1489 bio->bi_sector = spage->physical >> 9;
1007 sbio->err = 0; 1490 sbio->err = 0;
1008 sbio->bio = bio; 1491 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1009 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical || 1492 spage->physical ||
1010 sbio->logical + sbio->count * PAGE_SIZE != logical) { 1493 sbio->logical + sbio->page_count * PAGE_SIZE !=
1011 ret = scrub_submit(sdev); 1494 spage->logical) {
1012 if (ret) 1495 scrub_submit(sdev);
1013 return ret;
1014 goto again; 1496 goto again;
1015 } 1497 }
1016 sbio->spag[sbio->count].flags = flags;
1017 sbio->spag[sbio->count].generation = gen;
1018 sbio->spag[sbio->count].have_csum = 0;
1019 sbio->spag[sbio->count].mirror_num = mirror_num;
1020
1021 page = alloc_page(GFP_NOFS);
1022 if (!page)
1023 return -ENOMEM;
1024 1498
1025 ret = bio_add_page(sbio->bio, page, PAGE_SIZE, 0); 1499 sbio->pagev[sbio->page_count] = spage;
1026 if (!ret) { 1500 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1027 __free_page(page); 1501 if (ret != PAGE_SIZE) {
1028 ret = scrub_submit(sdev); 1502 if (sbio->page_count < 1) {
1029 if (ret) 1503 bio_put(sbio->bio);
1030 return ret; 1504 sbio->bio = NULL;
1505 return -EIO;
1506 }
1507 scrub_submit(sdev);
1031 goto again; 1508 goto again;
1032 } 1509 }
1033 1510
1034 if (csum) { 1511 scrub_block_get(sblock); /* one for the added page */
1035 sbio->spag[sbio->count].have_csum = 1; 1512 atomic_inc(&sblock->outstanding_pages);
1036 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); 1513 sbio->page_count++;
1514 if (sbio->page_count == sdev->pages_per_bio)
1515 scrub_submit(sdev);
1516
1517 return 0;
1518}
1519
1520static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
1521 u64 physical, u64 flags, u64 gen, int mirror_num,
1522 u8 *csum, int force)
1523{
1524 struct scrub_block *sblock;
1525 int index;
1526
1527 sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
1528 if (!sblock) {
1529 spin_lock(&sdev->stat_lock);
1530 sdev->stat.malloc_errors++;
1531 spin_unlock(&sdev->stat_lock);
1532 return -ENOMEM;
1037 } 1533 }
1038 ++sbio->count; 1534
1039 if (sbio->count == SCRUB_PAGES_PER_BIO || force) { 1535 /* one ref inside this function, plus one for each page later on */
1536 atomic_set(&sblock->ref_count, 1);
1537 sblock->sdev = sdev;
1538 sblock->no_io_error_seen = 1;
1539
1540 for (index = 0; len > 0; index++) {
1541 struct scrub_page *spage = sblock->pagev + index;
1542 u64 l = min_t(u64, len, PAGE_SIZE);
1543
1544 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
1545 spage->page = alloc_page(GFP_NOFS);
1546 if (!spage->page) {
1547 spin_lock(&sdev->stat_lock);
1548 sdev->stat.malloc_errors++;
1549 spin_unlock(&sdev->stat_lock);
1550 while (index > 0) {
1551 index--;
1552 __free_page(sblock->pagev[index].page);
1553 }
1554 kfree(sblock);
1555 return -ENOMEM;
1556 }
1557 spage->sblock = sblock;
1558 spage->bdev = sdev->dev->bdev;
1559 spage->flags = flags;
1560 spage->generation = gen;
1561 spage->logical = logical;
1562 spage->physical = physical;
1563 spage->mirror_num = mirror_num;
1564 if (csum) {
1565 spage->have_csum = 1;
1566 memcpy(spage->csum, csum, sdev->csum_size);
1567 } else {
1568 spage->have_csum = 0;
1569 }
1570 sblock->page_count++;
1571 len -= l;
1572 logical += l;
1573 physical += l;
1574 }
1575
1576 BUG_ON(sblock->page_count == 0);
1577 for (index = 0; index < sblock->page_count; index++) {
1578 struct scrub_page *spage = sblock->pagev + index;
1040 int ret; 1579 int ret;
1041 1580
1042 ret = scrub_submit(sdev); 1581 ret = scrub_add_page_to_bio(sdev, spage);
1043 if (ret) 1582 if (ret) {
1583 scrub_block_put(sblock);
1044 return ret; 1584 return ret;
1585 }
1045 } 1586 }
1046 1587
1588 if (force)
1589 scrub_submit(sdev);
1590
1591 /* last one frees, either here or in bio completion for last page */
1592 scrub_block_put(sblock);
1047 return 0; 1593 return 0;
1048} 1594}
1049 1595
1596static void scrub_bio_end_io(struct bio *bio, int err)
1597{
1598 struct scrub_bio *sbio = bio->bi_private;
1599 struct scrub_dev *sdev = sbio->sdev;
1600 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
1601
1602 sbio->err = err;
1603 sbio->bio = bio;
1604
1605 btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
1606}
1607
1608static void scrub_bio_end_io_worker(struct btrfs_work *work)
1609{
1610 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1611 struct scrub_dev *sdev = sbio->sdev;
1612 int i;
1613
1614 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
1615 if (sbio->err) {
1616 for (i = 0; i < sbio->page_count; i++) {
1617 struct scrub_page *spage = sbio->pagev[i];
1618
1619 spage->io_error = 1;
1620 spage->sblock->no_io_error_seen = 0;
1621 }
1622 }
1623
1624 /* now complete the scrub_block items that have all pages completed */
1625 for (i = 0; i < sbio->page_count; i++) {
1626 struct scrub_page *spage = sbio->pagev[i];
1627 struct scrub_block *sblock = spage->sblock;
1628
1629 if (atomic_dec_and_test(&sblock->outstanding_pages))
1630 scrub_block_complete(sblock);
1631 scrub_block_put(sblock);
1632 }
1633
1634 if (sbio->err) {
1635 /* what is this good for??? */
1636 sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
1637 sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
1638 sbio->bio->bi_phys_segments = 0;
1639 sbio->bio->bi_idx = 0;
1640
1641 for (i = 0; i < sbio->page_count; i++) {
1642 struct bio_vec *bi;
1643 bi = &sbio->bio->bi_io_vec[i];
1644 bi->bv_offset = 0;
1645 bi->bv_len = PAGE_SIZE;
1646 }
1647 }
1648
1649 bio_put(sbio->bio);
1650 sbio->bio = NULL;
1651 spin_lock(&sdev->list_lock);
1652 sbio->next_free = sdev->first_free;
1653 sdev->first_free = sbio->index;
1654 spin_unlock(&sdev->list_lock);
1655 atomic_dec(&sdev->in_flight);
1656 wake_up(&sdev->list_wait);
1657}
1658
1659static void scrub_block_complete(struct scrub_block *sblock)
1660{
1661 if (!sblock->no_io_error_seen)
1662 scrub_handle_errored_block(sblock);
1663 else
1664 scrub_checksum(sblock);
1665}
1666
1050static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len, 1667static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
1051 u8 *csum) 1668 u8 *csum)
1052{ 1669{
@@ -1054,7 +1671,6 @@ static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
1054 int ret = 0; 1671 int ret = 0;
1055 unsigned long i; 1672 unsigned long i;
1056 unsigned long num_sectors; 1673 unsigned long num_sectors;
1057 u32 sectorsize = sdev->dev->dev_root->sectorsize;
1058 1674
1059 while (!list_empty(&sdev->csum_list)) { 1675 while (!list_empty(&sdev->csum_list)) {
1060 sum = list_first_entry(&sdev->csum_list, 1676 sum = list_first_entry(&sdev->csum_list,
@@ -1072,7 +1688,7 @@ static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
1072 if (!sum) 1688 if (!sum)
1073 return 0; 1689 return 0;
1074 1690
1075 num_sectors = sum->len / sectorsize; 1691 num_sectors = sum->len / sdev->sectorsize;
1076 for (i = 0; i < num_sectors; ++i) { 1692 for (i = 0; i < num_sectors; ++i) {
1077 if (sum->sums[i].bytenr == logical) { 1693 if (sum->sums[i].bytenr == logical) {
1078 memcpy(csum, &sum->sums[i].sum, sdev->csum_size); 1694 memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
@@ -1093,9 +1709,28 @@ static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
1093{ 1709{
1094 int ret; 1710 int ret;
1095 u8 csum[BTRFS_CSUM_SIZE]; 1711 u8 csum[BTRFS_CSUM_SIZE];
1712 u32 blocksize;
1713
1714 if (flags & BTRFS_EXTENT_FLAG_DATA) {
1715 blocksize = sdev->sectorsize;
1716 spin_lock(&sdev->stat_lock);
1717 sdev->stat.data_extents_scrubbed++;
1718 sdev->stat.data_bytes_scrubbed += len;
1719 spin_unlock(&sdev->stat_lock);
1720 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1721 BUG_ON(sdev->nodesize != sdev->leafsize);
1722 blocksize = sdev->nodesize;
1723 spin_lock(&sdev->stat_lock);
1724 sdev->stat.tree_extents_scrubbed++;
1725 sdev->stat.tree_bytes_scrubbed += len;
1726 spin_unlock(&sdev->stat_lock);
1727 } else {
1728 blocksize = sdev->sectorsize;
1729 BUG_ON(1);
1730 }
1096 1731
1097 while (len) { 1732 while (len) {
1098 u64 l = min_t(u64, len, PAGE_SIZE); 1733 u64 l = min_t(u64, len, blocksize);
1099 int have_csum = 0; 1734 int have_csum = 0;
1100 1735
1101 if (flags & BTRFS_EXTENT_FLAG_DATA) { 1736 if (flags & BTRFS_EXTENT_FLAG_DATA) {
@@ -1104,8 +1739,8 @@ static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
1104 if (have_csum == 0) 1739 if (have_csum == 0)
1105 ++sdev->stat.no_csum; 1740 ++sdev->stat.no_csum;
1106 } 1741 }
1107 ret = scrub_page(sdev, logical, l, physical, flags, gen, 1742 ret = scrub_pages(sdev, logical, l, physical, flags, gen,
1108 mirror_num, have_csum ? csum : NULL, 0); 1743 mirror_num, have_csum ? csum : NULL, 0);
1109 if (ret) 1744 if (ret)
1110 return ret; 1745 return ret;
1111 len -= l; 1746 len -= l;
@@ -1170,6 +1805,11 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
1170 if (!path) 1805 if (!path)
1171 return -ENOMEM; 1806 return -ENOMEM;
1172 1807
1808 /*
1809 * work on commit root. The related disk blocks are static as
1810 * long as COW is applied. This means, it is save to rewrite
1811 * them to repair disk errors without any race conditions
1812 */
1173 path->search_commit_root = 1; 1813 path->search_commit_root = 1;
1174 path->skip_locking = 1; 1814 path->skip_locking = 1;
1175 1815
@@ -1516,15 +2156,18 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
1516 struct btrfs_device *device = sdev->dev; 2156 struct btrfs_device *device = sdev->dev;
1517 struct btrfs_root *root = device->dev_root; 2157 struct btrfs_root *root = device->dev_root;
1518 2158
2159 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
2160 return -EIO;
2161
1519 gen = root->fs_info->last_trans_committed; 2162 gen = root->fs_info->last_trans_committed;
1520 2163
1521 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { 2164 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1522 bytenr = btrfs_sb_offset(i); 2165 bytenr = btrfs_sb_offset(i);
1523 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) 2166 if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
1524 break; 2167 break;
1525 2168
1526 ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr, 2169 ret = scrub_pages(sdev, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
1527 BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1); 2170 BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
1528 if (ret) 2171 if (ret)
1529 return ret; 2172 return ret;
1530 } 2173 }
@@ -1583,10 +2226,30 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
1583 /* 2226 /*
1584 * check some assumptions 2227 * check some assumptions
1585 */ 2228 */
1586 if (root->sectorsize != PAGE_SIZE || 2229 if (root->nodesize != root->leafsize) {
1587 root->sectorsize != root->leafsize || 2230 printk(KERN_ERR
1588 root->sectorsize != root->nodesize) { 2231 "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
1589 printk(KERN_ERR "btrfs_scrub: size assumptions fail\n"); 2232 root->nodesize, root->leafsize);
2233 return -EINVAL;
2234 }
2235
2236 if (root->nodesize > BTRFS_STRIPE_LEN) {
2237 /*
2238 * in this case scrub is unable to calculate the checksum
2239 * the way scrub is implemented. Do not handle this
2240 * situation at all because it won't ever happen.
2241 */
2242 printk(KERN_ERR
2243 "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
2244 root->nodesize, BTRFS_STRIPE_LEN);
2245 return -EINVAL;
2246 }
2247
2248 if (root->sectorsize != PAGE_SIZE) {
2249 /* not supported for data w/o checksums */
2250 printk(KERN_ERR
2251 "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %lld) fails\n",
2252 root->sectorsize, (unsigned long long)PAGE_SIZE);
1590 return -EINVAL; 2253 return -EINVAL;
1591 } 2254 }
1592 2255
@@ -1656,7 +2319,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
1656 return ret; 2319 return ret;
1657} 2320}
1658 2321
1659int btrfs_scrub_pause(struct btrfs_root *root) 2322void btrfs_scrub_pause(struct btrfs_root *root)
1660{ 2323{
1661 struct btrfs_fs_info *fs_info = root->fs_info; 2324 struct btrfs_fs_info *fs_info = root->fs_info;
1662 2325
@@ -1671,34 +2334,28 @@ int btrfs_scrub_pause(struct btrfs_root *root)
1671 mutex_lock(&fs_info->scrub_lock); 2334 mutex_lock(&fs_info->scrub_lock);
1672 } 2335 }
1673 mutex_unlock(&fs_info->scrub_lock); 2336 mutex_unlock(&fs_info->scrub_lock);
1674
1675 return 0;
1676} 2337}
1677 2338
1678int btrfs_scrub_continue(struct btrfs_root *root) 2339void btrfs_scrub_continue(struct btrfs_root *root)
1679{ 2340{
1680 struct btrfs_fs_info *fs_info = root->fs_info; 2341 struct btrfs_fs_info *fs_info = root->fs_info;
1681 2342
1682 atomic_dec(&fs_info->scrub_pause_req); 2343 atomic_dec(&fs_info->scrub_pause_req);
1683 wake_up(&fs_info->scrub_pause_wait); 2344 wake_up(&fs_info->scrub_pause_wait);
1684 return 0;
1685} 2345}
1686 2346
1687int btrfs_scrub_pause_super(struct btrfs_root *root) 2347void btrfs_scrub_pause_super(struct btrfs_root *root)
1688{ 2348{
1689 down_write(&root->fs_info->scrub_super_lock); 2349 down_write(&root->fs_info->scrub_super_lock);
1690 return 0;
1691} 2350}
1692 2351
1693int btrfs_scrub_continue_super(struct btrfs_root *root) 2352void btrfs_scrub_continue_super(struct btrfs_root *root)
1694{ 2353{
1695 up_write(&root->fs_info->scrub_super_lock); 2354 up_write(&root->fs_info->scrub_super_lock);
1696 return 0;
1697} 2355}
1698 2356
1699int btrfs_scrub_cancel(struct btrfs_root *root) 2357int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
1700{ 2358{
1701 struct btrfs_fs_info *fs_info = root->fs_info;
1702 2359
1703 mutex_lock(&fs_info->scrub_lock); 2360 mutex_lock(&fs_info->scrub_lock);
1704 if (!atomic_read(&fs_info->scrubs_running)) { 2361 if (!atomic_read(&fs_info->scrubs_running)) {
@@ -1719,6 +2376,11 @@ int btrfs_scrub_cancel(struct btrfs_root *root)
1719 return 0; 2376 return 0;
1720} 2377}
1721 2378
2379int btrfs_scrub_cancel(struct btrfs_root *root)
2380{
2381 return __btrfs_scrub_cancel(root->fs_info);
2382}
2383
1722int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev) 2384int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
1723{ 2385{
1724 struct btrfs_fs_info *fs_info = root->fs_info; 2386 struct btrfs_fs_info *fs_info = root->fs_info;
@@ -1741,6 +2403,7 @@ int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
1741 2403
1742 return 0; 2404 return 0;
1743} 2405}
2406
1744int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid) 2407int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
1745{ 2408{
1746 struct btrfs_fs_info *fs_info = root->fs_info; 2409 struct btrfs_fs_info *fs_info = root->fs_info;
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index bc1f6ad18442..c6ffa5812419 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -44,8 +44,9 @@
44#define BTRFS_SETGET_FUNCS(name, type, member, bits) \ 44#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
45u##bits btrfs_##name(struct extent_buffer *eb, type *s); \ 45u##bits btrfs_##name(struct extent_buffer *eb, type *s); \
46void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val); \ 46void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val); \
47u##bits btrfs_##name(struct extent_buffer *eb, \ 47void btrfs_set_token_##name(struct extent_buffer *eb, type *s, u##bits val, struct btrfs_map_token *token); \
48 type *s) \ 48u##bits btrfs_token_##name(struct extent_buffer *eb, \
49 type *s, struct btrfs_map_token *token) \
49{ \ 50{ \
50 unsigned long part_offset = (unsigned long)s; \ 51 unsigned long part_offset = (unsigned long)s; \
51 unsigned long offset = part_offset + offsetof(type, member); \ 52 unsigned long offset = part_offset + offsetof(type, member); \
@@ -54,9 +55,18 @@ u##bits btrfs_##name(struct extent_buffer *eb, \
54 char *kaddr; \ 55 char *kaddr; \
55 unsigned long map_start; \ 56 unsigned long map_start; \
56 unsigned long map_len; \ 57 unsigned long map_len; \
58 unsigned long mem_len = sizeof(((type *)0)->member); \
57 u##bits res; \ 59 u##bits res; \
60 if (token && token->kaddr && token->offset <= offset && \
61 token->eb == eb && \
62 (token->offset + PAGE_CACHE_SIZE >= offset + mem_len)) { \
63 kaddr = token->kaddr; \
64 p = (type *)(kaddr + part_offset - token->offset); \
65 res = le##bits##_to_cpu(p->member); \
66 return res; \
67 } \
58 err = map_private_extent_buffer(eb, offset, \ 68 err = map_private_extent_buffer(eb, offset, \
59 sizeof(((type *)0)->member), \ 69 mem_len, \
60 &kaddr, &map_start, &map_len); \ 70 &kaddr, &map_start, &map_len); \
61 if (err) { \ 71 if (err) { \
62 __le##bits leres; \ 72 __le##bits leres; \
@@ -65,10 +75,15 @@ u##bits btrfs_##name(struct extent_buffer *eb, \
65 } \ 75 } \
66 p = (type *)(kaddr + part_offset - map_start); \ 76 p = (type *)(kaddr + part_offset - map_start); \
67 res = le##bits##_to_cpu(p->member); \ 77 res = le##bits##_to_cpu(p->member); \
78 if (token) { \
79 token->kaddr = kaddr; \
80 token->offset = map_start; \
81 token->eb = eb; \
82 } \
68 return res; \ 83 return res; \
69} \ 84} \
70void btrfs_set_##name(struct extent_buffer *eb, \ 85void btrfs_set_token_##name(struct extent_buffer *eb, \
71 type *s, u##bits val) \ 86 type *s, u##bits val, struct btrfs_map_token *token) \
72{ \ 87{ \
73 unsigned long part_offset = (unsigned long)s; \ 88 unsigned long part_offset = (unsigned long)s; \
74 unsigned long offset = part_offset + offsetof(type, member); \ 89 unsigned long offset = part_offset + offsetof(type, member); \
@@ -77,8 +92,17 @@ void btrfs_set_##name(struct extent_buffer *eb, \
77 char *kaddr; \ 92 char *kaddr; \
78 unsigned long map_start; \ 93 unsigned long map_start; \
79 unsigned long map_len; \ 94 unsigned long map_len; \
95 unsigned long mem_len = sizeof(((type *)0)->member); \
96 if (token && token->kaddr && token->offset <= offset && \
97 token->eb == eb && \
98 (token->offset + PAGE_CACHE_SIZE >= offset + mem_len)) { \
99 kaddr = token->kaddr; \
100 p = (type *)(kaddr + part_offset - token->offset); \
101 p->member = cpu_to_le##bits(val); \
102 return; \
103 } \
80 err = map_private_extent_buffer(eb, offset, \ 104 err = map_private_extent_buffer(eb, offset, \
81 sizeof(((type *)0)->member), \ 105 mem_len, \
82 &kaddr, &map_start, &map_len); \ 106 &kaddr, &map_start, &map_len); \
83 if (err) { \ 107 if (err) { \
84 __le##bits val2; \ 108 __le##bits val2; \
@@ -88,7 +112,22 @@ void btrfs_set_##name(struct extent_buffer *eb, \
88 } \ 112 } \
89 p = (type *)(kaddr + part_offset - map_start); \ 113 p = (type *)(kaddr + part_offset - map_start); \
90 p->member = cpu_to_le##bits(val); \ 114 p->member = cpu_to_le##bits(val); \
91} 115 if (token) { \
116 token->kaddr = kaddr; \
117 token->offset = map_start; \
118 token->eb = eb; \
119 } \
120} \
121void btrfs_set_##name(struct extent_buffer *eb, \
122 type *s, u##bits val) \
123{ \
124 btrfs_set_token_##name(eb, s, val, NULL); \
125} \
126u##bits btrfs_##name(struct extent_buffer *eb, \
127 type *s) \
128{ \
129 return btrfs_token_##name(eb, s, NULL); \
130} \
92 131
93#include "ctree.h" 132#include "ctree.h"
94 133
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 81df3fec6a6d..8d5d380f7bdb 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -76,6 +76,9 @@ static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno,
76 case -EROFS: 76 case -EROFS:
77 errstr = "Readonly filesystem"; 77 errstr = "Readonly filesystem";
78 break; 78 break;
79 case -EEXIST:
80 errstr = "Object already exists";
81 break;
79 default: 82 default:
80 if (nbuf) { 83 if (nbuf) {
81 if (snprintf(nbuf, 16, "error %d", -errno) >= 0) 84 if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
@@ -116,6 +119,8 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
116 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { 119 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
117 sb->s_flags |= MS_RDONLY; 120 sb->s_flags |= MS_RDONLY;
118 printk(KERN_INFO "btrfs is forced readonly\n"); 121 printk(KERN_INFO "btrfs is forced readonly\n");
122 __btrfs_scrub_cancel(fs_info);
123// WARN_ON(1);
119 } 124 }
120} 125}
121 126
@@ -124,25 +129,132 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
124 * invokes the approciate error response. 129 * invokes the approciate error response.
125 */ 130 */
126void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, 131void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
127 unsigned int line, int errno) 132 unsigned int line, int errno, const char *fmt, ...)
128{ 133{
129 struct super_block *sb = fs_info->sb; 134 struct super_block *sb = fs_info->sb;
130 char nbuf[16]; 135 char nbuf[16];
131 const char *errstr; 136 const char *errstr;
137 va_list args;
138 va_start(args, fmt);
132 139
133 /* 140 /*
134 * Special case: if the error is EROFS, and we're already 141 * Special case: if the error is EROFS, and we're already
135 * under MS_RDONLY, then it is safe here. 142 * under MS_RDONLY, then it is safe here.
136 */ 143 */
137 if (errno == -EROFS && (sb->s_flags & MS_RDONLY)) 144 if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
145 return;
146
147 errstr = btrfs_decode_error(fs_info, errno, nbuf);
148 if (fmt) {
149 struct va_format vaf = {
150 .fmt = fmt,
151 .va = &args,
152 };
153
154 printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s (%pV)\n",
155 sb->s_id, function, line, errstr, &vaf);
156 } else {
157 printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n",
158 sb->s_id, function, line, errstr);
159 }
160
161 /* Don't go through full error handling during mount */
162 if (sb->s_flags & MS_BORN) {
163 save_error_info(fs_info);
164 btrfs_handle_error(fs_info);
165 }
166 va_end(args);
167}
168
169const char *logtypes[] = {
170 "emergency",
171 "alert",
172 "critical",
173 "error",
174 "warning",
175 "notice",
176 "info",
177 "debug",
178};
179
180void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...)
181{
182 struct super_block *sb = fs_info->sb;
183 char lvl[4];
184 struct va_format vaf;
185 va_list args;
186 const char *type = logtypes[4];
187
188 va_start(args, fmt);
189
190 if (fmt[0] == '<' && isdigit(fmt[1]) && fmt[2] == '>') {
191 strncpy(lvl, fmt, 3);
192 fmt += 3;
193 type = logtypes[fmt[1] - '0'];
194 } else
195 *lvl = '\0';
196
197 vaf.fmt = fmt;
198 vaf.va = &args;
199 printk("%sBTRFS %s (device %s): %pV", lvl, type, sb->s_id, &vaf);
200}
201
202/*
203 * We only mark the transaction aborted and then set the file system read-only.
204 * This will prevent new transactions from starting or trying to join this
205 * one.
206 *
207 * This means that error recovery at the call site is limited to freeing
208 * any local memory allocations and passing the error code up without
209 * further cleanup. The transaction should complete as it normally would
210 * in the call path but will return -EIO.
211 *
212 * We'll complete the cleanup in btrfs_end_transaction and
213 * btrfs_commit_transaction.
214 */
215void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
216 struct btrfs_root *root, const char *function,
217 unsigned int line, int errno)
218{
219 WARN_ONCE(1, KERN_DEBUG "btrfs: Transaction aborted");
220 trans->aborted = errno;
221 /* Nothing used. The other threads that have joined this
222 * transaction may be able to continue. */
223 if (!trans->blocks_used) {
224 btrfs_printk(root->fs_info, "Aborting unused transaction.\n");
138 return; 225 return;
226 }
227 trans->transaction->aborted = errno;
228 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
229}
230/*
231 * __btrfs_panic decodes unexpected, fatal errors from the caller,
232 * issues an alert, and either panics or BUGs, depending on mount options.
233 */
234void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
235 unsigned int line, int errno, const char *fmt, ...)
236{
237 char nbuf[16];
238 char *s_id = "<unknown>";
239 const char *errstr;
240 struct va_format vaf = { .fmt = fmt };
241 va_list args;
139 242
140 errstr = btrfs_decode_error(fs_info, errno, nbuf); 243 if (fs_info)
141 printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n", 244 s_id = fs_info->sb->s_id;
142 sb->s_id, function, line, errstr);
143 save_error_info(fs_info);
144 245
145 btrfs_handle_error(fs_info); 246 va_start(args, fmt);
247 vaf.va = &args;
248
249 errstr = btrfs_decode_error(fs_info, errno, nbuf);
250 if (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)
251 panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
252 s_id, function, line, &vaf, errstr);
253
254 printk(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
255 s_id, function, line, &vaf, errstr);
256 va_end(args);
257 /* Caller calls BUG() */
146} 258}
147 259
148static void btrfs_put_super(struct super_block *sb) 260static void btrfs_put_super(struct super_block *sb)
@@ -166,7 +278,7 @@ enum {
166 Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_inode_cache, 278 Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_inode_cache,
167 Opt_no_space_cache, Opt_recovery, Opt_skip_balance, 279 Opt_no_space_cache, Opt_recovery, Opt_skip_balance,
168 Opt_check_integrity, Opt_check_integrity_including_extent_data, 280 Opt_check_integrity, Opt_check_integrity_including_extent_data,
169 Opt_check_integrity_print_mask, 281 Opt_check_integrity_print_mask, Opt_fatal_errors,
170 Opt_err, 282 Opt_err,
171}; 283};
172 284
@@ -206,12 +318,14 @@ static match_table_t tokens = {
206 {Opt_check_integrity, "check_int"}, 318 {Opt_check_integrity, "check_int"},
207 {Opt_check_integrity_including_extent_data, "check_int_data"}, 319 {Opt_check_integrity_including_extent_data, "check_int_data"},
208 {Opt_check_integrity_print_mask, "check_int_print_mask=%d"}, 320 {Opt_check_integrity_print_mask, "check_int_print_mask=%d"},
321 {Opt_fatal_errors, "fatal_errors=%s"},
209 {Opt_err, NULL}, 322 {Opt_err, NULL},
210}; 323};
211 324
212/* 325/*
213 * Regular mount options parser. Everything that is needed only when 326 * Regular mount options parser. Everything that is needed only when
214 * reading in a new superblock is parsed here. 327 * reading in a new superblock is parsed here.
328 * XXX JDM: This needs to be cleaned up for remount.
215 */ 329 */
216int btrfs_parse_options(struct btrfs_root *root, char *options) 330int btrfs_parse_options(struct btrfs_root *root, char *options)
217{ 331{
@@ -438,6 +552,18 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
438 ret = -EINVAL; 552 ret = -EINVAL;
439 goto out; 553 goto out;
440#endif 554#endif
555 case Opt_fatal_errors:
556 if (strcmp(args[0].from, "panic") == 0)
557 btrfs_set_opt(info->mount_opt,
558 PANIC_ON_FATAL_ERROR);
559 else if (strcmp(args[0].from, "bug") == 0)
560 btrfs_clear_opt(info->mount_opt,
561 PANIC_ON_FATAL_ERROR);
562 else {
563 ret = -EINVAL;
564 goto out;
565 }
566 break;
441 case Opt_err: 567 case Opt_err:
442 printk(KERN_INFO "btrfs: unrecognized mount option " 568 printk(KERN_INFO "btrfs: unrecognized mount option "
443 "'%s'\n", p); 569 "'%s'\n", p);
@@ -762,6 +888,8 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
762 seq_puts(seq, ",inode_cache"); 888 seq_puts(seq, ",inode_cache");
763 if (btrfs_test_opt(root, SKIP_BALANCE)) 889 if (btrfs_test_opt(root, SKIP_BALANCE))
764 seq_puts(seq, ",skip_balance"); 890 seq_puts(seq, ",skip_balance");
891 if (btrfs_test_opt(root, PANIC_ON_FATAL_ERROR))
892 seq_puts(seq, ",fatal_errors=panic");
765 return 0; 893 return 0;
766} 894}
767 895
@@ -995,11 +1123,20 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
995{ 1123{
996 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 1124 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
997 struct btrfs_root *root = fs_info->tree_root; 1125 struct btrfs_root *root = fs_info->tree_root;
1126 unsigned old_flags = sb->s_flags;
1127 unsigned long old_opts = fs_info->mount_opt;
1128 unsigned long old_compress_type = fs_info->compress_type;
1129 u64 old_max_inline = fs_info->max_inline;
1130 u64 old_alloc_start = fs_info->alloc_start;
1131 int old_thread_pool_size = fs_info->thread_pool_size;
1132 unsigned int old_metadata_ratio = fs_info->metadata_ratio;
998 int ret; 1133 int ret;
999 1134
1000 ret = btrfs_parse_options(root, data); 1135 ret = btrfs_parse_options(root, data);
1001 if (ret) 1136 if (ret) {
1002 return -EINVAL; 1137 ret = -EINVAL;
1138 goto restore;
1139 }
1003 1140
1004 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) 1141 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
1005 return 0; 1142 return 0;
@@ -1007,26 +1144,44 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1007 if (*flags & MS_RDONLY) { 1144 if (*flags & MS_RDONLY) {
1008 sb->s_flags |= MS_RDONLY; 1145 sb->s_flags |= MS_RDONLY;
1009 1146
1010 ret = btrfs_commit_super(root); 1147 ret = btrfs_commit_super(root);
1011 WARN_ON(ret); 1148 if (ret)
1149 goto restore;
1012 } else { 1150 } else {
1013 if (fs_info->fs_devices->rw_devices == 0) 1151 if (fs_info->fs_devices->rw_devices == 0)
1014 return -EACCES; 1152 ret = -EACCES;
1153 goto restore;
1015 1154
1016 if (btrfs_super_log_root(fs_info->super_copy) != 0) 1155 if (btrfs_super_log_root(fs_info->super_copy) != 0)
1017 return -EINVAL; 1156 ret = -EINVAL;
1157 goto restore;
1018 1158
1019 ret = btrfs_cleanup_fs_roots(fs_info); 1159 ret = btrfs_cleanup_fs_roots(fs_info);
1020 WARN_ON(ret); 1160 if (ret)
1161 goto restore;
1021 1162
1022 /* recover relocation */ 1163 /* recover relocation */
1023 ret = btrfs_recover_relocation(root); 1164 ret = btrfs_recover_relocation(root);
1024 WARN_ON(ret); 1165 if (ret)
1166 goto restore;
1025 1167
1026 sb->s_flags &= ~MS_RDONLY; 1168 sb->s_flags &= ~MS_RDONLY;
1027 } 1169 }
1028 1170
1029 return 0; 1171 return 0;
1172
1173restore:
1174 /* We've hit an error - don't reset MS_RDONLY */
1175 if (sb->s_flags & MS_RDONLY)
1176 old_flags |= MS_RDONLY;
1177 sb->s_flags = old_flags;
1178 fs_info->mount_opt = old_opts;
1179 fs_info->compress_type = old_compress_type;
1180 fs_info->max_inline = old_max_inline;
1181 fs_info->alloc_start = old_alloc_start;
1182 fs_info->thread_pool_size = old_thread_pool_size;
1183 fs_info->metadata_ratio = old_metadata_ratio;
1184 return ret;
1030} 1185}
1031 1186
1032/* Used to sort the devices by max_avail(descending sort) */ 1187/* Used to sort the devices by max_avail(descending sort) */
@@ -1356,9 +1511,7 @@ static int __init init_btrfs_fs(void)
1356 if (err) 1511 if (err)
1357 return err; 1512 return err;
1358 1513
1359 err = btrfs_init_compress(); 1514 btrfs_init_compress();
1360 if (err)
1361 goto free_sysfs;
1362 1515
1363 err = btrfs_init_cachep(); 1516 err = btrfs_init_cachep();
1364 if (err) 1517 if (err)
@@ -1384,6 +1537,8 @@ static int __init init_btrfs_fs(void)
1384 if (err) 1537 if (err)
1385 goto unregister_ioctl; 1538 goto unregister_ioctl;
1386 1539
1540 btrfs_init_lockdep();
1541
1387 printk(KERN_INFO "%s loaded\n", BTRFS_BUILD_VERSION); 1542 printk(KERN_INFO "%s loaded\n", BTRFS_BUILD_VERSION);
1388 return 0; 1543 return 0;
1389 1544
@@ -1399,7 +1554,6 @@ free_cachep:
1399 btrfs_destroy_cachep(); 1554 btrfs_destroy_cachep();
1400free_compress: 1555free_compress:
1401 btrfs_exit_compress(); 1556 btrfs_exit_compress();
1402free_sysfs:
1403 btrfs_exit_sysfs(); 1557 btrfs_exit_sysfs();
1404 return err; 1558 return err;
1405} 1559}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 04b77e3ceb7a..8da29e8e4de1 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -31,7 +31,7 @@
31 31
32#define BTRFS_ROOT_TRANS_TAG 0 32#define BTRFS_ROOT_TRANS_TAG 0
33 33
34static noinline void put_transaction(struct btrfs_transaction *transaction) 34void put_transaction(struct btrfs_transaction *transaction)
35{ 35{
36 WARN_ON(atomic_read(&transaction->use_count) == 0); 36 WARN_ON(atomic_read(&transaction->use_count) == 0);
37 if (atomic_dec_and_test(&transaction->use_count)) { 37 if (atomic_dec_and_test(&transaction->use_count)) {
@@ -58,6 +58,12 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail)
58 58
59 spin_lock(&root->fs_info->trans_lock); 59 spin_lock(&root->fs_info->trans_lock);
60loop: 60loop:
61 /* The file system has been taken offline. No new transactions. */
62 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
63 spin_unlock(&root->fs_info->trans_lock);
64 return -EROFS;
65 }
66
61 if (root->fs_info->trans_no_join) { 67 if (root->fs_info->trans_no_join) {
62 if (!nofail) { 68 if (!nofail) {
63 spin_unlock(&root->fs_info->trans_lock); 69 spin_unlock(&root->fs_info->trans_lock);
@@ -67,6 +73,8 @@ loop:
67 73
68 cur_trans = root->fs_info->running_transaction; 74 cur_trans = root->fs_info->running_transaction;
69 if (cur_trans) { 75 if (cur_trans) {
76 if (cur_trans->aborted)
77 return cur_trans->aborted;
70 atomic_inc(&cur_trans->use_count); 78 atomic_inc(&cur_trans->use_count);
71 atomic_inc(&cur_trans->num_writers); 79 atomic_inc(&cur_trans->num_writers);
72 cur_trans->num_joined++; 80 cur_trans->num_joined++;
@@ -123,6 +131,7 @@ loop:
123 root->fs_info->generation++; 131 root->fs_info->generation++;
124 cur_trans->transid = root->fs_info->generation; 132 cur_trans->transid = root->fs_info->generation;
125 root->fs_info->running_transaction = cur_trans; 133 root->fs_info->running_transaction = cur_trans;
134 cur_trans->aborted = 0;
126 spin_unlock(&root->fs_info->trans_lock); 135 spin_unlock(&root->fs_info->trans_lock);
127 136
128 return 0; 137 return 0;
@@ -318,6 +327,7 @@ again:
318 h->use_count = 1; 327 h->use_count = 1;
319 h->block_rsv = NULL; 328 h->block_rsv = NULL;
320 h->orig_rsv = NULL; 329 h->orig_rsv = NULL;
330 h->aborted = 0;
321 331
322 smp_mb(); 332 smp_mb();
323 if (cur_trans->blocked && may_wait_transaction(root, type)) { 333 if (cur_trans->blocked && may_wait_transaction(root, type)) {
@@ -327,8 +337,7 @@ again:
327 337
328 if (num_bytes) { 338 if (num_bytes) {
329 trace_btrfs_space_reservation(root->fs_info, "transaction", 339 trace_btrfs_space_reservation(root->fs_info, "transaction",
330 (u64)(unsigned long)h, 340 h->transid, num_bytes, 1);
331 num_bytes, 1);
332 h->block_rsv = &root->fs_info->trans_block_rsv; 341 h->block_rsv = &root->fs_info->trans_block_rsv;
333 h->bytes_reserved = num_bytes; 342 h->bytes_reserved = num_bytes;
334 } 343 }
@@ -440,6 +449,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
440 struct btrfs_transaction *cur_trans = trans->transaction; 449 struct btrfs_transaction *cur_trans = trans->transaction;
441 struct btrfs_block_rsv *rsv = trans->block_rsv; 450 struct btrfs_block_rsv *rsv = trans->block_rsv;
442 int updates; 451 int updates;
452 int err;
443 453
444 smp_mb(); 454 smp_mb();
445 if (cur_trans->blocked || cur_trans->delayed_refs.flushing) 455 if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
@@ -453,8 +463,11 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
453 463
454 updates = trans->delayed_ref_updates; 464 updates = trans->delayed_ref_updates;
455 trans->delayed_ref_updates = 0; 465 trans->delayed_ref_updates = 0;
456 if (updates) 466 if (updates) {
457 btrfs_run_delayed_refs(trans, root, updates); 467 err = btrfs_run_delayed_refs(trans, root, updates);
468 if (err) /* Error code will also eval true */
469 return err;
470 }
458 471
459 trans->block_rsv = rsv; 472 trans->block_rsv = rsv;
460 473
@@ -525,6 +538,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
525 if (throttle) 538 if (throttle)
526 btrfs_run_delayed_iputs(root); 539 btrfs_run_delayed_iputs(root);
527 540
541 if (trans->aborted ||
542 root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
543 return -EIO;
544 }
545
528 return 0; 546 return 0;
529} 547}
530 548
@@ -690,11 +708,13 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
690 ret = btrfs_update_root(trans, tree_root, 708 ret = btrfs_update_root(trans, tree_root,
691 &root->root_key, 709 &root->root_key,
692 &root->root_item); 710 &root->root_item);
693 BUG_ON(ret); 711 if (ret)
712 return ret;
694 713
695 old_root_used = btrfs_root_used(&root->root_item); 714 old_root_used = btrfs_root_used(&root->root_item);
696 ret = btrfs_write_dirty_block_groups(trans, root); 715 ret = btrfs_write_dirty_block_groups(trans, root);
697 BUG_ON(ret); 716 if (ret)
717 return ret;
698 } 718 }
699 719
700 if (root != root->fs_info->extent_root) 720 if (root != root->fs_info->extent_root)
@@ -705,6 +725,10 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
705 725
706/* 726/*
707 * update all the cowonly tree roots on disk 727 * update all the cowonly tree roots on disk
728 *
729 * The error handling in this function may not be obvious. Any of the
730 * failures will cause the file system to go offline. We still need
731 * to clean up the delayed refs.
708 */ 732 */
709static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, 733static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
710 struct btrfs_root *root) 734 struct btrfs_root *root)
@@ -715,22 +739,30 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
715 int ret; 739 int ret;
716 740
717 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 741 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
718 BUG_ON(ret); 742 if (ret)
743 return ret;
719 744
720 eb = btrfs_lock_root_node(fs_info->tree_root); 745 eb = btrfs_lock_root_node(fs_info->tree_root);
721 btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb); 746 ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
747 0, &eb);
722 btrfs_tree_unlock(eb); 748 btrfs_tree_unlock(eb);
723 free_extent_buffer(eb); 749 free_extent_buffer(eb);
724 750
751 if (ret)
752 return ret;
753
725 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 754 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
726 BUG_ON(ret); 755 if (ret)
756 return ret;
727 757
728 while (!list_empty(&fs_info->dirty_cowonly_roots)) { 758 while (!list_empty(&fs_info->dirty_cowonly_roots)) {
729 next = fs_info->dirty_cowonly_roots.next; 759 next = fs_info->dirty_cowonly_roots.next;
730 list_del_init(next); 760 list_del_init(next);
731 root = list_entry(next, struct btrfs_root, dirty_list); 761 root = list_entry(next, struct btrfs_root, dirty_list);
732 762
733 update_cowonly_root(trans, root); 763 ret = update_cowonly_root(trans, root);
764 if (ret)
765 return ret;
734 } 766 }
735 767
736 down_write(&fs_info->extent_commit_sem); 768 down_write(&fs_info->extent_commit_sem);
@@ -874,7 +906,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
874 906
875 new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS); 907 new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
876 if (!new_root_item) { 908 if (!new_root_item) {
877 pending->error = -ENOMEM; 909 ret = pending->error = -ENOMEM;
878 goto fail; 910 goto fail;
879 } 911 }
880 912
@@ -911,21 +943,24 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
911 * insert the directory item 943 * insert the directory item
912 */ 944 */
913 ret = btrfs_set_inode_index(parent_inode, &index); 945 ret = btrfs_set_inode_index(parent_inode, &index);
914 BUG_ON(ret); 946 BUG_ON(ret); /* -ENOMEM */
915 ret = btrfs_insert_dir_item(trans, parent_root, 947 ret = btrfs_insert_dir_item(trans, parent_root,
916 dentry->d_name.name, dentry->d_name.len, 948 dentry->d_name.name, dentry->d_name.len,
917 parent_inode, &key, 949 parent_inode, &key,
918 BTRFS_FT_DIR, index); 950 BTRFS_FT_DIR, index);
919 if (ret) { 951 if (ret == -EEXIST) {
920 pending->error = -EEXIST; 952 pending->error = -EEXIST;
921 dput(parent); 953 dput(parent);
922 goto fail; 954 goto fail;
955 } else if (ret) {
956 goto abort_trans_dput;
923 } 957 }
924 958
925 btrfs_i_size_write(parent_inode, parent_inode->i_size + 959 btrfs_i_size_write(parent_inode, parent_inode->i_size +
926 dentry->d_name.len * 2); 960 dentry->d_name.len * 2);
927 ret = btrfs_update_inode(trans, parent_root, parent_inode); 961 ret = btrfs_update_inode(trans, parent_root, parent_inode);
928 BUG_ON(ret); 962 if (ret)
963 goto abort_trans_dput;
929 964
930 /* 965 /*
931 * pull in the delayed directory update 966 * pull in the delayed directory update
@@ -934,7 +969,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
934 * snapshot 969 * snapshot
935 */ 970 */
936 ret = btrfs_run_delayed_items(trans, root); 971 ret = btrfs_run_delayed_items(trans, root);
937 BUG_ON(ret); 972 if (ret) { /* Transaction aborted */
973 dput(parent);
974 goto fail;
975 }
938 976
939 record_root_in_trans(trans, root); 977 record_root_in_trans(trans, root);
940 btrfs_set_root_last_snapshot(&root->root_item, trans->transid); 978 btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
@@ -949,12 +987,21 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
949 btrfs_set_root_flags(new_root_item, root_flags); 987 btrfs_set_root_flags(new_root_item, root_flags);
950 988
951 old = btrfs_lock_root_node(root); 989 old = btrfs_lock_root_node(root);
952 btrfs_cow_block(trans, root, old, NULL, 0, &old); 990 ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
991 if (ret) {
992 btrfs_tree_unlock(old);
993 free_extent_buffer(old);
994 goto abort_trans_dput;
995 }
996
953 btrfs_set_lock_blocking(old); 997 btrfs_set_lock_blocking(old);
954 998
955 btrfs_copy_root(trans, root, old, &tmp, objectid); 999 ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1000 /* clean up in any case */
956 btrfs_tree_unlock(old); 1001 btrfs_tree_unlock(old);
957 free_extent_buffer(old); 1002 free_extent_buffer(old);
1003 if (ret)
1004 goto abort_trans_dput;
958 1005
959 /* see comments in should_cow_block() */ 1006 /* see comments in should_cow_block() */
960 root->force_cow = 1; 1007 root->force_cow = 1;
@@ -966,7 +1013,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
966 ret = btrfs_insert_root(trans, tree_root, &key, new_root_item); 1013 ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
967 btrfs_tree_unlock(tmp); 1014 btrfs_tree_unlock(tmp);
968 free_extent_buffer(tmp); 1015 free_extent_buffer(tmp);
969 BUG_ON(ret); 1016 if (ret)
1017 goto abort_trans_dput;
970 1018
971 /* 1019 /*
972 * insert root back/forward references 1020 * insert root back/forward references
@@ -975,19 +1023,32 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
975 parent_root->root_key.objectid, 1023 parent_root->root_key.objectid,
976 btrfs_ino(parent_inode), index, 1024 btrfs_ino(parent_inode), index,
977 dentry->d_name.name, dentry->d_name.len); 1025 dentry->d_name.name, dentry->d_name.len);
978 BUG_ON(ret);
979 dput(parent); 1026 dput(parent);
1027 if (ret)
1028 goto fail;
980 1029
981 key.offset = (u64)-1; 1030 key.offset = (u64)-1;
982 pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key); 1031 pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
983 BUG_ON(IS_ERR(pending->snap)); 1032 if (IS_ERR(pending->snap)) {
1033 ret = PTR_ERR(pending->snap);
1034 goto abort_trans;
1035 }
984 1036
985 btrfs_reloc_post_snapshot(trans, pending); 1037 ret = btrfs_reloc_post_snapshot(trans, pending);
1038 if (ret)
1039 goto abort_trans;
1040 ret = 0;
986fail: 1041fail:
987 kfree(new_root_item); 1042 kfree(new_root_item);
988 trans->block_rsv = rsv; 1043 trans->block_rsv = rsv;
989 btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1); 1044 btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
990 return 0; 1045 return ret;
1046
1047abort_trans_dput:
1048 dput(parent);
1049abort_trans:
1050 btrfs_abort_transaction(trans, root, ret);
1051 goto fail;
991} 1052}
992 1053
993/* 1054/*
@@ -1124,6 +1185,33 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1124 return 0; 1185 return 0;
1125} 1186}
1126 1187
1188
1189static void cleanup_transaction(struct btrfs_trans_handle *trans,
1190 struct btrfs_root *root)
1191{
1192 struct btrfs_transaction *cur_trans = trans->transaction;
1193
1194 WARN_ON(trans->use_count > 1);
1195
1196 spin_lock(&root->fs_info->trans_lock);
1197 list_del_init(&cur_trans->list);
1198 spin_unlock(&root->fs_info->trans_lock);
1199
1200 btrfs_cleanup_one_transaction(trans->transaction, root);
1201
1202 put_transaction(cur_trans);
1203 put_transaction(cur_trans);
1204
1205 trace_btrfs_transaction_commit(root);
1206
1207 btrfs_scrub_continue(root);
1208
1209 if (current->journal_info == trans)
1210 current->journal_info = NULL;
1211
1212 kmem_cache_free(btrfs_trans_handle_cachep, trans);
1213}
1214
1127/* 1215/*
1128 * btrfs_transaction state sequence: 1216 * btrfs_transaction state sequence:
1129 * in_commit = 0, blocked = 0 (initial) 1217 * in_commit = 0, blocked = 0 (initial)
@@ -1135,10 +1223,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1135 struct btrfs_root *root) 1223 struct btrfs_root *root)
1136{ 1224{
1137 unsigned long joined = 0; 1225 unsigned long joined = 0;
1138 struct btrfs_transaction *cur_trans; 1226 struct btrfs_transaction *cur_trans = trans->transaction;
1139 struct btrfs_transaction *prev_trans = NULL; 1227 struct btrfs_transaction *prev_trans = NULL;
1140 DEFINE_WAIT(wait); 1228 DEFINE_WAIT(wait);
1141 int ret; 1229 int ret = -EIO;
1142 int should_grow = 0; 1230 int should_grow = 0;
1143 unsigned long now = get_seconds(); 1231 unsigned long now = get_seconds();
1144 int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT); 1232 int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
@@ -1148,13 +1236,18 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1148 btrfs_trans_release_metadata(trans, root); 1236 btrfs_trans_release_metadata(trans, root);
1149 trans->block_rsv = NULL; 1237 trans->block_rsv = NULL;
1150 1238
1239 if (cur_trans->aborted)
1240 goto cleanup_transaction;
1241
1151 /* make a pass through all the delayed refs we have so far 1242 /* make a pass through all the delayed refs we have so far
1152 * any runnings procs may add more while we are here 1243 * any runnings procs may add more while we are here
1153 */ 1244 */
1154 ret = btrfs_run_delayed_refs(trans, root, 0); 1245 ret = btrfs_run_delayed_refs(trans, root, 0);
1155 BUG_ON(ret); 1246 if (ret)
1247 goto cleanup_transaction;
1156 1248
1157 cur_trans = trans->transaction; 1249 cur_trans = trans->transaction;
1250
1158 /* 1251 /*
1159 * set the flushing flag so procs in this transaction have to 1252 * set the flushing flag so procs in this transaction have to
1160 * start sending their work down. 1253 * start sending their work down.
@@ -1162,19 +1255,20 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1162 cur_trans->delayed_refs.flushing = 1; 1255 cur_trans->delayed_refs.flushing = 1;
1163 1256
1164 ret = btrfs_run_delayed_refs(trans, root, 0); 1257 ret = btrfs_run_delayed_refs(trans, root, 0);
1165 BUG_ON(ret); 1258 if (ret)
1259 goto cleanup_transaction;
1166 1260
1167 spin_lock(&cur_trans->commit_lock); 1261 spin_lock(&cur_trans->commit_lock);
1168 if (cur_trans->in_commit) { 1262 if (cur_trans->in_commit) {
1169 spin_unlock(&cur_trans->commit_lock); 1263 spin_unlock(&cur_trans->commit_lock);
1170 atomic_inc(&cur_trans->use_count); 1264 atomic_inc(&cur_trans->use_count);
1171 btrfs_end_transaction(trans, root); 1265 ret = btrfs_end_transaction(trans, root);
1172 1266
1173 wait_for_commit(root, cur_trans); 1267 wait_for_commit(root, cur_trans);
1174 1268
1175 put_transaction(cur_trans); 1269 put_transaction(cur_trans);
1176 1270
1177 return 0; 1271 return ret;
1178 } 1272 }
1179 1273
1180 trans->transaction->in_commit = 1; 1274 trans->transaction->in_commit = 1;
@@ -1214,12 +1308,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1214 1308
1215 if (flush_on_commit || snap_pending) { 1309 if (flush_on_commit || snap_pending) {
1216 btrfs_start_delalloc_inodes(root, 1); 1310 btrfs_start_delalloc_inodes(root, 1);
1217 ret = btrfs_wait_ordered_extents(root, 0, 1); 1311 btrfs_wait_ordered_extents(root, 0, 1);
1218 BUG_ON(ret);
1219 } 1312 }
1220 1313
1221 ret = btrfs_run_delayed_items(trans, root); 1314 ret = btrfs_run_delayed_items(trans, root);
1222 BUG_ON(ret); 1315 if (ret)
1316 goto cleanup_transaction;
1223 1317
1224 /* 1318 /*
1225 * rename don't use btrfs_join_transaction, so, once we 1319 * rename don't use btrfs_join_transaction, so, once we
@@ -1261,13 +1355,22 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1261 mutex_lock(&root->fs_info->reloc_mutex); 1355 mutex_lock(&root->fs_info->reloc_mutex);
1262 1356
1263 ret = btrfs_run_delayed_items(trans, root); 1357 ret = btrfs_run_delayed_items(trans, root);
1264 BUG_ON(ret); 1358 if (ret) {
1359 mutex_unlock(&root->fs_info->reloc_mutex);
1360 goto cleanup_transaction;
1361 }
1265 1362
1266 ret = create_pending_snapshots(trans, root->fs_info); 1363 ret = create_pending_snapshots(trans, root->fs_info);
1267 BUG_ON(ret); 1364 if (ret) {
1365 mutex_unlock(&root->fs_info->reloc_mutex);
1366 goto cleanup_transaction;
1367 }
1268 1368
1269 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 1369 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1270 BUG_ON(ret); 1370 if (ret) {
1371 mutex_unlock(&root->fs_info->reloc_mutex);
1372 goto cleanup_transaction;
1373 }
1271 1374
1272 /* 1375 /*
1273 * make sure none of the code above managed to slip in a 1376 * make sure none of the code above managed to slip in a
@@ -1294,7 +1397,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1294 mutex_lock(&root->fs_info->tree_log_mutex); 1397 mutex_lock(&root->fs_info->tree_log_mutex);
1295 1398
1296 ret = commit_fs_roots(trans, root); 1399 ret = commit_fs_roots(trans, root);
1297 BUG_ON(ret); 1400 if (ret) {
1401 mutex_unlock(&root->fs_info->tree_log_mutex);
1402 goto cleanup_transaction;
1403 }
1298 1404
1299 /* commit_fs_roots gets rid of all the tree log roots, it is now 1405 /* commit_fs_roots gets rid of all the tree log roots, it is now
1300 * safe to free the root of tree log roots 1406 * safe to free the root of tree log roots
@@ -1302,7 +1408,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1302 btrfs_free_log_root_tree(trans, root->fs_info); 1408 btrfs_free_log_root_tree(trans, root->fs_info);
1303 1409
1304 ret = commit_cowonly_roots(trans, root); 1410 ret = commit_cowonly_roots(trans, root);
1305 BUG_ON(ret); 1411 if (ret) {
1412 mutex_unlock(&root->fs_info->tree_log_mutex);
1413 goto cleanup_transaction;
1414 }
1306 1415
1307 btrfs_prepare_extent_commit(trans, root); 1416 btrfs_prepare_extent_commit(trans, root);
1308 1417
@@ -1336,8 +1445,18 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1336 wake_up(&root->fs_info->transaction_wait); 1445 wake_up(&root->fs_info->transaction_wait);
1337 1446
1338 ret = btrfs_write_and_wait_transaction(trans, root); 1447 ret = btrfs_write_and_wait_transaction(trans, root);
1339 BUG_ON(ret); 1448 if (ret) {
1340 write_ctree_super(trans, root, 0); 1449 btrfs_error(root->fs_info, ret,
1450 "Error while writing out transaction.");
1451 mutex_unlock(&root->fs_info->tree_log_mutex);
1452 goto cleanup_transaction;
1453 }
1454
1455 ret = write_ctree_super(trans, root, 0);
1456 if (ret) {
1457 mutex_unlock(&root->fs_info->tree_log_mutex);
1458 goto cleanup_transaction;
1459 }
1341 1460
1342 /* 1461 /*
1343 * the super is written, we can safely allow the tree-loggers 1462 * the super is written, we can safely allow the tree-loggers
@@ -1373,6 +1492,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1373 btrfs_run_delayed_iputs(root); 1492 btrfs_run_delayed_iputs(root);
1374 1493
1375 return ret; 1494 return ret;
1495
1496cleanup_transaction:
1497 btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
1498// WARN_ON(1);
1499 if (current->journal_info == trans)
1500 current->journal_info = NULL;
1501 cleanup_transaction(trans, root);
1502
1503 return ret;
1376} 1504}
1377 1505
1378/* 1506/*
@@ -1388,6 +1516,8 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
1388 spin_unlock(&fs_info->trans_lock); 1516 spin_unlock(&fs_info->trans_lock);
1389 1517
1390 while (!list_empty(&list)) { 1518 while (!list_empty(&list)) {
1519 int ret;
1520
1391 root = list_entry(list.next, struct btrfs_root, root_list); 1521 root = list_entry(list.next, struct btrfs_root, root_list);
1392 list_del(&root->root_list); 1522 list_del(&root->root_list);
1393 1523
@@ -1395,9 +1525,10 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
1395 1525
1396 if (btrfs_header_backref_rev(root->node) < 1526 if (btrfs_header_backref_rev(root->node) <
1397 BTRFS_MIXED_BACKREF_REV) 1527 BTRFS_MIXED_BACKREF_REV)
1398 btrfs_drop_snapshot(root, NULL, 0, 0); 1528 ret = btrfs_drop_snapshot(root, NULL, 0, 0);
1399 else 1529 else
1400 btrfs_drop_snapshot(root, NULL, 1, 0); 1530 ret =btrfs_drop_snapshot(root, NULL, 1, 0);
1531 BUG_ON(ret < 0);
1401 } 1532 }
1402 return 0; 1533 return 0;
1403} 1534}
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 02564e6230ac..fe27379e368b 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -43,6 +43,7 @@ struct btrfs_transaction {
43 wait_queue_head_t commit_wait; 43 wait_queue_head_t commit_wait;
44 struct list_head pending_snapshots; 44 struct list_head pending_snapshots;
45 struct btrfs_delayed_ref_root delayed_refs; 45 struct btrfs_delayed_ref_root delayed_refs;
46 int aborted;
46}; 47};
47 48
48struct btrfs_trans_handle { 49struct btrfs_trans_handle {
@@ -55,6 +56,7 @@ struct btrfs_trans_handle {
55 struct btrfs_transaction *transaction; 56 struct btrfs_transaction *transaction;
56 struct btrfs_block_rsv *block_rsv; 57 struct btrfs_block_rsv *block_rsv;
57 struct btrfs_block_rsv *orig_rsv; 58 struct btrfs_block_rsv *orig_rsv;
59 int aborted;
58}; 60};
59 61
60struct btrfs_pending_snapshot { 62struct btrfs_pending_snapshot {
@@ -114,4 +116,5 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
114 struct extent_io_tree *dirty_pages, int mark); 116 struct extent_io_tree *dirty_pages, int mark);
115int btrfs_transaction_blocked(struct btrfs_fs_info *info); 117int btrfs_transaction_blocked(struct btrfs_fs_info *info);
116int btrfs_transaction_in_commit(struct btrfs_fs_info *info); 118int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
119void put_transaction(struct btrfs_transaction *transaction);
117#endif 120#endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 966cc74f5d6c..d017283ae6f5 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -212,14 +212,13 @@ int btrfs_pin_log_trans(struct btrfs_root *root)
212 * indicate we're done making changes to the log tree 212 * indicate we're done making changes to the log tree
213 * and wake up anyone waiting to do a sync 213 * and wake up anyone waiting to do a sync
214 */ 214 */
215int btrfs_end_log_trans(struct btrfs_root *root) 215void btrfs_end_log_trans(struct btrfs_root *root)
216{ 216{
217 if (atomic_dec_and_test(&root->log_writers)) { 217 if (atomic_dec_and_test(&root->log_writers)) {
218 smp_mb(); 218 smp_mb();
219 if (waitqueue_active(&root->log_writer_wait)) 219 if (waitqueue_active(&root->log_writer_wait))
220 wake_up(&root->log_writer_wait); 220 wake_up(&root->log_writer_wait);
221 } 221 }
222 return 0;
223} 222}
224 223
225 224
@@ -378,12 +377,11 @@ insert:
378 u32 found_size; 377 u32 found_size;
379 found_size = btrfs_item_size_nr(path->nodes[0], 378 found_size = btrfs_item_size_nr(path->nodes[0],
380 path->slots[0]); 379 path->slots[0]);
381 if (found_size > item_size) { 380 if (found_size > item_size)
382 btrfs_truncate_item(trans, root, path, item_size, 1); 381 btrfs_truncate_item(trans, root, path, item_size, 1);
383 } else if (found_size < item_size) { 382 else if (found_size < item_size)
384 ret = btrfs_extend_item(trans, root, path, 383 btrfs_extend_item(trans, root, path,
385 item_size - found_size); 384 item_size - found_size);
386 }
387 } else if (ret) { 385 } else if (ret) {
388 return ret; 386 return ret;
389 } 387 }
@@ -1763,7 +1761,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1763 BTRFS_TREE_LOG_OBJECTID); 1761 BTRFS_TREE_LOG_OBJECTID);
1764 ret = btrfs_free_and_pin_reserved_extent(root, 1762 ret = btrfs_free_and_pin_reserved_extent(root,
1765 bytenr, blocksize); 1763 bytenr, blocksize);
1766 BUG_ON(ret); 1764 BUG_ON(ret); /* -ENOMEM or logic errors */
1767 } 1765 }
1768 free_extent_buffer(next); 1766 free_extent_buffer(next);
1769 continue; 1767 continue;
@@ -1871,20 +1869,26 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
1871 wret = walk_down_log_tree(trans, log, path, &level, wc); 1869 wret = walk_down_log_tree(trans, log, path, &level, wc);
1872 if (wret > 0) 1870 if (wret > 0)
1873 break; 1871 break;
1874 if (wret < 0) 1872 if (wret < 0) {
1875 ret = wret; 1873 ret = wret;
1874 goto out;
1875 }
1876 1876
1877 wret = walk_up_log_tree(trans, log, path, &level, wc); 1877 wret = walk_up_log_tree(trans, log, path, &level, wc);
1878 if (wret > 0) 1878 if (wret > 0)
1879 break; 1879 break;
1880 if (wret < 0) 1880 if (wret < 0) {
1881 ret = wret; 1881 ret = wret;
1882 goto out;
1883 }
1882 } 1884 }
1883 1885
1884 /* was the root node processed? if not, catch it here */ 1886 /* was the root node processed? if not, catch it here */
1885 if (path->nodes[orig_level]) { 1887 if (path->nodes[orig_level]) {
1886 wc->process_func(log, path->nodes[orig_level], wc, 1888 ret = wc->process_func(log, path->nodes[orig_level], wc,
1887 btrfs_header_generation(path->nodes[orig_level])); 1889 btrfs_header_generation(path->nodes[orig_level]));
1890 if (ret)
1891 goto out;
1888 if (wc->free) { 1892 if (wc->free) {
1889 struct extent_buffer *next; 1893 struct extent_buffer *next;
1890 1894
@@ -1900,10 +1904,11 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
1900 BTRFS_TREE_LOG_OBJECTID); 1904 BTRFS_TREE_LOG_OBJECTID);
1901 ret = btrfs_free_and_pin_reserved_extent(log, next->start, 1905 ret = btrfs_free_and_pin_reserved_extent(log, next->start,
1902 next->len); 1906 next->len);
1903 BUG_ON(ret); 1907 BUG_ON(ret); /* -ENOMEM or logic errors */
1904 } 1908 }
1905 } 1909 }
1906 1910
1911out:
1907 for (i = 0; i <= orig_level; i++) { 1912 for (i = 0; i <= orig_level; i++) {
1908 if (path->nodes[i]) { 1913 if (path->nodes[i]) {
1909 free_extent_buffer(path->nodes[i]); 1914 free_extent_buffer(path->nodes[i]);
@@ -1963,8 +1968,8 @@ static int wait_log_commit(struct btrfs_trans_handle *trans,
1963 return 0; 1968 return 0;
1964} 1969}
1965 1970
1966static int wait_for_writer(struct btrfs_trans_handle *trans, 1971static void wait_for_writer(struct btrfs_trans_handle *trans,
1967 struct btrfs_root *root) 1972 struct btrfs_root *root)
1968{ 1973{
1969 DEFINE_WAIT(wait); 1974 DEFINE_WAIT(wait);
1970 while (root->fs_info->last_trans_log_full_commit != 1975 while (root->fs_info->last_trans_log_full_commit !=
@@ -1978,7 +1983,6 @@ static int wait_for_writer(struct btrfs_trans_handle *trans,
1978 mutex_lock(&root->log_mutex); 1983 mutex_lock(&root->log_mutex);
1979 finish_wait(&root->log_writer_wait, &wait); 1984 finish_wait(&root->log_writer_wait, &wait);
1980 } 1985 }
1981 return 0;
1982} 1986}
1983 1987
1984/* 1988/*
@@ -2046,7 +2050,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2046 * wait for them until later. 2050 * wait for them until later.
2047 */ 2051 */
2048 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark); 2052 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
2049 BUG_ON(ret); 2053 if (ret) {
2054 btrfs_abort_transaction(trans, root, ret);
2055 mutex_unlock(&root->log_mutex);
2056 goto out;
2057 }
2050 2058
2051 btrfs_set_root_node(&log->root_item, log->node); 2059 btrfs_set_root_node(&log->root_item, log->node);
2052 2060
@@ -2077,7 +2085,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2077 } 2085 }
2078 2086
2079 if (ret) { 2087 if (ret) {
2080 BUG_ON(ret != -ENOSPC); 2088 if (ret != -ENOSPC) {
2089 btrfs_abort_transaction(trans, root, ret);
2090 mutex_unlock(&log_root_tree->log_mutex);
2091 goto out;
2092 }
2081 root->fs_info->last_trans_log_full_commit = trans->transid; 2093 root->fs_info->last_trans_log_full_commit = trans->transid;
2082 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2094 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2083 mutex_unlock(&log_root_tree->log_mutex); 2095 mutex_unlock(&log_root_tree->log_mutex);
@@ -2117,7 +2129,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2117 ret = btrfs_write_and_wait_marked_extents(log_root_tree, 2129 ret = btrfs_write_and_wait_marked_extents(log_root_tree,
2118 &log_root_tree->dirty_log_pages, 2130 &log_root_tree->dirty_log_pages,
2119 EXTENT_DIRTY | EXTENT_NEW); 2131 EXTENT_DIRTY | EXTENT_NEW);
2120 BUG_ON(ret); 2132 if (ret) {
2133 btrfs_abort_transaction(trans, root, ret);
2134 mutex_unlock(&log_root_tree->log_mutex);
2135 goto out_wake_log_root;
2136 }
2121 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2137 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2122 2138
2123 btrfs_set_super_log_root(root->fs_info->super_for_commit, 2139 btrfs_set_super_log_root(root->fs_info->super_for_commit,
@@ -2326,7 +2342,9 @@ out_unlock:
2326 if (ret == -ENOSPC) { 2342 if (ret == -ENOSPC) {
2327 root->fs_info->last_trans_log_full_commit = trans->transid; 2343 root->fs_info->last_trans_log_full_commit = trans->transid;
2328 ret = 0; 2344 ret = 0;
2329 } 2345 } else if (ret < 0)
2346 btrfs_abort_transaction(trans, root, ret);
2347
2330 btrfs_end_log_trans(root); 2348 btrfs_end_log_trans(root);
2331 2349
2332 return err; 2350 return err;
@@ -2357,7 +2375,8 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2357 if (ret == -ENOSPC) { 2375 if (ret == -ENOSPC) {
2358 root->fs_info->last_trans_log_full_commit = trans->transid; 2376 root->fs_info->last_trans_log_full_commit = trans->transid;
2359 ret = 0; 2377 ret = 0;
2360 } 2378 } else if (ret < 0 && ret != -ENOENT)
2379 btrfs_abort_transaction(trans, root, ret);
2361 btrfs_end_log_trans(root); 2380 btrfs_end_log_trans(root);
2362 2381
2363 return ret; 2382 return ret;
@@ -3169,13 +3188,20 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
3169 fs_info->log_root_recovering = 1; 3188 fs_info->log_root_recovering = 1;
3170 3189
3171 trans = btrfs_start_transaction(fs_info->tree_root, 0); 3190 trans = btrfs_start_transaction(fs_info->tree_root, 0);
3172 BUG_ON(IS_ERR(trans)); 3191 if (IS_ERR(trans)) {
3192 ret = PTR_ERR(trans);
3193 goto error;
3194 }
3173 3195
3174 wc.trans = trans; 3196 wc.trans = trans;
3175 wc.pin = 1; 3197 wc.pin = 1;
3176 3198
3177 ret = walk_log_tree(trans, log_root_tree, &wc); 3199 ret = walk_log_tree(trans, log_root_tree, &wc);
3178 BUG_ON(ret); 3200 if (ret) {
3201 btrfs_error(fs_info, ret, "Failed to pin buffers while "
3202 "recovering log root tree.");
3203 goto error;
3204 }
3179 3205
3180again: 3206again:
3181 key.objectid = BTRFS_TREE_LOG_OBJECTID; 3207 key.objectid = BTRFS_TREE_LOG_OBJECTID;
@@ -3184,8 +3210,12 @@ again:
3184 3210
3185 while (1) { 3211 while (1) {
3186 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); 3212 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
3187 if (ret < 0) 3213
3188 break; 3214 if (ret < 0) {
3215 btrfs_error(fs_info, ret,
3216 "Couldn't find tree log root.");
3217 goto error;
3218 }
3189 if (ret > 0) { 3219 if (ret > 0) {
3190 if (path->slots[0] == 0) 3220 if (path->slots[0] == 0)
3191 break; 3221 break;
@@ -3199,14 +3229,24 @@ again:
3199 3229
3200 log = btrfs_read_fs_root_no_radix(log_root_tree, 3230 log = btrfs_read_fs_root_no_radix(log_root_tree,
3201 &found_key); 3231 &found_key);
3202 BUG_ON(IS_ERR(log)); 3232 if (IS_ERR(log)) {
3233 ret = PTR_ERR(log);
3234 btrfs_error(fs_info, ret,
3235 "Couldn't read tree log root.");
3236 goto error;
3237 }
3203 3238
3204 tmp_key.objectid = found_key.offset; 3239 tmp_key.objectid = found_key.offset;
3205 tmp_key.type = BTRFS_ROOT_ITEM_KEY; 3240 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
3206 tmp_key.offset = (u64)-1; 3241 tmp_key.offset = (u64)-1;
3207 3242
3208 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); 3243 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
3209 BUG_ON(IS_ERR_OR_NULL(wc.replay_dest)); 3244 if (IS_ERR(wc.replay_dest)) {
3245 ret = PTR_ERR(wc.replay_dest);
3246 btrfs_error(fs_info, ret, "Couldn't read target root "
3247 "for tree log recovery.");
3248 goto error;
3249 }
3210 3250
3211 wc.replay_dest->log_root = log; 3251 wc.replay_dest->log_root = log;
3212 btrfs_record_root_in_trans(trans, wc.replay_dest); 3252 btrfs_record_root_in_trans(trans, wc.replay_dest);
@@ -3254,6 +3294,10 @@ again:
3254 3294
3255 kfree(log_root_tree); 3295 kfree(log_root_tree);
3256 return 0; 3296 return 0;
3297
3298error:
3299 btrfs_free_path(path);
3300 return ret;
3257} 3301}
3258 3302
3259/* 3303/*
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 2270ac58d746..862ac813f6b8 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -38,7 +38,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
38 struct btrfs_root *root, 38 struct btrfs_root *root,
39 const char *name, int name_len, 39 const char *name, int name_len,
40 struct inode *inode, u64 dirid); 40 struct inode *inode, u64 dirid);
41int btrfs_end_log_trans(struct btrfs_root *root); 41void btrfs_end_log_trans(struct btrfs_root *root);
42int btrfs_pin_log_trans(struct btrfs_root *root); 42int btrfs_pin_log_trans(struct btrfs_root *root);
43int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 43int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
44 struct btrfs_root *root, struct inode *inode, 44 struct btrfs_root *root, struct inode *inode,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index ef41f285a475..a872b48be0ae 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -67,7 +67,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
67 kfree(fs_devices); 67 kfree(fs_devices);
68} 68}
69 69
70int btrfs_cleanup_fs_uuids(void) 70void btrfs_cleanup_fs_uuids(void)
71{ 71{
72 struct btrfs_fs_devices *fs_devices; 72 struct btrfs_fs_devices *fs_devices;
73 73
@@ -77,7 +77,6 @@ int btrfs_cleanup_fs_uuids(void)
77 list_del(&fs_devices->list); 77 list_del(&fs_devices->list);
78 free_fs_devices(fs_devices); 78 free_fs_devices(fs_devices);
79 } 79 }
80 return 0;
81} 80}
82 81
83static noinline struct btrfs_device *__find_device(struct list_head *head, 82static noinline struct btrfs_device *__find_device(struct list_head *head,
@@ -130,7 +129,7 @@ static void requeue_list(struct btrfs_pending_bios *pending_bios,
130 * the list if the block device is congested. This way, multiple devices 129 * the list if the block device is congested. This way, multiple devices
131 * can make progress from a single worker thread. 130 * can make progress from a single worker thread.
132 */ 131 */
133static noinline int run_scheduled_bios(struct btrfs_device *device) 132static noinline void run_scheduled_bios(struct btrfs_device *device)
134{ 133{
135 struct bio *pending; 134 struct bio *pending;
136 struct backing_dev_info *bdi; 135 struct backing_dev_info *bdi;
@@ -316,7 +315,6 @@ loop_lock:
316 315
317done: 316done:
318 blk_finish_plug(&plug); 317 blk_finish_plug(&plug);
319 return 0;
320} 318}
321 319
322static void pending_bios_fn(struct btrfs_work *work) 320static void pending_bios_fn(struct btrfs_work *work)
@@ -455,7 +453,7 @@ error:
455 return ERR_PTR(-ENOMEM); 453 return ERR_PTR(-ENOMEM);
456} 454}
457 455
458int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices) 456void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
459{ 457{
460 struct btrfs_device *device, *next; 458 struct btrfs_device *device, *next;
461 459
@@ -503,7 +501,6 @@ again:
503 fs_devices->latest_trans = latest_transid; 501 fs_devices->latest_trans = latest_transid;
504 502
505 mutex_unlock(&uuid_mutex); 503 mutex_unlock(&uuid_mutex);
506 return 0;
507} 504}
508 505
509static void __free_device(struct work_struct *work) 506static void __free_device(struct work_struct *work)
@@ -552,10 +549,10 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
552 fs_devices->num_can_discard--; 549 fs_devices->num_can_discard--;
553 550
554 new_device = kmalloc(sizeof(*new_device), GFP_NOFS); 551 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
555 BUG_ON(!new_device); 552 BUG_ON(!new_device); /* -ENOMEM */
556 memcpy(new_device, device, sizeof(*new_device)); 553 memcpy(new_device, device, sizeof(*new_device));
557 new_device->name = kstrdup(device->name, GFP_NOFS); 554 new_device->name = kstrdup(device->name, GFP_NOFS);
558 BUG_ON(device->name && !new_device->name); 555 BUG_ON(device->name && !new_device->name); /* -ENOMEM */
559 new_device->bdev = NULL; 556 new_device->bdev = NULL;
560 new_device->writeable = 0; 557 new_device->writeable = 0;
561 new_device->in_fs_metadata = 0; 558 new_device->in_fs_metadata = 0;
@@ -625,6 +622,8 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
625 printk(KERN_INFO "open %s failed\n", device->name); 622 printk(KERN_INFO "open %s failed\n", device->name);
626 goto error; 623 goto error;
627 } 624 }
625 filemap_write_and_wait(bdev->bd_inode->i_mapping);
626 invalidate_bdev(bdev);
628 set_blocksize(bdev, 4096); 627 set_blocksize(bdev, 4096);
629 628
630 bh = btrfs_read_dev_super(bdev); 629 bh = btrfs_read_dev_super(bdev);
@@ -1039,8 +1038,10 @@ again:
1039 leaf = path->nodes[0]; 1038 leaf = path->nodes[0];
1040 extent = btrfs_item_ptr(leaf, path->slots[0], 1039 extent = btrfs_item_ptr(leaf, path->slots[0],
1041 struct btrfs_dev_extent); 1040 struct btrfs_dev_extent);
1041 } else {
1042 btrfs_error(root->fs_info, ret, "Slot search failed");
1043 goto out;
1042 } 1044 }
1043 BUG_ON(ret);
1044 1045
1045 if (device->bytes_used > 0) { 1046 if (device->bytes_used > 0) {
1046 u64 len = btrfs_dev_extent_length(leaf, extent); 1047 u64 len = btrfs_dev_extent_length(leaf, extent);
@@ -1050,7 +1051,10 @@ again:
1050 spin_unlock(&root->fs_info->free_chunk_lock); 1051 spin_unlock(&root->fs_info->free_chunk_lock);
1051 } 1052 }
1052 ret = btrfs_del_item(trans, root, path); 1053 ret = btrfs_del_item(trans, root, path);
1053 1054 if (ret) {
1055 btrfs_error(root->fs_info, ret,
1056 "Failed to remove dev extent item");
1057 }
1054out: 1058out:
1055 btrfs_free_path(path); 1059 btrfs_free_path(path);
1056 return ret; 1060 return ret;
@@ -1078,7 +1082,8 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1078 key.type = BTRFS_DEV_EXTENT_KEY; 1082 key.type = BTRFS_DEV_EXTENT_KEY;
1079 ret = btrfs_insert_empty_item(trans, root, path, &key, 1083 ret = btrfs_insert_empty_item(trans, root, path, &key,
1080 sizeof(*extent)); 1084 sizeof(*extent));
1081 BUG_ON(ret); 1085 if (ret)
1086 goto out;
1082 1087
1083 leaf = path->nodes[0]; 1088 leaf = path->nodes[0];
1084 extent = btrfs_item_ptr(leaf, path->slots[0], 1089 extent = btrfs_item_ptr(leaf, path->slots[0],
@@ -1093,6 +1098,7 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1093 1098
1094 btrfs_set_dev_extent_length(leaf, extent, num_bytes); 1099 btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1095 btrfs_mark_buffer_dirty(leaf); 1100 btrfs_mark_buffer_dirty(leaf);
1101out:
1096 btrfs_free_path(path); 1102 btrfs_free_path(path);
1097 return ret; 1103 return ret;
1098} 1104}
@@ -1118,7 +1124,7 @@ static noinline int find_next_chunk(struct btrfs_root *root,
1118 if (ret < 0) 1124 if (ret < 0)
1119 goto error; 1125 goto error;
1120 1126
1121 BUG_ON(ret == 0); 1127 BUG_ON(ret == 0); /* Corruption */
1122 1128
1123 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY); 1129 ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
1124 if (ret) { 1130 if (ret) {
@@ -1162,7 +1168,7 @@ static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
1162 if (ret < 0) 1168 if (ret < 0)
1163 goto error; 1169 goto error;
1164 1170
1165 BUG_ON(ret == 0); 1171 BUG_ON(ret == 0); /* Corruption */
1166 1172
1167 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID, 1173 ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
1168 BTRFS_DEV_ITEM_KEY); 1174 BTRFS_DEV_ITEM_KEY);
@@ -1350,6 +1356,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1350 } 1356 }
1351 1357
1352 set_blocksize(bdev, 4096); 1358 set_blocksize(bdev, 4096);
1359 invalidate_bdev(bdev);
1353 bh = btrfs_read_dev_super(bdev); 1360 bh = btrfs_read_dev_super(bdev);
1354 if (!bh) { 1361 if (!bh) {
1355 ret = -EINVAL; 1362 ret = -EINVAL;
@@ -1596,7 +1603,7 @@ next_slot:
1596 (unsigned long)btrfs_device_fsid(dev_item), 1603 (unsigned long)btrfs_device_fsid(dev_item),
1597 BTRFS_UUID_SIZE); 1604 BTRFS_UUID_SIZE);
1598 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid); 1605 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1599 BUG_ON(!device); 1606 BUG_ON(!device); /* Logic error */
1600 1607
1601 if (device->fs_devices->seeding) { 1608 if (device->fs_devices->seeding) {
1602 btrfs_set_device_generation(leaf, dev_item, 1609 btrfs_set_device_generation(leaf, dev_item,
@@ -1706,7 +1713,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1706 if (seeding_dev) { 1713 if (seeding_dev) {
1707 sb->s_flags &= ~MS_RDONLY; 1714 sb->s_flags &= ~MS_RDONLY;
1708 ret = btrfs_prepare_sprout(root); 1715 ret = btrfs_prepare_sprout(root);
1709 BUG_ON(ret); 1716 BUG_ON(ret); /* -ENOMEM */
1710 } 1717 }
1711 1718
1712 device->fs_devices = root->fs_info->fs_devices; 1719 device->fs_devices = root->fs_info->fs_devices;
@@ -1744,11 +1751,15 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1744 1751
1745 if (seeding_dev) { 1752 if (seeding_dev) {
1746 ret = init_first_rw_device(trans, root, device); 1753 ret = init_first_rw_device(trans, root, device);
1747 BUG_ON(ret); 1754 if (ret)
1755 goto error_trans;
1748 ret = btrfs_finish_sprout(trans, root); 1756 ret = btrfs_finish_sprout(trans, root);
1749 BUG_ON(ret); 1757 if (ret)
1758 goto error_trans;
1750 } else { 1759 } else {
1751 ret = btrfs_add_device(trans, root, device); 1760 ret = btrfs_add_device(trans, root, device);
1761 if (ret)
1762 goto error_trans;
1752 } 1763 }
1753 1764
1754 /* 1765 /*
@@ -1758,17 +1769,31 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1758 btrfs_clear_space_info_full(root->fs_info); 1769 btrfs_clear_space_info_full(root->fs_info);
1759 1770
1760 unlock_chunks(root); 1771 unlock_chunks(root);
1761 btrfs_commit_transaction(trans, root); 1772 ret = btrfs_commit_transaction(trans, root);
1762 1773
1763 if (seeding_dev) { 1774 if (seeding_dev) {
1764 mutex_unlock(&uuid_mutex); 1775 mutex_unlock(&uuid_mutex);
1765 up_write(&sb->s_umount); 1776 up_write(&sb->s_umount);
1766 1777
1778 if (ret) /* transaction commit */
1779 return ret;
1780
1767 ret = btrfs_relocate_sys_chunks(root); 1781 ret = btrfs_relocate_sys_chunks(root);
1768 BUG_ON(ret); 1782 if (ret < 0)
1783 btrfs_error(root->fs_info, ret,
1784 "Failed to relocate sys chunks after "
1785 "device initialization. This can be fixed "
1786 "using the \"btrfs balance\" command.");
1769 } 1787 }
1770 1788
1771 return ret; 1789 return ret;
1790
1791error_trans:
1792 unlock_chunks(root);
1793 btrfs_abort_transaction(trans, root, ret);
1794 btrfs_end_transaction(trans, root);
1795 kfree(device->name);
1796 kfree(device);
1772error: 1797error:
1773 blkdev_put(bdev, FMODE_EXCL); 1798 blkdev_put(bdev, FMODE_EXCL);
1774 if (seeding_dev) { 1799 if (seeding_dev) {
@@ -1876,10 +1901,20 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1876 key.type = BTRFS_CHUNK_ITEM_KEY; 1901 key.type = BTRFS_CHUNK_ITEM_KEY;
1877 1902
1878 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1903 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1879 BUG_ON(ret); 1904 if (ret < 0)
1905 goto out;
1906 else if (ret > 0) { /* Logic error or corruption */
1907 btrfs_error(root->fs_info, -ENOENT,
1908 "Failed lookup while freeing chunk.");
1909 ret = -ENOENT;
1910 goto out;
1911 }
1880 1912
1881 ret = btrfs_del_item(trans, root, path); 1913 ret = btrfs_del_item(trans, root, path);
1882 1914 if (ret < 0)
1915 btrfs_error(root->fs_info, ret,
1916 "Failed to delete chunk item.");
1917out:
1883 btrfs_free_path(path); 1918 btrfs_free_path(path);
1884 return ret; 1919 return ret;
1885} 1920}
@@ -2041,7 +2076,7 @@ again:
2041 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 2076 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2042 if (ret < 0) 2077 if (ret < 0)
2043 goto error; 2078 goto error;
2044 BUG_ON(ret == 0); 2079 BUG_ON(ret == 0); /* Corruption */
2045 2080
2046 ret = btrfs_previous_item(chunk_root, path, key.objectid, 2081 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2047 key.type); 2082 key.type);
@@ -2250,15 +2285,13 @@ static void unset_balance_control(struct btrfs_fs_info *fs_info)
2250 * Balance filters. Return 1 if chunk should be filtered out 2285 * Balance filters. Return 1 if chunk should be filtered out
2251 * (should not be balanced). 2286 * (should not be balanced).
2252 */ 2287 */
2253static int chunk_profiles_filter(u64 chunk_profile, 2288static int chunk_profiles_filter(u64 chunk_type,
2254 struct btrfs_balance_args *bargs) 2289 struct btrfs_balance_args *bargs)
2255{ 2290{
2256 chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK; 2291 chunk_type = chunk_to_extended(chunk_type) &
2257 2292 BTRFS_EXTENDED_PROFILE_MASK;
2258 if (chunk_profile == 0)
2259 chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2260 2293
2261 if (bargs->profiles & chunk_profile) 2294 if (bargs->profiles & chunk_type)
2262 return 0; 2295 return 0;
2263 2296
2264 return 1; 2297 return 1;
@@ -2365,18 +2398,16 @@ static int chunk_vrange_filter(struct extent_buffer *leaf,
2365 return 1; 2398 return 1;
2366} 2399}
2367 2400
2368static int chunk_soft_convert_filter(u64 chunk_profile, 2401static int chunk_soft_convert_filter(u64 chunk_type,
2369 struct btrfs_balance_args *bargs) 2402 struct btrfs_balance_args *bargs)
2370{ 2403{
2371 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT)) 2404 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2372 return 0; 2405 return 0;
2373 2406
2374 chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK; 2407 chunk_type = chunk_to_extended(chunk_type) &
2408 BTRFS_EXTENDED_PROFILE_MASK;
2375 2409
2376 if (chunk_profile == 0) 2410 if (bargs->target == chunk_type)
2377 chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2378
2379 if (bargs->target & chunk_profile)
2380 return 1; 2411 return 1;
2381 2412
2382 return 0; 2413 return 0;
@@ -2602,6 +2633,30 @@ error:
2602 return ret; 2633 return ret;
2603} 2634}
2604 2635
2636/**
2637 * alloc_profile_is_valid - see if a given profile is valid and reduced
2638 * @flags: profile to validate
2639 * @extended: if true @flags is treated as an extended profile
2640 */
2641static int alloc_profile_is_valid(u64 flags, int extended)
2642{
2643 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2644 BTRFS_BLOCK_GROUP_PROFILE_MASK);
2645
2646 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2647
2648 /* 1) check that all other bits are zeroed */
2649 if (flags & ~mask)
2650 return 0;
2651
2652 /* 2) see if profile is reduced */
2653 if (flags == 0)
2654 return !extended; /* "0" is valid for usual profiles */
2655
2656 /* true if exactly one bit set */
2657 return (flags & (flags - 1)) == 0;
2658}
2659
2605static inline int balance_need_close(struct btrfs_fs_info *fs_info) 2660static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2606{ 2661{
2607 /* cancel requested || normal exit path */ 2662 /* cancel requested || normal exit path */
@@ -2630,6 +2685,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
2630{ 2685{
2631 struct btrfs_fs_info *fs_info = bctl->fs_info; 2686 struct btrfs_fs_info *fs_info = bctl->fs_info;
2632 u64 allowed; 2687 u64 allowed;
2688 int mixed = 0;
2633 int ret; 2689 int ret;
2634 2690
2635 if (btrfs_fs_closing(fs_info) || 2691 if (btrfs_fs_closing(fs_info) ||
@@ -2639,13 +2695,16 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
2639 goto out; 2695 goto out;
2640 } 2696 }
2641 2697
2698 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2699 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2700 mixed = 1;
2701
2642 /* 2702 /*
2643 * In case of mixed groups both data and meta should be picked, 2703 * In case of mixed groups both data and meta should be picked,
2644 * and identical options should be given for both of them. 2704 * and identical options should be given for both of them.
2645 */ 2705 */
2646 allowed = btrfs_super_incompat_flags(fs_info->super_copy); 2706 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2647 if ((allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && 2707 if (mixed && (bctl->flags & allowed)) {
2648 (bctl->flags & (BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA))) {
2649 if (!(bctl->flags & BTRFS_BALANCE_DATA) || 2708 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2650 !(bctl->flags & BTRFS_BALANCE_METADATA) || 2709 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2651 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) { 2710 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
@@ -2656,14 +2715,6 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
2656 } 2715 }
2657 } 2716 }
2658 2717
2659 /*
2660 * Profile changing sanity checks. Skip them if a simple
2661 * balance is requested.
2662 */
2663 if (!((bctl->data.flags | bctl->sys.flags | bctl->meta.flags) &
2664 BTRFS_BALANCE_ARGS_CONVERT))
2665 goto do_balance;
2666
2667 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 2718 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2668 if (fs_info->fs_devices->num_devices == 1) 2719 if (fs_info->fs_devices->num_devices == 1)
2669 allowed |= BTRFS_BLOCK_GROUP_DUP; 2720 allowed |= BTRFS_BLOCK_GROUP_DUP;
@@ -2673,24 +2724,27 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
2673 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 | 2724 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2674 BTRFS_BLOCK_GROUP_RAID10); 2725 BTRFS_BLOCK_GROUP_RAID10);
2675 2726
2676 if (!profile_is_valid(bctl->data.target, 1) || 2727 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2677 bctl->data.target & ~allowed) { 2728 (!alloc_profile_is_valid(bctl->data.target, 1) ||
2729 (bctl->data.target & ~allowed))) {
2678 printk(KERN_ERR "btrfs: unable to start balance with target " 2730 printk(KERN_ERR "btrfs: unable to start balance with target "
2679 "data profile %llu\n", 2731 "data profile %llu\n",
2680 (unsigned long long)bctl->data.target); 2732 (unsigned long long)bctl->data.target);
2681 ret = -EINVAL; 2733 ret = -EINVAL;
2682 goto out; 2734 goto out;
2683 } 2735 }
2684 if (!profile_is_valid(bctl->meta.target, 1) || 2736 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2685 bctl->meta.target & ~allowed) { 2737 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
2738 (bctl->meta.target & ~allowed))) {
2686 printk(KERN_ERR "btrfs: unable to start balance with target " 2739 printk(KERN_ERR "btrfs: unable to start balance with target "
2687 "metadata profile %llu\n", 2740 "metadata profile %llu\n",
2688 (unsigned long long)bctl->meta.target); 2741 (unsigned long long)bctl->meta.target);
2689 ret = -EINVAL; 2742 ret = -EINVAL;
2690 goto out; 2743 goto out;
2691 } 2744 }
2692 if (!profile_is_valid(bctl->sys.target, 1) || 2745 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2693 bctl->sys.target & ~allowed) { 2746 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
2747 (bctl->sys.target & ~allowed))) {
2694 printk(KERN_ERR "btrfs: unable to start balance with target " 2748 printk(KERN_ERR "btrfs: unable to start balance with target "
2695 "system profile %llu\n", 2749 "system profile %llu\n",
2696 (unsigned long long)bctl->sys.target); 2750 (unsigned long long)bctl->sys.target);
@@ -2698,7 +2752,9 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
2698 goto out; 2752 goto out;
2699 } 2753 }
2700 2754
2701 if (bctl->data.target & BTRFS_BLOCK_GROUP_DUP) { 2755 /* allow dup'ed data chunks only in mixed mode */
2756 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2757 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
2702 printk(KERN_ERR "btrfs: dup for data is not allowed\n"); 2758 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2703 ret = -EINVAL; 2759 ret = -EINVAL;
2704 goto out; 2760 goto out;
@@ -2724,7 +2780,6 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
2724 } 2780 }
2725 } 2781 }
2726 2782
2727do_balance:
2728 ret = insert_balance_item(fs_info->tree_root, bctl); 2783 ret = insert_balance_item(fs_info->tree_root, bctl);
2729 if (ret && ret != -EEXIST) 2784 if (ret && ret != -EEXIST)
2730 goto out; 2785 goto out;
@@ -2967,7 +3022,7 @@ again:
2967 key.offset = (u64)-1; 3022 key.offset = (u64)-1;
2968 key.type = BTRFS_DEV_EXTENT_KEY; 3023 key.type = BTRFS_DEV_EXTENT_KEY;
2969 3024
2970 while (1) { 3025 do {
2971 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3026 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2972 if (ret < 0) 3027 if (ret < 0)
2973 goto done; 3028 goto done;
@@ -3009,8 +3064,7 @@ again:
3009 goto done; 3064 goto done;
3010 if (ret == -ENOSPC) 3065 if (ret == -ENOSPC)
3011 failed++; 3066 failed++;
3012 key.offset -= 1; 3067 } while (key.offset-- > 0);
3013 }
3014 3068
3015 if (failed && !retried) { 3069 if (failed && !retried) {
3016 failed = 0; 3070 failed = 0;
@@ -3128,11 +3182,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3128 int i; 3182 int i;
3129 int j; 3183 int j;
3130 3184
3131 if ((type & BTRFS_BLOCK_GROUP_RAID1) && 3185 BUG_ON(!alloc_profile_is_valid(type, 0));
3132 (type & BTRFS_BLOCK_GROUP_DUP)) {
3133 WARN_ON(1);
3134 type &= ~BTRFS_BLOCK_GROUP_DUP;
3135 }
3136 3186
3137 if (list_empty(&fs_devices->alloc_list)) 3187 if (list_empty(&fs_devices->alloc_list))
3138 return -ENOSPC; 3188 return -ENOSPC;
@@ -3328,13 +3378,15 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3328 write_lock(&em_tree->lock); 3378 write_lock(&em_tree->lock);
3329 ret = add_extent_mapping(em_tree, em); 3379 ret = add_extent_mapping(em_tree, em);
3330 write_unlock(&em_tree->lock); 3380 write_unlock(&em_tree->lock);
3331 BUG_ON(ret);
3332 free_extent_map(em); 3381 free_extent_map(em);
3382 if (ret)
3383 goto error;
3333 3384
3334 ret = btrfs_make_block_group(trans, extent_root, 0, type, 3385 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3335 BTRFS_FIRST_CHUNK_TREE_OBJECTID, 3386 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3336 start, num_bytes); 3387 start, num_bytes);
3337 BUG_ON(ret); 3388 if (ret)
3389 goto error;
3338 3390
3339 for (i = 0; i < map->num_stripes; ++i) { 3391 for (i = 0; i < map->num_stripes; ++i) {
3340 struct btrfs_device *device; 3392 struct btrfs_device *device;
@@ -3347,7 +3399,10 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3347 info->chunk_root->root_key.objectid, 3399 info->chunk_root->root_key.objectid,
3348 BTRFS_FIRST_CHUNK_TREE_OBJECTID, 3400 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3349 start, dev_offset, stripe_size); 3401 start, dev_offset, stripe_size);
3350 BUG_ON(ret); 3402 if (ret) {
3403 btrfs_abort_transaction(trans, extent_root, ret);
3404 goto error;
3405 }
3351 } 3406 }
3352 3407
3353 kfree(devices_info); 3408 kfree(devices_info);
@@ -3383,7 +3438,8 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3383 device = map->stripes[index].dev; 3438 device = map->stripes[index].dev;
3384 device->bytes_used += stripe_size; 3439 device->bytes_used += stripe_size;
3385 ret = btrfs_update_device(trans, device); 3440 ret = btrfs_update_device(trans, device);
3386 BUG_ON(ret); 3441 if (ret)
3442 goto out_free;
3387 index++; 3443 index++;
3388 } 3444 }
3389 3445
@@ -3420,16 +3476,19 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3420 key.offset = chunk_offset; 3476 key.offset = chunk_offset;
3421 3477
3422 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size); 3478 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3423 BUG_ON(ret);
3424 3479
3425 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { 3480 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3481 /*
3482 * TODO: Cleanup of inserted chunk root in case of
3483 * failure.
3484 */
3426 ret = btrfs_add_system_chunk(chunk_root, &key, chunk, 3485 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3427 item_size); 3486 item_size);
3428 BUG_ON(ret);
3429 } 3487 }
3430 3488
3489out_free:
3431 kfree(chunk); 3490 kfree(chunk);
3432 return 0; 3491 return ret;
3433} 3492}
3434 3493
3435/* 3494/*
@@ -3461,7 +3520,8 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3461 3520
3462 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset, 3521 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3463 chunk_size, stripe_size); 3522 chunk_size, stripe_size);
3464 BUG_ON(ret); 3523 if (ret)
3524 return ret;
3465 return 0; 3525 return 0;
3466} 3526}
3467 3527
@@ -3493,7 +3553,8 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3493 3553
3494 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size, 3554 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3495 &stripe_size, chunk_offset, alloc_profile); 3555 &stripe_size, chunk_offset, alloc_profile);
3496 BUG_ON(ret); 3556 if (ret)
3557 return ret;
3497 3558
3498 sys_chunk_offset = chunk_offset + chunk_size; 3559 sys_chunk_offset = chunk_offset + chunk_size;
3499 3560
@@ -3504,10 +3565,12 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3504 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map, 3565 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3505 &sys_chunk_size, &sys_stripe_size, 3566 &sys_chunk_size, &sys_stripe_size,
3506 sys_chunk_offset, alloc_profile); 3567 sys_chunk_offset, alloc_profile);
3507 BUG_ON(ret); 3568 if (ret)
3569 goto abort;
3508 3570
3509 ret = btrfs_add_device(trans, fs_info->chunk_root, device); 3571 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3510 BUG_ON(ret); 3572 if (ret)
3573 goto abort;
3511 3574
3512 /* 3575 /*
3513 * Modifying chunk tree needs allocating new blocks from both 3576 * Modifying chunk tree needs allocating new blocks from both
@@ -3517,13 +3580,20 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3517 */ 3580 */
3518 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset, 3581 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3519 chunk_size, stripe_size); 3582 chunk_size, stripe_size);
3520 BUG_ON(ret); 3583 if (ret)
3584 goto abort;
3521 3585
3522 ret = __finish_chunk_alloc(trans, extent_root, sys_map, 3586 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3523 sys_chunk_offset, sys_chunk_size, 3587 sys_chunk_offset, sys_chunk_size,
3524 sys_stripe_size); 3588 sys_stripe_size);
3525 BUG_ON(ret); 3589 if (ret)
3590 goto abort;
3591
3526 return 0; 3592 return 0;
3593
3594abort:
3595 btrfs_abort_transaction(trans, root, ret);
3596 return ret;
3527} 3597}
3528 3598
3529int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset) 3599int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
@@ -3874,7 +3944,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3874 do_div(length, map->num_stripes); 3944 do_div(length, map->num_stripes);
3875 3945
3876 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS); 3946 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
3877 BUG_ON(!buf); 3947 BUG_ON(!buf); /* -ENOMEM */
3878 3948
3879 for (i = 0; i < map->num_stripes; i++) { 3949 for (i = 0; i < map->num_stripes; i++) {
3880 if (devid && map->stripes[i].dev->devid != devid) 3950 if (devid && map->stripes[i].dev->devid != devid)
@@ -3967,7 +4037,7 @@ struct async_sched {
3967 * This will add one bio to the pending list for a device and make sure 4037 * This will add one bio to the pending list for a device and make sure
3968 * the work struct is scheduled. 4038 * the work struct is scheduled.
3969 */ 4039 */
3970static noinline int schedule_bio(struct btrfs_root *root, 4040static noinline void schedule_bio(struct btrfs_root *root,
3971 struct btrfs_device *device, 4041 struct btrfs_device *device,
3972 int rw, struct bio *bio) 4042 int rw, struct bio *bio)
3973{ 4043{
@@ -3979,7 +4049,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
3979 bio_get(bio); 4049 bio_get(bio);
3980 btrfsic_submit_bio(rw, bio); 4050 btrfsic_submit_bio(rw, bio);
3981 bio_put(bio); 4051 bio_put(bio);
3982 return 0; 4052 return;
3983 } 4053 }
3984 4054
3985 /* 4055 /*
@@ -4013,7 +4083,6 @@ static noinline int schedule_bio(struct btrfs_root *root,
4013 if (should_queue) 4083 if (should_queue)
4014 btrfs_queue_worker(&root->fs_info->submit_workers, 4084 btrfs_queue_worker(&root->fs_info->submit_workers,
4015 &device->work); 4085 &device->work);
4016 return 0;
4017} 4086}
4018 4087
4019int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, 4088int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
@@ -4036,7 +4105,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4036 4105
4037 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio, 4106 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4038 mirror_num); 4107 mirror_num);
4039 BUG_ON(ret); 4108 if (ret) /* -ENOMEM */
4109 return ret;
4040 4110
4041 total_devs = bbio->num_stripes; 4111 total_devs = bbio->num_stripes;
4042 if (map_length < length) { 4112 if (map_length < length) {
@@ -4055,7 +4125,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4055 while (dev_nr < total_devs) { 4125 while (dev_nr < total_devs) {
4056 if (dev_nr < total_devs - 1) { 4126 if (dev_nr < total_devs - 1) {
4057 bio = bio_clone(first_bio, GFP_NOFS); 4127 bio = bio_clone(first_bio, GFP_NOFS);
4058 BUG_ON(!bio); 4128 BUG_ON(!bio); /* -ENOMEM */
4059 } else { 4129 } else {
4060 bio = first_bio; 4130 bio = first_bio;
4061 } 4131 }
@@ -4209,13 +4279,13 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4209 write_lock(&map_tree->map_tree.lock); 4279 write_lock(&map_tree->map_tree.lock);
4210 ret = add_extent_mapping(&map_tree->map_tree, em); 4280 ret = add_extent_mapping(&map_tree->map_tree, em);
4211 write_unlock(&map_tree->map_tree.lock); 4281 write_unlock(&map_tree->map_tree.lock);
4212 BUG_ON(ret); 4282 BUG_ON(ret); /* Tree corruption */
4213 free_extent_map(em); 4283 free_extent_map(em);
4214 4284
4215 return 0; 4285 return 0;
4216} 4286}
4217 4287
4218static int fill_device_from_item(struct extent_buffer *leaf, 4288static void fill_device_from_item(struct extent_buffer *leaf,
4219 struct btrfs_dev_item *dev_item, 4289 struct btrfs_dev_item *dev_item,
4220 struct btrfs_device *device) 4290 struct btrfs_device *device)
4221{ 4291{
@@ -4232,8 +4302,6 @@ static int fill_device_from_item(struct extent_buffer *leaf,
4232 4302
4233 ptr = (unsigned long)btrfs_device_uuid(dev_item); 4303 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4234 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE); 4304 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4235
4236 return 0;
4237} 4305}
4238 4306
4239static int open_seed_devices(struct btrfs_root *root, u8 *fsid) 4307static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
@@ -4384,7 +4452,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
4384 * to silence the warning eg. on PowerPC 64. 4452 * to silence the warning eg. on PowerPC 64.
4385 */ 4453 */
4386 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE) 4454 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4387 SetPageUptodate(sb->first_page); 4455 SetPageUptodate(sb->pages[0]);
4388 4456
4389 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); 4457 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4390 array_size = btrfs_super_sys_array_size(super_copy); 4458 array_size = btrfs_super_sys_array_size(super_copy);
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 19ac95048b88..bb6b03f97aaa 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -260,12 +260,12 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
260int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, 260int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
261 struct btrfs_fs_devices **fs_devices_ret); 261 struct btrfs_fs_devices **fs_devices_ret);
262int btrfs_close_devices(struct btrfs_fs_devices *fs_devices); 262int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
263int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices); 263void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices);
264int btrfs_add_device(struct btrfs_trans_handle *trans, 264int btrfs_add_device(struct btrfs_trans_handle *trans,
265 struct btrfs_root *root, 265 struct btrfs_root *root,
266 struct btrfs_device *device); 266 struct btrfs_device *device);
267int btrfs_rm_device(struct btrfs_root *root, char *device_path); 267int btrfs_rm_device(struct btrfs_root *root, char *device_path);
268int btrfs_cleanup_fs_uuids(void); 268void btrfs_cleanup_fs_uuids(void);
269int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len); 269int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len);
270int btrfs_grow_device(struct btrfs_trans_handle *trans, 270int btrfs_grow_device(struct btrfs_trans_handle *trans,
271 struct btrfs_device *device, u64 new_size); 271 struct btrfs_device *device, u64 new_size);
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 573b899b5a5d..270464629416 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -58,15 +58,16 @@ cifs_dump_mem(char *label, void *data, int length)
58} 58}
59 59
60#ifdef CONFIG_CIFS_DEBUG2 60#ifdef CONFIG_CIFS_DEBUG2
61void cifs_dump_detail(struct smb_hdr *smb) 61void cifs_dump_detail(void *buf)
62{ 62{
63 struct smb_hdr *smb = (struct smb_hdr *)buf;
64
63 cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d", 65 cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
64 smb->Command, smb->Status.CifsError, 66 smb->Command, smb->Status.CifsError,
65 smb->Flags, smb->Flags2, smb->Mid, smb->Pid); 67 smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
66 cERROR(1, "smb buf %p len %d", smb, smbCalcSize(smb)); 68 cERROR(1, "smb buf %p len %d", smb, smbCalcSize(smb));
67} 69}
68 70
69
70void cifs_dump_mids(struct TCP_Server_Info *server) 71void cifs_dump_mids(struct TCP_Server_Info *server)
71{ 72{
72 struct list_head *tmp; 73 struct list_head *tmp;
@@ -79,15 +80,15 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
79 spin_lock(&GlobalMid_Lock); 80 spin_lock(&GlobalMid_Lock);
80 list_for_each(tmp, &server->pending_mid_q) { 81 list_for_each(tmp, &server->pending_mid_q) {
81 mid_entry = list_entry(tmp, struct mid_q_entry, qhead); 82 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
82 cERROR(1, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %d", 83 cERROR(1, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu",
83 mid_entry->midState, 84 mid_entry->mid_state,
84 (int)mid_entry->command, 85 le16_to_cpu(mid_entry->command),
85 mid_entry->pid, 86 mid_entry->pid,
86 mid_entry->callback_data, 87 mid_entry->callback_data,
87 mid_entry->mid); 88 mid_entry->mid);
88#ifdef CONFIG_CIFS_STATS2 89#ifdef CONFIG_CIFS_STATS2
89 cERROR(1, "IsLarge: %d buf: %p time rcv: %ld now: %ld", 90 cERROR(1, "IsLarge: %d buf: %p time rcv: %ld now: %ld",
90 mid_entry->largeBuf, 91 mid_entry->large_buf,
91 mid_entry->resp_buf, 92 mid_entry->resp_buf,
92 mid_entry->when_received, 93 mid_entry->when_received,
93 jiffies); 94 jiffies);
@@ -217,12 +218,12 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
217 mid_entry = list_entry(tmp3, struct mid_q_entry, 218 mid_entry = list_entry(tmp3, struct mid_q_entry,
218 qhead); 219 qhead);
219 seq_printf(m, "\tState: %d com: %d pid:" 220 seq_printf(m, "\tState: %d com: %d pid:"
220 " %d cbdata: %p mid %d\n", 221 " %d cbdata: %p mid %llu\n",
221 mid_entry->midState, 222 mid_entry->mid_state,
222 (int)mid_entry->command, 223 le16_to_cpu(mid_entry->command),
223 mid_entry->pid, 224 mid_entry->pid,
224 mid_entry->callback_data, 225 mid_entry->callback_data,
225 mid_entry->mid); 226 mid_entry->mid);
226 } 227 }
227 spin_unlock(&GlobalMid_Lock); 228 spin_unlock(&GlobalMid_Lock);
228 } 229 }
@@ -417,7 +418,6 @@ static const struct file_operations cifs_stats_proc_fops = {
417 418
418static struct proc_dir_entry *proc_fs_cifs; 419static struct proc_dir_entry *proc_fs_cifs;
419static const struct file_operations cifsFYI_proc_fops; 420static const struct file_operations cifsFYI_proc_fops;
420static const struct file_operations cifs_oplock_proc_fops;
421static const struct file_operations cifs_lookup_cache_proc_fops; 421static const struct file_operations cifs_lookup_cache_proc_fops;
422static const struct file_operations traceSMB_proc_fops; 422static const struct file_operations traceSMB_proc_fops;
423static const struct file_operations cifs_multiuser_mount_proc_fops; 423static const struct file_operations cifs_multiuser_mount_proc_fops;
@@ -438,7 +438,6 @@ cifs_proc_init(void)
438#endif /* STATS */ 438#endif /* STATS */
439 proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops); 439 proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops);
440 proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops); 440 proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
441 proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops);
442 proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs, 441 proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
443 &cifs_linux_ext_proc_fops); 442 &cifs_linux_ext_proc_fops);
444 proc_create("MultiuserMount", 0, proc_fs_cifs, 443 proc_create("MultiuserMount", 0, proc_fs_cifs,
@@ -462,7 +461,6 @@ cifs_proc_clean(void)
462 remove_proc_entry("Stats", proc_fs_cifs); 461 remove_proc_entry("Stats", proc_fs_cifs);
463#endif 462#endif
464 remove_proc_entry("MultiuserMount", proc_fs_cifs); 463 remove_proc_entry("MultiuserMount", proc_fs_cifs);
465 remove_proc_entry("OplockEnabled", proc_fs_cifs);
466 remove_proc_entry("SecurityFlags", proc_fs_cifs); 464 remove_proc_entry("SecurityFlags", proc_fs_cifs);
467 remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs); 465 remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
468 remove_proc_entry("LookupCacheEnabled", proc_fs_cifs); 466 remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
@@ -508,46 +506,6 @@ static const struct file_operations cifsFYI_proc_fops = {
508 .write = cifsFYI_proc_write, 506 .write = cifsFYI_proc_write,
509}; 507};
510 508
511static int cifs_oplock_proc_show(struct seq_file *m, void *v)
512{
513 seq_printf(m, "%d\n", enable_oplocks);
514 return 0;
515}
516
517static int cifs_oplock_proc_open(struct inode *inode, struct file *file)
518{
519 return single_open(file, cifs_oplock_proc_show, NULL);
520}
521
522static ssize_t cifs_oplock_proc_write(struct file *file,
523 const char __user *buffer, size_t count, loff_t *ppos)
524{
525 char c;
526 int rc;
527
528 printk(KERN_WARNING "CIFS: The /proc/fs/cifs/OplockEnabled interface "
529 "will be removed in kernel version 3.4. Please migrate to "
530 "using the 'enable_oplocks' module parameter in cifs.ko.\n");
531 rc = get_user(c, buffer);
532 if (rc)
533 return rc;
534 if (c == '0' || c == 'n' || c == 'N')
535 enable_oplocks = false;
536 else if (c == '1' || c == 'y' || c == 'Y')
537 enable_oplocks = true;
538
539 return count;
540}
541
542static const struct file_operations cifs_oplock_proc_fops = {
543 .owner = THIS_MODULE,
544 .open = cifs_oplock_proc_open,
545 .read = seq_read,
546 .llseek = seq_lseek,
547 .release = single_release,
548 .write = cifs_oplock_proc_write,
549};
550
551static int cifs_linux_ext_proc_show(struct seq_file *m, void *v) 509static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
552{ 510{
553 seq_printf(m, "%d\n", linuxExtEnabled); 511 seq_printf(m, "%d\n", linuxExtEnabled);
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 8942b28cf807..566e0ae8dc2c 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -26,13 +26,13 @@
26void cifs_dump_mem(char *label, void *data, int length); 26void cifs_dump_mem(char *label, void *data, int length);
27#ifdef CONFIG_CIFS_DEBUG2 27#ifdef CONFIG_CIFS_DEBUG2
28#define DBG2 2 28#define DBG2 2
29void cifs_dump_detail(struct smb_hdr *); 29void cifs_dump_detail(void *);
30void cifs_dump_mids(struct TCP_Server_Info *); 30void cifs_dump_mids(struct TCP_Server_Info *);
31#else 31#else
32#define DBG2 0 32#define DBG2 0
33#endif 33#endif
34extern int traceSMB; /* flag which enables the function below */ 34extern int traceSMB; /* flag which enables the function below */
35void dump_smb(struct smb_hdr *, int); 35void dump_smb(void *, int);
36#define CIFS_INFO 0x01 36#define CIFS_INFO 0x01
37#define CIFS_RC 0x02 37#define CIFS_RC 0x02
38#define CIFS_TIMER 0x04 38#define CIFS_TIMER 0x04
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index eee522c56ef0..d34212822444 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -85,6 +85,8 @@ extern mempool_t *cifs_sm_req_poolp;
85extern mempool_t *cifs_req_poolp; 85extern mempool_t *cifs_req_poolp;
86extern mempool_t *cifs_mid_poolp; 86extern mempool_t *cifs_mid_poolp;
87 87
88struct workqueue_struct *cifsiod_wq;
89
88static int 90static int
89cifs_read_super(struct super_block *sb) 91cifs_read_super(struct super_block *sb)
90{ 92{
@@ -1111,9 +1113,15 @@ init_cifs(void)
1111 cFYI(1, "cifs_max_pending set to max of %u", CIFS_MAX_REQ); 1113 cFYI(1, "cifs_max_pending set to max of %u", CIFS_MAX_REQ);
1112 } 1114 }
1113 1115
1116 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1117 if (!cifsiod_wq) {
1118 rc = -ENOMEM;
1119 goto out_clean_proc;
1120 }
1121
1114 rc = cifs_fscache_register(); 1122 rc = cifs_fscache_register();
1115 if (rc) 1123 if (rc)
1116 goto out_clean_proc; 1124 goto out_destroy_wq;
1117 1125
1118 rc = cifs_init_inodecache(); 1126 rc = cifs_init_inodecache();
1119 if (rc) 1127 if (rc)
@@ -1161,6 +1169,8 @@ out_destroy_inodecache:
1161 cifs_destroy_inodecache(); 1169 cifs_destroy_inodecache();
1162out_unreg_fscache: 1170out_unreg_fscache:
1163 cifs_fscache_unregister(); 1171 cifs_fscache_unregister();
1172out_destroy_wq:
1173 destroy_workqueue(cifsiod_wq);
1164out_clean_proc: 1174out_clean_proc:
1165 cifs_proc_clean(); 1175 cifs_proc_clean();
1166 return rc; 1176 return rc;
@@ -1183,6 +1193,7 @@ exit_cifs(void)
1183 cifs_destroy_mids(); 1193 cifs_destroy_mids();
1184 cifs_destroy_inodecache(); 1194 cifs_destroy_inodecache();
1185 cifs_fscache_unregister(); 1195 cifs_fscache_unregister();
1196 destroy_workqueue(cifsiod_wq);
1186 cifs_proc_clean(); 1197 cifs_proc_clean();
1187} 1198}
1188 1199
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index fe5ecf1b422a..d1389bb33ceb 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -125,5 +125,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
125extern const struct export_operations cifs_export_ops; 125extern const struct export_operations cifs_export_ops;
126#endif /* CONFIG_CIFS_NFSD_EXPORT */ 126#endif /* CONFIG_CIFS_NFSD_EXPORT */
127 127
128#define CIFS_VERSION "1.76" 128#define CIFS_VERSION "1.77"
129#endif /* _CIFSFS_H */ 129#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 339ebe3ebc0d..4ff6313f0a91 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -230,6 +230,12 @@ struct cifs_mnt_data {
230 int flags; 230 int flags;
231}; 231};
232 232
233static inline unsigned int
234get_rfc1002_length(void *buf)
235{
236 return be32_to_cpu(*((__be32 *)buf));
237}
238
233struct TCP_Server_Info { 239struct TCP_Server_Info {
234 struct list_head tcp_ses_list; 240 struct list_head tcp_ses_list;
235 struct list_head smb_ses_list; 241 struct list_head smb_ses_list;
@@ -276,7 +282,7 @@ struct TCP_Server_Info {
276 vcnumbers */ 282 vcnumbers */
277 int capabilities; /* allow selective disabling of caps by smb sess */ 283 int capabilities; /* allow selective disabling of caps by smb sess */
278 int timeAdj; /* Adjust for difference in server time zone in sec */ 284 int timeAdj; /* Adjust for difference in server time zone in sec */
279 __u16 CurrentMid; /* multiplex id - rotating counter */ 285 __u64 CurrentMid; /* multiplex id - rotating counter */
280 char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */ 286 char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */
281 /* 16th byte of RFC1001 workstation name is always null */ 287 /* 16th byte of RFC1001 workstation name is always null */
282 char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; 288 char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
@@ -335,6 +341,18 @@ has_credits(struct TCP_Server_Info *server, int *credits)
335 return num > 0; 341 return num > 0;
336} 342}
337 343
344static inline size_t
345header_size(void)
346{
347 return sizeof(struct smb_hdr);
348}
349
350static inline size_t
351max_header_size(void)
352{
353 return MAX_CIFS_HDR_SIZE;
354}
355
338/* 356/*
339 * Macros to allow the TCP_Server_Info->net field and related code to drop out 357 * Macros to allow the TCP_Server_Info->net field and related code to drop out
340 * when CONFIG_NET_NS isn't set. 358 * when CONFIG_NET_NS isn't set.
@@ -583,9 +601,11 @@ struct cifs_io_parms {
583 * Take a reference on the file private data. Must be called with 601 * Take a reference on the file private data. Must be called with
584 * cifs_file_list_lock held. 602 * cifs_file_list_lock held.
585 */ 603 */
586static inline void cifsFileInfo_get(struct cifsFileInfo *cifs_file) 604static inline
605struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file)
587{ 606{
588 ++cifs_file->count; 607 ++cifs_file->count;
608 return cifs_file;
589} 609}
590 610
591void cifsFileInfo_put(struct cifsFileInfo *cifs_file); 611void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
@@ -606,7 +626,7 @@ struct cifsInodeInfo {
606 bool delete_pending; /* DELETE_ON_CLOSE is set */ 626 bool delete_pending; /* DELETE_ON_CLOSE is set */
607 bool invalid_mapping; /* pagecache is invalid */ 627 bool invalid_mapping; /* pagecache is invalid */
608 unsigned long time; /* jiffies of last update of inode */ 628 unsigned long time; /* jiffies of last update of inode */
609 u64 server_eof; /* current file size on server */ 629 u64 server_eof; /* current file size on server -- protected by i_lock */
610 u64 uniqueid; /* server inode number */ 630 u64 uniqueid; /* server inode number */
611 u64 createtime; /* creation time on server */ 631 u64 createtime; /* creation time on server */
612#ifdef CONFIG_CIFS_FSCACHE 632#ifdef CONFIG_CIFS_FSCACHE
@@ -713,8 +733,8 @@ typedef void (mid_callback_t)(struct mid_q_entry *mid);
713/* one of these for every pending CIFS request to the server */ 733/* one of these for every pending CIFS request to the server */
714struct mid_q_entry { 734struct mid_q_entry {
715 struct list_head qhead; /* mids waiting on reply from this server */ 735 struct list_head qhead; /* mids waiting on reply from this server */
716 __u16 mid; /* multiplex id */ 736 __u64 mid; /* multiplex id */
717 __u16 pid; /* process id */ 737 __u32 pid; /* process id */
718 __u32 sequence_number; /* for CIFS signing */ 738 __u32 sequence_number; /* for CIFS signing */
719 unsigned long when_alloc; /* when mid was created */ 739 unsigned long when_alloc; /* when mid was created */
720#ifdef CONFIG_CIFS_STATS2 740#ifdef CONFIG_CIFS_STATS2
@@ -724,10 +744,10 @@ struct mid_q_entry {
724 mid_receive_t *receive; /* call receive callback */ 744 mid_receive_t *receive; /* call receive callback */
725 mid_callback_t *callback; /* call completion callback */ 745 mid_callback_t *callback; /* call completion callback */
726 void *callback_data; /* general purpose pointer for callback */ 746 void *callback_data; /* general purpose pointer for callback */
727 struct smb_hdr *resp_buf; /* pointer to received SMB header */ 747 void *resp_buf; /* pointer to received SMB header */
728 int midState; /* wish this were enum but can not pass to wait_event */ 748 int mid_state; /* wish this were enum but can not pass to wait_event */
729 __u8 command; /* smb command code */ 749 __le16 command; /* smb command code */
730 bool largeBuf:1; /* if valid response, is pointer to large buf */ 750 bool large_buf:1; /* if valid response, is pointer to large buf */
731 bool multiRsp:1; /* multiple trans2 responses for one request */ 751 bool multiRsp:1; /* multiple trans2 responses for one request */
732 bool multiEnd:1; /* both received */ 752 bool multiEnd:1; /* both received */
733}; 753};
@@ -1052,5 +1072,6 @@ GLOBAL_EXTERN spinlock_t gidsidlock;
1052void cifs_oplock_break(struct work_struct *work); 1072void cifs_oplock_break(struct work_struct *work);
1053 1073
1054extern const struct slow_work_ops cifs_oplock_break_ops; 1074extern const struct slow_work_ops cifs_oplock_break_ops;
1075extern struct workqueue_struct *cifsiod_wq;
1055 1076
1056#endif /* _CIFS_GLOB_H */ 1077#endif /* _CIFS_GLOB_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 503e73d8bdb7..96192c1e380a 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -77,7 +77,7 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
77 struct smb_hdr * /* out */ , 77 struct smb_hdr * /* out */ ,
78 int * /* bytes returned */ , const int long_op); 78 int * /* bytes returned */ , const int long_op);
79extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses, 79extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
80 struct smb_hdr *in_buf, int flags); 80 char *in_buf, int flags);
81extern int cifs_check_receive(struct mid_q_entry *mid, 81extern int cifs_check_receive(struct mid_q_entry *mid,
82 struct TCP_Server_Info *server, bool log_error); 82 struct TCP_Server_Info *server, bool log_error);
83extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *, 83extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
@@ -91,9 +91,8 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
91extern void cifs_add_credits(struct TCP_Server_Info *server, 91extern void cifs_add_credits(struct TCP_Server_Info *server,
92 const unsigned int add); 92 const unsigned int add);
93extern void cifs_set_credits(struct TCP_Server_Info *server, const int val); 93extern void cifs_set_credits(struct TCP_Server_Info *server, const int val);
94extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length); 94extern int checkSMB(char *buf, unsigned int length);
95extern bool is_valid_oplock_break(struct smb_hdr *smb, 95extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
96 struct TCP_Server_Info *);
97extern bool backup_cred(struct cifs_sb_info *); 96extern bool backup_cred(struct cifs_sb_info *);
98extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof); 97extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
99extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, 98extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
@@ -107,7 +106,7 @@ extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
107extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port); 106extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port);
108extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, 107extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
109 const unsigned short int port); 108 const unsigned short int port);
110extern int map_smb_to_linux_error(struct smb_hdr *smb, bool logErr); 109extern int map_smb_to_linux_error(char *buf, bool logErr);
111extern void header_assemble(struct smb_hdr *, char /* command */ , 110extern void header_assemble(struct smb_hdr *, char /* command */ ,
112 const struct cifs_tcon *, int /* length of 111 const struct cifs_tcon *, int /* length of
113 fixed section (word count) in two byte units */); 112 fixed section (word count) in two byte units */);
@@ -116,7 +115,7 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
116 void **request_buf); 115 void **request_buf);
117extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses, 116extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
118 const struct nls_table *nls_cp); 117 const struct nls_table *nls_cp);
119extern __u16 GetNextMid(struct TCP_Server_Info *server); 118extern __u64 GetNextMid(struct TCP_Server_Info *server);
120extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); 119extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
121extern u64 cifs_UnixTimeToNT(struct timespec); 120extern u64 cifs_UnixTimeToNT(struct timespec);
122extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, 121extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
@@ -484,18 +483,25 @@ int cifs_async_readv(struct cifs_readdata *rdata);
484/* asynchronous write support */ 483/* asynchronous write support */
485struct cifs_writedata { 484struct cifs_writedata {
486 struct kref refcount; 485 struct kref refcount;
486 struct list_head list;
487 struct completion done;
487 enum writeback_sync_modes sync_mode; 488 enum writeback_sync_modes sync_mode;
488 struct work_struct work; 489 struct work_struct work;
489 struct cifsFileInfo *cfile; 490 struct cifsFileInfo *cfile;
490 __u64 offset; 491 __u64 offset;
492 pid_t pid;
491 unsigned int bytes; 493 unsigned int bytes;
492 int result; 494 int result;
495 void (*marshal_iov) (struct kvec *iov,
496 struct cifs_writedata *wdata);
493 unsigned int nr_pages; 497 unsigned int nr_pages;
494 struct page *pages[1]; 498 struct page *pages[1];
495}; 499};
496 500
497int cifs_async_writev(struct cifs_writedata *wdata); 501int cifs_async_writev(struct cifs_writedata *wdata);
498struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages); 502void cifs_writev_complete(struct work_struct *work);
503struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
504 work_func_t complete);
499void cifs_writedata_release(struct kref *refcount); 505void cifs_writedata_release(struct kref *refcount);
500 506
501#endif /* _CIFSPROTO_H */ 507#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 70aac35c398f..8fecc99be344 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -696,7 +696,7 @@ CIFSSMBTDis(const int xid, struct cifs_tcon *tcon)
696 if (rc) 696 if (rc)
697 return rc; 697 return rc;
698 698
699 rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0); 699 rc = SendReceiveNoRsp(xid, tcon->ses, (char *)smb_buffer, 0);
700 if (rc) 700 if (rc)
701 cFYI(1, "Tree disconnect failed %d", rc); 701 cFYI(1, "Tree disconnect failed %d", rc);
702 702
@@ -792,7 +792,7 @@ CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
792 pSMB->hdr.Uid = ses->Suid; 792 pSMB->hdr.Uid = ses->Suid;
793 793
794 pSMB->AndXCommand = 0xFF; 794 pSMB->AndXCommand = 0xFF;
795 rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0); 795 rc = SendReceiveNoRsp(xid, ses, (char *) pSMB, 0);
796session_already_dead: 796session_already_dead:
797 mutex_unlock(&ses->session_mutex); 797 mutex_unlock(&ses->session_mutex);
798 798
@@ -1414,8 +1414,7 @@ cifs_readdata_free(struct cifs_readdata *rdata)
1414static int 1414static int
1415cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) 1415cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1416{ 1416{
1417 READ_RSP *rsp = (READ_RSP *)server->smallbuf; 1417 unsigned int rfclen = get_rfc1002_length(server->smallbuf);
1418 unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length);
1419 int remaining = rfclen + 4 - server->total_read; 1418 int remaining = rfclen + 4 - server->total_read;
1420 struct cifs_readdata *rdata = mid->callback_data; 1419 struct cifs_readdata *rdata = mid->callback_data;
1421 1420
@@ -1424,7 +1423,7 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1424 1423
1425 length = cifs_read_from_socket(server, server->bigbuf, 1424 length = cifs_read_from_socket(server, server->bigbuf,
1426 min_t(unsigned int, remaining, 1425 min_t(unsigned int, remaining,
1427 CIFSMaxBufSize + MAX_CIFS_HDR_SIZE)); 1426 CIFSMaxBufSize + max_header_size()));
1428 if (length < 0) 1427 if (length < 0)
1429 return length; 1428 return length;
1430 server->total_read += length; 1429 server->total_read += length;
@@ -1435,19 +1434,40 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1435 return 0; 1434 return 0;
1436} 1435}
1437 1436
1437static inline size_t
1438read_rsp_size(void)
1439{
1440 return sizeof(READ_RSP);
1441}
1442
1443static inline unsigned int
1444read_data_offset(char *buf)
1445{
1446 READ_RSP *rsp = (READ_RSP *)buf;
1447 return le16_to_cpu(rsp->DataOffset);
1448}
1449
1450static inline unsigned int
1451read_data_length(char *buf)
1452{
1453 READ_RSP *rsp = (READ_RSP *)buf;
1454 return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
1455 le16_to_cpu(rsp->DataLength);
1456}
1457
1438static int 1458static int
1439cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) 1459cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1440{ 1460{
1441 int length, len; 1461 int length, len;
1442 unsigned int data_offset, remaining, data_len; 1462 unsigned int data_offset, remaining, data_len;
1443 struct cifs_readdata *rdata = mid->callback_data; 1463 struct cifs_readdata *rdata = mid->callback_data;
1444 READ_RSP *rsp = (READ_RSP *)server->smallbuf; 1464 char *buf = server->smallbuf;
1445 unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length) + 4; 1465 unsigned int buflen = get_rfc1002_length(buf) + 4;
1446 u64 eof; 1466 u64 eof;
1447 pgoff_t eof_index; 1467 pgoff_t eof_index;
1448 struct page *page, *tpage; 1468 struct page *page, *tpage;
1449 1469
1450 cFYI(1, "%s: mid=%u offset=%llu bytes=%u", __func__, 1470 cFYI(1, "%s: mid=%llu offset=%llu bytes=%u", __func__,
1451 mid->mid, rdata->offset, rdata->bytes); 1471 mid->mid, rdata->offset, rdata->bytes);
1452 1472
1453 /* 1473 /*
@@ -1455,10 +1475,9 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1455 * can if there's not enough data. At this point, we've read down to 1475 * can if there's not enough data. At this point, we've read down to
1456 * the Mid. 1476 * the Mid.
1457 */ 1477 */
1458 len = min_t(unsigned int, rfclen, sizeof(*rsp)) - 1478 len = min_t(unsigned int, buflen, read_rsp_size()) - header_size() + 1;
1459 sizeof(struct smb_hdr) + 1;
1460 1479
1461 rdata->iov[0].iov_base = server->smallbuf + sizeof(struct smb_hdr) - 1; 1480 rdata->iov[0].iov_base = buf + header_size() - 1;
1462 rdata->iov[0].iov_len = len; 1481 rdata->iov[0].iov_len = len;
1463 1482
1464 length = cifs_readv_from_socket(server, rdata->iov, 1, len); 1483 length = cifs_readv_from_socket(server, rdata->iov, 1, len);
@@ -1467,7 +1486,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1467 server->total_read += length; 1486 server->total_read += length;
1468 1487
1469 /* Was the SMB read successful? */ 1488 /* Was the SMB read successful? */
1470 rdata->result = map_smb_to_linux_error(&rsp->hdr, false); 1489 rdata->result = map_smb_to_linux_error(buf, false);
1471 if (rdata->result != 0) { 1490 if (rdata->result != 0) {
1472 cFYI(1, "%s: server returned error %d", __func__, 1491 cFYI(1, "%s: server returned error %d", __func__,
1473 rdata->result); 1492 rdata->result);
@@ -1475,14 +1494,14 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1475 } 1494 }
1476 1495
1477 /* Is there enough to get to the rest of the READ_RSP header? */ 1496 /* Is there enough to get to the rest of the READ_RSP header? */
1478 if (server->total_read < sizeof(READ_RSP)) { 1497 if (server->total_read < read_rsp_size()) {
1479 cFYI(1, "%s: server returned short header. got=%u expected=%zu", 1498 cFYI(1, "%s: server returned short header. got=%u expected=%zu",
1480 __func__, server->total_read, sizeof(READ_RSP)); 1499 __func__, server->total_read, read_rsp_size());
1481 rdata->result = -EIO; 1500 rdata->result = -EIO;
1482 return cifs_readv_discard(server, mid); 1501 return cifs_readv_discard(server, mid);
1483 } 1502 }
1484 1503
1485 data_offset = le16_to_cpu(rsp->DataOffset) + 4; 1504 data_offset = read_data_offset(buf) + 4;
1486 if (data_offset < server->total_read) { 1505 if (data_offset < server->total_read) {
1487 /* 1506 /*
1488 * win2k8 sometimes sends an offset of 0 when the read 1507 * win2k8 sometimes sends an offset of 0 when the read
@@ -1506,7 +1525,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1506 len = data_offset - server->total_read; 1525 len = data_offset - server->total_read;
1507 if (len > 0) { 1526 if (len > 0) {
1508 /* read any junk before data into the rest of smallbuf */ 1527 /* read any junk before data into the rest of smallbuf */
1509 rdata->iov[0].iov_base = server->smallbuf + server->total_read; 1528 rdata->iov[0].iov_base = buf + server->total_read;
1510 rdata->iov[0].iov_len = len; 1529 rdata->iov[0].iov_len = len;
1511 length = cifs_readv_from_socket(server, rdata->iov, 1, len); 1530 length = cifs_readv_from_socket(server, rdata->iov, 1, len);
1512 if (length < 0) 1531 if (length < 0)
@@ -1515,15 +1534,14 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1515 } 1534 }
1516 1535
1517 /* set up first iov for signature check */ 1536 /* set up first iov for signature check */
1518 rdata->iov[0].iov_base = server->smallbuf; 1537 rdata->iov[0].iov_base = buf;
1519 rdata->iov[0].iov_len = server->total_read; 1538 rdata->iov[0].iov_len = server->total_read;
1520 cFYI(1, "0: iov_base=%p iov_len=%zu", 1539 cFYI(1, "0: iov_base=%p iov_len=%zu",
1521 rdata->iov[0].iov_base, rdata->iov[0].iov_len); 1540 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
1522 1541
1523 /* how much data is in the response? */ 1542 /* how much data is in the response? */
1524 data_len = le16_to_cpu(rsp->DataLengthHigh) << 16; 1543 data_len = read_data_length(buf);
1525 data_len += le16_to_cpu(rsp->DataLength); 1544 if (data_offset + data_len > buflen) {
1526 if (data_offset + data_len > rfclen) {
1527 /* data_len is corrupt -- discard frame */ 1545 /* data_len is corrupt -- discard frame */
1528 rdata->result = -EIO; 1546 rdata->result = -EIO;
1529 return cifs_readv_discard(server, mid); 1547 return cifs_readv_discard(server, mid);
@@ -1602,11 +1620,11 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1602 1620
1603 rdata->bytes = length; 1621 rdata->bytes = length;
1604 1622
1605 cFYI(1, "total_read=%u rfclen=%u remaining=%u", server->total_read, 1623 cFYI(1, "total_read=%u buflen=%u remaining=%u", server->total_read,
1606 rfclen, remaining); 1624 buflen, remaining);
1607 1625
1608 /* discard anything left over */ 1626 /* discard anything left over */
1609 if (server->total_read < rfclen) 1627 if (server->total_read < buflen)
1610 return cifs_readv_discard(server, mid); 1628 return cifs_readv_discard(server, mid);
1611 1629
1612 dequeue_mid(mid, false); 1630 dequeue_mid(mid, false);
@@ -1647,10 +1665,10 @@ cifs_readv_callback(struct mid_q_entry *mid)
1647 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink); 1665 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
1648 struct TCP_Server_Info *server = tcon->ses->server; 1666 struct TCP_Server_Info *server = tcon->ses->server;
1649 1667
1650 cFYI(1, "%s: mid=%u state=%d result=%d bytes=%u", __func__, 1668 cFYI(1, "%s: mid=%llu state=%d result=%d bytes=%u", __func__,
1651 mid->mid, mid->midState, rdata->result, rdata->bytes); 1669 mid->mid, mid->mid_state, rdata->result, rdata->bytes);
1652 1670
1653 switch (mid->midState) { 1671 switch (mid->mid_state) {
1654 case MID_RESPONSE_RECEIVED: 1672 case MID_RESPONSE_RECEIVED:
1655 /* result already set, check signature */ 1673 /* result already set, check signature */
1656 if (server->sec_mode & 1674 if (server->sec_mode &
@@ -1671,7 +1689,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
1671 rdata->result = -EIO; 1689 rdata->result = -EIO;
1672 } 1690 }
1673 1691
1674 queue_work(system_nrt_wq, &rdata->work); 1692 queue_work(cifsiod_wq, &rdata->work);
1675 DeleteMidQEntry(mid); 1693 DeleteMidQEntry(mid);
1676 cifs_add_credits(server, 1); 1694 cifs_add_credits(server, 1);
1677} 1695}
@@ -2017,7 +2035,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
2017 kref_put(&wdata->refcount, cifs_writedata_release); 2035 kref_put(&wdata->refcount, cifs_writedata_release);
2018} 2036}
2019 2037
2020static void 2038void
2021cifs_writev_complete(struct work_struct *work) 2039cifs_writev_complete(struct work_struct *work)
2022{ 2040{
2023 struct cifs_writedata *wdata = container_of(work, 2041 struct cifs_writedata *wdata = container_of(work,
@@ -2026,7 +2044,9 @@ cifs_writev_complete(struct work_struct *work)
2026 int i = 0; 2044 int i = 0;
2027 2045
2028 if (wdata->result == 0) { 2046 if (wdata->result == 0) {
2047 spin_lock(&inode->i_lock);
2029 cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes); 2048 cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
2049 spin_unlock(&inode->i_lock);
2030 cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink), 2050 cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
2031 wdata->bytes); 2051 wdata->bytes);
2032 } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN) 2052 } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
@@ -2047,7 +2067,7 @@ cifs_writev_complete(struct work_struct *work)
2047} 2067}
2048 2068
2049struct cifs_writedata * 2069struct cifs_writedata *
2050cifs_writedata_alloc(unsigned int nr_pages) 2070cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
2051{ 2071{
2052 struct cifs_writedata *wdata; 2072 struct cifs_writedata *wdata;
2053 2073
@@ -2061,14 +2081,16 @@ cifs_writedata_alloc(unsigned int nr_pages)
2061 wdata = kzalloc(sizeof(*wdata) + 2081 wdata = kzalloc(sizeof(*wdata) +
2062 sizeof(struct page *) * (nr_pages - 1), GFP_NOFS); 2082 sizeof(struct page *) * (nr_pages - 1), GFP_NOFS);
2063 if (wdata != NULL) { 2083 if (wdata != NULL) {
2064 INIT_WORK(&wdata->work, cifs_writev_complete);
2065 kref_init(&wdata->refcount); 2084 kref_init(&wdata->refcount);
2085 INIT_LIST_HEAD(&wdata->list);
2086 init_completion(&wdata->done);
2087 INIT_WORK(&wdata->work, complete);
2066 } 2088 }
2067 return wdata; 2089 return wdata;
2068} 2090}
2069 2091
2070/* 2092/*
2071 * Check the midState and signature on received buffer (if any), and queue the 2093 * Check the mid_state and signature on received buffer (if any), and queue the
2072 * workqueue completion task. 2094 * workqueue completion task.
2073 */ 2095 */
2074static void 2096static void
@@ -2079,7 +2101,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
2079 unsigned int written; 2101 unsigned int written;
2080 WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf; 2102 WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
2081 2103
2082 switch (mid->midState) { 2104 switch (mid->mid_state) {
2083 case MID_RESPONSE_RECEIVED: 2105 case MID_RESPONSE_RECEIVED:
2084 wdata->result = cifs_check_receive(mid, tcon->ses->server, 0); 2106 wdata->result = cifs_check_receive(mid, tcon->ses->server, 0);
2085 if (wdata->result != 0) 2107 if (wdata->result != 0)
@@ -2111,7 +2133,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
2111 break; 2133 break;
2112 } 2134 }
2113 2135
2114 queue_work(system_nrt_wq, &wdata->work); 2136 queue_work(cifsiod_wq, &wdata->work);
2115 DeleteMidQEntry(mid); 2137 DeleteMidQEntry(mid);
2116 cifs_add_credits(tcon->ses->server, 1); 2138 cifs_add_credits(tcon->ses->server, 1);
2117} 2139}
@@ -2124,7 +2146,6 @@ cifs_async_writev(struct cifs_writedata *wdata)
2124 WRITE_REQ *smb = NULL; 2146 WRITE_REQ *smb = NULL;
2125 int wct; 2147 int wct;
2126 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); 2148 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
2127 struct inode *inode = wdata->cfile->dentry->d_inode;
2128 struct kvec *iov = NULL; 2149 struct kvec *iov = NULL;
2129 2150
2130 if (tcon->ses->capabilities & CAP_LARGE_FILES) { 2151 if (tcon->ses->capabilities & CAP_LARGE_FILES) {
@@ -2148,8 +2169,8 @@ cifs_async_writev(struct cifs_writedata *wdata)
2148 goto async_writev_out; 2169 goto async_writev_out;
2149 } 2170 }
2150 2171
2151 smb->hdr.Pid = cpu_to_le16((__u16)wdata->cfile->pid); 2172 smb->hdr.Pid = cpu_to_le16((__u16)wdata->pid);
2152 smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->cfile->pid >> 16)); 2173 smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->pid >> 16));
2153 2174
2154 smb->AndXCommand = 0xFF; /* none */ 2175 smb->AndXCommand = 0xFF; /* none */
2155 smb->Fid = wdata->cfile->netfid; 2176 smb->Fid = wdata->cfile->netfid;
@@ -2167,15 +2188,13 @@ cifs_async_writev(struct cifs_writedata *wdata)
2167 iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1; 2188 iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1;
2168 iov[0].iov_base = smb; 2189 iov[0].iov_base = smb;
2169 2190
2170 /* marshal up the pages into iov array */ 2191 /*
2171 wdata->bytes = 0; 2192 * This function should marshal up the page array into the kvec
2172 for (i = 0; i < wdata->nr_pages; i++) { 2193 * array, reserving [0] for the header. It should kmap the pages
2173 iov[i + 1].iov_len = min(inode->i_size - 2194 * and set the iov_len properly for each one. It may also set
2174 page_offset(wdata->pages[i]), 2195 * wdata->bytes too.
2175 (loff_t)PAGE_CACHE_SIZE); 2196 */
2176 iov[i + 1].iov_base = kmap(wdata->pages[i]); 2197 wdata->marshal_iov(iov, wdata);
2177 wdata->bytes += iov[i + 1].iov_len;
2178 }
2179 2198
2180 cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes); 2199 cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
2181 2200
@@ -2420,8 +2439,7 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
2420 (struct smb_hdr *) pSMB, &bytes_returned); 2439 (struct smb_hdr *) pSMB, &bytes_returned);
2421 cifs_small_buf_release(pSMB); 2440 cifs_small_buf_release(pSMB);
2422 } else { 2441 } else {
2423 rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *)pSMB, 2442 rc = SendReceiveNoRsp(xid, tcon->ses, (char *)pSMB, timeout);
2424 timeout);
2425 /* SMB buffer freed by function above */ 2443 /* SMB buffer freed by function above */
2426 } 2444 }
2427 cifs_stats_inc(&tcon->num_locks); 2445 cifs_stats_inc(&tcon->num_locks);
@@ -2588,7 +2606,7 @@ CIFSSMBClose(const int xid, struct cifs_tcon *tcon, int smb_file_id)
2588 pSMB->FileID = (__u16) smb_file_id; 2606 pSMB->FileID = (__u16) smb_file_id;
2589 pSMB->LastWriteTime = 0xFFFFFFFF; 2607 pSMB->LastWriteTime = 0xFFFFFFFF;
2590 pSMB->ByteCount = 0; 2608 pSMB->ByteCount = 0;
2591 rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); 2609 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
2592 cifs_stats_inc(&tcon->num_closes); 2610 cifs_stats_inc(&tcon->num_closes);
2593 if (rc) { 2611 if (rc) {
2594 if (rc != -EINTR) { 2612 if (rc != -EINTR) {
@@ -2617,7 +2635,7 @@ CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, int smb_file_id)
2617 2635
2618 pSMB->FileID = (__u16) smb_file_id; 2636 pSMB->FileID = (__u16) smb_file_id;
2619 pSMB->ByteCount = 0; 2637 pSMB->ByteCount = 0;
2620 rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); 2638 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
2621 cifs_stats_inc(&tcon->num_flushes); 2639 cifs_stats_inc(&tcon->num_flushes);
2622 if (rc) 2640 if (rc)
2623 cERROR(1, "Send error in Flush = %d", rc); 2641 cERROR(1, "Send error in Flush = %d", rc);
@@ -4625,7 +4643,7 @@ CIFSFindClose(const int xid, struct cifs_tcon *tcon,
4625 4643
4626 pSMB->FileID = searchHandle; 4644 pSMB->FileID = searchHandle;
4627 pSMB->ByteCount = 0; 4645 pSMB->ByteCount = 0;
4628 rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); 4646 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
4629 if (rc) 4647 if (rc)
4630 cERROR(1, "Send error in FindClose = %d", rc); 4648 cERROR(1, "Send error in FindClose = %d", rc);
4631 4649
@@ -5646,7 +5664,7 @@ CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon, __u64 size,
5646 pSMB->Reserved4 = 0; 5664 pSMB->Reserved4 = 0;
5647 inc_rfc1001_len(pSMB, byte_count); 5665 inc_rfc1001_len(pSMB, byte_count);
5648 pSMB->ByteCount = cpu_to_le16(byte_count); 5666 pSMB->ByteCount = cpu_to_le16(byte_count);
5649 rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); 5667 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
5650 if (rc) { 5668 if (rc) {
5651 cFYI(1, "Send error in SetFileInfo (SetFileSize) = %d", rc); 5669 cFYI(1, "Send error in SetFileInfo (SetFileSize) = %d", rc);
5652 } 5670 }
@@ -5715,7 +5733,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
5715 inc_rfc1001_len(pSMB, byte_count); 5733 inc_rfc1001_len(pSMB, byte_count);
5716 pSMB->ByteCount = cpu_to_le16(byte_count); 5734 pSMB->ByteCount = cpu_to_le16(byte_count);
5717 memcpy(data_offset, data, sizeof(FILE_BASIC_INFO)); 5735 memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
5718 rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); 5736 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
5719 if (rc) 5737 if (rc)
5720 cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc); 5738 cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);
5721 5739
@@ -5774,7 +5792,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
5774 inc_rfc1001_len(pSMB, byte_count); 5792 inc_rfc1001_len(pSMB, byte_count);
5775 pSMB->ByteCount = cpu_to_le16(byte_count); 5793 pSMB->ByteCount = cpu_to_le16(byte_count);
5776 *data_offset = delete_file ? 1 : 0; 5794 *data_offset = delete_file ? 1 : 0;
5777 rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); 5795 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
5778 if (rc) 5796 if (rc)
5779 cFYI(1, "Send error in SetFileDisposition = %d", rc); 5797 cFYI(1, "Send error in SetFileDisposition = %d", rc);
5780 5798
@@ -6006,7 +6024,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
6006 6024
6007 cifs_fill_unix_set_info(data_offset, args); 6025 cifs_fill_unix_set_info(data_offset, args);
6008 6026
6009 rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); 6027 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
6010 if (rc) 6028 if (rc)
6011 cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc); 6029 cFYI(1, "Send error in Set Time (SetFileInfo) = %d", rc);
6012 6030
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 5560e1d5e54b..302a15c505a9 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -40,6 +40,8 @@
40#include <linux/module.h> 40#include <linux/module.h>
41#include <keys/user-type.h> 41#include <keys/user-type.h>
42#include <net/ipv6.h> 42#include <net/ipv6.h>
43#include <linux/parser.h>
44
43#include "cifspdu.h" 45#include "cifspdu.h"
44#include "cifsglob.h" 46#include "cifsglob.h"
45#include "cifsproto.h" 47#include "cifsproto.h"
@@ -63,6 +65,193 @@ extern mempool_t *cifs_req_poolp;
63#define TLINK_ERROR_EXPIRE (1 * HZ) 65#define TLINK_ERROR_EXPIRE (1 * HZ)
64#define TLINK_IDLE_EXPIRE (600 * HZ) 66#define TLINK_IDLE_EXPIRE (600 * HZ)
65 67
68enum {
69
70 /* Mount options that take no arguments */
71 Opt_user_xattr, Opt_nouser_xattr,
72 Opt_forceuid, Opt_noforceuid,
73 Opt_noblocksend, Opt_noautotune,
74 Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
75 Opt_mapchars, Opt_nomapchars, Opt_sfu,
76 Opt_nosfu, Opt_nodfs, Opt_posixpaths,
77 Opt_noposixpaths, Opt_nounix,
78 Opt_nocase,
79 Opt_brl, Opt_nobrl,
80 Opt_forcemandatorylock, Opt_setuids,
81 Opt_nosetuids, Opt_dynperm, Opt_nodynperm,
82 Opt_nohard, Opt_nosoft,
83 Opt_nointr, Opt_intr,
84 Opt_nostrictsync, Opt_strictsync,
85 Opt_serverino, Opt_noserverino,
86 Opt_rwpidforward, Opt_cifsacl, Opt_nocifsacl,
87 Opt_acl, Opt_noacl, Opt_locallease,
88 Opt_sign, Opt_seal, Opt_direct,
89 Opt_strictcache, Opt_noac,
90 Opt_fsc, Opt_mfsymlinks,
91 Opt_multiuser, Opt_sloppy,
92
93 /* Mount options which take numeric value */
94 Opt_backupuid, Opt_backupgid, Opt_uid,
95 Opt_cruid, Opt_gid, Opt_file_mode,
96 Opt_dirmode, Opt_port,
97 Opt_rsize, Opt_wsize, Opt_actimeo,
98
99 /* Mount options which take string value */
100 Opt_user, Opt_pass, Opt_ip,
101 Opt_unc, Opt_domain,
102 Opt_srcaddr, Opt_prefixpath,
103 Opt_iocharset, Opt_sockopt,
104 Opt_netbiosname, Opt_servern,
105 Opt_ver, Opt_sec,
106
107 /* Mount options to be ignored */
108 Opt_ignore,
109
110 /* Options which could be blank */
111 Opt_blank_pass,
112
113 Opt_err
114};
115
116static const match_table_t cifs_mount_option_tokens = {
117
118 { Opt_user_xattr, "user_xattr" },
119 { Opt_nouser_xattr, "nouser_xattr" },
120 { Opt_forceuid, "forceuid" },
121 { Opt_noforceuid, "noforceuid" },
122 { Opt_noblocksend, "noblocksend" },
123 { Opt_noautotune, "noautotune" },
124 { Opt_hard, "hard" },
125 { Opt_soft, "soft" },
126 { Opt_perm, "perm" },
127 { Opt_noperm, "noperm" },
128 { Opt_mapchars, "mapchars" },
129 { Opt_nomapchars, "nomapchars" },
130 { Opt_sfu, "sfu" },
131 { Opt_nosfu, "nosfu" },
132 { Opt_nodfs, "nodfs" },
133 { Opt_posixpaths, "posixpaths" },
134 { Opt_noposixpaths, "noposixpaths" },
135 { Opt_nounix, "nounix" },
136 { Opt_nounix, "nolinux" },
137 { Opt_nocase, "nocase" },
138 { Opt_nocase, "ignorecase" },
139 { Opt_brl, "brl" },
140 { Opt_nobrl, "nobrl" },
141 { Opt_nobrl, "nolock" },
142 { Opt_forcemandatorylock, "forcemandatorylock" },
143 { Opt_forcemandatorylock, "forcemand" },
144 { Opt_setuids, "setuids" },
145 { Opt_nosetuids, "nosetuids" },
146 { Opt_dynperm, "dynperm" },
147 { Opt_nodynperm, "nodynperm" },
148 { Opt_nohard, "nohard" },
149 { Opt_nosoft, "nosoft" },
150 { Opt_nointr, "nointr" },
151 { Opt_intr, "intr" },
152 { Opt_nostrictsync, "nostrictsync" },
153 { Opt_strictsync, "strictsync" },
154 { Opt_serverino, "serverino" },
155 { Opt_noserverino, "noserverino" },
156 { Opt_rwpidforward, "rwpidforward" },
157 { Opt_cifsacl, "cifsacl" },
158 { Opt_nocifsacl, "nocifsacl" },
159 { Opt_acl, "acl" },
160 { Opt_noacl, "noacl" },
161 { Opt_locallease, "locallease" },
162 { Opt_sign, "sign" },
163 { Opt_seal, "seal" },
164 { Opt_direct, "direct" },
165 { Opt_direct, "forceddirectio" },
166 { Opt_strictcache, "strictcache" },
167 { Opt_noac, "noac" },
168 { Opt_fsc, "fsc" },
169 { Opt_mfsymlinks, "mfsymlinks" },
170 { Opt_multiuser, "multiuser" },
171 { Opt_sloppy, "sloppy" },
172
173 { Opt_backupuid, "backupuid=%s" },
174 { Opt_backupgid, "backupgid=%s" },
175 { Opt_uid, "uid=%s" },
176 { Opt_cruid, "cruid=%s" },
177 { Opt_gid, "gid=%s" },
178 { Opt_file_mode, "file_mode=%s" },
179 { Opt_dirmode, "dirmode=%s" },
180 { Opt_dirmode, "dir_mode=%s" },
181 { Opt_port, "port=%s" },
182 { Opt_rsize, "rsize=%s" },
183 { Opt_wsize, "wsize=%s" },
184 { Opt_actimeo, "actimeo=%s" },
185
186 { Opt_user, "user=%s" },
187 { Opt_user, "username=%s" },
188 { Opt_blank_pass, "pass=" },
189 { Opt_pass, "pass=%s" },
190 { Opt_pass, "password=%s" },
191 { Opt_ip, "ip=%s" },
192 { Opt_ip, "addr=%s" },
193 { Opt_unc, "unc=%s" },
194 { Opt_unc, "target=%s" },
195 { Opt_unc, "path=%s" },
196 { Opt_domain, "dom=%s" },
197 { Opt_domain, "domain=%s" },
198 { Opt_domain, "workgroup=%s" },
199 { Opt_srcaddr, "srcaddr=%s" },
200 { Opt_prefixpath, "prefixpath=%s" },
201 { Opt_iocharset, "iocharset=%s" },
202 { Opt_sockopt, "sockopt=%s" },
203 { Opt_netbiosname, "netbiosname=%s" },
204 { Opt_servern, "servern=%s" },
205 { Opt_ver, "ver=%s" },
206 { Opt_ver, "vers=%s" },
207 { Opt_ver, "version=%s" },
208 { Opt_sec, "sec=%s" },
209
210 { Opt_ignore, "cred" },
211 { Opt_ignore, "credentials" },
212 { Opt_ignore, "guest" },
213 { Opt_ignore, "rw" },
214 { Opt_ignore, "ro" },
215 { Opt_ignore, "suid" },
216 { Opt_ignore, "nosuid" },
217 { Opt_ignore, "exec" },
218 { Opt_ignore, "noexec" },
219 { Opt_ignore, "nodev" },
220 { Opt_ignore, "noauto" },
221 { Opt_ignore, "dev" },
222 { Opt_ignore, "mand" },
223 { Opt_ignore, "nomand" },
224 { Opt_ignore, "_netdev" },
225
226 { Opt_err, NULL }
227};
228
229enum {
230 Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p,
231 Opt_sec_ntlmsspi, Opt_sec_ntlmssp,
232 Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2i,
233 Opt_sec_nontlm, Opt_sec_lanman,
234 Opt_sec_none,
235
236 Opt_sec_err
237};
238
239static const match_table_t cifs_secflavor_tokens = {
240 { Opt_sec_krb5, "krb5" },
241 { Opt_sec_krb5i, "krb5i" },
242 { Opt_sec_krb5p, "krb5p" },
243 { Opt_sec_ntlmsspi, "ntlmsspi" },
244 { Opt_sec_ntlmssp, "ntlmssp" },
245 { Opt_ntlm, "ntlm" },
246 { Opt_sec_ntlmi, "ntlmi" },
247 { Opt_sec_ntlmv2i, "ntlmv2i" },
248 { Opt_sec_nontlm, "nontlm" },
249 { Opt_sec_lanman, "lanman" },
250 { Opt_sec_none, "none" },
251
252 { Opt_sec_err, NULL }
253};
254
66static int ip_connect(struct TCP_Server_Info *server); 255static int ip_connect(struct TCP_Server_Info *server);
67static int generic_ip_connect(struct TCP_Server_Info *server); 256static int generic_ip_connect(struct TCP_Server_Info *server);
68static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink); 257static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
@@ -143,8 +332,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
143 spin_lock(&GlobalMid_Lock); 332 spin_lock(&GlobalMid_Lock);
144 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { 333 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
145 mid_entry = list_entry(tmp, struct mid_q_entry, qhead); 334 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
146 if (mid_entry->midState == MID_REQUEST_SUBMITTED) 335 if (mid_entry->mid_state == MID_REQUEST_SUBMITTED)
147 mid_entry->midState = MID_RETRY_NEEDED; 336 mid_entry->mid_state = MID_RETRY_NEEDED;
148 list_move(&mid_entry->qhead, &retry_list); 337 list_move(&mid_entry->qhead, &retry_list);
149 } 338 }
150 spin_unlock(&GlobalMid_Lock); 339 spin_unlock(&GlobalMid_Lock);
@@ -183,8 +372,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
183 -EINVAL = invalid transact2 372 -EINVAL = invalid transact2
184 373
185 */ 374 */
186static int check2ndT2(struct smb_hdr *pSMB) 375static int check2ndT2(char *buf)
187{ 376{
377 struct smb_hdr *pSMB = (struct smb_hdr *)buf;
188 struct smb_t2_rsp *pSMBt; 378 struct smb_t2_rsp *pSMBt;
189 int remaining; 379 int remaining;
190 __u16 total_data_size, data_in_this_rsp; 380 __u16 total_data_size, data_in_this_rsp;
@@ -224,10 +414,10 @@ static int check2ndT2(struct smb_hdr *pSMB)
224 return remaining; 414 return remaining;
225} 415}
226 416
227static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) 417static int coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
228{ 418{
229 struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)psecond; 419 struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)second_buf;
230 struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)pTargetSMB; 420 struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)target_hdr;
231 char *data_area_of_tgt; 421 char *data_area_of_tgt;
232 char *data_area_of_src; 422 char *data_area_of_src;
233 int remaining; 423 int remaining;
@@ -280,23 +470,23 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
280 put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount); 470 put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount);
281 471
282 /* fix up the BCC */ 472 /* fix up the BCC */
283 byte_count = get_bcc(pTargetSMB); 473 byte_count = get_bcc(target_hdr);
284 byte_count += total_in_src; 474 byte_count += total_in_src;
285 /* is the result too big for the field? */ 475 /* is the result too big for the field? */
286 if (byte_count > USHRT_MAX) { 476 if (byte_count > USHRT_MAX) {
287 cFYI(1, "coalesced BCC too large (%u)", byte_count); 477 cFYI(1, "coalesced BCC too large (%u)", byte_count);
288 return -EPROTO; 478 return -EPROTO;
289 } 479 }
290 put_bcc(byte_count, pTargetSMB); 480 put_bcc(byte_count, target_hdr);
291 481
292 byte_count = be32_to_cpu(pTargetSMB->smb_buf_length); 482 byte_count = be32_to_cpu(target_hdr->smb_buf_length);
293 byte_count += total_in_src; 483 byte_count += total_in_src;
294 /* don't allow buffer to overflow */ 484 /* don't allow buffer to overflow */
295 if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { 485 if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
296 cFYI(1, "coalesced BCC exceeds buffer size (%u)", byte_count); 486 cFYI(1, "coalesced BCC exceeds buffer size (%u)", byte_count);
297 return -ENOBUFS; 487 return -ENOBUFS;
298 } 488 }
299 pTargetSMB->smb_buf_length = cpu_to_be32(byte_count); 489 target_hdr->smb_buf_length = cpu_to_be32(byte_count);
300 490
301 /* copy second buffer into end of first buffer */ 491 /* copy second buffer into end of first buffer */
302 memcpy(data_area_of_tgt, data_area_of_src, total_in_src); 492 memcpy(data_area_of_tgt, data_area_of_src, total_in_src);
@@ -334,7 +524,7 @@ cifs_echo_request(struct work_struct *work)
334 server->hostname); 524 server->hostname);
335 525
336requeue_echo: 526requeue_echo:
337 queue_delayed_work(system_nrt_wq, &server->echo, SMB_ECHO_INTERVAL); 527 queue_delayed_work(cifsiod_wq, &server->echo, SMB_ECHO_INTERVAL);
338} 528}
339 529
340static bool 530static bool
@@ -350,7 +540,7 @@ allocate_buffers(struct TCP_Server_Info *server)
350 } 540 }
351 } else if (server->large_buf) { 541 } else if (server->large_buf) {
352 /* we are reusing a dirty large buf, clear its start */ 542 /* we are reusing a dirty large buf, clear its start */
353 memset(server->bigbuf, 0, sizeof(struct smb_hdr)); 543 memset(server->bigbuf, 0, header_size());
354 } 544 }
355 545
356 if (!server->smallbuf) { 546 if (!server->smallbuf) {
@@ -364,7 +554,7 @@ allocate_buffers(struct TCP_Server_Info *server)
364 /* beginning of smb buffer is cleared in our buf_get */ 554 /* beginning of smb buffer is cleared in our buf_get */
365 } else { 555 } else {
366 /* if existing small buf clear beginning */ 556 /* if existing small buf clear beginning */
367 memset(server->smallbuf, 0, sizeof(struct smb_hdr)); 557 memset(server->smallbuf, 0, header_size());
368 } 558 }
369 559
370 return true; 560 return true;
@@ -566,15 +756,16 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type)
566} 756}
567 757
568static struct mid_q_entry * 758static struct mid_q_entry *
569find_mid(struct TCP_Server_Info *server, struct smb_hdr *buf) 759find_mid(struct TCP_Server_Info *server, char *buffer)
570{ 760{
761 struct smb_hdr *buf = (struct smb_hdr *)buffer;
571 struct mid_q_entry *mid; 762 struct mid_q_entry *mid;
572 763
573 spin_lock(&GlobalMid_Lock); 764 spin_lock(&GlobalMid_Lock);
574 list_for_each_entry(mid, &server->pending_mid_q, qhead) { 765 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
575 if (mid->mid == buf->Mid && 766 if (mid->mid == buf->Mid &&
576 mid->midState == MID_REQUEST_SUBMITTED && 767 mid->mid_state == MID_REQUEST_SUBMITTED &&
577 mid->command == buf->Command) { 768 le16_to_cpu(mid->command) == buf->Command) {
578 spin_unlock(&GlobalMid_Lock); 769 spin_unlock(&GlobalMid_Lock);
579 return mid; 770 return mid;
580 } 771 }
@@ -591,16 +782,16 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
591#endif 782#endif
592 spin_lock(&GlobalMid_Lock); 783 spin_lock(&GlobalMid_Lock);
593 if (!malformed) 784 if (!malformed)
594 mid->midState = MID_RESPONSE_RECEIVED; 785 mid->mid_state = MID_RESPONSE_RECEIVED;
595 else 786 else
596 mid->midState = MID_RESPONSE_MALFORMED; 787 mid->mid_state = MID_RESPONSE_MALFORMED;
597 list_del_init(&mid->qhead); 788 list_del_init(&mid->qhead);
598 spin_unlock(&GlobalMid_Lock); 789 spin_unlock(&GlobalMid_Lock);
599} 790}
600 791
601static void 792static void
602handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server, 793handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
603 struct smb_hdr *buf, int malformed) 794 char *buf, int malformed)
604{ 795{
605 if (malformed == 0 && check2ndT2(buf) > 0) { 796 if (malformed == 0 && check2ndT2(buf) > 0) {
606 mid->multiRsp = true; 797 mid->multiRsp = true;
@@ -620,13 +811,13 @@ handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
620 } else { 811 } else {
621 /* Have first buffer */ 812 /* Have first buffer */
622 mid->resp_buf = buf; 813 mid->resp_buf = buf;
623 mid->largeBuf = true; 814 mid->large_buf = true;
624 server->bigbuf = NULL; 815 server->bigbuf = NULL;
625 } 816 }
626 return; 817 return;
627 } 818 }
628 mid->resp_buf = buf; 819 mid->resp_buf = buf;
629 mid->largeBuf = server->large_buf; 820 mid->large_buf = server->large_buf;
630 /* Was previous buf put in mpx struct for multi-rsp? */ 821 /* Was previous buf put in mpx struct for multi-rsp? */
631 if (!mid->multiRsp) { 822 if (!mid->multiRsp) {
632 /* smb buffer will be freed by user thread */ 823 /* smb buffer will be freed by user thread */
@@ -682,8 +873,8 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
682 spin_lock(&GlobalMid_Lock); 873 spin_lock(&GlobalMid_Lock);
683 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { 874 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
684 mid_entry = list_entry(tmp, struct mid_q_entry, qhead); 875 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
685 cFYI(1, "Clearing mid 0x%x", mid_entry->mid); 876 cFYI(1, "Clearing mid 0x%llx", mid_entry->mid);
686 mid_entry->midState = MID_SHUTDOWN; 877 mid_entry->mid_state = MID_SHUTDOWN;
687 list_move(&mid_entry->qhead, &dispose_list); 878 list_move(&mid_entry->qhead, &dispose_list);
688 } 879 }
689 spin_unlock(&GlobalMid_Lock); 880 spin_unlock(&GlobalMid_Lock);
@@ -691,7 +882,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
691 /* now walk dispose list and issue callbacks */ 882 /* now walk dispose list and issue callbacks */
692 list_for_each_safe(tmp, tmp2, &dispose_list) { 883 list_for_each_safe(tmp, tmp2, &dispose_list) {
693 mid_entry = list_entry(tmp, struct mid_q_entry, qhead); 884 mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
694 cFYI(1, "Callback mid 0x%x", mid_entry->mid); 885 cFYI(1, "Callback mid 0x%llx", mid_entry->mid);
695 list_del_init(&mid_entry->qhead); 886 list_del_init(&mid_entry->qhead);
696 mid_entry->callback(mid_entry); 887 mid_entry->callback(mid_entry);
697 } 888 }
@@ -731,11 +922,10 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
731{ 922{
732 int length; 923 int length;
733 char *buf = server->smallbuf; 924 char *buf = server->smallbuf;
734 struct smb_hdr *smb_buffer = (struct smb_hdr *)buf; 925 unsigned int pdu_length = get_rfc1002_length(buf);
735 unsigned int pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
736 926
737 /* make sure this will fit in a large buffer */ 927 /* make sure this will fit in a large buffer */
738 if (pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { 928 if (pdu_length > CIFSMaxBufSize + max_header_size() - 4) {
739 cERROR(1, "SMB response too long (%u bytes)", 929 cERROR(1, "SMB response too long (%u bytes)",
740 pdu_length); 930 pdu_length);
741 cifs_reconnect(server); 931 cifs_reconnect(server);
@@ -746,20 +936,18 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
746 /* switch to large buffer if too big for a small one */ 936 /* switch to large buffer if too big for a small one */
747 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) { 937 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
748 server->large_buf = true; 938 server->large_buf = true;
749 memcpy(server->bigbuf, server->smallbuf, server->total_read); 939 memcpy(server->bigbuf, buf, server->total_read);
750 buf = server->bigbuf; 940 buf = server->bigbuf;
751 smb_buffer = (struct smb_hdr *)buf;
752 } 941 }
753 942
754 /* now read the rest */ 943 /* now read the rest */
755 length = cifs_read_from_socket(server, 944 length = cifs_read_from_socket(server, buf + header_size() - 1,
756 buf + sizeof(struct smb_hdr) - 1, 945 pdu_length - header_size() + 1 + 4);
757 pdu_length - sizeof(struct smb_hdr) + 1 + 4);
758 if (length < 0) 946 if (length < 0)
759 return length; 947 return length;
760 server->total_read += length; 948 server->total_read += length;
761 949
762 dump_smb(smb_buffer, server->total_read); 950 dump_smb(buf, server->total_read);
763 951
764 /* 952 /*
765 * We know that we received enough to get to the MID as we 953 * We know that we received enough to get to the MID as we
@@ -770,7 +958,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
770 * 48 bytes is enough to display the header and a little bit 958 * 48 bytes is enough to display the header and a little bit
771 * into the payload for debugging purposes. 959 * into the payload for debugging purposes.
772 */ 960 */
773 length = checkSMB(smb_buffer, smb_buffer->Mid, server->total_read); 961 length = checkSMB(buf, server->total_read);
774 if (length != 0) 962 if (length != 0)
775 cifs_dump_mem("Bad SMB: ", buf, 963 cifs_dump_mem("Bad SMB: ", buf,
776 min_t(unsigned int, server->total_read, 48)); 964 min_t(unsigned int, server->total_read, 48));
@@ -778,7 +966,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
778 if (!mid) 966 if (!mid)
779 return length; 967 return length;
780 968
781 handle_mid(mid, server, smb_buffer, length); 969 handle_mid(mid, server, buf, length);
782 return 0; 970 return 0;
783} 971}
784 972
@@ -789,7 +977,6 @@ cifs_demultiplex_thread(void *p)
789 struct TCP_Server_Info *server = p; 977 struct TCP_Server_Info *server = p;
790 unsigned int pdu_length; 978 unsigned int pdu_length;
791 char *buf = NULL; 979 char *buf = NULL;
792 struct smb_hdr *smb_buffer = NULL;
793 struct task_struct *task_to_wake = NULL; 980 struct task_struct *task_to_wake = NULL;
794 struct mid_q_entry *mid_entry; 981 struct mid_q_entry *mid_entry;
795 982
@@ -810,7 +997,6 @@ cifs_demultiplex_thread(void *p)
810 continue; 997 continue;
811 998
812 server->large_buf = false; 999 server->large_buf = false;
813 smb_buffer = (struct smb_hdr *)server->smallbuf;
814 buf = server->smallbuf; 1000 buf = server->smallbuf;
815 pdu_length = 4; /* enough to get RFC1001 header */ 1001 pdu_length = 4; /* enough to get RFC1001 header */
816 1002
@@ -823,14 +1009,14 @@ cifs_demultiplex_thread(void *p)
823 * The right amount was read from socket - 4 bytes, 1009 * The right amount was read from socket - 4 bytes,
824 * so we can now interpret the length field. 1010 * so we can now interpret the length field.
825 */ 1011 */
826 pdu_length = be32_to_cpu(smb_buffer->smb_buf_length); 1012 pdu_length = get_rfc1002_length(buf);
827 1013
828 cFYI(1, "RFC1002 header 0x%x", pdu_length); 1014 cFYI(1, "RFC1002 header 0x%x", pdu_length);
829 if (!is_smb_response(server, buf[0])) 1015 if (!is_smb_response(server, buf[0]))
830 continue; 1016 continue;
831 1017
832 /* make sure we have enough to get to the MID */ 1018 /* make sure we have enough to get to the MID */
833 if (pdu_length < sizeof(struct smb_hdr) - 1 - 4) { 1019 if (pdu_length < header_size() - 1 - 4) {
834 cERROR(1, "SMB response too short (%u bytes)", 1020 cERROR(1, "SMB response too short (%u bytes)",
835 pdu_length); 1021 pdu_length);
836 cifs_reconnect(server); 1022 cifs_reconnect(server);
@@ -840,12 +1026,12 @@ cifs_demultiplex_thread(void *p)
840 1026
841 /* read down to the MID */ 1027 /* read down to the MID */
842 length = cifs_read_from_socket(server, buf + 4, 1028 length = cifs_read_from_socket(server, buf + 4,
843 sizeof(struct smb_hdr) - 1 - 4); 1029 header_size() - 1 - 4);
844 if (length < 0) 1030 if (length < 0)
845 continue; 1031 continue;
846 server->total_read += length; 1032 server->total_read += length;
847 1033
848 mid_entry = find_mid(server, smb_buffer); 1034 mid_entry = find_mid(server, buf);
849 1035
850 if (!mid_entry || !mid_entry->receive) 1036 if (!mid_entry || !mid_entry->receive)
851 length = standard_receive3(server, mid_entry); 1037 length = standard_receive3(server, mid_entry);
@@ -855,22 +1041,19 @@ cifs_demultiplex_thread(void *p)
855 if (length < 0) 1041 if (length < 0)
856 continue; 1042 continue;
857 1043
858 if (server->large_buf) { 1044 if (server->large_buf)
859 buf = server->bigbuf; 1045 buf = server->bigbuf;
860 smb_buffer = (struct smb_hdr *)buf;
861 }
862 1046
863 server->lstrp = jiffies; 1047 server->lstrp = jiffies;
864 if (mid_entry != NULL) { 1048 if (mid_entry != NULL) {
865 if (!mid_entry->multiRsp || mid_entry->multiEnd) 1049 if (!mid_entry->multiRsp || mid_entry->multiEnd)
866 mid_entry->callback(mid_entry); 1050 mid_entry->callback(mid_entry);
867 } else if (!is_valid_oplock_break(smb_buffer, server)) { 1051 } else if (!is_valid_oplock_break(buf, server)) {
868 cERROR(1, "No task to wake, unknown frame received! " 1052 cERROR(1, "No task to wake, unknown frame received! "
869 "NumMids %d", atomic_read(&midCount)); 1053 "NumMids %d", atomic_read(&midCount));
870 cifs_dump_mem("Received Data is: ", buf, 1054 cifs_dump_mem("Received Data is: ", buf, header_size());
871 sizeof(struct smb_hdr));
872#ifdef CONFIG_CIFS_DEBUG2 1055#ifdef CONFIG_CIFS_DEBUG2
873 cifs_dump_detail(smb_buffer); 1056 cifs_dump_detail(buf);
874 cifs_dump_mids(server); 1057 cifs_dump_mids(server);
875#endif /* CIFS_DEBUG2 */ 1058#endif /* CIFS_DEBUG2 */
876 1059
@@ -926,23 +1109,95 @@ extract_hostname(const char *unc)
926 return dst; 1109 return dst;
927} 1110}
928 1111
1112static int get_option_ul(substring_t args[], unsigned long *option)
1113{
1114 int rc;
1115 char *string;
1116
1117 string = match_strdup(args);
1118 if (string == NULL)
1119 return -ENOMEM;
1120 rc = kstrtoul(string, 10, option);
1121 kfree(string);
1122
1123 return rc;
1124}
1125
1126
1127static int cifs_parse_security_flavors(char *value,
1128 struct smb_vol *vol)
1129{
1130
1131 substring_t args[MAX_OPT_ARGS];
1132
1133 switch (match_token(value, cifs_secflavor_tokens, args)) {
1134 case Opt_sec_krb5:
1135 vol->secFlg |= CIFSSEC_MAY_KRB5;
1136 break;
1137 case Opt_sec_krb5i:
1138 vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MUST_SIGN;
1139 break;
1140 case Opt_sec_krb5p:
1141 /* vol->secFlg |= CIFSSEC_MUST_SEAL | CIFSSEC_MAY_KRB5; */
1142 cERROR(1, "Krb5 cifs privacy not supported");
1143 break;
1144 case Opt_sec_ntlmssp:
1145 vol->secFlg |= CIFSSEC_MAY_NTLMSSP;
1146 break;
1147 case Opt_sec_ntlmsspi:
1148 vol->secFlg |= CIFSSEC_MAY_NTLMSSP | CIFSSEC_MUST_SIGN;
1149 break;
1150 case Opt_ntlm:
1151 /* ntlm is default so can be turned off too */
1152 vol->secFlg |= CIFSSEC_MAY_NTLM;
1153 break;
1154 case Opt_sec_ntlmi:
1155 vol->secFlg |= CIFSSEC_MAY_NTLM | CIFSSEC_MUST_SIGN;
1156 break;
1157 case Opt_sec_nontlm:
1158 vol->secFlg |= CIFSSEC_MAY_NTLMV2;
1159 break;
1160 case Opt_sec_ntlmv2i:
1161 vol->secFlg |= CIFSSEC_MAY_NTLMV2 | CIFSSEC_MUST_SIGN;
1162 break;
1163#ifdef CONFIG_CIFS_WEAK_PW_HASH
1164 case Opt_sec_lanman:
1165 vol->secFlg |= CIFSSEC_MAY_LANMAN;
1166 break;
1167#endif
1168 case Opt_sec_none:
1169 vol->nullauth = 1;
1170 break;
1171 default:
1172 cERROR(1, "bad security option: %s", value);
1173 return 1;
1174 }
1175
1176 return 0;
1177}
1178
929static int 1179static int
930cifs_parse_mount_options(const char *mountdata, const char *devname, 1180cifs_parse_mount_options(const char *mountdata, const char *devname,
931 struct smb_vol *vol) 1181 struct smb_vol *vol)
932{ 1182{
933 char *value, *data, *end; 1183 char *data, *end;
934 char *mountdata_copy = NULL, *options; 1184 char *mountdata_copy = NULL, *options;
935 int err;
936 unsigned int temp_len, i, j; 1185 unsigned int temp_len, i, j;
937 char separator[2]; 1186 char separator[2];
938 short int override_uid = -1; 1187 short int override_uid = -1;
939 short int override_gid = -1; 1188 short int override_gid = -1;
940 bool uid_specified = false; 1189 bool uid_specified = false;
941 bool gid_specified = false; 1190 bool gid_specified = false;
1191 bool sloppy = false;
1192 char *invalid = NULL;
942 char *nodename = utsname()->nodename; 1193 char *nodename = utsname()->nodename;
1194 char *string = NULL;
1195 char *tmp_end, *value;
1196 char delim;
943 1197
944 separator[0] = ','; 1198 separator[0] = ',';
945 separator[1] = 0; 1199 separator[1] = 0;
1200 delim = separator[0];
946 1201
947 /* 1202 /*
948 * does not have to be perfect mapping since field is 1203 * does not have to be perfect mapping since field is
@@ -981,6 +1236,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
981 1236
982 options = mountdata_copy; 1237 options = mountdata_copy;
983 end = options + strlen(options); 1238 end = options + strlen(options);
1239
984 if (strncmp(options, "sep=", 4) == 0) { 1240 if (strncmp(options, "sep=", 4) == 0) {
985 if (options[4] != 0) { 1241 if (options[4] != 0) {
986 separator[0] = options[4]; 1242 separator[0] = options[4];
@@ -993,609 +1249,652 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
993 vol->backupgid_specified = false; /* no backup intent for a group */ 1249 vol->backupgid_specified = false; /* no backup intent for a group */
994 1250
995 while ((data = strsep(&options, separator)) != NULL) { 1251 while ((data = strsep(&options, separator)) != NULL) {
1252 substring_t args[MAX_OPT_ARGS];
1253 unsigned long option;
1254 int token;
1255
996 if (!*data) 1256 if (!*data)
997 continue; 1257 continue;
998 if ((value = strchr(data, '=')) != NULL)
999 *value++ = '\0';
1000 1258
1001 /* Have to parse this before we parse for "user" */ 1259 token = match_token(data, cifs_mount_option_tokens, args);
1002 if (strnicmp(data, "user_xattr", 10) == 0) { 1260
1261 switch (token) {
1262
1263 /* Ingnore the following */
1264 case Opt_ignore:
1265 break;
1266
1267 /* Boolean values */
1268 case Opt_user_xattr:
1003 vol->no_xattr = 0; 1269 vol->no_xattr = 0;
1004 } else if (strnicmp(data, "nouser_xattr", 12) == 0) { 1270 break;
1271 case Opt_nouser_xattr:
1005 vol->no_xattr = 1; 1272 vol->no_xattr = 1;
1006 } else if (strnicmp(data, "user", 4) == 0) { 1273 break;
1007 if (!value) { 1274 case Opt_forceuid:
1008 printk(KERN_WARNING
1009 "CIFS: invalid or missing username\n");
1010 goto cifs_parse_mount_err;
1011 } else if (!*value) {
1012 /* null user, ie anonymous, authentication */
1013 vol->nullauth = 1;
1014 }
1015 if (strnlen(value, MAX_USERNAME_SIZE) <
1016 MAX_USERNAME_SIZE) {
1017 vol->username = kstrdup(value, GFP_KERNEL);
1018 if (!vol->username) {
1019 printk(KERN_WARNING "CIFS: no memory "
1020 "for username\n");
1021 goto cifs_parse_mount_err;
1022 }
1023 } else {
1024 printk(KERN_WARNING "CIFS: username too long\n");
1025 goto cifs_parse_mount_err;
1026 }
1027 } else if (strnicmp(data, "pass", 4) == 0) {
1028 if (!value) {
1029 vol->password = NULL;
1030 continue;
1031 } else if (value[0] == 0) {
1032 /* check if string begins with double comma
1033 since that would mean the password really
1034 does start with a comma, and would not
1035 indicate an empty string */
1036 if (value[1] != separator[0]) {
1037 vol->password = NULL;
1038 continue;
1039 }
1040 }
1041 temp_len = strlen(value);
1042 /* removed password length check, NTLM passwords
1043 can be arbitrarily long */
1044
1045 /* if comma in password, the string will be
1046 prematurely null terminated. Commas in password are
1047 specified across the cifs mount interface by a double
1048 comma ie ,, and a comma used as in other cases ie ','
1049 as a parameter delimiter/separator is single and due
1050 to the strsep above is temporarily zeroed. */
1051
1052 /* NB: password legally can have multiple commas and
1053 the only illegal character in a password is null */
1054
1055 if ((value[temp_len] == 0) &&
1056 (value + temp_len < end) &&
1057 (value[temp_len+1] == separator[0])) {
1058 /* reinsert comma */
1059 value[temp_len] = separator[0];
1060 temp_len += 2; /* move after second comma */
1061 while (value[temp_len] != 0) {
1062 if (value[temp_len] == separator[0]) {
1063 if (value[temp_len+1] ==
1064 separator[0]) {
1065 /* skip second comma */
1066 temp_len++;
1067 } else {
1068 /* single comma indicating start
1069 of next parm */
1070 break;
1071 }
1072 }
1073 temp_len++;
1074 }
1075 if (value[temp_len] == 0) {
1076 options = NULL;
1077 } else {
1078 value[temp_len] = 0;
1079 /* point option to start of next parm */
1080 options = value + temp_len + 1;
1081 }
1082 /* go from value to value + temp_len condensing
1083 double commas to singles. Note that this ends up
1084 allocating a few bytes too many, which is ok */
1085 vol->password = kzalloc(temp_len, GFP_KERNEL);
1086 if (vol->password == NULL) {
1087 printk(KERN_WARNING "CIFS: no memory "
1088 "for password\n");
1089 goto cifs_parse_mount_err;
1090 }
1091 for (i = 0, j = 0; i < temp_len; i++, j++) {
1092 vol->password[j] = value[i];
1093 if (value[i] == separator[0]
1094 && value[i+1] == separator[0]) {
1095 /* skip second comma */
1096 i++;
1097 }
1098 }
1099 vol->password[j] = 0;
1100 } else {
1101 vol->password = kzalloc(temp_len+1, GFP_KERNEL);
1102 if (vol->password == NULL) {
1103 printk(KERN_WARNING "CIFS: no memory "
1104 "for password\n");
1105 goto cifs_parse_mount_err;
1106 }
1107 strcpy(vol->password, value);
1108 }
1109 } else if (!strnicmp(data, "ip", 2) ||
1110 !strnicmp(data, "addr", 4)) {
1111 if (!value || !*value) {
1112 vol->UNCip = NULL;
1113 } else if (strnlen(value, INET6_ADDRSTRLEN) <
1114 INET6_ADDRSTRLEN) {
1115 vol->UNCip = kstrdup(value, GFP_KERNEL);
1116 if (!vol->UNCip) {
1117 printk(KERN_WARNING "CIFS: no memory "
1118 "for UNC IP\n");
1119 goto cifs_parse_mount_err;
1120 }
1121 } else {
1122 printk(KERN_WARNING "CIFS: ip address "
1123 "too long\n");
1124 goto cifs_parse_mount_err;
1125 }
1126 } else if (strnicmp(data, "sec", 3) == 0) {
1127 if (!value || !*value) {
1128 cERROR(1, "no security value specified");
1129 continue;
1130 } else if (strnicmp(value, "krb5i", 5) == 0) {
1131 vol->secFlg |= CIFSSEC_MAY_KRB5 |
1132 CIFSSEC_MUST_SIGN;
1133 } else if (strnicmp(value, "krb5p", 5) == 0) {
1134 /* vol->secFlg |= CIFSSEC_MUST_SEAL |
1135 CIFSSEC_MAY_KRB5; */
1136 cERROR(1, "Krb5 cifs privacy not supported");
1137 goto cifs_parse_mount_err;
1138 } else if (strnicmp(value, "krb5", 4) == 0) {
1139 vol->secFlg |= CIFSSEC_MAY_KRB5;
1140 } else if (strnicmp(value, "ntlmsspi", 8) == 0) {
1141 vol->secFlg |= CIFSSEC_MAY_NTLMSSP |
1142 CIFSSEC_MUST_SIGN;
1143 } else if (strnicmp(value, "ntlmssp", 7) == 0) {
1144 vol->secFlg |= CIFSSEC_MAY_NTLMSSP;
1145 } else if (strnicmp(value, "ntlmv2i", 7) == 0) {
1146 vol->secFlg |= CIFSSEC_MAY_NTLMV2 |
1147 CIFSSEC_MUST_SIGN;
1148 } else if (strnicmp(value, "ntlmv2", 6) == 0) {
1149 vol->secFlg |= CIFSSEC_MAY_NTLMV2;
1150 } else if (strnicmp(value, "ntlmi", 5) == 0) {
1151 vol->secFlg |= CIFSSEC_MAY_NTLM |
1152 CIFSSEC_MUST_SIGN;
1153 } else if (strnicmp(value, "ntlm", 4) == 0) {
1154 /* ntlm is default so can be turned off too */
1155 vol->secFlg |= CIFSSEC_MAY_NTLM;
1156 } else if (strnicmp(value, "nontlm", 6) == 0) {
1157 /* BB is there a better way to do this? */
1158 vol->secFlg |= CIFSSEC_MAY_NTLMV2;
1159#ifdef CONFIG_CIFS_WEAK_PW_HASH
1160 } else if (strnicmp(value, "lanman", 6) == 0) {
1161 vol->secFlg |= CIFSSEC_MAY_LANMAN;
1162#endif
1163 } else if (strnicmp(value, "none", 4) == 0) {
1164 vol->nullauth = 1;
1165 } else {
1166 cERROR(1, "bad security option: %s", value);
1167 goto cifs_parse_mount_err;
1168 }
1169 } else if (strnicmp(data, "vers", 3) == 0) {
1170 if (!value || !*value) {
1171 cERROR(1, "no protocol version specified"
1172 " after vers= mount option");
1173 } else if ((strnicmp(value, "cifs", 4) == 0) ||
1174 (strnicmp(value, "1", 1) == 0)) {
1175 /* this is the default */
1176 continue;
1177 }
1178 } else if ((strnicmp(data, "unc", 3) == 0)
1179 || (strnicmp(data, "target", 6) == 0)
1180 || (strnicmp(data, "path", 4) == 0)) {
1181 if (!value || !*value) {
1182 printk(KERN_WARNING "CIFS: invalid path to "
1183 "network resource\n");
1184 goto cifs_parse_mount_err;
1185 }
1186 if ((temp_len = strnlen(value, 300)) < 300) {
1187 vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
1188 if (vol->UNC == NULL)
1189 goto cifs_parse_mount_err;
1190 strcpy(vol->UNC, value);
1191 if (strncmp(vol->UNC, "//", 2) == 0) {
1192 vol->UNC[0] = '\\';
1193 vol->UNC[1] = '\\';
1194 } else if (strncmp(vol->UNC, "\\\\", 2) != 0) {
1195 printk(KERN_WARNING
1196 "CIFS: UNC Path does not begin "
1197 "with // or \\\\ \n");
1198 goto cifs_parse_mount_err;
1199 }
1200 } else {
1201 printk(KERN_WARNING "CIFS: UNC name too long\n");
1202 goto cifs_parse_mount_err;
1203 }
1204 } else if ((strnicmp(data, "domain", 3) == 0)
1205 || (strnicmp(data, "workgroup", 5) == 0)) {
1206 if (!value || !*value) {
1207 printk(KERN_WARNING "CIFS: invalid domain name\n");
1208 goto cifs_parse_mount_err;
1209 }
1210 /* BB are there cases in which a comma can be valid in
1211 a domain name and need special handling? */
1212 if (strnlen(value, 256) < 256) {
1213 vol->domainname = kstrdup(value, GFP_KERNEL);
1214 if (!vol->domainname) {
1215 printk(KERN_WARNING "CIFS: no memory "
1216 "for domainname\n");
1217 goto cifs_parse_mount_err;
1218 }
1219 cFYI(1, "Domain name set");
1220 } else {
1221 printk(KERN_WARNING "CIFS: domain name too "
1222 "long\n");
1223 goto cifs_parse_mount_err;
1224 }
1225 } else if (strnicmp(data, "srcaddr", 7) == 0) {
1226 vol->srcaddr.ss_family = AF_UNSPEC;
1227
1228 if (!value || !*value) {
1229 printk(KERN_WARNING "CIFS: srcaddr value"
1230 " not specified.\n");
1231 goto cifs_parse_mount_err;
1232 }
1233 i = cifs_convert_address((struct sockaddr *)&vol->srcaddr,
1234 value, strlen(value));
1235 if (i == 0) {
1236 printk(KERN_WARNING "CIFS: Could not parse"
1237 " srcaddr: %s\n",
1238 value);
1239 goto cifs_parse_mount_err;
1240 }
1241 } else if (strnicmp(data, "prefixpath", 10) == 0) {
1242 if (!value || !*value) {
1243 printk(KERN_WARNING
1244 "CIFS: invalid path prefix\n");
1245 goto cifs_parse_mount_err;
1246 }
1247 if ((temp_len = strnlen(value, 1024)) < 1024) {
1248 if (value[0] != '/')
1249 temp_len++; /* missing leading slash */
1250 vol->prepath = kmalloc(temp_len+1, GFP_KERNEL);
1251 if (vol->prepath == NULL)
1252 goto cifs_parse_mount_err;
1253 if (value[0] != '/') {
1254 vol->prepath[0] = '/';
1255 strcpy(vol->prepath+1, value);
1256 } else
1257 strcpy(vol->prepath, value);
1258 cFYI(1, "prefix path %s", vol->prepath);
1259 } else {
1260 printk(KERN_WARNING "CIFS: prefix too long\n");
1261 goto cifs_parse_mount_err;
1262 }
1263 } else if (strnicmp(data, "iocharset", 9) == 0) {
1264 if (!value || !*value) {
1265 printk(KERN_WARNING "CIFS: invalid iocharset "
1266 "specified\n");
1267 goto cifs_parse_mount_err;
1268 }
1269 if (strnlen(value, 65) < 65) {
1270 if (strnicmp(value, "default", 7)) {
1271 vol->iocharset = kstrdup(value,
1272 GFP_KERNEL);
1273
1274 if (!vol->iocharset) {
1275 printk(KERN_WARNING "CIFS: no "
1276 "memory for"
1277 "charset\n");
1278 goto cifs_parse_mount_err;
1279 }
1280 }
1281 /* if iocharset not set then load_nls_default
1282 is used by caller */
1283 cFYI(1, "iocharset set to %s", value);
1284 } else {
1285 printk(KERN_WARNING "CIFS: iocharset name "
1286 "too long.\n");
1287 goto cifs_parse_mount_err;
1288 }
1289 } else if (!strnicmp(data, "uid", 3) && value && *value) {
1290 vol->linux_uid = simple_strtoul(value, &value, 0);
1291 uid_specified = true;
1292 } else if (!strnicmp(data, "cruid", 5) && value && *value) {
1293 vol->cred_uid = simple_strtoul(value, &value, 0);
1294 } else if (!strnicmp(data, "forceuid", 8)) {
1295 override_uid = 1; 1275 override_uid = 1;
1296 } else if (!strnicmp(data, "noforceuid", 10)) { 1276 break;
1277 case Opt_noforceuid:
1297 override_uid = 0; 1278 override_uid = 0;
1298 } else if (!strnicmp(data, "gid", 3) && value && *value) { 1279 break;
1299 vol->linux_gid = simple_strtoul(value, &value, 0); 1280 case Opt_noblocksend:
1300 gid_specified = true;
1301 } else if (!strnicmp(data, "forcegid", 8)) {
1302 override_gid = 1;
1303 } else if (!strnicmp(data, "noforcegid", 10)) {
1304 override_gid = 0;
1305 } else if (strnicmp(data, "file_mode", 4) == 0) {
1306 if (value && *value) {
1307 vol->file_mode =
1308 simple_strtoul(value, &value, 0);
1309 }
1310 } else if (strnicmp(data, "dir_mode", 4) == 0) {
1311 if (value && *value) {
1312 vol->dir_mode =
1313 simple_strtoul(value, &value, 0);
1314 }
1315 } else if (strnicmp(data, "dirmode", 4) == 0) {
1316 if (value && *value) {
1317 vol->dir_mode =
1318 simple_strtoul(value, &value, 0);
1319 }
1320 } else if (strnicmp(data, "port", 4) == 0) {
1321 if (value && *value) {
1322 vol->port =
1323 simple_strtoul(value, &value, 0);
1324 }
1325 } else if (strnicmp(data, "rsize", 5) == 0) {
1326 if (value && *value) {
1327 vol->rsize =
1328 simple_strtoul(value, &value, 0);
1329 }
1330 } else if (strnicmp(data, "wsize", 5) == 0) {
1331 if (value && *value) {
1332 vol->wsize =
1333 simple_strtoul(value, &value, 0);
1334 }
1335 } else if (strnicmp(data, "sockopt", 5) == 0) {
1336 if (!value || !*value) {
1337 cERROR(1, "no socket option specified");
1338 continue;
1339 } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
1340 vol->sockopt_tcp_nodelay = 1;
1341 }
1342 } else if (strnicmp(data, "netbiosname", 4) == 0) {
1343 if (!value || !*value || (*value == ' ')) {
1344 cFYI(1, "invalid (empty) netbiosname");
1345 } else {
1346 memset(vol->source_rfc1001_name, 0x20,
1347 RFC1001_NAME_LEN);
1348 /*
1349 * FIXME: are there cases in which a comma can
1350 * be valid in workstation netbios name (and
1351 * need special handling)?
1352 */
1353 for (i = 0; i < RFC1001_NAME_LEN; i++) {
1354 /* don't ucase netbiosname for user */
1355 if (value[i] == 0)
1356 break;
1357 vol->source_rfc1001_name[i] = value[i];
1358 }
1359 /* The string has 16th byte zero still from
1360 set at top of the function */
1361 if (i == RFC1001_NAME_LEN && value[i] != 0)
1362 printk(KERN_WARNING "CIFS: netbiosname"
1363 " longer than 15 truncated.\n");
1364 }
1365 } else if (strnicmp(data, "servern", 7) == 0) {
1366 /* servernetbiosname specified override *SMBSERVER */
1367 if (!value || !*value || (*value == ' ')) {
1368 cFYI(1, "empty server netbiosname specified");
1369 } else {
1370 /* last byte, type, is 0x20 for servr type */
1371 memset(vol->target_rfc1001_name, 0x20,
1372 RFC1001_NAME_LEN_WITH_NULL);
1373
1374 for (i = 0; i < 15; i++) {
1375 /* BB are there cases in which a comma can be
1376 valid in this workstation netbios name
1377 (and need special handling)? */
1378
1379 /* user or mount helper must uppercase
1380 the netbiosname */
1381 if (value[i] == 0)
1382 break;
1383 else
1384 vol->target_rfc1001_name[i] =
1385 value[i];
1386 }
1387 /* The string has 16th byte zero still from
1388 set at top of the function */
1389 if (i == RFC1001_NAME_LEN && value[i] != 0)
1390 printk(KERN_WARNING "CIFS: server net"
1391 "biosname longer than 15 truncated.\n");
1392 }
1393 } else if (strnicmp(data, "actimeo", 7) == 0) {
1394 if (value && *value) {
1395 vol->actimeo = HZ * simple_strtoul(value,
1396 &value, 0);
1397 if (vol->actimeo > CIFS_MAX_ACTIMEO) {
1398 cERROR(1, "CIFS: attribute cache"
1399 "timeout too large");
1400 goto cifs_parse_mount_err;
1401 }
1402 }
1403 } else if (strnicmp(data, "credentials", 4) == 0) {
1404 /* ignore */
1405 } else if (strnicmp(data, "version", 3) == 0) {
1406 /* ignore */
1407 } else if (strnicmp(data, "guest", 5) == 0) {
1408 /* ignore */
1409 } else if (strnicmp(data, "rw", 2) == 0 && strlen(data) == 2) {
1410 /* ignore */
1411 } else if (strnicmp(data, "ro", 2) == 0) {
1412 /* ignore */
1413 } else if (strnicmp(data, "noblocksend", 11) == 0) {
1414 vol->noblocksnd = 1; 1281 vol->noblocksnd = 1;
1415 } else if (strnicmp(data, "noautotune", 10) == 0) { 1282 break;
1283 case Opt_noautotune:
1416 vol->noautotune = 1; 1284 vol->noautotune = 1;
1417 } else if ((strnicmp(data, "suid", 4) == 0) || 1285 break;
1418 (strnicmp(data, "nosuid", 6) == 0) || 1286 case Opt_hard:
1419 (strnicmp(data, "exec", 4) == 0) ||
1420 (strnicmp(data, "noexec", 6) == 0) ||
1421 (strnicmp(data, "nodev", 5) == 0) ||
1422 (strnicmp(data, "noauto", 6) == 0) ||
1423 (strnicmp(data, "dev", 3) == 0)) {
1424 /* The mount tool or mount.cifs helper (if present)
1425 uses these opts to set flags, and the flags are read
1426 by the kernel vfs layer before we get here (ie
1427 before read super) so there is no point trying to
1428 parse these options again and set anything and it
1429 is ok to just ignore them */
1430 continue;
1431 } else if (strnicmp(data, "hard", 4) == 0) {
1432 vol->retry = 1; 1287 vol->retry = 1;
1433 } else if (strnicmp(data, "soft", 4) == 0) { 1288 break;
1289 case Opt_soft:
1434 vol->retry = 0; 1290 vol->retry = 0;
1435 } else if (strnicmp(data, "perm", 4) == 0) { 1291 break;
1292 case Opt_perm:
1436 vol->noperm = 0; 1293 vol->noperm = 0;
1437 } else if (strnicmp(data, "noperm", 6) == 0) { 1294 break;
1295 case Opt_noperm:
1438 vol->noperm = 1; 1296 vol->noperm = 1;
1439 } else if (strnicmp(data, "mapchars", 8) == 0) { 1297 break;
1298 case Opt_mapchars:
1440 vol->remap = 1; 1299 vol->remap = 1;
1441 } else if (strnicmp(data, "nomapchars", 10) == 0) { 1300 break;
1301 case Opt_nomapchars:
1442 vol->remap = 0; 1302 vol->remap = 0;
1443 } else if (strnicmp(data, "sfu", 3) == 0) { 1303 break;
1304 case Opt_sfu:
1444 vol->sfu_emul = 1; 1305 vol->sfu_emul = 1;
1445 } else if (strnicmp(data, "nosfu", 5) == 0) { 1306 break;
1307 case Opt_nosfu:
1446 vol->sfu_emul = 0; 1308 vol->sfu_emul = 0;
1447 } else if (strnicmp(data, "nodfs", 5) == 0) { 1309 break;
1310 case Opt_nodfs:
1448 vol->nodfs = 1; 1311 vol->nodfs = 1;
1449 } else if (strnicmp(data, "posixpaths", 10) == 0) { 1312 break;
1313 case Opt_posixpaths:
1450 vol->posix_paths = 1; 1314 vol->posix_paths = 1;
1451 } else if (strnicmp(data, "noposixpaths", 12) == 0) { 1315 break;
1316 case Opt_noposixpaths:
1452 vol->posix_paths = 0; 1317 vol->posix_paths = 0;
1453 } else if (strnicmp(data, "nounix", 6) == 0) { 1318 break;
1454 vol->no_linux_ext = 1; 1319 case Opt_nounix:
1455 } else if (strnicmp(data, "nolinux", 7) == 0) {
1456 vol->no_linux_ext = 1; 1320 vol->no_linux_ext = 1;
1457 } else if ((strnicmp(data, "nocase", 6) == 0) || 1321 break;
1458 (strnicmp(data, "ignorecase", 10) == 0)) { 1322 case Opt_nocase:
1459 vol->nocase = 1; 1323 vol->nocase = 1;
1460 } else if (strnicmp(data, "mand", 4) == 0) { 1324 break;
1461 /* ignore */ 1325 case Opt_brl:
1462 } else if (strnicmp(data, "nomand", 6) == 0) {
1463 /* ignore */
1464 } else if (strnicmp(data, "_netdev", 7) == 0) {
1465 /* ignore */
1466 } else if (strnicmp(data, "brl", 3) == 0) {
1467 vol->nobrl = 0; 1326 vol->nobrl = 0;
1468 } else if ((strnicmp(data, "nobrl", 5) == 0) || 1327 break;
1469 (strnicmp(data, "nolock", 6) == 0)) { 1328 case Opt_nobrl:
1470 vol->nobrl = 1; 1329 vol->nobrl = 1;
1471 /* turn off mandatory locking in mode 1330 /*
1472 if remote locking is turned off since the 1331 * turn off mandatory locking in mode
1473 local vfs will do advisory */ 1332 * if remote locking is turned off since the
1333 * local vfs will do advisory
1334 */
1474 if (vol->file_mode == 1335 if (vol->file_mode ==
1475 (S_IALLUGO & ~(S_ISUID | S_IXGRP))) 1336 (S_IALLUGO & ~(S_ISUID | S_IXGRP)))
1476 vol->file_mode = S_IALLUGO; 1337 vol->file_mode = S_IALLUGO;
1477 } else if (strnicmp(data, "forcemandatorylock", 9) == 0) { 1338 break;
1478 /* will take the shorter form "forcemand" as well */ 1339 case Opt_forcemandatorylock:
1479 /* This mount option will force use of mandatory
1480 (DOS/Windows style) byte range locks, instead of
1481 using posix advisory byte range locks, even if the
1482 Unix extensions are available and posix locks would
1483 be supported otherwise. If Unix extensions are not
1484 negotiated this has no effect since mandatory locks
1485 would be used (mandatory locks is all that those
1486 those servers support) */
1487 vol->mand_lock = 1; 1340 vol->mand_lock = 1;
1488 } else if (strnicmp(data, "setuids", 7) == 0) { 1341 break;
1342 case Opt_setuids:
1489 vol->setuids = 1; 1343 vol->setuids = 1;
1490 } else if (strnicmp(data, "nosetuids", 9) == 0) { 1344 break;
1345 case Opt_nosetuids:
1491 vol->setuids = 0; 1346 vol->setuids = 0;
1492 } else if (strnicmp(data, "dynperm", 7) == 0) { 1347 break;
1348 case Opt_dynperm:
1493 vol->dynperm = true; 1349 vol->dynperm = true;
1494 } else if (strnicmp(data, "nodynperm", 9) == 0) { 1350 break;
1351 case Opt_nodynperm:
1495 vol->dynperm = false; 1352 vol->dynperm = false;
1496 } else if (strnicmp(data, "nohard", 6) == 0) { 1353 break;
1354 case Opt_nohard:
1497 vol->retry = 0; 1355 vol->retry = 0;
1498 } else if (strnicmp(data, "nosoft", 6) == 0) { 1356 break;
1357 case Opt_nosoft:
1499 vol->retry = 1; 1358 vol->retry = 1;
1500 } else if (strnicmp(data, "nointr", 6) == 0) { 1359 break;
1360 case Opt_nointr:
1501 vol->intr = 0; 1361 vol->intr = 0;
1502 } else if (strnicmp(data, "intr", 4) == 0) { 1362 break;
1363 case Opt_intr:
1503 vol->intr = 1; 1364 vol->intr = 1;
1504 } else if (strnicmp(data, "nostrictsync", 12) == 0) { 1365 break;
1366 case Opt_nostrictsync:
1505 vol->nostrictsync = 1; 1367 vol->nostrictsync = 1;
1506 } else if (strnicmp(data, "strictsync", 10) == 0) { 1368 break;
1369 case Opt_strictsync:
1507 vol->nostrictsync = 0; 1370 vol->nostrictsync = 0;
1508 } else if (strnicmp(data, "serverino", 7) == 0) { 1371 break;
1372 case Opt_serverino:
1509 vol->server_ino = 1; 1373 vol->server_ino = 1;
1510 } else if (strnicmp(data, "noserverino", 9) == 0) { 1374 break;
1375 case Opt_noserverino:
1511 vol->server_ino = 0; 1376 vol->server_ino = 0;
1512 } else if (strnicmp(data, "rwpidforward", 12) == 0) { 1377 break;
1378 case Opt_rwpidforward:
1513 vol->rwpidforward = 1; 1379 vol->rwpidforward = 1;
1514 } else if (strnicmp(data, "cifsacl", 7) == 0) { 1380 break;
1381 case Opt_cifsacl:
1515 vol->cifs_acl = 1; 1382 vol->cifs_acl = 1;
1516 } else if (strnicmp(data, "nocifsacl", 9) == 0) { 1383 break;
1384 case Opt_nocifsacl:
1517 vol->cifs_acl = 0; 1385 vol->cifs_acl = 0;
1518 } else if (strnicmp(data, "acl", 3) == 0) { 1386 break;
1387 case Opt_acl:
1519 vol->no_psx_acl = 0; 1388 vol->no_psx_acl = 0;
1520 } else if (strnicmp(data, "noacl", 5) == 0) { 1389 break;
1390 case Opt_noacl:
1521 vol->no_psx_acl = 1; 1391 vol->no_psx_acl = 1;
1522 } else if (strnicmp(data, "locallease", 6) == 0) { 1392 break;
1393 case Opt_locallease:
1523 vol->local_lease = 1; 1394 vol->local_lease = 1;
1524 } else if (strnicmp(data, "sign", 4) == 0) { 1395 break;
1396 case Opt_sign:
1525 vol->secFlg |= CIFSSEC_MUST_SIGN; 1397 vol->secFlg |= CIFSSEC_MUST_SIGN;
1526 } else if (strnicmp(data, "seal", 4) == 0) { 1398 break;
1399 case Opt_seal:
1527 /* we do not do the following in secFlags because seal 1400 /* we do not do the following in secFlags because seal
1528 is a per tree connection (mount) not a per socket 1401 * is a per tree connection (mount) not a per socket
1529 or per-smb connection option in the protocol */ 1402 * or per-smb connection option in the protocol
1530 /* vol->secFlg |= CIFSSEC_MUST_SEAL; */ 1403 * vol->secFlg |= CIFSSEC_MUST_SEAL;
1404 */
1531 vol->seal = 1; 1405 vol->seal = 1;
1532 } else if (strnicmp(data, "direct", 6) == 0) { 1406 break;
1533 vol->direct_io = 1; 1407 case Opt_direct:
1534 } else if (strnicmp(data, "forcedirectio", 13) == 0) {
1535 vol->direct_io = 1; 1408 vol->direct_io = 1;
1536 } else if (strnicmp(data, "strictcache", 11) == 0) { 1409 break;
1410 case Opt_strictcache:
1537 vol->strict_io = 1; 1411 vol->strict_io = 1;
1538 } else if (strnicmp(data, "noac", 4) == 0) { 1412 break;
1413 case Opt_noac:
1539 printk(KERN_WARNING "CIFS: Mount option noac not " 1414 printk(KERN_WARNING "CIFS: Mount option noac not "
1540 "supported. Instead set " 1415 "supported. Instead set "
1541 "/proc/fs/cifs/LookupCacheEnabled to 0\n"); 1416 "/proc/fs/cifs/LookupCacheEnabled to 0\n");
1542 } else if (strnicmp(data, "fsc", 3) == 0) { 1417 break;
1418 case Opt_fsc:
1543#ifndef CONFIG_CIFS_FSCACHE 1419#ifndef CONFIG_CIFS_FSCACHE
1544 cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE " 1420 cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE "
1545 "kernel config option set"); 1421 "kernel config option set");
1546 goto cifs_parse_mount_err; 1422 goto cifs_parse_mount_err;
1547#endif 1423#endif
1548 vol->fsc = true; 1424 vol->fsc = true;
1549 } else if (strnicmp(data, "mfsymlinks", 10) == 0) { 1425 break;
1426 case Opt_mfsymlinks:
1550 vol->mfsymlinks = true; 1427 vol->mfsymlinks = true;
1551 } else if (strnicmp(data, "multiuser", 8) == 0) { 1428 break;
1429 case Opt_multiuser:
1552 vol->multiuser = true; 1430 vol->multiuser = true;
1553 } else if (!strnicmp(data, "backupuid", 9) && value && *value) { 1431 break;
1554 err = kstrtouint(value, 0, &vol->backupuid); 1432 case Opt_sloppy:
1555 if (err < 0) { 1433 sloppy = true;
1434 break;
1435
1436 /* Numeric Values */
1437 case Opt_backupuid:
1438 if (get_option_ul(args, &option)) {
1556 cERROR(1, "%s: Invalid backupuid value", 1439 cERROR(1, "%s: Invalid backupuid value",
1557 __func__); 1440 __func__);
1558 goto cifs_parse_mount_err; 1441 goto cifs_parse_mount_err;
1559 } 1442 }
1443 vol->backupuid = option;
1560 vol->backupuid_specified = true; 1444 vol->backupuid_specified = true;
1561 } else if (!strnicmp(data, "backupgid", 9) && value && *value) { 1445 break;
1562 err = kstrtouint(value, 0, &vol->backupgid); 1446 case Opt_backupgid:
1563 if (err < 0) { 1447 if (get_option_ul(args, &option)) {
1564 cERROR(1, "%s: Invalid backupgid value", 1448 cERROR(1, "%s: Invalid backupgid value",
1565 __func__); 1449 __func__);
1566 goto cifs_parse_mount_err; 1450 goto cifs_parse_mount_err;
1567 } 1451 }
1452 vol->backupgid = option;
1568 vol->backupgid_specified = true; 1453 vol->backupgid_specified = true;
1569 } else 1454 break;
1570 printk(KERN_WARNING "CIFS: Unknown mount option %s\n", 1455 case Opt_uid:
1571 data); 1456 if (get_option_ul(args, &option)) {
1572 } 1457 cERROR(1, "%s: Invalid uid value",
1573 if (vol->UNC == NULL) { 1458 __func__);
1574 if (devname == NULL) { 1459 goto cifs_parse_mount_err;
1575 printk(KERN_WARNING "CIFS: Missing UNC name for mount " 1460 }
1576 "target\n"); 1461 vol->linux_uid = option;
1577 goto cifs_parse_mount_err; 1462 uid_specified = true;
1578 } 1463 break;
1579 if ((temp_len = strnlen(devname, 300)) < 300) { 1464 case Opt_cruid:
1580 vol->UNC = kmalloc(temp_len+1, GFP_KERNEL); 1465 if (get_option_ul(args, &option)) {
1581 if (vol->UNC == NULL) 1466 cERROR(1, "%s: Invalid cruid value",
1467 __func__);
1468 goto cifs_parse_mount_err;
1469 }
1470 vol->cred_uid = option;
1471 break;
1472 case Opt_gid:
1473 if (get_option_ul(args, &option)) {
1474 cERROR(1, "%s: Invalid gid value",
1475 __func__);
1476 goto cifs_parse_mount_err;
1477 }
1478 vol->linux_gid = option;
1479 gid_specified = true;
1480 break;
1481 case Opt_file_mode:
1482 if (get_option_ul(args, &option)) {
1483 cERROR(1, "%s: Invalid file_mode value",
1484 __func__);
1485 goto cifs_parse_mount_err;
1486 }
1487 vol->file_mode = option;
1488 break;
1489 case Opt_dirmode:
1490 if (get_option_ul(args, &option)) {
1491 cERROR(1, "%s: Invalid dir_mode value",
1492 __func__);
1493 goto cifs_parse_mount_err;
1494 }
1495 vol->dir_mode = option;
1496 break;
1497 case Opt_port:
1498 if (get_option_ul(args, &option)) {
1499 cERROR(1, "%s: Invalid port value",
1500 __func__);
1501 goto cifs_parse_mount_err;
1502 }
1503 vol->port = option;
1504 break;
1505 case Opt_rsize:
1506 if (get_option_ul(args, &option)) {
1507 cERROR(1, "%s: Invalid rsize value",
1508 __func__);
1509 goto cifs_parse_mount_err;
1510 }
1511 vol->rsize = option;
1512 break;
1513 case Opt_wsize:
1514 if (get_option_ul(args, &option)) {
1515 cERROR(1, "%s: Invalid wsize value",
1516 __func__);
1517 goto cifs_parse_mount_err;
1518 }
1519 vol->wsize = option;
1520 break;
1521 case Opt_actimeo:
1522 if (get_option_ul(args, &option)) {
1523 cERROR(1, "%s: Invalid actimeo value",
1524 __func__);
1525 goto cifs_parse_mount_err;
1526 }
1527 vol->actimeo = HZ * option;
1528 if (vol->actimeo > CIFS_MAX_ACTIMEO) {
1529 cERROR(1, "CIFS: attribute cache"
1530 "timeout too large");
1531 goto cifs_parse_mount_err;
1532 }
1533 break;
1534
1535 /* String Arguments */
1536
1537 case Opt_user:
1538 string = match_strdup(args);
1539 if (string == NULL)
1540 goto out_nomem;
1541
1542 if (!*string) {
1543 /* null user, ie. anonymous authentication */
1544 vol->nullauth = 1;
1545 } else if (strnlen(string, MAX_USERNAME_SIZE) >
1546 MAX_USERNAME_SIZE) {
1547 printk(KERN_WARNING "CIFS: username too long\n");
1548 goto cifs_parse_mount_err;
1549 }
1550 vol->username = kstrdup(string, GFP_KERNEL);
1551 if (!vol->username) {
1552 printk(KERN_WARNING "CIFS: no memory "
1553 "for username\n");
1554 goto cifs_parse_mount_err;
1555 }
1556 break;
1557 case Opt_blank_pass:
1558 vol->password = NULL;
1559 break;
1560 case Opt_pass:
1561 /* passwords have to be handled differently
1562 * to allow the character used for deliminator
1563 * to be passed within them
1564 */
1565
1566 /* Obtain the value string */
1567 value = strchr(data, '=');
1568 if (value != NULL)
1569 *value++ = '\0';
1570
1571 /* Set tmp_end to end of the string */
1572 tmp_end = (char *) value + strlen(value);
1573
1574 /* Check if following character is the deliminator
1575 * If yes, we have encountered a double deliminator
1576 * reset the NULL character to the deliminator
1577 */
1578 if (tmp_end < end && tmp_end[1] == delim)
1579 tmp_end[0] = delim;
1580
1581 /* Keep iterating until we get to a single deliminator
1582 * OR the end
1583 */
1584 while ((tmp_end = strchr(tmp_end, delim)) != NULL &&
1585 (tmp_end[1] == delim)) {
1586 tmp_end = (char *) &tmp_end[2];
1587 }
1588
1589 /* Reset var options to point to next element */
1590 if (tmp_end) {
1591 tmp_end[0] = '\0';
1592 options = (char *) &tmp_end[1];
1593 } else
1594 /* Reached the end of the mount option string */
1595 options = end;
1596
1597 /* Now build new password string */
1598 temp_len = strlen(value);
1599 vol->password = kzalloc(temp_len+1, GFP_KERNEL);
1600 if (vol->password == NULL) {
1601 printk(KERN_WARNING "CIFS: no memory "
1602 "for password\n");
1582 goto cifs_parse_mount_err; 1603 goto cifs_parse_mount_err;
1583 strcpy(vol->UNC, devname); 1604 }
1584 if (strncmp(vol->UNC, "//", 2) == 0) { 1605
1606 for (i = 0, j = 0; i < temp_len; i++, j++) {
1607 vol->password[j] = value[i];
1608 if ((value[i] == delim) &&
1609 value[i+1] == delim)
1610 /* skip the second deliminator */
1611 i++;
1612 }
1613 vol->password[j] = '\0';
1614 break;
1615 case Opt_ip:
1616 string = match_strdup(args);
1617 if (string == NULL)
1618 goto out_nomem;
1619
1620 if (!*string) {
1621 vol->UNCip = NULL;
1622 } else if (strnlen(string, INET6_ADDRSTRLEN) >
1623 INET6_ADDRSTRLEN) {
1624 printk(KERN_WARNING "CIFS: ip address "
1625 "too long\n");
1626 goto cifs_parse_mount_err;
1627 }
1628 vol->UNCip = kstrdup(string, GFP_KERNEL);
1629 if (!vol->UNCip) {
1630 printk(KERN_WARNING "CIFS: no memory "
1631 "for UNC IP\n");
1632 goto cifs_parse_mount_err;
1633 }
1634 break;
1635 case Opt_unc:
1636 string = match_strdup(args);
1637 if (string == NULL)
1638 goto out_nomem;
1639
1640 if (!*string) {
1641 printk(KERN_WARNING "CIFS: invalid path to "
1642 "network resource\n");
1643 goto cifs_parse_mount_err;
1644 }
1645
1646 temp_len = strnlen(string, 300);
1647 if (temp_len == 300) {
1648 printk(KERN_WARNING "CIFS: UNC name too long\n");
1649 goto cifs_parse_mount_err;
1650 }
1651
1652 if (strncmp(string, "//", 2) == 0) {
1585 vol->UNC[0] = '\\'; 1653 vol->UNC[0] = '\\';
1586 vol->UNC[1] = '\\'; 1654 vol->UNC[1] = '\\';
1587 } else if (strncmp(vol->UNC, "\\\\", 2) != 0) { 1655 } else if (strncmp(string, "\\\\", 2) != 0) {
1588 printk(KERN_WARNING "CIFS: UNC Path does not " 1656 printk(KERN_WARNING "CIFS: UNC Path does not "
1589 "begin with // or \\\\ \n"); 1657 "begin with // or \\\\\n");
1590 goto cifs_parse_mount_err; 1658 goto cifs_parse_mount_err;
1591 } 1659 }
1592 value = strpbrk(vol->UNC+2, "/\\"); 1660
1593 if (value) 1661 vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
1594 *value = '\\'; 1662 if (vol->UNC == NULL) {
1595 } else { 1663 printk(KERN_WARNING "CIFS: no memory "
1596 printk(KERN_WARNING "CIFS: UNC name too long\n"); 1664 "for UNC\n");
1665 goto cifs_parse_mount_err;
1666 }
1667 strcpy(vol->UNC, string);
1668 break;
1669 case Opt_domain:
1670 string = match_strdup(args);
1671 if (string == NULL)
1672 goto out_nomem;
1673
1674 if (!*string) {
1675 printk(KERN_WARNING "CIFS: invalid domain"
1676 " name\n");
1677 goto cifs_parse_mount_err;
1678 } else if (strnlen(string, 256) == 256) {
1679 printk(KERN_WARNING "CIFS: domain name too"
1680 " long\n");
1681 goto cifs_parse_mount_err;
1682 }
1683
1684 vol->domainname = kstrdup(string, GFP_KERNEL);
1685 if (!vol->domainname) {
1686 printk(KERN_WARNING "CIFS: no memory "
1687 "for domainname\n");
1688 goto cifs_parse_mount_err;
1689 }
1690 cFYI(1, "Domain name set");
1691 break;
1692 case Opt_srcaddr:
1693 string = match_strdup(args);
1694 if (string == NULL)
1695 goto out_nomem;
1696
1697 if (!*string) {
1698 printk(KERN_WARNING "CIFS: srcaddr value not"
1699 " specified\n");
1700 goto cifs_parse_mount_err;
1701 } else if (!cifs_convert_address(
1702 (struct sockaddr *)&vol->srcaddr,
1703 string, strlen(string))) {
1704 printk(KERN_WARNING "CIFS: Could not parse"
1705 " srcaddr: %s\n", string);
1706 goto cifs_parse_mount_err;
1707 }
1708 break;
1709 case Opt_prefixpath:
1710 string = match_strdup(args);
1711 if (string == NULL)
1712 goto out_nomem;
1713
1714 if (!*string) {
1715 printk(KERN_WARNING "CIFS: Invalid path"
1716 " prefix\n");
1717 goto cifs_parse_mount_err;
1718 }
1719 temp_len = strnlen(string, 1024);
1720 if (string[0] != '/')
1721 temp_len++; /* missing leading slash */
1722 if (temp_len > 1024) {
1723 printk(KERN_WARNING "CIFS: prefix too long\n");
1724 goto cifs_parse_mount_err;
1725 }
1726
1727 vol->prepath = kmalloc(temp_len+1, GFP_KERNEL);
1728 if (vol->prepath == NULL) {
1729 printk(KERN_WARNING "CIFS: no memory "
1730 "for path prefix\n");
1731 goto cifs_parse_mount_err;
1732 }
1733
1734 if (string[0] != '/') {
1735 vol->prepath[0] = '/';
1736 strcpy(vol->prepath+1, string);
1737 } else
1738 strcpy(vol->prepath, string);
1739
1740 break;
1741 case Opt_iocharset:
1742 string = match_strdup(args);
1743 if (string == NULL)
1744 goto out_nomem;
1745
1746 if (!*string) {
1747 printk(KERN_WARNING "CIFS: Invalid iocharset"
1748 " specified\n");
1749 goto cifs_parse_mount_err;
1750 } else if (strnlen(string, 1024) >= 65) {
1751 printk(KERN_WARNING "CIFS: iocharset name "
1752 "too long.\n");
1753 goto cifs_parse_mount_err;
1754 }
1755
1756 if (strnicmp(string, "default", 7) != 0) {
1757 vol->iocharset = kstrdup(string,
1758 GFP_KERNEL);
1759 if (!vol->iocharset) {
1760 printk(KERN_WARNING "CIFS: no memory"
1761 "for charset\n");
1762 goto cifs_parse_mount_err;
1763 }
1764 }
1765 /* if iocharset not set then load_nls_default
1766 * is used by caller
1767 */
1768 cFYI(1, "iocharset set to %s", string);
1769 break;
1770 case Opt_sockopt:
1771 string = match_strdup(args);
1772 if (string == NULL)
1773 goto out_nomem;
1774
1775 if (!*string) {
1776 printk(KERN_WARNING "CIFS: No socket option"
1777 " specified\n");
1778 goto cifs_parse_mount_err;
1779 }
1780 if (strnicmp(string, "TCP_NODELAY", 11) == 0)
1781 vol->sockopt_tcp_nodelay = 1;
1782 break;
1783 case Opt_netbiosname:
1784 string = match_strdup(args);
1785 if (string == NULL)
1786 goto out_nomem;
1787
1788 if (!*string) {
1789 printk(KERN_WARNING "CIFS: Invalid (empty)"
1790 " netbiosname\n");
1791 break;
1792 }
1793
1794 memset(vol->source_rfc1001_name, 0x20,
1795 RFC1001_NAME_LEN);
1796 /*
1797 * FIXME: are there cases in which a comma can
1798 * be valid in workstation netbios name (and
1799 * need special handling)?
1800 */
1801 for (i = 0; i < RFC1001_NAME_LEN; i++) {
1802 /* don't ucase netbiosname for user */
1803 if (string[i] == 0)
1804 break;
1805 vol->source_rfc1001_name[i] = string[i];
1806 }
1807 /* The string has 16th byte zero still from
1808 * set at top of the function
1809 */
1810 if (i == RFC1001_NAME_LEN && string[i] != 0)
1811 printk(KERN_WARNING "CIFS: netbiosname"
1812 " longer than 15 truncated.\n");
1813
1814 break;
1815 case Opt_servern:
1816 /* servernetbiosname specified override *SMBSERVER */
1817 string = match_strdup(args);
1818 if (string == NULL)
1819 goto out_nomem;
1820
1821 if (!*string) {
1822 printk(KERN_WARNING "CIFS: Empty server"
1823 " netbiosname specified\n");
1824 break;
1825 }
1826 /* last byte, type, is 0x20 for servr type */
1827 memset(vol->target_rfc1001_name, 0x20,
1828 RFC1001_NAME_LEN_WITH_NULL);
1829
1830 /* BB are there cases in which a comma can be
1831 valid in this workstation netbios name
1832 (and need special handling)? */
1833
1834 /* user or mount helper must uppercase the
1835 netbios name */
1836 for (i = 0; i < 15; i++) {
1837 if (string[i] == 0)
1838 break;
1839 vol->target_rfc1001_name[i] = string[i];
1840 }
1841 /* The string has 16th byte zero still from
1842 set at top of the function */
1843 if (i == RFC1001_NAME_LEN && string[i] != 0)
1844 printk(KERN_WARNING "CIFS: server net"
1845 "biosname longer than 15 truncated.\n");
1846 break;
1847 case Opt_ver:
1848 string = match_strdup(args);
1849 if (string == NULL)
1850 goto out_nomem;
1851
1852 if (!*string) {
1853 cERROR(1, "no protocol version specified"
1854 " after vers= mount option");
1855 goto cifs_parse_mount_err;
1856 }
1857
1858 if (strnicmp(string, "cifs", 4) == 0 ||
1859 strnicmp(string, "1", 1) == 0) {
1860 /* This is the default */
1861 break;
1862 }
1863 /* For all other value, error */
1864 printk(KERN_WARNING "CIFS: Invalid version"
1865 " specified\n");
1597 goto cifs_parse_mount_err; 1866 goto cifs_parse_mount_err;
1867 case Opt_sec:
1868 string = match_strdup(args);
1869 if (string == NULL)
1870 goto out_nomem;
1871
1872 if (!*string) {
1873 printk(KERN_WARNING "CIFS: no security flavor"
1874 " specified\n");
1875 break;
1876 }
1877
1878 if (cifs_parse_security_flavors(string, vol) != 0)
1879 goto cifs_parse_mount_err;
1880 break;
1881 default:
1882 /*
1883 * An option we don't recognize. Save it off for later
1884 * if we haven't already found one
1885 */
1886 if (!invalid)
1887 invalid = data;
1888 break;
1598 } 1889 }
1890 /* Free up any allocated string */
1891 kfree(string);
1892 string = NULL;
1893 }
1894
1895 if (!sloppy && invalid) {
1896 printk(KERN_ERR "CIFS: Unknown mount option \"%s\"\n", invalid);
1897 goto cifs_parse_mount_err;
1599 } 1898 }
1600 1899
1601#ifndef CONFIG_KEYS 1900#ifndef CONFIG_KEYS
@@ -1625,7 +1924,10 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1625 kfree(mountdata_copy); 1924 kfree(mountdata_copy);
1626 return 0; 1925 return 0;
1627 1926
1927out_nomem:
1928 printk(KERN_WARNING "Could not allocate temporary buffer\n");
1628cifs_parse_mount_err: 1929cifs_parse_mount_err:
1930 kfree(string);
1629 kfree(mountdata_copy); 1931 kfree(mountdata_copy);
1630 return 1; 1932 return 1;
1631} 1933}
@@ -1977,7 +2279,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
1977 cifs_fscache_get_client_cookie(tcp_ses); 2279 cifs_fscache_get_client_cookie(tcp_ses);
1978 2280
1979 /* queue echo request delayed work */ 2281 /* queue echo request delayed work */
1980 queue_delayed_work(system_nrt_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL); 2282 queue_delayed_work(cifsiod_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL);
1981 2283
1982 return tcp_ses; 2284 return tcp_ses;
1983 2285
@@ -3543,7 +3845,7 @@ remote_path_check:
3543 tlink_rb_insert(&cifs_sb->tlink_tree, tlink); 3845 tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
3544 spin_unlock(&cifs_sb->tlink_tree_lock); 3846 spin_unlock(&cifs_sb->tlink_tree_lock);
3545 3847
3546 queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks, 3848 queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
3547 TLINK_IDLE_EXPIRE); 3849 TLINK_IDLE_EXPIRE);
3548 3850
3549mount_fail_check: 3851mount_fail_check:
@@ -4097,6 +4399,6 @@ cifs_prune_tlinks(struct work_struct *work)
4097 } 4399 }
4098 spin_unlock(&cifs_sb->tlink_tree_lock); 4400 spin_unlock(&cifs_sb->tlink_tree_lock);
4099 4401
4100 queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks, 4402 queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
4101 TLINK_IDLE_EXPIRE); 4403 TLINK_IDLE_EXPIRE);
4102} 4404}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 159fcc56dc2d..460d87b7cda0 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1399,7 +1399,10 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1399 return rc; 1399 return rc;
1400} 1400}
1401 1401
1402/* update the file size (if needed) after a write */ 1402/*
1403 * update the file size (if needed) after a write. Should be called with
1404 * the inode->i_lock held
1405 */
1403void 1406void
1404cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, 1407cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1405 unsigned int bytes_written) 1408 unsigned int bytes_written)
@@ -1471,7 +1474,9 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
1471 return rc; 1474 return rc;
1472 } 1475 }
1473 } else { 1476 } else {
1477 spin_lock(&dentry->d_inode->i_lock);
1474 cifs_update_eof(cifsi, *poffset, bytes_written); 1478 cifs_update_eof(cifsi, *poffset, bytes_written);
1479 spin_unlock(&dentry->d_inode->i_lock);
1475 *poffset += bytes_written; 1480 *poffset += bytes_written;
1476 } 1481 }
1477 } 1482 }
@@ -1648,6 +1653,27 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1648 return rc; 1653 return rc;
1649} 1654}
1650 1655
1656/*
1657 * Marshal up the iov array, reserving the first one for the header. Also,
1658 * set wdata->bytes.
1659 */
1660static void
1661cifs_writepages_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
1662{
1663 int i;
1664 struct inode *inode = wdata->cfile->dentry->d_inode;
1665 loff_t size = i_size_read(inode);
1666
1667 /* marshal up the pages into iov array */
1668 wdata->bytes = 0;
1669 for (i = 0; i < wdata->nr_pages; i++) {
1670 iov[i + 1].iov_len = min(size - page_offset(wdata->pages[i]),
1671 (loff_t)PAGE_CACHE_SIZE);
1672 iov[i + 1].iov_base = kmap(wdata->pages[i]);
1673 wdata->bytes += iov[i + 1].iov_len;
1674 }
1675}
1676
1651static int cifs_writepages(struct address_space *mapping, 1677static int cifs_writepages(struct address_space *mapping,
1652 struct writeback_control *wbc) 1678 struct writeback_control *wbc)
1653{ 1679{
@@ -1684,7 +1710,8 @@ retry:
1684 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1, 1710 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1685 end - index) + 1; 1711 end - index) + 1;
1686 1712
1687 wdata = cifs_writedata_alloc((unsigned int)tofind); 1713 wdata = cifs_writedata_alloc((unsigned int)tofind,
1714 cifs_writev_complete);
1688 if (!wdata) { 1715 if (!wdata) {
1689 rc = -ENOMEM; 1716 rc = -ENOMEM;
1690 break; 1717 break;
@@ -1791,6 +1818,7 @@ retry:
1791 wdata->sync_mode = wbc->sync_mode; 1818 wdata->sync_mode = wbc->sync_mode;
1792 wdata->nr_pages = nr_pages; 1819 wdata->nr_pages = nr_pages;
1793 wdata->offset = page_offset(wdata->pages[0]); 1820 wdata->offset = page_offset(wdata->pages[0]);
1821 wdata->marshal_iov = cifs_writepages_marshal_iov;
1794 1822
1795 do { 1823 do {
1796 if (wdata->cfile != NULL) 1824 if (wdata->cfile != NULL)
@@ -1802,6 +1830,7 @@ retry:
1802 rc = -EBADF; 1830 rc = -EBADF;
1803 break; 1831 break;
1804 } 1832 }
1833 wdata->pid = wdata->cfile->pid;
1805 rc = cifs_async_writev(wdata); 1834 rc = cifs_async_writev(wdata);
1806 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN); 1835 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
1807 1836
@@ -2043,7 +2072,7 @@ cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2043 unsigned long i; 2072 unsigned long i;
2044 2073
2045 for (i = 0; i < num_pages; i++) { 2074 for (i = 0; i < num_pages; i++) {
2046 pages[i] = alloc_page(__GFP_HIGHMEM); 2075 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2047 if (!pages[i]) { 2076 if (!pages[i]) {
2048 /* 2077 /*
2049 * save number of pages we have already allocated and 2078 * save number of pages we have already allocated and
@@ -2051,15 +2080,14 @@ cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2051 */ 2080 */
2052 num_pages = i; 2081 num_pages = i;
2053 rc = -ENOMEM; 2082 rc = -ENOMEM;
2054 goto error; 2083 break;
2055 } 2084 }
2056 } 2085 }
2057 2086
2058 return rc; 2087 if (rc) {
2059 2088 for (i = 0; i < num_pages; i++)
2060error: 2089 put_page(pages[i]);
2061 for (i = 0; i < num_pages; i++) 2090 }
2062 put_page(pages[i]);
2063 return rc; 2091 return rc;
2064} 2092}
2065 2093
@@ -2070,9 +2098,7 @@ size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2070 size_t clen; 2098 size_t clen;
2071 2099
2072 clen = min_t(const size_t, len, wsize); 2100 clen = min_t(const size_t, len, wsize);
2073 num_pages = clen / PAGE_CACHE_SIZE; 2101 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
2074 if (clen % PAGE_CACHE_SIZE)
2075 num_pages++;
2076 2102
2077 if (cur_len) 2103 if (cur_len)
2078 *cur_len = clen; 2104 *cur_len = clen;
@@ -2080,24 +2106,79 @@ size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2080 return num_pages; 2106 return num_pages;
2081} 2107}
2082 2108
2109static void
2110cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
2111{
2112 int i;
2113 size_t bytes = wdata->bytes;
2114
2115 /* marshal up the pages into iov array */
2116 for (i = 0; i < wdata->nr_pages; i++) {
2117 iov[i + 1].iov_len = min_t(size_t, bytes, PAGE_SIZE);
2118 iov[i + 1].iov_base = kmap(wdata->pages[i]);
2119 bytes -= iov[i + 1].iov_len;
2120 }
2121}
2122
2123static void
2124cifs_uncached_writev_complete(struct work_struct *work)
2125{
2126 int i;
2127 struct cifs_writedata *wdata = container_of(work,
2128 struct cifs_writedata, work);
2129 struct inode *inode = wdata->cfile->dentry->d_inode;
2130 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2131
2132 spin_lock(&inode->i_lock);
2133 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2134 if (cifsi->server_eof > inode->i_size)
2135 i_size_write(inode, cifsi->server_eof);
2136 spin_unlock(&inode->i_lock);
2137
2138 complete(&wdata->done);
2139
2140 if (wdata->result != -EAGAIN) {
2141 for (i = 0; i < wdata->nr_pages; i++)
2142 put_page(wdata->pages[i]);
2143 }
2144
2145 kref_put(&wdata->refcount, cifs_writedata_release);
2146}
2147
2148/* attempt to send write to server, retry on any -EAGAIN errors */
2149static int
2150cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2151{
2152 int rc;
2153
2154 do {
2155 if (wdata->cfile->invalidHandle) {
2156 rc = cifs_reopen_file(wdata->cfile, false);
2157 if (rc != 0)
2158 continue;
2159 }
2160 rc = cifs_async_writev(wdata);
2161 } while (rc == -EAGAIN);
2162
2163 return rc;
2164}
2165
2083static ssize_t 2166static ssize_t
2084cifs_iovec_write(struct file *file, const struct iovec *iov, 2167cifs_iovec_write(struct file *file, const struct iovec *iov,
2085 unsigned long nr_segs, loff_t *poffset) 2168 unsigned long nr_segs, loff_t *poffset)
2086{ 2169{
2087 unsigned int written; 2170 unsigned long nr_pages, i;
2088 unsigned long num_pages, npages, i;
2089 size_t copied, len, cur_len; 2171 size_t copied, len, cur_len;
2090 ssize_t total_written = 0; 2172 ssize_t total_written = 0;
2091 struct kvec *to_send; 2173 loff_t offset = *poffset;
2092 struct page **pages;
2093 struct iov_iter it; 2174 struct iov_iter it;
2094 struct inode *inode;
2095 struct cifsFileInfo *open_file; 2175 struct cifsFileInfo *open_file;
2096 struct cifs_tcon *pTcon; 2176 struct cifs_tcon *tcon;
2097 struct cifs_sb_info *cifs_sb; 2177 struct cifs_sb_info *cifs_sb;
2098 struct cifs_io_parms io_parms; 2178 struct cifs_writedata *wdata, *tmp;
2099 int xid, rc; 2179 struct list_head wdata_list;
2100 __u32 pid; 2180 int rc;
2181 pid_t pid;
2101 2182
2102 len = iov_length(iov, nr_segs); 2183 len = iov_length(iov, nr_segs);
2103 if (!len) 2184 if (!len)
@@ -2107,103 +2188,103 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
2107 if (rc) 2188 if (rc)
2108 return rc; 2189 return rc;
2109 2190
2191 INIT_LIST_HEAD(&wdata_list);
2110 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 2192 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2111 num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2112
2113 pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
2114 if (!pages)
2115 return -ENOMEM;
2116
2117 to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
2118 if (!to_send) {
2119 kfree(pages);
2120 return -ENOMEM;
2121 }
2122
2123 rc = cifs_write_allocate_pages(pages, num_pages);
2124 if (rc) {
2125 kfree(pages);
2126 kfree(to_send);
2127 return rc;
2128 }
2129
2130 xid = GetXid();
2131 open_file = file->private_data; 2193 open_file = file->private_data;
2194 tcon = tlink_tcon(open_file->tlink);
2132 2195
2133 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) 2196 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2134 pid = open_file->pid; 2197 pid = open_file->pid;
2135 else 2198 else
2136 pid = current->tgid; 2199 pid = current->tgid;
2137 2200
2138 pTcon = tlink_tcon(open_file->tlink);
2139 inode = file->f_path.dentry->d_inode;
2140
2141 iov_iter_init(&it, iov, nr_segs, len, 0); 2201 iov_iter_init(&it, iov, nr_segs, len, 0);
2142 npages = num_pages;
2143
2144 do { 2202 do {
2145 size_t save_len = cur_len; 2203 size_t save_len;
2146 for (i = 0; i < npages; i++) { 2204
2147 copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE); 2205 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2148 copied = iov_iter_copy_from_user(pages[i], &it, 0, 2206 wdata = cifs_writedata_alloc(nr_pages,
2149 copied); 2207 cifs_uncached_writev_complete);
2208 if (!wdata) {
2209 rc = -ENOMEM;
2210 break;
2211 }
2212
2213 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2214 if (rc) {
2215 kfree(wdata);
2216 break;
2217 }
2218
2219 save_len = cur_len;
2220 for (i = 0; i < nr_pages; i++) {
2221 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2222 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2223 0, copied);
2150 cur_len -= copied; 2224 cur_len -= copied;
2151 iov_iter_advance(&it, copied); 2225 iov_iter_advance(&it, copied);
2152 to_send[i+1].iov_base = kmap(pages[i]);
2153 to_send[i+1].iov_len = copied;
2154 } 2226 }
2155
2156 cur_len = save_len - cur_len; 2227 cur_len = save_len - cur_len;
2157 2228
2158 do { 2229 wdata->sync_mode = WB_SYNC_ALL;
2159 if (open_file->invalidHandle) { 2230 wdata->nr_pages = nr_pages;
2160 rc = cifs_reopen_file(open_file, false); 2231 wdata->offset = (__u64)offset;
2161 if (rc != 0) 2232 wdata->cfile = cifsFileInfo_get(open_file);
2162 break; 2233 wdata->pid = pid;
2163 } 2234 wdata->bytes = cur_len;
2164 io_parms.netfid = open_file->netfid; 2235 wdata->marshal_iov = cifs_uncached_marshal_iov;
2165 io_parms.pid = pid; 2236 rc = cifs_uncached_retry_writev(wdata);
2166 io_parms.tcon = pTcon; 2237 if (rc) {
2167 io_parms.offset = *poffset; 2238 kref_put(&wdata->refcount, cifs_writedata_release);
2168 io_parms.length = cur_len;
2169 rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
2170 npages, 0);
2171 } while (rc == -EAGAIN);
2172
2173 for (i = 0; i < npages; i++)
2174 kunmap(pages[i]);
2175
2176 if (written) {
2177 len -= written;
2178 total_written += written;
2179 cifs_update_eof(CIFS_I(inode), *poffset, written);
2180 *poffset += written;
2181 } else if (rc < 0) {
2182 if (!total_written)
2183 total_written = rc;
2184 break; 2239 break;
2185 } 2240 }
2186 2241
2187 /* get length and number of kvecs of the next write */ 2242 list_add_tail(&wdata->list, &wdata_list);
2188 npages = get_numpages(cifs_sb->wsize, len, &cur_len); 2243 offset += cur_len;
2244 len -= cur_len;
2189 } while (len > 0); 2245 } while (len > 0);
2190 2246
2191 if (total_written > 0) { 2247 /*
2192 spin_lock(&inode->i_lock); 2248 * If at least one write was successfully sent, then discard any rc
2193 if (*poffset > inode->i_size) 2249 * value from the later writes. If the other write succeeds, then
2194 i_size_write(inode, *poffset); 2250 * we'll end up returning whatever was written. If it fails, then
2195 spin_unlock(&inode->i_lock); 2251 * we'll get a new rc value from that.
2252 */
2253 if (!list_empty(&wdata_list))
2254 rc = 0;
2255
2256 /*
2257 * Wait for and collect replies for any successful sends in order of
2258 * increasing offset. Once an error is hit or we get a fatal signal
2259 * while waiting, then return without waiting for any more replies.
2260 */
2261restart_loop:
2262 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2263 if (!rc) {
2264 /* FIXME: freezable too? */
2265 rc = wait_for_completion_killable(&wdata->done);
2266 if (rc)
2267 rc = -EINTR;
2268 else if (wdata->result)
2269 rc = wdata->result;
2270 else
2271 total_written += wdata->bytes;
2272
2273 /* resend call if it's a retryable error */
2274 if (rc == -EAGAIN) {
2275 rc = cifs_uncached_retry_writev(wdata);
2276 goto restart_loop;
2277 }
2278 }
2279 list_del_init(&wdata->list);
2280 kref_put(&wdata->refcount, cifs_writedata_release);
2196 } 2281 }
2197 2282
2198 cifs_stats_bytes_written(pTcon, total_written); 2283 if (total_written > 0)
2199 mark_inode_dirty_sync(inode); 2284 *poffset += total_written;
2200 2285
2201 for (i = 0; i < num_pages; i++) 2286 cifs_stats_bytes_written(tcon, total_written);
2202 put_page(pages[i]); 2287 return total_written ? total_written : (ssize_t)rc;
2203 kfree(to_send);
2204 kfree(pages);
2205 FreeXid(xid);
2206 return total_written;
2207} 2288}
2208 2289
2209ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov, 2290ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index c273c12de98e..c29d1aa2c54f 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -213,55 +213,62 @@ cifs_small_buf_release(void *buf_to_free)
213} 213}
214 214
215/* 215/*
216 Find a free multiplex id (SMB mid). Otherwise there could be 216 * Find a free multiplex id (SMB mid). Otherwise there could be
217 mid collisions which might cause problems, demultiplexing the 217 * mid collisions which might cause problems, demultiplexing the
218 wrong response to this request. Multiplex ids could collide if 218 * wrong response to this request. Multiplex ids could collide if
219 one of a series requests takes much longer than the others, or 219 * one of a series requests takes much longer than the others, or
220 if a very large number of long lived requests (byte range 220 * if a very large number of long lived requests (byte range
221 locks or FindNotify requests) are pending. No more than 221 * locks or FindNotify requests) are pending. No more than
222 64K-1 requests can be outstanding at one time. If no 222 * 64K-1 requests can be outstanding at one time. If no
223 mids are available, return zero. A future optimization 223 * mids are available, return zero. A future optimization
224 could make the combination of mids and uid the key we use 224 * could make the combination of mids and uid the key we use
225 to demultiplex on (rather than mid alone). 225 * to demultiplex on (rather than mid alone).
226 In addition to the above check, the cifs demultiplex 226 * In addition to the above check, the cifs demultiplex
227 code already used the command code as a secondary 227 * code already used the command code as a secondary
228 check of the frame and if signing is negotiated the 228 * check of the frame and if signing is negotiated the
229 response would be discarded if the mid were the same 229 * response would be discarded if the mid were the same
230 but the signature was wrong. Since the mid is not put in the 230 * but the signature was wrong. Since the mid is not put in the
231 pending queue until later (when it is about to be dispatched) 231 * pending queue until later (when it is about to be dispatched)
232 we do have to limit the number of outstanding requests 232 * we do have to limit the number of outstanding requests
233 to somewhat less than 64K-1 although it is hard to imagine 233 * to somewhat less than 64K-1 although it is hard to imagine
234 so many threads being in the vfs at one time. 234 * so many threads being in the vfs at one time.
235*/ 235 */
236__u16 GetNextMid(struct TCP_Server_Info *server) 236__u64 GetNextMid(struct TCP_Server_Info *server)
237{ 237{
238 __u16 mid = 0; 238 __u64 mid = 0;
239 __u16 last_mid; 239 __u16 last_mid, cur_mid;
240 bool collision; 240 bool collision;
241 241
242 spin_lock(&GlobalMid_Lock); 242 spin_lock(&GlobalMid_Lock);
243 last_mid = server->CurrentMid; /* we do not want to loop forever */ 243
244 server->CurrentMid++; 244 /* mid is 16 bit only for CIFS/SMB */
245 /* This nested loop looks more expensive than it is. 245 cur_mid = (__u16)((server->CurrentMid) & 0xffff);
246 In practice the list of pending requests is short, 246 /* we do not want to loop forever */
247 fewer than 50, and the mids are likely to be unique 247 last_mid = cur_mid;
248 on the first pass through the loop unless some request 248 cur_mid++;
249 takes longer than the 64 thousand requests before it 249
250 (and it would also have to have been a request that 250 /*
251 did not time out) */ 251 * This nested loop looks more expensive than it is.
252 while (server->CurrentMid != last_mid) { 252 * In practice the list of pending requests is short,
253 * fewer than 50, and the mids are likely to be unique
254 * on the first pass through the loop unless some request
255 * takes longer than the 64 thousand requests before it
256 * (and it would also have to have been a request that
257 * did not time out).
258 */
259 while (cur_mid != last_mid) {
253 struct mid_q_entry *mid_entry; 260 struct mid_q_entry *mid_entry;
254 unsigned int num_mids; 261 unsigned int num_mids;
255 262
256 collision = false; 263 collision = false;
257 if (server->CurrentMid == 0) 264 if (cur_mid == 0)
258 server->CurrentMid++; 265 cur_mid++;
259 266
260 num_mids = 0; 267 num_mids = 0;
261 list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { 268 list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
262 ++num_mids; 269 ++num_mids;
263 if (mid_entry->mid == server->CurrentMid && 270 if (mid_entry->mid == cur_mid &&
264 mid_entry->midState == MID_REQUEST_SUBMITTED) { 271 mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
265 /* This mid is in use, try a different one */ 272 /* This mid is in use, try a different one */
266 collision = true; 273 collision = true;
267 break; 274 break;
@@ -282,10 +289,11 @@ __u16 GetNextMid(struct TCP_Server_Info *server)
282 server->tcpStatus = CifsNeedReconnect; 289 server->tcpStatus = CifsNeedReconnect;
283 290
284 if (!collision) { 291 if (!collision) {
285 mid = server->CurrentMid; 292 mid = (__u64)cur_mid;
293 server->CurrentMid = mid;
286 break; 294 break;
287 } 295 }
288 server->CurrentMid++; 296 cur_mid++;
289 } 297 }
290 spin_unlock(&GlobalMid_Lock); 298 spin_unlock(&GlobalMid_Lock);
291 return mid; 299 return mid;
@@ -420,8 +428,10 @@ check_smb_hdr(struct smb_hdr *smb, __u16 mid)
420} 428}
421 429
422int 430int
423checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int total_read) 431checkSMB(char *buf, unsigned int total_read)
424{ 432{
433 struct smb_hdr *smb = (struct smb_hdr *)buf;
434 __u16 mid = smb->Mid;
425 __u32 rfclen = be32_to_cpu(smb->smb_buf_length); 435 __u32 rfclen = be32_to_cpu(smb->smb_buf_length);
426 __u32 clc_len; /* calculated length */ 436 __u32 clc_len; /* calculated length */
427 cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x", 437 cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x",
@@ -502,8 +512,9 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int total_read)
502} 512}
503 513
504bool 514bool
505is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) 515is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
506{ 516{
517 struct smb_hdr *buf = (struct smb_hdr *)buffer;
507 struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf; 518 struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
508 struct list_head *tmp, *tmp1, *tmp2; 519 struct list_head *tmp, *tmp1, *tmp2;
509 struct cifs_ses *ses; 520 struct cifs_ses *ses;
@@ -584,7 +595,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
584 595
585 cifs_set_oplock_level(pCifsInode, 596 cifs_set_oplock_level(pCifsInode,
586 pSMB->OplockLevel ? OPLOCK_READ : 0); 597 pSMB->OplockLevel ? OPLOCK_READ : 0);
587 queue_work(system_nrt_wq, 598 queue_work(cifsiod_wq,
588 &netfile->oplock_break); 599 &netfile->oplock_break);
589 netfile->oplock_break_cancelled = false; 600 netfile->oplock_break_cancelled = false;
590 601
@@ -604,16 +615,15 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
604} 615}
605 616
606void 617void
607dump_smb(struct smb_hdr *smb_buf, int smb_buf_length) 618dump_smb(void *buf, int smb_buf_length)
608{ 619{
609 int i, j; 620 int i, j;
610 char debug_line[17]; 621 char debug_line[17];
611 unsigned char *buffer; 622 unsigned char *buffer = buf;
612 623
613 if (traceSMB == 0) 624 if (traceSMB == 0)
614 return; 625 return;
615 626
616 buffer = (unsigned char *) smb_buf;
617 for (i = 0, j = 0; i < smb_buf_length; i++, j++) { 627 for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
618 if (i % 8 == 0) { 628 if (i % 8 == 0) {
619 /* have reached the beginning of line */ 629 /* have reached the beginning of line */
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 73e47e84b61a..dd23a321bdda 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -836,8 +836,9 @@ ntstatus_to_dos(__u32 ntstatus, __u8 *eclass, __u16 *ecode)
836} 836}
837 837
838int 838int
839map_smb_to_linux_error(struct smb_hdr *smb, bool logErr) 839map_smb_to_linux_error(char *buf, bool logErr)
840{ 840{
841 struct smb_hdr *smb = (struct smb_hdr *)buf;
841 unsigned int i; 842 unsigned int i;
842 int rc = -EIO; /* if transport error smb error may not be set */ 843 int rc = -EIO; /* if transport error smb error may not be set */
843 __u8 smberrclass; 844 __u8 smberrclass;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 310918b6fcb4..0961336513d5 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -60,8 +60,8 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
60 memset(temp, 0, sizeof(struct mid_q_entry)); 60 memset(temp, 0, sizeof(struct mid_q_entry));
61 temp->mid = smb_buffer->Mid; /* always LE */ 61 temp->mid = smb_buffer->Mid; /* always LE */
62 temp->pid = current->pid; 62 temp->pid = current->pid;
63 temp->command = smb_buffer->Command; 63 temp->command = cpu_to_le16(smb_buffer->Command);
64 cFYI(1, "For smb_command %d", temp->command); 64 cFYI(1, "For smb_command %d", smb_buffer->Command);
65 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */ 65 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
66 /* when mid allocated can be before when sent */ 66 /* when mid allocated can be before when sent */
67 temp->when_alloc = jiffies; 67 temp->when_alloc = jiffies;
@@ -75,7 +75,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
75 } 75 }
76 76
77 atomic_inc(&midCount); 77 atomic_inc(&midCount);
78 temp->midState = MID_REQUEST_ALLOCATED; 78 temp->mid_state = MID_REQUEST_ALLOCATED;
79 return temp; 79 return temp;
80} 80}
81 81
@@ -85,9 +85,9 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
85#ifdef CONFIG_CIFS_STATS2 85#ifdef CONFIG_CIFS_STATS2
86 unsigned long now; 86 unsigned long now;
87#endif 87#endif
88 midEntry->midState = MID_FREE; 88 midEntry->mid_state = MID_FREE;
89 atomic_dec(&midCount); 89 atomic_dec(&midCount);
90 if (midEntry->largeBuf) 90 if (midEntry->large_buf)
91 cifs_buf_release(midEntry->resp_buf); 91 cifs_buf_release(midEntry->resp_buf);
92 else 92 else
93 cifs_small_buf_release(midEntry->resp_buf); 93 cifs_small_buf_release(midEntry->resp_buf);
@@ -97,8 +97,8 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
97 something is wrong, unless it is quite a slow link or server */ 97 something is wrong, unless it is quite a slow link or server */
98 if ((now - midEntry->when_alloc) > HZ) { 98 if ((now - midEntry->when_alloc) > HZ) {
99 if ((cifsFYI & CIFS_TIMER) && 99 if ((cifsFYI & CIFS_TIMER) &&
100 (midEntry->command != SMB_COM_LOCKING_ANDX)) { 100 (midEntry->command != cpu_to_le16(SMB_COM_LOCKING_ANDX))) {
101 printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %d", 101 printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
102 midEntry->command, midEntry->mid); 102 midEntry->command, midEntry->mid);
103 printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n", 103 printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
104 now - midEntry->when_alloc, 104 now - midEntry->when_alloc,
@@ -126,11 +126,11 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
126 int rc = 0; 126 int rc = 0;
127 int i = 0; 127 int i = 0;
128 struct msghdr smb_msg; 128 struct msghdr smb_msg;
129 struct smb_hdr *smb_buffer = iov[0].iov_base; 129 __be32 *buf_len = (__be32 *)(iov[0].iov_base);
130 unsigned int len = iov[0].iov_len; 130 unsigned int len = iov[0].iov_len;
131 unsigned int total_len; 131 unsigned int total_len;
132 int first_vec = 0; 132 int first_vec = 0;
133 unsigned int smb_buf_length = be32_to_cpu(smb_buffer->smb_buf_length); 133 unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
134 struct socket *ssocket = server->ssocket; 134 struct socket *ssocket = server->ssocket;
135 135
136 if (ssocket == NULL) 136 if (ssocket == NULL)
@@ -150,7 +150,7 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
150 total_len += iov[i].iov_len; 150 total_len += iov[i].iov_len;
151 151
152 cFYI(1, "Sending smb: total_len %d", total_len); 152 cFYI(1, "Sending smb: total_len %d", total_len);
153 dump_smb(smb_buffer, len); 153 dump_smb(iov[0].iov_base, len);
154 154
155 i = 0; 155 i = 0;
156 while (total_len) { 156 while (total_len) {
@@ -158,24 +158,24 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
158 n_vec - first_vec, total_len); 158 n_vec - first_vec, total_len);
159 if ((rc == -ENOSPC) || (rc == -EAGAIN)) { 159 if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
160 i++; 160 i++;
161 /* if blocking send we try 3 times, since each can block 161 /*
162 for 5 seconds. For nonblocking we have to try more 162 * If blocking send we try 3 times, since each can block
163 but wait increasing amounts of time allowing time for 163 * for 5 seconds. For nonblocking we have to try more
164 socket to clear. The overall time we wait in either 164 * but wait increasing amounts of time allowing time for
165 case to send on the socket is about 15 seconds. 165 * socket to clear. The overall time we wait in either
166 Similarly we wait for 15 seconds for 166 * case to send on the socket is about 15 seconds.
167 a response from the server in SendReceive[2] 167 * Similarly we wait for 15 seconds for a response from
168 for the server to send a response back for 168 * the server in SendReceive[2] for the server to send
169 most types of requests (except SMB Write 169 * a response back for most types of requests (except
170 past end of file which can be slow, and 170 * SMB Write past end of file which can be slow, and
171 blocking lock operations). NFS waits slightly longer 171 * blocking lock operations). NFS waits slightly longer
172 than CIFS, but this can make it take longer for 172 * than CIFS, but this can make it take longer for
173 nonresponsive servers to be detected and 15 seconds 173 * nonresponsive servers to be detected and 15 seconds
174 is more than enough time for modern networks to 174 * is more than enough time for modern networks to
175 send a packet. In most cases if we fail to send 175 * send a packet. In most cases if we fail to send
176 after the retries we will kill the socket and 176 * after the retries we will kill the socket and
177 reconnect which may clear the network problem. 177 * reconnect which may clear the network problem.
178 */ 178 */
179 if ((i >= 14) || (!server->noblocksnd && (i > 2))) { 179 if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
180 cERROR(1, "sends on sock %p stuck for 15 seconds", 180 cERROR(1, "sends on sock %p stuck for 15 seconds",
181 ssocket); 181 ssocket);
@@ -235,9 +235,8 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
235 else 235 else
236 rc = 0; 236 rc = 0;
237 237
238 /* Don't want to modify the buffer as a 238 /* Don't want to modify the buffer as a side effect of this call. */
239 side effect of this call. */ 239 *buf_len = cpu_to_be32(smb_buf_length);
240 smb_buffer->smb_buf_length = cpu_to_be32(smb_buf_length);
241 240
242 return rc; 241 return rc;
243} 242}
@@ -342,13 +341,40 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
342 int error; 341 int error;
343 342
344 error = wait_event_freezekillable(server->response_q, 343 error = wait_event_freezekillable(server->response_q,
345 midQ->midState != MID_REQUEST_SUBMITTED); 344 midQ->mid_state != MID_REQUEST_SUBMITTED);
346 if (error < 0) 345 if (error < 0)
347 return -ERESTARTSYS; 346 return -ERESTARTSYS;
348 347
349 return 0; 348 return 0;
350} 349}
351 350
351static int
352cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
353 unsigned int nvec, struct mid_q_entry **ret_mid)
354{
355 int rc;
356 struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
357 struct mid_q_entry *mid;
358
359 /* enable signing if server requires it */
360 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
361 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
362
363 mid = AllocMidQEntry(hdr, server);
364 if (mid == NULL)
365 return -ENOMEM;
366
367 /* put it on the pending_mid_q */
368 spin_lock(&GlobalMid_Lock);
369 list_add_tail(&mid->qhead, &server->pending_mid_q);
370 spin_unlock(&GlobalMid_Lock);
371
372 rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
373 if (rc)
374 delete_mid(mid);
375 *ret_mid = mid;
376 return rc;
377}
352 378
353/* 379/*
354 * Send a SMB request and set the callback function in the mid to handle 380 * Send a SMB request and set the callback function in the mid to handle
@@ -361,40 +387,24 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
361{ 387{
362 int rc; 388 int rc;
363 struct mid_q_entry *mid; 389 struct mid_q_entry *mid;
364 struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
365 390
366 rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0); 391 rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
367 if (rc) 392 if (rc)
368 return rc; 393 return rc;
369 394
370 /* enable signing if server requires it */
371 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
372 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
373
374 mutex_lock(&server->srv_mutex); 395 mutex_lock(&server->srv_mutex);
375 mid = AllocMidQEntry(hdr, server); 396 rc = cifs_setup_async_request(server, iov, nvec, &mid);
376 if (mid == NULL) { 397 if (rc) {
377 mutex_unlock(&server->srv_mutex); 398 mutex_unlock(&server->srv_mutex);
378 cifs_add_credits(server, 1); 399 cifs_add_credits(server, 1);
379 wake_up(&server->request_q); 400 wake_up(&server->request_q);
380 return -ENOMEM; 401 return rc;
381 }
382
383 /* put it on the pending_mid_q */
384 spin_lock(&GlobalMid_Lock);
385 list_add_tail(&mid->qhead, &server->pending_mid_q);
386 spin_unlock(&GlobalMid_Lock);
387
388 rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
389 if (rc) {
390 mutex_unlock(&server->srv_mutex);
391 goto out_err;
392 } 402 }
393 403
394 mid->receive = receive; 404 mid->receive = receive;
395 mid->callback = callback; 405 mid->callback = callback;
396 mid->callback_data = cbdata; 406 mid->callback_data = cbdata;
397 mid->midState = MID_REQUEST_SUBMITTED; 407 mid->mid_state = MID_REQUEST_SUBMITTED;
398 408
399 cifs_in_send_inc(server); 409 cifs_in_send_inc(server);
400 rc = smb_sendv(server, iov, nvec); 410 rc = smb_sendv(server, iov, nvec);
@@ -424,14 +434,14 @@ out_err:
424 */ 434 */
425int 435int
426SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses, 436SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
427 struct smb_hdr *in_buf, int flags) 437 char *in_buf, int flags)
428{ 438{
429 int rc; 439 int rc;
430 struct kvec iov[1]; 440 struct kvec iov[1];
431 int resp_buf_type; 441 int resp_buf_type;
432 442
433 iov[0].iov_base = (char *)in_buf; 443 iov[0].iov_base = in_buf;
434 iov[0].iov_len = be32_to_cpu(in_buf->smb_buf_length) + 4; 444 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
435 flags |= CIFS_NO_RESP; 445 flags |= CIFS_NO_RESP;
436 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags); 446 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
437 cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc); 447 cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);
@@ -444,11 +454,11 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
444{ 454{
445 int rc = 0; 455 int rc = 0;
446 456
447 cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command, 457 cFYI(1, "%s: cmd=%d mid=%llu state=%d", __func__,
448 mid->mid, mid->midState); 458 le16_to_cpu(mid->command), mid->mid, mid->mid_state);
449 459
450 spin_lock(&GlobalMid_Lock); 460 spin_lock(&GlobalMid_Lock);
451 switch (mid->midState) { 461 switch (mid->mid_state) {
452 case MID_RESPONSE_RECEIVED: 462 case MID_RESPONSE_RECEIVED:
453 spin_unlock(&GlobalMid_Lock); 463 spin_unlock(&GlobalMid_Lock);
454 return rc; 464 return rc;
@@ -463,8 +473,8 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
463 break; 473 break;
464 default: 474 default:
465 list_del_init(&mid->qhead); 475 list_del_init(&mid->qhead);
466 cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__, 476 cERROR(1, "%s: invalid mid state mid=%llu state=%d", __func__,
467 mid->mid, mid->midState); 477 mid->mid, mid->mid_state);
468 rc = -EIO; 478 rc = -EIO;
469 } 479 }
470 spin_unlock(&GlobalMid_Lock); 480 spin_unlock(&GlobalMid_Lock);
@@ -514,7 +524,7 @@ int
514cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, 524cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
515 bool log_error) 525 bool log_error)
516{ 526{
517 unsigned int len = be32_to_cpu(mid->resp_buf->smb_buf_length) + 4; 527 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
518 528
519 dump_smb(mid->resp_buf, min_t(u32, 92, len)); 529 dump_smb(mid->resp_buf, min_t(u32, 92, len));
520 530
@@ -534,6 +544,24 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
534 return map_smb_to_linux_error(mid->resp_buf, log_error); 544 return map_smb_to_linux_error(mid->resp_buf, log_error);
535} 545}
536 546
547static int
548cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
549 unsigned int nvec, struct mid_q_entry **ret_mid)
550{
551 int rc;
552 struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
553 struct mid_q_entry *mid;
554
555 rc = allocate_mid(ses, hdr, &mid);
556 if (rc)
557 return rc;
558 rc = cifs_sign_smb2(iov, nvec, ses->server, &mid->sequence_number);
559 if (rc)
560 delete_mid(mid);
561 *ret_mid = mid;
562 return rc;
563}
564
537int 565int
538SendReceive2(const unsigned int xid, struct cifs_ses *ses, 566SendReceive2(const unsigned int xid, struct cifs_ses *ses,
539 struct kvec *iov, int n_vec, int *pRespBufType /* ret */, 567 struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
@@ -542,55 +570,53 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
542 int rc = 0; 570 int rc = 0;
543 int long_op; 571 int long_op;
544 struct mid_q_entry *midQ; 572 struct mid_q_entry *midQ;
545 struct smb_hdr *in_buf = iov[0].iov_base; 573 char *buf = iov[0].iov_base;
546 574
547 long_op = flags & CIFS_TIMEOUT_MASK; 575 long_op = flags & CIFS_TIMEOUT_MASK;
548 576
549 *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */ 577 *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
550 578
551 if ((ses == NULL) || (ses->server == NULL)) { 579 if ((ses == NULL) || (ses->server == NULL)) {
552 cifs_small_buf_release(in_buf); 580 cifs_small_buf_release(buf);
553 cERROR(1, "Null session"); 581 cERROR(1, "Null session");
554 return -EIO; 582 return -EIO;
555 } 583 }
556 584
557 if (ses->server->tcpStatus == CifsExiting) { 585 if (ses->server->tcpStatus == CifsExiting) {
558 cifs_small_buf_release(in_buf); 586 cifs_small_buf_release(buf);
559 return -ENOENT; 587 return -ENOENT;
560 } 588 }
561 589
562 /* Ensure that we do not send more than 50 overlapping requests 590 /*
563 to the same server. We may make this configurable later or 591 * Ensure that we do not send more than 50 overlapping requests
564 use ses->maxReq */ 592 * to the same server. We may make this configurable later or
593 * use ses->maxReq.
594 */
565 595
566 rc = wait_for_free_request(ses->server, long_op); 596 rc = wait_for_free_request(ses->server, long_op);
567 if (rc) { 597 if (rc) {
568 cifs_small_buf_release(in_buf); 598 cifs_small_buf_release(buf);
569 return rc; 599 return rc;
570 } 600 }
571 601
572 /* make sure that we sign in the same order that we send on this socket 602 /*
573 and avoid races inside tcp sendmsg code that could cause corruption 603 * Make sure that we sign in the same order that we send on this socket
574 of smb data */ 604 * and avoid races inside tcp sendmsg code that could cause corruption
605 * of smb data.
606 */
575 607
576 mutex_lock(&ses->server->srv_mutex); 608 mutex_lock(&ses->server->srv_mutex);
577 609
578 rc = allocate_mid(ses, in_buf, &midQ); 610 rc = cifs_setup_request(ses, iov, n_vec, &midQ);
579 if (rc) { 611 if (rc) {
580 mutex_unlock(&ses->server->srv_mutex); 612 mutex_unlock(&ses->server->srv_mutex);
581 cifs_small_buf_release(in_buf); 613 cifs_small_buf_release(buf);
582 /* Update # of requests on wire to server */ 614 /* Update # of requests on wire to server */
583 cifs_add_credits(ses->server, 1); 615 cifs_add_credits(ses->server, 1);
584 return rc; 616 return rc;
585 } 617 }
586 rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number);
587 if (rc) {
588 mutex_unlock(&ses->server->srv_mutex);
589 cifs_small_buf_release(in_buf);
590 goto out;
591 }
592 618
593 midQ->midState = MID_REQUEST_SUBMITTED; 619 midQ->mid_state = MID_REQUEST_SUBMITTED;
594 cifs_in_send_inc(ses->server); 620 cifs_in_send_inc(ses->server);
595 rc = smb_sendv(ses->server, iov, n_vec); 621 rc = smb_sendv(ses->server, iov, n_vec);
596 cifs_in_send_dec(ses->server); 622 cifs_in_send_dec(ses->server);
@@ -599,30 +625,30 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
599 mutex_unlock(&ses->server->srv_mutex); 625 mutex_unlock(&ses->server->srv_mutex);
600 626
601 if (rc < 0) { 627 if (rc < 0) {
602 cifs_small_buf_release(in_buf); 628 cifs_small_buf_release(buf);
603 goto out; 629 goto out;
604 } 630 }
605 631
606 if (long_op == CIFS_ASYNC_OP) { 632 if (long_op == CIFS_ASYNC_OP) {
607 cifs_small_buf_release(in_buf); 633 cifs_small_buf_release(buf);
608 goto out; 634 goto out;
609 } 635 }
610 636
611 rc = wait_for_response(ses->server, midQ); 637 rc = wait_for_response(ses->server, midQ);
612 if (rc != 0) { 638 if (rc != 0) {
613 send_nt_cancel(ses->server, in_buf, midQ); 639 send_nt_cancel(ses->server, (struct smb_hdr *)buf, midQ);
614 spin_lock(&GlobalMid_Lock); 640 spin_lock(&GlobalMid_Lock);
615 if (midQ->midState == MID_REQUEST_SUBMITTED) { 641 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
616 midQ->callback = DeleteMidQEntry; 642 midQ->callback = DeleteMidQEntry;
617 spin_unlock(&GlobalMid_Lock); 643 spin_unlock(&GlobalMid_Lock);
618 cifs_small_buf_release(in_buf); 644 cifs_small_buf_release(buf);
619 cifs_add_credits(ses->server, 1); 645 cifs_add_credits(ses->server, 1);
620 return rc; 646 return rc;
621 } 647 }
622 spin_unlock(&GlobalMid_Lock); 648 spin_unlock(&GlobalMid_Lock);
623 } 649 }
624 650
625 cifs_small_buf_release(in_buf); 651 cifs_small_buf_release(buf);
626 652
627 rc = cifs_sync_mid_result(midQ, ses->server); 653 rc = cifs_sync_mid_result(midQ, ses->server);
628 if (rc != 0) { 654 if (rc != 0) {
@@ -630,15 +656,16 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
630 return rc; 656 return rc;
631 } 657 }
632 658
633 if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) { 659 if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
634 rc = -EIO; 660 rc = -EIO;
635 cFYI(1, "Bad MID state?"); 661 cFYI(1, "Bad MID state?");
636 goto out; 662 goto out;
637 } 663 }
638 664
639 iov[0].iov_base = (char *)midQ->resp_buf; 665 buf = (char *)midQ->resp_buf;
640 iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4; 666 iov[0].iov_base = buf;
641 if (midQ->largeBuf) 667 iov[0].iov_len = get_rfc1002_length(buf) + 4;
668 if (midQ->large_buf)
642 *pRespBufType = CIFS_LARGE_BUFFER; 669 *pRespBufType = CIFS_LARGE_BUFFER;
643 else 670 else
644 *pRespBufType = CIFS_SMALL_BUFFER; 671 *pRespBufType = CIFS_SMALL_BUFFER;
@@ -710,7 +737,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
710 goto out; 737 goto out;
711 } 738 }
712 739
713 midQ->midState = MID_REQUEST_SUBMITTED; 740 midQ->mid_state = MID_REQUEST_SUBMITTED;
714 741
715 cifs_in_send_inc(ses->server); 742 cifs_in_send_inc(ses->server);
716 rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); 743 rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
@@ -728,7 +755,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
728 if (rc != 0) { 755 if (rc != 0) {
729 send_nt_cancel(ses->server, in_buf, midQ); 756 send_nt_cancel(ses->server, in_buf, midQ);
730 spin_lock(&GlobalMid_Lock); 757 spin_lock(&GlobalMid_Lock);
731 if (midQ->midState == MID_REQUEST_SUBMITTED) { 758 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
732 /* no longer considered to be "in-flight" */ 759 /* no longer considered to be "in-flight" */
733 midQ->callback = DeleteMidQEntry; 760 midQ->callback = DeleteMidQEntry;
734 spin_unlock(&GlobalMid_Lock); 761 spin_unlock(&GlobalMid_Lock);
@@ -745,13 +772,13 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
745 } 772 }
746 773
747 if (!midQ->resp_buf || !out_buf || 774 if (!midQ->resp_buf || !out_buf ||
748 midQ->midState != MID_RESPONSE_RECEIVED) { 775 midQ->mid_state != MID_RESPONSE_RECEIVED) {
749 rc = -EIO; 776 rc = -EIO;
750 cERROR(1, "Bad MID state?"); 777 cERROR(1, "Bad MID state?");
751 goto out; 778 goto out;
752 } 779 }
753 780
754 *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length); 781 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
755 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); 782 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
756 rc = cifs_check_receive(midQ, ses->server, 0); 783 rc = cifs_check_receive(midQ, ses->server, 0);
757out: 784out:
@@ -844,7 +871,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
844 return rc; 871 return rc;
845 } 872 }
846 873
847 midQ->midState = MID_REQUEST_SUBMITTED; 874 midQ->mid_state = MID_REQUEST_SUBMITTED;
848 cifs_in_send_inc(ses->server); 875 cifs_in_send_inc(ses->server);
849 rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); 876 rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
850 cifs_in_send_dec(ses->server); 877 cifs_in_send_dec(ses->server);
@@ -858,13 +885,13 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
858 885
859 /* Wait for a reply - allow signals to interrupt. */ 886 /* Wait for a reply - allow signals to interrupt. */
860 rc = wait_event_interruptible(ses->server->response_q, 887 rc = wait_event_interruptible(ses->server->response_q,
861 (!(midQ->midState == MID_REQUEST_SUBMITTED)) || 888 (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
862 ((ses->server->tcpStatus != CifsGood) && 889 ((ses->server->tcpStatus != CifsGood) &&
863 (ses->server->tcpStatus != CifsNew))); 890 (ses->server->tcpStatus != CifsNew)));
864 891
865 /* Were we interrupted by a signal ? */ 892 /* Were we interrupted by a signal ? */
866 if ((rc == -ERESTARTSYS) && 893 if ((rc == -ERESTARTSYS) &&
867 (midQ->midState == MID_REQUEST_SUBMITTED) && 894 (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
868 ((ses->server->tcpStatus == CifsGood) || 895 ((ses->server->tcpStatus == CifsGood) ||
869 (ses->server->tcpStatus == CifsNew))) { 896 (ses->server->tcpStatus == CifsNew))) {
870 897
@@ -894,7 +921,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
894 if (rc) { 921 if (rc) {
895 send_nt_cancel(ses->server, in_buf, midQ); 922 send_nt_cancel(ses->server, in_buf, midQ);
896 spin_lock(&GlobalMid_Lock); 923 spin_lock(&GlobalMid_Lock);
897 if (midQ->midState == MID_REQUEST_SUBMITTED) { 924 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
898 /* no longer considered to be "in-flight" */ 925 /* no longer considered to be "in-flight" */
899 midQ->callback = DeleteMidQEntry; 926 midQ->callback = DeleteMidQEntry;
900 spin_unlock(&GlobalMid_Lock); 927 spin_unlock(&GlobalMid_Lock);
@@ -912,13 +939,13 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
912 return rc; 939 return rc;
913 940
914 /* rcvd frame is ok */ 941 /* rcvd frame is ok */
915 if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) { 942 if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
916 rc = -EIO; 943 rc = -EIO;
917 cERROR(1, "Bad MID state?"); 944 cERROR(1, "Bad MID state?");
918 goto out; 945 goto out;
919 } 946 }
920 947
921 *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length); 948 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
922 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); 949 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
923 rc = cifs_check_receive(midQ, ses->server, 0); 950 rc = cifs_check_receive(midQ, ses->server, 0);
924out: 951out:
diff --git a/fs/compat.c b/fs/compat.c
index 14483a715bbb..f2944ace7a7b 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1170,10 +1170,9 @@ compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec,
1170} 1170}
1171 1171
1172asmlinkage ssize_t 1172asmlinkage ssize_t
1173compat_sys_preadv(unsigned long fd, const struct compat_iovec __user *vec, 1173compat_sys_preadv64(unsigned long fd, const struct compat_iovec __user *vec,
1174 unsigned long vlen, u32 pos_low, u32 pos_high) 1174 unsigned long vlen, loff_t pos)
1175{ 1175{
1176 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1177 struct file *file; 1176 struct file *file;
1178 int fput_needed; 1177 int fput_needed;
1179 ssize_t ret; 1178 ssize_t ret;
@@ -1190,6 +1189,14 @@ compat_sys_preadv(unsigned long fd, const struct compat_iovec __user *vec,
1190 return ret; 1189 return ret;
1191} 1190}
1192 1191
1192asmlinkage ssize_t
1193compat_sys_preadv(unsigned long fd, const struct compat_iovec __user *vec,
1194 unsigned long vlen, u32 pos_low, u32 pos_high)
1195{
1196 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1197 return compat_sys_preadv64(fd, vec, vlen, pos);
1198}
1199
1193static size_t compat_writev(struct file *file, 1200static size_t compat_writev(struct file *file,
1194 const struct compat_iovec __user *vec, 1201 const struct compat_iovec __user *vec,
1195 unsigned long vlen, loff_t *pos) 1202 unsigned long vlen, loff_t *pos)
@@ -1229,10 +1236,9 @@ compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec,
1229} 1236}
1230 1237
1231asmlinkage ssize_t 1238asmlinkage ssize_t
1232compat_sys_pwritev(unsigned long fd, const struct compat_iovec __user *vec, 1239compat_sys_pwritev64(unsigned long fd, const struct compat_iovec __user *vec,
1233 unsigned long vlen, u32 pos_low, u32 pos_high) 1240 unsigned long vlen, loff_t pos)
1234{ 1241{
1235 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1236 struct file *file; 1242 struct file *file;
1237 int fput_needed; 1243 int fput_needed;
1238 ssize_t ret; 1244 ssize_t ret;
@@ -1249,6 +1255,14 @@ compat_sys_pwritev(unsigned long fd, const struct compat_iovec __user *vec,
1249 return ret; 1255 return ret;
1250} 1256}
1251 1257
1258asmlinkage ssize_t
1259compat_sys_pwritev(unsigned long fd, const struct compat_iovec __user *vec,
1260 unsigned long vlen, u32 pos_low, u32 pos_high)
1261{
1262 loff_t pos = ((loff_t)pos_high << 32) | pos_low;
1263 return compat_sys_pwritev64(fd, vec, vlen, pos);
1264}
1265
1252asmlinkage long 1266asmlinkage long
1253compat_sys_vmsplice(int fd, const struct compat_iovec __user *iov32, 1267compat_sys_vmsplice(int fd, const struct compat_iovec __user *iov32,
1254 unsigned int nr_segs, unsigned int flags) 1268 unsigned int nr_segs, unsigned int flags)
diff --git a/fs/exec.c b/fs/exec.c
index c8b63d14da85..9a1d9f0a60ab 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1028,10 +1028,10 @@ static void flush_old_files(struct files_struct * files)
1028 fdt = files_fdtable(files); 1028 fdt = files_fdtable(files);
1029 if (i >= fdt->max_fds) 1029 if (i >= fdt->max_fds)
1030 break; 1030 break;
1031 set = fdt->close_on_exec->fds_bits[j]; 1031 set = fdt->close_on_exec[j];
1032 if (!set) 1032 if (!set)
1033 continue; 1033 continue;
1034 fdt->close_on_exec->fds_bits[j] = 0; 1034 fdt->close_on_exec[j] = 0;
1035 spin_unlock(&files->file_lock); 1035 spin_unlock(&files->file_lock);
1036 for ( ; set ; i++,set >>= 1) { 1036 for ( ; set ; i++,set >>= 1) {
1037 if (set & 1) { 1037 if (set & 1) {
@@ -2067,8 +2067,8 @@ static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
2067 fd_install(0, rp); 2067 fd_install(0, rp);
2068 spin_lock(&cf->file_lock); 2068 spin_lock(&cf->file_lock);
2069 fdt = files_fdtable(cf); 2069 fdt = files_fdtable(cf);
2070 FD_SET(0, fdt->open_fds); 2070 __set_open_fd(0, fdt);
2071 FD_CLR(0, fdt->close_on_exec); 2071 __clear_close_on_exec(0, fdt);
2072 spin_unlock(&cf->file_lock); 2072 spin_unlock(&cf->file_lock);
2073 2073
2074 /* and disallow core files too */ 2074 /* and disallow core files too */
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 75ad433c6691..0b2b4db5bdcd 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -1,5 +1,636 @@
1/*
2 * Copyright (C) 1992, 1993, 1994, 1995
3 * Remy Card (card@masi.ibp.fr)
4 * Laboratoire MASI - Institut Blaise Pascal
5 * Universite Pierre et Marie Curie (Paris VI)
6 *
7 * from
8 *
9 * linux/include/linux/minix_fs.h
10 *
11 * Copyright (C) 1991, 1992 Linus Torvalds
12 */
1#include <linux/fs.h> 13#include <linux/fs.h>
2#include <linux/ext2_fs.h> 14#include <linux/ext2_fs.h>
15#include <linux/blockgroup_lock.h>
16#include <linux/percpu_counter.h>
17#include <linux/rbtree.h>
18
 19/* XXX Here for now... not interested in restructuring headers JUST now */
20
21/* data type for block offset of block group */
22typedef int ext2_grpblk_t;
23
24/* data type for filesystem-wide blocks number */
25typedef unsigned long ext2_fsblk_t;
26
27#define E2FSBLK "%lu"
28
29struct ext2_reserve_window {
30 ext2_fsblk_t _rsv_start; /* First byte reserved */
31 ext2_fsblk_t _rsv_end; /* Last byte reserved or 0 */
32};
33
34struct ext2_reserve_window_node {
35 struct rb_node rsv_node;
36 __u32 rsv_goal_size;
37 __u32 rsv_alloc_hit;
38 struct ext2_reserve_window rsv_window;
39};
40
41struct ext2_block_alloc_info {
42 /* information about reservation window */
43 struct ext2_reserve_window_node rsv_window_node;
44 /*
45 * was i_next_alloc_block in ext2_inode_info
46 * is the logical (file-relative) number of the
47 * most-recently-allocated block in this file.
48 * We use this for detecting linearly ascending allocation requests.
49 */
50 __u32 last_alloc_logical_block;
51 /*
52 * Was i_next_alloc_goal in ext2_inode_info
53 * is the *physical* companion to i_next_alloc_block.
54 * it the the physical block number of the block which was most-recentl
55 * allocated to this file. This give us the goal (target) for the next
56 * allocation when we detect linearly ascending requests.
57 */
58 ext2_fsblk_t last_alloc_physical_block;
59};
60
61#define rsv_start rsv_window._rsv_start
62#define rsv_end rsv_window._rsv_end
63
64/*
65 * second extended-fs super-block data in memory
66 */
67struct ext2_sb_info {
68 unsigned long s_frag_size; /* Size of a fragment in bytes */
69 unsigned long s_frags_per_block;/* Number of fragments per block */
70 unsigned long s_inodes_per_block;/* Number of inodes per block */
71 unsigned long s_frags_per_group;/* Number of fragments in a group */
72 unsigned long s_blocks_per_group;/* Number of blocks in a group */
73 unsigned long s_inodes_per_group;/* Number of inodes in a group */
74 unsigned long s_itb_per_group; /* Number of inode table blocks per group */
75 unsigned long s_gdb_count; /* Number of group descriptor blocks */
76 unsigned long s_desc_per_block; /* Number of group descriptors per block */
77 unsigned long s_groups_count; /* Number of groups in the fs */
78 unsigned long s_overhead_last; /* Last calculated overhead */
79 unsigned long s_blocks_last; /* Last seen block count */
80 struct buffer_head * s_sbh; /* Buffer containing the super block */
81 struct ext2_super_block * s_es; /* Pointer to the super block in the buffer */
82 struct buffer_head ** s_group_desc;
83 unsigned long s_mount_opt;
84 unsigned long s_sb_block;
85 uid_t s_resuid;
86 gid_t s_resgid;
87 unsigned short s_mount_state;
88 unsigned short s_pad;
89 int s_addr_per_block_bits;
90 int s_desc_per_block_bits;
91 int s_inode_size;
92 int s_first_ino;
93 spinlock_t s_next_gen_lock;
94 u32 s_next_generation;
95 unsigned long s_dir_count;
96 u8 *s_debts;
97 struct percpu_counter s_freeblocks_counter;
98 struct percpu_counter s_freeinodes_counter;
99 struct percpu_counter s_dirs_counter;
100 struct blockgroup_lock *s_blockgroup_lock;
101 /* root of the per fs reservation window tree */
102 spinlock_t s_rsv_window_lock;
103 struct rb_root s_rsv_window_root;
104 struct ext2_reserve_window_node s_rsv_window_head;
105 /*
106 * s_lock protects against concurrent modifications of s_mount_state,
107 * s_blocks_last, s_overhead_last and the content of superblock's
108 * buffer pointed to by sbi->s_es.
109 *
110 * Note: It is used in ext2_show_options() to provide a consistent view
111 * of the mount options.
112 */
113 spinlock_t s_lock;
114};
115
116static inline spinlock_t *
117sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group)
118{
119 return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
120}
121
122/*
123 * Define EXT2FS_DEBUG to produce debug messages
124 */
125#undef EXT2FS_DEBUG
126
127/*
128 * Define EXT2_RESERVATION to reserve data blocks for expanding files
129 */
130#define EXT2_DEFAULT_RESERVE_BLOCKS 8
131/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
132#define EXT2_MAX_RESERVE_BLOCKS 1027
133#define EXT2_RESERVE_WINDOW_NOT_ALLOCATED 0
134/*
135 * The second extended file system version
136 */
137#define EXT2FS_DATE "95/08/09"
138#define EXT2FS_VERSION "0.5b"
139
140/*
141 * Debug code
142 */
143#ifdef EXT2FS_DEBUG
144# define ext2_debug(f, a...) { \
145 printk ("EXT2-fs DEBUG (%s, %d): %s:", \
146 __FILE__, __LINE__, __func__); \
147 printk (f, ## a); \
148 }
149#else
150# define ext2_debug(f, a...) /**/
151#endif
152
153/*
154 * Special inode numbers
155 */
156#define EXT2_BAD_INO 1 /* Bad blocks inode */
157#define EXT2_ROOT_INO 2 /* Root inode */
158#define EXT2_BOOT_LOADER_INO 5 /* Boot loader inode */
159#define EXT2_UNDEL_DIR_INO 6 /* Undelete directory inode */
160
161/* First non-reserved inode for old ext2 filesystems */
162#define EXT2_GOOD_OLD_FIRST_INO 11
163
164static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
165{
166 return sb->s_fs_info;
167}
168
169/*
170 * Macro-instructions used to manage several block sizes
171 */
172#define EXT2_MIN_BLOCK_SIZE 1024
173#define EXT2_MAX_BLOCK_SIZE 4096
174#define EXT2_MIN_BLOCK_LOG_SIZE 10
175#define EXT2_BLOCK_SIZE(s) ((s)->s_blocksize)
176#define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (__u32))
177#define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
178#define EXT2_ADDR_PER_BLOCK_BITS(s) (EXT2_SB(s)->s_addr_per_block_bits)
179#define EXT2_INODE_SIZE(s) (EXT2_SB(s)->s_inode_size)
180#define EXT2_FIRST_INO(s) (EXT2_SB(s)->s_first_ino)
181
182/*
183 * Macro-instructions used to manage fragments
184 */
185#define EXT2_MIN_FRAG_SIZE 1024
186#define EXT2_MAX_FRAG_SIZE 4096
187#define EXT2_MIN_FRAG_LOG_SIZE 10
188#define EXT2_FRAG_SIZE(s) (EXT2_SB(s)->s_frag_size)
189#define EXT2_FRAGS_PER_BLOCK(s) (EXT2_SB(s)->s_frags_per_block)
190
191/*
192 * Structure of a blocks group descriptor
193 */
194struct ext2_group_desc
195{
196 __le32 bg_block_bitmap; /* Blocks bitmap block */
197 __le32 bg_inode_bitmap; /* Inodes bitmap block */
198 __le32 bg_inode_table; /* Inodes table block */
199 __le16 bg_free_blocks_count; /* Free blocks count */
200 __le16 bg_free_inodes_count; /* Free inodes count */
201 __le16 bg_used_dirs_count; /* Directories count */
202 __le16 bg_pad;
203 __le32 bg_reserved[3];
204};
205
206/*
207 * Macro-instructions used to manage group descriptors
208 */
209#define EXT2_BLOCKS_PER_GROUP(s) (EXT2_SB(s)->s_blocks_per_group)
210#define EXT2_DESC_PER_BLOCK(s) (EXT2_SB(s)->s_desc_per_block)
211#define EXT2_INODES_PER_GROUP(s) (EXT2_SB(s)->s_inodes_per_group)
212#define EXT2_DESC_PER_BLOCK_BITS(s) (EXT2_SB(s)->s_desc_per_block_bits)
213
214/*
215 * Constants relative to the data blocks
216 */
217#define EXT2_NDIR_BLOCKS 12
218#define EXT2_IND_BLOCK EXT2_NDIR_BLOCKS
219#define EXT2_DIND_BLOCK (EXT2_IND_BLOCK + 1)
220#define EXT2_TIND_BLOCK (EXT2_DIND_BLOCK + 1)
221#define EXT2_N_BLOCKS (EXT2_TIND_BLOCK + 1)
222
223/*
224 * Inode flags (GETFLAGS/SETFLAGS)
225 */
226#define EXT2_SECRM_FL FS_SECRM_FL /* Secure deletion */
227#define EXT2_UNRM_FL FS_UNRM_FL /* Undelete */
228#define EXT2_COMPR_FL FS_COMPR_FL /* Compress file */
229#define EXT2_SYNC_FL FS_SYNC_FL /* Synchronous updates */
230#define EXT2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */
231#define EXT2_APPEND_FL FS_APPEND_FL /* writes to file may only append */
232#define EXT2_NODUMP_FL FS_NODUMP_FL /* do not dump file */
233#define EXT2_NOATIME_FL FS_NOATIME_FL /* do not update atime */
234/* Reserved for compression usage... */
235#define EXT2_DIRTY_FL FS_DIRTY_FL
236#define EXT2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */
237#define EXT2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */
238#define EXT2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */
239/* End compression flags --- maybe not all used */
240#define EXT2_BTREE_FL FS_BTREE_FL /* btree format dir */
241#define EXT2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */
242#define EXT2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */
243#define EXT2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */
244#define EXT2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */
245#define EXT2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */
246#define EXT2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/
247#define EXT2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */
248
249#define EXT2_FL_USER_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
250#define EXT2_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */
251
252/* Flags that should be inherited by new inodes from their parent. */
253#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
254 EXT2_SYNC_FL | EXT2_NODUMP_FL |\
255 EXT2_NOATIME_FL | EXT2_COMPRBLK_FL |\
256 EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
257 EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
258
259/* Flags that are appropriate for regular files (all but dir-specific ones). */
260#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL))
261
262/* Flags that are appropriate for non-directories/regular files. */
263#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL)
264
265/* Mask out flags that are inappropriate for the given type of inode. */
266static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags)
267{
268 if (S_ISDIR(mode))
269 return flags;
270 else if (S_ISREG(mode))
271 return flags & EXT2_REG_FLMASK;
272 else
273 return flags & EXT2_OTHER_FLMASK;
274}
275
276/*
277 * ioctl commands
278 */
279#define EXT2_IOC_GETFLAGS FS_IOC_GETFLAGS
280#define EXT2_IOC_SETFLAGS FS_IOC_SETFLAGS
281#define EXT2_IOC_GETVERSION FS_IOC_GETVERSION
282#define EXT2_IOC_SETVERSION FS_IOC_SETVERSION
283#define EXT2_IOC_GETRSVSZ _IOR('f', 5, long)
284#define EXT2_IOC_SETRSVSZ _IOW('f', 6, long)
285
286/*
287 * ioctl commands in 32 bit emulation
288 */
289#define EXT2_IOC32_GETFLAGS FS_IOC32_GETFLAGS
290#define EXT2_IOC32_SETFLAGS FS_IOC32_SETFLAGS
291#define EXT2_IOC32_GETVERSION FS_IOC32_GETVERSION
292#define EXT2_IOC32_SETVERSION FS_IOC32_SETVERSION
293
294/*
295 * Structure of an inode on the disk
296 */
297struct ext2_inode {
298 __le16 i_mode; /* File mode */
299 __le16 i_uid; /* Low 16 bits of Owner Uid */
300 __le32 i_size; /* Size in bytes */
301 __le32 i_atime; /* Access time */
302 __le32 i_ctime; /* Creation time */
303 __le32 i_mtime; /* Modification time */
304 __le32 i_dtime; /* Deletion Time */
305 __le16 i_gid; /* Low 16 bits of Group Id */
306 __le16 i_links_count; /* Links count */
307 __le32 i_blocks; /* Blocks count */
308 __le32 i_flags; /* File flags */
309 union {
310 struct {
311 __le32 l_i_reserved1;
312 } linux1;
313 struct {
314 __le32 h_i_translator;
315 } hurd1;
316 struct {
317 __le32 m_i_reserved1;
318 } masix1;
319 } osd1; /* OS dependent 1 */
320 __le32 i_block[EXT2_N_BLOCKS];/* Pointers to blocks */
321 __le32 i_generation; /* File version (for NFS) */
322 __le32 i_file_acl; /* File ACL */
323 __le32 i_dir_acl; /* Directory ACL */
324 __le32 i_faddr; /* Fragment address */
325 union {
326 struct {
327 __u8 l_i_frag; /* Fragment number */
328 __u8 l_i_fsize; /* Fragment size */
329 __u16 i_pad1;
330 __le16 l_i_uid_high; /* these 2 fields */
331 __le16 l_i_gid_high; /* were reserved2[0] */
332 __u32 l_i_reserved2;
333 } linux2;
334 struct {
335 __u8 h_i_frag; /* Fragment number */
336 __u8 h_i_fsize; /* Fragment size */
337 __le16 h_i_mode_high;
338 __le16 h_i_uid_high;
339 __le16 h_i_gid_high;
340 __le32 h_i_author;
341 } hurd2;
342 struct {
343 __u8 m_i_frag; /* Fragment number */
344 __u8 m_i_fsize; /* Fragment size */
345 __u16 m_pad1;
346 __u32 m_i_reserved2[2];
347 } masix2;
348 } osd2; /* OS dependent 2 */
349};
350
351#define i_size_high i_dir_acl
352
353#define i_reserved1 osd1.linux1.l_i_reserved1
354#define i_frag osd2.linux2.l_i_frag
355#define i_fsize osd2.linux2.l_i_fsize
356#define i_uid_low i_uid
357#define i_gid_low i_gid
358#define i_uid_high osd2.linux2.l_i_uid_high
359#define i_gid_high osd2.linux2.l_i_gid_high
360#define i_reserved2 osd2.linux2.l_i_reserved2
361
362/*
363 * File system states
364 */
365#define EXT2_VALID_FS 0x0001 /* Unmounted cleanly */
366#define EXT2_ERROR_FS 0x0002 /* Errors detected */
367
368/*
369 * Mount flags
370 */
371#define EXT2_MOUNT_CHECK 0x000001 /* Do mount-time checks */
372#define EXT2_MOUNT_OLDALLOC 0x000002 /* Don't use the new Orlov allocator */
373#define EXT2_MOUNT_GRPID 0x000004 /* Create files with directory's group */
374#define EXT2_MOUNT_DEBUG 0x000008 /* Some debugging messages */
375#define EXT2_MOUNT_ERRORS_CONT 0x000010 /* Continue on errors */
376#define EXT2_MOUNT_ERRORS_RO 0x000020 /* Remount fs ro on errors */
377#define EXT2_MOUNT_ERRORS_PANIC 0x000040 /* Panic on errors */
378#define EXT2_MOUNT_MINIX_DF 0x000080 /* Mimics the Minix statfs */
379#define EXT2_MOUNT_NOBH 0x000100 /* No buffer_heads */
380#define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */
381#define EXT2_MOUNT_XATTR_USER 0x004000 /* Extended user attributes */
382#define EXT2_MOUNT_POSIX_ACL 0x008000 /* POSIX Access Control Lists */
383#define EXT2_MOUNT_XIP 0x010000 /* Execute in place */
384#define EXT2_MOUNT_USRQUOTA 0x020000 /* user quota */
385#define EXT2_MOUNT_GRPQUOTA 0x040000 /* group quota */
386#define EXT2_MOUNT_RESERVATION 0x080000 /* Preallocation */
387
388
389#define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt
390#define set_opt(o, opt) o |= EXT2_MOUNT_##opt
391#define test_opt(sb, opt) (EXT2_SB(sb)->s_mount_opt & \
392 EXT2_MOUNT_##opt)
393/*
394 * Maximal mount counts between two filesystem checks
395 */
396#define EXT2_DFL_MAX_MNT_COUNT 20 /* Allow 20 mounts */
397#define EXT2_DFL_CHECKINTERVAL 0 /* Don't use interval check */
398
399/*
400 * Behaviour when detecting errors
401 */
402#define EXT2_ERRORS_CONTINUE 1 /* Continue execution */
403#define EXT2_ERRORS_RO 2 /* Remount fs read-only */
404#define EXT2_ERRORS_PANIC 3 /* Panic */
405#define EXT2_ERRORS_DEFAULT EXT2_ERRORS_CONTINUE
406
407/*
408 * Structure of the super block
409 */
410struct ext2_super_block {
411 __le32 s_inodes_count; /* Inodes count */
412 __le32 s_blocks_count; /* Blocks count */
413 __le32 s_r_blocks_count; /* Reserved blocks count */
414 __le32 s_free_blocks_count; /* Free blocks count */
415 __le32 s_free_inodes_count; /* Free inodes count */
416 __le32 s_first_data_block; /* First Data Block */
417 __le32 s_log_block_size; /* Block size */
418 __le32 s_log_frag_size; /* Fragment size */
419 __le32 s_blocks_per_group; /* # Blocks per group */
420 __le32 s_frags_per_group; /* # Fragments per group */
421 __le32 s_inodes_per_group; /* # Inodes per group */
422 __le32 s_mtime; /* Mount time */
423 __le32 s_wtime; /* Write time */
424 __le16 s_mnt_count; /* Mount count */
425 __le16 s_max_mnt_count; /* Maximal mount count */
426 __le16 s_magic; /* Magic signature */
427 __le16 s_state; /* File system state */
428 __le16 s_errors; /* Behaviour when detecting errors */
429 __le16 s_minor_rev_level; /* minor revision level */
430 __le32 s_lastcheck; /* time of last check */
431 __le32 s_checkinterval; /* max. time between checks */
432 __le32 s_creator_os; /* OS */
433 __le32 s_rev_level; /* Revision level */
434 __le16 s_def_resuid; /* Default uid for reserved blocks */
435 __le16 s_def_resgid; /* Default gid for reserved blocks */
436 /*
437 * These fields are for EXT2_DYNAMIC_REV superblocks only.
438 *
439 * Note: the difference between the compatible feature set and
440 * the incompatible feature set is that if there is a bit set
441 * in the incompatible feature set that the kernel doesn't
442 * know about, it should refuse to mount the filesystem.
443 *
444 * e2fsck's requirements are more strict; if it doesn't know
445 * about a feature in either the compatible or incompatible
446 * feature set, it must abort and not try to meddle with
447 * things it doesn't understand...
448 */
449 __le32 s_first_ino; /* First non-reserved inode */
450 __le16 s_inode_size; /* size of inode structure */
451 __le16 s_block_group_nr; /* block group # of this superblock */
452 __le32 s_feature_compat; /* compatible feature set */
453 __le32 s_feature_incompat; /* incompatible feature set */
454 __le32 s_feature_ro_compat; /* readonly-compatible feature set */
455 __u8 s_uuid[16]; /* 128-bit uuid for volume */
456 char s_volume_name[16]; /* volume name */
457 char s_last_mounted[64]; /* directory where last mounted */
458 __le32 s_algorithm_usage_bitmap; /* For compression */
459 /*
460 * Performance hints. Directory preallocation should only
461 * happen if the EXT2_COMPAT_PREALLOC flag is on.
462 */
463 __u8 s_prealloc_blocks; /* Nr of blocks to try to preallocate*/
464 __u8 s_prealloc_dir_blocks; /* Nr to preallocate for dirs */
465 __u16 s_padding1;
466 /*
467 * Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
468 */
469 __u8 s_journal_uuid[16]; /* uuid of journal superblock */
470 __u32 s_journal_inum; /* inode number of journal file */
471 __u32 s_journal_dev; /* device number of journal file */
472 __u32 s_last_orphan; /* start of list of inodes to delete */
473 __u32 s_hash_seed[4]; /* HTREE hash seed */
474 __u8 s_def_hash_version; /* Default hash version to use */
475 __u8 s_reserved_char_pad;
476 __u16 s_reserved_word_pad;
477 __le32 s_default_mount_opts;
478 __le32 s_first_meta_bg; /* First metablock block group */
479 __u32 s_reserved[190]; /* Padding to the end of the block */
480};
481
482/*
483 * Codes for operating systems
484 */
485#define EXT2_OS_LINUX 0
486#define EXT2_OS_HURD 1
487#define EXT2_OS_MASIX 2
488#define EXT2_OS_FREEBSD 3
489#define EXT2_OS_LITES 4
490
491/*
492 * Revision levels
493 */
494#define EXT2_GOOD_OLD_REV 0 /* The good old (original) format */
495#define EXT2_DYNAMIC_REV 1 /* V2 format w/ dynamic inode sizes */
496
497#define EXT2_CURRENT_REV EXT2_GOOD_OLD_REV
498#define EXT2_MAX_SUPP_REV EXT2_DYNAMIC_REV
499
500#define EXT2_GOOD_OLD_INODE_SIZE 128
501
502/*
503 * Feature set definitions
504 */
505
506#define EXT2_HAS_COMPAT_FEATURE(sb,mask) \
507 ( EXT2_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
508#define EXT2_HAS_RO_COMPAT_FEATURE(sb,mask) \
509 ( EXT2_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
510#define EXT2_HAS_INCOMPAT_FEATURE(sb,mask) \
511 ( EXT2_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
512#define EXT2_SET_COMPAT_FEATURE(sb,mask) \
513 EXT2_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
514#define EXT2_SET_RO_COMPAT_FEATURE(sb,mask) \
515 EXT2_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
516#define EXT2_SET_INCOMPAT_FEATURE(sb,mask) \
517 EXT2_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
518#define EXT2_CLEAR_COMPAT_FEATURE(sb,mask) \
519 EXT2_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
520#define EXT2_CLEAR_RO_COMPAT_FEATURE(sb,mask) \
521 EXT2_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
522#define EXT2_CLEAR_INCOMPAT_FEATURE(sb,mask) \
523 EXT2_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
524
525#define EXT2_FEATURE_COMPAT_DIR_PREALLOC 0x0001
526#define EXT2_FEATURE_COMPAT_IMAGIC_INODES 0x0002
527#define EXT3_FEATURE_COMPAT_HAS_JOURNAL 0x0004
528#define EXT2_FEATURE_COMPAT_EXT_ATTR 0x0008
529#define EXT2_FEATURE_COMPAT_RESIZE_INO 0x0010
530#define EXT2_FEATURE_COMPAT_DIR_INDEX 0x0020
531#define EXT2_FEATURE_COMPAT_ANY 0xffffffff
532
533#define EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
534#define EXT2_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
535#define EXT2_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
536#define EXT2_FEATURE_RO_COMPAT_ANY 0xffffffff
537
538#define EXT2_FEATURE_INCOMPAT_COMPRESSION 0x0001
539#define EXT2_FEATURE_INCOMPAT_FILETYPE 0x0002
540#define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004
541#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008
542#define EXT2_FEATURE_INCOMPAT_META_BG 0x0010
543#define EXT2_FEATURE_INCOMPAT_ANY 0xffffffff
544
545#define EXT2_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
546#define EXT2_FEATURE_INCOMPAT_SUPP (EXT2_FEATURE_INCOMPAT_FILETYPE| \
547 EXT2_FEATURE_INCOMPAT_META_BG)
548#define EXT2_FEATURE_RO_COMPAT_SUPP (EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER| \
549 EXT2_FEATURE_RO_COMPAT_LARGE_FILE| \
550 EXT2_FEATURE_RO_COMPAT_BTREE_DIR)
551#define EXT2_FEATURE_RO_COMPAT_UNSUPPORTED ~EXT2_FEATURE_RO_COMPAT_SUPP
552#define EXT2_FEATURE_INCOMPAT_UNSUPPORTED ~EXT2_FEATURE_INCOMPAT_SUPP
553
554/*
555 * Default values for user and/or group using reserved blocks
556 */
557#define EXT2_DEF_RESUID 0
558#define EXT2_DEF_RESGID 0
559
560/*
561 * Default mount options
562 */
563#define EXT2_DEFM_DEBUG 0x0001
564#define EXT2_DEFM_BSDGROUPS 0x0002
565#define EXT2_DEFM_XATTR_USER 0x0004
566#define EXT2_DEFM_ACL 0x0008
567#define EXT2_DEFM_UID16 0x0010
568 /* Not used by ext2, but reserved for use by ext3 */
569#define EXT3_DEFM_JMODE 0x0060
570#define EXT3_DEFM_JMODE_DATA 0x0020
571#define EXT3_DEFM_JMODE_ORDERED 0x0040
572#define EXT3_DEFM_JMODE_WBACK 0x0060
573
574/*
575 * Structure of a directory entry
576 */
577
578struct ext2_dir_entry {
579 __le32 inode; /* Inode number */
580 __le16 rec_len; /* Directory entry length */
581 __le16 name_len; /* Name length */
582 char name[]; /* File name, up to EXT2_NAME_LEN */
583};
584
585/*
586 * The new version of the directory entry. Since EXT2 structures are
587 * stored in intel byte order, and the name_len field could never be
588 * bigger than 255 chars, it's safe to reclaim the extra byte for the
589 * file_type field.
590 */
591struct ext2_dir_entry_2 {
592 __le32 inode; /* Inode number */
593 __le16 rec_len; /* Directory entry length */
594 __u8 name_len; /* Name length */
595 __u8 file_type;
596 char name[]; /* File name, up to EXT2_NAME_LEN */
597};
598
599/*
600 * Ext2 directory file types. Only the low 3 bits are used. The
601 * other bits are reserved for now.
602 */
603enum {
604 EXT2_FT_UNKNOWN = 0,
605 EXT2_FT_REG_FILE = 1,
606 EXT2_FT_DIR = 2,
607 EXT2_FT_CHRDEV = 3,
608 EXT2_FT_BLKDEV = 4,
609 EXT2_FT_FIFO = 5,
610 EXT2_FT_SOCK = 6,
611 EXT2_FT_SYMLINK = 7,
612 EXT2_FT_MAX
613};
614
615/*
616 * EXT2_DIR_PAD defines the directory entries boundaries
617 *
618 * NOTE: It must be a multiple of 4
619 */
620#define EXT2_DIR_PAD 4
621#define EXT2_DIR_ROUND (EXT2_DIR_PAD - 1)
622#define EXT2_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT2_DIR_ROUND) & \
623 ~EXT2_DIR_ROUND)
624#define EXT2_MAX_REC_LEN ((1<<16)-1)
625
626static inline void verify_offsets(void)
627{
628#define A(x,y) BUILD_BUG_ON(x != offsetof(struct ext2_super_block, y));
629 A(EXT2_SB_MAGIC_OFFSET, s_magic);
630 A(EXT2_SB_BLOCKS_OFFSET, s_blocks_count);
631 A(EXT2_SB_BSIZE_OFFSET, s_log_block_size);
632#undef A
633}
3 634
4/* 635/*
5 * ext2 mount options 636 * ext2 mount options
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index be7a8d02c9a7..cfedb2cb0d8c 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -3,10 +3,7 @@
3 * Handler for storing security labels as extended attributes. 3 * Handler for storing security labels as extended attributes.
4 */ 4 */
5 5
6#include <linux/slab.h> 6#include "ext2.h"
7#include <linux/string.h>
8#include <linux/fs.h>
9#include <linux/ext2_fs.h>
10#include <linux/security.h> 7#include <linux/security.h>
11#include "xattr.h" 8#include "xattr.h"
12 9
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index 2989467d3595..7e192574c001 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -5,10 +5,7 @@
5 * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org> 5 * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
6 */ 6 */
7 7
8#include <linux/string.h> 8#include "ext2.h"
9#include <linux/capability.h>
10#include <linux/fs.h>
11#include <linux/ext2_fs.h>
12#include "xattr.h" 9#include "xattr.h"
13 10
14static size_t 11static size_t
diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c
index 322a56b2dfb1..1c3312858fcf 100644
--- a/fs/ext2/xip.c
+++ b/fs/ext2/xip.c
@@ -9,8 +9,6 @@
9#include <linux/fs.h> 9#include <linux/fs.h>
10#include <linux/genhd.h> 10#include <linux/genhd.h>
11#include <linux/buffer_head.h> 11#include <linux/buffer_head.h>
12#include <linux/ext2_fs_sb.h>
13#include <linux/ext2_fs.h>
14#include <linux/blkdev.h> 12#include <linux/blkdev.h>
15#include "ext2.h" 13#include "ext2.h"
16#include "xip.h" 14#include "xip.h"
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 3091f62e55b6..c76832c8d192 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -4,13 +4,7 @@
4 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de> 4 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
5 */ 5 */
6 6
7#include <linux/init.h> 7#include "ext3.h"
8#include <linux/sched.h>
9#include <linux/slab.h>
10#include <linux/capability.h>
11#include <linux/fs.h>
12#include <linux/ext3_jbd.h>
13#include <linux/ext3_fs.h>
14#include "xattr.h" 8#include "xattr.h"
15#include "acl.h" 9#include "acl.h"
16 10
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 1e036b79384c..baac1b129fba 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -11,17 +11,9 @@
11 * David S. Miller (davem@caip.rutgers.edu), 1995 11 * David S. Miller (davem@caip.rutgers.edu), 1995
12 */ 12 */
13 13
14#include <linux/time.h>
15#include <linux/capability.h>
16#include <linux/fs.h>
17#include <linux/slab.h>
18#include <linux/jbd.h>
19#include <linux/ext3_fs.h>
20#include <linux/ext3_jbd.h>
21#include <linux/quotaops.h> 14#include <linux/quotaops.h>
22#include <linux/buffer_head.h>
23#include <linux/blkdev.h> 15#include <linux/blkdev.h>
24#include <trace/events/ext3.h> 16#include "ext3.h"
25 17
26/* 18/*
27 * balloc.c contains the blocks allocation and deallocation routines 19 * balloc.c contains the blocks allocation and deallocation routines
diff --git a/fs/ext3/bitmap.c b/fs/ext3/bitmap.c
index 6afc39d80253..909d13e26560 100644
--- a/fs/ext3/bitmap.c
+++ b/fs/ext3/bitmap.c
@@ -7,9 +7,7 @@
7 * Universite Pierre et Marie Curie (Paris VI) 7 * Universite Pierre et Marie Curie (Paris VI)
8 */ 8 */
9 9
10#include <linux/buffer_head.h> 10#include "ext3.h"
11#include <linux/jbd.h>
12#include <linux/ext3_fs.h>
13 11
14#ifdef EXT3FS_DEBUG 12#ifdef EXT3FS_DEBUG
15 13
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 34f0a072b935..cc761ad8fa57 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -21,12 +21,7 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/fs.h> 24#include "ext3.h"
25#include <linux/jbd.h>
26#include <linux/ext3_fs.h>
27#include <linux/buffer_head.h>
28#include <linux/slab.h>
29#include <linux/rbtree.h>
30 25
31static unsigned char ext3_filetype_table[] = { 26static unsigned char ext3_filetype_table[] = {
32 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK 27 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
diff --git a/include/linux/ext3_fs.h b/fs/ext3/ext3.h
index f5a84eef6ed2..b6515fd7e56c 100644
--- a/include/linux/ext3_fs.h
+++ b/fs/ext3/ext3.h
@@ -1,5 +1,11 @@
1/* 1/*
2 * linux/include/linux/ext3_fs.h 2 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
3 *
4 * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
5 *
6 * This file is part of the Linux kernel and is made available under
7 * the terms of the GNU General Public License, version 2, or at your
8 * option, any later version, incorporated herein by reference.
3 * 9 *
4 * Copyright (C) 1992, 1993, 1994, 1995 10 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr) 11 * Remy Card (card@masi.ibp.fr)
@@ -13,12 +19,11 @@
13 * Copyright (C) 1991, 1992 Linus Torvalds 19 * Copyright (C) 1991, 1992 Linus Torvalds
14 */ 20 */
15 21
16#ifndef _LINUX_EXT3_FS_H 22#include <linux/fs.h>
17#define _LINUX_EXT3_FS_H 23#include <linux/jbd.h>
18
19#include <linux/types.h>
20#include <linux/magic.h> 24#include <linux/magic.h>
21#include <linux/bug.h> 25#include <linux/bug.h>
26#include <linux/blockgroup_lock.h>
22 27
23/* 28/*
24 * The second extended filesystem constants/structures 29 * The second extended filesystem constants/structures
@@ -75,29 +80,12 @@
75#define EXT3_MIN_BLOCK_SIZE 1024 80#define EXT3_MIN_BLOCK_SIZE 1024
76#define EXT3_MAX_BLOCK_SIZE 65536 81#define EXT3_MAX_BLOCK_SIZE 65536
77#define EXT3_MIN_BLOCK_LOG_SIZE 10 82#define EXT3_MIN_BLOCK_LOG_SIZE 10
78#ifdef __KERNEL__ 83#define EXT3_BLOCK_SIZE(s) ((s)->s_blocksize)
79# define EXT3_BLOCK_SIZE(s) ((s)->s_blocksize)
80#else
81# define EXT3_BLOCK_SIZE(s) (EXT3_MIN_BLOCK_SIZE << (s)->s_log_block_size)
82#endif
83#define EXT3_ADDR_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (__u32)) 84#define EXT3_ADDR_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (__u32))
84#ifdef __KERNEL__ 85#define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
85# define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
86#else
87# define EXT3_BLOCK_SIZE_BITS(s) ((s)->s_log_block_size + 10)
88#endif
89#ifdef __KERNEL__
90#define EXT3_ADDR_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_addr_per_block_bits) 86#define EXT3_ADDR_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_addr_per_block_bits)
91#define EXT3_INODE_SIZE(s) (EXT3_SB(s)->s_inode_size) 87#define EXT3_INODE_SIZE(s) (EXT3_SB(s)->s_inode_size)
92#define EXT3_FIRST_INO(s) (EXT3_SB(s)->s_first_ino) 88#define EXT3_FIRST_INO(s) (EXT3_SB(s)->s_first_ino)
93#else
94#define EXT3_INODE_SIZE(s) (((s)->s_rev_level == EXT3_GOOD_OLD_REV) ? \
95 EXT3_GOOD_OLD_INODE_SIZE : \
96 (s)->s_inode_size)
97#define EXT3_FIRST_INO(s) (((s)->s_rev_level == EXT3_GOOD_OLD_REV) ? \
98 EXT3_GOOD_OLD_FIRST_INO : \
99 (s)->s_first_ino)
100#endif
101 89
102/* 90/*
103 * Macro-instructions used to manage fragments 91 * Macro-instructions used to manage fragments
@@ -105,13 +93,8 @@
105#define EXT3_MIN_FRAG_SIZE 1024 93#define EXT3_MIN_FRAG_SIZE 1024
106#define EXT3_MAX_FRAG_SIZE 4096 94#define EXT3_MAX_FRAG_SIZE 4096
107#define EXT3_MIN_FRAG_LOG_SIZE 10 95#define EXT3_MIN_FRAG_LOG_SIZE 10
108#ifdef __KERNEL__ 96#define EXT3_FRAG_SIZE(s) (EXT3_SB(s)->s_frag_size)
109# define EXT3_FRAG_SIZE(s) (EXT3_SB(s)->s_frag_size) 97#define EXT3_FRAGS_PER_BLOCK(s) (EXT3_SB(s)->s_frags_per_block)
110# define EXT3_FRAGS_PER_BLOCK(s) (EXT3_SB(s)->s_frags_per_block)
111#else
112# define EXT3_FRAG_SIZE(s) (EXT3_MIN_FRAG_SIZE << (s)->s_log_frag_size)
113# define EXT3_FRAGS_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / EXT3_FRAG_SIZE(s))
114#endif
115 98
116/* 99/*
117 * Structure of a blocks group descriptor 100 * Structure of a blocks group descriptor
@@ -131,16 +114,10 @@ struct ext3_group_desc
131/* 114/*
132 * Macro-instructions used to manage group descriptors 115 * Macro-instructions used to manage group descriptors
133 */ 116 */
134#ifdef __KERNEL__ 117#define EXT3_BLOCKS_PER_GROUP(s) (EXT3_SB(s)->s_blocks_per_group)
135# define EXT3_BLOCKS_PER_GROUP(s) (EXT3_SB(s)->s_blocks_per_group) 118#define EXT3_DESC_PER_BLOCK(s) (EXT3_SB(s)->s_desc_per_block)
136# define EXT3_DESC_PER_BLOCK(s) (EXT3_SB(s)->s_desc_per_block) 119#define EXT3_INODES_PER_GROUP(s) (EXT3_SB(s)->s_inodes_per_group)
137# define EXT3_INODES_PER_GROUP(s) (EXT3_SB(s)->s_inodes_per_group) 120#define EXT3_DESC_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_desc_per_block_bits)
138# define EXT3_DESC_PER_BLOCK_BITS(s) (EXT3_SB(s)->s_desc_per_block_bits)
139#else
140# define EXT3_BLOCKS_PER_GROUP(s) ((s)->s_blocks_per_group)
141# define EXT3_DESC_PER_BLOCK(s) (EXT3_BLOCK_SIZE(s) / sizeof (struct ext3_group_desc))
142# define EXT3_INODES_PER_GROUP(s) ((s)->s_inodes_per_group)
143#endif
144 121
145/* 122/*
146 * Constants relative to the data blocks 123 * Constants relative to the data blocks
@@ -336,7 +313,6 @@ struct ext3_inode {
336 313
337#define i_size_high i_dir_acl 314#define i_size_high i_dir_acl
338 315
339#if defined(__KERNEL__) || defined(__linux__)
340#define i_reserved1 osd1.linux1.l_i_reserved1 316#define i_reserved1 osd1.linux1.l_i_reserved1
341#define i_frag osd2.linux2.l_i_frag 317#define i_frag osd2.linux2.l_i_frag
342#define i_fsize osd2.linux2.l_i_fsize 318#define i_fsize osd2.linux2.l_i_fsize
@@ -346,24 +322,6 @@ struct ext3_inode {
346#define i_gid_high osd2.linux2.l_i_gid_high 322#define i_gid_high osd2.linux2.l_i_gid_high
347#define i_reserved2 osd2.linux2.l_i_reserved2 323#define i_reserved2 osd2.linux2.l_i_reserved2
348 324
349#elif defined(__GNU__)
350
351#define i_translator osd1.hurd1.h_i_translator
352#define i_frag osd2.hurd2.h_i_frag;
353#define i_fsize osd2.hurd2.h_i_fsize;
354#define i_uid_high osd2.hurd2.h_i_uid_high
355#define i_gid_high osd2.hurd2.h_i_gid_high
356#define i_author osd2.hurd2.h_i_author
357
358#elif defined(__masix__)
359
360#define i_reserved1 osd1.masix1.m_i_reserved1
361#define i_frag osd2.masix2.m_i_frag
362#define i_fsize osd2.masix2.m_i_fsize
363#define i_reserved2 osd2.masix2.m_i_reserved2
364
365#endif /* defined(__KERNEL__) || defined(__linux__) */
366
367/* 325/*
368 * File system states 326 * File system states
369 */ 327 */
@@ -531,9 +489,197 @@ struct ext3_super_block {
531 __u32 s_reserved[162]; /* Padding to the end of the block */ 489 __u32 s_reserved[162]; /* Padding to the end of the block */
532}; 490};
533 491
534#ifdef __KERNEL__ 492/* data type for block offset of block group */
535#include <linux/ext3_fs_i.h> 493typedef int ext3_grpblk_t;
536#include <linux/ext3_fs_sb.h> 494
495/* data type for filesystem-wide blocks number */
496typedef unsigned long ext3_fsblk_t;
497
498#define E3FSBLK "%lu"
499
500struct ext3_reserve_window {
501 ext3_fsblk_t _rsv_start; /* First byte reserved */
502 ext3_fsblk_t _rsv_end; /* Last byte reserved or 0 */
503};
504
505struct ext3_reserve_window_node {
506 struct rb_node rsv_node;
507 __u32 rsv_goal_size;
508 __u32 rsv_alloc_hit;
509 struct ext3_reserve_window rsv_window;
510};
511
512struct ext3_block_alloc_info {
513 /* information about reservation window */
514 struct ext3_reserve_window_node rsv_window_node;
515 /*
516 * was i_next_alloc_block in ext3_inode_info
517 * is the logical (file-relative) number of the
518 * most-recently-allocated block in this file.
519 * We use this for detecting linearly ascending allocation requests.
520 */
521 __u32 last_alloc_logical_block;
522 /*
523 * Was i_next_alloc_goal in ext3_inode_info
524 * is the *physical* companion to i_next_alloc_block.
525 * it the physical block number of the block which was most-recentl
526 * allocated to this file. This give us the goal (target) for the next
527 * allocation when we detect linearly ascending requests.
528 */
529 ext3_fsblk_t last_alloc_physical_block;
530};
531
532#define rsv_start rsv_window._rsv_start
533#define rsv_end rsv_window._rsv_end
534
535/*
536 * third extended file system inode data in memory
537 */
538struct ext3_inode_info {
539 __le32 i_data[15]; /* unconverted */
540 __u32 i_flags;
541#ifdef EXT3_FRAGMENTS
542 __u32 i_faddr;
543 __u8 i_frag_no;
544 __u8 i_frag_size;
545#endif
546 ext3_fsblk_t i_file_acl;
547 __u32 i_dir_acl;
548 __u32 i_dtime;
549
550 /*
551 * i_block_group is the number of the block group which contains
552 * this file's inode. Constant across the lifetime of the inode,
553 * it is ued for making block allocation decisions - we try to
554 * place a file's data blocks near its inode block, and new inodes
555 * near to their parent directory's inode.
556 */
557 __u32 i_block_group;
558 unsigned long i_state_flags; /* Dynamic state flags for ext3 */
559
560 /* block reservation info */
561 struct ext3_block_alloc_info *i_block_alloc_info;
562
563 __u32 i_dir_start_lookup;
564#ifdef CONFIG_EXT3_FS_XATTR
565 /*
566 * Extended attributes can be read independently of the main file
567 * data. Taking i_mutex even when reading would cause contention
568 * between readers of EAs and writers of regular file data, so
569 * instead we synchronize on xattr_sem when reading or changing
570 * EAs.
571 */
572 struct rw_semaphore xattr_sem;
573#endif
574
575 struct list_head i_orphan; /* unlinked but open inodes */
576
577 /*
578 * i_disksize keeps track of what the inode size is ON DISK, not
579 * in memory. During truncate, i_size is set to the new size by
580 * the VFS prior to calling ext3_truncate(), but the filesystem won't
581 * set i_disksize to 0 until the truncate is actually under way.
582 *
583 * The intent is that i_disksize always represents the blocks which
584 * are used by this file. This allows recovery to restart truncate
585 * on orphans if we crash during truncate. We actually write i_disksize
586 * into the on-disk inode when writing inodes out, instead of i_size.
587 *
588 * The only time when i_disksize and i_size may be different is when
589 * a truncate is in progress. The only things which change i_disksize
590 * are ext3_get_block (growth) and ext3_truncate (shrinkth).
591 */
592 loff_t i_disksize;
593
594 /* on-disk additional length */
595 __u16 i_extra_isize;
596
597 /*
598 * truncate_mutex is for serialising ext3_truncate() against
599 * ext3_getblock(). In the 2.4 ext2 design, great chunks of inode's
600 * data tree are chopped off during truncate. We can't do that in
601 * ext3 because whenever we perform intermediate commits during
602 * truncate, the inode and all the metadata blocks *must* be in a
603 * consistent state which allows truncation of the orphans to restart
604 * during recovery. Hence we must fix the get_block-vs-truncate race
605 * by other means, so we have truncate_mutex.
606 */
607 struct mutex truncate_mutex;
608
609 /*
610 * Transactions that contain inode's metadata needed to complete
611 * fsync and fdatasync, respectively.
612 */
613 atomic_t i_sync_tid;
614 atomic_t i_datasync_tid;
615
616 struct inode vfs_inode;
617};
618
619/*
620 * third extended-fs super-block data in memory
621 */
622struct ext3_sb_info {
623 unsigned long s_frag_size; /* Size of a fragment in bytes */
624 unsigned long s_frags_per_block;/* Number of fragments per block */
625 unsigned long s_inodes_per_block;/* Number of inodes per block */
626 unsigned long s_frags_per_group;/* Number of fragments in a group */
627 unsigned long s_blocks_per_group;/* Number of blocks in a group */
628 unsigned long s_inodes_per_group;/* Number of inodes in a group */
629 unsigned long s_itb_per_group; /* Number of inode table blocks per group */
630 unsigned long s_gdb_count; /* Number of group descriptor blocks */
631 unsigned long s_desc_per_block; /* Number of group descriptors per block */
632 unsigned long s_groups_count; /* Number of groups in the fs */
633 unsigned long s_overhead_last; /* Last calculated overhead */
634 unsigned long s_blocks_last; /* Last seen block count */
635 struct buffer_head * s_sbh; /* Buffer containing the super block */
636 struct ext3_super_block * s_es; /* Pointer to the super block in the buffer */
637 struct buffer_head ** s_group_desc;
638 unsigned long s_mount_opt;
639 ext3_fsblk_t s_sb_block;
640 uid_t s_resuid;
641 gid_t s_resgid;
642 unsigned short s_mount_state;
643 unsigned short s_pad;
644 int s_addr_per_block_bits;
645 int s_desc_per_block_bits;
646 int s_inode_size;
647 int s_first_ino;
648 spinlock_t s_next_gen_lock;
649 u32 s_next_generation;
650 u32 s_hash_seed[4];
651 int s_def_hash_version;
652 int s_hash_unsigned; /* 3 if hash should be signed, 0 if not */
653 struct percpu_counter s_freeblocks_counter;
654 struct percpu_counter s_freeinodes_counter;
655 struct percpu_counter s_dirs_counter;
656 struct blockgroup_lock *s_blockgroup_lock;
657
658 /* root of the per fs reservation window tree */
659 spinlock_t s_rsv_window_lock;
660 struct rb_root s_rsv_window_root;
661 struct ext3_reserve_window_node s_rsv_window_head;
662
663 /* Journaling */
664 struct inode * s_journal_inode;
665 struct journal_s * s_journal;
666 struct list_head s_orphan;
667 struct mutex s_orphan_lock;
668 struct mutex s_resize_lock;
669 unsigned long s_commit_interval;
670 struct block_device *journal_bdev;
671#ifdef CONFIG_QUOTA
672 char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */
673 int s_jquota_fmt; /* Format of quota to use */
674#endif
675};
676
677static inline spinlock_t *
678sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
679{
680 return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
681}
682
537static inline struct ext3_sb_info * EXT3_SB(struct super_block *sb) 683static inline struct ext3_sb_info * EXT3_SB(struct super_block *sb)
538{ 684{
539 return sb->s_fs_info; 685 return sb->s_fs_info;
@@ -576,12 +722,6 @@ static inline void ext3_clear_inode_state(struct inode *inode, int bit)
576{ 722{
577 clear_bit(bit, &EXT3_I(inode)->i_state_flags); 723 clear_bit(bit, &EXT3_I(inode)->i_state_flags);
578} 724}
579#else
580/* Assume that user mode programs are passing in an ext3fs superblock, not
581 * a kernel struct super_block. This will allow us to call the feature-test
582 * macros from user land. */
583#define EXT3_SB(sb) (sb)
584#endif
585 725
586#define NEXT_ORPHAN(inode) EXT3_I(inode)->i_dtime 726#define NEXT_ORPHAN(inode) EXT3_I(inode)->i_dtime
587 727
@@ -771,8 +911,6 @@ static inline __le16 ext3_rec_len_to_disk(unsigned len)
771#define DX_HASH_HALF_MD4_UNSIGNED 4 911#define DX_HASH_HALF_MD4_UNSIGNED 4
772#define DX_HASH_TEA_UNSIGNED 5 912#define DX_HASH_TEA_UNSIGNED 5
773 913
774#ifdef __KERNEL__
775
776/* hash info structure used by the directory hash */ 914/* hash info structure used by the directory hash */
777struct dx_hash_info 915struct dx_hash_info
778{ 916{
@@ -974,7 +1112,211 @@ extern const struct inode_operations ext3_special_inode_operations;
974extern const struct inode_operations ext3_symlink_inode_operations; 1112extern const struct inode_operations ext3_symlink_inode_operations;
975extern const struct inode_operations ext3_fast_symlink_inode_operations; 1113extern const struct inode_operations ext3_fast_symlink_inode_operations;
976 1114
1115#define EXT3_JOURNAL(inode) (EXT3_SB((inode)->i_sb)->s_journal)
1116
1117/* Define the number of blocks we need to account to a transaction to
1118 * modify one block of data.
1119 *
1120 * We may have to touch one inode, one bitmap buffer, up to three
1121 * indirection blocks, the group and superblock summaries, and the data
1122 * block to complete the transaction. */
1123
1124#define EXT3_SINGLEDATA_TRANS_BLOCKS 8U
1125
1126/* Extended attribute operations touch at most two data buffers,
1127 * two bitmap buffers, and two group summaries, in addition to the inode
1128 * and the superblock, which are already accounted for. */
1129
1130#define EXT3_XATTR_TRANS_BLOCKS 6U
1131
1132/* Define the minimum size for a transaction which modifies data. This
1133 * needs to take into account the fact that we may end up modifying two
1134 * quota files too (one for the group, one for the user quota). The
1135 * superblock only gets updated once, of course, so don't bother
1136 * counting that again for the quota updates. */
1137
1138#define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \
1139 EXT3_XATTR_TRANS_BLOCKS - 2 + \
1140 EXT3_MAXQUOTAS_TRANS_BLOCKS(sb))
1141
1142/* Delete operations potentially hit one directory's namespace plus an
1143 * entire inode, plus arbitrary amounts of bitmap/indirection data. Be
1144 * generous. We can grow the delete transaction later if necessary. */
1145
1146#define EXT3_DELETE_TRANS_BLOCKS(sb) (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64)
1147
1148/* Define an arbitrary limit for the amount of data we will anticipate
1149 * writing to any given transaction. For unbounded transactions such as
1150 * write(2) and truncate(2) we can write more than this, but we always
1151 * start off at the maximum transaction size and grow the transaction
1152 * optimistically as we go. */
1153
1154#define EXT3_MAX_TRANS_DATA 64U
1155
1156/* We break up a large truncate or write transaction once the handle's
1157 * buffer credits gets this low, we need either to extend the
1158 * transaction or to start a new one. Reserve enough space here for
1159 * inode, bitmap, superblock, group and indirection updates for at least
1160 * one block, plus two quota updates. Quota allocations are not
1161 * needed. */
1162
1163#define EXT3_RESERVE_TRANS_BLOCKS 12U
1164
1165#define EXT3_INDEX_EXTRA_TRANS_BLOCKS 8
1166
1167#ifdef CONFIG_QUOTA
1168/* Amount of blocks needed for quota update - we know that the structure was
1169 * allocated so we need to update only inode+data */
1170#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
1171/* Amount of blocks needed for quota insert/delete - we do some block writes
1172 * but inode, sb and group updates are done only once */
1173#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
1174 (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
1175#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
1176 (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
1177#else
1178#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
1179#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
1180#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
1181#endif
1182#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
1183#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
1184#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
1185
1186int
1187ext3_mark_iloc_dirty(handle_t *handle,
1188 struct inode *inode,
1189 struct ext3_iloc *iloc);
1190
1191/*
1192 * On success, We end up with an outstanding reference count against
1193 * iloc->bh. This _must_ be cleaned up later.
1194 */
1195
1196int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
1197 struct ext3_iloc *iloc);
1198
1199int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
1200
1201/*
1202 * Wrapper functions with which ext3 calls into JBD. The intent here is
1203 * to allow these to be turned into appropriate stubs so ext3 can control
1204 * ext2 filesystems, so ext2+ext3 systems only nee one fs. This work hasn't
1205 * been done yet.
1206 */
1207
1208static inline void ext3_journal_release_buffer(handle_t *handle,
1209 struct buffer_head *bh)
1210{
1211 journal_release_buffer(handle, bh);
1212}
1213
1214void ext3_journal_abort_handle(const char *caller, const char *err_fn,
1215 struct buffer_head *bh, handle_t *handle, int err);
1216
1217int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
1218 struct buffer_head *bh);
1219
1220int __ext3_journal_get_write_access(const char *where, handle_t *handle,
1221 struct buffer_head *bh);
1222
1223int __ext3_journal_forget(const char *where, handle_t *handle,
1224 struct buffer_head *bh);
977 1225
978#endif /* __KERNEL__ */ 1226int __ext3_journal_revoke(const char *where, handle_t *handle,
1227 unsigned long blocknr, struct buffer_head *bh);
1228
1229int __ext3_journal_get_create_access(const char *where,
1230 handle_t *handle, struct buffer_head *bh);
1231
1232int __ext3_journal_dirty_metadata(const char *where,
1233 handle_t *handle, struct buffer_head *bh);
1234
1235#define ext3_journal_get_undo_access(handle, bh) \
1236 __ext3_journal_get_undo_access(__func__, (handle), (bh))
1237#define ext3_journal_get_write_access(handle, bh) \
1238 __ext3_journal_get_write_access(__func__, (handle), (bh))
1239#define ext3_journal_revoke(handle, blocknr, bh) \
1240 __ext3_journal_revoke(__func__, (handle), (blocknr), (bh))
1241#define ext3_journal_get_create_access(handle, bh) \
1242 __ext3_journal_get_create_access(__func__, (handle), (bh))
1243#define ext3_journal_dirty_metadata(handle, bh) \
1244 __ext3_journal_dirty_metadata(__func__, (handle), (bh))
1245#define ext3_journal_forget(handle, bh) \
1246 __ext3_journal_forget(__func__, (handle), (bh))
1247
1248int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
1249
1250handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks);
1251int __ext3_journal_stop(const char *where, handle_t *handle);
1252
1253static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
1254{
1255 return ext3_journal_start_sb(inode->i_sb, nblocks);
1256}
1257
1258#define ext3_journal_stop(handle) \
1259 __ext3_journal_stop(__func__, (handle))
1260
1261static inline handle_t *ext3_journal_current_handle(void)
1262{
1263 return journal_current_handle();
1264}
1265
1266static inline int ext3_journal_extend(handle_t *handle, int nblocks)
1267{
1268 return journal_extend(handle, nblocks);
1269}
1270
1271static inline int ext3_journal_restart(handle_t *handle, int nblocks)
1272{
1273 return journal_restart(handle, nblocks);
1274}
1275
1276static inline int ext3_journal_blocks_per_page(struct inode *inode)
1277{
1278 return journal_blocks_per_page(inode);
1279}
1280
1281static inline int ext3_journal_force_commit(journal_t *journal)
1282{
1283 return journal_force_commit(journal);
1284}
1285
1286/* super.c */
1287int ext3_force_commit(struct super_block *sb);
1288
1289static inline int ext3_should_journal_data(struct inode *inode)
1290{
1291 if (!S_ISREG(inode->i_mode))
1292 return 1;
1293 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
1294 return 1;
1295 if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
1296 return 1;
1297 return 0;
1298}
1299
1300static inline int ext3_should_order_data(struct inode *inode)
1301{
1302 if (!S_ISREG(inode->i_mode))
1303 return 0;
1304 if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
1305 return 0;
1306 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
1307 return 1;
1308 return 0;
1309}
1310
1311static inline int ext3_should_writeback_data(struct inode *inode)
1312{
1313 if (!S_ISREG(inode->i_mode))
1314 return 0;
1315 if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
1316 return 0;
1317 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
1318 return 1;
1319 return 0;
1320}
979 1321
980#endif /* _LINUX_EXT3_FS_H */ 1322#include <trace/events/ext3.h>
diff --git a/fs/ext3/ext3_jbd.c b/fs/ext3/ext3_jbd.c
index d401f148d74d..785a3261a26c 100644
--- a/fs/ext3/ext3_jbd.c
+++ b/fs/ext3/ext3_jbd.c
@@ -2,7 +2,7 @@
2 * Interface between ext3 and JBD 2 * Interface between ext3 and JBD
3 */ 3 */
4 4
5#include <linux/ext3_jbd.h> 5#include "ext3.h"
6 6
7int __ext3_journal_get_undo_access(const char *where, handle_t *handle, 7int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
8 struct buffer_head *bh) 8 struct buffer_head *bh)
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 724df69847dc..25cb413277e9 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -18,12 +18,8 @@
18 * (jj@sunsite.ms.mff.cuni.cz) 18 * (jj@sunsite.ms.mff.cuni.cz)
19 */ 19 */
20 20
21#include <linux/time.h>
22#include <linux/fs.h>
23#include <linux/jbd.h>
24#include <linux/quotaops.h> 21#include <linux/quotaops.h>
25#include <linux/ext3_fs.h> 22#include "ext3.h"
26#include <linux/ext3_jbd.h>
27#include "xattr.h" 23#include "xattr.h"
28#include "acl.h" 24#include "acl.h"
29 25
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index 1860ed356323..d4dff278cbd8 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -22,15 +22,9 @@
22 * we can depend on generic_block_fdatasync() to sync the data blocks. 22 * we can depend on generic_block_fdatasync() to sync the data blocks.
23 */ 23 */
24 24
25#include <linux/time.h>
26#include <linux/blkdev.h> 25#include <linux/blkdev.h>
27#include <linux/fs.h>
28#include <linux/sched.h>
29#include <linux/writeback.h> 26#include <linux/writeback.h>
30#include <linux/jbd.h> 27#include "ext3.h"
31#include <linux/ext3_fs.h>
32#include <linux/ext3_jbd.h>
33#include <trace/events/ext3.h>
34 28
35/* 29/*
36 * akpm: A new design for ext3_sync_file(). 30 * akpm: A new design for ext3_sync_file().
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
index 7d215b4d4f2e..d10231ddcf8a 100644
--- a/fs/ext3/hash.c
+++ b/fs/ext3/hash.c
@@ -9,9 +9,7 @@
9 * License. 9 * License.
10 */ 10 */
11 11
12#include <linux/fs.h> 12#include "ext3.h"
13#include <linux/jbd.h>
14#include <linux/ext3_fs.h>
15#include <linux/cryptohash.h> 13#include <linux/cryptohash.h>
16 14
17#define DELTA 0x9E3779B9 15#define DELTA 0x9E3779B9
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 1cde28438014..e3c39e4cec19 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -12,21 +12,10 @@
12 * David S. Miller (davem@caip.rutgers.edu), 1995 12 * David S. Miller (davem@caip.rutgers.edu), 1995
13 */ 13 */
14 14
15#include <linux/time.h>
16#include <linux/fs.h>
17#include <linux/jbd.h>
18#include <linux/ext3_fs.h>
19#include <linux/ext3_jbd.h>
20#include <linux/stat.h>
21#include <linux/string.h>
22#include <linux/quotaops.h> 15#include <linux/quotaops.h>
23#include <linux/buffer_head.h>
24#include <linux/random.h> 16#include <linux/random.h>
25#include <linux/bitops.h>
26#include <trace/events/ext3.h>
27
28#include <asm/byteorder.h>
29 17
18#include "ext3.h"
30#include "xattr.h" 19#include "xattr.h"
31#include "acl.h" 20#include "acl.h"
32 21
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 6d3418662b54..10d7812f6021 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -22,22 +22,12 @@
22 * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000 22 * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
23 */ 23 */
24 24
25#include <linux/fs.h>
26#include <linux/time.h>
27#include <linux/ext3_jbd.h>
28#include <linux/jbd.h>
29#include <linux/highuid.h> 25#include <linux/highuid.h>
30#include <linux/pagemap.h>
31#include <linux/quotaops.h> 26#include <linux/quotaops.h>
32#include <linux/string.h>
33#include <linux/buffer_head.h>
34#include <linux/writeback.h> 27#include <linux/writeback.h>
35#include <linux/mpage.h> 28#include <linux/mpage.h>
36#include <linux/uio.h>
37#include <linux/bio.h>
38#include <linux/fiemap.h>
39#include <linux/namei.h> 29#include <linux/namei.h>
40#include <trace/events/ext3.h> 30#include "ext3.h"
41#include "xattr.h" 31#include "xattr.h"
42#include "acl.h" 32#include "acl.h"
43 33
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 4af574ce4a46..677a5c27dc69 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -7,15 +7,10 @@
7 * Universite Pierre et Marie Curie (Paris VI) 7 * Universite Pierre et Marie Curie (Paris VI)
8 */ 8 */
9 9
10#include <linux/fs.h>
11#include <linux/jbd.h>
12#include <linux/capability.h>
13#include <linux/ext3_fs.h>
14#include <linux/ext3_jbd.h>
15#include <linux/mount.h> 10#include <linux/mount.h>
16#include <linux/time.h>
17#include <linux/compat.h> 11#include <linux/compat.h>
18#include <asm/uaccess.h> 12#include <asm/uaccess.h>
13#include "ext3.h"
19 14
20long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 15long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
21{ 16{
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index e8e211795e9f..d7940b24cf68 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -24,20 +24,8 @@
24 * Theodore Ts'o, 2002 24 * Theodore Ts'o, 2002
25 */ 25 */
26 26
27#include <linux/fs.h>
28#include <linux/pagemap.h>
29#include <linux/jbd.h>
30#include <linux/time.h>
31#include <linux/ext3_fs.h>
32#include <linux/ext3_jbd.h>
33#include <linux/fcntl.h>
34#include <linux/stat.h>
35#include <linux/string.h>
36#include <linux/quotaops.h> 27#include <linux/quotaops.h>
37#include <linux/buffer_head.h> 28#include "ext3.h"
38#include <linux/bio.h>
39#include <trace/events/ext3.h>
40
41#include "namei.h" 29#include "namei.h"
42#include "xattr.h" 30#include "xattr.h"
43#include "acl.h" 31#include "acl.h"
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 7916e4ce166a..0f814f3450de 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -11,10 +11,7 @@
11 11
12#define EXT3FS_DEBUG 12#define EXT3FS_DEBUG
13 13
14#include <linux/ext3_jbd.h> 14#include "ext3.h"
15
16#include <linux/errno.h>
17#include <linux/slab.h>
18 15
19 16
20#define outside(b, first, last) ((b) < (first) || (b) >= (last)) 17#define outside(b, first, last) ((b) < (first) || (b) >= (last))
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index e0b45b93327b..cf0b5921cf0f 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -17,22 +17,12 @@
17 */ 17 */
18 18
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/string.h>
21#include <linux/fs.h>
22#include <linux/time.h>
23#include <linux/jbd.h>
24#include <linux/ext3_fs.h>
25#include <linux/ext3_jbd.h>
26#include <linux/slab.h>
27#include <linux/init.h>
28#include <linux/blkdev.h> 20#include <linux/blkdev.h>
29#include <linux/parser.h> 21#include <linux/parser.h>
30#include <linux/buffer_head.h>
31#include <linux/exportfs.h> 22#include <linux/exportfs.h>
32#include <linux/vfs.h> 23#include <linux/statfs.h>
33#include <linux/random.h> 24#include <linux/random.h>
34#include <linux/mount.h> 25#include <linux/mount.h>
35#include <linux/namei.h>
36#include <linux/quotaops.h> 26#include <linux/quotaops.h>
37#include <linux/seq_file.h> 27#include <linux/seq_file.h>
38#include <linux/log2.h> 28#include <linux/log2.h>
@@ -40,13 +30,13 @@
40 30
41#include <asm/uaccess.h> 31#include <asm/uaccess.h>
42 32
33#define CREATE_TRACE_POINTS
34
35#include "ext3.h"
43#include "xattr.h" 36#include "xattr.h"
44#include "acl.h" 37#include "acl.h"
45#include "namei.h" 38#include "namei.h"
46 39
47#define CREATE_TRACE_POINTS
48#include <trace/events/ext3.h>
49
50#ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED 40#ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED
51 #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_ORDERED_DATA 41 #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_ORDERED_DATA
52#else 42#else
diff --git a/fs/ext3/symlink.c b/fs/ext3/symlink.c
index 7c4898207776..6b01c3eab1f3 100644
--- a/fs/ext3/symlink.c
+++ b/fs/ext3/symlink.c
@@ -17,10 +17,8 @@
17 * ext3 symlink handling code 17 * ext3 symlink handling code
18 */ 18 */
19 19
20#include <linux/fs.h>
21#include <linux/jbd.h>
22#include <linux/ext3_fs.h>
23#include <linux/namei.h> 20#include <linux/namei.h>
21#include "ext3.h"
24#include "xattr.h" 22#include "xattr.h"
25 23
26static void * ext3_follow_link(struct dentry *dentry, struct nameidata *nd) 24static void * ext3_follow_link(struct dentry *dentry, struct nameidata *nd)
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index d565759d82ee..d22ebb7a4f55 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -50,14 +50,9 @@
50 * by the buffer lock. 50 * by the buffer lock.
51 */ 51 */
52 52
53#include <linux/init.h> 53#include "ext3.h"
54#include <linux/fs.h>
55#include <linux/slab.h>
56#include <linux/ext3_jbd.h>
57#include <linux/ext3_fs.h>
58#include <linux/mbcache.h> 54#include <linux/mbcache.h>
59#include <linux/quotaops.h> 55#include <linux/quotaops.h>
60#include <linux/rwsem.h>
61#include "xattr.h" 56#include "xattr.h"
62#include "acl.h" 57#include "acl.h"
63 58
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
index ea26f2acab94..3387664ad70e 100644
--- a/fs/ext3/xattr_security.c
+++ b/fs/ext3/xattr_security.c
@@ -3,12 +3,8 @@
3 * Handler for storing security labels as extended attributes. 3 * Handler for storing security labels as extended attributes.
4 */ 4 */
5 5
6#include <linux/slab.h>
7#include <linux/string.h>
8#include <linux/fs.h>
9#include <linux/ext3_jbd.h>
10#include <linux/ext3_fs.h>
11#include <linux/security.h> 6#include <linux/security.h>
7#include "ext3.h"
12#include "xattr.h" 8#include "xattr.h"
13 9
14static size_t 10static size_t
diff --git a/fs/ext3/xattr_trusted.c b/fs/ext3/xattr_trusted.c
index 2526a8829de8..d75727cc67fa 100644
--- a/fs/ext3/xattr_trusted.c
+++ b/fs/ext3/xattr_trusted.c
@@ -5,11 +5,7 @@
5 * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org> 5 * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
6 */ 6 */
7 7
8#include <linux/string.h> 8#include "ext3.h"
9#include <linux/capability.h>
10#include <linux/fs.h>
11#include <linux/ext3_jbd.h>
12#include <linux/ext3_fs.h>
13#include "xattr.h" 9#include "xattr.h"
14 10
15static size_t 11static size_t
diff --git a/fs/ext3/xattr_user.c b/fs/ext3/xattr_user.c
index b32e473a1e33..5612af3567e0 100644
--- a/fs/ext3/xattr_user.c
+++ b/fs/ext3/xattr_user.c
@@ -5,10 +5,7 @@
5 * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org> 5 * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
6 */ 6 */
7 7
8#include <linux/string.h> 8#include "ext3.h"
9#include <linux/fs.h>
10#include <linux/ext3_jbd.h>
11#include <linux/ext3_fs.h>
12#include "xattr.h" 9#include "xattr.h"
13 10
14static size_t 11static size_t
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index ad56866d729a..b86786202643 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -32,24 +32,8 @@ static unsigned char ext4_filetype_table[] = {
32 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK 32 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
33}; 33};
34 34
35static int ext4_readdir(struct file *, void *, filldir_t);
36static int ext4_dx_readdir(struct file *filp, 35static int ext4_dx_readdir(struct file *filp,
37 void *dirent, filldir_t filldir); 36 void *dirent, filldir_t filldir);
38static int ext4_release_dir(struct inode *inode,
39 struct file *filp);
40
41const struct file_operations ext4_dir_operations = {
42 .llseek = ext4_llseek,
43 .read = generic_read_dir,
44 .readdir = ext4_readdir, /* we take BKL. needed?*/
45 .unlocked_ioctl = ext4_ioctl,
46#ifdef CONFIG_COMPAT
47 .compat_ioctl = ext4_compat_ioctl,
48#endif
49 .fsync = ext4_sync_file,
50 .release = ext4_release_dir,
51};
52
53 37
54static unsigned char get_dtype(struct super_block *sb, int filetype) 38static unsigned char get_dtype(struct super_block *sb, int filetype)
55{ 39{
@@ -60,6 +44,26 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
60 return (ext4_filetype_table[filetype]); 44 return (ext4_filetype_table[filetype]);
61} 45}
62 46
47/**
48 * Check if the given dir-inode refers to an htree-indexed directory
49 * (or a directory which chould potentially get coverted to use htree
50 * indexing).
51 *
52 * Return 1 if it is a dx dir, 0 if not
53 */
54static int is_dx_dir(struct inode *inode)
55{
56 struct super_block *sb = inode->i_sb;
57
58 if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
59 EXT4_FEATURE_COMPAT_DIR_INDEX) &&
60 ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
61 ((inode->i_size >> sb->s_blocksize_bits) == 1)))
62 return 1;
63
64 return 0;
65}
66
63/* 67/*
64 * Return 0 if the directory entry is OK, and 1 if there is a problem 68 * Return 0 if the directory entry is OK, and 1 if there is a problem
65 * 69 *
@@ -115,18 +119,13 @@ static int ext4_readdir(struct file *filp,
115 unsigned int offset; 119 unsigned int offset;
116 int i, stored; 120 int i, stored;
117 struct ext4_dir_entry_2 *de; 121 struct ext4_dir_entry_2 *de;
118 struct super_block *sb;
119 int err; 122 int err;
120 struct inode *inode = filp->f_path.dentry->d_inode; 123 struct inode *inode = filp->f_path.dentry->d_inode;
124 struct super_block *sb = inode->i_sb;
121 int ret = 0; 125 int ret = 0;
122 int dir_has_error = 0; 126 int dir_has_error = 0;
123 127
124 sb = inode->i_sb; 128 if (is_dx_dir(inode)) {
125
126 if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
127 EXT4_FEATURE_COMPAT_DIR_INDEX) &&
128 ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
129 ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
130 err = ext4_dx_readdir(filp, dirent, filldir); 129 err = ext4_dx_readdir(filp, dirent, filldir);
131 if (err != ERR_BAD_DX_DIR) { 130 if (err != ERR_BAD_DX_DIR) {
132 ret = err; 131 ret = err;
@@ -254,22 +253,134 @@ out:
254 return ret; 253 return ret;
255} 254}
256 255
256static inline int is_32bit_api(void)
257{
258#ifdef CONFIG_COMPAT
259 return is_compat_task();
260#else
261 return (BITS_PER_LONG == 32);
262#endif
263}
264
257/* 265/*
258 * These functions convert from the major/minor hash to an f_pos 266 * These functions convert from the major/minor hash to an f_pos
259 * value. 267 * value for dx directories
268 *
269 * Upper layer (for example NFS) should specify FMODE_32BITHASH or
270 * FMODE_64BITHASH explicitly. On the other hand, we allow ext4 to be mounted
271 * directly on both 32-bit and 64-bit nodes, under such case, neither
272 * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
273 */
274static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
275{
276 if ((filp->f_mode & FMODE_32BITHASH) ||
277 (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
278 return major >> 1;
279 else
280 return ((__u64)(major >> 1) << 32) | (__u64)minor;
281}
282
283static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
284{
285 if ((filp->f_mode & FMODE_32BITHASH) ||
286 (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
287 return (pos << 1) & 0xffffffff;
288 else
289 return ((pos >> 32) << 1) & 0xffffffff;
290}
291
292static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
293{
294 if ((filp->f_mode & FMODE_32BITHASH) ||
295 (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
296 return 0;
297 else
298 return pos & 0xffffffff;
299}
300
301/*
302 * Return 32- or 64-bit end-of-file for dx directories
303 */
304static inline loff_t ext4_get_htree_eof(struct file *filp)
305{
306 if ((filp->f_mode & FMODE_32BITHASH) ||
307 (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
308 return EXT4_HTREE_EOF_32BIT;
309 else
310 return EXT4_HTREE_EOF_64BIT;
311}
312
313
314/*
315 * ext4_dir_llseek() based on generic_file_llseek() to handle both
316 * non-htree and htree directories, where the "offset" is in terms
317 * of the filename hash value instead of the byte offset.
260 * 318 *
261 * Currently we only use major hash numer. This is unfortunate, but 319 * NOTE: offsets obtained *before* ext4_set_inode_flag(dir, EXT4_INODE_INDEX)
262 * on 32-bit machines, the same VFS interface is used for lseek and 320 * will be invalid once the directory was converted into a dx directory
263 * llseek, so if we use the 64 bit offset, then the 32-bit versions of
264 * lseek/telldir/seekdir will blow out spectacularly, and from within
265 * the ext2 low-level routine, we don't know if we're being called by
266 * a 64-bit version of the system call or the 32-bit version of the
267 * system call. Worse yet, NFSv2 only allows for a 32-bit readdir
268 * cookie. Sigh.
269 */ 321 */
270#define hash2pos(major, minor) (major >> 1) 322loff_t ext4_dir_llseek(struct file *file, loff_t offset, int origin)
271#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff) 323{
272#define pos2min_hash(pos) (0) 324 struct inode *inode = file->f_mapping->host;
325 loff_t ret = -EINVAL;
326 int dx_dir = is_dx_dir(inode);
327
328 mutex_lock(&inode->i_mutex);
329
330 /* NOTE: relative offsets with dx directories might not work
331 * as expected, as it is difficult to figure out the
332 * correct offset between dx hashes */
333
334 switch (origin) {
335 case SEEK_END:
336 if (unlikely(offset > 0))
337 goto out_err; /* not supported for directories */
338
339 /* so only negative offsets are left, does that have a
340 * meaning for directories at all? */
341 if (dx_dir)
342 offset += ext4_get_htree_eof(file);
343 else
344 offset += inode->i_size;
345 break;
346 case SEEK_CUR:
347 /*
348 * Here we special-case the lseek(fd, 0, SEEK_CUR)
349 * position-querying operation. Avoid rewriting the "same"
350 * f_pos value back to the file because a concurrent read(),
351 * write() or lseek() might have altered it
352 */
353 if (offset == 0) {
354 offset = file->f_pos;
355 goto out_ok;
356 }
357
358 offset += file->f_pos;
359 break;
360 }
361
362 if (unlikely(offset < 0))
363 goto out_err;
364
365 if (!dx_dir) {
366 if (offset > inode->i_sb->s_maxbytes)
367 goto out_err;
368 } else if (offset > ext4_get_htree_eof(file))
369 goto out_err;
370
371 /* Special lock needed here? */
372 if (offset != file->f_pos) {
373 file->f_pos = offset;
374 file->f_version = 0;
375 }
376
377out_ok:
378 ret = offset;
379out_err:
380 mutex_unlock(&inode->i_mutex);
381
382 return ret;
383}
273 384
274/* 385/*
275 * This structure holds the nodes of the red-black tree used to store 386 * This structure holds the nodes of the red-black tree used to store
@@ -330,15 +441,16 @@ static void free_rb_tree_fname(struct rb_root *root)
330} 441}
331 442
332 443
333static struct dir_private_info *ext4_htree_create_dir_info(loff_t pos) 444static struct dir_private_info *ext4_htree_create_dir_info(struct file *filp,
445 loff_t pos)
334{ 446{
335 struct dir_private_info *p; 447 struct dir_private_info *p;
336 448
337 p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL); 449 p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
338 if (!p) 450 if (!p)
339 return NULL; 451 return NULL;
340 p->curr_hash = pos2maj_hash(pos); 452 p->curr_hash = pos2maj_hash(filp, pos);
341 p->curr_minor_hash = pos2min_hash(pos); 453 p->curr_minor_hash = pos2min_hash(filp, pos);
342 return p; 454 return p;
343} 455}
344 456
@@ -430,7 +542,7 @@ static int call_filldir(struct file *filp, void *dirent,
430 inode->i_ino, current->comm); 542 inode->i_ino, current->comm);
431 return 0; 543 return 0;
432 } 544 }
433 curr_pos = hash2pos(fname->hash, fname->minor_hash); 545 curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
434 while (fname) { 546 while (fname) {
435 error = filldir(dirent, fname->name, 547 error = filldir(dirent, fname->name,
436 fname->name_len, curr_pos, 548 fname->name_len, curr_pos,
@@ -455,13 +567,13 @@ static int ext4_dx_readdir(struct file *filp,
455 int ret; 567 int ret;
456 568
457 if (!info) { 569 if (!info) {
458 info = ext4_htree_create_dir_info(filp->f_pos); 570 info = ext4_htree_create_dir_info(filp, filp->f_pos);
459 if (!info) 571 if (!info)
460 return -ENOMEM; 572 return -ENOMEM;
461 filp->private_data = info; 573 filp->private_data = info;
462 } 574 }
463 575
464 if (filp->f_pos == EXT4_HTREE_EOF) 576 if (filp->f_pos == ext4_get_htree_eof(filp))
465 return 0; /* EOF */ 577 return 0; /* EOF */
466 578
467 /* Some one has messed with f_pos; reset the world */ 579 /* Some one has messed with f_pos; reset the world */
@@ -469,8 +581,8 @@ static int ext4_dx_readdir(struct file *filp,
469 free_rb_tree_fname(&info->root); 581 free_rb_tree_fname(&info->root);
470 info->curr_node = NULL; 582 info->curr_node = NULL;
471 info->extra_fname = NULL; 583 info->extra_fname = NULL;
472 info->curr_hash = pos2maj_hash(filp->f_pos); 584 info->curr_hash = pos2maj_hash(filp, filp->f_pos);
473 info->curr_minor_hash = pos2min_hash(filp->f_pos); 585 info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
474 } 586 }
475 587
476 /* 588 /*
@@ -502,7 +614,7 @@ static int ext4_dx_readdir(struct file *filp,
502 if (ret < 0) 614 if (ret < 0)
503 return ret; 615 return ret;
504 if (ret == 0) { 616 if (ret == 0) {
505 filp->f_pos = EXT4_HTREE_EOF; 617 filp->f_pos = ext4_get_htree_eof(filp);
506 break; 618 break;
507 } 619 }
508 info->curr_node = rb_first(&info->root); 620 info->curr_node = rb_first(&info->root);
@@ -522,7 +634,7 @@ static int ext4_dx_readdir(struct file *filp,
522 info->curr_minor_hash = fname->minor_hash; 634 info->curr_minor_hash = fname->minor_hash;
523 } else { 635 } else {
524 if (info->next_hash == ~0) { 636 if (info->next_hash == ~0) {
525 filp->f_pos = EXT4_HTREE_EOF; 637 filp->f_pos = ext4_get_htree_eof(filp);
526 break; 638 break;
527 } 639 }
528 info->curr_hash = info->next_hash; 640 info->curr_hash = info->next_hash;
@@ -541,3 +653,15 @@ static int ext4_release_dir(struct inode *inode, struct file *filp)
541 653
542 return 0; 654 return 0;
543} 655}
656
657const struct file_operations ext4_dir_operations = {
658 .llseek = ext4_dir_llseek,
659 .read = generic_read_dir,
660 .readdir = ext4_readdir,
661 .unlocked_ioctl = ext4_ioctl,
662#ifdef CONFIG_COMPAT
663 .compat_ioctl = ext4_compat_ioctl,
664#endif
665 .fsync = ext4_sync_file,
666 .release = ext4_release_dir,
667};
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ded731ac8a32..ab2594a30f86 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1623,7 +1623,11 @@ struct dx_hash_info
1623 u32 *seed; 1623 u32 *seed;
1624}; 1624};
1625 1625
1626#define EXT4_HTREE_EOF 0x7fffffff 1626
1627/* 32 and 64 bit signed EOF for dx directories */
1628#define EXT4_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1)
1629#define EXT4_HTREE_EOF_64BIT ((1ULL << (64 - 1)) - 1)
1630
1627 1631
1628/* 1632/*
1629 * Control parameters used by ext4_htree_next_block 1633 * Control parameters used by ext4_htree_next_block
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index ac8f168c8ab4..fa8e4911d354 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -200,8 +200,8 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
200 return -1; 200 return -1;
201 } 201 }
202 hash = hash & ~1; 202 hash = hash & ~1;
203 if (hash == (EXT4_HTREE_EOF << 1)) 203 if (hash == (EXT4_HTREE_EOF_32BIT << 1))
204 hash = (EXT4_HTREE_EOF-1) << 1; 204 hash = (EXT4_HTREE_EOF_32BIT - 1) << 1;
205 hinfo->hash = hash; 205 hinfo->hash = hash;
206 hinfo->minor_hash = minor_hash; 206 hinfo->minor_hash = minor_hash;
207 return 0; 207 return 0;
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 74cd1f7f1f88..dcdeef169a69 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -60,6 +60,7 @@ void ext4_ioend_wait(struct inode *inode)
60static void put_io_page(struct ext4_io_page *io_page) 60static void put_io_page(struct ext4_io_page *io_page)
61{ 61{
62 if (atomic_dec_and_test(&io_page->p_count)) { 62 if (atomic_dec_and_test(&io_page->p_count)) {
63 end_page_writeback(io_page->p_page);
63 put_page(io_page->p_page); 64 put_page(io_page->p_page);
64 kmem_cache_free(io_page_cachep, io_page); 65 kmem_cache_free(io_page_cachep, io_page);
65 } 66 }
@@ -233,9 +234,9 @@ static void ext4_end_bio(struct bio *bio, int error)
233 } while (bh != head); 234 } while (bh != head);
234 } 235 }
235 236
236 if (atomic_read(&io_end->pages[i]->p_count) == 1) 237 put_io_page(io_end->pages[i]);
237 end_page_writeback(io_end->pages[i]->p_page);
238 } 238 }
239 io_end->num_io_pages = 0;
239 inode = io_end->inode; 240 inode = io_end->inode;
240 241
241 if (error) { 242 if (error) {
@@ -427,8 +428,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
427 * PageWriteback bit from the page to prevent the system from 428 * PageWriteback bit from the page to prevent the system from
428 * wedging later on. 429 * wedging later on.
429 */ 430 */
430 if (atomic_read(&io_page->p_count) == 1)
431 end_page_writeback(page);
432 put_io_page(io_page); 431 put_io_page(io_page);
433 return ret; 432 return ret;
434} 433}
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 22764c7c8382..75e7c1f3a080 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -32,20 +32,20 @@ void set_close_on_exec(unsigned int fd, int flag)
32 spin_lock(&files->file_lock); 32 spin_lock(&files->file_lock);
33 fdt = files_fdtable(files); 33 fdt = files_fdtable(files);
34 if (flag) 34 if (flag)
35 FD_SET(fd, fdt->close_on_exec); 35 __set_close_on_exec(fd, fdt);
36 else 36 else
37 FD_CLR(fd, fdt->close_on_exec); 37 __clear_close_on_exec(fd, fdt);
38 spin_unlock(&files->file_lock); 38 spin_unlock(&files->file_lock);
39} 39}
40 40
41static int get_close_on_exec(unsigned int fd) 41static bool get_close_on_exec(unsigned int fd)
42{ 42{
43 struct files_struct *files = current->files; 43 struct files_struct *files = current->files;
44 struct fdtable *fdt; 44 struct fdtable *fdt;
45 int res; 45 bool res;
46 rcu_read_lock(); 46 rcu_read_lock();
47 fdt = files_fdtable(files); 47 fdt = files_fdtable(files);
48 res = FD_ISSET(fd, fdt->close_on_exec); 48 res = close_on_exec(fd, fdt);
49 rcu_read_unlock(); 49 rcu_read_unlock();
50 return res; 50 return res;
51} 51}
@@ -90,15 +90,15 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
90 err = -EBUSY; 90 err = -EBUSY;
91 fdt = files_fdtable(files); 91 fdt = files_fdtable(files);
92 tofree = fdt->fd[newfd]; 92 tofree = fdt->fd[newfd];
93 if (!tofree && FD_ISSET(newfd, fdt->open_fds)) 93 if (!tofree && fd_is_open(newfd, fdt))
94 goto out_unlock; 94 goto out_unlock;
95 get_file(file); 95 get_file(file);
96 rcu_assign_pointer(fdt->fd[newfd], file); 96 rcu_assign_pointer(fdt->fd[newfd], file);
97 FD_SET(newfd, fdt->open_fds); 97 __set_open_fd(newfd, fdt);
98 if (flags & O_CLOEXEC) 98 if (flags & O_CLOEXEC)
99 FD_SET(newfd, fdt->close_on_exec); 99 __set_close_on_exec(newfd, fdt);
100 else 100 else
101 FD_CLR(newfd, fdt->close_on_exec); 101 __clear_close_on_exec(newfd, fdt);
102 spin_unlock(&files->file_lock); 102 spin_unlock(&files->file_lock);
103 103
104 if (tofree) 104 if (tofree)
diff --git a/fs/file.c b/fs/file.c
index 3c426de7203a..ba3f6053025c 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -40,7 +40,7 @@ int sysctl_nr_open_max = 1024 * 1024; /* raised later */
40 */ 40 */
41static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list); 41static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);
42 42
43static void *alloc_fdmem(unsigned int size) 43static void *alloc_fdmem(size_t size)
44{ 44{
45 /* 45 /*
46 * Very large allocations can stress page reclaim, so fall back to 46 * Very large allocations can stress page reclaim, so fall back to
@@ -142,7 +142,7 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
142static struct fdtable * alloc_fdtable(unsigned int nr) 142static struct fdtable * alloc_fdtable(unsigned int nr)
143{ 143{
144 struct fdtable *fdt; 144 struct fdtable *fdt;
145 char *data; 145 void *data;
146 146
147 /* 147 /*
148 * Figure out how many fds we actually want to support in this fdtable. 148 * Figure out how many fds we actually want to support in this fdtable.
@@ -172,14 +172,15 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
172 data = alloc_fdmem(nr * sizeof(struct file *)); 172 data = alloc_fdmem(nr * sizeof(struct file *));
173 if (!data) 173 if (!data)
174 goto out_fdt; 174 goto out_fdt;
175 fdt->fd = (struct file **)data; 175 fdt->fd = data;
176 data = alloc_fdmem(max_t(unsigned int, 176
177 data = alloc_fdmem(max_t(size_t,
177 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES)); 178 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
178 if (!data) 179 if (!data)
179 goto out_arr; 180 goto out_arr;
180 fdt->open_fds = (fd_set *)data; 181 fdt->open_fds = data;
181 data += nr / BITS_PER_BYTE; 182 data += nr / BITS_PER_BYTE;
182 fdt->close_on_exec = (fd_set *)data; 183 fdt->close_on_exec = data;
183 fdt->next = NULL; 184 fdt->next = NULL;
184 185
185 return fdt; 186 return fdt;
@@ -275,11 +276,11 @@ static int count_open_files(struct fdtable *fdt)
275 int i; 276 int i;
276 277
277 /* Find the last open fd */ 278 /* Find the last open fd */
278 for (i = size/(8*sizeof(long)); i > 0; ) { 279 for (i = size / BITS_PER_LONG; i > 0; ) {
279 if (fdt->open_fds->fds_bits[--i]) 280 if (fdt->open_fds[--i])
280 break; 281 break;
281 } 282 }
282 i = (i+1) * 8 * sizeof(long); 283 i = (i + 1) * BITS_PER_LONG;
283 return i; 284 return i;
284} 285}
285 286
@@ -306,8 +307,8 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
306 newf->next_fd = 0; 307 newf->next_fd = 0;
307 new_fdt = &newf->fdtab; 308 new_fdt = &newf->fdtab;
308 new_fdt->max_fds = NR_OPEN_DEFAULT; 309 new_fdt->max_fds = NR_OPEN_DEFAULT;
309 new_fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init; 310 new_fdt->close_on_exec = newf->close_on_exec_init;
310 new_fdt->open_fds = (fd_set *)&newf->open_fds_init; 311 new_fdt->open_fds = newf->open_fds_init;
311 new_fdt->fd = &newf->fd_array[0]; 312 new_fdt->fd = &newf->fd_array[0];
312 new_fdt->next = NULL; 313 new_fdt->next = NULL;
313 314
@@ -350,10 +351,8 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
350 old_fds = old_fdt->fd; 351 old_fds = old_fdt->fd;
351 new_fds = new_fdt->fd; 352 new_fds = new_fdt->fd;
352 353
353 memcpy(new_fdt->open_fds->fds_bits, 354 memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
354 old_fdt->open_fds->fds_bits, open_files/8); 355 memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);
355 memcpy(new_fdt->close_on_exec->fds_bits,
356 old_fdt->close_on_exec->fds_bits, open_files/8);
357 356
358 for (i = open_files; i != 0; i--) { 357 for (i = open_files; i != 0; i--) {
359 struct file *f = *old_fds++; 358 struct file *f = *old_fds++;
@@ -366,7 +365,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
366 * is partway through open(). So make sure that this 365 * is partway through open(). So make sure that this
367 * fd is available to the new process. 366 * fd is available to the new process.
368 */ 367 */
369 FD_CLR(open_files - i, new_fdt->open_fds); 368 __clear_open_fd(open_files - i, new_fdt);
370 } 369 }
371 rcu_assign_pointer(*new_fds++, f); 370 rcu_assign_pointer(*new_fds++, f);
372 } 371 }
@@ -379,11 +378,11 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
379 memset(new_fds, 0, size); 378 memset(new_fds, 0, size);
380 379
381 if (new_fdt->max_fds > open_files) { 380 if (new_fdt->max_fds > open_files) {
382 int left = (new_fdt->max_fds-open_files)/8; 381 int left = (new_fdt->max_fds - open_files) / 8;
383 int start = open_files / (8 * sizeof(unsigned long)); 382 int start = open_files / BITS_PER_LONG;
384 383
385 memset(&new_fdt->open_fds->fds_bits[start], 0, left); 384 memset(&new_fdt->open_fds[start], 0, left);
386 memset(&new_fdt->close_on_exec->fds_bits[start], 0, left); 385 memset(&new_fdt->close_on_exec[start], 0, left);
387 } 386 }
388 387
389 rcu_assign_pointer(newf->fdt, new_fdt); 388 rcu_assign_pointer(newf->fdt, new_fdt);
@@ -419,8 +418,8 @@ struct files_struct init_files = {
419 .fdtab = { 418 .fdtab = {
420 .max_fds = NR_OPEN_DEFAULT, 419 .max_fds = NR_OPEN_DEFAULT,
421 .fd = &init_files.fd_array[0], 420 .fd = &init_files.fd_array[0],
422 .close_on_exec = (fd_set *)&init_files.close_on_exec_init, 421 .close_on_exec = init_files.close_on_exec_init,
423 .open_fds = (fd_set *)&init_files.open_fds_init, 422 .open_fds = init_files.open_fds_init,
424 }, 423 },
425 .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock), 424 .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock),
426}; 425};
@@ -443,8 +442,7 @@ repeat:
443 fd = files->next_fd; 442 fd = files->next_fd;
444 443
445 if (fd < fdt->max_fds) 444 if (fd < fdt->max_fds)
446 fd = find_next_zero_bit(fdt->open_fds->fds_bits, 445 fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);
447 fdt->max_fds, fd);
448 446
449 error = expand_files(files, fd); 447 error = expand_files(files, fd);
450 if (error < 0) 448 if (error < 0)
@@ -460,11 +458,11 @@ repeat:
460 if (start <= files->next_fd) 458 if (start <= files->next_fd)
461 files->next_fd = fd + 1; 459 files->next_fd = fd + 1;
462 460
463 FD_SET(fd, fdt->open_fds); 461 __set_open_fd(fd, fdt);
464 if (flags & O_CLOEXEC) 462 if (flags & O_CLOEXEC)
465 FD_SET(fd, fdt->close_on_exec); 463 __set_close_on_exec(fd, fdt);
466 else 464 else
467 FD_CLR(fd, fdt->close_on_exec); 465 __clear_close_on_exec(fd, fdt);
468 error = fd; 466 error = fd;
469#if 1 467#if 1
470 /* Sanity check */ 468 /* Sanity check */
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 76834587a8a4..a3d2c9ee8d66 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -18,7 +18,6 @@
18#include <linux/mount.h> 18#include <linux/mount.h>
19#include <linux/fs.h> 19#include <linux/fs.h>
20#include <linux/gfs2_ondisk.h> 20#include <linux/gfs2_ondisk.h>
21#include <linux/ext2_fs.h>
22#include <linux/falloc.h> 21#include <linux/falloc.h>
23#include <linux/swap.h> 22#include <linux/swap.h>
24#include <linux/crc32.h> 23#include <linux/crc32.h>
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 926d02068a14..922f146e4235 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/kernel.h> 14#include <linux/kernel.h>
13#include <linux/slab.h> 15#include <linux/slab.h>
14#include <linux/fs.h> 16#include <linux/fs.h>
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 404111b016c9..2b60ce1996aa 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/kernel.h> 15#include <linux/kernel.h>
14#include <linux/jffs2.h> 16#include <linux/jffs2.h>
15#include <linux/mtd/mtd.h> 17#include <linux/mtd/mtd.h>
@@ -42,12 +44,13 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
42 44
43 tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index); 45 tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index);
44 if (IS_ERR(tsk)) { 46 if (IS_ERR(tsk)) {
45 printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %ld\n", -PTR_ERR(tsk)); 47 pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n",
48 -PTR_ERR(tsk));
46 complete(&c->gc_thread_exit); 49 complete(&c->gc_thread_exit);
47 ret = PTR_ERR(tsk); 50 ret = PTR_ERR(tsk);
48 } else { 51 } else {
49 /* Wait for it... */ 52 /* Wait for it... */
50 D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", tsk->pid)); 53 jffs2_dbg(1, "Garbage collect thread is pid %d\n", tsk->pid);
51 wait_for_completion(&c->gc_thread_start); 54 wait_for_completion(&c->gc_thread_start);
52 ret = tsk->pid; 55 ret = tsk->pid;
53 } 56 }
@@ -60,7 +63,7 @@ void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c)
60 int wait = 0; 63 int wait = 0;
61 spin_lock(&c->erase_completion_lock); 64 spin_lock(&c->erase_completion_lock);
62 if (c->gc_task) { 65 if (c->gc_task) {
63 D1(printk(KERN_DEBUG "jffs2: Killing GC task %d\n", c->gc_task->pid)); 66 jffs2_dbg(1, "Killing GC task %d\n", c->gc_task->pid);
64 send_sig(SIGKILL, c->gc_task, 1); 67 send_sig(SIGKILL, c->gc_task, 1);
65 wait = 1; 68 wait = 1;
66 } 69 }
@@ -90,7 +93,7 @@ static int jffs2_garbage_collect_thread(void *_c)
90 if (!jffs2_thread_should_wake(c)) { 93 if (!jffs2_thread_should_wake(c)) {
91 set_current_state (TASK_INTERRUPTIBLE); 94 set_current_state (TASK_INTERRUPTIBLE);
92 spin_unlock(&c->erase_completion_lock); 95 spin_unlock(&c->erase_completion_lock);
93 D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n")); 96 jffs2_dbg(1, "%s(): sleeping...\n", __func__);
94 schedule(); 97 schedule();
95 } else 98 } else
96 spin_unlock(&c->erase_completion_lock); 99 spin_unlock(&c->erase_completion_lock);
@@ -109,7 +112,7 @@ static int jffs2_garbage_collect_thread(void *_c)
109 schedule_timeout_interruptible(msecs_to_jiffies(50)); 112 schedule_timeout_interruptible(msecs_to_jiffies(50));
110 113
111 if (kthread_should_stop()) { 114 if (kthread_should_stop()) {
112 D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): kthread_stop() called.\n")); 115 jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__);
113 goto die; 116 goto die;
114 } 117 }
115 118
@@ -126,28 +129,32 @@ static int jffs2_garbage_collect_thread(void *_c)
126 129
127 switch(signr) { 130 switch(signr) {
128 case SIGSTOP: 131 case SIGSTOP:
129 D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n")); 132 jffs2_dbg(1, "%s(): SIGSTOP received\n",
133 __func__);
130 set_current_state(TASK_STOPPED); 134 set_current_state(TASK_STOPPED);
131 schedule(); 135 schedule();
132 break; 136 break;
133 137
134 case SIGKILL: 138 case SIGKILL:
135 D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n")); 139 jffs2_dbg(1, "%s(): SIGKILL received\n",
140 __func__);
136 goto die; 141 goto die;
137 142
138 case SIGHUP: 143 case SIGHUP:
139 D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n")); 144 jffs2_dbg(1, "%s(): SIGHUP received\n",
145 __func__);
140 break; 146 break;
141 default: 147 default:
142 D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr)); 148 jffs2_dbg(1, "%s(): signal %ld received\n",
149 __func__, signr);
143 } 150 }
144 } 151 }
145 /* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */ 152 /* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
146 disallow_signal(SIGHUP); 153 disallow_signal(SIGHUP);
147 154
148 D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n")); 155 jffs2_dbg(1, "%s(): pass\n", __func__);
149 if (jffs2_garbage_collect_pass(c) == -ENOSPC) { 156 if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
150 printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n"); 157 pr_notice("No space for garbage collection. Aborting GC thread\n");
151 goto die; 158 goto die;
152 } 159 }
153 } 160 }
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index 3005ec4520ad..a3750f902adc 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/kernel.h> 15#include <linux/kernel.h>
14#include <linux/sched.h> 16#include <linux/sched.h>
15#include <linux/slab.h> 17#include <linux/slab.h>
@@ -307,8 +309,8 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
307 trying to GC to make more space. It'll be a fruitless task */ 309 trying to GC to make more space. It'll be a fruitless task */
308 c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); 310 c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
309 311
310 dbg_fsbuild("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", 312 dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
311 c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); 313 c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
312 dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n", 314 dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n",
313 c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024); 315 c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024);
314 dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n", 316 dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n",
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index 96ed3c9ec3fc..4849a4c9a0e2 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -12,6 +12,8 @@
12 * 12 *
13 */ 13 */
14 14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
15#include "compr.h" 17#include "compr.h"
16 18
17static DEFINE_SPINLOCK(jffs2_compressor_list_lock); 19static DEFINE_SPINLOCK(jffs2_compressor_list_lock);
@@ -79,7 +81,7 @@ static int jffs2_selected_compress(u8 compr, unsigned char *data_in,
79 81
80 output_buf = kmalloc(*cdatalen, GFP_KERNEL); 82 output_buf = kmalloc(*cdatalen, GFP_KERNEL);
81 if (!output_buf) { 83 if (!output_buf) {
82 printk(KERN_WARNING "JFFS2: No memory for compressor allocation. Compression failed.\n"); 84 pr_warn("No memory for compressor allocation. Compression failed.\n");
83 return ret; 85 return ret;
84 } 86 }
85 orig_slen = *datalen; 87 orig_slen = *datalen;
@@ -188,7 +190,8 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
188 tmp_buf = kmalloc(orig_slen, GFP_KERNEL); 190 tmp_buf = kmalloc(orig_slen, GFP_KERNEL);
189 spin_lock(&jffs2_compressor_list_lock); 191 spin_lock(&jffs2_compressor_list_lock);
190 if (!tmp_buf) { 192 if (!tmp_buf) {
191 printk(KERN_WARNING "JFFS2: No memory for compressor allocation. (%d bytes)\n", orig_slen); 193 pr_warn("No memory for compressor allocation. (%d bytes)\n",
194 orig_slen);
192 continue; 195 continue;
193 } 196 }
194 else { 197 else {
@@ -235,7 +238,7 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
235 cpage_out, datalen, cdatalen); 238 cpage_out, datalen, cdatalen);
236 break; 239 break;
237 default: 240 default:
238 printk(KERN_ERR "JFFS2: unknown compression mode.\n"); 241 pr_err("unknown compression mode\n");
239 } 242 }
240 243
241 if (ret == JFFS2_COMPR_NONE) { 244 if (ret == JFFS2_COMPR_NONE) {
@@ -277,7 +280,8 @@ int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
277 ret = this->decompress(cdata_in, data_out, cdatalen, datalen); 280 ret = this->decompress(cdata_in, data_out, cdatalen, datalen);
278 spin_lock(&jffs2_compressor_list_lock); 281 spin_lock(&jffs2_compressor_list_lock);
279 if (ret) { 282 if (ret) {
280 printk(KERN_WARNING "Decompressor \"%s\" returned %d\n", this->name, ret); 283 pr_warn("Decompressor \"%s\" returned %d\n",
284 this->name, ret);
281 } 285 }
282 else { 286 else {
283 this->stat_decompr_blocks++; 287 this->stat_decompr_blocks++;
@@ -287,7 +291,7 @@ int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
287 return ret; 291 return ret;
288 } 292 }
289 } 293 }
290 printk(KERN_WARNING "JFFS2 compression type 0x%02x not available.\n", comprtype); 294 pr_warn("compression type 0x%02x not available\n", comprtype);
291 spin_unlock(&jffs2_compressor_list_lock); 295 spin_unlock(&jffs2_compressor_list_lock);
292 return -EIO; 296 return -EIO;
293 } 297 }
@@ -299,7 +303,7 @@ int jffs2_register_compressor(struct jffs2_compressor *comp)
299 struct jffs2_compressor *this; 303 struct jffs2_compressor *this;
300 304
301 if (!comp->name) { 305 if (!comp->name) {
302 printk(KERN_WARNING "NULL compressor name at registering JFFS2 compressor. Failed.\n"); 306 pr_warn("NULL compressor name at registering JFFS2 compressor. Failed.\n");
303 return -1; 307 return -1;
304 } 308 }
305 comp->compr_buf_size=0; 309 comp->compr_buf_size=0;
@@ -309,7 +313,7 @@ int jffs2_register_compressor(struct jffs2_compressor *comp)
309 comp->stat_compr_new_size=0; 313 comp->stat_compr_new_size=0;
310 comp->stat_compr_blocks=0; 314 comp->stat_compr_blocks=0;
311 comp->stat_decompr_blocks=0; 315 comp->stat_decompr_blocks=0;
312 D1(printk(KERN_DEBUG "Registering JFFS2 compressor \"%s\"\n", comp->name)); 316 jffs2_dbg(1, "Registering JFFS2 compressor \"%s\"\n", comp->name);
313 317
314 spin_lock(&jffs2_compressor_list_lock); 318 spin_lock(&jffs2_compressor_list_lock);
315 319
@@ -332,15 +336,15 @@ out:
332 336
333int jffs2_unregister_compressor(struct jffs2_compressor *comp) 337int jffs2_unregister_compressor(struct jffs2_compressor *comp)
334{ 338{
335 D2(struct jffs2_compressor *this;) 339 D2(struct jffs2_compressor *this);
336 340
337 D1(printk(KERN_DEBUG "Unregistering JFFS2 compressor \"%s\"\n", comp->name)); 341 jffs2_dbg(1, "Unregistering JFFS2 compressor \"%s\"\n", comp->name);
338 342
339 spin_lock(&jffs2_compressor_list_lock); 343 spin_lock(&jffs2_compressor_list_lock);
340 344
341 if (comp->usecount) { 345 if (comp->usecount) {
342 spin_unlock(&jffs2_compressor_list_lock); 346 spin_unlock(&jffs2_compressor_list_lock);
343 printk(KERN_WARNING "JFFS2: Compressor module is in use. Unregister failed.\n"); 347 pr_warn("Compressor module is in use. Unregister failed.\n");
344 return -1; 348 return -1;
345 } 349 }
346 list_del(&comp->list); 350 list_del(&comp->list);
@@ -377,17 +381,17 @@ int __init jffs2_compressors_init(void)
377/* Setting default compression mode */ 381/* Setting default compression mode */
378#ifdef CONFIG_JFFS2_CMODE_NONE 382#ifdef CONFIG_JFFS2_CMODE_NONE
379 jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; 383 jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
380 D1(printk(KERN_INFO "JFFS2: default compression mode: none\n");) 384 jffs2_dbg(1, "default compression mode: none\n");
381#else 385#else
382#ifdef CONFIG_JFFS2_CMODE_SIZE 386#ifdef CONFIG_JFFS2_CMODE_SIZE
383 jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE; 387 jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
384 D1(printk(KERN_INFO "JFFS2: default compression mode: size\n");) 388 jffs2_dbg(1, "default compression mode: size\n");
385#else 389#else
386#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO 390#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO
387 jffs2_compression_mode = JFFS2_COMPR_MODE_FAVOURLZO; 391 jffs2_compression_mode = JFFS2_COMPR_MODE_FAVOURLZO;
388 D1(printk(KERN_INFO "JFFS2: default compression mode: favourlzo\n");) 392 jffs2_dbg(1, "default compression mode: favourlzo\n");
389#else 393#else
390 D1(printk(KERN_INFO "JFFS2: default compression mode: priority\n");) 394 jffs2_dbg(1, "default compression mode: priority\n");
391#endif 395#endif
392#endif 396#endif
393#endif 397#endif
diff --git a/fs/jffs2/compr_lzo.c b/fs/jffs2/compr_lzo.c
index af186ee674d8..c553bd6506da 100644
--- a/fs/jffs2/compr_lzo.c
+++ b/fs/jffs2/compr_lzo.c
@@ -33,7 +33,6 @@ static int __init alloc_workspace(void)
33 lzo_compress_buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE)); 33 lzo_compress_buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE));
34 34
35 if (!lzo_mem || !lzo_compress_buf) { 35 if (!lzo_mem || !lzo_compress_buf) {
36 printk(KERN_WARNING "Failed to allocate lzo deflate workspace\n");
37 free_workspace(); 36 free_workspace();
38 return -ENOMEM; 37 return -ENOMEM;
39 } 38 }
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
index 9e7cec808c4c..92e0644bf867 100644
--- a/fs/jffs2/compr_rubin.c
+++ b/fs/jffs2/compr_rubin.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/string.h> 15#include <linux/string.h>
14#include <linux/types.h> 16#include <linux/types.h>
15#include <linux/jffs2.h> 17#include <linux/jffs2.h>
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index 5a001020c542..0b9a1e44e833 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -14,6 +14,8 @@
14#error "The userspace support got too messy and was removed. Update your mkfs.jffs2" 14#error "The userspace support got too messy and was removed. Update your mkfs.jffs2"
15#endif 15#endif
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/kernel.h> 19#include <linux/kernel.h>
18#include <linux/zlib.h> 20#include <linux/zlib.h>
19#include <linux/zutil.h> 21#include <linux/zutil.h>
@@ -42,18 +44,18 @@ static int __init alloc_workspaces(void)
42{ 44{
43 def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS, 45 def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
44 MAX_MEM_LEVEL)); 46 MAX_MEM_LEVEL));
45 if (!def_strm.workspace) { 47 if (!def_strm.workspace)
46 printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL));
47 return -ENOMEM; 48 return -ENOMEM;
48 } 49
49 D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL))); 50 jffs2_dbg(1, "Allocated %d bytes for deflate workspace\n",
51 zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL));
50 inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); 52 inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
51 if (!inf_strm.workspace) { 53 if (!inf_strm.workspace) {
52 printk(KERN_WARNING "Failed to allocate %d bytes for inflate workspace\n", zlib_inflate_workspacesize());
53 vfree(def_strm.workspace); 54 vfree(def_strm.workspace);
54 return -ENOMEM; 55 return -ENOMEM;
55 } 56 }
56 D1(printk(KERN_DEBUG "Allocated %d bytes for inflate workspace\n", zlib_inflate_workspacesize())); 57 jffs2_dbg(1, "Allocated %d bytes for inflate workspace\n",
58 zlib_inflate_workspacesize());
57 return 0; 59 return 0;
58} 60}
59 61
@@ -79,7 +81,7 @@ static int jffs2_zlib_compress(unsigned char *data_in,
79 mutex_lock(&deflate_mutex); 81 mutex_lock(&deflate_mutex);
80 82
81 if (Z_OK != zlib_deflateInit(&def_strm, 3)) { 83 if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
82 printk(KERN_WARNING "deflateInit failed\n"); 84 pr_warn("deflateInit failed\n");
83 mutex_unlock(&deflate_mutex); 85 mutex_unlock(&deflate_mutex);
84 return -1; 86 return -1;
85 } 87 }
@@ -93,13 +95,14 @@ static int jffs2_zlib_compress(unsigned char *data_in,
93 while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) { 95 while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) {
94 def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE); 96 def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE);
95 def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out); 97 def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out);
96 D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n", 98 jffs2_dbg(1, "calling deflate with avail_in %d, avail_out %d\n",
97 def_strm.avail_in, def_strm.avail_out)); 99 def_strm.avail_in, def_strm.avail_out);
98 ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH); 100 ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH);
99 D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", 101 jffs2_dbg(1, "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n",
100 def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out)); 102 def_strm.avail_in, def_strm.avail_out,
103 def_strm.total_in, def_strm.total_out);
101 if (ret != Z_OK) { 104 if (ret != Z_OK) {
102 D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret)); 105 jffs2_dbg(1, "deflate in loop returned %d\n", ret);
103 zlib_deflateEnd(&def_strm); 106 zlib_deflateEnd(&def_strm);
104 mutex_unlock(&deflate_mutex); 107 mutex_unlock(&deflate_mutex);
105 return -1; 108 return -1;
@@ -111,20 +114,20 @@ static int jffs2_zlib_compress(unsigned char *data_in,
111 zlib_deflateEnd(&def_strm); 114 zlib_deflateEnd(&def_strm);
112 115
113 if (ret != Z_STREAM_END) { 116 if (ret != Z_STREAM_END) {
114 D1(printk(KERN_DEBUG "final deflate returned %d\n", ret)); 117 jffs2_dbg(1, "final deflate returned %d\n", ret);
115 ret = -1; 118 ret = -1;
116 goto out; 119 goto out;
117 } 120 }
118 121
119 if (def_strm.total_out >= def_strm.total_in) { 122 if (def_strm.total_out >= def_strm.total_in) {
120 D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld; failing\n", 123 jffs2_dbg(1, "zlib compressed %ld bytes into %ld; failing\n",
121 def_strm.total_in, def_strm.total_out)); 124 def_strm.total_in, def_strm.total_out);
122 ret = -1; 125 ret = -1;
123 goto out; 126 goto out;
124 } 127 }
125 128
126 D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld\n", 129 jffs2_dbg(1, "zlib compressed %ld bytes into %ld\n",
127 def_strm.total_in, def_strm.total_out)); 130 def_strm.total_in, def_strm.total_out);
128 131
129 *dstlen = def_strm.total_out; 132 *dstlen = def_strm.total_out;
130 *sourcelen = def_strm.total_in; 133 *sourcelen = def_strm.total_in;
@@ -157,18 +160,18 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
157 ((data_in[0] & 0x0f) == Z_DEFLATED) && 160 ((data_in[0] & 0x0f) == Z_DEFLATED) &&
158 !(((data_in[0]<<8) + data_in[1]) % 31)) { 161 !(((data_in[0]<<8) + data_in[1]) % 31)) {
159 162
160 D2(printk(KERN_DEBUG "inflate skipping adler32\n")); 163 jffs2_dbg(2, "inflate skipping adler32\n");
161 wbits = -((data_in[0] >> 4) + 8); 164 wbits = -((data_in[0] >> 4) + 8);
162 inf_strm.next_in += 2; 165 inf_strm.next_in += 2;
163 inf_strm.avail_in -= 2; 166 inf_strm.avail_in -= 2;
164 } else { 167 } else {
165 /* Let this remain D1 for now -- it should never happen */ 168 /* Let this remain D1 for now -- it should never happen */
166 D1(printk(KERN_DEBUG "inflate not skipping adler32\n")); 169 jffs2_dbg(1, "inflate not skipping adler32\n");
167 } 170 }
168 171
169 172
170 if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) { 173 if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
171 printk(KERN_WARNING "inflateInit failed\n"); 174 pr_warn("inflateInit failed\n");
172 mutex_unlock(&inflate_mutex); 175 mutex_unlock(&inflate_mutex);
173 return 1; 176 return 1;
174 } 177 }
@@ -176,7 +179,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in,
176 while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK) 179 while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK)
177 ; 180 ;
178 if (ret != Z_STREAM_END) { 181 if (ret != Z_STREAM_END) {
179 printk(KERN_NOTICE "inflate returned %d\n", ret); 182 pr_notice("inflate returned %d\n", ret);
180 } 183 }
181 zlib_inflateEnd(&inf_strm); 184 zlib_inflateEnd(&inf_strm);
182 mutex_unlock(&inflate_mutex); 185 mutex_unlock(&inflate_mutex);
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
index e0b76c87a91a..1090eb64b90d 100644
--- a/fs/jffs2/debug.c
+++ b/fs/jffs2/debug.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/kernel.h> 15#include <linux/kernel.h>
14#include <linux/types.h> 16#include <linux/types.h>
15#include <linux/pagemap.h> 17#include <linux/pagemap.h>
@@ -261,12 +263,15 @@ void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c)
261 bad += c->sector_size; 263 bad += c->sector_size;
262 } 264 }
263 265
264#define check(sz) \ 266#define check(sz) \
265 if (sz != c->sz##_size) { \ 267do { \
266 printk(KERN_WARNING #sz "_size mismatch counted 0x%x, c->" #sz "_size 0x%x\n", \ 268 if (sz != c->sz##_size) { \
267 sz, c->sz##_size); \ 269 pr_warn("%s_size mismatch counted 0x%x, c->%s_size 0x%x\n", \
268 dump = 1; \ 270 #sz, sz, #sz, c->sz##_size); \
269 } 271 dump = 1; \
272 } \
273} while (0)
274
270 check(free); 275 check(free);
271 check(dirty); 276 check(dirty);
272 check(used); 277 check(used);
@@ -274,11 +279,12 @@ void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c)
274 check(unchecked); 279 check(unchecked);
275 check(bad); 280 check(bad);
276 check(erasing); 281 check(erasing);
282
277#undef check 283#undef check
278 284
279 if (nr_counted != c->nr_blocks) { 285 if (nr_counted != c->nr_blocks) {
280 printk(KERN_WARNING "%s counted only 0x%x blocks of 0x%x. Where are the others?\n", 286 pr_warn("%s counted only 0x%x blocks of 0x%x. Where are the others?\n",
281 __func__, nr_counted, c->nr_blocks); 287 __func__, nr_counted, c->nr_blocks);
282 dump = 1; 288 dump = 1;
283 } 289 }
284 290
diff --git a/fs/jffs2/debug.h b/fs/jffs2/debug.h
index c4f8eef5ca68..4fd9be4cbc98 100644
--- a/fs/jffs2/debug.h
+++ b/fs/jffs2/debug.h
@@ -51,6 +51,7 @@
51 * superseded by nicer dbg_xxx() macros... 51 * superseded by nicer dbg_xxx() macros...
52 */ 52 */
53#if CONFIG_JFFS2_FS_DEBUG > 0 53#if CONFIG_JFFS2_FS_DEBUG > 0
54#define DEBUG
54#define D1(x) x 55#define D1(x) x
55#else 56#else
56#define D1(x) 57#define D1(x)
@@ -62,50 +63,33 @@
62#define D2(x) 63#define D2(x)
63#endif 64#endif
64 65
66#define jffs2_dbg(level, fmt, ...) \
67do { \
68 if (CONFIG_JFFS2_FS_DEBUG >= level) \
69 pr_debug(fmt, ##__VA_ARGS__); \
70} while (0)
71
65/* The prefixes of JFFS2 messages */ 72/* The prefixes of JFFS2 messages */
73#define JFFS2_DBG KERN_DEBUG
66#define JFFS2_DBG_PREFIX "[JFFS2 DBG]" 74#define JFFS2_DBG_PREFIX "[JFFS2 DBG]"
67#define JFFS2_ERR_PREFIX "JFFS2 error:"
68#define JFFS2_WARN_PREFIX "JFFS2 warning:"
69#define JFFS2_NOTICE_PREFIX "JFFS2 notice:"
70
71#define JFFS2_ERR KERN_ERR
72#define JFFS2_WARN KERN_WARNING
73#define JFFS2_NOT KERN_NOTICE
74#define JFFS2_DBG KERN_DEBUG
75
76#define JFFS2_DBG_MSG_PREFIX JFFS2_DBG JFFS2_DBG_PREFIX 75#define JFFS2_DBG_MSG_PREFIX JFFS2_DBG JFFS2_DBG_PREFIX
77#define JFFS2_ERR_MSG_PREFIX JFFS2_ERR JFFS2_ERR_PREFIX
78#define JFFS2_WARN_MSG_PREFIX JFFS2_WARN JFFS2_WARN_PREFIX
79#define JFFS2_NOTICE_MSG_PREFIX JFFS2_NOT JFFS2_NOTICE_PREFIX
80 76
81/* JFFS2 message macros */ 77/* JFFS2 message macros */
82#define JFFS2_ERROR(fmt, ...) \ 78#define JFFS2_ERROR(fmt, ...) \
83 do { \ 79 pr_err("error: (%d) %s: " fmt, \
84 printk(JFFS2_ERR_MSG_PREFIX \ 80 task_pid_nr(current), __func__, ##__VA_ARGS__)
85 " (%d) %s: " fmt, task_pid_nr(current), \
86 __func__ , ##__VA_ARGS__); \
87 } while(0)
88 81
89#define JFFS2_WARNING(fmt, ...) \ 82#define JFFS2_WARNING(fmt, ...) \
90 do { \ 83 pr_warn("warning: (%d) %s: " fmt, \
91 printk(JFFS2_WARN_MSG_PREFIX \ 84 task_pid_nr(current), __func__, ##__VA_ARGS__)
92 " (%d) %s: " fmt, task_pid_nr(current), \
93 __func__ , ##__VA_ARGS__); \
94 } while(0)
95 85
96#define JFFS2_NOTICE(fmt, ...) \ 86#define JFFS2_NOTICE(fmt, ...) \
97 do { \ 87 pr_notice("notice: (%d) %s: " fmt, \
98 printk(JFFS2_NOTICE_MSG_PREFIX \ 88 task_pid_nr(current), __func__, ##__VA_ARGS__)
99 " (%d) %s: " fmt, task_pid_nr(current), \
100 __func__ , ##__VA_ARGS__); \
101 } while(0)
102 89
103#define JFFS2_DEBUG(fmt, ...) \ 90#define JFFS2_DEBUG(fmt, ...) \
104 do { \ 91 printk(KERN_DEBUG "[JFFS2 DBG] (%d) %s: " fmt, \
105 printk(JFFS2_DBG_MSG_PREFIX \ 92 task_pid_nr(current), __func__, ##__VA_ARGS__)
106 " (%d) %s: " fmt, task_pid_nr(current), \
107 __func__ , ##__VA_ARGS__); \
108 } while(0)
109 93
110/* 94/*
111 * We split our debugging messages on several parts, depending on the JFFS2 95 * We split our debugging messages on several parts, depending on the JFFS2
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 973ac5822bd7..b56018896d5e 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/kernel.h> 15#include <linux/kernel.h>
14#include <linux/slab.h> 16#include <linux/slab.h>
15#include <linux/fs.h> 17#include <linux/fs.h>
@@ -79,7 +81,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
79 uint32_t ino = 0; 81 uint32_t ino = 0;
80 struct inode *inode = NULL; 82 struct inode *inode = NULL;
81 83
82 D1(printk(KERN_DEBUG "jffs2_lookup()\n")); 84 jffs2_dbg(1, "jffs2_lookup()\n");
83 85
84 if (target->d_name.len > JFFS2_MAX_NAME_LEN) 86 if (target->d_name.len > JFFS2_MAX_NAME_LEN)
85 return ERR_PTR(-ENAMETOOLONG); 87 return ERR_PTR(-ENAMETOOLONG);
@@ -103,7 +105,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
103 if (ino) { 105 if (ino) {
104 inode = jffs2_iget(dir_i->i_sb, ino); 106 inode = jffs2_iget(dir_i->i_sb, ino);
105 if (IS_ERR(inode)) 107 if (IS_ERR(inode))
106 printk(KERN_WARNING "iget() failed for ino #%u\n", ino); 108 pr_warn("iget() failed for ino #%u\n", ino);
107 } 109 }
108 110
109 return d_splice_alias(inode, target); 111 return d_splice_alias(inode, target);
@@ -119,21 +121,22 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
119 struct jffs2_full_dirent *fd; 121 struct jffs2_full_dirent *fd;
120 unsigned long offset, curofs; 122 unsigned long offset, curofs;
121 123
122 D1(printk(KERN_DEBUG "jffs2_readdir() for dir_i #%lu\n", filp->f_path.dentry->d_inode->i_ino)); 124 jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n",
125 filp->f_path.dentry->d_inode->i_ino);
123 126
124 f = JFFS2_INODE_INFO(inode); 127 f = JFFS2_INODE_INFO(inode);
125 128
126 offset = filp->f_pos; 129 offset = filp->f_pos;
127 130
128 if (offset == 0) { 131 if (offset == 0) {
129 D1(printk(KERN_DEBUG "Dirent 0: \".\", ino #%lu\n", inode->i_ino)); 132 jffs2_dbg(1, "Dirent 0: \".\", ino #%lu\n", inode->i_ino);
130 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) 133 if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
131 goto out; 134 goto out;
132 offset++; 135 offset++;
133 } 136 }
134 if (offset == 1) { 137 if (offset == 1) {
135 unsigned long pino = parent_ino(filp->f_path.dentry); 138 unsigned long pino = parent_ino(filp->f_path.dentry);
136 D1(printk(KERN_DEBUG "Dirent 1: \"..\", ino #%lu\n", pino)); 139 jffs2_dbg(1, "Dirent 1: \"..\", ino #%lu\n", pino);
137 if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0) 140 if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0)
138 goto out; 141 goto out;
139 offset++; 142 offset++;
@@ -146,16 +149,18 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
146 curofs++; 149 curofs++;
147 /* First loop: curofs = 2; offset = 2 */ 150 /* First loop: curofs = 2; offset = 2 */
148 if (curofs < offset) { 151 if (curofs < offset) {
149 D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", 152 jffs2_dbg(2, "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n",
150 fd->name, fd->ino, fd->type, curofs, offset)); 153 fd->name, fd->ino, fd->type, curofs, offset);
151 continue; 154 continue;
152 } 155 }
153 if (!fd->ino) { 156 if (!fd->ino) {
154 D2(printk(KERN_DEBUG "Skipping deletion dirent \"%s\"\n", fd->name)); 157 jffs2_dbg(2, "Skipping deletion dirent \"%s\"\n",
158 fd->name);
155 offset++; 159 offset++;
156 continue; 160 continue;
157 } 161 }
158 D2(printk(KERN_DEBUG "Dirent %ld: \"%s\", ino #%u, type %d\n", offset, fd->name, fd->ino, fd->type)); 162 jffs2_dbg(2, "Dirent %ld: \"%s\", ino #%u, type %d\n",
163 offset, fd->name, fd->ino, fd->type);
159 if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0) 164 if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0)
160 break; 165 break;
161 offset++; 166 offset++;
@@ -184,12 +189,12 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
184 189
185 c = JFFS2_SB_INFO(dir_i->i_sb); 190 c = JFFS2_SB_INFO(dir_i->i_sb);
186 191
187 D1(printk(KERN_DEBUG "jffs2_create()\n")); 192 jffs2_dbg(1, "%s()\n", __func__);
188 193
189 inode = jffs2_new_inode(dir_i, mode, ri); 194 inode = jffs2_new_inode(dir_i, mode, ri);
190 195
191 if (IS_ERR(inode)) { 196 if (IS_ERR(inode)) {
192 D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n")); 197 jffs2_dbg(1, "jffs2_new_inode() failed\n");
193 jffs2_free_raw_inode(ri); 198 jffs2_free_raw_inode(ri);
194 return PTR_ERR(inode); 199 return PTR_ERR(inode);
195 } 200 }
@@ -217,9 +222,9 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
217 222
218 jffs2_free_raw_inode(ri); 223 jffs2_free_raw_inode(ri);
219 224
220 D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n", 225 jffs2_dbg(1, "%s(): Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
221 inode->i_ino, inode->i_mode, inode->i_nlink, 226 __func__, inode->i_ino, inode->i_mode, inode->i_nlink,
222 f->inocache->pino_nlink, inode->i_mapping->nrpages)); 227 f->inocache->pino_nlink, inode->i_mapping->nrpages);
223 228
224 d_instantiate(dentry, inode); 229 d_instantiate(dentry, inode);
225 unlock_new_inode(inode); 230 unlock_new_inode(inode);
@@ -362,14 +367,15 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
362 /* We use f->target field to store the target path. */ 367 /* We use f->target field to store the target path. */
363 f->target = kmemdup(target, targetlen + 1, GFP_KERNEL); 368 f->target = kmemdup(target, targetlen + 1, GFP_KERNEL);
364 if (!f->target) { 369 if (!f->target) {
365 printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1); 370 pr_warn("Can't allocate %d bytes of memory\n", targetlen + 1);
366 mutex_unlock(&f->sem); 371 mutex_unlock(&f->sem);
367 jffs2_complete_reservation(c); 372 jffs2_complete_reservation(c);
368 ret = -ENOMEM; 373 ret = -ENOMEM;
369 goto fail; 374 goto fail;
370 } 375 }
371 376
372 D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->target)); 377 jffs2_dbg(1, "%s(): symlink's target '%s' cached\n",
378 __func__, (char *)f->target);
373 379
374 /* No data here. Only a metadata node, which will be 380 /* No data here. Only a metadata node, which will be
375 obsoleted by the first data write 381 obsoleted by the first data write
@@ -856,7 +862,8 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
856 f->inocache->pino_nlink++; 862 f->inocache->pino_nlink++;
857 mutex_unlock(&f->sem); 863 mutex_unlock(&f->sem);
858 864
859 printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). You now have a hard link\n", ret); 865 pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n",
866 __func__, ret);
860 /* Might as well let the VFS know */ 867 /* Might as well let the VFS know */
861 d_instantiate(new_dentry, old_dentry->d_inode); 868 d_instantiate(new_dentry, old_dentry->d_inode);
862 ihold(old_dentry->d_inode); 869 ihold(old_dentry->d_inode);
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index eafb8d37a6fb..4a6cf289be24 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/kernel.h> 15#include <linux/kernel.h>
14#include <linux/slab.h> 16#include <linux/slab.h>
15#include <linux/mtd/mtd.h> 17#include <linux/mtd/mtd.h>
@@ -46,11 +48,12 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
46#else /* Linux */ 48#else /* Linux */
47 struct erase_info *instr; 49 struct erase_info *instr;
48 50
49 D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n", 51 jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
50 jeb->offset, jeb->offset, jeb->offset + c->sector_size)); 52 __func__,
53 jeb->offset, jeb->offset, jeb->offset + c->sector_size);
51 instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL); 54 instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
52 if (!instr) { 55 if (!instr) {
53 printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); 56 pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
54 mutex_lock(&c->erase_free_sem); 57 mutex_lock(&c->erase_free_sem);
55 spin_lock(&c->erase_completion_lock); 58 spin_lock(&c->erase_completion_lock);
56 list_move(&jeb->list, &c->erase_pending_list); 59 list_move(&jeb->list, &c->erase_pending_list);
@@ -69,7 +72,6 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
69 instr->len = c->sector_size; 72 instr->len = c->sector_size;
70 instr->callback = jffs2_erase_callback; 73 instr->callback = jffs2_erase_callback;
71 instr->priv = (unsigned long)(&instr[1]); 74 instr->priv = (unsigned long)(&instr[1]);
72 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
73 75
74 ((struct erase_priv_struct *)instr->priv)->jeb = jeb; 76 ((struct erase_priv_struct *)instr->priv)->jeb = jeb;
75 ((struct erase_priv_struct *)instr->priv)->c = c; 77 ((struct erase_priv_struct *)instr->priv)->c = c;
@@ -84,7 +86,8 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
84 86
85 if (ret == -ENOMEM || ret == -EAGAIN) { 87 if (ret == -ENOMEM || ret == -EAGAIN) {
86 /* Erase failed immediately. Refile it on the list */ 88 /* Erase failed immediately. Refile it on the list */
87 D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret)); 89 jffs2_dbg(1, "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n",
90 jeb->offset, ret);
88 mutex_lock(&c->erase_free_sem); 91 mutex_lock(&c->erase_free_sem);
89 spin_lock(&c->erase_completion_lock); 92 spin_lock(&c->erase_completion_lock);
90 list_move(&jeb->list, &c->erase_pending_list); 93 list_move(&jeb->list, &c->erase_pending_list);
@@ -97,9 +100,11 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
97 } 100 }
98 101
99 if (ret == -EROFS) 102 if (ret == -EROFS)
100 printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset); 103 pr_warn("Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n",
104 jeb->offset);
101 else 105 else
102 printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret); 106 pr_warn("Erase at 0x%08x failed immediately: errno %d\n",
107 jeb->offset, ret);
103 108
104 jffs2_erase_failed(c, jeb, bad_offset); 109 jffs2_erase_failed(c, jeb, bad_offset);
105} 110}
@@ -125,13 +130,14 @@ int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
125 130
126 work_done++; 131 work_done++;
127 if (!--count) { 132 if (!--count) {
128 D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n")); 133 jffs2_dbg(1, "Count reached. jffs2_erase_pending_blocks leaving\n");
129 goto done; 134 goto done;
130 } 135 }
131 136
132 } else if (!list_empty(&c->erase_pending_list)) { 137 } else if (!list_empty(&c->erase_pending_list)) {
133 jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list); 138 jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
134 D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset)); 139 jffs2_dbg(1, "Starting erase of pending block 0x%08x\n",
140 jeb->offset);
135 list_del(&jeb->list); 141 list_del(&jeb->list);
136 c->erasing_size += c->sector_size; 142 c->erasing_size += c->sector_size;
137 c->wasted_size -= jeb->wasted_size; 143 c->wasted_size -= jeb->wasted_size;
@@ -159,13 +165,13 @@ int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
159 spin_unlock(&c->erase_completion_lock); 165 spin_unlock(&c->erase_completion_lock);
160 mutex_unlock(&c->erase_free_sem); 166 mutex_unlock(&c->erase_free_sem);
161 done: 167 done:
162 D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n")); 168 jffs2_dbg(1, "jffs2_erase_pending_blocks completed\n");
163 return work_done; 169 return work_done;
164} 170}
165 171
166static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) 172static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
167{ 173{
168 D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset)); 174 jffs2_dbg(1, "Erase completed successfully at 0x%08x\n", jeb->offset);
169 mutex_lock(&c->erase_free_sem); 175 mutex_lock(&c->erase_free_sem);
170 spin_lock(&c->erase_completion_lock); 176 spin_lock(&c->erase_completion_lock);
171 list_move_tail(&jeb->list, &c->erase_complete_list); 177 list_move_tail(&jeb->list, &c->erase_complete_list);
@@ -214,7 +220,7 @@ static void jffs2_erase_callback(struct erase_info *instr)
214 struct erase_priv_struct *priv = (void *)instr->priv; 220 struct erase_priv_struct *priv = (void *)instr->priv;
215 221
216 if(instr->state != MTD_ERASE_DONE) { 222 if(instr->state != MTD_ERASE_DONE) {
217 printk(KERN_WARNING "Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", 223 pr_warn("Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n",
218 (unsigned long long)instr->addr, instr->state); 224 (unsigned long long)instr->addr, instr->state);
219 jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); 225 jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
220 } else { 226 } else {
@@ -269,8 +275,8 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
269 return; 275 return;
270 } 276 }
271 277
272 D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n", 278 jffs2_dbg(1, "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
273 jeb->offset, jeb->offset + c->sector_size, ic->ino)); 279 jeb->offset, jeb->offset + c->sector_size, ic->ino);
274 280
275 D2({ 281 D2({
276 int i=0; 282 int i=0;
@@ -281,7 +287,7 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
281 287
282 printk(KERN_DEBUG); 288 printk(KERN_DEBUG);
283 while(this) { 289 while(this) {
284 printk(KERN_CONT "0x%08x(%d)->", 290 pr_cont("0x%08x(%d)->",
285 ref_offset(this), ref_flags(this)); 291 ref_offset(this), ref_flags(this));
286 if (++i == 5) { 292 if (++i == 5) {
287 printk(KERN_DEBUG); 293 printk(KERN_DEBUG);
@@ -289,7 +295,7 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
289 } 295 }
290 this = this->next_in_ino; 296 this = this->next_in_ino;
291 } 297 }
292 printk(KERN_CONT "\n"); 298 pr_cont("\n");
293 }); 299 });
294 300
295 switch (ic->class) { 301 switch (ic->class) {
@@ -310,7 +316,8 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
310void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) 316void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
311{ 317{
312 struct jffs2_raw_node_ref *block, *ref; 318 struct jffs2_raw_node_ref *block, *ref;
313 D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset)); 319 jffs2_dbg(1, "Freeing all node refs for eraseblock offset 0x%08x\n",
320 jeb->offset);
314 321
315 block = ref = jeb->first_node; 322 block = ref = jeb->first_node;
316 323
@@ -342,12 +349,13 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
342 &ebuf, NULL); 349 &ebuf, NULL);
343 if (ret != -EOPNOTSUPP) { 350 if (ret != -EOPNOTSUPP) {
344 if (ret) { 351 if (ret) {
345 D1(printk(KERN_DEBUG "MTD point failed %d\n", ret)); 352 jffs2_dbg(1, "MTD point failed %d\n", ret);
346 goto do_flash_read; 353 goto do_flash_read;
347 } 354 }
348 if (retlen < c->sector_size) { 355 if (retlen < c->sector_size) {
349 /* Don't muck about if it won't let us point to the whole erase sector */ 356 /* Don't muck about if it won't let us point to the whole erase sector */
350 D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen)); 357 jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
358 retlen);
351 mtd_unpoint(c->mtd, jeb->offset, retlen); 359 mtd_unpoint(c->mtd, jeb->offset, retlen);
352 goto do_flash_read; 360 goto do_flash_read;
353 } 361 }
@@ -359,8 +367,10 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
359 } while(--retlen); 367 } while(--retlen);
360 mtd_unpoint(c->mtd, jeb->offset, c->sector_size); 368 mtd_unpoint(c->mtd, jeb->offset, c->sector_size);
361 if (retlen) { 369 if (retlen) {
362 printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08tx\n", 370 pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
363 *wordebuf, jeb->offset + c->sector_size-retlen*sizeof(*wordebuf)); 371 *wordebuf,
372 jeb->offset +
373 c->sector_size-retlen * sizeof(*wordebuf));
364 return -EIO; 374 return -EIO;
365 } 375 }
366 return 0; 376 return 0;
@@ -368,11 +378,12 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
368 do_flash_read: 378 do_flash_read:
369 ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL); 379 ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
370 if (!ebuf) { 380 if (!ebuf) {
371 printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset); 381 pr_warn("Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n",
382 jeb->offset);
372 return -EAGAIN; 383 return -EAGAIN;
373 } 384 }
374 385
375 D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset)); 386 jffs2_dbg(1, "Verifying erase at 0x%08x\n", jeb->offset);
376 387
377 for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) { 388 for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
378 uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs); 389 uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
@@ -382,12 +393,14 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
382 393
383 ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf); 394 ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf);
384 if (ret) { 395 if (ret) {
385 printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret); 396 pr_warn("Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n",
397 ofs, ret);
386 ret = -EIO; 398 ret = -EIO;
387 goto fail; 399 goto fail;
388 } 400 }
389 if (retlen != readlen) { 401 if (retlen != readlen) {
390 printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen); 402 pr_warn("Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n",
403 ofs, readlen, retlen);
391 ret = -EIO; 404 ret = -EIO;
392 goto fail; 405 goto fail;
393 } 406 }
@@ -396,7 +409,8 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
396 unsigned long *datum = ebuf + i; 409 unsigned long *datum = ebuf + i;
397 if (*datum + 1) { 410 if (*datum + 1) {
398 *bad_offset += i; 411 *bad_offset += i;
399 printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset); 412 pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n",
413 *datum, *bad_offset);
400 ret = -EIO; 414 ret = -EIO;
401 goto fail; 415 goto fail;
402 } 416 }
@@ -422,7 +436,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
422 } 436 }
423 437
424 /* Write the erase complete marker */ 438 /* Write the erase complete marker */
425 D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset)); 439 jffs2_dbg(1, "Writing erased marker to block at 0x%08x\n", jeb->offset);
426 bad_offset = jeb->offset; 440 bad_offset = jeb->offset;
427 441
428 /* Cleanmarker in oob area or no cleanmarker at all ? */ 442 /* Cleanmarker in oob area or no cleanmarker at all ? */
@@ -451,10 +465,10 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
451 465
452 if (ret || retlen != sizeof(marker)) { 466 if (ret || retlen != sizeof(marker)) {
453 if (ret) 467 if (ret)
454 printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n", 468 pr_warn("Write clean marker to block at 0x%08x failed: %d\n",
455 jeb->offset, ret); 469 jeb->offset, ret);
456 else 470 else
457 printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n", 471 pr_warn("Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
458 jeb->offset, sizeof(marker), retlen); 472 jeb->offset, sizeof(marker), retlen);
459 473
460 goto filebad; 474 goto filebad;
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 61e6723535b9..db3889ba8818 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/kernel.h> 15#include <linux/kernel.h>
14#include <linux/fs.h> 16#include <linux/fs.h>
15#include <linux/time.h> 17#include <linux/time.h>
@@ -85,7 +87,8 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
85 unsigned char *pg_buf; 87 unsigned char *pg_buf;
86 int ret; 88 int ret;
87 89
88 D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT)); 90 jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
91 __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT);
89 92
90 BUG_ON(!PageLocked(pg)); 93 BUG_ON(!PageLocked(pg));
91 94
@@ -105,7 +108,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
105 flush_dcache_page(pg); 108 flush_dcache_page(pg);
106 kunmap(pg); 109 kunmap(pg);
107 110
108 D2(printk(KERN_DEBUG "readpage finished\n")); 111 jffs2_dbg(2, "readpage finished\n");
109 return ret; 112 return ret;
110} 113}
111 114
@@ -144,7 +147,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
144 return -ENOMEM; 147 return -ENOMEM;
145 *pagep = pg; 148 *pagep = pg;
146 149
147 D1(printk(KERN_DEBUG "jffs2_write_begin()\n")); 150 jffs2_dbg(1, "%s()\n", __func__);
148 151
149 if (pageofs > inode->i_size) { 152 if (pageofs > inode->i_size) {
150 /* Make new hole frag from old EOF to new page */ 153 /* Make new hole frag from old EOF to new page */
@@ -153,8 +156,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
153 struct jffs2_full_dnode *fn; 156 struct jffs2_full_dnode *fn;
154 uint32_t alloc_len; 157 uint32_t alloc_len;
155 158
156 D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", 159 jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
157 (unsigned int)inode->i_size, pageofs)); 160 (unsigned int)inode->i_size, pageofs);
158 161
159 ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, 162 ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
160 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); 163 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
@@ -198,7 +201,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
198 f->metadata = NULL; 201 f->metadata = NULL;
199 } 202 }
200 if (ret) { 203 if (ret) {
201 D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n", ret)); 204 jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n",
205 ret);
202 jffs2_mark_node_obsolete(c, fn->raw); 206 jffs2_mark_node_obsolete(c, fn->raw);
203 jffs2_free_full_dnode(fn); 207 jffs2_free_full_dnode(fn);
204 jffs2_complete_reservation(c); 208 jffs2_complete_reservation(c);
@@ -222,7 +226,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
222 if (ret) 226 if (ret)
223 goto out_page; 227 goto out_page;
224 } 228 }
225 D1(printk(KERN_DEBUG "end write_begin(). pg->flags %lx\n", pg->flags)); 229 jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
226 return ret; 230 return ret;
227 231
228out_page: 232out_page:
@@ -248,8 +252,9 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
248 int ret = 0; 252 int ret = 0;
249 uint32_t writtenlen = 0; 253 uint32_t writtenlen = 0;
250 254
251 D1(printk(KERN_DEBUG "jffs2_write_end(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", 255 jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
252 inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags)); 256 __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT,
257 start, end, pg->flags);
253 258
254 /* We need to avoid deadlock with page_cache_read() in 259 /* We need to avoid deadlock with page_cache_read() in
255 jffs2_garbage_collect_pass(). So the page must be 260 jffs2_garbage_collect_pass(). So the page must be
@@ -268,7 +273,8 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
268 ri = jffs2_alloc_raw_inode(); 273 ri = jffs2_alloc_raw_inode();
269 274
270 if (!ri) { 275 if (!ri) {
271 D1(printk(KERN_DEBUG "jffs2_write_end(): Allocation of raw inode failed\n")); 276 jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
277 __func__);
272 unlock_page(pg); 278 unlock_page(pg);
273 page_cache_release(pg); 279 page_cache_release(pg);
274 return -ENOMEM; 280 return -ENOMEM;
@@ -315,13 +321,14 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
315 /* generic_file_write has written more to the page cache than we've 321 /* generic_file_write has written more to the page cache than we've
316 actually written to the medium. Mark the page !Uptodate so that 322 actually written to the medium. Mark the page !Uptodate so that
317 it gets reread */ 323 it gets reread */
318 D1(printk(KERN_DEBUG "jffs2_write_end(): Not all bytes written. Marking page !uptodate\n")); 324 jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n",
325 __func__);
319 SetPageError(pg); 326 SetPageError(pg);
320 ClearPageUptodate(pg); 327 ClearPageUptodate(pg);
321 } 328 }
322 329
323 D1(printk(KERN_DEBUG "jffs2_write_end() returning %d\n", 330 jffs2_dbg(1, "%s() returning %d\n",
324 writtenlen > 0 ? writtenlen : ret)); 331 __func__, writtenlen > 0 ? writtenlen : ret);
325 unlock_page(pg); 332 unlock_page(pg);
326 page_cache_release(pg); 333 page_cache_release(pg);
327 return writtenlen > 0 ? writtenlen : ret; 334 return writtenlen > 0 ? writtenlen : ret;
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index c0d5c9d770da..bb6f993ebca9 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/capability.h> 15#include <linux/capability.h>
14#include <linux/kernel.h> 16#include <linux/kernel.h>
15#include <linux/sched.h> 17#include <linux/sched.h>
@@ -39,7 +41,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
39 int ret; 41 int ret;
40 int alloc_type = ALLOC_NORMAL; 42 int alloc_type = ALLOC_NORMAL;
41 43
42 D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); 44 jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);
43 45
44 /* Special cases - we don't want more than one data node 46 /* Special cases - we don't want more than one data node
45 for these types on the medium at any time. So setattr 47 for these types on the medium at any time. So setattr
@@ -50,7 +52,8 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
50 /* For these, we don't actually need to read the old node */ 52 /* For these, we don't actually need to read the old node */
51 mdatalen = jffs2_encode_dev(&dev, inode->i_rdev); 53 mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
52 mdata = (char *)&dev; 54 mdata = (char *)&dev;
53 D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen)); 55 jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
56 __func__, mdatalen);
54 } else if (S_ISLNK(inode->i_mode)) { 57 } else if (S_ISLNK(inode->i_mode)) {
55 mutex_lock(&f->sem); 58 mutex_lock(&f->sem);
56 mdatalen = f->metadata->size; 59 mdatalen = f->metadata->size;
@@ -66,7 +69,8 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
66 return ret; 69 return ret;
67 } 70 }
68 mutex_unlock(&f->sem); 71 mutex_unlock(&f->sem);
69 D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen)); 72 jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
73 __func__, mdatalen);
70 } 74 }
71 75
72 ri = jffs2_alloc_raw_inode(); 76 ri = jffs2_alloc_raw_inode();
@@ -233,7 +237,8 @@ void jffs2_evict_inode (struct inode *inode)
233 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); 237 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
234 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 238 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
235 239
236 D1(printk(KERN_DEBUG "jffs2_evict_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); 240 jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
241 __func__, inode->i_ino, inode->i_mode);
237 truncate_inode_pages(&inode->i_data, 0); 242 truncate_inode_pages(&inode->i_data, 0);
238 end_writeback(inode); 243 end_writeback(inode);
239 jffs2_do_clear_inode(c, f); 244 jffs2_do_clear_inode(c, f);
@@ -249,7 +254,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
249 dev_t rdev = 0; 254 dev_t rdev = 0;
250 int ret; 255 int ret;
251 256
252 D1(printk(KERN_DEBUG "jffs2_iget(): ino == %lu\n", ino)); 257 jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);
253 258
254 inode = iget_locked(sb, ino); 259 inode = iget_locked(sb, ino);
255 if (!inode) 260 if (!inode)
@@ -317,14 +322,16 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
317 /* Read the device numbers from the media */ 322 /* Read the device numbers from the media */
318 if (f->metadata->size != sizeof(jdev.old_id) && 323 if (f->metadata->size != sizeof(jdev.old_id) &&
319 f->metadata->size != sizeof(jdev.new_id)) { 324 f->metadata->size != sizeof(jdev.new_id)) {
320 printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size); 325 pr_notice("Device node has strange size %d\n",
326 f->metadata->size);
321 goto error_io; 327 goto error_io;
322 } 328 }
323 D1(printk(KERN_DEBUG "Reading device numbers from flash\n")); 329 jffs2_dbg(1, "Reading device numbers from flash\n");
324 ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size); 330 ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
325 if (ret < 0) { 331 if (ret < 0) {
326 /* Eep */ 332 /* Eep */
327 printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino); 333 pr_notice("Read device numbers for inode %lu failed\n",
334 (unsigned long)inode->i_ino);
328 goto error; 335 goto error;
329 } 336 }
330 if (f->metadata->size == sizeof(jdev.old_id)) 337 if (f->metadata->size == sizeof(jdev.old_id))
@@ -339,12 +346,13 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
339 break; 346 break;
340 347
341 default: 348 default:
342 printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino); 349 pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
350 __func__, inode->i_mode, (unsigned long)inode->i_ino);
343 } 351 }
344 352
345 mutex_unlock(&f->sem); 353 mutex_unlock(&f->sem);
346 354
347 D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n")); 355 jffs2_dbg(1, "jffs2_read_inode() returning\n");
348 unlock_new_inode(inode); 356 unlock_new_inode(inode);
349 return inode; 357 return inode;
350 358
@@ -362,11 +370,13 @@ void jffs2_dirty_inode(struct inode *inode, int flags)
362 struct iattr iattr; 370 struct iattr iattr;
363 371
364 if (!(inode->i_state & I_DIRTY_DATASYNC)) { 372 if (!(inode->i_state & I_DIRTY_DATASYNC)) {
365 D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino)); 373 jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
374 __func__, inode->i_ino);
366 return; 375 return;
367 } 376 }
368 377
369 D1(printk(KERN_DEBUG "jffs2_dirty_inode() calling setattr() for ino #%lu\n", inode->i_ino)); 378 jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
379 __func__, inode->i_ino);
370 380
371 iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME; 381 iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
372 iattr.ia_mode = inode->i_mode; 382 iattr.ia_mode = inode->i_mode;
@@ -414,7 +424,8 @@ struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_r
414 struct jffs2_inode_info *f; 424 struct jffs2_inode_info *f;
415 int ret; 425 int ret;
416 426
417 D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode)); 427 jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
428 __func__, dir_i->i_ino, mode);
418 429
419 c = JFFS2_SB_INFO(sb); 430 c = JFFS2_SB_INFO(sb);
420 431
@@ -504,11 +515,11 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
504 515
505#ifndef CONFIG_JFFS2_FS_WRITEBUFFER 516#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
506 if (c->mtd->type == MTD_NANDFLASH) { 517 if (c->mtd->type == MTD_NANDFLASH) {
507 printk(KERN_ERR "jffs2: Cannot operate on NAND flash unless jffs2 NAND support is compiled in.\n"); 518 pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
508 return -EINVAL; 519 return -EINVAL;
509 } 520 }
510 if (c->mtd->type == MTD_DATAFLASH) { 521 if (c->mtd->type == MTD_DATAFLASH) {
511 printk(KERN_ERR "jffs2: Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in.\n"); 522 pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
512 return -EINVAL; 523 return -EINVAL;
513 } 524 }
514#endif 525#endif
@@ -522,12 +533,13 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
522 */ 533 */
523 if ((c->sector_size * blocks) != c->flash_size) { 534 if ((c->sector_size * blocks) != c->flash_size) {
524 c->flash_size = c->sector_size * blocks; 535 c->flash_size = c->sector_size * blocks;
525 printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n", 536 pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
526 c->flash_size / 1024); 537 c->flash_size / 1024);
527 } 538 }
528 539
529 if (c->flash_size < 5*c->sector_size) { 540 if (c->flash_size < 5*c->sector_size) {
530 printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size); 541 pr_err("Too few erase blocks (%d)\n",
542 c->flash_size / c->sector_size);
531 return -EINVAL; 543 return -EINVAL;
532 } 544 }
533 545
@@ -550,17 +562,17 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
550 if ((ret = jffs2_do_mount_fs(c))) 562 if ((ret = jffs2_do_mount_fs(c)))
551 goto out_inohash; 563 goto out_inohash;
552 564
553 D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n")); 565 jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
554 root_i = jffs2_iget(sb, 1); 566 root_i = jffs2_iget(sb, 1);
555 if (IS_ERR(root_i)) { 567 if (IS_ERR(root_i)) {
556 D1(printk(KERN_WARNING "get root inode failed\n")); 568 jffs2_dbg(1, "get root inode failed\n");
557 ret = PTR_ERR(root_i); 569 ret = PTR_ERR(root_i);
558 goto out_root; 570 goto out_root;
559 } 571 }
560 572
561 ret = -ENOMEM; 573 ret = -ENOMEM;
562 574
563 D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n")); 575 jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
564 sb->s_root = d_make_root(root_i); 576 sb->s_root = d_make_root(root_i);
565 if (!sb->s_root) 577 if (!sb->s_root)
566 goto out_root; 578 goto out_root;
@@ -618,20 +630,21 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
618 */ 630 */
619 inode = ilookup(OFNI_BS_2SFFJ(c), inum); 631 inode = ilookup(OFNI_BS_2SFFJ(c), inum);
620 if (!inode) { 632 if (!inode) {
621 D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n", 633 jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
622 inum)); 634 inum);
623 635
624 spin_lock(&c->inocache_lock); 636 spin_lock(&c->inocache_lock);
625 ic = jffs2_get_ino_cache(c, inum); 637 ic = jffs2_get_ino_cache(c, inum);
626 if (!ic) { 638 if (!ic) {
627 D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum)); 639 jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
640 inum);
628 spin_unlock(&c->inocache_lock); 641 spin_unlock(&c->inocache_lock);
629 return NULL; 642 return NULL;
630 } 643 }
631 if (ic->state != INO_STATE_CHECKEDABSENT) { 644 if (ic->state != INO_STATE_CHECKEDABSENT) {
632 /* Wait for progress. Don't just loop */ 645 /* Wait for progress. Don't just loop */
633 D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n", 646 jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
634 ic->ino, ic->state)); 647 ic->ino, ic->state);
635 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); 648 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
636 } else { 649 } else {
637 spin_unlock(&c->inocache_lock); 650 spin_unlock(&c->inocache_lock);
@@ -649,8 +662,8 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
649 return ERR_CAST(inode); 662 return ERR_CAST(inode);
650 } 663 }
651 if (is_bad_inode(inode)) { 664 if (is_bad_inode(inode)) {
652 printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. unlinked %d\n", 665 pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
653 inum, unlinked); 666 inum, unlinked);
654 /* NB. This will happen again. We need to do something appropriate here. */ 667 /* NB. This will happen again. We need to do something appropriate here. */
655 iput(inode); 668 iput(inode);
656 return ERR_PTR(-EIO); 669 return ERR_PTR(-EIO);
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 31dce611337c..ad271c70aa25 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
13#include <linux/kernel.h> 15#include <linux/kernel.h>
14#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
15#include <linux/slab.h> 17#include <linux/slab.h>
@@ -51,44 +53,44 @@ static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
51 number of free blocks is low. */ 53 number of free blocks is low. */
52again: 54again:
53 if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) { 55 if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) {
54 D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n")); 56 jffs2_dbg(1, "Picking block from bad_used_list to GC next\n");
55 nextlist = &c->bad_used_list; 57 nextlist = &c->bad_used_list;
56 } else if (n < 50 && !list_empty(&c->erasable_list)) { 58 } else if (n < 50 && !list_empty(&c->erasable_list)) {
57 /* Note that most of them will have gone directly to be erased. 59 /* Note that most of them will have gone directly to be erased.
58 So don't favour the erasable_list _too_ much. */ 60 So don't favour the erasable_list _too_ much. */
59 D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n")); 61 jffs2_dbg(1, "Picking block from erasable_list to GC next\n");
60 nextlist = &c->erasable_list; 62 nextlist = &c->erasable_list;
61 } else if (n < 110 && !list_empty(&c->very_dirty_list)) { 63 } else if (n < 110 && !list_empty(&c->very_dirty_list)) {
62 /* Most of the time, pick one off the very_dirty list */ 64 /* Most of the time, pick one off the very_dirty list */
63 D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n")); 65 jffs2_dbg(1, "Picking block from very_dirty_list to GC next\n");
64 nextlist = &c->very_dirty_list; 66 nextlist = &c->very_dirty_list;
65 } else if (n < 126 && !list_empty(&c->dirty_list)) { 67 } else if (n < 126 && !list_empty(&c->dirty_list)) {
66 D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n")); 68 jffs2_dbg(1, "Picking block from dirty_list to GC next\n");
67 nextlist = &c->dirty_list; 69 nextlist = &c->dirty_list;
68 } else if (!list_empty(&c->clean_list)) { 70 } else if (!list_empty(&c->clean_list)) {
69 D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n")); 71 jffs2_dbg(1, "Picking block from clean_list to GC next\n");
70 nextlist = &c->clean_list; 72 nextlist = &c->clean_list;
71 } else if (!list_empty(&c->dirty_list)) { 73 } else if (!list_empty(&c->dirty_list)) {
72 D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n")); 74 jffs2_dbg(1, "Picking block from dirty_list to GC next (clean_list was empty)\n");
73 75
74 nextlist = &c->dirty_list; 76 nextlist = &c->dirty_list;
75 } else if (!list_empty(&c->very_dirty_list)) { 77 } else if (!list_empty(&c->very_dirty_list)) {
76 D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n")); 78 jffs2_dbg(1, "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n");
77 nextlist = &c->very_dirty_list; 79 nextlist = &c->very_dirty_list;
78 } else if (!list_empty(&c->erasable_list)) { 80 } else if (!list_empty(&c->erasable_list)) {
79 D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n")); 81 jffs2_dbg(1, "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n");
80 82
81 nextlist = &c->erasable_list; 83 nextlist = &c->erasable_list;
82 } else if (!list_empty(&c->erasable_pending_wbuf_list)) { 84 } else if (!list_empty(&c->erasable_pending_wbuf_list)) {
83 /* There are blocks are wating for the wbuf sync */ 85 /* There are blocks are wating for the wbuf sync */
84 D1(printk(KERN_DEBUG "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n")); 86 jffs2_dbg(1, "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n");
85 spin_unlock(&c->erase_completion_lock); 87 spin_unlock(&c->erase_completion_lock);
86 jffs2_flush_wbuf_pad(c); 88 jffs2_flush_wbuf_pad(c);
87 spin_lock(&c->erase_completion_lock); 89 spin_lock(&c->erase_completion_lock);
88 goto again; 90 goto again;
89 } else { 91 } else {
90 /* Eep. All were empty */ 92 /* Eep. All were empty */
91 D1(printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n")); 93 jffs2_dbg(1, "No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n");
92 return NULL; 94 return NULL;
93 } 95 }
94 96
@@ -97,13 +99,15 @@ again:
97 c->gcblock = ret; 99 c->gcblock = ret;
98 ret->gc_node = ret->first_node; 100 ret->gc_node = ret->first_node;
99 if (!ret->gc_node) { 101 if (!ret->gc_node) {
100 printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset); 102 pr_warn("Eep. ret->gc_node for block at 0x%08x is NULL\n",
103 ret->offset);
101 BUG(); 104 BUG();
102 } 105 }
103 106
104 /* Have we accidentally picked a clean block with wasted space ? */ 107 /* Have we accidentally picked a clean block with wasted space ? */
105 if (ret->wasted_size) { 108 if (ret->wasted_size) {
106 D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size)); 109 jffs2_dbg(1, "Converting wasted_size %08x to dirty_size\n",
110 ret->wasted_size);
107 ret->dirty_size += ret->wasted_size; 111 ret->dirty_size += ret->wasted_size;
108 c->wasted_size -= ret->wasted_size; 112 c->wasted_size -= ret->wasted_size;
109 c->dirty_size += ret->wasted_size; 113 c->dirty_size += ret->wasted_size;
@@ -140,8 +144,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
140 144
141 /* checked_ino is protected by the alloc_sem */ 145 /* checked_ino is protected by the alloc_sem */
142 if (c->checked_ino > c->highest_ino && xattr) { 146 if (c->checked_ino > c->highest_ino && xattr) {
143 printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n", 147 pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n",
144 c->unchecked_size); 148 c->unchecked_size);
145 jffs2_dbg_dump_block_lists_nolock(c); 149 jffs2_dbg_dump_block_lists_nolock(c);
146 spin_unlock(&c->erase_completion_lock); 150 spin_unlock(&c->erase_completion_lock);
147 mutex_unlock(&c->alloc_sem); 151 mutex_unlock(&c->alloc_sem);
@@ -163,8 +167,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
163 } 167 }
164 168
165 if (!ic->pino_nlink) { 169 if (!ic->pino_nlink) {
166 D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink/pino zero\n", 170 jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n",
167 ic->ino)); 171 ic->ino);
168 spin_unlock(&c->inocache_lock); 172 spin_unlock(&c->inocache_lock);
169 jffs2_xattr_delete_inode(c, ic); 173 jffs2_xattr_delete_inode(c, ic);
170 continue; 174 continue;
@@ -172,13 +176,15 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
172 switch(ic->state) { 176 switch(ic->state) {
173 case INO_STATE_CHECKEDABSENT: 177 case INO_STATE_CHECKEDABSENT:
174 case INO_STATE_PRESENT: 178 case INO_STATE_PRESENT:
175 D1(printk(KERN_DEBUG "Skipping ino #%u already checked\n", ic->ino)); 179 jffs2_dbg(1, "Skipping ino #%u already checked\n",
180 ic->ino);
176 spin_unlock(&c->inocache_lock); 181 spin_unlock(&c->inocache_lock);
177 continue; 182 continue;
178 183
179 case INO_STATE_GC: 184 case INO_STATE_GC:
180 case INO_STATE_CHECKING: 185 case INO_STATE_CHECKING:
181 printk(KERN_WARNING "Inode #%u is in state %d during CRC check phase!\n", ic->ino, ic->state); 186 pr_warn("Inode #%u is in state %d during CRC check phase!\n",
187 ic->ino, ic->state);
182 spin_unlock(&c->inocache_lock); 188 spin_unlock(&c->inocache_lock);
183 BUG(); 189 BUG();
184 190
@@ -186,7 +192,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
186 /* We need to wait for it to finish, lest we move on 192 /* We need to wait for it to finish, lest we move on
187 and trigger the BUG() above while we haven't yet 193 and trigger the BUG() above while we haven't yet
188 finished checking all its nodes */ 194 finished checking all its nodes */
189 D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino)); 195 jffs2_dbg(1, "Waiting for ino #%u to finish reading\n",
196 ic->ino);
190 /* We need to come back again for the _same_ inode. We've 197 /* We need to come back again for the _same_ inode. We've
191 made no progress in this case, but that should be OK */ 198 made no progress in this case, but that should be OK */
192 c->checked_ino--; 199 c->checked_ino--;
@@ -204,11 +211,13 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
204 ic->state = INO_STATE_CHECKING; 211 ic->state = INO_STATE_CHECKING;
205 spin_unlock(&c->inocache_lock); 212 spin_unlock(&c->inocache_lock);
206 213
207 D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino)); 214 jffs2_dbg(1, "%s(): triggering inode scan of ino#%u\n",
215 __func__, ic->ino);
208 216
209 ret = jffs2_do_crccheck_inode(c, ic); 217 ret = jffs2_do_crccheck_inode(c, ic);
210 if (ret) 218 if (ret)
211 printk(KERN_WARNING "Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino); 219 pr_warn("Returned error for crccheck of ino #%u. Expect badness...\n",
220 ic->ino);
212 221
213 jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT); 222 jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
214 mutex_unlock(&c->alloc_sem); 223 mutex_unlock(&c->alloc_sem);
@@ -220,11 +229,11 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
220 !list_empty(&c->erase_pending_list)) { 229 !list_empty(&c->erase_pending_list)) {
221 spin_unlock(&c->erase_completion_lock); 230 spin_unlock(&c->erase_completion_lock);
222 mutex_unlock(&c->alloc_sem); 231 mutex_unlock(&c->alloc_sem);
223 D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() erasing pending blocks\n")); 232 jffs2_dbg(1, "%s(): erasing pending blocks\n", __func__);
224 if (jffs2_erase_pending_blocks(c, 1)) 233 if (jffs2_erase_pending_blocks(c, 1))
225 return 0; 234 return 0;
226 235
227 D1(printk(KERN_DEBUG "No progress from erasing blocks; doing GC anyway\n")); 236 jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n");
228 spin_lock(&c->erase_completion_lock); 237 spin_lock(&c->erase_completion_lock);
229 mutex_lock(&c->alloc_sem); 238 mutex_lock(&c->alloc_sem);
230 } 239 }
@@ -242,13 +251,14 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
242 mutex_unlock(&c->alloc_sem); 251 mutex_unlock(&c->alloc_sem);
243 return -EAGAIN; 252 return -EAGAIN;
244 } 253 }
245 D1(printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n")); 254 jffs2_dbg(1, "Couldn't find erase block to garbage collect!\n");
246 spin_unlock(&c->erase_completion_lock); 255 spin_unlock(&c->erase_completion_lock);
247 mutex_unlock(&c->alloc_sem); 256 mutex_unlock(&c->alloc_sem);
248 return -EIO; 257 return -EIO;
249 } 258 }
250 259
251 D1(printk(KERN_DEBUG "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size)); 260 jffs2_dbg(1, "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n",
261 jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size);
252 D1(if (c->nextblock) 262 D1(if (c->nextblock)
253 printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size)); 263 printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));
254 264
@@ -261,12 +271,14 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
261 gcblock_dirty = jeb->dirty_size; 271 gcblock_dirty = jeb->dirty_size;
262 272
263 while(ref_obsolete(raw)) { 273 while(ref_obsolete(raw)) {
264 D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw))); 274 jffs2_dbg(1, "Node at 0x%08x is obsolete... skipping\n",
275 ref_offset(raw));
265 raw = ref_next(raw); 276 raw = ref_next(raw);
266 if (unlikely(!raw)) { 277 if (unlikely(!raw)) {
267 printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n"); 278 pr_warn("eep. End of raw list while still supposedly nodes to GC\n");
268 printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", 279 pr_warn("erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
269 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size); 280 jeb->offset, jeb->free_size,
281 jeb->dirty_size, jeb->used_size);
270 jeb->gc_node = raw; 282 jeb->gc_node = raw;
271 spin_unlock(&c->erase_completion_lock); 283 spin_unlock(&c->erase_completion_lock);
272 mutex_unlock(&c->alloc_sem); 284 mutex_unlock(&c->alloc_sem);
@@ -275,7 +287,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
275 } 287 }
276 jeb->gc_node = raw; 288 jeb->gc_node = raw;
277 289
278 D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw))); 290 jffs2_dbg(1, "Going to garbage collect node at 0x%08x\n",
291 ref_offset(raw));
279 292
280 if (!raw->next_in_ino) { 293 if (!raw->next_in_ino) {
281 /* Inode-less node. Clean marker, snapshot or something like that */ 294 /* Inode-less node. Clean marker, snapshot or something like that */
@@ -316,7 +329,9 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
316 329
317 spin_unlock(&c->erase_completion_lock); 330 spin_unlock(&c->erase_completion_lock);
318 331
319 D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino)); 332 jffs2_dbg(1, "%s(): collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n",
333 __func__, jeb->offset, ref_offset(raw), ref_flags(raw),
334 ic->ino);
320 335
321 /* Three possibilities: 336 /* Three possibilities:
322 1. Inode is already in-core. We must iget it and do proper 337 1. Inode is already in-core. We must iget it and do proper
@@ -336,8 +351,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
336 if (ref_flags(raw) == REF_PRISTINE) 351 if (ref_flags(raw) == REF_PRISTINE)
337 ic->state = INO_STATE_GC; 352 ic->state = INO_STATE_GC;
338 else { 353 else {
339 D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", 354 jffs2_dbg(1, "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
340 ic->ino)); 355 ic->ino);
341 } 356 }
342 break; 357 break;
343 358
@@ -353,8 +368,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
353 we're holding the alloc_sem, no other garbage collection 368 we're holding the alloc_sem, no other garbage collection
354 can happen. 369 can happen.
355 */ 370 */
356 printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n", 371 pr_crit("Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
357 ic->ino, ic->state); 372 ic->ino, ic->state);
358 mutex_unlock(&c->alloc_sem); 373 mutex_unlock(&c->alloc_sem);
359 spin_unlock(&c->inocache_lock); 374 spin_unlock(&c->inocache_lock);
360 BUG(); 375 BUG();
@@ -367,8 +382,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
367 drop the alloc_sem before sleeping. */ 382 drop the alloc_sem before sleeping. */
368 383
369 mutex_unlock(&c->alloc_sem); 384 mutex_unlock(&c->alloc_sem);
370 D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n", 385 jffs2_dbg(1, "%s(): waiting for ino #%u in state %d\n",
371 ic->ino, ic->state)); 386 __func__, ic->ino, ic->state);
372 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); 387 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
373 /* And because we dropped the alloc_sem we must start again from the 388 /* And because we dropped the alloc_sem we must start again from the
374 beginning. Ponder chance of livelock here -- we're returning success 389 beginning. Ponder chance of livelock here -- we're returning success
@@ -433,7 +448,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
433 test_gcnode: 448 test_gcnode:
434 if (jeb->dirty_size == gcblock_dirty && !ref_obsolete(jeb->gc_node)) { 449 if (jeb->dirty_size == gcblock_dirty && !ref_obsolete(jeb->gc_node)) {
435 /* Eep. This really should never happen. GC is broken */ 450 /* Eep. This really should never happen. GC is broken */
436 printk(KERN_ERR "Error garbage collecting node at %08x!\n", ref_offset(jeb->gc_node)); 451 pr_err("Error garbage collecting node at %08x!\n",
452 ref_offset(jeb->gc_node));
437 ret = -ENOSPC; 453 ret = -ENOSPC;
438 } 454 }
439 release_sem: 455 release_sem:
@@ -445,7 +461,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
445 461
446 eraseit: 462 eraseit:
447 if (c->gcblock && !c->gcblock->used_size) { 463 if (c->gcblock && !c->gcblock->used_size) {
448 D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset)); 464 jffs2_dbg(1, "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n",
465 c->gcblock->offset);
449 /* We're GC'ing an empty block? */ 466 /* We're GC'ing an empty block? */
450 list_add_tail(&c->gcblock->list, &c->erase_pending_list); 467 list_add_tail(&c->gcblock->list, &c->erase_pending_list);
451 c->gcblock = NULL; 468 c->gcblock = NULL;
@@ -475,12 +492,12 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
475 492
476 if (c->gcblock != jeb) { 493 if (c->gcblock != jeb) {
477 spin_unlock(&c->erase_completion_lock); 494 spin_unlock(&c->erase_completion_lock);
478 D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n")); 495 jffs2_dbg(1, "GC block is no longer gcblock. Restart\n");
479 goto upnout; 496 goto upnout;
480 } 497 }
481 if (ref_obsolete(raw)) { 498 if (ref_obsolete(raw)) {
482 spin_unlock(&c->erase_completion_lock); 499 spin_unlock(&c->erase_completion_lock);
483 D1(printk(KERN_DEBUG "node to be GC'd was obsoleted in the meantime.\n")); 500 jffs2_dbg(1, "node to be GC'd was obsoleted in the meantime.\n");
484 /* They'll call again */ 501 /* They'll call again */
485 goto upnout; 502 goto upnout;
486 } 503 }
@@ -536,10 +553,10 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era
536 } else if (fd) { 553 } else if (fd) {
537 ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd); 554 ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd);
538 } else { 555 } else {
539 printk(KERN_WARNING "Raw node at 0x%08x wasn't in node lists for ino #%u\n", 556 pr_warn("Raw node at 0x%08x wasn't in node lists for ino #%u\n",
540 ref_offset(raw), f->inocache->ino); 557 ref_offset(raw), f->inocache->ino);
541 if (ref_obsolete(raw)) { 558 if (ref_obsolete(raw)) {
542 printk(KERN_WARNING "But it's obsolete so we don't mind too much\n"); 559 pr_warn("But it's obsolete so we don't mind too much\n");
543 } else { 560 } else {
544 jffs2_dbg_dump_node(c, ref_offset(raw)); 561 jffs2_dbg_dump_node(c, ref_offset(raw));
545 BUG(); 562 BUG();
@@ -562,7 +579,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
562 uint32_t crc, rawlen; 579 uint32_t crc, rawlen;
563 int retried = 0; 580 int retried = 0;
564 581
565 D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw))); 582 jffs2_dbg(1, "Going to GC REF_PRISTINE node at 0x%08x\n",
583 ref_offset(raw));
566 584
567 alloclen = rawlen = ref_totlen(c, c->gcblock, raw); 585 alloclen = rawlen = ref_totlen(c, c->gcblock, raw);
568 586
@@ -595,8 +613,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
595 613
596 crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4); 614 crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4);
597 if (je32_to_cpu(node->u.hdr_crc) != crc) { 615 if (je32_to_cpu(node->u.hdr_crc) != crc) {
598 printk(KERN_WARNING "Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", 616 pr_warn("Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
599 ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc); 617 ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
600 goto bail; 618 goto bail;
601 } 619 }
602 620
@@ -604,16 +622,18 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
604 case JFFS2_NODETYPE_INODE: 622 case JFFS2_NODETYPE_INODE:
605 crc = crc32(0, node, sizeof(node->i)-8); 623 crc = crc32(0, node, sizeof(node->i)-8);
606 if (je32_to_cpu(node->i.node_crc) != crc) { 624 if (je32_to_cpu(node->i.node_crc) != crc) {
607 printk(KERN_WARNING "Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", 625 pr_warn("Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
608 ref_offset(raw), je32_to_cpu(node->i.node_crc), crc); 626 ref_offset(raw), je32_to_cpu(node->i.node_crc),
627 crc);
609 goto bail; 628 goto bail;
610 } 629 }
611 630
612 if (je32_to_cpu(node->i.dsize)) { 631 if (je32_to_cpu(node->i.dsize)) {
613 crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize)); 632 crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize));
614 if (je32_to_cpu(node->i.data_crc) != crc) { 633 if (je32_to_cpu(node->i.data_crc) != crc) {
615 printk(KERN_WARNING "Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", 634 pr_warn("Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
616 ref_offset(raw), je32_to_cpu(node->i.data_crc), crc); 635 ref_offset(raw),
636 je32_to_cpu(node->i.data_crc), crc);
617 goto bail; 637 goto bail;
618 } 638 }
619 } 639 }
@@ -622,21 +642,24 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
622 case JFFS2_NODETYPE_DIRENT: 642 case JFFS2_NODETYPE_DIRENT:
623 crc = crc32(0, node, sizeof(node->d)-8); 643 crc = crc32(0, node, sizeof(node->d)-8);
624 if (je32_to_cpu(node->d.node_crc) != crc) { 644 if (je32_to_cpu(node->d.node_crc) != crc) {
625 printk(KERN_WARNING "Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", 645 pr_warn("Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
626 ref_offset(raw), je32_to_cpu(node->d.node_crc), crc); 646 ref_offset(raw),
647 je32_to_cpu(node->d.node_crc), crc);
627 goto bail; 648 goto bail;
628 } 649 }
629 650
630 if (strnlen(node->d.name, node->d.nsize) != node->d.nsize) { 651 if (strnlen(node->d.name, node->d.nsize) != node->d.nsize) {
631 printk(KERN_WARNING "Name in dirent node at 0x%08x contains zeroes\n", ref_offset(raw)); 652 pr_warn("Name in dirent node at 0x%08x contains zeroes\n",
653 ref_offset(raw));
632 goto bail; 654 goto bail;
633 } 655 }
634 656
635 if (node->d.nsize) { 657 if (node->d.nsize) {
636 crc = crc32(0, node->d.name, node->d.nsize); 658 crc = crc32(0, node->d.name, node->d.nsize);
637 if (je32_to_cpu(node->d.name_crc) != crc) { 659 if (je32_to_cpu(node->d.name_crc) != crc) {
638 printk(KERN_WARNING "Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", 660 pr_warn("Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
639 ref_offset(raw), je32_to_cpu(node->d.name_crc), crc); 661 ref_offset(raw),
662 je32_to_cpu(node->d.name_crc), crc);
640 goto bail; 663 goto bail;
641 } 664 }
642 } 665 }
@@ -644,8 +667,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
644 default: 667 default:
645 /* If it's inode-less, we don't _know_ what it is. Just copy it intact */ 668 /* If it's inode-less, we don't _know_ what it is. Just copy it intact */
646 if (ic) { 669 if (ic) {
647 printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", 670 pr_warn("Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
648 ref_offset(raw), je16_to_cpu(node->u.nodetype)); 671 ref_offset(raw), je16_to_cpu(node->u.nodetype));
649 goto bail; 672 goto bail;
650 } 673 }
651 } 674 }
@@ -657,12 +680,13 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
657 ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node); 680 ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);
658 681
659 if (ret || (retlen != rawlen)) { 682 if (ret || (retlen != rawlen)) {
660 printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n", 683 pr_notice("Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
661 rawlen, phys_ofs, ret, retlen); 684 rawlen, phys_ofs, ret, retlen);
662 if (retlen) { 685 if (retlen) {
663 jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL); 686 jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL);
664 } else { 687 } else {
665 printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", phys_ofs); 688 pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
689 phys_ofs);
666 } 690 }
667 if (!retried) { 691 if (!retried) {
668 /* Try to reallocate space and retry */ 692 /* Try to reallocate space and retry */
@@ -671,7 +695,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
671 695
672 retried = 1; 696 retried = 1;
673 697
674 D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n")); 698 jffs2_dbg(1, "Retrying failed write of REF_PRISTINE node.\n");
675 699
676 jffs2_dbg_acct_sanity_check(c,jeb); 700 jffs2_dbg_acct_sanity_check(c,jeb);
677 jffs2_dbg_acct_paranoia_check(c, jeb); 701 jffs2_dbg_acct_paranoia_check(c, jeb);
@@ -681,14 +705,16 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
681 it is only an upper estimation */ 705 it is only an upper estimation */
682 706
683 if (!ret) { 707 if (!ret) {
684 D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs)); 708 jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n",
709 phys_ofs);
685 710
686 jffs2_dbg_acct_sanity_check(c,jeb); 711 jffs2_dbg_acct_sanity_check(c,jeb);
687 jffs2_dbg_acct_paranoia_check(c, jeb); 712 jffs2_dbg_acct_paranoia_check(c, jeb);
688 713
689 goto retry; 714 goto retry;
690 } 715 }
691 D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); 716 jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
717 ret);
692 } 718 }
693 719
694 if (!ret) 720 if (!ret)
@@ -698,7 +724,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
698 jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic); 724 jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic);
699 725
700 jffs2_mark_node_obsolete(c, raw); 726 jffs2_mark_node_obsolete(c, raw);
701 D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw))); 727 jffs2_dbg(1, "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n",
728 ref_offset(raw));
702 729
703 out_node: 730 out_node:
704 kfree(node); 731 kfree(node);
@@ -725,29 +752,32 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
725 /* For these, we don't actually need to read the old node */ 752 /* For these, we don't actually need to read the old node */
726 mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f)); 753 mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f));
727 mdata = (char *)&dev; 754 mdata = (char *)&dev;
728 D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen)); 755 jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
756 __func__, mdatalen);
729 } else if (S_ISLNK(JFFS2_F_I_MODE(f))) { 757 } else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
730 mdatalen = fn->size; 758 mdatalen = fn->size;
731 mdata = kmalloc(fn->size, GFP_KERNEL); 759 mdata = kmalloc(fn->size, GFP_KERNEL);
732 if (!mdata) { 760 if (!mdata) {
733 printk(KERN_WARNING "kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n"); 761 pr_warn("kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
734 return -ENOMEM; 762 return -ENOMEM;
735 } 763 }
736 ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen); 764 ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen);
737 if (ret) { 765 if (ret) {
738 printk(KERN_WARNING "read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", ret); 766 pr_warn("read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n",
767 ret);
739 kfree(mdata); 768 kfree(mdata);
740 return ret; 769 return ret;
741 } 770 }
742 D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bites of symlink target\n", mdatalen)); 771 jffs2_dbg(1, "%s(): Writing %d bites of symlink target\n",
772 __func__, mdatalen);
743 773
744 } 774 }
745 775
746 ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen, 776 ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen,
747 JFFS2_SUMMARY_INODE_SIZE); 777 JFFS2_SUMMARY_INODE_SIZE);
748 if (ret) { 778 if (ret) {
749 printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n", 779 pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
750 sizeof(ri)+ mdatalen, ret); 780 sizeof(ri) + mdatalen, ret);
751 goto out; 781 goto out;
752 } 782 }
753 783
@@ -784,7 +814,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
784 new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC); 814 new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC);
785 815
786 if (IS_ERR(new_fn)) { 816 if (IS_ERR(new_fn)) {
787 printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn)); 817 pr_warn("Error writing new dnode: %ld\n", PTR_ERR(new_fn));
788 ret = PTR_ERR(new_fn); 818 ret = PTR_ERR(new_fn);
789 goto out; 819 goto out;
790 } 820 }
@@ -827,14 +857,15 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er
827 ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen, 857 ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen,
828 JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize)); 858 JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize));
829 if (ret) { 859 if (ret) {
830 printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n", 860 pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
831 sizeof(rd)+rd.nsize, ret); 861 sizeof(rd)+rd.nsize, ret);
832 return ret; 862 return ret;
833 } 863 }
834 new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC); 864 new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC);
835 865
836 if (IS_ERR(new_fd)) { 866 if (IS_ERR(new_fd)) {
837 printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd)); 867 pr_warn("jffs2_write_dirent in garbage_collect_dirent failed: %ld\n",
868 PTR_ERR(new_fd));
838 return PTR_ERR(new_fd); 869 return PTR_ERR(new_fd);
839 } 870 }
840 jffs2_add_fd_to_list(c, new_fd, &f->dents); 871 jffs2_add_fd_to_list(c, new_fd, &f->dents);
@@ -887,19 +918,22 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
887 if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset)) 918 if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset))
888 continue; 919 continue;
889 920
890 D1(printk(KERN_DEBUG "Check potential deletion dirent at %08x\n", ref_offset(raw))); 921 jffs2_dbg(1, "Check potential deletion dirent at %08x\n",
922 ref_offset(raw));
891 923
892 /* This is an obsolete node belonging to the same directory, and it's of the right 924 /* This is an obsolete node belonging to the same directory, and it's of the right
893 length. We need to take a closer look...*/ 925 length. We need to take a closer look...*/
894 ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd); 926 ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd);
895 if (ret) { 927 if (ret) {
896 printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Read error (%d) reading obsolete node at %08x\n", ret, ref_offset(raw)); 928 pr_warn("%s(): Read error (%d) reading obsolete node at %08x\n",
929 __func__, ret, ref_offset(raw));
897 /* If we can't read it, we don't need to continue to obsolete it. Continue */ 930 /* If we can't read it, we don't need to continue to obsolete it. Continue */
898 continue; 931 continue;
899 } 932 }
900 if (retlen != rawlen) { 933 if (retlen != rawlen) {
901 printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %u) reading header from obsolete node at %08x\n", 934 pr_warn("%s(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
902 retlen, rawlen, ref_offset(raw)); 935 __func__, retlen, rawlen,
936 ref_offset(raw));
903 continue; 937 continue;
904 } 938 }
905 939
@@ -923,8 +957,9 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
923 a new deletion dirent to replace it */ 957 a new deletion dirent to replace it */
924 mutex_unlock(&c->erase_free_sem); 958 mutex_unlock(&c->erase_free_sem);
925 959
926 D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n", 960 jffs2_dbg(1, "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
927 ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino))); 961 ref_offset(fd->raw), fd->name,
962 ref_offset(raw), je32_to_cpu(rd->ino));
928 kfree(rd); 963 kfree(rd);
929 964
930 return jffs2_garbage_collect_dirent(c, jeb, f, fd); 965 return jffs2_garbage_collect_dirent(c, jeb, f, fd);
@@ -947,7 +982,8 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
947 fdp = &(*fdp)->next; 982 fdp = &(*fdp)->next;
948 } 983 }
949 if (!found) { 984 if (!found) {
950 printk(KERN_WARNING "Deletion dirent \"%s\" not found in list for ino #%u\n", fd->name, f->inocache->ino); 985 pr_warn("Deletion dirent \"%s\" not found in list for ino #%u\n",
986 fd->name, f->inocache->ino);
951 } 987 }
952 jffs2_mark_node_obsolete(c, fd->raw); 988 jffs2_mark_node_obsolete(c, fd->raw);
953 jffs2_free_full_dirent(fd); 989 jffs2_free_full_dirent(fd);
@@ -964,8 +1000,8 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
964 uint32_t alloclen, ilen; 1000 uint32_t alloclen, ilen;
965 int ret; 1001 int ret;
966 1002
967 D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", 1003 jffs2_dbg(1, "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
968 f->inocache->ino, start, end)); 1004 f->inocache->ino, start, end);
969 1005
970 memset(&ri, 0, sizeof(ri)); 1006 memset(&ri, 0, sizeof(ri));
971 1007
@@ -976,35 +1012,37 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
976 write it out again with the _same_ version as before */ 1012 write it out again with the _same_ version as before */
977 ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri); 1013 ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri);
978 if (readlen != sizeof(ri) || ret) { 1014 if (readlen != sizeof(ri) || ret) {
979 printk(KERN_WARNING "Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n", ret, readlen); 1015 pr_warn("Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n",
1016 ret, readlen);
980 goto fill; 1017 goto fill;
981 } 1018 }
982 if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) { 1019 if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) {
983 printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n", 1020 pr_warn("%s(): Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n",
984 ref_offset(fn->raw), 1021 __func__, ref_offset(fn->raw),
985 je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE); 1022 je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE);
986 return -EIO; 1023 return -EIO;
987 } 1024 }
988 if (je32_to_cpu(ri.totlen) != sizeof(ri)) { 1025 if (je32_to_cpu(ri.totlen) != sizeof(ri)) {
989 printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n", 1026 pr_warn("%s(): Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n",
990 ref_offset(fn->raw), 1027 __func__, ref_offset(fn->raw),
991 je32_to_cpu(ri.totlen), sizeof(ri)); 1028 je32_to_cpu(ri.totlen), sizeof(ri));
992 return -EIO; 1029 return -EIO;
993 } 1030 }
994 crc = crc32(0, &ri, sizeof(ri)-8); 1031 crc = crc32(0, &ri, sizeof(ri)-8);
995 if (crc != je32_to_cpu(ri.node_crc)) { 1032 if (crc != je32_to_cpu(ri.node_crc)) {
996 printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n", 1033 pr_warn("%s: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
997 ref_offset(fn->raw), 1034 __func__, ref_offset(fn->raw),
998 je32_to_cpu(ri.node_crc), crc); 1035 je32_to_cpu(ri.node_crc), crc);
999 /* FIXME: We could possibly deal with this by writing new holes for each frag */ 1036 /* FIXME: We could possibly deal with this by writing new holes for each frag */
1000 printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", 1037 pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
1001 start, end, f->inocache->ino); 1038 start, end, f->inocache->ino);
1002 goto fill; 1039 goto fill;
1003 } 1040 }
1004 if (ri.compr != JFFS2_COMPR_ZERO) { 1041 if (ri.compr != JFFS2_COMPR_ZERO) {
1005 printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw)); 1042 pr_warn("%s(): Node 0x%08x wasn't a hole node!\n",
1006 printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", 1043 __func__, ref_offset(fn->raw));
1007 start, end, f->inocache->ino); 1044 pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
1045 start, end, f->inocache->ino);
1008 goto fill; 1046 goto fill;
1009 } 1047 }
1010 } else { 1048 } else {
@@ -1043,14 +1081,14 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
1043 ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen, 1081 ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen,
1044 JFFS2_SUMMARY_INODE_SIZE); 1082 JFFS2_SUMMARY_INODE_SIZE);
1045 if (ret) { 1083 if (ret) {
1046 printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n", 1084 pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
1047 sizeof(ri), ret); 1085 sizeof(ri), ret);
1048 return ret; 1086 return ret;
1049 } 1087 }
1050 new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC); 1088 new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC);
1051 1089
1052 if (IS_ERR(new_fn)) { 1090 if (IS_ERR(new_fn)) {
1053 printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn)); 1091 pr_warn("Error writing new hole node: %ld\n", PTR_ERR(new_fn));
1054 return PTR_ERR(new_fn); 1092 return PTR_ERR(new_fn);
1055 } 1093 }
1056 if (je32_to_cpu(ri.version) == f->highest_version) { 1094 if (je32_to_cpu(ri.version) == f->highest_version) {
@@ -1070,9 +1108,9 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
1070 * above.) 1108 * above.)
1071 */ 1109 */
1072 D1(if(unlikely(fn->frags <= 1)) { 1110 D1(if(unlikely(fn->frags <= 1)) {
1073 printk(KERN_WARNING "jffs2_garbage_collect_hole: Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n", 1111 pr_warn("%s(): Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n",
1074 fn->frags, je32_to_cpu(ri.version), f->highest_version, 1112 __func__, fn->frags, je32_to_cpu(ri.version),
1075 je32_to_cpu(ri.ino)); 1113 f->highest_version, je32_to_cpu(ri.ino));
1076 }); 1114 });
1077 1115
1078 /* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */ 1116 /* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */
@@ -1089,11 +1127,11 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
1089 } 1127 }
1090 } 1128 }
1091 if (fn->frags) { 1129 if (fn->frags) {
1092 printk(KERN_WARNING "jffs2_garbage_collect_hole: Old node still has frags!\n"); 1130 pr_warn("%s(): Old node still has frags!\n", __func__);
1093 BUG(); 1131 BUG();
1094 } 1132 }
1095 if (!new_fn->frags) { 1133 if (!new_fn->frags) {
1096 printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n"); 1134 pr_warn("%s(): New node has no frags!\n", __func__);
1097 BUG(); 1135 BUG();
1098 } 1136 }
1099 1137
@@ -1117,8 +1155,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
1117 1155
1118 memset(&ri, 0, sizeof(ri)); 1156 memset(&ri, 0, sizeof(ri));
1119 1157
1120 D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n", 1158 jffs2_dbg(1, "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
1121 f->inocache->ino, start, end)); 1159 f->inocache->ino, start, end);
1122 1160
1123 orig_end = end; 1161 orig_end = end;
1124 orig_start = start; 1162 orig_start = start;
@@ -1149,15 +1187,15 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
1149 /* If the previous frag doesn't even reach the beginning, there's 1187 /* If the previous frag doesn't even reach the beginning, there's
1150 excessive fragmentation. Just merge. */ 1188 excessive fragmentation. Just merge. */
1151 if (frag->ofs > min) { 1189 if (frag->ofs > min) {
1152 D1(printk(KERN_DEBUG "Expanding down to cover partial frag (0x%x-0x%x)\n", 1190 jffs2_dbg(1, "Expanding down to cover partial frag (0x%x-0x%x)\n",
1153 frag->ofs, frag->ofs+frag->size)); 1191 frag->ofs, frag->ofs+frag->size);
1154 start = frag->ofs; 1192 start = frag->ofs;
1155 continue; 1193 continue;
1156 } 1194 }
1157 /* OK. This frag holds the first byte of the page. */ 1195 /* OK. This frag holds the first byte of the page. */
1158 if (!frag->node || !frag->node->raw) { 1196 if (!frag->node || !frag->node->raw) {
1159 D1(printk(KERN_DEBUG "First frag in page is hole (0x%x-0x%x). Not expanding down.\n", 1197 jffs2_dbg(1, "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
1160 frag->ofs, frag->ofs+frag->size)); 1198 frag->ofs, frag->ofs+frag->size);
1161 break; 1199 break;
1162 } else { 1200 } else {
1163 1201
@@ -1171,19 +1209,25 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
1171 jeb = &c->blocks[raw->flash_offset / c->sector_size]; 1209 jeb = &c->blocks[raw->flash_offset / c->sector_size];
1172 1210
1173 if (jeb == c->gcblock) { 1211 if (jeb == c->gcblock) {
1174 D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n", 1212 jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
1175 frag->ofs, frag->ofs+frag->size, ref_offset(raw))); 1213 frag->ofs,
1214 frag->ofs + frag->size,
1215 ref_offset(raw));
1176 start = frag->ofs; 1216 start = frag->ofs;
1177 break; 1217 break;
1178 } 1218 }
1179 if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) { 1219 if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
1180 D1(printk(KERN_DEBUG "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n", 1220 jffs2_dbg(1, "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
1181 frag->ofs, frag->ofs+frag->size, jeb->offset)); 1221 frag->ofs,
1222 frag->ofs + frag->size,
1223 jeb->offset);
1182 break; 1224 break;
1183 } 1225 }
1184 1226
1185 D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n", 1227 jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
1186 frag->ofs, frag->ofs+frag->size, jeb->offset)); 1228 frag->ofs,
1229 frag->ofs + frag->size,
1230 jeb->offset);
1187 start = frag->ofs; 1231 start = frag->ofs;
1188 break; 1232 break;
1189 } 1233 }
@@ -1199,15 +1243,15 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
1199 /* If the previous frag doesn't even reach the beginning, there's lots 1243 /* If the previous frag doesn't even reach the beginning, there's lots
1200 of fragmentation. Just merge. */ 1244 of fragmentation. Just merge. */
1201 if (frag->ofs+frag->size < max) { 1245 if (frag->ofs+frag->size < max) {
1202 D1(printk(KERN_DEBUG "Expanding up to cover partial frag (0x%x-0x%x)\n", 1246 jffs2_dbg(1, "Expanding up to cover partial frag (0x%x-0x%x)\n",
1203 frag->ofs, frag->ofs+frag->size)); 1247 frag->ofs, frag->ofs+frag->size);
1204 end = frag->ofs + frag->size; 1248 end = frag->ofs + frag->size;
1205 continue; 1249 continue;
1206 } 1250 }
1207 1251
1208 if (!frag->node || !frag->node->raw) { 1252 if (!frag->node || !frag->node->raw) {
1209 D1(printk(KERN_DEBUG "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n", 1253 jffs2_dbg(1, "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
1210 frag->ofs, frag->ofs+frag->size)); 1254 frag->ofs, frag->ofs+frag->size);
1211 break; 1255 break;
1212 } else { 1256 } else {
1213 1257
@@ -1221,25 +1265,31 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
1221 jeb = &c->blocks[raw->flash_offset / c->sector_size]; 1265 jeb = &c->blocks[raw->flash_offset / c->sector_size];
1222 1266
1223 if (jeb == c->gcblock) { 1267 if (jeb == c->gcblock) {
1224 D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n", 1268 jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
1225 frag->ofs, frag->ofs+frag->size, ref_offset(raw))); 1269 frag->ofs,
1270 frag->ofs + frag->size,
1271 ref_offset(raw));
1226 end = frag->ofs + frag->size; 1272 end = frag->ofs + frag->size;
1227 break; 1273 break;
1228 } 1274 }
1229 if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) { 1275 if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
1230 D1(printk(KERN_DEBUG "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n", 1276 jffs2_dbg(1, "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
1231 frag->ofs, frag->ofs+frag->size, jeb->offset)); 1277 frag->ofs,
1278 frag->ofs + frag->size,
1279 jeb->offset);
1232 break; 1280 break;
1233 } 1281 }
1234 1282
1235 D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n", 1283 jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
1236 frag->ofs, frag->ofs+frag->size, jeb->offset)); 1284 frag->ofs,
1285 frag->ofs + frag->size,
1286 jeb->offset);
1237 end = frag->ofs + frag->size; 1287 end = frag->ofs + frag->size;
1238 break; 1288 break;
1239 } 1289 }
1240 } 1290 }
1241 D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n", 1291 jffs2_dbg(1, "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
1242 orig_start, orig_end, start, end)); 1292 orig_start, orig_end, start, end);
1243 1293
1244 D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size)); 1294 D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size));
1245 BUG_ON(end < orig_end); 1295 BUG_ON(end < orig_end);
@@ -1256,7 +1306,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
1256 pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg); 1306 pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
1257 1307
1258 if (IS_ERR(pg_ptr)) { 1308 if (IS_ERR(pg_ptr)) {
1259 printk(KERN_WARNING "read_cache_page() returned error: %ld\n", PTR_ERR(pg_ptr)); 1309 pr_warn("read_cache_page() returned error: %ld\n",
1310 PTR_ERR(pg_ptr));
1260 return PTR_ERR(pg_ptr); 1311 return PTR_ERR(pg_ptr);
1261 } 1312 }
1262 1313
@@ -1270,8 +1321,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
1270 &alloclen, JFFS2_SUMMARY_INODE_SIZE); 1321 &alloclen, JFFS2_SUMMARY_INODE_SIZE);
1271 1322
1272 if (ret) { 1323 if (ret) {
1273 printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n", 1324 pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
1274 sizeof(ri)+ JFFS2_MIN_DATA_LEN, ret); 1325 sizeof(ri) + JFFS2_MIN_DATA_LEN, ret);
1275 break; 1326 break;
1276 } 1327 }
1277 cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset); 1328 cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
@@ -1308,7 +1359,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
1308 jffs2_free_comprbuf(comprbuf, writebuf); 1359 jffs2_free_comprbuf(comprbuf, writebuf);
1309 1360
1310 if (IS_ERR(new_fn)) { 1361 if (IS_ERR(new_fn)) {
1311 printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn)); 1362 pr_warn("Error writing new dnode: %ld\n",
1363 PTR_ERR(new_fn));
1312 ret = PTR_ERR(new_fn); 1364 ret = PTR_ERR(new_fn);
1313 break; 1365 break;
1314 } 1366 }
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index c082868910f2..4f47aa24b556 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/kernel.h> 14#include <linux/kernel.h>
13#include <linux/slab.h> 15#include <linux/slab.h>
14#include <linux/init.h> 16#include <linux/init.h>
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 5e03233c2363..975a1f562c10 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/kernel.h> 14#include <linux/kernel.h>
13#include <linux/sched.h> 15#include <linux/sched.h>
14#include <linux/fs.h> 16#include <linux/fs.h>
@@ -687,8 +689,8 @@ int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb
687 if (!size) 689 if (!size)
688 return 0; 690 return 0;
689 if (unlikely(size > jeb->free_size)) { 691 if (unlikely(size > jeb->free_size)) {
690 printk(KERN_CRIT "Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n", 692 pr_crit("Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n",
691 size, jeb->free_size, jeb->wasted_size); 693 size, jeb->free_size, jeb->wasted_size);
692 BUG(); 694 BUG();
693 } 695 }
694 /* REF_EMPTY_NODE is !obsolete, so that works OK */ 696 /* REF_EMPTY_NODE is !obsolete, so that works OK */
@@ -726,8 +728,10 @@ static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
726 728
727 /* Last node in block. Use free_space */ 729 /* Last node in block. Use free_space */
728 if (unlikely(ref != jeb->last_node)) { 730 if (unlikely(ref != jeb->last_node)) {
729 printk(KERN_CRIT "ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n", 731 pr_crit("ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n",
730 ref, ref_offset(ref), jeb->last_node, jeb->last_node?ref_offset(jeb->last_node):0); 732 ref, ref_offset(ref), jeb->last_node,
733 jeb->last_node ?
734 ref_offset(jeb->last_node) : 0);
731 BUG(); 735 BUG();
732 } 736 }
733 ref_end = jeb->offset + c->sector_size - jeb->free_size; 737 ref_end = jeb->offset + c->sector_size - jeb->free_size;
@@ -747,16 +751,20 @@ uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *je
747 if (!jeb) 751 if (!jeb)
748 jeb = &c->blocks[ref->flash_offset / c->sector_size]; 752 jeb = &c->blocks[ref->flash_offset / c->sector_size];
749 753
750 printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n", 754 pr_crit("Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
751 ref, ref_offset(ref), ref_offset(ref)+ref->__totlen, 755 ref, ref_offset(ref), ref_offset(ref) + ref->__totlen,
752 ret, ref->__totlen); 756 ret, ref->__totlen);
753 if (ref_next(ref)) { 757 if (ref_next(ref)) {
754 printk(KERN_CRIT "next %p (0x%08x-0x%08x)\n", ref_next(ref), ref_offset(ref_next(ref)), 758 pr_crit("next %p (0x%08x-0x%08x)\n",
755 ref_offset(ref_next(ref))+ref->__totlen); 759 ref_next(ref), ref_offset(ref_next(ref)),
760 ref_offset(ref_next(ref)) + ref->__totlen);
756 } else 761 } else
757 printk(KERN_CRIT "No next ref. jeb->last_node is %p\n", jeb->last_node); 762 pr_crit("No next ref. jeb->last_node is %p\n",
763 jeb->last_node);
758 764
759 printk(KERN_CRIT "jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n", jeb->wasted_size, jeb->dirty_size, jeb->used_size, jeb->free_size); 765 pr_crit("jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n",
766 jeb->wasted_size, jeb->dirty_size, jeb->used_size,
767 jeb->free_size);
760 768
761#if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS) 769#if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS)
762 __jffs2_dbg_dump_node_refs_nolock(c, jeb); 770 __jffs2_dbg_dump_node_refs_nolock(c, jeb);
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 694aa5b03505..6784d1e7a7eb 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/kernel.h> 14#include <linux/kernel.h>
13#include <linux/mtd/mtd.h> 15#include <linux/mtd/mtd.h>
14#include <linux/compiler.h> 16#include <linux/compiler.h>
@@ -46,10 +48,10 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
46 /* align it */ 48 /* align it */
47 minsize = PAD(minsize); 49 minsize = PAD(minsize);
48 50
49 D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize)); 51 jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
50 mutex_lock(&c->alloc_sem); 52 mutex_lock(&c->alloc_sem);
51 53
52 D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n")); 54 jffs2_dbg(1, "%s(): alloc sem got\n", __func__);
53 55
54 spin_lock(&c->erase_completion_lock); 56 spin_lock(&c->erase_completion_lock);
55 57
@@ -73,11 +75,13 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
73 dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size; 75 dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
74 if (dirty < c->nospc_dirty_size) { 76 if (dirty < c->nospc_dirty_size) {
75 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { 77 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
76 D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n")); 78 jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
79 __func__);
77 break; 80 break;
78 } 81 }
79 D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n", 82 jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
80 dirty, c->unchecked_size, c->sector_size)); 83 dirty, c->unchecked_size,
84 c->sector_size);
81 85
82 spin_unlock(&c->erase_completion_lock); 86 spin_unlock(&c->erase_completion_lock);
83 mutex_unlock(&c->alloc_sem); 87 mutex_unlock(&c->alloc_sem);
@@ -96,12 +100,13 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
96 avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size; 100 avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
97 if ( (avail / c->sector_size) <= blocksneeded) { 101 if ( (avail / c->sector_size) <= blocksneeded) {
98 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { 102 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
99 D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n")); 103 jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
104 __func__);
100 break; 105 break;
101 } 106 }
102 107
103 D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n", 108 jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
104 avail, blocksneeded * c->sector_size)); 109 avail, blocksneeded * c->sector_size);
105 spin_unlock(&c->erase_completion_lock); 110 spin_unlock(&c->erase_completion_lock);
106 mutex_unlock(&c->alloc_sem); 111 mutex_unlock(&c->alloc_sem);
107 return -ENOSPC; 112 return -ENOSPC;
@@ -109,9 +114,14 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
109 114
110 mutex_unlock(&c->alloc_sem); 115 mutex_unlock(&c->alloc_sem);
111 116
112 D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n", 117 jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
113 c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size, 118 c->nr_free_blocks, c->nr_erasing_blocks,
114 c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size)); 119 c->free_size, c->dirty_size, c->wasted_size,
120 c->used_size, c->erasing_size, c->bad_size,
121 c->free_size + c->dirty_size +
122 c->wasted_size + c->used_size +
123 c->erasing_size + c->bad_size,
124 c->flash_size);
115 spin_unlock(&c->erase_completion_lock); 125 spin_unlock(&c->erase_completion_lock);
116 126
117 ret = jffs2_garbage_collect_pass(c); 127 ret = jffs2_garbage_collect_pass(c);
@@ -124,7 +134,8 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
124 DECLARE_WAITQUEUE(wait, current); 134 DECLARE_WAITQUEUE(wait, current);
125 set_current_state(TASK_UNINTERRUPTIBLE); 135 set_current_state(TASK_UNINTERRUPTIBLE);
126 add_wait_queue(&c->erase_wait, &wait); 136 add_wait_queue(&c->erase_wait, &wait);
127 D1(printk(KERN_DEBUG "%s waiting for erase to complete\n", __func__)); 137 jffs2_dbg(1, "%s waiting for erase to complete\n",
138 __func__);
128 spin_unlock(&c->erase_completion_lock); 139 spin_unlock(&c->erase_completion_lock);
129 140
130 schedule(); 141 schedule();
@@ -144,7 +155,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
144 155
145 ret = jffs2_do_reserve_space(c, minsize, len, sumsize); 156 ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
146 if (ret) { 157 if (ret) {
147 D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret)); 158 jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
148 } 159 }
149 } 160 }
150 spin_unlock(&c->erase_completion_lock); 161 spin_unlock(&c->erase_completion_lock);
@@ -161,13 +172,14 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
161 int ret = -EAGAIN; 172 int ret = -EAGAIN;
162 minsize = PAD(minsize); 173 minsize = PAD(minsize);
163 174
164 D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize)); 175 jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
165 176
166 spin_lock(&c->erase_completion_lock); 177 spin_lock(&c->erase_completion_lock);
167 while(ret == -EAGAIN) { 178 while(ret == -EAGAIN) {
168 ret = jffs2_do_reserve_space(c, minsize, len, sumsize); 179 ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
169 if (ret) { 180 if (ret) {
170 D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret)); 181 jffs2_dbg(1, "%s(): looping, ret is %d\n",
182 __func__, ret);
171 } 183 }
172 } 184 }
173 spin_unlock(&c->erase_completion_lock); 185 spin_unlock(&c->erase_completion_lock);
@@ -184,8 +196,8 @@ static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblo
184{ 196{
185 197
186 if (c->nextblock == NULL) { 198 if (c->nextblock == NULL) {
187 D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n", 199 jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
188 jeb->offset)); 200 __func__, jeb->offset);
189 return; 201 return;
190 } 202 }
191 /* Check, if we have a dirty block now, or if it was dirty already */ 203 /* Check, if we have a dirty block now, or if it was dirty already */
@@ -195,17 +207,20 @@ static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblo
195 jeb->dirty_size += jeb->wasted_size; 207 jeb->dirty_size += jeb->wasted_size;
196 jeb->wasted_size = 0; 208 jeb->wasted_size = 0;
197 if (VERYDIRTY(c, jeb->dirty_size)) { 209 if (VERYDIRTY(c, jeb->dirty_size)) {
198 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", 210 jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
199 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); 211 jeb->offset, jeb->free_size, jeb->dirty_size,
212 jeb->used_size);
200 list_add_tail(&jeb->list, &c->very_dirty_list); 213 list_add_tail(&jeb->list, &c->very_dirty_list);
201 } else { 214 } else {
202 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", 215 jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
203 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); 216 jeb->offset, jeb->free_size, jeb->dirty_size,
217 jeb->used_size);
204 list_add_tail(&jeb->list, &c->dirty_list); 218 list_add_tail(&jeb->list, &c->dirty_list);
205 } 219 }
206 } else { 220 } else {
207 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", 221 jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
208 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); 222 jeb->offset, jeb->free_size, jeb->dirty_size,
223 jeb->used_size);
209 list_add_tail(&jeb->list, &c->clean_list); 224 list_add_tail(&jeb->list, &c->clean_list);
210 } 225 }
211 c->nextblock = NULL; 226 c->nextblock = NULL;
@@ -230,13 +245,14 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
230 list_move_tail(&ejeb->list, &c->erase_pending_list); 245 list_move_tail(&ejeb->list, &c->erase_pending_list);
231 c->nr_erasing_blocks++; 246 c->nr_erasing_blocks++;
232 jffs2_garbage_collect_trigger(c); 247 jffs2_garbage_collect_trigger(c);
233 D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n", 248 jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
234 ejeb->offset)); 249 __func__, ejeb->offset);
235 } 250 }
236 251
237 if (!c->nr_erasing_blocks && 252 if (!c->nr_erasing_blocks &&
238 !list_empty(&c->erasable_pending_wbuf_list)) { 253 !list_empty(&c->erasable_pending_wbuf_list)) {
239 D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n")); 254 jffs2_dbg(1, "%s(): Flushing write buffer\n",
255 __func__);
240 /* c->nextblock is NULL, no update to c->nextblock allowed */ 256 /* c->nextblock is NULL, no update to c->nextblock allowed */
241 spin_unlock(&c->erase_completion_lock); 257 spin_unlock(&c->erase_completion_lock);
242 jffs2_flush_wbuf_pad(c); 258 jffs2_flush_wbuf_pad(c);
@@ -248,9 +264,11 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
248 if (!c->nr_erasing_blocks) { 264 if (!c->nr_erasing_blocks) {
249 /* Ouch. We're in GC, or we wouldn't have got here. 265 /* Ouch. We're in GC, or we wouldn't have got here.
250 And there's no space left. At all. */ 266 And there's no space left. At all. */
251 printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", 267 pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
252 c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", 268 c->nr_erasing_blocks, c->nr_free_blocks,
253 list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no"); 269 list_empty(&c->erasable_list) ? "yes" : "no",
270 list_empty(&c->erasing_list) ? "yes" : "no",
271 list_empty(&c->erase_pending_list) ? "yes" : "no");
254 return -ENOSPC; 272 return -ENOSPC;
255 } 273 }
256 274
@@ -278,7 +296,8 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
278 c->wbuf_ofs = 0xffffffff; 296 c->wbuf_ofs = 0xffffffff;
279#endif 297#endif
280 298
281 D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset)); 299 jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
300 __func__, c->nextblock->offset);
282 301
283 return 0; 302 return 0;
284} 303}
@@ -345,7 +364,8 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
345 364
346 if (jffs2_wbuf_dirty(c)) { 365 if (jffs2_wbuf_dirty(c)) {
347 spin_unlock(&c->erase_completion_lock); 366 spin_unlock(&c->erase_completion_lock);
348 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n")); 367 jffs2_dbg(1, "%s(): Flushing write buffer\n",
368 __func__);
349 jffs2_flush_wbuf_pad(c); 369 jffs2_flush_wbuf_pad(c);
350 spin_lock(&c->erase_completion_lock); 370 spin_lock(&c->erase_completion_lock);
351 jeb = c->nextblock; 371 jeb = c->nextblock;
@@ -387,7 +407,8 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
387 jeb = c->nextblock; 407 jeb = c->nextblock;
388 408
389 if (jeb->free_size != c->sector_size - c->cleanmarker_size) { 409 if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
390 printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size); 410 pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
411 jeb->offset, jeb->free_size);
391 goto restart; 412 goto restart;
392 } 413 }
393 } 414 }
@@ -408,8 +429,9 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
408 spin_lock(&c->erase_completion_lock); 429 spin_lock(&c->erase_completion_lock);
409 } 430 }
410 431
411 D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", 432 jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
412 *len, jeb->offset + (c->sector_size - jeb->free_size))); 433 __func__,
434 *len, jeb->offset + (c->sector_size - jeb->free_size));
413 return 0; 435 return 0;
414} 436}
415 437
@@ -434,20 +456,22 @@ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
434 456
435 jeb = &c->blocks[ofs / c->sector_size]; 457 jeb = &c->blocks[ofs / c->sector_size];
436 458
437 D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", 459 jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
438 ofs & ~3, ofs & 3, len)); 460 __func__, ofs & ~3, ofs & 3, len);
439#if 1 461#if 1
440 /* Allow non-obsolete nodes only to be added at the end of c->nextblock, 462 /* Allow non-obsolete nodes only to be added at the end of c->nextblock,
441 if c->nextblock is set. Note that wbuf.c will file obsolete nodes 463 if c->nextblock is set. Note that wbuf.c will file obsolete nodes
442 even after refiling c->nextblock */ 464 even after refiling c->nextblock */
443 if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE)) 465 if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
444 && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) { 466 && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
445 printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3); 467 pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
468 ofs & ~3, ofs & 3);
446 if (c->nextblock) 469 if (c->nextblock)
447 printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset); 470 pr_warn("nextblock 0x%08x", c->nextblock->offset);
448 else 471 else
449 printk(KERN_WARNING "No nextblock"); 472 pr_warn("No nextblock");
450 printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size)); 473 pr_cont(", expected at %08x\n",
474 jeb->offset + (c->sector_size - jeb->free_size));
451 return ERR_PTR(-EINVAL); 475 return ERR_PTR(-EINVAL);
452 } 476 }
453#endif 477#endif
@@ -457,8 +481,9 @@ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
457 481
458 if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) { 482 if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
459 /* If it lives on the dirty_list, jffs2_reserve_space will put it there */ 483 /* If it lives on the dirty_list, jffs2_reserve_space will put it there */
460 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", 484 jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
461 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); 485 jeb->offset, jeb->free_size, jeb->dirty_size,
486 jeb->used_size);
462 if (jffs2_wbuf_dirty(c)) { 487 if (jffs2_wbuf_dirty(c)) {
463 /* Flush the last write in the block if it's outstanding */ 488 /* Flush the last write in the block if it's outstanding */
464 spin_unlock(&c->erase_completion_lock); 489 spin_unlock(&c->erase_completion_lock);
@@ -480,7 +505,7 @@ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
480 505
481void jffs2_complete_reservation(struct jffs2_sb_info *c) 506void jffs2_complete_reservation(struct jffs2_sb_info *c)
482{ 507{
483 D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n")); 508 jffs2_dbg(1, "jffs2_complete_reservation()\n");
484 spin_lock(&c->erase_completion_lock); 509 spin_lock(&c->erase_completion_lock);
485 jffs2_garbage_collect_trigger(c); 510 jffs2_garbage_collect_trigger(c);
486 spin_unlock(&c->erase_completion_lock); 511 spin_unlock(&c->erase_completion_lock);
@@ -493,7 +518,7 @@ static inline int on_list(struct list_head *obj, struct list_head *head)
493 518
494 list_for_each(this, head) { 519 list_for_each(this, head) {
495 if (this == obj) { 520 if (this == obj) {
496 D1(printk("%p is on list at %p\n", obj, head)); 521 jffs2_dbg(1, "%p is on list at %p\n", obj, head);
497 return 1; 522 return 1;
498 523
499 } 524 }
@@ -511,16 +536,18 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
511 uint32_t freed_len; 536 uint32_t freed_len;
512 537
513 if(unlikely(!ref)) { 538 if(unlikely(!ref)) {
514 printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n"); 539 pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
515 return; 540 return;
516 } 541 }
517 if (ref_obsolete(ref)) { 542 if (ref_obsolete(ref)) {
518 D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref))); 543 jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
544 __func__, ref_offset(ref));
519 return; 545 return;
520 } 546 }
521 blocknr = ref->flash_offset / c->sector_size; 547 blocknr = ref->flash_offset / c->sector_size;
522 if (blocknr >= c->nr_blocks) { 548 if (blocknr >= c->nr_blocks) {
523 printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset); 549 pr_notice("raw node at 0x%08x is off the end of device!\n",
550 ref->flash_offset);
524 BUG(); 551 BUG();
525 } 552 }
526 jeb = &c->blocks[blocknr]; 553 jeb = &c->blocks[blocknr];
@@ -542,27 +569,31 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
542 569
543 if (ref_flags(ref) == REF_UNCHECKED) { 570 if (ref_flags(ref) == REF_UNCHECKED) {
544 D1(if (unlikely(jeb->unchecked_size < freed_len)) { 571 D1(if (unlikely(jeb->unchecked_size < freed_len)) {
545 printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n", 572 pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
546 freed_len, blocknr, ref->flash_offset, jeb->used_size); 573 freed_len, blocknr,
574 ref->flash_offset, jeb->used_size);
547 BUG(); 575 BUG();
548 }) 576 })
549 D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len)); 577 jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
578 ref_offset(ref), freed_len);
550 jeb->unchecked_size -= freed_len; 579 jeb->unchecked_size -= freed_len;
551 c->unchecked_size -= freed_len; 580 c->unchecked_size -= freed_len;
552 } else { 581 } else {
553 D1(if (unlikely(jeb->used_size < freed_len)) { 582 D1(if (unlikely(jeb->used_size < freed_len)) {
554 printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n", 583 pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
555 freed_len, blocknr, ref->flash_offset, jeb->used_size); 584 freed_len, blocknr,
585 ref->flash_offset, jeb->used_size);
556 BUG(); 586 BUG();
557 }) 587 })
558 D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len)); 588 jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
589 ref_offset(ref), freed_len);
559 jeb->used_size -= freed_len; 590 jeb->used_size -= freed_len;
560 c->used_size -= freed_len; 591 c->used_size -= freed_len;
561 } 592 }
562 593
563 // Take care, that wasted size is taken into concern 594 // Take care, that wasted size is taken into concern
564 if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) { 595 if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
565 D1(printk("Dirtying\n")); 596 jffs2_dbg(1, "Dirtying\n");
566 addedsize = freed_len; 597 addedsize = freed_len;
567 jeb->dirty_size += freed_len; 598 jeb->dirty_size += freed_len;
568 c->dirty_size += freed_len; 599 c->dirty_size += freed_len;
@@ -570,12 +601,12 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
570 /* Convert wasted space to dirty, if not a bad block */ 601 /* Convert wasted space to dirty, if not a bad block */
571 if (jeb->wasted_size) { 602 if (jeb->wasted_size) {
572 if (on_list(&jeb->list, &c->bad_used_list)) { 603 if (on_list(&jeb->list, &c->bad_used_list)) {
573 D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n", 604 jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
574 jeb->offset)); 605 jeb->offset);
575 addedsize = 0; /* To fool the refiling code later */ 606 addedsize = 0; /* To fool the refiling code later */
576 } else { 607 } else {
577 D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n", 608 jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
578 jeb->wasted_size, jeb->offset)); 609 jeb->wasted_size, jeb->offset);
579 addedsize += jeb->wasted_size; 610 addedsize += jeb->wasted_size;
580 jeb->dirty_size += jeb->wasted_size; 611 jeb->dirty_size += jeb->wasted_size;
581 c->dirty_size += jeb->wasted_size; 612 c->dirty_size += jeb->wasted_size;
@@ -584,7 +615,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
584 } 615 }
585 } 616 }
586 } else { 617 } else {
587 D1(printk("Wasting\n")); 618 jffs2_dbg(1, "Wasting\n");
588 addedsize = 0; 619 addedsize = 0;
589 jeb->wasted_size += freed_len; 620 jeb->wasted_size += freed_len;
590 c->wasted_size += freed_len; 621 c->wasted_size += freed_len;
@@ -606,50 +637,57 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
606 } 637 }
607 638
608 if (jeb == c->nextblock) { 639 if (jeb == c->nextblock) {
609 D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset)); 640 jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
641 jeb->offset);
610 } else if (!jeb->used_size && !jeb->unchecked_size) { 642 } else if (!jeb->used_size && !jeb->unchecked_size) {
611 if (jeb == c->gcblock) { 643 if (jeb == c->gcblock) {
612 D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset)); 644 jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
645 jeb->offset);
613 c->gcblock = NULL; 646 c->gcblock = NULL;
614 } else { 647 } else {
615 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset)); 648 jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
649 jeb->offset);
616 list_del(&jeb->list); 650 list_del(&jeb->list);
617 } 651 }
618 if (jffs2_wbuf_dirty(c)) { 652 if (jffs2_wbuf_dirty(c)) {
619 D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n")); 653 jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
620 list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list); 654 list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
621 } else { 655 } else {
622 if (jiffies & 127) { 656 if (jiffies & 127) {
623 /* Most of the time, we just erase it immediately. Otherwise we 657 /* Most of the time, we just erase it immediately. Otherwise we
624 spend ages scanning it on mount, etc. */ 658 spend ages scanning it on mount, etc. */
625 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n")); 659 jffs2_dbg(1, "...and adding to erase_pending_list\n");
626 list_add_tail(&jeb->list, &c->erase_pending_list); 660 list_add_tail(&jeb->list, &c->erase_pending_list);
627 c->nr_erasing_blocks++; 661 c->nr_erasing_blocks++;
628 jffs2_garbage_collect_trigger(c); 662 jffs2_garbage_collect_trigger(c);
629 } else { 663 } else {
630 /* Sometimes, however, we leave it elsewhere so it doesn't get 664 /* Sometimes, however, we leave it elsewhere so it doesn't get
631 immediately reused, and we spread the load a bit. */ 665 immediately reused, and we spread the load a bit. */
632 D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); 666 jffs2_dbg(1, "...and adding to erasable_list\n");
633 list_add_tail(&jeb->list, &c->erasable_list); 667 list_add_tail(&jeb->list, &c->erasable_list);
634 } 668 }
635 } 669 }
636 D1(printk(KERN_DEBUG "Done OK\n")); 670 jffs2_dbg(1, "Done OK\n");
637 } else if (jeb == c->gcblock) { 671 } else if (jeb == c->gcblock) {
638 D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset)); 672 jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
673 jeb->offset);
639 } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) { 674 } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
640 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset)); 675 jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
676 jeb->offset);
641 list_del(&jeb->list); 677 list_del(&jeb->list);
642 D1(printk(KERN_DEBUG "...and adding to dirty_list\n")); 678 jffs2_dbg(1, "...and adding to dirty_list\n");
643 list_add_tail(&jeb->list, &c->dirty_list); 679 list_add_tail(&jeb->list, &c->dirty_list);
644 } else if (VERYDIRTY(c, jeb->dirty_size) && 680 } else if (VERYDIRTY(c, jeb->dirty_size) &&
645 !VERYDIRTY(c, jeb->dirty_size - addedsize)) { 681 !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
646 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset)); 682 jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
683 jeb->offset);
647 list_del(&jeb->list); 684 list_del(&jeb->list);
648 D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n")); 685 jffs2_dbg(1, "...and adding to very_dirty_list\n");
649 list_add_tail(&jeb->list, &c->very_dirty_list); 686 list_add_tail(&jeb->list, &c->very_dirty_list);
650 } else { 687 } else {
651 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n", 688 jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
652 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); 689 jeb->offset, jeb->free_size, jeb->dirty_size,
690 jeb->used_size);
653 } 691 }
654 692
655 spin_unlock(&c->erase_completion_lock); 693 spin_unlock(&c->erase_completion_lock);
@@ -665,33 +703,40 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
665 the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet 703 the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
666 by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */ 704 by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */
667 705
668 D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref))); 706 jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
707 ref_offset(ref));
669 ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); 708 ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
670 if (ret) { 709 if (ret) {
671 printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret); 710 pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
711 ref_offset(ref), ret);
672 goto out_erase_sem; 712 goto out_erase_sem;
673 } 713 }
674 if (retlen != sizeof(n)) { 714 if (retlen != sizeof(n)) {
675 printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen); 715 pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
716 ref_offset(ref), retlen);
676 goto out_erase_sem; 717 goto out_erase_sem;
677 } 718 }
678 if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) { 719 if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
679 printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len); 720 pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
721 je32_to_cpu(n.totlen), freed_len);
680 goto out_erase_sem; 722 goto out_erase_sem;
681 } 723 }
682 if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) { 724 if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
683 D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype))); 725 jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
726 ref_offset(ref), je16_to_cpu(n.nodetype));
684 goto out_erase_sem; 727 goto out_erase_sem;
685 } 728 }
686 /* XXX FIXME: This is ugly now */ 729 /* XXX FIXME: This is ugly now */
687 n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE); 730 n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
688 ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); 731 ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
689 if (ret) { 732 if (ret) {
690 printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret); 733 pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
734 ref_offset(ref), ret);
691 goto out_erase_sem; 735 goto out_erase_sem;
692 } 736 }
693 if (retlen != sizeof(n)) { 737 if (retlen != sizeof(n)) {
694 printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen); 738 pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
739 ref_offset(ref), retlen);
695 goto out_erase_sem; 740 goto out_erase_sem;
696 } 741 }
697 742
@@ -751,8 +796,8 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c)
751 return 1; 796 return 1;
752 797
753 if (c->unchecked_size) { 798 if (c->unchecked_size) {
754 D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n", 799 jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
755 c->unchecked_size, c->checked_ino)); 800 c->unchecked_size, c->checked_ino);
756 return 1; 801 return 1;
757 } 802 }
758 803
@@ -780,8 +825,9 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c)
780 } 825 }
781 } 826 }
782 827
783 D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n", 828 jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
784 c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret?"yes":"no")); 829 __func__, c->nr_free_blocks, c->nr_erasing_blocks,
830 c->dirty_size, nr_very_dirty, ret ? "yes" : "no");
785 831
786 return ret; 832 return ret;
787} 833}
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index ab65ee3ec858..1cd3aec9d9ae 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -76,7 +76,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
76#define jffs2_write_nand_cleanmarker(c,jeb) (-EIO) 76#define jffs2_write_nand_cleanmarker(c,jeb) (-EIO)
77 77
78#define jffs2_flash_write(c, ofs, len, retlen, buf) jffs2_flash_direct_write(c, ofs, len, retlen, buf) 78#define jffs2_flash_write(c, ofs, len, retlen, buf) jffs2_flash_direct_write(c, ofs, len, retlen, buf)
79#define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf)) 79#define jffs2_flash_read(c, ofs, len, retlen, buf) (mtd_read((c)->mtd, ofs, len, retlen, buf))
80#define jffs2_flush_wbuf_pad(c) ({ do{} while(0); (void)(c), 0; }) 80#define jffs2_flush_wbuf_pad(c) ({ do{} while(0); (void)(c), 0; })
81#define jffs2_flush_wbuf_gc(c, i) ({ do{} while(0); (void)(c), (void) i, 0; }) 81#define jffs2_flush_wbuf_gc(c, i) ({ do{} while(0); (void)(c), (void) i, 0; })
82#define jffs2_write_nand_badblock(c,jeb,bad_offset) (1) 82#define jffs2_write_nand_badblock(c,jeb,bad_offset) (1)
@@ -108,8 +108,6 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
108 108
109#define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH) 109#define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH)
110 110
111#define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf))
112#define jffs2_flash_read_oob(c, ofs, len, retlen, buf) ((c)->mtd->read_oob((c)->mtd, ofs, len, retlen, buf))
113#define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len) 111#define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len)
114 112
115/* wbuf.c */ 113/* wbuf.c */
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c
index 3f39be1b0455..0b042b1fc82f 100644
--- a/fs/jffs2/read.c
+++ b/fs/jffs2/read.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/kernel.h> 14#include <linux/kernel.h>
13#include <linux/slab.h> 15#include <linux/slab.h>
14#include <linux/crc32.h> 16#include <linux/crc32.h>
@@ -36,24 +38,25 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
36 ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (char *)ri); 38 ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (char *)ri);
37 if (ret) { 39 if (ret) {
38 jffs2_free_raw_inode(ri); 40 jffs2_free_raw_inode(ri);
39 printk(KERN_WARNING "Error reading node from 0x%08x: %d\n", ref_offset(fd->raw), ret); 41 pr_warn("Error reading node from 0x%08x: %d\n",
42 ref_offset(fd->raw), ret);
40 return ret; 43 return ret;
41 } 44 }
42 if (readlen != sizeof(*ri)) { 45 if (readlen != sizeof(*ri)) {
43 jffs2_free_raw_inode(ri); 46 jffs2_free_raw_inode(ri);
44 printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n", 47 pr_warn("Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n",
45 ref_offset(fd->raw), sizeof(*ri), readlen); 48 ref_offset(fd->raw), sizeof(*ri), readlen);
46 return -EIO; 49 return -EIO;
47 } 50 }
48 crc = crc32(0, ri, sizeof(*ri)-8); 51 crc = crc32(0, ri, sizeof(*ri)-8);
49 52
50 D1(printk(KERN_DEBUG "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n", 53 jffs2_dbg(1, "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n",
51 ref_offset(fd->raw), je32_to_cpu(ri->node_crc), 54 ref_offset(fd->raw), je32_to_cpu(ri->node_crc),
52 crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize), 55 crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize),
53 je32_to_cpu(ri->offset), buf)); 56 je32_to_cpu(ri->offset), buf);
54 if (crc != je32_to_cpu(ri->node_crc)) { 57 if (crc != je32_to_cpu(ri->node_crc)) {
55 printk(KERN_WARNING "Node CRC %08x != calculated CRC %08x for node at %08x\n", 58 pr_warn("Node CRC %08x != calculated CRC %08x for node at %08x\n",
56 je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw)); 59 je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw));
57 ret = -EIO; 60 ret = -EIO;
58 goto out_ri; 61 goto out_ri;
59 } 62 }
@@ -66,8 +69,8 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
66 } 69 }
67 70
68 D1(if(ofs + len > je32_to_cpu(ri->dsize)) { 71 D1(if(ofs + len > je32_to_cpu(ri->dsize)) {
69 printk(KERN_WARNING "jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n", 72 pr_warn("jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n",
70 len, ofs, je32_to_cpu(ri->dsize)); 73 len, ofs, je32_to_cpu(ri->dsize));
71 ret = -EINVAL; 74 ret = -EINVAL;
72 goto out_ri; 75 goto out_ri;
73 }); 76 });
@@ -107,8 +110,8 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
107 decomprbuf = readbuf; 110 decomprbuf = readbuf;
108 } 111 }
109 112
110 D2(printk(KERN_DEBUG "Read %d bytes to %p\n", je32_to_cpu(ri->csize), 113 jffs2_dbg(2, "Read %d bytes to %p\n", je32_to_cpu(ri->csize),
111 readbuf)); 114 readbuf);
112 ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri), 115 ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri),
113 je32_to_cpu(ri->csize), &readlen, readbuf); 116 je32_to_cpu(ri->csize), &readlen, readbuf);
114 117
@@ -119,18 +122,19 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
119 122
120 crc = crc32(0, readbuf, je32_to_cpu(ri->csize)); 123 crc = crc32(0, readbuf, je32_to_cpu(ri->csize));
121 if (crc != je32_to_cpu(ri->data_crc)) { 124 if (crc != je32_to_cpu(ri->data_crc)) {
122 printk(KERN_WARNING "Data CRC %08x != calculated CRC %08x for node at %08x\n", 125 pr_warn("Data CRC %08x != calculated CRC %08x for node at %08x\n",
123 je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw)); 126 je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw));
124 ret = -EIO; 127 ret = -EIO;
125 goto out_decomprbuf; 128 goto out_decomprbuf;
126 } 129 }
127 D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc)); 130 jffs2_dbg(2, "Data CRC matches calculated CRC %08x\n", crc);
128 if (ri->compr != JFFS2_COMPR_NONE) { 131 if (ri->compr != JFFS2_COMPR_NONE) {
129 D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n", 132 jffs2_dbg(2, "Decompress %d bytes from %p to %d bytes at %p\n",
130 je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf)); 133 je32_to_cpu(ri->csize), readbuf,
134 je32_to_cpu(ri->dsize), decomprbuf);
131 ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize)); 135 ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize));
132 if (ret) { 136 if (ret) {
133 printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret); 137 pr_warn("Error: jffs2_decompress returned %d\n", ret);
134 goto out_decomprbuf; 138 goto out_decomprbuf;
135 } 139 }
136 } 140 }
@@ -157,8 +161,8 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
157 struct jffs2_node_frag *frag; 161 struct jffs2_node_frag *frag;
158 int ret; 162 int ret;
159 163
160 D1(printk(KERN_DEBUG "jffs2_read_inode_range: ino #%u, range 0x%08x-0x%08x\n", 164 jffs2_dbg(1, "%s(): ino #%u, range 0x%08x-0x%08x\n",
161 f->inocache->ino, offset, offset+len)); 165 __func__, f->inocache->ino, offset, offset + len);
162 166
163 frag = jffs2_lookup_node_frag(&f->fragtree, offset); 167 frag = jffs2_lookup_node_frag(&f->fragtree, offset);
164 168
@@ -168,22 +172,27 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
168 * (or perhaps is before it, if we've been asked to read off the 172 * (or perhaps is before it, if we've been asked to read off the
169 * end of the file). */ 173 * end of the file). */
170 while(offset < end) { 174 while(offset < end) {
171 D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end)); 175 jffs2_dbg(2, "%s(): offset %d, end %d\n",
176 __func__, offset, end);
172 if (unlikely(!frag || frag->ofs > offset || 177 if (unlikely(!frag || frag->ofs > offset ||
173 frag->ofs + frag->size <= offset)) { 178 frag->ofs + frag->size <= offset)) {
174 uint32_t holesize = end - offset; 179 uint32_t holesize = end - offset;
175 if (frag && frag->ofs > offset) { 180 if (frag && frag->ofs > offset) {
176 D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset)); 181 jffs2_dbg(1, "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n",
182 f->inocache->ino, frag->ofs, offset);
177 holesize = min(holesize, frag->ofs - offset); 183 holesize = min(holesize, frag->ofs - offset);
178 } 184 }
179 D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize)); 185 jffs2_dbg(1, "Filling non-frag hole from %d-%d\n",
186 offset, offset + holesize);
180 memset(buf, 0, holesize); 187 memset(buf, 0, holesize);
181 buf += holesize; 188 buf += holesize;
182 offset += holesize; 189 offset += holesize;
183 continue; 190 continue;
184 } else if (unlikely(!frag->node)) { 191 } else if (unlikely(!frag->node)) {
185 uint32_t holeend = min(end, frag->ofs + frag->size); 192 uint32_t holeend = min(end, frag->ofs + frag->size);
186 D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size)); 193 jffs2_dbg(1, "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n",
194 offset, holeend, frag->ofs,
195 frag->ofs + frag->size);
187 memset(buf, 0, holeend - offset); 196 memset(buf, 0, holeend - offset);
188 buf += holeend - offset; 197 buf += holeend - offset;
189 offset = holeend; 198 offset = holeend;
@@ -195,20 +204,23 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
195 204
196 fragofs = offset - frag->ofs; 205 fragofs = offset - frag->ofs;
197 readlen = min(frag->size - fragofs, end - offset); 206 readlen = min(frag->size - fragofs, end - offset);
198 D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n", 207 jffs2_dbg(1, "Reading %d-%d from node at 0x%08x (%d)\n",
199 frag->ofs+fragofs, frag->ofs+fragofs+readlen, 208 frag->ofs+fragofs,
200 ref_offset(frag->node->raw), ref_flags(frag->node->raw))); 209 frag->ofs + fragofs+readlen,
210 ref_offset(frag->node->raw),
211 ref_flags(frag->node->raw));
201 ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen); 212 ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen);
202 D2(printk(KERN_DEBUG "node read done\n")); 213 jffs2_dbg(2, "node read done\n");
203 if (ret) { 214 if (ret) {
204 D1(printk(KERN_DEBUG"jffs2_read_inode_range error %d\n",ret)); 215 jffs2_dbg(1, "%s(): error %d\n",
216 __func__, ret);
205 memset(buf, 0, readlen); 217 memset(buf, 0, readlen);
206 return ret; 218 return ret;
207 } 219 }
208 buf += readlen; 220 buf += readlen;
209 offset += readlen; 221 offset += readlen;
210 frag = frag_next(frag); 222 frag = frag_next(frag);
211 D2(printk(KERN_DEBUG "node read was OK. Looping\n")); 223 jffs2_dbg(2, "node read was OK. Looping\n");
212 } 224 }
213 } 225 }
214 return 0; 226 return 0;
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 3093ac4fb24c..dc0437e84763 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/kernel.h> 14#include <linux/kernel.h>
13#include <linux/sched.h> 15#include <linux/sched.h>
14#include <linux/slab.h> 16#include <linux/slab.h>
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index f99464833bb2..7654e87b0428 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/kernel.h> 14#include <linux/kernel.h>
13#include <linux/sched.h> 15#include <linux/sched.h>
14#include <linux/slab.h> 16#include <linux/slab.h>
@@ -22,15 +24,15 @@
22 24
23#define DEFAULT_EMPTY_SCAN_SIZE 256 25#define DEFAULT_EMPTY_SCAN_SIZE 256
24 26
25#define noisy_printk(noise, args...) do { \ 27#define noisy_printk(noise, fmt, ...) \
26 if (*(noise)) { \ 28do { \
27 printk(KERN_NOTICE args); \ 29 if (*(noise)) { \
28 (*(noise))--; \ 30 pr_notice(fmt, ##__VA_ARGS__); \
29 if (!(*(noise))) { \ 31 (*(noise))--; \
30 printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \ 32 if (!(*(noise))) \
31 } \ 33 pr_notice("Further such events for this erase block will not be printed\n"); \
32 } \ 34 } \
33} while(0) 35} while (0)
34 36
35static uint32_t pseudo_random; 37static uint32_t pseudo_random;
36 38
@@ -96,18 +98,17 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
96#ifndef __ECOS 98#ifndef __ECOS
97 size_t pointlen, try_size; 99 size_t pointlen, try_size;
98 100
99 if (c->mtd->point) { 101 ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen,
100 ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen, 102 (void **)&flashbuf, NULL);
101 (void **)&flashbuf, NULL); 103 if (!ret && pointlen < c->mtd->size) {
102 if (!ret && pointlen < c->mtd->size) { 104 /* Don't muck about if it won't let us point to the whole flash */
103 /* Don't muck about if it won't let us point to the whole flash */ 105 jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
104 D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen)); 106 pointlen);
105 mtd_unpoint(c->mtd, 0, pointlen); 107 mtd_unpoint(c->mtd, 0, pointlen);
106 flashbuf = NULL; 108 flashbuf = NULL;
107 }
108 if (ret && ret != -EOPNOTSUPP)
109 D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
110 } 109 }
110 if (ret && ret != -EOPNOTSUPP)
111 jffs2_dbg(1, "MTD point failed %d\n", ret);
111#endif 112#endif
112 if (!flashbuf) { 113 if (!flashbuf) {
113 /* For NAND it's quicker to read a whole eraseblock at a time, 114 /* For NAND it's quicker to read a whole eraseblock at a time,
@@ -117,15 +118,15 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
117 else 118 else
118 try_size = PAGE_SIZE; 119 try_size = PAGE_SIZE;
119 120
120 D1(printk(KERN_DEBUG "Trying to allocate readbuf of %zu " 121 jffs2_dbg(1, "Trying to allocate readbuf of %zu "
121 "bytes\n", try_size)); 122 "bytes\n", try_size);
122 123
123 flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size); 124 flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size);
124 if (!flashbuf) 125 if (!flashbuf)
125 return -ENOMEM; 126 return -ENOMEM;
126 127
127 D1(printk(KERN_DEBUG "Allocated readbuf of %zu bytes\n", 128 jffs2_dbg(1, "Allocated readbuf of %zu bytes\n",
128 try_size)); 129 try_size);
129 130
130 buf_size = (uint32_t)try_size; 131 buf_size = (uint32_t)try_size;
131 } 132 }
@@ -178,7 +179,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
178 c->nr_free_blocks++; 179 c->nr_free_blocks++;
179 } else { 180 } else {
180 /* Dirt */ 181 /* Dirt */
181 D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset)); 182 jffs2_dbg(1, "Adding all-dirty block at 0x%08x to erase_pending_list\n",
183 jeb->offset);
182 list_add(&jeb->list, &c->erase_pending_list); 184 list_add(&jeb->list, &c->erase_pending_list);
183 c->nr_erasing_blocks++; 185 c->nr_erasing_blocks++;
184 } 186 }
@@ -205,7 +207,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
205 } 207 }
206 /* update collected summary information for the current nextblock */ 208 /* update collected summary information for the current nextblock */
207 jffs2_sum_move_collected(c, s); 209 jffs2_sum_move_collected(c, s);
208 D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset)); 210 jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
211 __func__, jeb->offset);
209 c->nextblock = jeb; 212 c->nextblock = jeb;
210 } else { 213 } else {
211 ret = file_dirty(c, jeb); 214 ret = file_dirty(c, jeb);
@@ -217,20 +220,21 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
217 case BLK_STATE_ALLDIRTY: 220 case BLK_STATE_ALLDIRTY:
218 /* Nothing valid - not even a clean marker. Needs erasing. */ 221 /* Nothing valid - not even a clean marker. Needs erasing. */
219 /* For now we just put it on the erasing list. We'll start the erases later */ 222 /* For now we just put it on the erasing list. We'll start the erases later */
220 D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset)); 223 jffs2_dbg(1, "Erase block at 0x%08x is not formatted. It will be erased\n",
224 jeb->offset);
221 list_add(&jeb->list, &c->erase_pending_list); 225 list_add(&jeb->list, &c->erase_pending_list);
222 c->nr_erasing_blocks++; 226 c->nr_erasing_blocks++;
223 break; 227 break;
224 228
225 case BLK_STATE_BADBLOCK: 229 case BLK_STATE_BADBLOCK:
226 D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset)); 230 jffs2_dbg(1, "Block at 0x%08x is bad\n", jeb->offset);
227 list_add(&jeb->list, &c->bad_list); 231 list_add(&jeb->list, &c->bad_list);
228 c->bad_size += c->sector_size; 232 c->bad_size += c->sector_size;
229 c->free_size -= c->sector_size; 233 c->free_size -= c->sector_size;
230 bad_blocks++; 234 bad_blocks++;
231 break; 235 break;
232 default: 236 default:
233 printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n"); 237 pr_warn("%s(): unknown block state\n", __func__);
234 BUG(); 238 BUG();
235 } 239 }
236 } 240 }
@@ -250,16 +254,17 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
250 254
251 uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize; 255 uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;
252 256
253 D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n", 257 jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n",
254 skip)); 258 __func__, skip);
255 jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); 259 jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
256 jffs2_scan_dirty_space(c, c->nextblock, skip); 260 jffs2_scan_dirty_space(c, c->nextblock, skip);
257 } 261 }
258#endif 262#endif
259 if (c->nr_erasing_blocks) { 263 if (c->nr_erasing_blocks) {
260 if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) { 264 if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
261 printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n"); 265 pr_notice("Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
262 printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks); 266 pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",
267 empty_blocks, bad_blocks, c->nr_blocks);
263 ret = -EIO; 268 ret = -EIO;
264 goto out; 269 goto out;
265 } 270 }
@@ -287,11 +292,13 @@ static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
287 292
288 ret = jffs2_flash_read(c, ofs, len, &retlen, buf); 293 ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
289 if (ret) { 294 if (ret) {
290 D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret)); 295 jffs2_dbg(1, "mtd->read(0x%x bytes from 0x%x) returned %d\n",
296 len, ofs, ret);
291 return ret; 297 return ret;
292 } 298 }
293 if (retlen < len) { 299 if (retlen < len) {
294 D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen)); 300 jffs2_dbg(1, "Read at 0x%x gave only 0x%zx bytes\n",
301 ofs, retlen);
295 return -EIO; 302 return -EIO;
296 } 303 }
297 return 0; 304 return 0;
@@ -368,7 +375,7 @@ static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
368 375
369 if (jffs2_sum_active()) 376 if (jffs2_sum_active())
370 jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset); 377 jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
371 dbg_xattr("scaning xdatum at %#08x (xid=%u, version=%u)\n", 378 dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n",
372 ofs, xd->xid, xd->version); 379 ofs, xd->xid, xd->version);
373 return 0; 380 return 0;
374} 381}
@@ -449,7 +456,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
449 ofs = jeb->offset; 456 ofs = jeb->offset;
450 prevofs = jeb->offset - 1; 457 prevofs = jeb->offset - 1;
451 458
452 D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs)); 459 jffs2_dbg(1, "%s(): Scanning block at 0x%x\n", __func__, ofs);
453 460
454#ifdef CONFIG_JFFS2_FS_WRITEBUFFER 461#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
455 if (jffs2_cleanmarker_oob(c)) { 462 if (jffs2_cleanmarker_oob(c)) {
@@ -459,7 +466,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
459 return BLK_STATE_BADBLOCK; 466 return BLK_STATE_BADBLOCK;
460 467
461 ret = jffs2_check_nand_cleanmarker(c, jeb); 468 ret = jffs2_check_nand_cleanmarker(c, jeb);
462 D2(printk(KERN_NOTICE "jffs_check_nand_cleanmarker returned %d\n",ret)); 469 jffs2_dbg(2, "jffs_check_nand_cleanmarker returned %d\n", ret);
463 470
464 /* Even if it's not found, we still scan to see 471 /* Even if it's not found, we still scan to see
465 if the block is empty. We use this information 472 if the block is empty. We use this information
@@ -561,7 +568,8 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
561 if (jffs2_cleanmarker_oob(c)) { 568 if (jffs2_cleanmarker_oob(c)) {
562 /* scan oob, take care of cleanmarker */ 569 /* scan oob, take care of cleanmarker */
563 int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound); 570 int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
564 D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret)); 571 jffs2_dbg(2, "jffs2_check_oob_empty returned %d\n",
572 ret);
565 switch (ret) { 573 switch (ret) {
566 case 0: return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF; 574 case 0: return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
567 case 1: return BLK_STATE_ALLDIRTY; 575 case 1: return BLK_STATE_ALLDIRTY;
@@ -569,15 +577,16 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
569 } 577 }
570 } 578 }
571#endif 579#endif
572 D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset)); 580 jffs2_dbg(1, "Block at 0x%08x is empty (erased)\n",
581 jeb->offset);
573 if (c->cleanmarker_size == 0) 582 if (c->cleanmarker_size == 0)
574 return BLK_STATE_CLEANMARKER; /* don't bother with re-erase */ 583 return BLK_STATE_CLEANMARKER; /* don't bother with re-erase */
575 else 584 else
576 return BLK_STATE_ALLFF; /* OK to erase if all blocks are like this */ 585 return BLK_STATE_ALLFF; /* OK to erase if all blocks are like this */
577 } 586 }
578 if (ofs) { 587 if (ofs) {
579 D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset, 588 jffs2_dbg(1, "Free space at %08x ends at %08x\n", jeb->offset,
580 jeb->offset + ofs)); 589 jeb->offset + ofs);
581 if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1))) 590 if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
582 return err; 591 return err;
583 if ((err = jffs2_scan_dirty_space(c, jeb, ofs))) 592 if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
@@ -604,12 +613,13 @@ scan_more:
604 cond_resched(); 613 cond_resched();
605 614
606 if (ofs & 3) { 615 if (ofs & 3) {
607 printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs); 616 pr_warn("Eep. ofs 0x%08x not word-aligned!\n", ofs);
608 ofs = PAD(ofs); 617 ofs = PAD(ofs);
609 continue; 618 continue;
610 } 619 }
611 if (ofs == prevofs) { 620 if (ofs == prevofs) {
612 printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs); 621 pr_warn("ofs 0x%08x has already been seen. Skipping\n",
622 ofs);
613 if ((err = jffs2_scan_dirty_space(c, jeb, 4))) 623 if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
614 return err; 624 return err;
615 ofs += 4; 625 ofs += 4;
@@ -618,8 +628,10 @@ scan_more:
618 prevofs = ofs; 628 prevofs = ofs;
619 629
620 if (jeb->offset + c->sector_size < ofs + sizeof(*node)) { 630 if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
621 D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node), 631 jffs2_dbg(1, "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n",
622 jeb->offset, c->sector_size, ofs, sizeof(*node))); 632 sizeof(struct jffs2_unknown_node),
633 jeb->offset, c->sector_size, ofs,
634 sizeof(*node));
623 if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs))) 635 if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
624 return err; 636 return err;
625 break; 637 break;
@@ -627,8 +639,9 @@ scan_more:
627 639
628 if (buf_ofs + buf_len < ofs + sizeof(*node)) { 640 if (buf_ofs + buf_len < ofs + sizeof(*node)) {
629 buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); 641 buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
630 D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n", 642 jffs2_dbg(1, "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
631 sizeof(struct jffs2_unknown_node), buf_len, ofs)); 643 sizeof(struct jffs2_unknown_node),
644 buf_len, ofs);
632 err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); 645 err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
633 if (err) 646 if (err)
634 return err; 647 return err;
@@ -645,13 +658,13 @@ scan_more:
645 ofs += 4; 658 ofs += 4;
646 scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len); 659 scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len);
647 660
648 D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs)); 661 jffs2_dbg(1, "Found empty flash at 0x%08x\n", ofs);
649 more_empty: 662 more_empty:
650 inbuf_ofs = ofs - buf_ofs; 663 inbuf_ofs = ofs - buf_ofs;
651 while (inbuf_ofs < scan_end) { 664 while (inbuf_ofs < scan_end) {
652 if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) { 665 if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) {
653 printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n", 666 pr_warn("Empty flash at 0x%08x ends at 0x%08x\n",
654 empty_start, ofs); 667 empty_start, ofs);
655 if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start))) 668 if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
656 return err; 669 return err;
657 goto scan_more; 670 goto scan_more;
@@ -661,13 +674,15 @@ scan_more:
661 ofs += 4; 674 ofs += 4;
662 } 675 }
663 /* Ran off end. */ 676 /* Ran off end. */
664 D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs)); 677 jffs2_dbg(1, "Empty flash to end of buffer at 0x%08x\n",
678 ofs);
665 679
666 /* If we're only checking the beginning of a block with a cleanmarker, 680 /* If we're only checking the beginning of a block with a cleanmarker,
667 bail now */ 681 bail now */
668 if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && 682 if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
669 c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) { 683 c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
670 D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size))); 684 jffs2_dbg(1, "%d bytes at start of block seems clean... assuming all clean\n",
685 EMPTY_SCAN_SIZE(c->sector_size));
671 return BLK_STATE_CLEANMARKER; 686 return BLK_STATE_CLEANMARKER;
672 } 687 }
673 if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */ 688 if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */
@@ -680,13 +695,14 @@ scan_more:
680 if (!buf_len) { 695 if (!buf_len) {
681 /* No more to read. Break out of main loop without marking 696 /* No more to read. Break out of main loop without marking
682 this range of empty space as dirty (because it's not) */ 697 this range of empty space as dirty (because it's not) */
683 D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n", 698 jffs2_dbg(1, "Empty flash at %08x runs to end of block. Treating as free_space\n",
684 empty_start)); 699 empty_start);
685 break; 700 break;
686 } 701 }
687 /* point never reaches here */ 702 /* point never reaches here */
688 scan_end = buf_len; 703 scan_end = buf_len;
689 D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs)); 704 jffs2_dbg(1, "Reading another 0x%x at 0x%08x\n",
705 buf_len, ofs);
690 err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); 706 err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
691 if (err) 707 if (err)
692 return err; 708 return err;
@@ -695,22 +711,23 @@ scan_more:
695 } 711 }
696 712
697 if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) { 713 if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
698 printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs); 714 pr_warn("Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n",
715 ofs);
699 if ((err = jffs2_scan_dirty_space(c, jeb, 4))) 716 if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
700 return err; 717 return err;
701 ofs += 4; 718 ofs += 4;
702 continue; 719 continue;
703 } 720 }
704 if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) { 721 if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
705 D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs)); 722 jffs2_dbg(1, "Dirty bitmask at 0x%08x\n", ofs);
706 if ((err = jffs2_scan_dirty_space(c, jeb, 4))) 723 if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
707 return err; 724 return err;
708 ofs += 4; 725 ofs += 4;
709 continue; 726 continue;
710 } 727 }
711 if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) { 728 if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
712 printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs); 729 pr_warn("Old JFFS2 bitmask found at 0x%08x\n", ofs);
713 printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n"); 730 pr_warn("You cannot use older JFFS2 filesystems with newer kernels\n");
714 if ((err = jffs2_scan_dirty_space(c, jeb, 4))) 731 if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
715 return err; 732 return err;
716 ofs += 4; 733 ofs += 4;
@@ -718,7 +735,8 @@ scan_more:
718 } 735 }
719 if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) { 736 if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
720 /* OK. We're out of possibilities. Whinge and move on */ 737 /* OK. We're out of possibilities. Whinge and move on */
721 noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", 738 noisy_printk(&noise, "%s(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
739 __func__,
722 JFFS2_MAGIC_BITMASK, ofs, 740 JFFS2_MAGIC_BITMASK, ofs,
723 je16_to_cpu(node->magic)); 741 je16_to_cpu(node->magic));
724 if ((err = jffs2_scan_dirty_space(c, jeb, 4))) 742 if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
@@ -733,7 +751,8 @@ scan_more:
733 hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4); 751 hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);
734 752
735 if (hdr_crc != je32_to_cpu(node->hdr_crc)) { 753 if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
736 noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n", 754 noisy_printk(&noise, "%s(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
755 __func__,
737 ofs, je16_to_cpu(node->magic), 756 ofs, je16_to_cpu(node->magic),
738 je16_to_cpu(node->nodetype), 757 je16_to_cpu(node->nodetype),
739 je32_to_cpu(node->totlen), 758 je32_to_cpu(node->totlen),
@@ -747,9 +766,9 @@ scan_more:
747 766
748 if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) { 767 if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) {
749 /* Eep. Node goes over the end of the erase block. */ 768 /* Eep. Node goes over the end of the erase block. */
750 printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n", 769 pr_warn("Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
751 ofs, je32_to_cpu(node->totlen)); 770 ofs, je32_to_cpu(node->totlen));
752 printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n"); 771 pr_warn("Perhaps the file system was created with the wrong erase size?\n");
753 if ((err = jffs2_scan_dirty_space(c, jeb, 4))) 772 if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
754 return err; 773 return err;
755 ofs += 4; 774 ofs += 4;
@@ -758,7 +777,8 @@ scan_more:
758 777
759 if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) { 778 if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
760 /* Wheee. This is an obsoleted node */ 779 /* Wheee. This is an obsoleted node */
761 D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs)); 780 jffs2_dbg(2, "Node at 0x%08x is obsolete. Skipping\n",
781 ofs);
762 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) 782 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
763 return err; 783 return err;
764 ofs += PAD(je32_to_cpu(node->totlen)); 784 ofs += PAD(je32_to_cpu(node->totlen));
@@ -769,8 +789,9 @@ scan_more:
769 case JFFS2_NODETYPE_INODE: 789 case JFFS2_NODETYPE_INODE:
770 if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) { 790 if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
771 buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); 791 buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
772 D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n", 792 jffs2_dbg(1, "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
773 sizeof(struct jffs2_raw_inode), buf_len, ofs)); 793 sizeof(struct jffs2_raw_inode),
794 buf_len, ofs);
774 err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); 795 err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
775 if (err) 796 if (err)
776 return err; 797 return err;
@@ -785,8 +806,9 @@ scan_more:
785 case JFFS2_NODETYPE_DIRENT: 806 case JFFS2_NODETYPE_DIRENT:
786 if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { 807 if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
787 buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); 808 buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
788 D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n", 809 jffs2_dbg(1, "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
789 je32_to_cpu(node->totlen), buf_len, ofs)); 810 je32_to_cpu(node->totlen), buf_len,
811 ofs);
790 err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); 812 err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
791 if (err) 813 if (err)
792 return err; 814 return err;
@@ -802,9 +824,9 @@ scan_more:
802 case JFFS2_NODETYPE_XATTR: 824 case JFFS2_NODETYPE_XATTR:
803 if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { 825 if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
804 buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); 826 buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
805 D1(printk(KERN_DEBUG "Fewer than %d bytes (xattr node)" 827 jffs2_dbg(1, "Fewer than %d bytes (xattr node) left to end of buf. Reading 0x%x at 0x%08x\n",
806 " left to end of buf. Reading 0x%x at 0x%08x\n", 828 je32_to_cpu(node->totlen), buf_len,
807 je32_to_cpu(node->totlen), buf_len, ofs)); 829 ofs);
808 err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); 830 err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
809 if (err) 831 if (err)
810 return err; 832 return err;
@@ -819,9 +841,9 @@ scan_more:
819 case JFFS2_NODETYPE_XREF: 841 case JFFS2_NODETYPE_XREF:
820 if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { 842 if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
821 buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); 843 buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
822 D1(printk(KERN_DEBUG "Fewer than %d bytes (xref node)" 844 jffs2_dbg(1, "Fewer than %d bytes (xref node) left to end of buf. Reading 0x%x at 0x%08x\n",
823 " left to end of buf. Reading 0x%x at 0x%08x\n", 845 je32_to_cpu(node->totlen), buf_len,
824 je32_to_cpu(node->totlen), buf_len, ofs)); 846 ofs);
825 err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); 847 err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
826 if (err) 848 if (err)
827 return err; 849 return err;
@@ -836,15 +858,17 @@ scan_more:
836#endif /* CONFIG_JFFS2_FS_XATTR */ 858#endif /* CONFIG_JFFS2_FS_XATTR */
837 859
838 case JFFS2_NODETYPE_CLEANMARKER: 860 case JFFS2_NODETYPE_CLEANMARKER:
839 D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs)); 861 jffs2_dbg(1, "CLEANMARKER node found at 0x%08x\n", ofs);
840 if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { 862 if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
841 printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", 863 pr_notice("CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
842 ofs, je32_to_cpu(node->totlen), c->cleanmarker_size); 864 ofs, je32_to_cpu(node->totlen),
865 c->cleanmarker_size);
843 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node))))) 866 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
844 return err; 867 return err;
845 ofs += PAD(sizeof(struct jffs2_unknown_node)); 868 ofs += PAD(sizeof(struct jffs2_unknown_node));
846 } else if (jeb->first_node) { 869 } else if (jeb->first_node) {
847 printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset); 870 pr_notice("CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n",
871 ofs, jeb->offset);
848 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node))))) 872 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
849 return err; 873 return err;
850 ofs += PAD(sizeof(struct jffs2_unknown_node)); 874 ofs += PAD(sizeof(struct jffs2_unknown_node));
@@ -866,7 +890,8 @@ scan_more:
866 default: 890 default:
867 switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) { 891 switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
868 case JFFS2_FEATURE_ROCOMPAT: 892 case JFFS2_FEATURE_ROCOMPAT:
869 printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs); 893 pr_notice("Read-only compatible feature node (0x%04x) found at offset 0x%08x\n",
894 je16_to_cpu(node->nodetype), ofs);
870 c->flags |= JFFS2_SB_FLAG_RO; 895 c->flags |= JFFS2_SB_FLAG_RO;
871 if (!(jffs2_is_readonly(c))) 896 if (!(jffs2_is_readonly(c)))
872 return -EROFS; 897 return -EROFS;
@@ -876,18 +901,21 @@ scan_more:
876 break; 901 break;
877 902
878 case JFFS2_FEATURE_INCOMPAT: 903 case JFFS2_FEATURE_INCOMPAT:
879 printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs); 904 pr_notice("Incompatible feature node (0x%04x) found at offset 0x%08x\n",
905 je16_to_cpu(node->nodetype), ofs);
880 return -EINVAL; 906 return -EINVAL;
881 907
882 case JFFS2_FEATURE_RWCOMPAT_DELETE: 908 case JFFS2_FEATURE_RWCOMPAT_DELETE:
883 D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs)); 909 jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
910 je16_to_cpu(node->nodetype), ofs);
884 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) 911 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
885 return err; 912 return err;
886 ofs += PAD(je32_to_cpu(node->totlen)); 913 ofs += PAD(je32_to_cpu(node->totlen));
887 break; 914 break;
888 915
889 case JFFS2_FEATURE_RWCOMPAT_COPY: { 916 case JFFS2_FEATURE_RWCOMPAT_COPY: {
890 D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs)); 917 jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
918 je16_to_cpu(node->nodetype), ofs);
891 919
892 jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL); 920 jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);
893 921
@@ -908,8 +936,9 @@ scan_more:
908 } 936 }
909 } 937 }
910 938
911 D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n", 939 jffs2_dbg(1, "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
912 jeb->offset,jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size)); 940 jeb->offset, jeb->free_size, jeb->dirty_size,
941 jeb->unchecked_size, jeb->used_size, jeb->wasted_size);
913 942
914 /* mark_node_obsolete can add to wasted !! */ 943 /* mark_node_obsolete can add to wasted !! */
915 if (jeb->wasted_size) { 944 if (jeb->wasted_size) {
@@ -935,7 +964,7 @@ struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uin
935 964
936 ic = jffs2_alloc_inode_cache(); 965 ic = jffs2_alloc_inode_cache();
937 if (!ic) { 966 if (!ic) {
938 printk(KERN_NOTICE "jffs2_scan_make_inode_cache(): allocation of inode cache failed\n"); 967 pr_notice("%s(): allocation of inode cache failed\n", __func__);
939 return NULL; 968 return NULL;
940 } 969 }
941 memset(ic, 0, sizeof(*ic)); 970 memset(ic, 0, sizeof(*ic));
@@ -954,7 +983,7 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
954 struct jffs2_inode_cache *ic; 983 struct jffs2_inode_cache *ic;
955 uint32_t crc, ino = je32_to_cpu(ri->ino); 984 uint32_t crc, ino = je32_to_cpu(ri->ino);
956 985
957 D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs)); 986 jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);
958 987
959 /* We do very little here now. Just check the ino# to which we should attribute 988 /* We do very little here now. Just check the ino# to which we should attribute
960 this node; we can do all the CRC checking etc. later. There's a tradeoff here -- 989 this node; we can do all the CRC checking etc. later. There's a tradeoff here --
@@ -968,9 +997,8 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
968 /* Check the node CRC in any case. */ 997 /* Check the node CRC in any case. */
969 crc = crc32(0, ri, sizeof(*ri)-8); 998 crc = crc32(0, ri, sizeof(*ri)-8);
970 if (crc != je32_to_cpu(ri->node_crc)) { 999 if (crc != je32_to_cpu(ri->node_crc)) {
971 printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on " 1000 pr_notice("%s(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
972 "node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", 1001 __func__, ofs, je32_to_cpu(ri->node_crc), crc);
973 ofs, je32_to_cpu(ri->node_crc), crc);
974 /* 1002 /*
975 * We believe totlen because the CRC on the node 1003 * We believe totlen because the CRC on the node
976 * _header_ was OK, just the node itself failed. 1004 * _header_ was OK, just the node itself failed.
@@ -989,10 +1017,10 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
989 /* Wheee. It worked */ 1017 /* Wheee. It worked */
990 jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic); 1018 jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic);
991 1019
992 D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n", 1020 jffs2_dbg(1, "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
993 je32_to_cpu(ri->ino), je32_to_cpu(ri->version), 1021 je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
994 je32_to_cpu(ri->offset), 1022 je32_to_cpu(ri->offset),
995 je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize))); 1023 je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize));
996 1024
997 pseudo_random += je32_to_cpu(ri->version); 1025 pseudo_random += je32_to_cpu(ri->version);
998 1026
@@ -1012,15 +1040,15 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
1012 uint32_t crc; 1040 uint32_t crc;
1013 int err; 1041 int err;
1014 1042
1015 D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs)); 1043 jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);
1016 1044
1017 /* We don't get here unless the node is still valid, so we don't have to 1045 /* We don't get here unless the node is still valid, so we don't have to
1018 mask in the ACCURATE bit any more. */ 1046 mask in the ACCURATE bit any more. */
1019 crc = crc32(0, rd, sizeof(*rd)-8); 1047 crc = crc32(0, rd, sizeof(*rd)-8);
1020 1048
1021 if (crc != je32_to_cpu(rd->node_crc)) { 1049 if (crc != je32_to_cpu(rd->node_crc)) {
1022 printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", 1050 pr_notice("%s(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
1023 ofs, je32_to_cpu(rd->node_crc), crc); 1051 __func__, ofs, je32_to_cpu(rd->node_crc), crc);
1024 /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */ 1052 /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
1025 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen))))) 1053 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
1026 return err; 1054 return err;
@@ -1032,7 +1060,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
1032 /* Should never happen. Did. (OLPC trac #4184)*/ 1060 /* Should never happen. Did. (OLPC trac #4184)*/
1033 checkedlen = strnlen(rd->name, rd->nsize); 1061 checkedlen = strnlen(rd->name, rd->nsize);
1034 if (checkedlen < rd->nsize) { 1062 if (checkedlen < rd->nsize) {
1035 printk(KERN_ERR "Dirent at %08x has zeroes in name. Truncating to %d chars\n", 1063 pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n",
1036 ofs, checkedlen); 1064 ofs, checkedlen);
1037 } 1065 }
1038 fd = jffs2_alloc_full_dirent(checkedlen+1); 1066 fd = jffs2_alloc_full_dirent(checkedlen+1);
@@ -1044,9 +1072,10 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
1044 1072
1045 crc = crc32(0, fd->name, rd->nsize); 1073 crc = crc32(0, fd->name, rd->nsize);
1046 if (crc != je32_to_cpu(rd->name_crc)) { 1074 if (crc != je32_to_cpu(rd->name_crc)) {
1047 printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", 1075 pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
1048 ofs, je32_to_cpu(rd->name_crc), crc); 1076 __func__, ofs, je32_to_cpu(rd->name_crc), crc);
1049 D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino))); 1077 jffs2_dbg(1, "Name for which CRC failed is (now) '%s', ino #%d\n",
1078 fd->name, je32_to_cpu(rd->ino));
1050 jffs2_free_full_dirent(fd); 1079 jffs2_free_full_dirent(fd);
1051 /* FIXME: Why do we believe totlen? */ 1080 /* FIXME: Why do we believe totlen? */
1052 /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */ 1081 /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index 0f20208df602..aca97f35b292 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -23,8 +23,8 @@
23#include "nodelist.h" 23#include "nodelist.h"
24 24
25/* ---- Initial Security Label(s) Attachment callback --- */ 25/* ---- Initial Security Label(s) Attachment callback --- */
26int jffs2_initxattrs(struct inode *inode, const struct xattr *xattr_array, 26static int jffs2_initxattrs(struct inode *inode,
27 void *fs_info) 27 const struct xattr *xattr_array, void *fs_info)
28{ 28{
29 const struct xattr *xattr; 29 const struct xattr *xattr;
30 int err = 0; 30 int err = 0;
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index e537fb0e0184..c522d098bb4f 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -11,6 +11,8 @@
11 * 11 *
12 */ 12 */
13 13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
14#include <linux/kernel.h> 16#include <linux/kernel.h>
15#include <linux/slab.h> 17#include <linux/slab.h>
16#include <linux/mtd/mtd.h> 18#include <linux/mtd/mtd.h>
@@ -442,13 +444,16 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
442 /* This should never happen, but https://dev.laptop.org/ticket/4184 */ 444 /* This should never happen, but https://dev.laptop.org/ticket/4184 */
443 checkedlen = strnlen(spd->name, spd->nsize); 445 checkedlen = strnlen(spd->name, spd->nsize);
444 if (!checkedlen) { 446 if (!checkedlen) {
445 printk(KERN_ERR "Dirent at %08x has zero at start of name. Aborting mount.\n", 447 pr_err("Dirent at %08x has zero at start of name. Aborting mount.\n",
446 jeb->offset + je32_to_cpu(spd->offset)); 448 jeb->offset +
449 je32_to_cpu(spd->offset));
447 return -EIO; 450 return -EIO;
448 } 451 }
449 if (checkedlen < spd->nsize) { 452 if (checkedlen < spd->nsize) {
450 printk(KERN_ERR "Dirent at %08x has zeroes in name. Truncating to %d chars\n", 453 pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n",
451 jeb->offset + je32_to_cpu(spd->offset), checkedlen); 454 jeb->offset +
455 je32_to_cpu(spd->offset),
456 checkedlen);
452 } 457 }
453 458
454 459
@@ -808,8 +813,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
808 813
809 sum_ofs = jeb->offset + c->sector_size - jeb->free_size; 814 sum_ofs = jeb->offset + c->sector_size - jeb->free_size;
810 815
811 dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n", 816 dbg_summary("writing out data to flash to pos : 0x%08x\n", sum_ofs);
812 sum_ofs);
813 817
814 ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0); 818 ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0);
815 819
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index f2d96b5e64f6..f9916f312bd8 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/kernel.h> 14#include <linux/kernel.h>
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/slab.h> 16#include <linux/slab.h>
@@ -69,7 +71,7 @@ static void jffs2_write_super(struct super_block *sb)
69 sb->s_dirt = 0; 71 sb->s_dirt = 0;
70 72
71 if (!(sb->s_flags & MS_RDONLY)) { 73 if (!(sb->s_flags & MS_RDONLY)) {
72 D1(printk(KERN_DEBUG "jffs2_write_super()\n")); 74 jffs2_dbg(1, "%s()\n", __func__);
73 jffs2_flush_wbuf_gc(c, 0); 75 jffs2_flush_wbuf_gc(c, 0);
74 } 76 }
75 77
@@ -214,8 +216,8 @@ static int jffs2_parse_options(struct jffs2_sb_info *c, char *data)
214 JFFS2_COMPR_MODE_FORCEZLIB; 216 JFFS2_COMPR_MODE_FORCEZLIB;
215#endif 217#endif
216 else { 218 else {
217 printk(KERN_ERR "JFFS2 Error: unknown compressor \"%s\"", 219 pr_err("Error: unknown compressor \"%s\"\n",
218 name); 220 name);
219 kfree(name); 221 kfree(name);
220 return -EINVAL; 222 return -EINVAL;
221 } 223 }
@@ -223,8 +225,8 @@ static int jffs2_parse_options(struct jffs2_sb_info *c, char *data)
223 c->mount_opts.override_compr = true; 225 c->mount_opts.override_compr = true;
224 break; 226 break;
225 default: 227 default:
226 printk(KERN_ERR "JFFS2 Error: unrecognized mount option '%s' or missing value\n", 228 pr_err("Error: unrecognized mount option '%s' or missing value\n",
227 p); 229 p);
228 return -EINVAL; 230 return -EINVAL;
229 } 231 }
230 } 232 }
@@ -266,9 +268,9 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
266 struct jffs2_sb_info *c; 268 struct jffs2_sb_info *c;
267 int ret; 269 int ret;
268 270
269 D1(printk(KERN_DEBUG "jffs2_get_sb_mtd():" 271 jffs2_dbg(1, "jffs2_get_sb_mtd():"
270 " New superblock for device %d (\"%s\")\n", 272 " New superblock for device %d (\"%s\")\n",
271 sb->s_mtd->index, sb->s_mtd->name)); 273 sb->s_mtd->index, sb->s_mtd->name);
272 274
273 c = kzalloc(sizeof(*c), GFP_KERNEL); 275 c = kzalloc(sizeof(*c), GFP_KERNEL);
274 if (!c) 276 if (!c)
@@ -315,7 +317,7 @@ static void jffs2_put_super (struct super_block *sb)
315{ 317{
316 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); 318 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
317 319
318 D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n")); 320 jffs2_dbg(2, "%s()\n", __func__);
319 321
320 if (sb->s_dirt) 322 if (sb->s_dirt)
321 jffs2_write_super(sb); 323 jffs2_write_super(sb);
@@ -336,7 +338,7 @@ static void jffs2_put_super (struct super_block *sb)
336 kfree(c->inocache_list); 338 kfree(c->inocache_list);
337 jffs2_clear_xattr_subsystem(c); 339 jffs2_clear_xattr_subsystem(c);
338 mtd_sync(c->mtd); 340 mtd_sync(c->mtd);
339 D1(printk(KERN_DEBUG "jffs2_put_super returning\n")); 341 jffs2_dbg(1, "%s(): returning\n", __func__);
340} 342}
341 343
342static void jffs2_kill_sb(struct super_block *sb) 344static void jffs2_kill_sb(struct super_block *sb)
@@ -371,7 +373,7 @@ static int __init init_jffs2_fs(void)
371 BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68); 373 BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
372 BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32); 374 BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
373 375
374 printk(KERN_INFO "JFFS2 version 2.2." 376 pr_info("version 2.2."
375#ifdef CONFIG_JFFS2_FS_WRITEBUFFER 377#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
376 " (NAND)" 378 " (NAND)"
377#endif 379#endif
@@ -386,22 +388,22 @@ static int __init init_jffs2_fs(void)
386 SLAB_MEM_SPREAD), 388 SLAB_MEM_SPREAD),
387 jffs2_i_init_once); 389 jffs2_i_init_once);
388 if (!jffs2_inode_cachep) { 390 if (!jffs2_inode_cachep) {
389 printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n"); 391 pr_err("error: Failed to initialise inode cache\n");
390 return -ENOMEM; 392 return -ENOMEM;
391 } 393 }
392 ret = jffs2_compressors_init(); 394 ret = jffs2_compressors_init();
393 if (ret) { 395 if (ret) {
394 printk(KERN_ERR "JFFS2 error: Failed to initialise compressors\n"); 396 pr_err("error: Failed to initialise compressors\n");
395 goto out; 397 goto out;
396 } 398 }
397 ret = jffs2_create_slab_caches(); 399 ret = jffs2_create_slab_caches();
398 if (ret) { 400 if (ret) {
399 printk(KERN_ERR "JFFS2 error: Failed to initialise slab caches\n"); 401 pr_err("error: Failed to initialise slab caches\n");
400 goto out_compressors; 402 goto out_compressors;
401 } 403 }
402 ret = register_filesystem(&jffs2_fs_type); 404 ret = register_filesystem(&jffs2_fs_type);
403 if (ret) { 405 if (ret) {
404 printk(KERN_ERR "JFFS2 error: Failed to register filesystem\n"); 406 pr_err("error: Failed to register filesystem\n");
405 goto out_slab; 407 goto out_slab;
406 } 408 }
407 return 0; 409 return 0;
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c
index e3035afb1814..6e563332bb24 100644
--- a/fs/jffs2/symlink.c
+++ b/fs/jffs2/symlink.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/kernel.h> 14#include <linux/kernel.h>
13#include <linux/fs.h> 15#include <linux/fs.h>
14#include <linux/namei.h> 16#include <linux/namei.h>
@@ -47,10 +49,11 @@ static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
47 */ 49 */
48 50
49 if (!p) { 51 if (!p) {
50 printk(KERN_ERR "jffs2_follow_link(): can't find symlink target\n"); 52 pr_err("%s(): can't find symlink target\n", __func__);
51 p = ERR_PTR(-EIO); 53 p = ERR_PTR(-EIO);
52 } 54 }
53 D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->target)); 55 jffs2_dbg(1, "%s(): target path is '%s'\n",
56 __func__, (char *)f->target);
54 57
55 nd_set_link(nd, p); 58 nd_set_link(nd, p);
56 59
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 30e8f47e8a23..74d9be19df3f 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -11,6 +11,8 @@
11 * 11 *
12 */ 12 */
13 13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
14#include <linux/kernel.h> 16#include <linux/kernel.h>
15#include <linux/slab.h> 17#include <linux/slab.h>
16#include <linux/mtd/mtd.h> 18#include <linux/mtd/mtd.h>
@@ -91,7 +93,7 @@ static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
91 93
92 new = kmalloc(sizeof(*new), GFP_KERNEL); 94 new = kmalloc(sizeof(*new), GFP_KERNEL);
93 if (!new) { 95 if (!new) {
94 D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n")); 96 jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
95 jffs2_clear_wbuf_ino_list(c); 97 jffs2_clear_wbuf_ino_list(c);
96 c->wbuf_inodes = &inodirty_nomem; 98 c->wbuf_inodes = &inodirty_nomem;
97 return; 99 return;
@@ -113,19 +115,20 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
113 list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) { 115 list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
114 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); 116 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
115 117
116 D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset)); 118 jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
119 jeb->offset);
117 list_del(this); 120 list_del(this);
118 if ((jiffies + (n++)) & 127) { 121 if ((jiffies + (n++)) & 127) {
119 /* Most of the time, we just erase it immediately. Otherwise we 122 /* Most of the time, we just erase it immediately. Otherwise we
120 spend ages scanning it on mount, etc. */ 123 spend ages scanning it on mount, etc. */
121 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n")); 124 jffs2_dbg(1, "...and adding to erase_pending_list\n");
122 list_add_tail(&jeb->list, &c->erase_pending_list); 125 list_add_tail(&jeb->list, &c->erase_pending_list);
123 c->nr_erasing_blocks++; 126 c->nr_erasing_blocks++;
124 jffs2_garbage_collect_trigger(c); 127 jffs2_garbage_collect_trigger(c);
125 } else { 128 } else {
126 /* Sometimes, however, we leave it elsewhere so it doesn't get 129 /* Sometimes, however, we leave it elsewhere so it doesn't get
127 immediately reused, and we spread the load a bit. */ 130 immediately reused, and we spread the load a bit. */
128 D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); 131 jffs2_dbg(1, "...and adding to erasable_list\n");
129 list_add_tail(&jeb->list, &c->erasable_list); 132 list_add_tail(&jeb->list, &c->erasable_list);
130 } 133 }
131 } 134 }
@@ -136,7 +139,7 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
136 139
137static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty) 140static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
138{ 141{
139 D1(printk("About to refile bad block at %08x\n", jeb->offset)); 142 jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);
140 143
141 /* File the existing block on the bad_used_list.... */ 144 /* File the existing block on the bad_used_list.... */
142 if (c->nextblock == jeb) 145 if (c->nextblock == jeb)
@@ -144,12 +147,14 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
144 else /* Not sure this should ever happen... need more coffee */ 147 else /* Not sure this should ever happen... need more coffee */
145 list_del(&jeb->list); 148 list_del(&jeb->list);
146 if (jeb->first_node) { 149 if (jeb->first_node) {
147 D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset)); 150 jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
151 jeb->offset);
148 list_add(&jeb->list, &c->bad_used_list); 152 list_add(&jeb->list, &c->bad_used_list);
149 } else { 153 } else {
150 BUG_ON(allow_empty == REFILE_NOTEMPTY); 154 BUG_ON(allow_empty == REFILE_NOTEMPTY);
151 /* It has to have had some nodes or we couldn't be here */ 155 /* It has to have had some nodes or we couldn't be here */
152 D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset)); 156 jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
157 jeb->offset);
153 list_add(&jeb->list, &c->erase_pending_list); 158 list_add(&jeb->list, &c->erase_pending_list);
154 c->nr_erasing_blocks++; 159 c->nr_erasing_blocks++;
155 jffs2_garbage_collect_trigger(c); 160 jffs2_garbage_collect_trigger(c);
@@ -230,10 +235,12 @@ static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
230 235
231 ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify); 236 ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
232 if (ret && ret != -EUCLEAN && ret != -EBADMSG) { 237 if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
233 printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x failed: %d\n", c->wbuf_ofs, ret); 238 pr_warn("%s(): Read back of page at %08x failed: %d\n",
239 __func__, c->wbuf_ofs, ret);
234 return ret; 240 return ret;
235 } else if (retlen != c->wbuf_pagesize) { 241 } else if (retlen != c->wbuf_pagesize) {
236 printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x gave short read: %zd not %d.\n", ofs, retlen, c->wbuf_pagesize); 242 pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
243 __func__, ofs, retlen, c->wbuf_pagesize);
237 return -EIO; 244 return -EIO;
238 } 245 }
239 if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize)) 246 if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
@@ -246,12 +253,12 @@ static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
246 else 253 else
247 eccstr = "OK or unused"; 254 eccstr = "OK or unused";
248 255
249 printk(KERN_WARNING "Write verify error (ECC %s) at %08x. Wrote:\n", 256 pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
250 eccstr, c->wbuf_ofs); 257 eccstr, c->wbuf_ofs);
251 print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, 258 print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
252 c->wbuf, c->wbuf_pagesize, 0); 259 c->wbuf, c->wbuf_pagesize, 0);
253 260
254 printk(KERN_WARNING "Read back:\n"); 261 pr_warn("Read back:\n");
255 print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, 262 print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
256 c->wbuf_verify, c->wbuf_pagesize, 0); 263 c->wbuf_verify, c->wbuf_pagesize, 0);
257 264
@@ -308,7 +315,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
308 315
309 if (!first_raw) { 316 if (!first_raw) {
310 /* All nodes were obsolete. Nothing to recover. */ 317 /* All nodes were obsolete. Nothing to recover. */
311 D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n")); 318 jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
312 c->wbuf_len = 0; 319 c->wbuf_len = 0;
313 return; 320 return;
314 } 321 }
@@ -331,7 +338,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
331 338
332 buf = kmalloc(end - start, GFP_KERNEL); 339 buf = kmalloc(end - start, GFP_KERNEL);
333 if (!buf) { 340 if (!buf) {
334 printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n"); 341 pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");
335 342
336 goto read_failed; 343 goto read_failed;
337 } 344 }
@@ -346,7 +353,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
346 ret = 0; 353 ret = 0;
347 354
348 if (ret || retlen != c->wbuf_ofs - start) { 355 if (ret || retlen != c->wbuf_ofs - start) {
349 printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n"); 356 pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");
350 357
351 kfree(buf); 358 kfree(buf);
352 buf = NULL; 359 buf = NULL;
@@ -380,7 +387,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
380 /* ... and get an allocation of space from a shiny new block instead */ 387 /* ... and get an allocation of space from a shiny new block instead */
381 ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE); 388 ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
382 if (ret) { 389 if (ret) {
383 printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n"); 390 pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
384 kfree(buf); 391 kfree(buf);
385 return; 392 return;
386 } 393 }
@@ -390,7 +397,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
390 397
391 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile); 398 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
392 if (ret) { 399 if (ret) {
393 printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n"); 400 pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
394 kfree(buf); 401 kfree(buf);
395 return; 402 return;
396 } 403 }
@@ -406,13 +413,13 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
406 unsigned char *rewrite_buf = buf?:c->wbuf; 413 unsigned char *rewrite_buf = buf?:c->wbuf;
407 uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); 414 uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
408 415
409 D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n", 416 jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
410 towrite, ofs)); 417 towrite, ofs);
411 418
412#ifdef BREAKMEHEADER 419#ifdef BREAKMEHEADER
413 static int breakme; 420 static int breakme;
414 if (breakme++ == 20) { 421 if (breakme++ == 20) {
415 printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs); 422 pr_notice("Faking write error at 0x%08x\n", ofs);
416 breakme = 0; 423 breakme = 0;
417 mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf); 424 mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
418 ret = -EIO; 425 ret = -EIO;
@@ -423,7 +430,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
423 430
424 if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) { 431 if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
425 /* Argh. We tried. Really we did. */ 432 /* Argh. We tried. Really we did. */
426 printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n"); 433 pr_crit("Recovery of wbuf failed due to a second write error\n");
427 kfree(buf); 434 kfree(buf);
428 435
429 if (retlen) 436 if (retlen)
@@ -431,7 +438,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
431 438
432 return; 439 return;
433 } 440 }
434 printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs); 441 pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);
435 442
436 c->wbuf_len = (end - start) - towrite; 443 c->wbuf_len = (end - start) - towrite;
437 c->wbuf_ofs = ofs + towrite; 444 c->wbuf_ofs = ofs + towrite;
@@ -459,8 +466,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
459 struct jffs2_raw_node_ref **adjust_ref = NULL; 466 struct jffs2_raw_node_ref **adjust_ref = NULL;
460 struct jffs2_inode_info *f = NULL; 467 struct jffs2_inode_info *f = NULL;
461 468
462 D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n", 469 jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
463 rawlen, ref_offset(raw), ref_flags(raw), ofs)); 470 rawlen, ref_offset(raw), ref_flags(raw), ofs);
464 471
465 ic = jffs2_raw_ref_to_ic(raw); 472 ic = jffs2_raw_ref_to_ic(raw);
466 473
@@ -540,7 +547,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
540 547
541 /* Fix up the original jeb now it's on the bad_list */ 548 /* Fix up the original jeb now it's on the bad_list */
542 if (first_raw == jeb->first_node) { 549 if (first_raw == jeb->first_node) {
543 D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset)); 550 jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
551 jeb->offset);
544 list_move(&jeb->list, &c->erase_pending_list); 552 list_move(&jeb->list, &c->erase_pending_list);
545 c->nr_erasing_blocks++; 553 c->nr_erasing_blocks++;
546 jffs2_garbage_collect_trigger(c); 554 jffs2_garbage_collect_trigger(c);
@@ -554,7 +562,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
554 562
555 spin_unlock(&c->erase_completion_lock); 563 spin_unlock(&c->erase_completion_lock);
556 564
557 D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len)); 565 jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
566 c->wbuf_ofs, c->wbuf_len);
558 567
559} 568}
560 569
@@ -579,7 +588,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
579 return 0; 588 return 0;
580 589
581 if (!mutex_is_locked(&c->alloc_sem)) { 590 if (!mutex_is_locked(&c->alloc_sem)) {
582 printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n"); 591 pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
583 BUG(); 592 BUG();
584 } 593 }
585 594
@@ -617,7 +626,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
617#ifdef BREAKME 626#ifdef BREAKME
618 static int breakme; 627 static int breakme;
619 if (breakme++ == 20) { 628 if (breakme++ == 20) {
620 printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs); 629 pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
621 breakme = 0; 630 breakme = 0;
622 mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, 631 mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
623 brokenbuf); 632 brokenbuf);
@@ -629,11 +638,11 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
629 &retlen, c->wbuf); 638 &retlen, c->wbuf);
630 639
631 if (ret) { 640 if (ret) {
632 printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n", ret); 641 pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
633 goto wfail; 642 goto wfail;
634 } else if (retlen != c->wbuf_pagesize) { 643 } else if (retlen != c->wbuf_pagesize) {
635 printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n", 644 pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
636 retlen, c->wbuf_pagesize); 645 retlen, c->wbuf_pagesize);
637 ret = -EIO; 646 ret = -EIO;
638 goto wfail; 647 goto wfail;
639 } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) { 648 } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
@@ -647,17 +656,18 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
647 if (pad) { 656 if (pad) {
648 uint32_t waste = c->wbuf_pagesize - c->wbuf_len; 657 uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
649 658
650 D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", 659 jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
651 (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset)); 660 (wbuf_jeb == c->nextblock) ? "next" : "",
661 wbuf_jeb->offset);
652 662
653 /* wbuf_pagesize - wbuf_len is the amount of space that's to be 663 /* wbuf_pagesize - wbuf_len is the amount of space that's to be
654 padded. If there is less free space in the block than that, 664 padded. If there is less free space in the block than that,
655 something screwed up */ 665 something screwed up */
656 if (wbuf_jeb->free_size < waste) { 666 if (wbuf_jeb->free_size < waste) {
657 printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n", 667 pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
658 c->wbuf_ofs, c->wbuf_len, waste); 668 c->wbuf_ofs, c->wbuf_len, waste);
659 printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n", 669 pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
660 wbuf_jeb->offset, wbuf_jeb->free_size); 670 wbuf_jeb->offset, wbuf_jeb->free_size);
661 BUG(); 671 BUG();
662 } 672 }
663 673
@@ -694,14 +704,14 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
694 uint32_t old_wbuf_len; 704 uint32_t old_wbuf_len;
695 int ret = 0; 705 int ret = 0;
696 706
697 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino)); 707 jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);
698 708
699 if (!c->wbuf) 709 if (!c->wbuf)
700 return 0; 710 return 0;
701 711
702 mutex_lock(&c->alloc_sem); 712 mutex_lock(&c->alloc_sem);
703 if (!jffs2_wbuf_pending_for_ino(c, ino)) { 713 if (!jffs2_wbuf_pending_for_ino(c, ino)) {
704 D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino)); 714 jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
705 mutex_unlock(&c->alloc_sem); 715 mutex_unlock(&c->alloc_sem);
706 return 0; 716 return 0;
707 } 717 }
@@ -711,7 +721,8 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
711 721
712 if (c->unchecked_size) { 722 if (c->unchecked_size) {
713 /* GC won't make any progress for a while */ 723 /* GC won't make any progress for a while */
714 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n")); 724 jffs2_dbg(1, "%s(): padding. Not finished checking\n",
725 __func__);
715 down_write(&c->wbuf_sem); 726 down_write(&c->wbuf_sem);
716 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); 727 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
717 /* retry flushing wbuf in case jffs2_wbuf_recover 728 /* retry flushing wbuf in case jffs2_wbuf_recover
@@ -724,7 +735,7 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
724 735
725 mutex_unlock(&c->alloc_sem); 736 mutex_unlock(&c->alloc_sem);
726 737
727 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n")); 738 jffs2_dbg(1, "%s(): calls gc pass\n", __func__);
728 739
729 ret = jffs2_garbage_collect_pass(c); 740 ret = jffs2_garbage_collect_pass(c);
730 if (ret) { 741 if (ret) {
@@ -742,7 +753,7 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
742 mutex_lock(&c->alloc_sem); 753 mutex_lock(&c->alloc_sem);
743 } 754 }
744 755
745 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n")); 756 jffs2_dbg(1, "%s(): ends...\n", __func__);
746 757
747 mutex_unlock(&c->alloc_sem); 758 mutex_unlock(&c->alloc_sem);
748 return ret; 759 return ret;
@@ -811,9 +822,8 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
811 if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { 822 if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
812 /* It's a write to a new block */ 823 /* It's a write to a new block */
813 if (c->wbuf_len) { 824 if (c->wbuf_len) {
814 D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx " 825 jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
815 "causes flush of wbuf at 0x%08x\n", 826 __func__, (unsigned long)to, c->wbuf_ofs);
816 (unsigned long)to, c->wbuf_ofs));
817 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); 827 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
818 if (ret) 828 if (ret)
819 goto outerr; 829 goto outerr;
@@ -825,11 +835,11 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
825 835
826 if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { 836 if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
827 /* We're not writing immediately after the writebuffer. Bad. */ 837 /* We're not writing immediately after the writebuffer. Bad. */
828 printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write " 838 pr_crit("%s(): Non-contiguous write to %08lx\n",
829 "to %08lx\n", (unsigned long)to); 839 __func__, (unsigned long)to);
830 if (c->wbuf_len) 840 if (c->wbuf_len)
831 printk(KERN_CRIT "wbuf was previously %08x-%08x\n", 841 pr_crit("wbuf was previously %08x-%08x\n",
832 c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len); 842 c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
833 BUG(); 843 BUG();
834 } 844 }
835 845
@@ -957,8 +967,8 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re
957 967
958 if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) { 968 if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
959 if (ret == -EBADMSG) 969 if (ret == -EBADMSG)
960 printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)" 970 pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
961 " returned ECC error\n", len, ofs); 971 len, ofs);
962 /* 972 /*
963 * We have the raw data without ECC correction in the buffer, 973 * We have the raw data without ECC correction in the buffer,
964 * maybe we are lucky and all data or parts are correct. We 974 * maybe we are lucky and all data or parts are correct. We
@@ -1034,9 +1044,8 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
1034 1044
1035 ret = mtd_read_oob(c->mtd, jeb->offset, &ops); 1045 ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
1036 if (ret || ops.oobretlen != ops.ooblen) { 1046 if (ret || ops.oobretlen != ops.ooblen) {
1037 printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd" 1047 pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1038 " bytes, read %zd bytes, error %d\n", 1048 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1039 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1040 if (!ret) 1049 if (!ret)
1041 ret = -EIO; 1050 ret = -EIO;
1042 return ret; 1051 return ret;
@@ -1048,8 +1057,8 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
1048 continue; 1057 continue;
1049 1058
1050 if (ops.oobbuf[i] != 0xFF) { 1059 if (ops.oobbuf[i] != 0xFF) {
1051 D2(printk(KERN_DEBUG "Found %02x at %x in OOB for " 1060 jffs2_dbg(2, "Found %02x at %x in OOB for "
1052 "%08x\n", ops.oobbuf[i], i, jeb->offset)); 1061 "%08x\n", ops.oobbuf[i], i, jeb->offset);
1053 return 1; 1062 return 1;
1054 } 1063 }
1055 } 1064 }
@@ -1077,9 +1086,8 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
1077 1086
1078 ret = mtd_read_oob(c->mtd, jeb->offset, &ops); 1087 ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
1079 if (ret || ops.oobretlen != ops.ooblen) { 1088 if (ret || ops.oobretlen != ops.ooblen) {
1080 printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd" 1089 pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1081 " bytes, read %zd bytes, error %d\n", 1090 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1082 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1083 if (!ret) 1091 if (!ret)
1084 ret = -EIO; 1092 ret = -EIO;
1085 return ret; 1093 return ret;
@@ -1103,9 +1111,8 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
1103 1111
1104 ret = mtd_write_oob(c->mtd, jeb->offset, &ops); 1112 ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
1105 if (ret || ops.oobretlen != ops.ooblen) { 1113 if (ret || ops.oobretlen != ops.ooblen) {
1106 printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd" 1114 pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1107 " bytes, read %zd bytes, error %d\n", 1115 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1108 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1109 if (!ret) 1116 if (!ret)
1110 ret = -EIO; 1117 ret = -EIO;
1111 return ret; 1118 return ret;
@@ -1130,11 +1137,12 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *
1130 if( ++jeb->bad_count < MAX_ERASE_FAILURES) 1137 if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1131 return 0; 1138 return 0;
1132 1139
1133 printk(KERN_WARNING "JFFS2: marking eraseblock at %08x\n as bad", bad_offset); 1140 pr_warn("marking eraseblock at %08x as bad\n", bad_offset);
1134 ret = mtd_block_markbad(c->mtd, bad_offset); 1141 ret = mtd_block_markbad(c->mtd, bad_offset);
1135 1142
1136 if (ret) { 1143 if (ret) {
1137 D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); 1144 jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
1145 __func__, jeb->offset, ret);
1138 return ret; 1146 return ret;
1139 } 1147 }
1140 return 1; 1148 return 1;
@@ -1151,11 +1159,11 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1151 c->cleanmarker_size = 0; 1159 c->cleanmarker_size = 0;
1152 1160
1153 if (!oinfo || oinfo->oobavail == 0) { 1161 if (!oinfo || oinfo->oobavail == 0) {
1154 printk(KERN_ERR "inconsistent device description\n"); 1162 pr_err("inconsistent device description\n");
1155 return -EINVAL; 1163 return -EINVAL;
1156 } 1164 }
1157 1165
1158 D1(printk(KERN_DEBUG "JFFS2 using OOB on NAND\n")); 1166 jffs2_dbg(1, "using OOB on NAND\n");
1159 1167
1160 c->oobavail = oinfo->oobavail; 1168 c->oobavail = oinfo->oobavail;
1161 1169
@@ -1222,7 +1230,7 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1222 1230
1223 if ((c->flash_size % c->sector_size) != 0) { 1231 if ((c->flash_size % c->sector_size) != 0) {
1224 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size; 1232 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1225 printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size); 1233 pr_warn("flash size adjusted to %dKiB\n", c->flash_size);
1226 }; 1234 };
1227 1235
1228 c->wbuf_ofs = 0xFFFFFFFF; 1236 c->wbuf_ofs = 0xFFFFFFFF;
@@ -1239,7 +1247,8 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1239 } 1247 }
1240#endif 1248#endif
1241 1249
1242 printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size); 1250 pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
1251 c->wbuf_pagesize, c->sector_size);
1243 1252
1244 return 0; 1253 return 0;
1245} 1254}
@@ -1297,7 +1306,8 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
1297 if (!c->wbuf) 1306 if (!c->wbuf)
1298 return -ENOMEM; 1307 return -ENOMEM;
1299 1308
1300 printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size); 1309 pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
1310 c->wbuf_pagesize, c->sector_size);
1301 1311
1302 return 0; 1312 return 0;
1303} 1313}
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index 30d175b6d290..b634de4c8101 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/kernel.h> 14#include <linux/kernel.h>
13#include <linux/fs.h> 15#include <linux/fs.h>
14#include <linux/crc32.h> 16#include <linux/crc32.h>
@@ -36,7 +38,7 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
36 f->inocache->state = INO_STATE_PRESENT; 38 f->inocache->state = INO_STATE_PRESENT;
37 39
38 jffs2_add_ino_cache(c, f->inocache); 40 jffs2_add_ino_cache(c, f->inocache);
39 D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino)); 41 jffs2_dbg(1, "%s(): Assigned ino# %d\n", __func__, f->inocache->ino);
40 ri->ino = cpu_to_je32(f->inocache->ino); 42 ri->ino = cpu_to_je32(f->inocache->ino);
41 43
42 ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); 44 ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -68,7 +70,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
68 unsigned long cnt = 2; 70 unsigned long cnt = 2;
69 71
70 D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) { 72 D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) {
71 printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dnode()\n"); 73 pr_crit("Eep. CRC not correct in jffs2_write_dnode()\n");
72 BUG(); 74 BUG();
73 } 75 }
74 ); 76 );
@@ -78,7 +80,9 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
78 vecs[1].iov_len = datalen; 80 vecs[1].iov_len = datalen;
79 81
80 if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) { 82 if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) {
81 printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen); 83 pr_warn("%s(): ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n",
84 __func__, je32_to_cpu(ri->totlen),
85 sizeof(*ri), datalen);
82 } 86 }
83 87
84 fn = jffs2_alloc_full_dnode(); 88 fn = jffs2_alloc_full_dnode();
@@ -95,9 +99,9 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
95 99
96 if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { 100 if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) {
97 BUG_ON(!retried); 101 BUG_ON(!retried);
98 D1(printk(KERN_DEBUG "jffs2_write_dnode : dnode_version %d, " 102 jffs2_dbg(1, "%s(): dnode_version %d, highest version %d -> updating dnode\n",
99 "highest version %d -> updating dnode\n", 103 __func__,
100 je32_to_cpu(ri->version), f->highest_version)); 104 je32_to_cpu(ri->version), f->highest_version);
101 ri->version = cpu_to_je32(++f->highest_version); 105 ri->version = cpu_to_je32(++f->highest_version);
102 ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); 106 ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
103 } 107 }
@@ -106,8 +110,8 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
106 (alloc_mode==ALLOC_GC)?0:f->inocache->ino); 110 (alloc_mode==ALLOC_GC)?0:f->inocache->ino);
107 111
108 if (ret || (retlen != sizeof(*ri) + datalen)) { 112 if (ret || (retlen != sizeof(*ri) + datalen)) {
109 printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", 113 pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
110 sizeof(*ri)+datalen, flash_ofs, ret, retlen); 114 sizeof(*ri) + datalen, flash_ofs, ret, retlen);
111 115
112 /* Mark the space as dirtied */ 116 /* Mark the space as dirtied */
113 if (retlen) { 117 if (retlen) {
@@ -118,7 +122,8 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
118 this node */ 122 this node */
119 jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*ri)+datalen), NULL); 123 jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*ri)+datalen), NULL);
120 } else { 124 } else {
121 printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs); 125 pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
126 flash_ofs);
122 } 127 }
123 if (!retried && alloc_mode != ALLOC_NORETRY) { 128 if (!retried && alloc_mode != ALLOC_NORETRY) {
124 /* Try to reallocate space and retry */ 129 /* Try to reallocate space and retry */
@@ -127,7 +132,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
127 132
128 retried = 1; 133 retried = 1;
129 134
130 D1(printk(KERN_DEBUG "Retrying failed write.\n")); 135 jffs2_dbg(1, "Retrying failed write.\n");
131 136
132 jffs2_dbg_acct_sanity_check(c,jeb); 137 jffs2_dbg_acct_sanity_check(c,jeb);
133 jffs2_dbg_acct_paranoia_check(c, jeb); 138 jffs2_dbg_acct_paranoia_check(c, jeb);
@@ -147,14 +152,16 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
147 152
148 if (!ret) { 153 if (!ret) {
149 flash_ofs = write_ofs(c); 154 flash_ofs = write_ofs(c);
150 D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); 155 jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n",
156 flash_ofs);
151 157
152 jffs2_dbg_acct_sanity_check(c,jeb); 158 jffs2_dbg_acct_sanity_check(c,jeb);
153 jffs2_dbg_acct_paranoia_check(c, jeb); 159 jffs2_dbg_acct_paranoia_check(c, jeb);
154 160
155 goto retry; 161 goto retry;
156 } 162 }
157 D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); 163 jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
164 ret);
158 } 165 }
159 /* Release the full_dnode which is now useless, and return */ 166 /* Release the full_dnode which is now useless, and return */
160 jffs2_free_full_dnode(fn); 167 jffs2_free_full_dnode(fn);
@@ -183,10 +190,10 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
183 fn->size = je32_to_cpu(ri->dsize); 190 fn->size = je32_to_cpu(ri->dsize);
184 fn->frags = 0; 191 fn->frags = 0;
185 192
186 D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", 193 jffs2_dbg(1, "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n",
187 flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize), 194 flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize),
188 je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc), 195 je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc),
189 je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen))); 196 je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen));
190 197
191 if (retried) { 198 if (retried) {
192 jffs2_dbg_acct_sanity_check(c,NULL); 199 jffs2_dbg_acct_sanity_check(c,NULL);
@@ -206,22 +213,23 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
206 int retried = 0; 213 int retried = 0;
207 int ret; 214 int ret;
208 215
209 D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", 216 jffs2_dbg(1, "%s(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n",
217 __func__,
210 je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), 218 je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
211 je32_to_cpu(rd->name_crc))); 219 je32_to_cpu(rd->name_crc));
212 220
213 D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) { 221 D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) {
214 printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n"); 222 pr_crit("Eep. CRC not correct in jffs2_write_dirent()\n");
215 BUG(); 223 BUG();
216 }); 224 });
217 225
218 if (strnlen(name, namelen) != namelen) { 226 if (strnlen(name, namelen) != namelen) {
219 /* This should never happen, but seems to have done on at least one 227 /* This should never happen, but seems to have done on at least one
220 occasion: https://dev.laptop.org/ticket/4184 */ 228 occasion: https://dev.laptop.org/ticket/4184 */
221 printk(KERN_CRIT "Error in jffs2_write_dirent() -- name contains zero bytes!\n"); 229 pr_crit("Error in jffs2_write_dirent() -- name contains zero bytes!\n");
222 printk(KERN_CRIT "Directory inode #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x\n", 230 pr_crit("Directory inode #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x\n",
223 je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), 231 je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
224 je32_to_cpu(rd->name_crc)); 232 je32_to_cpu(rd->name_crc));
225 WARN_ON(1); 233 WARN_ON(1);
226 return ERR_PTR(-EIO); 234 return ERR_PTR(-EIO);
227 } 235 }
@@ -249,9 +257,9 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
249 257
250 if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) { 258 if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) {
251 BUG_ON(!retried); 259 BUG_ON(!retried);
252 D1(printk(KERN_DEBUG "jffs2_write_dirent : dirent_version %d, " 260 jffs2_dbg(1, "%s(): dirent_version %d, highest version %d -> updating dirent\n",
253 "highest version %d -> updating dirent\n", 261 __func__,
254 je32_to_cpu(rd->version), f->highest_version)); 262 je32_to_cpu(rd->version), f->highest_version);
255 rd->version = cpu_to_je32(++f->highest_version); 263 rd->version = cpu_to_je32(++f->highest_version);
256 fd->version = je32_to_cpu(rd->version); 264 fd->version = je32_to_cpu(rd->version);
257 rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); 265 rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
@@ -260,13 +268,14 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
260 ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen, 268 ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen,
261 (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino)); 269 (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino));
262 if (ret || (retlen != sizeof(*rd) + namelen)) { 270 if (ret || (retlen != sizeof(*rd) + namelen)) {
263 printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", 271 pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
264 sizeof(*rd)+namelen, flash_ofs, ret, retlen); 272 sizeof(*rd) + namelen, flash_ofs, ret, retlen);
265 /* Mark the space as dirtied */ 273 /* Mark the space as dirtied */
266 if (retlen) { 274 if (retlen) {
267 jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*rd)+namelen), NULL); 275 jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*rd)+namelen), NULL);
268 } else { 276 } else {
269 printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs); 277 pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
278 flash_ofs);
270 } 279 }
271 if (!retried) { 280 if (!retried) {
272 /* Try to reallocate space and retry */ 281 /* Try to reallocate space and retry */
@@ -275,7 +284,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
275 284
276 retried = 1; 285 retried = 1;
277 286
278 D1(printk(KERN_DEBUG "Retrying failed write.\n")); 287 jffs2_dbg(1, "Retrying failed write.\n");
279 288
280 jffs2_dbg_acct_sanity_check(c,jeb); 289 jffs2_dbg_acct_sanity_check(c,jeb);
281 jffs2_dbg_acct_paranoia_check(c, jeb); 290 jffs2_dbg_acct_paranoia_check(c, jeb);
@@ -295,12 +304,14 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
295 304
296 if (!ret) { 305 if (!ret) {
297 flash_ofs = write_ofs(c); 306 flash_ofs = write_ofs(c);
298 D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); 307 jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write\n",
308 flash_ofs);
299 jffs2_dbg_acct_sanity_check(c,jeb); 309 jffs2_dbg_acct_sanity_check(c,jeb);
300 jffs2_dbg_acct_paranoia_check(c, jeb); 310 jffs2_dbg_acct_paranoia_check(c, jeb);
301 goto retry; 311 goto retry;
302 } 312 }
303 D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); 313 jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
314 ret);
304 } 315 }
305 /* Release the full_dnode which is now useless, and return */ 316 /* Release the full_dnode which is now useless, and return */
306 jffs2_free_full_dirent(fd); 317 jffs2_free_full_dirent(fd);
@@ -333,8 +344,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
333 int ret = 0; 344 int ret = 0;
334 uint32_t writtenlen = 0; 345 uint32_t writtenlen = 0;
335 346
336 D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n", 347 jffs2_dbg(1, "%s(): Ino #%u, ofs 0x%x, len 0x%x\n",
337 f->inocache->ino, offset, writelen)); 348 __func__, f->inocache->ino, offset, writelen);
338 349
339 while(writelen) { 350 while(writelen) {
340 struct jffs2_full_dnode *fn; 351 struct jffs2_full_dnode *fn;
@@ -345,12 +356,13 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
345 int retried = 0; 356 int retried = 0;
346 357
347 retry: 358 retry:
348 D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset)); 359 jffs2_dbg(2, "jffs2_commit_write() loop: 0x%x to write to 0x%x\n",
360 writelen, offset);
349 361
350 ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, 362 ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN,
351 &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); 363 &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
352 if (ret) { 364 if (ret) {
353 D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret)); 365 jffs2_dbg(1, "jffs2_reserve_space returned %d\n", ret);
354 break; 366 break;
355 } 367 }
356 mutex_lock(&f->sem); 368 mutex_lock(&f->sem);
@@ -386,7 +398,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
386 if (!retried) { 398 if (!retried) {
387 /* Write error to be retried */ 399 /* Write error to be retried */
388 retried = 1; 400 retried = 1;
389 D1(printk(KERN_DEBUG "Retrying node write in jffs2_write_inode_range()\n")); 401 jffs2_dbg(1, "Retrying node write in jffs2_write_inode_range()\n");
390 goto retry; 402 goto retry;
391 } 403 }
392 break; 404 break;
@@ -399,7 +411,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
399 } 411 }
400 if (ret) { 412 if (ret) {
401 /* Eep */ 413 /* Eep */
402 D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", ret)); 414 jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n",
415 ret);
403 jffs2_mark_node_obsolete(c, fn->raw); 416 jffs2_mark_node_obsolete(c, fn->raw);
404 jffs2_free_full_dnode(fn); 417 jffs2_free_full_dnode(fn);
405 418
@@ -410,11 +423,11 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
410 mutex_unlock(&f->sem); 423 mutex_unlock(&f->sem);
411 jffs2_complete_reservation(c); 424 jffs2_complete_reservation(c);
412 if (!datalen) { 425 if (!datalen) {
413 printk(KERN_WARNING "Eep. We didn't actually write any data in jffs2_write_inode_range()\n"); 426 pr_warn("Eep. We didn't actually write any data in jffs2_write_inode_range()\n");
414 ret = -EIO; 427 ret = -EIO;
415 break; 428 break;
416 } 429 }
417 D1(printk(KERN_DEBUG "increasing writtenlen by %d\n", datalen)); 430 jffs2_dbg(1, "increasing writtenlen by %d\n", datalen);
418 writtenlen += datalen; 431 writtenlen += datalen;
419 offset += datalen; 432 offset += datalen;
420 writelen -= datalen; 433 writelen -= datalen;
@@ -439,7 +452,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
439 */ 452 */
440 ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, 453 ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL,
441 JFFS2_SUMMARY_INODE_SIZE); 454 JFFS2_SUMMARY_INODE_SIZE);
442 D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen)); 455 jffs2_dbg(1, "%s(): reserved 0x%x bytes\n", __func__, alloclen);
443 if (ret) 456 if (ret)
444 return ret; 457 return ret;
445 458
@@ -450,11 +463,11 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
450 463
451 fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); 464 fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL);
452 465
453 D1(printk(KERN_DEBUG "jffs2_do_create created file with mode 0x%x\n", 466 jffs2_dbg(1, "jffs2_do_create created file with mode 0x%x\n",
454 jemode_to_cpu(ri->mode))); 467 jemode_to_cpu(ri->mode));
455 468
456 if (IS_ERR(fn)) { 469 if (IS_ERR(fn)) {
457 D1(printk(KERN_DEBUG "jffs2_write_dnode() failed\n")); 470 jffs2_dbg(1, "jffs2_write_dnode() failed\n");
458 /* Eeek. Wave bye bye */ 471 /* Eeek. Wave bye bye */
459 mutex_unlock(&f->sem); 472 mutex_unlock(&f->sem);
460 jffs2_complete_reservation(c); 473 jffs2_complete_reservation(c);
@@ -480,7 +493,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
480 493
481 if (ret) { 494 if (ret) {
482 /* Eep. */ 495 /* Eep. */
483 D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n")); 496 jffs2_dbg(1, "jffs2_reserve_space() for dirent failed\n");
484 return ret; 497 return ret;
485 } 498 }
486 499
@@ -597,8 +610,8 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
597 !memcmp(fd->name, name, namelen) && 610 !memcmp(fd->name, name, namelen) &&
598 !fd->name[namelen]) { 611 !fd->name[namelen]) {
599 612
600 D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n", 613 jffs2_dbg(1, "Marking old dirent node (ino #%u) @%08x obsolete\n",
601 fd->ino, ref_offset(fd->raw))); 614 fd->ino, ref_offset(fd->raw));
602 jffs2_mark_node_obsolete(c, fd->raw); 615 jffs2_mark_node_obsolete(c, fd->raw);
603 /* We don't want to remove it from the list immediately, 616 /* We don't want to remove it from the list immediately,
604 because that screws up getdents()/seek() semantics even 617 because that screws up getdents()/seek() semantics even
@@ -627,11 +640,13 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
627 dead_f->dents = fd->next; 640 dead_f->dents = fd->next;
628 641
629 if (fd->ino) { 642 if (fd->ino) {
630 printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n", 643 pr_warn("Deleting inode #%u with active dentry \"%s\"->ino #%u\n",
631 dead_f->inocache->ino, fd->name, fd->ino); 644 dead_f->inocache->ino,
645 fd->name, fd->ino);
632 } else { 646 } else {
633 D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", 647 jffs2_dbg(1, "Removing deletion dirent for \"%s\" from dir ino #%u\n",
634 fd->name, dead_f->inocache->ino)); 648 fd->name,
649 dead_f->inocache->ino);
635 } 650 }
636 if (fd->raw) 651 if (fd->raw)
637 jffs2_mark_node_obsolete(c, fd->raw); 652 jffs2_mark_node_obsolete(c, fd->raw);
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 3e93cdd19005..b55b803eddcb 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/kernel.h> 14#include <linux/kernel.h>
13#include <linux/slab.h> 15#include <linux/slab.h>
14#include <linux/fs.h> 16#include <linux/fs.h>
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 2774e1013b34..f49b9afc4436 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -496,7 +496,7 @@ static int param_set_##name(const char *val, struct kernel_param *kp) \
496 __typeof__(type) num = which_strtol(val, &endp, 0); \ 496 __typeof__(type) num = which_strtol(val, &endp, 0); \
497 if (endp == val || *endp || num < (min) || num > (max)) \ 497 if (endp == val || *endp || num < (min) || num > (max)) \
498 return -EINVAL; \ 498 return -EINVAL; \
499 *((int *) kp->arg) = num; \ 499 *((type *) kp->arg) = num; \
500 return 0; \ 500 return 0; \
501} 501}
502 502
diff --git a/fs/namei.c b/fs/namei.c
index e615ff37e27d..1898198abc3d 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1054,53 +1054,65 @@ static void follow_dotdot(struct nameidata *nd)
1054} 1054}
1055 1055
1056/* 1056/*
1057 * Allocate a dentry with name and parent, and perform a parent 1057 * This looks up the name in dcache, possibly revalidates the old dentry and
1058 * directory ->lookup on it. Returns the new dentry, or ERR_PTR 1058 * allocates a new one if not found or not valid. In the need_lookup argument
1059 * on error. parent->d_inode->i_mutex must be held. d_lookup must 1059 * returns whether i_op->lookup is necessary.
1060 * have verified that no child exists while under i_mutex. 1060 *
1061 * dir->d_inode->i_mutex must be held
1061 */ 1062 */
1062static struct dentry *d_alloc_and_lookup(struct dentry *parent, 1063static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
1063 struct qstr *name, struct nameidata *nd) 1064 struct nameidata *nd, bool *need_lookup)
1064{ 1065{
1065 struct inode *inode = parent->d_inode;
1066 struct dentry *dentry; 1066 struct dentry *dentry;
1067 struct dentry *old; 1067 int error;
1068 1068
1069 /* Don't create child dentry for a dead directory. */ 1069 *need_lookup = false;
1070 if (unlikely(IS_DEADDIR(inode))) 1070 dentry = d_lookup(dir, name);
1071 return ERR_PTR(-ENOENT); 1071 if (dentry) {
1072 if (d_need_lookup(dentry)) {
1073 *need_lookup = true;
1074 } else if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
1075 error = d_revalidate(dentry, nd);
1076 if (unlikely(error <= 0)) {
1077 if (error < 0) {
1078 dput(dentry);
1079 return ERR_PTR(error);
1080 } else if (!d_invalidate(dentry)) {
1081 dput(dentry);
1082 dentry = NULL;
1083 }
1084 }
1085 }
1086 }
1072 1087
1073 dentry = d_alloc(parent, name); 1088 if (!dentry) {
1074 if (unlikely(!dentry)) 1089 dentry = d_alloc(dir, name);
1075 return ERR_PTR(-ENOMEM); 1090 if (unlikely(!dentry))
1091 return ERR_PTR(-ENOMEM);
1076 1092
1077 old = inode->i_op->lookup(inode, dentry, nd); 1093 *need_lookup = true;
1078 if (unlikely(old)) {
1079 dput(dentry);
1080 dentry = old;
1081 } 1094 }
1082 return dentry; 1095 return dentry;
1083} 1096}
1084 1097
1085/* 1098/*
1086 * We already have a dentry, but require a lookup to be performed on the parent 1099 * Call i_op->lookup on the dentry. The dentry must be negative but may be
1087 * directory to fill in d_inode. Returns the new dentry, or ERR_PTR on error. 1100 * hashed if it was pouplated with DCACHE_NEED_LOOKUP.
1088 * parent->d_inode->i_mutex must be held. d_lookup must have verified that no 1101 *
1089 * child exists while under i_mutex. 1102 * dir->d_inode->i_mutex must be held
1090 */ 1103 */
1091static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentry, 1104static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
1092 struct nameidata *nd) 1105 struct nameidata *nd)
1093{ 1106{
1094 struct inode *inode = parent->d_inode;
1095 struct dentry *old; 1107 struct dentry *old;
1096 1108
1097 /* Don't create child dentry for a dead directory. */ 1109 /* Don't create child dentry for a dead directory. */
1098 if (unlikely(IS_DEADDIR(inode))) { 1110 if (unlikely(IS_DEADDIR(dir))) {
1099 dput(dentry); 1111 dput(dentry);
1100 return ERR_PTR(-ENOENT); 1112 return ERR_PTR(-ENOENT);
1101 } 1113 }
1102 1114
1103 old = inode->i_op->lookup(inode, dentry, nd); 1115 old = dir->i_op->lookup(dir, dentry, nd);
1104 if (unlikely(old)) { 1116 if (unlikely(old)) {
1105 dput(dentry); 1117 dput(dentry);
1106 dentry = old; 1118 dentry = old;
@@ -1108,6 +1120,19 @@ static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentr
1108 return dentry; 1120 return dentry;
1109} 1121}
1110 1122
1123static struct dentry *__lookup_hash(struct qstr *name,
1124 struct dentry *base, struct nameidata *nd)
1125{
1126 bool need_lookup;
1127 struct dentry *dentry;
1128
1129 dentry = lookup_dcache(name, base, nd, &need_lookup);
1130 if (!need_lookup)
1131 return dentry;
1132
1133 return lookup_real(base->d_inode, dentry, nd);
1134}
1135
1111/* 1136/*
1112 * It's more convoluted than I'd like it to be, but... it's still fairly 1137 * It's more convoluted than I'd like it to be, but... it's still fairly
1113 * small and for now I'd prefer to have fast path as straight as possible. 1138 * small and for now I'd prefer to have fast path as straight as possible.
@@ -1139,6 +1164,8 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
1139 return -ECHILD; 1164 return -ECHILD;
1140 nd->seq = seq; 1165 nd->seq = seq;
1141 1166
1167 if (unlikely(d_need_lookup(dentry)))
1168 goto unlazy;
1142 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { 1169 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
1143 status = d_revalidate(dentry, nd); 1170 status = d_revalidate(dentry, nd);
1144 if (unlikely(status <= 0)) { 1171 if (unlikely(status <= 0)) {
@@ -1147,8 +1174,6 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
1147 goto unlazy; 1174 goto unlazy;
1148 } 1175 }
1149 } 1176 }
1150 if (unlikely(d_need_lookup(dentry)))
1151 goto unlazy;
1152 path->mnt = mnt; 1177 path->mnt = mnt;
1153 path->dentry = dentry; 1178 path->dentry = dentry;
1154 if (unlikely(!__follow_mount_rcu(nd, path, inode))) 1179 if (unlikely(!__follow_mount_rcu(nd, path, inode)))
@@ -1163,38 +1188,14 @@ unlazy:
1163 dentry = __d_lookup(parent, name); 1188 dentry = __d_lookup(parent, name);
1164 } 1189 }
1165 1190
1166 if (dentry && unlikely(d_need_lookup(dentry))) { 1191 if (unlikely(!dentry))
1192 goto need_lookup;
1193
1194 if (unlikely(d_need_lookup(dentry))) {
1167 dput(dentry); 1195 dput(dentry);
1168 dentry = NULL; 1196 goto need_lookup;
1169 }
1170retry:
1171 if (unlikely(!dentry)) {
1172 struct inode *dir = parent->d_inode;
1173 BUG_ON(nd->inode != dir);
1174
1175 mutex_lock(&dir->i_mutex);
1176 dentry = d_lookup(parent, name);
1177 if (likely(!dentry)) {
1178 dentry = d_alloc_and_lookup(parent, name, nd);
1179 if (IS_ERR(dentry)) {
1180 mutex_unlock(&dir->i_mutex);
1181 return PTR_ERR(dentry);
1182 }
1183 /* known good */
1184 need_reval = 0;
1185 status = 1;
1186 } else if (unlikely(d_need_lookup(dentry))) {
1187 dentry = d_inode_lookup(parent, dentry, nd);
1188 if (IS_ERR(dentry)) {
1189 mutex_unlock(&dir->i_mutex);
1190 return PTR_ERR(dentry);
1191 }
1192 /* known good */
1193 need_reval = 0;
1194 status = 1;
1195 }
1196 mutex_unlock(&dir->i_mutex);
1197 } 1197 }
1198
1198 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval) 1199 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
1199 status = d_revalidate(dentry, nd); 1200 status = d_revalidate(dentry, nd);
1200 if (unlikely(status <= 0)) { 1201 if (unlikely(status <= 0)) {
@@ -1204,12 +1205,10 @@ retry:
1204 } 1205 }
1205 if (!d_invalidate(dentry)) { 1206 if (!d_invalidate(dentry)) {
1206 dput(dentry); 1207 dput(dentry);
1207 dentry = NULL; 1208 goto need_lookup;
1208 need_reval = 1;
1209 goto retry;
1210 } 1209 }
1211 } 1210 }
1212 1211done:
1213 path->mnt = mnt; 1212 path->mnt = mnt;
1214 path->dentry = dentry; 1213 path->dentry = dentry;
1215 err = follow_managed(path, nd->flags); 1214 err = follow_managed(path, nd->flags);
@@ -1221,6 +1220,16 @@ retry:
1221 nd->flags |= LOOKUP_JUMPED; 1220 nd->flags |= LOOKUP_JUMPED;
1222 *inode = path->dentry->d_inode; 1221 *inode = path->dentry->d_inode;
1223 return 0; 1222 return 0;
1223
1224need_lookup:
1225 BUG_ON(nd->inode != parent->d_inode);
1226
1227 mutex_lock(&parent->d_inode->i_mutex);
1228 dentry = __lookup_hash(name, parent, nd);
1229 mutex_unlock(&parent->d_inode->i_mutex);
1230 if (IS_ERR(dentry))
1231 return PTR_ERR(dentry);
1232 goto done;
1224} 1233}
1225 1234
1226static inline int may_lookup(struct nameidata *nd) 1235static inline int may_lookup(struct nameidata *nd)
@@ -1846,59 +1855,6 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
1846 return err; 1855 return err;
1847} 1856}
1848 1857
1849static struct dentry *__lookup_hash(struct qstr *name,
1850 struct dentry *base, struct nameidata *nd)
1851{
1852 struct inode *inode = base->d_inode;
1853 struct dentry *dentry;
1854 int err;
1855
1856 err = inode_permission(inode, MAY_EXEC);
1857 if (err)
1858 return ERR_PTR(err);
1859
1860 /*
1861 * Don't bother with __d_lookup: callers are for creat as
1862 * well as unlink, so a lot of the time it would cost
1863 * a double lookup.
1864 */
1865 dentry = d_lookup(base, name);
1866
1867 if (dentry && d_need_lookup(dentry)) {
1868 /*
1869 * __lookup_hash is called with the parent dir's i_mutex already
1870 * held, so we are good to go here.
1871 */
1872 dentry = d_inode_lookup(base, dentry, nd);
1873 if (IS_ERR(dentry))
1874 return dentry;
1875 }
1876
1877 if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE)) {
1878 int status = d_revalidate(dentry, nd);
1879 if (unlikely(status <= 0)) {
1880 /*
1881 * The dentry failed validation.
1882 * If d_revalidate returned 0 attempt to invalidate
1883 * the dentry otherwise d_revalidate is asking us
1884 * to return a fail status.
1885 */
1886 if (status < 0) {
1887 dput(dentry);
1888 return ERR_PTR(status);
1889 } else if (!d_invalidate(dentry)) {
1890 dput(dentry);
1891 dentry = NULL;
1892 }
1893 }
1894 }
1895
1896 if (!dentry)
1897 dentry = d_alloc_and_lookup(base, name, nd);
1898
1899 return dentry;
1900}
1901
1902/* 1858/*
1903 * Restricted form of lookup. Doesn't follow links, single-component only, 1859 * Restricted form of lookup. Doesn't follow links, single-component only,
1904 * needs parent already locked. Doesn't follow mounts. 1860 * needs parent already locked. Doesn't follow mounts.
@@ -1924,6 +1880,7 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
1924{ 1880{
1925 struct qstr this; 1881 struct qstr this;
1926 unsigned int c; 1882 unsigned int c;
1883 int err;
1927 1884
1928 WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex)); 1885 WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
1929 1886
@@ -1948,6 +1905,10 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
1948 return ERR_PTR(err); 1905 return ERR_PTR(err);
1949 } 1906 }
1950 1907
1908 err = inode_permission(base->d_inode, MAY_EXEC);
1909 if (err)
1910 return ERR_PTR(err);
1911
1951 return __lookup_hash(&this, base, NULL); 1912 return __lookup_hash(&this, base, NULL);
1952} 1913}
1953 1914
@@ -2749,7 +2710,7 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
2749 2710
2750/* 2711/*
2751 * The dentry_unhash() helper will try to drop the dentry early: we 2712 * The dentry_unhash() helper will try to drop the dentry early: we
2752 * should have a usage count of 2 if we're the only user of this 2713 * should have a usage count of 1 if we're the only user of this
2753 * dentry, and if that is true (possibly after pruning the dcache), 2714 * dentry, and if that is true (possibly after pruning the dcache),
2754 * then we drop the dentry now. 2715 * then we drop the dentry now.
2755 * 2716 *
diff --git a/fs/nfsd/current_stateid.h b/fs/nfsd/current_stateid.h
new file mode 100644
index 000000000000..4123551208d8
--- /dev/null
+++ b/fs/nfsd/current_stateid.h
@@ -0,0 +1,28 @@
1#ifndef _NFSD4_CURRENT_STATE_H
2#define _NFSD4_CURRENT_STATE_H
3
4#include "state.h"
5#include "xdr4.h"
6
7extern void clear_current_stateid(struct nfsd4_compound_state *cstate);
8/*
9 * functions to set current state id
10 */
11extern void nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *);
12extern void nfsd4_set_openstateid(struct nfsd4_compound_state *, struct nfsd4_open *);
13extern void nfsd4_set_lockstateid(struct nfsd4_compound_state *, struct nfsd4_lock *);
14extern void nfsd4_set_closestateid(struct nfsd4_compound_state *, struct nfsd4_close *);
15
16/*
17 * functions to consume current state id
18 */
19extern void nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *);
20extern void nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *, struct nfsd4_delegreturn *);
21extern void nfsd4_get_freestateid(struct nfsd4_compound_state *, struct nfsd4_free_stateid *);
22extern void nfsd4_get_setattrstateid(struct nfsd4_compound_state *, struct nfsd4_setattr *);
23extern void nfsd4_get_closestateid(struct nfsd4_compound_state *, struct nfsd4_close *);
24extern void nfsd4_get_lockustateid(struct nfsd4_compound_state *, struct nfsd4_locku *);
25extern void nfsd4_get_readstateid(struct nfsd4_compound_state *, struct nfsd4_read *);
26extern void nfsd4_get_writestateid(struct nfsd4_compound_state *, struct nfsd4_write *);
27
28#endif /* _NFSD4_CURRENT_STATE_H */
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index cf8a6bd062fa..8e9689abbc0c 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -87,7 +87,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
87 struct svc_expkey key; 87 struct svc_expkey key;
88 struct svc_expkey *ek = NULL; 88 struct svc_expkey *ek = NULL;
89 89
90 if (mlen < 1 || mesg[mlen-1] != '\n') 90 if (mesg[mlen - 1] != '\n')
91 return -EINVAL; 91 return -EINVAL;
92 mesg[mlen-1] = 0; 92 mesg[mlen-1] = 0;
93 93
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
new file mode 100644
index 000000000000..12e0cff435b4
--- /dev/null
+++ b/fs/nfsd/netns.h
@@ -0,0 +1,34 @@
1/*
2 * per net namespace data structures for nfsd
3 *
4 * Copyright (C) 2012, Jeff Layton <jlayton@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 51
18 * Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20
21#ifndef __NFSD_NETNS_H__
22#define __NFSD_NETNS_H__
23
24#include <net/net_namespace.h>
25#include <net/netns/generic.h>
26
27struct cld_net;
28
29struct nfsd_net {
30 struct cld_net *cld_net;
31};
32
33extern int nfsd_net_id;
34#endif /* __NFSD_NETNS_H__ */
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 0e262f32ac41..c8e9f637153a 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -645,7 +645,6 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
645 .timeout = &timeparms, 645 .timeout = &timeparms,
646 .program = &cb_program, 646 .program = &cb_program,
647 .version = 0, 647 .version = 0,
648 .authflavor = clp->cl_flavor,
649 .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET), 648 .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
650 }; 649 };
651 struct rpc_clnt *client; 650 struct rpc_clnt *client;
@@ -656,6 +655,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
656 args.client_name = clp->cl_principal; 655 args.client_name = clp->cl_principal;
657 args.prognumber = conn->cb_prog, 656 args.prognumber = conn->cb_prog,
658 args.protocol = XPRT_TRANSPORT_TCP; 657 args.protocol = XPRT_TRANSPORT_TCP;
658 args.authflavor = clp->cl_flavor;
659 clp->cl_cb_ident = conn->cb_ident; 659 clp->cl_cb_ident = conn->cb_ident;
660 } else { 660 } else {
661 if (!conn->cb_xprt) 661 if (!conn->cb_xprt)
@@ -665,6 +665,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
665 args.bc_xprt = conn->cb_xprt; 665 args.bc_xprt = conn->cb_xprt;
666 args.prognumber = clp->cl_cb_session->se_cb_prog; 666 args.prognumber = clp->cl_cb_session->se_cb_prog;
667 args.protocol = XPRT_TRANSPORT_BC_TCP; 667 args.protocol = XPRT_TRANSPORT_BC_TCP;
668 args.authflavor = RPC_AUTH_UNIX;
668 } 669 }
669 /* Create RPC client */ 670 /* Create RPC client */
670 client = rpc_create(&args); 671 client = rpc_create(&args);
@@ -754,9 +755,9 @@ static void do_probe_callback(struct nfs4_client *clp)
754 */ 755 */
755void nfsd4_probe_callback(struct nfs4_client *clp) 756void nfsd4_probe_callback(struct nfs4_client *clp)
756{ 757{
757 /* XXX: atomicity? Also, should we be using cl_cb_flags? */ 758 /* XXX: atomicity? Also, should we be using cl_flags? */
758 clp->cl_cb_state = NFSD4_CB_UNKNOWN; 759 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
759 set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags); 760 set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
760 do_probe_callback(clp); 761 do_probe_callback(clp);
761} 762}
762 763
@@ -915,7 +916,7 @@ void nfsd4_destroy_callback_queue(void)
915/* must be called under the state lock */ 916/* must be called under the state lock */
916void nfsd4_shutdown_callback(struct nfs4_client *clp) 917void nfsd4_shutdown_callback(struct nfs4_client *clp)
917{ 918{
918 set_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags); 919 set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags);
919 /* 920 /*
920 * Note this won't actually result in a null callback; 921 * Note this won't actually result in a null callback;
921 * instead, nfsd4_do_callback_rpc() will detect the killed 922 * instead, nfsd4_do_callback_rpc() will detect the killed
@@ -966,15 +967,15 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
966 svc_xprt_put(clp->cl_cb_conn.cb_xprt); 967 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
967 clp->cl_cb_conn.cb_xprt = NULL; 968 clp->cl_cb_conn.cb_xprt = NULL;
968 } 969 }
969 if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags)) 970 if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
970 return; 971 return;
971 spin_lock(&clp->cl_lock); 972 spin_lock(&clp->cl_lock);
972 /* 973 /*
973 * Only serialized callback code is allowed to clear these 974 * Only serialized callback code is allowed to clear these
974 * flags; main nfsd code can only set them: 975 * flags; main nfsd code can only set them:
975 */ 976 */
976 BUG_ON(!clp->cl_cb_flags); 977 BUG_ON(!(clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK));
977 clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags); 978 clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
978 memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn)); 979 memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
979 c = __nfsd4_find_backchannel(clp); 980 c = __nfsd4_find_backchannel(clp);
980 if (c) { 981 if (c) {
@@ -986,7 +987,7 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
986 987
987 err = setup_callback_client(clp, &conn, ses); 988 err = setup_callback_client(clp, &conn, ses);
988 if (err) { 989 if (err) {
989 warn_no_callback_path(clp, err); 990 nfsd4_mark_cb_down(clp, err);
990 return; 991 return;
991 } 992 }
992 /* Yay, the callback channel's back! Restart any callbacks: */ 993 /* Yay, the callback channel's back! Restart any callbacks: */
@@ -1000,7 +1001,7 @@ void nfsd4_do_callback_rpc(struct work_struct *w)
1000 struct nfs4_client *clp = cb->cb_clp; 1001 struct nfs4_client *clp = cb->cb_clp;
1001 struct rpc_clnt *clnt; 1002 struct rpc_clnt *clnt;
1002 1003
1003 if (clp->cl_cb_flags) 1004 if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
1004 nfsd4_process_cb_update(cb); 1005 nfsd4_process_cb_update(cb);
1005 1006
1006 clnt = clp->cl_cb_client; 1007 clnt = clp->cl_cb_client;
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 94096273cd6c..322d11ce06a4 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -41,6 +41,14 @@
41#include "nfsd.h" 41#include "nfsd.h"
42 42
43/* 43/*
44 * Turn off idmapping when using AUTH_SYS.
45 */
46static bool nfs4_disable_idmapping = true;
47module_param(nfs4_disable_idmapping, bool, 0644);
48MODULE_PARM_DESC(nfs4_disable_idmapping,
49 "Turn off server's NFSv4 idmapping when using 'sec=sys'");
50
51/*
44 * Cache entry 52 * Cache entry
45 */ 53 */
46 54
@@ -561,28 +569,65 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
561 return ret; 569 return ret;
562} 570}
563 571
572static bool
573numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id)
574{
575 int ret;
576 char buf[11];
577
578 if (namelen + 1 > sizeof(buf))
579 /* too long to represent a 32-bit id: */
580 return false;
581 /* Just to make sure it's null-terminated: */
582 memcpy(buf, name, namelen);
583 buf[namelen] = '\0';
584 ret = kstrtouint(name, 10, id);
585 return ret == 0;
586}
587
588static __be32
589do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id)
590{
591 if (nfs4_disable_idmapping && rqstp->rq_flavor < RPC_AUTH_GSS)
592 if (numeric_name_to_id(rqstp, type, name, namelen, id))
593 return 0;
594 /*
595 * otherwise, fall through and try idmapping, for
596 * backwards compatibility with clients sending names:
597 */
598 return idmap_name_to_id(rqstp, type, name, namelen, id);
599}
600
601static int
602do_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
603{
604 if (nfs4_disable_idmapping && rqstp->rq_flavor < RPC_AUTH_GSS)
605 return sprintf(name, "%u", id);
606 return idmap_id_to_name(rqstp, type, id, name);
607}
608
564__be32 609__be32
565nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen, 610nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen,
566 __u32 *id) 611 __u32 *id)
567{ 612{
568 return idmap_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id); 613 return do_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id);
569} 614}
570 615
571__be32 616__be32
572nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen, 617nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen,
573 __u32 *id) 618 __u32 *id)
574{ 619{
575 return idmap_name_to_id(rqstp, IDMAP_TYPE_GROUP, name, namelen, id); 620 return do_name_to_id(rqstp, IDMAP_TYPE_GROUP, name, namelen, id);
576} 621}
577 622
578int 623int
579nfsd_map_uid_to_name(struct svc_rqst *rqstp, __u32 id, char *name) 624nfsd_map_uid_to_name(struct svc_rqst *rqstp, __u32 id, char *name)
580{ 625{
581 return idmap_id_to_name(rqstp, IDMAP_TYPE_USER, id, name); 626 return do_id_to_name(rqstp, IDMAP_TYPE_USER, id, name);
582} 627}
583 628
584int 629int
585nfsd_map_gid_to_name(struct svc_rqst *rqstp, __u32 id, char *name) 630nfsd_map_gid_to_name(struct svc_rqst *rqstp, __u32 id, char *name)
586{ 631{
587 return idmap_id_to_name(rqstp, IDMAP_TYPE_GROUP, id, name); 632 return do_id_to_name(rqstp, IDMAP_TYPE_GROUP, id, name);
588} 633}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 896da74ec563..2ed14dfd00a2 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -39,6 +39,7 @@
39#include "cache.h" 39#include "cache.h"
40#include "xdr4.h" 40#include "xdr4.h"
41#include "vfs.h" 41#include "vfs.h"
42#include "current_stateid.h"
42 43
43#define NFSDDBG_FACILITY NFSDDBG_PROC 44#define NFSDDBG_FACILITY NFSDDBG_PROC
44 45
@@ -192,10 +193,13 @@ static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
192static __be32 193static __be32
193do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) 194do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
194{ 195{
195 struct svc_fh resfh; 196 struct svc_fh *resfh;
196 __be32 status; 197 __be32 status;
197 198
198 fh_init(&resfh, NFS4_FHSIZE); 199 resfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
200 if (!resfh)
201 return nfserr_jukebox;
202 fh_init(resfh, NFS4_FHSIZE);
199 open->op_truncate = 0; 203 open->op_truncate = 0;
200 204
201 if (open->op_create) { 205 if (open->op_create) {
@@ -220,7 +224,7 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
220 */ 224 */
221 status = do_nfsd_create(rqstp, current_fh, open->op_fname.data, 225 status = do_nfsd_create(rqstp, current_fh, open->op_fname.data,
222 open->op_fname.len, &open->op_iattr, 226 open->op_fname.len, &open->op_iattr,
223 &resfh, open->op_createmode, 227 resfh, open->op_createmode,
224 (u32 *)open->op_verf.data, 228 (u32 *)open->op_verf.data,
225 &open->op_truncate, &open->op_created); 229 &open->op_truncate, &open->op_created);
226 230
@@ -234,30 +238,29 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
234 FATTR4_WORD1_TIME_MODIFY); 238 FATTR4_WORD1_TIME_MODIFY);
235 } else { 239 } else {
236 status = nfsd_lookup(rqstp, current_fh, 240 status = nfsd_lookup(rqstp, current_fh,
237 open->op_fname.data, open->op_fname.len, &resfh); 241 open->op_fname.data, open->op_fname.len, resfh);
238 fh_unlock(current_fh); 242 fh_unlock(current_fh);
239 if (status) 243 if (status)
240 goto out; 244 goto out;
241 status = nfsd_check_obj_isreg(&resfh); 245 status = nfsd_check_obj_isreg(resfh);
242 } 246 }
243 if (status) 247 if (status)
244 goto out; 248 goto out;
245 249
246 if (is_create_with_attrs(open) && open->op_acl != NULL) 250 if (is_create_with_attrs(open) && open->op_acl != NULL)
247 do_set_nfs4_acl(rqstp, &resfh, open->op_acl, open->op_bmval); 251 do_set_nfs4_acl(rqstp, resfh, open->op_acl, open->op_bmval);
248
249 set_change_info(&open->op_cinfo, current_fh);
250 fh_dup2(current_fh, &resfh);
251 252
252 /* set reply cache */ 253 /* set reply cache */
253 fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh, 254 fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh,
254 &resfh.fh_handle); 255 &resfh->fh_handle);
255 if (!open->op_created) 256 if (!open->op_created)
256 status = do_open_permission(rqstp, current_fh, open, 257 status = do_open_permission(rqstp, resfh, open,
257 NFSD_MAY_NOP); 258 NFSD_MAY_NOP);
258 259 set_change_info(&open->op_cinfo, current_fh);
260 fh_dup2(current_fh, resfh);
259out: 261out:
260 fh_put(&resfh); 262 fh_put(resfh);
263 kfree(resfh);
261 return status; 264 return status;
262} 265}
263 266
@@ -310,16 +313,14 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
310 if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL) 313 if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL)
311 return nfserr_inval; 314 return nfserr_inval;
312 315
313 /* We don't yet support WANT bits: */
314 open->op_share_access &= NFS4_SHARE_ACCESS_MASK;
315
316 open->op_created = 0; 316 open->op_created = 0;
317 /* 317 /*
318 * RFC5661 18.51.3 318 * RFC5661 18.51.3
319 * Before RECLAIM_COMPLETE done, server should deny new lock 319 * Before RECLAIM_COMPLETE done, server should deny new lock
320 */ 320 */
321 if (nfsd4_has_session(cstate) && 321 if (nfsd4_has_session(cstate) &&
322 !cstate->session->se_client->cl_firststate && 322 !test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
323 &cstate->session->se_client->cl_flags) &&
323 open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS) 324 open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
324 return nfserr_grace; 325 return nfserr_grace;
325 326
@@ -452,6 +453,10 @@ nfsd4_restorefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
452 return nfserr_restorefh; 453 return nfserr_restorefh;
453 454
454 fh_dup2(&cstate->current_fh, &cstate->save_fh); 455 fh_dup2(&cstate->current_fh, &cstate->save_fh);
456 if (HAS_STATE_ID(cstate, SAVED_STATE_ID_FLAG)) {
457 memcpy(&cstate->current_stateid, &cstate->save_stateid, sizeof(stateid_t));
458 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
459 }
455 return nfs_ok; 460 return nfs_ok;
456} 461}
457 462
@@ -463,6 +468,10 @@ nfsd4_savefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
463 return nfserr_nofilehandle; 468 return nfserr_nofilehandle;
464 469
465 fh_dup2(&cstate->save_fh, &cstate->current_fh); 470 fh_dup2(&cstate->save_fh, &cstate->current_fh);
471 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG)) {
472 memcpy(&cstate->save_stateid, &cstate->current_stateid, sizeof(stateid_t));
473 SET_STATE_ID(cstate, SAVED_STATE_ID_FLAG);
474 }
466 return nfs_ok; 475 return nfs_ok;
467} 476}
468 477
@@ -481,14 +490,20 @@ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
481 &access->ac_supported); 490 &access->ac_supported);
482} 491}
483 492
493static void gen_boot_verifier(nfs4_verifier *verifier)
494{
495 __be32 verf[2];
496
497 verf[0] = (__be32)nfssvc_boot.tv_sec;
498 verf[1] = (__be32)nfssvc_boot.tv_usec;
499 memcpy(verifier->data, verf, sizeof(verifier->data));
500}
501
484static __be32 502static __be32
485nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 503nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
486 struct nfsd4_commit *commit) 504 struct nfsd4_commit *commit)
487{ 505{
488 u32 *p = (u32 *)commit->co_verf.data; 506 gen_boot_verifier(&commit->co_verf);
489 *p++ = nfssvc_boot.tv_sec;
490 *p++ = nfssvc_boot.tv_usec;
491
492 return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset, 507 return nfsd_commit(rqstp, &cstate->current_fh, commit->co_offset,
493 commit->co_count); 508 commit->co_count);
494} 509}
@@ -865,7 +880,6 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
865{ 880{
866 stateid_t *stateid = &write->wr_stateid; 881 stateid_t *stateid = &write->wr_stateid;
867 struct file *filp = NULL; 882 struct file *filp = NULL;
868 u32 *p;
869 __be32 status = nfs_ok; 883 __be32 status = nfs_ok;
870 unsigned long cnt; 884 unsigned long cnt;
871 885
@@ -887,9 +901,7 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
887 901
888 cnt = write->wr_buflen; 902 cnt = write->wr_buflen;
889 write->wr_how_written = write->wr_stable_how; 903 write->wr_how_written = write->wr_stable_how;
890 p = (u32 *)write->wr_verifier.data; 904 gen_boot_verifier(&write->wr_verifier);
891 *p++ = nfssvc_boot.tv_sec;
892 *p++ = nfssvc_boot.tv_usec;
893 905
894 status = nfsd_write(rqstp, &cstate->current_fh, filp, 906 status = nfsd_write(rqstp, &cstate->current_fh, filp,
895 write->wr_offset, rqstp->rq_vec, write->wr_vlen, 907 write->wr_offset, rqstp->rq_vec, write->wr_vlen,
@@ -1000,6 +1012,8 @@ static inline void nfsd4_increment_op_stats(u32 opnum)
1000typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *, 1012typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *,
1001 void *); 1013 void *);
1002typedef u32(*nfsd4op_rsize)(struct svc_rqst *, struct nfsd4_op *op); 1014typedef u32(*nfsd4op_rsize)(struct svc_rqst *, struct nfsd4_op *op);
1015typedef void(*stateid_setter)(struct nfsd4_compound_state *, void *);
1016typedef void(*stateid_getter)(struct nfsd4_compound_state *, void *);
1003 1017
1004enum nfsd4_op_flags { 1018enum nfsd4_op_flags {
1005 ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */ 1019 ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */
@@ -1025,6 +1039,10 @@ enum nfsd4_op_flags {
1025 * the v4.0 case). 1039 * the v4.0 case).
1026 */ 1040 */
1027 OP_CACHEME = 1 << 6, 1041 OP_CACHEME = 1 << 6,
1042 /*
1043 * These are ops which clear current state id.
1044 */
1045 OP_CLEAR_STATEID = 1 << 7,
1028}; 1046};
1029 1047
1030struct nfsd4_operation { 1048struct nfsd4_operation {
@@ -1033,11 +1051,15 @@ struct nfsd4_operation {
1033 char *op_name; 1051 char *op_name;
1034 /* Try to get response size before operation */ 1052 /* Try to get response size before operation */
1035 nfsd4op_rsize op_rsize_bop; 1053 nfsd4op_rsize op_rsize_bop;
1054 stateid_setter op_get_currentstateid;
1055 stateid_getter op_set_currentstateid;
1036}; 1056};
1037 1057
1038static struct nfsd4_operation nfsd4_ops[]; 1058static struct nfsd4_operation nfsd4_ops[];
1039 1059
1060#ifdef NFSD_DEBUG
1040static const char *nfsd4_op_name(unsigned opnum); 1061static const char *nfsd4_op_name(unsigned opnum);
1062#endif
1041 1063
1042/* 1064/*
1043 * Enforce NFSv4.1 COMPOUND ordering rules: 1065 * Enforce NFSv4.1 COMPOUND ordering rules:
@@ -1215,13 +1237,23 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
1215 if (op->status) 1237 if (op->status)
1216 goto encode_op; 1238 goto encode_op;
1217 1239
1218 if (opdesc->op_func) 1240 if (opdesc->op_func) {
1241 if (opdesc->op_get_currentstateid)
1242 opdesc->op_get_currentstateid(cstate, &op->u);
1219 op->status = opdesc->op_func(rqstp, cstate, &op->u); 1243 op->status = opdesc->op_func(rqstp, cstate, &op->u);
1220 else 1244 } else
1221 BUG_ON(op->status == nfs_ok); 1245 BUG_ON(op->status == nfs_ok);
1222 1246
1223 if (!op->status && need_wrongsec_check(rqstp)) 1247 if (!op->status) {
1224 op->status = check_nfsd_access(cstate->current_fh.fh_export, rqstp); 1248 if (opdesc->op_set_currentstateid)
1249 opdesc->op_set_currentstateid(cstate, &op->u);
1250
1251 if (opdesc->op_flags & OP_CLEAR_STATEID)
1252 clear_current_stateid(cstate);
1253
1254 if (need_wrongsec_check(rqstp))
1255 op->status = check_nfsd_access(cstate->current_fh.fh_export, rqstp);
1256 }
1225 1257
1226encode_op: 1258encode_op:
1227 /* Only from SEQUENCE */ 1259 /* Only from SEQUENCE */
@@ -1413,6 +1445,8 @@ static struct nfsd4_operation nfsd4_ops[] = {
1413 .op_flags = OP_MODIFIES_SOMETHING, 1445 .op_flags = OP_MODIFIES_SOMETHING,
1414 .op_name = "OP_CLOSE", 1446 .op_name = "OP_CLOSE",
1415 .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize, 1447 .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
1448 .op_get_currentstateid = (stateid_getter)nfsd4_get_closestateid,
1449 .op_set_currentstateid = (stateid_setter)nfsd4_set_closestateid,
1416 }, 1450 },
1417 [OP_COMMIT] = { 1451 [OP_COMMIT] = {
1418 .op_func = (nfsd4op_func)nfsd4_commit, 1452 .op_func = (nfsd4op_func)nfsd4_commit,
@@ -1422,7 +1456,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
1422 }, 1456 },
1423 [OP_CREATE] = { 1457 [OP_CREATE] = {
1424 .op_func = (nfsd4op_func)nfsd4_create, 1458 .op_func = (nfsd4op_func)nfsd4_create,
1425 .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME, 1459 .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME | OP_CLEAR_STATEID,
1426 .op_name = "OP_CREATE", 1460 .op_name = "OP_CREATE",
1427 .op_rsize_bop = (nfsd4op_rsize)nfsd4_create_rsize, 1461 .op_rsize_bop = (nfsd4op_rsize)nfsd4_create_rsize,
1428 }, 1462 },
@@ -1431,6 +1465,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
1431 .op_flags = OP_MODIFIES_SOMETHING, 1465 .op_flags = OP_MODIFIES_SOMETHING,
1432 .op_name = "OP_DELEGRETURN", 1466 .op_name = "OP_DELEGRETURN",
1433 .op_rsize_bop = nfsd4_only_status_rsize, 1467 .op_rsize_bop = nfsd4_only_status_rsize,
1468 .op_get_currentstateid = (stateid_getter)nfsd4_get_delegreturnstateid,
1434 }, 1469 },
1435 [OP_GETATTR] = { 1470 [OP_GETATTR] = {
1436 .op_func = (nfsd4op_func)nfsd4_getattr, 1471 .op_func = (nfsd4op_func)nfsd4_getattr,
@@ -1453,6 +1488,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
1453 .op_flags = OP_MODIFIES_SOMETHING, 1488 .op_flags = OP_MODIFIES_SOMETHING,
1454 .op_name = "OP_LOCK", 1489 .op_name = "OP_LOCK",
1455 .op_rsize_bop = (nfsd4op_rsize)nfsd4_lock_rsize, 1490 .op_rsize_bop = (nfsd4op_rsize)nfsd4_lock_rsize,
1491 .op_set_currentstateid = (stateid_setter)nfsd4_set_lockstateid,
1456 }, 1492 },
1457 [OP_LOCKT] = { 1493 [OP_LOCKT] = {
1458 .op_func = (nfsd4op_func)nfsd4_lockt, 1494 .op_func = (nfsd4op_func)nfsd4_lockt,
@@ -1463,15 +1499,16 @@ static struct nfsd4_operation nfsd4_ops[] = {
1463 .op_flags = OP_MODIFIES_SOMETHING, 1499 .op_flags = OP_MODIFIES_SOMETHING,
1464 .op_name = "OP_LOCKU", 1500 .op_name = "OP_LOCKU",
1465 .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize, 1501 .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
1502 .op_get_currentstateid = (stateid_getter)nfsd4_get_lockustateid,
1466 }, 1503 },
1467 [OP_LOOKUP] = { 1504 [OP_LOOKUP] = {
1468 .op_func = (nfsd4op_func)nfsd4_lookup, 1505 .op_func = (nfsd4op_func)nfsd4_lookup,
1469 .op_flags = OP_HANDLES_WRONGSEC, 1506 .op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID,
1470 .op_name = "OP_LOOKUP", 1507 .op_name = "OP_LOOKUP",
1471 }, 1508 },
1472 [OP_LOOKUPP] = { 1509 [OP_LOOKUPP] = {
1473 .op_func = (nfsd4op_func)nfsd4_lookupp, 1510 .op_func = (nfsd4op_func)nfsd4_lookupp,
1474 .op_flags = OP_HANDLES_WRONGSEC, 1511 .op_flags = OP_HANDLES_WRONGSEC | OP_CLEAR_STATEID,
1475 .op_name = "OP_LOOKUPP", 1512 .op_name = "OP_LOOKUPP",
1476 }, 1513 },
1477 [OP_NVERIFY] = { 1514 [OP_NVERIFY] = {
@@ -1483,6 +1520,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
1483 .op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING, 1520 .op_flags = OP_HANDLES_WRONGSEC | OP_MODIFIES_SOMETHING,
1484 .op_name = "OP_OPEN", 1521 .op_name = "OP_OPEN",
1485 .op_rsize_bop = (nfsd4op_rsize)nfsd4_open_rsize, 1522 .op_rsize_bop = (nfsd4op_rsize)nfsd4_open_rsize,
1523 .op_set_currentstateid = (stateid_setter)nfsd4_set_openstateid,
1486 }, 1524 },
1487 [OP_OPEN_CONFIRM] = { 1525 [OP_OPEN_CONFIRM] = {
1488 .op_func = (nfsd4op_func)nfsd4_open_confirm, 1526 .op_func = (nfsd4op_func)nfsd4_open_confirm,
@@ -1495,25 +1533,30 @@ static struct nfsd4_operation nfsd4_ops[] = {
1495 .op_flags = OP_MODIFIES_SOMETHING, 1533 .op_flags = OP_MODIFIES_SOMETHING,
1496 .op_name = "OP_OPEN_DOWNGRADE", 1534 .op_name = "OP_OPEN_DOWNGRADE",
1497 .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize, 1535 .op_rsize_bop = (nfsd4op_rsize)nfsd4_status_stateid_rsize,
1536 .op_get_currentstateid = (stateid_getter)nfsd4_get_opendowngradestateid,
1537 .op_set_currentstateid = (stateid_setter)nfsd4_set_opendowngradestateid,
1498 }, 1538 },
1499 [OP_PUTFH] = { 1539 [OP_PUTFH] = {
1500 .op_func = (nfsd4op_func)nfsd4_putfh, 1540 .op_func = (nfsd4op_func)nfsd4_putfh,
1501 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS 1541 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
1502 | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING, 1542 | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING
1543 | OP_CLEAR_STATEID,
1503 .op_name = "OP_PUTFH", 1544 .op_name = "OP_PUTFH",
1504 .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize, 1545 .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
1505 }, 1546 },
1506 [OP_PUTPUBFH] = { 1547 [OP_PUTPUBFH] = {
1507 .op_func = (nfsd4op_func)nfsd4_putrootfh, 1548 .op_func = (nfsd4op_func)nfsd4_putrootfh,
1508 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS 1549 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
1509 | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING, 1550 | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING
1551 | OP_CLEAR_STATEID,
1510 .op_name = "OP_PUTPUBFH", 1552 .op_name = "OP_PUTPUBFH",
1511 .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize, 1553 .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
1512 }, 1554 },
1513 [OP_PUTROOTFH] = { 1555 [OP_PUTROOTFH] = {
1514 .op_func = (nfsd4op_func)nfsd4_putrootfh, 1556 .op_func = (nfsd4op_func)nfsd4_putrootfh,
1515 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS 1557 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS
1516 | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING, 1558 | OP_IS_PUTFH_LIKE | OP_MODIFIES_SOMETHING
1559 | OP_CLEAR_STATEID,
1517 .op_name = "OP_PUTROOTFH", 1560 .op_name = "OP_PUTROOTFH",
1518 .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize, 1561 .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
1519 }, 1562 },
@@ -1522,6 +1565,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
1522 .op_flags = OP_MODIFIES_SOMETHING, 1565 .op_flags = OP_MODIFIES_SOMETHING,
1523 .op_name = "OP_READ", 1566 .op_name = "OP_READ",
1524 .op_rsize_bop = (nfsd4op_rsize)nfsd4_read_rsize, 1567 .op_rsize_bop = (nfsd4op_rsize)nfsd4_read_rsize,
1568 .op_get_currentstateid = (stateid_getter)nfsd4_get_readstateid,
1525 }, 1569 },
1526 [OP_READDIR] = { 1570 [OP_READDIR] = {
1527 .op_func = (nfsd4op_func)nfsd4_readdir, 1571 .op_func = (nfsd4op_func)nfsd4_readdir,
@@ -1576,6 +1620,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
1576 .op_name = "OP_SETATTR", 1620 .op_name = "OP_SETATTR",
1577 .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME, 1621 .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
1578 .op_rsize_bop = (nfsd4op_rsize)nfsd4_setattr_rsize, 1622 .op_rsize_bop = (nfsd4op_rsize)nfsd4_setattr_rsize,
1623 .op_get_currentstateid = (stateid_getter)nfsd4_get_setattrstateid,
1579 }, 1624 },
1580 [OP_SETCLIENTID] = { 1625 [OP_SETCLIENTID] = {
1581 .op_func = (nfsd4op_func)nfsd4_setclientid, 1626 .op_func = (nfsd4op_func)nfsd4_setclientid,
@@ -1600,6 +1645,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
1600 .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME, 1645 .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
1601 .op_name = "OP_WRITE", 1646 .op_name = "OP_WRITE",
1602 .op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize, 1647 .op_rsize_bop = (nfsd4op_rsize)nfsd4_write_rsize,
1648 .op_get_currentstateid = (stateid_getter)nfsd4_get_writestateid,
1603 }, 1649 },
1604 [OP_RELEASE_LOCKOWNER] = { 1650 [OP_RELEASE_LOCKOWNER] = {
1605 .op_func = (nfsd4op_func)nfsd4_release_lockowner, 1651 .op_func = (nfsd4op_func)nfsd4_release_lockowner,
@@ -1674,12 +1720,14 @@ static struct nfsd4_operation nfsd4_ops[] = {
1674 }, 1720 },
1675}; 1721};
1676 1722
1723#ifdef NFSD_DEBUG
1677static const char *nfsd4_op_name(unsigned opnum) 1724static const char *nfsd4_op_name(unsigned opnum)
1678{ 1725{
1679 if (opnum < ARRAY_SIZE(nfsd4_ops)) 1726 if (opnum < ARRAY_SIZE(nfsd4_ops))
1680 return nfsd4_ops[opnum].op_name; 1727 return nfsd4_ops[opnum].op_name;
1681 return "unknown_operation"; 1728 return "unknown_operation";
1682} 1729}
1730#endif
1683 1731
1684#define nfsd4_voidres nfsd4_voidargs 1732#define nfsd4_voidres nfsd4_voidargs
1685struct nfsd4_voidargs { int dummy; }; 1733struct nfsd4_voidargs { int dummy; };
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 0b3e875d1abd..4767429264a2 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -1,5 +1,6 @@
1/* 1/*
2* Copyright (c) 2004 The Regents of the University of Michigan. 2* Copyright (c) 2004 The Regents of the University of Michigan.
3* Copyright (c) 2012 Jeff Layton <jlayton@redhat.com>
3* All rights reserved. 4* All rights reserved.
4* 5*
5* Andy Adamson <andros@citi.umich.edu> 6* Andy Adamson <andros@citi.umich.edu>
@@ -36,16 +37,34 @@
36#include <linux/namei.h> 37#include <linux/namei.h>
37#include <linux/crypto.h> 38#include <linux/crypto.h>
38#include <linux/sched.h> 39#include <linux/sched.h>
40#include <linux/fs.h>
41#include <linux/module.h>
42#include <net/net_namespace.h>
43#include <linux/sunrpc/rpc_pipe_fs.h>
44#include <linux/sunrpc/clnt.h>
45#include <linux/nfsd/cld.h>
39 46
40#include "nfsd.h" 47#include "nfsd.h"
41#include "state.h" 48#include "state.h"
42#include "vfs.h" 49#include "vfs.h"
50#include "netns.h"
43 51
44#define NFSDDBG_FACILITY NFSDDBG_PROC 52#define NFSDDBG_FACILITY NFSDDBG_PROC
45 53
54/* Declarations */
55struct nfsd4_client_tracking_ops {
56 int (*init)(struct net *);
57 void (*exit)(struct net *);
58 void (*create)(struct nfs4_client *);
59 void (*remove)(struct nfs4_client *);
60 int (*check)(struct nfs4_client *);
61 void (*grace_done)(struct net *, time_t);
62};
63
46/* Globals */ 64/* Globals */
47static struct file *rec_file; 65static struct file *rec_file;
48static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery"; 66static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
67static struct nfsd4_client_tracking_ops *client_tracking_ops;
49 68
50static int 69static int
51nfs4_save_creds(const struct cred **original_creds) 70nfs4_save_creds(const struct cred **original_creds)
@@ -117,7 +136,8 @@ out_no_tfm:
117 return status; 136 return status;
118} 137}
119 138
120void nfsd4_create_clid_dir(struct nfs4_client *clp) 139static void
140nfsd4_create_clid_dir(struct nfs4_client *clp)
121{ 141{
122 const struct cred *original_cred; 142 const struct cred *original_cred;
123 char *dname = clp->cl_recdir; 143 char *dname = clp->cl_recdir;
@@ -126,9 +146,8 @@ void nfsd4_create_clid_dir(struct nfs4_client *clp)
126 146
127 dprintk("NFSD: nfsd4_create_clid_dir for \"%s\"\n", dname); 147 dprintk("NFSD: nfsd4_create_clid_dir for \"%s\"\n", dname);
128 148
129 if (clp->cl_firststate) 149 if (test_and_set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
130 return; 150 return;
131 clp->cl_firststate = 1;
132 if (!rec_file) 151 if (!rec_file)
133 return; 152 return;
134 status = nfs4_save_creds(&original_cred); 153 status = nfs4_save_creds(&original_cred);
@@ -265,19 +284,19 @@ out_unlock:
265 return status; 284 return status;
266} 285}
267 286
268void 287static void
269nfsd4_remove_clid_dir(struct nfs4_client *clp) 288nfsd4_remove_clid_dir(struct nfs4_client *clp)
270{ 289{
271 const struct cred *original_cred; 290 const struct cred *original_cred;
272 int status; 291 int status;
273 292
274 if (!rec_file || !clp->cl_firststate) 293 if (!rec_file || !test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
275 return; 294 return;
276 295
277 status = mnt_want_write_file(rec_file); 296 status = mnt_want_write_file(rec_file);
278 if (status) 297 if (status)
279 goto out; 298 goto out;
280 clp->cl_firststate = 0; 299 clear_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
281 300
282 status = nfs4_save_creds(&original_cred); 301 status = nfs4_save_creds(&original_cred);
283 if (status < 0) 302 if (status < 0)
@@ -292,7 +311,6 @@ out:
292 if (status) 311 if (status)
293 printk("NFSD: Failed to remove expired client state directory" 312 printk("NFSD: Failed to remove expired client state directory"
294 " %.*s\n", HEXDIR_LEN, clp->cl_recdir); 313 " %.*s\n", HEXDIR_LEN, clp->cl_recdir);
295 return;
296} 314}
297 315
298static int 316static int
@@ -311,8 +329,9 @@ purge_old(struct dentry *parent, struct dentry *child)
311 return 0; 329 return 0;
312} 330}
313 331
314void 332static void
315nfsd4_recdir_purge_old(void) { 333nfsd4_recdir_purge_old(struct net *net, time_t boot_time)
334{
316 int status; 335 int status;
317 336
318 if (!rec_file) 337 if (!rec_file)
@@ -343,7 +362,7 @@ load_recdir(struct dentry *parent, struct dentry *child)
343 return 0; 362 return 0;
344} 363}
345 364
346int 365static int
347nfsd4_recdir_load(void) { 366nfsd4_recdir_load(void) {
348 int status; 367 int status;
349 368
@@ -361,8 +380,8 @@ nfsd4_recdir_load(void) {
361 * Hold reference to the recovery directory. 380 * Hold reference to the recovery directory.
362 */ 381 */
363 382
364void 383static int
365nfsd4_init_recdir() 384nfsd4_init_recdir(void)
366{ 385{
367 const struct cred *original_cred; 386 const struct cred *original_cred;
368 int status; 387 int status;
@@ -377,20 +396,44 @@ nfsd4_init_recdir()
377 printk("NFSD: Unable to change credentials to find recovery" 396 printk("NFSD: Unable to change credentials to find recovery"
378 " directory: error %d\n", 397 " directory: error %d\n",
379 status); 398 status);
380 return; 399 return status;
381 } 400 }
382 401
383 rec_file = filp_open(user_recovery_dirname, O_RDONLY | O_DIRECTORY, 0); 402 rec_file = filp_open(user_recovery_dirname, O_RDONLY | O_DIRECTORY, 0);
384 if (IS_ERR(rec_file)) { 403 if (IS_ERR(rec_file)) {
385 printk("NFSD: unable to find recovery directory %s\n", 404 printk("NFSD: unable to find recovery directory %s\n",
386 user_recovery_dirname); 405 user_recovery_dirname);
406 status = PTR_ERR(rec_file);
387 rec_file = NULL; 407 rec_file = NULL;
388 } 408 }
389 409
390 nfs4_reset_creds(original_cred); 410 nfs4_reset_creds(original_cred);
411 return status;
391} 412}
392 413
393void 414static int
415nfsd4_load_reboot_recovery_data(struct net *net)
416{
417 int status;
418
419 /* XXX: The legacy code won't work in a container */
420 if (net != &init_net) {
421 WARN(1, KERN_ERR "NFSD: attempt to initialize legacy client "
422 "tracking in a container!\n");
423 return -EINVAL;
424 }
425
426 nfs4_lock_state();
427 status = nfsd4_init_recdir();
428 if (!status)
429 status = nfsd4_recdir_load();
430 nfs4_unlock_state();
431 if (status)
432 printk(KERN_ERR "NFSD: Failure reading reboot recovery data\n");
433 return status;
434}
435
436static void
394nfsd4_shutdown_recdir(void) 437nfsd4_shutdown_recdir(void)
395{ 438{
396 if (!rec_file) 439 if (!rec_file)
@@ -399,6 +442,13 @@ nfsd4_shutdown_recdir(void)
399 rec_file = NULL; 442 rec_file = NULL;
400} 443}
401 444
445static void
446nfsd4_legacy_tracking_exit(struct net *net)
447{
448 nfs4_release_reclaim();
449 nfsd4_shutdown_recdir();
450}
451
402/* 452/*
403 * Change the NFSv4 recovery directory to recdir. 453 * Change the NFSv4 recovery directory to recdir.
404 */ 454 */
@@ -425,3 +475,572 @@ nfs4_recoverydir(void)
425{ 475{
426 return user_recovery_dirname; 476 return user_recovery_dirname;
427} 477}
478
479static int
480nfsd4_check_legacy_client(struct nfs4_client *clp)
481{
482 /* did we already find that this client is stable? */
483 if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
484 return 0;
485
486 /* look for it in the reclaim hashtable otherwise */
487 if (nfsd4_find_reclaim_client(clp)) {
488 set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
489 return 0;
490 }
491
492 return -ENOENT;
493}
494
495static struct nfsd4_client_tracking_ops nfsd4_legacy_tracking_ops = {
496 .init = nfsd4_load_reboot_recovery_data,
497 .exit = nfsd4_legacy_tracking_exit,
498 .create = nfsd4_create_clid_dir,
499 .remove = nfsd4_remove_clid_dir,
500 .check = nfsd4_check_legacy_client,
501 .grace_done = nfsd4_recdir_purge_old,
502};
503
504/* Globals */
505#define NFSD_PIPE_DIR "nfsd"
506#define NFSD_CLD_PIPE "cld"
507
508/* per-net-ns structure for holding cld upcall info */
509struct cld_net {
510 struct rpc_pipe *cn_pipe;
511 spinlock_t cn_lock;
512 struct list_head cn_list;
513 unsigned int cn_xid;
514};
515
516struct cld_upcall {
517 struct list_head cu_list;
518 struct cld_net *cu_net;
519 struct task_struct *cu_task;
520 struct cld_msg cu_msg;
521};
522
523static int
524__cld_pipe_upcall(struct rpc_pipe *pipe, struct cld_msg *cmsg)
525{
526 int ret;
527 struct rpc_pipe_msg msg;
528
529 memset(&msg, 0, sizeof(msg));
530 msg.data = cmsg;
531 msg.len = sizeof(*cmsg);
532
533 /*
534 * Set task state before we queue the upcall. That prevents
535 * wake_up_process in the downcall from racing with schedule.
536 */
537 set_current_state(TASK_UNINTERRUPTIBLE);
538 ret = rpc_queue_upcall(pipe, &msg);
539 if (ret < 0) {
540 set_current_state(TASK_RUNNING);
541 goto out;
542 }
543
544 schedule();
545 set_current_state(TASK_RUNNING);
546
547 if (msg.errno < 0)
548 ret = msg.errno;
549out:
550 return ret;
551}
552
553static int
554cld_pipe_upcall(struct rpc_pipe *pipe, struct cld_msg *cmsg)
555{
556 int ret;
557
558 /*
559 * -EAGAIN occurs when pipe is closed and reopened while there are
560 * upcalls queued.
561 */
562 do {
563 ret = __cld_pipe_upcall(pipe, cmsg);
564 } while (ret == -EAGAIN);
565
566 return ret;
567}
568
569static ssize_t
570cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
571{
572 struct cld_upcall *tmp, *cup;
573 struct cld_msg *cmsg = (struct cld_msg *)src;
574 uint32_t xid;
575 struct nfsd_net *nn = net_generic(filp->f_dentry->d_sb->s_fs_info,
576 nfsd_net_id);
577 struct cld_net *cn = nn->cld_net;
578
579 if (mlen != sizeof(*cmsg)) {
580 dprintk("%s: got %lu bytes, expected %lu\n", __func__, mlen,
581 sizeof(*cmsg));
582 return -EINVAL;
583 }
584
585 /* copy just the xid so we can try to find that */
586 if (copy_from_user(&xid, &cmsg->cm_xid, sizeof(xid)) != 0) {
587 dprintk("%s: error when copying xid from userspace", __func__);
588 return -EFAULT;
589 }
590
591 /* walk the list and find corresponding xid */
592 cup = NULL;
593 spin_lock(&cn->cn_lock);
594 list_for_each_entry(tmp, &cn->cn_list, cu_list) {
595 if (get_unaligned(&tmp->cu_msg.cm_xid) == xid) {
596 cup = tmp;
597 list_del_init(&cup->cu_list);
598 break;
599 }
600 }
601 spin_unlock(&cn->cn_lock);
602
603 /* couldn't find upcall? */
604 if (!cup) {
605 dprintk("%s: couldn't find upcall -- xid=%u\n", __func__, xid);
606 return -EINVAL;
607 }
608
609 if (copy_from_user(&cup->cu_msg, src, mlen) != 0)
610 return -EFAULT;
611
612 wake_up_process(cup->cu_task);
613 return mlen;
614}
615
616static void
617cld_pipe_destroy_msg(struct rpc_pipe_msg *msg)
618{
619 struct cld_msg *cmsg = msg->data;
620 struct cld_upcall *cup = container_of(cmsg, struct cld_upcall,
621 cu_msg);
622
623 /* errno >= 0 means we got a downcall */
624 if (msg->errno >= 0)
625 return;
626
627 wake_up_process(cup->cu_task);
628}
629
630static const struct rpc_pipe_ops cld_upcall_ops = {
631 .upcall = rpc_pipe_generic_upcall,
632 .downcall = cld_pipe_downcall,
633 .destroy_msg = cld_pipe_destroy_msg,
634};
635
636static struct dentry *
637nfsd4_cld_register_sb(struct super_block *sb, struct rpc_pipe *pipe)
638{
639 struct dentry *dir, *dentry;
640
641 dir = rpc_d_lookup_sb(sb, NFSD_PIPE_DIR);
642 if (dir == NULL)
643 return ERR_PTR(-ENOENT);
644 dentry = rpc_mkpipe_dentry(dir, NFSD_CLD_PIPE, NULL, pipe);
645 dput(dir);
646 return dentry;
647}
648
649static void
650nfsd4_cld_unregister_sb(struct rpc_pipe *pipe)
651{
652 if (pipe->dentry)
653 rpc_unlink(pipe->dentry);
654}
655
656static struct dentry *
657nfsd4_cld_register_net(struct net *net, struct rpc_pipe *pipe)
658{
659 struct super_block *sb;
660 struct dentry *dentry;
661
662 sb = rpc_get_sb_net(net);
663 if (!sb)
664 return NULL;
665 dentry = nfsd4_cld_register_sb(sb, pipe);
666 rpc_put_sb_net(net);
667 return dentry;
668}
669
670static void
671nfsd4_cld_unregister_net(struct net *net, struct rpc_pipe *pipe)
672{
673 struct super_block *sb;
674
675 sb = rpc_get_sb_net(net);
676 if (sb) {
677 nfsd4_cld_unregister_sb(pipe);
678 rpc_put_sb_net(net);
679 }
680}
681
682/* Initialize rpc_pipefs pipe for communication with client tracking daemon */
683static int
684nfsd4_init_cld_pipe(struct net *net)
685{
686 int ret;
687 struct dentry *dentry;
688 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
689 struct cld_net *cn;
690
691 if (nn->cld_net)
692 return 0;
693
694 cn = kzalloc(sizeof(*cn), GFP_KERNEL);
695 if (!cn) {
696 ret = -ENOMEM;
697 goto err;
698 }
699
700 cn->cn_pipe = rpc_mkpipe_data(&cld_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
701 if (IS_ERR(cn->cn_pipe)) {
702 ret = PTR_ERR(cn->cn_pipe);
703 goto err;
704 }
705 spin_lock_init(&cn->cn_lock);
706 INIT_LIST_HEAD(&cn->cn_list);
707
708 dentry = nfsd4_cld_register_net(net, cn->cn_pipe);
709 if (IS_ERR(dentry)) {
710 ret = PTR_ERR(dentry);
711 goto err_destroy_data;
712 }
713
714 cn->cn_pipe->dentry = dentry;
715 nn->cld_net = cn;
716 return 0;
717
718err_destroy_data:
719 rpc_destroy_pipe_data(cn->cn_pipe);
720err:
721 kfree(cn);
722 printk(KERN_ERR "NFSD: unable to create nfsdcld upcall pipe (%d)\n",
723 ret);
724 return ret;
725}
726
727static void
728nfsd4_remove_cld_pipe(struct net *net)
729{
730 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
731 struct cld_net *cn = nn->cld_net;
732
733 nfsd4_cld_unregister_net(net, cn->cn_pipe);
734 rpc_destroy_pipe_data(cn->cn_pipe);
735 kfree(nn->cld_net);
736 nn->cld_net = NULL;
737}
738
739static struct cld_upcall *
740alloc_cld_upcall(struct cld_net *cn)
741{
742 struct cld_upcall *new, *tmp;
743
744 new = kzalloc(sizeof(*new), GFP_KERNEL);
745 if (!new)
746 return new;
747
748 /* FIXME: hard cap on number in flight? */
749restart_search:
750 spin_lock(&cn->cn_lock);
751 list_for_each_entry(tmp, &cn->cn_list, cu_list) {
752 if (tmp->cu_msg.cm_xid == cn->cn_xid) {
753 cn->cn_xid++;
754 spin_unlock(&cn->cn_lock);
755 goto restart_search;
756 }
757 }
758 new->cu_task = current;
759 new->cu_msg.cm_vers = CLD_UPCALL_VERSION;
760 put_unaligned(cn->cn_xid++, &new->cu_msg.cm_xid);
761 new->cu_net = cn;
762 list_add(&new->cu_list, &cn->cn_list);
763 spin_unlock(&cn->cn_lock);
764
765 dprintk("%s: allocated xid %u\n", __func__, new->cu_msg.cm_xid);
766
767 return new;
768}
769
770static void
771free_cld_upcall(struct cld_upcall *victim)
772{
773 struct cld_net *cn = victim->cu_net;
774
775 spin_lock(&cn->cn_lock);
776 list_del(&victim->cu_list);
777 spin_unlock(&cn->cn_lock);
778 kfree(victim);
779}
780
781/* Ask daemon to create a new record */
782static void
783nfsd4_cld_create(struct nfs4_client *clp)
784{
785 int ret;
786 struct cld_upcall *cup;
787 /* FIXME: determine net from clp */
788 struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
789 struct cld_net *cn = nn->cld_net;
790
791 /* Don't upcall if it's already stored */
792 if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
793 return;
794
795 cup = alloc_cld_upcall(cn);
796 if (!cup) {
797 ret = -ENOMEM;
798 goto out_err;
799 }
800
801 cup->cu_msg.cm_cmd = Cld_Create;
802 cup->cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
803 memcpy(cup->cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
804 clp->cl_name.len);
805
806 ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg);
807 if (!ret) {
808 ret = cup->cu_msg.cm_status;
809 set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
810 }
811
812 free_cld_upcall(cup);
813out_err:
814 if (ret)
815 printk(KERN_ERR "NFSD: Unable to create client "
816 "record on stable storage: %d\n", ret);
817}
818
819/* Ask daemon to create a new record */
820static void
821nfsd4_cld_remove(struct nfs4_client *clp)
822{
823 int ret;
824 struct cld_upcall *cup;
825 /* FIXME: determine net from clp */
826 struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
827 struct cld_net *cn = nn->cld_net;
828
829 /* Don't upcall if it's already removed */
830 if (!test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
831 return;
832
833 cup = alloc_cld_upcall(cn);
834 if (!cup) {
835 ret = -ENOMEM;
836 goto out_err;
837 }
838
839 cup->cu_msg.cm_cmd = Cld_Remove;
840 cup->cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
841 memcpy(cup->cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
842 clp->cl_name.len);
843
844 ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg);
845 if (!ret) {
846 ret = cup->cu_msg.cm_status;
847 clear_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
848 }
849
850 free_cld_upcall(cup);
851out_err:
852 if (ret)
853 printk(KERN_ERR "NFSD: Unable to remove client "
854 "record from stable storage: %d\n", ret);
855}
856
857/* Check for presence of a record, and update its timestamp */
858static int
859nfsd4_cld_check(struct nfs4_client *clp)
860{
861 int ret;
862 struct cld_upcall *cup;
863 /* FIXME: determine net from clp */
864 struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
865 struct cld_net *cn = nn->cld_net;
866
867 /* Don't upcall if one was already stored during this grace pd */
868 if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags))
869 return 0;
870
871 cup = alloc_cld_upcall(cn);
872 if (!cup) {
873 printk(KERN_ERR "NFSD: Unable to check client record on "
874 "stable storage: %d\n", -ENOMEM);
875 return -ENOMEM;
876 }
877
878 cup->cu_msg.cm_cmd = Cld_Check;
879 cup->cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len;
880 memcpy(cup->cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data,
881 clp->cl_name.len);
882
883 ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg);
884 if (!ret) {
885 ret = cup->cu_msg.cm_status;
886 set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
887 }
888
889 free_cld_upcall(cup);
890 return ret;
891}
892
893static void
894nfsd4_cld_grace_done(struct net *net, time_t boot_time)
895{
896 int ret;
897 struct cld_upcall *cup;
898 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
899 struct cld_net *cn = nn->cld_net;
900
901 cup = alloc_cld_upcall(cn);
902 if (!cup) {
903 ret = -ENOMEM;
904 goto out_err;
905 }
906
907 cup->cu_msg.cm_cmd = Cld_GraceDone;
908 cup->cu_msg.cm_u.cm_gracetime = (int64_t)boot_time;
909 ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg);
910 if (!ret)
911 ret = cup->cu_msg.cm_status;
912
913 free_cld_upcall(cup);
914out_err:
915 if (ret)
916 printk(KERN_ERR "NFSD: Unable to end grace period: %d\n", ret);
917}
918
919static struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops = {
920 .init = nfsd4_init_cld_pipe,
921 .exit = nfsd4_remove_cld_pipe,
922 .create = nfsd4_cld_create,
923 .remove = nfsd4_cld_remove,
924 .check = nfsd4_cld_check,
925 .grace_done = nfsd4_cld_grace_done,
926};
927
928int
929nfsd4_client_tracking_init(struct net *net)
930{
931 int status;
932 struct path path;
933
934 if (!client_tracking_ops) {
935 client_tracking_ops = &nfsd4_cld_tracking_ops;
936 status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path);
937 if (!status) {
938 if (S_ISDIR(path.dentry->d_inode->i_mode))
939 client_tracking_ops =
940 &nfsd4_legacy_tracking_ops;
941 path_put(&path);
942 }
943 }
944
945 status = client_tracking_ops->init(net);
946 if (status) {
947 printk(KERN_WARNING "NFSD: Unable to initialize client "
948 "recovery tracking! (%d)\n", status);
949 client_tracking_ops = NULL;
950 }
951 return status;
952}
953
954void
955nfsd4_client_tracking_exit(struct net *net)
956{
957 if (client_tracking_ops) {
958 client_tracking_ops->exit(net);
959 client_tracking_ops = NULL;
960 }
961}
962
963void
964nfsd4_client_record_create(struct nfs4_client *clp)
965{
966 if (client_tracking_ops)
967 client_tracking_ops->create(clp);
968}
969
970void
971nfsd4_client_record_remove(struct nfs4_client *clp)
972{
973 if (client_tracking_ops)
974 client_tracking_ops->remove(clp);
975}
976
977int
978nfsd4_client_record_check(struct nfs4_client *clp)
979{
980 if (client_tracking_ops)
981 return client_tracking_ops->check(clp);
982
983 return -EOPNOTSUPP;
984}
985
986void
987nfsd4_record_grace_done(struct net *net, time_t boot_time)
988{
989 if (client_tracking_ops)
990 client_tracking_ops->grace_done(net, boot_time);
991}
992
993static int
994rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr)
995{
996 struct super_block *sb = ptr;
997 struct net *net = sb->s_fs_info;
998 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
999 struct cld_net *cn = nn->cld_net;
1000 struct dentry *dentry;
1001 int ret = 0;
1002
1003 if (!try_module_get(THIS_MODULE))
1004 return 0;
1005
1006 if (!cn) {
1007 module_put(THIS_MODULE);
1008 return 0;
1009 }
1010
1011 switch (event) {
1012 case RPC_PIPEFS_MOUNT:
1013 dentry = nfsd4_cld_register_sb(sb, cn->cn_pipe);
1014 if (IS_ERR(dentry)) {
1015 ret = PTR_ERR(dentry);
1016 break;
1017 }
1018 cn->cn_pipe->dentry = dentry;
1019 break;
1020 case RPC_PIPEFS_UMOUNT:
1021 if (cn->cn_pipe->dentry)
1022 nfsd4_cld_unregister_sb(cn->cn_pipe);
1023 break;
1024 default:
1025 ret = -ENOTSUPP;
1026 break;
1027 }
1028 module_put(THIS_MODULE);
1029 return ret;
1030}
1031
1032struct notifier_block nfsd4_cld_block = {
1033 .notifier_call = rpc_pipefs_event,
1034};
1035
1036int
1037register_cld_notifier(void)
1038{
1039 return rpc_pipefs_notifier_register(&nfsd4_cld_block);
1040}
1041
1042void
1043unregister_cld_notifier(void)
1044{
1045 rpc_pipefs_notifier_unregister(&nfsd4_cld_block);
1046}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index c5cddd659429..1841f8bf845e 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -58,11 +58,15 @@ static const stateid_t one_stateid = {
58static const stateid_t zero_stateid = { 58static const stateid_t zero_stateid = {
59 /* all fields zero */ 59 /* all fields zero */
60}; 60};
61static const stateid_t currentstateid = {
62 .si_generation = 1,
63};
61 64
62static u64 current_sessionid = 1; 65static u64 current_sessionid = 1;
63 66
64#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t))) 67#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
65#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t))) 68#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
69#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
66 70
67/* forward declarations */ 71/* forward declarations */
68static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner); 72static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);
@@ -91,6 +95,19 @@ nfs4_lock_state(void)
91 mutex_lock(&client_mutex); 95 mutex_lock(&client_mutex);
92} 96}
93 97
98static void free_session(struct kref *);
99
100/* Must be called under the client_lock */
101static void nfsd4_put_session_locked(struct nfsd4_session *ses)
102{
103 kref_put(&ses->se_ref, free_session);
104}
105
106static void nfsd4_get_session(struct nfsd4_session *ses)
107{
108 kref_get(&ses->se_ref);
109}
110
94void 111void
95nfs4_unlock_state(void) 112nfs4_unlock_state(void)
96{ 113{
@@ -605,12 +622,20 @@ hash_sessionid(struct nfs4_sessionid *sessionid)
605 return sid->sequence % SESSION_HASH_SIZE; 622 return sid->sequence % SESSION_HASH_SIZE;
606} 623}
607 624
625#ifdef NFSD_DEBUG
608static inline void 626static inline void
609dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) 627dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
610{ 628{
611 u32 *ptr = (u32 *)(&sessionid->data[0]); 629 u32 *ptr = (u32 *)(&sessionid->data[0]);
612 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]); 630 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
613} 631}
632#else
633static inline void
634dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
635{
636}
637#endif
638
614 639
615static void 640static void
616gen_sessionid(struct nfsd4_session *ses) 641gen_sessionid(struct nfsd4_session *ses)
@@ -832,11 +857,12 @@ static void nfsd4_del_conns(struct nfsd4_session *s)
832 spin_unlock(&clp->cl_lock); 857 spin_unlock(&clp->cl_lock);
833} 858}
834 859
835void free_session(struct kref *kref) 860static void free_session(struct kref *kref)
836{ 861{
837 struct nfsd4_session *ses; 862 struct nfsd4_session *ses;
838 int mem; 863 int mem;
839 864
865 BUG_ON(!spin_is_locked(&client_lock));
840 ses = container_of(kref, struct nfsd4_session, se_ref); 866 ses = container_of(kref, struct nfsd4_session, se_ref);
841 nfsd4_del_conns(ses); 867 nfsd4_del_conns(ses);
842 spin_lock(&nfsd_drc_lock); 868 spin_lock(&nfsd_drc_lock);
@@ -847,6 +873,13 @@ void free_session(struct kref *kref)
847 kfree(ses); 873 kfree(ses);
848} 874}
849 875
876void nfsd4_put_session(struct nfsd4_session *ses)
877{
878 spin_lock(&client_lock);
879 nfsd4_put_session_locked(ses);
880 spin_unlock(&client_lock);
881}
882
850static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses) 883static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses)
851{ 884{
852 struct nfsd4_session *new; 885 struct nfsd4_session *new;
@@ -894,7 +927,9 @@ static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct n
894 status = nfsd4_new_conn_from_crses(rqstp, new); 927 status = nfsd4_new_conn_from_crses(rqstp, new);
895 /* whoops: benny points out, status is ignored! (err, or bogus) */ 928 /* whoops: benny points out, status is ignored! (err, or bogus) */
896 if (status) { 929 if (status) {
930 spin_lock(&client_lock);
897 free_session(&new->se_ref); 931 free_session(&new->se_ref);
932 spin_unlock(&client_lock);
898 return NULL; 933 return NULL;
899 } 934 }
900 if (cses->flags & SESSION4_BACK_CHAN) { 935 if (cses->flags & SESSION4_BACK_CHAN) {
@@ -1006,12 +1041,13 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
1006static inline void 1041static inline void
1007free_client(struct nfs4_client *clp) 1042free_client(struct nfs4_client *clp)
1008{ 1043{
1044 BUG_ON(!spin_is_locked(&client_lock));
1009 while (!list_empty(&clp->cl_sessions)) { 1045 while (!list_empty(&clp->cl_sessions)) {
1010 struct nfsd4_session *ses; 1046 struct nfsd4_session *ses;
1011 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, 1047 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1012 se_perclnt); 1048 se_perclnt);
1013 list_del(&ses->se_perclnt); 1049 list_del(&ses->se_perclnt);
1014 nfsd4_put_session(ses); 1050 nfsd4_put_session_locked(ses);
1015 } 1051 }
1016 if (clp->cl_cred.cr_group_info) 1052 if (clp->cl_cred.cr_group_info)
1017 put_group_info(clp->cl_cred.cr_group_info); 1053 put_group_info(clp->cl_cred.cr_group_info);
@@ -1138,12 +1174,12 @@ static void gen_clid(struct nfs4_client *clp)
1138 1174
1139static void gen_confirm(struct nfs4_client *clp) 1175static void gen_confirm(struct nfs4_client *clp)
1140{ 1176{
1177 __be32 verf[2];
1141 static u32 i; 1178 static u32 i;
1142 u32 *p;
1143 1179
1144 p = (u32 *)clp->cl_confirm.data; 1180 verf[0] = (__be32)get_seconds();
1145 *p++ = get_seconds(); 1181 verf[1] = (__be32)i++;
1146 *p++ = i++; 1182 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
1147} 1183}
1148 1184
1149static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t) 1185static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
@@ -1180,7 +1216,9 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
1180 if (princ) { 1216 if (princ) {
1181 clp->cl_principal = kstrdup(princ, GFP_KERNEL); 1217 clp->cl_principal = kstrdup(princ, GFP_KERNEL);
1182 if (clp->cl_principal == NULL) { 1218 if (clp->cl_principal == NULL) {
1219 spin_lock(&client_lock);
1183 free_client(clp); 1220 free_client(clp);
1221 spin_unlock(&client_lock);
1184 return NULL; 1222 return NULL;
1185 } 1223 }
1186 } 1224 }
@@ -1347,6 +1385,7 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
1347 slot->sl_opcnt = resp->opcnt; 1385 slot->sl_opcnt = resp->opcnt;
1348 slot->sl_status = resp->cstate.status; 1386 slot->sl_status = resp->cstate.status;
1349 1387
1388 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
1350 if (nfsd4_not_cached(resp)) { 1389 if (nfsd4_not_cached(resp)) {
1351 slot->sl_datalen = 0; 1390 slot->sl_datalen = 0;
1352 return; 1391 return;
@@ -1374,15 +1413,12 @@ nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
1374 struct nfsd4_op *op; 1413 struct nfsd4_op *op;
1375 struct nfsd4_slot *slot = resp->cstate.slot; 1414 struct nfsd4_slot *slot = resp->cstate.slot;
1376 1415
1377 dprintk("--> %s resp->opcnt %d cachethis %u \n", __func__,
1378 resp->opcnt, resp->cstate.slot->sl_cachethis);
1379
1380 /* Encode the replayed sequence operation */ 1416 /* Encode the replayed sequence operation */
1381 op = &args->ops[resp->opcnt - 1]; 1417 op = &args->ops[resp->opcnt - 1];
1382 nfsd4_encode_operation(resp, op); 1418 nfsd4_encode_operation(resp, op);
1383 1419
1384 /* Return nfserr_retry_uncached_rep in next operation. */ 1420 /* Return nfserr_retry_uncached_rep in next operation. */
1385 if (args->opcnt > 1 && slot->sl_cachethis == 0) { 1421 if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
1386 op = &args->ops[resp->opcnt++]; 1422 op = &args->ops[resp->opcnt++];
1387 op->status = nfserr_retry_uncached_rep; 1423 op->status = nfserr_retry_uncached_rep;
1388 nfsd4_encode_operation(resp, op); 1424 nfsd4_encode_operation(resp, op);
@@ -1575,16 +1611,11 @@ check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
1575 else 1611 else
1576 return nfserr_seq_misordered; 1612 return nfserr_seq_misordered;
1577 } 1613 }
1578 /* Normal */ 1614 /* Note unsigned 32-bit arithmetic handles wraparound: */
1579 if (likely(seqid == slot_seqid + 1)) 1615 if (likely(seqid == slot_seqid + 1))
1580 return nfs_ok; 1616 return nfs_ok;
1581 /* Replay */
1582 if (seqid == slot_seqid) 1617 if (seqid == slot_seqid)
1583 return nfserr_replay_cache; 1618 return nfserr_replay_cache;
1584 /* Wraparound */
1585 if (seqid == 1 && (slot_seqid + 1) == 0)
1586 return nfs_ok;
1587 /* Misordered replay or misordered new request */
1588 return nfserr_seq_misordered; 1619 return nfserr_seq_misordered;
1589} 1620}
1590 1621
@@ -1815,9 +1846,10 @@ nfsd4_destroy_session(struct svc_rqst *r,
1815 nfsd4_probe_callback_sync(ses->se_client); 1846 nfsd4_probe_callback_sync(ses->se_client);
1816 nfs4_unlock_state(); 1847 nfs4_unlock_state();
1817 1848
1849 spin_lock(&client_lock);
1818 nfsd4_del_conns(ses); 1850 nfsd4_del_conns(ses);
1819 1851 nfsd4_put_session_locked(ses);
1820 nfsd4_put_session(ses); 1852 spin_unlock(&client_lock);
1821 status = nfs_ok; 1853 status = nfs_ok;
1822out: 1854out:
1823 dprintk("%s returns %d\n", __func__, ntohl(status)); 1855 dprintk("%s returns %d\n", __func__, ntohl(status));
@@ -1921,8 +1953,12 @@ nfsd4_sequence(struct svc_rqst *rqstp,
1921 * sr_highest_slotid and the sr_target_slot id to maxslots */ 1953 * sr_highest_slotid and the sr_target_slot id to maxslots */
1922 seq->maxslots = session->se_fchannel.maxreqs; 1954 seq->maxslots = session->se_fchannel.maxreqs;
1923 1955
1924 status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_inuse); 1956 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
1957 slot->sl_flags & NFSD4_SLOT_INUSE);
1925 if (status == nfserr_replay_cache) { 1958 if (status == nfserr_replay_cache) {
1959 status = nfserr_seq_misordered;
1960 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
1961 goto out;
1926 cstate->slot = slot; 1962 cstate->slot = slot;
1927 cstate->session = session; 1963 cstate->session = session;
1928 /* Return the cached reply status and set cstate->status 1964 /* Return the cached reply status and set cstate->status
@@ -1938,9 +1974,12 @@ nfsd4_sequence(struct svc_rqst *rqstp,
1938 conn = NULL; 1974 conn = NULL;
1939 1975
1940 /* Success! bump slot seqid */ 1976 /* Success! bump slot seqid */
1941 slot->sl_inuse = true;
1942 slot->sl_seqid = seq->seqid; 1977 slot->sl_seqid = seq->seqid;
1943 slot->sl_cachethis = seq->cachethis; 1978 slot->sl_flags |= NFSD4_SLOT_INUSE;
1979 if (seq->cachethis)
1980 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
1981 else
1982 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
1944 1983
1945 cstate->slot = slot; 1984 cstate->slot = slot;
1946 cstate->session = session; 1985 cstate->session = session;
@@ -2030,7 +2069,8 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
2030 2069
2031 nfs4_lock_state(); 2070 nfs4_lock_state();
2032 status = nfserr_complete_already; 2071 status = nfserr_complete_already;
2033 if (cstate->session->se_client->cl_firststate) 2072 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
2073 &cstate->session->se_client->cl_flags))
2034 goto out; 2074 goto out;
2035 2075
2036 status = nfserr_stale_clientid; 2076 status = nfserr_stale_clientid;
@@ -2045,7 +2085,7 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
2045 goto out; 2085 goto out;
2046 2086
2047 status = nfs_ok; 2087 status = nfs_ok;
2048 nfsd4_create_clid_dir(cstate->session->se_client); 2088 nfsd4_client_record_create(cstate->session->se_client);
2049out: 2089out:
2050 nfs4_unlock_state(); 2090 nfs4_unlock_state();
2051 return status; 2091 return status;
@@ -2240,7 +2280,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2240 conf = find_confirmed_client_by_str(unconf->cl_recdir, 2280 conf = find_confirmed_client_by_str(unconf->cl_recdir,
2241 hash); 2281 hash);
2242 if (conf) { 2282 if (conf) {
2243 nfsd4_remove_clid_dir(conf); 2283 nfsd4_client_record_remove(conf);
2244 expire_client(conf); 2284 expire_client(conf);
2245 } 2285 }
2246 move_to_confirmed(unconf); 2286 move_to_confirmed(unconf);
@@ -2633,8 +2673,6 @@ nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
2633 2673
2634static int share_access_to_flags(u32 share_access) 2674static int share_access_to_flags(u32 share_access)
2635{ 2675{
2636 share_access &= ~NFS4_SHARE_WANT_MASK;
2637
2638 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE; 2676 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
2639} 2677}
2640 2678
@@ -2776,10 +2814,9 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
2776 2814
2777 2815
2778static void 2816static void
2779nfs4_set_claim_prev(struct nfsd4_open *open) 2817nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
2780{ 2818{
2781 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 2819 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
2782 open->op_openowner->oo_owner.so_client->cl_firststate = 1;
2783} 2820}
2784 2821
2785/* Should we give out recallable state?: */ 2822/* Should we give out recallable state?: */
@@ -2855,6 +2892,27 @@ static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
2855 return 0; 2892 return 0;
2856} 2893}
2857 2894
2895static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
2896{
2897 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
2898 if (status == -EAGAIN)
2899 open->op_why_no_deleg = WND4_CONTENTION;
2900 else {
2901 open->op_why_no_deleg = WND4_RESOURCE;
2902 switch (open->op_deleg_want) {
2903 case NFS4_SHARE_WANT_READ_DELEG:
2904 case NFS4_SHARE_WANT_WRITE_DELEG:
2905 case NFS4_SHARE_WANT_ANY_DELEG:
2906 break;
2907 case NFS4_SHARE_WANT_CANCEL:
2908 open->op_why_no_deleg = WND4_CANCELLED;
2909 break;
2910 case NFS4_SHARE_WANT_NO_DELEG:
2911 BUG(); /* not supposed to get here */
2912 }
2913 }
2914}
2915
2858/* 2916/*
2859 * Attempt to hand out a delegation. 2917 * Attempt to hand out a delegation.
2860 */ 2918 */
@@ -2864,7 +2922,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_
2864 struct nfs4_delegation *dp; 2922 struct nfs4_delegation *dp;
2865 struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner); 2923 struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
2866 int cb_up; 2924 int cb_up;
2867 int status, flag = 0; 2925 int status = 0, flag = 0;
2868 2926
2869 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); 2927 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
2870 flag = NFS4_OPEN_DELEGATE_NONE; 2928 flag = NFS4_OPEN_DELEGATE_NONE;
@@ -2905,11 +2963,16 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_
2905 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n", 2963 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
2906 STATEID_VAL(&dp->dl_stid.sc_stateid)); 2964 STATEID_VAL(&dp->dl_stid.sc_stateid));
2907out: 2965out:
2908 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS
2909 && flag == NFS4_OPEN_DELEGATE_NONE
2910 && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
2911 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
2912 open->op_delegate_type = flag; 2966 open->op_delegate_type = flag;
2967 if (flag == NFS4_OPEN_DELEGATE_NONE) {
2968 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
2969 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
2970 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
2971
2972 /* 4.1 client asking for a delegation? */
2973 if (open->op_deleg_want)
2974 nfsd4_open_deleg_none_ext(open, status);
2975 }
2913 return; 2976 return;
2914out_free: 2977out_free:
2915 nfs4_put_delegation(dp); 2978 nfs4_put_delegation(dp);
@@ -2918,6 +2981,24 @@ out_no_deleg:
2918 goto out; 2981 goto out;
2919} 2982}
2920 2983
2984static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
2985 struct nfs4_delegation *dp)
2986{
2987 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
2988 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
2989 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
2990 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
2991 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
2992 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
2993 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
2994 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
2995 }
2996 /* Otherwise the client must be confused wanting a delegation
2997 * it already has, therefore we don't return
2998 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
2999 */
3000}
3001
2921/* 3002/*
2922 * called with nfs4_lock_state() held. 3003 * called with nfs4_lock_state() held.
2923 */ 3004 */
@@ -2979,24 +3060,36 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
2979 update_stateid(&stp->st_stid.sc_stateid); 3060 update_stateid(&stp->st_stid.sc_stateid);
2980 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 3061 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
2981 3062
2982 if (nfsd4_has_session(&resp->cstate)) 3063 if (nfsd4_has_session(&resp->cstate)) {
2983 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 3064 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
2984 3065
3066 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
3067 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3068 open->op_why_no_deleg = WND4_NOT_WANTED;
3069 goto nodeleg;
3070 }
3071 }
3072
2985 /* 3073 /*
2986 * Attempt to hand out a delegation. No error return, because the 3074 * Attempt to hand out a delegation. No error return, because the
2987 * OPEN succeeds even if we fail. 3075 * OPEN succeeds even if we fail.
2988 */ 3076 */
2989 nfs4_open_delegation(current_fh, open, stp); 3077 nfs4_open_delegation(current_fh, open, stp);
2990 3078nodeleg:
2991 status = nfs_ok; 3079 status = nfs_ok;
2992 3080
2993 dprintk("%s: stateid=" STATEID_FMT "\n", __func__, 3081 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
2994 STATEID_VAL(&stp->st_stid.sc_stateid)); 3082 STATEID_VAL(&stp->st_stid.sc_stateid));
2995out: 3083out:
3084 /* 4.1 client trying to upgrade/downgrade delegation? */
3085 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
3086 open->op_deleg_want)
3087 nfsd4_deleg_xgrade_none_ext(open, dp);
3088
2996 if (fp) 3089 if (fp)
2997 put_nfs4_file(fp); 3090 put_nfs4_file(fp);
2998 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) 3091 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
2999 nfs4_set_claim_prev(open); 3092 nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
3000 /* 3093 /*
3001 * To finish the open response, we just need to set the rflags. 3094 * To finish the open response, we just need to set the rflags.
3002 */ 3095 */
@@ -3066,7 +3159,7 @@ static void
3066nfsd4_end_grace(void) 3159nfsd4_end_grace(void)
3067{ 3160{
3068 dprintk("NFSD: end of grace period\n"); 3161 dprintk("NFSD: end of grace period\n");
3069 nfsd4_recdir_purge_old(); 3162 nfsd4_record_grace_done(&init_net, boot_time);
3070 locks_end_grace(&nfsd4_manager); 3163 locks_end_grace(&nfsd4_manager);
3071 /* 3164 /*
3072 * Now that every NFSv4 client has had the chance to recover and 3165 * Now that every NFSv4 client has had the chance to recover and
@@ -3115,7 +3208,7 @@ nfs4_laundromat(void)
3115 clp = list_entry(pos, struct nfs4_client, cl_lru); 3208 clp = list_entry(pos, struct nfs4_client, cl_lru);
3116 dprintk("NFSD: purging unused client (clientid %08x)\n", 3209 dprintk("NFSD: purging unused client (clientid %08x)\n",
3117 clp->cl_clientid.cl_id); 3210 clp->cl_clientid.cl_id);
3118 nfsd4_remove_clid_dir(clp); 3211 nfsd4_client_record_remove(clp);
3119 expire_client(clp); 3212 expire_client(clp);
3120 } 3213 }
3121 spin_lock(&recall_lock); 3214 spin_lock(&recall_lock);
@@ -3400,7 +3493,14 @@ __be32
3400nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3493nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3401 struct nfsd4_test_stateid *test_stateid) 3494 struct nfsd4_test_stateid *test_stateid)
3402{ 3495{
3403 /* real work is done during encoding */ 3496 struct nfsd4_test_stateid_id *stateid;
3497 struct nfs4_client *cl = cstate->session->se_client;
3498
3499 nfs4_lock_state();
3500 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
3501 stateid->ts_id_status = nfs4_validate_stateid(cl, &stateid->ts_id_stateid);
3502 nfs4_unlock_state();
3503
3404 return nfs_ok; 3504 return nfs_ok;
3405} 3505}
3406 3506
@@ -3539,7 +3639,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3539 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", 3639 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
3540 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid)); 3640 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
3541 3641
3542 nfsd4_create_clid_dir(oo->oo_owner.so_client); 3642 nfsd4_client_record_create(oo->oo_owner.so_client);
3543 status = nfs_ok; 3643 status = nfs_ok;
3544out: 3644out:
3545 if (!cstate->replay_owner) 3645 if (!cstate->replay_owner)
@@ -3596,7 +3696,9 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
3596 cstate->current_fh.fh_dentry->d_name.name); 3696 cstate->current_fh.fh_dentry->d_name.name);
3597 3697
3598 /* We don't yet support WANT bits: */ 3698 /* We don't yet support WANT bits: */
3599 od->od_share_access &= NFS4_SHARE_ACCESS_MASK; 3699 if (od->od_deleg_want)
3700 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
3701 od->od_deleg_want);
3600 3702
3601 nfs4_lock_state(); 3703 nfs4_lock_state();
3602 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, 3704 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
@@ -4353,7 +4455,9 @@ nfs4_has_reclaimed_state(const char *name, bool use_exchange_id)
4353 struct nfs4_client *clp; 4455 struct nfs4_client *clp;
4354 4456
4355 clp = find_confirmed_client_by_str(name, strhashval); 4457 clp = find_confirmed_client_by_str(name, strhashval);
4356 return clp ? 1 : 0; 4458 if (!clp)
4459 return 0;
4460 return test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
4357} 4461}
4358 4462
4359/* 4463/*
@@ -4377,7 +4481,7 @@ nfs4_client_to_reclaim(const char *name)
4377 return 1; 4481 return 1;
4378} 4482}
4379 4483
4380static void 4484void
4381nfs4_release_reclaim(void) 4485nfs4_release_reclaim(void)
4382{ 4486{
4383 struct nfs4_client_reclaim *crp = NULL; 4487 struct nfs4_client_reclaim *crp = NULL;
@@ -4397,19 +4501,12 @@ nfs4_release_reclaim(void)
4397 4501
4398/* 4502/*
4399 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */ 4503 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
4400static struct nfs4_client_reclaim * 4504struct nfs4_client_reclaim *
4401nfs4_find_reclaim_client(clientid_t *clid) 4505nfsd4_find_reclaim_client(struct nfs4_client *clp)
4402{ 4506{
4403 unsigned int strhashval; 4507 unsigned int strhashval;
4404 struct nfs4_client *clp;
4405 struct nfs4_client_reclaim *crp = NULL; 4508 struct nfs4_client_reclaim *crp = NULL;
4406 4509
4407
4408 /* find clientid in conf_id_hashtbl */
4409 clp = find_confirmed_client(clid);
4410 if (clp == NULL)
4411 return NULL;
4412
4413 dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n", 4510 dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n",
4414 clp->cl_name.len, clp->cl_name.data, 4511 clp->cl_name.len, clp->cl_name.data,
4415 clp->cl_recdir); 4512 clp->cl_recdir);
@@ -4430,7 +4527,14 @@ nfs4_find_reclaim_client(clientid_t *clid)
4430__be32 4527__be32
4431nfs4_check_open_reclaim(clientid_t *clid) 4528nfs4_check_open_reclaim(clientid_t *clid)
4432{ 4529{
4433 return nfs4_find_reclaim_client(clid) ? nfs_ok : nfserr_reclaim_bad; 4530 struct nfs4_client *clp;
4531
4532 /* find clientid in conf_id_hashtbl */
4533 clp = find_confirmed_client(clid);
4534 if (clp == NULL)
4535 return nfserr_reclaim_bad;
4536
4537 return nfsd4_client_record_check(clp) ? nfserr_reclaim_bad : nfs_ok;
4434} 4538}
4435 4539
4436#ifdef CONFIG_NFSD_FAULT_INJECTION 4540#ifdef CONFIG_NFSD_FAULT_INJECTION
@@ -4442,7 +4546,7 @@ void nfsd_forget_clients(u64 num)
4442 4546
4443 nfs4_lock_state(); 4547 nfs4_lock_state();
4444 list_for_each_entry_safe(clp, next, &client_lru, cl_lru) { 4548 list_for_each_entry_safe(clp, next, &client_lru, cl_lru) {
4445 nfsd4_remove_clid_dir(clp); 4549 nfsd4_client_record_remove(clp);
4446 expire_client(clp); 4550 expire_client(clp);
4447 if (++count == num) 4551 if (++count == num)
4448 break; 4552 break;
@@ -4577,19 +4681,6 @@ nfs4_state_init(void)
4577 reclaim_str_hashtbl_size = 0; 4681 reclaim_str_hashtbl_size = 0;
4578} 4682}
4579 4683
4580static void
4581nfsd4_load_reboot_recovery_data(void)
4582{
4583 int status;
4584
4585 nfs4_lock_state();
4586 nfsd4_init_recdir();
4587 status = nfsd4_recdir_load();
4588 nfs4_unlock_state();
4589 if (status)
4590 printk("NFSD: Failure reading reboot recovery data\n");
4591}
4592
4593/* 4684/*
4594 * Since the lifetime of a delegation isn't limited to that of an open, a 4685 * Since the lifetime of a delegation isn't limited to that of an open, a
4595 * client may quite reasonably hang on to a delegation as long as it has 4686 * client may quite reasonably hang on to a delegation as long as it has
@@ -4613,21 +4704,34 @@ set_max_delegations(void)
4613 4704
4614/* initialization to perform when the nfsd service is started: */ 4705/* initialization to perform when the nfsd service is started: */
4615 4706
4616static int 4707int
4617__nfs4_state_start(void) 4708nfs4_state_start(void)
4618{ 4709{
4619 int ret; 4710 int ret;
4620 4711
4712 /*
4713 * FIXME: For now, we hang most of the pernet global stuff off of
4714 * init_net until nfsd is fully containerized. Eventually, we'll
4715 * need to pass a net pointer into this function, take a reference
4716 * to that instead and then do most of the rest of this on a per-net
4717 * basis.
4718 */
4719 get_net(&init_net);
4720 nfsd4_client_tracking_init(&init_net);
4621 boot_time = get_seconds(); 4721 boot_time = get_seconds();
4622 locks_start_grace(&nfsd4_manager); 4722 locks_start_grace(&nfsd4_manager);
4623 printk(KERN_INFO "NFSD: starting %ld-second grace period\n", 4723 printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
4624 nfsd4_grace); 4724 nfsd4_grace);
4625 ret = set_callback_cred(); 4725 ret = set_callback_cred();
4626 if (ret) 4726 if (ret) {
4627 return -ENOMEM; 4727 ret = -ENOMEM;
4728 goto out_recovery;
4729 }
4628 laundry_wq = create_singlethread_workqueue("nfsd4"); 4730 laundry_wq = create_singlethread_workqueue("nfsd4");
4629 if (laundry_wq == NULL) 4731 if (laundry_wq == NULL) {
4630 return -ENOMEM; 4732 ret = -ENOMEM;
4733 goto out_recovery;
4734 }
4631 ret = nfsd4_create_callback_queue(); 4735 ret = nfsd4_create_callback_queue();
4632 if (ret) 4736 if (ret)
4633 goto out_free_laundry; 4737 goto out_free_laundry;
@@ -4636,16 +4740,12 @@ __nfs4_state_start(void)
4636 return 0; 4740 return 0;
4637out_free_laundry: 4741out_free_laundry:
4638 destroy_workqueue(laundry_wq); 4742 destroy_workqueue(laundry_wq);
4743out_recovery:
4744 nfsd4_client_tracking_exit(&init_net);
4745 put_net(&init_net);
4639 return ret; 4746 return ret;
4640} 4747}
4641 4748
4642int
4643nfs4_state_start(void)
4644{
4645 nfsd4_load_reboot_recovery_data();
4646 return __nfs4_state_start();
4647}
4648
4649static void 4749static void
4650__nfs4_state_shutdown(void) 4750__nfs4_state_shutdown(void)
4651{ 4751{
@@ -4676,7 +4776,8 @@ __nfs4_state_shutdown(void)
4676 unhash_delegation(dp); 4776 unhash_delegation(dp);
4677 } 4777 }
4678 4778
4679 nfsd4_shutdown_recdir(); 4779 nfsd4_client_tracking_exit(&init_net);
4780 put_net(&init_net);
4680} 4781}
4681 4782
4682void 4783void
@@ -4686,8 +4787,108 @@ nfs4_state_shutdown(void)
4686 destroy_workqueue(laundry_wq); 4787 destroy_workqueue(laundry_wq);
4687 locks_end_grace(&nfsd4_manager); 4788 locks_end_grace(&nfsd4_manager);
4688 nfs4_lock_state(); 4789 nfs4_lock_state();
4689 nfs4_release_reclaim();
4690 __nfs4_state_shutdown(); 4790 __nfs4_state_shutdown();
4691 nfs4_unlock_state(); 4791 nfs4_unlock_state();
4692 nfsd4_destroy_callback_queue(); 4792 nfsd4_destroy_callback_queue();
4693} 4793}
4794
4795static void
4796get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
4797{
4798 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
4799 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
4800}
4801
4802static void
4803put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
4804{
4805 if (cstate->minorversion) {
4806 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
4807 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
4808 }
4809}
4810
4811void
4812clear_current_stateid(struct nfsd4_compound_state *cstate)
4813{
4814 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
4815}
4816
4817/*
4818 * functions to set current state id
4819 */
4820void
4821nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
4822{
4823 put_stateid(cstate, &odp->od_stateid);
4824}
4825
4826void
4827nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
4828{
4829 put_stateid(cstate, &open->op_stateid);
4830}
4831
4832void
4833nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
4834{
4835 put_stateid(cstate, &close->cl_stateid);
4836}
4837
4838void
4839nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
4840{
4841 put_stateid(cstate, &lock->lk_resp_stateid);
4842}
4843
4844/*
4845 * functions to consume current state id
4846 */
4847
4848void
4849nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
4850{
4851 get_stateid(cstate, &odp->od_stateid);
4852}
4853
4854void
4855nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
4856{
4857 get_stateid(cstate, &drp->dr_stateid);
4858}
4859
4860void
4861nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
4862{
4863 get_stateid(cstate, &fsp->fr_stateid);
4864}
4865
4866void
4867nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
4868{
4869 get_stateid(cstate, &setattr->sa_stateid);
4870}
4871
4872void
4873nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
4874{
4875 get_stateid(cstate, &close->cl_stateid);
4876}
4877
4878void
4879nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
4880{
4881 get_stateid(cstate, &locku->lu_stateid);
4882}
4883
4884void
4885nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
4886{
4887 get_stateid(cstate, &read->rd_stateid);
4888}
4889
4890void
4891nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
4892{
4893 get_stateid(cstate, &write->wr_stateid);
4894}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 0ec5a1b9700e..bcd8904ab1e3 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -133,22 +133,6 @@ xdr_error: \
133 } \ 133 } \
134} while (0) 134} while (0)
135 135
136static void save_buf(struct nfsd4_compoundargs *argp, struct nfsd4_saved_compoundargs *savep)
137{
138 savep->p = argp->p;
139 savep->end = argp->end;
140 savep->pagelen = argp->pagelen;
141 savep->pagelist = argp->pagelist;
142}
143
144static void restore_buf(struct nfsd4_compoundargs *argp, struct nfsd4_saved_compoundargs *savep)
145{
146 argp->p = savep->p;
147 argp->end = savep->end;
148 argp->pagelen = savep->pagelen;
149 argp->pagelist = savep->pagelist;
150}
151
152static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes) 136static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
153{ 137{
154 /* We want more bytes than seem to be available. 138 /* We want more bytes than seem to be available.
@@ -638,14 +622,18 @@ nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup
638 DECODE_TAIL; 622 DECODE_TAIL;
639} 623}
640 624
641static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *x) 625static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *share_access, u32 *deleg_want, u32 *deleg_when)
642{ 626{
643 __be32 *p; 627 __be32 *p;
644 u32 w; 628 u32 w;
645 629
646 READ_BUF(4); 630 READ_BUF(4);
647 READ32(w); 631 READ32(w);
648 *x = w; 632 *share_access = w & NFS4_SHARE_ACCESS_MASK;
633 *deleg_want = w & NFS4_SHARE_WANT_MASK;
634 if (deleg_when)
635 *deleg_when = w & NFS4_SHARE_WHEN_MASK;
636
649 switch (w & NFS4_SHARE_ACCESS_MASK) { 637 switch (w & NFS4_SHARE_ACCESS_MASK) {
650 case NFS4_SHARE_ACCESS_READ: 638 case NFS4_SHARE_ACCESS_READ:
651 case NFS4_SHARE_ACCESS_WRITE: 639 case NFS4_SHARE_ACCESS_WRITE:
@@ -673,6 +661,9 @@ static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *x)
673 w &= ~NFS4_SHARE_WANT_MASK; 661 w &= ~NFS4_SHARE_WANT_MASK;
674 if (!w) 662 if (!w)
675 return nfs_ok; 663 return nfs_ok;
664
665 if (!deleg_when) /* open_downgrade */
666 return nfserr_inval;
676 switch (w) { 667 switch (w) {
677 case NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL: 668 case NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL:
678 case NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED: 669 case NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED:
@@ -719,6 +710,7 @@ static __be32
719nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open) 710nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
720{ 711{
721 DECODE_HEAD; 712 DECODE_HEAD;
713 u32 dummy;
722 714
723 memset(open->op_bmval, 0, sizeof(open->op_bmval)); 715 memset(open->op_bmval, 0, sizeof(open->op_bmval));
724 open->op_iattr.ia_valid = 0; 716 open->op_iattr.ia_valid = 0;
@@ -727,7 +719,9 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
727 /* seqid, share_access, share_deny, clientid, ownerlen */ 719 /* seqid, share_access, share_deny, clientid, ownerlen */
728 READ_BUF(4); 720 READ_BUF(4);
729 READ32(open->op_seqid); 721 READ32(open->op_seqid);
730 status = nfsd4_decode_share_access(argp, &open->op_share_access); 722 /* decode, yet ignore deleg_when until supported */
723 status = nfsd4_decode_share_access(argp, &open->op_share_access,
724 &open->op_deleg_want, &dummy);
731 if (status) 725 if (status)
732 goto xdr_error; 726 goto xdr_error;
733 status = nfsd4_decode_share_deny(argp, &open->op_share_deny); 727 status = nfsd4_decode_share_deny(argp, &open->op_share_deny);
@@ -755,14 +749,14 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
755 goto out; 749 goto out;
756 break; 750 break;
757 case NFS4_CREATE_EXCLUSIVE: 751 case NFS4_CREATE_EXCLUSIVE:
758 READ_BUF(8); 752 READ_BUF(NFS4_VERIFIER_SIZE);
759 COPYMEM(open->op_verf.data, 8); 753 COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
760 break; 754 break;
761 case NFS4_CREATE_EXCLUSIVE4_1: 755 case NFS4_CREATE_EXCLUSIVE4_1:
762 if (argp->minorversion < 1) 756 if (argp->minorversion < 1)
763 goto xdr_error; 757 goto xdr_error;
764 READ_BUF(8); 758 READ_BUF(NFS4_VERIFIER_SIZE);
765 COPYMEM(open->op_verf.data, 8); 759 COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
766 status = nfsd4_decode_fattr(argp, open->op_bmval, 760 status = nfsd4_decode_fattr(argp, open->op_bmval,
767 &open->op_iattr, &open->op_acl); 761 &open->op_iattr, &open->op_acl);
768 if (status) 762 if (status)
@@ -848,7 +842,8 @@ nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_d
848 return status; 842 return status;
849 READ_BUF(4); 843 READ_BUF(4);
850 READ32(open_down->od_seqid); 844 READ32(open_down->od_seqid);
851 status = nfsd4_decode_share_access(argp, &open_down->od_share_access); 845 status = nfsd4_decode_share_access(argp, &open_down->od_share_access,
846 &open_down->od_deleg_want, NULL);
852 if (status) 847 if (status)
853 return status; 848 return status;
854 status = nfsd4_decode_share_deny(argp, &open_down->od_share_deny); 849 status = nfsd4_decode_share_deny(argp, &open_down->od_share_deny);
@@ -994,8 +989,8 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
994{ 989{
995 DECODE_HEAD; 990 DECODE_HEAD;
996 991
997 READ_BUF(8); 992 READ_BUF(NFS4_VERIFIER_SIZE);
998 COPYMEM(setclientid->se_verf.data, 8); 993 COPYMEM(setclientid->se_verf.data, NFS4_VERIFIER_SIZE);
999 994
1000 status = nfsd4_decode_opaque(argp, &setclientid->se_name); 995 status = nfsd4_decode_opaque(argp, &setclientid->se_name);
1001 if (status) 996 if (status)
@@ -1020,9 +1015,9 @@ nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_s
1020{ 1015{
1021 DECODE_HEAD; 1016 DECODE_HEAD;
1022 1017
1023 READ_BUF(8 + sizeof(nfs4_verifier)); 1018 READ_BUF(8 + NFS4_VERIFIER_SIZE);
1024 COPYMEM(&scd_c->sc_clientid, 8); 1019 COPYMEM(&scd_c->sc_clientid, 8);
1025 COPYMEM(&scd_c->sc_confirm, sizeof(nfs4_verifier)); 1020 COPYMEM(&scd_c->sc_confirm, NFS4_VERIFIER_SIZE);
1026 1021
1027 DECODE_TAIL; 1022 DECODE_TAIL;
1028} 1023}
@@ -1385,26 +1380,29 @@ nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
1385static __be32 1380static __be32
1386nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_stateid *test_stateid) 1381nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_stateid *test_stateid)
1387{ 1382{
1388 unsigned int nbytes;
1389 stateid_t si;
1390 int i; 1383 int i;
1391 __be32 *p; 1384 __be32 *p, status;
1392 __be32 status; 1385 struct nfsd4_test_stateid_id *stateid;
1393 1386
1394 READ_BUF(4); 1387 READ_BUF(4);
1395 test_stateid->ts_num_ids = ntohl(*p++); 1388 test_stateid->ts_num_ids = ntohl(*p++);
1396 1389
1397 nbytes = test_stateid->ts_num_ids * sizeof(stateid_t); 1390 INIT_LIST_HEAD(&test_stateid->ts_stateid_list);
1398 if (nbytes > (u32)((char *)argp->end - (char *)argp->p))
1399 goto xdr_error;
1400
1401 test_stateid->ts_saved_args = argp;
1402 save_buf(argp, &test_stateid->ts_savedp);
1403 1391
1404 for (i = 0; i < test_stateid->ts_num_ids; i++) { 1392 for (i = 0; i < test_stateid->ts_num_ids; i++) {
1405 status = nfsd4_decode_stateid(argp, &si); 1393 stateid = kmalloc(sizeof(struct nfsd4_test_stateid_id), GFP_KERNEL);
1394 if (!stateid) {
1395 status = PTR_ERR(stateid);
1396 goto out;
1397 }
1398
1399 defer_free(argp, kfree, stateid);
1400 INIT_LIST_HEAD(&stateid->ts_id_list);
1401 list_add_tail(&stateid->ts_id_list, &test_stateid->ts_stateid_list);
1402
1403 status = nfsd4_decode_stateid(argp, &stateid->ts_id_stateid);
1406 if (status) 1404 if (status)
1407 return status; 1405 goto out;
1408 } 1406 }
1409 1407
1410 status = 0; 1408 status = 0;
@@ -2661,8 +2659,8 @@ nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
2661 __be32 *p; 2659 __be32 *p;
2662 2660
2663 if (!nfserr) { 2661 if (!nfserr) {
2664 RESERVE_SPACE(8); 2662 RESERVE_SPACE(NFS4_VERIFIER_SIZE);
2665 WRITEMEM(commit->co_verf.data, 8); 2663 WRITEMEM(commit->co_verf.data, NFS4_VERIFIER_SIZE);
2666 ADJUST_ARGS(); 2664 ADJUST_ARGS();
2667 } 2665 }
2668 return nfserr; 2666 return nfserr;
@@ -2851,6 +2849,20 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op
2851 WRITE32(0); /* XXX: is NULL principal ok? */ 2849 WRITE32(0); /* XXX: is NULL principal ok? */
2852 ADJUST_ARGS(); 2850 ADJUST_ARGS();
2853 break; 2851 break;
2852 case NFS4_OPEN_DELEGATE_NONE_EXT: /* 4.1 */
2853 switch (open->op_why_no_deleg) {
2854 case WND4_CONTENTION:
2855 case WND4_RESOURCE:
2856 RESERVE_SPACE(8);
2857 WRITE32(open->op_why_no_deleg);
2858 WRITE32(0); /* deleg signaling not supported yet */
2859 break;
2860 default:
2861 RESERVE_SPACE(4);
2862 WRITE32(open->op_why_no_deleg);
2863 }
2864 ADJUST_ARGS();
2865 break;
2854 default: 2866 default:
2855 BUG(); 2867 BUG();
2856 } 2868 }
@@ -3008,7 +3020,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
3008 if (resp->xbuf->page_len) 3020 if (resp->xbuf->page_len)
3009 return nfserr_resource; 3021 return nfserr_resource;
3010 3022
3011 RESERVE_SPACE(8); /* verifier */ 3023 RESERVE_SPACE(NFS4_VERIFIER_SIZE);
3012 savep = p; 3024 savep = p;
3013 3025
3014 /* XXX: Following NFSv3, we ignore the READDIR verifier for now. */ 3026 /* XXX: Following NFSv3, we ignore the READDIR verifier for now. */
@@ -3209,9 +3221,9 @@ nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct n
3209 __be32 *p; 3221 __be32 *p;
3210 3222
3211 if (!nfserr) { 3223 if (!nfserr) {
3212 RESERVE_SPACE(8 + sizeof(nfs4_verifier)); 3224 RESERVE_SPACE(8 + NFS4_VERIFIER_SIZE);
3213 WRITEMEM(&scd->se_clientid, 8); 3225 WRITEMEM(&scd->se_clientid, 8);
3214 WRITEMEM(&scd->se_confirm, sizeof(nfs4_verifier)); 3226 WRITEMEM(&scd->se_confirm, NFS4_VERIFIER_SIZE);
3215 ADJUST_ARGS(); 3227 ADJUST_ARGS();
3216 } 3228 }
3217 else if (nfserr == nfserr_clid_inuse) { 3229 else if (nfserr == nfserr_clid_inuse) {
@@ -3232,7 +3244,7 @@ nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_w
3232 RESERVE_SPACE(16); 3244 RESERVE_SPACE(16);
3233 WRITE32(write->wr_bytes_written); 3245 WRITE32(write->wr_bytes_written);
3234 WRITE32(write->wr_how_written); 3246 WRITE32(write->wr_how_written);
3235 WRITEMEM(write->wr_verifier.data, 8); 3247 WRITEMEM(write->wr_verifier.data, NFS4_VERIFIER_SIZE);
3236 ADJUST_ARGS(); 3248 ADJUST_ARGS();
3237 } 3249 }
3238 return nfserr; 3250 return nfserr;
@@ -3391,30 +3403,17 @@ __be32
3391nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr, 3403nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr,
3392 struct nfsd4_test_stateid *test_stateid) 3404 struct nfsd4_test_stateid *test_stateid)
3393{ 3405{
3394 struct nfsd4_compoundargs *argp; 3406 struct nfsd4_test_stateid_id *stateid, *next;
3395 struct nfs4_client *cl = resp->cstate.session->se_client;
3396 stateid_t si;
3397 __be32 *p; 3407 __be32 *p;
3398 int i;
3399 int valid;
3400
3401 restore_buf(test_stateid->ts_saved_args, &test_stateid->ts_savedp);
3402 argp = test_stateid->ts_saved_args;
3403 3408
3404 RESERVE_SPACE(4); 3409 RESERVE_SPACE(4 + (4 * test_stateid->ts_num_ids));
3405 *p++ = htonl(test_stateid->ts_num_ids); 3410 *p++ = htonl(test_stateid->ts_num_ids);
3406 resp->p = p;
3407 3411
3408 nfs4_lock_state(); 3412 list_for_each_entry_safe(stateid, next, &test_stateid->ts_stateid_list, ts_id_list) {
3409 for (i = 0; i < test_stateid->ts_num_ids; i++) { 3413 *p++ = htonl(stateid->ts_id_status);
3410 nfsd4_decode_stateid(argp, &si);
3411 valid = nfs4_validate_stateid(cl, &si);
3412 RESERVE_SPACE(4);
3413 *p++ = htonl(valid);
3414 resp->p = p;
3415 } 3414 }
3416 nfs4_unlock_state();
3417 3415
3416 ADJUST_ARGS();
3418 return nfserr; 3417 return nfserr;
3419} 3418}
3420 3419
@@ -3532,7 +3531,7 @@ int nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad)
3532 if (length > session->se_fchannel.maxresp_sz) 3531 if (length > session->se_fchannel.maxresp_sz)
3533 return nfserr_rep_too_big; 3532 return nfserr_rep_too_big;
3534 3533
3535 if (slot->sl_cachethis == 1 && 3534 if ((slot->sl_flags & NFSD4_SLOT_CACHETHIS) &&
3536 length > session->se_fchannel.maxresp_cached) 3535 length > session->se_fchannel.maxresp_cached)
3537 return nfserr_rep_too_big_to_cache; 3536 return nfserr_rep_too_big_to_cache;
3538 3537
@@ -3656,8 +3655,7 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo
3656 if (nfsd4_has_session(cs)) { 3655 if (nfsd4_has_session(cs)) {
3657 if (cs->status != nfserr_replay_cache) { 3656 if (cs->status != nfserr_replay_cache) {
3658 nfsd4_store_cache_entry(resp); 3657 nfsd4_store_cache_entry(resp);
3659 dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__); 3658 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3660 cs->slot->sl_inuse = false;
3661 } 3659 }
3662 /* Renew the clientid on success and on replay */ 3660 /* Renew the clientid on success and on replay */
3663 release_session_client(cs->session); 3661 release_session_client(cs->session);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 64c24af8d7ea..2c53be6d3579 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -13,12 +13,14 @@
13#include <linux/sunrpc/clnt.h> 13#include <linux/sunrpc/clnt.h>
14#include <linux/sunrpc/gss_api.h> 14#include <linux/sunrpc/gss_api.h>
15#include <linux/sunrpc/gss_krb5_enctypes.h> 15#include <linux/sunrpc/gss_krb5_enctypes.h>
16#include <linux/sunrpc/rpc_pipe_fs.h>
16#include <linux/module.h> 17#include <linux/module.h>
17 18
18#include "idmap.h" 19#include "idmap.h"
19#include "nfsd.h" 20#include "nfsd.h"
20#include "cache.h" 21#include "cache.h"
21#include "fault_inject.h" 22#include "fault_inject.h"
23#include "netns.h"
22 24
23/* 25/*
24 * We have a single directory with several nodes in it. 26 * We have a single directory with several nodes in it.
@@ -1124,14 +1126,26 @@ static int create_proc_exports_entry(void)
1124} 1126}
1125#endif 1127#endif
1126 1128
1129int nfsd_net_id;
1130static struct pernet_operations nfsd_net_ops = {
1131 .id = &nfsd_net_id,
1132 .size = sizeof(struct nfsd_net),
1133};
1134
1127static int __init init_nfsd(void) 1135static int __init init_nfsd(void)
1128{ 1136{
1129 int retval; 1137 int retval;
1130 printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n"); 1138 printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
1131 1139
1132 retval = nfsd4_init_slabs(); 1140 retval = register_cld_notifier();
1133 if (retval) 1141 if (retval)
1134 return retval; 1142 return retval;
1143 retval = register_pernet_subsys(&nfsd_net_ops);
1144 if (retval < 0)
1145 goto out_unregister_notifier;
1146 retval = nfsd4_init_slabs();
1147 if (retval)
1148 goto out_unregister_pernet;
1135 nfs4_state_init(); 1149 nfs4_state_init();
1136 retval = nfsd_fault_inject_init(); /* nfsd fault injection controls */ 1150 retval = nfsd_fault_inject_init(); /* nfsd fault injection controls */
1137 if (retval) 1151 if (retval)
@@ -1169,6 +1183,10 @@ out_free_stat:
1169 nfsd_fault_inject_cleanup(); 1183 nfsd_fault_inject_cleanup();
1170out_free_slabs: 1184out_free_slabs:
1171 nfsd4_free_slabs(); 1185 nfsd4_free_slabs();
1186out_unregister_pernet:
1187 unregister_pernet_subsys(&nfsd_net_ops);
1188out_unregister_notifier:
1189 unregister_cld_notifier();
1172 return retval; 1190 return retval;
1173} 1191}
1174 1192
@@ -1184,6 +1202,8 @@ static void __exit exit_nfsd(void)
1184 nfsd4_free_slabs(); 1202 nfsd4_free_slabs();
1185 nfsd_fault_inject_cleanup(); 1203 nfsd_fault_inject_cleanup();
1186 unregister_filesystem(&nfsd_fs_type); 1204 unregister_filesystem(&nfsd_fs_type);
1205 unregister_pernet_subsys(&nfsd_net_ops);
1206 unregister_cld_notifier();
1187} 1207}
1188 1208
1189MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>"); 1209MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 1d1e8589b4ce..1671429ffa66 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -364,12 +364,17 @@ static inline u32 nfsd_suppattrs2(u32 minorversion)
364 NFSD_WRITEABLE_ATTRS_WORD2 364 NFSD_WRITEABLE_ATTRS_WORD2
365 365
366extern int nfsd4_is_junction(struct dentry *dentry); 366extern int nfsd4_is_junction(struct dentry *dentry);
367#else 367extern int register_cld_notifier(void);
368extern void unregister_cld_notifier(void);
369#else /* CONFIG_NFSD_V4 */
368static inline int nfsd4_is_junction(struct dentry *dentry) 370static inline int nfsd4_is_junction(struct dentry *dentry)
369{ 371{
370 return 0; 372 return 0;
371} 373}
372 374
375#define register_cld_notifier() 0
376#define unregister_cld_notifier() do { } while(0)
377
373#endif /* CONFIG_NFSD_V4 */ 378#endif /* CONFIG_NFSD_V4 */
374 379
375#endif /* LINUX_NFSD_NFSD_H */ 380#endif /* LINUX_NFSD_NFSD_H */
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index fce472f5f39e..28dfad39f0c5 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -307,33 +307,37 @@ static void set_max_drc(void)
307 dprintk("%s nfsd_drc_max_mem %u \n", __func__, nfsd_drc_max_mem); 307 dprintk("%s nfsd_drc_max_mem %u \n", __func__, nfsd_drc_max_mem);
308} 308}
309 309
310int nfsd_create_serv(void) 310static int nfsd_get_default_max_blksize(void)
311{ 311{
312 int err = 0; 312 struct sysinfo i;
313 unsigned long long target;
314 unsigned long ret;
315
316 si_meminfo(&i);
317 target = (i.totalram - i.totalhigh) << PAGE_SHIFT;
318 /*
319 * Aim for 1/4096 of memory per thread This gives 1MB on 4Gig
320 * machines, but only uses 32K on 128M machines. Bottom out at
321 * 8K on 32M and smaller. Of course, this is only a default.
322 */
323 target >>= 12;
324
325 ret = NFSSVC_MAXBLKSIZE;
326 while (ret > target && ret >= 8*1024*2)
327 ret /= 2;
328 return ret;
329}
313 330
331int nfsd_create_serv(void)
332{
314 WARN_ON(!mutex_is_locked(&nfsd_mutex)); 333 WARN_ON(!mutex_is_locked(&nfsd_mutex));
315 if (nfsd_serv) { 334 if (nfsd_serv) {
316 svc_get(nfsd_serv); 335 svc_get(nfsd_serv);
317 return 0; 336 return 0;
318 } 337 }
319 if (nfsd_max_blksize == 0) { 338 if (nfsd_max_blksize == 0)
320 /* choose a suitable default */ 339 nfsd_max_blksize = nfsd_get_default_max_blksize();
321 struct sysinfo i;
322 si_meminfo(&i);
323 /* Aim for 1/4096 of memory per thread
324 * This gives 1MB on 4Gig machines
325 * But only uses 32K on 128M machines.
326 * Bottom out at 8K on 32M and smaller.
327 * Of course, this is only a default.
328 */
329 nfsd_max_blksize = NFSSVC_MAXBLKSIZE;
330 i.totalram <<= PAGE_SHIFT - 12;
331 while (nfsd_max_blksize > i.totalram &&
332 nfsd_max_blksize >= 8*1024*2)
333 nfsd_max_blksize /= 2;
334 }
335 nfsd_reset_versions(); 340 nfsd_reset_versions();
336
337 nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize, 341 nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
338 nfsd_last_thread, nfsd, THIS_MODULE); 342 nfsd_last_thread, nfsd, THIS_MODULE);
339 if (nfsd_serv == NULL) 343 if (nfsd_serv == NULL)
@@ -341,7 +345,7 @@ int nfsd_create_serv(void)
341 345
342 set_max_drc(); 346 set_max_drc();
343 do_gettimeofday(&nfssvc_boot); /* record boot time */ 347 do_gettimeofday(&nfssvc_boot); /* record boot time */
344 return err; 348 return 0;
345} 349}
346 350
347int nfsd_nrpools(void) 351int nfsd_nrpools(void)
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index ffb5df1db94f..89ab137d379a 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -128,12 +128,14 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s)
128 (NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE) 128 (NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE)
129 129
130struct nfsd4_slot { 130struct nfsd4_slot {
131 bool sl_inuse;
132 bool sl_cachethis;
133 u16 sl_opcnt;
134 u32 sl_seqid; 131 u32 sl_seqid;
135 __be32 sl_status; 132 __be32 sl_status;
136 u32 sl_datalen; 133 u32 sl_datalen;
134 u16 sl_opcnt;
135#define NFSD4_SLOT_INUSE (1 << 0)
136#define NFSD4_SLOT_CACHETHIS (1 << 1)
137#define NFSD4_SLOT_INITIALIZED (1 << 2)
138 u8 sl_flags;
137 char sl_data[]; 139 char sl_data[];
138}; 140};
139 141
@@ -196,18 +198,7 @@ struct nfsd4_session {
196 struct nfsd4_slot *se_slots[]; /* forward channel slots */ 198 struct nfsd4_slot *se_slots[]; /* forward channel slots */
197}; 199};
198 200
199static inline void 201extern void nfsd4_put_session(struct nfsd4_session *ses);
200nfsd4_put_session(struct nfsd4_session *ses)
201{
202 extern void free_session(struct kref *kref);
203 kref_put(&ses->se_ref, free_session);
204}
205
206static inline void
207nfsd4_get_session(struct nfsd4_session *ses)
208{
209 kref_get(&ses->se_ref);
210}
211 202
212/* formatted contents of nfs4_sessionid */ 203/* formatted contents of nfs4_sessionid */
213struct nfsd4_sessionid { 204struct nfsd4_sessionid {
@@ -245,14 +236,17 @@ struct nfs4_client {
245 struct svc_cred cl_cred; /* setclientid principal */ 236 struct svc_cred cl_cred; /* setclientid principal */
246 clientid_t cl_clientid; /* generated by server */ 237 clientid_t cl_clientid; /* generated by server */
247 nfs4_verifier cl_confirm; /* generated by server */ 238 nfs4_verifier cl_confirm; /* generated by server */
248 u32 cl_firststate; /* recovery dir creation */
249 u32 cl_minorversion; 239 u32 cl_minorversion;
250 240
251 /* for v4.0 and v4.1 callbacks: */ 241 /* for v4.0 and v4.1 callbacks: */
252 struct nfs4_cb_conn cl_cb_conn; 242 struct nfs4_cb_conn cl_cb_conn;
253#define NFSD4_CLIENT_CB_UPDATE 1 243#define NFSD4_CLIENT_CB_UPDATE (0)
254#define NFSD4_CLIENT_KILL 2 244#define NFSD4_CLIENT_CB_KILL (1)
255 unsigned long cl_cb_flags; 245#define NFSD4_CLIENT_STABLE (2) /* client on stable storage */
246#define NFSD4_CLIENT_RECLAIM_COMPLETE (3) /* reclaim_complete done */
247#define NFSD4_CLIENT_CB_FLAG_MASK (1 << NFSD4_CLIENT_CB_UPDATE | \
248 1 << NFSD4_CLIENT_CB_KILL)
249 unsigned long cl_flags;
256 struct rpc_clnt *cl_cb_client; 250 struct rpc_clnt *cl_cb_client;
257 u32 cl_cb_ident; 251 u32 cl_cb_ident;
258#define NFSD4_CB_UP 0 252#define NFSD4_CB_UP 0
@@ -463,6 +457,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
463extern void nfs4_lock_state(void); 457extern void nfs4_lock_state(void);
464extern void nfs4_unlock_state(void); 458extern void nfs4_unlock_state(void);
465extern int nfs4_in_grace(void); 459extern int nfs4_in_grace(void);
460extern void nfs4_release_reclaim(void);
461extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(struct nfs4_client *crp);
466extern __be32 nfs4_check_open_reclaim(clientid_t *clid); 462extern __be32 nfs4_check_open_reclaim(clientid_t *clid);
467extern void nfs4_free_openowner(struct nfs4_openowner *); 463extern void nfs4_free_openowner(struct nfs4_openowner *);
468extern void nfs4_free_lockowner(struct nfs4_lockowner *); 464extern void nfs4_free_lockowner(struct nfs4_lockowner *);
@@ -477,16 +473,17 @@ extern void nfsd4_destroy_callback_queue(void);
477extern void nfsd4_shutdown_callback(struct nfs4_client *); 473extern void nfsd4_shutdown_callback(struct nfs4_client *);
478extern void nfs4_put_delegation(struct nfs4_delegation *dp); 474extern void nfs4_put_delegation(struct nfs4_delegation *dp);
479extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname); 475extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname);
480extern void nfsd4_init_recdir(void);
481extern int nfsd4_recdir_load(void);
482extern void nfsd4_shutdown_recdir(void);
483extern int nfs4_client_to_reclaim(const char *name); 476extern int nfs4_client_to_reclaim(const char *name);
484extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id); 477extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id);
485extern void nfsd4_recdir_purge_old(void);
486extern void nfsd4_create_clid_dir(struct nfs4_client *clp);
487extern void nfsd4_remove_clid_dir(struct nfs4_client *clp);
488extern void release_session_client(struct nfsd4_session *); 478extern void release_session_client(struct nfsd4_session *);
489extern __be32 nfs4_validate_stateid(struct nfs4_client *, stateid_t *); 479extern __be32 nfs4_validate_stateid(struct nfs4_client *, stateid_t *);
490extern void nfsd4_purge_closed_stateid(struct nfs4_stateowner *); 480extern void nfsd4_purge_closed_stateid(struct nfs4_stateowner *);
491 481
482/* nfs4recover operations */
483extern int nfsd4_client_tracking_init(struct net *net);
484extern void nfsd4_client_tracking_exit(struct net *net);
485extern void nfsd4_client_record_create(struct nfs4_client *clp);
486extern void nfsd4_client_record_remove(struct nfs4_client *clp);
487extern int nfsd4_client_record_check(struct nfs4_client *clp);
488extern void nfsd4_record_grace_done(struct net *net, time_t boot_time);
492#endif /* NFSD4_STATE_H */ 489#endif /* NFSD4_STATE_H */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index e59f71d0cf73..296d671654d6 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -737,12 +737,13 @@ static int nfsd_open_break_lease(struct inode *inode, int access)
737 737
738/* 738/*
739 * Open an existing file or directory. 739 * Open an existing file or directory.
740 * The access argument indicates the type of open (read/write/lock) 740 * The may_flags argument indicates the type of open (read/write/lock)
741 * and additional flags.
741 * N.B. After this call fhp needs an fh_put 742 * N.B. After this call fhp needs an fh_put
742 */ 743 */
743__be32 744__be32
744nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, 745nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
745 int access, struct file **filp) 746 int may_flags, struct file **filp)
746{ 747{
747 struct dentry *dentry; 748 struct dentry *dentry;
748 struct inode *inode; 749 struct inode *inode;
@@ -757,7 +758,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
757 * and (hopefully) checked permission - so allow OWNER_OVERRIDE 758 * and (hopefully) checked permission - so allow OWNER_OVERRIDE
758 * in case a chmod has now revoked permission. 759 * in case a chmod has now revoked permission.
759 */ 760 */
760 err = fh_verify(rqstp, fhp, type, access | NFSD_MAY_OWNER_OVERRIDE); 761 err = fh_verify(rqstp, fhp, type, may_flags | NFSD_MAY_OWNER_OVERRIDE);
761 if (err) 762 if (err)
762 goto out; 763 goto out;
763 764
@@ -768,7 +769,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
768 * or any access when mandatory locking enabled 769 * or any access when mandatory locking enabled
769 */ 770 */
770 err = nfserr_perm; 771 err = nfserr_perm;
771 if (IS_APPEND(inode) && (access & NFSD_MAY_WRITE)) 772 if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
772 goto out; 773 goto out;
773 /* 774 /*
774 * We must ignore files (but only files) which might have mandatory 775 * We must ignore files (but only files) which might have mandatory
@@ -781,12 +782,12 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
781 if (!inode->i_fop) 782 if (!inode->i_fop)
782 goto out; 783 goto out;
783 784
784 host_err = nfsd_open_break_lease(inode, access); 785 host_err = nfsd_open_break_lease(inode, may_flags);
785 if (host_err) /* NOMEM or WOULDBLOCK */ 786 if (host_err) /* NOMEM or WOULDBLOCK */
786 goto out_nfserr; 787 goto out_nfserr;
787 788
788 if (access & NFSD_MAY_WRITE) { 789 if (may_flags & NFSD_MAY_WRITE) {
789 if (access & NFSD_MAY_READ) 790 if (may_flags & NFSD_MAY_READ)
790 flags = O_RDWR|O_LARGEFILE; 791 flags = O_RDWR|O_LARGEFILE;
791 else 792 else
792 flags = O_WRONLY|O_LARGEFILE; 793 flags = O_WRONLY|O_LARGEFILE;
@@ -795,8 +796,15 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
795 flags, current_cred()); 796 flags, current_cred());
796 if (IS_ERR(*filp)) 797 if (IS_ERR(*filp))
797 host_err = PTR_ERR(*filp); 798 host_err = PTR_ERR(*filp);
798 else 799 else {
799 host_err = ima_file_check(*filp, access); 800 host_err = ima_file_check(*filp, may_flags);
801
802 if (may_flags & NFSD_MAY_64BIT_COOKIE)
803 (*filp)->f_mode |= FMODE_64BITHASH;
804 else
805 (*filp)->f_mode |= FMODE_32BITHASH;
806 }
807
800out_nfserr: 808out_nfserr:
801 err = nfserrno(host_err); 809 err = nfserrno(host_err);
802out: 810out:
@@ -2021,8 +2029,13 @@ nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
2021 __be32 err; 2029 __be32 err;
2022 struct file *file; 2030 struct file *file;
2023 loff_t offset = *offsetp; 2031 loff_t offset = *offsetp;
2032 int may_flags = NFSD_MAY_READ;
2033
2034 /* NFSv2 only supports 32 bit cookies */
2035 if (rqstp->rq_vers > 2)
2036 may_flags |= NFSD_MAY_64BIT_COOKIE;
2024 2037
2025 err = nfsd_open(rqstp, fhp, S_IFDIR, NFSD_MAY_READ, &file); 2038 err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
2026 if (err) 2039 if (err)
2027 goto out; 2040 goto out;
2028 2041
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 1dcd238e11a0..ec0611b2b738 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -27,6 +27,8 @@
27#define NFSD_MAY_BYPASS_GSS 0x400 27#define NFSD_MAY_BYPASS_GSS 0x400
28#define NFSD_MAY_READ_IF_EXEC 0x800 28#define NFSD_MAY_READ_IF_EXEC 0x800
29 29
30#define NFSD_MAY_64BIT_COOKIE 0x1000 /* 64 bit readdir cookies for >= NFSv3 */
31
30#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE) 32#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
31#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC) 33#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
32 34
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 2364747ee97d..1b3501598ab5 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -43,6 +43,13 @@
43#define NFSD4_MAX_TAGLEN 128 43#define NFSD4_MAX_TAGLEN 128
44#define XDR_LEN(n) (((n) + 3) & ~3) 44#define XDR_LEN(n) (((n) + 3) & ~3)
45 45
46#define CURRENT_STATE_ID_FLAG (1<<0)
47#define SAVED_STATE_ID_FLAG (1<<1)
48
49#define SET_STATE_ID(c, f) ((c)->sid_flags |= (f))
50#define HAS_STATE_ID(c, f) ((c)->sid_flags & (f))
51#define CLEAR_STATE_ID(c, f) ((c)->sid_flags &= ~(f))
52
46struct nfsd4_compound_state { 53struct nfsd4_compound_state {
47 struct svc_fh current_fh; 54 struct svc_fh current_fh;
48 struct svc_fh save_fh; 55 struct svc_fh save_fh;
@@ -54,6 +61,10 @@ struct nfsd4_compound_state {
54 size_t iovlen; 61 size_t iovlen;
55 u32 minorversion; 62 u32 minorversion;
56 u32 status; 63 u32 status;
64 stateid_t current_stateid;
65 stateid_t save_stateid;
66 /* to indicate current and saved state id presents */
67 u32 sid_flags;
57}; 68};
58 69
59static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs) 70static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs)
@@ -212,16 +223,19 @@ struct nfsd4_open {
212 struct xdr_netobj op_fname; /* request - everything but CLAIM_PREV */ 223 struct xdr_netobj op_fname; /* request - everything but CLAIM_PREV */
213 u32 op_delegate_type; /* request - CLAIM_PREV only */ 224 u32 op_delegate_type; /* request - CLAIM_PREV only */
214 stateid_t op_delegate_stateid; /* request - response */ 225 stateid_t op_delegate_stateid; /* request - response */
226 u32 op_why_no_deleg; /* response - DELEG_NONE_EXT only */
215 u32 op_create; /* request */ 227 u32 op_create; /* request */
216 u32 op_createmode; /* request */ 228 u32 op_createmode; /* request */
217 u32 op_bmval[3]; /* request */ 229 u32 op_bmval[3]; /* request */
218 struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */ 230 struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
219 nfs4_verifier verf; /* EXCLUSIVE4 */ 231 nfs4_verifier op_verf __attribute__((aligned(32)));
232 /* EXCLUSIVE4 */
220 clientid_t op_clientid; /* request */ 233 clientid_t op_clientid; /* request */
221 struct xdr_netobj op_owner; /* request */ 234 struct xdr_netobj op_owner; /* request */
222 u32 op_seqid; /* request */ 235 u32 op_seqid; /* request */
223 u32 op_share_access; /* request */ 236 u32 op_share_access; /* request */
224 u32 op_share_deny; /* request */ 237 u32 op_share_deny; /* request */
238 u32 op_deleg_want; /* request */
225 stateid_t op_stateid; /* response */ 239 stateid_t op_stateid; /* response */
226 u32 op_recall; /* recall */ 240 u32 op_recall; /* recall */
227 struct nfsd4_change_info op_cinfo; /* response */ 241 struct nfsd4_change_info op_cinfo; /* response */
@@ -234,7 +248,6 @@ struct nfsd4_open {
234 struct nfs4_acl *op_acl; 248 struct nfs4_acl *op_acl;
235}; 249};
236#define op_iattr iattr 250#define op_iattr iattr
237#define op_verf verf
238 251
239struct nfsd4_open_confirm { 252struct nfsd4_open_confirm {
240 stateid_t oc_req_stateid /* request */; 253 stateid_t oc_req_stateid /* request */;
@@ -245,8 +258,9 @@ struct nfsd4_open_confirm {
245struct nfsd4_open_downgrade { 258struct nfsd4_open_downgrade {
246 stateid_t od_stateid; 259 stateid_t od_stateid;
247 u32 od_seqid; 260 u32 od_seqid;
248 u32 od_share_access; 261 u32 od_share_access; /* request */
249 u32 od_share_deny; 262 u32 od_deleg_want; /* request */
263 u32 od_share_deny; /* request */
250}; 264};
251 265
252 266
@@ -343,10 +357,15 @@ struct nfsd4_saved_compoundargs {
343 struct page **pagelist; 357 struct page **pagelist;
344}; 358};
345 359
360struct nfsd4_test_stateid_id {
361 __be32 ts_id_status;
362 stateid_t ts_id_stateid;
363 struct list_head ts_id_list;
364};
365
346struct nfsd4_test_stateid { 366struct nfsd4_test_stateid {
347 __be32 ts_num_ids; 367 __be32 ts_num_ids;
348 struct nfsd4_compoundargs *ts_saved_args; 368 struct list_head ts_stateid_list;
349 struct nfsd4_saved_compoundargs ts_savedp;
350}; 369};
351 370
352struct nfsd4_free_stateid { 371struct nfsd4_free_stateid {
@@ -503,7 +522,8 @@ static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
503 522
504static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) 523static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
505{ 524{
506 return !resp->cstate.slot->sl_cachethis || nfsd4_is_solo_sequence(resp); 525 return !(resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)
526 || nfsd4_is_solo_sequence(resp);
507} 527}
508 528
509#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs) 529#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs)
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index a6fda3c188aa..a1a1bfd652c9 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -28,8 +28,6 @@
28#include "suballoc.h" 28#include "suballoc.h"
29#include "move_extents.h" 29#include "move_extents.h"
30 30
31#include <linux/ext2_fs.h>
32
33#define o2info_from_user(a, b) \ 31#define o2info_from_user(a, b) \
34 copy_from_user(&(a), (b), sizeof(a)) 32 copy_from_user(&(a), (b), sizeof(a))
35#define o2info_to_user(a, b) \ 33#define o2info_to_user(a, b) \
diff --git a/fs/open.c b/fs/open.c
index 77becc041149..5720854156db 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -836,7 +836,7 @@ EXPORT_SYMBOL(dentry_open);
836static void __put_unused_fd(struct files_struct *files, unsigned int fd) 836static void __put_unused_fd(struct files_struct *files, unsigned int fd)
837{ 837{
838 struct fdtable *fdt = files_fdtable(files); 838 struct fdtable *fdt = files_fdtable(files);
839 __FD_CLR(fd, fdt->open_fds); 839 __clear_open_fd(fd, fdt);
840 if (fd < files->next_fd) 840 if (fd < files->next_fd)
841 files->next_fd = fd; 841 files->next_fd = fd;
842} 842}
@@ -1080,7 +1080,7 @@ SYSCALL_DEFINE1(close, unsigned int, fd)
1080 if (!filp) 1080 if (!filp)
1081 goto out_unlock; 1081 goto out_unlock;
1082 rcu_assign_pointer(fdt->fd[fd], NULL); 1082 rcu_assign_pointer(fdt->fd[fd], NULL);
1083 FD_CLR(fd, fdt->close_on_exec); 1083 __clear_close_on_exec(fd, fdt);
1084 __put_unused_fd(files, fd); 1084 __put_unused_fd(files, fd);
1085 spin_unlock(&files->file_lock); 1085 spin_unlock(&files->file_lock);
1086 retval = filp_close(filp, files); 1086 retval = filp_close(filp, files);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 3b42c1418f31..1c8b280146d7 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1753,7 +1753,7 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
1753 1753
1754 fdt = files_fdtable(files); 1754 fdt = files_fdtable(files);
1755 f_flags = file->f_flags & ~O_CLOEXEC; 1755 f_flags = file->f_flags & ~O_CLOEXEC;
1756 if (FD_ISSET(fd, fdt->close_on_exec)) 1756 if (close_on_exec(fd, fdt))
1757 f_flags |= O_CLOEXEC; 1757 f_flags |= O_CLOEXEC;
1758 1758
1759 if (path) { 1759 if (path) {
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index c283832d411d..2b9a7607cbd5 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -783,7 +783,6 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
783 783
784 /* find the first VMA at or above 'addr' */ 784 /* find the first VMA at or above 'addr' */
785 vma = find_vma(walk->mm, addr); 785 vma = find_vma(walk->mm, addr);
786 spin_lock(&walk->mm->page_table_lock);
787 if (pmd_trans_huge_lock(pmd, vma) == 1) { 786 if (pmd_trans_huge_lock(pmd, vma) == 1) {
788 for (; addr != end; addr += PAGE_SIZE) { 787 for (; addr != end; addr += PAGE_SIZE) {
789 unsigned long offset; 788 unsigned long offset;
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index f37c32b94525..50952c9bd06c 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -105,26 +105,12 @@ static const struct inode_operations pstore_dir_inode_operations = {
105 .unlink = pstore_unlink, 105 .unlink = pstore_unlink,
106}; 106};
107 107
108static struct inode *pstore_get_inode(struct super_block *sb, 108static struct inode *pstore_get_inode(struct super_block *sb)
109 const struct inode *dir, int mode, dev_t dev)
110{ 109{
111 struct inode *inode = new_inode(sb); 110 struct inode *inode = new_inode(sb);
112
113 if (inode) { 111 if (inode) {
114 inode->i_ino = get_next_ino(); 112 inode->i_ino = get_next_ino();
115 inode->i_uid = inode->i_gid = 0;
116 inode->i_mode = mode;
117 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 113 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
118 switch (mode & S_IFMT) {
119 case S_IFREG:
120 inode->i_fop = &pstore_file_operations;
121 break;
122 case S_IFDIR:
123 inode->i_op = &pstore_dir_inode_operations;
124 inode->i_fop = &simple_dir_operations;
125 inc_nlink(inode);
126 break;
127 }
128 } 114 }
129 return inode; 115 return inode;
130} 116}
@@ -216,9 +202,11 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id,
216 return rc; 202 return rc;
217 203
218 rc = -ENOMEM; 204 rc = -ENOMEM;
219 inode = pstore_get_inode(pstore_sb, root->d_inode, S_IFREG | 0444, 0); 205 inode = pstore_get_inode(pstore_sb);
220 if (!inode) 206 if (!inode)
221 goto fail; 207 goto fail;
208 inode->i_mode = S_IFREG | 0444;
209 inode->i_fop = &pstore_file_operations;
222 private = kmalloc(sizeof *private + size, GFP_KERNEL); 210 private = kmalloc(sizeof *private + size, GFP_KERNEL);
223 if (!private) 211 if (!private)
224 goto fail_alloc; 212 goto fail_alloc;
@@ -293,10 +281,12 @@ int pstore_fill_super(struct super_block *sb, void *data, int silent)
293 281
294 parse_options(data); 282 parse_options(data);
295 283
296 inode = pstore_get_inode(sb, NULL, S_IFDIR | 0755, 0); 284 inode = pstore_get_inode(sb);
297 if (inode) { 285 if (inode) {
298 /* override ramfs "dir" options so we catch unlink(2) */ 286 inode->i_mode = S_IFDIR | 0755;
299 inode->i_op = &pstore_dir_inode_operations; 287 inode->i_op = &pstore_dir_inode_operations;
288 inode->i_fop = &simple_dir_operations;
289 inc_nlink(inode);
300 } 290 }
301 sb->s_root = d_make_root(inode); 291 sb->s_root = d_make_root(inode);
302 if (!sb->s_root) 292 if (!sb->s_root)
diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c
index 71e2b4d50a0a..f86f51f99ace 100644
--- a/fs/romfs/storage.c
+++ b/fs/romfs/storage.c
@@ -19,7 +19,7 @@
19#endif 19#endif
20 20
21#ifdef CONFIG_ROMFS_ON_MTD 21#ifdef CONFIG_ROMFS_ON_MTD
22#define ROMFS_MTD_READ(sb, ...) ((sb)->s_mtd->read((sb)->s_mtd, ##__VA_ARGS__)) 22#define ROMFS_MTD_READ(sb, ...) mtd_read((sb)->s_mtd, ##__VA_ARGS__)
23 23
24/* 24/*
25 * read data from an romfs image on an MTD device 25 * read data from an romfs image on an MTD device
diff --git a/fs/select.c b/fs/select.c
index 6fb8943d580b..17d33d09fc16 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -348,7 +348,7 @@ static int max_select_fd(unsigned long n, fd_set_bits *fds)
348 set = ~(~0UL << (n & (__NFDBITS-1))); 348 set = ~(~0UL << (n & (__NFDBITS-1)));
349 n /= __NFDBITS; 349 n /= __NFDBITS;
350 fdt = files_fdtable(current->files); 350 fdt = files_fdtable(current->files);
351 open_fds = fdt->open_fds->fds_bits+n; 351 open_fds = fdt->open_fds + n;
352 max = 0; 352 max = 0;
353 if (set) { 353 if (set) {
354 set &= BITS(fds, n); 354 set &= BITS(fds, n);
diff --git a/drivers/acpi/acpica/acconfig.h b/include/acpi/acconfig.h
index 1f30af613e87..03f14856bd09 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -85,6 +85,23 @@
85 */ 85 */
86#define ACPI_CHECKSUM_ABORT FALSE 86#define ACPI_CHECKSUM_ABORT FALSE
87 87
88/*
89 * Generate a version of ACPICA that only supports "reduced hardware"
90 * platforms (as defined in ACPI 5.0). Set to TRUE to generate a specialized
91 * version of ACPICA that ONLY supports the ACPI 5.0 "reduced hardware"
92 * model. In other words, no ACPI hardware is supported.
93 *
94 * If TRUE, this means no support for the following:
95 * PM Event and Control registers
96 * SCI interrupt (and handler)
97 * Fixed Events
98 * General Purpose Events (GPEs)
99 * Global Lock
100 * ACPI PM timer
101 * FACS table (Waking vectors and Global Lock)
102 */
103#define ACPI_REDUCED_HARDWARE FALSE
104
88/****************************************************************************** 105/******************************************************************************
89 * 106 *
90 * Subsystem Constants 107 * Subsystem Constants
@@ -93,7 +110,7 @@
93 110
94/* Version of ACPI supported */ 111/* Version of ACPI supported */
95 112
96#define ACPI_CA_SUPPORT_LEVEL 3 113#define ACPI_CA_SUPPORT_LEVEL 5
97 114
98/* Maximum count for a semaphore object */ 115/* Maximum count for a semaphore object */
99 116
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 5b6c391efc8e..92d6e1d701ff 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -57,6 +57,7 @@
57#define ACPI_SUCCESS(a) (!(a)) 57#define ACPI_SUCCESS(a) (!(a))
58#define ACPI_FAILURE(a) (a) 58#define ACPI_FAILURE(a) (a)
59 59
60#define ACPI_SKIP(a) (a == AE_CTRL_SKIP)
60#define AE_OK (acpi_status) 0x0000 61#define AE_OK (acpi_status) 0x0000
61 62
62/* 63/*
@@ -89,8 +90,9 @@
89#define AE_SAME_HANDLER (acpi_status) (0x0019 | AE_CODE_ENVIRONMENTAL) 90#define AE_SAME_HANDLER (acpi_status) (0x0019 | AE_CODE_ENVIRONMENTAL)
90#define AE_NO_HANDLER (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL) 91#define AE_NO_HANDLER (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL)
91#define AE_OWNER_ID_LIMIT (acpi_status) (0x001B | AE_CODE_ENVIRONMENTAL) 92#define AE_OWNER_ID_LIMIT (acpi_status) (0x001B | AE_CODE_ENVIRONMENTAL)
93#define AE_NOT_CONFIGURED (acpi_status) (0x001C | AE_CODE_ENVIRONMENTAL)
92 94
93#define AE_CODE_ENV_MAX 0x001B 95#define AE_CODE_ENV_MAX 0x001C
94 96
95/* 97/*
96 * Programmer exceptions 98 * Programmer exceptions
@@ -213,7 +215,8 @@ char const *acpi_gbl_exception_names_env[] = {
213 "AE_ABORT_METHOD", 215 "AE_ABORT_METHOD",
214 "AE_SAME_HANDLER", 216 "AE_SAME_HANDLER",
215 "AE_NO_HANDLER", 217 "AE_NO_HANDLER",
216 "AE_OWNER_ID_LIMIT" 218 "AE_OWNER_ID_LIMIT",
219 "AE_NOT_CONFIGURED"
217}; 220};
218 221
219char const *acpi_gbl_exception_names_pgm[] = { 222char const *acpi_gbl_exception_names_pgm[] = {
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 5b5af0d30a97..38f508816e4a 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -46,6 +46,7 @@
46 46
47/* Method names - these methods can appear anywhere in the namespace */ 47/* Method names - these methods can appear anywhere in the namespace */
48 48
49#define METHOD_NAME__SB_ "_SB_"
49#define METHOD_NAME__HID "_HID" 50#define METHOD_NAME__HID "_HID"
50#define METHOD_NAME__CID "_CID" 51#define METHOD_NAME__CID "_CID"
51#define METHOD_NAME__UID "_UID" 52#define METHOD_NAME__UID "_UID"
@@ -64,11 +65,11 @@
64 65
65/* Method names - these methods must appear at the namespace root */ 66/* Method names - these methods must appear at the namespace root */
66 67
67#define METHOD_NAME__BFS "\\_BFS" 68#define METHOD_PATHNAME__BFS "\\_BFS"
68#define METHOD_NAME__GTS "\\_GTS" 69#define METHOD_PATHNAME__GTS "\\_GTS"
69#define METHOD_NAME__PTS "\\_PTS" 70#define METHOD_PATHNAME__PTS "\\_PTS"
70#define METHOD_NAME__SST "\\_SI._SST" 71#define METHOD_PATHNAME__SST "\\_SI._SST"
71#define METHOD_NAME__WAK "\\_WAK" 72#define METHOD_PATHNAME__WAK "\\_WAK"
72 73
73/* Definitions of the predefined namespace names */ 74/* Definitions of the predefined namespace names */
74 75
@@ -79,6 +80,5 @@
79#define ACPI_PREFIX_LOWER (u32) 0x69706361 /* "acpi" */ 80#define ACPI_PREFIX_LOWER (u32) 0x69706361 /* "acpi" */
80 81
81#define ACPI_NS_ROOT_PATH "\\" 82#define ACPI_NS_ROOT_PATH "\\"
82#define ACPI_NS_SYSTEM_BUS "_SB_"
83 83
84#endif /* __ACNAMES_H__ */ 84#endif /* __ACNAMES_H__ */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 6cd5b6403a7b..f1c8ca60e824 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -323,6 +323,8 @@ int acpi_bus_set_power(acpi_handle handle, int state);
323int acpi_bus_update_power(acpi_handle handle, int *state_p); 323int acpi_bus_update_power(acpi_handle handle, int *state_p);
324bool acpi_bus_power_manageable(acpi_handle handle); 324bool acpi_bus_power_manageable(acpi_handle handle);
325bool acpi_bus_can_wakeup(acpi_handle handle); 325bool acpi_bus_can_wakeup(acpi_handle handle);
326int acpi_power_resource_register_device(struct device *dev, acpi_handle handle);
327void acpi_power_resource_unregister_device(struct device *dev, acpi_handle handle);
326#ifdef CONFIG_ACPI_PROC_EVENT 328#ifdef CONFIG_ACPI_PROC_EVENT
327int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data); 329int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
328int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data); 330int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data);
@@ -392,8 +394,13 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p)
392#endif 394#endif
393 395
394#ifdef CONFIG_PM_SLEEP 396#ifdef CONFIG_PM_SLEEP
397int acpi_pm_device_run_wake(struct device *, bool);
395int acpi_pm_device_sleep_wake(struct device *, bool); 398int acpi_pm_device_sleep_wake(struct device *, bool);
396#else 399#else
400static inline int acpi_pm_device_run_wake(struct device *dev, bool enable)
401{
402 return -ENODEV;
403}
397static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable) 404static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
398{ 405{
399 return -ENODEV; 406 return -ENODEV;
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 7c9aebe8a7aa..21a5548c6686 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -95,6 +95,11 @@ acpi_status
95acpi_os_table_override(struct acpi_table_header *existing_table, 95acpi_os_table_override(struct acpi_table_header *existing_table,
96 struct acpi_table_header **new_table); 96 struct acpi_table_header **new_table);
97 97
98acpi_status
99acpi_os_physical_table_override(struct acpi_table_header *existing_table,
100 acpi_physical_address * new_address,
101 u32 *new_table_length);
102
98/* 103/*
99 * Spinlock primitives 104 * Spinlock primitives
100 */ 105 */
@@ -217,14 +222,10 @@ acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width);
217 * Platform and hardware-independent physical memory interfaces 222 * Platform and hardware-independent physical memory interfaces
218 */ 223 */
219acpi_status 224acpi_status
220acpi_os_read_memory(acpi_physical_address address, u32 * value, u32 width); 225acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width);
221acpi_status
222acpi_os_read_memory64(acpi_physical_address address, u64 *value, u32 width);
223 226
224acpi_status 227acpi_status
225acpi_os_write_memory(acpi_physical_address address, u32 value, u32 width); 228acpi_os_write_memory(acpi_physical_address address, u64 value, u32 width);
226acpi_status
227acpi_os_write_memory64(acpi_physical_address address, u64 value, u32 width);
228 229
229/* 230/*
230 * Platform and hardware-independent PCI configuration space access 231 * Platform and hardware-independent PCI configuration space access
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index a28da35ba45e..982110134672 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,8 +47,9 @@
47 47
48/* Current ACPICA subsystem version in YYYYMMDD format */ 48/* Current ACPICA subsystem version in YYYYMMDD format */
49 49
50#define ACPI_CA_VERSION 0x20120111 50#define ACPI_CA_VERSION 0x20120320
51 51
52#include "acconfig.h"
52#include "actypes.h" 53#include "actypes.h"
53#include "actbl.h" 54#include "actbl.h"
54 55
@@ -71,6 +72,33 @@ extern u8 acpi_gbl_copy_dsdt_locally;
71extern u8 acpi_gbl_truncate_io_addresses; 72extern u8 acpi_gbl_truncate_io_addresses;
72extern u8 acpi_gbl_disable_auto_repair; 73extern u8 acpi_gbl_disable_auto_repair;
73 74
75/*
76 * Hardware-reduced prototypes. All interfaces that use these macros will
77 * be configured out of the ACPICA build if the ACPI_REDUCED_HARDWARE flag
78 * is set to TRUE.
79 */
80#if (!ACPI_REDUCED_HARDWARE)
81#define ACPI_HW_DEPENDENT_RETURN_STATUS(prototype) \
82 prototype;
83
84#define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \
85 prototype;
86
87#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
88 prototype;
89
90#else
91#define ACPI_HW_DEPENDENT_RETURN_STATUS(prototype) \
92 static ACPI_INLINE prototype {return(AE_NOT_CONFIGURED);}
93
94#define ACPI_HW_DEPENDENT_RETURN_OK(prototype) \
95 static ACPI_INLINE prototype {return(AE_OK);}
96
97#define ACPI_HW_DEPENDENT_RETURN_VOID(prototype) \
98 static ACPI_INLINE prototype {}
99
100#endif /* !ACPI_REDUCED_HARDWARE */
101
74extern u32 acpi_current_gpe_count; 102extern u32 acpi_current_gpe_count;
75extern struct acpi_table_fadt acpi_gbl_FADT; 103extern struct acpi_table_fadt acpi_gbl_FADT;
76extern u8 acpi_gbl_system_awake_and_running; 104extern u8 acpi_gbl_system_awake_and_running;
@@ -96,9 +124,8 @@ acpi_status acpi_terminate(void);
96acpi_status acpi_subsystem_status(void); 124acpi_status acpi_subsystem_status(void);
97#endif 125#endif
98 126
99acpi_status acpi_enable(void); 127ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable(void))
100 128ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable(void))
101acpi_status acpi_disable(void);
102 129
103#ifdef ACPI_FUTURE_USAGE 130#ifdef ACPI_FUTURE_USAGE
104acpi_status acpi_get_system_info(struct acpi_buffer *ret_buffer); 131acpi_status acpi_get_system_info(struct acpi_buffer *ret_buffer);
@@ -235,17 +262,34 @@ acpi_status acpi_get_parent(acpi_handle object, acpi_handle * out_handle);
235acpi_status 262acpi_status
236acpi_install_initialization_handler(acpi_init_handler handler, u32 function); 263acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
237 264
238acpi_status 265ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
239acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, 266 acpi_install_global_event_handler
240 void *context); 267 (ACPI_GBL_EVENT_HANDLER handler, void *context))
241 268
242acpi_status 269ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
243acpi_install_fixed_event_handler(u32 acpi_event, 270 acpi_install_fixed_event_handler(u32
244 acpi_event_handler handler, void *context); 271 acpi_event,
245 272 acpi_event_handler
246acpi_status 273 handler,
247acpi_remove_fixed_event_handler(u32 acpi_event, acpi_event_handler handler); 274 void
248 275 *context))
276ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
277 acpi_remove_fixed_event_handler(u32 acpi_event,
278 acpi_event_handler
279 handler))
280ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
281 acpi_install_gpe_handler(acpi_handle
282 gpe_device,
283 u32 gpe_number,
284 u32 type,
285 acpi_gpe_handler
286 address,
287 void *context))
288ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
289 acpi_remove_gpe_handler(acpi_handle gpe_device,
290 u32 gpe_number,
291 acpi_gpe_handler
292 address))
249acpi_status 293acpi_status
250acpi_install_notify_handler(acpi_handle device, 294acpi_install_notify_handler(acpi_handle device,
251 u32 handler_type, 295 u32 handler_type,
@@ -266,15 +310,6 @@ acpi_remove_address_space_handler(acpi_handle device,
266 acpi_adr_space_type space_id, 310 acpi_adr_space_type space_id,
267 acpi_adr_space_handler handler); 311 acpi_adr_space_handler handler);
268 312
269acpi_status
270acpi_install_gpe_handler(acpi_handle gpe_device,
271 u32 gpe_number,
272 u32 type, acpi_gpe_handler address, void *context);
273
274acpi_status
275acpi_remove_gpe_handler(acpi_handle gpe_device,
276 u32 gpe_number, acpi_gpe_handler address);
277
278#ifdef ACPI_FUTURE_USAGE 313#ifdef ACPI_FUTURE_USAGE
279acpi_status acpi_install_exception_handler(acpi_exception_handler handler); 314acpi_status acpi_install_exception_handler(acpi_exception_handler handler);
280#endif 315#endif
@@ -284,9 +319,11 @@ acpi_status acpi_install_interface_handler(acpi_interface_handler handler);
284/* 319/*
285 * Global Lock interfaces 320 * Global Lock interfaces
286 */ 321 */
287acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle); 322ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
288 323 acpi_acquire_global_lock(u16 timeout,
289acpi_status acpi_release_global_lock(u32 handle); 324 u32 *handle))
325ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
326 acpi_release_global_lock(u32 handle))
290 327
291/* 328/*
292 * Interfaces to AML mutex objects 329 * Interfaces to AML mutex objects
@@ -299,47 +336,75 @@ acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname);
299/* 336/*
300 * Fixed Event interfaces 337 * Fixed Event interfaces
301 */ 338 */
302acpi_status acpi_enable_event(u32 event, u32 flags); 339ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
303 340 acpi_enable_event(u32 event, u32 flags))
304acpi_status acpi_disable_event(u32 event, u32 flags);
305 341
306acpi_status acpi_clear_event(u32 event); 342ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
343 acpi_disable_event(u32 event, u32 flags))
307 344
308acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status); 345ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_clear_event(u32 event))
309 346
347ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
348 acpi_get_event_status(u32 event,
349 acpi_event_status
350 *event_status))
310/* 351/*
311 * General Purpose Event (GPE) Interfaces 352 * General Purpose Event (GPE) Interfaces
312 */ 353 */
313acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number); 354ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_update_all_gpes(void))
314 355
315acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number); 356ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
316 357 acpi_enable_gpe(acpi_handle gpe_device,
317acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number); 358 u32 gpe_number))
318 359
319acpi_status 360ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
320acpi_setup_gpe_for_wake(acpi_handle parent_device, 361 acpi_disable_gpe(acpi_handle gpe_device,
321 acpi_handle gpe_device, u32 gpe_number); 362 u32 gpe_number))
322 363
323acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action); 364ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
324 365 acpi_clear_gpe(acpi_handle gpe_device,
325acpi_status 366 u32 gpe_number))
326acpi_get_gpe_status(acpi_handle gpe_device, 367
327 u32 gpe_number, acpi_event_status *event_status); 368ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
328 369 acpi_set_gpe(acpi_handle gpe_device,
329acpi_status acpi_disable_all_gpes(void); 370 u32 gpe_number, u8 action))
330 371
331acpi_status acpi_enable_all_runtime_gpes(void); 372ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
332 373 acpi_finish_gpe(acpi_handle gpe_device,
333acpi_status acpi_get_gpe_device(u32 gpe_index, acpi_handle *gpe_device); 374 u32 gpe_number))
334 375
335acpi_status 376ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
336acpi_install_gpe_block(acpi_handle gpe_device, 377 acpi_setup_gpe_for_wake(acpi_handle
337 struct acpi_generic_address *gpe_block_address, 378 parent_device,
338 u32 register_count, u32 interrupt_number); 379 acpi_handle gpe_device,
339 380 u32 gpe_number))
340acpi_status acpi_remove_gpe_block(acpi_handle gpe_device); 381ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
341 382 acpi_set_gpe_wake_mask(acpi_handle gpe_device,
342acpi_status acpi_update_all_gpes(void); 383 u32 gpe_number,
384 u8 action))
385ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
386 acpi_get_gpe_status(acpi_handle gpe_device,
387 u32 gpe_number,
388 acpi_event_status
389 *event_status))
390
391ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
392
393ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
394
395ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
396 acpi_get_gpe_device(u32 gpe_index,
397 acpi_handle * gpe_device))
398
399ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
400 acpi_install_gpe_block(acpi_handle gpe_device,
401 struct
402 acpi_generic_address
403 *gpe_block_address,
404 u32 register_count,
405 u32 interrupt_number))
406ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
407 acpi_remove_gpe_block(acpi_handle gpe_device))
343 408
344/* 409/*
345 * Resource interfaces 410 * Resource interfaces
@@ -391,34 +456,60 @@ acpi_buffer_to_resource(u8 *aml_buffer,
391 */ 456 */
392acpi_status acpi_reset(void); 457acpi_status acpi_reset(void);
393 458
394acpi_status acpi_read_bit_register(u32 register_id, u32 *return_value); 459ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
460 acpi_read_bit_register(u32 register_id,
461 u32 *return_value))
395 462
396acpi_status acpi_write_bit_register(u32 register_id, u32 value); 463ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
464 acpi_write_bit_register(u32 register_id,
465 u32 value))
397 466
398acpi_status acpi_set_firmware_waking_vector(u32 physical_address); 467ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
468 acpi_set_firmware_waking_vector(u32
469 physical_address))
399 470
400#if ACPI_MACHINE_WIDTH == 64 471#if ACPI_MACHINE_WIDTH == 64
401acpi_status acpi_set_firmware_waking_vector64(u64 physical_address); 472ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
473 acpi_set_firmware_waking_vector64(u64
474 physical_address))
402#endif 475#endif
403 476
404acpi_status acpi_read(u64 *value, struct acpi_generic_address *reg); 477acpi_status acpi_read(u64 *value, struct acpi_generic_address *reg);
405 478
406acpi_status acpi_write(u64 value, struct acpi_generic_address *reg); 479acpi_status acpi_write(u64 value, struct acpi_generic_address *reg);
407 480
481/*
482 * Sleep/Wake interfaces
483 */
408acpi_status 484acpi_status
409acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b); 485acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b);
410 486
411acpi_status acpi_enter_sleep_state_prep(u8 sleep_state); 487acpi_status acpi_enter_sleep_state_prep(u8 sleep_state);
412 488
413acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state); 489acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags);
414 490
415acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void); 491ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void))
416 492
417acpi_status acpi_leave_sleep_state_prep(u8 sleep_state); 493acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags);
418 494
419acpi_status acpi_leave_sleep_state(u8 sleep_state); 495acpi_status acpi_leave_sleep_state(u8 sleep_state);
420 496
421/* 497/*
498 * ACPI Timer interfaces
499 */
500#ifdef ACPI_FUTURE_USAGE
501ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
502 acpi_get_timer_resolution(u32 *resolution))
503
504ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_get_timer(u32 *ticks))
505
506ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
507 acpi_get_timer_duration(u32 start_ticks,
508 u32 end_ticks,
509 u32 *time_elapsed))
510#endif /* ACPI_FUTURE_USAGE */
511
512/*
422 * Error/Warning output 513 * Error/Warning output
423 */ 514 */
424void ACPI_INTERNAL_VAR_XFACE 515void ACPI_INTERNAL_VAR_XFACE
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 8e1b92f6f650..8dea54665dcf 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -309,6 +309,13 @@ enum acpi_prefered_pm_profiles {
309 PM_TABLET = 8 309 PM_TABLET = 8
310}; 310};
311 311
312/* Values for sleep_status and sleep_control registers (V5 FADT) */
313
314#define ACPI_X_WAKE_STATUS 0x80
315#define ACPI_X_SLEEP_TYPE_MASK 0x1C
316#define ACPI_X_SLEEP_TYPE_POSITION 0x02
317#define ACPI_X_SLEEP_ENABLE 0x20
318
312/* Reset to default packing */ 319/* Reset to default packing */
313 320
314#pragma pack() 321#pragma pack()
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index d5dee7ce9474..eba66043cf1b 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -518,6 +518,13 @@ typedef u64 acpi_integer;
518#define ACPI_SLEEP_TYPE_INVALID 0xFF 518#define ACPI_SLEEP_TYPE_INVALID 0xFF
519 519
520/* 520/*
521 * Sleep/Wake flags
522 */
523#define ACPI_NO_OPTIONAL_METHODS 0x00 /* Do not execute any optional methods */
524#define ACPI_EXECUTE_GTS 0x01 /* For enter sleep interface */
525#define ACPI_EXECUTE_BFS 0x02 /* For leave sleep prep interface */
526
527/*
521 * Standard notify values 528 * Standard notify values
522 */ 529 */
523#define ACPI_NOTIFY_BUS_CHECK (u8) 0x00 530#define ACPI_NOTIFY_BUS_CHECK (u8) 0x00
@@ -532,8 +539,9 @@ typedef u64 acpi_integer;
532#define ACPI_NOTIFY_DEVICE_PLD_CHECK (u8) 0x09 539#define ACPI_NOTIFY_DEVICE_PLD_CHECK (u8) 0x09
533#define ACPI_NOTIFY_RESERVED (u8) 0x0A 540#define ACPI_NOTIFY_RESERVED (u8) 0x0A
534#define ACPI_NOTIFY_LOCALITY_UPDATE (u8) 0x0B 541#define ACPI_NOTIFY_LOCALITY_UPDATE (u8) 0x0B
542#define ACPI_NOTIFY_SHUTDOWN_REQUEST (u8) 0x0C
535 543
536#define ACPI_NOTIFY_MAX 0x0B 544#define ACPI_NOTIFY_MAX 0x0C
537 545
538/* 546/*
539 * Types associated with ACPI names and objects. The first group of 547 * Types associated with ACPI names and objects. The first group of
@@ -698,7 +706,8 @@ typedef u32 acpi_event_status;
698#define ACPI_ALL_NOTIFY (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY) 706#define ACPI_ALL_NOTIFY (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY)
699#define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3 707#define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3
700 708
701#define ACPI_MAX_SYS_NOTIFY 0x7f 709#define ACPI_MAX_SYS_NOTIFY 0x7F
710#define ACPI_MAX_DEVICE_SPECIFIC_NOTIFY 0xBF
702 711
703/* Address Space (Operation Region) Types */ 712/* Address Space (Operation Region) Types */
704 713
@@ -786,6 +795,15 @@ typedef u8 acpi_adr_space_type;
786#define ACPI_ENABLE_EVENT 1 795#define ACPI_ENABLE_EVENT 1
787#define ACPI_DISABLE_EVENT 0 796#define ACPI_DISABLE_EVENT 0
788 797
798/* Sleep function dispatch */
799
800typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state, u8 flags);
801
802struct acpi_sleep_functions {
803 ACPI_SLEEP_FUNCTION legacy_function;
804 ACPI_SLEEP_FUNCTION extended_function;
805};
806
789/* 807/*
790 * External ACPI object definition 808 * External ACPI object definition
791 */ 809 */
diff --git a/include/asm-generic/posix_types.h b/include/asm-generic/posix_types.h
index 3dab00860e71..91d44bd4dde3 100644
--- a/include/asm-generic/posix_types.h
+++ b/include/asm-generic/posix_types.h
@@ -10,8 +10,13 @@
10 * architectures, so that you can override them. 10 * architectures, so that you can override them.
11 */ 11 */
12 12
13#ifndef __kernel_long_t
14typedef long __kernel_long_t;
15typedef unsigned long __kernel_ulong_t;
16#endif
17
13#ifndef __kernel_ino_t 18#ifndef __kernel_ino_t
14typedef unsigned long __kernel_ino_t; 19typedef __kernel_ulong_t __kernel_ino_t;
15#endif 20#endif
16 21
17#ifndef __kernel_mode_t 22#ifndef __kernel_mode_t
@@ -19,7 +24,7 @@ typedef unsigned int __kernel_mode_t;
19#endif 24#endif
20 25
21#ifndef __kernel_nlink_t 26#ifndef __kernel_nlink_t
22typedef unsigned long __kernel_nlink_t; 27typedef __kernel_ulong_t __kernel_nlink_t;
23#endif 28#endif
24 29
25#ifndef __kernel_pid_t 30#ifndef __kernel_pid_t
@@ -36,7 +41,7 @@ typedef unsigned int __kernel_gid_t;
36#endif 41#endif
37 42
38#ifndef __kernel_suseconds_t 43#ifndef __kernel_suseconds_t
39typedef long __kernel_suseconds_t; 44typedef __kernel_long_t __kernel_suseconds_t;
40#endif 45#endif
41 46
42#ifndef __kernel_daddr_t 47#ifndef __kernel_daddr_t
@@ -44,8 +49,8 @@ typedef int __kernel_daddr_t;
44#endif 49#endif
45 50
46#ifndef __kernel_uid32_t 51#ifndef __kernel_uid32_t
47typedef __kernel_uid_t __kernel_uid32_t; 52typedef unsigned int __kernel_uid32_t;
48typedef __kernel_gid_t __kernel_gid32_t; 53typedef unsigned int __kernel_gid32_t;
49#endif 54#endif
50 55
51#ifndef __kernel_old_uid_t 56#ifndef __kernel_old_uid_t
@@ -67,99 +72,29 @@ typedef unsigned int __kernel_size_t;
67typedef int __kernel_ssize_t; 72typedef int __kernel_ssize_t;
68typedef int __kernel_ptrdiff_t; 73typedef int __kernel_ptrdiff_t;
69#else 74#else
70typedef unsigned long __kernel_size_t; 75typedef __kernel_ulong_t __kernel_size_t;
71typedef long __kernel_ssize_t; 76typedef __kernel_long_t __kernel_ssize_t;
72typedef long __kernel_ptrdiff_t; 77typedef __kernel_long_t __kernel_ptrdiff_t;
73#endif 78#endif
74#endif 79#endif
75 80
81#ifndef __kernel_fsid_t
82typedef struct {
83 int val[2];
84} __kernel_fsid_t;
85#endif
86
76/* 87/*
77 * anything below here should be completely generic 88 * anything below here should be completely generic
78 */ 89 */
79typedef long __kernel_off_t; 90typedef __kernel_long_t __kernel_off_t;
80typedef long long __kernel_loff_t; 91typedef long long __kernel_loff_t;
81typedef long __kernel_time_t; 92typedef __kernel_long_t __kernel_time_t;
82typedef long __kernel_clock_t; 93typedef __kernel_long_t __kernel_clock_t;
83typedef int __kernel_timer_t; 94typedef int __kernel_timer_t;
84typedef int __kernel_clockid_t; 95typedef int __kernel_clockid_t;
85typedef char * __kernel_caddr_t; 96typedef char * __kernel_caddr_t;
86typedef unsigned short __kernel_uid16_t; 97typedef unsigned short __kernel_uid16_t;
87typedef unsigned short __kernel_gid16_t; 98typedef unsigned short __kernel_gid16_t;
88 99
89typedef struct {
90 int val[2];
91} __kernel_fsid_t;
92
93#ifdef __KERNEL__
94
95#undef __FD_SET
96static inline void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
97{
98 unsigned long __tmp = __fd / __NFDBITS;
99 unsigned long __rem = __fd % __NFDBITS;
100 __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
101}
102
103#undef __FD_CLR
104static inline void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
105{
106 unsigned long __tmp = __fd / __NFDBITS;
107 unsigned long __rem = __fd % __NFDBITS;
108 __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
109}
110
111#undef __FD_ISSET
112static inline int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
113{
114 unsigned long __tmp = __fd / __NFDBITS;
115 unsigned long __rem = __fd % __NFDBITS;
116 return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
117}
118
119/*
120 * This will unroll the loop for the normal constant case (8 ints,
121 * for a 256-bit fd_set)
122 */
123#undef __FD_ZERO
124static inline void __FD_ZERO(__kernel_fd_set *__p)
125{
126 unsigned long *__tmp = __p->fds_bits;
127 int __i;
128
129 if (__builtin_constant_p(__FDSET_LONGS)) {
130 switch (__FDSET_LONGS) {
131 case 16:
132 __tmp[ 0] = 0; __tmp[ 1] = 0;
133 __tmp[ 2] = 0; __tmp[ 3] = 0;
134 __tmp[ 4] = 0; __tmp[ 5] = 0;
135 __tmp[ 6] = 0; __tmp[ 7] = 0;
136 __tmp[ 8] = 0; __tmp[ 9] = 0;
137 __tmp[10] = 0; __tmp[11] = 0;
138 __tmp[12] = 0; __tmp[13] = 0;
139 __tmp[14] = 0; __tmp[15] = 0;
140 return;
141
142 case 8:
143 __tmp[ 0] = 0; __tmp[ 1] = 0;
144 __tmp[ 2] = 0; __tmp[ 3] = 0;
145 __tmp[ 4] = 0; __tmp[ 5] = 0;
146 __tmp[ 6] = 0; __tmp[ 7] = 0;
147 return;
148
149 case 4:
150 __tmp[ 0] = 0; __tmp[ 1] = 0;
151 __tmp[ 2] = 0; __tmp[ 3] = 0;
152 return;
153 }
154 }
155 __i = __FDSET_LONGS;
156 while (__i) {
157 __i--;
158 *__tmp = 0;
159 __tmp++;
160 }
161}
162
163#endif /* __KERNEL__ */
164
165#endif /* __ASM_GENERIC_POSIX_TYPES_H */ 100#endif /* __ASM_GENERIC_POSIX_TYPES_H */
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 2292d1af9d70..991ef01cd77e 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -218,7 +218,7 @@ __SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
218 218
219/* fs/sendfile.c */ 219/* fs/sendfile.c */
220#define __NR3264_sendfile 71 220#define __NR3264_sendfile 71
221__SC_3264(__NR3264_sendfile, sys_sendfile64, sys_sendfile) 221__SYSCALL(__NR3264_sendfile, sys_sendfile64)
222 222
223/* fs/select.c */ 223/* fs/select.c */
224#define __NR_pselect6 72 224#define __NR_pselect6 72
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index a25555381097..d05df2810354 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -120,7 +120,6 @@ header-y += errno.h
120header-y += errqueue.h 120header-y += errqueue.h
121header-y += ethtool.h 121header-y += ethtool.h
122header-y += eventpoll.h 122header-y += eventpoll.h
123header-y += ext2_fs.h
124header-y += fadvise.h 123header-y += fadvise.h
125header-y += falloc.h 124header-y += falloc.h
126header-y += fanotify.h 125header-y += fanotify.h
@@ -357,6 +356,7 @@ header-y += suspend_ioctls.h
357header-y += swab.h 356header-y += swab.h
358header-y += synclink.h 357header-y += synclink.h
359header-y += sysctl.h 358header-y += sysctl.h
359header-y += sysinfo.h
360header-y += taskstats.h 360header-y += taskstats.h
361header-y += tcp.h 361header-y += tcp.h
362header-y += telephony.h 362header-y += telephony.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index f53fea61f40a..f421dd84f29d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -372,4 +372,14 @@ static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
372 372
373#endif /* !CONFIG_ACPI */ 373#endif /* !CONFIG_ACPI */
374 374
375#ifdef CONFIG_ACPI
376void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
377 u32 pm1a_ctrl, u32 pm1b_ctrl));
378
379acpi_status acpi_os_prepare_sleep(u8 sleep_state,
380 u32 pm1a_control, u32 pm1b_control);
381#else
382#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
383#endif
384
375#endif /*_LINUX_ACPI_H*/ 385#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/aio_abi.h b/include/linux/aio_abi.h
index 2c8731664180..86fa7a71336a 100644
--- a/include/linux/aio_abi.h
+++ b/include/linux/aio_abi.h
@@ -30,7 +30,7 @@
30#include <linux/types.h> 30#include <linux/types.h>
31#include <asm/byteorder.h> 31#include <asm/byteorder.h>
32 32
33typedef unsigned long aio_context_t; 33typedef __kernel_ulong_t aio_context_t;
34 34
35enum { 35enum {
36 IOCB_CMD_PREAD = 0, 36 IOCB_CMD_PREAD = 0,
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 033f6aa670de..e64ce2cfee99 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -47,9 +47,6 @@ enum {
47 * @muxval: a number usually used to poke into some mux regiser to 47 * @muxval: a number usually used to poke into some mux regiser to
48 * mux in the signal to this channel 48 * mux in the signal to this channel
49 * @cctl_opt: default options for the channel control register 49 * @cctl_opt: default options for the channel control register
50 * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
51 * channels. Fill with 'true' if peripheral should be flow controller. Direction
52 * will be selected at Runtime.
53 * @addr: source/target address in physical memory for this DMA channel, 50 * @addr: source/target address in physical memory for this DMA channel,
54 * can be the address of a FIFO register for burst requests for example. 51 * can be the address of a FIFO register for burst requests for example.
55 * This can be left undefined if the PrimeCell API is used for configuring 52 * This can be left undefined if the PrimeCell API is used for configuring
@@ -68,7 +65,6 @@ struct pl08x_channel_data {
68 int max_signal; 65 int max_signal;
69 u32 muxval; 66 u32 muxval;
70 u32 cctl; 67 u32 cctl;
71 bool device_fc;
72 dma_addr_t addr; 68 dma_addr_t addr;
73 bool circular_buffer; 69 bool circular_buffer;
74 bool single; 70 bool single;
@@ -176,13 +172,15 @@ enum pl08x_dma_chan_state {
176 * @runtime_addr: address for RX/TX according to the runtime config 172 * @runtime_addr: address for RX/TX according to the runtime config
177 * @runtime_direction: current direction of this channel according to 173 * @runtime_direction: current direction of this channel according to
178 * runtime config 174 * runtime config
179 * @lc: last completed transaction on this channel
180 * @pend_list: queued transactions pending on this channel 175 * @pend_list: queued transactions pending on this channel
181 * @at: active transaction on this channel 176 * @at: active transaction on this channel
182 * @lock: a lock for this channel data 177 * @lock: a lock for this channel data
183 * @host: a pointer to the host (internal use) 178 * @host: a pointer to the host (internal use)
184 * @state: whether the channel is idle, paused, running etc 179 * @state: whether the channel is idle, paused, running etc
185 * @slave: whether this channel is a device (slave) or for memcpy 180 * @slave: whether this channel is a device (slave) or for memcpy
181 * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
182 * channels. Fill with 'true' if peripheral should be flow controller. Direction
183 * will be selected at Runtime.
186 * @waiting: a TX descriptor on this channel which is waiting for a physical 184 * @waiting: a TX descriptor on this channel which is waiting for a physical
187 * channel to become available 185 * channel to become available
188 */ 186 */
@@ -198,13 +196,13 @@ struct pl08x_dma_chan {
198 u32 src_cctl; 196 u32 src_cctl;
199 u32 dst_cctl; 197 u32 dst_cctl;
200 enum dma_transfer_direction runtime_direction; 198 enum dma_transfer_direction runtime_direction;
201 dma_cookie_t lc;
202 struct list_head pend_list; 199 struct list_head pend_list;
203 struct pl08x_txd *at; 200 struct pl08x_txd *at;
204 spinlock_t lock; 201 spinlock_t lock;
205 struct pl08x_driver_data *host; 202 struct pl08x_driver_data *host;
206 enum pl08x_dma_chan_state state; 203 enum pl08x_dma_chan_state state;
207 bool slave; 204 bool slave;
205 bool device_fc;
208 struct pl08x_txd *waiting; 206 struct pl08x_txd *waiting;
209}; 207};
210 208
diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h
index 12e023c19ac1..fe93758e8403 100644
--- a/include/linux/amba/pl330.h
+++ b/include/linux/amba/pl330.h
@@ -13,7 +13,6 @@
13#define __AMBA_PL330_H_ 13#define __AMBA_PL330_H_
14 14
15#include <linux/dmaengine.h> 15#include <linux/dmaengine.h>
16#include <asm/hardware/pl330.h>
17 16
18struct dma_pl330_platdata { 17struct dma_pl330_platdata {
19 /* 18 /*
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 7e05fcee75a1..5d46217f84ad 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -19,6 +19,10 @@
19#include <asm/siginfo.h> 19#include <asm/siginfo.h>
20#include <asm/signal.h> 20#include <asm/signal.h>
21 21
22#ifndef COMPAT_USE_64BIT_TIME
23#define COMPAT_USE_64BIT_TIME 0
24#endif
25
22#define compat_jiffies_to_clock_t(x) \ 26#define compat_jiffies_to_clock_t(x) \
23 (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) 27 (((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
24 28
@@ -83,10 +87,26 @@ typedef struct {
83 compat_sigset_word sig[_COMPAT_NSIG_WORDS]; 87 compat_sigset_word sig[_COMPAT_NSIG_WORDS];
84} compat_sigset_t; 88} compat_sigset_t;
85 89
90/*
91 * These functions operate strictly on struct compat_time*
92 */
86extern int get_compat_timespec(struct timespec *, 93extern int get_compat_timespec(struct timespec *,
87 const struct compat_timespec __user *); 94 const struct compat_timespec __user *);
88extern int put_compat_timespec(const struct timespec *, 95extern int put_compat_timespec(const struct timespec *,
89 struct compat_timespec __user *); 96 struct compat_timespec __user *);
97extern int get_compat_timeval(struct timeval *,
98 const struct compat_timeval __user *);
99extern int put_compat_timeval(const struct timeval *,
100 struct compat_timeval __user *);
101/*
102 * These functions operate on 32- or 64-bit specs depending on
103 * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments and the
104 * naming as compat_get/put_ rather than get/put_compat_.
105 */
106extern int compat_get_timespec(struct timespec *, const void __user *);
107extern int compat_put_timespec(const struct timespec *, void __user *);
108extern int compat_get_timeval(struct timeval *, const void __user *);
109extern int compat_put_timeval(const struct timeval *, void __user *);
90 110
91struct compat_iovec { 111struct compat_iovec {
92 compat_uptr_t iov_base; 112 compat_uptr_t iov_base;
@@ -224,6 +244,7 @@ struct compat_sysinfo;
224struct compat_sysctl_args; 244struct compat_sysctl_args;
225struct compat_kexec_segment; 245struct compat_kexec_segment;
226struct compat_mq_attr; 246struct compat_mq_attr;
247struct compat_msgbuf;
227 248
228extern void compat_exit_robust_list(struct task_struct *curr); 249extern void compat_exit_robust_list(struct task_struct *curr);
229 250
@@ -234,13 +255,22 @@ asmlinkage long
234compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, 255compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
235 compat_size_t __user *len_ptr); 256 compat_size_t __user *len_ptr);
236 257
258#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC
237long compat_sys_semctl(int first, int second, int third, void __user *uptr); 259long compat_sys_semctl(int first, int second, int third, void __user *uptr);
238long compat_sys_msgsnd(int first, int second, int third, void __user *uptr); 260long compat_sys_msgsnd(int first, int second, int third, void __user *uptr);
239long compat_sys_msgrcv(int first, int second, int msgtyp, int third, 261long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
240 int version, void __user *uptr); 262 int version, void __user *uptr);
241long compat_sys_msgctl(int first, int second, void __user *uptr);
242long compat_sys_shmat(int first, int second, compat_uptr_t third, int version, 263long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
243 void __user *uptr); 264 void __user *uptr);
265#else
266long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
267long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp,
268 size_t msgsz, int msgflg);
269long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp,
270 size_t msgsz, long msgtyp, int msgflg);
271long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
272#endif
273long compat_sys_msgctl(int first, int second, void __user *uptr);
244long compat_sys_shmctl(int first, int second, void __user *uptr); 274long compat_sys_shmctl(int first, int second, void __user *uptr);
245long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, 275long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
246 unsigned nsems, const struct compat_timespec __user *timeout); 276 unsigned nsems, const struct compat_timespec __user *timeout);
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 712abcc205ae..6c26a3da0e03 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -15,6 +15,7 @@
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/kobject.h> 16#include <linux/kobject.h>
17#include <linux/completion.h> 17#include <linux/completion.h>
18#include <linux/hrtimer.h>
18 19
19#define CPUIDLE_STATE_MAX 8 20#define CPUIDLE_STATE_MAX 8
20#define CPUIDLE_NAME_LEN 16 21#define CPUIDLE_NAME_LEN 16
@@ -43,12 +44,15 @@ struct cpuidle_state {
43 44
44 unsigned int flags; 45 unsigned int flags;
45 unsigned int exit_latency; /* in US */ 46 unsigned int exit_latency; /* in US */
46 unsigned int power_usage; /* in mW */ 47 int power_usage; /* in mW */
47 unsigned int target_residency; /* in US */ 48 unsigned int target_residency; /* in US */
49 unsigned int disable;
48 50
49 int (*enter) (struct cpuidle_device *dev, 51 int (*enter) (struct cpuidle_device *dev,
50 struct cpuidle_driver *drv, 52 struct cpuidle_driver *drv,
51 int index); 53 int index);
54
55 int (*enter_dead) (struct cpuidle_device *dev, int index);
52}; 56};
53 57
54/* Idle State Flags */ 58/* Idle State Flags */
@@ -96,7 +100,6 @@ struct cpuidle_device {
96 struct list_head device_list; 100 struct list_head device_list;
97 struct kobject kobj; 101 struct kobject kobj;
98 struct completion kobj_unregister; 102 struct completion kobj_unregister;
99 void *governor_data;
100}; 103};
101 104
102DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); 105DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -118,10 +121,12 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
118 ****************************/ 121 ****************************/
119 122
120struct cpuidle_driver { 123struct cpuidle_driver {
121 char name[CPUIDLE_NAME_LEN]; 124 const char *name;
122 struct module *owner; 125 struct module *owner;
123 126
124 unsigned int power_specified:1; 127 unsigned int power_specified:1;
128 /* set to 1 to use the core cpuidle time keeping (for all states). */
129 unsigned int en_core_tk_irqen:1;
125 struct cpuidle_state states[CPUIDLE_STATE_MAX]; 130 struct cpuidle_state states[CPUIDLE_STATE_MAX];
126 int state_count; 131 int state_count;
127 int safe_state_index; 132 int safe_state_index;
@@ -140,6 +145,11 @@ extern void cpuidle_pause_and_lock(void);
140extern void cpuidle_resume_and_unlock(void); 145extern void cpuidle_resume_and_unlock(void);
141extern int cpuidle_enable_device(struct cpuidle_device *dev); 146extern int cpuidle_enable_device(struct cpuidle_device *dev);
142extern void cpuidle_disable_device(struct cpuidle_device *dev); 147extern void cpuidle_disable_device(struct cpuidle_device *dev);
148extern int cpuidle_wrap_enter(struct cpuidle_device *dev,
149 struct cpuidle_driver *drv, int index,
150 int (*enter)(struct cpuidle_device *dev,
151 struct cpuidle_driver *drv, int index));
152extern int cpuidle_play_dead(void);
143 153
144#else 154#else
145static inline void disable_cpuidle(void) { } 155static inline void disable_cpuidle(void) { }
@@ -157,6 +167,12 @@ static inline void cpuidle_resume_and_unlock(void) { }
157static inline int cpuidle_enable_device(struct cpuidle_device *dev) 167static inline int cpuidle_enable_device(struct cpuidle_device *dev)
158{return -ENODEV; } 168{return -ENODEV; }
159static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } 169static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
170static inline int cpuidle_wrap_enter(struct cpuidle_device *dev,
171 struct cpuidle_driver *drv, int index,
172 int (*enter)(struct cpuidle_device *dev,
173 struct cpuidle_driver *drv, int index))
174{ return -ENODEV; }
175static inline int cpuidle_play_dead(void) {return -ENODEV; }
160 176
161#endif 177#endif
162 178
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 7a7e5fd2a277..668f66baac7b 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -22,7 +22,7 @@ extern int cpuset_init(void);
22extern void cpuset_init_smp(void); 22extern void cpuset_init_smp(void);
23extern void cpuset_update_active_cpus(void); 23extern void cpuset_update_active_cpus(void);
24extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); 24extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
25extern int cpuset_cpus_allowed_fallback(struct task_struct *p); 25extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
26extern nodemask_t cpuset_mems_allowed(struct task_struct *p); 26extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
27#define cpuset_current_mems_allowed (current->mems_allowed) 27#define cpuset_current_mems_allowed (current->mems_allowed)
28void cpuset_init_current_mems_allowed(void); 28void cpuset_init_current_mems_allowed(void);
@@ -135,10 +135,8 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
135 cpumask_copy(mask, cpu_possible_mask); 135 cpumask_copy(mask, cpu_possible_mask);
136} 136}
137 137
138static inline int cpuset_cpus_allowed_fallback(struct task_struct *p) 138static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
139{ 139{
140 do_set_cpus_allowed(p, cpu_possible_mask);
141 return cpumask_any(cpu_active_mask);
142} 140}
143 141
144static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) 142static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index e13117cbd2f7..5a736af3cc7a 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -77,7 +77,7 @@ static inline u64 dma_get_mask(struct device *dev)
77 return DMA_BIT_MASK(32); 77 return DMA_BIT_MASK(32);
78} 78}
79 79
80#ifdef ARCH_HAS_DMA_SET_COHERENT_MASK 80#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
81int dma_set_coherent_mask(struct device *dev, u64 mask); 81int dma_set_coherent_mask(struct device *dev, u64 mask);
82#else 82#else
83static inline int dma_set_coherent_mask(struct device *dev, u64 mask) 83static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index a5966f691ef8..676f967390ae 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -18,14 +18,15 @@
18 * The full GNU General Public License is included in this distribution in the 18 * The full GNU General Public License is included in this distribution in the
19 * file called COPYING. 19 * file called COPYING.
20 */ 20 */
21#ifndef DMAENGINE_H 21#ifndef LINUX_DMAENGINE_H
22#define DMAENGINE_H 22#define LINUX_DMAENGINE_H
23 23
24#include <linux/device.h> 24#include <linux/device.h>
25#include <linux/uio.h> 25#include <linux/uio.h>
26#include <linux/bug.h> 26#include <linux/bug.h>
27#include <linux/scatterlist.h> 27#include <linux/scatterlist.h>
28#include <linux/bitmap.h> 28#include <linux/bitmap.h>
29#include <linux/types.h>
29#include <asm/page.h> 30#include <asm/page.h>
30 31
31/** 32/**
@@ -258,6 +259,7 @@ struct dma_chan_percpu {
258 * struct dma_chan - devices supply DMA channels, clients use them 259 * struct dma_chan - devices supply DMA channels, clients use them
259 * @device: ptr to the dma device who supplies this channel, always !%NULL 260 * @device: ptr to the dma device who supplies this channel, always !%NULL
260 * @cookie: last cookie value returned to client 261 * @cookie: last cookie value returned to client
262 * @completed_cookie: last completed cookie for this channel
261 * @chan_id: channel ID for sysfs 263 * @chan_id: channel ID for sysfs
262 * @dev: class device for sysfs 264 * @dev: class device for sysfs
263 * @device_node: used to add this to the device chan list 265 * @device_node: used to add this to the device chan list
@@ -269,6 +271,7 @@ struct dma_chan_percpu {
269struct dma_chan { 271struct dma_chan {
270 struct dma_device *device; 272 struct dma_device *device;
271 dma_cookie_t cookie; 273 dma_cookie_t cookie;
274 dma_cookie_t completed_cookie;
272 275
273 /* sysfs */ 276 /* sysfs */
274 int chan_id; 277 int chan_id;
@@ -332,6 +335,9 @@ enum dma_slave_buswidth {
332 * may or may not be applicable on memory sources. 335 * may or may not be applicable on memory sources.
333 * @dst_maxburst: same as src_maxburst but for destination target 336 * @dst_maxburst: same as src_maxburst but for destination target
334 * mutatis mutandis. 337 * mutatis mutandis.
338 * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
339 * with 'true' if peripheral should be flow controller. Direction will be
340 * selected at Runtime.
335 * 341 *
336 * This struct is passed in as configuration data to a DMA engine 342 * This struct is passed in as configuration data to a DMA engine
337 * in order to set up a certain channel for DMA transport at runtime. 343 * in order to set up a certain channel for DMA transport at runtime.
@@ -358,6 +364,7 @@ struct dma_slave_config {
358 enum dma_slave_buswidth dst_addr_width; 364 enum dma_slave_buswidth dst_addr_width;
359 u32 src_maxburst; 365 u32 src_maxburst;
360 u32 dst_maxburst; 366 u32 dst_maxburst;
367 bool device_fc;
361}; 368};
362 369
363static inline const char *dma_chan_name(struct dma_chan *chan) 370static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -576,10 +583,11 @@ struct dma_device {
576 struct dma_async_tx_descriptor *(*device_prep_slave_sg)( 583 struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
577 struct dma_chan *chan, struct scatterlist *sgl, 584 struct dma_chan *chan, struct scatterlist *sgl,
578 unsigned int sg_len, enum dma_transfer_direction direction, 585 unsigned int sg_len, enum dma_transfer_direction direction,
579 unsigned long flags); 586 unsigned long flags, void *context);
580 struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)( 587 struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
581 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 588 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
582 size_t period_len, enum dma_transfer_direction direction); 589 size_t period_len, enum dma_transfer_direction direction,
590 void *context);
583 struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( 591 struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
584 struct dma_chan *chan, struct dma_interleaved_template *xt, 592 struct dma_chan *chan, struct dma_interleaved_template *xt,
585 unsigned long flags); 593 unsigned long flags);
@@ -613,7 +621,24 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
613 struct scatterlist sg; 621 struct scatterlist sg;
614 sg_init_one(&sg, buf, len); 622 sg_init_one(&sg, buf, len);
615 623
616 return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags); 624 return chan->device->device_prep_slave_sg(chan, &sg, 1,
625 dir, flags, NULL);
626}
627
628static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
629 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
630 enum dma_transfer_direction dir, unsigned long flags)
631{
632 return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
633 dir, flags, NULL);
634}
635
636static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
637 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
638 size_t period_len, enum dma_transfer_direction dir)
639{
640 return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
641 period_len, dir, NULL);
617} 642}
618 643
619static inline int dmaengine_terminate_all(struct dma_chan *chan) 644static inline int dmaengine_terminate_all(struct dma_chan *chan)
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index f2c64f92c4a0..2412e02d7c0f 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -31,18 +31,6 @@ struct dw_dma_platform_data {
31 unsigned char chan_priority; 31 unsigned char chan_priority;
32}; 32};
33 33
34/**
35 * enum dw_dma_slave_width - DMA slave register access width.
36 * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
37 * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
38 * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
39 */
40enum dw_dma_slave_width {
41 DW_DMA_SLAVE_WIDTH_8BIT,
42 DW_DMA_SLAVE_WIDTH_16BIT,
43 DW_DMA_SLAVE_WIDTH_32BIT,
44};
45
46/* bursts size */ 34/* bursts size */
47enum dw_dma_msize { 35enum dw_dma_msize {
48 DW_DMA_MSIZE_1, 36 DW_DMA_MSIZE_1,
@@ -55,47 +43,21 @@ enum dw_dma_msize {
55 DW_DMA_MSIZE_256, 43 DW_DMA_MSIZE_256,
56}; 44};
57 45
58/* flow controller */
59enum dw_dma_fc {
60 DW_DMA_FC_D_M2M,
61 DW_DMA_FC_D_M2P,
62 DW_DMA_FC_D_P2M,
63 DW_DMA_FC_D_P2P,
64 DW_DMA_FC_P_P2M,
65 DW_DMA_FC_SP_P2P,
66 DW_DMA_FC_P_M2P,
67 DW_DMA_FC_DP_P2P,
68};
69
70/** 46/**
71 * struct dw_dma_slave - Controller-specific information about a slave 47 * struct dw_dma_slave - Controller-specific information about a slave
72 * 48 *
73 * @dma_dev: required DMA master device 49 * @dma_dev: required DMA master device
74 * @tx_reg: physical address of data register used for
75 * memory-to-peripheral transfers
76 * @rx_reg: physical address of data register used for
77 * peripheral-to-memory transfers
78 * @reg_width: peripheral register width
79 * @cfg_hi: Platform-specific initializer for the CFG_HI register 50 * @cfg_hi: Platform-specific initializer for the CFG_HI register
80 * @cfg_lo: Platform-specific initializer for the CFG_LO register 51 * @cfg_lo: Platform-specific initializer for the CFG_LO register
81 * @src_master: src master for transfers on allocated channel. 52 * @src_master: src master for transfers on allocated channel.
82 * @dst_master: dest master for transfers on allocated channel. 53 * @dst_master: dest master for transfers on allocated channel.
83 * @src_msize: src burst size.
84 * @dst_msize: dest burst size.
85 * @fc: flow controller for DMA transfer
86 */ 54 */
87struct dw_dma_slave { 55struct dw_dma_slave {
88 struct device *dma_dev; 56 struct device *dma_dev;
89 dma_addr_t tx_reg;
90 dma_addr_t rx_reg;
91 enum dw_dma_slave_width reg_width;
92 u32 cfg_hi; 57 u32 cfg_hi;
93 u32 cfg_lo; 58 u32 cfg_lo;
94 u8 src_master; 59 u8 src_master;
95 u8 dst_master; 60 u8 dst_master;
96 u8 src_msize;
97 u8 dst_msize;
98 u8 fc;
99}; 61};
100 62
101/* Platform-configurable bits in CFG_HI */ 63/* Platform-configurable bits in CFG_HI */
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index ce1b719e8bd4..2723e715f67a 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -18,574 +18,25 @@
18 18
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/magic.h> 20#include <linux/magic.h>
21#include <linux/fs.h>
22 21
23/* 22#define EXT2_NAME_LEN 255
24 * The second extended filesystem constants/structures
25 */
26
27/*
28 * Define EXT2FS_DEBUG to produce debug messages
29 */
30#undef EXT2FS_DEBUG
31
32/*
33 * Define EXT2_RESERVATION to reserve data blocks for expanding files
34 */
35#define EXT2_DEFAULT_RESERVE_BLOCKS 8
36/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
37#define EXT2_MAX_RESERVE_BLOCKS 1027
38#define EXT2_RESERVE_WINDOW_NOT_ALLOCATED 0
39/*
40 * The second extended file system version
41 */
42#define EXT2FS_DATE "95/08/09"
43#define EXT2FS_VERSION "0.5b"
44
45/*
46 * Debug code
47 */
48#ifdef EXT2FS_DEBUG
49# define ext2_debug(f, a...) { \
50 printk ("EXT2-fs DEBUG (%s, %d): %s:", \
51 __FILE__, __LINE__, __func__); \
52 printk (f, ## a); \
53 }
54#else
55# define ext2_debug(f, a...) /**/
56#endif
57
58/*
59 * Special inode numbers
60 */
61#define EXT2_BAD_INO 1 /* Bad blocks inode */
62#define EXT2_ROOT_INO 2 /* Root inode */
63#define EXT2_BOOT_LOADER_INO 5 /* Boot loader inode */
64#define EXT2_UNDEL_DIR_INO 6 /* Undelete directory inode */
65
66/* First non-reserved inode for old ext2 filesystems */
67#define EXT2_GOOD_OLD_FIRST_INO 11
68
69#ifdef __KERNEL__
70#include <linux/ext2_fs_sb.h>
71static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb)
72{
73 return sb->s_fs_info;
74}
75#else
76/* Assume that user mode programs are passing in an ext2fs superblock, not
77 * a kernel struct super_block. This will allow us to call the feature-test
78 * macros from user land. */
79#define EXT2_SB(sb) (sb)
80#endif
81 23
82/* 24/*
83 * Maximal count of links to a file 25 * Maximal count of links to a file
84 */ 26 */
85#define EXT2_LINK_MAX 32000 27#define EXT2_LINK_MAX 32000
86 28
87/* 29#define EXT2_SB_MAGIC_OFFSET 0x38
88 * Macro-instructions used to manage several block sizes 30#define EXT2_SB_BLOCKS_OFFSET 0x04
89 */ 31#define EXT2_SB_BSIZE_OFFSET 0x18
90#define EXT2_MIN_BLOCK_SIZE 1024
91#define EXT2_MAX_BLOCK_SIZE 4096
92#define EXT2_MIN_BLOCK_LOG_SIZE 10
93#ifdef __KERNEL__
94# define EXT2_BLOCK_SIZE(s) ((s)->s_blocksize)
95#else
96# define EXT2_BLOCK_SIZE(s) (EXT2_MIN_BLOCK_SIZE << (s)->s_log_block_size)
97#endif
98#define EXT2_ADDR_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (__u32))
99#ifdef __KERNEL__
100# define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_blocksize_bits)
101#else
102# define EXT2_BLOCK_SIZE_BITS(s) ((s)->s_log_block_size + 10)
103#endif
104#ifdef __KERNEL__
105#define EXT2_ADDR_PER_BLOCK_BITS(s) (EXT2_SB(s)->s_addr_per_block_bits)
106#define EXT2_INODE_SIZE(s) (EXT2_SB(s)->s_inode_size)
107#define EXT2_FIRST_INO(s) (EXT2_SB(s)->s_first_ino)
108#else
109#define EXT2_INODE_SIZE(s) (((s)->s_rev_level == EXT2_GOOD_OLD_REV) ? \
110 EXT2_GOOD_OLD_INODE_SIZE : \
111 (s)->s_inode_size)
112#define EXT2_FIRST_INO(s) (((s)->s_rev_level == EXT2_GOOD_OLD_REV) ? \
113 EXT2_GOOD_OLD_FIRST_INO : \
114 (s)->s_first_ino)
115#endif
116 32
117/* 33static inline u64 ext2_image_size(void *ext2_sb)
118 * Macro-instructions used to manage fragments
119 */
120#define EXT2_MIN_FRAG_SIZE 1024
121#define EXT2_MAX_FRAG_SIZE 4096
122#define EXT2_MIN_FRAG_LOG_SIZE 10
123#ifdef __KERNEL__
124# define EXT2_FRAG_SIZE(s) (EXT2_SB(s)->s_frag_size)
125# define EXT2_FRAGS_PER_BLOCK(s) (EXT2_SB(s)->s_frags_per_block)
126#else
127# define EXT2_FRAG_SIZE(s) (EXT2_MIN_FRAG_SIZE << (s)->s_log_frag_size)
128# define EXT2_FRAGS_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / EXT2_FRAG_SIZE(s))
129#endif
130
131/*
132 * Structure of a blocks group descriptor
133 */
134struct ext2_group_desc
135{ 34{
136 __le32 bg_block_bitmap; /* Blocks bitmap block */ 35 __u8 *p = ext2_sb;
137 __le32 bg_inode_bitmap; /* Inodes bitmap block */ 36 if (*(__le16 *)(p + EXT2_SB_MAGIC_OFFSET) != cpu_to_le16(EXT2_SUPER_MAGIC))
138 __le32 bg_inode_table; /* Inodes table block */ 37 return 0;
139 __le16 bg_free_blocks_count; /* Free blocks count */ 38 return (u64)le32_to_cpup((__le32 *)(p + EXT2_SB_BLOCKS_OFFSET)) <<
140 __le16 bg_free_inodes_count; /* Free inodes count */ 39 le32_to_cpup((__le32 *)(p + EXT2_SB_BSIZE_OFFSET));
141 __le16 bg_used_dirs_count; /* Directories count */
142 __le16 bg_pad;
143 __le32 bg_reserved[3];
144};
145
146/*
147 * Macro-instructions used to manage group descriptors
148 */
149#ifdef __KERNEL__
150# define EXT2_BLOCKS_PER_GROUP(s) (EXT2_SB(s)->s_blocks_per_group)
151# define EXT2_DESC_PER_BLOCK(s) (EXT2_SB(s)->s_desc_per_block)
152# define EXT2_INODES_PER_GROUP(s) (EXT2_SB(s)->s_inodes_per_group)
153# define EXT2_DESC_PER_BLOCK_BITS(s) (EXT2_SB(s)->s_desc_per_block_bits)
154#else
155# define EXT2_BLOCKS_PER_GROUP(s) ((s)->s_blocks_per_group)
156# define EXT2_DESC_PER_BLOCK(s) (EXT2_BLOCK_SIZE(s) / sizeof (struct ext2_group_desc))
157# define EXT2_INODES_PER_GROUP(s) ((s)->s_inodes_per_group)
158#endif
159
160/*
161 * Constants relative to the data blocks
162 */
163#define EXT2_NDIR_BLOCKS 12
164#define EXT2_IND_BLOCK EXT2_NDIR_BLOCKS
165#define EXT2_DIND_BLOCK (EXT2_IND_BLOCK + 1)
166#define EXT2_TIND_BLOCK (EXT2_DIND_BLOCK + 1)
167#define EXT2_N_BLOCKS (EXT2_TIND_BLOCK + 1)
168
169/*
170 * Inode flags (GETFLAGS/SETFLAGS)
171 */
172#define EXT2_SECRM_FL FS_SECRM_FL /* Secure deletion */
173#define EXT2_UNRM_FL FS_UNRM_FL /* Undelete */
174#define EXT2_COMPR_FL FS_COMPR_FL /* Compress file */
175#define EXT2_SYNC_FL FS_SYNC_FL /* Synchronous updates */
176#define EXT2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */
177#define EXT2_APPEND_FL FS_APPEND_FL /* writes to file may only append */
178#define EXT2_NODUMP_FL FS_NODUMP_FL /* do not dump file */
179#define EXT2_NOATIME_FL FS_NOATIME_FL /* do not update atime */
180/* Reserved for compression usage... */
181#define EXT2_DIRTY_FL FS_DIRTY_FL
182#define EXT2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */
183#define EXT2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */
184#define EXT2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */
185/* End compression flags --- maybe not all used */
186#define EXT2_BTREE_FL FS_BTREE_FL /* btree format dir */
187#define EXT2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */
188#define EXT2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */
189#define EXT2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */
190#define EXT2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */
191#define EXT2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */
192#define EXT2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/
193#define EXT2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */
194
195#define EXT2_FL_USER_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
196#define EXT2_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */
197
198/* Flags that should be inherited by new inodes from their parent. */
199#define EXT2_FL_INHERITED (EXT2_SECRM_FL | EXT2_UNRM_FL | EXT2_COMPR_FL |\
200 EXT2_SYNC_FL | EXT2_NODUMP_FL |\
201 EXT2_NOATIME_FL | EXT2_COMPRBLK_FL |\
202 EXT2_NOCOMP_FL | EXT2_JOURNAL_DATA_FL |\
203 EXT2_NOTAIL_FL | EXT2_DIRSYNC_FL)
204
205/* Flags that are appropriate for regular files (all but dir-specific ones). */
206#define EXT2_REG_FLMASK (~(EXT2_DIRSYNC_FL | EXT2_TOPDIR_FL))
207
208/* Flags that are appropriate for non-directories/regular files. */
209#define EXT2_OTHER_FLMASK (EXT2_NODUMP_FL | EXT2_NOATIME_FL)
210
211/* Mask out flags that are inappropriate for the given type of inode. */
212static inline __u32 ext2_mask_flags(umode_t mode, __u32 flags)
213{
214 if (S_ISDIR(mode))
215 return flags;
216 else if (S_ISREG(mode))
217 return flags & EXT2_REG_FLMASK;
218 else
219 return flags & EXT2_OTHER_FLMASK;
220} 40}
221 41
222/*
223 * ioctl commands
224 */
225#define EXT2_IOC_GETFLAGS FS_IOC_GETFLAGS
226#define EXT2_IOC_SETFLAGS FS_IOC_SETFLAGS
227#define EXT2_IOC_GETVERSION FS_IOC_GETVERSION
228#define EXT2_IOC_SETVERSION FS_IOC_SETVERSION
229#define EXT2_IOC_GETRSVSZ _IOR('f', 5, long)
230#define EXT2_IOC_SETRSVSZ _IOW('f', 6, long)
231
232/*
233 * ioctl commands in 32 bit emulation
234 */
235#define EXT2_IOC32_GETFLAGS FS_IOC32_GETFLAGS
236#define EXT2_IOC32_SETFLAGS FS_IOC32_SETFLAGS
237#define EXT2_IOC32_GETVERSION FS_IOC32_GETVERSION
238#define EXT2_IOC32_SETVERSION FS_IOC32_SETVERSION
239
240/*
241 * Structure of an inode on the disk
242 */
243struct ext2_inode {
244 __le16 i_mode; /* File mode */
245 __le16 i_uid; /* Low 16 bits of Owner Uid */
246 __le32 i_size; /* Size in bytes */
247 __le32 i_atime; /* Access time */
248 __le32 i_ctime; /* Creation time */
249 __le32 i_mtime; /* Modification time */
250 __le32 i_dtime; /* Deletion Time */
251 __le16 i_gid; /* Low 16 bits of Group Id */
252 __le16 i_links_count; /* Links count */
253 __le32 i_blocks; /* Blocks count */
254 __le32 i_flags; /* File flags */
255 union {
256 struct {
257 __le32 l_i_reserved1;
258 } linux1;
259 struct {
260 __le32 h_i_translator;
261 } hurd1;
262 struct {
263 __le32 m_i_reserved1;
264 } masix1;
265 } osd1; /* OS dependent 1 */
266 __le32 i_block[EXT2_N_BLOCKS];/* Pointers to blocks */
267 __le32 i_generation; /* File version (for NFS) */
268 __le32 i_file_acl; /* File ACL */
269 __le32 i_dir_acl; /* Directory ACL */
270 __le32 i_faddr; /* Fragment address */
271 union {
272 struct {
273 __u8 l_i_frag; /* Fragment number */
274 __u8 l_i_fsize; /* Fragment size */
275 __u16 i_pad1;
276 __le16 l_i_uid_high; /* these 2 fields */
277 __le16 l_i_gid_high; /* were reserved2[0] */
278 __u32 l_i_reserved2;
279 } linux2;
280 struct {
281 __u8 h_i_frag; /* Fragment number */
282 __u8 h_i_fsize; /* Fragment size */
283 __le16 h_i_mode_high;
284 __le16 h_i_uid_high;
285 __le16 h_i_gid_high;
286 __le32 h_i_author;
287 } hurd2;
288 struct {
289 __u8 m_i_frag; /* Fragment number */
290 __u8 m_i_fsize; /* Fragment size */
291 __u16 m_pad1;
292 __u32 m_i_reserved2[2];
293 } masix2;
294 } osd2; /* OS dependent 2 */
295};
296
297#define i_size_high i_dir_acl
298
299#if defined(__KERNEL__) || defined(__linux__)
300#define i_reserved1 osd1.linux1.l_i_reserved1
301#define i_frag osd2.linux2.l_i_frag
302#define i_fsize osd2.linux2.l_i_fsize
303#define i_uid_low i_uid
304#define i_gid_low i_gid
305#define i_uid_high osd2.linux2.l_i_uid_high
306#define i_gid_high osd2.linux2.l_i_gid_high
307#define i_reserved2 osd2.linux2.l_i_reserved2
308#endif
309
310#ifdef __hurd__
311#define i_translator osd1.hurd1.h_i_translator
312#define i_frag osd2.hurd2.h_i_frag
313#define i_fsize osd2.hurd2.h_i_fsize
314#define i_uid_high osd2.hurd2.h_i_uid_high
315#define i_gid_high osd2.hurd2.h_i_gid_high
316#define i_author osd2.hurd2.h_i_author
317#endif
318
319#ifdef __masix__
320#define i_reserved1 osd1.masix1.m_i_reserved1
321#define i_frag osd2.masix2.m_i_frag
322#define i_fsize osd2.masix2.m_i_fsize
323#define i_reserved2 osd2.masix2.m_i_reserved2
324#endif
325
326/*
327 * File system states
328 */
329#define EXT2_VALID_FS 0x0001 /* Unmounted cleanly */
330#define EXT2_ERROR_FS 0x0002 /* Errors detected */
331
332/*
333 * Mount flags
334 */
335#define EXT2_MOUNT_CHECK 0x000001 /* Do mount-time checks */
336#define EXT2_MOUNT_OLDALLOC 0x000002 /* Don't use the new Orlov allocator */
337#define EXT2_MOUNT_GRPID 0x000004 /* Create files with directory's group */
338#define EXT2_MOUNT_DEBUG 0x000008 /* Some debugging messages */
339#define EXT2_MOUNT_ERRORS_CONT 0x000010 /* Continue on errors */
340#define EXT2_MOUNT_ERRORS_RO 0x000020 /* Remount fs ro on errors */
341#define EXT2_MOUNT_ERRORS_PANIC 0x000040 /* Panic on errors */
342#define EXT2_MOUNT_MINIX_DF 0x000080 /* Mimics the Minix statfs */
343#define EXT2_MOUNT_NOBH 0x000100 /* No buffer_heads */
344#define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */
345#define EXT2_MOUNT_XATTR_USER 0x004000 /* Extended user attributes */
346#define EXT2_MOUNT_POSIX_ACL 0x008000 /* POSIX Access Control Lists */
347#define EXT2_MOUNT_XIP 0x010000 /* Execute in place */
348#define EXT2_MOUNT_USRQUOTA 0x020000 /* user quota */
349#define EXT2_MOUNT_GRPQUOTA 0x040000 /* group quota */
350#define EXT2_MOUNT_RESERVATION 0x080000 /* Preallocation */
351
352
353#define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt
354#define set_opt(o, opt) o |= EXT2_MOUNT_##opt
355#define test_opt(sb, opt) (EXT2_SB(sb)->s_mount_opt & \
356 EXT2_MOUNT_##opt)
357/*
358 * Maximal mount counts between two filesystem checks
359 */
360#define EXT2_DFL_MAX_MNT_COUNT 20 /* Allow 20 mounts */
361#define EXT2_DFL_CHECKINTERVAL 0 /* Don't use interval check */
362
363/*
364 * Behaviour when detecting errors
365 */
366#define EXT2_ERRORS_CONTINUE 1 /* Continue execution */
367#define EXT2_ERRORS_RO 2 /* Remount fs read-only */
368#define EXT2_ERRORS_PANIC 3 /* Panic */
369#define EXT2_ERRORS_DEFAULT EXT2_ERRORS_CONTINUE
370
371/*
372 * Structure of the super block
373 */
374struct ext2_super_block {
375 __le32 s_inodes_count; /* Inodes count */
376 __le32 s_blocks_count; /* Blocks count */
377 __le32 s_r_blocks_count; /* Reserved blocks count */
378 __le32 s_free_blocks_count; /* Free blocks count */
379 __le32 s_free_inodes_count; /* Free inodes count */
380 __le32 s_first_data_block; /* First Data Block */
381 __le32 s_log_block_size; /* Block size */
382 __le32 s_log_frag_size; /* Fragment size */
383 __le32 s_blocks_per_group; /* # Blocks per group */
384 __le32 s_frags_per_group; /* # Fragments per group */
385 __le32 s_inodes_per_group; /* # Inodes per group */
386 __le32 s_mtime; /* Mount time */
387 __le32 s_wtime; /* Write time */
388 __le16 s_mnt_count; /* Mount count */
389 __le16 s_max_mnt_count; /* Maximal mount count */
390 __le16 s_magic; /* Magic signature */
391 __le16 s_state; /* File system state */
392 __le16 s_errors; /* Behaviour when detecting errors */
393 __le16 s_minor_rev_level; /* minor revision level */
394 __le32 s_lastcheck; /* time of last check */
395 __le32 s_checkinterval; /* max. time between checks */
396 __le32 s_creator_os; /* OS */
397 __le32 s_rev_level; /* Revision level */
398 __le16 s_def_resuid; /* Default uid for reserved blocks */
399 __le16 s_def_resgid; /* Default gid for reserved blocks */
400 /*
401 * These fields are for EXT2_DYNAMIC_REV superblocks only.
402 *
403 * Note: the difference between the compatible feature set and
404 * the incompatible feature set is that if there is a bit set
405 * in the incompatible feature set that the kernel doesn't
406 * know about, it should refuse to mount the filesystem.
407 *
408 * e2fsck's requirements are more strict; if it doesn't know
409 * about a feature in either the compatible or incompatible
410 * feature set, it must abort and not try to meddle with
411 * things it doesn't understand...
412 */
413 __le32 s_first_ino; /* First non-reserved inode */
414 __le16 s_inode_size; /* size of inode structure */
415 __le16 s_block_group_nr; /* block group # of this superblock */
416 __le32 s_feature_compat; /* compatible feature set */
417 __le32 s_feature_incompat; /* incompatible feature set */
418 __le32 s_feature_ro_compat; /* readonly-compatible feature set */
419 __u8 s_uuid[16]; /* 128-bit uuid for volume */
420 char s_volume_name[16]; /* volume name */
421 char s_last_mounted[64]; /* directory where last mounted */
422 __le32 s_algorithm_usage_bitmap; /* For compression */
423 /*
424 * Performance hints. Directory preallocation should only
425 * happen if the EXT2_COMPAT_PREALLOC flag is on.
426 */
427 __u8 s_prealloc_blocks; /* Nr of blocks to try to preallocate*/
428 __u8 s_prealloc_dir_blocks; /* Nr to preallocate for dirs */
429 __u16 s_padding1;
430 /*
431 * Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
432 */
433 __u8 s_journal_uuid[16]; /* uuid of journal superblock */
434 __u32 s_journal_inum; /* inode number of journal file */
435 __u32 s_journal_dev; /* device number of journal file */
436 __u32 s_last_orphan; /* start of list of inodes to delete */
437 __u32 s_hash_seed[4]; /* HTREE hash seed */
438 __u8 s_def_hash_version; /* Default hash version to use */
439 __u8 s_reserved_char_pad;
440 __u16 s_reserved_word_pad;
441 __le32 s_default_mount_opts;
442 __le32 s_first_meta_bg; /* First metablock block group */
443 __u32 s_reserved[190]; /* Padding to the end of the block */
444};
445
446/*
447 * Codes for operating systems
448 */
449#define EXT2_OS_LINUX 0
450#define EXT2_OS_HURD 1
451#define EXT2_OS_MASIX 2
452#define EXT2_OS_FREEBSD 3
453#define EXT2_OS_LITES 4
454
455/*
456 * Revision levels
457 */
458#define EXT2_GOOD_OLD_REV 0 /* The good old (original) format */
459#define EXT2_DYNAMIC_REV 1 /* V2 format w/ dynamic inode sizes */
460
461#define EXT2_CURRENT_REV EXT2_GOOD_OLD_REV
462#define EXT2_MAX_SUPP_REV EXT2_DYNAMIC_REV
463
464#define EXT2_GOOD_OLD_INODE_SIZE 128
465
466/*
467 * Feature set definitions
468 */
469
470#define EXT2_HAS_COMPAT_FEATURE(sb,mask) \
471 ( EXT2_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
472#define EXT2_HAS_RO_COMPAT_FEATURE(sb,mask) \
473 ( EXT2_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
474#define EXT2_HAS_INCOMPAT_FEATURE(sb,mask) \
475 ( EXT2_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
476#define EXT2_SET_COMPAT_FEATURE(sb,mask) \
477 EXT2_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
478#define EXT2_SET_RO_COMPAT_FEATURE(sb,mask) \
479 EXT2_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
480#define EXT2_SET_INCOMPAT_FEATURE(sb,mask) \
481 EXT2_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
482#define EXT2_CLEAR_COMPAT_FEATURE(sb,mask) \
483 EXT2_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
484#define EXT2_CLEAR_RO_COMPAT_FEATURE(sb,mask) \
485 EXT2_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
486#define EXT2_CLEAR_INCOMPAT_FEATURE(sb,mask) \
487 EXT2_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
488
489#define EXT2_FEATURE_COMPAT_DIR_PREALLOC 0x0001
490#define EXT2_FEATURE_COMPAT_IMAGIC_INODES 0x0002
491#define EXT3_FEATURE_COMPAT_HAS_JOURNAL 0x0004
492#define EXT2_FEATURE_COMPAT_EXT_ATTR 0x0008
493#define EXT2_FEATURE_COMPAT_RESIZE_INO 0x0010
494#define EXT2_FEATURE_COMPAT_DIR_INDEX 0x0020
495#define EXT2_FEATURE_COMPAT_ANY 0xffffffff
496
497#define EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
498#define EXT2_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
499#define EXT2_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
500#define EXT2_FEATURE_RO_COMPAT_ANY 0xffffffff
501
502#define EXT2_FEATURE_INCOMPAT_COMPRESSION 0x0001
503#define EXT2_FEATURE_INCOMPAT_FILETYPE 0x0002
504#define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004
505#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008
506#define EXT2_FEATURE_INCOMPAT_META_BG 0x0010
507#define EXT2_FEATURE_INCOMPAT_ANY 0xffffffff
508
509#define EXT2_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
510#define EXT2_FEATURE_INCOMPAT_SUPP (EXT2_FEATURE_INCOMPAT_FILETYPE| \
511 EXT2_FEATURE_INCOMPAT_META_BG)
512#define EXT2_FEATURE_RO_COMPAT_SUPP (EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER| \
513 EXT2_FEATURE_RO_COMPAT_LARGE_FILE| \
514 EXT2_FEATURE_RO_COMPAT_BTREE_DIR)
515#define EXT2_FEATURE_RO_COMPAT_UNSUPPORTED ~EXT2_FEATURE_RO_COMPAT_SUPP
516#define EXT2_FEATURE_INCOMPAT_UNSUPPORTED ~EXT2_FEATURE_INCOMPAT_SUPP
517
518/*
519 * Default values for user and/or group using reserved blocks
520 */
521#define EXT2_DEF_RESUID 0
522#define EXT2_DEF_RESGID 0
523
524/*
525 * Default mount options
526 */
527#define EXT2_DEFM_DEBUG 0x0001
528#define EXT2_DEFM_BSDGROUPS 0x0002
529#define EXT2_DEFM_XATTR_USER 0x0004
530#define EXT2_DEFM_ACL 0x0008
531#define EXT2_DEFM_UID16 0x0010
532 /* Not used by ext2, but reserved for use by ext3 */
533#define EXT3_DEFM_JMODE 0x0060
534#define EXT3_DEFM_JMODE_DATA 0x0020
535#define EXT3_DEFM_JMODE_ORDERED 0x0040
536#define EXT3_DEFM_JMODE_WBACK 0x0060
537
538/*
539 * Structure of a directory entry
540 */
541#define EXT2_NAME_LEN 255
542
543struct ext2_dir_entry {
544 __le32 inode; /* Inode number */
545 __le16 rec_len; /* Directory entry length */
546 __le16 name_len; /* Name length */
547 char name[EXT2_NAME_LEN]; /* File name */
548};
549
550/*
551 * The new version of the directory entry. Since EXT2 structures are
552 * stored in intel byte order, and the name_len field could never be
553 * bigger than 255 chars, it's safe to reclaim the extra byte for the
554 * file_type field.
555 */
556struct ext2_dir_entry_2 {
557 __le32 inode; /* Inode number */
558 __le16 rec_len; /* Directory entry length */
559 __u8 name_len; /* Name length */
560 __u8 file_type;
561 char name[EXT2_NAME_LEN]; /* File name */
562};
563
564/*
565 * Ext2 directory file types. Only the low 3 bits are used. The
566 * other bits are reserved for now.
567 */
568enum {
569 EXT2_FT_UNKNOWN = 0,
570 EXT2_FT_REG_FILE = 1,
571 EXT2_FT_DIR = 2,
572 EXT2_FT_CHRDEV = 3,
573 EXT2_FT_BLKDEV = 4,
574 EXT2_FT_FIFO = 5,
575 EXT2_FT_SOCK = 6,
576 EXT2_FT_SYMLINK = 7,
577 EXT2_FT_MAX
578};
579
580/*
581 * EXT2_DIR_PAD defines the directory entries boundaries
582 *
583 * NOTE: It must be a multiple of 4
584 */
585#define EXT2_DIR_PAD 4
586#define EXT2_DIR_ROUND (EXT2_DIR_PAD - 1)
587#define EXT2_DIR_REC_LEN(name_len) (((name_len) + 8 + EXT2_DIR_ROUND) & \
588 ~EXT2_DIR_ROUND)
589#define EXT2_MAX_REC_LEN ((1<<16)-1)
590
591#endif /* _LINUX_EXT2_FS_H */ 42#endif /* _LINUX_EXT2_FS_H */
diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h
deleted file mode 100644
index db4d9f586bb6..000000000000
--- a/include/linux/ext2_fs_sb.h
+++ /dev/null
@@ -1,126 +0,0 @@
1/*
2 * linux/include/linux/ext2_fs_sb.h
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 *
9 * from
10 *
11 * linux/include/linux/minix_fs_sb.h
12 *
13 * Copyright (C) 1991, 1992 Linus Torvalds
14 */
15
16#ifndef _LINUX_EXT2_FS_SB
17#define _LINUX_EXT2_FS_SB
18
19#include <linux/blockgroup_lock.h>
20#include <linux/percpu_counter.h>
21#include <linux/rbtree.h>
22
23/* XXX Here for now... not interested in restructing headers JUST now */
24
25/* data type for block offset of block group */
26typedef int ext2_grpblk_t;
27
28/* data type for filesystem-wide blocks number */
29typedef unsigned long ext2_fsblk_t;
30
31#define E2FSBLK "%lu"
32
33struct ext2_reserve_window {
34 ext2_fsblk_t _rsv_start; /* First byte reserved */
35 ext2_fsblk_t _rsv_end; /* Last byte reserved or 0 */
36};
37
38struct ext2_reserve_window_node {
39 struct rb_node rsv_node;
40 __u32 rsv_goal_size;
41 __u32 rsv_alloc_hit;
42 struct ext2_reserve_window rsv_window;
43};
44
45struct ext2_block_alloc_info {
46 /* information about reservation window */
47 struct ext2_reserve_window_node rsv_window_node;
48 /*
49 * was i_next_alloc_block in ext2_inode_info
50 * is the logical (file-relative) number of the
51 * most-recently-allocated block in this file.
52 * We use this for detecting linearly ascending allocation requests.
53 */
54 __u32 last_alloc_logical_block;
55 /*
56 * Was i_next_alloc_goal in ext2_inode_info
57 * is the *physical* companion to i_next_alloc_block.
58 * it the the physical block number of the block which was most-recentl
59 * allocated to this file. This give us the goal (target) for the next
60 * allocation when we detect linearly ascending requests.
61 */
62 ext2_fsblk_t last_alloc_physical_block;
63};
64
65#define rsv_start rsv_window._rsv_start
66#define rsv_end rsv_window._rsv_end
67
68/*
69 * second extended-fs super-block data in memory
70 */
71struct ext2_sb_info {
72 unsigned long s_frag_size; /* Size of a fragment in bytes */
73 unsigned long s_frags_per_block;/* Number of fragments per block */
74 unsigned long s_inodes_per_block;/* Number of inodes per block */
75 unsigned long s_frags_per_group;/* Number of fragments in a group */
76 unsigned long s_blocks_per_group;/* Number of blocks in a group */
77 unsigned long s_inodes_per_group;/* Number of inodes in a group */
78 unsigned long s_itb_per_group; /* Number of inode table blocks per group */
79 unsigned long s_gdb_count; /* Number of group descriptor blocks */
80 unsigned long s_desc_per_block; /* Number of group descriptors per block */
81 unsigned long s_groups_count; /* Number of groups in the fs */
82 unsigned long s_overhead_last; /* Last calculated overhead */
83 unsigned long s_blocks_last; /* Last seen block count */
84 struct buffer_head * s_sbh; /* Buffer containing the super block */
85 struct ext2_super_block * s_es; /* Pointer to the super block in the buffer */
86 struct buffer_head ** s_group_desc;
87 unsigned long s_mount_opt;
88 unsigned long s_sb_block;
89 uid_t s_resuid;
90 gid_t s_resgid;
91 unsigned short s_mount_state;
92 unsigned short s_pad;
93 int s_addr_per_block_bits;
94 int s_desc_per_block_bits;
95 int s_inode_size;
96 int s_first_ino;
97 spinlock_t s_next_gen_lock;
98 u32 s_next_generation;
99 unsigned long s_dir_count;
100 u8 *s_debts;
101 struct percpu_counter s_freeblocks_counter;
102 struct percpu_counter s_freeinodes_counter;
103 struct percpu_counter s_dirs_counter;
104 struct blockgroup_lock *s_blockgroup_lock;
105 /* root of the per fs reservation window tree */
106 spinlock_t s_rsv_window_lock;
107 struct rb_root s_rsv_window_root;
108 struct ext2_reserve_window_node s_rsv_window_head;
109 /*
110 * s_lock protects against concurrent modifications of s_mount_state,
111 * s_blocks_last, s_overhead_last and the content of superblock's
112 * buffer pointed to by sbi->s_es.
113 *
114 * Note: It is used in ext2_show_options() to provide a consistent view
115 * of the mount options.
116 */
117 spinlock_t s_lock;
118};
119
120static inline spinlock_t *
121sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group)
122{
123 return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
124}
125
126#endif /* _LINUX_EXT2_FS_SB */
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
deleted file mode 100644
index f42c098aed8d..000000000000
--- a/include/linux/ext3_fs_i.h
+++ /dev/null
@@ -1,151 +0,0 @@
1/*
2 * linux/include/linux/ext3_fs_i.h
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 *
9 * from
10 *
11 * linux/include/linux/minix_fs_i.h
12 *
13 * Copyright (C) 1991, 1992 Linus Torvalds
14 */
15
16#ifndef _LINUX_EXT3_FS_I
17#define _LINUX_EXT3_FS_I
18
19#include <linux/rwsem.h>
20#include <linux/rbtree.h>
21#include <linux/seqlock.h>
22#include <linux/mutex.h>
23
24/* data type for block offset of block group */
25typedef int ext3_grpblk_t;
26
27/* data type for filesystem-wide blocks number */
28typedef unsigned long ext3_fsblk_t;
29
30#define E3FSBLK "%lu"
31
32struct ext3_reserve_window {
33 ext3_fsblk_t _rsv_start; /* First byte reserved */
34 ext3_fsblk_t _rsv_end; /* Last byte reserved or 0 */
35};
36
37struct ext3_reserve_window_node {
38 struct rb_node rsv_node;
39 __u32 rsv_goal_size;
40 __u32 rsv_alloc_hit;
41 struct ext3_reserve_window rsv_window;
42};
43
44struct ext3_block_alloc_info {
45 /* information about reservation window */
46 struct ext3_reserve_window_node rsv_window_node;
47 /*
48 * was i_next_alloc_block in ext3_inode_info
49 * is the logical (file-relative) number of the
50 * most-recently-allocated block in this file.
51 * We use this for detecting linearly ascending allocation requests.
52 */
53 __u32 last_alloc_logical_block;
54 /*
55 * Was i_next_alloc_goal in ext3_inode_info
56 * is the *physical* companion to i_next_alloc_block.
57 * it the physical block number of the block which was most-recentl
58 * allocated to this file. This give us the goal (target) for the next
59 * allocation when we detect linearly ascending requests.
60 */
61 ext3_fsblk_t last_alloc_physical_block;
62};
63
64#define rsv_start rsv_window._rsv_start
65#define rsv_end rsv_window._rsv_end
66
67/*
68 * third extended file system inode data in memory
69 */
70struct ext3_inode_info {
71 __le32 i_data[15]; /* unconverted */
72 __u32 i_flags;
73#ifdef EXT3_FRAGMENTS
74 __u32 i_faddr;
75 __u8 i_frag_no;
76 __u8 i_frag_size;
77#endif
78 ext3_fsblk_t i_file_acl;
79 __u32 i_dir_acl;
80 __u32 i_dtime;
81
82 /*
83 * i_block_group is the number of the block group which contains
84 * this file's inode. Constant across the lifetime of the inode,
85 * it is ued for making block allocation decisions - we try to
86 * place a file's data blocks near its inode block, and new inodes
87 * near to their parent directory's inode.
88 */
89 __u32 i_block_group;
90 unsigned long i_state_flags; /* Dynamic state flags for ext3 */
91
92 /* block reservation info */
93 struct ext3_block_alloc_info *i_block_alloc_info;
94
95 __u32 i_dir_start_lookup;
96#ifdef CONFIG_EXT3_FS_XATTR
97 /*
98 * Extended attributes can be read independently of the main file
99 * data. Taking i_mutex even when reading would cause contention
100 * between readers of EAs and writers of regular file data, so
101 * instead we synchronize on xattr_sem when reading or changing
102 * EAs.
103 */
104 struct rw_semaphore xattr_sem;
105#endif
106
107 struct list_head i_orphan; /* unlinked but open inodes */
108
109 /*
110 * i_disksize keeps track of what the inode size is ON DISK, not
111 * in memory. During truncate, i_size is set to the new size by
112 * the VFS prior to calling ext3_truncate(), but the filesystem won't
113 * set i_disksize to 0 until the truncate is actually under way.
114 *
115 * The intent is that i_disksize always represents the blocks which
116 * are used by this file. This allows recovery to restart truncate
117 * on orphans if we crash during truncate. We actually write i_disksize
118 * into the on-disk inode when writing inodes out, instead of i_size.
119 *
120 * The only time when i_disksize and i_size may be different is when
121 * a truncate is in progress. The only things which change i_disksize
122 * are ext3_get_block (growth) and ext3_truncate (shrinkth).
123 */
124 loff_t i_disksize;
125
126 /* on-disk additional length */
127 __u16 i_extra_isize;
128
129 /*
130 * truncate_mutex is for serialising ext3_truncate() against
131 * ext3_getblock(). In the 2.4 ext2 design, great chunks of inode's
132 * data tree are chopped off during truncate. We can't do that in
133 * ext3 because whenever we perform intermediate commits during
134 * truncate, the inode and all the metadata blocks *must* be in a
135 * consistent state which allows truncation of the orphans to restart
136 * during recovery. Hence we must fix the get_block-vs-truncate race
137 * by other means, so we have truncate_mutex.
138 */
139 struct mutex truncate_mutex;
140
141 /*
142 * Transactions that contain inode's metadata needed to complete
143 * fsync and fdatasync, respectively.
144 */
145 atomic_t i_sync_tid;
146 atomic_t i_datasync_tid;
147
148 struct inode vfs_inode;
149};
150
151#endif /* _LINUX_EXT3_FS_I */
diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h
deleted file mode 100644
index 64365252f1b0..000000000000
--- a/include/linux/ext3_fs_sb.h
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * linux/include/linux/ext3_fs_sb.h
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 *
9 * from
10 *
11 * linux/include/linux/minix_fs_sb.h
12 *
13 * Copyright (C) 1991, 1992 Linus Torvalds
14 */
15
16#ifndef _LINUX_EXT3_FS_SB
17#define _LINUX_EXT3_FS_SB
18
19#ifdef __KERNEL__
20#include <linux/timer.h>
21#include <linux/wait.h>
22#include <linux/blockgroup_lock.h>
23#include <linux/percpu_counter.h>
24#endif
25#include <linux/rbtree.h>
26
27/*
28 * third extended-fs super-block data in memory
29 */
30struct ext3_sb_info {
31 unsigned long s_frag_size; /* Size of a fragment in bytes */
32 unsigned long s_frags_per_block;/* Number of fragments per block */
33 unsigned long s_inodes_per_block;/* Number of inodes per block */
34 unsigned long s_frags_per_group;/* Number of fragments in a group */
35 unsigned long s_blocks_per_group;/* Number of blocks in a group */
36 unsigned long s_inodes_per_group;/* Number of inodes in a group */
37 unsigned long s_itb_per_group; /* Number of inode table blocks per group */
38 unsigned long s_gdb_count; /* Number of group descriptor blocks */
39 unsigned long s_desc_per_block; /* Number of group descriptors per block */
40 unsigned long s_groups_count; /* Number of groups in the fs */
41 unsigned long s_overhead_last; /* Last calculated overhead */
42 unsigned long s_blocks_last; /* Last seen block count */
43 struct buffer_head * s_sbh; /* Buffer containing the super block */
44 struct ext3_super_block * s_es; /* Pointer to the super block in the buffer */
45 struct buffer_head ** s_group_desc;
46 unsigned long s_mount_opt;
47 ext3_fsblk_t s_sb_block;
48 uid_t s_resuid;
49 gid_t s_resgid;
50 unsigned short s_mount_state;
51 unsigned short s_pad;
52 int s_addr_per_block_bits;
53 int s_desc_per_block_bits;
54 int s_inode_size;
55 int s_first_ino;
56 spinlock_t s_next_gen_lock;
57 u32 s_next_generation;
58 u32 s_hash_seed[4];
59 int s_def_hash_version;
60 int s_hash_unsigned; /* 3 if hash should be signed, 0 if not */
61 struct percpu_counter s_freeblocks_counter;
62 struct percpu_counter s_freeinodes_counter;
63 struct percpu_counter s_dirs_counter;
64 struct blockgroup_lock *s_blockgroup_lock;
65
66 /* root of the per fs reservation window tree */
67 spinlock_t s_rsv_window_lock;
68 struct rb_root s_rsv_window_root;
69 struct ext3_reserve_window_node s_rsv_window_head;
70
71 /* Journaling */
72 struct inode * s_journal_inode;
73 struct journal_s * s_journal;
74 struct list_head s_orphan;
75 struct mutex s_orphan_lock;
76 struct mutex s_resize_lock;
77 unsigned long s_commit_interval;
78 struct block_device *journal_bdev;
79#ifdef CONFIG_QUOTA
80 char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */
81 int s_jquota_fmt; /* Format of quota to use */
82#endif
83};
84
85static inline spinlock_t *
86sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
87{
88 return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
89}
90
91#endif /* _LINUX_EXT3_FS_SB */
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
deleted file mode 100644
index d7b5ddca99c2..000000000000
--- a/include/linux/ext3_jbd.h
+++ /dev/null
@@ -1,229 +0,0 @@
1/*
2 * linux/include/linux/ext3_jbd.h
3 *
4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
5 *
6 * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
7 *
8 * This file is part of the Linux kernel and is made available under
9 * the terms of the GNU General Public License, version 2, or at your
10 * option, any later version, incorporated herein by reference.
11 *
12 * Ext3-specific journaling extensions.
13 */
14
15#ifndef _LINUX_EXT3_JBD_H
16#define _LINUX_EXT3_JBD_H
17
18#include <linux/fs.h>
19#include <linux/jbd.h>
20#include <linux/ext3_fs.h>
21
22#define EXT3_JOURNAL(inode) (EXT3_SB((inode)->i_sb)->s_journal)
23
24/* Define the number of blocks we need to account to a transaction to
25 * modify one block of data.
26 *
27 * We may have to touch one inode, one bitmap buffer, up to three
28 * indirection blocks, the group and superblock summaries, and the data
29 * block to complete the transaction. */
30
31#define EXT3_SINGLEDATA_TRANS_BLOCKS 8U
32
33/* Extended attribute operations touch at most two data buffers,
34 * two bitmap buffers, and two group summaries, in addition to the inode
35 * and the superblock, which are already accounted for. */
36
37#define EXT3_XATTR_TRANS_BLOCKS 6U
38
39/* Define the minimum size for a transaction which modifies data. This
40 * needs to take into account the fact that we may end up modifying two
41 * quota files too (one for the group, one for the user quota). The
42 * superblock only gets updated once, of course, so don't bother
43 * counting that again for the quota updates. */
44
45#define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \
46 EXT3_XATTR_TRANS_BLOCKS - 2 + \
47 EXT3_MAXQUOTAS_TRANS_BLOCKS(sb))
48
49/* Delete operations potentially hit one directory's namespace plus an
50 * entire inode, plus arbitrary amounts of bitmap/indirection data. Be
51 * generous. We can grow the delete transaction later if necessary. */
52
53#define EXT3_DELETE_TRANS_BLOCKS(sb) (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64)
54
55/* Define an arbitrary limit for the amount of data we will anticipate
56 * writing to any given transaction. For unbounded transactions such as
57 * write(2) and truncate(2) we can write more than this, but we always
58 * start off at the maximum transaction size and grow the transaction
59 * optimistically as we go. */
60
61#define EXT3_MAX_TRANS_DATA 64U
62
63/* We break up a large truncate or write transaction once the handle's
64 * buffer credits gets this low, we need either to extend the
65 * transaction or to start a new one. Reserve enough space here for
66 * inode, bitmap, superblock, group and indirection updates for at least
67 * one block, plus two quota updates. Quota allocations are not
68 * needed. */
69
70#define EXT3_RESERVE_TRANS_BLOCKS 12U
71
72#define EXT3_INDEX_EXTRA_TRANS_BLOCKS 8
73
74#ifdef CONFIG_QUOTA
75/* Amount of blocks needed for quota update - we know that the structure was
76 * allocated so we need to update only inode+data */
77#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
78/* Amount of blocks needed for quota insert/delete - we do some block writes
79 * but inode, sb and group updates are done only once */
80#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
81 (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
82#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
83 (EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
84#else
85#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
86#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
87#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
88#endif
89#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
90#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
91#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
92
93int
94ext3_mark_iloc_dirty(handle_t *handle,
95 struct inode *inode,
96 struct ext3_iloc *iloc);
97
98/*
99 * On success, We end up with an outstanding reference count against
100 * iloc->bh. This _must_ be cleaned up later.
101 */
102
103int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
104 struct ext3_iloc *iloc);
105
106int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
107
108/*
109 * Wrapper functions with which ext3 calls into JBD. The intent here is
110 * to allow these to be turned into appropriate stubs so ext3 can control
111 * ext2 filesystems, so ext2+ext3 systems only nee one fs. This work hasn't
112 * been done yet.
113 */
114
115static inline void ext3_journal_release_buffer(handle_t *handle,
116 struct buffer_head *bh)
117{
118 journal_release_buffer(handle, bh);
119}
120
121void ext3_journal_abort_handle(const char *caller, const char *err_fn,
122 struct buffer_head *bh, handle_t *handle, int err);
123
124int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
125 struct buffer_head *bh);
126
127int __ext3_journal_get_write_access(const char *where, handle_t *handle,
128 struct buffer_head *bh);
129
130int __ext3_journal_forget(const char *where, handle_t *handle,
131 struct buffer_head *bh);
132
133int __ext3_journal_revoke(const char *where, handle_t *handle,
134 unsigned long blocknr, struct buffer_head *bh);
135
136int __ext3_journal_get_create_access(const char *where,
137 handle_t *handle, struct buffer_head *bh);
138
139int __ext3_journal_dirty_metadata(const char *where,
140 handle_t *handle, struct buffer_head *bh);
141
142#define ext3_journal_get_undo_access(handle, bh) \
143 __ext3_journal_get_undo_access(__func__, (handle), (bh))
144#define ext3_journal_get_write_access(handle, bh) \
145 __ext3_journal_get_write_access(__func__, (handle), (bh))
146#define ext3_journal_revoke(handle, blocknr, bh) \
147 __ext3_journal_revoke(__func__, (handle), (blocknr), (bh))
148#define ext3_journal_get_create_access(handle, bh) \
149 __ext3_journal_get_create_access(__func__, (handle), (bh))
150#define ext3_journal_dirty_metadata(handle, bh) \
151 __ext3_journal_dirty_metadata(__func__, (handle), (bh))
152#define ext3_journal_forget(handle, bh) \
153 __ext3_journal_forget(__func__, (handle), (bh))
154
155int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
156
157handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks);
158int __ext3_journal_stop(const char *where, handle_t *handle);
159
160static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
161{
162 return ext3_journal_start_sb(inode->i_sb, nblocks);
163}
164
165#define ext3_journal_stop(handle) \
166 __ext3_journal_stop(__func__, (handle))
167
168static inline handle_t *ext3_journal_current_handle(void)
169{
170 return journal_current_handle();
171}
172
173static inline int ext3_journal_extend(handle_t *handle, int nblocks)
174{
175 return journal_extend(handle, nblocks);
176}
177
178static inline int ext3_journal_restart(handle_t *handle, int nblocks)
179{
180 return journal_restart(handle, nblocks);
181}
182
183static inline int ext3_journal_blocks_per_page(struct inode *inode)
184{
185 return journal_blocks_per_page(inode);
186}
187
188static inline int ext3_journal_force_commit(journal_t *journal)
189{
190 return journal_force_commit(journal);
191}
192
193/* super.c */
194int ext3_force_commit(struct super_block *sb);
195
196static inline int ext3_should_journal_data(struct inode *inode)
197{
198 if (!S_ISREG(inode->i_mode))
199 return 1;
200 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
201 return 1;
202 if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
203 return 1;
204 return 0;
205}
206
207static inline int ext3_should_order_data(struct inode *inode)
208{
209 if (!S_ISREG(inode->i_mode))
210 return 0;
211 if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
212 return 0;
213 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
214 return 1;
215 return 0;
216}
217
218static inline int ext3_should_writeback_data(struct inode *inode)
219{
220 if (!S_ISREG(inode->i_mode))
221 return 0;
222 if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
223 return 0;
224 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
225 return 1;
226 return 0;
227}
228
229#endif /* _LINUX_EXT3_JBD_H */
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 82163c4b32c9..158a41eed314 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -21,23 +21,45 @@
21 */ 21 */
22#define NR_OPEN_DEFAULT BITS_PER_LONG 22#define NR_OPEN_DEFAULT BITS_PER_LONG
23 23
24/*
25 * The embedded_fd_set is a small fd_set,
26 * suitable for most tasks (which open <= BITS_PER_LONG files)
27 */
28struct embedded_fd_set {
29 unsigned long fds_bits[1];
30};
31
32struct fdtable { 24struct fdtable {
33 unsigned int max_fds; 25 unsigned int max_fds;
34 struct file __rcu **fd; /* current fd array */ 26 struct file __rcu **fd; /* current fd array */
35 fd_set *close_on_exec; 27 unsigned long *close_on_exec;
36 fd_set *open_fds; 28 unsigned long *open_fds;
37 struct rcu_head rcu; 29 struct rcu_head rcu;
38 struct fdtable *next; 30 struct fdtable *next;
39}; 31};
40 32
33static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
34{
35 __set_bit(fd, fdt->close_on_exec);
36}
37
38static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
39{
40 __clear_bit(fd, fdt->close_on_exec);
41}
42
43static inline bool close_on_exec(int fd, const struct fdtable *fdt)
44{
45 return test_bit(fd, fdt->close_on_exec);
46}
47
48static inline void __set_open_fd(int fd, struct fdtable *fdt)
49{
50 __set_bit(fd, fdt->open_fds);
51}
52
53static inline void __clear_open_fd(int fd, struct fdtable *fdt)
54{
55 __clear_bit(fd, fdt->open_fds);
56}
57
58static inline bool fd_is_open(int fd, const struct fdtable *fdt)
59{
60 return test_bit(fd, fdt->open_fds);
61}
62
41/* 63/*
42 * Open file table structure 64 * Open file table structure
43 */ 65 */
@@ -53,8 +75,8 @@ struct files_struct {
53 */ 75 */
54 spinlock_t file_lock ____cacheline_aligned_in_smp; 76 spinlock_t file_lock ____cacheline_aligned_in_smp;
55 int next_fd; 77 int next_fd;
56 struct embedded_fd_set close_on_exec_init; 78 unsigned long close_on_exec_init[1];
57 struct embedded_fd_set open_fds_init; 79 unsigned long open_fds_init[1];
58 struct file __rcu * fd_array[NR_OPEN_DEFAULT]; 80 struct file __rcu * fd_array[NR_OPEN_DEFAULT];
59}; 81};
60 82
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c437f914d537..135693e79f2b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -92,6 +92,10 @@ struct inodes_stat_t {
92/* File is opened using open(.., 3, ..) and is writeable only for ioctls 92/* File is opened using open(.., 3, ..) and is writeable only for ioctls
93 (specialy hack for floppy.c) */ 93 (specialy hack for floppy.c) */
94#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100) 94#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
95/* 32bit hashes as llseek() offset (for directories) */
96#define FMODE_32BITHASH ((__force fmode_t)0x200)
97/* 64bit hashes as llseek() offset (for directories) */
98#define FMODE_64BITHASH ((__force fmode_t)0x400)
95 99
96/* 100/*
97 * Don't update ctime and mtime. 101 * Don't update ctime and mtime.
diff --git a/arch/arm/mach-mxs/include/mach/dma.h b/include/linux/fsl/mxs-dma.h
index 203d7c4a3e11..203d7c4a3e11 100644
--- a/arch/arm/mach-mxs/include/mach/dma.h
+++ b/include/linux/fsl/mxs-dma.h
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index dd478fc8f9f5..5f3f3be5af09 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -144,12 +144,14 @@ struct event_filter;
144enum trace_reg { 144enum trace_reg {
145 TRACE_REG_REGISTER, 145 TRACE_REG_REGISTER,
146 TRACE_REG_UNREGISTER, 146 TRACE_REG_UNREGISTER,
147#ifdef CONFIG_PERF_EVENTS
147 TRACE_REG_PERF_REGISTER, 148 TRACE_REG_PERF_REGISTER,
148 TRACE_REG_PERF_UNREGISTER, 149 TRACE_REG_PERF_UNREGISTER,
149 TRACE_REG_PERF_OPEN, 150 TRACE_REG_PERF_OPEN,
150 TRACE_REG_PERF_CLOSE, 151 TRACE_REG_PERF_CLOSE,
151 TRACE_REG_PERF_ADD, 152 TRACE_REG_PERF_ADD,
152 TRACE_REG_PERF_DEL, 153 TRACE_REG_PERF_DEL,
154#endif
153}; 155};
154 156
155struct ftrace_event_call; 157struct ftrace_event_call;
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index 004ff33ab38e..a7e977ff4abf 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -6,7 +6,7 @@ struct device;
6struct gpio_keys_button { 6struct gpio_keys_button {
7 /* Configuration parameters */ 7 /* Configuration parameters */
8 unsigned int code; /* input event code (KEY_*, SW_*) */ 8 unsigned int code; /* input event code (KEY_*, SW_*) */
9 int gpio; 9 int gpio; /* -1 if this key does not support gpio */
10 int active_low; 10 int active_low;
11 const char *desc; 11 const char *desc;
12 unsigned int type; /* input event type (EV_KEY, EV_SW, EV_ABS) */ 12 unsigned int type; /* input event type (EV_KEY, EV_SW, EV_ABS) */
@@ -14,6 +14,7 @@ struct gpio_keys_button {
14 int debounce_interval; /* debounce ticks interval in msecs */ 14 int debounce_interval; /* debounce ticks interval in msecs */
15 bool can_disable; 15 bool can_disable;
16 int value; /* axis value for EV_ABS */ 16 int value; /* axis value for EV_ABS */
17 unsigned int irq; /* Irq number in case of interrupt keys */
17}; 18};
18 19
19struct gpio_keys_platform_data { 20struct gpio_keys_platform_data {
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5db52d0ff1d4..645231c373c8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -1,6 +1,8 @@
1#ifndef _LINUX_KERNEL_H 1#ifndef _LINUX_KERNEL_H
2#define _LINUX_KERNEL_H 2#define _LINUX_KERNEL_H
3 3
4#include <linux/sysinfo.h>
5
4/* 6/*
5 * 'kernel.h' contains some often-used function prototypes etc 7 * 'kernel.h' contains some often-used function prototypes etc
6 */ 8 */
@@ -428,16 +430,10 @@ extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
428 * Most likely, you want to use tracing_on/tracing_off. 430 * Most likely, you want to use tracing_on/tracing_off.
429 */ 431 */
430#ifdef CONFIG_RING_BUFFER 432#ifdef CONFIG_RING_BUFFER
431void tracing_on(void);
432void tracing_off(void);
433/* trace_off_permanent stops recording with no way to bring it back */ 433/* trace_off_permanent stops recording with no way to bring it back */
434void tracing_off_permanent(void); 434void tracing_off_permanent(void);
435int tracing_is_on(void);
436#else 435#else
437static inline void tracing_on(void) { }
438static inline void tracing_off(void) { }
439static inline void tracing_off_permanent(void) { } 436static inline void tracing_off_permanent(void) { }
440static inline int tracing_is_on(void) { return 0; }
441#endif 437#endif
442 438
443enum ftrace_dump_mode { 439enum ftrace_dump_mode {
@@ -447,6 +443,10 @@ enum ftrace_dump_mode {
447}; 443};
448 444
449#ifdef CONFIG_TRACING 445#ifdef CONFIG_TRACING
446void tracing_on(void);
447void tracing_off(void);
448int tracing_is_on(void);
449
450extern void tracing_start(void); 450extern void tracing_start(void);
451extern void tracing_stop(void); 451extern void tracing_stop(void);
452extern void ftrace_off_permanent(void); 452extern void ftrace_off_permanent(void);
@@ -531,6 +531,11 @@ static inline void tracing_start(void) { }
531static inline void tracing_stop(void) { } 531static inline void tracing_stop(void) { }
532static inline void ftrace_off_permanent(void) { } 532static inline void ftrace_off_permanent(void) { }
533static inline void trace_dump_stack(void) { } 533static inline void trace_dump_stack(void) { }
534
535static inline void tracing_on(void) { }
536static inline void tracing_off(void) { }
537static inline int tracing_is_on(void) { return 0; }
538
534static inline int 539static inline int
535trace_printk(const char *fmt, ...) 540trace_printk(const char *fmt, ...)
536{ 541{
@@ -698,27 +703,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
698# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD 703# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
699#endif 704#endif
700 705
701struct sysinfo;
702extern int do_sysinfo(struct sysinfo *info); 706extern int do_sysinfo(struct sysinfo *info);
703 707
704#endif /* __KERNEL__ */ 708#endif /* __KERNEL__ */
705 709
706#define SI_LOAD_SHIFT 16
707struct sysinfo {
708 long uptime; /* Seconds since boot */
709 unsigned long loads[3]; /* 1, 5, and 15 minute load averages */
710 unsigned long totalram; /* Total usable main memory size */
711 unsigned long freeram; /* Available memory size */
712 unsigned long sharedram; /* Amount of shared memory */
713 unsigned long bufferram; /* Memory used by buffers */
714 unsigned long totalswap; /* Total swap space size */
715 unsigned long freeswap; /* swap space still available */
716 unsigned short procs; /* Number of current processes */
717 unsigned short pad; /* explicit padding for m68k */
718 unsigned long totalhigh; /* Total high memory size */
719 unsigned long freehigh; /* Available high memory size */
720 unsigned int mem_unit; /* Memory unit size in bytes */
721 char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */
722};
723
724#endif 710#endif
diff --git a/include/linux/lp8727.h b/include/linux/lp8727.h
index d21fa2865bf4..ea98c6133d32 100644
--- a/include/linux/lp8727.h
+++ b/include/linux/lp8727.h
@@ -1,4 +1,7 @@
1/* 1/*
2 * LP8727 Micro/Mini USB IC with integrated charger
3 *
4 * Copyright (C) 2011 Texas Instruments
2 * Copyright (C) 2011 National Semiconductor 5 * Copyright (C) 2011 National Semiconductor
3 * 6 *
4 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -32,13 +35,24 @@ enum lp8727_ichg {
32 ICHG_1000mA, 35 ICHG_1000mA,
33}; 36};
34 37
38/**
39 * struct lp8727_chg_param
40 * @eoc_level : end of charge level setting
41 * @ichg : charging current
42 */
35struct lp8727_chg_param { 43struct lp8727_chg_param {
36 /* end of charge level setting */
37 enum lp8727_eoc_level eoc_level; 44 enum lp8727_eoc_level eoc_level;
38 /* charging current */
39 enum lp8727_ichg ichg; 45 enum lp8727_ichg ichg;
40}; 46};
41 47
48/**
49 * struct lp8727_platform_data
50 * @get_batt_present : check battery status - exists or not
51 * @get_batt_level : get battery voltage (mV)
52 * @get_batt_capacity : get battery capacity (%)
53 * @get_batt_temp : get battery temperature
54 * @ac, @usb : charging parameters each charger type
55 */
42struct lp8727_platform_data { 56struct lp8727_platform_data {
43 u8 (*get_batt_present)(void); 57 u8 (*get_batt_present)(void);
44 u16 (*get_batt_level)(void); 58 u16 (*get_batt_level)(void);
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 5fa697477b71..ee96cd51d8b2 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -146,6 +146,279 @@ struct abx500_init_settings {
146 u8 setting; 146 u8 setting;
147}; 147};
148 148
149/* Battery driver related data */
150/*
151 * ADC for the battery thermistor.
152 * When using the ABx500_ADC_THERM_BATCTRL the battery ID resistor is combined
153 * with a NTC resistor to both identify the battery and to measure its
154 * temperature. Different phone manufactures uses different techniques to both
155 * identify the battery and to read its temperature.
156 */
157enum abx500_adc_therm {
158 ABx500_ADC_THERM_BATCTRL,
159 ABx500_ADC_THERM_BATTEMP,
160};
161
162/**
163 * struct abx500_res_to_temp - defines one point in a temp to res curve. To
164 * be used in battery packs that combines the identification resistor with a
165 * NTC resistor.
166 * @temp: battery pack temperature in Celcius
167 * @resist: NTC resistor net total resistance
168 */
169struct abx500_res_to_temp {
170 int temp;
171 int resist;
172};
173
174/**
175 * struct abx500_v_to_cap - Table for translating voltage to capacity
176 * @voltage: Voltage in mV
177 * @capacity: Capacity in percent
178 */
179struct abx500_v_to_cap {
180 int voltage;
181 int capacity;
182};
183
184/* Forward declaration */
185struct abx500_fg;
186
187/**
188 * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds
189 * if not specified
190 * @recovery_sleep_timer: Time between measurements while recovering
191 * @recovery_total_time: Total recovery time
192 * @init_timer: Measurement interval during startup
193 * @init_discard_time: Time we discard voltage measurement at startup
194 * @init_total_time: Total init time during startup
195 * @high_curr_time: Time current has to be high to go to recovery
196 * @accu_charging: FG accumulation time while charging
197 * @accu_high_curr: FG accumulation time in high current mode
198 * @high_curr_threshold: High current threshold, in mA
199 * @lowbat_threshold: Low battery threshold, in mV
200 * @overbat_threshold: Over battery threshold, in mV
201 * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0
202 * Resolution in 50 mV step.
203 * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1
204 * Resolution in 50 mV step.
205 * @user_cap_limit Capacity reported from user must be within this
206 * limit to be considered as sane, in percentage
207 * points.
208 * @maint_thres This is the threshold where we stop reporting
209 * battery full while in maintenance, in per cent
210 */
211struct abx500_fg_parameters {
212 int recovery_sleep_timer;
213 int recovery_total_time;
214 int init_timer;
215 int init_discard_time;
216 int init_total_time;
217 int high_curr_time;
218 int accu_charging;
219 int accu_high_curr;
220 int high_curr_threshold;
221 int lowbat_threshold;
222 int overbat_threshold;
223 int battok_falling_th_sel0;
224 int battok_raising_th_sel1;
225 int user_cap_limit;
226 int maint_thres;
227};
228
229/**
230 * struct abx500_charger_maximization - struct used by the board config.
231 * @use_maxi: Enable maximization for this battery type
232 * @maxi_chg_curr: Maximum charger current allowed
233 * @maxi_wait_cycles: cycles to wait before setting charger current
234 * @charger_curr_step delta between two charger current settings (mA)
235 */
236struct abx500_maxim_parameters {
237 bool ena_maxi;
238 int chg_curr;
239 int wait_cycles;
240 int charger_curr_step;
241};
242
243/**
244 * struct abx500_battery_type - different batteries supported
245 * @name: battery technology
246 * @resis_high: battery upper resistance limit
247 * @resis_low: battery lower resistance limit
248 * @charge_full_design: Maximum battery capacity in mAh
249 * @nominal_voltage: Nominal voltage of the battery in mV
250 * @termination_vol: max voltage upto which battery can be charged
251 * @termination_curr battery charging termination current in mA
252 * @recharge_vol battery voltage limit that will trigger a new
253 * full charging cycle in the case where maintenan-
254 * -ce charging has been disabled
255 * @normal_cur_lvl: charger current in normal state in mA
256 * @normal_vol_lvl: charger voltage in normal state in mV
257 * @maint_a_cur_lvl: charger current in maintenance A state in mA
258 * @maint_a_vol_lvl: charger voltage in maintenance A state in mV
259 * @maint_a_chg_timer_h: charge time in maintenance A state
260 * @maint_b_cur_lvl: charger current in maintenance B state in mA
261 * @maint_b_vol_lvl: charger voltage in maintenance B state in mV
262 * @maint_b_chg_timer_h: charge time in maintenance B state
263 * @low_high_cur_lvl: charger current in temp low/high state in mA
264 * @low_high_vol_lvl: charger voltage in temp low/high state in mV'
265 * @battery_resistance: battery inner resistance in mOhm.
266 * @n_r_t_tbl_elements: number of elements in r_to_t_tbl
267 * @r_to_t_tbl: table containing resistance to temp points
268 * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl
269 * @v_to_cap_tbl: Voltage to capacity (in %) table
270 * @n_batres_tbl_elements number of elements in the batres_tbl
271 * @batres_tbl battery internal resistance vs temperature table
272 */
273struct abx500_battery_type {
274 int name;
275 int resis_high;
276 int resis_low;
277 int charge_full_design;
278 int nominal_voltage;
279 int termination_vol;
280 int termination_curr;
281 int recharge_vol;
282 int normal_cur_lvl;
283 int normal_vol_lvl;
284 int maint_a_cur_lvl;
285 int maint_a_vol_lvl;
286 int maint_a_chg_timer_h;
287 int maint_b_cur_lvl;
288 int maint_b_vol_lvl;
289 int maint_b_chg_timer_h;
290 int low_high_cur_lvl;
291 int low_high_vol_lvl;
292 int battery_resistance;
293 int n_temp_tbl_elements;
294 struct abx500_res_to_temp *r_to_t_tbl;
295 int n_v_cap_tbl_elements;
296 struct abx500_v_to_cap *v_to_cap_tbl;
297 int n_batres_tbl_elements;
298 struct batres_vs_temp *batres_tbl;
299};
300
301/**
302 * struct abx500_bm_capacity_levels - abx500 capacity level data
303 * @critical: critical capacity level in percent
304 * @low: low capacity level in percent
305 * @normal: normal capacity level in percent
306 * @high: high capacity level in percent
307 * @full: full capacity level in percent
308 */
309struct abx500_bm_capacity_levels {
310 int critical;
311 int low;
312 int normal;
313 int high;
314 int full;
315};
316
317/**
318 * struct abx500_bm_charger_parameters - Charger specific parameters
319 * @usb_volt_max: maximum allowed USB charger voltage in mV
320 * @usb_curr_max: maximum allowed USB charger current in mA
321 * @ac_volt_max: maximum allowed AC charger voltage in mV
322 * @ac_curr_max: maximum allowed AC charger current in mA
323 */
324struct abx500_bm_charger_parameters {
325 int usb_volt_max;
326 int usb_curr_max;
327 int ac_volt_max;
328 int ac_curr_max;
329};
330
331/**
332 * struct abx500_bm_data - abx500 battery management data
333 * @temp_under under this temp, charging is stopped
334 * @temp_low between this temp and temp_under charging is reduced
335 * @temp_high between this temp and temp_over charging is reduced
336 * @temp_over over this temp, charging is stopped
337 * @temp_now present battery temperature
338 * @temp_interval_chg temperature measurement interval in s when charging
339 * @temp_interval_nochg temperature measurement interval in s when not charging
340 * @main_safety_tmr_h safety timer for main charger
341 * @usb_safety_tmr_h safety timer for usb charger
342 * @bkup_bat_v voltage which we charge the backup battery with
343 * @bkup_bat_i current which we charge the backup battery with
344 * @no_maintenance indicates that maintenance charging is disabled
345 * @abx500_adc_therm placement of thermistor, batctrl or battemp adc
346 * @chg_unknown_bat flag to enable charging of unknown batteries
347 * @enable_overshoot flag to enable VBAT overshoot control
348 * @auto_trig flag to enable auto adc trigger
349 * @fg_res resistance of FG resistor in 0.1mOhm
350 * @n_btypes number of elements in array bat_type
351 * @batt_id index of the identified battery in array bat_type
352 * @interval_charging charge alg cycle period time when charging (sec)
353 * @interval_not_charging charge alg cycle period time when not charging (sec)
354 * @temp_hysteresis temperature hysteresis
355 * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
356 * @maxi: maximization parameters
357 * @cap_levels capacity in percent for the different capacity levels
358 * @bat_type table of supported battery types
359 * @chg_params charger parameters
360 * @fg_params fuel gauge parameters
361 */
362struct abx500_bm_data {
363 int temp_under;
364 int temp_low;
365 int temp_high;
366 int temp_over;
367 int temp_now;
368 int temp_interval_chg;
369 int temp_interval_nochg;
370 int main_safety_tmr_h;
371 int usb_safety_tmr_h;
372 int bkup_bat_v;
373 int bkup_bat_i;
374 bool no_maintenance;
375 bool chg_unknown_bat;
376 bool enable_overshoot;
377 bool auto_trig;
378 enum abx500_adc_therm adc_therm;
379 int fg_res;
380 int n_btypes;
381 int batt_id;
382 int interval_charging;
383 int interval_not_charging;
384 int temp_hysteresis;
385 int gnd_lift_resistance;
386 const struct abx500_maxim_parameters *maxi;
387 const struct abx500_bm_capacity_levels *cap_levels;
388 const struct abx500_battery_type *bat_type;
389 const struct abx500_bm_charger_parameters *chg_params;
390 const struct abx500_fg_parameters *fg_params;
391};
392
393struct abx500_chargalg_platform_data {
394 char **supplied_to;
395 size_t num_supplicants;
396};
397
398struct abx500_charger_platform_data {
399 char **supplied_to;
400 size_t num_supplicants;
401 bool autopower_cfg;
402};
403
404struct abx500_btemp_platform_data {
405 char **supplied_to;
406 size_t num_supplicants;
407};
408
409struct abx500_fg_platform_data {
410 char **supplied_to;
411 size_t num_supplicants;
412};
413
414struct abx500_bm_plat_data {
415 struct abx500_bm_data *battery;
416 struct abx500_charger_platform_data *charger;
417 struct abx500_btemp_platform_data *btemp;
418 struct abx500_fg_platform_data *fg;
419 struct abx500_chargalg_platform_data *chargalg;
420};
421
149int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg, 422int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
150 u8 value); 423 u8 value);
151int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg, 424int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
new file mode 100644
index 000000000000..44310c98ee6e
--- /dev/null
+++ b/include/linux/mfd/abx500/ab8500-bm.h
@@ -0,0 +1,474 @@
1/*
2 * Copyright ST-Ericsson 2012.
3 *
4 * Author: Arun Murthy <arun.murthy@stericsson.com>
5 * Licensed under GPLv2.
6 */
7
8#ifndef _AB8500_BM_H
9#define _AB8500_BM_H
10
11#include <linux/kernel.h>
12#include <linux/mfd/abx500.h>
13
14/*
15 * System control 2 register offsets.
16 * bank = 0x02
17 */
18#define AB8500_MAIN_WDOG_CTRL_REG 0x01
19#define AB8500_LOW_BAT_REG 0x03
20#define AB8500_BATT_OK_REG 0x04
21/*
22 * USB/ULPI register offsets
23 * Bank : 0x5
24 */
25#define AB8500_USB_LINE_STAT_REG 0x80
26
27/*
28 * Charger / status register offfsets
29 * Bank : 0x0B
30 */
31#define AB8500_CH_STATUS1_REG 0x00
32#define AB8500_CH_STATUS2_REG 0x01
33#define AB8500_CH_USBCH_STAT1_REG 0x02
34#define AB8500_CH_USBCH_STAT2_REG 0x03
35#define AB8500_CH_FSM_STAT_REG 0x04
36#define AB8500_CH_STAT_REG 0x05
37
38/*
39 * Charger / control register offfsets
40 * Bank : 0x0B
41 */
42#define AB8500_CH_VOLT_LVL_REG 0x40
43#define AB8500_CH_VOLT_LVL_MAX_REG 0x41 /*Only in Cut2.0*/
44#define AB8500_CH_OPT_CRNTLVL_REG 0x42
45#define AB8500_CH_OPT_CRNTLVL_MAX_REG 0x43 /*Only in Cut2.0*/
46#define AB8500_CH_WD_TIMER_REG 0x50
47#define AB8500_CHARG_WD_CTRL 0x51
48#define AB8500_BTEMP_HIGH_TH 0x52
49#define AB8500_LED_INDICATOR_PWM_CTRL 0x53
50#define AB8500_LED_INDICATOR_PWM_DUTY 0x54
51#define AB8500_BATT_OVV 0x55
52#define AB8500_CHARGER_CTRL 0x56
53#define AB8500_BAT_CTRL_CURRENT_SOURCE 0x60 /*Only in Cut2.0*/
54
55/*
56 * Charger / main control register offsets
57 * Bank : 0x0B
58 */
59#define AB8500_MCH_CTRL1 0x80
60#define AB8500_MCH_CTRL2 0x81
61#define AB8500_MCH_IPT_CURLVL_REG 0x82
62#define AB8500_CH_WD_REG 0x83
63
64/*
65 * Charger / USB control register offsets
66 * Bank : 0x0B
67 */
68#define AB8500_USBCH_CTRL1_REG 0xC0
69#define AB8500_USBCH_CTRL2_REG 0xC1
70#define AB8500_USBCH_IPT_CRNTLVL_REG 0xC2
71
72/*
73 * Gas Gauge register offsets
74 * Bank : 0x0C
75 */
76#define AB8500_GASG_CC_CTRL_REG 0x00
77#define AB8500_GASG_CC_ACCU1_REG 0x01
78#define AB8500_GASG_CC_ACCU2_REG 0x02
79#define AB8500_GASG_CC_ACCU3_REG 0x03
80#define AB8500_GASG_CC_ACCU4_REG 0x04
81#define AB8500_GASG_CC_SMPL_CNTRL_REG 0x05
82#define AB8500_GASG_CC_SMPL_CNTRH_REG 0x06
83#define AB8500_GASG_CC_SMPL_CNVL_REG 0x07
84#define AB8500_GASG_CC_SMPL_CNVH_REG 0x08
85#define AB8500_GASG_CC_CNTR_AVGOFF_REG 0x09
86#define AB8500_GASG_CC_OFFSET_REG 0x0A
87#define AB8500_GASG_CC_NCOV_ACCU 0x10
88#define AB8500_GASG_CC_NCOV_ACCU_CTRL 0x11
89#define AB8500_GASG_CC_NCOV_ACCU_LOW 0x12
90#define AB8500_GASG_CC_NCOV_ACCU_MED 0x13
91#define AB8500_GASG_CC_NCOV_ACCU_HIGH 0x14
92
93/*
94 * Interrupt register offsets
95 * Bank : 0x0E
96 */
97#define AB8500_IT_SOURCE2_REG 0x01
98#define AB8500_IT_SOURCE21_REG 0x14
99
100/*
101 * RTC register offsets
102 * Bank: 0x0F
103 */
104#define AB8500_RTC_BACKUP_CHG_REG 0x0C
105#define AB8500_RTC_CC_CONF_REG 0x01
106#define AB8500_RTC_CTRL_REG 0x0B
107
108/*
109 * OTP register offsets
110 * Bank : 0x15
111 */
112#define AB8500_OTP_CONF_15 0x0E
113
114/* GPADC constants from AB8500 spec, UM0836 */
115#define ADC_RESOLUTION 1024
116#define ADC_CH_MAIN_MIN 0
117#define ADC_CH_MAIN_MAX 20030
118#define ADC_CH_VBUS_MIN 0
119#define ADC_CH_VBUS_MAX 20030
120#define ADC_CH_VBAT_MIN 2300
121#define ADC_CH_VBAT_MAX 4800
122#define ADC_CH_BKBAT_MIN 0
123#define ADC_CH_BKBAT_MAX 3200
124
125/* Main charge i/p current */
126#define MAIN_CH_IP_CUR_0P9A 0x80
127#define MAIN_CH_IP_CUR_1P0A 0x90
128#define MAIN_CH_IP_CUR_1P1A 0xA0
129#define MAIN_CH_IP_CUR_1P2A 0xB0
130#define MAIN_CH_IP_CUR_1P3A 0xC0
131#define MAIN_CH_IP_CUR_1P4A 0xD0
132#define MAIN_CH_IP_CUR_1P5A 0xE0
133
134/* ChVoltLevel */
135#define CH_VOL_LVL_3P5 0x00
136#define CH_VOL_LVL_4P0 0x14
137#define CH_VOL_LVL_4P05 0x16
138#define CH_VOL_LVL_4P1 0x1B
139#define CH_VOL_LVL_4P15 0x20
140#define CH_VOL_LVL_4P2 0x25
141#define CH_VOL_LVL_4P6 0x4D
142
143/* ChOutputCurrentLevel */
144#define CH_OP_CUR_LVL_0P1 0x00
145#define CH_OP_CUR_LVL_0P2 0x01
146#define CH_OP_CUR_LVL_0P3 0x02
147#define CH_OP_CUR_LVL_0P4 0x03
148#define CH_OP_CUR_LVL_0P5 0x04
149#define CH_OP_CUR_LVL_0P6 0x05
150#define CH_OP_CUR_LVL_0P7 0x06
151#define CH_OP_CUR_LVL_0P8 0x07
152#define CH_OP_CUR_LVL_0P9 0x08
153#define CH_OP_CUR_LVL_1P4 0x0D
154#define CH_OP_CUR_LVL_1P5 0x0E
155#define CH_OP_CUR_LVL_1P6 0x0F
156
157/* BTEMP High thermal limits */
158#define BTEMP_HIGH_TH_57_0 0x00
159#define BTEMP_HIGH_TH_52 0x01
160#define BTEMP_HIGH_TH_57_1 0x02
161#define BTEMP_HIGH_TH_62 0x03
162
163/* current is mA */
164#define USB_0P1A 100
165#define USB_0P2A 200
166#define USB_0P3A 300
167#define USB_0P4A 400
168#define USB_0P5A 500
169
170#define LOW_BAT_3P1V 0x20
171#define LOW_BAT_2P3V 0x00
172#define LOW_BAT_RESET 0x01
173#define LOW_BAT_ENABLE 0x01
174
175/* Backup battery constants */
176#define BUP_ICH_SEL_50UA 0x00
177#define BUP_ICH_SEL_150UA 0x04
178#define BUP_ICH_SEL_300UA 0x08
179#define BUP_ICH_SEL_700UA 0x0C
180
181#define BUP_VCH_SEL_2P5V 0x00
182#define BUP_VCH_SEL_2P6V 0x01
183#define BUP_VCH_SEL_2P8V 0x02
184#define BUP_VCH_SEL_3P1V 0x03
185
186/* Battery OVV constants */
187#define BATT_OVV_ENA 0x02
188#define BATT_OVV_TH_3P7 0x00
189#define BATT_OVV_TH_4P75 0x01
190
191/* A value to indicate over voltage */
192#define BATT_OVV_VALUE 4750
193
194/* VBUS OVV constants */
195#define VBUS_OVV_SELECT_MASK 0x78
196#define VBUS_OVV_SELECT_5P6V 0x00
197#define VBUS_OVV_SELECT_5P7V 0x08
198#define VBUS_OVV_SELECT_5P8V 0x10
199#define VBUS_OVV_SELECT_5P9V 0x18
200#define VBUS_OVV_SELECT_6P0V 0x20
201#define VBUS_OVV_SELECT_6P1V 0x28
202#define VBUS_OVV_SELECT_6P2V 0x30
203#define VBUS_OVV_SELECT_6P3V 0x38
204
205#define VBUS_AUTO_IN_CURR_LIM_ENA 0x04
206
207/* Fuel Gauge constants */
208#define RESET_ACCU 0x02
209#define READ_REQ 0x01
210#define CC_DEEP_SLEEP_ENA 0x02
211#define CC_PWR_UP_ENA 0x01
212#define CC_SAMPLES_40 0x28
213#define RD_NCONV_ACCU_REQ 0x01
214#define CC_CALIB 0x08
215#define CC_INTAVGOFFSET_ENA 0x10
216#define CC_MUXOFFSET 0x80
217#define CC_INT_CAL_N_AVG_MASK 0x60
218#define CC_INT_CAL_SAMPLES_16 0x40
219#define CC_INT_CAL_SAMPLES_8 0x20
220#define CC_INT_CAL_SAMPLES_4 0x00
221
222/* RTC constants */
223#define RTC_BUP_CH_ENA 0x10
224
225/* BatCtrl Current Source Constants */
226#define BAT_CTRL_7U_ENA 0x01
227#define BAT_CTRL_20U_ENA 0x02
228#define BAT_CTRL_CMP_ENA 0x04
229#define FORCE_BAT_CTRL_CMP_HIGH 0x08
230#define BAT_CTRL_PULL_UP_ENA 0x10
231
232/* Battery type */
233#define BATTERY_UNKNOWN 00
234
235/**
236 * struct res_to_temp - defines one point in a temp to res curve. To
237 * be used in battery packs that combines the identification resistor with a
238 * NTC resistor.
239 * @temp: battery pack temperature in Celcius
240 * @resist: NTC resistor net total resistance
241 */
242struct res_to_temp {
243 int temp;
244 int resist;
245};
246
247/**
248 * struct batres_vs_temp - defines one point in a temp vs battery internal
249 * resistance curve.
250 * @temp: battery pack temperature in Celcius
251 * @resist: battery internal reistance in mOhm
252 */
253struct batres_vs_temp {
254 int temp;
255 int resist;
256};
257
258/* Forward declaration */
259struct ab8500_fg;
260
261/**
262 * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
263 * if not specified
264 * @recovery_sleep_timer: Time between measurements while recovering
265 * @recovery_total_time: Total recovery time
266 * @init_timer: Measurement interval during startup
267 * @init_discard_time: Time we discard voltage measurement at startup
268 * @init_total_time: Total init time during startup
269 * @high_curr_time: Time current has to be high to go to recovery
270 * @accu_charging: FG accumulation time while charging
271 * @accu_high_curr: FG accumulation time in high current mode
272 * @high_curr_threshold: High current threshold, in mA
273 * @lowbat_threshold: Low battery threshold, in mV
274 * @battok_falling_th_sel0 Threshold in mV for battOk signal sel0
275 * Resolution in 50 mV step.
276 * @battok_raising_th_sel1 Threshold in mV for battOk signal sel1
277 * Resolution in 50 mV step.
278 * @user_cap_limit Capacity reported from user must be within this
279 * limit to be considered as sane, in percentage
280 * points.
281 * @maint_thres This is the threshold where we stop reporting
282 * battery full while in maintenance, in per cent
283 */
284struct ab8500_fg_parameters {
285 int recovery_sleep_timer;
286 int recovery_total_time;
287 int init_timer;
288 int init_discard_time;
289 int init_total_time;
290 int high_curr_time;
291 int accu_charging;
292 int accu_high_curr;
293 int high_curr_threshold;
294 int lowbat_threshold;
295 int battok_falling_th_sel0;
296 int battok_raising_th_sel1;
297 int user_cap_limit;
298 int maint_thres;
299};
300
301/**
302 * struct ab8500_charger_maximization - struct used by the board config.
303 * @use_maxi: Enable maximization for this battery type
304 * @maxi_chg_curr: Maximum charger current allowed
305 * @maxi_wait_cycles: cycles to wait before setting charger current
306 * @charger_curr_step delta between two charger current settings (mA)
307 */
308struct ab8500_maxim_parameters {
309 bool ena_maxi;
310 int chg_curr;
311 int wait_cycles;
312 int charger_curr_step;
313};
314
315/**
316 * struct ab8500_bm_capacity_levels - ab8500 capacity level data
317 * @critical: critical capacity level in percent
318 * @low: low capacity level in percent
319 * @normal: normal capacity level in percent
320 * @high: high capacity level in percent
321 * @full: full capacity level in percent
322 */
323struct ab8500_bm_capacity_levels {
324 int critical;
325 int low;
326 int normal;
327 int high;
328 int full;
329};
330
331/**
332 * struct ab8500_bm_charger_parameters - Charger specific parameters
333 * @usb_volt_max: maximum allowed USB charger voltage in mV
334 * @usb_curr_max: maximum allowed USB charger current in mA
335 * @ac_volt_max: maximum allowed AC charger voltage in mV
336 * @ac_curr_max: maximum allowed AC charger current in mA
337 */
338struct ab8500_bm_charger_parameters {
339 int usb_volt_max;
340 int usb_curr_max;
341 int ac_volt_max;
342 int ac_curr_max;
343};
344
345/**
346 * struct ab8500_bm_data - ab8500 battery management data
347 * @temp_under under this temp, charging is stopped
348 * @temp_low between this temp and temp_under charging is reduced
349 * @temp_high between this temp and temp_over charging is reduced
350 * @temp_over over this temp, charging is stopped
351 * @temp_interval_chg temperature measurement interval in s when charging
352 * @temp_interval_nochg temperature measurement interval in s when not charging
353 * @main_safety_tmr_h safety timer for main charger
354 * @usb_safety_tmr_h safety timer for usb charger
355 * @bkup_bat_v voltage which we charge the backup battery with
356 * @bkup_bat_i current which we charge the backup battery with
357 * @no_maintenance indicates that maintenance charging is disabled
358 * @adc_therm placement of thermistor, batctrl or battemp adc
359 * @chg_unknown_bat flag to enable charging of unknown batteries
360 * @enable_overshoot flag to enable VBAT overshoot control
361 * @fg_res resistance of FG resistor in 0.1mOhm
362 * @n_btypes number of elements in array bat_type
363 * @batt_id index of the identified battery in array bat_type
364 * @interval_charging charge alg cycle period time when charging (sec)
365 * @interval_not_charging charge alg cycle period time when not charging (sec)
366 * @temp_hysteresis temperature hysteresis
367 * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
368 * @maxi: maximization parameters
369 * @cap_levels capacity in percent for the different capacity levels
370 * @bat_type table of supported battery types
371 * @chg_params charger parameters
372 * @fg_params fuel gauge parameters
373 */
374struct ab8500_bm_data {
375 int temp_under;
376 int temp_low;
377 int temp_high;
378 int temp_over;
379 int temp_interval_chg;
380 int temp_interval_nochg;
381 int main_safety_tmr_h;
382 int usb_safety_tmr_h;
383 int bkup_bat_v;
384 int bkup_bat_i;
385 bool no_maintenance;
386 bool chg_unknown_bat;
387 bool enable_overshoot;
388 enum abx500_adc_therm adc_therm;
389 int fg_res;
390 int n_btypes;
391 int batt_id;
392 int interval_charging;
393 int interval_not_charging;
394 int temp_hysteresis;
395 int gnd_lift_resistance;
396 const struct ab8500_maxim_parameters *maxi;
397 const struct ab8500_bm_capacity_levels *cap_levels;
398 const struct ab8500_bm_charger_parameters *chg_params;
399 const struct ab8500_fg_parameters *fg_params;
400};
401
402struct ab8500_charger_platform_data {
403 char **supplied_to;
404 size_t num_supplicants;
405 bool autopower_cfg;
406};
407
408struct ab8500_btemp_platform_data {
409 char **supplied_to;
410 size_t num_supplicants;
411};
412
413struct ab8500_fg_platform_data {
414 char **supplied_to;
415 size_t num_supplicants;
416};
417
418struct ab8500_chargalg_platform_data {
419 char **supplied_to;
420 size_t num_supplicants;
421};
422struct ab8500_btemp;
423struct ab8500_gpadc;
424struct ab8500_fg;
425#ifdef CONFIG_AB8500_BM
426void ab8500_fg_reinit(void);
427void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
428struct ab8500_btemp *ab8500_btemp_get(void);
429int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp);
430struct ab8500_fg *ab8500_fg_get(void);
431int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev);
432int ab8500_fg_inst_curr_start(struct ab8500_fg *di);
433int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res);
434int ab8500_fg_inst_curr_done(struct ab8500_fg *di);
435
436#else
437int ab8500_fg_inst_curr_done(struct ab8500_fg *di)
438{
439}
440static void ab8500_fg_reinit(void)
441{
442}
443static void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA)
444{
445}
446static struct ab8500_btemp *ab8500_btemp_get(void)
447{
448 return NULL;
449}
450static int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp)
451{
452 return 0;
453}
454struct ab8500_fg *ab8500_fg_get(void)
455{
456 return NULL;
457}
458static int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev)
459{
460 return -ENODEV;
461}
462
463static inline int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
464{
465 return -ENODEV;
466}
467
468static inline int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
469{
470 return -ENODEV;
471}
472
473#endif
474#endif /* _AB8500_BM_H */
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
new file mode 100644
index 000000000000..9b07725750c9
--- /dev/null
+++ b/include/linux/mfd/abx500/ux500_chargalg.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2012
3 * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _UX500_CHARGALG_H
8#define _UX500_CHARGALG_H
9
10#include <linux/power_supply.h>
11
12#define psy_to_ux500_charger(x) container_of((x), \
13 struct ux500_charger, psy)
14
15/* Forward declaration */
16struct ux500_charger;
17
18struct ux500_charger_ops {
19 int (*enable) (struct ux500_charger *, int, int, int);
20 int (*kick_wd) (struct ux500_charger *);
21 int (*update_curr) (struct ux500_charger *, int);
22};
23
24/**
25 * struct ux500_charger - power supply ux500 charger sub class
26 * @psy power supply base class
27 * @ops ux500 charger operations
28 * @max_out_volt maximum output charger voltage in mV
29 * @max_out_curr maximum output charger current in mA
30 */
31struct ux500_charger {
32 struct power_supply psy;
33 struct ux500_charger_ops ops;
34 int max_out_volt;
35 int max_out_curr;
36};
37
38#endif
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index c4eec228eef9..650ef352f045 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -112,6 +112,11 @@ struct nand_bbt_descr {
112#define NAND_BBT_USE_FLASH 0x00020000 112#define NAND_BBT_USE_FLASH 0x00020000
113/* Do not store flash based bad block table in OOB area; store it in-band */ 113/* Do not store flash based bad block table in OOB area; store it in-band */
114#define NAND_BBT_NO_OOB 0x00040000 114#define NAND_BBT_NO_OOB 0x00040000
115/*
116 * Do not write new bad block markers to OOB; useful, e.g., when ECC covers
117 * entire spare area. Must be used with NAND_BBT_USE_FLASH.
118 */
119#define NAND_BBT_NO_OOB_BBM 0x00080000
115 120
116/* 121/*
117 * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr 122 * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index 1bbd9f289245..ed270bd2e4df 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -47,6 +47,7 @@ struct mtd_blktrans_dev {
47 struct request_queue *rq; 47 struct request_queue *rq;
48 spinlock_t queue_lock; 48 spinlock_t queue_lock;
49 void *priv; 49 void *priv;
50 fmode_t file_mode;
50}; 51};
51 52
52struct mtd_blktrans_ops { 53struct mtd_blktrans_ops {
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
index 6987995ad3cf..b20029221fb1 100644
--- a/include/linux/mtd/fsmc.h
+++ b/include/linux/mtd/fsmc.h
@@ -26,95 +26,83 @@
26#define FSMC_NAND_BW8 1 26#define FSMC_NAND_BW8 1
27#define FSMC_NAND_BW16 2 27#define FSMC_NAND_BW16 2
28 28
29/*
30 * The placement of the Command Latch Enable (CLE) and
31 * Address Latch Enable (ALE) is twisted around in the
32 * SPEAR310 implementation.
33 */
34#if defined(CONFIG_MACH_SPEAR310)
35#define PLAT_NAND_CLE (1 << 17)
36#define PLAT_NAND_ALE (1 << 16)
37#else
38#define PLAT_NAND_CLE (1 << 16)
39#define PLAT_NAND_ALE (1 << 17)
40#endif
41
42#define FSMC_MAX_NOR_BANKS 4 29#define FSMC_MAX_NOR_BANKS 4
43#define FSMC_MAX_NAND_BANKS 4 30#define FSMC_MAX_NAND_BANKS 4
44 31
45#define FSMC_FLASH_WIDTH8 1 32#define FSMC_FLASH_WIDTH8 1
46#define FSMC_FLASH_WIDTH16 2 33#define FSMC_FLASH_WIDTH16 2
47 34
48struct fsmc_nor_bank_regs { 35/* fsmc controller registers for NOR flash */
49 uint32_t ctrl; 36#define CTRL 0x0
50 uint32_t ctrl_tim; 37 /* ctrl register definitions */
51}; 38 #define BANK_ENABLE (1 << 0)
52 39 #define MUXED (1 << 1)
53/* ctrl register definitions */ 40 #define NOR_DEV (2 << 2)
54#define BANK_ENABLE (1 << 0) 41 #define WIDTH_8 (0 << 4)
55#define MUXED (1 << 1) 42 #define WIDTH_16 (1 << 4)
56#define NOR_DEV (2 << 2) 43 #define RSTPWRDWN (1 << 6)
57#define WIDTH_8 (0 << 4) 44 #define WPROT (1 << 7)
58#define WIDTH_16 (1 << 4) 45 #define WRT_ENABLE (1 << 12)
59#define RSTPWRDWN (1 << 6) 46 #define WAIT_ENB (1 << 13)
60#define WPROT (1 << 7) 47
61#define WRT_ENABLE (1 << 12) 48#define CTRL_TIM 0x4
62#define WAIT_ENB (1 << 13) 49 /* ctrl_tim register definitions */
63 50
64/* ctrl_tim register definitions */ 51#define FSMC_NOR_BANK_SZ 0x8
65
66struct fsmc_nand_bank_regs {
67 uint32_t pc;
68 uint32_t sts;
69 uint32_t comm;
70 uint32_t attrib;
71 uint32_t ioata;
72 uint32_t ecc1;
73 uint32_t ecc2;
74 uint32_t ecc3;
75};
76
77#define FSMC_NOR_REG_SIZE 0x40 52#define FSMC_NOR_REG_SIZE 0x40
78 53
79struct fsmc_regs { 54#define FSMC_NOR_REG(base, bank, reg) (base + \
80 struct fsmc_nor_bank_regs nor_bank_regs[FSMC_MAX_NOR_BANKS]; 55 FSMC_NOR_BANK_SZ * (bank) + \
81 uint8_t reserved_1[0x40 - 0x20]; 56 reg)
82 struct fsmc_nand_bank_regs bank_regs[FSMC_MAX_NAND_BANKS]; 57
83 uint8_t reserved_2[0xfe0 - 0xc0]; 58/* fsmc controller registers for NAND flash */
84 uint32_t peripid0; /* 0xfe0 */ 59#define PC 0x00
85 uint32_t peripid1; /* 0xfe4 */ 60 /* pc register definitions */
86 uint32_t peripid2; /* 0xfe8 */ 61 #define FSMC_RESET (1 << 0)
87 uint32_t peripid3; /* 0xfec */ 62 #define FSMC_WAITON (1 << 1)
88 uint32_t pcellid0; /* 0xff0 */ 63 #define FSMC_ENABLE (1 << 2)
89 uint32_t pcellid1; /* 0xff4 */ 64 #define FSMC_DEVTYPE_NAND (1 << 3)
90 uint32_t pcellid2; /* 0xff8 */ 65 #define FSMC_DEVWID_8 (0 << 4)
91 uint32_t pcellid3; /* 0xffc */ 66 #define FSMC_DEVWID_16 (1 << 4)
92}; 67 #define FSMC_ECCEN (1 << 6)
68 #define FSMC_ECCPLEN_512 (0 << 7)
69 #define FSMC_ECCPLEN_256 (1 << 7)
70 #define FSMC_TCLR_1 (1)
71 #define FSMC_TCLR_SHIFT (9)
72 #define FSMC_TCLR_MASK (0xF)
73 #define FSMC_TAR_1 (1)
74 #define FSMC_TAR_SHIFT (13)
75 #define FSMC_TAR_MASK (0xF)
76#define STS 0x04
77 /* sts register definitions */
78 #define FSMC_CODE_RDY (1 << 15)
79#define COMM 0x08
80 /* comm register definitions */
81 #define FSMC_TSET_0 0
82 #define FSMC_TSET_SHIFT 0
83 #define FSMC_TSET_MASK 0xFF
84 #define FSMC_TWAIT_6 6
85 #define FSMC_TWAIT_SHIFT 8
86 #define FSMC_TWAIT_MASK 0xFF
87 #define FSMC_THOLD_4 4
88 #define FSMC_THOLD_SHIFT 16
89 #define FSMC_THOLD_MASK 0xFF
90 #define FSMC_THIZ_1 1
91 #define FSMC_THIZ_SHIFT 24
92 #define FSMC_THIZ_MASK 0xFF
93#define ATTRIB 0x0C
94#define IOATA 0x10
95#define ECC1 0x14
96#define ECC2 0x18
97#define ECC3 0x1C
98#define FSMC_NAND_BANK_SZ 0x20
99
100#define FSMC_NAND_REG(base, bank, reg) (base + FSMC_NOR_REG_SIZE + \
101 (FSMC_NAND_BANK_SZ * (bank)) + \
102 reg)
93 103
94#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ) 104#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
95 105
96/* pc register definitions */
97#define FSMC_RESET (1 << 0)
98#define FSMC_WAITON (1 << 1)
99#define FSMC_ENABLE (1 << 2)
100#define FSMC_DEVTYPE_NAND (1 << 3)
101#define FSMC_DEVWID_8 (0 << 4)
102#define FSMC_DEVWID_16 (1 << 4)
103#define FSMC_ECCEN (1 << 6)
104#define FSMC_ECCPLEN_512 (0 << 7)
105#define FSMC_ECCPLEN_256 (1 << 7)
106#define FSMC_TCLR_1 (1 << 9)
107#define FSMC_TAR_1 (1 << 13)
108
109/* sts register definitions */
110#define FSMC_CODE_RDY (1 << 15)
111
112/* comm register definitions */
113#define FSMC_TSET_0 (0 << 0)
114#define FSMC_TWAIT_6 (6 << 8)
115#define FSMC_THOLD_4 (4 << 16)
116#define FSMC_THIZ_1 (1 << 24)
117
118/* 106/*
119 * There are 13 bytes of ecc for every 512 byte block in FSMC version 8 107 * There are 13 bytes of ecc for every 512 byte block in FSMC version 8
120 * and it has to be read consecutively and immediately after the 512 108 * and it has to be read consecutively and immediately after the 512
@@ -133,6 +121,20 @@ struct fsmc_eccplace {
133 struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES]; 121 struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES];
134}; 122};
135 123
124struct fsmc_nand_timings {
125 uint8_t tclr;
126 uint8_t tar;
127 uint8_t thiz;
128 uint8_t thold;
129 uint8_t twait;
130 uint8_t tset;
131};
132
133enum access_mode {
134 USE_DMA_ACCESS = 1,
135 USE_WORD_ACCESS,
136};
137
136/** 138/**
137 * fsmc_nand_platform_data - platform specific NAND controller config 139 * fsmc_nand_platform_data - platform specific NAND controller config
138 * @partitions: partition table for the platform, use a default fallback 140 * @partitions: partition table for the platform, use a default fallback
@@ -146,12 +148,23 @@ struct fsmc_eccplace {
146 * this may be set to NULL 148 * this may be set to NULL
147 */ 149 */
148struct fsmc_nand_platform_data { 150struct fsmc_nand_platform_data {
151 struct fsmc_nand_timings *nand_timings;
149 struct mtd_partition *partitions; 152 struct mtd_partition *partitions;
150 unsigned int nr_partitions; 153 unsigned int nr_partitions;
151 unsigned int options; 154 unsigned int options;
152 unsigned int width; 155 unsigned int width;
153 unsigned int bank; 156 unsigned int bank;
157
158 /* CLE, ALE offsets */
159 unsigned int cle_off;
160 unsigned int ale_off;
161 enum access_mode mode;
162
154 void (*select_bank)(uint32_t bank, uint32_t busw); 163 void (*select_bank)(uint32_t bank, uint32_t busw);
164
165 /* priv structures for dma accesses */
166 void *read_dma_priv;
167 void *write_dma_priv;
155}; 168};
156 169
157extern int __init fsmc_nor_init(struct platform_device *pdev, 170extern int __init fsmc_nor_init(struct platform_device *pdev,
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index d43dc25af82e..cf5ea8cdcf8e 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -164,6 +164,9 @@ struct mtd_info {
164 /* ECC layout structure pointer - read only! */ 164 /* ECC layout structure pointer - read only! */
165 struct nand_ecclayout *ecclayout; 165 struct nand_ecclayout *ecclayout;
166 166
167 /* max number of correctible bit errors per writesize */
168 unsigned int ecc_strength;
169
167 /* Data for variable erase regions. If numeraseregions is zero, 170 /* Data for variable erase regions. If numeraseregions is zero,
168 * it means that the whole device has erasesize as given above. 171 * it means that the whole device has erasesize as given above.
169 */ 172 */
@@ -174,52 +177,52 @@ struct mtd_info {
174 * Do not call via these pointers, use corresponding mtd_*() 177 * Do not call via these pointers, use corresponding mtd_*()
175 * wrappers instead. 178 * wrappers instead.
176 */ 179 */
177 int (*erase) (struct mtd_info *mtd, struct erase_info *instr); 180 int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
178 int (*point) (struct mtd_info *mtd, loff_t from, size_t len, 181 int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
179 size_t *retlen, void **virt, resource_size_t *phys); 182 size_t *retlen, void **virt, resource_size_t *phys);
180 void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len); 183 int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
181 unsigned long (*get_unmapped_area) (struct mtd_info *mtd, 184 unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
182 unsigned long len, 185 unsigned long len,
183 unsigned long offset, 186 unsigned long offset,
184 unsigned long flags); 187 unsigned long flags);
185 int (*read) (struct mtd_info *mtd, loff_t from, size_t len, 188 int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
186 size_t *retlen, u_char *buf); 189 size_t *retlen, u_char *buf);
187 int (*write) (struct mtd_info *mtd, loff_t to, size_t len, 190 int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
188 size_t *retlen, const u_char *buf); 191 size_t *retlen, const u_char *buf);
189 int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, 192 int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
190 size_t *retlen, const u_char *buf); 193 size_t *retlen, const u_char *buf);
191 int (*read_oob) (struct mtd_info *mtd, loff_t from, 194 int (*_read_oob) (struct mtd_info *mtd, loff_t from,
192 struct mtd_oob_ops *ops);
193 int (*write_oob) (struct mtd_info *mtd, loff_t to,
194 struct mtd_oob_ops *ops); 195 struct mtd_oob_ops *ops);
195 int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, 196 int (*_write_oob) (struct mtd_info *mtd, loff_t to,
196 size_t len); 197 struct mtd_oob_ops *ops);
197 int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, 198 int (*_get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
198 size_t len, size_t *retlen, u_char *buf); 199 size_t len);
199 int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, 200 int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
200 size_t len); 201 size_t len, size_t *retlen, u_char *buf);
201 int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from, 202 int (*_get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
202 size_t len, size_t *retlen, u_char *buf); 203 size_t len);
203 int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t to, size_t len, 204 int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
204 size_t *retlen, u_char *buf); 205 size_t len, size_t *retlen, u_char *buf);
205 int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, 206 int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
206 size_t len); 207 size_t len, size_t *retlen, u_char *buf);
207 int (*writev) (struct mtd_info *mtd, const struct kvec *vecs, 208 int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
209 size_t len);
210 int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
208 unsigned long count, loff_t to, size_t *retlen); 211 unsigned long count, loff_t to, size_t *retlen);
209 void (*sync) (struct mtd_info *mtd); 212 void (*_sync) (struct mtd_info *mtd);
210 int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); 213 int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
211 int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); 214 int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
212 int (*is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len); 215 int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
213 int (*block_isbad) (struct mtd_info *mtd, loff_t ofs); 216 int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
214 int (*block_markbad) (struct mtd_info *mtd, loff_t ofs); 217 int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
215 int (*suspend) (struct mtd_info *mtd); 218 int (*_suspend) (struct mtd_info *mtd);
216 void (*resume) (struct mtd_info *mtd); 219 void (*_resume) (struct mtd_info *mtd);
217 /* 220 /*
218 * If the driver is something smart, like UBI, it may need to maintain 221 * If the driver is something smart, like UBI, it may need to maintain
219 * its own reference counting. The below functions are only for driver. 222 * its own reference counting. The below functions are only for driver.
220 */ 223 */
221 int (*get_device) (struct mtd_info *mtd); 224 int (*_get_device) (struct mtd_info *mtd);
222 void (*put_device) (struct mtd_info *mtd); 225 void (*_put_device) (struct mtd_info *mtd);
223 226
224 /* Backing device capabilities for this device 227 /* Backing device capabilities for this device
225 * - provides mmap capabilities 228 * - provides mmap capabilities
@@ -240,214 +243,75 @@ struct mtd_info {
240 int usecount; 243 int usecount;
241}; 244};
242 245
243/* 246int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
244 * Erase is an asynchronous operation. Device drivers are supposed 247int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
245 * to call instr->callback() whenever the operation completes, even 248 void **virt, resource_size_t *phys);
246 * if it completes with a failure. 249int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
247 * Callers are supposed to pass a callback function and wait for it 250unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
248 * to be called before writing to the block. 251 unsigned long offset, unsigned long flags);
249 */ 252int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
250static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) 253 u_char *buf);
251{ 254int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
252 return mtd->erase(mtd, instr); 255 const u_char *buf);
253} 256int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
254 257 const u_char *buf);
255/*
256 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
257 */
258static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len,
259 size_t *retlen, void **virt, resource_size_t *phys)
260{
261 *retlen = 0;
262 if (!mtd->point)
263 return -EOPNOTSUPP;
264 return mtd->point(mtd, from, len, retlen, virt, phys);
265}
266
267/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
268static inline void mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
269{
270 return mtd->unpoint(mtd, from, len);
271}
272
273/*
274 * Allow NOMMU mmap() to directly map the device (if not NULL)
275 * - return the address to which the offset maps
276 * - return -ENOSYS to indicate refusal to do the mapping
277 */
278static inline unsigned long mtd_get_unmapped_area(struct mtd_info *mtd,
279 unsigned long len,
280 unsigned long offset,
281 unsigned long flags)
282{
283 if (!mtd->get_unmapped_area)
284 return -EOPNOTSUPP;
285 return mtd->get_unmapped_area(mtd, len, offset, flags);
286}
287
288static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
289 size_t *retlen, u_char *buf)
290{
291 return mtd->read(mtd, from, len, retlen, buf);
292}
293
294static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
295 size_t *retlen, const u_char *buf)
296{
297 *retlen = 0;
298 if (!mtd->write)
299 return -EROFS;
300 return mtd->write(mtd, to, len, retlen, buf);
301}
302
303/*
304 * In blackbox flight recorder like scenarios we want to make successful writes
305 * in interrupt context. panic_write() is only intended to be called when its
306 * known the kernel is about to panic and we need the write to succeed. Since
307 * the kernel is not going to be running for much longer, this function can
308 * break locks and delay to ensure the write succeeds (but not sleep).
309 */
310static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
311 size_t *retlen, const u_char *buf)
312{
313 *retlen = 0;
314 if (!mtd->panic_write)
315 return -EOPNOTSUPP;
316 return mtd->panic_write(mtd, to, len, retlen, buf);
317}
318 258
319static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from, 259static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from,
320 struct mtd_oob_ops *ops) 260 struct mtd_oob_ops *ops)
321{ 261{
322 ops->retlen = ops->oobretlen = 0; 262 ops->retlen = ops->oobretlen = 0;
323 if (!mtd->read_oob) 263 if (!mtd->_read_oob)
324 return -EOPNOTSUPP; 264 return -EOPNOTSUPP;
325 return mtd->read_oob(mtd, from, ops); 265 return mtd->_read_oob(mtd, from, ops);
326} 266}
327 267
328static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to, 268static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
329 struct mtd_oob_ops *ops) 269 struct mtd_oob_ops *ops)
330{ 270{
331 ops->retlen = ops->oobretlen = 0; 271 ops->retlen = ops->oobretlen = 0;
332 if (!mtd->write_oob) 272 if (!mtd->_write_oob)
333 return -EOPNOTSUPP;
334 return mtd->write_oob(mtd, to, ops);
335}
336
337/*
338 * Method to access the protection register area, present in some flash
339 * devices. The user data is one time programmable but the factory data is read
340 * only.
341 */
342static inline int mtd_get_fact_prot_info(struct mtd_info *mtd,
343 struct otp_info *buf, size_t len)
344{
345 if (!mtd->get_fact_prot_info)
346 return -EOPNOTSUPP; 273 return -EOPNOTSUPP;
347 return mtd->get_fact_prot_info(mtd, buf, len); 274 if (!(mtd->flags & MTD_WRITEABLE))
348} 275 return -EROFS;
349 276 return mtd->_write_oob(mtd, to, ops);
350static inline int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
351 size_t len, size_t *retlen,
352 u_char *buf)
353{
354 *retlen = 0;
355 if (!mtd->read_fact_prot_reg)
356 return -EOPNOTSUPP;
357 return mtd->read_fact_prot_reg(mtd, from, len, retlen, buf);
358}
359
360static inline int mtd_get_user_prot_info(struct mtd_info *mtd,
361 struct otp_info *buf,
362 size_t len)
363{
364 if (!mtd->get_user_prot_info)
365 return -EOPNOTSUPP;
366 return mtd->get_user_prot_info(mtd, buf, len);
367}
368
369static inline int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
370 size_t len, size_t *retlen,
371 u_char *buf)
372{
373 *retlen = 0;
374 if (!mtd->read_user_prot_reg)
375 return -EOPNOTSUPP;
376 return mtd->read_user_prot_reg(mtd, from, len, retlen, buf);
377}
378
379static inline int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to,
380 size_t len, size_t *retlen,
381 u_char *buf)
382{
383 *retlen = 0;
384 if (!mtd->write_user_prot_reg)
385 return -EOPNOTSUPP;
386 return mtd->write_user_prot_reg(mtd, to, len, retlen, buf);
387} 277}
388 278
389static inline int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, 279int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
390 size_t len) 280 size_t len);
391{ 281int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
392 if (!mtd->lock_user_prot_reg) 282 size_t *retlen, u_char *buf);
393 return -EOPNOTSUPP; 283int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
394 return mtd->lock_user_prot_reg(mtd, from, len); 284 size_t len);
395} 285int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
286 size_t *retlen, u_char *buf);
287int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
288 size_t *retlen, u_char *buf);
289int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);
396 290
397int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, 291int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
398 unsigned long count, loff_t to, size_t *retlen); 292 unsigned long count, loff_t to, size_t *retlen);
399 293
400static inline void mtd_sync(struct mtd_info *mtd) 294static inline void mtd_sync(struct mtd_info *mtd)
401{ 295{
402 if (mtd->sync) 296 if (mtd->_sync)
403 mtd->sync(mtd); 297 mtd->_sync(mtd);
404}
405
406/* Chip-supported device locking */
407static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
408{
409 if (!mtd->lock)
410 return -EOPNOTSUPP;
411 return mtd->lock(mtd, ofs, len);
412} 298}
413 299
414static inline int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 300int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
415{ 301int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
416 if (!mtd->unlock) 302int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
417 return -EOPNOTSUPP; 303int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
418 return mtd->unlock(mtd, ofs, len); 304int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
419}
420
421static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
422{
423 if (!mtd->is_locked)
424 return -EOPNOTSUPP;
425 return mtd->is_locked(mtd, ofs, len);
426}
427 305
428static inline int mtd_suspend(struct mtd_info *mtd) 306static inline int mtd_suspend(struct mtd_info *mtd)
429{ 307{
430 return mtd->suspend ? mtd->suspend(mtd) : 0; 308 return mtd->_suspend ? mtd->_suspend(mtd) : 0;
431} 309}
432 310
433static inline void mtd_resume(struct mtd_info *mtd) 311static inline void mtd_resume(struct mtd_info *mtd)
434{ 312{
435 if (mtd->resume) 313 if (mtd->_resume)
436 mtd->resume(mtd); 314 mtd->_resume(mtd);
437}
438
439static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
440{
441 if (!mtd->block_isbad)
442 return 0;
443 return mtd->block_isbad(mtd, ofs);
444}
445
446static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
447{
448 if (!mtd->block_markbad)
449 return -EOPNOTSUPP;
450 return mtd->block_markbad(mtd, ofs);
451} 315}
452 316
453static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) 317static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
@@ -482,12 +346,12 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
482 346
483static inline int mtd_has_oob(const struct mtd_info *mtd) 347static inline int mtd_has_oob(const struct mtd_info *mtd)
484{ 348{
485 return mtd->read_oob && mtd->write_oob; 349 return mtd->_read_oob && mtd->_write_oob;
486} 350}
487 351
488static inline int mtd_can_have_bb(const struct mtd_info *mtd) 352static inline int mtd_can_have_bb(const struct mtd_info *mtd)
489{ 353{
490 return !!mtd->block_isbad; 354 return !!mtd->_block_isbad;
491} 355}
492 356
493 /* Kernel-side ioctl definitions */ 357 /* Kernel-side ioctl definitions */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 63b5a8b6dfbd..1482340d3d9f 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -324,6 +324,7 @@ struct nand_hw_control {
324 * @steps: number of ECC steps per page 324 * @steps: number of ECC steps per page
325 * @size: data bytes per ECC step 325 * @size: data bytes per ECC step
326 * @bytes: ECC bytes per step 326 * @bytes: ECC bytes per step
327 * @strength: max number of correctible bits per ECC step
327 * @total: total number of ECC bytes per page 328 * @total: total number of ECC bytes per page
328 * @prepad: padding information for syndrome based ECC generators 329 * @prepad: padding information for syndrome based ECC generators
329 * @postpad: padding information for syndrome based ECC generators 330 * @postpad: padding information for syndrome based ECC generators
@@ -351,6 +352,7 @@ struct nand_ecc_ctrl {
351 int size; 352 int size;
352 int bytes; 353 int bytes;
353 int total; 354 int total;
355 int strength;
354 int prepad; 356 int prepad;
355 int postpad; 357 int postpad;
356 struct nand_ecclayout *layout; 358 struct nand_ecclayout *layout;
@@ -448,8 +450,9 @@ struct nand_buffers {
448 * will be copied to the appropriate nand_bbt_descr's. 450 * will be copied to the appropriate nand_bbt_descr's.
449 * @badblockpos: [INTERN] position of the bad block marker in the oob 451 * @badblockpos: [INTERN] position of the bad block marker in the oob
450 * area. 452 * area.
451 * @badblockbits: [INTERN] number of bits to left-shift the bad block 453 * @badblockbits: [INTERN] minimum number of set bits in a good block's
452 * number 454 * bad block marker position; i.e., BBM == 11110111b is
455 * not bad when badblockbits == 7
453 * @cellinfo: [INTERN] MLC/multichip data from chip ident 456 * @cellinfo: [INTERN] MLC/multichip data from chip ident
454 * @numchips: [INTERN] number of physical chips 457 * @numchips: [INTERN] number of physical chips
455 * @chipsize: [INTERN] the size of one chip for multichip arrays 458 * @chipsize: [INTERN] the size of one chip for multichip arrays
diff --git a/include/linux/mtd/pmc551.h b/include/linux/mtd/pmc551.h
deleted file mode 100644
index 27ad40aed19f..000000000000
--- a/include/linux/mtd/pmc551.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/*
2 * PMC551 PCI Mezzanine Ram Device
3 *
4 * Author:
5 * Mark Ferrell
6 * Copyright 1999,2000 Nortel Networks
7 *
8 * License:
9 * As part of this driver was derrived from the slram.c driver it falls
10 * under the same license, which is GNU General Public License v2
11 */
12
13#ifndef __MTD_PMC551_H__
14#define __MTD_PMC551_H__
15
16#include <linux/mtd/mtd.h>
17
18#define PMC551_VERSION \
19 "Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n"
20
21/*
22 * Our personal and private information
23 */
24struct mypriv {
25 struct pci_dev *dev;
26 u_char *start;
27 u32 base_map0;
28 u32 curr_map0;
29 u32 asize;
30 struct mtd_info *nextpmc551;
31};
32
33/*
34 * Function Prototypes
35 */
36static int pmc551_erase(struct mtd_info *, struct erase_info *);
37static void pmc551_unpoint(struct mtd_info *, loff_t, size_t);
38static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len,
39 size_t *retlen, void **virt, resource_size_t *phys);
40static int pmc551_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
41static int pmc551_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
42
43
44/*
45 * Define the PCI ID's if the kernel doesn't define them for us
46 */
47#ifndef PCI_VENDOR_ID_V3_SEMI
48#define PCI_VENDOR_ID_V3_SEMI 0x11b0
49#endif
50
51#ifndef PCI_DEVICE_ID_V3_SEMI_V370PDC
52#define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200
53#endif
54
55
56#define PMC551_PCI_MEM_MAP0 0x50
57#define PMC551_PCI_MEM_MAP1 0x54
58#define PMC551_PCI_MEM_MAP_MAP_ADDR_MASK 0x3ff00000
59#define PMC551_PCI_MEM_MAP_APERTURE_MASK 0x000000f0
60#define PMC551_PCI_MEM_MAP_REG_EN 0x00000002
61#define PMC551_PCI_MEM_MAP_ENABLE 0x00000001
62
63#define PMC551_SDRAM_MA 0x60
64#define PMC551_SDRAM_CMD 0x62
65#define PMC551_DRAM_CFG 0x64
66#define PMC551_SYS_CTRL_REG 0x78
67
68#define PMC551_DRAM_BLK0 0x68
69#define PMC551_DRAM_BLK1 0x6c
70#define PMC551_DRAM_BLK2 0x70
71#define PMC551_DRAM_BLK3 0x74
72#define PMC551_DRAM_BLK_GET_SIZE(x) (524288<<((x>>4)&0x0f))
73#define PMC551_DRAM_BLK_SET_COL_MUX(x,v) (((x) & ~0x00007000) | (((v) & 0x7) << 12))
74#define PMC551_DRAM_BLK_SET_ROW_MUX(x,v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8))
75
76
77#endif /* __MTD_PMC551_H__ */
78
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index 9cf4c4c79555..a38e1fa8af01 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -23,6 +23,7 @@
23#include <linux/mtd/mtd.h> 23#include <linux/mtd/mtd.h>
24#include <linux/mtd/nand.h> 24#include <linux/mtd/nand.h>
25#include <linux/mtd/partitions.h> 25#include <linux/mtd/partitions.h>
26#include <linux/pm_qos.h>
26 27
27/* FLCTL registers */ 28/* FLCTL registers */
28#define FLCMNCR(f) (f->reg + 0x0) 29#define FLCMNCR(f) (f->reg + 0x0)
@@ -38,6 +39,7 @@
38#define FLDTFIFO(f) (f->reg + 0x24) 39#define FLDTFIFO(f) (f->reg + 0x24)
39#define FLECFIFO(f) (f->reg + 0x28) 40#define FLECFIFO(f) (f->reg + 0x28)
40#define FLTRCR(f) (f->reg + 0x2C) 41#define FLTRCR(f) (f->reg + 0x2C)
42#define FLHOLDCR(f) (f->reg + 0x38)
41#define FL4ECCRESULT0(f) (f->reg + 0x80) 43#define FL4ECCRESULT0(f) (f->reg + 0x80)
42#define FL4ECCRESULT1(f) (f->reg + 0x84) 44#define FL4ECCRESULT1(f) (f->reg + 0x84)
43#define FL4ECCRESULT2(f) (f->reg + 0x88) 45#define FL4ECCRESULT2(f) (f->reg + 0x88)
@@ -67,6 +69,30 @@
67#define CE0_ENABLE (0x1 << 3) /* Chip Enable 0 */ 69#define CE0_ENABLE (0x1 << 3) /* Chip Enable 0 */
68#define TYPESEL_SET (0x1 << 0) 70#define TYPESEL_SET (0x1 << 0)
69 71
72/*
73 * Clock settings using the PULSEx registers from FLCMNCR
74 *
75 * Some hardware uses bits called PULSEx instead of FCKSEL_E and QTSEL_E
76 * to control the clock divider used between the High-Speed Peripheral Clock
77 * and the FLCTL internal clock. If so, use CLK_8_BIT_xxx for connecting 8 bit
78 * and CLK_16_BIT_xxx for connecting 16 bit bus bandwith NAND chips. For the 16
79 * bit version the divider is seperate for the pulse width of high and low
80 * signals.
81 */
82#define PULSE3 (0x1 << 27)
83#define PULSE2 (0x1 << 17)
84#define PULSE1 (0x1 << 15)
85#define PULSE0 (0x1 << 9)
86#define CLK_8B_0_5 PULSE1
87#define CLK_8B_1 0x0
88#define CLK_8B_1_5 (PULSE1 | PULSE2)
89#define CLK_8B_2 PULSE0
90#define CLK_8B_3 (PULSE0 | PULSE1 | PULSE2)
91#define CLK_8B_4 (PULSE0 | PULSE2)
92#define CLK_16B_6L_2H PULSE0
93#define CLK_16B_9L_3H (PULSE0 | PULSE1 | PULSE2)
94#define CLK_16B_12L_4H (PULSE0 | PULSE2)
95
70/* FLCMDCR control bits */ 96/* FLCMDCR control bits */
71#define ADRCNT2_E (0x1 << 31) /* 5byte address enable */ 97#define ADRCNT2_E (0x1 << 31) /* 5byte address enable */
72#define ADRMD_E (0x1 << 26) /* Sector address access */ 98#define ADRMD_E (0x1 << 26) /* Sector address access */
@@ -85,6 +111,15 @@
85#define TRSTRT (0x1 << 0) /* translation start */ 111#define TRSTRT (0x1 << 0) /* translation start */
86#define TREND (0x1 << 1) /* translation end */ 112#define TREND (0x1 << 1) /* translation end */
87 113
114/*
115 * FLHOLDCR control bits
116 *
117 * HOLDEN: Bus Occupancy Enable (inverted)
118 * Enable this bit when the external bus might be used in between transfers.
119 * If not set and the bus gets used by other modules, a deadlock occurs.
120 */
121#define HOLDEN (0x1 << 0)
122
88/* FL4ECCCR control bits */ 123/* FL4ECCCR control bits */
89#define _4ECCFA (0x1 << 2) /* 4 symbols correct fault */ 124#define _4ECCFA (0x1 << 2) /* 4 symbols correct fault */
90#define _4ECCEND (0x1 << 1) /* 4 symbols end */ 125#define _4ECCEND (0x1 << 1) /* 4 symbols end */
@@ -97,6 +132,7 @@ struct sh_flctl {
97 struct mtd_info mtd; 132 struct mtd_info mtd;
98 struct nand_chip chip; 133 struct nand_chip chip;
99 struct platform_device *pdev; 134 struct platform_device *pdev;
135 struct dev_pm_qos_request pm_qos;
100 void __iomem *reg; 136 void __iomem *reg;
101 137
102 uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */ 138 uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */
@@ -108,11 +144,14 @@ struct sh_flctl {
108 int erase1_page_addr; /* page_addr in ERASE1 cmd */ 144 int erase1_page_addr; /* page_addr in ERASE1 cmd */
109 uint32_t erase_ADRCNT; /* bits of FLCMDCR in ERASE1 cmd */ 145 uint32_t erase_ADRCNT; /* bits of FLCMDCR in ERASE1 cmd */
110 uint32_t rw_ADRCNT; /* bits of FLCMDCR in READ WRITE cmd */ 146 uint32_t rw_ADRCNT; /* bits of FLCMDCR in READ WRITE cmd */
147 uint32_t flcmncr_base; /* base value of FLCMNCR */
111 148
112 int hwecc_cant_correct[4]; 149 int hwecc_cant_correct[4];
113 150
114 unsigned page_size:1; /* NAND page size (0 = 512, 1 = 2048) */ 151 unsigned page_size:1; /* NAND page size (0 = 512, 1 = 2048) */
115 unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */ 152 unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */
153 unsigned holden:1; /* Hardware has FLHOLDCR and HOLDEN is set */
154 unsigned qos_request:1; /* QoS request to prevent deep power shutdown */
116}; 155};
117 156
118struct sh_flctl_platform_data { 157struct sh_flctl_platform_data {
@@ -121,6 +160,7 @@ struct sh_flctl_platform_data {
121 unsigned long flcmncr_val; 160 unsigned long flcmncr_val;
122 161
123 unsigned has_hwecc:1; 162 unsigned has_hwecc:1;
163 unsigned use_holden:1;
124}; 164};
125 165
126static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo) 166static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
diff --git a/include/linux/mtd/spear_smi.h b/include/linux/mtd/spear_smi.h
new file mode 100644
index 000000000000..8ae1726044c3
--- /dev/null
+++ b/include/linux/mtd/spear_smi.h
@@ -0,0 +1,65 @@
1/*
2 * Copyright © 2010 ST Microelectronics
3 * Shiraz Hashim <shiraz.hashim@st.com>
4 *
5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any
7 * warranty of any kind, whether express or implied.
8 */
9
10#ifndef __MTD_SPEAR_SMI_H
11#define __MTD_SPEAR_SMI_H
12
13#include <linux/types.h>
14#include <linux/mtd/mtd.h>
15#include <linux/mtd/partitions.h>
16#include <linux/platform_device.h>
17#include <linux/of.h>
18
19/* max possible slots for serial-nor flash chip in the SMI controller */
20#define MAX_NUM_FLASH_CHIP 4
21
22/* macro to define partitions for flash devices */
23#define DEFINE_PARTS(n, of, s) \
24{ \
25 .name = n, \
26 .offset = of, \
27 .size = s, \
28}
29
30/**
31 * struct spear_smi_flash_info - platform structure for passing flash
32 * information
33 *
34 * name: name of the serial nor flash for identification
35 * mem_base: the memory base on which the flash is mapped
36 * size: size of the flash in bytes
37 * partitions: parition details
38 * nr_partitions: number of partitions
39 * fast_mode: whether flash supports fast mode
40 */
41
42struct spear_smi_flash_info {
43 char *name;
44 unsigned long mem_base;
45 unsigned long size;
46 struct mtd_partition *partitions;
47 int nr_partitions;
48 u8 fast_mode;
49};
50
51/**
52 * struct spear_smi_plat_data - platform structure for configuring smi
53 *
54 * clk_rate: clk rate at which SMI must operate
55 * num_flashes: number of flashes present on board
56 * board_flash_info: specific details of each flash present on board
57 */
58struct spear_smi_plat_data {
59 unsigned long clk_rate;
60 int num_flashes;
61 struct spear_smi_flash_info *board_flash_info;
62 struct device_node *np[MAX_NUM_FLASH_CHIP];
63};
64
65#endif /* __MTD_SPEAR_SMI_H */
diff --git a/include/linux/mtio.h b/include/linux/mtio.h
index 8f825756c459..18543e2db06f 100644
--- a/include/linux/mtio.h
+++ b/include/linux/mtio.h
@@ -194,6 +194,7 @@ struct mtpos {
194#define MT_ST_SYSV 0x1000 194#define MT_ST_SYSV 0x1000
195#define MT_ST_NOWAIT 0x2000 195#define MT_ST_NOWAIT 0x2000
196#define MT_ST_SILI 0x4000 196#define MT_ST_SILI 0x4000
197#define MT_ST_NOWAIT_EOF 0x8000
197 198
198/* The mode parameters to be controlled. Parameter chosen with bits 20-28 */ 199/* The mode parameters to be controlled. Parameter chosen with bits 20-28 */
199#define MT_ST_CLEAR_DEFAULT 0xfffff 200#define MT_ST_CLEAR_DEFAULT 0xfffff
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 834df8bf08b6..0987146b0637 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -438,7 +438,20 @@ enum limit_by4 {
438enum open_delegation_type4 { 438enum open_delegation_type4 {
439 NFS4_OPEN_DELEGATE_NONE = 0, 439 NFS4_OPEN_DELEGATE_NONE = 0,
440 NFS4_OPEN_DELEGATE_READ = 1, 440 NFS4_OPEN_DELEGATE_READ = 1,
441 NFS4_OPEN_DELEGATE_WRITE = 2 441 NFS4_OPEN_DELEGATE_WRITE = 2,
442 NFS4_OPEN_DELEGATE_NONE_EXT = 3, /* 4.1 */
443};
444
445enum why_no_delegation4 { /* new to v4.1 */
446 WND4_NOT_WANTED = 0,
447 WND4_CONTENTION = 1,
448 WND4_RESOURCE = 2,
449 WND4_NOT_SUPP_FTYPE = 3,
450 WND4_WRITE_DELEG_NOT_SUPP_FTYPE = 4,
451 WND4_NOT_SUPP_UPGRADE = 5,
452 WND4_NOT_SUPP_DOWNGRADE = 6,
453 WND4_CANCELLED = 7,
454 WND4_IS_DIR = 8,
442}; 455};
443 456
444enum lock_type4 { 457enum lock_type4 {
diff --git a/include/linux/nfsd/cld.h b/include/linux/nfsd/cld.h
new file mode 100644
index 000000000000..f14a9ab06f1f
--- /dev/null
+++ b/include/linux/nfsd/cld.h
@@ -0,0 +1,56 @@
1/*
2 * Upcall description for nfsdcld communication
3 *
4 * Copyright (c) 2012 Red Hat, Inc.
5 * Author(s): Jeff Layton <jlayton@redhat.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#ifndef _NFSD_CLD_H
23#define _NFSD_CLD_H
24
25/* latest upcall version available */
26#define CLD_UPCALL_VERSION 1
27
28/* defined by RFC3530 */
29#define NFS4_OPAQUE_LIMIT 1024
30
31enum cld_command {
32 Cld_Create, /* create a record for this cm_id */
33 Cld_Remove, /* remove record of this cm_id */
34 Cld_Check, /* is this cm_id allowed? */
35 Cld_GraceDone, /* grace period is complete */
36};
37
38/* representation of long-form NFSv4 client ID */
39struct cld_name {
40 uint16_t cn_len; /* length of cm_id */
41 unsigned char cn_id[NFS4_OPAQUE_LIMIT]; /* client-provided */
42} __attribute__((packed));
43
44/* message struct for communication with userspace */
45struct cld_msg {
46 uint8_t cm_vers; /* upcall version */
47 uint8_t cm_cmd; /* upcall command */
48 int16_t cm_status; /* return code */
49 uint32_t cm_xid; /* transaction id */
50 union {
51 int64_t cm_gracetime; /* grace period start time */
52 struct cld_name cm_name;
53 } __attribute__((packed)) cm_u;
54} __attribute__((packed));
55
56#endif /* !_NFSD_CLD_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index bd9f55a5958d..ddbb6a901f65 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -299,18 +299,31 @@ struct perf_event_mmap_page {
299 /* 299 /*
300 * Bits needed to read the hw events in user-space. 300 * Bits needed to read the hw events in user-space.
301 * 301 *
302 * u32 seq; 302 * u32 seq, time_mult, time_shift, idx, width;
303 * s64 count; 303 * u64 count, enabled, running;
304 * u64 cyc, time_offset;
305 * s64 pmc = 0;
304 * 306 *
305 * do { 307 * do {
306 * seq = pc->lock; 308 * seq = pc->lock;
307 *
308 * barrier() 309 * barrier()
309 * if (pc->index) { 310 *
310 * count = pmc_read(pc->index - 1); 311 * enabled = pc->time_enabled;
311 * count += pc->offset; 312 * running = pc->time_running;
312 * } else 313 *
313 * goto regular_read; 314 * if (pc->cap_usr_time && enabled != running) {
315 * cyc = rdtsc();
316 * time_offset = pc->time_offset;
317 * time_mult = pc->time_mult;
318 * time_shift = pc->time_shift;
319 * }
320 *
321 * idx = pc->index;
322 * count = pc->offset;
323 * if (pc->cap_usr_rdpmc && idx) {
324 * width = pc->pmc_width;
325 * pmc = rdpmc(idx - 1);
326 * }
314 * 327 *
315 * barrier(); 328 * barrier();
316 * } while (pc->lock != seq); 329 * } while (pc->lock != seq);
@@ -323,14 +336,57 @@ struct perf_event_mmap_page {
323 __s64 offset; /* add to hardware event value */ 336 __s64 offset; /* add to hardware event value */
324 __u64 time_enabled; /* time event active */ 337 __u64 time_enabled; /* time event active */
325 __u64 time_running; /* time event on cpu */ 338 __u64 time_running; /* time event on cpu */
326 __u32 time_mult, time_shift; 339 union {
340 __u64 capabilities;
341 __u64 cap_usr_time : 1,
342 cap_usr_rdpmc : 1,
343 cap_____res : 62;
344 };
345
346 /*
347 * If cap_usr_rdpmc this field provides the bit-width of the value
348 * read using the rdpmc() or equivalent instruction. This can be used
349 * to sign extend the result like:
350 *
351 * pmc <<= 64 - width;
352 * pmc >>= 64 - width; // signed shift right
353 * count += pmc;
354 */
355 __u16 pmc_width;
356
357 /*
358 * If cap_usr_time the below fields can be used to compute the time
359 * delta since time_enabled (in ns) using rdtsc or similar.
360 *
361 * u64 quot, rem;
362 * u64 delta;
363 *
364 * quot = (cyc >> time_shift);
365 * rem = cyc & ((1 << time_shift) - 1);
366 * delta = time_offset + quot * time_mult +
367 * ((rem * time_mult) >> time_shift);
368 *
369 * Where time_offset,time_mult,time_shift and cyc are read in the
370 * seqcount loop described above. This delta can then be added to
371 * enabled and possible running (if idx), improving the scaling:
372 *
373 * enabled += delta;
374 * if (idx)
375 * running += delta;
376 *
377 * quot = count / running;
378 * rem = count % running;
379 * count = quot * enabled + (rem * enabled) / running;
380 */
381 __u16 time_shift;
382 __u32 time_mult;
327 __u64 time_offset; 383 __u64 time_offset;
328 384
329 /* 385 /*
330 * Hole for extension of the self monitor capabilities 386 * Hole for extension of the self monitor capabilities
331 */ 387 */
332 388
333 __u64 __reserved[121]; /* align to 1k */ 389 __u64 __reserved[120]; /* align to 1k */
334 390
335 /* 391 /*
336 * Control data for the mmap() data buffer. 392 * Control data for the mmap() data buffer.
@@ -550,6 +606,7 @@ struct perf_guest_info_callbacks {
550#include <linux/irq_work.h> 606#include <linux/irq_work.h>
551#include <linux/static_key.h> 607#include <linux/static_key.h>
552#include <linux/atomic.h> 608#include <linux/atomic.h>
609#include <linux/sysfs.h>
553#include <asm/local.h> 610#include <asm/local.h>
554 611
555#define PERF_MAX_STACK_DEPTH 255 612#define PERF_MAX_STACK_DEPTH 255
@@ -1291,5 +1348,18 @@ do { \
1291 register_cpu_notifier(&fn##_nb); \ 1348 register_cpu_notifier(&fn##_nb); \
1292} while (0) 1349} while (0)
1293 1350
1351
1352#define PMU_FORMAT_ATTR(_name, _format) \
1353static ssize_t \
1354_name##_show(struct device *dev, \
1355 struct device_attribute *attr, \
1356 char *page) \
1357{ \
1358 BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
1359 return sprintf(page, _format "\n"); \
1360} \
1361 \
1362static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
1363
1294#endif /* __KERNEL__ */ 1364#endif /* __KERNEL__ */
1295#endif /* _LINUX_PERF_EVENT_H */ 1365#endif /* _LINUX_PERF_EVENT_H */
diff --git a/arch/arm/mach-zynq/include/mach/io.h b/include/linux/platform_data/spear_thermal.h
index 39d9885e0e9a..724f2e1cbbcb 100644
--- a/arch/arm/mach-zynq/include/mach/io.h
+++ b/include/linux/platform_data/spear_thermal.h
@@ -1,6 +1,8 @@
1/* arch/arm/mach-zynq/include/mach/io.h 1/*
2 * SPEAr thermal driver platform data.
2 * 3 *
3 * Copyright (C) 2011 Xilinx 4 * Copyright (C) 2011-2012 ST Microelectronics
5 * Author: Vincenzo Frascino <vincenzo.frascino@st.com>
4 * 6 *
5 * This software is licensed under the terms of the GNU General Public 7 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and 8 * License version 2, as published by the Free Software Foundation, and
@@ -10,24 +12,15 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
13 */ 16 */
17#ifndef SPEAR_THERMAL_H
18#define SPEAR_THERMAL_H
14 19
15#ifndef __MACH_IO_H__ 20/* SPEAr Thermal Sensor Platform Data */
16#define __MACH_IO_H__ 21struct spear_thermal_pdata {
17 22 /* flags used to enable thermal sensor */
18/* Allow IO space to be anywhere in the memory */ 23 unsigned int thermal_flags;
19 24};
20#define IO_SPACE_LIMIT 0xffff
21
22/* IO address mapping macros, nothing special at this time but required */
23
24#ifdef __ASSEMBLER__
25#define IOMEM(x) (x)
26#else
27#define IOMEM(x) ((void __force __iomem *)(x))
28#endif
29
30#define __io(a) __typesafe_io(a)
31#define __mem_pci(a) (a)
32 25
33#endif 26#endif /* SPEAR_THERMAL_H */
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index fe99211fb2b8..e01b167e66f0 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -27,6 +27,8 @@
27#define MAX17042_BATTERY_FULL (100) 27#define MAX17042_BATTERY_FULL (100)
28#define MAX17042_DEFAULT_SNS_RESISTOR (10000) 28#define MAX17042_DEFAULT_SNS_RESISTOR (10000)
29 29
30#define MAX17042_CHARACTERIZATION_DATA_SIZE 48
31
30enum max17042_register { 32enum max17042_register {
31 MAX17042_STATUS = 0x00, 33 MAX17042_STATUS = 0x00,
32 MAX17042_VALRT_Th = 0x01, 34 MAX17042_VALRT_Th = 0x01,
@@ -40,11 +42,11 @@ enum max17042_register {
40 MAX17042_VCELL = 0x09, 42 MAX17042_VCELL = 0x09,
41 MAX17042_Current = 0x0A, 43 MAX17042_Current = 0x0A,
42 MAX17042_AvgCurrent = 0x0B, 44 MAX17042_AvgCurrent = 0x0B,
43 MAX17042_Qresidual = 0x0C, 45
44 MAX17042_SOC = 0x0D, 46 MAX17042_SOC = 0x0D,
45 MAX17042_AvSOC = 0x0E, 47 MAX17042_AvSOC = 0x0E,
46 MAX17042_RemCap = 0x0F, 48 MAX17042_RemCap = 0x0F,
47 MAX17402_FullCAP = 0x10, 49 MAX17042_FullCAP = 0x10,
48 MAX17042_TTE = 0x11, 50 MAX17042_TTE = 0x11,
49 MAX17042_V_empty = 0x12, 51 MAX17042_V_empty = 0x12,
50 52
@@ -62,14 +64,14 @@ enum max17042_register {
62 MAX17042_AvCap = 0x1F, 64 MAX17042_AvCap = 0x1F,
63 MAX17042_ManName = 0x20, 65 MAX17042_ManName = 0x20,
64 MAX17042_DevName = 0x21, 66 MAX17042_DevName = 0x21,
65 MAX17042_DevChem = 0x22,
66 67
68 MAX17042_FullCAPNom = 0x23,
67 MAX17042_TempNom = 0x24, 69 MAX17042_TempNom = 0x24,
68 MAX17042_TempCold = 0x25, 70 MAX17042_TempLim = 0x25,
69 MAX17042_TempHot = 0x26, 71 MAX17042_TempHot = 0x26,
70 MAX17042_AIN = 0x27, 72 MAX17042_AIN = 0x27,
71 MAX17042_LearnCFG = 0x28, 73 MAX17042_LearnCFG = 0x28,
72 MAX17042_SHFTCFG = 0x29, 74 MAX17042_FilterCFG = 0x29,
73 MAX17042_RelaxCFG = 0x2A, 75 MAX17042_RelaxCFG = 0x2A,
74 MAX17042_MiscCFG = 0x2B, 76 MAX17042_MiscCFG = 0x2B,
75 MAX17042_TGAIN = 0x2C, 77 MAX17042_TGAIN = 0x2C,
@@ -77,22 +79,41 @@ enum max17042_register {
77 MAX17042_CGAIN = 0x2E, 79 MAX17042_CGAIN = 0x2E,
78 MAX17042_COFF = 0x2F, 80 MAX17042_COFF = 0x2F,
79 81
80 MAX17042_Q_empty = 0x33, 82 MAX17042_MaskSOC = 0x32,
83 MAX17042_SOC_empty = 0x33,
81 MAX17042_T_empty = 0x34, 84 MAX17042_T_empty = 0x34,
82 85
86 MAX17042_FullCAP0 = 0x35,
87 MAX17042_LAvg_empty = 0x36,
88 MAX17042_FCTC = 0x37,
83 MAX17042_RCOMP0 = 0x38, 89 MAX17042_RCOMP0 = 0x38,
84 MAX17042_TempCo = 0x39, 90 MAX17042_TempCo = 0x39,
85 MAX17042_Rx = 0x3A, 91 MAX17042_EmptyTempCo = 0x3A,
86 MAX17042_T_empty0 = 0x3B, 92 MAX17042_K_empty0 = 0x3B,
87 MAX17042_TaskPeriod = 0x3C, 93 MAX17042_TaskPeriod = 0x3C,
88 MAX17042_FSTAT = 0x3D, 94 MAX17042_FSTAT = 0x3D,
89 95
90 MAX17042_SHDNTIMER = 0x3F, 96 MAX17042_SHDNTIMER = 0x3F,
91 97
92 MAX17042_VFRemCap = 0x4A, 98 MAX17042_dQacc = 0x45,
99 MAX17042_dPacc = 0x46,
100
101 MAX17042_VFSOC0 = 0x48,
93 102
94 MAX17042_QH = 0x4D, 103 MAX17042_QH = 0x4D,
95 MAX17042_QL = 0x4E, 104 MAX17042_QL = 0x4E,
105
106 MAX17042_VFSOC0Enable = 0x60,
107 MAX17042_MLOCKReg1 = 0x62,
108 MAX17042_MLOCKReg2 = 0x63,
109
110 MAX17042_MODELChrTbl = 0x80,
111
112 MAX17042_OCV = 0xEE,
113
114 MAX17042_OCVInternal = 0xFB,
115
116 MAX17042_VFSOC = 0xFF,
96}; 117};
97 118
98/* 119/*
@@ -105,10 +126,64 @@ struct max17042_reg_data {
105 u16 data; 126 u16 data;
106}; 127};
107 128
129struct max17042_config_data {
130 /* External current sense resistor value in milli-ohms */
131 u32 cur_sense_val;
132
133 /* A/D measurement */
134 u16 tgain; /* 0x2C */
135 u16 toff; /* 0x2D */
136 u16 cgain; /* 0x2E */
137 u16 coff; /* 0x2F */
138
139 /* Alert / Status */
140 u16 valrt_thresh; /* 0x01 */
141 u16 talrt_thresh; /* 0x02 */
142 u16 soc_alrt_thresh; /* 0x03 */
143 u16 config; /* 0x01D */
144 u16 shdntimer; /* 0x03F */
145
146 /* App data */
147 u16 design_cap; /* 0x18 */
148 u16 ichgt_term; /* 0x1E */
149
150 /* MG3 config */
151 u16 at_rate; /* 0x04 */
152 u16 learn_cfg; /* 0x28 */
153 u16 filter_cfg; /* 0x29 */
154 u16 relax_cfg; /* 0x2A */
155 u16 misc_cfg; /* 0x2B */
156 u16 masksoc; /* 0x32 */
157
158 /* MG3 save and restore */
159 u16 fullcap; /* 0x10 */
160 u16 fullcapnom; /* 0x23 */
161 u16 socempty; /* 0x33 */
162 u16 lavg_empty; /* 0x36 */
163 u16 dqacc; /* 0x45 */
164 u16 dpacc; /* 0x46 */
165
166 /* Cell technology from power_supply.h */
167 u16 cell_technology;
168
169 /* Cell Data */
170 u16 vempty; /* 0x12 */
171 u16 temp_nom; /* 0x24 */
172 u16 temp_lim; /* 0x25 */
173 u16 fctc; /* 0x37 */
174 u16 rcomp0; /* 0x38 */
175 u16 tcompc0; /* 0x39 */
176 u16 empty_tempco; /* 0x3A */
177 u16 kempty0; /* 0x3B */
178 u16 cell_char_tbl[MAX17042_CHARACTERIZATION_DATA_SIZE];
179} __packed;
180
108struct max17042_platform_data { 181struct max17042_platform_data {
109 struct max17042_reg_data *init_data; 182 struct max17042_reg_data *init_data;
183 struct max17042_config_data *config_data;
110 int num_init_data; /* Number of enties in init_data array */ 184 int num_init_data; /* Number of enties in init_data array */
111 bool enable_current_sense; 185 bool enable_current_sense;
186 bool enable_por_init; /* Use POR init from Maxim appnote */
112 187
113 /* 188 /*
114 * R_sns in micro-ohms. 189 * R_sns in micro-ohms.
diff --git a/include/linux/power/smb347-charger.h b/include/linux/power/smb347-charger.h
new file mode 100644
index 000000000000..b3cb20dab55f
--- /dev/null
+++ b/include/linux/power/smb347-charger.h
@@ -0,0 +1,117 @@
1/*
2 * Summit Microelectronics SMB347 Battery Charger Driver
3 *
4 * Copyright (C) 2011, Intel Corporation
5 *
6 * Authors: Bruce E. Robertson <bruce.e.robertson@intel.com>
7 * Mika Westerberg <mika.westerberg@linux.intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#ifndef SMB347_CHARGER_H
15#define SMB347_CHARGER_H
16
17#include <linux/types.h>
18#include <linux/power_supply.h>
19
20enum {
21 /* use the default compensation method */
22 SMB347_SOFT_TEMP_COMPENSATE_DEFAULT = -1,
23
24 SMB347_SOFT_TEMP_COMPENSATE_NONE,
25 SMB347_SOFT_TEMP_COMPENSATE_CURRENT,
26 SMB347_SOFT_TEMP_COMPENSATE_VOLTAGE,
27};
28
29/* Use default factory programmed value for hard/soft temperature limit */
30#define SMB347_TEMP_USE_DEFAULT -273
31
32/*
33 * Charging enable can be controlled by software (via i2c) by
34 * smb347-charger driver or by EN pin (active low/high).
35 */
36enum smb347_chg_enable {
37 SMB347_CHG_ENABLE_SW,
38 SMB347_CHG_ENABLE_PIN_ACTIVE_LOW,
39 SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH,
40};
41
42/**
43 * struct smb347_charger_platform_data - platform data for SMB347 charger
44 * @battery_info: Information about the battery
45 * @max_charge_current: maximum current (in uA) the battery can be charged
46 * @max_charge_voltage: maximum voltage (in uV) the battery can be charged
47 * @pre_charge_current: current (in uA) to use in pre-charging phase
48 * @termination_current: current (in uA) used to determine when the
49 * charging cycle terminates
50 * @pre_to_fast_voltage: voltage (in uV) treshold used for transitioning to
51 * pre-charge to fast charge mode
52 * @mains_current_limit: maximum input current drawn from AC/DC input (in uA)
53 * @usb_hc_current_limit: maximum input high current (in uA) drawn from USB
54 * input
55 * @chip_temp_threshold: die temperature where device starts limiting charge
56 * current [%100 - %130] (in degree C)
57 * @soft_cold_temp_limit: soft cold temperature limit [%0 - %15] (in degree C),
58 * granularity is 5 deg C.
59 * @soft_hot_temp_limit: soft hot temperature limit [%40 - %55] (in degree C),
60 * granularity is 5 deg C.
61 * @hard_cold_temp_limit: hard cold temperature limit [%-5 - %10] (in degree C),
62 * granularity is 5 deg C.
63 * @hard_hot_temp_limit: hard hot temperature limit [%50 - %65] (in degree C),
64 * granularity is 5 deg C.
65 * @suspend_on_hard_temp_limit: suspend charging when hard limit is hit
66 * @soft_temp_limit_compensation: compensation method when soft temperature
67 * limit is hit
68 * @charge_current_compensation: current (in uA) for charging compensation
69 * current when temperature hits soft limits
70 * @use_mains: AC/DC input can be used
71 * @use_usb: USB input can be used
72 * @use_usb_otg: USB OTG output can be used (not implemented yet)
73 * @irq_gpio: GPIO number used for interrupts (%-1 if not used)
74 * @enable_control: how charging enable/disable is controlled
75 * (driver/pin controls)
76 *
77 * @use_main, @use_usb, and @use_usb_otg are means to enable/disable
78 * hardware support for these. This is useful when we want to have for
79 * example OTG charging controlled via OTG transceiver driver and not by
80 * the SMB347 hardware.
81 *
82 * Hard and soft temperature limit values are given as described in the
83 * device data sheet and assuming NTC beta value is %3750. Even if this is
84 * not the case, these values should be used. They can be mapped to the
85 * corresponding NTC beta values with the help of table %2 in the data
86 * sheet. So for example if NTC beta is %3375 and we want to program hard
87 * hot limit to be %53 deg C, @hard_hot_temp_limit should be set to %50.
88 *
89 * If zero value is given in any of the current and voltage values, the
90 * factory programmed default will be used. For soft/hard temperature
91 * values, pass in %SMB347_TEMP_USE_DEFAULT instead.
92 */
93struct smb347_charger_platform_data {
94 struct power_supply_info battery_info;
95 unsigned int max_charge_current;
96 unsigned int max_charge_voltage;
97 unsigned int pre_charge_current;
98 unsigned int termination_current;
99 unsigned int pre_to_fast_voltage;
100 unsigned int mains_current_limit;
101 unsigned int usb_hc_current_limit;
102 unsigned int chip_temp_threshold;
103 int soft_cold_temp_limit;
104 int soft_hot_temp_limit;
105 int hard_cold_temp_limit;
106 int hard_hot_temp_limit;
107 bool suspend_on_hard_temp_limit;
108 unsigned int soft_temp_limit_compensation;
109 unsigned int charge_current_compensation;
110 bool use_mains;
111 bool use_usb;
112 bool use_usb_otg;
113 int irq_gpio;
114 enum smb347_chg_enable enable_control;
115};
116
117#endif /* SMB347_CHARGER_H */
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 67be0376d8e3..7be2e88f23fd 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -151,6 +151,9 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
151 151
152void ring_buffer_record_disable(struct ring_buffer *buffer); 152void ring_buffer_record_disable(struct ring_buffer *buffer);
153void ring_buffer_record_enable(struct ring_buffer *buffer); 153void ring_buffer_record_enable(struct ring_buffer *buffer);
154void ring_buffer_record_off(struct ring_buffer *buffer);
155void ring_buffer_record_on(struct ring_buffer *buffer);
156int ring_buffer_record_is_on(struct ring_buffer *buffer);
154void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); 157void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
155void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); 158void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
156 159
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 93f4d035076b..fcabfb4873c8 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -202,7 +202,8 @@ struct rtc_device
202 struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ 202 struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
203 int pie_enabled; 203 int pie_enabled;
204 struct work_struct irqwork; 204 struct work_struct irqwork;
205 205 /* Some hardware can't support UIE mode */
206 int uie_unsupported;
206 207
207#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL 208#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
208 struct work_struct uie_task; 209 struct work_struct uie_task;
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index b160645f5599..6aed0805927f 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -3,6 +3,23 @@
3 3
4#include <linux/ioport.h> 4#include <linux/ioport.h>
5 5
6#ifdef CONFIG_SUPERH
7#define INTC_NR_IRQS 512
8#else
9#define INTC_NR_IRQS 1024
10#endif
11
12/*
13 * Convert back and forth between INTEVT and IRQ values.
14 */
15#ifdef CONFIG_CPU_HAS_INTEVT
16#define evt2irq(evt) (((evt) >> 5) - 16)
17#define irq2evt(irq) (((irq) + 16) << 5)
18#else
19#define evt2irq(evt) (evt)
20#define irq2evt(irq) (irq)
21#endif
22
6typedef unsigned char intc_enum; 23typedef unsigned char intc_enum;
7 24
8struct intc_vect { 25struct intc_vect {
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index e253ccd7a604..51df117abe46 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -67,7 +67,7 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
67#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock) 67#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
68#endif 68#endif
69 69
70#ifdef CONFIG_INLINE_SPIN_UNLOCK 70#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
71#define _raw_spin_unlock(lock) __raw_spin_unlock(lock) 71#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
72#endif 72#endif
73 73
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index c14fe86dac59..0b8e3e6bdacf 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -190,7 +190,7 @@ extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
190extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int); 190extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int);
191extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int); 191extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int);
192extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int, 192extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int,
193 u32, u64, u32); 193 __be32, __be64, u32);
194extern void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *, 194extern void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *,
195 struct rpcrdma_msg *, 195 struct rpcrdma_msg *,
196 struct rpcrdma_msg *, 196 struct rpcrdma_msg *,
@@ -292,7 +292,7 @@ svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
292 if (wr_ary) { 292 if (wr_ary) {
293 rp_ary = (struct rpcrdma_write_array *) 293 rp_ary = (struct rpcrdma_write_array *)
294 &wr_ary-> 294 &wr_ary->
295 wc_array[wr_ary->wc_nchunks].wc_target.rs_length; 295 wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length;
296 296
297 goto found_it; 297 goto found_it;
298 } 298 }
diff --git a/include/linux/sysinfo.h b/include/linux/sysinfo.h
new file mode 100644
index 000000000000..934335a22522
--- /dev/null
+++ b/include/linux/sysinfo.h
@@ -0,0 +1,24 @@
1#ifndef _LINUX_SYSINFO_H
2#define _LINUX_SYSINFO_H
3
4#include <linux/types.h>
5
6#define SI_LOAD_SHIFT 16
7struct sysinfo {
8 __kernel_long_t uptime; /* Seconds since boot */
9 __kernel_ulong_t loads[3]; /* 1, 5, and 15 minute load averages */
10 __kernel_ulong_t totalram; /* Total usable main memory size */
11 __kernel_ulong_t freeram; /* Available memory size */
12 __kernel_ulong_t sharedram; /* Amount of shared memory */
13 __kernel_ulong_t bufferram; /* Memory used by buffers */
14 __kernel_ulong_t totalswap; /* Total swap space size */
15 __kernel_ulong_t freeswap; /* swap space still available */
16 __u16 procs; /* Number of current processes */
17 __u16 pad; /* Explicit padding for m68k */
18 __kernel_ulong_t totalhigh; /* Total high memory size */
19 __kernel_ulong_t freehigh; /* Available high memory size */
20 __u32 mem_unit; /* Memory unit size in bytes */
21 char _f[20-2*sizeof(__kernel_ulong_t)-sizeof(__u32)]; /* Padding: libc5 uses this.. */
22};
23
24#endif /* _LINUX_SYSINFO_H */
diff --git a/include/linux/tboot.h b/include/linux/tboot.h
index 1dba6ee55203..c75128bed5fa 100644
--- a/include/linux/tboot.h
+++ b/include/linux/tboot.h
@@ -143,7 +143,6 @@ static inline int tboot_enabled(void)
143 143
144extern void tboot_probe(void); 144extern void tboot_probe(void);
145extern void tboot_shutdown(u32 shutdown_type); 145extern void tboot_shutdown(u32 shutdown_type);
146extern void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control);
147extern struct acpi_table_header *tboot_get_dmar_table( 146extern struct acpi_table_header *tboot_get_dmar_table(
148 struct acpi_table_header *dmar_tbl); 147 struct acpi_table_header *dmar_tbl);
149extern int tboot_force_iommu(void); 148extern int tboot_force_iommu(void);
diff --git a/include/linux/time.h b/include/linux/time.h
index b3061782dec3..33a92ead4d88 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -116,7 +116,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
116extern void read_persistent_clock(struct timespec *ts); 116extern void read_persistent_clock(struct timespec *ts);
117extern void read_boot_clock(struct timespec *ts); 117extern void read_boot_clock(struct timespec *ts);
118extern int update_persistent_clock(struct timespec now); 118extern int update_persistent_clock(struct timespec now);
119extern int no_sync_cmos_clock __read_mostly;
120void timekeeping_init(void); 119void timekeeping_init(void);
121extern int timekeeping_suspended; 120extern int timekeeping_suspended;
122 121
@@ -256,6 +255,7 @@ static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
256 a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); 255 a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
257 a->tv_nsec = ns; 256 a->tv_nsec = ns;
258} 257}
258
259#endif /* __KERNEL__ */ 259#endif /* __KERNEL__ */
260 260
261#define NFDBITS __NFDBITS 261#define NFDBITS __NFDBITS
diff --git a/include/linux/timex.h b/include/linux/timex.h
index b75e1864ed19..99bc88b1fc02 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -252,7 +252,7 @@ extern void ntp_clear(void);
252/* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */ 252/* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
253extern u64 ntp_tick_length(void); 253extern u64 ntp_tick_length(void);
254 254
255extern void second_overflow(void); 255extern int second_overflow(unsigned long secs);
256extern int do_adjtimex(struct timex *); 256extern int do_adjtimex(struct timex *);
257extern void hardpps(const struct timespec *, const struct timespec *); 257extern void hardpps(const struct timespec *, const struct timespec *);
258 258
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index d0018d27c281..8efd28ae5597 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -96,7 +96,6 @@ struct virtio_driver {
96 void (*config_changed)(struct virtio_device *dev); 96 void (*config_changed)(struct virtio_device *dev);
97#ifdef CONFIG_PM 97#ifdef CONFIG_PM
98 int (*freeze)(struct virtio_device *dev); 98 int (*freeze)(struct virtio_device *dev);
99 int (*thaw)(struct virtio_device *dev);
100 int (*restore)(struct virtio_device *dev); 99 int (*restore)(struct virtio_device *dev);
101#endif 100#endif
102}; 101};
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 9c23ee8fd2d3..917741bb8e11 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -261,7 +261,8 @@ struct iscsi_uevent {
261 } host_event; 261 } host_event;
262 struct msg_ping_comp { 262 struct msg_ping_comp {
263 uint32_t host_no; 263 uint32_t host_no;
264 uint32_t status; 264 uint32_t status; /* enum
265 * iscsi_ping_status_code */
265 uint32_t pid; /* unique ping id associated 266 uint32_t pid; /* unique ping id associated
266 with each ping request */ 267 with each ping request */
267 uint32_t data_size; 268 uint32_t data_size;
@@ -483,6 +484,20 @@ enum iscsi_port_state {
483 ISCSI_PORT_STATE_UP = 0x2, 484 ISCSI_PORT_STATE_UP = 0x2,
484}; 485};
485 486
487/* iSCSI PING status/error code */
488enum iscsi_ping_status_code {
489 ISCSI_PING_SUCCESS = 0,
490 ISCSI_PING_FW_DISABLED = 0x1,
491 ISCSI_PING_IPADDR_INVALID = 0x2,
492 ISCSI_PING_LINKLOCAL_IPV6_ADDR_INVALID = 0x3,
493 ISCSI_PING_TIMEOUT = 0x4,
494 ISCSI_PING_INVALID_DEST_ADDR = 0x5,
495 ISCSI_PING_OVERSIZE_PACKET = 0x6,
496 ISCSI_PING_ICMP_ERROR = 0x7,
497 ISCSI_PING_MAX_REQ_EXCEEDED = 0x8,
498 ISCSI_PING_NO_ARP_RECEIVED = 0x9,
499};
500
486#define iscsi_ptr(_handle) ((void*)(unsigned long)_handle) 501#define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
487#define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr) 502#define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
488 503
@@ -578,6 +593,6 @@ struct iscsi_chap_rec {
578 char username[ISCSI_CHAP_AUTH_NAME_MAX_LEN]; 593 char username[ISCSI_CHAP_AUTH_NAME_MAX_LEN];
579 uint8_t password[ISCSI_CHAP_AUTH_SECRET_MAX_LEN]; 594 uint8_t password[ISCSI_CHAP_AUTH_SECRET_MAX_LEN];
580 uint8_t password_length; 595 uint8_t password_length;
581} __packed; 596};
582 597
583#endif 598#endif
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 5a35a2a2d3c5..cfdb55f0937e 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -165,7 +165,8 @@ struct fcoe_ctlr {
165 * @switch_name: WWN of switch from advertisement 165 * @switch_name: WWN of switch from advertisement
166 * @fabric_name: WWN of fabric from advertisement 166 * @fabric_name: WWN of fabric from advertisement
167 * @fc_map: FC_MAP value from advertisement 167 * @fc_map: FC_MAP value from advertisement
168 * @fcf_mac: Ethernet address of the FCF 168 * @fcf_mac: Ethernet address of the FCF for FIP traffic
169 * @fcoe_mac: Ethernet address of the FCF for FCoE traffic
169 * @vfid: virtual fabric ID 170 * @vfid: virtual fabric ID
170 * @pri: selection priority, smaller values are better 171 * @pri: selection priority, smaller values are better
171 * @flogi_sent: current FLOGI sent to this FCF 172 * @flogi_sent: current FLOGI sent to this FCF
@@ -188,6 +189,7 @@ struct fcoe_fcf {
188 u32 fc_map; 189 u32 fc_map;
189 u16 vfid; 190 u16 vfid;
190 u8 fcf_mac[ETH_ALEN]; 191 u8 fcf_mac[ETH_ALEN];
192 u8 fcoe_mac[ETH_ALEN];
191 193
192 u8 pri; 194 u8 pri;
193 u8 flogi_sent; 195 u8 flogi_sent;
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 84f3001a568d..91b91e805673 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -6,6 +6,7 @@
6 6
7#include <linux/writeback.h> 7#include <linux/writeback.h>
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9#include <trace/events/gfpflags.h>
9 10
10struct btrfs_root; 11struct btrfs_root;
11struct btrfs_fs_info; 12struct btrfs_fs_info;
@@ -862,6 +863,49 @@ TRACE_EVENT(btrfs_setup_cluster,
862 __entry->size, __entry->max_size, __entry->bitmap) 863 __entry->size, __entry->max_size, __entry->bitmap)
863); 864);
864 865
866struct extent_state;
867TRACE_EVENT(alloc_extent_state,
868
869 TP_PROTO(struct extent_state *state, gfp_t mask, unsigned long IP),
870
871 TP_ARGS(state, mask, IP),
872
873 TP_STRUCT__entry(
874 __field(struct extent_state *, state)
875 __field(gfp_t, mask)
876 __field(unsigned long, ip)
877 ),
878
879 TP_fast_assign(
880 __entry->state = state,
881 __entry->mask = mask,
882 __entry->ip = IP
883 ),
884
885 TP_printk("state=%p; mask = %s; caller = %pF", __entry->state,
886 show_gfp_flags(__entry->mask), (void *)__entry->ip)
887);
888
889TRACE_EVENT(free_extent_state,
890
891 TP_PROTO(struct extent_state *state, unsigned long IP),
892
893 TP_ARGS(state, IP),
894
895 TP_STRUCT__entry(
896 __field(struct extent_state *, state)
897 __field(unsigned long, ip)
898 ),
899
900 TP_fast_assign(
901 __entry->state = state,
902 __entry->ip = IP
903 ),
904
905 TP_printk(" state=%p; caller = %pF", __entry->state,
906 (void *)__entry->ip)
907);
908
865#endif /* _TRACE_BTRFS_H */ 909#endif /* _TRACE_BTRFS_H */
866 910
867/* This part must be outside protection */ 911/* This part must be outside protection */
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 3098a38f3ae1..9047330c73e9 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -2,7 +2,6 @@
2#include <linux/kernel.h> 2#include <linux/kernel.h>
3#include <linux/fs.h> 3#include <linux/fs.h>
4#include <linux/minix_fs.h> 4#include <linux/minix_fs.h>
5#include <linux/ext2_fs.h>
6#include <linux/romfs_fs.h> 5#include <linux/romfs_fs.h>
7#include <linux/initrd.h> 6#include <linux/initrd.h>
8#include <linux/sched.h> 7#include <linux/sched.h>
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 01f1306aa26e..6212586df29a 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -54,20 +54,19 @@ identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor)
54{ 54{
55 const int size = 512; 55 const int size = 512;
56 struct minix_super_block *minixsb; 56 struct minix_super_block *minixsb;
57 struct ext2_super_block *ext2sb;
58 struct romfs_super_block *romfsb; 57 struct romfs_super_block *romfsb;
59 struct cramfs_super *cramfsb; 58 struct cramfs_super *cramfsb;
60 struct squashfs_super_block *squashfsb; 59 struct squashfs_super_block *squashfsb;
61 int nblocks = -1; 60 int nblocks = -1;
62 unsigned char *buf; 61 unsigned char *buf;
63 const char *compress_name; 62 const char *compress_name;
63 unsigned long n;
64 64
65 buf = kmalloc(size, GFP_KERNEL); 65 buf = kmalloc(size, GFP_KERNEL);
66 if (!buf) 66 if (!buf)
67 return -ENOMEM; 67 return -ENOMEM;
68 68
69 minixsb = (struct minix_super_block *) buf; 69 minixsb = (struct minix_super_block *) buf;
70 ext2sb = (struct ext2_super_block *) buf;
71 romfsb = (struct romfs_super_block *) buf; 70 romfsb = (struct romfs_super_block *) buf;
72 cramfsb = (struct cramfs_super *) buf; 71 cramfsb = (struct cramfs_super *) buf;
73 squashfsb = (struct squashfs_super_block *) buf; 72 squashfsb = (struct squashfs_super_block *) buf;
@@ -150,12 +149,12 @@ identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor)
150 } 149 }
151 150
152 /* Try ext2 */ 151 /* Try ext2 */
153 if (ext2sb->s_magic == cpu_to_le16(EXT2_SUPER_MAGIC)) { 152 n = ext2_image_size(buf);
153 if (n) {
154 printk(KERN_NOTICE 154 printk(KERN_NOTICE
155 "RAMDISK: ext2 filesystem found at block %d\n", 155 "RAMDISK: ext2 filesystem found at block %d\n",
156 start_block); 156 start_block);
157 nblocks = le32_to_cpu(ext2sb->s_blocks_count) << 157 nblocks = n;
158 le32_to_cpu(ext2sb->s_log_block_size);
159 goto done; 158 goto done;
160 } 159 }
161 160
diff --git a/ipc/compat.c b/ipc/compat.c
index 845a28738d3a..a6df704f521e 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -27,6 +27,7 @@
27#include <linux/msg.h> 27#include <linux/msg.h>
28#include <linux/shm.h> 28#include <linux/shm.h>
29#include <linux/syscalls.h> 29#include <linux/syscalls.h>
30#include <linux/ptrace.h>
30 31
31#include <linux/mutex.h> 32#include <linux/mutex.h>
32#include <asm/uaccess.h> 33#include <asm/uaccess.h>
@@ -117,6 +118,7 @@ extern int sem_ctls[];
117 118
118static inline int compat_ipc_parse_version(int *cmd) 119static inline int compat_ipc_parse_version(int *cmd)
119{ 120{
121#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC
120 int version = *cmd & IPC_64; 122 int version = *cmd & IPC_64;
121 123
122 /* this is tricky: architectures that have support for the old 124 /* this is tricky: architectures that have support for the old
@@ -128,6 +130,10 @@ static inline int compat_ipc_parse_version(int *cmd)
128 *cmd &= ~IPC_64; 130 *cmd &= ~IPC_64;
129#endif 131#endif
130 return version; 132 return version;
133#else
134 /* With the asm-generic APIs, we always use the 64-bit versions. */
135 return IPC_64;
136#endif
131} 137}
132 138
133static inline int __get_compat_ipc64_perm(struct ipc64_perm *p64, 139static inline int __get_compat_ipc64_perm(struct ipc64_perm *p64,
@@ -232,10 +238,9 @@ static inline int put_compat_semid_ds(struct semid64_ds *s,
232 return err; 238 return err;
233} 239}
234 240
235long compat_sys_semctl(int first, int second, int third, void __user *uptr) 241static long do_compat_semctl(int first, int second, int third, u32 pad)
236{ 242{
237 union semun fourth; 243 union semun fourth;
238 u32 pad;
239 int err, err2; 244 int err, err2;
240 struct semid64_ds s64; 245 struct semid64_ds s64;
241 struct semid64_ds __user *up64; 246 struct semid64_ds __user *up64;
@@ -243,10 +248,6 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
243 248
244 memset(&s64, 0, sizeof(s64)); 249 memset(&s64, 0, sizeof(s64));
245 250
246 if (!uptr)
247 return -EINVAL;
248 if (get_user(pad, (u32 __user *) uptr))
249 return -EFAULT;
250 if ((third & (~IPC_64)) == SETVAL) 251 if ((third & (~IPC_64)) == SETVAL)
251 fourth.val = (int) pad; 252 fourth.val = (int) pad;
252 else 253 else
@@ -305,6 +306,18 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
305 return err; 306 return err;
306} 307}
307 308
309#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC
310long compat_sys_semctl(int first, int second, int third, void __user *uptr)
311{
312 u32 pad;
313
314 if (!uptr)
315 return -EINVAL;
316 if (get_user(pad, (u32 __user *) uptr))
317 return -EFAULT;
318 return do_compat_semctl(first, second, third, pad);
319}
320
308long compat_sys_msgsnd(int first, int second, int third, void __user *uptr) 321long compat_sys_msgsnd(int first, int second, int third, void __user *uptr)
309{ 322{
310 struct compat_msgbuf __user *up = uptr; 323 struct compat_msgbuf __user *up = uptr;
@@ -353,6 +366,37 @@ long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
353out: 366out:
354 return err; 367 return err;
355} 368}
369#else
370long compat_sys_semctl(int semid, int semnum, int cmd, int arg)
371{
372 return do_compat_semctl(semid, semnum, cmd, arg);
373}
374
375long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp,
376 size_t msgsz, int msgflg)
377{
378 compat_long_t mtype;
379
380 if (get_user(mtype, &msgp->mtype))
381 return -EFAULT;
382 return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
383}
384
385long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp,
386 size_t msgsz, long msgtyp, int msgflg)
387{
388 long err, mtype;
389
390 err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
391 if (err < 0)
392 goto out;
393
394 if (put_user(mtype, &msgp->mtype))
395 err = -EFAULT;
396 out:
397 return err;
398}
399#endif
356 400
357static inline int get_compat_msqid64(struct msqid64_ds *m64, 401static inline int get_compat_msqid64(struct msqid64_ds *m64,
358 struct compat_msqid64_ds __user *up64) 402 struct compat_msqid64_ds __user *up64)
@@ -470,6 +514,7 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
470 return err; 514 return err;
471} 515}
472 516
517#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC
473long compat_sys_shmat(int first, int second, compat_uptr_t third, int version, 518long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
474 void __user *uptr) 519 void __user *uptr)
475{ 520{
@@ -485,6 +530,19 @@ long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
485 uaddr = compat_ptr(third); 530 uaddr = compat_ptr(third);
486 return put_user(raddr, uaddr); 531 return put_user(raddr, uaddr);
487} 532}
533#else
534long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg)
535{
536 unsigned long ret;
537 long err;
538
539 err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret);
540 if (err)
541 return err;
542 force_successful_syscall_return();
543 return (long)ret;
544}
545#endif
488 546
489static inline int get_compat_shmid64_ds(struct shmid64_ds *s64, 547static inline int get_compat_shmid64_ds(struct shmid64_ds *s64,
490 struct compat_shmid64_ds __user *up64) 548 struct compat_shmid64_ds __user *up64)
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 5068e2a4e75f..2251882daf53 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -124,8 +124,8 @@ config INLINE_SPIN_LOCK_IRQSAVE
124 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ 124 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
125 ARCH_INLINE_SPIN_LOCK_IRQSAVE 125 ARCH_INLINE_SPIN_LOCK_IRQSAVE
126 126
127config INLINE_SPIN_UNLOCK 127config UNINLINE_SPIN_UNLOCK
128 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK) 128 bool
129 129
130config INLINE_SPIN_UNLOCK_BH 130config INLINE_SPIN_UNLOCK_BH
131 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH 131 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 24e7cb0ba26a..3f9c97419f02 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -36,6 +36,7 @@ config PREEMPT_VOLUNTARY
36config PREEMPT 36config PREEMPT
37 bool "Preemptible Kernel (Low-Latency Desktop)" 37 bool "Preemptible Kernel (Low-Latency Desktop)"
38 select PREEMPT_COUNT 38 select PREEMPT_COUNT
39 select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
39 help 40 help
40 This option reduces the latency of the kernel by making 41 This option reduces the latency of the kernel by making
41 all kernel code (that is not executing in a critical section) 42 all kernel code (that is not executing in a critical section)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index f4ea4b6f3cf1..ed64ccac67c9 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1883,7 +1883,7 @@ static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
1883 */ 1883 */
1884int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) 1884int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1885{ 1885{
1886 int retval; 1886 int retval = 0;
1887 struct cgroup_subsys *ss, *failed_ss = NULL; 1887 struct cgroup_subsys *ss, *failed_ss = NULL;
1888 struct cgroup *oldcgrp; 1888 struct cgroup *oldcgrp;
1889 struct cgroupfs_root *root = cgrp->root; 1889 struct cgroupfs_root *root = cgrp->root;
diff --git a/kernel/compat.c b/kernel/compat.c
index f346cedfe24d..74ff8498809a 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -31,11 +31,10 @@
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32 32
33/* 33/*
34 * Note that the native side is already converted to a timespec, because 34 * Get/set struct timeval with struct timespec on the native side
35 * that's what we want anyway.
36 */ 35 */
37static int compat_get_timeval(struct timespec *o, 36static int compat_get_timeval_convert(struct timespec *o,
38 struct compat_timeval __user *i) 37 struct compat_timeval __user *i)
39{ 38{
40 long usec; 39 long usec;
41 40
@@ -46,8 +45,8 @@ static int compat_get_timeval(struct timespec *o,
46 return 0; 45 return 0;
47} 46}
48 47
49static int compat_put_timeval(struct compat_timeval __user *o, 48static int compat_put_timeval_convert(struct compat_timeval __user *o,
50 struct timeval *i) 49 struct timeval *i)
51{ 50{
52 return (put_user(i->tv_sec, &o->tv_sec) || 51 return (put_user(i->tv_sec, &o->tv_sec) ||
53 put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0; 52 put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0;
@@ -117,7 +116,7 @@ asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
117 if (tv) { 116 if (tv) {
118 struct timeval ktv; 117 struct timeval ktv;
119 do_gettimeofday(&ktv); 118 do_gettimeofday(&ktv);
120 if (compat_put_timeval(tv, &ktv)) 119 if (compat_put_timeval_convert(tv, &ktv))
121 return -EFAULT; 120 return -EFAULT;
122 } 121 }
123 if (tz) { 122 if (tz) {
@@ -135,7 +134,7 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
135 struct timezone ktz; 134 struct timezone ktz;
136 135
137 if (tv) { 136 if (tv) {
138 if (compat_get_timeval(&kts, tv)) 137 if (compat_get_timeval_convert(&kts, tv))
139 return -EFAULT; 138 return -EFAULT;
140 } 139 }
141 if (tz) { 140 if (tz) {
@@ -146,12 +145,29 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
146 return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); 145 return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
147} 146}
148 147
148int get_compat_timeval(struct timeval *tv, const struct compat_timeval __user *ctv)
149{
150 return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) ||
151 __get_user(tv->tv_sec, &ctv->tv_sec) ||
152 __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
153}
154EXPORT_SYMBOL_GPL(get_compat_timeval);
155
156int put_compat_timeval(const struct timeval *tv, struct compat_timeval __user *ctv)
157{
158 return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) ||
159 __put_user(tv->tv_sec, &ctv->tv_sec) ||
160 __put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
161}
162EXPORT_SYMBOL_GPL(put_compat_timeval);
163
149int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts) 164int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
150{ 165{
151 return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) || 166 return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
152 __get_user(ts->tv_sec, &cts->tv_sec) || 167 __get_user(ts->tv_sec, &cts->tv_sec) ||
153 __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; 168 __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
154} 169}
170EXPORT_SYMBOL_GPL(get_compat_timespec);
155 171
156int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts) 172int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
157{ 173{
@@ -161,6 +177,42 @@ int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user
161} 177}
162EXPORT_SYMBOL_GPL(put_compat_timespec); 178EXPORT_SYMBOL_GPL(put_compat_timespec);
163 179
180int compat_get_timeval(struct timeval *tv, const void __user *utv)
181{
182 if (COMPAT_USE_64BIT_TIME)
183 return copy_from_user(tv, utv, sizeof *tv) ? -EFAULT : 0;
184 else
185 return get_compat_timeval(tv, utv);
186}
187EXPORT_SYMBOL_GPL(compat_get_timeval);
188
189int compat_put_timeval(const struct timeval *tv, void __user *utv)
190{
191 if (COMPAT_USE_64BIT_TIME)
192 return copy_to_user(utv, tv, sizeof *tv) ? -EFAULT : 0;
193 else
194 return put_compat_timeval(tv, utv);
195}
196EXPORT_SYMBOL_GPL(compat_put_timeval);
197
198int compat_get_timespec(struct timespec *ts, const void __user *uts)
199{
200 if (COMPAT_USE_64BIT_TIME)
201 return copy_from_user(ts, uts, sizeof *ts) ? -EFAULT : 0;
202 else
203 return get_compat_timespec(ts, uts);
204}
205EXPORT_SYMBOL_GPL(compat_get_timespec);
206
207int compat_put_timespec(const struct timespec *ts, void __user *uts)
208{
209 if (COMPAT_USE_64BIT_TIME)
210 return copy_to_user(uts, ts, sizeof *ts) ? -EFAULT : 0;
211 else
212 return put_compat_timespec(ts, uts);
213}
214EXPORT_SYMBOL_GPL(compat_put_timespec);
215
164static long compat_nanosleep_restart(struct restart_block *restart) 216static long compat_nanosleep_restart(struct restart_block *restart)
165{ 217{
166 struct compat_timespec __user *rmtp; 218 struct compat_timespec __user *rmtp;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index eedeebe64b1a..14f7070b4ba2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2162,10 +2162,9 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2162 mutex_unlock(&callback_mutex); 2162 mutex_unlock(&callback_mutex);
2163} 2163}
2164 2164
2165int cpuset_cpus_allowed_fallback(struct task_struct *tsk) 2165void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
2166{ 2166{
2167 const struct cpuset *cs; 2167 const struct cpuset *cs;
2168 int cpu;
2169 2168
2170 rcu_read_lock(); 2169 rcu_read_lock();
2171 cs = task_cs(tsk); 2170 cs = task_cs(tsk);
@@ -2186,22 +2185,10 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
2186 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary 2185 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
2187 * set any mask even if it is not right from task_cs() pov, 2186 * set any mask even if it is not right from task_cs() pov,
2188 * the pending set_cpus_allowed_ptr() will fix things. 2187 * the pending set_cpus_allowed_ptr() will fix things.
2188 *
2189 * select_fallback_rq() will fix things ups and set cpu_possible_mask
2190 * if required.
2189 */ 2191 */
2190
2191 cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
2192 if (cpu >= nr_cpu_ids) {
2193 /*
2194 * Either tsk->cpus_allowed is wrong (see above) or it
2195 * is actually empty. The latter case is only possible
2196 * if we are racing with remove_tasks_in_empty_cpuset().
2197 * Like above we can temporary set any mask and rely on
2198 * set_cpus_allowed_ptr() as synchronization point.
2199 */
2200 do_set_cpus_allowed(tsk, cpu_possible_mask);
2201 cpu = cpumask_any(cpu_active_mask);
2202 }
2203
2204 return cpu;
2205} 2192}
2206 2193
2207void cpuset_init_current_mems_allowed(void) 2194void cpuset_init_current_mems_allowed(void)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4b50357914fb..a6a9ec4cd8f5 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3348,7 +3348,7 @@ static void calc_timer_values(struct perf_event *event,
3348 *running = ctx_time - event->tstamp_running; 3348 *running = ctx_time - event->tstamp_running;
3349} 3349}
3350 3350
3351void __weak perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now) 3351void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
3352{ 3352{
3353} 3353}
3354 3354
@@ -3398,7 +3398,7 @@ void perf_event_update_userpage(struct perf_event *event)
3398 userpg->time_running = running + 3398 userpg->time_running = running +
3399 atomic64_read(&event->child_total_time_running); 3399 atomic64_read(&event->child_total_time_running);
3400 3400
3401 perf_update_user_clock(userpg, now); 3401 arch_perf_update_userpage(userpg, now);
3402 3402
3403 barrier(); 3403 barrier();
3404 ++userpg->lock; 3404 ++userpg->lock;
@@ -7116,6 +7116,13 @@ void __init perf_event_init(void)
7116 7116
7117 /* do not patch jump label more than once per second */ 7117 /* do not patch jump label more than once per second */
7118 jump_label_rate_limit(&perf_sched_events, HZ); 7118 jump_label_rate_limit(&perf_sched_events, HZ);
7119
7120 /*
7121 * Build time assertion that we keep the data_head at the intended
7122 * location. IOW, validation we got the __reserved[] size right.
7123 */
7124 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
7125 != 1024);
7119} 7126}
7120 7127
7121static int __init perf_event_sysfs_init(void) 7128static int __init perf_event_sysfs_init(void)
diff --git a/kernel/exit.c b/kernel/exit.c
index 3db1909faed9..d8bd3b425fa7 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -474,7 +474,7 @@ static void close_files(struct files_struct * files)
474 i = j * __NFDBITS; 474 i = j * __NFDBITS;
475 if (i >= fdt->max_fds) 475 if (i >= fdt->max_fds)
476 break; 476 break;
477 set = fdt->open_fds->fds_bits[j++]; 477 set = fdt->open_fds[j++];
478 while (set) { 478 while (set) {
479 if (set & 1) { 479 if (set & 1) {
480 struct file * file = xchg(&fdt->fd[i], NULL); 480 struct file * file = xchg(&fdt->fd[i], NULL);
diff --git a/kernel/futex.c b/kernel/futex.c
index 72efa1e4359a..e2b0fb9a0b3b 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -59,6 +59,7 @@
59#include <linux/magic.h> 59#include <linux/magic.h>
60#include <linux/pid.h> 60#include <linux/pid.h>
61#include <linux/nsproxy.h> 61#include <linux/nsproxy.h>
62#include <linux/ptrace.h>
62 63
63#include <asm/futex.h> 64#include <asm/futex.h>
64 65
@@ -2443,40 +2444,31 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
2443{ 2444{
2444 struct robust_list_head __user *head; 2445 struct robust_list_head __user *head;
2445 unsigned long ret; 2446 unsigned long ret;
2446 const struct cred *cred = current_cred(), *pcred; 2447 struct task_struct *p;
2447 2448
2448 if (!futex_cmpxchg_enabled) 2449 if (!futex_cmpxchg_enabled)
2449 return -ENOSYS; 2450 return -ENOSYS;
2450 2451
2452 WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
2453
2454 rcu_read_lock();
2455
2456 ret = -ESRCH;
2451 if (!pid) 2457 if (!pid)
2452 head = current->robust_list; 2458 p = current;
2453 else { 2459 else {
2454 struct task_struct *p;
2455
2456 ret = -ESRCH;
2457 rcu_read_lock();
2458 p = find_task_by_vpid(pid); 2460 p = find_task_by_vpid(pid);
2459 if (!p) 2461 if (!p)
2460 goto err_unlock; 2462 goto err_unlock;
2461 ret = -EPERM;
2462 pcred = __task_cred(p);
2463 /* If victim is in different user_ns, then uids are not
2464 comparable, so we must have CAP_SYS_PTRACE */
2465 if (cred->user->user_ns != pcred->user->user_ns) {
2466 if (!ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
2467 goto err_unlock;
2468 goto ok;
2469 }
2470 /* If victim is in same user_ns, then uids are comparable */
2471 if (cred->euid != pcred->euid &&
2472 cred->euid != pcred->uid &&
2473 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
2474 goto err_unlock;
2475ok:
2476 head = p->robust_list;
2477 rcu_read_unlock();
2478 } 2463 }
2479 2464
2465 ret = -EPERM;
2466 if (!ptrace_may_access(p, PTRACE_MODE_READ))
2467 goto err_unlock;
2468
2469 head = p->robust_list;
2470 rcu_read_unlock();
2471
2480 if (put_user(sizeof(*head), len_ptr)) 2472 if (put_user(sizeof(*head), len_ptr))
2481 return -EFAULT; 2473 return -EFAULT;
2482 return put_user(head, head_ptr); 2474 return put_user(head, head_ptr);
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 5f9e689dc8f0..83e368b005fc 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -10,6 +10,7 @@
10#include <linux/compat.h> 10#include <linux/compat.h>
11#include <linux/nsproxy.h> 11#include <linux/nsproxy.h>
12#include <linux/futex.h> 12#include <linux/futex.h>
13#include <linux/ptrace.h>
13 14
14#include <asm/uaccess.h> 15#include <asm/uaccess.h>
15 16
@@ -136,40 +137,31 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
136{ 137{
137 struct compat_robust_list_head __user *head; 138 struct compat_robust_list_head __user *head;
138 unsigned long ret; 139 unsigned long ret;
139 const struct cred *cred = current_cred(), *pcred; 140 struct task_struct *p;
140 141
141 if (!futex_cmpxchg_enabled) 142 if (!futex_cmpxchg_enabled)
142 return -ENOSYS; 143 return -ENOSYS;
143 144
145 WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
146
147 rcu_read_lock();
148
149 ret = -ESRCH;
144 if (!pid) 150 if (!pid)
145 head = current->compat_robust_list; 151 p = current;
146 else { 152 else {
147 struct task_struct *p;
148
149 ret = -ESRCH;
150 rcu_read_lock();
151 p = find_task_by_vpid(pid); 153 p = find_task_by_vpid(pid);
152 if (!p) 154 if (!p)
153 goto err_unlock; 155 goto err_unlock;
154 ret = -EPERM;
155 pcred = __task_cred(p);
156 /* If victim is in different user_ns, then uids are not
157 comparable, so we must have CAP_SYS_PTRACE */
158 if (cred->user->user_ns != pcred->user->user_ns) {
159 if (!ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
160 goto err_unlock;
161 goto ok;
162 }
163 /* If victim is in same user_ns, then uids are comparable */
164 if (cred->euid != pcred->euid &&
165 cred->euid != pcred->uid &&
166 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
167 goto err_unlock;
168ok:
169 head = p->compat_robust_list;
170 rcu_read_unlock();
171 } 156 }
172 157
158 ret = -EPERM;
159 if (!ptrace_may_access(p, PTRACE_MODE_READ))
160 goto err_unlock;
161
162 head = p->compat_robust_list;
163 rcu_read_unlock();
164
173 if (put_user(sizeof(*head), len_ptr)) 165 if (put_user(sizeof(*head), len_ptr))
174 return -EFAULT; 166 return -EFAULT;
175 return put_user(ptr_to_compat(head), head_ptr); 167 return put_user(ptr_to_compat(head), head_ptr);
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 5a38bf4de641..cf1a4a68ce44 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -13,7 +13,7 @@ config GENERIC_HARDIRQS
13# Options selectable by the architecture code 13# Options selectable by the architecture code
14 14
15# Make sparse irq Kconfig switch below available 15# Make sparse irq Kconfig switch below available
16config HAVE_SPARSE_IRQ 16config MAY_HAVE_SPARSE_IRQ
17 bool 17 bool
18 18
19# Enable the generic irq autoprobe mechanism 19# Enable the generic irq autoprobe mechanism
@@ -56,13 +56,22 @@ config GENERIC_IRQ_CHIP
56config IRQ_DOMAIN 56config IRQ_DOMAIN
57 bool 57 bool
58 58
59config IRQ_DOMAIN_DEBUG
60 bool "Expose hardware/virtual IRQ mapping via debugfs"
61 depends on IRQ_DOMAIN && DEBUG_FS
62 help
63 This option will show the mapping relationship between hardware irq
64 numbers and Linux irq numbers. The mapping is exposed via debugfs
65 in the file "virq_mapping".
66
67 If you don't know what this means you don't need it.
68
59# Support forced irq threading 69# Support forced irq threading
60config IRQ_FORCED_THREADING 70config IRQ_FORCED_THREADING
61 bool 71 bool
62 72
63config SPARSE_IRQ 73config SPARSE_IRQ
64 bool "Support sparse irq numbering" 74 bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ
65 depends on HAVE_SPARSE_IRQ
66 ---help--- 75 ---help---
67 76
68 Sparse irq numbering is useful for distro kernels that want 77 Sparse irq numbering is useful for distro kernels that want
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 6ff84e6a954c..bdb180325551 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -54,14 +54,18 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action)
54static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action) 54static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
55{ 55{
56 /* 56 /*
57 * Wake up the handler thread for this action. In case the 57 * In case the thread crashed and was killed we just pretend that
58 * thread crashed and was killed we just pretend that we 58 * we handled the interrupt. The hardirq handler has disabled the
59 * handled the interrupt. The hardirq handler has disabled the 59 * device interrupt, so no irq storm is lurking.
60 * device interrupt, so no irq storm is lurking. If the 60 */
61 if (action->thread->flags & PF_EXITING)
62 return;
63
64 /*
65 * Wake up the handler thread for this action. If the
61 * RUNTHREAD bit is already set, nothing to do. 66 * RUNTHREAD bit is already set, nothing to do.
62 */ 67 */
63 if ((action->thread->flags & PF_EXITING) || 68 if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
64 test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
65 return; 69 return;
66 70
67 /* 71 /*
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index af48e59bc2ff..3601f3fbf67c 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -632,7 +632,7 @@ unsigned int irq_linear_revmap(struct irq_domain *domain,
632 return revmap[hwirq]; 632 return revmap[hwirq];
633} 633}
634 634
635#ifdef CONFIG_VIRQ_DEBUG 635#ifdef CONFIG_IRQ_DOMAIN_DEBUG
636static int virq_debug_show(struct seq_file *m, void *private) 636static int virq_debug_show(struct seq_file *m, void *private)
637{ 637{
638 unsigned long flags; 638 unsigned long flags;
@@ -668,7 +668,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
668 data = irq_desc_get_chip_data(desc); 668 data = irq_desc_get_chip_data(desc);
669 seq_printf(m, "0x%16p ", data); 669 seq_printf(m, "0x%16p ", data);
670 670
671 if (desc->irq_data.domain->of_node) 671 if (desc->irq_data.domain && desc->irq_data.domain->of_node)
672 p = desc->irq_data.domain->of_node->full_name; 672 p = desc->irq_data.domain->of_node->full_name;
673 else 673 else
674 p = none; 674 p = none;
@@ -695,14 +695,14 @@ static const struct file_operations virq_debug_fops = {
695 695
696static int __init irq_debugfs_init(void) 696static int __init irq_debugfs_init(void)
697{ 697{
698 if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root, 698 if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
699 NULL, &virq_debug_fops) == NULL) 699 NULL, &virq_debug_fops) == NULL)
700 return -ENOMEM; 700 return -ENOMEM;
701 701
702 return 0; 702 return 0;
703} 703}
704__initcall(irq_debugfs_init); 704__initcall(irq_debugfs_init);
705#endif /* CONFIG_VIRQ_DEBUG */ 705#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
706 706
707int irq_domain_simple_map(struct irq_domain *d, unsigned int irq, 707int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
708 irq_hw_number_t hwirq) 708 irq_hw_number_t hwirq)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index b0ccd1ac2d6a..89a3ea82569b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -282,7 +282,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
282{ 282{
283 struct irq_chip *chip = irq_desc_get_chip(desc); 283 struct irq_chip *chip = irq_desc_get_chip(desc);
284 struct cpumask *set = irq_default_affinity; 284 struct cpumask *set = irq_default_affinity;
285 int ret; 285 int ret, node = desc->irq_data.node;
286 286
287 /* Excludes PER_CPU and NO_BALANCE interrupts */ 287 /* Excludes PER_CPU and NO_BALANCE interrupts */
288 if (!irq_can_set_affinity(irq)) 288 if (!irq_can_set_affinity(irq))
@@ -301,6 +301,13 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
301 } 301 }
302 302
303 cpumask_and(mask, cpu_online_mask, set); 303 cpumask_and(mask, cpu_online_mask, set);
304 if (node != NUMA_NO_NODE) {
305 const struct cpumask *nodemask = cpumask_of_node(node);
306
307 /* make sure at least one of the cpus in nodemask is online */
308 if (cpumask_intersects(mask, nodemask))
309 cpumask_and(mask, mask, nodemask);
310 }
304 ret = chip->irq_set_affinity(&desc->irq_data, mask, false); 311 ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
305 switch (ret) { 312 switch (ret) {
306 case IRQ_SET_MASK_OK: 313 case IRQ_SET_MASK_OK:
@@ -645,7 +652,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
645 * is marked MASKED. 652 * is marked MASKED.
646 */ 653 */
647static void irq_finalize_oneshot(struct irq_desc *desc, 654static void irq_finalize_oneshot(struct irq_desc *desc,
648 struct irqaction *action, bool force) 655 struct irqaction *action)
649{ 656{
650 if (!(desc->istate & IRQS_ONESHOT)) 657 if (!(desc->istate & IRQS_ONESHOT))
651 return; 658 return;
@@ -679,7 +686,7 @@ again:
679 * we would clear the threads_oneshot bit of this thread which 686 * we would clear the threads_oneshot bit of this thread which
680 * was just set. 687 * was just set.
681 */ 688 */
682 if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) 689 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
683 goto out_unlock; 690 goto out_unlock;
684 691
685 desc->threads_oneshot &= ~action->thread_mask; 692 desc->threads_oneshot &= ~action->thread_mask;
@@ -739,7 +746,7 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
739 746
740 local_bh_disable(); 747 local_bh_disable();
741 ret = action->thread_fn(action->irq, action->dev_id); 748 ret = action->thread_fn(action->irq, action->dev_id);
742 irq_finalize_oneshot(desc, action, false); 749 irq_finalize_oneshot(desc, action);
743 local_bh_enable(); 750 local_bh_enable();
744 return ret; 751 return ret;
745} 752}
@@ -755,7 +762,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
755 irqreturn_t ret; 762 irqreturn_t ret;
756 763
757 ret = action->thread_fn(action->irq, action->dev_id); 764 ret = action->thread_fn(action->irq, action->dev_id);
758 irq_finalize_oneshot(desc, action, false); 765 irq_finalize_oneshot(desc, action);
759 return ret; 766 return ret;
760} 767}
761 768
@@ -844,7 +851,7 @@ void exit_irq_thread(void)
844 wake_threads_waitq(desc); 851 wake_threads_waitq(desc);
845 852
846 /* Prevent a stale desc->threads_oneshot */ 853 /* Prevent a stale desc->threads_oneshot */
847 irq_finalize_oneshot(desc, action, true); 854 irq_finalize_oneshot(desc, action);
848} 855}
849 856
850static void irq_setup_forced_threading(struct irqaction *new) 857static void irq_setup_forced_threading(struct irqaction *new)
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 47420908fba0..c3c89751b327 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -43,12 +43,16 @@ void irq_move_masked_irq(struct irq_data *idata)
43 * masking the irqs. 43 * masking the irqs.
44 */ 44 */
45 if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) 45 if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
46 < nr_cpu_ids)) 46 < nr_cpu_ids)) {
47 if (!chip->irq_set_affinity(&desc->irq_data, 47 int ret = chip->irq_set_affinity(&desc->irq_data,
48 desc->pending_mask, false)) { 48 desc->pending_mask, false);
49 switch (ret) {
50 case IRQ_SET_MASK_OK:
49 cpumask_copy(desc->irq_data.affinity, desc->pending_mask); 51 cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
52 case IRQ_SET_MASK_OK_NOCOPY:
50 irq_set_thread_affinity(desc); 53 irq_set_thread_affinity(desc);
51 } 54 }
55 }
52 56
53 cpumask_clear(desc->pending_mask); 57 cpumask_clear(desc->pending_mask);
54} 58}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 157fb9b2b186..4603b9d8f30a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1265,29 +1265,59 @@ EXPORT_SYMBOL_GPL(kick_process);
1265 */ 1265 */
1266static int select_fallback_rq(int cpu, struct task_struct *p) 1266static int select_fallback_rq(int cpu, struct task_struct *p)
1267{ 1267{
1268 int dest_cpu;
1269 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu)); 1268 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
1269 enum { cpuset, possible, fail } state = cpuset;
1270 int dest_cpu;
1270 1271
1271 /* Look for allowed, online CPU in same node. */ 1272 /* Look for allowed, online CPU in same node. */
1272 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask) 1273 for_each_cpu(dest_cpu, nodemask) {
1274 if (!cpu_online(dest_cpu))
1275 continue;
1276 if (!cpu_active(dest_cpu))
1277 continue;
1273 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) 1278 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
1274 return dest_cpu; 1279 return dest_cpu;
1280 }
1281
1282 for (;;) {
1283 /* Any allowed, online CPU? */
1284 for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
1285 if (!cpu_online(dest_cpu))
1286 continue;
1287 if (!cpu_active(dest_cpu))
1288 continue;
1289 goto out;
1290 }
1275 1291
1276 /* Any allowed, online CPU? */ 1292 switch (state) {
1277 dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask); 1293 case cpuset:
1278 if (dest_cpu < nr_cpu_ids) 1294 /* No more Mr. Nice Guy. */
1279 return dest_cpu; 1295 cpuset_cpus_allowed_fallback(p);
1296 state = possible;
1297 break;
1280 1298
1281 /* No more Mr. Nice Guy. */ 1299 case possible:
1282 dest_cpu = cpuset_cpus_allowed_fallback(p); 1300 do_set_cpus_allowed(p, cpu_possible_mask);
1283 /* 1301 state = fail;
1284 * Don't tell them about moving exiting tasks or 1302 break;
1285 * kernel threads (both mm NULL), since they never 1303
1286 * leave kernel. 1304 case fail:
1287 */ 1305 BUG();
1288 if (p->mm && printk_ratelimit()) { 1306 break;
1289 printk_sched("process %d (%s) no longer affine to cpu%d\n", 1307 }
1290 task_pid_nr(p), p->comm, cpu); 1308 }
1309
1310out:
1311 if (state != cpuset) {
1312 /*
1313 * Don't tell them about moving exiting tasks or
1314 * kernel threads (both mm NULL), since they never
1315 * leave kernel.
1316 */
1317 if (p->mm && printk_ratelimit()) {
1318 printk_sched("process %d (%s) no longer affine to cpu%d\n",
1319 task_pid_nr(p), p->comm, cpu);
1320 }
1291 } 1321 }
1292 1322
1293 return dest_cpu; 1323 return dest_cpu;
@@ -1934,6 +1964,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1934 local_irq_enable(); 1964 local_irq_enable();
1935#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ 1965#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
1936 finish_lock_switch(rq, prev); 1966 finish_lock_switch(rq, prev);
1967 finish_arch_post_lock_switch();
1937 1968
1938 fire_sched_in_preempt_notifiers(current); 1969 fire_sched_in_preempt_notifiers(current);
1939 if (mm) 1970 if (mm)
@@ -3071,8 +3102,6 @@ EXPORT_SYMBOL(sub_preempt_count);
3071 */ 3102 */
3072static noinline void __schedule_bug(struct task_struct *prev) 3103static noinline void __schedule_bug(struct task_struct *prev)
3073{ 3104{
3074 struct pt_regs *regs = get_irq_regs();
3075
3076 if (oops_in_progress) 3105 if (oops_in_progress)
3077 return; 3106 return;
3078 3107
@@ -3083,11 +3112,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
3083 print_modules(); 3112 print_modules();
3084 if (irqs_disabled()) 3113 if (irqs_disabled())
3085 print_irqtrace_events(prev); 3114 print_irqtrace_events(prev);
3086 3115 dump_stack();
3087 if (regs)
3088 show_regs(regs);
3089 else
3090 dump_stack();
3091} 3116}
3092 3117
3093/* 3118/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 94340c7544a9..0d97ebdc58f0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -416,8 +416,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
416 416
417#endif /* CONFIG_FAIR_GROUP_SCHED */ 417#endif /* CONFIG_FAIR_GROUP_SCHED */
418 418
419static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, 419static __always_inline
420 unsigned long delta_exec); 420void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
421 421
422/************************************************************** 422/**************************************************************
423 * Scheduling class tree data structure manipulation methods: 423 * Scheduling class tree data structure manipulation methods:
@@ -1162,7 +1162,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1162 __clear_buddies_skip(se); 1162 __clear_buddies_skip(se);
1163} 1163}
1164 1164
1165static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); 1165static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1166 1166
1167static void 1167static void
1168dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 1168dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -1546,8 +1546,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
1546 resched_task(rq_of(cfs_rq)->curr); 1546 resched_task(rq_of(cfs_rq)->curr);
1547} 1547}
1548 1548
1549static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, 1549static __always_inline
1550 unsigned long delta_exec) 1550void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
1551{ 1551{
1552 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) 1552 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
1553 return; 1553 return;
@@ -2073,11 +2073,11 @@ void unthrottle_offline_cfs_rqs(struct rq *rq)
2073} 2073}
2074 2074
2075#else /* CONFIG_CFS_BANDWIDTH */ 2075#else /* CONFIG_CFS_BANDWIDTH */
2076static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, 2076static __always_inline
2077 unsigned long delta_exec) {} 2077void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {}
2078static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 2078static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2079static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} 2079static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
2080static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} 2080static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2081 2081
2082static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) 2082static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2083{ 2083{
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index b60dad720173..44af55e6d5d0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1428,7 +1428,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
1428next_idx: 1428next_idx:
1429 if (idx >= MAX_RT_PRIO) 1429 if (idx >= MAX_RT_PRIO)
1430 continue; 1430 continue;
1431 if (next && next->prio < idx) 1431 if (next && next->prio <= idx)
1432 continue; 1432 continue;
1433 list_for_each_entry(rt_se, array->queue + idx, run_list) { 1433 list_for_each_entry(rt_se, array->queue + idx, run_list) {
1434 struct task_struct *p; 1434 struct task_struct *p;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 42b1f304b044..fb3acba4d52e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -681,6 +681,9 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
681#ifndef finish_arch_switch 681#ifndef finish_arch_switch
682# define finish_arch_switch(prev) do { } while (0) 682# define finish_arch_switch(prev) do { } while (0)
683#endif 683#endif
684#ifndef finish_arch_post_lock_switch
685# define finish_arch_post_lock_switch() do { } while (0)
686#endif
684 687
685#ifndef __ARCH_WANT_UNLOCKED_CTXSW 688#ifndef __ARCH_WANT_UNLOCKED_CTXSW
686static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) 689static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 84c7d96918bf..5cdd8065a3ce 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -163,7 +163,7 @@ void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
163EXPORT_SYMBOL(_raw_spin_lock_bh); 163EXPORT_SYMBOL(_raw_spin_lock_bh);
164#endif 164#endif
165 165
166#ifndef CONFIG_INLINE_SPIN_UNLOCK 166#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
167void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) 167void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
168{ 168{
169 __raw_spin_unlock(lock); 169 __raw_spin_unlock(lock);
diff --git a/kernel/time.c b/kernel/time.c
index 73e416db0a1e..ba744cf80696 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -163,7 +163,6 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
163 return error; 163 return error;
164 164
165 if (tz) { 165 if (tz) {
166 /* SMP safe, global irq locking makes it work. */
167 sys_tz = *tz; 166 sys_tz = *tz;
168 update_vsyscall_tz(); 167 update_vsyscall_tz();
169 if (firsttime) { 168 if (firsttime) {
@@ -173,12 +172,7 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
173 } 172 }
174 } 173 }
175 if (tv) 174 if (tv)
176 {
177 /* SMP safe, again the code in arch/foo/time.c should
178 * globally block out interrupts when it runs.
179 */
180 return do_settimeofday(tv); 175 return do_settimeofday(tv);
181 }
182 return 0; 176 return 0;
183} 177}
184 178
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 8a46f5d64504..8a538c55fc7b 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -96,6 +96,11 @@ static int alarmtimer_rtc_add_device(struct device *dev,
96 return 0; 96 return 0;
97} 97}
98 98
99static inline void alarmtimer_rtc_timer_init(void)
100{
101 rtc_timer_init(&rtctimer, NULL, NULL);
102}
103
99static struct class_interface alarmtimer_rtc_interface = { 104static struct class_interface alarmtimer_rtc_interface = {
100 .add_dev = &alarmtimer_rtc_add_device, 105 .add_dev = &alarmtimer_rtc_add_device,
101}; 106};
@@ -117,6 +122,7 @@ static inline struct rtc_device *alarmtimer_get_rtcdev(void)
117#define rtcdev (NULL) 122#define rtcdev (NULL)
118static inline int alarmtimer_rtc_interface_setup(void) { return 0; } 123static inline int alarmtimer_rtc_interface_setup(void) { return 0; }
119static inline void alarmtimer_rtc_interface_remove(void) { } 124static inline void alarmtimer_rtc_interface_remove(void) { }
125static inline void alarmtimer_rtc_timer_init(void) { }
120#endif 126#endif
121 127
122/** 128/**
@@ -783,6 +789,8 @@ static int __init alarmtimer_init(void)
783 .nsleep = alarm_timer_nsleep, 789 .nsleep = alarm_timer_nsleep,
784 }; 790 };
785 791
792 alarmtimer_rtc_timer_init();
793
786 posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock); 794 posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
787 posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock); 795 posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
788 796
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index a45ca167ab24..c9583382141a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -500,7 +500,7 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
500{ 500{
501 u64 ret; 501 u64 ret;
502 /* 502 /*
503 * We won't try to correct for more then 11% adjustments (110,000 ppm), 503 * We won't try to correct for more than 11% adjustments (110,000 ppm),
504 */ 504 */
505 ret = (u64)cs->mult * 11; 505 ret = (u64)cs->mult * 11;
506 do_div(ret,100); 506 do_div(ret,100);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 6e039b144daf..f03fd83b170b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -34,8 +34,6 @@ unsigned long tick_nsec;
34static u64 tick_length; 34static u64 tick_length;
35static u64 tick_length_base; 35static u64 tick_length_base;
36 36
37static struct hrtimer leap_timer;
38
39#define MAX_TICKADJ 500LL /* usecs */ 37#define MAX_TICKADJ 500LL /* usecs */
40#define MAX_TICKADJ_SCALED \ 38#define MAX_TICKADJ_SCALED \
41 (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ) 39 (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
@@ -381,70 +379,63 @@ u64 ntp_tick_length(void)
381 379
382 380
383/* 381/*
384 * Leap second processing. If in leap-insert state at the end of the 382 * this routine handles the overflow of the microsecond field
385 * day, the system clock is set back one second; if in leap-delete 383 *
386 * state, the system clock is set ahead one second. 384 * The tricky bits of code to handle the accurate clock support
385 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
386 * They were originally developed for SUN and DEC kernels.
387 * All the kudos should go to Dave for this stuff.
388 *
389 * Also handles leap second processing, and returns leap offset
387 */ 390 */
388static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer) 391int second_overflow(unsigned long secs)
389{ 392{
390 enum hrtimer_restart res = HRTIMER_NORESTART; 393 s64 delta;
391 unsigned long flags;
392 int leap = 0; 394 int leap = 0;
395 unsigned long flags;
393 396
394 spin_lock_irqsave(&ntp_lock, flags); 397 spin_lock_irqsave(&ntp_lock, flags);
398
399 /*
400 * Leap second processing. If in leap-insert state at the end of the
401 * day, the system clock is set back one second; if in leap-delete
402 * state, the system clock is set ahead one second.
403 */
395 switch (time_state) { 404 switch (time_state) {
396 case TIME_OK: 405 case TIME_OK:
406 if (time_status & STA_INS)
407 time_state = TIME_INS;
408 else if (time_status & STA_DEL)
409 time_state = TIME_DEL;
397 break; 410 break;
398 case TIME_INS: 411 case TIME_INS:
399 leap = -1; 412 if (secs % 86400 == 0) {
400 time_state = TIME_OOP; 413 leap = -1;
401 printk(KERN_NOTICE 414 time_state = TIME_OOP;
402 "Clock: inserting leap second 23:59:60 UTC\n"); 415 printk(KERN_NOTICE
403 hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC); 416 "Clock: inserting leap second 23:59:60 UTC\n");
404 res = HRTIMER_RESTART; 417 }
405 break; 418 break;
406 case TIME_DEL: 419 case TIME_DEL:
407 leap = 1; 420 if ((secs + 1) % 86400 == 0) {
408 time_tai--; 421 leap = 1;
409 time_state = TIME_WAIT; 422 time_tai--;
410 printk(KERN_NOTICE 423 time_state = TIME_WAIT;
411 "Clock: deleting leap second 23:59:59 UTC\n"); 424 printk(KERN_NOTICE
425 "Clock: deleting leap second 23:59:59 UTC\n");
426 }
412 break; 427 break;
413 case TIME_OOP: 428 case TIME_OOP:
414 time_tai++; 429 time_tai++;
415 time_state = TIME_WAIT; 430 time_state = TIME_WAIT;
416 /* fall through */ 431 break;
432
417 case TIME_WAIT: 433 case TIME_WAIT:
418 if (!(time_status & (STA_INS | STA_DEL))) 434 if (!(time_status & (STA_INS | STA_DEL)))
419 time_state = TIME_OK; 435 time_state = TIME_OK;
420 break; 436 break;
421 } 437 }
422 spin_unlock_irqrestore(&ntp_lock, flags);
423 438
424 /*
425 * We have to call this outside of the ntp_lock to keep
426 * the proper locking hierarchy
427 */
428 if (leap)
429 timekeeping_leap_insert(leap);
430
431 return res;
432}
433
434/*
435 * this routine handles the overflow of the microsecond field
436 *
437 * The tricky bits of code to handle the accurate clock support
438 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
439 * They were originally developed for SUN and DEC kernels.
440 * All the kudos should go to Dave for this stuff.
441 */
442void second_overflow(void)
443{
444 s64 delta;
445 unsigned long flags;
446
447 spin_lock_irqsave(&ntp_lock, flags);
448 439
449 /* Bump the maxerror field */ 440 /* Bump the maxerror field */
450 time_maxerror += MAXFREQ / NSEC_PER_USEC; 441 time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -481,15 +472,17 @@ void second_overflow(void)
481 tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ) 472 tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
482 << NTP_SCALE_SHIFT; 473 << NTP_SCALE_SHIFT;
483 time_adjust = 0; 474 time_adjust = 0;
475
476
477
484out: 478out:
485 spin_unlock_irqrestore(&ntp_lock, flags); 479 spin_unlock_irqrestore(&ntp_lock, flags);
480
481 return leap;
486} 482}
487 483
488#ifdef CONFIG_GENERIC_CMOS_UPDATE 484#ifdef CONFIG_GENERIC_CMOS_UPDATE
489 485
490/* Disable the cmos update - used by virtualization and embedded */
491int no_sync_cmos_clock __read_mostly;
492
493static void sync_cmos_clock(struct work_struct *work); 486static void sync_cmos_clock(struct work_struct *work);
494 487
495static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock); 488static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
@@ -536,35 +529,13 @@ static void sync_cmos_clock(struct work_struct *work)
536 529
537static void notify_cmos_timer(void) 530static void notify_cmos_timer(void)
538{ 531{
539 if (!no_sync_cmos_clock) 532 schedule_delayed_work(&sync_cmos_work, 0);
540 schedule_delayed_work(&sync_cmos_work, 0);
541} 533}
542 534
543#else 535#else
544static inline void notify_cmos_timer(void) { } 536static inline void notify_cmos_timer(void) { }
545#endif 537#endif
546 538
547/*
548 * Start the leap seconds timer:
549 */
550static inline void ntp_start_leap_timer(struct timespec *ts)
551{
552 long now = ts->tv_sec;
553
554 if (time_status & STA_INS) {
555 time_state = TIME_INS;
556 now += 86400 - now % 86400;
557 hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
558
559 return;
560 }
561
562 if (time_status & STA_DEL) {
563 time_state = TIME_DEL;
564 now += 86400 - (now + 1) % 86400;
565 hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
566 }
567}
568 539
569/* 540/*
570 * Propagate a new txc->status value into the NTP state: 541 * Propagate a new txc->status value into the NTP state:
@@ -589,22 +560,6 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
589 time_status &= STA_RONLY; 560 time_status &= STA_RONLY;
590 time_status |= txc->status & ~STA_RONLY; 561 time_status |= txc->status & ~STA_RONLY;
591 562
592 switch (time_state) {
593 case TIME_OK:
594 ntp_start_leap_timer(ts);
595 break;
596 case TIME_INS:
597 case TIME_DEL:
598 time_state = TIME_OK;
599 ntp_start_leap_timer(ts);
600 case TIME_WAIT:
601 if (!(time_status & (STA_INS | STA_DEL)))
602 time_state = TIME_OK;
603 break;
604 case TIME_OOP:
605 hrtimer_restart(&leap_timer);
606 break;
607 }
608} 563}
609/* 564/*
610 * Called with the xtime lock held, so we can access and modify 565 * Called with the xtime lock held, so we can access and modify
@@ -686,9 +641,6 @@ int do_adjtimex(struct timex *txc)
686 (txc->tick < 900000/USER_HZ || 641 (txc->tick < 900000/USER_HZ ||
687 txc->tick > 1100000/USER_HZ)) 642 txc->tick > 1100000/USER_HZ))
688 return -EINVAL; 643 return -EINVAL;
689
690 if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
691 hrtimer_cancel(&leap_timer);
692 } 644 }
693 645
694 if (txc->modes & ADJ_SETOFFSET) { 646 if (txc->modes & ADJ_SETOFFSET) {
@@ -1010,6 +962,4 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
1010void __init ntp_init(void) 962void __init ntp_init(void)
1011{ 963{
1012 ntp_clear(); 964 ntp_clear();
1013 hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
1014 leap_timer.function = ntp_leap_second;
1015} 965}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 15be32e19c6e..d66b21308f7c 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -184,18 +184,6 @@ static void timekeeping_update(bool clearntp)
184} 184}
185 185
186 186
187void timekeeping_leap_insert(int leapsecond)
188{
189 unsigned long flags;
190
191 write_seqlock_irqsave(&timekeeper.lock, flags);
192 timekeeper.xtime.tv_sec += leapsecond;
193 timekeeper.wall_to_monotonic.tv_sec -= leapsecond;
194 timekeeping_update(false);
195 write_sequnlock_irqrestore(&timekeeper.lock, flags);
196
197}
198
199/** 187/**
200 * timekeeping_forward_now - update clock to the current time 188 * timekeeping_forward_now - update clock to the current time
201 * 189 *
@@ -448,9 +436,12 @@ EXPORT_SYMBOL(timekeeping_inject_offset);
448static int change_clocksource(void *data) 436static int change_clocksource(void *data)
449{ 437{
450 struct clocksource *new, *old; 438 struct clocksource *new, *old;
439 unsigned long flags;
451 440
452 new = (struct clocksource *) data; 441 new = (struct clocksource *) data;
453 442
443 write_seqlock_irqsave(&timekeeper.lock, flags);
444
454 timekeeping_forward_now(); 445 timekeeping_forward_now();
455 if (!new->enable || new->enable(new) == 0) { 446 if (!new->enable || new->enable(new) == 0) {
456 old = timekeeper.clock; 447 old = timekeeper.clock;
@@ -458,6 +449,10 @@ static int change_clocksource(void *data)
458 if (old->disable) 449 if (old->disable)
459 old->disable(old); 450 old->disable(old);
460 } 451 }
452 timekeeping_update(true);
453
454 write_sequnlock_irqrestore(&timekeeper.lock, flags);
455
461 return 0; 456 return 0;
462} 457}
463 458
@@ -827,7 +822,7 @@ static void timekeeping_adjust(s64 offset)
827 int adj; 822 int adj;
828 823
829 /* 824 /*
830 * The point of this is to check if the error is greater then half 825 * The point of this is to check if the error is greater than half
831 * an interval. 826 * an interval.
832 * 827 *
833 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs. 828 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
@@ -835,7 +830,7 @@ static void timekeeping_adjust(s64 offset)
835 * Note we subtract one in the shift, so that error is really error*2. 830 * Note we subtract one in the shift, so that error is really error*2.
836 * This "saves" dividing(shifting) interval twice, but keeps the 831 * This "saves" dividing(shifting) interval twice, but keeps the
837 * (error > interval) comparison as still measuring if error is 832 * (error > interval) comparison as still measuring if error is
838 * larger then half an interval. 833 * larger than half an interval.
839 * 834 *
840 * Note: It does not "save" on aggravation when reading the code. 835 * Note: It does not "save" on aggravation when reading the code.
841 */ 836 */
@@ -843,7 +838,7 @@ static void timekeeping_adjust(s64 offset)
843 if (error > interval) { 838 if (error > interval) {
844 /* 839 /*
845 * We now divide error by 4(via shift), which checks if 840 * We now divide error by 4(via shift), which checks if
846 * the error is greater then twice the interval. 841 * the error is greater than twice the interval.
847 * If it is greater, we need a bigadjust, if its smaller, 842 * If it is greater, we need a bigadjust, if its smaller,
848 * we can adjust by 1. 843 * we can adjust by 1.
849 */ 844 */
@@ -874,13 +869,15 @@ static void timekeeping_adjust(s64 offset)
874 } else /* No adjustment needed */ 869 } else /* No adjustment needed */
875 return; 870 return;
876 871
877 WARN_ONCE(timekeeper.clock->maxadj && 872 if (unlikely(timekeeper.clock->maxadj &&
878 (timekeeper.mult + adj > timekeeper.clock->mult + 873 (timekeeper.mult + adj >
879 timekeeper.clock->maxadj), 874 timekeeper.clock->mult + timekeeper.clock->maxadj))) {
880 "Adjusting %s more then 11%% (%ld vs %ld)\n", 875 printk_once(KERN_WARNING
876 "Adjusting %s more than 11%% (%ld vs %ld)\n",
881 timekeeper.clock->name, (long)timekeeper.mult + adj, 877 timekeeper.clock->name, (long)timekeeper.mult + adj,
882 (long)timekeeper.clock->mult + 878 (long)timekeeper.clock->mult +
883 timekeeper.clock->maxadj); 879 timekeeper.clock->maxadj);
880 }
884 /* 881 /*
885 * So the following can be confusing. 882 * So the following can be confusing.
886 * 883 *
@@ -952,7 +949,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
952 u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift; 949 u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
953 u64 raw_nsecs; 950 u64 raw_nsecs;
954 951
955 /* If the offset is smaller then a shifted interval, do nothing */ 952 /* If the offset is smaller than a shifted interval, do nothing */
956 if (offset < timekeeper.cycle_interval<<shift) 953 if (offset < timekeeper.cycle_interval<<shift)
957 return offset; 954 return offset;
958 955
@@ -962,9 +959,11 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
962 959
963 timekeeper.xtime_nsec += timekeeper.xtime_interval << shift; 960 timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
964 while (timekeeper.xtime_nsec >= nsecps) { 961 while (timekeeper.xtime_nsec >= nsecps) {
962 int leap;
965 timekeeper.xtime_nsec -= nsecps; 963 timekeeper.xtime_nsec -= nsecps;
966 timekeeper.xtime.tv_sec++; 964 timekeeper.xtime.tv_sec++;
967 second_overflow(); 965 leap = second_overflow(timekeeper.xtime.tv_sec);
966 timekeeper.xtime.tv_sec += leap;
968 } 967 }
969 968
970 /* Accumulate raw time */ 969 /* Accumulate raw time */
@@ -1018,13 +1017,13 @@ static void update_wall_time(void)
1018 * With NO_HZ we may have to accumulate many cycle_intervals 1017 * With NO_HZ we may have to accumulate many cycle_intervals
1019 * (think "ticks") worth of time at once. To do this efficiently, 1018 * (think "ticks") worth of time at once. To do this efficiently,
1020 * we calculate the largest doubling multiple of cycle_intervals 1019 * we calculate the largest doubling multiple of cycle_intervals
1021 * that is smaller then the offset. We then accumulate that 1020 * that is smaller than the offset. We then accumulate that
1022 * chunk in one go, and then try to consume the next smaller 1021 * chunk in one go, and then try to consume the next smaller
1023 * doubled multiple. 1022 * doubled multiple.
1024 */ 1023 */
1025 shift = ilog2(offset) - ilog2(timekeeper.cycle_interval); 1024 shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
1026 shift = max(0, shift); 1025 shift = max(0, shift);
1027 /* Bound shift to one less then what overflows tick_length */ 1026 /* Bound shift to one less than what overflows tick_length */
1028 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1; 1027 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
1029 shift = min(shift, maxshift); 1028 shift = min(shift, maxshift);
1030 while (offset >= timekeeper.cycle_interval) { 1029 while (offset >= timekeeper.cycle_interval) {
@@ -1072,12 +1071,14 @@ static void update_wall_time(void)
1072 1071
1073 /* 1072 /*
1074 * Finally, make sure that after the rounding 1073 * Finally, make sure that after the rounding
1075 * xtime.tv_nsec isn't larger then NSEC_PER_SEC 1074 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
1076 */ 1075 */
1077 if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) { 1076 if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) {
1077 int leap;
1078 timekeeper.xtime.tv_nsec -= NSEC_PER_SEC; 1078 timekeeper.xtime.tv_nsec -= NSEC_PER_SEC;
1079 timekeeper.xtime.tv_sec++; 1079 timekeeper.xtime.tv_sec++;
1080 second_overflow(); 1080 leap = second_overflow(timekeeper.xtime.tv_sec);
1081 timekeeper.xtime.tv_sec += leap;
1081 } 1082 }
1082 1083
1083 timekeeping_update(false); 1084 timekeeping_update(false);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index cd3134510f3d..a1d2849f2473 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -141,7 +141,7 @@ if FTRACE
141config FUNCTION_TRACER 141config FUNCTION_TRACER
142 bool "Kernel Function Tracer" 142 bool "Kernel Function Tracer"
143 depends on HAVE_FUNCTION_TRACER 143 depends on HAVE_FUNCTION_TRACER
144 select FRAME_POINTER if !ARM_UNWIND && !S390 && !MICROBLAZE 144 select FRAME_POINTER if !ARM_UNWIND && !PPC && !S390 && !MICROBLAZE
145 select KALLSYMS 145 select KALLSYMS
146 select GENERIC_TRACER 146 select GENERIC_TRACER
147 select CONTEXT_SWITCH_TRACER 147 select CONTEXT_SWITCH_TRACER
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 867bd1dd2dd0..0fa92f677c92 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -249,7 +249,8 @@ static void update_ftrace_function(void)
249#else 249#else
250 __ftrace_trace_function = func; 250 __ftrace_trace_function = func;
251#endif 251#endif
252 ftrace_trace_function = ftrace_test_stop_func; 252 ftrace_trace_function =
253 (func == ftrace_stub) ? func : ftrace_test_stop_func;
253#endif 254#endif
254} 255}
255 256
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f5b7b5c1195b..cf8d11e91efd 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -154,33 +154,10 @@ enum {
154 154
155static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; 155static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
156 156
157#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) 157/* Used for individual buffers (after the counter) */
158 158#define RB_BUFFER_OFF (1 << 20)
159/**
160 * tracing_on - enable all tracing buffers
161 *
162 * This function enables all tracing buffers that may have been
163 * disabled with tracing_off.
164 */
165void tracing_on(void)
166{
167 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
168}
169EXPORT_SYMBOL_GPL(tracing_on);
170 159
171/** 160#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
172 * tracing_off - turn off all tracing buffers
173 *
174 * This function stops all tracing buffers from recording data.
175 * It does not disable any overhead the tracers themselves may
176 * be causing. This function simply causes all recording to
177 * the ring buffers to fail.
178 */
179void tracing_off(void)
180{
181 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
182}
183EXPORT_SYMBOL_GPL(tracing_off);
184 161
185/** 162/**
186 * tracing_off_permanent - permanently disable ring buffers 163 * tracing_off_permanent - permanently disable ring buffers
@@ -193,15 +170,6 @@ void tracing_off_permanent(void)
193 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); 170 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
194} 171}
195 172
196/**
197 * tracing_is_on - show state of ring buffers enabled
198 */
199int tracing_is_on(void)
200{
201 return ring_buffer_flags == RB_BUFFERS_ON;
202}
203EXPORT_SYMBOL_GPL(tracing_is_on);
204
205#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) 173#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
206#define RB_ALIGNMENT 4U 174#define RB_ALIGNMENT 4U
207#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 175#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -2619,6 +2587,63 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
2619EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 2587EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
2620 2588
2621/** 2589/**
2590 * ring_buffer_record_off - stop all writes into the buffer
2591 * @buffer: The ring buffer to stop writes to.
2592 *
2593 * This prevents all writes to the buffer. Any attempt to write
2594 * to the buffer after this will fail and return NULL.
2595 *
2596 * This is different than ring_buffer_record_disable() as
2597 * it works like an on/off switch, where as the disable() verison
2598 * must be paired with a enable().
2599 */
2600void ring_buffer_record_off(struct ring_buffer *buffer)
2601{
2602 unsigned int rd;
2603 unsigned int new_rd;
2604
2605 do {
2606 rd = atomic_read(&buffer->record_disabled);
2607 new_rd = rd | RB_BUFFER_OFF;
2608 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2609}
2610EXPORT_SYMBOL_GPL(ring_buffer_record_off);
2611
2612/**
2613 * ring_buffer_record_on - restart writes into the buffer
2614 * @buffer: The ring buffer to start writes to.
2615 *
2616 * This enables all writes to the buffer that was disabled by
2617 * ring_buffer_record_off().
2618 *
2619 * This is different than ring_buffer_record_enable() as
2620 * it works like an on/off switch, where as the enable() verison
2621 * must be paired with a disable().
2622 */
2623void ring_buffer_record_on(struct ring_buffer *buffer)
2624{
2625 unsigned int rd;
2626 unsigned int new_rd;
2627
2628 do {
2629 rd = atomic_read(&buffer->record_disabled);
2630 new_rd = rd & ~RB_BUFFER_OFF;
2631 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2632}
2633EXPORT_SYMBOL_GPL(ring_buffer_record_on);
2634
2635/**
2636 * ring_buffer_record_is_on - return true if the ring buffer can write
2637 * @buffer: The ring buffer to see if write is enabled
2638 *
2639 * Returns true if the ring buffer is in a state that it accepts writes.
2640 */
2641int ring_buffer_record_is_on(struct ring_buffer *buffer)
2642{
2643 return !atomic_read(&buffer->record_disabled);
2644}
2645
2646/**
2622 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 2647 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2623 * @buffer: The ring buffer to stop writes to. 2648 * @buffer: The ring buffer to stop writes to.
2624 * @cpu: The CPU buffer to stop 2649 * @cpu: The CPU buffer to stop
@@ -4039,68 +4064,6 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
4039} 4064}
4040EXPORT_SYMBOL_GPL(ring_buffer_read_page); 4065EXPORT_SYMBOL_GPL(ring_buffer_read_page);
4041 4066
4042#ifdef CONFIG_TRACING
4043static ssize_t
4044rb_simple_read(struct file *filp, char __user *ubuf,
4045 size_t cnt, loff_t *ppos)
4046{
4047 unsigned long *p = filp->private_data;
4048 char buf[64];
4049 int r;
4050
4051 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
4052 r = sprintf(buf, "permanently disabled\n");
4053 else
4054 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
4055
4056 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4057}
4058
4059static ssize_t
4060rb_simple_write(struct file *filp, const char __user *ubuf,
4061 size_t cnt, loff_t *ppos)
4062{
4063 unsigned long *p = filp->private_data;
4064 unsigned long val;
4065 int ret;
4066
4067 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4068 if (ret)
4069 return ret;
4070
4071 if (val)
4072 set_bit(RB_BUFFERS_ON_BIT, p);
4073 else
4074 clear_bit(RB_BUFFERS_ON_BIT, p);
4075
4076 (*ppos)++;
4077
4078 return cnt;
4079}
4080
4081static const struct file_operations rb_simple_fops = {
4082 .open = tracing_open_generic,
4083 .read = rb_simple_read,
4084 .write = rb_simple_write,
4085 .llseek = default_llseek,
4086};
4087
4088
4089static __init int rb_init_debugfs(void)
4090{
4091 struct dentry *d_tracer;
4092
4093 d_tracer = tracing_init_dentry();
4094
4095 trace_create_file("tracing_on", 0644, d_tracer,
4096 &ring_buffer_flags, &rb_simple_fops);
4097
4098 return 0;
4099}
4100
4101fs_initcall(rb_init_debugfs);
4102#endif
4103
4104#ifdef CONFIG_HOTPLUG_CPU 4067#ifdef CONFIG_HOTPLUG_CPU
4105static int rb_cpu_notify(struct notifier_block *self, 4068static int rb_cpu_notify(struct notifier_block *self,
4106 unsigned long action, void *hcpu) 4069 unsigned long action, void *hcpu)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 10d5503f0d04..ed7b5d1e12f4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -36,6 +36,7 @@
36#include <linux/ctype.h> 36#include <linux/ctype.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/poll.h> 38#include <linux/poll.h>
39#include <linux/nmi.h>
39#include <linux/fs.h> 40#include <linux/fs.h>
40 41
41#include "trace.h" 42#include "trace.h"
@@ -352,6 +353,59 @@ static void wakeup_work_handler(struct work_struct *work)
352static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler); 353static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
353 354
354/** 355/**
356 * tracing_on - enable tracing buffers
357 *
358 * This function enables tracing buffers that may have been
359 * disabled with tracing_off.
360 */
361void tracing_on(void)
362{
363 if (global_trace.buffer)
364 ring_buffer_record_on(global_trace.buffer);
365 /*
366 * This flag is only looked at when buffers haven't been
367 * allocated yet. We don't really care about the race
368 * between setting this flag and actually turning
369 * on the buffer.
370 */
371 global_trace.buffer_disabled = 0;
372}
373EXPORT_SYMBOL_GPL(tracing_on);
374
375/**
376 * tracing_off - turn off tracing buffers
377 *
378 * This function stops the tracing buffers from recording data.
379 * It does not disable any overhead the tracers themselves may
380 * be causing. This function simply causes all recording to
381 * the ring buffers to fail.
382 */
383void tracing_off(void)
384{
385 if (global_trace.buffer)
386 ring_buffer_record_on(global_trace.buffer);
387 /*
388 * This flag is only looked at when buffers haven't been
389 * allocated yet. We don't really care about the race
390 * between setting this flag and actually turning
391 * on the buffer.
392 */
393 global_trace.buffer_disabled = 1;
394}
395EXPORT_SYMBOL_GPL(tracing_off);
396
397/**
398 * tracing_is_on - show state of ring buffers enabled
399 */
400int tracing_is_on(void)
401{
402 if (global_trace.buffer)
403 return ring_buffer_record_is_on(global_trace.buffer);
404 return !global_trace.buffer_disabled;
405}
406EXPORT_SYMBOL_GPL(tracing_is_on);
407
408/**
355 * trace_wake_up - wake up tasks waiting for trace input 409 * trace_wake_up - wake up tasks waiting for trace input
356 * 410 *
357 * Schedules a delayed work to wake up any task that is blocked on the 411 * Schedules a delayed work to wake up any task that is blocked on the
@@ -1644,6 +1698,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1644 int cpu_file = iter->cpu_file; 1698 int cpu_file = iter->cpu_file;
1645 u64 next_ts = 0, ts; 1699 u64 next_ts = 0, ts;
1646 int next_cpu = -1; 1700 int next_cpu = -1;
1701 int next_size = 0;
1647 int cpu; 1702 int cpu;
1648 1703
1649 /* 1704 /*
@@ -1675,9 +1730,12 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1675 next_cpu = cpu; 1730 next_cpu = cpu;
1676 next_ts = ts; 1731 next_ts = ts;
1677 next_lost = lost_events; 1732 next_lost = lost_events;
1733 next_size = iter->ent_size;
1678 } 1734 }
1679 } 1735 }
1680 1736
1737 iter->ent_size = next_size;
1738
1681 if (ent_cpu) 1739 if (ent_cpu)
1682 *ent_cpu = next_cpu; 1740 *ent_cpu = next_cpu;
1683 1741
@@ -4567,6 +4625,55 @@ static __init void create_trace_options_dir(void)
4567 create_trace_option_core_file(trace_options[i], i); 4625 create_trace_option_core_file(trace_options[i], i);
4568} 4626}
4569 4627
4628static ssize_t
4629rb_simple_read(struct file *filp, char __user *ubuf,
4630 size_t cnt, loff_t *ppos)
4631{
4632 struct ring_buffer *buffer = filp->private_data;
4633 char buf[64];
4634 int r;
4635
4636 if (buffer)
4637 r = ring_buffer_record_is_on(buffer);
4638 else
4639 r = 0;
4640
4641 r = sprintf(buf, "%d\n", r);
4642
4643 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4644}
4645
4646static ssize_t
4647rb_simple_write(struct file *filp, const char __user *ubuf,
4648 size_t cnt, loff_t *ppos)
4649{
4650 struct ring_buffer *buffer = filp->private_data;
4651 unsigned long val;
4652 int ret;
4653
4654 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4655 if (ret)
4656 return ret;
4657
4658 if (buffer) {
4659 if (val)
4660 ring_buffer_record_on(buffer);
4661 else
4662 ring_buffer_record_off(buffer);
4663 }
4664
4665 (*ppos)++;
4666
4667 return cnt;
4668}
4669
4670static const struct file_operations rb_simple_fops = {
4671 .open = tracing_open_generic,
4672 .read = rb_simple_read,
4673 .write = rb_simple_write,
4674 .llseek = default_llseek,
4675};
4676
4570static __init int tracer_init_debugfs(void) 4677static __init int tracer_init_debugfs(void)
4571{ 4678{
4572 struct dentry *d_tracer; 4679 struct dentry *d_tracer;
@@ -4626,6 +4733,9 @@ static __init int tracer_init_debugfs(void)
4626 trace_create_file("trace_clock", 0644, d_tracer, NULL, 4733 trace_create_file("trace_clock", 0644, d_tracer, NULL,
4627 &trace_clock_fops); 4734 &trace_clock_fops);
4628 4735
4736 trace_create_file("tracing_on", 0644, d_tracer,
4737 global_trace.buffer, &rb_simple_fops);
4738
4629#ifdef CONFIG_DYNAMIC_FTRACE 4739#ifdef CONFIG_DYNAMIC_FTRACE
4630 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, 4740 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
4631 &ftrace_update_tot_cnt, &tracing_dyn_info_fops); 4741 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -4798,6 +4908,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
4798 if (ret != TRACE_TYPE_NO_CONSUME) 4908 if (ret != TRACE_TYPE_NO_CONSUME)
4799 trace_consume(&iter); 4909 trace_consume(&iter);
4800 } 4910 }
4911 touch_nmi_watchdog();
4801 4912
4802 trace_printk_seq(&iter.seq); 4913 trace_printk_seq(&iter.seq);
4803 } 4914 }
@@ -4863,6 +4974,8 @@ __init static int tracer_alloc_buffers(void)
4863 goto out_free_cpumask; 4974 goto out_free_cpumask;
4864 } 4975 }
4865 global_trace.entries = ring_buffer_size(global_trace.buffer); 4976 global_trace.entries = ring_buffer_size(global_trace.buffer);
4977 if (global_trace.buffer_disabled)
4978 tracing_off();
4866 4979
4867 4980
4868#ifdef CONFIG_TRACER_MAX_TRACE 4981#ifdef CONFIG_TRACER_MAX_TRACE
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 54faec790bc1..95059f091a24 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -154,6 +154,7 @@ struct trace_array {
154 struct ring_buffer *buffer; 154 struct ring_buffer *buffer;
155 unsigned long entries; 155 unsigned long entries;
156 int cpu; 156 int cpu;
157 int buffer_disabled;
157 cycle_t time_start; 158 cycle_t time_start;
158 struct task_struct *waiter; 159 struct task_struct *waiter;
159 struct trace_array_cpu *data[NR_CPUS]; 160 struct trace_array_cpu *data[NR_CPUS];
@@ -835,13 +836,11 @@ extern const char *__stop___trace_bprintk_fmt[];
835 filter) 836 filter)
836#include "trace_entries.h" 837#include "trace_entries.h"
837 838
838#ifdef CONFIG_PERF_EVENTS
839#ifdef CONFIG_FUNCTION_TRACER 839#ifdef CONFIG_FUNCTION_TRACER
840int perf_ftrace_event_register(struct ftrace_event_call *call, 840int perf_ftrace_event_register(struct ftrace_event_call *call,
841 enum trace_reg type, void *data); 841 enum trace_reg type, void *data);
842#else 842#else
843#define perf_ftrace_event_register NULL 843#define perf_ftrace_event_register NULL
844#endif /* CONFIG_FUNCTION_TRACER */ 844#endif /* CONFIG_FUNCTION_TRACER */
845#endif /* CONFIG_PERF_EVENTS */
846 845
847#endif /* _LINUX_KERNEL_TRACE_H */ 846#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index d91eb0541b3a..4108e1250ca2 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -166,6 +166,12 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry,
166 166
167#define FTRACE_STACK_ENTRIES 8 167#define FTRACE_STACK_ENTRIES 8
168 168
169#ifndef CONFIG_64BIT
170# define IP_FMT "%08lx"
171#else
172# define IP_FMT "%016lx"
173#endif
174
169FTRACE_ENTRY(kernel_stack, stack_entry, 175FTRACE_ENTRY(kernel_stack, stack_entry,
170 176
171 TRACE_STACK, 177 TRACE_STACK,
@@ -175,8 +181,9 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
175 __dynamic_array(unsigned long, caller ) 181 __dynamic_array(unsigned long, caller )
176 ), 182 ),
177 183
178 F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n" 184 F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
179 "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n", 185 "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
186 "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
180 __entry->caller[0], __entry->caller[1], __entry->caller[2], 187 __entry->caller[0], __entry->caller[1], __entry->caller[2],
181 __entry->caller[3], __entry->caller[4], __entry->caller[5], 188 __entry->caller[3], __entry->caller[4], __entry->caller[5],
182 __entry->caller[6], __entry->caller[7]), 189 __entry->caller[6], __entry->caller[7]),
@@ -193,8 +200,9 @@ FTRACE_ENTRY(user_stack, userstack_entry,
193 __array( unsigned long, caller, FTRACE_STACK_ENTRIES ) 200 __array( unsigned long, caller, FTRACE_STACK_ENTRIES )
194 ), 201 ),
195 202
196 F_printk("\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n" 203 F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
197 "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n", 204 "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
205 "\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n",
198 __entry->caller[0], __entry->caller[1], __entry->caller[2], 206 __entry->caller[0], __entry->caller[1], __entry->caller[2],
199 __entry->caller[3], __entry->caller[4], __entry->caller[5], 207 __entry->caller[3], __entry->caller[4], __entry->caller[5],
200 __entry->caller[6], __entry->caller[7]), 208 __entry->caller[6], __entry->caller[7]),
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 7b46c9bd22ae..3dd15e8bc856 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -162,7 +162,7 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
162#define __dynamic_array(type, item) 162#define __dynamic_array(type, item)
163 163
164#undef F_printk 164#undef F_printk
165#define F_printk(fmt, args...) #fmt ", " __stringify(args) 165#define F_printk(fmt, args...) __stringify(fmt) ", " __stringify(args)
166 166
167#undef FTRACE_ENTRY_REG 167#undef FTRACE_ENTRY_REG
168#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\ 168#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f7af95d304c5..6777153f18f3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -499,6 +499,7 @@ config RT_MUTEX_TESTER
499config DEBUG_SPINLOCK 499config DEBUG_SPINLOCK
500 bool "Spinlock and rw-lock debugging: basic checks" 500 bool "Spinlock and rw-lock debugging: basic checks"
501 depends on DEBUG_KERNEL 501 depends on DEBUG_KERNEL
502 select UNINLINE_SPIN_UNLOCK
502 help 503 help
503 Say Y here and build SMP to catch missing spinlock initialization 504 Say Y here and build SMP to catch missing spinlock initialization
504 and certain other kinds of spinlock errors commonly made. This is 505 and certain other kinds of spinlock errors commonly made. This is
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 49142612916e..5914623f426a 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -733,7 +733,8 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
733 data = &tv; 733 data = &tv;
734 len = sizeof(tv); 734 len = sizeof(tv);
735#ifdef CONFIG_COMPAT 735#ifdef CONFIG_COMPAT
736 if (msg->msg_flags & MSG_CMSG_COMPAT) { 736 if (!COMPAT_USE_64BIT_TIME &&
737 (msg->msg_flags & MSG_CMSG_COMPAT)) {
737 ctv.tv_sec = tv.tv_sec; 738 ctv.tv_sec = tv.tv_sec;
738 ctv.tv_usec = tv.tv_usec; 739 ctv.tv_usec = tv.tv_usec;
739 data = &ctv; 740 data = &ctv;
diff --git a/net/compat.c b/net/compat.c
index 64b4515a64e6..e055708b8ec9 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -219,8 +219,6 @@ Efault:
219 219
220int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data) 220int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
221{ 221{
222 struct compat_timeval ctv;
223 struct compat_timespec cts[3];
224 struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control; 222 struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
225 struct compat_cmsghdr cmhdr; 223 struct compat_cmsghdr cmhdr;
226 int cmlen; 224 int cmlen;
@@ -230,24 +228,28 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
230 return 0; /* XXX: return error? check spec. */ 228 return 0; /* XXX: return error? check spec. */
231 } 229 }
232 230
233 if (level == SOL_SOCKET && type == SCM_TIMESTAMP) { 231 if (!COMPAT_USE_64BIT_TIME) {
234 struct timeval *tv = (struct timeval *)data; 232 struct compat_timeval ctv;
235 ctv.tv_sec = tv->tv_sec; 233 struct compat_timespec cts[3];
236 ctv.tv_usec = tv->tv_usec; 234 if (level == SOL_SOCKET && type == SCM_TIMESTAMP) {
237 data = &ctv; 235 struct timeval *tv = (struct timeval *)data;
238 len = sizeof(ctv); 236 ctv.tv_sec = tv->tv_sec;
239 } 237 ctv.tv_usec = tv->tv_usec;
240 if (level == SOL_SOCKET && 238 data = &ctv;
241 (type == SCM_TIMESTAMPNS || type == SCM_TIMESTAMPING)) { 239 len = sizeof(ctv);
242 int count = type == SCM_TIMESTAMPNS ? 1 : 3; 240 }
243 int i; 241 if (level == SOL_SOCKET &&
244 struct timespec *ts = (struct timespec *)data; 242 (type == SCM_TIMESTAMPNS || type == SCM_TIMESTAMPING)) {
245 for (i = 0; i < count; i++) { 243 int count = type == SCM_TIMESTAMPNS ? 1 : 3;
246 cts[i].tv_sec = ts[i].tv_sec; 244 int i;
247 cts[i].tv_nsec = ts[i].tv_nsec; 245 struct timespec *ts = (struct timespec *)data;
246 for (i = 0; i < count; i++) {
247 cts[i].tv_sec = ts[i].tv_sec;
248 cts[i].tv_nsec = ts[i].tv_nsec;
249 }
250 data = &cts;
251 len = sizeof(cts[0]) * count;
248 } 252 }
249 data = &cts;
250 len = sizeof(cts[0]) * count;
251 } 253 }
252 254
253 cmlen = CMSG_COMPAT_LEN(len); 255 cmlen = CMSG_COMPAT_LEN(len);
@@ -454,11 +456,15 @@ static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
454 456
455int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) 457int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
456{ 458{
457 struct compat_timeval __user *ctv = 459 struct compat_timeval __user *ctv;
458 (struct compat_timeval __user *) userstamp; 460 int err;
459 int err = -ENOENT;
460 struct timeval tv; 461 struct timeval tv;
461 462
463 if (COMPAT_USE_64BIT_TIME)
464 return sock_get_timestamp(sk, userstamp);
465
466 ctv = (struct compat_timeval __user *) userstamp;
467 err = -ENOENT;
462 if (!sock_flag(sk, SOCK_TIMESTAMP)) 468 if (!sock_flag(sk, SOCK_TIMESTAMP))
463 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 469 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
464 tv = ktime_to_timeval(sk->sk_stamp); 470 tv = ktime_to_timeval(sk->sk_stamp);
@@ -478,11 +484,15 @@ EXPORT_SYMBOL(compat_sock_get_timestamp);
478 484
479int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) 485int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
480{ 486{
481 struct compat_timespec __user *ctv = 487 struct compat_timespec __user *ctv;
482 (struct compat_timespec __user *) userstamp; 488 int err;
483 int err = -ENOENT;
484 struct timespec ts; 489 struct timespec ts;
485 490
491 if (COMPAT_USE_64BIT_TIME)
492 return sock_get_timestampns (sk, userstamp);
493
494 ctv = (struct compat_timespec __user *) userstamp;
495 err = -ENOENT;
486 if (!sock_flag(sk, SOCK_TIMESTAMP)) 496 if (!sock_flag(sk, SOCK_TIMESTAMP))
487 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 497 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
488 ts = ktime_to_timespec(sk->sk_stamp); 498 ts = ktime_to_timespec(sk->sk_stamp);
@@ -767,6 +777,11 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
767 int datagrams; 777 int datagrams;
768 struct timespec ktspec; 778 struct timespec ktspec;
769 779
780 if (COMPAT_USE_64BIT_TIME)
781 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
782 flags | MSG_CMSG_COMPAT,
783 (struct timespec *) timeout);
784
770 if (timeout == NULL) 785 if (timeout == NULL)
771 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, 786 return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
772 flags | MSG_CMSG_COMPAT, NULL); 787 flags | MSG_CMSG_COMPAT, NULL);
diff --git a/net/socket.c b/net/socket.c
index 12a48d846223..484cc6953fc6 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2592,7 +2592,7 @@ void socket_seq_show(struct seq_file *seq)
2592 2592
2593#ifdef CONFIG_COMPAT 2593#ifdef CONFIG_COMPAT
2594static int do_siocgstamp(struct net *net, struct socket *sock, 2594static int do_siocgstamp(struct net *net, struct socket *sock,
2595 unsigned int cmd, struct compat_timeval __user *up) 2595 unsigned int cmd, void __user *up)
2596{ 2596{
2597 mm_segment_t old_fs = get_fs(); 2597 mm_segment_t old_fs = get_fs();
2598 struct timeval ktv; 2598 struct timeval ktv;
@@ -2601,15 +2601,14 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
2601 set_fs(KERNEL_DS); 2601 set_fs(KERNEL_DS);
2602 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); 2602 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
2603 set_fs(old_fs); 2603 set_fs(old_fs);
2604 if (!err) { 2604 if (!err)
2605 err = put_user(ktv.tv_sec, &up->tv_sec); 2605 err = compat_put_timeval(up, &ktv);
2606 err |= __put_user(ktv.tv_usec, &up->tv_usec); 2606
2607 }
2608 return err; 2607 return err;
2609} 2608}
2610 2609
2611static int do_siocgstampns(struct net *net, struct socket *sock, 2610static int do_siocgstampns(struct net *net, struct socket *sock,
2612 unsigned int cmd, struct compat_timespec __user *up) 2611 unsigned int cmd, void __user *up)
2613{ 2612{
2614 mm_segment_t old_fs = get_fs(); 2613 mm_segment_t old_fs = get_fs();
2615 struct timespec kts; 2614 struct timespec kts;
@@ -2618,10 +2617,9 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
2618 set_fs(KERNEL_DS); 2617 set_fs(KERNEL_DS);
2619 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); 2618 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
2620 set_fs(old_fs); 2619 set_fs(old_fs);
2621 if (!err) { 2620 if (!err)
2622 err = put_user(kts.tv_sec, &up->tv_sec); 2621 err = compat_put_timespec(up, &kts);
2623 err |= __put_user(kts.tv_nsec, &up->tv_nsec); 2622
2624 }
2625 return err; 2623 return err;
2626} 2624}
2627 2625
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index f21ece088764..de0b0f39d9d8 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -830,6 +830,8 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
830{ 830{
831 ssize_t ret; 831 ssize_t ret;
832 832
833 if (count == 0)
834 return -EINVAL;
833 if (copy_from_user(kaddr, buf, count)) 835 if (copy_from_user(kaddr, buf, count))
834 return -EFAULT; 836 return -EFAULT;
835 kaddr[count] = '\0'; 837 kaddr[count] = '\0';
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index c84c0e0c41cb..0af37fc46818 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1014,6 +1014,7 @@ enum {
1014 RPCAUTH_statd, 1014 RPCAUTH_statd,
1015 RPCAUTH_nfsd4_cb, 1015 RPCAUTH_nfsd4_cb,
1016 RPCAUTH_cache, 1016 RPCAUTH_cache,
1017 RPCAUTH_nfsd,
1017 RPCAUTH_RootEOF 1018 RPCAUTH_RootEOF
1018}; 1019};
1019 1020
@@ -1046,6 +1047,10 @@ static const struct rpc_filelist files[] = {
1046 .name = "cache", 1047 .name = "cache",
1047 .mode = S_IFDIR | S_IRUGO | S_IXUGO, 1048 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
1048 }, 1049 },
1050 [RPCAUTH_nfsd] = {
1051 .name = "nfsd",
1052 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
1053 },
1049}; 1054};
1050 1055
1051/* 1056/*
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index bcd574f2ac56..521d8f7dc833 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -507,7 +507,7 @@ static int unix_gid_parse(struct cache_detail *cd,
507 time_t expiry; 507 time_t expiry;
508 struct unix_gid ug, *ugp; 508 struct unix_gid ug, *ugp;
509 509
510 if (mlen <= 0 || mesg[mlen-1] != '\n') 510 if (mesg[mlen - 1] != '\n')
511 return -EINVAL; 511 return -EINVAL;
512 mesg[mlen-1] = 0; 512 mesg[mlen-1] = 0;
513 513
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 40ae884db865..824d32fb3121 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1381,8 +1381,6 @@ void svc_sock_update_bufs(struct svc_serv *serv)
1381 spin_lock_bh(&serv->sv_lock); 1381 spin_lock_bh(&serv->sv_lock);
1382 list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) 1382 list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list)
1383 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); 1383 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
1384 list_for_each_entry(svsk, &serv->sv_tempsocks, sk_xprt.xpt_list)
1385 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
1386 spin_unlock_bh(&serv->sv_lock); 1384 spin_unlock_bh(&serv->sv_lock);
1387} 1385}
1388EXPORT_SYMBOL_GPL(svc_sock_update_bufs); 1386EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index 09af4fab1a45..8343737e85f4 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -47,6 +47,7 @@
47#include <linux/sunrpc/clnt.h> 47#include <linux/sunrpc/clnt.h>
48#include <linux/sunrpc/sched.h> 48#include <linux/sunrpc/sched.h>
49#include <linux/sunrpc/svc_rdma.h> 49#include <linux/sunrpc/svc_rdma.h>
50#include "xprt_rdma.h"
50 51
51#define RPCDBG_FACILITY RPCDBG_SVCXPRT 52#define RPCDBG_FACILITY RPCDBG_SVCXPRT
52 53
diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
index 9530ef2d40dc..8d2edddf48cf 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
@@ -60,21 +60,11 @@ static u32 *decode_read_list(u32 *va, u32 *vaend)
60 struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va; 60 struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va;
61 61
62 while (ch->rc_discrim != xdr_zero) { 62 while (ch->rc_discrim != xdr_zero) {
63 u64 ch_offset;
64
65 if (((unsigned long)ch + sizeof(struct rpcrdma_read_chunk)) > 63 if (((unsigned long)ch + sizeof(struct rpcrdma_read_chunk)) >
66 (unsigned long)vaend) { 64 (unsigned long)vaend) {
67 dprintk("svcrdma: vaend=%p, ch=%p\n", vaend, ch); 65 dprintk("svcrdma: vaend=%p, ch=%p\n", vaend, ch);
68 return NULL; 66 return NULL;
69 } 67 }
70
71 ch->rc_discrim = ntohl(ch->rc_discrim);
72 ch->rc_position = ntohl(ch->rc_position);
73 ch->rc_target.rs_handle = ntohl(ch->rc_target.rs_handle);
74 ch->rc_target.rs_length = ntohl(ch->rc_target.rs_length);
75 va = (u32 *)&ch->rc_target.rs_offset;
76 xdr_decode_hyper(va, &ch_offset);
77 put_unaligned(ch_offset, (u64 *)va);
78 ch++; 68 ch++;
79 } 69 }
80 return (u32 *)&ch->rc_position; 70 return (u32 *)&ch->rc_position;
@@ -91,7 +81,7 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
91 *byte_count = 0; 81 *byte_count = 0;
92 *ch_count = 0; 82 *ch_count = 0;
93 for (; ch->rc_discrim != 0; ch++) { 83 for (; ch->rc_discrim != 0; ch++) {
94 *byte_count = *byte_count + ch->rc_target.rs_length; 84 *byte_count = *byte_count + ntohl(ch->rc_target.rs_length);
95 *ch_count = *ch_count + 1; 85 *ch_count = *ch_count + 1;
96 } 86 }
97} 87}
@@ -108,7 +98,8 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
108 */ 98 */
109static u32 *decode_write_list(u32 *va, u32 *vaend) 99static u32 *decode_write_list(u32 *va, u32 *vaend)
110{ 100{
111 int ch_no; 101 int nchunks;
102
112 struct rpcrdma_write_array *ary = 103 struct rpcrdma_write_array *ary =
113 (struct rpcrdma_write_array *)va; 104 (struct rpcrdma_write_array *)va;
114 105
@@ -121,37 +112,24 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
121 dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend); 112 dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
122 return NULL; 113 return NULL;
123 } 114 }
124 ary->wc_discrim = ntohl(ary->wc_discrim); 115 nchunks = ntohl(ary->wc_nchunks);
125 ary->wc_nchunks = ntohl(ary->wc_nchunks);
126 if (((unsigned long)&ary->wc_array[0] + 116 if (((unsigned long)&ary->wc_array[0] +
127 (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) > 117 (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
128 (unsigned long)vaend) { 118 (unsigned long)vaend) {
129 dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n", 119 dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
130 ary, ary->wc_nchunks, vaend); 120 ary, nchunks, vaend);
131 return NULL; 121 return NULL;
132 } 122 }
133 for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) {
134 u64 ch_offset;
135
136 ary->wc_array[ch_no].wc_target.rs_handle =
137 ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
138 ary->wc_array[ch_no].wc_target.rs_length =
139 ntohl(ary->wc_array[ch_no].wc_target.rs_length);
140 va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
141 xdr_decode_hyper(va, &ch_offset);
142 put_unaligned(ch_offset, (u64 *)va);
143 }
144
145 /* 123 /*
146 * rs_length is the 2nd 4B field in wc_target and taking its 124 * rs_length is the 2nd 4B field in wc_target and taking its
147 * address skips the list terminator 125 * address skips the list terminator
148 */ 126 */
149 return (u32 *)&ary->wc_array[ch_no].wc_target.rs_length; 127 return (u32 *)&ary->wc_array[nchunks].wc_target.rs_length;
150} 128}
151 129
152static u32 *decode_reply_array(u32 *va, u32 *vaend) 130static u32 *decode_reply_array(u32 *va, u32 *vaend)
153{ 131{
154 int ch_no; 132 int nchunks;
155 struct rpcrdma_write_array *ary = 133 struct rpcrdma_write_array *ary =
156 (struct rpcrdma_write_array *)va; 134 (struct rpcrdma_write_array *)va;
157 135
@@ -164,28 +142,15 @@ static u32 *decode_reply_array(u32 *va, u32 *vaend)
164 dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend); 142 dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
165 return NULL; 143 return NULL;
166 } 144 }
167 ary->wc_discrim = ntohl(ary->wc_discrim); 145 nchunks = ntohl(ary->wc_nchunks);
168 ary->wc_nchunks = ntohl(ary->wc_nchunks);
169 if (((unsigned long)&ary->wc_array[0] + 146 if (((unsigned long)&ary->wc_array[0] +
170 (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) > 147 (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
171 (unsigned long)vaend) { 148 (unsigned long)vaend) {
172 dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n", 149 dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
173 ary, ary->wc_nchunks, vaend); 150 ary, nchunks, vaend);
174 return NULL; 151 return NULL;
175 } 152 }
176 for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) { 153 return (u32 *)&ary->wc_array[nchunks];
177 u64 ch_offset;
178
179 ary->wc_array[ch_no].wc_target.rs_handle =
180 ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
181 ary->wc_array[ch_no].wc_target.rs_length =
182 ntohl(ary->wc_array[ch_no].wc_target.rs_length);
183 va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
184 xdr_decode_hyper(va, &ch_offset);
185 put_unaligned(ch_offset, (u64 *)va);
186 }
187
188 return (u32 *)&ary->wc_array[ch_no];
189} 154}
190 155
191int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req, 156int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
@@ -386,13 +351,14 @@ void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *ary,
386 351
387void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary, 352void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary,
388 int chunk_no, 353 int chunk_no,
389 u32 rs_handle, u64 rs_offset, 354 __be32 rs_handle,
355 __be64 rs_offset,
390 u32 write_len) 356 u32 write_len)
391{ 357{
392 struct rpcrdma_segment *seg = &ary->wc_array[chunk_no].wc_target; 358 struct rpcrdma_segment *seg = &ary->wc_array[chunk_no].wc_target;
393 seg->rs_handle = htonl(rs_handle); 359 seg->rs_handle = rs_handle;
360 seg->rs_offset = rs_offset;
394 seg->rs_length = htonl(write_len); 361 seg->rs_length = htonl(write_len);
395 xdr_encode_hyper((u32 *) &seg->rs_offset, rs_offset);
396} 362}
397 363
398void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt, 364void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt,
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index df67211c4baf..41cb63b623df 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -147,7 +147,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
147 page_off = 0; 147 page_off = 0;
148 ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0]; 148 ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
149 ch_no = 0; 149 ch_no = 0;
150 ch_bytes = ch->rc_target.rs_length; 150 ch_bytes = ntohl(ch->rc_target.rs_length);
151 head->arg.head[0] = rqstp->rq_arg.head[0]; 151 head->arg.head[0] = rqstp->rq_arg.head[0];
152 head->arg.tail[0] = rqstp->rq_arg.tail[0]; 152 head->arg.tail[0] = rqstp->rq_arg.tail[0];
153 head->arg.pages = &head->pages[head->count]; 153 head->arg.pages = &head->pages[head->count];
@@ -183,7 +183,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
183 ch_no++; 183 ch_no++;
184 ch++; 184 ch++;
185 chl_map->ch[ch_no].start = sge_no; 185 chl_map->ch[ch_no].start = sge_no;
186 ch_bytes = ch->rc_target.rs_length; 186 ch_bytes = ntohl(ch->rc_target.rs_length);
187 /* If bytes remaining account for next chunk */ 187 /* If bytes remaining account for next chunk */
188 if (byte_count) { 188 if (byte_count) {
189 head->arg.page_len += ch_bytes; 189 head->arg.page_len += ch_bytes;
@@ -281,11 +281,12 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
281 offset = 0; 281 offset = 0;
282 ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0]; 282 ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
283 for (ch_no = 0; ch_no < ch_count; ch_no++) { 283 for (ch_no = 0; ch_no < ch_count; ch_no++) {
284 int len = ntohl(ch->rc_target.rs_length);
284 rpl_map->sge[ch_no].iov_base = frmr->kva + offset; 285 rpl_map->sge[ch_no].iov_base = frmr->kva + offset;
285 rpl_map->sge[ch_no].iov_len = ch->rc_target.rs_length; 286 rpl_map->sge[ch_no].iov_len = len;
286 chl_map->ch[ch_no].count = 1; 287 chl_map->ch[ch_no].count = 1;
287 chl_map->ch[ch_no].start = ch_no; 288 chl_map->ch[ch_no].start = ch_no;
288 offset += ch->rc_target.rs_length; 289 offset += len;
289 ch++; 290 ch++;
290 } 291 }
291 292
@@ -316,7 +317,7 @@ static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
316 for (i = 0; i < count; i++) { 317 for (i = 0; i < count; i++) {
317 ctxt->sge[i].length = 0; /* in case map fails */ 318 ctxt->sge[i].length = 0; /* in case map fails */
318 if (!frmr) { 319 if (!frmr) {
319 BUG_ON(0 == virt_to_page(vec[i].iov_base)); 320 BUG_ON(!virt_to_page(vec[i].iov_base));
320 off = (unsigned long)vec[i].iov_base & ~PAGE_MASK; 321 off = (unsigned long)vec[i].iov_base & ~PAGE_MASK;
321 ctxt->sge[i].addr = 322 ctxt->sge[i].addr =
322 ib_dma_map_page(xprt->sc_cm_id->device, 323 ib_dma_map_page(xprt->sc_cm_id->device,
@@ -426,6 +427,7 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
426 427
427 for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0]; 428 for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
428 ch->rc_discrim != 0; ch++, ch_no++) { 429 ch->rc_discrim != 0; ch++, ch_no++) {
430 u64 rs_offset;
429next_sge: 431next_sge:
430 ctxt = svc_rdma_get_context(xprt); 432 ctxt = svc_rdma_get_context(xprt);
431 ctxt->direction = DMA_FROM_DEVICE; 433 ctxt->direction = DMA_FROM_DEVICE;
@@ -440,10 +442,10 @@ next_sge:
440 read_wr.opcode = IB_WR_RDMA_READ; 442 read_wr.opcode = IB_WR_RDMA_READ;
441 ctxt->wr_op = read_wr.opcode; 443 ctxt->wr_op = read_wr.opcode;
442 read_wr.send_flags = IB_SEND_SIGNALED; 444 read_wr.send_flags = IB_SEND_SIGNALED;
443 read_wr.wr.rdma.rkey = ch->rc_target.rs_handle; 445 read_wr.wr.rdma.rkey = ntohl(ch->rc_target.rs_handle);
444 read_wr.wr.rdma.remote_addr = 446 xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
445 get_unaligned(&(ch->rc_target.rs_offset)) + 447 &rs_offset);
446 sgl_offset; 448 read_wr.wr.rdma.remote_addr = rs_offset + sgl_offset;
447 read_wr.sg_list = ctxt->sge; 449 read_wr.sg_list = ctxt->sge;
448 read_wr.num_sge = 450 read_wr.num_sge =
449 rdma_read_max_sge(xprt, chl_map->ch[ch_no].count); 451 rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 249a835b703f..42eb7ba0b903 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -409,21 +409,21 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
409 u64 rs_offset; 409 u64 rs_offset;
410 410
411 arg_ch = &arg_ary->wc_array[chunk_no].wc_target; 411 arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
412 write_len = min(xfer_len, arg_ch->rs_length); 412 write_len = min(xfer_len, ntohl(arg_ch->rs_length));
413 413
414 /* Prepare the response chunk given the length actually 414 /* Prepare the response chunk given the length actually
415 * written */ 415 * written */
416 rs_offset = get_unaligned(&(arg_ch->rs_offset)); 416 xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
417 svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no, 417 svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
418 arg_ch->rs_handle, 418 arg_ch->rs_handle,
419 rs_offset, 419 arg_ch->rs_offset,
420 write_len); 420 write_len);
421 chunk_off = 0; 421 chunk_off = 0;
422 while (write_len) { 422 while (write_len) {
423 int this_write; 423 int this_write;
424 this_write = min(write_len, max_write); 424 this_write = min(write_len, max_write);
425 ret = send_write(xprt, rqstp, 425 ret = send_write(xprt, rqstp,
426 arg_ch->rs_handle, 426 ntohl(arg_ch->rs_handle),
427 rs_offset + chunk_off, 427 rs_offset + chunk_off,
428 xdr_off, 428 xdr_off,
429 this_write, 429 this_write,
@@ -457,6 +457,7 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
457 u32 xdr_off; 457 u32 xdr_off;
458 int chunk_no; 458 int chunk_no;
459 int chunk_off; 459 int chunk_off;
460 int nchunks;
460 struct rpcrdma_segment *ch; 461 struct rpcrdma_segment *ch;
461 struct rpcrdma_write_array *arg_ary; 462 struct rpcrdma_write_array *arg_ary;
462 struct rpcrdma_write_array *res_ary; 463 struct rpcrdma_write_array *res_ary;
@@ -476,26 +477,27 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
476 max_write = xprt->sc_max_sge * PAGE_SIZE; 477 max_write = xprt->sc_max_sge * PAGE_SIZE;
477 478
478 /* xdr offset starts at RPC message */ 479 /* xdr offset starts at RPC message */
480 nchunks = ntohl(arg_ary->wc_nchunks);
479 for (xdr_off = 0, chunk_no = 0; 481 for (xdr_off = 0, chunk_no = 0;
480 xfer_len && chunk_no < arg_ary->wc_nchunks; 482 xfer_len && chunk_no < nchunks;
481 chunk_no++) { 483 chunk_no++) {
482 u64 rs_offset; 484 u64 rs_offset;
483 ch = &arg_ary->wc_array[chunk_no].wc_target; 485 ch = &arg_ary->wc_array[chunk_no].wc_target;
484 write_len = min(xfer_len, ch->rs_length); 486 write_len = min(xfer_len, htonl(ch->rs_length));
485 487
486 /* Prepare the reply chunk given the length actually 488 /* Prepare the reply chunk given the length actually
487 * written */ 489 * written */
488 rs_offset = get_unaligned(&(ch->rs_offset)); 490 xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
489 svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no, 491 svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
490 ch->rs_handle, rs_offset, 492 ch->rs_handle, ch->rs_offset,
491 write_len); 493 write_len);
492 chunk_off = 0; 494 chunk_off = 0;
493 while (write_len) { 495 while (write_len) {
494 int this_write; 496 int this_write;
495 497
496 this_write = min(write_len, max_write); 498 this_write = min(write_len, max_write);
497 ret = send_write(xprt, rqstp, 499 ret = send_write(xprt, rqstp,
498 ch->rs_handle, 500 ntohl(ch->rs_handle),
499 rs_offset + chunk_off, 501 rs_offset + chunk_off,
500 xdr_off, 502 xdr_off,
501 this_write, 503 this_write,
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 894cb42db91d..73b428bef598 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -51,6 +51,7 @@
51#include <rdma/rdma_cm.h> 51#include <rdma/rdma_cm.h>
52#include <linux/sunrpc/svc_rdma.h> 52#include <linux/sunrpc/svc_rdma.h>
53#include <linux/export.h> 53#include <linux/export.h>
54#include "xprt_rdma.h"
54 55
55#define RPCDBG_FACILITY RPCDBG_SVCXPRT 56#define RPCDBG_FACILITY RPCDBG_SVCXPRT
56 57
@@ -90,12 +91,6 @@ struct svc_xprt_class svc_rdma_class = {
90 .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, 91 .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
91}; 92};
92 93
93/* WR context cache. Created in svc_rdma.c */
94extern struct kmem_cache *svc_rdma_ctxt_cachep;
95
96/* Workqueue created in svc_rdma.c */
97extern struct workqueue_struct *svc_rdma_wq;
98
99struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt) 94struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
100{ 95{
101 struct svc_rdma_op_ctxt *ctxt; 96 struct svc_rdma_op_ctxt *ctxt;
@@ -150,9 +145,6 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
150 atomic_dec(&xprt->sc_ctxt_used); 145 atomic_dec(&xprt->sc_ctxt_used);
151} 146}
152 147
153/* Temporary NFS request map cache. Created in svc_rdma.c */
154extern struct kmem_cache *svc_rdma_map_cachep;
155
156/* 148/*
157 * Temporary NFS req mappings are shared across all transport 149 * Temporary NFS req mappings are shared across all transport
158 * instances. These are short lived and should be bounded by the number 150 * instances. These are short lived and should be bounded by the number
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 08c5d5a128fc..9a66c95b5837 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -343,4 +343,11 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
343 */ 343 */
344int rpcrdma_marshal_req(struct rpc_rqst *); 344int rpcrdma_marshal_req(struct rpc_rqst *);
345 345
346/* Temporary NFS request map cache. Created in svc_rdma.c */
347extern struct kmem_cache *svc_rdma_map_cachep;
348/* WR context cache. Created in svc_rdma.c */
349extern struct kmem_cache *svc_rdma_ctxt_cachep;
350/* Workqueue created in svc_rdma.c */
351extern struct workqueue_struct *svc_rdma_wq;
352
346#endif /* _LINUX_SUNRPC_XPRT_RDMA_H */ 353#endif /* _LINUX_SUNRPC_XPRT_RDMA_H */
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 92bc5181dbeb..890b03f8d877 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2475,6 +2475,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
2475static struct rpc_xprt_ops bc_tcp_ops = { 2475static struct rpc_xprt_ops bc_tcp_ops = {
2476 .reserve_xprt = xprt_reserve_xprt, 2476 .reserve_xprt = xprt_reserve_xprt,
2477 .release_xprt = xprt_release_xprt, 2477 .release_xprt = xprt_release_xprt,
2478 .rpcbind = xs_local_rpcbind,
2478 .buf_alloc = bc_malloc, 2479 .buf_alloc = bc_malloc,
2479 .buf_free = bc_free, 2480 .buf_free = bc_free,
2480 .send_request = bc_send_request, 2481 .send_request = bc_send_request,
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index d897278b1f97..6a3ee981931d 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -104,7 +104,7 @@ as-option = $(call try-run,\
104# Usage: cflags-y += $(call as-instr,instr,option1,option2) 104# Usage: cflags-y += $(call as-instr,instr,option1,option2)
105 105
106as-instr = $(call try-run,\ 106as-instr = $(call try-run,\
107 /bin/echo -e "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3)) 107 printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3))
108 108
109# cc-option 109# cc-option
110# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586) 110# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index d2b366c16b64..ff1720d28d0c 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -69,6 +69,7 @@ warning-1 += -Wmissing-prototypes
69warning-1 += -Wold-style-definition 69warning-1 += -Wold-style-definition
70warning-1 += $(call cc-option, -Wmissing-include-dirs) 70warning-1 += $(call cc-option, -Wmissing-include-dirs)
71warning-1 += $(call cc-option, -Wunused-but-set-variable) 71warning-1 += $(call cc-option, -Wunused-but-set-variable)
72warning-1 += $(call cc-disable-warning, missing-field-initializers)
72 73
73warning-2 := -Waggregate-return 74warning-2 := -Waggregate-return
74warning-2 += -Wcast-align 75warning-2 += -Wcast-align
@@ -76,6 +77,7 @@ warning-2 += -Wdisabled-optimization
76warning-2 += -Wnested-externs 77warning-2 += -Wnested-externs
77warning-2 += -Wshadow 78warning-2 += -Wshadow
78warning-2 += $(call cc-option, -Wlogical-op) 79warning-2 += $(call cc-option, -Wlogical-op)
80warning-2 += $(call cc-option, -Wmissing-field-initializers)
79 81
80warning-3 := -Wbad-function-cast 82warning-3 := -Wbad-function-cast
81warning-3 += -Wcast-qual 83warning-3 += -Wcast-qual
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 00c368c6e996..0be6f110cce7 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -304,6 +304,30 @@ cmd_lzo = (cat $(filter-out FORCE,$^) | \
304 lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ 304 lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
305 (rm -f $@ ; false) 305 (rm -f $@ ; false)
306 306
307# U-Boot mkimage
308# ---------------------------------------------------------------------------
309
310MKIMAGE := $(srctree)/scripts/mkuboot.sh
311
312# SRCARCH just happens to match slightly more than ARCH (on sparc), so reduces
313# the number of overrides in arch makefiles
314UIMAGE_ARCH ?= $(SRCARCH)
315UIMAGE_COMPRESSION ?= $(if $(2),$(2),none)
316UIMAGE_OPTS-y ?=
317UIMAGE_TYPE ?= kernel
318UIMAGE_LOADADDR ?= arch_must_set_this
319UIMAGE_ENTRYADDR ?= $(UIMAGE_LOADADDR)
320UIMAGE_NAME ?= 'Linux-$(KERNELRELEASE)'
321UIMAGE_IN ?= $<
322UIMAGE_OUT ?= $@
323
324quiet_cmd_uimage = UIMAGE $(UIMAGE_OUT)
325 cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(UIMAGE_ARCH) -O linux \
326 -C $(UIMAGE_COMPRESSION) $(UIMAGE_OPTS-y) \
327 -T $(UIMAGE_TYPE) \
328 -a $(UIMAGE_LOADADDR) -e $(UIMAGE_ENTRYADDR) \
329 -n $(UIMAGE_NAME) -d $(UIMAGE_IN) $(UIMAGE_OUT)
330
307# XZ 331# XZ
308# --------------------------------------------------------------------------- 332# ---------------------------------------------------------------------------
309# Use xzkern to compress the kernel image and xzmisc to compress other things. 333# Use xzkern to compress the kernel image and xzmisc to compress other things.
diff --git a/scripts/coccinelle/api/ptr_ret.cocci b/scripts/coccinelle/api/ptr_ret.cocci
new file mode 100644
index 000000000000..cbfd08c7d8c7
--- /dev/null
+++ b/scripts/coccinelle/api/ptr_ret.cocci
@@ -0,0 +1,70 @@
1///
2/// Use PTR_RET rather than if(IS_ERR(...)) + PTR_ERR
3///
4// Confidence: High
5// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
6// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2.
7// URL: http://coccinelle.lip6.fr/
8// Options: -no_includes -include_headers
9//
10// Keywords: ERR_PTR, PTR_ERR, PTR_RET
11// Version min: 2.6.39
12//
13
14virtual context
15virtual patch
16virtual org
17virtual report
18
19@depends on patch@
20expression ptr;
21@@
22
23- if (IS_ERR(ptr)) return PTR_ERR(ptr); else return 0;
24+ return PTR_RET(ptr);
25
26@depends on patch@
27expression ptr;
28@@
29
30- if (IS_ERR(ptr)) return PTR_ERR(ptr); return 0;
31+ return PTR_RET(ptr);
32
33@r1 depends on !patch@
34expression ptr;
35position p1;
36@@
37
38* if@p1 (IS_ERR(ptr)) return PTR_ERR(ptr); else return 0;
39
40@r2 depends on !patch@
41expression ptr;
42position p2;
43@@
44
45* if@p2 (IS_ERR(ptr)) return PTR_ERR(ptr); return 0;
46
47@script:python depends on org@
48p << r1.p1;
49@@
50
51coccilib.org.print_todo(p[0], "WARNING: PTR_RET can be used")
52
53
54@script:python depends on org@
55p << r2.p2;
56@@
57
58coccilib.org.print_todo(p[0], "WARNING: PTR_RET can be used")
59
60@script:python depends on report@
61p << r1.p1;
62@@
63
64coccilib.report.print_report(p[0], "WARNING: PTR_RET can be used")
65
66@script:python depends on report@
67p << r2.p2;
68@@
69
70coccilib.report.print_report(p[0], "WARNING: PTR_RET can be used")
diff --git a/scripts/coccinelle/free/clk_put.cocci b/scripts/coccinelle/free/clk_put.cocci
new file mode 100644
index 000000000000..46747adfd20a
--- /dev/null
+++ b/scripts/coccinelle/free/clk_put.cocci
@@ -0,0 +1,67 @@
1/// Find missing clk_puts.
2///
3//# This only signals a missing clk_put when there is a clk_put later
4//# in the same function.
5//# False positives can be due to loops.
6//
7// Confidence: Moderate
8// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
9// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2.
10// URL: http://coccinelle.lip6.fr/
11// Comments:
12// Options:
13
14virtual context
15virtual org
16virtual report
17
18@clk@
19expression e;
20statement S,S1;
21int ret;
22position p1,p2,p3;
23@@
24
25e = clk_get@p1(...)
26... when != clk_put(e)
27if (<+...e...+>) S
28... when any
29 when != clk_put(e)
30 when != if (...) { ... clk_put(e); ... }
31(
32 if (ret == 0) S1
33|
34if (...)
35 { ...
36 return 0; }
37|
38if (...)
39 { ...
40 return <+...e...+>; }
41|
42*if@p2 (...)
43 { ... when != clk_put(e)
44 when forall
45 return@p3 ...; }
46)
47... when any
48clk_put(e);
49
50@script:python depends on org@
51p1 << clk.p1;
52p2 << clk.p2;
53p3 << clk.p3;
54@@
55
56cocci.print_main("clk_get",p1)
57cocci.print_secs("if",p2)
58cocci.print_secs("needed clk_put",p3)
59
60@script:python depends on report@
61p1 << clk.p1;
62p2 << clk.p2;
63p3 << clk.p3;
64@@
65
66msg = "ERROR: missing clk_put; clk_get on line %s and execution via conditional on line %s" % (p1[0].line,p2[0].line)
67coccilib.report.print_report(p3[0],msg)
diff --git a/scripts/coccinelle/free/iounmap.cocci b/scripts/coccinelle/free/iounmap.cocci
new file mode 100644
index 000000000000..5384f4ba1192
--- /dev/null
+++ b/scripts/coccinelle/free/iounmap.cocci
@@ -0,0 +1,67 @@
1/// Find missing iounmaps.
2///
3//# This only signals a missing iounmap when there is an iounmap later
4//# in the same function.
5//# False positives can be due to loops.
6//
7// Confidence: Moderate
8// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
9// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2.
10// URL: http://coccinelle.lip6.fr/
11// Comments:
12// Options:
13
14virtual context
15virtual org
16virtual report
17
18@iom@
19expression e;
20statement S,S1;
21int ret;
22position p1,p2,p3;
23@@
24
25e = \(ioremap@p1\|ioremap_nocache@p1\)(...)
26... when != iounmap(e)
27if (<+...e...+>) S
28... when any
29 when != iounmap(e)
30 when != if (...) { ... iounmap(e); ... }
31(
32 if (ret == 0) S1
33|
34if (...)
35 { ...
36 return 0; }
37|
38if (...)
39 { ...
40 return <+...e...+>; }
41|
42*if@p2 (...)
43 { ... when != iounmap(e)
44 when forall
45 return@p3 ...; }
46)
47... when any
48iounmap(e);
49
50@script:python depends on org@
51p1 << iom.p1;
52p2 << iom.p2;
53p3 << iom.p3;
54@@
55
56cocci.print_main("ioremap",p1)
57cocci.print_secs("if",p2)
58cocci.print_secs("needed iounmap",p3)
59
60@script:python depends on report@
61p1 << iom.p1;
62p2 << iom.p2;
63p3 << iom.p3;
64@@
65
66msg = "ERROR: missing iounmap; ioremap on line %s and execution via conditional on line %s" % (p1[0].line,p2[0].line)
67coccilib.report.print_report(p3[0],msg)
diff --git a/scripts/coccinelle/misc/boolinit.cocci b/scripts/coccinelle/misc/boolinit.cocci
new file mode 100644
index 000000000000..97ce41ce8135
--- /dev/null
+++ b/scripts/coccinelle/misc/boolinit.cocci
@@ -0,0 +1,178 @@
1/// Bool initializations should use true and false. Bool tests don't need
2/// comparisons. Based on contributions from Joe Perches, Rusty Russell
3/// and Bruce W Allan.
4///
5// Confidence: High
6// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
7// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2.
8// URL: http://coccinelle.lip6.fr/
9// Options: -include_headers
10
11virtual patch
12virtual context
13virtual org
14virtual report
15
16@depends on patch@
17bool t;
18symbol true;
19symbol false;
20@@
21
22(
23- t == true
24+ t
25|
26- true == t
27+ t
28|
29- t != true
30+ !t
31|
32- true != t
33+ !t
34|
35- t == false
36+ !t
37|
38- false == t
39+ !t
40|
41- t != false
42+ t
43|
44- false != t
45+ t
46)
47
48@depends on patch disable is_zero, isnt_zero@
49bool t;
50@@
51
52(
53- t == 1
54+ t
55|
56- t != 1
57+ !t
58|
59- t == 0
60+ !t
61|
62- t != 0
63+ t
64)
65
66@depends on patch@
67bool b;
68@@
69(
70 b =
71- 0
72+ false
73|
74 b =
75- 1
76+ true
77)
78
79// ---------------------------------------------------------------------
80
81@r1 depends on !patch@
82bool t;
83position p;
84@@
85
86(
87* t@p == true
88|
89* true == t@p
90|
91* t@p != true
92|
93* true != t@p
94|
95* t@p == false
96|
97* false == t@p
98|
99* t@p != false
100|
101* false != t@p
102)
103
104@r2 depends on !patch disable is_zero, isnt_zero@
105bool t;
106position p;
107@@
108
109(
110* t@p == 1
111|
112* t@p != 1
113|
114* t@p == 0
115|
116* t@p != 0
117)
118
119@r3 depends on !patch@
120bool b;
121position p1,p2;
122constant c;
123@@
124(
125*b@p1 = 0
126|
127*b@p1 = 1
128|
129*b@p2 = c
130)
131
132@script:python depends on org@
133p << r1.p;
134@@
135
136cocci.print_main("WARNING: Comparison to bool",p)
137
138@script:python depends on org@
139p << r2.p;
140@@
141
142cocci.print_main("WARNING: Comparison of bool to 0/1",p)
143
144@script:python depends on org@
145p1 << r3.p1;
146@@
147
148cocci.print_main("WARNING: Assignment of bool to 0/1",p1)
149
150@script:python depends on org@
151p2 << r3.p2;
152@@
153
154cocci.print_main("ERROR: Assignment of bool to non-0/1 constant",p2)
155
156@script:python depends on report@
157p << r1.p;
158@@
159
160coccilib.report.print_report(p[0],"WARNING: Comparison to bool")
161
162@script:python depends on report@
163p << r2.p;
164@@
165
166coccilib.report.print_report(p[0],"WARNING: Comparison of bool to 0/1")
167
168@script:python depends on report@
169p1 << r3.p1;
170@@
171
172coccilib.report.print_report(p1[0],"WARNING: Assignment of bool to 0/1")
173
174@script:python depends on report@
175p2 << r3.p2;
176@@
177
178coccilib.report.print_report(p2[0],"ERROR: Assignment of bool to non-0/1 constant")
diff --git a/scripts/coccinelle/misc/cstptr.cocci b/scripts/coccinelle/misc/cstptr.cocci
new file mode 100644
index 000000000000..d42564484528
--- /dev/null
+++ b/scripts/coccinelle/misc/cstptr.cocci
@@ -0,0 +1,41 @@
1/// PTR_ERR should be applied before its argument is reassigned, typically
2/// to NULL
3///
4// Confidence: High
5// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
6// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2.
7// URL: http://coccinelle.lip6.fr/
8// Comments:
9// Options: -no_includes -include_headers
10
11virtual org
12virtual report
13virtual context
14
15@r exists@
16expression e,e1;
17constant c;
18position p1,p2;
19@@
20
21*e@p1 = c
22... when != e = e1
23 when != &e
24 when != true IS_ERR(e)
25*PTR_ERR@p2(e)
26
27@script:python depends on org@
28p1 << r.p1;
29p2 << r.p2;
30@@
31
32cocci.print_main("PTR_ERR",p2)
33cocci.print_secs("assignment",p1)
34
35@script:python depends on report@
36p1 << r.p1;
37p2 << r.p2;
38@@
39
40msg = "ERROR: PTR_ERR applied after initialization to constant on line %s" % (p1[0].line)
41coccilib.report.print_report(p2[0],msg)
diff --git a/scripts/coccinelle/null/badzero.cocci b/scripts/coccinelle/null/badzero.cocci
new file mode 100644
index 000000000000..d79baf7220e7
--- /dev/null
+++ b/scripts/coccinelle/null/badzero.cocci
@@ -0,0 +1,237 @@
1/// Compare pointer-typed values to NULL rather than 0
2///
3//# This makes an effort to choose between !x and x == NULL. !x is used
4//# if it has previously been used with the function used to initialize x.
5//# This relies on type information. More type information can be obtained
6//# using the option -all_includes and the option -I to specify an
7//# include path.
8//
9// Confidence: High
10// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
11// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2.
12// URL: http://coccinelle.lip6.fr/
13// Comments:
14// Options:
15
16virtual patch
17virtual context
18virtual org
19virtual report
20
21@initialize:ocaml@
22let negtable = Hashtbl.create 101
23
24@depends on patch@
25expression *E;
26identifier f;
27@@
28
29(
30 (E = f(...)) ==
31- 0
32+ NULL
33|
34 (E = f(...)) !=
35- 0
36+ NULL
37|
38- 0
39+ NULL
40 == (E = f(...))
41|
42- 0
43+ NULL
44 != (E = f(...))
45)
46
47
48@t1 depends on !patch@
49expression *E;
50identifier f;
51position p;
52@@
53
54(
55 (E = f(...)) ==
56* 0@p
57|
58 (E = f(...)) !=
59* 0@p
60|
61* 0@p
62 == (E = f(...))
63|
64* 0@p
65 != (E = f(...))
66)
67
68@script:python depends on org@
69p << t1.p;
70@@
71
72coccilib.org.print_todo(p[0], "WARNING comparing pointer to 0")
73
74@script:python depends on report@
75p << t1.p;
76@@
77
78coccilib.report.print_report(p[0], "WARNING comparing pointer to 0")
79
80// Tests of returned values
81
82@s@
83identifier f;
84expression E,E1;
85@@
86
87 E = f(...)
88 ... when != E = E1
89 !E
90
91@script:ocaml depends on s@
92f << s.f;
93@@
94
95try let _ = Hashtbl.find negtable f in ()
96with Not_found -> Hashtbl.add negtable f ()
97
98@ r disable is_zero,isnt_zero exists @
99expression *E;
100identifier f;
101@@
102
103E = f(...)
104...
105(E == 0
106|E != 0
107|0 == E
108|0 != E
109)
110
111@script:ocaml@
112f << r.f;
113@@
114
115try let _ = Hashtbl.find negtable f in ()
116with Not_found -> include_match false
117
118// This rule may lead to inconsistent path problems, if E is defined in two
119// places
120@ depends on patch disable is_zero,isnt_zero @
121expression *E;
122expression E1;
123identifier r.f;
124@@
125
126E = f(...)
127<...
128(
129- E == 0
130+ !E
131|
132- E != 0
133+ E
134|
135- 0 == E
136+ !E
137|
138- 0 != E
139+ E
140)
141...>
142?E = E1
143
144@t2 depends on !patch disable is_zero,isnt_zero @
145expression *E;
146expression E1;
147identifier r.f;
148position p1;
149position p2;
150@@
151
152E = f(...)
153<...
154(
155* E == 0@p1
156|
157* E != 0@p2
158|
159* 0@p1 == E
160|
161* 0@p1 != E
162)
163...>
164?E = E1
165
166@script:python depends on org@
167p << t2.p1;
168@@
169
170coccilib.org.print_todo(p[0], "WARNING comparing pointer to 0, suggest !E")
171
172@script:python depends on org@
173p << t2.p2;
174@@
175
176coccilib.org.print_todo(p[0], "WARNING comparing pointer to 0")
177
178@script:python depends on report@
179p << t2.p1;
180@@
181
182coccilib.report.print_report(p[0], "WARNING comparing pointer to 0, suggest !E")
183
184@script:python depends on report@
185p << t2.p2;
186@@
187
188coccilib.report.print_report(p[0], "WARNING comparing pointer to 0")
189
190@ depends on patch disable is_zero,isnt_zero @
191expression *E;
192@@
193
194(
195 E ==
196- 0
197+ NULL
198|
199 E !=
200- 0
201+ NULL
202|
203- 0
204+ NULL
205 == E
206|
207- 0
208+ NULL
209 != E
210)
211
212@ t3 depends on !patch disable is_zero,isnt_zero @
213expression *E;
214position p;
215@@
216
217(
218* E == 0@p
219|
220* E != 0@p
221|
222* 0@p == E
223|
224* 0@p != E
225)
226
227@script:python depends on org@
228p << t3.p;
229@@
230
231coccilib.org.print_todo(p[0], "WARNING comparing pointer to 0")
232
233@script:python depends on report@
234p << t3.p;
235@@
236
237coccilib.report.print_report(p[0], "WARNING comparing pointer to 0")
diff --git a/scripts/dtc/dtc.c b/scripts/dtc/dtc.c
index 451c92d31b19..2ef5e2e3dd38 100644
--- a/scripts/dtc/dtc.c
+++ b/scripts/dtc/dtc.c
@@ -101,7 +101,7 @@ int main(int argc, char *argv[])
101 const char *outform = "dts"; 101 const char *outform = "dts";
102 const char *outname = "-"; 102 const char *outname = "-";
103 const char *depname = NULL; 103 const char *depname = NULL;
104 int force = 0, check = 0, sort = 0; 104 int force = 0, sort = 0;
105 const char *arg; 105 const char *arg;
106 int opt; 106 int opt;
107 FILE *outf = NULL; 107 FILE *outf = NULL;
@@ -143,9 +143,6 @@ int main(int argc, char *argv[])
143 case 'f': 143 case 'f':
144 force = 1; 144 force = 1;
145 break; 145 break;
146 case 'c':
147 check = 1;
148 break;
149 case 'q': 146 case 'q':
150 quiet++; 147 quiet++;
151 break; 148 break;
diff --git a/scripts/dtc/flattree.c b/scripts/dtc/flattree.c
index ead0332c87e1..28d0b2381df6 100644
--- a/scripts/dtc/flattree.c
+++ b/scripts/dtc/flattree.c
@@ -697,7 +697,6 @@ static struct reserve_info *flat_read_mem_reserve(struct inbuf *inb)
697{ 697{
698 struct reserve_info *reservelist = NULL; 698 struct reserve_info *reservelist = NULL;
699 struct reserve_info *new; 699 struct reserve_info *new;
700 const char *p;
701 struct fdt_reserve_entry re; 700 struct fdt_reserve_entry re;
702 701
703 /* 702 /*
@@ -706,7 +705,6 @@ static struct reserve_info *flat_read_mem_reserve(struct inbuf *inb)
706 * 705 *
707 * First pass, count entries. 706 * First pass, count entries.
708 */ 707 */
709 p = inb->ptr;
710 while (1) { 708 while (1) {
711 flat_read_chunk(inb, &re, sizeof(re)); 709 flat_read_chunk(inb, &re, sizeof(re));
712 re.address = fdt64_to_cpu(re.address); 710 re.address = fdt64_to_cpu(re.address);
diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh
index 98cffcb941ea..a2af2e88daf3 100644
--- a/scripts/gcc-goto.sh
+++ b/scripts/gcc-goto.sh
@@ -2,4 +2,20 @@
2# Test for gcc 'asm goto' support 2# Test for gcc 'asm goto' support
3# Copyright (C) 2010, Jason Baron <jbaron@redhat.com> 3# Copyright (C) 2010, Jason Baron <jbaron@redhat.com>
4 4
5echo "int main(void) { entry: asm goto (\"\"::::entry); return 0; }" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y" 5cat << "END" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"
6int main(void)
7{
8#ifdef __arm__
9 /*
10 * Not related to asm goto, but used by jump label
11 * and broken on some ARM GCC versions (see GCC Bug 48637).
12 */
13 static struct { int dummy; int state; } tp;
14 asm (".long %c0" :: "i" (&tp.state));
15#endif
16
17entry:
18 asm goto ("" :::: entry);
19 return 0;
20}
21END
diff --git a/scripts/headers_check.pl b/scripts/headers_check.pl
index 7957e7a5166a..64ac2380e4d5 100644
--- a/scripts/headers_check.pl
+++ b/scripts/headers_check.pl
@@ -19,6 +19,7 @@
19# 3) Check for leaked CONFIG_ symbols 19# 3) Check for leaked CONFIG_ symbols
20 20
21use strict; 21use strict;
22use File::Basename;
22 23
23my ($dir, $arch, @files) = @ARGV; 24my ($dir, $arch, @files) = @ARGV;
24 25
@@ -99,6 +100,39 @@ sub check_asm_types
99} 100}
100 101
101my $linux_types; 102my $linux_types;
103my %import_stack = ();
104sub check_include_typesh
105{
106 my $path = $_[0];
107 my $import_path;
108
109 my $fh;
110 my @file_paths = ($path, $dir . "/" . $path, dirname($filename) . "/" . $path);
111 for my $possible ( @file_paths ) {
112 if (not $import_stack{$possible} and open($fh, '<', $possible)) {
113 $import_path = $possible;
114 $import_stack{$import_path} = 1;
115 last;
116 }
117 }
118 if (eof $fh) {
119 return;
120 }
121
122 my $line;
123 while ($line = <$fh>) {
124 if ($line =~ m/^\s*#\s*include\s+<linux\/types.h>/) {
125 $linux_types = 1;
126 last;
127 }
128 if (my $included = ($line =~ /^\s*#\s*include\s+[<"](\S+)[>"]/)[0]) {
129 check_include_typesh($included);
130 }
131 }
132 close $fh;
133 delete $import_stack{$import_path};
134}
135
102sub check_sizetypes 136sub check_sizetypes
103{ 137{
104 if ($filename =~ /types.h|int-l64.h|int-ll64.h/o) { 138 if ($filename =~ /types.h|int-l64.h|int-ll64.h/o) {
@@ -113,6 +147,9 @@ sub check_sizetypes
113 $linux_types = 1; 147 $linux_types = 1;
114 return; 148 return;
115 } 149 }
150 if (my $included = ($line =~ /^\s*#\s*include\s+[<"](\S+)[>"]/)[0]) {
151 check_include_typesh($included);
152 }
116 if ($line =~ m/__[us](8|16|32|64)\b/) { 153 if ($line =~ m/__[us](8|16|32|64)\b/) {
117 printf STDERR "$filename:$lineno: " . 154 printf STDERR "$filename:$lineno: " .
118 "found __[us]{8,16,32,64} type " . 155 "found __[us]{8,16,32,64} type " .
@@ -122,4 +159,3 @@ sub check_sizetypes
122 #$ret = 1; 159 #$ret = 1;
123 } 160 }
124} 161}
125
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 7c7a5a6cc3f5..0586085136d1 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -344,10 +344,8 @@ setsym:
344 344
345int conf_read(const char *name) 345int conf_read(const char *name)
346{ 346{
347 struct symbol *sym, *choice_sym; 347 struct symbol *sym;
348 struct property *prop; 348 int i;
349 struct expr *e;
350 int i, flags;
351 349
352 sym_set_change_count(0); 350 sym_set_change_count(0);
353 351
@@ -357,7 +355,7 @@ int conf_read(const char *name)
357 for_all_symbols(i, sym) { 355 for_all_symbols(i, sym) {
358 sym_calc_value(sym); 356 sym_calc_value(sym);
359 if (sym_is_choice(sym) || (sym->flags & SYMBOL_AUTO)) 357 if (sym_is_choice(sym) || (sym->flags & SYMBOL_AUTO))
360 goto sym_ok; 358 continue;
361 if (sym_has_value(sym) && (sym->flags & SYMBOL_WRITE)) { 359 if (sym_has_value(sym) && (sym->flags & SYMBOL_WRITE)) {
362 /* check that calculated value agrees with saved value */ 360 /* check that calculated value agrees with saved value */
363 switch (sym->type) { 361 switch (sym->type) {
@@ -366,30 +364,18 @@ int conf_read(const char *name)
366 if (sym->def[S_DEF_USER].tri != sym_get_tristate_value(sym)) 364 if (sym->def[S_DEF_USER].tri != sym_get_tristate_value(sym))
367 break; 365 break;
368 if (!sym_is_choice(sym)) 366 if (!sym_is_choice(sym))
369 goto sym_ok; 367 continue;
370 /* fall through */ 368 /* fall through */
371 default: 369 default:
372 if (!strcmp(sym->curr.val, sym->def[S_DEF_USER].val)) 370 if (!strcmp(sym->curr.val, sym->def[S_DEF_USER].val))
373 goto sym_ok; 371 continue;
374 break; 372 break;
375 } 373 }
376 } else if (!sym_has_value(sym) && !(sym->flags & SYMBOL_WRITE)) 374 } else if (!sym_has_value(sym) && !(sym->flags & SYMBOL_WRITE))
377 /* no previous value and not saved */ 375 /* no previous value and not saved */
378 goto sym_ok; 376 continue;
379 conf_unsaved++; 377 conf_unsaved++;
380 /* maybe print value in verbose mode... */ 378 /* maybe print value in verbose mode... */
381 sym_ok:
382 if (!sym_is_choice(sym))
383 continue;
384 /* The choice symbol only has a set value (and thus is not new)
385 * if all its visible childs have values.
386 */
387 prop = sym_get_choice_prop(sym);
388 flags = sym->flags;
389 expr_list_for_each_sym(prop->expr, e, choice_sym)
390 if (choice_sym->visible != no)
391 flags &= choice_sym->flags;
392 sym->flags &= flags | ~SYMBOL_DEF_USER;
393 } 379 }
394 380
395 for_all_symbols(i, sym) { 381 for_all_symbols(i, sym) {
diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
index ceadf0e150cf..974d5cb7e30a 100644..100755
--- a/scripts/kconfig/merge_config.sh
+++ b/scripts/kconfig/merge_config.sh
@@ -31,10 +31,12 @@ usage() {
31 echo " -h display this help text" 31 echo " -h display this help text"
32 echo " -m only merge the fragments, do not execute the make command" 32 echo " -m only merge the fragments, do not execute the make command"
33 echo " -n use allnoconfig instead of alldefconfig" 33 echo " -n use allnoconfig instead of alldefconfig"
34 echo " -r list redundant entries when merging fragments"
34} 35}
35 36
36MAKE=true 37MAKE=true
37ALLTARGET=alldefconfig 38ALLTARGET=alldefconfig
39WARNREDUN=false
38 40
39while true; do 41while true; do
40 case $1 in 42 case $1 in
@@ -52,18 +54,27 @@ while true; do
52 usage 54 usage
53 exit 55 exit
54 ;; 56 ;;
57 "-r")
58 WARNREDUN=true
59 shift
60 continue
61 ;;
55 *) 62 *)
56 break 63 break
57 ;; 64 ;;
58 esac 65 esac
59done 66done
60 67
61 68INITFILE=$1
69shift;
62 70
63MERGE_LIST=$* 71MERGE_LIST=$*
64SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(CONFIG_[a-zA-Z0-9_]*\)[= ].*/\2/p" 72SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(CONFIG_[a-zA-Z0-9_]*\)[= ].*/\2/p"
65TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX) 73TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX)
66 74
75echo "Using $INITFILE as base"
76cat $INITFILE > $TMP_FILE
77
67# Merge files, printing warnings on overrided values 78# Merge files, printing warnings on overrided values
68for MERGE_FILE in $MERGE_LIST ; do 79for MERGE_FILE in $MERGE_LIST ; do
69 echo "Merging $MERGE_FILE" 80 echo "Merging $MERGE_FILE"
@@ -79,6 +90,8 @@ for MERGE_FILE in $MERGE_LIST ; do
79 echo Previous value: $PREV_VAL 90 echo Previous value: $PREV_VAL
80 echo New value: $NEW_VAL 91 echo New value: $NEW_VAL
81 echo 92 echo
93 elif [ "$WARNREDUN" = "true" ]; then
94 echo Value of $CFG is redundant by fragment $MERGE_FILE:
82 fi 95 fi
83 sed -i "/$CFG[ =]/d" $TMP_FILE 96 sed -i "/$CFG[ =]/d" $TMP_FILE
84 fi 97 fi
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 071f00c3046e..22a3c400fc41 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -262,11 +262,18 @@ static struct symbol *sym_calc_choice(struct symbol *sym)
262 struct symbol *def_sym; 262 struct symbol *def_sym;
263 struct property *prop; 263 struct property *prop;
264 struct expr *e; 264 struct expr *e;
265 int flags;
265 266
266 /* first calculate all choice values' visibilities */ 267 /* first calculate all choice values' visibilities */
268 flags = sym->flags;
267 prop = sym_get_choice_prop(sym); 269 prop = sym_get_choice_prop(sym);
268 expr_list_for_each_sym(prop->expr, e, def_sym) 270 expr_list_for_each_sym(prop->expr, e, def_sym) {
269 sym_calc_visibility(def_sym); 271 sym_calc_visibility(def_sym);
272 if (def_sym->visible != no)
273 flags &= def_sym->flags;
274 }
275
276 sym->flags &= flags | ~SYMBOL_DEF_USER;
270 277
271 /* is the user choice visible? */ 278 /* is the user choice visible? */
272 def_sym = sym->def[S_DEF_USER].val; 279 def_sym = sym->def[S_DEF_USER].val;
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 9adb667dd31a..3f01fd908730 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -849,7 +849,7 @@ static void check_section(const char *modname, struct elf_info *elf,
849 849
850#define ALL_INIT_DATA_SECTIONS \ 850#define ALL_INIT_DATA_SECTIONS \
851 ".init.setup$", ".init.rodata$", \ 851 ".init.setup$", ".init.rodata$", \
852 ".devinit.rodata$", ".cpuinit.rodata$", ".meminit.rodata$" \ 852 ".devinit.rodata$", ".cpuinit.rodata$", ".meminit.rodata$", \
853 ".init.data$", ".devinit.data$", ".cpuinit.data$", ".meminit.data$" 853 ".init.data$", ".devinit.data$", ".cpuinit.data$", ".meminit.data$"
854#define ALL_EXIT_DATA_SECTIONS \ 854#define ALL_EXIT_DATA_SECTIONS \
855 ".exit.data$", ".devexit.data$", ".cpuexit.data$", ".memexit.data$" 855 ".exit.data$", ".devexit.data$", ".cpuexit.data$", ".memexit.data$"
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 3c6c0b14c807..eee5f8ed2493 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -97,6 +97,7 @@ mkdir -m 755 -p "$libc_headers_dir/DEBIAN"
97mkdir -p "$libc_headers_dir/usr/share/doc/$libc_headers_packagename" 97mkdir -p "$libc_headers_dir/usr/share/doc/$libc_headers_packagename"
98mkdir -m 755 -p "$kernel_headers_dir/DEBIAN" 98mkdir -m 755 -p "$kernel_headers_dir/DEBIAN"
99mkdir -p "$kernel_headers_dir/usr/share/doc/$kernel_headers_packagename" 99mkdir -p "$kernel_headers_dir/usr/share/doc/$kernel_headers_packagename"
100mkdir -p "$kernel_headers_dir/lib/modules/$version/"
100if [ "$ARCH" = "um" ] ; then 101if [ "$ARCH" = "um" ] ; then
101 mkdir -p "$tmpdir/usr/lib/uml/modules/$version" "$tmpdir/usr/bin" 102 mkdir -p "$tmpdir/usr/lib/uml/modules/$version" "$tmpdir/usr/bin"
102fi 103fi
@@ -120,15 +121,19 @@ else
120fi 121fi
121 122
122if grep -q '^CONFIG_MODULES=y' .config ; then 123if grep -q '^CONFIG_MODULES=y' .config ; then
123 INSTALL_MOD_PATH="$tmpdir" make KBUILD_SRC= modules_install 124 INSTALL_MOD_PATH="$tmpdir" $MAKE KBUILD_SRC= modules_install
125 rm -f "$tmpdir/lib/modules/$version/build"
126 rm -f "$tmpdir/lib/modules/$version/source"
124 if [ "$ARCH" = "um" ] ; then 127 if [ "$ARCH" = "um" ] ; then
125 mv "$tmpdir/lib/modules/$version"/* "$tmpdir/usr/lib/uml/modules/$version/" 128 mv "$tmpdir/lib/modules/$version"/* "$tmpdir/usr/lib/uml/modules/$version/"
126 rmdir "$tmpdir/lib/modules/$version" 129 rmdir "$tmpdir/lib/modules/$version"
127 fi 130 fi
128fi 131fi
129 132
130make headers_check 133if [ "$ARCH" != "um" ]; then
131make headers_install INSTALL_HDR_PATH="$libc_headers_dir/usr" 134 $MAKE headers_check KBUILD_SRC=
135 $MAKE headers_install KBUILD_SRC= INSTALL_HDR_PATH="$libc_headers_dir/usr"
136fi
132 137
133# Install the maintainer scripts 138# Install the maintainer scripts
134# Note: hook scripts under /etc/kernel are also executed by official Debian 139# Note: hook scripts under /etc/kernel are also executed by official Debian
@@ -245,6 +250,7 @@ destdir=$kernel_headers_dir/usr/src/linux-headers-$version
245mkdir -p "$destdir" 250mkdir -p "$destdir"
246(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -) 251(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
247(cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -) 252(cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -)
253ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
248rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles" 254rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
249arch=$(dpkg --print-architecture) 255arch=$(dpkg --print-architecture)
250 256
@@ -259,8 +265,6 @@ Description: Linux kernel headers for $KERNELRELEASE on $arch
259 This is useful for people who need to build external modules 265 This is useful for people who need to build external modules
260EOF 266EOF
261 267
262create_package "$kernel_headers_packagename" "$kernel_headers_dir"
263
264# Do we have firmware? Move it out of the way and build it into a package. 268# Do we have firmware? Move it out of the way and build it into a package.
265if [ -e "$tmpdir/lib/firmware" ]; then 269if [ -e "$tmpdir/lib/firmware" ]; then
266 mv "$tmpdir/lib/firmware" "$fwdir/lib/" 270 mv "$tmpdir/lib/firmware" "$fwdir/lib/"
@@ -287,7 +291,11 @@ Description: Linux support headers for userspace development
287 are used by the installed headers for GNU glibc and other system libraries. 291 are used by the installed headers for GNU glibc and other system libraries.
288EOF 292EOF
289 293
290create_package "$libc_headers_packagename" "$libc_headers_dir" 294if [ "$ARCH" != "um" ]; then
295 create_package "$kernel_headers_packagename" "$kernel_headers_dir"
296 create_package "$libc_headers_packagename" "$libc_headers_dir"
297fi
298
291create_package "$packagename" "$tmpdir" 299create_package "$packagename" "$tmpdir"
292 300
293exit 0 301exit 0
diff --git a/scripts/patch-kernel b/scripts/patch-kernel
index 20fb25c23382..d000ea3a41fd 100755
--- a/scripts/patch-kernel
+++ b/scripts/patch-kernel
@@ -116,6 +116,10 @@ findFile () {
116 ext=".bz2" 116 ext=".bz2"
117 name="bzip2" 117 name="bzip2"
118 uncomp="bunzip2 -dc" 118 uncomp="bunzip2 -dc"
119 elif [ -r ${filebase}.xz ]; then
120 ext=".xz"
121 name="xz"
122 uncomp="xz -dc"
119 elif [ -r ${filebase}.zip ]; then 123 elif [ -r ${filebase}.zip ]; then
120 ext=".zip" 124 ext=".zip"
121 name="zip" 125 name="zip"
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 4d403844e137..bd6dca8a0ab2 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -75,8 +75,7 @@ scm_version()
75 [ -w . ] && git update-index --refresh --unmerged > /dev/null 75 [ -w . ] && git update-index --refresh --unmerged > /dev/null
76 76
77 # Check for uncommitted changes 77 # Check for uncommitted changes
78 if git diff-index --name-only HEAD | grep -v "^scripts/package" \ 78 if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
79 | read dummy; then
80 printf '%s' -dirty 79 printf '%s' -dirty
81 fi 80 fi
82 81
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 833813a99e7c..0d6004e20658 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -116,7 +116,7 @@ docscope()
116 116
117dogtags() 117dogtags()
118{ 118{
119 all_sources | gtags -f - 119 all_sources | gtags -i -f -
120} 120}
121 121
122exuberant() 122exuberant()
@@ -166,9 +166,6 @@ exuberant()
166 all_defconfigs | xargs -r $1 -a \ 166 all_defconfigs | xargs -r $1 -a \
167 --langdef=dotconfig --language-force=dotconfig \ 167 --langdef=dotconfig --language-force=dotconfig \
168 --regex-dotconfig='/^#?[[:blank:]]*(CONFIG_[[:alnum:]_]+)/\1/' 168 --regex-dotconfig='/^#?[[:blank:]]*(CONFIG_[[:alnum:]_]+)/\1/'
169
170 # Remove structure forward declarations.
171 LANG=C sed -i -e '/^\([a-zA-Z_][a-zA-Z0-9_]*\)\t.*\t\/\^struct \1;.*\$\/;"\tx$/d' tags
172} 169}
173 170
174emacs() 171emacs()
@@ -233,6 +230,7 @@ if [ "${ARCH}" = "um" ]; then
233 fi 230 fi
234fi 231fi
235 232
233remove_structs=
236case "$1" in 234case "$1" in
237 "cscope") 235 "cscope")
238 docscope 236 docscope
@@ -245,10 +243,17 @@ case "$1" in
245 "tags") 243 "tags")
246 rm -f tags 244 rm -f tags
247 xtags ctags 245 xtags ctags
246 remove_structs=y
248 ;; 247 ;;
249 248
250 "TAGS") 249 "TAGS")
251 rm -f TAGS 250 rm -f TAGS
252 xtags etags 251 xtags etags
252 remove_structs=y
253 ;; 253 ;;
254esac 254esac
255
256# Remove structure forward declarations.
257if [ -n $remove_structs ]; then
258 LANG=C sed -i -e '/^\([a-zA-Z_][a-zA-Z0-9_]*\)\t.*\t\/\^struct \1;.*\$\/;"\tx$/d' $1
259fi
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 6989472d0957..1a70fa26da72 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -513,7 +513,7 @@ static noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
513 * be performed under a lock, to allow the lock to be released 513 * be performed under a lock, to allow the lock to be released
514 * before calling the auditing code. 514 * before calling the auditing code.
515 */ 515 */
516int avc_audit(u32 ssid, u32 tsid, 516inline int avc_audit(u32 ssid, u32 tsid,
517 u16 tclass, u32 requested, 517 u16 tclass, u32 requested,
518 struct av_decision *avd, int result, struct common_audit_data *a, 518 struct av_decision *avd, int result, struct common_audit_data *a,
519 unsigned flags) 519 unsigned flags)
@@ -741,6 +741,41 @@ int avc_ss_reset(u32 seqno)
741 return rc; 741 return rc;
742} 742}
743 743
744/*
745 * Slow-path helper function for avc_has_perm_noaudit,
746 * when the avc_node lookup fails. We get called with
747 * the RCU read lock held, and need to return with it
748 * still held, but drop if for the security compute.
749 *
750 * Don't inline this, since it's the slow-path and just
751 * results in a bigger stack frame.
752 */
753static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
754 u16 tclass, struct av_decision *avd)
755{
756 rcu_read_unlock();
757 security_compute_av(ssid, tsid, tclass, avd);
758 rcu_read_lock();
759 return avc_insert(ssid, tsid, tclass, avd);
760}
761
762static noinline int avc_denied(u32 ssid, u32 tsid,
763 u16 tclass, u32 requested,
764 unsigned flags,
765 struct av_decision *avd)
766{
767 if (flags & AVC_STRICT)
768 return -EACCES;
769
770 if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
771 return -EACCES;
772
773 avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
774 tsid, tclass, avd->seqno);
775 return 0;
776}
777
778
744/** 779/**
745 * avc_has_perm_noaudit - Check permissions but perform no auditing. 780 * avc_has_perm_noaudit - Check permissions but perform no auditing.
746 * @ssid: source security identifier 781 * @ssid: source security identifier
@@ -761,7 +796,7 @@ int avc_ss_reset(u32 seqno)
761 * auditing, e.g. in cases where a lock must be held for the check but 796 * auditing, e.g. in cases where a lock must be held for the check but
762 * should be released for the auditing. 797 * should be released for the auditing.
763 */ 798 */
764int avc_has_perm_noaudit(u32 ssid, u32 tsid, 799inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
765 u16 tclass, u32 requested, 800 u16 tclass, u32 requested,
766 unsigned flags, 801 unsigned flags,
767 struct av_decision *avd) 802 struct av_decision *avd)
@@ -776,26 +811,15 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
776 811
777 node = avc_lookup(ssid, tsid, tclass); 812 node = avc_lookup(ssid, tsid, tclass);
778 if (unlikely(!node)) { 813 if (unlikely(!node)) {
779 rcu_read_unlock(); 814 node = avc_compute_av(ssid, tsid, tclass, avd);
780 security_compute_av(ssid, tsid, tclass, avd);
781 rcu_read_lock();
782 node = avc_insert(ssid, tsid, tclass, avd);
783 } else { 815 } else {
784 memcpy(avd, &node->ae.avd, sizeof(*avd)); 816 memcpy(avd, &node->ae.avd, sizeof(*avd));
785 avd = &node->ae.avd; 817 avd = &node->ae.avd;
786 } 818 }
787 819
788 denied = requested & ~(avd->allowed); 820 denied = requested & ~(avd->allowed);
789 821 if (unlikely(denied))
790 if (denied) { 822 rc = avc_denied(ssid, tsid, tclass, requested, flags, avd);
791 if (flags & AVC_STRICT)
792 rc = -EACCES;
793 else if (!selinux_enforcing || (avd->flags & AVD_FLAGS_PERMISSIVE))
794 avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
795 tsid, tclass, avd->seqno);
796 else
797 rc = -EACCES;
798 }
799 823
800 rcu_read_unlock(); 824 rcu_read_unlock();
801 return rc; 825 return rc;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 304929909375..28482f9e15b8 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -28,7 +28,6 @@
28#include <linux/kernel.h> 28#include <linux/kernel.h>
29#include <linux/tracehook.h> 29#include <linux/tracehook.h>
30#include <linux/errno.h> 30#include <linux/errno.h>
31#include <linux/ext2_fs.h>
32#include <linux/sched.h> 31#include <linux/sched.h>
33#include <linux/security.h> 32#include <linux/security.h>
34#include <linux/xattr.h> 33#include <linux/xattr.h>
@@ -2147,7 +2146,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
2147 fdt = files_fdtable(files); 2146 fdt = files_fdtable(files);
2148 if (i >= fdt->max_fds) 2147 if (i >= fdt->max_fds)
2149 break; 2148 break;
2150 set = fdt->open_fds->fds_bits[j]; 2149 set = fdt->open_fds[j];
2151 if (!set) 2150 if (!set)
2152 continue; 2151 continue;
2153 spin_unlock(&files->file_lock); 2152 spin_unlock(&files->file_lock);
@@ -2971,15 +2970,15 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
2971 /* fall through */ 2970 /* fall through */
2972 case FIGETBSZ: 2971 case FIGETBSZ:
2973 /* fall through */ 2972 /* fall through */
2974 case EXT2_IOC_GETFLAGS: 2973 case FS_IOC_GETFLAGS:
2975 /* fall through */ 2974 /* fall through */
2976 case EXT2_IOC_GETVERSION: 2975 case FS_IOC_GETVERSION:
2977 error = file_has_perm(cred, file, FILE__GETATTR); 2976 error = file_has_perm(cred, file, FILE__GETATTR);
2978 break; 2977 break;
2979 2978
2980 case EXT2_IOC_SETFLAGS: 2979 case FS_IOC_SETFLAGS:
2981 /* fall through */ 2980 /* fall through */
2982 case EXT2_IOC_SETVERSION: 2981 case FS_IOC_SETVERSION:
2983 error = file_has_perm(cred, file, FILE__SETATTR); 2982 error = file_has_perm(cred, file, FILE__SETATTR);
2984 break; 2983 break;
2985 2984
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 48a7d0014b4f..d7018bfa1f00 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -344,7 +344,7 @@ static int sel_make_classes(void);
344static int sel_make_policycap(void); 344static int sel_make_policycap(void);
345 345
346/* declaration for sel_make_class_dirs */ 346/* declaration for sel_make_class_dirs */
347static int sel_make_dir(struct inode *dir, struct dentry *dentry, 347static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
348 unsigned long *ino); 348 unsigned long *ino);
349 349
350static ssize_t sel_read_mls(struct file *filp, char __user *buf, 350static ssize_t sel_read_mls(struct file *filp, char __user *buf,
@@ -1678,13 +1678,9 @@ static int sel_make_class_dir_entries(char *classname, int index,
1678 inode->i_ino = sel_class_to_ino(index); 1678 inode->i_ino = sel_class_to_ino(index);
1679 d_add(dentry, inode); 1679 d_add(dentry, inode);
1680 1680
1681 dentry = d_alloc_name(dir, "perms"); 1681 dentry = sel_make_dir(dir, "perms", &last_class_ino);
1682 if (!dentry) 1682 if (IS_ERR(dentry))
1683 return -ENOMEM; 1683 return PTR_ERR(dentry);
1684
1685 rc = sel_make_dir(dir->d_inode, dentry, &last_class_ino);
1686 if (rc)
1687 return rc;
1688 1684
1689 rc = sel_make_perm_files(classname, index, dentry); 1685 rc = sel_make_perm_files(classname, index, dentry);
1690 1686
@@ -1733,15 +1729,12 @@ static int sel_make_classes(void)
1733 for (i = 0; i < nclasses; i++) { 1729 for (i = 0; i < nclasses; i++) {
1734 struct dentry *class_name_dir; 1730 struct dentry *class_name_dir;
1735 1731
1736 rc = -ENOMEM; 1732 class_name_dir = sel_make_dir(class_dir, classes[i],
1737 class_name_dir = d_alloc_name(class_dir, classes[i]);
1738 if (!class_name_dir)
1739 goto out;
1740
1741 rc = sel_make_dir(class_dir->d_inode, class_name_dir,
1742 &last_class_ino); 1733 &last_class_ino);
1743 if (rc) 1734 if (IS_ERR(class_name_dir)) {
1735 rc = PTR_ERR(class_name_dir);
1744 goto out; 1736 goto out;
1737 }
1745 1738
1746 /* i+1 since class values are 1-indexed */ 1739 /* i+1 since class values are 1-indexed */
1747 rc = sel_make_class_dir_entries(classes[i], i + 1, 1740 rc = sel_make_class_dir_entries(classes[i], i + 1,
@@ -1787,14 +1780,20 @@ static int sel_make_policycap(void)
1787 return 0; 1780 return 0;
1788} 1781}
1789 1782
1790static int sel_make_dir(struct inode *dir, struct dentry *dentry, 1783static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
1791 unsigned long *ino) 1784 unsigned long *ino)
1792{ 1785{
1786 struct dentry *dentry = d_alloc_name(dir, name);
1793 struct inode *inode; 1787 struct inode *inode;
1794 1788
1795 inode = sel_make_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO); 1789 if (!dentry)
1796 if (!inode) 1790 return ERR_PTR(-ENOMEM);
1797 return -ENOMEM; 1791
1792 inode = sel_make_inode(dir->d_sb, S_IFDIR | S_IRUGO | S_IXUGO);
1793 if (!inode) {
1794 dput(dentry);
1795 return ERR_PTR(-ENOMEM);
1796 }
1798 1797
1799 inode->i_op = &simple_dir_inode_operations; 1798 inode->i_op = &simple_dir_inode_operations;
1800 inode->i_fop = &simple_dir_operations; 1799 inode->i_fop = &simple_dir_operations;
@@ -1803,16 +1802,16 @@ static int sel_make_dir(struct inode *dir, struct dentry *dentry,
1803 inc_nlink(inode); 1802 inc_nlink(inode);
1804 d_add(dentry, inode); 1803 d_add(dentry, inode);
1805 /* bump link count on parent directory, too */ 1804 /* bump link count on parent directory, too */
1806 inc_nlink(dir); 1805 inc_nlink(dir->d_inode);
1807 1806
1808 return 0; 1807 return dentry;
1809} 1808}
1810 1809
1811static int sel_fill_super(struct super_block *sb, void *data, int silent) 1810static int sel_fill_super(struct super_block *sb, void *data, int silent)
1812{ 1811{
1813 int ret; 1812 int ret;
1814 struct dentry *dentry; 1813 struct dentry *dentry;
1815 struct inode *inode, *root_inode; 1814 struct inode *inode;
1816 struct inode_security_struct *isec; 1815 struct inode_security_struct *isec;
1817 1816
1818 static struct tree_descr selinux_files[] = { 1817 static struct tree_descr selinux_files[] = {
@@ -1839,18 +1838,12 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
1839 if (ret) 1838 if (ret)
1840 goto err; 1839 goto err;
1841 1840
1842 root_inode = sb->s_root->d_inode; 1841 bool_dir = sel_make_dir(sb->s_root, BOOL_DIR_NAME, &sel_last_ino);
1843 1842 if (IS_ERR(bool_dir)) {
1844 ret = -ENOMEM; 1843 ret = PTR_ERR(bool_dir);
1845 dentry = d_alloc_name(sb->s_root, BOOL_DIR_NAME); 1844 bool_dir = NULL;
1846 if (!dentry)
1847 goto err; 1845 goto err;
1848 1846 }
1849 ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
1850 if (ret)
1851 goto err;
1852
1853 bool_dir = dentry;
1854 1847
1855 ret = -ENOMEM; 1848 ret = -ENOMEM;
1856 dentry = d_alloc_name(sb->s_root, NULL_FILE_NAME); 1849 dentry = d_alloc_name(sb->s_root, NULL_FILE_NAME);
@@ -1872,54 +1865,39 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
1872 d_add(dentry, inode); 1865 d_add(dentry, inode);
1873 selinux_null = dentry; 1866 selinux_null = dentry;
1874 1867
1875 ret = -ENOMEM; 1868 dentry = sel_make_dir(sb->s_root, "avc", &sel_last_ino);
1876 dentry = d_alloc_name(sb->s_root, "avc"); 1869 if (IS_ERR(dentry)) {
1877 if (!dentry) 1870 ret = PTR_ERR(dentry);
1878 goto err;
1879
1880 ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
1881 if (ret)
1882 goto err; 1871 goto err;
1872 }
1883 1873
1884 ret = sel_make_avc_files(dentry); 1874 ret = sel_make_avc_files(dentry);
1885 if (ret) 1875 if (ret)
1886 goto err; 1876 goto err;
1887 1877
1888 ret = -ENOMEM; 1878 dentry = sel_make_dir(sb->s_root, "initial_contexts", &sel_last_ino);
1889 dentry = d_alloc_name(sb->s_root, "initial_contexts"); 1879 if (IS_ERR(dentry)) {
1890 if (!dentry) 1880 ret = PTR_ERR(dentry);
1891 goto err;
1892
1893 ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
1894 if (ret)
1895 goto err; 1881 goto err;
1882 }
1896 1883
1897 ret = sel_make_initcon_files(dentry); 1884 ret = sel_make_initcon_files(dentry);
1898 if (ret) 1885 if (ret)
1899 goto err; 1886 goto err;
1900 1887
1901 ret = -ENOMEM; 1888 class_dir = sel_make_dir(sb->s_root, "class", &sel_last_ino);
1902 dentry = d_alloc_name(sb->s_root, "class"); 1889 if (IS_ERR(class_dir)) {
1903 if (!dentry) 1890 ret = PTR_ERR(class_dir);
1904 goto err; 1891 class_dir = NULL;
1905
1906 ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
1907 if (ret)
1908 goto err;
1909
1910 class_dir = dentry;
1911
1912 ret = -ENOMEM;
1913 dentry = d_alloc_name(sb->s_root, "policy_capabilities");
1914 if (!dentry)
1915 goto err; 1892 goto err;
1893 }
1916 1894
1917 ret = sel_make_dir(root_inode, dentry, &sel_last_ino); 1895 policycap_dir = sel_make_dir(sb->s_root, "policy_capabilities", &sel_last_ino);
1918 if (ret) 1896 if (IS_ERR(policycap_dir)) {
1897 ret = PTR_ERR(policycap_dir);
1898 policycap_dir = NULL;
1919 goto err; 1899 goto err;
1920 1900 }
1921 policycap_dir = dentry;
1922
1923 return 0; 1901 return 0;
1924err: 1902err:
1925 printk(KERN_ERR "SELinux: %s: failed while creating inodes\n", 1903 printk(KERN_ERR "SELinux: %s: failed while creating inodes\n",
diff --git a/sound/arm/pxa2xx-ac97-lib.c b/sound/arm/pxa2xx-ac97-lib.c
index d1aa4218f129..48d7c0aa5073 100644
--- a/sound/arm/pxa2xx-ac97-lib.c
+++ b/sound/arm/pxa2xx-ac97-lib.c
@@ -17,11 +17,12 @@
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/io.h>
20 21
21#include <sound/ac97_codec.h> 22#include <sound/ac97_codec.h>
22#include <sound/pxa2xx-lib.h> 23#include <sound/pxa2xx-lib.h>
23 24
24#include <asm/irq.h> 25#include <mach/irqs.h>
25#include <mach/regs-ac97.h> 26#include <mach/regs-ac97.h>
26#include <mach/audio.h> 27#include <mach/audio.h>
27 28
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 3a39626a82d6..afef72c4f0d3 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/io.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
16 17
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index 4fa1dbd8ee83..f7c2bb08055d 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -16,6 +16,7 @@
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/types.h>
19#include <linux/io.h> 20#include <linux/io.h>
20 21
21#include <sound/core.h> 22#include <sound/core.h>
@@ -467,15 +468,24 @@ static int __devinit atmel_abdac_probe(struct platform_device *pdev)
467 snd_card_set_dev(card, &pdev->dev); 468 snd_card_set_dev(card, &pdev->dev);
468 469
469 if (pdata->dws.dma_dev) { 470 if (pdata->dws.dma_dev) {
470 struct dw_dma_slave *dws = &pdata->dws;
471 dma_cap_mask_t mask; 471 dma_cap_mask_t mask;
472 472
473 dws->tx_reg = regs->start + DAC_DATA;
474
475 dma_cap_zero(mask); 473 dma_cap_zero(mask);
476 dma_cap_set(DMA_SLAVE, mask); 474 dma_cap_set(DMA_SLAVE, mask);
477 475
478 dac->dma.chan = dma_request_channel(mask, filter, dws); 476 dac->dma.chan = dma_request_channel(mask, filter, &pdata->dws);
477 if (dac->dma.chan) {
478 struct dma_slave_config dma_conf = {
479 .dst_addr = regs->start + DAC_DATA,
480 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
481 .src_maxburst = 1,
482 .dst_maxburst = 1,
483 .direction = DMA_MEM_TO_DEV,
484 .device_fc = false,
485 };
486
487 dmaengine_slave_config(dac->dma.chan, &dma_conf);
488 }
479 } 489 }
480 if (!pdata->dws.dma_dev || !dac->dma.chan) { 490 if (!pdata->dws.dma_dev || !dac->dma.chan) {
481 dev_dbg(&pdev->dev, "DMA not available\n"); 491 dev_dbg(&pdev->dev, "DMA not available\n");
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 61dade698358..115313ef54d6 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -20,6 +20,7 @@
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <linux/types.h>
23#include <linux/io.h> 24#include <linux/io.h>
24 25
25#include <sound/core.h> 26#include <sound/core.h>
@@ -1014,16 +1015,28 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev)
1014 1015
1015 if (cpu_is_at32ap7000()) { 1016 if (cpu_is_at32ap7000()) {
1016 if (pdata->rx_dws.dma_dev) { 1017 if (pdata->rx_dws.dma_dev) {
1017 struct dw_dma_slave *dws = &pdata->rx_dws;
1018 dma_cap_mask_t mask; 1018 dma_cap_mask_t mask;
1019 1019
1020 dws->rx_reg = regs->start + AC97C_CARHR + 2;
1021
1022 dma_cap_zero(mask); 1020 dma_cap_zero(mask);
1023 dma_cap_set(DMA_SLAVE, mask); 1021 dma_cap_set(DMA_SLAVE, mask);
1024 1022
1025 chip->dma.rx_chan = dma_request_channel(mask, filter, 1023 chip->dma.rx_chan = dma_request_channel(mask, filter,
1026 dws); 1024 &pdata->rx_dws);
1025 if (chip->dma.rx_chan) {
1026 struct dma_slave_config dma_conf = {
1027 .src_addr = regs->start + AC97C_CARHR +
1028 2,
1029 .src_addr_width =
1030 DMA_SLAVE_BUSWIDTH_2_BYTES,
1031 .src_maxburst = 1,
1032 .dst_maxburst = 1,
1033 .direction = DMA_DEV_TO_MEM,
1034 .device_fc = false,
1035 };
1036
1037 dmaengine_slave_config(chip->dma.rx_chan,
1038 &dma_conf);
1039 }
1027 1040
1028 dev_info(&chip->pdev->dev, "using %s for DMA RX\n", 1041 dev_info(&chip->pdev->dev, "using %s for DMA RX\n",
1029 dev_name(&chip->dma.rx_chan->dev->device)); 1042 dev_name(&chip->dma.rx_chan->dev->device));
@@ -1031,16 +1044,28 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev)
1031 } 1044 }
1032 1045
1033 if (pdata->tx_dws.dma_dev) { 1046 if (pdata->tx_dws.dma_dev) {
1034 struct dw_dma_slave *dws = &pdata->tx_dws;
1035 dma_cap_mask_t mask; 1047 dma_cap_mask_t mask;
1036 1048
1037 dws->tx_reg = regs->start + AC97C_CATHR + 2;
1038
1039 dma_cap_zero(mask); 1049 dma_cap_zero(mask);
1040 dma_cap_set(DMA_SLAVE, mask); 1050 dma_cap_set(DMA_SLAVE, mask);
1041 1051
1042 chip->dma.tx_chan = dma_request_channel(mask, filter, 1052 chip->dma.tx_chan = dma_request_channel(mask, filter,
1043 dws); 1053 &pdata->tx_dws);
1054 if (chip->dma.tx_chan) {
1055 struct dma_slave_config dma_conf = {
1056 .dst_addr = regs->start + AC97C_CATHR +
1057 2,
1058 .dst_addr_width =
1059 DMA_SLAVE_BUSWIDTH_2_BYTES,
1060 .src_maxburst = 1,
1061 .dst_maxburst = 1,
1062 .direction = DMA_MEM_TO_DEV,
1063 .device_fc = false,
1064 };
1065
1066 dmaengine_slave_config(chip->dma.tx_chan,
1067 &dma_conf);
1068 }
1044 1069
1045 dev_info(&chip->pdev->dev, "using %s for DMA TX\n", 1070 dev_info(&chip->pdev->dev, "using %s for DMA TX\n",
1046 dev_name(&chip->dma.tx_chan->dev->device)); 1071 dev_name(&chip->dma.tx_chan->dev->device));
diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
index bbe32d2177d9..dbc550716790 100644
--- a/sound/core/seq/seq_dummy.c
+++ b/sound/core/seq/seq_dummy.c
@@ -46,7 +46,7 @@
46 46
47 The number of ports to be created can be specified via the module 47 The number of ports to be created can be specified via the module
48 parameter "ports". For example, to create four ports, add the 48 parameter "ports". For example, to create four ports, add the
49 following option in /etc/modprobe.conf: 49 following option in a configuration file under /etc/modprobe.d/:
50 50
51 option snd-seq-dummy ports=4 51 option snd-seq-dummy ports=4
52 52
diff --git a/sound/drivers/Kconfig b/sound/drivers/Kconfig
index c8961165277c..fe5ae09ffccb 100644
--- a/sound/drivers/Kconfig
+++ b/sound/drivers/Kconfig
@@ -50,7 +50,8 @@ config SND_PCSP
50 before the other sound driver of yours, making the 50 before the other sound driver of yours, making the
51 pc-speaker a default sound device. Which is likely not 51 pc-speaker a default sound device. Which is likely not
52 what you want. To make this driver play nicely with other 52 what you want. To make this driver play nicely with other
53 sound driver, you can add this into your /etc/modprobe.conf: 53 sound driver, you can add this in a configuration file under
54 /etc/modprobe.d/ directory:
54 options snd-pcsp index=2 55 options snd-pcsp index=2
55 56
56 You don't need this driver if you only want your pc-speaker to beep. 57 You don't need this driver if you only want your pc-speaker to beep.
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index babaedd242f7..d7ccf28bd66a 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -65,7 +65,7 @@ static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
65static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */ 65static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
66//static bool enable = SNDRV_DEFAULT_ENABLE1; /* Enable this card */ 66//static bool enable = SNDRV_DEFAULT_ENABLE1; /* Enable this card */
67#ifdef CONFIG_PNP 67#ifdef CONFIG_PNP
68static int isapnp = 1; /* Enable ISA PnP detection */ 68static bool isapnp = true; /* Enable ISA PnP detection */
69#endif 69#endif
70static long port = SNDRV_DEFAULT_PORT1; /* 0x530,0xe80,0xf40,0x604 */ 70static long port = SNDRV_DEFAULT_PORT1; /* 0x530,0xe80,0xf40,0x604 */
71static long mpu_port = SNDRV_DEFAULT_PORT1; /* 0x300,0x310,0x320,0x330 */ 71static long mpu_port = SNDRV_DEFAULT_PORT1; /* 0x300,0x310,0x320,0x330 */
diff --git a/sound/oss/msnd_pinnacle.c b/sound/oss/msnd_pinnacle.c
index eba734560f6f..2c79d60a725f 100644
--- a/sound/oss/msnd_pinnacle.c
+++ b/sound/oss/msnd_pinnacle.c
@@ -1631,7 +1631,7 @@ static int ide_irq __initdata = 0;
1631static int joystick_io __initdata = 0; 1631static int joystick_io __initdata = 0;
1632 1632
1633/* If we have the digital daugherboard... */ 1633/* If we have the digital daugherboard... */
1634static int digital __initdata = 0; 1634static bool digital __initdata = false;
1635#endif 1635#endif
1636 1636
1637static int fifosize __initdata = DEFFIFOSIZE; 1637static int fifosize __initdata = DEFFIFOSIZE;
diff --git a/sound/pci/asihpi/hpi_internal.h b/sound/pci/asihpi/hpi_internal.h
index 4cc315daeda0..8c63200cf339 100644
--- a/sound/pci/asihpi/hpi_internal.h
+++ b/sound/pci/asihpi/hpi_internal.h
@@ -42,7 +42,7 @@ On error *pLockedMemHandle marked invalid, non-zero returned.
42If this function succeeds, then HpiOs_LockedMem_GetVirtAddr() and 42If this function succeeds, then HpiOs_LockedMem_GetVirtAddr() and
43HpiOs_LockedMem_GetPyhsAddr() will always succed on the returned handle. 43HpiOs_LockedMem_GetPyhsAddr() will always succed on the returned handle.
44*/ 44*/
45u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_locked_mem_handle, 45int hpios_locked_mem_alloc(struct consistent_dma_area *p_locked_mem_handle,
46 /**< memory handle */ 46 /**< memory handle */
47 u32 size, /**< Size in bytes to allocate */ 47 u32 size, /**< Size in bytes to allocate */
48 struct pci_dev *p_os_reference 48 struct pci_dev *p_os_reference
diff --git a/sound/pci/asihpi/hpios.c b/sound/pci/asihpi/hpios.c
index 2d7d1c2e1d0d..87f4385fe8c7 100644
--- a/sound/pci/asihpi/hpios.c
+++ b/sound/pci/asihpi/hpios.c
@@ -43,7 +43,7 @@ void hpios_delay_micro_seconds(u32 num_micro_sec)
43 43
44On error, return -ENOMEM, and *pMemArea.size = 0 44On error, return -ENOMEM, and *pMemArea.size = 0
45*/ 45*/
46u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size, 46int hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size,
47 struct pci_dev *pdev) 47 struct pci_dev *pdev)
48{ 48{
49 /*?? any benefit in using managed dmam_alloc_coherent? */ 49 /*?? any benefit in using managed dmam_alloc_coherent? */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8ea2fd654327..9917e55d6f11 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2717,9 +2717,6 @@ static int alc_auto_fill_adc_caps(struct hda_codec *codec)
2717 int max_nums = ARRAY_SIZE(spec->private_adc_nids); 2717 int max_nums = ARRAY_SIZE(spec->private_adc_nids);
2718 int i, nums = 0; 2718 int i, nums = 0;
2719 2719
2720 if (spec->shared_mic_hp)
2721 max_nums = 1; /* no multi streams with the shared HP/mic */
2722
2723 nid = codec->start_nid; 2720 nid = codec->start_nid;
2724 for (i = 0; i < codec->num_nodes; i++, nid++) { 2721 for (i = 0; i < codec->num_nodes; i++, nid++) {
2725 hda_nid_t src; 2722 hda_nid_t src;
@@ -4076,6 +4073,7 @@ static void alc_remove_invalid_adc_nids(struct hda_codec *codec)
4076 if (spec->dyn_adc_switch) 4073 if (spec->dyn_adc_switch)
4077 return; 4074 return;
4078 4075
4076 again:
4079 nums = 0; 4077 nums = 0;
4080 for (n = 0; n < spec->num_adc_nids; n++) { 4078 for (n = 0; n < spec->num_adc_nids; n++) {
4081 hda_nid_t cap = spec->private_capsrc_nids[n]; 4079 hda_nid_t cap = spec->private_capsrc_nids[n];
@@ -4096,6 +4094,11 @@ static void alc_remove_invalid_adc_nids(struct hda_codec *codec)
4096 if (!nums) { 4094 if (!nums) {
4097 /* check whether ADC-switch is possible */ 4095 /* check whether ADC-switch is possible */
4098 if (!alc_check_dyn_adc_switch(codec)) { 4096 if (!alc_check_dyn_adc_switch(codec)) {
4097 if (spec->shared_mic_hp) {
4098 spec->shared_mic_hp = 0;
4099 spec->private_imux[0].num_items = 1;
4100 goto again;
4101 }
4099 printk(KERN_WARNING "hda_codec: %s: no valid ADC found;" 4102 printk(KERN_WARNING "hda_codec: %s: no valid ADC found;"
4100 " using fallback 0x%x\n", 4103 " using fallback 0x%x\n",
4101 codec->chip_name, spec->private_adc_nids[0]); 4104 codec->chip_name, spec->private_adc_nids[0]);
@@ -4113,7 +4116,7 @@ static void alc_remove_invalid_adc_nids(struct hda_codec *codec)
4113 4116
4114 if (spec->auto_mic) 4117 if (spec->auto_mic)
4115 alc_auto_mic_check_imux(codec); /* check auto-mic setups */ 4118 alc_auto_mic_check_imux(codec); /* check auto-mic setups */
4116 else if (spec->input_mux->num_items == 1) 4119 else if (spec->input_mux->num_items == 1 || spec->shared_mic_hp)
4117 spec->num_adc_nids = 1; /* reduce to a single ADC */ 4120 spec->num_adc_nids = 1; /* reduce to a single ADC */
4118} 4121}
4119 4122
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index fe7fbaeb7146..7c49642af052 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3629,7 +3629,7 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
3629 case 2: 3629 case 2:
3630 case 3: 3630 case 3:
3631 wm8994->hubs.dcs_codes_l = -9; 3631 wm8994->hubs.dcs_codes_l = -9;
3632 wm8994->hubs.dcs_codes_r = -5; 3632 wm8994->hubs.dcs_codes_r = -7;
3633 break; 3633 break;
3634 default: 3634 default:
3635 break; 3635 break;
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c
index e43c8fa2788b..6b818de2fc03 100644
--- a/sound/soc/imx/imx-pcm-dma-mx2.c
+++ b/sound/soc/imx/imx-pcm-dma-mx2.c
@@ -21,6 +21,7 @@
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/dmaengine.h> 23#include <linux/dmaengine.h>
24#include <linux/types.h>
24 25
25#include <sound/core.h> 26#include <sound/core.h>
26#include <sound/initval.h> 27#include <sound/initval.h>
@@ -58,6 +59,8 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
58 if (ret) 59 if (ret)
59 return ret; 60 return ret;
60 61
62 slave_config.device_fc = false;
63
61 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 64 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
62 slave_config.dst_addr = dma_params->dma_addr; 65 slave_config.dst_addr = dma_params->dma_addr;
63 slave_config.dst_maxburst = dma_params->burstsize; 66 slave_config.dst_maxburst = dma_params->burstsize;
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c
index 6ca1f46d84a4..e373fbbc97a0 100644
--- a/sound/soc/mxs/mxs-pcm.c
+++ b/sound/soc/mxs/mxs-pcm.c
@@ -28,6 +28,7 @@
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/dmaengine.h> 30#include <linux/dmaengine.h>
31#include <linux/fsl/mxs-dma.h>
31 32
32#include <sound/core.h> 33#include <sound/core.h>
33#include <sound/initval.h> 34#include <sound/initval.h>
@@ -36,7 +37,6 @@
36#include <sound/soc.h> 37#include <sound/soc.h>
37#include <sound/dmaengine_pcm.h> 38#include <sound/dmaengine_pcm.h>
38 39
39#include <mach/dma.h>
40#include "mxs-pcm.h" 40#include "mxs-pcm.h"
41 41
42struct mxs_pcm_dma_data { 42struct mxs_pcm_dma_data {
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index 12be05b16880..53f4fd8feced 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -24,12 +24,12 @@
24#include <linux/clk.h> 24#include <linux/clk.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/time.h> 26#include <linux/time.h>
27#include <linux/fsl/mxs-dma.h>
27#include <sound/core.h> 28#include <sound/core.h>
28#include <sound/pcm.h> 29#include <sound/pcm.h>
29#include <sound/pcm_params.h> 30#include <sound/pcm_params.h>
30#include <sound/soc.h> 31#include <sound/soc.h>
31#include <sound/saif.h> 32#include <sound/saif.h>
32#include <mach/dma.h>
33#include <asm/mach-types.h> 33#include <asm/mach-types.h>
34#include <mach/hardware.h> 34#include <mach/hardware.h>
35#include <mach/mxs.h> 35#include <mach/mxs.h>
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index 4800d5fe568d..06ea2744cc88 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/io.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
16 17
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index 0193e595d415..5cfcc655e95f 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -130,7 +130,7 @@ static int siu_pcm_wr_set(struct siu_port *port_info,
130 sg_dma_len(&sg) = size; 130 sg_dma_len(&sg) = size;
131 sg_dma_address(&sg) = buff; 131 sg_dma_address(&sg) = buff;
132 132
133 desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan, 133 desc = dmaengine_prep_slave_sg(siu_stream->chan,
134 &sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 134 &sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
135 if (!desc) { 135 if (!desc) {
136 dev_err(dev, "Failed to allocate a dma descriptor\n"); 136 dev_err(dev, "Failed to allocate a dma descriptor\n");
@@ -180,7 +180,7 @@ static int siu_pcm_rd_set(struct siu_port *port_info,
180 sg_dma_len(&sg) = size; 180 sg_dma_len(&sg) = size;
181 sg_dma_address(&sg) = buff; 181 sg_dma_address(&sg) = buff;
182 182
183 desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan, 183 desc = dmaengine_prep_slave_sg(siu_stream->chan,
184 &sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 184 &sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
185 if (!desc) { 185 if (!desc) {
186 dev_err(dev, "Failed to allocate dma descriptor\n"); 186 dev_err(dev, "Failed to allocate dma descriptor\n");
diff --git a/sound/soc/soc-dmaengine-pcm.c b/sound/soc/soc-dmaengine-pcm.c
index 4420b7030c83..475695234b3d 100644
--- a/sound/soc/soc-dmaengine-pcm.c
+++ b/sound/soc/soc-dmaengine-pcm.c
@@ -143,7 +143,7 @@ static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
143 direction = snd_pcm_substream_to_dma_direction(substream); 143 direction = snd_pcm_substream_to_dma_direction(substream);
144 144
145 prtd->pos = 0; 145 prtd->pos = 0;
146 desc = chan->device->device_prep_dma_cyclic(chan, 146 desc = dmaengine_prep_dma_cyclic(chan,
147 substream->runtime->dma_addr, 147 substream->runtime->dma_addr,
148 snd_pcm_lib_buffer_bytes(substream), 148 snd_pcm_lib_buffer_bytes(substream),
149 snd_pcm_lib_period_bytes(substream), direction); 149 snd_pcm_lib_period_bytes(substream), direction);
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 21554611557c..b609d2c64c55 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -132,7 +132,7 @@ txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr)
132 sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf_dma_addr)), 132 sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf_dma_addr)),
133 dmadata->frag_bytes, buf_dma_addr & (PAGE_SIZE - 1)); 133 dmadata->frag_bytes, buf_dma_addr & (PAGE_SIZE - 1));
134 sg_dma_address(&sg) = buf_dma_addr; 134 sg_dma_address(&sg) = buf_dma_addr;
135 desc = chan->device->device_prep_slave_sg(chan, &sg, 1, 135 desc = dmaengine_prep_slave_sg(chan, &sg, 1,
136 dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? 136 dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
137 DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, 137 DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
138 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 138 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 87feeee8b90c..2d89f02719b5 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -48,6 +48,9 @@ OPTIONS
48 Only consider these symbols. CSV that understands 48 Only consider these symbols. CSV that understands
49 file://filename entries. 49 file://filename entries.
50 50
51--symbol-filter=::
52 Only show symbols that match (partially) with this filter.
53
51-U:: 54-U::
52--hide-unresolved:: 55--hide-unresolved::
53 Only display entries resolved to a symbol. 56 Only display entries resolved to a symbol.
@@ -110,6 +113,8 @@ OPTIONS
110 requires a tty, if one is not present, as when piping to other 113 requires a tty, if one is not present, as when piping to other
111 commands, the stdio interface is used. 114 commands, the stdio interface is used.
112 115
116--gtk:: Use the GTK2 interface.
117
113-k:: 118-k::
114--vmlinux=<file>:: 119--vmlinux=<file>::
115 vmlinux pathname 120 vmlinux pathname
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 74fd7f89208a..820371f10d1b 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -182,7 +182,7 @@ endif
182 182
183### --- END CONFIGURATION SECTION --- 183### --- END CONFIGURATION SECTION ---
184 184
185BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE 185BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)/util -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
186BASIC_LDFLAGS = 186BASIC_LDFLAGS =
187 187
188# Guard against environment variables 188# Guard against environment variables
@@ -234,6 +234,25 @@ endif
234 234
235export PERL_PATH 235export PERL_PATH
236 236
237FLEX = $(CROSS_COMPILE)flex
238BISON= $(CROSS_COMPILE)bison
239
240event-parser:
241 $(QUIET_BISON)$(BISON) -v util/parse-events.y -d -o $(OUTPUT)util/parse-events-bison.c
242 $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
243
244$(OUTPUT)util/parse-events-flex.c: event-parser
245$(OUTPUT)util/parse-events-bison.c: event-parser
246
247pmu-parser:
248 $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c
249 $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
250
251$(OUTPUT)util/pmu-flex.c: pmu-parser
252$(OUTPUT)util/pmu-bison.c: pmu-parser
253
254$(OUTPUT)util/parse-events.o: event-parser pmu-parser
255
237LIB_FILE=$(OUTPUT)libperf.a 256LIB_FILE=$(OUTPUT)libperf.a
238 257
239LIB_H += ../../include/linux/perf_event.h 258LIB_H += ../../include/linux/perf_event.h
@@ -249,7 +268,7 @@ LIB_H += util/include/linux/const.h
249LIB_H += util/include/linux/ctype.h 268LIB_H += util/include/linux/ctype.h
250LIB_H += util/include/linux/kernel.h 269LIB_H += util/include/linux/kernel.h
251LIB_H += util/include/linux/list.h 270LIB_H += util/include/linux/list.h
252LIB_H += util/include/linux/module.h 271LIB_H += util/include/linux/export.h
253LIB_H += util/include/linux/poison.h 272LIB_H += util/include/linux/poison.h
254LIB_H += util/include/linux/prefetch.h 273LIB_H += util/include/linux/prefetch.h
255LIB_H += util/include/linux/rbtree.h 274LIB_H += util/include/linux/rbtree.h
@@ -276,6 +295,7 @@ LIB_H += util/build-id.h
276LIB_H += util/debug.h 295LIB_H += util/debug.h
277LIB_H += util/debugfs.h 296LIB_H += util/debugfs.h
278LIB_H += util/sysfs.h 297LIB_H += util/sysfs.h
298LIB_H += util/pmu.h
279LIB_H += util/event.h 299LIB_H += util/event.h
280LIB_H += util/evsel.h 300LIB_H += util/evsel.h
281LIB_H += util/evlist.h 301LIB_H += util/evlist.h
@@ -323,6 +343,7 @@ LIB_OBJS += $(OUTPUT)util/config.o
323LIB_OBJS += $(OUTPUT)util/ctype.o 343LIB_OBJS += $(OUTPUT)util/ctype.o
324LIB_OBJS += $(OUTPUT)util/debugfs.o 344LIB_OBJS += $(OUTPUT)util/debugfs.o
325LIB_OBJS += $(OUTPUT)util/sysfs.o 345LIB_OBJS += $(OUTPUT)util/sysfs.o
346LIB_OBJS += $(OUTPUT)util/pmu.o
326LIB_OBJS += $(OUTPUT)util/environment.o 347LIB_OBJS += $(OUTPUT)util/environment.o
327LIB_OBJS += $(OUTPUT)util/event.o 348LIB_OBJS += $(OUTPUT)util/event.o
328LIB_OBJS += $(OUTPUT)util/evlist.o 349LIB_OBJS += $(OUTPUT)util/evlist.o
@@ -359,6 +380,10 @@ LIB_OBJS += $(OUTPUT)util/session.o
359LIB_OBJS += $(OUTPUT)util/thread.o 380LIB_OBJS += $(OUTPUT)util/thread.o
360LIB_OBJS += $(OUTPUT)util/thread_map.o 381LIB_OBJS += $(OUTPUT)util/thread_map.o
361LIB_OBJS += $(OUTPUT)util/trace-event-parse.o 382LIB_OBJS += $(OUTPUT)util/trace-event-parse.o
383LIB_OBJS += $(OUTPUT)util/parse-events-flex.o
384LIB_OBJS += $(OUTPUT)util/parse-events-bison.o
385LIB_OBJS += $(OUTPUT)util/pmu-flex.o
386LIB_OBJS += $(OUTPUT)util/pmu-bison.o
362LIB_OBJS += $(OUTPUT)util/trace-event-read.o 387LIB_OBJS += $(OUTPUT)util/trace-event-read.o
363LIB_OBJS += $(OUTPUT)util/trace-event-info.o 388LIB_OBJS += $(OUTPUT)util/trace-event-info.o
364LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o 389LIB_OBJS += $(OUTPUT)util/trace-event-scripting.o
@@ -501,6 +526,20 @@ else
501 endif 526 endif
502endif 527endif
503 528
529ifdef NO_GTK2
530 BASIC_CFLAGS += -DNO_GTK2
531else
532 FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0)
533 ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2)),y)
534 msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
535 BASIC_CFLAGS += -DNO_GTK2_SUPPORT
536 else
537 BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0)
538 EXTLIBS += $(shell pkg-config --libs gtk+-2.0)
539 LIB_OBJS += $(OUTPUT)util/gtk/browser.o
540 endif
541endif
542
504ifdef NO_LIBPERL 543ifdef NO_LIBPERL
505 BASIC_CFLAGS += -DNO_LIBPERL 544 BASIC_CFLAGS += -DNO_LIBPERL
506else 545else
@@ -647,6 +686,8 @@ ifndef V
647 QUIET_LINK = @echo ' ' LINK $@; 686 QUIET_LINK = @echo ' ' LINK $@;
648 QUIET_MKDIR = @echo ' ' MKDIR $@; 687 QUIET_MKDIR = @echo ' ' MKDIR $@;
649 QUIET_GEN = @echo ' ' GEN $@; 688 QUIET_GEN = @echo ' ' GEN $@;
689 QUIET_FLEX = @echo ' ' FLEX $@;
690 QUIET_BISON = @echo ' ' BISON $@;
650endif 691endif
651endif 692endif
652 693
@@ -727,12 +768,28 @@ $(OUTPUT)perf.o perf.spec \
727 $(SCRIPTS) \ 768 $(SCRIPTS) \
728 : $(OUTPUT)PERF-VERSION-FILE 769 : $(OUTPUT)PERF-VERSION-FILE
729 770
771.SUFFIXES:
772.SUFFIXES: .o .c .S .s
773
774# These two need to be here so that when O= is not used they take precedence
775# over the general rule for .o
776
777$(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
778 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
779
780$(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
781 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
782
730$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS 783$(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
731 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $< 784 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
785$(OUTPUT)%.i: %.c $(OUTPUT)PERF-CFLAGS
786 $(QUIET_CC)$(CC) -o $@ -E $(ALL_CFLAGS) $<
732$(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS 787$(OUTPUT)%.s: %.c $(OUTPUT)PERF-CFLAGS
733 $(QUIET_CC)$(CC) -S $(ALL_CFLAGS) $< 788 $(QUIET_CC)$(CC) -o $@ -S $(ALL_CFLAGS) $<
734$(OUTPUT)%.o: %.S 789$(OUTPUT)%.o: %.S
735 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $< 790 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
791$(OUTPUT)%.s: %.S
792 $(QUIET_CC)$(CC) -o $@ -E $(ALL_CFLAGS) $<
736 793
737$(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS 794$(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
738 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \ 795 $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
@@ -795,6 +852,8 @@ help:
795 @echo ' html - make html documentation' 852 @echo ' html - make html documentation'
796 @echo ' info - make GNU info documentation (access with info <foo>)' 853 @echo ' info - make GNU info documentation (access with info <foo>)'
797 @echo ' pdf - make pdf documentation' 854 @echo ' pdf - make pdf documentation'
855 @echo ' event-parser - make event parser code'
856 @echo ' pmu-parser - make pmu format parser code'
798 @echo ' TAGS - use etags to make tag information for source browsing' 857 @echo ' TAGS - use etags to make tag information for source browsing'
799 @echo ' tags - use ctags to make tag information for source browsing' 858 @echo ' tags - use ctags to make tag information for source browsing'
800 @echo ' cscope - use cscope to make interactive browsing database' 859 @echo ' cscope - use cscope to make interactive browsing database'
@@ -931,6 +990,7 @@ clean:
931 $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* 990 $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
932 $(MAKE) -C Documentation/ clean 991 $(MAKE) -C Documentation/ clean
933 $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS 992 $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
993 $(RM) $(OUTPUT)util/*-{bison,flex}*
934 $(python-clean) 994 $(python-clean)
935 995
936.PHONY: all install clean strip 996.PHONY: all install clean strip
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 4f19513d7dda..d29d350fb2b7 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -24,6 +24,11 @@ static char diff__default_sort_order[] = "dso,symbol";
24static bool force; 24static bool force;
25static bool show_displacement; 25static bool show_displacement;
26 26
27struct perf_diff {
28 struct perf_tool tool;
29 struct perf_session *session;
30};
31
27static int hists__add_entry(struct hists *self, 32static int hists__add_entry(struct hists *self,
28 struct addr_location *al, u64 period) 33 struct addr_location *al, u64 period)
29{ 34{
@@ -32,12 +37,14 @@ static int hists__add_entry(struct hists *self,
32 return -ENOMEM; 37 return -ENOMEM;
33} 38}
34 39
35static int diff__process_sample_event(struct perf_tool *tool __used, 40static int diff__process_sample_event(struct perf_tool *tool,
36 union perf_event *event, 41 union perf_event *event,
37 struct perf_sample *sample, 42 struct perf_sample *sample,
38 struct perf_evsel *evsel __used, 43 struct perf_evsel *evsel __used,
39 struct machine *machine) 44 struct machine *machine)
40{ 45{
46 struct perf_diff *_diff = container_of(tool, struct perf_diff, tool);
47 struct perf_session *session = _diff->session;
41 struct addr_location al; 48 struct addr_location al;
42 49
43 if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) { 50 if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) {
@@ -49,24 +56,26 @@ static int diff__process_sample_event(struct perf_tool *tool __used,
49 if (al.filtered || al.sym == NULL) 56 if (al.filtered || al.sym == NULL)
50 return 0; 57 return 0;
51 58
52 if (hists__add_entry(&evsel->hists, &al, sample->period)) { 59 if (hists__add_entry(&session->hists, &al, sample->period)) {
53 pr_warning("problem incrementing symbol period, skipping event\n"); 60 pr_warning("problem incrementing symbol period, skipping event\n");
54 return -1; 61 return -1;
55 } 62 }
56 63
57 evsel->hists.stats.total_period += sample->period; 64 session->hists.stats.total_period += sample->period;
58 return 0; 65 return 0;
59} 66}
60 67
61static struct perf_tool perf_diff = { 68static struct perf_diff diff = {
62 .sample = diff__process_sample_event, 69 .tool = {
63 .mmap = perf_event__process_mmap, 70 .sample = diff__process_sample_event,
64 .comm = perf_event__process_comm, 71 .mmap = perf_event__process_mmap,
65 .exit = perf_event__process_task, 72 .comm = perf_event__process_comm,
66 .fork = perf_event__process_task, 73 .exit = perf_event__process_task,
67 .lost = perf_event__process_lost, 74 .fork = perf_event__process_task,
68 .ordered_samples = true, 75 .lost = perf_event__process_lost,
69 .ordering_requires_timestamps = true, 76 .ordered_samples = true,
77 .ordering_requires_timestamps = true,
78 },
70}; 79};
71 80
72static void perf_session__insert_hist_entry_by_name(struct rb_root *root, 81static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
@@ -107,12 +116,6 @@ static void hists__resort_entries(struct hists *self)
107 self->entries = tmp; 116 self->entries = tmp;
108} 117}
109 118
110static void hists__set_positions(struct hists *self)
111{
112 hists__output_resort(self);
113 hists__resort_entries(self);
114}
115
116static struct hist_entry *hists__find_entry(struct hists *self, 119static struct hist_entry *hists__find_entry(struct hists *self,
117 struct hist_entry *he) 120 struct hist_entry *he)
118{ 121{
@@ -146,30 +149,37 @@ static void hists__match(struct hists *older, struct hists *newer)
146static int __cmd_diff(void) 149static int __cmd_diff(void)
147{ 150{
148 int ret, i; 151 int ret, i;
152#define older (session[0])
153#define newer (session[1])
149 struct perf_session *session[2]; 154 struct perf_session *session[2];
150 155
151 session[0] = perf_session__new(input_old, O_RDONLY, force, false, &perf_diff); 156 older = perf_session__new(input_old, O_RDONLY, force, false,
152 session[1] = perf_session__new(input_new, O_RDONLY, force, false, &perf_diff); 157 &diff.tool);
158 newer = perf_session__new(input_new, O_RDONLY, force, false,
159 &diff.tool);
153 if (session[0] == NULL || session[1] == NULL) 160 if (session[0] == NULL || session[1] == NULL)
154 return -ENOMEM; 161 return -ENOMEM;
155 162
156 for (i = 0; i < 2; ++i) { 163 for (i = 0; i < 2; ++i) {
157 ret = perf_session__process_events(session[i], &perf_diff); 164 diff.session = session[i];
165 ret = perf_session__process_events(session[i], &diff.tool);
158 if (ret) 166 if (ret)
159 goto out_delete; 167 goto out_delete;
168 hists__output_resort(&session[i]->hists);
160 } 169 }
161 170
162 hists__output_resort(&session[1]->hists);
163 if (show_displacement) 171 if (show_displacement)
164 hists__set_positions(&session[0]->hists); 172 hists__resort_entries(&older->hists);
165 173
166 hists__match(&session[0]->hists, &session[1]->hists); 174 hists__match(&older->hists, &newer->hists);
167 hists__fprintf(&session[1]->hists, &session[0]->hists, 175 hists__fprintf(&newer->hists, &older->hists,
168 show_displacement, true, 0, 0, stdout); 176 show_displacement, true, 0, 0, stdout);
169out_delete: 177out_delete:
170 for (i = 0; i < 2; ++i) 178 for (i = 0; i < 2; ++i)
171 perf_session__delete(session[i]); 179 perf_session__delete(session[i]);
172 return ret; 180 return ret;
181#undef older
182#undef newer
173} 183}
174 184
175static const char * const diff_usage[] = { 185static const char * const diff_usage[] = {
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 8e91c6eba18a..2e317438980b 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -40,7 +40,7 @@ struct perf_report {
40 struct perf_tool tool; 40 struct perf_tool tool;
41 struct perf_session *session; 41 struct perf_session *session;
42 char const *input_name; 42 char const *input_name;
43 bool force, use_tui, use_stdio; 43 bool force, use_tui, use_gtk, use_stdio;
44 bool hide_unresolved; 44 bool hide_unresolved;
45 bool dont_use_callchains; 45 bool dont_use_callchains;
46 bool show_full_info; 46 bool show_full_info;
@@ -50,6 +50,7 @@ struct perf_report {
50 const char *pretty_printing_style; 50 const char *pretty_printing_style;
51 symbol_filter_t annotate_init; 51 symbol_filter_t annotate_init;
52 const char *cpu_list; 52 const char *cpu_list;
53 const char *symbol_filter_str;
53 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); 54 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
54}; 55};
55 56
@@ -400,6 +401,9 @@ static int __cmd_report(struct perf_report *rep)
400 list_for_each_entry(pos, &session->evlist->entries, node) { 401 list_for_each_entry(pos, &session->evlist->entries, node) {
401 struct hists *hists = &pos->hists; 402 struct hists *hists = &pos->hists;
402 403
404 if (pos->idx == 0)
405 hists->symbol_filter_str = rep->symbol_filter_str;
406
403 hists__collapse_resort(hists); 407 hists__collapse_resort(hists);
404 hists__output_resort(hists); 408 hists__output_resort(hists);
405 nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE]; 409 nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE];
@@ -411,8 +415,13 @@ static int __cmd_report(struct perf_report *rep)
411 } 415 }
412 416
413 if (use_browser > 0) { 417 if (use_browser > 0) {
414 perf_evlist__tui_browse_hists(session->evlist, help, 418 if (use_browser == 1) {
415 NULL, NULL, 0); 419 perf_evlist__tui_browse_hists(session->evlist, help,
420 NULL, NULL, 0);
421 } else if (use_browser == 2) {
422 perf_evlist__gtk_browse_hists(session->evlist, help,
423 NULL, NULL, 0);
424 }
416 } else 425 } else
417 perf_evlist__tty_browse_hists(session->evlist, rep, help); 426 perf_evlist__tty_browse_hists(session->evlist, rep, help);
418 427
@@ -569,6 +578,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
569 OPT_STRING(0, "pretty", &report.pretty_printing_style, "key", 578 OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
570 "pretty printing style key: normal raw"), 579 "pretty printing style key: normal raw"),
571 OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"), 580 OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
581 OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
572 OPT_BOOLEAN(0, "stdio", &report.use_stdio, 582 OPT_BOOLEAN(0, "stdio", &report.use_stdio,
573 "Use the stdio interface"), 583 "Use the stdio interface"),
574 OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 584 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
@@ -591,6 +601,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
591 "only consider symbols in these comms"), 601 "only consider symbols in these comms"),
592 OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", 602 OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
593 "only consider these symbols"), 603 "only consider these symbols"),
604 OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
605 "only show symbols that (partially) match with this filter"),
594 OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str, 606 OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
595 "width[,width...]", 607 "width[,width...]",
596 "don't try to adjust column width, use these fixed values"), 608 "don't try to adjust column width, use these fixed values"),
@@ -624,6 +636,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
624 use_browser = 0; 636 use_browser = 0;
625 else if (report.use_tui) 637 else if (report.use_tui)
626 use_browser = 1; 638 use_browser = 1;
639 else if (report.use_gtk)
640 use_browser = 2;
627 641
628 if (report.inverted_callchain) 642 if (report.inverted_callchain)
629 callchain_param.order = ORDER_CALLER; 643 callchain_param.order = ORDER_CALLER;
@@ -660,7 +674,10 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
660 } 674 }
661 675
662 if (strcmp(report.input_name, "-") != 0) { 676 if (strcmp(report.input_name, "-") != 0) {
663 setup_browser(true); 677 if (report.use_gtk)
678 perf_gtk_setup_browser(argc, argv, true);
679 else
680 setup_browser(true);
664 } else { 681 } else {
665 use_browser = 0; 682 use_browser = 0;
666 } 683 }
@@ -709,11 +726,16 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
709 } else 726 } else
710 symbol_conf.exclude_other = false; 727 symbol_conf.exclude_other = false;
711 728
712 /* 729 if (argc) {
713 * Any (unrecognized) arguments left? 730 /*
714 */ 731 * Special case: if there's an argument left then assume that
715 if (argc) 732 * it's a symbol filter:
716 usage_with_options(report_usage, options); 733 */
734 if (argc > 1)
735 usage_with_options(report_usage, options);
736
737 report.symbol_filter_str = argv[0];
738 }
717 739
718 sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout); 740 sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout);
719 741
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index ea40e4e8b227..c941bb640f49 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -296,7 +296,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
296 if (system_wide) 296 if (system_wide)
297 return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, 297 return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
298 group, group_fd); 298 group, group_fd);
299 if (!target_pid && !target_tid) { 299 if (!target_pid && !target_tid && (!group || evsel == first)) {
300 attr->disabled = 1; 300 attr->disabled = 1;
301 attr->enable_on_exec = 1; 301 attr->enable_on_exec = 1;
302 } 302 }
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 3e087ce8daa6..1c5b9801ac61 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -13,6 +13,7 @@
13#include "util/parse-events.h" 13#include "util/parse-events.h"
14#include "util/symbol.h" 14#include "util/symbol.h"
15#include "util/thread_map.h" 15#include "util/thread_map.h"
16#include "util/pmu.h"
16#include "../../include/linux/hw_breakpoint.h" 17#include "../../include/linux/hw_breakpoint.h"
17 18
18#include <sys/mman.h> 19#include <sys/mman.h>
@@ -650,7 +651,7 @@ static int test__checkevent_raw(struct perf_evlist *evlist)
650 651
651 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); 652 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
652 TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); 653 TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
653 TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); 654 TEST_ASSERT_VAL("wrong config", 0x1a == evsel->attr.config);
654 return 0; 655 return 0;
655} 656}
656 657
@@ -677,6 +678,24 @@ static int test__checkevent_symbolic_name(struct perf_evlist *evlist)
677 return 0; 678 return 0;
678} 679}
679 680
681static int test__checkevent_symbolic_name_config(struct perf_evlist *evlist)
682{
683 struct perf_evsel *evsel = list_entry(evlist->entries.next,
684 struct perf_evsel, node);
685
686 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
687 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
688 TEST_ASSERT_VAL("wrong config",
689 PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
690 TEST_ASSERT_VAL("wrong period",
691 100000 == evsel->attr.sample_period);
692 TEST_ASSERT_VAL("wrong config1",
693 0 == evsel->attr.config1);
694 TEST_ASSERT_VAL("wrong config2",
695 1 == evsel->attr.config2);
696 return 0;
697}
698
680static int test__checkevent_symbolic_alias(struct perf_evlist *evlist) 699static int test__checkevent_symbolic_alias(struct perf_evlist *evlist)
681{ 700{
682 struct perf_evsel *evsel = list_entry(evlist->entries.next, 701 struct perf_evsel *evsel = list_entry(evlist->entries.next,
@@ -858,6 +877,115 @@ static int test__checkevent_genhw_modifier(struct perf_evlist *evlist)
858 return test__checkevent_genhw(evlist); 877 return test__checkevent_genhw(evlist);
859} 878}
860 879
880static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist)
881{
882 struct perf_evsel *evsel = list_entry(evlist->entries.next,
883 struct perf_evsel, node);
884
885 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
886 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
887 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
888 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
889
890 return test__checkevent_breakpoint(evlist);
891}
892
893static int test__checkevent_breakpoint_x_modifier(struct perf_evlist *evlist)
894{
895 struct perf_evsel *evsel = list_entry(evlist->entries.next,
896 struct perf_evsel, node);
897
898 TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
899 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
900 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
901 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
902
903 return test__checkevent_breakpoint_x(evlist);
904}
905
906static int test__checkevent_breakpoint_r_modifier(struct perf_evlist *evlist)
907{
908 struct perf_evsel *evsel = list_entry(evlist->entries.next,
909 struct perf_evsel, node);
910
911 TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
912 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
913 TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
914 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
915
916 return test__checkevent_breakpoint_r(evlist);
917}
918
919static int test__checkevent_breakpoint_w_modifier(struct perf_evlist *evlist)
920{
921 struct perf_evsel *evsel = list_entry(evlist->entries.next,
922 struct perf_evsel, node);
923
924 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
925 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
926 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
927 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
928
929 return test__checkevent_breakpoint_w(evlist);
930}
931
932static int test__checkevent_pmu(struct perf_evlist *evlist)
933{
934
935 struct perf_evsel *evsel = list_entry(evlist->entries.next,
936 struct perf_evsel, node);
937
938 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
939 TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
940 TEST_ASSERT_VAL("wrong config", 10 == evsel->attr.config);
941 TEST_ASSERT_VAL("wrong config1", 1 == evsel->attr.config1);
942 TEST_ASSERT_VAL("wrong config2", 3 == evsel->attr.config2);
943 TEST_ASSERT_VAL("wrong period", 1000 == evsel->attr.sample_period);
944
945 return 0;
946}
947
948static int test__checkevent_list(struct perf_evlist *evlist)
949{
950 struct perf_evsel *evsel;
951
952 TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries);
953
954 /* r1 */
955 evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
956 TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
957 TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
958 TEST_ASSERT_VAL("wrong config1", 0 == evsel->attr.config1);
959 TEST_ASSERT_VAL("wrong config2", 0 == evsel->attr.config2);
960 TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
961 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
962 TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
963 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
964
965 /* syscalls:sys_enter_open:k */
966 evsel = list_entry(evsel->node.next, struct perf_evsel, node);
967 TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
968 TEST_ASSERT_VAL("wrong sample_type",
969 (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
970 evsel->attr.sample_type);
971 TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
972 TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
973 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
974 TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
975 TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
976
977 /* 1:1:hp */
978 evsel = list_entry(evsel->node.next, struct perf_evsel, node);
979 TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type);
980 TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
981 TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
982 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
983 TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
984 TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
985
986 return 0;
987}
988
861static struct test__event_st { 989static struct test__event_st {
862 const char *name; 990 const char *name;
863 __u32 type; 991 __u32 type;
@@ -872,7 +1000,7 @@ static struct test__event_st {
872 .check = test__checkevent_tracepoint_multi, 1000 .check = test__checkevent_tracepoint_multi,
873 }, 1001 },
874 { 1002 {
875 .name = "r1", 1003 .name = "r1a",
876 .check = test__checkevent_raw, 1004 .check = test__checkevent_raw,
877 }, 1005 },
878 { 1006 {
@@ -884,6 +1012,10 @@ static struct test__event_st {
884 .check = test__checkevent_symbolic_name, 1012 .check = test__checkevent_symbolic_name,
885 }, 1013 },
886 { 1014 {
1015 .name = "cycles/period=100000,config2/",
1016 .check = test__checkevent_symbolic_name_config,
1017 },
1018 {
887 .name = "faults", 1019 .name = "faults",
888 .check = test__checkevent_symbolic_alias, 1020 .check = test__checkevent_symbolic_alias,
889 }, 1021 },
@@ -916,7 +1048,7 @@ static struct test__event_st {
916 .check = test__checkevent_tracepoint_multi_modifier, 1048 .check = test__checkevent_tracepoint_multi_modifier,
917 }, 1049 },
918 { 1050 {
919 .name = "r1:kp", 1051 .name = "r1a:kp",
920 .check = test__checkevent_raw_modifier, 1052 .check = test__checkevent_raw_modifier,
921 }, 1053 },
922 { 1054 {
@@ -935,6 +1067,30 @@ static struct test__event_st {
935 .name = "L1-dcache-load-miss:kp", 1067 .name = "L1-dcache-load-miss:kp",
936 .check = test__checkevent_genhw_modifier, 1068 .check = test__checkevent_genhw_modifier,
937 }, 1069 },
1070 {
1071 .name = "mem:0:u",
1072 .check = test__checkevent_breakpoint_modifier,
1073 },
1074 {
1075 .name = "mem:0:x:k",
1076 .check = test__checkevent_breakpoint_x_modifier,
1077 },
1078 {
1079 .name = "mem:0:r:hp",
1080 .check = test__checkevent_breakpoint_r_modifier,
1081 },
1082 {
1083 .name = "mem:0:w:up",
1084 .check = test__checkevent_breakpoint_w_modifier,
1085 },
1086 {
1087 .name = "cpu/config=10,config1,config2=3,period=1000/u",
1088 .check = test__checkevent_pmu,
1089 },
1090 {
1091 .name = "r1,syscalls:sys_enter_open:k,1:1:hp",
1092 .check = test__checkevent_list,
1093 },
938}; 1094};
939 1095
940#define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st)) 1096#define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))
@@ -960,10 +1116,9 @@ static int test__parse_events(void)
960 } 1116 }
961 1117
962 ret = e->check(evlist); 1118 ret = e->check(evlist);
1119 perf_evlist__delete(evlist);
963 if (ret) 1120 if (ret)
964 break; 1121 break;
965
966 perf_evlist__delete(evlist);
967 } 1122 }
968 1123
969 return ret; 1124 return ret;
@@ -1462,6 +1617,11 @@ static int test__rdpmc(void)
1462 1617
1463#endif 1618#endif
1464 1619
1620static int test__perf_pmu(void)
1621{
1622 return perf_pmu__test();
1623}
1624
1465static struct test { 1625static struct test {
1466 const char *desc; 1626 const char *desc;
1467 int (*func)(void); 1627 int (*func)(void);
@@ -1497,6 +1657,10 @@ static struct test {
1497 .func = test__PERF_RECORD, 1657 .func = test__PERF_RECORD,
1498 }, 1658 },
1499 { 1659 {
1660 .desc = "Test perf pmu format parsing",
1661 .func = test__perf_pmu,
1662 },
1663 {
1500 .func = NULL, 1664 .func = NULL,
1501 }, 1665 },
1502}; 1666};
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak
index 6170fd2531b5..d9084e03ce56 100644
--- a/tools/perf/config/feature-tests.mak
+++ b/tools/perf/config/feature-tests.mak
@@ -65,6 +65,21 @@ int main(void)
65endef 65endef
66endif 66endif
67 67
68ifndef NO_GTK2
69define SOURCE_GTK2
70#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
71#include <gtk/gtk.h>
72#pragma GCC diagnostic error \"-Wstrict-prototypes\"
73
74int main(int argc, char *argv[])
75{
76 gtk_init(&argc, &argv);
77
78 return 0;
79}
80endef
81endif
82
68ifndef NO_LIBPERL 83ifndef NO_LIBPERL
69define SOURCE_PERL_EMBED 84define SOURCE_PERL_EMBED
70#include <EXTERN.h> 85#include <EXTERN.h>
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index e5a462f1d07c..199f69ec656f 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -28,8 +28,8 @@ int symbol__annotate_init(struct map *map __used, struct symbol *sym)
28int symbol__alloc_hist(struct symbol *sym) 28int symbol__alloc_hist(struct symbol *sym)
29{ 29{
30 struct annotation *notes = symbol__annotation(sym); 30 struct annotation *notes = symbol__annotation(sym);
31 size_t sizeof_sym_hist = (sizeof(struct sym_hist) + 31 const size_t size = sym->end - sym->start + 1;
32 (sym->end - sym->start) * sizeof(u64)); 32 size_t sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));
33 33
34 notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist); 34 notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
35 if (notes->src == NULL) 35 if (notes->src == NULL)
@@ -64,7 +64,7 @@ int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
64 64
65 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr)); 65 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
66 66
67 if (addr >= sym->end) 67 if (addr > sym->end)
68 return 0; 68 return 0;
69 69
70 offset = addr - sym->start; 70 offset = addr - sym->start;
@@ -408,7 +408,7 @@ static int symbol__get_source_line(struct symbol *sym, struct map *map,
408 if (!notes->src->lines) 408 if (!notes->src->lines)
409 return -1; 409 return -1;
410 410
411 start = map->unmap_ip(map, sym->start); 411 start = map__rip_2objdump(map, sym->start);
412 412
413 for (i = 0; i < len; i++) { 413 for (i = 0; i < len; i++) {
414 char *path = NULL; 414 char *path = NULL;
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index fc5e5a09d5b9..8dd224df3e54 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -45,6 +45,18 @@ void setup_browser(bool fallback_to_pager);
45void exit_browser(bool wait_for_ok); 45void exit_browser(bool wait_for_ok);
46#endif 46#endif
47 47
48#ifdef NO_GTK2_SUPPORT
49static inline void perf_gtk_setup_browser(int argc __used, const char *argv[] __used, bool fallback_to_pager)
50{
51 if (fallback_to_pager)
52 setup_pager();
53}
54static inline void perf_gtk_exit_browser(bool wait_for_ok __used) {}
55#else
56void perf_gtk_setup_browser(int argc, const char *argv[], bool fallback_to_pager);
57void perf_gtk_exit_browser(bool wait_for_ok);
58#endif
59
48char *alias_lookup(const char *alias); 60char *alias_lookup(const char *alias);
49int split_cmdline(char *cmdline, const char ***argv); 61int split_cmdline(char *cmdline, const char ***argv);
50 62
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 159263d17c2d..1986d8051bd1 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -51,13 +51,15 @@ struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
51void perf_evlist__config_attrs(struct perf_evlist *evlist, 51void perf_evlist__config_attrs(struct perf_evlist *evlist,
52 struct perf_record_opts *opts) 52 struct perf_record_opts *opts)
53{ 53{
54 struct perf_evsel *evsel; 54 struct perf_evsel *evsel, *first;
55 55
56 if (evlist->cpus->map[0] < 0) 56 if (evlist->cpus->map[0] < 0)
57 opts->no_inherit = true; 57 opts->no_inherit = true;
58 58
59 first = list_entry(evlist->entries.next, struct perf_evsel, node);
60
59 list_for_each_entry(evsel, &evlist->entries, node) { 61 list_for_each_entry(evsel, &evlist->entries, node) {
60 perf_evsel__config(evsel, opts); 62 perf_evsel__config(evsel, opts, first);
61 63
62 if (evlist->nr_entries > 1) 64 if (evlist->nr_entries > 1)
63 evsel->attr.sample_type |= PERF_SAMPLE_ID; 65 evsel->attr.sample_type |= PERF_SAMPLE_ID;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f421f7cbc0d3..8c13dbcb84b9 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -34,7 +34,7 @@ int __perf_evsel__sample_size(u64 sample_type)
34 return size; 34 return size;
35} 35}
36 36
37static void hists__init(struct hists *hists) 37void hists__init(struct hists *hists)
38{ 38{
39 memset(hists, 0, sizeof(*hists)); 39 memset(hists, 0, sizeof(*hists));
40 hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT; 40 hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
@@ -63,7 +63,8 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
63 return evsel; 63 return evsel;
64} 64}
65 65
66void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts) 66void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
67 struct perf_evsel *first)
67{ 68{
68 struct perf_event_attr *attr = &evsel->attr; 69 struct perf_event_attr *attr = &evsel->attr;
69 int track = !evsel->idx; /* only the first counter needs these */ 70 int track = !evsel->idx; /* only the first counter needs these */
@@ -134,7 +135,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts)
134 attr->mmap = track; 135 attr->mmap = track;
135 attr->comm = track; 136 attr->comm = track;
136 137
137 if (!opts->target_pid && !opts->target_tid && !opts->system_wide) { 138 if (!opts->target_pid && !opts->target_tid && !opts->system_wide &&
139 (!opts->group || evsel == first)) {
138 attr->disabled = 1; 140 attr->disabled = 1;
139 attr->enable_on_exec = 1; 141 attr->enable_on_exec = 1;
140 } 142 }
@@ -578,6 +580,8 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
578 return -EFAULT; 580 return -EFAULT;
579 581
580 data->raw_data = (void *) pdata; 582 data->raw_data = (void *) pdata;
583
584 array = (void *)array + data->raw_size + sizeof(u32);
581 } 585 }
582 586
583 if (type & PERF_SAMPLE_BRANCH_STACK) { 587 if (type & PERF_SAMPLE_BRANCH_STACK) {
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 326b8e4d5035..3d6b3e4cb66b 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -80,7 +80,8 @@ void perf_evsel__exit(struct perf_evsel *evsel);
80void perf_evsel__delete(struct perf_evsel *evsel); 80void perf_evsel__delete(struct perf_evsel *evsel);
81 81
82void perf_evsel__config(struct perf_evsel *evsel, 82void perf_evsel__config(struct perf_evsel *evsel,
83 struct perf_record_opts *opts); 83 struct perf_record_opts *opts,
84 struct perf_evsel *first);
84 85
85int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); 86int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
86int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads); 87int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
@@ -169,4 +170,6 @@ static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
169 return __perf_evsel__sample_size(evsel->attr.sample_type); 170 return __perf_evsel__sample_size(evsel->attr.sample_type);
170} 171}
171 172
173void hists__init(struct hists *hists);
174
172#endif /* __PERF_EVSEL_H */ 175#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/gtk/browser.c b/tools/perf/util/gtk/browser.c
new file mode 100644
index 000000000000..258352a2356c
--- /dev/null
+++ b/tools/perf/util/gtk/browser.c
@@ -0,0 +1,189 @@
1#include "../evlist.h"
2#include "../cache.h"
3#include "../evsel.h"
4#include "../sort.h"
5#include "../hist.h"
6#include "gtk.h"
7
8#include <signal.h>
9
10#define MAX_COLUMNS 32
11
12void perf_gtk_setup_browser(int argc, const char *argv[],
13 bool fallback_to_pager __used)
14{
15 gtk_init(&argc, (char ***)&argv);
16}
17
18void perf_gtk_exit_browser(bool wait_for_ok __used)
19{
20 gtk_main_quit();
21}
22
23static void perf_gtk_signal(int sig)
24{
25 psignal(sig, "perf");
26 gtk_main_quit();
27}
28
29static void perf_gtk_resize_window(GtkWidget *window)
30{
31 GdkRectangle rect;
32 GdkScreen *screen;
33 int monitor;
34 int height;
35 int width;
36
37 screen = gtk_widget_get_screen(window);
38
39 monitor = gdk_screen_get_monitor_at_window(screen, window->window);
40
41 gdk_screen_get_monitor_geometry(screen, monitor, &rect);
42
43 width = rect.width * 3 / 4;
44 height = rect.height * 3 / 4;
45
46 gtk_window_resize(GTK_WINDOW(window), width, height);
47}
48
49static void perf_gtk_show_hists(GtkWidget *window, struct hists *hists)
50{
51 GType col_types[MAX_COLUMNS];
52 GtkCellRenderer *renderer;
53 struct sort_entry *se;
54 GtkListStore *store;
55 struct rb_node *nd;
56 u64 total_period;
57 GtkWidget *view;
58 int col_idx;
59 int nr_cols;
60
61 nr_cols = 0;
62
63 /* The percentage column */
64 col_types[nr_cols++] = G_TYPE_STRING;
65
66 list_for_each_entry(se, &hist_entry__sort_list, list) {
67 if (se->elide)
68 continue;
69
70 col_types[nr_cols++] = G_TYPE_STRING;
71 }
72
73 store = gtk_list_store_newv(nr_cols, col_types);
74
75 view = gtk_tree_view_new();
76
77 renderer = gtk_cell_renderer_text_new();
78
79 col_idx = 0;
80
81 /* The percentage column */
82 gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
83 -1, "Overhead (%)",
84 renderer, "text",
85 col_idx++, NULL);
86
87 list_for_each_entry(se, &hist_entry__sort_list, list) {
88 if (se->elide)
89 continue;
90
91 gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
92 -1, se->se_header,
93 renderer, "text",
94 col_idx++, NULL);
95 }
96
97 gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
98
99 g_object_unref(GTK_TREE_MODEL(store));
100
101 total_period = hists->stats.total_period;
102
103 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
104 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
105 GtkTreeIter iter;
106 double percent;
107 char s[512];
108
109 if (h->filtered)
110 continue;
111
112 gtk_list_store_append(store, &iter);
113
114 col_idx = 0;
115
116 percent = (h->period * 100.0) / total_period;
117
118 snprintf(s, ARRAY_SIZE(s), "%.2f", percent);
119
120 gtk_list_store_set(store, &iter, col_idx++, s, -1);
121
122 list_for_each_entry(se, &hist_entry__sort_list, list) {
123 if (se->elide)
124 continue;
125
126 se->se_snprintf(h, s, ARRAY_SIZE(s),
127 hists__col_len(hists, se->se_width_idx));
128
129 gtk_list_store_set(store, &iter, col_idx++, s, -1);
130 }
131 }
132
133 gtk_container_add(GTK_CONTAINER(window), view);
134}
135
136int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
137 const char *help __used,
138 void (*timer) (void *arg)__used,
139 void *arg __used, int delay_secs __used)
140{
141 struct perf_evsel *pos;
142 GtkWidget *notebook;
143 GtkWidget *window;
144
145 signal(SIGSEGV, perf_gtk_signal);
146 signal(SIGFPE, perf_gtk_signal);
147 signal(SIGINT, perf_gtk_signal);
148 signal(SIGQUIT, perf_gtk_signal);
149 signal(SIGTERM, perf_gtk_signal);
150
151 window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
152
153 gtk_window_set_title(GTK_WINDOW(window), "perf report");
154
155 g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
156
157 notebook = gtk_notebook_new();
158
159 list_for_each_entry(pos, &evlist->entries, node) {
160 struct hists *hists = &pos->hists;
161 const char *evname = event_name(pos);
162 GtkWidget *scrolled_window;
163 GtkWidget *tab_label;
164
165 scrolled_window = gtk_scrolled_window_new(NULL, NULL);
166
167 gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
168 GTK_POLICY_AUTOMATIC,
169 GTK_POLICY_AUTOMATIC);
170
171 perf_gtk_show_hists(scrolled_window, hists);
172
173 tab_label = gtk_label_new(evname);
174
175 gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label);
176 }
177
178 gtk_container_add(GTK_CONTAINER(window), notebook);
179
180 gtk_widget_show_all(window);
181
182 perf_gtk_resize_window(window);
183
184 gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
185
186 gtk_main();
187
188 return 0;
189}
diff --git a/tools/perf/util/gtk/gtk.h b/tools/perf/util/gtk/gtk.h
new file mode 100644
index 000000000000..75177ee04032
--- /dev/null
+++ b/tools/perf/util/gtk/gtk.h
@@ -0,0 +1,8 @@
1#ifndef _PERF_GTK_H_
2#define _PERF_GTK_H_ 1
3
4#pragma GCC diagnostic ignored "-Wstrict-prototypes"
5#include <gtk/gtk.h>
6#pragma GCC diagnostic error "-Wstrict-prototypes"
7
8#endif /* _PERF_GTK_H_ */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index fcd9cf3ea63e..4c7c2d73251f 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1177,7 +1177,7 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1177 goto error; 1177 goto error;
1178 1178
1179 msz = sizeof(attr); 1179 msz = sizeof(attr);
1180 if (sz < (ssize_t)msz) 1180 if (sz < msz)
1181 msz = sz; 1181 msz = sz;
1182 1182
1183 for (i = 0 ; i < nre; i++) { 1183 for (i = 0 ; i < nre; i++) {
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 3dc99a9b71f5..2ec4b60aff6c 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -10,11 +10,14 @@ static bool hists__filter_entry_by_dso(struct hists *hists,
10 struct hist_entry *he); 10 struct hist_entry *he);
11static bool hists__filter_entry_by_thread(struct hists *hists, 11static bool hists__filter_entry_by_thread(struct hists *hists,
12 struct hist_entry *he); 12 struct hist_entry *he);
13static bool hists__filter_entry_by_symbol(struct hists *hists,
14 struct hist_entry *he);
13 15
14enum hist_filter { 16enum hist_filter {
15 HIST_FILTER__DSO, 17 HIST_FILTER__DSO,
16 HIST_FILTER__THREAD, 18 HIST_FILTER__THREAD,
17 HIST_FILTER__PARENT, 19 HIST_FILTER__PARENT,
20 HIST_FILTER__SYMBOL,
18}; 21};
19 22
20struct callchain_param callchain_param = { 23struct callchain_param callchain_param = {
@@ -420,6 +423,7 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
420{ 423{
421 hists__filter_entry_by_dso(hists, he); 424 hists__filter_entry_by_dso(hists, he);
422 hists__filter_entry_by_thread(hists, he); 425 hists__filter_entry_by_thread(hists, he);
426 hists__filter_entry_by_symbol(hists, he);
423} 427}
424 428
425static void __hists__collapse_resort(struct hists *hists, bool threaded) 429static void __hists__collapse_resort(struct hists *hists, bool threaded)
@@ -603,7 +607,7 @@ static void init_rem_hits(void)
603 rem_hits.ms.sym = rem_sq_bracket; 607 rem_hits.ms.sym = rem_sq_bracket;
604} 608}
605 609
606static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self, 610static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
607 u64 total_samples, int depth, 611 u64 total_samples, int depth,
608 int depth_mask, int left_margin) 612 int depth_mask, int left_margin)
609{ 613{
@@ -611,21 +615,16 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
611 struct callchain_node *child; 615 struct callchain_node *child;
612 struct callchain_list *chain; 616 struct callchain_list *chain;
613 int new_depth_mask = depth_mask; 617 int new_depth_mask = depth_mask;
614 u64 new_total;
615 u64 remaining; 618 u64 remaining;
616 size_t ret = 0; 619 size_t ret = 0;
617 int i; 620 int i;
618 uint entries_printed = 0; 621 uint entries_printed = 0;
619 622
620 if (callchain_param.mode == CHAIN_GRAPH_REL) 623 remaining = total_samples;
621 new_total = self->children_hit;
622 else
623 new_total = total_samples;
624
625 remaining = new_total;
626 624
627 node = rb_first(&self->rb_root); 625 node = rb_first(root);
628 while (node) { 626 while (node) {
627 u64 new_total;
629 u64 cumul; 628 u64 cumul;
630 629
631 child = rb_entry(node, struct callchain_node, rb_node); 630 child = rb_entry(node, struct callchain_node, rb_node);
@@ -653,11 +652,17 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
653 list_for_each_entry(chain, &child->val, list) { 652 list_for_each_entry(chain, &child->val, list) {
654 ret += ipchain__fprintf_graph(fp, chain, depth, 653 ret += ipchain__fprintf_graph(fp, chain, depth,
655 new_depth_mask, i++, 654 new_depth_mask, i++,
656 new_total, 655 total_samples,
657 cumul, 656 cumul,
658 left_margin); 657 left_margin);
659 } 658 }
660 ret += __callchain__fprintf_graph(fp, child, new_total, 659
660 if (callchain_param.mode == CHAIN_GRAPH_REL)
661 new_total = child->children_hit;
662 else
663 new_total = total_samples;
664
665 ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
661 depth + 1, 666 depth + 1,
662 new_depth_mask | (1 << depth), 667 new_depth_mask | (1 << depth),
663 left_margin); 668 left_margin);
@@ -667,61 +672,75 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
667 } 672 }
668 673
669 if (callchain_param.mode == CHAIN_GRAPH_REL && 674 if (callchain_param.mode == CHAIN_GRAPH_REL &&
670 remaining && remaining != new_total) { 675 remaining && remaining != total_samples) {
671 676
672 if (!rem_sq_bracket) 677 if (!rem_sq_bracket)
673 return ret; 678 return ret;
674 679
675 new_depth_mask &= ~(1 << (depth - 1)); 680 new_depth_mask &= ~(1 << (depth - 1));
676
677 ret += ipchain__fprintf_graph(fp, &rem_hits, depth, 681 ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
678 new_depth_mask, 0, new_total, 682 new_depth_mask, 0, total_samples,
679 remaining, left_margin); 683 remaining, left_margin);
680 } 684 }
681 685
682 return ret; 686 return ret;
683} 687}
684 688
685static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self, 689static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
686 u64 total_samples, int left_margin) 690 u64 total_samples, int left_margin)
687{ 691{
692 struct callchain_node *cnode;
688 struct callchain_list *chain; 693 struct callchain_list *chain;
694 u32 entries_printed = 0;
689 bool printed = false; 695 bool printed = false;
696 struct rb_node *node;
690 int i = 0; 697 int i = 0;
691 int ret = 0; 698 int ret;
692 u32 entries_printed = 0;
693
694 list_for_each_entry(chain, &self->val, list) {
695 if (!i++ && sort__first_dimension == SORT_SYM)
696 continue;
697
698 if (!printed) {
699 ret += callchain__fprintf_left_margin(fp, left_margin);
700 ret += fprintf(fp, "|\n");
701 ret += callchain__fprintf_left_margin(fp, left_margin);
702 ret += fprintf(fp, "---");
703
704 left_margin += 3;
705 printed = true;
706 } else
707 ret += callchain__fprintf_left_margin(fp, left_margin);
708 699
709 if (chain->ms.sym) 700 /*
710 ret += fprintf(fp, " %s\n", chain->ms.sym->name); 701 * If have one single callchain root, don't bother printing
711 else 702 * its percentage (100 % in fractal mode and the same percentage
712 ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); 703 * than the hist in graph mode). This also avoid one level of column.
704 */
705 node = rb_first(root);
706 if (node && !rb_next(node)) {
707 cnode = rb_entry(node, struct callchain_node, rb_node);
708 list_for_each_entry(chain, &cnode->val, list) {
709 /*
710 * If we sort by symbol, the first entry is the same than
711 * the symbol. No need to print it otherwise it appears as
712 * displayed twice.
713 */
714 if (!i++ && sort__first_dimension == SORT_SYM)
715 continue;
716 if (!printed) {
717 ret += callchain__fprintf_left_margin(fp, left_margin);
718 ret += fprintf(fp, "|\n");
719 ret += callchain__fprintf_left_margin(fp, left_margin);
720 ret += fprintf(fp, "---");
721 left_margin += 3;
722 printed = true;
723 } else
724 ret += callchain__fprintf_left_margin(fp, left_margin);
725
726 if (chain->ms.sym)
727 ret += fprintf(fp, " %s\n", chain->ms.sym->name);
728 else
729 ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);
713 730
714 if (++entries_printed == callchain_param.print_limit) 731 if (++entries_printed == callchain_param.print_limit)
715 break; 732 break;
733 }
734 root = &cnode->rb_root;
716 } 735 }
717 736
718 ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin); 737 return __callchain__fprintf_graph(fp, root, total_samples,
719 738 1, 1, left_margin);
720 return ret;
721} 739}
722 740
723static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self, 741static size_t __callchain__fprintf_flat(FILE *fp,
724 u64 total_samples) 742 struct callchain_node *self,
743 u64 total_samples)
725{ 744{
726 struct callchain_list *chain; 745 struct callchain_list *chain;
727 size_t ret = 0; 746 size_t ret = 0;
@@ -729,7 +748,7 @@ static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
729 if (!self) 748 if (!self)
730 return 0; 749 return 0;
731 750
732 ret += callchain__fprintf_flat(fp, self->parent, total_samples); 751 ret += __callchain__fprintf_flat(fp, self->parent, total_samples);
733 752
734 753
735 list_for_each_entry(chain, &self->val, list) { 754 list_for_each_entry(chain, &self->val, list) {
@@ -745,44 +764,58 @@ static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
745 return ret; 764 return ret;
746} 765}
747 766
748static size_t hist_entry_callchain__fprintf(struct hist_entry *he, 767static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
749 u64 total_samples, int left_margin, 768 u64 total_samples)
750 FILE *fp)
751{ 769{
752 struct rb_node *rb_node;
753 struct callchain_node *chain;
754 size_t ret = 0; 770 size_t ret = 0;
755 u32 entries_printed = 0; 771 u32 entries_printed = 0;
772 struct rb_node *rb_node;
773 struct callchain_node *chain;
756 774
757 rb_node = rb_first(&he->sorted_chain); 775 rb_node = rb_first(self);
758 while (rb_node) { 776 while (rb_node) {
759 double percent; 777 double percent;
760 778
761 chain = rb_entry(rb_node, struct callchain_node, rb_node); 779 chain = rb_entry(rb_node, struct callchain_node, rb_node);
762 percent = chain->hit * 100.0 / total_samples; 780 percent = chain->hit * 100.0 / total_samples;
763 switch (callchain_param.mode) { 781
764 case CHAIN_FLAT: 782 ret = percent_color_fprintf(fp, " %6.2f%%\n", percent);
765 ret += percent_color_fprintf(fp, " %6.2f%%\n", 783 ret += __callchain__fprintf_flat(fp, chain, total_samples);
766 percent);
767 ret += callchain__fprintf_flat(fp, chain, total_samples);
768 break;
769 case CHAIN_GRAPH_ABS: /* Falldown */
770 case CHAIN_GRAPH_REL:
771 ret += callchain__fprintf_graph(fp, chain, total_samples,
772 left_margin);
773 case CHAIN_NONE:
774 default:
775 break;
776 }
777 ret += fprintf(fp, "\n"); 784 ret += fprintf(fp, "\n");
778 if (++entries_printed == callchain_param.print_limit) 785 if (++entries_printed == callchain_param.print_limit)
779 break; 786 break;
787
780 rb_node = rb_next(rb_node); 788 rb_node = rb_next(rb_node);
781 } 789 }
782 790
783 return ret; 791 return ret;
784} 792}
785 793
794static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
795 u64 total_samples, int left_margin,
796 FILE *fp)
797{
798 switch (callchain_param.mode) {
799 case CHAIN_GRAPH_REL:
800 return callchain__fprintf_graph(fp, &he->sorted_chain, he->period,
801 left_margin);
802 break;
803 case CHAIN_GRAPH_ABS:
804 return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
805 left_margin);
806 break;
807 case CHAIN_FLAT:
808 return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
809 break;
810 case CHAIN_NONE:
811 break;
812 default:
813 pr_err("Bad callchain mode\n");
814 }
815
816 return 0;
817}
818
786void hists__output_recalc_col_len(struct hists *hists, int max_rows) 819void hists__output_recalc_col_len(struct hists *hists, int max_rows)
787{ 820{
788 struct rb_node *next = rb_first(&hists->entries); 821 struct rb_node *next = rb_first(&hists->entries);
@@ -887,9 +920,9 @@ static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
887 diff = new_percent - old_percent; 920 diff = new_percent - old_percent;
888 921
889 if (fabs(diff) >= 0.01) 922 if (fabs(diff) >= 0.01)
890 ret += scnprintf(bf, sizeof(bf), "%+4.2F%%", diff); 923 scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
891 else 924 else
892 ret += scnprintf(bf, sizeof(bf), " "); 925 scnprintf(bf, sizeof(bf), " ");
893 926
894 if (sep) 927 if (sep)
895 ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf); 928 ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
@@ -898,9 +931,9 @@ static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
898 931
899 if (show_displacement) { 932 if (show_displacement) {
900 if (displacement) 933 if (displacement)
901 ret += scnprintf(bf, sizeof(bf), "%+4ld", displacement); 934 scnprintf(bf, sizeof(bf), "%+4ld", displacement);
902 else 935 else
903 ret += scnprintf(bf, sizeof(bf), " "); 936 scnprintf(bf, sizeof(bf), " ");
904 937
905 if (sep) 938 if (sep)
906 ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf); 939 ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
@@ -1247,6 +1280,37 @@ void hists__filter_by_thread(struct hists *hists)
1247 } 1280 }
1248} 1281}
1249 1282
1283static bool hists__filter_entry_by_symbol(struct hists *hists,
1284 struct hist_entry *he)
1285{
1286 if (hists->symbol_filter_str != NULL &&
1287 (!he->ms.sym || strstr(he->ms.sym->name,
1288 hists->symbol_filter_str) == NULL)) {
1289 he->filtered |= (1 << HIST_FILTER__SYMBOL);
1290 return true;
1291 }
1292
1293 return false;
1294}
1295
1296void hists__filter_by_symbol(struct hists *hists)
1297{
1298 struct rb_node *nd;
1299
1300 hists->nr_entries = hists->stats.total_period = 0;
1301 hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
1302 hists__reset_col_len(hists);
1303
1304 for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1305 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1306
1307 if (hists__filter_entry_by_symbol(hists, h))
1308 continue;
1309
1310 hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
1311 }
1312}
1313
1250int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip) 1314int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
1251{ 1315{
1252 return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip); 1316 return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 9413f3e31fea..2cae9df40e04 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -62,6 +62,7 @@ struct hists {
62 const struct thread *thread_filter; 62 const struct thread *thread_filter;
63 const struct dso *dso_filter; 63 const struct dso *dso_filter;
64 const char *uid_filter_str; 64 const char *uid_filter_str;
65 const char *symbol_filter_str;
65 pthread_mutex_t lock; 66 pthread_mutex_t lock;
66 struct events_stats stats; 67 struct events_stats stats;
67 u64 event_stream; 68 u64 event_stream;
@@ -107,6 +108,7 @@ int hist_entry__annotate(struct hist_entry *self, size_t privsize);
107 108
108void hists__filter_by_dso(struct hists *hists); 109void hists__filter_by_dso(struct hists *hists);
109void hists__filter_by_thread(struct hists *hists); 110void hists__filter_by_thread(struct hists *hists);
111void hists__filter_by_symbol(struct hists *hists);
110 112
111u16 hists__col_len(struct hists *self, enum hist_column col); 113u16 hists__col_len(struct hists *self, enum hist_column col);
112void hists__set_col_len(struct hists *self, enum hist_column col, u16 len); 114void hists__set_col_len(struct hists *self, enum hist_column col, u16 len);
@@ -145,6 +147,23 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
145 int refresh); 147 int refresh);
146#endif 148#endif
147 149
150#ifdef NO_GTK2_SUPPORT
151static inline
152int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __used,
153 const char *help __used,
154 void(*timer)(void *arg) __used,
155 void *arg __used,
156 int refresh __used)
157{
158 return 0;
159}
160
161#else
162int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help,
163 void(*timer)(void *arg), void *arg,
164 int refresh);
165#endif
166
148unsigned int hists__sort_list_width(struct hists *self); 167unsigned int hists__sort_list_width(struct hists *self);
149 168
150#endif /* __PERF_HIST_H */ 169#endif /* __PERF_HIST_H */
diff --git a/tools/perf/util/include/linux/module.h b/tools/perf/util/include/linux/export.h
index b43e2dc21e04..b43e2dc21e04 100644
--- a/tools/perf/util/include/linux/module.h
+++ b/tools/perf/util/include/linux/export.h
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index c7a6f6faf91e..5b3a0ef4e232 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -11,6 +11,10 @@
11#include "cache.h" 11#include "cache.h"
12#include "header.h" 12#include "header.h"
13#include "debugfs.h" 13#include "debugfs.h"
14#include "parse-events-flex.h"
15#include "pmu.h"
16
17#define MAX_NAME_LEN 100
14 18
15struct event_symbol { 19struct event_symbol {
16 u8 type; 20 u8 type;
@@ -19,11 +23,8 @@ struct event_symbol {
19 const char *alias; 23 const char *alias;
20}; 24};
21 25
22enum event_result { 26int parse_events_parse(struct list_head *list, struct list_head *list_tmp,
23 EVT_FAILED, 27 int *idx);
24 EVT_HANDLED,
25 EVT_HANDLED_ALL
26};
27 28
28#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x 29#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
29#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x 30#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
@@ -354,7 +355,24 @@ const char *__event_name(int type, u64 config)
354 return "unknown"; 355 return "unknown";
355} 356}
356 357
357static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size) 358static int add_event(struct list_head *list, int *idx,
359 struct perf_event_attr *attr, char *name)
360{
361 struct perf_evsel *evsel;
362
363 event_attr_init(attr);
364
365 evsel = perf_evsel__new(attr, (*idx)++);
366 if (!evsel)
367 return -ENOMEM;
368
369 list_add_tail(&evsel->node, list);
370
371 evsel->name = strdup(name);
372 return 0;
373}
374
375static int parse_aliases(char *str, const char *names[][MAX_ALIASES], int size)
358{ 376{
359 int i, j; 377 int i, j;
360 int n, longest = -1; 378 int n, longest = -1;
@@ -362,58 +380,57 @@ static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int
362 for (i = 0; i < size; i++) { 380 for (i = 0; i < size; i++) {
363 for (j = 0; j < MAX_ALIASES && names[i][j]; j++) { 381 for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
364 n = strlen(names[i][j]); 382 n = strlen(names[i][j]);
365 if (n > longest && !strncasecmp(*str, names[i][j], n)) 383 if (n > longest && !strncasecmp(str, names[i][j], n))
366 longest = n; 384 longest = n;
367 } 385 }
368 if (longest > 0) { 386 if (longest > 0)
369 *str += longest;
370 return i; 387 return i;
371 }
372 } 388 }
373 389
374 return -1; 390 return -1;
375} 391}
376 392
377static enum event_result 393int parse_events_add_cache(struct list_head *list, int *idx,
378parse_generic_hw_event(const char **str, struct perf_event_attr *attr) 394 char *type, char *op_result1, char *op_result2)
379{ 395{
380 const char *s = *str; 396 struct perf_event_attr attr;
397 char name[MAX_NAME_LEN];
381 int cache_type = -1, cache_op = -1, cache_result = -1; 398 int cache_type = -1, cache_op = -1, cache_result = -1;
399 char *op_result[2] = { op_result1, op_result2 };
400 int i, n;
382 401
383 cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
384 /* 402 /*
385 * No fallback - if we cannot get a clear cache type 403 * No fallback - if we cannot get a clear cache type
386 * then bail out: 404 * then bail out:
387 */ 405 */
406 cache_type = parse_aliases(type, hw_cache,
407 PERF_COUNT_HW_CACHE_MAX);
388 if (cache_type == -1) 408 if (cache_type == -1)
389 return EVT_FAILED; 409 return -EINVAL;
390 410
391 while ((cache_op == -1 || cache_result == -1) && *s == '-') { 411 n = snprintf(name, MAX_NAME_LEN, "%s", type);
392 ++s; 412
413 for (i = 0; (i < 2) && (op_result[i]); i++) {
414 char *str = op_result[i];
415
416 snprintf(name + n, MAX_NAME_LEN - n, "-%s\n", str);
393 417
394 if (cache_op == -1) { 418 if (cache_op == -1) {
395 cache_op = parse_aliases(&s, hw_cache_op, 419 cache_op = parse_aliases(str, hw_cache_op,
396 PERF_COUNT_HW_CACHE_OP_MAX); 420 PERF_COUNT_HW_CACHE_OP_MAX);
397 if (cache_op >= 0) { 421 if (cache_op >= 0) {
398 if (!is_cache_op_valid(cache_type, cache_op)) 422 if (!is_cache_op_valid(cache_type, cache_op))
399 return EVT_FAILED; 423 return -EINVAL;
400 continue; 424 continue;
401 } 425 }
402 } 426 }
403 427
404 if (cache_result == -1) { 428 if (cache_result == -1) {
405 cache_result = parse_aliases(&s, hw_cache_result, 429 cache_result = parse_aliases(str, hw_cache_result,
406 PERF_COUNT_HW_CACHE_RESULT_MAX); 430 PERF_COUNT_HW_CACHE_RESULT_MAX);
407 if (cache_result >= 0) 431 if (cache_result >= 0)
408 continue; 432 continue;
409 } 433 }
410
411 /*
412 * Can't parse this as a cache op or result, so back up
413 * to the '-'.
414 */
415 --s;
416 break;
417 } 434 }
418 435
419 /* 436 /*
@@ -428,20 +445,17 @@ parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
428 if (cache_result == -1) 445 if (cache_result == -1)
429 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS; 446 cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
430 447
431 attr->config = cache_type | (cache_op << 8) | (cache_result << 16); 448 memset(&attr, 0, sizeof(attr));
432 attr->type = PERF_TYPE_HW_CACHE; 449 attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
433 450 attr.type = PERF_TYPE_HW_CACHE;
434 *str = s; 451 return add_event(list, idx, &attr, name);
435 return EVT_HANDLED;
436} 452}
437 453
438static enum event_result 454static int add_tracepoint(struct list_head *list, int *idx,
439parse_single_tracepoint_event(char *sys_name, 455 char *sys_name, char *evt_name)
440 const char *evt_name,
441 unsigned int evt_length,
442 struct perf_event_attr *attr,
443 const char **strp)
444{ 456{
457 struct perf_event_attr attr;
458 char name[MAX_NAME_LEN];
445 char evt_path[MAXPATHLEN]; 459 char evt_path[MAXPATHLEN];
446 char id_buf[4]; 460 char id_buf[4];
447 u64 id; 461 u64 id;
@@ -452,130 +466,80 @@ parse_single_tracepoint_event(char *sys_name,
452 466
453 fd = open(evt_path, O_RDONLY); 467 fd = open(evt_path, O_RDONLY);
454 if (fd < 0) 468 if (fd < 0)
455 return EVT_FAILED; 469 return -1;
456 470
457 if (read(fd, id_buf, sizeof(id_buf)) < 0) { 471 if (read(fd, id_buf, sizeof(id_buf)) < 0) {
458 close(fd); 472 close(fd);
459 return EVT_FAILED; 473 return -1;
460 } 474 }
461 475
462 close(fd); 476 close(fd);
463 id = atoll(id_buf); 477 id = atoll(id_buf);
464 attr->config = id;
465 attr->type = PERF_TYPE_TRACEPOINT;
466 *strp += strlen(sys_name) + evt_length + 1; /* + 1 for the ':' */
467
468 attr->sample_type |= PERF_SAMPLE_RAW;
469 attr->sample_type |= PERF_SAMPLE_TIME;
470 attr->sample_type |= PERF_SAMPLE_CPU;
471
472 attr->sample_period = 1;
473 478
479 memset(&attr, 0, sizeof(attr));
480 attr.config = id;
481 attr.type = PERF_TYPE_TRACEPOINT;
482 attr.sample_type |= PERF_SAMPLE_RAW;
483 attr.sample_type |= PERF_SAMPLE_TIME;
484 attr.sample_type |= PERF_SAMPLE_CPU;
485 attr.sample_period = 1;
474 486
475 return EVT_HANDLED; 487 snprintf(name, MAX_NAME_LEN, "%s:%s", sys_name, evt_name);
488 return add_event(list, idx, &attr, name);
476} 489}
477 490
478/* sys + ':' + event + ':' + flags*/ 491static int add_tracepoint_multi(struct list_head *list, int *idx,
479#define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128) 492 char *sys_name, char *evt_name)
480static enum event_result
481parse_multiple_tracepoint_event(struct perf_evlist *evlist, char *sys_name,
482 const char *evt_exp, char *flags)
483{ 493{
484 char evt_path[MAXPATHLEN]; 494 char evt_path[MAXPATHLEN];
485 struct dirent *evt_ent; 495 struct dirent *evt_ent;
486 DIR *evt_dir; 496 DIR *evt_dir;
497 int ret = 0;
487 498
488 snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name); 499 snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
489 evt_dir = opendir(evt_path); 500 evt_dir = opendir(evt_path);
490
491 if (!evt_dir) { 501 if (!evt_dir) {
492 perror("Can't open event dir"); 502 perror("Can't open event dir");
493 return EVT_FAILED; 503 return -1;
494 } 504 }
495 505
496 while ((evt_ent = readdir(evt_dir))) { 506 while (!ret && (evt_ent = readdir(evt_dir))) {
497 char event_opt[MAX_EVOPT_LEN + 1];
498 int len;
499
500 if (!strcmp(evt_ent->d_name, ".") 507 if (!strcmp(evt_ent->d_name, ".")
501 || !strcmp(evt_ent->d_name, "..") 508 || !strcmp(evt_ent->d_name, "..")
502 || !strcmp(evt_ent->d_name, "enable") 509 || !strcmp(evt_ent->d_name, "enable")
503 || !strcmp(evt_ent->d_name, "filter")) 510 || !strcmp(evt_ent->d_name, "filter"))
504 continue; 511 continue;
505 512
506 if (!strglobmatch(evt_ent->d_name, evt_exp)) 513 if (!strglobmatch(evt_ent->d_name, evt_name))
507 continue; 514 continue;
508 515
509 len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s%s%s", sys_name, 516 ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
510 evt_ent->d_name, flags ? ":" : "",
511 flags ?: "");
512 if (len < 0)
513 return EVT_FAILED;
514
515 if (parse_events(evlist, event_opt, 0))
516 return EVT_FAILED;
517 } 517 }
518 518
519 return EVT_HANDLED_ALL; 519 return ret;
520} 520}
521 521
522static enum event_result 522int parse_events_add_tracepoint(struct list_head *list, int *idx,
523parse_tracepoint_event(struct perf_evlist *evlist, const char **strp, 523 char *sys, char *event)
524 struct perf_event_attr *attr)
525{ 524{
526 const char *evt_name; 525 int ret;
527 char *flags = NULL, *comma_loc;
528 char sys_name[MAX_EVENT_LENGTH];
529 unsigned int sys_length, evt_length;
530
531 if (debugfs_valid_mountpoint(tracing_events_path))
532 return 0;
533
534 evt_name = strchr(*strp, ':');
535 if (!evt_name)
536 return EVT_FAILED;
537
538 sys_length = evt_name - *strp;
539 if (sys_length >= MAX_EVENT_LENGTH)
540 return 0;
541 526
542 strncpy(sys_name, *strp, sys_length); 527 ret = debugfs_valid_mountpoint(tracing_events_path);
543 sys_name[sys_length] = '\0'; 528 if (ret)
544 evt_name = evt_name + 1; 529 return ret;
545 530
546 comma_loc = strchr(evt_name, ','); 531 return strpbrk(event, "*?") ?
547 if (comma_loc) { 532 add_tracepoint_multi(list, idx, sys, event) :
548 /* take the event name up to the comma */ 533 add_tracepoint(list, idx, sys, event);
549 evt_name = strndup(evt_name, comma_loc - evt_name);
550 }
551 flags = strchr(evt_name, ':');
552 if (flags) {
553 /* split it out: */
554 evt_name = strndup(evt_name, flags - evt_name);
555 flags++;
556 }
557
558 evt_length = strlen(evt_name);
559 if (evt_length >= MAX_EVENT_LENGTH)
560 return EVT_FAILED;
561 if (strpbrk(evt_name, "*?")) {
562 *strp += strlen(sys_name) + evt_length + 1; /* 1 == the ':' */
563 return parse_multiple_tracepoint_event(evlist, sys_name,
564 evt_name, flags);
565 } else {
566 return parse_single_tracepoint_event(sys_name, evt_name,
567 evt_length, attr, strp);
568 }
569} 534}
570 535
571static enum event_result 536static int
572parse_breakpoint_type(const char *type, const char **strp, 537parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
573 struct perf_event_attr *attr)
574{ 538{
575 int i; 539 int i;
576 540
577 for (i = 0; i < 3; i++) { 541 for (i = 0; i < 3; i++) {
578 if (!type[i]) 542 if (!type || !type[i])
579 break; 543 break;
580 544
581 switch (type[i]) { 545 switch (type[i]) {
@@ -589,164 +553,146 @@ parse_breakpoint_type(const char *type, const char **strp,
589 attr->bp_type |= HW_BREAKPOINT_X; 553 attr->bp_type |= HW_BREAKPOINT_X;
590 break; 554 break;
591 default: 555 default:
592 return EVT_FAILED; 556 return -EINVAL;
593 } 557 }
594 } 558 }
559
595 if (!attr->bp_type) /* Default */ 560 if (!attr->bp_type) /* Default */
596 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W; 561 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
597 562
598 *strp = type + i; 563 return 0;
599
600 return EVT_HANDLED;
601} 564}
602 565
603static enum event_result 566int parse_events_add_breakpoint(struct list_head *list, int *idx,
604parse_breakpoint_event(const char **strp, struct perf_event_attr *attr) 567 void *ptr, char *type)
605{ 568{
606 const char *target; 569 struct perf_event_attr attr;
607 const char *type; 570 char name[MAX_NAME_LEN];
608 char *endaddr;
609 u64 addr;
610 enum event_result err;
611
612 target = strchr(*strp, ':');
613 if (!target)
614 return EVT_FAILED;
615
616 if (strncmp(*strp, "mem", target - *strp) != 0)
617 return EVT_FAILED;
618
619 target++;
620
621 addr = strtoull(target, &endaddr, 0);
622 if (target == endaddr)
623 return EVT_FAILED;
624
625 attr->bp_addr = addr;
626 *strp = endaddr;
627 571
628 type = strchr(target, ':'); 572 memset(&attr, 0, sizeof(attr));
573 attr.bp_addr = (unsigned long) ptr;
629 574
630 /* If no type is defined, just rw as default */ 575 if (parse_breakpoint_type(type, &attr))
631 if (!type) { 576 return -EINVAL;
632 attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
633 } else {
634 err = parse_breakpoint_type(++type, strp, attr);
635 if (err == EVT_FAILED)
636 return EVT_FAILED;
637 }
638 577
639 /* 578 /*
640 * We should find a nice way to override the access length 579 * We should find a nice way to override the access length
641 * Provide some defaults for now 580 * Provide some defaults for now
642 */ 581 */
643 if (attr->bp_type == HW_BREAKPOINT_X) 582 if (attr.bp_type == HW_BREAKPOINT_X)
644 attr->bp_len = sizeof(long); 583 attr.bp_len = sizeof(long);
645 else 584 else
646 attr->bp_len = HW_BREAKPOINT_LEN_4; 585 attr.bp_len = HW_BREAKPOINT_LEN_4;
647 586
648 attr->type = PERF_TYPE_BREAKPOINT; 587 attr.type = PERF_TYPE_BREAKPOINT;
649 588
650 return EVT_HANDLED; 589 snprintf(name, MAX_NAME_LEN, "mem:%p:%s", ptr, type ? type : "rw");
590 return add_event(list, idx, &attr, name);
651} 591}
652 592
653static int check_events(const char *str, unsigned int i) 593static int config_term(struct perf_event_attr *attr,
594 struct parse_events__term *term)
654{ 595{
655 int n; 596 switch (term->type) {
597 case PARSE_EVENTS__TERM_TYPE_CONFIG:
598 attr->config = term->val.num;
599 break;
600 case PARSE_EVENTS__TERM_TYPE_CONFIG1:
601 attr->config1 = term->val.num;
602 break;
603 case PARSE_EVENTS__TERM_TYPE_CONFIG2:
604 attr->config2 = term->val.num;
605 break;
606 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
607 attr->sample_period = term->val.num;
608 break;
609 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
610 /*
611 * TODO uncomment when the field is available
612 * attr->branch_sample_type = term->val.num;
613 */
614 break;
615 default:
616 return -EINVAL;
617 }
618 return 0;
619}
656 620
657 n = strlen(event_symbols[i].symbol); 621static int config_attr(struct perf_event_attr *attr,
658 if (!strncasecmp(str, event_symbols[i].symbol, n)) 622 struct list_head *head, int fail)
659 return n; 623{
624 struct parse_events__term *term;
660 625
661 n = strlen(event_symbols[i].alias); 626 list_for_each_entry(term, head, list)
662 if (n) { 627 if (config_term(attr, term) && fail)
663 if (!strncasecmp(str, event_symbols[i].alias, n)) 628 return -EINVAL;
664 return n;
665 }
666 629
667 return 0; 630 return 0;
668} 631}
669 632
670static enum event_result 633int parse_events_add_numeric(struct list_head *list, int *idx,
671parse_symbolic_event(const char **strp, struct perf_event_attr *attr) 634 unsigned long type, unsigned long config,
635 struct list_head *head_config)
672{ 636{
673 const char *str = *strp; 637 struct perf_event_attr attr;
674 unsigned int i; 638
675 int n; 639 memset(&attr, 0, sizeof(attr));
676 640 attr.type = type;
677 for (i = 0; i < ARRAY_SIZE(event_symbols); i++) { 641 attr.config = config;
678 n = check_events(str, i); 642
679 if (n > 0) { 643 if (head_config &&
680 attr->type = event_symbols[i].type; 644 config_attr(&attr, head_config, 1))
681 attr->config = event_symbols[i].config; 645 return -EINVAL;
682 *strp = str + n; 646
683 return EVT_HANDLED; 647 return add_event(list, idx, &attr,
684 } 648 (char *) __event_name(type, config));
685 }
686 return EVT_FAILED;
687} 649}
688 650
689static enum event_result 651int parse_events_add_pmu(struct list_head *list, int *idx,
690parse_raw_event(const char **strp, struct perf_event_attr *attr) 652 char *name, struct list_head *head_config)
691{ 653{
692 const char *str = *strp; 654 struct perf_event_attr attr;
693 u64 config; 655 struct perf_pmu *pmu;
694 int n; 656
695 657 pmu = perf_pmu__find(name);
696 if (*str != 'r') 658 if (!pmu)
697 return EVT_FAILED; 659 return -EINVAL;
698 n = hex2u64(str + 1, &config); 660
699 if (n > 0) { 661 memset(&attr, 0, sizeof(attr));
700 const char *end = str + n + 1; 662
701 if (*end != '\0' && *end != ',' && *end != ':') 663 /*
702 return EVT_FAILED; 664 * Configure hardcoded terms first, no need to check
703 665 * return value when called with fail == 0 ;)
704 *strp = end; 666 */
705 attr->type = PERF_TYPE_RAW; 667 config_attr(&attr, head_config, 0);
706 attr->config = config; 668
707 return EVT_HANDLED; 669 if (perf_pmu__config(pmu, &attr, head_config))
708 } 670 return -EINVAL;
709 return EVT_FAILED; 671
672 return add_event(list, idx, &attr, (char *) "pmu");
710} 673}
711 674
712static enum event_result 675void parse_events_update_lists(struct list_head *list_event,
713parse_numeric_event(const char **strp, struct perf_event_attr *attr) 676 struct list_head *list_all)
714{ 677{
715 const char *str = *strp; 678 /*
716 char *endp; 679 * Called for single event definition. Update the
717 unsigned long type; 680 * 'all event' list, and reinit the 'signle event'
718 u64 config; 681 * list, for next event definition.
719 682 */
720 type = strtoul(str, &endp, 0); 683 list_splice_tail(list_event, list_all);
721 if (endp > str && type < PERF_TYPE_MAX && *endp == ':') { 684 INIT_LIST_HEAD(list_event);
722 str = endp + 1;
723 config = strtoul(str, &endp, 0);
724 if (endp > str) {
725 attr->type = type;
726 attr->config = config;
727 *strp = endp;
728 return EVT_HANDLED;
729 }
730 }
731 return EVT_FAILED;
732} 685}
733 686
734static int 687int parse_events_modifier(struct list_head *list, char *str)
735parse_event_modifier(const char **strp, struct perf_event_attr *attr)
736{ 688{
737 const char *str = *strp; 689 struct perf_evsel *evsel;
738 int exclude = 0, exclude_GH = 0; 690 int exclude = 0, exclude_GH = 0;
739 int eu = 0, ek = 0, eh = 0, eH = 0, eG = 0, precise = 0; 691 int eu = 0, ek = 0, eh = 0, eH = 0, eG = 0, precise = 0;
740 692
741 if (!*str) 693 if (str == NULL)
742 return 0; 694 return 0;
743 695
744 if (*str == ',')
745 return 0;
746
747 if (*str++ != ':')
748 return -1;
749
750 while (*str) { 696 while (*str) {
751 if (*str == 'u') { 697 if (*str == 'u') {
752 if (!exclude) 698 if (!exclude)
@@ -775,111 +721,62 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr)
775 721
776 ++str; 722 ++str;
777 } 723 }
778 if (str < *strp + 2)
779 return -1;
780 724
781 *strp = str; 725 /*
726 * precise ip:
727 *
728 * 0 - SAMPLE_IP can have arbitrary skid
729 * 1 - SAMPLE_IP must have constant skid
730 * 2 - SAMPLE_IP requested to have 0 skid
731 * 3 - SAMPLE_IP must have 0 skid
732 *
733 * See also PERF_RECORD_MISC_EXACT_IP
734 */
735 if (precise > 3)
736 return -EINVAL;
782 737
783 attr->exclude_user = eu; 738 list_for_each_entry(evsel, list, node) {
784 attr->exclude_kernel = ek; 739 evsel->attr.exclude_user = eu;
785 attr->exclude_hv = eh; 740 evsel->attr.exclude_kernel = ek;
786 attr->precise_ip = precise; 741 evsel->attr.exclude_hv = eh;
787 attr->exclude_host = eH; 742 evsel->attr.precise_ip = precise;
788 attr->exclude_guest = eG; 743 evsel->attr.exclude_host = eH;
744 evsel->attr.exclude_guest = eG;
745 }
789 746
790 return 0; 747 return 0;
791} 748}
792 749
793/* 750int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
794 * Each event can have multiple symbolic names.
795 * Symbolic names are (almost) exactly matched.
796 */
797static enum event_result
798parse_event_symbols(struct perf_evlist *evlist, const char **str,
799 struct perf_event_attr *attr)
800{ 751{
801 enum event_result ret; 752 LIST_HEAD(list);
802 753 LIST_HEAD(list_tmp);
803 ret = parse_tracepoint_event(evlist, str, attr); 754 YY_BUFFER_STATE buffer;
804 if (ret != EVT_FAILED) 755 int ret, idx = evlist->nr_entries;
805 goto modifier;
806
807 ret = parse_raw_event(str, attr);
808 if (ret != EVT_FAILED)
809 goto modifier;
810 756
811 ret = parse_numeric_event(str, attr); 757 buffer = parse_events__scan_string(str);
812 if (ret != EVT_FAILED)
813 goto modifier;
814 758
815 ret = parse_symbolic_event(str, attr); 759 ret = parse_events_parse(&list, &list_tmp, &idx);
816 if (ret != EVT_FAILED)
817 goto modifier;
818 760
819 ret = parse_generic_hw_event(str, attr); 761 parse_events__flush_buffer(buffer);
820 if (ret != EVT_FAILED) 762 parse_events__delete_buffer(buffer);
821 goto modifier;
822 763
823 ret = parse_breakpoint_event(str, attr); 764 if (!ret) {
824 if (ret != EVT_FAILED) 765 int entries = idx - evlist->nr_entries;
825 goto modifier; 766 perf_evlist__splice_list_tail(evlist, &list, entries);
826 767 return 0;
827 fprintf(stderr, "invalid or unsupported event: '%s'\n", *str);
828 fprintf(stderr, "Run 'perf list' for a list of valid events\n");
829 return EVT_FAILED;
830
831modifier:
832 if (parse_event_modifier(str, attr) < 0) {
833 fprintf(stderr, "invalid event modifier: '%s'\n", *str);
834 fprintf(stderr, "Run 'perf list' for a list of valid events and modifiers\n");
835
836 return EVT_FAILED;
837 } 768 }
838 769
770 /*
771 * There are 2 users - builtin-record and builtin-test objects.
772 * Both call perf_evlist__delete in case of error, so we dont
773 * need to bother.
774 */
775 fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
776 fprintf(stderr, "Run 'perf list' for a list of valid events\n");
839 return ret; 777 return ret;
840} 778}
841 779
842int parse_events(struct perf_evlist *evlist , const char *str, int unset __used)
843{
844 struct perf_event_attr attr;
845 enum event_result ret;
846 const char *ostr;
847
848 for (;;) {
849 ostr = str;
850 memset(&attr, 0, sizeof(attr));
851 event_attr_init(&attr);
852 ret = parse_event_symbols(evlist, &str, &attr);
853 if (ret == EVT_FAILED)
854 return -1;
855
856 if (!(*str == 0 || *str == ',' || isspace(*str)))
857 return -1;
858
859 if (ret != EVT_HANDLED_ALL) {
860 struct perf_evsel *evsel;
861 evsel = perf_evsel__new(&attr, evlist->nr_entries);
862 if (evsel == NULL)
863 return -1;
864 perf_evlist__add(evlist, evsel);
865
866 evsel->name = calloc(str - ostr + 1, 1);
867 if (!evsel->name)
868 return -1;
869 strncpy(evsel->name, ostr, str - ostr);
870 }
871
872 if (*str == 0)
873 break;
874 if (*str == ',')
875 ++str;
876 while (isspace(*str))
877 ++str;
878 }
879
880 return 0;
881}
882
883int parse_events_option(const struct option *opt, const char *str, 780int parse_events_option(const struct option *opt, const char *str,
884 int unset __used) 781 int unset __used)
885{ 782{
@@ -1052,8 +949,6 @@ int print_hwcache_events(const char *event_glob)
1052 return printed; 949 return printed;
1053} 950}
1054 951
1055#define MAX_NAME_LEN 100
1056
1057/* 952/*
1058 * Print the help text for the event symbols: 953 * Print the help text for the event symbols:
1059 */ 954 */
@@ -1102,8 +997,12 @@ void print_events(const char *event_glob)
1102 997
1103 printf("\n"); 998 printf("\n");
1104 printf(" %-50s [%s]\n", 999 printf(" %-50s [%s]\n",
1105 "rNNN (see 'perf list --help' on how to encode it)", 1000 "rNNN",
1106 event_type_descriptors[PERF_TYPE_RAW]); 1001 event_type_descriptors[PERF_TYPE_RAW]);
1002 printf(" %-50s [%s]\n",
1003 "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
1004 event_type_descriptors[PERF_TYPE_RAW]);
1005 printf(" (see 'perf list --help' on how to encode it)\n");
1107 printf("\n"); 1006 printf("\n");
1108 1007
1109 printf(" %-50s [%s]\n", 1008 printf(" %-50s [%s]\n",
@@ -1113,3 +1012,51 @@ void print_events(const char *event_glob)
1113 1012
1114 print_tracepoint_events(NULL, NULL); 1013 print_tracepoint_events(NULL, NULL);
1115} 1014}
1015
1016int parse_events__is_hardcoded_term(struct parse_events__term *term)
1017{
1018 return term->type <= PARSE_EVENTS__TERM_TYPE_HARDCODED_MAX;
1019}
1020
1021int parse_events__new_term(struct parse_events__term **_term, int type,
1022 char *config, char *str, long num)
1023{
1024 struct parse_events__term *term;
1025
1026 term = zalloc(sizeof(*term));
1027 if (!term)
1028 return -ENOMEM;
1029
1030 INIT_LIST_HEAD(&term->list);
1031 term->type = type;
1032 term->config = config;
1033
1034 switch (type) {
1035 case PARSE_EVENTS__TERM_TYPE_CONFIG:
1036 case PARSE_EVENTS__TERM_TYPE_CONFIG1:
1037 case PARSE_EVENTS__TERM_TYPE_CONFIG2:
1038 case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
1039 case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
1040 case PARSE_EVENTS__TERM_TYPE_NUM:
1041 term->val.num = num;
1042 break;
1043 case PARSE_EVENTS__TERM_TYPE_STR:
1044 term->val.str = str;
1045 break;
1046 default:
1047 return -EINVAL;
1048 }
1049
1050 *_term = term;
1051 return 0;
1052}
1053
1054void parse_events__free_terms(struct list_head *terms)
1055{
1056 struct parse_events__term *term, *h;
1057
1058 list_for_each_entry_safe(term, h, terms, list)
1059 free(term);
1060
1061 free(terms);
1062}
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 7e0cbe75d5f1..ca069f893381 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -33,6 +33,55 @@ extern int parse_filter(const struct option *opt, const char *str, int unset);
33 33
34#define EVENTS_HELP_MAX (128*1024) 34#define EVENTS_HELP_MAX (128*1024)
35 35
36enum {
37 PARSE_EVENTS__TERM_TYPE_CONFIG,
38 PARSE_EVENTS__TERM_TYPE_CONFIG1,
39 PARSE_EVENTS__TERM_TYPE_CONFIG2,
40 PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD,
41 PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
42 PARSE_EVENTS__TERM_TYPE_NUM,
43 PARSE_EVENTS__TERM_TYPE_STR,
44
45 PARSE_EVENTS__TERM_TYPE_HARDCODED_MAX =
46 PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
47};
48
49struct parse_events__term {
50 char *config;
51 union {
52 char *str;
53 long num;
54 } val;
55 int type;
56
57 struct list_head list;
58};
59
60int parse_events__is_hardcoded_term(struct parse_events__term *term);
61int parse_events__new_term(struct parse_events__term **term, int type,
62 char *config, char *str, long num);
63void parse_events__free_terms(struct list_head *terms);
64int parse_events_modifier(struct list_head *list __used, char *str __used);
65int parse_events_add_tracepoint(struct list_head *list, int *idx,
66 char *sys, char *event);
67int parse_events_add_raw(struct perf_evlist *evlist, unsigned long config,
68 unsigned long config1, unsigned long config2,
69 char *mod);
70int parse_events_add_numeric(struct list_head *list, int *idx,
71 unsigned long type, unsigned long config,
72 struct list_head *head_config);
73int parse_events_add_cache(struct list_head *list, int *idx,
74 char *type, char *op_result1, char *op_result2);
75int parse_events_add_breakpoint(struct list_head *list, int *idx,
76 void *ptr, char *type);
77int parse_events_add_pmu(struct list_head *list, int *idx,
78 char *pmu , struct list_head *head_config);
79void parse_events_update_lists(struct list_head *list_event,
80 struct list_head *list_all);
81void parse_events_error(struct list_head *list_all,
82 struct list_head *list_event,
83 int *idx, char const *msg);
84
36void print_events(const char *event_glob); 85void print_events(const char *event_glob);
37void print_events_type(u8 type); 86void print_events_type(u8 type);
38void print_tracepoint_events(const char *subsys_glob, const char *event_glob); 87void print_tracepoint_events(const char *subsys_glob, const char *event_glob);
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
new file mode 100644
index 000000000000..05d766e3ecb5
--- /dev/null
+++ b/tools/perf/util/parse-events.l
@@ -0,0 +1,127 @@
1
2%option prefix="parse_events_"
3
4%{
5#include <errno.h>
6#include "../perf.h"
7#include "parse-events-bison.h"
8#include "parse-events.h"
9
10static int __value(char *str, int base, int token)
11{
12 long num;
13
14 errno = 0;
15 num = strtoul(str, NULL, base);
16 if (errno)
17 return PE_ERROR;
18
19 parse_events_lval.num = num;
20 return token;
21}
22
23static int value(int base)
24{
25 return __value(parse_events_text, base, PE_VALUE);
26}
27
28static int raw(void)
29{
30 return __value(parse_events_text + 1, 16, PE_RAW);
31}
32
33static int str(int token)
34{
35 parse_events_lval.str = strdup(parse_events_text);
36 return token;
37}
38
39static int sym(int type, int config)
40{
41 parse_events_lval.num = (type << 16) + config;
42 return PE_VALUE_SYM;
43}
44
45static int term(int type)
46{
47 parse_events_lval.num = type;
48 return PE_TERM;
49}
50
51%}
52
53num_dec [0-9]+
54num_hex 0x[a-fA-F0-9]+
55num_raw_hex [a-fA-F0-9]+
56name [a-zA-Z_*?][a-zA-Z0-9_*?]*
57modifier_event [ukhp]{1,5}
58modifier_bp [rwx]
59
60%%
61cpu-cycles|cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
62stalled-cycles-frontend|idle-cycles-frontend { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
63stalled-cycles-backend|idle-cycles-backend { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
64instructions { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); }
65cache-references { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); }
66cache-misses { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); }
67branch-instructions|branches { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
68branch-misses { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); }
69bus-cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); }
70ref-cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
71cpu-clock { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); }
72task-clock { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); }
73page-faults|faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); }
74minor-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); }
75major-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); }
76context-switches|cs { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); }
77cpu-migrations|migrations { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
78alignment-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
79emulation-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
80
81L1-dcache|l1-d|l1d|L1-data |
82L1-icache|l1-i|l1i|L1-instruction |
83LLC|L2 |
84dTLB|d-tlb|Data-TLB |
85iTLB|i-tlb|Instruction-TLB |
86branch|branches|bpu|btb|bpc |
87node { return str(PE_NAME_CACHE_TYPE); }
88
89load|loads|read |
90store|stores|write |
91prefetch|prefetches |
92speculative-read|speculative-load |
93refs|Reference|ops|access |
94misses|miss { return str(PE_NAME_CACHE_OP_RESULT); }
95
96 /*
97 * These are event config hardcoded term names to be specified
98 * within xxx/.../ syntax. So far we dont clash with other names,
99 * so we can put them here directly. In case the we have a conflict
100 * in future, this needs to go into '//' condition block.
101 */
102config { return term(PARSE_EVENTS__TERM_TYPE_CONFIG); }
103config1 { return term(PARSE_EVENTS__TERM_TYPE_CONFIG1); }
104config2 { return term(PARSE_EVENTS__TERM_TYPE_CONFIG2); }
105period { return term(PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
106branch_type { return term(PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
107
108mem: { return PE_PREFIX_MEM; }
109r{num_raw_hex} { return raw(); }
110{num_dec} { return value(10); }
111{num_hex} { return value(16); }
112
113{modifier_event} { return str(PE_MODIFIER_EVENT); }
114{modifier_bp} { return str(PE_MODIFIER_BP); }
115{name} { return str(PE_NAME); }
116"/" { return '/'; }
117- { return '-'; }
118, { return ','; }
119: { return ':'; }
120= { return '='; }
121
122%%
123
124int parse_events_wrap(void)
125{
126 return 1;
127}
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
new file mode 100644
index 000000000000..d9637da7333c
--- /dev/null
+++ b/tools/perf/util/parse-events.y
@@ -0,0 +1,229 @@
1
2%name-prefix "parse_events_"
3%parse-param {struct list_head *list_all}
4%parse-param {struct list_head *list_event}
5%parse-param {int *idx}
6
7%{
8
9#define YYDEBUG 1
10
11#include <linux/compiler.h>
12#include <linux/list.h>
13#include "types.h"
14#include "util.h"
15#include "parse-events.h"
16
17extern int parse_events_lex (void);
18
19#define ABORT_ON(val) \
20do { \
21 if (val) \
22 YYABORT; \
23} while (0)
24
25%}
26
27%token PE_VALUE PE_VALUE_SYM PE_RAW PE_TERM
28%token PE_NAME
29%token PE_MODIFIER_EVENT PE_MODIFIER_BP
30%token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
31%token PE_PREFIX_MEM PE_PREFIX_RAW
32%token PE_ERROR
33%type <num> PE_VALUE
34%type <num> PE_VALUE_SYM
35%type <num> PE_RAW
36%type <num> PE_TERM
37%type <str> PE_NAME
38%type <str> PE_NAME_CACHE_TYPE
39%type <str> PE_NAME_CACHE_OP_RESULT
40%type <str> PE_MODIFIER_EVENT
41%type <str> PE_MODIFIER_BP
42%type <head> event_config
43%type <term> event_term
44
45%union
46{
47 char *str;
48 unsigned long num;
49 struct list_head *head;
50 struct parse_events__term *term;
51}
52%%
53
54events:
55events ',' event | event
56
57event:
58event_def PE_MODIFIER_EVENT
59{
60 /*
61 * Apply modifier on all events added by single event definition
62 * (there could be more events added for multiple tracepoint
63 * definitions via '*?'.
64 */
65 ABORT_ON(parse_events_modifier(list_event, $2));
66 parse_events_update_lists(list_event, list_all);
67}
68|
69event_def
70{
71 parse_events_update_lists(list_event, list_all);
72}
73
74event_def: event_pmu |
75 event_legacy_symbol |
76 event_legacy_cache sep_dc |
77 event_legacy_mem |
78 event_legacy_tracepoint sep_dc |
79 event_legacy_numeric sep_dc |
80 event_legacy_raw sep_dc
81
82event_pmu:
83PE_NAME '/' event_config '/'
84{
85 ABORT_ON(parse_events_add_pmu(list_event, idx, $1, $3));
86 parse_events__free_terms($3);
87}
88
89event_legacy_symbol:
90PE_VALUE_SYM '/' event_config '/'
91{
92 int type = $1 >> 16;
93 int config = $1 & 255;
94
95 ABORT_ON(parse_events_add_numeric(list_event, idx, type, config, $3));
96 parse_events__free_terms($3);
97}
98|
99PE_VALUE_SYM sep_slash_dc
100{
101 int type = $1 >> 16;
102 int config = $1 & 255;
103
104 ABORT_ON(parse_events_add_numeric(list_event, idx, type, config, NULL));
105}
106
107event_legacy_cache:
108PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
109{
110 ABORT_ON(parse_events_add_cache(list_event, idx, $1, $3, $5));
111}
112|
113PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
114{
115 ABORT_ON(parse_events_add_cache(list_event, idx, $1, $3, NULL));
116}
117|
118PE_NAME_CACHE_TYPE
119{
120 ABORT_ON(parse_events_add_cache(list_event, idx, $1, NULL, NULL));
121}
122
123event_legacy_mem:
124PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
125{
126 ABORT_ON(parse_events_add_breakpoint(list_event, idx, (void *) $2, $4));
127}
128|
129PE_PREFIX_MEM PE_VALUE sep_dc
130{
131 ABORT_ON(parse_events_add_breakpoint(list_event, idx, (void *) $2, NULL));
132}
133
134event_legacy_tracepoint:
135PE_NAME ':' PE_NAME
136{
137 ABORT_ON(parse_events_add_tracepoint(list_event, idx, $1, $3));
138}
139
140event_legacy_numeric:
141PE_VALUE ':' PE_VALUE
142{
143 ABORT_ON(parse_events_add_numeric(list_event, idx, $1, $3, NULL));
144}
145
146event_legacy_raw:
147PE_RAW
148{
149 ABORT_ON(parse_events_add_numeric(list_event, idx, PERF_TYPE_RAW, $1, NULL));
150}
151
152event_config:
153event_config ',' event_term
154{
155 struct list_head *head = $1;
156 struct parse_events__term *term = $3;
157
158 ABORT_ON(!head);
159 list_add_tail(&term->list, head);
160 $$ = $1;
161}
162|
163event_term
164{
165 struct list_head *head = malloc(sizeof(*head));
166 struct parse_events__term *term = $1;
167
168 ABORT_ON(!head);
169 INIT_LIST_HEAD(head);
170 list_add_tail(&term->list, head);
171 $$ = head;
172}
173
174event_term:
175PE_NAME '=' PE_NAME
176{
177 struct parse_events__term *term;
178
179 ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_STR,
180 $1, $3, 0));
181 $$ = term;
182}
183|
184PE_NAME '=' PE_VALUE
185{
186 struct parse_events__term *term;
187
188 ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_NUM,
189 $1, NULL, $3));
190 $$ = term;
191}
192|
193PE_NAME
194{
195 struct parse_events__term *term;
196
197 ABORT_ON(parse_events__new_term(&term, PARSE_EVENTS__TERM_TYPE_NUM,
198 $1, NULL, 1));
199 $$ = term;
200}
201|
202PE_TERM '=' PE_VALUE
203{
204 struct parse_events__term *term;
205
206 ABORT_ON(parse_events__new_term(&term, $1, NULL, NULL, $3));
207 $$ = term;
208}
209|
210PE_TERM
211{
212 struct parse_events__term *term;
213
214 ABORT_ON(parse_events__new_term(&term, $1, NULL, NULL, 1));
215 $$ = term;
216}
217
218sep_dc: ':' |
219
220sep_slash_dc: '/' | ':' |
221
222%%
223
224void parse_events_error(struct list_head *list_all __used,
225 struct list_head *list_event __used,
226 int *idx __used,
227 char const *msg __used)
228{
229}
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
new file mode 100644
index 000000000000..cb08a118e811
--- /dev/null
+++ b/tools/perf/util/pmu.c
@@ -0,0 +1,469 @@
1
2#include <linux/list.h>
3#include <sys/types.h>
4#include <sys/stat.h>
5#include <unistd.h>
6#include <stdio.h>
7#include <dirent.h>
8#include "sysfs.h"
9#include "util.h"
10#include "pmu.h"
11#include "parse-events.h"
12
13int perf_pmu_parse(struct list_head *list, char *name);
14extern FILE *perf_pmu_in;
15
16static LIST_HEAD(pmus);
17
18/*
19 * Parse & process all the sysfs attributes located under
20 * the directory specified in 'dir' parameter.
21 */
22static int pmu_format_parse(char *dir, struct list_head *head)
23{
24 struct dirent *evt_ent;
25 DIR *format_dir;
26 int ret = 0;
27
28 format_dir = opendir(dir);
29 if (!format_dir)
30 return -EINVAL;
31
32 while (!ret && (evt_ent = readdir(format_dir))) {
33 char path[PATH_MAX];
34 char *name = evt_ent->d_name;
35 FILE *file;
36
37 if (!strcmp(name, ".") || !strcmp(name, ".."))
38 continue;
39
40 snprintf(path, PATH_MAX, "%s/%s", dir, name);
41
42 ret = -EINVAL;
43 file = fopen(path, "r");
44 if (!file)
45 break;
46
47 perf_pmu_in = file;
48 ret = perf_pmu_parse(head, name);
49 fclose(file);
50 }
51
52 closedir(format_dir);
53 return ret;
54}
55
56/*
57 * Reading/parsing the default pmu format definition, which should be
58 * located at:
59 * /sys/bus/event_source/devices/<dev>/format as sysfs group attributes.
60 */
61static int pmu_format(char *name, struct list_head *format)
62{
63 struct stat st;
64 char path[PATH_MAX];
65 const char *sysfs;
66
67 sysfs = sysfs_find_mountpoint();
68 if (!sysfs)
69 return -1;
70
71 snprintf(path, PATH_MAX,
72 "%s/bus/event_source/devices/%s/format", sysfs, name);
73
74 if (stat(path, &st) < 0)
75 return -1;
76
77 if (pmu_format_parse(path, format))
78 return -1;
79
80 return 0;
81}
82
83/*
84 * Reading/parsing the default pmu type value, which should be
85 * located at:
86 * /sys/bus/event_source/devices/<dev>/type as sysfs attribute.
87 */
88static int pmu_type(char *name, __u32 *type)
89{
90 struct stat st;
91 char path[PATH_MAX];
92 const char *sysfs;
93 FILE *file;
94 int ret = 0;
95
96 sysfs = sysfs_find_mountpoint();
97 if (!sysfs)
98 return -1;
99
100 snprintf(path, PATH_MAX,
101 "%s/bus/event_source/devices/%s/type", sysfs, name);
102
103 if (stat(path, &st) < 0)
104 return -1;
105
106 file = fopen(path, "r");
107 if (!file)
108 return -EINVAL;
109
110 if (1 != fscanf(file, "%u", type))
111 ret = -1;
112
113 fclose(file);
114 return ret;
115}
116
117static struct perf_pmu *pmu_lookup(char *name)
118{
119 struct perf_pmu *pmu;
120 LIST_HEAD(format);
121 __u32 type;
122
123 /*
124 * The pmu data we store & need consists of the pmu
125 * type value and format definitions. Load both right
126 * now.
127 */
128 if (pmu_format(name, &format))
129 return NULL;
130
131 if (pmu_type(name, &type))
132 return NULL;
133
134 pmu = zalloc(sizeof(*pmu));
135 if (!pmu)
136 return NULL;
137
138 INIT_LIST_HEAD(&pmu->format);
139 list_splice(&format, &pmu->format);
140 pmu->name = strdup(name);
141 pmu->type = type;
142 return pmu;
143}
144
145static struct perf_pmu *pmu_find(char *name)
146{
147 struct perf_pmu *pmu;
148
149 list_for_each_entry(pmu, &pmus, list)
150 if (!strcmp(pmu->name, name))
151 return pmu;
152
153 return NULL;
154}
155
156struct perf_pmu *perf_pmu__find(char *name)
157{
158 struct perf_pmu *pmu;
159
160 /*
161 * Once PMU is loaded it stays in the list,
162 * so we keep us from multiple reading/parsing
163 * the pmu format definitions.
164 */
165 pmu = pmu_find(name);
166 if (pmu)
167 return pmu;
168
169 return pmu_lookup(name);
170}
171
172static struct perf_pmu__format*
173pmu_find_format(struct list_head *formats, char *name)
174{
175 struct perf_pmu__format *format;
176
177 list_for_each_entry(format, formats, list)
178 if (!strcmp(format->name, name))
179 return format;
180
181 return NULL;
182}
183
184/*
185 * Returns value based on the format definition (format parameter)
186 * and unformated value (value parameter).
187 *
188 * TODO maybe optimize a little ;)
189 */
190static __u64 pmu_format_value(unsigned long *format, __u64 value)
191{
192 unsigned long fbit, vbit;
193 __u64 v = 0;
194
195 for (fbit = 0, vbit = 0; fbit < PERF_PMU_FORMAT_BITS; fbit++) {
196
197 if (!test_bit(fbit, format))
198 continue;
199
200 if (!(value & (1llu << vbit++)))
201 continue;
202
203 v |= (1llu << fbit);
204 }
205
206 return v;
207}
208
209/*
210 * Setup one of config[12] attr members based on the
211 * user input data - temr parameter.
212 */
213static int pmu_config_term(struct list_head *formats,
214 struct perf_event_attr *attr,
215 struct parse_events__term *term)
216{
217 struct perf_pmu__format *format;
218 __u64 *vp;
219
220 /*
221 * Support only for hardcoded and numnerial terms.
222 * Hardcoded terms should be already in, so nothing
223 * to be done for them.
224 */
225 if (parse_events__is_hardcoded_term(term))
226 return 0;
227
228 if (term->type != PARSE_EVENTS__TERM_TYPE_NUM)
229 return -EINVAL;
230
231 format = pmu_find_format(formats, term->config);
232 if (!format)
233 return -EINVAL;
234
235 switch (format->value) {
236 case PERF_PMU_FORMAT_VALUE_CONFIG:
237 vp = &attr->config;
238 break;
239 case PERF_PMU_FORMAT_VALUE_CONFIG1:
240 vp = &attr->config1;
241 break;
242 case PERF_PMU_FORMAT_VALUE_CONFIG2:
243 vp = &attr->config2;
244 break;
245 default:
246 return -EINVAL;
247 }
248
249 *vp |= pmu_format_value(format->bits, term->val.num);
250 return 0;
251}
252
253static int pmu_config(struct list_head *formats, struct perf_event_attr *attr,
254 struct list_head *head_terms)
255{
256 struct parse_events__term *term, *h;
257
258 list_for_each_entry_safe(term, h, head_terms, list)
259 if (pmu_config_term(formats, attr, term))
260 return -EINVAL;
261
262 return 0;
263}
264
265/*
266 * Configures event's 'attr' parameter based on the:
267 * 1) users input - specified in terms parameter
268 * 2) pmu format definitions - specified by pmu parameter
269 */
270int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
271 struct list_head *head_terms)
272{
273 attr->type = pmu->type;
274 return pmu_config(&pmu->format, attr, head_terms);
275}
276
277int perf_pmu__new_format(struct list_head *list, char *name,
278 int config, unsigned long *bits)
279{
280 struct perf_pmu__format *format;
281
282 format = zalloc(sizeof(*format));
283 if (!format)
284 return -ENOMEM;
285
286 format->name = strdup(name);
287 format->value = config;
288 memcpy(format->bits, bits, sizeof(format->bits));
289
290 list_add_tail(&format->list, list);
291 return 0;
292}
293
294void perf_pmu__set_format(unsigned long *bits, long from, long to)
295{
296 long b;
297
298 if (!to)
299 to = from;
300
301 memset(bits, 0, BITS_TO_LONGS(PERF_PMU_FORMAT_BITS));
302 for (b = from; b <= to; b++)
303 set_bit(b, bits);
304}
305
306/* Simulated format definitions. */
307static struct test_format {
308 const char *name;
309 const char *value;
310} test_formats[] = {
311 { "krava01", "config:0-1,62-63\n", },
312 { "krava02", "config:10-17\n", },
313 { "krava03", "config:5\n", },
314 { "krava11", "config1:0,2,4,6,8,20-28\n", },
315 { "krava12", "config1:63\n", },
316 { "krava13", "config1:45-47\n", },
317 { "krava21", "config2:0-3,10-13,20-23,30-33,40-43,50-53,60-63\n", },
318 { "krava22", "config2:8,18,48,58\n", },
319 { "krava23", "config2:28-29,38\n", },
320};
321
322#define TEST_FORMATS_CNT (sizeof(test_formats) / sizeof(struct test_format))
323
324/* Simulated users input. */
325static struct parse_events__term test_terms[] = {
326 {
327 .config = (char *) "krava01",
328 .val.num = 15,
329 .type = PARSE_EVENTS__TERM_TYPE_NUM,
330 },
331 {
332 .config = (char *) "krava02",
333 .val.num = 170,
334 .type = PARSE_EVENTS__TERM_TYPE_NUM,
335 },
336 {
337 .config = (char *) "krava03",
338 .val.num = 1,
339 .type = PARSE_EVENTS__TERM_TYPE_NUM,
340 },
341 {
342 .config = (char *) "krava11",
343 .val.num = 27,
344 .type = PARSE_EVENTS__TERM_TYPE_NUM,
345 },
346 {
347 .config = (char *) "krava12",
348 .val.num = 1,
349 .type = PARSE_EVENTS__TERM_TYPE_NUM,
350 },
351 {
352 .config = (char *) "krava13",
353 .val.num = 2,
354 .type = PARSE_EVENTS__TERM_TYPE_NUM,
355 },
356 {
357 .config = (char *) "krava21",
358 .val.num = 119,
359 .type = PARSE_EVENTS__TERM_TYPE_NUM,
360 },
361 {
362 .config = (char *) "krava22",
363 .val.num = 11,
364 .type = PARSE_EVENTS__TERM_TYPE_NUM,
365 },
366 {
367 .config = (char *) "krava23",
368 .val.num = 2,
369 .type = PARSE_EVENTS__TERM_TYPE_NUM,
370 },
371};
372#define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term))
373
374/*
375 * Prepare format directory data, exported by kernel
376 * at /sys/bus/event_source/devices/<dev>/format.
377 */
378static char *test_format_dir_get(void)
379{
380 static char dir[PATH_MAX];
381 unsigned int i;
382
383 snprintf(dir, PATH_MAX, "/tmp/perf-pmu-test-format-XXXXXX");
384 if (!mkdtemp(dir))
385 return NULL;
386
387 for (i = 0; i < TEST_FORMATS_CNT; i++) {
388 static char name[PATH_MAX];
389 struct test_format *format = &test_formats[i];
390 FILE *file;
391
392 snprintf(name, PATH_MAX, "%s/%s", dir, format->name);
393
394 file = fopen(name, "w");
395 if (!file)
396 return NULL;
397
398 if (1 != fwrite(format->value, strlen(format->value), 1, file))
399 break;
400
401 fclose(file);
402 }
403
404 return dir;
405}
406
407/* Cleanup format directory. */
408static int test_format_dir_put(char *dir)
409{
410 char buf[PATH_MAX];
411 snprintf(buf, PATH_MAX, "rm -f %s/*\n", dir);
412 if (system(buf))
413 return -1;
414
415 snprintf(buf, PATH_MAX, "rmdir %s\n", dir);
416 return system(buf);
417}
418
419static struct list_head *test_terms_list(void)
420{
421 static LIST_HEAD(terms);
422 unsigned int i;
423
424 for (i = 0; i < TERMS_CNT; i++)
425 list_add_tail(&test_terms[i].list, &terms);
426
427 return &terms;
428}
429
430#undef TERMS_CNT
431
432int perf_pmu__test(void)
433{
434 char *format = test_format_dir_get();
435 LIST_HEAD(formats);
436 struct list_head *terms = test_terms_list();
437 int ret;
438
439 if (!format)
440 return -EINVAL;
441
442 do {
443 struct perf_event_attr attr;
444
445 memset(&attr, 0, sizeof(attr));
446
447 ret = pmu_format_parse(format, &formats);
448 if (ret)
449 break;
450
451 ret = pmu_config(&formats, &attr, terms);
452 if (ret)
453 break;
454
455 ret = -EINVAL;
456
457 if (attr.config != 0xc00000000002a823)
458 break;
459 if (attr.config1 != 0x8000400000000145)
460 break;
461 if (attr.config2 != 0x0400000020041d07)
462 break;
463
464 ret = 0;
465 } while (0);
466
467 test_format_dir_put(format);
468 return ret;
469}
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
new file mode 100644
index 000000000000..68c0db965e1f
--- /dev/null
+++ b/tools/perf/util/pmu.h
@@ -0,0 +1,41 @@
1#ifndef __PMU_H
2#define __PMU_H
3
4#include <linux/bitops.h>
5#include "../../../include/linux/perf_event.h"
6
7enum {
8 PERF_PMU_FORMAT_VALUE_CONFIG,
9 PERF_PMU_FORMAT_VALUE_CONFIG1,
10 PERF_PMU_FORMAT_VALUE_CONFIG2,
11};
12
13#define PERF_PMU_FORMAT_BITS 64
14
15struct perf_pmu__format {
16 char *name;
17 int value;
18 DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
19 struct list_head list;
20};
21
22struct perf_pmu {
23 char *name;
24 __u32 type;
25 struct list_head format;
26 struct list_head list;
27};
28
29struct perf_pmu *perf_pmu__find(char *name);
30int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
31 struct list_head *head_terms);
32
33int perf_pmu_wrap(void);
34void perf_pmu_error(struct list_head *list, char *name, char const *msg);
35
36int perf_pmu__new_format(struct list_head *list, char *name,
37 int config, unsigned long *bits);
38void perf_pmu__set_format(unsigned long *bits, long from, long to);
39
40int perf_pmu__test(void);
41#endif /* __PMU_H */
diff --git a/tools/perf/util/pmu.l b/tools/perf/util/pmu.l
new file mode 100644
index 000000000000..a15d9fbd7c0e
--- /dev/null
+++ b/tools/perf/util/pmu.l
@@ -0,0 +1,43 @@
1%option prefix="perf_pmu_"
2
3%{
4#include <stdlib.h>
5#include <linux/bitops.h>
6#include "pmu.h"
7#include "pmu-bison.h"
8
9static int value(int base)
10{
11 long num;
12
13 errno = 0;
14 num = strtoul(perf_pmu_text, NULL, base);
15 if (errno)
16 return PP_ERROR;
17
18 perf_pmu_lval.num = num;
19 return PP_VALUE;
20}
21
22%}
23
24num_dec [0-9]+
25
26%%
27
28{num_dec} { return value(10); }
29config { return PP_CONFIG; }
30config1 { return PP_CONFIG1; }
31config2 { return PP_CONFIG2; }
32- { return '-'; }
33: { return ':'; }
34, { return ','; }
35. { ; }
36\n { ; }
37
38%%
39
40int perf_pmu_wrap(void)
41{
42 return 1;
43}
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
new file mode 100644
index 000000000000..20ea77e93169
--- /dev/null
+++ b/tools/perf/util/pmu.y
@@ -0,0 +1,93 @@
1
2%name-prefix "perf_pmu_"
3%parse-param {struct list_head *format}
4%parse-param {char *name}
5
6%{
7
8#include <linux/compiler.h>
9#include <linux/list.h>
10#include <linux/bitmap.h>
11#include <string.h>
12#include "pmu.h"
13
14extern int perf_pmu_lex (void);
15
16#define ABORT_ON(val) \
17do { \
18 if (val) \
19 YYABORT; \
20} while (0)
21
22%}
23
24%token PP_CONFIG PP_CONFIG1 PP_CONFIG2
25%token PP_VALUE PP_ERROR
26%type <num> PP_VALUE
27%type <bits> bit_term
28%type <bits> bits
29
30%union
31{
32 unsigned long num;
33 DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
34}
35
36%%
37
38format:
39format format_term
40|
41format_term
42
43format_term:
44PP_CONFIG ':' bits
45{
46 ABORT_ON(perf_pmu__new_format(format, name,
47 PERF_PMU_FORMAT_VALUE_CONFIG,
48 $3));
49}
50|
51PP_CONFIG1 ':' bits
52{
53 ABORT_ON(perf_pmu__new_format(format, name,
54 PERF_PMU_FORMAT_VALUE_CONFIG1,
55 $3));
56}
57|
58PP_CONFIG2 ':' bits
59{
60 ABORT_ON(perf_pmu__new_format(format, name,
61 PERF_PMU_FORMAT_VALUE_CONFIG2,
62 $3));
63}
64
65bits:
66bits ',' bit_term
67{
68 bitmap_or($$, $1, $3, 64);
69}
70|
71bit_term
72{
73 memcpy($$, $1, sizeof($1));
74}
75
76bit_term:
77PP_VALUE '-' PP_VALUE
78{
79 perf_pmu__set_format($$, $1, $3);
80}
81|
82PP_VALUE
83{
84 perf_pmu__set_format($$, $1, 0);
85}
86
87%%
88
89void perf_pmu_error(struct list_head *list __used,
90 char *name __used,
91 char const *msg __used)
92{
93}
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 2cc162d3b78c..d448984ed789 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -972,10 +972,12 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
972 struct dwarf_callback_param *param = data; 972 struct dwarf_callback_param *param = data;
973 struct probe_finder *pf = param->data; 973 struct probe_finder *pf = param->data;
974 struct perf_probe_point *pp = &pf->pev->point; 974 struct perf_probe_point *pp = &pf->pev->point;
975 Dwarf_Attribute attr;
975 976
976 /* Check tag and diename */ 977 /* Check tag and diename */
977 if (dwarf_tag(sp_die) != DW_TAG_subprogram || 978 if (dwarf_tag(sp_die) != DW_TAG_subprogram ||
978 !die_compare_name(sp_die, pp->function)) 979 !die_compare_name(sp_die, pp->function) ||
980 dwarf_attr(sp_die, DW_AT_declaration, &attr))
979 return DWARF_CB_OK; 981 return DWARF_CB_OK;
980 982
981 /* Check declared file */ 983 /* Check declared file */
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 002ebbf59f48..9412e3b05f68 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -140,6 +140,7 @@ struct perf_session *perf_session__new(const char *filename, int mode,
140 INIT_LIST_HEAD(&self->ordered_samples.sample_cache); 140 INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
141 INIT_LIST_HEAD(&self->ordered_samples.to_free); 141 INIT_LIST_HEAD(&self->ordered_samples.to_free);
142 machine__init(&self->host_machine, "", HOST_KERNEL_ID); 142 machine__init(&self->host_machine, "", HOST_KERNEL_ID);
143 hists__init(&self->hists);
143 144
144 if (mode == O_RDONLY) { 145 if (mode == O_RDONLY) {
145 if (perf_session__open(self, force) < 0) 146 if (perf_session__open(self, force) < 0)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 5dd83c3e2c0c..c0a028c3ebaf 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1,6 +1,5 @@
1#include <dirent.h> 1#include <dirent.h>
2#include <errno.h> 2#include <errno.h>
3#include <libgen.h>
4#include <stdlib.h> 3#include <stdlib.h>
5#include <stdio.h> 4#include <stdio.h>
6#include <string.h> 5#include <string.h>
@@ -51,6 +50,8 @@ struct symbol_conf symbol_conf = {
51 50
52int dso__name_len(const struct dso *dso) 51int dso__name_len(const struct dso *dso)
53{ 52{
53 if (!dso)
54 return strlen("[unknown]");
54 if (verbose) 55 if (verbose)
55 return dso->long_name_len; 56 return dso->long_name_len;
56 57
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index a4088ced1e64..dfd1bd8371a4 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -722,7 +722,7 @@ static char *event_read_name(void)
722static int event_read_id(void) 722static int event_read_id(void)
723{ 723{
724 char *token; 724 char *token;
725 int id; 725 int id = -1;
726 726
727 if (read_expected_item(EVENT_ITEM, "ID") < 0) 727 if (read_expected_item(EVENT_ITEM, "ID") < 0)
728 return -1; 728 return -1;
@@ -731,15 +731,13 @@ static int event_read_id(void)
731 return -1; 731 return -1;
732 732
733 if (read_expect_type(EVENT_ITEM, &token) < 0) 733 if (read_expect_type(EVENT_ITEM, &token) < 0)
734 goto fail; 734 goto free;
735 735
736 id = strtoul(token, NULL, 0); 736 id = strtoul(token, NULL, 0);
737 free_token(token);
738 return id;
739 737
740 fail: 738 free:
741 free_token(token); 739 free_token(token);
742 return -1; 740 return id;
743} 741}
744 742
745static int field_is_string(struct format_field *field) 743static int field_is_string(struct format_field *field)
diff --git a/tools/perf/util/ui/browser.h b/tools/perf/util/ui/browser.h
index 84d761b730c1..6ee82f60feaf 100644
--- a/tools/perf/util/ui/browser.h
+++ b/tools/perf/util/ui/browser.h
@@ -49,6 +49,8 @@ int ui_browser__warning(struct ui_browser *browser, int timeout,
49 const char *format, ...); 49 const char *format, ...);
50int ui_browser__help_window(struct ui_browser *browser, const char *text); 50int ui_browser__help_window(struct ui_browser *browser, const char *text);
51bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text); 51bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text);
52int ui_browser__input_window(const char *title, const char *text, char *input,
53 const char *exit_msg, int delay_sec);
52 54
53void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence); 55void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence);
54unsigned int ui_browser__argv_refresh(struct ui_browser *browser); 56unsigned int ui_browser__argv_refresh(struct ui_browser *browser);
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index fa530fcc764a..d7a1c4afe28b 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -879,6 +879,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
879 char *options[16]; 879 char *options[16];
880 int nr_options = 0; 880 int nr_options = 0;
881 int key = -1; 881 int key = -1;
882 char buf[64];
882 883
883 if (browser == NULL) 884 if (browser == NULL)
884 return -1; 885 return -1;
@@ -933,6 +934,16 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
933 goto zoom_dso; 934 goto zoom_dso;
934 case 't': 935 case 't':
935 goto zoom_thread; 936 goto zoom_thread;
937 case 's':
938 if (ui_browser__input_window("Symbol to show",
939 "Please enter the name of symbol you want to see",
940 buf, "ENTER: OK, ESC: Cancel",
941 delay_secs * 2) == K_ENTER) {
942 self->symbol_filter_str = *buf ? buf : NULL;
943 hists__filter_by_symbol(self);
944 hist_browser__reset(browser);
945 }
946 continue;
936 case K_F1: 947 case K_F1:
937 case 'h': 948 case 'h':
938 case '?': 949 case '?':
@@ -950,7 +961,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
950 "C Collapse all callchains\n" 961 "C Collapse all callchains\n"
951 "E Expand all callchains\n" 962 "E Expand all callchains\n"
952 "d Zoom into current DSO\n" 963 "d Zoom into current DSO\n"
953 "t Zoom into current Thread"); 964 "t Zoom into current Thread\n"
965 "s Filter symbol by name");
954 continue; 966 continue;
955 case K_ENTER: 967 case K_ENTER:
956 case K_RIGHT: 968 case K_RIGHT:
diff --git a/tools/perf/util/ui/keysyms.h b/tools/perf/util/ui/keysyms.h
index 3458b1985761..809eca5707fa 100644
--- a/tools/perf/util/ui/keysyms.h
+++ b/tools/perf/util/ui/keysyms.h
@@ -16,6 +16,8 @@
16#define K_TAB '\t' 16#define K_TAB '\t'
17#define K_UNTAB SL_KEY_UNTAB 17#define K_UNTAB SL_KEY_UNTAB
18#define K_UP SL_KEY_UP 18#define K_UP SL_KEY_UP
19#define K_BKSPC 0x7f
20#define K_DEL SL_KEY_DELETE
19 21
20/* Not really keys */ 22/* Not really keys */
21#define K_TIMER -1 23#define K_TIMER -1
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c
index 45daa7c41dad..ad4374a16bb0 100644
--- a/tools/perf/util/ui/util.c
+++ b/tools/perf/util/ui/util.c
@@ -69,6 +69,88 @@ int ui__popup_menu(int argc, char * const argv[])
69 return popup_menu__run(&menu); 69 return popup_menu__run(&menu);
70} 70}
71 71
72int ui_browser__input_window(const char *title, const char *text, char *input,
73 const char *exit_msg, int delay_secs)
74{
75 int x, y, len, key;
76 int max_len = 60, nr_lines = 0;
77 static char buf[50];
78 const char *t;
79
80 t = text;
81 while (1) {
82 const char *sep = strchr(t, '\n');
83
84 if (sep == NULL)
85 sep = strchr(t, '\0');
86 len = sep - t;
87 if (max_len < len)
88 max_len = len;
89 ++nr_lines;
90 if (*sep == '\0')
91 break;
92 t = sep + 1;
93 }
94
95 max_len += 2;
96 nr_lines += 8;
97 y = SLtt_Screen_Rows / 2 - nr_lines / 2;
98 x = SLtt_Screen_Cols / 2 - max_len / 2;
99
100 SLsmg_set_color(0);
101 SLsmg_draw_box(y, x++, nr_lines, max_len);
102 if (title) {
103 SLsmg_gotorc(y, x + 1);
104 SLsmg_write_string((char *)title);
105 }
106 SLsmg_gotorc(++y, x);
107 nr_lines -= 7;
108 max_len -= 2;
109 SLsmg_write_wrapped_string((unsigned char *)text, y, x,
110 nr_lines, max_len, 1);
111 y += nr_lines;
112 len = 5;
113 while (len--) {
114 SLsmg_gotorc(y + len - 1, x);
115 SLsmg_write_nstring((char *)" ", max_len);
116 }
117 SLsmg_draw_box(y++, x + 1, 3, max_len - 2);
118
119 SLsmg_gotorc(y + 3, x);
120 SLsmg_write_nstring((char *)exit_msg, max_len);
121 SLsmg_refresh();
122
123 x += 2;
124 len = 0;
125 key = ui__getch(delay_secs);
126 while (key != K_TIMER && key != K_ENTER && key != K_ESC) {
127 if (key == K_BKSPC) {
128 if (len == 0)
129 goto next_key;
130 SLsmg_gotorc(y, x + --len);
131 SLsmg_write_char(' ');
132 } else {
133 buf[len] = key;
134 SLsmg_gotorc(y, x + len++);
135 SLsmg_write_char(key);
136 }
137 SLsmg_refresh();
138
139 /* XXX more graceful overflow handling needed */
140 if (len == sizeof(buf) - 1) {
141 ui_helpline__push("maximum size of symbol name reached!");
142 key = K_ENTER;
143 break;
144 }
145next_key:
146 key = ui__getch(delay_secs);
147 }
148
149 buf[len] = '\0';
150 strncpy(input, buf, len+1);
151 return key;
152}
153
72int ui__question_window(const char *title, const char *text, 154int ui__question_window(const char *title, const char *text,
73 const char *exit_msg, int delay_secs) 155 const char *exit_msg, int delay_secs)
74{ 156{
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index e8a03aceceb1..a93e06cfcc2a 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -19,6 +19,16 @@
19# along with this program; if not, write to the Free Software 19# along with this program; if not, write to the Free Software
20# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21# 21#
22OUTPUT=./
23ifeq ("$(origin O)", "command line")
24 OUTPUT := $(O)/
25endif
26
27ifneq ($(OUTPUT),)
28# check that the output directory actually exists
29OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
30$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
31endif
22 32
23# --- CONFIGURATION BEGIN --- 33# --- CONFIGURATION BEGIN ---
24 34
@@ -87,6 +97,7 @@ AR = $(CROSS)ar
87STRIP = $(CROSS)strip 97STRIP = $(CROSS)strip
88RANLIB = $(CROSS)ranlib 98RANLIB = $(CROSS)ranlib
89HOSTCC = gcc 99HOSTCC = gcc
100MKDIR = mkdir
90 101
91 102
92# Now we set up the build system 103# Now we set up the build system
@@ -95,7 +106,7 @@ HOSTCC = gcc
95# set up PWD so that older versions of make will work with our build. 106# set up PWD so that older versions of make will work with our build.
96PWD = $(shell pwd) 107PWD = $(shell pwd)
97 108
98GMO_FILES = ${shell for HLANG in ${LANGUAGES}; do echo po/$$HLANG.gmo; done;} 109GMO_FILES = ${shell for HLANG in ${LANGUAGES}; do echo $(OUTPUT)po/$$HLANG.gmo; done;}
99 110
100export CROSS CC AR STRIP RANLIB CFLAGS LDFLAGS LIB_OBJS 111export CROSS CC AR STRIP RANLIB CFLAGS LDFLAGS LIB_OBJS
101 112
@@ -122,15 +133,18 @@ UTIL_OBJS = utils/helpers/amd.o utils/helpers/topology.o utils/helpers/msr.o \
122 utils/cpupower.o utils/cpufreq-info.o utils/cpufreq-set.o \ 133 utils/cpupower.o utils/cpufreq-info.o utils/cpufreq-set.o \
123 utils/cpupower-set.o utils/cpupower-info.o utils/cpuidle-info.o 134 utils/cpupower-set.o utils/cpupower-info.o utils/cpuidle-info.o
124 135
136UTIL_SRC := $(UTIL_OBJS:.o=.c)
137
138UTIL_OBJS := $(addprefix $(OUTPUT),$(UTIL_OBJS))
139
125UTIL_HEADERS = utils/helpers/helpers.h utils/idle_monitor/cpupower-monitor.h \ 140UTIL_HEADERS = utils/helpers/helpers.h utils/idle_monitor/cpupower-monitor.h \
126 utils/helpers/bitmask.h \ 141 utils/helpers/bitmask.h \
127 utils/idle_monitor/idle_monitors.h utils/idle_monitor/idle_monitors.def 142 utils/idle_monitor/idle_monitors.h utils/idle_monitor/idle_monitors.def
128 143
129UTIL_SRC := $(UTIL_OBJS:.o=.c)
130
131LIB_HEADERS = lib/cpufreq.h lib/sysfs.h 144LIB_HEADERS = lib/cpufreq.h lib/sysfs.h
132LIB_SRC = lib/cpufreq.c lib/sysfs.c 145LIB_SRC = lib/cpufreq.c lib/sysfs.c
133LIB_OBJS = lib/cpufreq.o lib/sysfs.o 146LIB_OBJS = lib/cpufreq.o lib/sysfs.o
147LIB_OBJS := $(addprefix $(OUTPUT),$(LIB_OBJS))
134 148
135CFLAGS += -pipe 149CFLAGS += -pipe
136 150
@@ -168,83 +182,90 @@ endif
168 182
169# the actual make rules 183# the actual make rules
170 184
171all: libcpupower cpupower $(COMPILE_NLS) $(COMPILE_BENCH) 185all: libcpupower $(OUTPUT)cpupower $(COMPILE_NLS) $(COMPILE_BENCH)
172 186
173lib/%.o: $(LIB_SRC) $(LIB_HEADERS) 187$(OUTPUT)lib/%.o: $(LIB_SRC) $(LIB_HEADERS)
174 $(ECHO) " CC " $@ 188 $(ECHO) " CC " $@
175 $(QUIET) $(CC) $(CFLAGS) -fPIC -o $@ -c lib/$*.c 189 $(QUIET) $(CC) $(CFLAGS) -fPIC -o $@ -c lib/$*.c
176 190
177libcpupower.so.$(LIB_MAJ): $(LIB_OBJS) 191$(OUTPUT)libcpupower.so.$(LIB_MAJ): $(LIB_OBJS)
178 $(ECHO) " LD " $@ 192 $(ECHO) " LD " $@
179 $(QUIET) $(CC) -shared $(CFLAGS) $(LDFLAGS) -o $@ \ 193 $(QUIET) $(CC) -shared $(CFLAGS) $(LDFLAGS) -o $@ \
180 -Wl,-soname,libcpupower.so.$(LIB_MIN) $(LIB_OBJS) 194 -Wl,-soname,libcpupower.so.$(LIB_MIN) $(LIB_OBJS)
181 @ln -sf $@ libcpupower.so 195 @ln -sf $(@F) $(OUTPUT)libcpupower.so
182 @ln -sf $@ libcpupower.so.$(LIB_MIN) 196 @ln -sf $(@F) $(OUTPUT)libcpupower.so.$(LIB_MIN)
183 197
184libcpupower: libcpupower.so.$(LIB_MAJ) 198libcpupower: $(OUTPUT)libcpupower.so.$(LIB_MAJ)
185 199
186# Let all .o files depend on its .c file and all headers 200# Let all .o files depend on its .c file and all headers
187# Might be worth to put this into utils/Makefile at some point of time 201# Might be worth to put this into utils/Makefile at some point of time
188$(UTIL_OBJS): $(UTIL_HEADERS) 202$(UTIL_OBJS): $(UTIL_HEADERS)
189 203
190.c.o: 204$(OUTPUT)%.o: %.c
191 $(ECHO) " CC " $@ 205 $(ECHO) " CC " $@
192 $(QUIET) $(CC) $(CFLAGS) -I./lib -I ./utils -o $@ -c $*.c 206 $(QUIET) $(CC) $(CFLAGS) -I./lib -I ./utils -o $@ -c $*.c
193 207
194cpupower: $(UTIL_OBJS) libcpupower.so.$(LIB_MAJ) 208$(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ)
195 $(ECHO) " CC " $@ 209 $(ECHO) " CC " $@
196 $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) -lcpupower -lrt -lpci -L. -o $@ $(UTIL_OBJS) 210 $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@
197 $(QUIET) $(STRIPCMD) $@ 211 $(QUIET) $(STRIPCMD) $@
198 212
199po/$(PACKAGE).pot: $(UTIL_SRC) 213$(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC)
200 $(ECHO) " GETTEXT " $@ 214 $(ECHO) " GETTEXT " $@
201 $(QUIET) xgettext --default-domain=$(PACKAGE) --add-comments \ 215 $(QUIET) xgettext --default-domain=$(PACKAGE) --add-comments \
202 --keyword=_ --keyword=N_ $(UTIL_SRC) && \ 216 --keyword=_ --keyword=N_ $(UTIL_SRC) -p $(@D) -o $(@F)
203 test -f $(PACKAGE).po && \
204 mv -f $(PACKAGE).po po/$(PACKAGE).pot
205 217
206po/%.gmo: po/%.po 218$(OUTPUT)po/%.gmo: po/%.po
207 $(ECHO) " MSGFMT " $@ 219 $(ECHO) " MSGFMT " $@
208 $(QUIET) msgfmt -o $@ po/$*.po 220 $(QUIET) msgfmt -o $@ po/$*.po
209 221
210create-gmo: ${GMO_FILES} 222create-gmo: ${GMO_FILES}
211 223
212update-po: po/$(PACKAGE).pot 224update-po: $(OUTPUT)po/$(PACKAGE).pot
213 $(ECHO) " MSGMRG " $@ 225 $(ECHO) " MSGMRG " $@
214 $(QUIET) @for HLANG in $(LANGUAGES); do \ 226 $(QUIET) @for HLANG in $(LANGUAGES); do \
215 echo -n "Updating $$HLANG "; \ 227 echo -n "Updating $$HLANG "; \
216 if msgmerge po/$$HLANG.po po/$(PACKAGE).pot -o \ 228 if msgmerge po/$$HLANG.po $< -o \
217 po/$$HLANG.new.po; then \ 229 $(OUTPUT)po/$$HLANG.new.po; then \
218 mv -f po/$$HLANG.new.po po/$$HLANG.po; \ 230 mv -f $(OUTPUT)po/$$HLANG.new.po $(OUTPUT)po/$$HLANG.po; \
219 else \ 231 else \
220 echo "msgmerge for $$HLANG failed!"; \ 232 echo "msgmerge for $$HLANG failed!"; \
221 rm -f po/$$HLANG.new.po; \ 233 rm -f $(OUTPUT)po/$$HLANG.new.po; \
222 fi; \ 234 fi; \
223 done; 235 done;
224 236
225compile-bench: libcpupower.so.$(LIB_MAJ) 237compile-bench: $(OUTPUT)libcpupower.so.$(LIB_MAJ)
226 @V=$(V) confdir=$(confdir) $(MAKE) -C bench 238 @V=$(V) confdir=$(confdir) $(MAKE) -C bench O=$(OUTPUT)
239
240# we compile into subdirectories. if the target directory is not the
241# source directory, they might not exists. So we depend the various
242# files onto their directories.
243DIRECTORY_DEPS = $(LIB_OBJS) $(UTIL_OBJS) $(GMO_FILES)
244$(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS)))
245
246# In the second step, we make a rule to actually create these directories
247$(sort $(dir $(DIRECTORY_DEPS))):
248 $(ECHO) " MKDIR " $@
249 $(QUIET) $(MKDIR) -p $@ 2>/dev/null
227 250
228clean: 251clean:
229 -find . \( -not -type d \) -and \( -name '*~' -o -name '*.[oas]' \) -type f -print \ 252 -find $(OUTPUT) \( -not -type d \) -and \( -name '*~' -o -name '*.[oas]' \) -type f -print \
230 | xargs rm -f 253 | xargs rm -f
231 -rm -f $(UTIL_BINS) 254 -rm -f $(OUTPUT)cpupower
232 -rm -f $(IDLE_OBJS) 255 -rm -f $(OUTPUT)libcpupower.so*
233 -rm -f cpupower 256 -rm -rf $(OUTPUT)po/*.{gmo,pot}
234 -rm -f libcpupower.so* 257 $(MAKE) -C bench O=$(OUTPUT) clean
235 -rm -rf po/*.gmo po/*.pot
236 $(MAKE) -C bench clean
237 258
238 259
239install-lib: 260install-lib:
240 $(INSTALL) -d $(DESTDIR)${libdir} 261 $(INSTALL) -d $(DESTDIR)${libdir}
241 $(CP) libcpupower.so* $(DESTDIR)${libdir}/ 262 $(CP) $(OUTPUT)libcpupower.so* $(DESTDIR)${libdir}/
242 $(INSTALL) -d $(DESTDIR)${includedir} 263 $(INSTALL) -d $(DESTDIR)${includedir}
243 $(INSTALL_DATA) lib/cpufreq.h $(DESTDIR)${includedir}/cpufreq.h 264 $(INSTALL_DATA) lib/cpufreq.h $(DESTDIR)${includedir}/cpufreq.h
244 265
245install-tools: 266install-tools:
246 $(INSTALL) -d $(DESTDIR)${bindir} 267 $(INSTALL) -d $(DESTDIR)${bindir}
247 $(INSTALL_PROGRAM) cpupower $(DESTDIR)${bindir} 268 $(INSTALL_PROGRAM) $(OUTPUT)cpupower $(DESTDIR)${bindir}
248 269
249install-man: 270install-man:
250 $(INSTALL_DATA) -D man/cpupower.1 $(DESTDIR)${mandir}/man1/cpupower.1 271 $(INSTALL_DATA) -D man/cpupower.1 $(DESTDIR)${mandir}/man1/cpupower.1
@@ -257,13 +278,13 @@ install-man:
257install-gmo: 278install-gmo:
258 $(INSTALL) -d $(DESTDIR)${localedir} 279 $(INSTALL) -d $(DESTDIR)${localedir}
259 for HLANG in $(LANGUAGES); do \ 280 for HLANG in $(LANGUAGES); do \
260 echo '$(INSTALL_DATA) -D po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo'; \ 281 echo '$(INSTALL_DATA) -D $(OUTPUT)po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo'; \
261 $(INSTALL_DATA) -D po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo; \ 282 $(INSTALL_DATA) -D $(OUTPUT)po/$$HLANG.gmo $(DESTDIR)${localedir}/$$HLANG/LC_MESSAGES/cpupower.mo; \
262 done; 283 done;
263 284
264install-bench: 285install-bench:
265 @#DESTDIR must be set from outside to survive 286 @#DESTDIR must be set from outside to survive
266 @sbindir=$(sbindir) bindir=$(bindir) docdir=$(docdir) confdir=$(confdir) $(MAKE) -C bench install 287 @sbindir=$(sbindir) bindir=$(bindir) docdir=$(docdir) confdir=$(confdir) $(MAKE) -C bench O=$(OUTPUT) install
267 288
268install: all install-lib install-tools install-man $(INSTALL_NLS) $(INSTALL_BENCH) 289install: all install-lib install-tools install-man $(INSTALL_NLS) $(INSTALL_BENCH)
269 290
diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile
index 2b67606fc3e3..7ec7021a29cd 100644
--- a/tools/power/cpupower/bench/Makefile
+++ b/tools/power/cpupower/bench/Makefile
@@ -1,29 +1,36 @@
1LIBS = -L../ -lm -lcpupower 1OUTPUT := ./
2ifeq ("$(origin O)", "command line")
3ifneq ($(O),)
4 OUTPUT := $(O)/
5endif
6endif
2 7
3OBJS = main.o parse.o system.o benchmark.o 8LIBS = -L../ -L$(OUTPUT) -lm -lcpupower
9
10OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o
4CFLAGS += -D_GNU_SOURCE -I../lib -DDEFAULT_CONFIG_FILE=\"$(confdir)/cpufreq-bench.conf\" 11CFLAGS += -D_GNU_SOURCE -I../lib -DDEFAULT_CONFIG_FILE=\"$(confdir)/cpufreq-bench.conf\"
5 12
6%.o : %.c 13$(OUTPUT)%.o : %.c
7 $(ECHO) " CC " $@ 14 $(ECHO) " CC " $@
8 $(QUIET) $(CC) -c $(CFLAGS) $< -o $@ 15 $(QUIET) $(CC) -c $(CFLAGS) $< -o $@
9 16
10cpufreq-bench: $(OBJS) 17$(OUTPUT)cpufreq-bench: $(OBJS)
11 $(ECHO) " CC " $@ 18 $(ECHO) " CC " $@
12 $(QUIET) $(CC) -o $@ $(CFLAGS) $(OBJS) $(LIBS) 19 $(QUIET) $(CC) -o $@ $(CFLAGS) $(OBJS) $(LIBS)
13 20
14all: cpufreq-bench 21all: $(OUTPUT)cpufreq-bench
15 22
16install: 23install:
17 mkdir -p $(DESTDIR)/$(sbindir) 24 mkdir -p $(DESTDIR)/$(sbindir)
18 mkdir -p $(DESTDIR)/$(bindir) 25 mkdir -p $(DESTDIR)/$(bindir)
19 mkdir -p $(DESTDIR)/$(docdir) 26 mkdir -p $(DESTDIR)/$(docdir)
20 mkdir -p $(DESTDIR)/$(confdir) 27 mkdir -p $(DESTDIR)/$(confdir)
21 install -m 755 cpufreq-bench $(DESTDIR)/$(sbindir)/cpufreq-bench 28 install -m 755 $(OUTPUT)cpufreq-bench $(DESTDIR)/$(sbindir)/cpufreq-bench
22 install -m 755 cpufreq-bench_plot.sh $(DESTDIR)/$(bindir)/cpufreq-bench_plot.sh 29 install -m 755 cpufreq-bench_plot.sh $(DESTDIR)/$(bindir)/cpufreq-bench_plot.sh
23 install -m 644 README-BENCH $(DESTDIR)/$(docdir)/README-BENCH 30 install -m 644 README-BENCH $(DESTDIR)/$(docdir)/README-BENCH
24 install -m 755 cpufreq-bench_script.sh $(DESTDIR)/$(docdir)/cpufreq-bench_script.sh 31 install -m 755 cpufreq-bench_script.sh $(DESTDIR)/$(docdir)/cpufreq-bench_script.sh
25 install -m 644 example.cfg $(DESTDIR)/$(confdir)/cpufreq-bench.conf 32 install -m 644 example.cfg $(DESTDIR)/$(confdir)/cpufreq-bench.conf
26 33
27clean: 34clean:
28 rm -f *.o 35 rm -f $(OUTPUT)*.o
29 rm -f cpufreq-bench 36 rm -f $(OUTPUT)cpufreq-bench
diff --git a/tools/power/cpupower/debug/i386/Makefile b/tools/power/cpupower/debug/i386/Makefile
index d08cc1ead9bc..3ba158f0e287 100644
--- a/tools/power/cpupower/debug/i386/Makefile
+++ b/tools/power/cpupower/debug/i386/Makefile
@@ -1,20 +1,38 @@
1OUTPUT=./
2ifeq ("$(origin O)", "command line")
3 OUTPUT := $(O)/
4endif
5
6DESTDIR =
7bindir = /usr/bin
8
9INSTALL = /usr/bin/install
10
11
1default: all 12default: all
2 13
3centrino-decode: centrino-decode.c 14$(OUTPUT)centrino-decode: centrino-decode.c
4 $(CC) $(CFLAGS) -o centrino-decode centrino-decode.c 15 $(CC) $(CFLAGS) -o $@ centrino-decode.c
5 16
6dump_psb: dump_psb.c 17$(OUTPUT)dump_psb: dump_psb.c
7 $(CC) $(CFLAGS) -o dump_psb dump_psb.c 18 $(CC) $(CFLAGS) -o $@ dump_psb.c
8 19
9intel_gsic: intel_gsic.c 20$(OUTPUT)intel_gsic: intel_gsic.c
10 $(CC) $(CFLAGS) -o intel_gsic -llrmi intel_gsic.c 21 $(CC) $(CFLAGS) -o $@ -llrmi intel_gsic.c
11 22
12powernow-k8-decode: powernow-k8-decode.c 23$(OUTPUT)powernow-k8-decode: powernow-k8-decode.c
13 $(CC) $(CFLAGS) -o powernow-k8-decode powernow-k8-decode.c 24 $(CC) $(CFLAGS) -o $@ powernow-k8-decode.c
14 25
15all: centrino-decode dump_psb intel_gsic powernow-k8-decode 26all: $(OUTPUT)centrino-decode $(OUTPUT)dump_psb $(OUTPUT)intel_gsic $(OUTPUT)powernow-k8-decode
16 27
17clean: 28clean:
18 rm -rf centrino-decode dump_psb intel_gsic powernow-k8-decode 29 rm -rf $(OUTPUT){centrino-decode,dump_psb,intel_gsic,powernow-k8-decode}
30
31install:
32 $(INSTALL) -d $(DESTDIR)${bindir}
33 $(INSTALL) $(OUTPUT)centrino-decode $(DESTDIR)${bindir}
34 $(INSTALL) $(OUTPUT)powernow-k8-decode $(DESTDIR)${bindir}
35 $(INSTALL) $(OUTPUT)dump_psb $(DESTDIR)${bindir}
36 $(INSTALL) $(OUTPUT)intel_gsic $(DESTDIR)${bindir}
19 37
20.PHONY: all default clean 38.PHONY: all default clean install
diff --git a/tools/power/cpupower/debug/x86_64/Makefile b/tools/power/cpupower/debug/x86_64/Makefile
index 3326217dd311..1c5214526716 100644
--- a/tools/power/cpupower/debug/x86_64/Makefile
+++ b/tools/power/cpupower/debug/x86_64/Makefile
@@ -1,14 +1,30 @@
1OUTPUT=./
2ifeq ("$(origin O)", "command line")
3 OUTPUT := $(O)/
4endif
5
6DESTDIR =
7bindir = /usr/bin
8
9INSTALL = /usr/bin/install
10
11
1default: all 12default: all
2 13
3centrino-decode: ../i386/centrino-decode.c 14$(OUTPUT)centrino-decode: ../i386/centrino-decode.c
4 $(CC) $(CFLAGS) -o $@ $< 15 $(CC) $(CFLAGS) -o $@ $<
5 16
6powernow-k8-decode: ../i386/powernow-k8-decode.c 17$(OUTPUT)powernow-k8-decode: ../i386/powernow-k8-decode.c
7 $(CC) $(CFLAGS) -o $@ $< 18 $(CC) $(CFLAGS) -o $@ $<
8 19
9all: centrino-decode powernow-k8-decode 20all: $(OUTPUT)centrino-decode $(OUTPUT)powernow-k8-decode
10 21
11clean: 22clean:
12 rm -rf centrino-decode powernow-k8-decode 23 rm -rf $(OUTPUT)centrino-decode $(OUTPUT)powernow-k8-decode
24
25install:
26 $(INSTALL) -d $(DESTDIR)${bindir}
27 $(INSTALL) $(OUTPUT)centrino-decode $(DESTDIR)${bindir}
28 $(INSTALL) $(OUTPUT)powernow-k8-decode $(DESTDIR)${bindir}
13 29
14.PHONY: all default clean 30.PHONY: all default clean install
diff --git a/tools/power/cpupower/man/cpupower-frequency-info.1 b/tools/power/cpupower/man/cpupower-frequency-info.1
index bb60a8d1e45a..4a1918ea8f9c 100644
--- a/tools/power/cpupower/man/cpupower-frequency-info.1
+++ b/tools/power/cpupower/man/cpupower-frequency-info.1
@@ -1,4 +1,4 @@
1.TH "cpupower-frequency-info" "1" "0.1" "Mattia Dongili" "" 1.TH "CPUPOWER\-FREQUENCY\-INFO" "1" "0.1" "" "cpupower Manual"
2.SH "NAME" 2.SH "NAME"
3.LP 3.LP
4cpupower frequency\-info \- Utility to retrieve cpufreq kernel information 4cpupower frequency\-info \- Utility to retrieve cpufreq kernel information
@@ -50,8 +50,6 @@ Prints out information like provided by the /proc/cpufreq interface in 2.4. and
50\fB\-m\fR \fB\-\-human\fR 50\fB\-m\fR \fB\-\-human\fR
51human\-readable output for the \-f, \-w, \-s and \-y parameters. 51human\-readable output for the \-f, \-w, \-s and \-y parameters.
52.TP 52.TP
53\fB\-h\fR \fB\-\-help\fR
54Prints out the help screen.
55.SH "REMARKS" 53.SH "REMARKS"
56.LP 54.LP
57By default only values of core zero are displayed. How to display settings of 55By default only values of core zero are displayed. How to display settings of
diff --git a/tools/power/cpupower/man/cpupower-frequency-set.1 b/tools/power/cpupower/man/cpupower-frequency-set.1
index 685f469093ad..3eacc8d03d1a 100644
--- a/tools/power/cpupower/man/cpupower-frequency-set.1
+++ b/tools/power/cpupower/man/cpupower-frequency-set.1
@@ -1,4 +1,4 @@
1.TH "cpupower-freqency-set" "1" "0.1" "Mattia Dongili" "" 1.TH "CPUPOWER\-FREQUENCY\-SET" "1" "0.1" "" "cpupower Manual"
2.SH "NAME" 2.SH "NAME"
3.LP 3.LP
4cpupower frequency\-set \- A small tool which allows to modify cpufreq settings. 4cpupower frequency\-set \- A small tool which allows to modify cpufreq settings.
@@ -26,8 +26,6 @@ specific frequency to be set. Requires userspace governor to be available and lo
26\fB\-r\fR \fB\-\-related\fR 26\fB\-r\fR \fB\-\-related\fR
27modify all hardware-related CPUs at the same time 27modify all hardware-related CPUs at the same time
28.TP 28.TP
29\fB\-h\fR \fB\-\-help\fR
30Prints out the help screen.
31.SH "REMARKS" 29.SH "REMARKS"
32.LP 30.LP
33By default values are applied on all cores. How to modify single core 31By default values are applied on all cores. How to modify single core
diff --git a/tools/power/cpupower/man/cpupower-idle-info.1 b/tools/power/cpupower/man/cpupower-idle-info.1
new file mode 100644
index 000000000000..4178effd9e99
--- /dev/null
+++ b/tools/power/cpupower/man/cpupower-idle-info.1
@@ -0,0 +1,90 @@
1.TH "CPUPOWER-IDLE-INFO" "1" "0.1" "" "cpupower Manual"
2.SH "NAME"
3.LP
4cpupower idle\-info \- Utility to retrieve cpu idle kernel information
5.SH "SYNTAX"
6.LP
7cpupower [ \-c cpulist ] idle\-info [\fIoptions\fP]
8.SH "DESCRIPTION"
9.LP
10A tool which prints out per cpu idle information helpful to developers and interested users.
11.SH "OPTIONS"
12.LP
13.TP
14\fB\-f\fR \fB\-\-silent\fR
15Only print a summary of all available C-states in the system.
16.TP
17\fB\-e\fR \fB\-\-proc\fR
18deprecated.
19Prints out idle information in old /proc/acpi/processor/*/power format. This
20interface has been removed from the kernel for quite some time, do not let
21further code depend on this option, best do not use it.
22
23.SH IDLE\-INFO DESCRIPTIONS
24CPU sleep state statistics and descriptions are retrieved from sysfs files,
25exported by the cpuidle kernel subsystem. The kernel only updates these
26statistics when it enters or leaves an idle state, therefore on a very idle or
27a very busy system, these statistics may not be accurate. They still provide a
28good overview about the usage and availability of processor sleep states on
29the platform.
30
31Be aware that the sleep states as exported by the hardware or BIOS and used by
32the Linux kernel may not exactly reflect the capabilities of the
33processor. This often is the case on the X86 architecture when the acpi_idle
34driver is used. It is also possible that the hardware overrules the kernel
35requests, due to internal activity monitors or other reasons.
36On recent X86 platforms it is often possible to read out hardware registers
37which monitor the duration of sleep states the processor resided in. The
38cpupower monitor tool (cpupower\-monitor(1)) can be used to show real sleep
39state residencies. Please refer to the architecture specific description
40section below.
41
42.SH IDLE\-INFO ARCHITECTURE SPECIFIC DESCRIPTIONS
43.SS "X86"
44POLL idle state
45
46If cpuidle is active, X86 platforms have one special idle state.
47The POLL idle state is not a real idle state, it does not save any
48power. Instead, a busy\-loop is executed doing nothing for a short period of
49time. This state is used if the kernel knows that work has to be processed
50very soon and entering any real hardware idle state may result in a slight
51performance penalty.
52
53There exist two different cpuidle drivers on the X86 architecture platform:
54
55"acpi_idle" cpuidle driver
56
57The acpi_idle cpuidle driver retrieves available sleep states (C\-states) from
58the ACPI BIOS tables (from the _CST ACPI function on recent platforms or from
59the FADT BIOS table on older ones).
60The C1 state is not retrieved from ACPI tables. If the C1 state is entered,
61the kernel will call the hlt instruction (or mwait on Intel).
62
63"intel_idle" cpuidle driver
64
65In kernel 2.6.36 the intel_idle driver was introduced.
66It only serves recent Intel CPUs (Nehalem, Westmere, Sandybridge, Atoms or
67newer). On older Intel CPUs the acpi_idle driver is still used (if the BIOS
68provides C\-state ACPI tables).
69The intel_idle driver knows the sleep state capabilities of the processor and
70ignores ACPI BIOS exported processor sleep states tables.
71
72.SH "REMARKS"
73.LP
74By default only values of core zero are displayed. How to display settings of
75other cores is described in the cpupower(1) manpage in the \-\-cpu option
76section.
77.SH REFERENCES
78http://www.acpi.info/spec.htm
79.SH "FILES"
80.nf
81\fI/sys/devices/system/cpu/cpu*/cpuidle/state*\fP
82\fI/sys/devices/system/cpu/cpuidle/*\fP
83.fi
84.SH "AUTHORS"
85.nf
86Thomas Renninger <trenn@suse.de>
87.fi
88.SH "SEE ALSO"
89.LP
90cpupower(1), cpupower\-monitor(1), cpupower\-info(1), cpupower\-set(1)
diff --git a/tools/power/cpupower/man/cpupower-monitor.1 b/tools/power/cpupower/man/cpupower-monitor.1
index d5cfa265c3d3..1141c2073719 100644
--- a/tools/power/cpupower/man/cpupower-monitor.1
+++ b/tools/power/cpupower/man/cpupower-monitor.1
@@ -107,7 +107,7 @@ Deepest package sleep states may in reality show up as machine/platform wide
107sleep states and can only be entered if all cores are idle. Look up Intel 107sleep states and can only be entered if all cores are idle. Look up Intel
108manuals (some are provided in the References section) for further details. 108manuals (some are provided in the References section) for further details.
109 109
110.SS "Ontario" "Liano" 110.SS "Fam_12h" "Fam_14h"
111AMD laptop and desktop processor (family 12h and 14h) sleep state counters. 111AMD laptop and desktop processor (family 12h and 14h) sleep state counters.
112The registers are accessed via PCI and therefore can still be read out while 112The registers are accessed via PCI and therefore can still be read out while
113cores have been offlined. 113cores have been offlined.
diff --git a/tools/power/cpupower/utils/cpuidle-info.c b/tools/power/cpupower/utils/cpuidle-info.c
index b028267c1376..8145af5f93a6 100644
--- a/tools/power/cpupower/utils/cpuidle-info.c
+++ b/tools/power/cpupower/utils/cpuidle-info.c
@@ -35,17 +35,9 @@ static void cpuidle_cpu_output(unsigned int cpu, int verbose)
35 printf(_("CPU %u: Can't read idle state info\n"), cpu); 35 printf(_("CPU %u: Can't read idle state info\n"), cpu);
36 return; 36 return;
37 } 37 }
38 tmp = sysfs_get_idlestate_name(cpu, idlestates - 1);
39 if (!tmp) {
40 printf(_("Could not determine max idle state %u\n"),
41 idlestates - 1);
42 return;
43 }
44
45 printf(_("Number of idle states: %d\n"), idlestates); 38 printf(_("Number of idle states: %d\n"), idlestates);
46
47 printf(_("Available idle states:")); 39 printf(_("Available idle states:"));
48 for (idlestate = 1; idlestate < idlestates; idlestate++) { 40 for (idlestate = 0; idlestate < idlestates; idlestate++) {
49 tmp = sysfs_get_idlestate_name(cpu, idlestate); 41 tmp = sysfs_get_idlestate_name(cpu, idlestate);
50 if (!tmp) 42 if (!tmp)
51 continue; 43 continue;
@@ -57,7 +49,7 @@ static void cpuidle_cpu_output(unsigned int cpu, int verbose)
57 if (!verbose) 49 if (!verbose)
58 return; 50 return;
59 51
60 for (idlestate = 1; idlestate < idlestates; idlestate++) { 52 for (idlestate = 0; idlestate < idlestates; idlestate++) {
61 tmp = sysfs_get_idlestate_name(cpu, idlestate); 53 tmp = sysfs_get_idlestate_name(cpu, idlestate);
62 if (!tmp) 54 if (!tmp)
63 continue; 55 continue;
diff --git a/tools/power/cpupower/utils/helpers/amd.c b/tools/power/cpupower/utils/helpers/amd.c
index 87d5605bdda8..6437ef39aeea 100644
--- a/tools/power/cpupower/utils/helpers/amd.c
+++ b/tools/power/cpupower/utils/helpers/amd.c
@@ -112,14 +112,12 @@ int decode_pstates(unsigned int cpu, unsigned int cpu_family,
112int amd_pci_get_num_boost_states(int *active, int *states) 112int amd_pci_get_num_boost_states(int *active, int *states)
113{ 113{
114 struct pci_access *pci_acc; 114 struct pci_access *pci_acc;
115 int vendor_id = 0x1022;
116 int boost_dev_ids[4] = {0x1204, 0x1604, 0x1704, 0};
117 struct pci_dev *device; 115 struct pci_dev *device;
118 uint8_t val = 0; 116 uint8_t val = 0;
119 117
120 *active = *states = 0; 118 *active = *states = 0;
121 119
122 device = pci_acc_init(&pci_acc, vendor_id, boost_dev_ids); 120 device = pci_slot_func_init(&pci_acc, 0x18, 4);
123 121
124 if (device == NULL) 122 if (device == NULL)
125 return -ENODEV; 123 return -ENODEV;
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h
index 2747e738efb0..2eb584cf2f55 100644
--- a/tools/power/cpupower/utils/helpers/helpers.h
+++ b/tools/power/cpupower/utils/helpers/helpers.h
@@ -66,8 +66,8 @@ enum cpupower_cpu_vendor {X86_VENDOR_UNKNOWN = 0, X86_VENDOR_INTEL,
66#define CPUPOWER_CAP_AMD_CBP 0x00000004 66#define CPUPOWER_CAP_AMD_CBP 0x00000004
67#define CPUPOWER_CAP_PERF_BIAS 0x00000008 67#define CPUPOWER_CAP_PERF_BIAS 0x00000008
68#define CPUPOWER_CAP_HAS_TURBO_RATIO 0x00000010 68#define CPUPOWER_CAP_HAS_TURBO_RATIO 0x00000010
69#define CPUPOWER_CAP_IS_SNB 0x00000011 69#define CPUPOWER_CAP_IS_SNB 0x00000020
70#define CPUPOWER_CAP_INTEL_IDA 0x00000012 70#define CPUPOWER_CAP_INTEL_IDA 0x00000040
71 71
72#define MAX_HW_PSTATES 10 72#define MAX_HW_PSTATES 10
73 73
@@ -132,8 +132,11 @@ extern unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu);
132 132
133/* PCI stuff ****************************/ 133/* PCI stuff ****************************/
134extern int amd_pci_get_num_boost_states(int *active, int *states); 134extern int amd_pci_get_num_boost_states(int *active, int *states);
135extern struct pci_dev *pci_acc_init(struct pci_access **pacc, int vendor_id, 135extern struct pci_dev *pci_acc_init(struct pci_access **pacc, int domain,
136 int *dev_ids); 136 int bus, int slot, int func, int vendor,
137 int dev);
138extern struct pci_dev *pci_slot_func_init(struct pci_access **pacc,
139 int slot, int func);
137 140
138/* PCI stuff ****************************/ 141/* PCI stuff ****************************/
139 142
diff --git a/tools/power/cpupower/utils/helpers/pci.c b/tools/power/cpupower/utils/helpers/pci.c
index cd2eb6fe41c4..9690798e6446 100644
--- a/tools/power/cpupower/utils/helpers/pci.c
+++ b/tools/power/cpupower/utils/helpers/pci.c
@@ -10,19 +10,24 @@
10 * **pacc : if a valid pci_dev is returned 10 * **pacc : if a valid pci_dev is returned
11 * *pacc must be passed to pci_acc_cleanup to free it 11 * *pacc must be passed to pci_acc_cleanup to free it
12 * 12 *
13 * vendor_id : the pci vendor id matching the pci device to access 13 * domain: domain
14 * dev_ids : device ids matching the pci device to access 14 * bus: bus
15 * slot: slot
16 * func: func
17 * vendor: vendor
18 * device: device
19 * Pass -1 for one of the six above to match any
15 * 20 *
16 * Returns : 21 * Returns :
17 * struct pci_dev which can be used with pci_{read,write}_* functions 22 * struct pci_dev which can be used with pci_{read,write}_* functions
18 * to access the PCI config space of matching pci devices 23 * to access the PCI config space of matching pci devices
19 */ 24 */
20struct pci_dev *pci_acc_init(struct pci_access **pacc, int vendor_id, 25struct pci_dev *pci_acc_init(struct pci_access **pacc, int domain, int bus,
21 int *dev_ids) 26 int slot, int func, int vendor, int dev)
22{ 27{
23 struct pci_filter filter_nb_link = { -1, -1, -1, -1, vendor_id, 0}; 28 struct pci_filter filter_nb_link = { domain, bus, slot, func,
29 vendor, dev };
24 struct pci_dev *device; 30 struct pci_dev *device;
25 unsigned int i;
26 31
27 *pacc = pci_alloc(); 32 *pacc = pci_alloc();
28 if (*pacc == NULL) 33 if (*pacc == NULL)
@@ -31,14 +36,20 @@ struct pci_dev *pci_acc_init(struct pci_access **pacc, int vendor_id,
31 pci_init(*pacc); 36 pci_init(*pacc);
32 pci_scan_bus(*pacc); 37 pci_scan_bus(*pacc);
33 38
34 for (i = 0; dev_ids[i] != 0; i++) { 39 for (device = (*pacc)->devices; device; device = device->next) {
35 filter_nb_link.device = dev_ids[i]; 40 if (pci_filter_match(&filter_nb_link, device))
36 for (device = (*pacc)->devices; device; device = device->next) { 41 return device;
37 if (pci_filter_match(&filter_nb_link, device))
38 return device;
39 }
40 } 42 }
41 pci_cleanup(*pacc); 43 pci_cleanup(*pacc);
42 return NULL; 44 return NULL;
43} 45}
46
47/* Typically one wants to get a specific slot(device)/func of the root domain
48 and bus */
49struct pci_dev *pci_slot_func_init(struct pci_access **pacc, int slot,
50 int func)
51{
52 return pci_acc_init(pacc, 0, 0, slot, func, -1, -1);
53}
54
44#endif /* defined(__i386__) || defined(__x86_64__) */ 55#endif /* defined(__i386__) || defined(__x86_64__) */
diff --git a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
index 202e555988be..2116df9ad832 100644
--- a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
+++ b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
@@ -20,8 +20,6 @@
20#include "idle_monitor/cpupower-monitor.h" 20#include "idle_monitor/cpupower-monitor.h"
21#include "helpers/helpers.h" 21#include "helpers/helpers.h"
22 22
23/******** PCI parts could go into own file and get shared ***************/
24
25#define PCI_NON_PC0_OFFSET 0xb0 23#define PCI_NON_PC0_OFFSET 0xb0
26#define PCI_PC1_OFFSET 0xb4 24#define PCI_PC1_OFFSET 0xb4
27#define PCI_PC6_OFFSET 0xb8 25#define PCI_PC6_OFFSET 0xb8
@@ -82,10 +80,7 @@ static cstate_t amd_fam14h_cstates[AMD_FAM14H_STATE_NUM] = {
82}; 80};
83 81
84static struct pci_access *pci_acc; 82static struct pci_access *pci_acc;
85static int pci_vendor_id = 0x1022;
86static int pci_dev_ids[2] = {0x1716, 0};
87static struct pci_dev *amd_fam14h_pci_dev; 83static struct pci_dev *amd_fam14h_pci_dev;
88
89static int nbp1_entered; 84static int nbp1_entered;
90 85
91struct timespec start_time; 86struct timespec start_time;
@@ -286,13 +281,13 @@ struct cpuidle_monitor *amd_fam14h_register(void)
286 if (cpupower_cpu_info.vendor != X86_VENDOR_AMD) 281 if (cpupower_cpu_info.vendor != X86_VENDOR_AMD)
287 return NULL; 282 return NULL;
288 283
289 if (cpupower_cpu_info.family == 0x14) { 284 if (cpupower_cpu_info.family == 0x14)
290 if (cpu_count <= 0 || cpu_count > 2) { 285 strncpy(amd_fam14h_monitor.name, "Fam_14h",
291 fprintf(stderr, "AMD fam14h: Invalid cpu count: %d\n", 286 MONITOR_NAME_LEN - 1);
292 cpu_count); 287 else if (cpupower_cpu_info.family == 0x12)
293 return NULL; 288 strncpy(amd_fam14h_monitor.name, "Fam_12h",
294 } 289 MONITOR_NAME_LEN - 1);
295 } else 290 else
296 return NULL; 291 return NULL;
297 292
298 /* We do not alloc for nbp1 machine wide counter */ 293 /* We do not alloc for nbp1 machine wide counter */
@@ -303,7 +298,9 @@ struct cpuidle_monitor *amd_fam14h_register(void)
303 sizeof(unsigned long long)); 298 sizeof(unsigned long long));
304 } 299 }
305 300
306 amd_fam14h_pci_dev = pci_acc_init(&pci_acc, pci_vendor_id, pci_dev_ids); 301 /* We need PCI device: Slot 18, Func 6, compare with BKDG
302 for fam 12h/14h */
303 amd_fam14h_pci_dev = pci_slot_func_init(&pci_acc, 0x18, 6);
307 if (amd_fam14h_pci_dev == NULL || pci_acc == NULL) 304 if (amd_fam14h_pci_dev == NULL || pci_acc == NULL)
308 return NULL; 305 return NULL;
309 306
@@ -325,7 +322,7 @@ static void amd_fam14h_unregister(void)
325} 322}
326 323
327struct cpuidle_monitor amd_fam14h_monitor = { 324struct cpuidle_monitor amd_fam14h_monitor = {
328 .name = "Ontario", 325 .name = "",
329 .hw_states = amd_fam14h_cstates, 326 .hw_states = amd_fam14h_cstates,
330 .hw_states_num = AMD_FAM14H_STATE_NUM, 327 .hw_states_num = AMD_FAM14H_STATE_NUM,
331 .start = amd_fam14h_start, 328 .start = amd_fam14h_start,
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index 555c69a5592a..adf175f61496 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -4,11 +4,13 @@ turbostat \- Report processor frequency and idle statistics
4.SH SYNOPSIS 4.SH SYNOPSIS
5.ft B 5.ft B
6.B turbostat 6.B turbostat
7.RB [ "\-s" ]
7.RB [ "\-v" ] 8.RB [ "\-v" ]
8.RB [ "\-M MSR#" ] 9.RB [ "\-M MSR#" ]
9.RB command 10.RB command
10.br 11.br
11.B turbostat 12.B turbostat
13.RB [ "\-s" ]
12.RB [ "\-v" ] 14.RB [ "\-v" ]
13.RB [ "\-M MSR#" ] 15.RB [ "\-M MSR#" ]
14.RB [ "\-i interval_sec" ] 16.RB [ "\-i interval_sec" ]
@@ -25,6 +27,8 @@ supports an "invariant" TSC, plus the APERF and MPERF MSRs.
25on processors that additionally support C-state residency counters. 27on processors that additionally support C-state residency counters.
26 28
27.SS Options 29.SS Options
30The \fB-s\fP option prints only a 1-line summary for each sample interval.
31.PP
28The \fB-v\fP option increases verbosity. 32The \fB-v\fP option increases verbosity.
29.PP 33.PP
30The \fB-M MSR#\fP option dumps the specified MSR, 34The \fB-M MSR#\fP option dumps the specified MSR,
@@ -39,13 +43,14 @@ displays the statistics gathered since it was forked.
39.SH FIELD DESCRIPTIONS 43.SH FIELD DESCRIPTIONS
40.nf 44.nf
41\fBpk\fP processor package number. 45\fBpk\fP processor package number.
42\fBcr\fP processor core number. 46\fBcor\fP processor core number.
43\fBCPU\fP Linux CPU (logical processor) number. 47\fBCPU\fP Linux CPU (logical processor) number.
48Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology.
44\fB%c0\fP percent of the interval that the CPU retired instructions. 49\fB%c0\fP percent of the interval that the CPU retired instructions.
45\fBGHz\fP average clock rate while the CPU was in c0 state. 50\fBGHz\fP average clock rate while the CPU was in c0 state.
46\fBTSC\fP average GHz that the TSC ran during the entire interval. 51\fBTSC\fP average GHz that the TSC ran during the entire interval.
47\fB%c1, %c3, %c6\fP show the percentage residency in hardware core idle states. 52\fB%c1, %c3, %c6, %c7\fP show the percentage residency in hardware core idle states.
48\fB%pc3, %pc6\fP percentage residency in hardware package idle states. 53\fB%pc2, %pc3, %pc6, %pc7\fP percentage residency in hardware package idle states.
49.fi 54.fi
50.PP 55.PP
51.SH EXAMPLE 56.SH EXAMPLE
@@ -53,25 +58,37 @@ Without any parameters, turbostat prints out counters every 5 seconds.
53(override interval with "-i sec" option, or specify a command 58(override interval with "-i sec" option, or specify a command
54for turbostat to fork). 59for turbostat to fork).
55 60
56The first row of statistics reflect the average for the entire system. 61The first row of statistics is a summary for the entire system.
62Note that the summary is a weighted average.
57Subsequent rows show per-CPU statistics. 63Subsequent rows show per-CPU statistics.
58 64
59.nf 65.nf
60[root@x980]# ./turbostat 66[root@x980]# ./turbostat
61cr CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6 67cor CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
62 0.04 1.62 3.38 0.11 0.00 99.85 0.00 95.07 68 0.60 1.63 3.38 2.91 0.00 96.49 0.00 76.64
63 0 0 0.04 1.62 3.38 0.06 0.00 99.90 0.00 95.07 69 0 0 0.59 1.62 3.38 4.51 0.00 94.90 0.00 76.64
64 0 6 0.02 1.62 3.38 0.08 0.00 99.90 0.00 95.07 70 0 6 1.13 1.64 3.38 3.97 0.00 94.90 0.00 76.64
65 1 2 0.10 1.62 3.38 0.29 0.00 99.61 0.00 95.07 71 1 2 0.08 1.62 3.38 0.07 0.00 99.85 0.00 76.64
66 1 8 0.11 1.62 3.38 0.28 0.00 99.61 0.00 95.07 72 1 8 0.03 1.62 3.38 0.12 0.00 99.85 0.00 76.64
67 2 4 0.01 1.62 3.38 0.01 0.00 99.98 0.00 95.07 73 2 4 0.01 1.62 3.38 0.06 0.00 99.93 0.00 76.64
68 2 10 0.01 1.61 3.38 0.02 0.00 99.98 0.00 95.07 74 2 10 0.04 1.62 3.38 0.02 0.00 99.93 0.00 76.64
69 8 1 0.07 1.62 3.38 0.15 0.00 99.78 0.00 95.07 75 8 1 2.85 1.62 3.38 11.71 0.00 85.44 0.00 76.64
70 8 7 0.03 1.62 3.38 0.19 0.00 99.78 0.00 95.07 76 8 7 1.98 1.62 3.38 12.58 0.00 85.44 0.00 76.64
71 9 3 0.01 1.62 3.38 0.02 0.00 99.98 0.00 95.07 77 9 3 0.36 1.62 3.38 0.71 0.00 98.93 0.00 76.64
72 9 9 0.01 1.62 3.38 0.02 0.00 99.98 0.00 95.07 78 9 9 0.09 1.62 3.38 0.98 0.00 98.93 0.00 76.64
73 10 5 0.01 1.62 3.38 0.13 0.00 99.86 0.00 95.07 79 10 5 0.03 1.62 3.38 0.09 0.00 99.87 0.00 76.64
74 10 11 0.08 1.62 3.38 0.05 0.00 99.86 0.00 95.07 80 10 11 0.07 1.62 3.38 0.06 0.00 99.87 0.00 76.64
81.fi
82.SH SUMMARY EXAMPLE
83The "-s" option prints the column headers just once,
84and then the one line system summary for each sample interval.
85
86.nf
87[root@x980]# ./turbostat -s
88 %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
89 0.61 1.89 3.38 5.95 0.00 93.44 0.00 66.33
90 0.52 1.62 3.38 6.83 0.00 92.65 0.00 61.11
91 0.62 1.92 3.38 5.47 0.00 93.91 0.00 67.31
75.fi 92.fi
76.SH VERBOSE EXAMPLE 93.SH VERBOSE EXAMPLE
77The "-v" option adds verbosity to the output: 94The "-v" option adds verbosity to the output:
@@ -101,33 +118,33 @@ until ^C while the other CPUs are mostly idle:
101 118
102.nf 119.nf
103[root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null 120[root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
104 121^C
105^Ccr CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6 122cor CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
106 8.49 3.63 3.38 16.23 0.66 74.63 0.00 0.00 123 8.63 3.64 3.38 14.46 0.49 76.42 0.00 0.00
107 0 0 1.22 3.62 3.38 32.18 0.00 66.60 0.00 0.00 124 0 0 0.34 3.36 3.38 99.66 0.00 0.00 0.00 0.00
108 0 6 0.40 3.61 3.38 33.00 0.00 66.60 0.00 0.00 125 0 6 99.96 3.64 3.38 0.04 0.00 0.00 0.00 0.00
109 1 2 0.11 3.14 3.38 0.19 3.95 95.75 0.00 0.00 126 1 2 0.14 3.50 3.38 1.75 2.04 96.07 0.00 0.00
110 1 8 0.05 2.88 3.38 0.25 3.95 95.75 0.00 0.00 127 1 8 0.38 3.57 3.38 1.51 2.04 96.07 0.00 0.00
111 2 4 0.00 3.13 3.38 0.02 0.00 99.98 0.00 0.00 128 2 4 0.01 2.65 3.38 0.06 0.00 99.93 0.00 0.00
112 2 10 0.00 3.09 3.38 0.02 0.00 99.98 0.00 0.00 129 2 10 0.03 2.12 3.38 0.04 0.00 99.93 0.00 0.00
113 8 1 0.04 3.50 3.38 14.43 0.00 85.54 0.00 0.00 130 8 1 0.91 3.59 3.38 35.27 0.92 62.90 0.00 0.00
114 8 7 0.03 2.98 3.38 14.43 0.00 85.54 0.00 0.00 131 8 7 1.61 3.63 3.38 34.57 0.92 62.90 0.00 0.00
115 9 3 0.00 3.16 3.38 100.00 0.00 0.00 0.00 0.00 132 9 3 0.04 3.38 3.38 0.20 0.00 99.76 0.00 0.00
116 9 9 99.93 3.63 3.38 0.06 0.00 0.00 0.00 0.00 133 9 9 0.04 3.29 3.38 0.20 0.00 99.76 0.00 0.00
117 10 5 0.01 2.82 3.38 0.08 0.00 99.91 0.00 0.00 134 10 5 0.03 3.08 3.38 0.12 0.00 99.85 0.00 0.00
118 10 11 0.02 3.36 3.38 0.06 0.00 99.91 0.00 0.00 135 10 11 0.05 3.07 3.38 0.10 0.00 99.85 0.00 0.00
1196.950866 sec 1364.907015 sec
120 137
121.fi 138.fi
122Above the cycle soaker drives cpu9 up 3.6 Ghz turbo limit 139Above the cycle soaker drives cpu6 up 3.6 Ghz turbo limit
123while the other processors are generally in various states of idle. 140while the other processors are generally in various states of idle.
124 141
125Note that cpu3 is an HT sibling sharing core9 142Note that cpu0 is an HT sibling sharing core0
126with cpu9, and thus it is unable to get to an idle state 143with cpu6, and thus it is unable to get to an idle state
127deeper than c1 while cpu9 is busy. 144deeper than c1 while cpu6 is busy.
128 145
129Note that turbostat reports average GHz of 3.61, while 146Note that turbostat reports average GHz of 3.64, while
130the arithmetic average of the GHz column above is 3.24. 147the arithmetic average of the GHz column above is lower.
131This is a weighted average, where the weight is %c0. ie. it is the total number of 148This is a weighted average, where the weight is %c0. ie. it is the total number of
132un-halted cycles elapsed per time divided by the number of CPUs. 149un-halted cycles elapsed per time divided by the number of CPUs.
133.SH NOTES 150.SH NOTES
@@ -167,6 +184,6 @@ http://www.intel.com/products/processor/manuals/
167.SH "SEE ALSO" 184.SH "SEE ALSO"
168msr(4), vmstat(8) 185msr(4), vmstat(8)
169.PP 186.PP
170.SH AUTHORS 187.SH AUTHOR
171.nf 188.nf
172Written by Len Brown <len.brown@intel.com> 189Written by Len Brown <len.brown@intel.com>
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 310d3dd5e547..ab2f682fd44c 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -2,7 +2,7 @@
2 * turbostat -- show CPU frequency and C-state residency 2 * turbostat -- show CPU frequency and C-state residency
3 * on modern Intel turbo-capable processors. 3 * on modern Intel turbo-capable processors.
4 * 4 *
5 * Copyright (c) 2010, Intel Corporation. 5 * Copyright (c) 2012 Intel Corporation.
6 * Len Brown <len.brown@intel.com> 6 * Len Brown <len.brown@intel.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
@@ -19,6 +19,7 @@
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 */ 20 */
21 21
22#define _GNU_SOURCE
22#include <stdio.h> 23#include <stdio.h>
23#include <unistd.h> 24#include <unistd.h>
24#include <sys/types.h> 25#include <sys/types.h>
@@ -32,6 +33,7 @@
32#include <dirent.h> 33#include <dirent.h>
33#include <string.h> 34#include <string.h>
34#include <ctype.h> 35#include <ctype.h>
36#include <sched.h>
35 37
36#define MSR_TSC 0x10 38#define MSR_TSC 0x10
37#define MSR_NEHALEM_PLATFORM_INFO 0xCE 39#define MSR_NEHALEM_PLATFORM_INFO 0xCE
@@ -49,6 +51,7 @@
49char *proc_stat = "/proc/stat"; 51char *proc_stat = "/proc/stat";
50unsigned int interval_sec = 5; /* set with -i interval_sec */ 52unsigned int interval_sec = 5; /* set with -i interval_sec */
51unsigned int verbose; /* set with -v */ 53unsigned int verbose; /* set with -v */
54unsigned int summary_only; /* set with -s */
52unsigned int skip_c0; 55unsigned int skip_c0;
53unsigned int skip_c1; 56unsigned int skip_c1;
54unsigned int do_nhm_cstates; 57unsigned int do_nhm_cstates;
@@ -68,9 +71,10 @@ unsigned int show_cpu;
68int aperf_mperf_unstable; 71int aperf_mperf_unstable;
69int backwards_count; 72int backwards_count;
70char *progname; 73char *progname;
71int need_reinitialize;
72 74
73int num_cpus; 75int num_cpus;
76cpu_set_t *cpu_mask;
77size_t cpu_mask_size;
74 78
75struct counters { 79struct counters {
76 unsigned long long tsc; /* per thread */ 80 unsigned long long tsc; /* per thread */
@@ -99,44 +103,76 @@ struct timeval tv_even;
99struct timeval tv_odd; 103struct timeval tv_odd;
100struct timeval tv_delta; 104struct timeval tv_delta;
101 105
102unsigned long long get_msr(int cpu, off_t offset) 106/*
107 * cpu_mask_init(ncpus)
108 *
109 * allocate and clear cpu_mask
110 * set cpu_mask_size
111 */
112void cpu_mask_init(int ncpus)
113{
114 cpu_mask = CPU_ALLOC(ncpus);
115 if (cpu_mask == NULL) {
116 perror("CPU_ALLOC");
117 exit(3);
118 }
119 cpu_mask_size = CPU_ALLOC_SIZE(ncpus);
120 CPU_ZERO_S(cpu_mask_size, cpu_mask);
121}
122
123void cpu_mask_uninit()
124{
125 CPU_FREE(cpu_mask);
126 cpu_mask = NULL;
127 cpu_mask_size = 0;
128}
129
130int cpu_migrate(int cpu)
131{
132 CPU_ZERO_S(cpu_mask_size, cpu_mask);
133 CPU_SET_S(cpu, cpu_mask_size, cpu_mask);
134 if (sched_setaffinity(0, cpu_mask_size, cpu_mask) == -1)
135 return -1;
136 else
137 return 0;
138}
139
140int get_msr(int cpu, off_t offset, unsigned long long *msr)
103{ 141{
104 ssize_t retval; 142 ssize_t retval;
105 unsigned long long msr;
106 char pathname[32]; 143 char pathname[32];
107 int fd; 144 int fd;
108 145
109 sprintf(pathname, "/dev/cpu/%d/msr", cpu); 146 sprintf(pathname, "/dev/cpu/%d/msr", cpu);
110 fd = open(pathname, O_RDONLY); 147 fd = open(pathname, O_RDONLY);
111 if (fd < 0) { 148 if (fd < 0)
112 perror(pathname); 149 return -1;
113 need_reinitialize = 1;
114 return 0;
115 }
116
117 retval = pread(fd, &msr, sizeof msr, offset);
118 if (retval != sizeof msr) {
119 fprintf(stderr, "cpu%d pread(..., 0x%zx) = %jd\n",
120 cpu, offset, retval);
121 exit(-2);
122 }
123 150
151 retval = pread(fd, msr, sizeof *msr, offset);
124 close(fd); 152 close(fd);
125 return msr; 153
154 if (retval != sizeof *msr)
155 return -1;
156
157 return 0;
126} 158}
127 159
128void print_header(void) 160void print_header(void)
129{ 161{
130 if (show_pkg) 162 if (show_pkg)
131 fprintf(stderr, "pk"); 163 fprintf(stderr, "pk");
164 if (show_pkg)
165 fprintf(stderr, " ");
132 if (show_core) 166 if (show_core)
133 fprintf(stderr, " cr"); 167 fprintf(stderr, "cor");
134 if (show_cpu) 168 if (show_cpu)
135 fprintf(stderr, " CPU"); 169 fprintf(stderr, " CPU");
170 if (show_pkg || show_core || show_cpu)
171 fprintf(stderr, " ");
136 if (do_nhm_cstates) 172 if (do_nhm_cstates)
137 fprintf(stderr, " %%c0 "); 173 fprintf(stderr, " %%c0");
138 if (has_aperf) 174 if (has_aperf)
139 fprintf(stderr, " GHz"); 175 fprintf(stderr, " GHz");
140 fprintf(stderr, " TSC"); 176 fprintf(stderr, " TSC");
141 if (do_nhm_cstates) 177 if (do_nhm_cstates)
142 fprintf(stderr, " %%c1"); 178 fprintf(stderr, " %%c1");
@@ -147,13 +183,13 @@ void print_header(void)
147 if (do_snb_cstates) 183 if (do_snb_cstates)
148 fprintf(stderr, " %%c7"); 184 fprintf(stderr, " %%c7");
149 if (do_snb_cstates) 185 if (do_snb_cstates)
150 fprintf(stderr, " %%pc2"); 186 fprintf(stderr, " %%pc2");
151 if (do_nhm_cstates) 187 if (do_nhm_cstates)
152 fprintf(stderr, " %%pc3"); 188 fprintf(stderr, " %%pc3");
153 if (do_nhm_cstates) 189 if (do_nhm_cstates)
154 fprintf(stderr, " %%pc6"); 190 fprintf(stderr, " %%pc6");
155 if (do_snb_cstates) 191 if (do_snb_cstates)
156 fprintf(stderr, " %%pc7"); 192 fprintf(stderr, " %%pc7");
157 if (extra_msr_offset) 193 if (extra_msr_offset)
158 fprintf(stderr, " MSR 0x%x ", extra_msr_offset); 194 fprintf(stderr, " MSR 0x%x ", extra_msr_offset);
159 195
@@ -187,6 +223,15 @@ void dump_list(struct counters *cnt)
187 dump_cnt(cnt); 223 dump_cnt(cnt);
188} 224}
189 225
226/*
227 * column formatting convention & formats
228 * package: "pk" 2 columns %2d
229 * core: "cor" 3 columns %3d
230 * CPU: "CPU" 3 columns %3d
231 * GHz: "GHz" 3 columns %3.2
232 * TSC: "TSC" 3 columns %3.2
233 * percentage " %pc3" %6.2
234 */
190void print_cnt(struct counters *p) 235void print_cnt(struct counters *p)
191{ 236{
192 double interval_float; 237 double interval_float;
@@ -196,39 +241,45 @@ void print_cnt(struct counters *p)
196 /* topology columns, print blanks on 1st (average) line */ 241 /* topology columns, print blanks on 1st (average) line */
197 if (p == cnt_average) { 242 if (p == cnt_average) {
198 if (show_pkg) 243 if (show_pkg)
244 fprintf(stderr, " ");
245 if (show_pkg && show_core)
199 fprintf(stderr, " "); 246 fprintf(stderr, " ");
200 if (show_core) 247 if (show_core)
201 fprintf(stderr, " "); 248 fprintf(stderr, " ");
202 if (show_cpu) 249 if (show_cpu)
203 fprintf(stderr, " "); 250 fprintf(stderr, " " " ");
204 } else { 251 } else {
205 if (show_pkg) 252 if (show_pkg)
206 fprintf(stderr, "%d", p->pkg); 253 fprintf(stderr, "%2d", p->pkg);
254 if (show_pkg && show_core)
255 fprintf(stderr, " ");
207 if (show_core) 256 if (show_core)
208 fprintf(stderr, "%4d", p->core); 257 fprintf(stderr, "%3d", p->core);
209 if (show_cpu) 258 if (show_cpu)
210 fprintf(stderr, "%4d", p->cpu); 259 fprintf(stderr, " %3d", p->cpu);
211 } 260 }
212 261
213 /* %c0 */ 262 /* %c0 */
214 if (do_nhm_cstates) { 263 if (do_nhm_cstates) {
264 if (show_pkg || show_core || show_cpu)
265 fprintf(stderr, " ");
215 if (!skip_c0) 266 if (!skip_c0)
216 fprintf(stderr, "%7.2f", 100.0 * p->mperf/p->tsc); 267 fprintf(stderr, "%6.2f", 100.0 * p->mperf/p->tsc);
217 else 268 else
218 fprintf(stderr, " ****"); 269 fprintf(stderr, " ****");
219 } 270 }
220 271
221 /* GHz */ 272 /* GHz */
222 if (has_aperf) { 273 if (has_aperf) {
223 if (!aperf_mperf_unstable) { 274 if (!aperf_mperf_unstable) {
224 fprintf(stderr, "%5.2f", 275 fprintf(stderr, " %3.2f",
225 1.0 * p->tsc / units * p->aperf / 276 1.0 * p->tsc / units * p->aperf /
226 p->mperf / interval_float); 277 p->mperf / interval_float);
227 } else { 278 } else {
228 if (p->aperf > p->tsc || p->mperf > p->tsc) { 279 if (p->aperf > p->tsc || p->mperf > p->tsc) {
229 fprintf(stderr, " ****"); 280 fprintf(stderr, " ***");
230 } else { 281 } else {
231 fprintf(stderr, "%4.1f*", 282 fprintf(stderr, "%3.1f*",
232 1.0 * p->tsc / 283 1.0 * p->tsc /
233 units * p->aperf / 284 units * p->aperf /
234 p->mperf / interval_float); 285 p->mperf / interval_float);
@@ -241,7 +292,7 @@ void print_cnt(struct counters *p)
241 292
242 if (do_nhm_cstates) { 293 if (do_nhm_cstates) {
243 if (!skip_c1) 294 if (!skip_c1)
244 fprintf(stderr, "%7.2f", 100.0 * p->c1/p->tsc); 295 fprintf(stderr, " %6.2f", 100.0 * p->c1/p->tsc);
245 else 296 else
246 fprintf(stderr, " ****"); 297 fprintf(stderr, " ****");
247 } 298 }
@@ -252,13 +303,13 @@ void print_cnt(struct counters *p)
252 if (do_snb_cstates) 303 if (do_snb_cstates)
253 fprintf(stderr, " %6.2f", 100.0 * p->c7/p->tsc); 304 fprintf(stderr, " %6.2f", 100.0 * p->c7/p->tsc);
254 if (do_snb_cstates) 305 if (do_snb_cstates)
255 fprintf(stderr, " %5.2f", 100.0 * p->pc2/p->tsc); 306 fprintf(stderr, " %6.2f", 100.0 * p->pc2/p->tsc);
256 if (do_nhm_cstates) 307 if (do_nhm_cstates)
257 fprintf(stderr, " %5.2f", 100.0 * p->pc3/p->tsc); 308 fprintf(stderr, " %6.2f", 100.0 * p->pc3/p->tsc);
258 if (do_nhm_cstates) 309 if (do_nhm_cstates)
259 fprintf(stderr, " %5.2f", 100.0 * p->pc6/p->tsc); 310 fprintf(stderr, " %6.2f", 100.0 * p->pc6/p->tsc);
260 if (do_snb_cstates) 311 if (do_snb_cstates)
261 fprintf(stderr, " %5.2f", 100.0 * p->pc7/p->tsc); 312 fprintf(stderr, " %6.2f", 100.0 * p->pc7/p->tsc);
262 if (extra_msr_offset) 313 if (extra_msr_offset)
263 fprintf(stderr, " 0x%016llx", p->extra_msr); 314 fprintf(stderr, " 0x%016llx", p->extra_msr);
264 putc('\n', stderr); 315 putc('\n', stderr);
@@ -267,12 +318,20 @@ void print_cnt(struct counters *p)
267void print_counters(struct counters *counters) 318void print_counters(struct counters *counters)
268{ 319{
269 struct counters *cnt; 320 struct counters *cnt;
321 static int printed;
322
270 323
271 print_header(); 324 if (!printed || !summary_only)
325 print_header();
272 326
273 if (num_cpus > 1) 327 if (num_cpus > 1)
274 print_cnt(cnt_average); 328 print_cnt(cnt_average);
275 329
330 printed = 1;
331
332 if (summary_only)
333 return;
334
276 for (cnt = counters; cnt != NULL; cnt = cnt->next) 335 for (cnt = counters; cnt != NULL; cnt = cnt->next)
277 print_cnt(cnt); 336 print_cnt(cnt);
278 337
@@ -440,31 +499,51 @@ void compute_average(struct counters *delta, struct counters *avg)
440 free(sum); 499 free(sum);
441} 500}
442 501
443void get_counters(struct counters *cnt) 502int get_counters(struct counters *cnt)
444{ 503{
445 for ( ; cnt; cnt = cnt->next) { 504 for ( ; cnt; cnt = cnt->next) {
446 cnt->tsc = get_msr(cnt->cpu, MSR_TSC); 505
447 if (do_nhm_cstates) 506 if (cpu_migrate(cnt->cpu))
448 cnt->c3 = get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY); 507 return -1;
449 if (do_nhm_cstates) 508
450 cnt->c6 = get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY); 509 if (get_msr(cnt->cpu, MSR_TSC, &cnt->tsc))
451 if (do_snb_cstates) 510 return -1;
452 cnt->c7 = get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY); 511
453 if (has_aperf) 512 if (has_aperf) {
454 cnt->aperf = get_msr(cnt->cpu, MSR_APERF); 513 if (get_msr(cnt->cpu, MSR_APERF, &cnt->aperf))
455 if (has_aperf) 514 return -1;
456 cnt->mperf = get_msr(cnt->cpu, MSR_MPERF); 515 if (get_msr(cnt->cpu, MSR_MPERF, &cnt->mperf))
457 if (do_snb_cstates) 516 return -1;
458 cnt->pc2 = get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY); 517 }
459 if (do_nhm_cstates) 518
460 cnt->pc3 = get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY); 519 if (do_nhm_cstates) {
461 if (do_nhm_cstates) 520 if (get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY, &cnt->c3))
462 cnt->pc6 = get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY); 521 return -1;
522 if (get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY, &cnt->c6))
523 return -1;
524 }
525
463 if (do_snb_cstates) 526 if (do_snb_cstates)
464 cnt->pc7 = get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY); 527 if (get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY, &cnt->c7))
528 return -1;
529
530 if (do_nhm_cstates) {
531 if (get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY, &cnt->pc3))
532 return -1;
533 if (get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY, &cnt->pc6))
534 return -1;
535 }
536 if (do_snb_cstates) {
537 if (get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY, &cnt->pc2))
538 return -1;
539 if (get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY, &cnt->pc7))
540 return -1;
541 }
465 if (extra_msr_offset) 542 if (extra_msr_offset)
466 cnt->extra_msr = get_msr(cnt->cpu, extra_msr_offset); 543 if (get_msr(cnt->cpu, extra_msr_offset, &cnt->extra_msr))
544 return -1;
467 } 545 }
546 return 0;
468} 547}
469 548
470void print_nehalem_info(void) 549void print_nehalem_info(void)
@@ -475,7 +554,7 @@ void print_nehalem_info(void)
475 if (!do_nehalem_platform_info) 554 if (!do_nehalem_platform_info)
476 return; 555 return;
477 556
478 msr = get_msr(0, MSR_NEHALEM_PLATFORM_INFO); 557 get_msr(0, MSR_NEHALEM_PLATFORM_INFO, &msr);
479 558
480 ratio = (msr >> 40) & 0xFF; 559 ratio = (msr >> 40) & 0xFF;
481 fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n", 560 fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
@@ -491,7 +570,7 @@ void print_nehalem_info(void)
491 if (!do_nehalem_turbo_ratio_limit) 570 if (!do_nehalem_turbo_ratio_limit)
492 return; 571 return;
493 572
494 msr = get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT); 573 get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT, &msr);
495 574
496 ratio = (msr >> 24) & 0xFF; 575 ratio = (msr >> 24) & 0xFF;
497 if (ratio) 576 if (ratio)
@@ -557,7 +636,8 @@ void insert_counters(struct counters **list,
557 return; 636 return;
558 } 637 }
559 638
560 show_cpu = 1; /* there is more than one CPU */ 639 if (!summary_only)
640 show_cpu = 1; /* there is more than one CPU */
561 641
562 /* 642 /*
563 * insert on front of list. 643 * insert on front of list.
@@ -575,13 +655,15 @@ void insert_counters(struct counters **list,
575 655
576 while (prev->next && (prev->next->pkg < new->pkg)) { 656 while (prev->next && (prev->next->pkg < new->pkg)) {
577 prev = prev->next; 657 prev = prev->next;
578 show_pkg = 1; /* there is more than 1 package */ 658 if (!summary_only)
659 show_pkg = 1; /* there is more than 1 package */
579 } 660 }
580 661
581 while (prev->next && (prev->next->pkg == new->pkg) 662 while (prev->next && (prev->next->pkg == new->pkg)
582 && (prev->next->core < new->core)) { 663 && (prev->next->core < new->core)) {
583 prev = prev->next; 664 prev = prev->next;
584 show_core = 1; /* there is more than 1 core */ 665 if (!summary_only)
666 show_core = 1; /* there is more than 1 core */
585 } 667 }
586 668
587 while (prev->next && (prev->next->pkg == new->pkg) 669 while (prev->next && (prev->next->pkg == new->pkg)
@@ -681,7 +763,7 @@ int get_core_id(int cpu)
681} 763}
682 764
683/* 765/*
684 * run func(index, cpu) on every cpu in /proc/stat 766 * run func(pkg, core, cpu) on every cpu in /proc/stat
685 */ 767 */
686 768
687int for_all_cpus(void (func)(int, int, int)) 769int for_all_cpus(void (func)(int, int, int))
@@ -717,18 +799,18 @@ int for_all_cpus(void (func)(int, int, int))
717 799
718void re_initialize(void) 800void re_initialize(void)
719{ 801{
720 printf("turbostat: topology changed, re-initializing.\n");
721 free_all_counters(); 802 free_all_counters();
722 num_cpus = for_all_cpus(alloc_new_counters); 803 num_cpus = for_all_cpus(alloc_new_counters);
723 need_reinitialize = 0; 804 cpu_mask_uninit();
724 printf("num_cpus is now %d\n", num_cpus); 805 cpu_mask_init(num_cpus);
806 printf("turbostat: re-initialized with num_cpus %d\n", num_cpus);
725} 807}
726 808
727void dummy(int pkg, int core, int cpu) { return; } 809void dummy(int pkg, int core, int cpu) { return; }
728/* 810/*
729 * check to see if a cpu came on-line 811 * check to see if a cpu came on-line
730 */ 812 */
731void verify_num_cpus(void) 813int verify_num_cpus(void)
732{ 814{
733 int new_num_cpus; 815 int new_num_cpus;
734 816
@@ -738,8 +820,9 @@ void verify_num_cpus(void)
738 if (verbose) 820 if (verbose)
739 printf("num_cpus was %d, is now %d\n", 821 printf("num_cpus was %d, is now %d\n",
740 num_cpus, new_num_cpus); 822 num_cpus, new_num_cpus);
741 need_reinitialize = 1; 823 return -1;
742 } 824 }
825 return 0;
743} 826}
744 827
745void turbostat_loop() 828void turbostat_loop()
@@ -749,25 +832,25 @@ restart:
749 gettimeofday(&tv_even, (struct timezone *)NULL); 832 gettimeofday(&tv_even, (struct timezone *)NULL);
750 833
751 while (1) { 834 while (1) {
752 verify_num_cpus(); 835 if (verify_num_cpus()) {
753 if (need_reinitialize) {
754 re_initialize(); 836 re_initialize();
755 goto restart; 837 goto restart;
756 } 838 }
757 sleep(interval_sec); 839 sleep(interval_sec);
758 get_counters(cnt_odd); 840 if (get_counters(cnt_odd)) {
841 re_initialize();
842 goto restart;
843 }
759 gettimeofday(&tv_odd, (struct timezone *)NULL); 844 gettimeofday(&tv_odd, (struct timezone *)NULL);
760
761 compute_delta(cnt_odd, cnt_even, cnt_delta); 845 compute_delta(cnt_odd, cnt_even, cnt_delta);
762 timersub(&tv_odd, &tv_even, &tv_delta); 846 timersub(&tv_odd, &tv_even, &tv_delta);
763 compute_average(cnt_delta, cnt_average); 847 compute_average(cnt_delta, cnt_average);
764 print_counters(cnt_delta); 848 print_counters(cnt_delta);
765 if (need_reinitialize) { 849 sleep(interval_sec);
850 if (get_counters(cnt_even)) {
766 re_initialize(); 851 re_initialize();
767 goto restart; 852 goto restart;
768 } 853 }
769 sleep(interval_sec);
770 get_counters(cnt_even);
771 gettimeofday(&tv_even, (struct timezone *)NULL); 854 gettimeofday(&tv_even, (struct timezone *)NULL);
772 compute_delta(cnt_even, cnt_odd, cnt_delta); 855 compute_delta(cnt_even, cnt_odd, cnt_delta);
773 timersub(&tv_even, &tv_odd, &tv_delta); 856 timersub(&tv_even, &tv_odd, &tv_delta);
@@ -953,6 +1036,7 @@ void turbostat_init()
953 check_super_user(); 1036 check_super_user();
954 1037
955 num_cpus = for_all_cpus(alloc_new_counters); 1038 num_cpus = for_all_cpus(alloc_new_counters);
1039 cpu_mask_init(num_cpus);
956 1040
957 if (verbose) 1041 if (verbose)
958 print_nehalem_info(); 1042 print_nehalem_info();
@@ -1005,8 +1089,11 @@ void cmdline(int argc, char **argv)
1005 1089
1006 progname = argv[0]; 1090 progname = argv[0];
1007 1091
1008 while ((opt = getopt(argc, argv, "+vi:M:")) != -1) { 1092 while ((opt = getopt(argc, argv, "+svi:M:")) != -1) {
1009 switch (opt) { 1093 switch (opt) {
1094 case 's':
1095 summary_only++;
1096 break;
1010 case 'v': 1097 case 'v':
1011 verbose++; 1098 verbose++;
1012 break; 1099 break;