author		Uwe Kleine-König <u.kleine-koenig@pengutronix.de>	2010-06-30 06:16:24 -0400
committer	Uwe Kleine-König <u.kleine-koenig@pengutronix.de>	2010-07-26 08:27:25 -0400
commit		2dcf78c0eeae3bd07082821557014f25f02ca2e9 (patch)
tree		8ca5c4c7f35c9a9ab07fcd9732124c905e609aa1
parent		6b6322676add0fa2713d0ec89a28390fd4d907f5 (diff)
parent		5109a4597f7e758b8d20694392d0361a0b4c43b1 (diff)
Merge branch 'imx/for-2.6.36' of git://git.pengutronix.de/git/ukl/linux-2.6 into HEAD
There are some more conflicts than detected by git, namely support for the
newly added cpuimx machines needed to be converted to dynamic device
registration.

Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>

Conflicts:
	arch/arm/mach-imx/Makefile
	arch/arm/mach-imx/devices.c
	arch/arm/mach-imx/devices.h
	arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
	arch/arm/mach-mx2/Kconfig
	arch/arm/mach-mx25/Makefile
	arch/arm/mach-mx25/devices.c
	arch/arm/plat-mxc/include/mach/mx25.h
	arch/arm/plat-mxc/include/mach/mxc_nand.h
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-sfi15
-rw-r--r--Documentation/DMA-API-HOWTO.txt85
-rw-r--r--Documentation/SubmittingDrivers5
-rw-r--r--Documentation/acpi/apei/einj.txt59
-rw-r--r--Documentation/arm/Samsung-S3C24XX/GPIO.txt81
-rw-r--r--Documentation/arm/Samsung-S3C24XX/Overview.txt15
-rw-r--r--Documentation/arm/Samsung/GPIO.txt42
-rw-r--r--Documentation/arm/Samsung/Overview.txt33
-rw-r--r--Documentation/cgroups/cgroups.txt2
-rw-r--r--Documentation/cgroups/memory.txt326
-rw-r--r--Documentation/feature-removal-schedule.txt10
-rw-r--r--Documentation/filesystems/Locking7
-rw-r--r--Documentation/filesystems/squashfs.txt32
-rw-r--r--Documentation/filesystems/vfs.txt9
-rw-r--r--Documentation/hwmon/dme173751
-rw-r--r--Documentation/hwmon/lm637
-rw-r--r--Documentation/hwmon/ltc42454
-rw-r--r--Documentation/hwmon/sysfs-interface13
-rw-r--r--Documentation/hwmon/tmp10226
-rw-r--r--Documentation/kernel-parameters.txt14
-rw-r--r--Documentation/vm/numa186
-rw-r--r--MAINTAINERS28
-rw-r--r--Makefile4
-rw-r--r--arch/alpha/Kconfig3
-rw-r--r--arch/alpha/include/asm/scatterlist.h19
-rw-r--r--arch/alpha/math-emu/sfp-util.h5
-rw-r--r--arch/arm/Makefile4
-rw-r--r--arch/arm/configs/s3c2410_defconfig99
-rw-r--r--arch/arm/configs/s3c6400_defconfig419
-rw-r--r--arch/arm/configs/s5p6440_defconfig29
-rw-r--r--arch/arm/configs/s5p6442_defconfig24
-rw-r--r--arch/arm/configs/s5pc100_defconfig233
-rw-r--r--arch/arm/configs/s5pc110_defconfig30
-rw-r--r--arch/arm/configs/s5pv210_defconfig33
-rw-r--r--arch/arm/include/asm/scatterlist.h3
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c19
-rw-r--r--arch/arm/mach-davinci/include/mach/mmc.h3
-rw-r--r--arch/arm/mach-imx/Kconfig (renamed from arch/arm/mach-mx2/Kconfig)93
-rw-r--r--arch/arm/mach-imx/Makefile (renamed from arch/arm/mach-mx2/Makefile)18
-rw-r--r--arch/arm/mach-imx/Makefile.boot (renamed from arch/arm/mach-mx2/Makefile.boot)4
-rw-r--r--arch/arm/mach-imx/clock-imx1.c (renamed from arch/arm/mach-mx1/clock.c)50
-rw-r--r--arch/arm/mach-imx/clock-imx21.c (renamed from arch/arm/mach-mx2/clock_imx21.c)0
-rw-r--r--arch/arm/mach-imx/clock-imx27.c (renamed from arch/arm/mach-mx2/clock_imx27.c)0
-rw-r--r--arch/arm/mach-imx/cpu-imx27.c (renamed from arch/arm/mach-mx2/cpu_imx27.c)0
-rw-r--r--arch/arm/mach-imx/devices-imx1.h18
-rw-r--r--arch/arm/mach-imx/devices-imx21.h30
-rw-r--r--arch/arm/mach-imx/devices-imx27.h38
-rw-r--r--arch/arm/mach-imx/devices.c (renamed from arch/arm/mach-mx2/devices.c)259
-rw-r--r--arch/arm/mach-imx/devices.h (renamed from arch/arm/mach-mx2/devices.h)30
-rw-r--r--arch/arm/mach-imx/dma-v1.c (renamed from arch/arm/plat-mxc/dma-mx1-mx2.c)4
-rw-r--r--arch/arm/mach-imx/eukrea_mbimx27-baseboard.c (renamed from arch/arm/mach-mx2/eukrea_mbimx27-baseboard.c)24
-rw-r--r--arch/arm/mach-imx/include/mach/dma-mx1-mx2.h10
-rw-r--r--arch/arm/mach-imx/include/mach/dma-v1.h (renamed from arch/arm/plat-mxc/include/mach/dma-mx1-mx2.h)10
-rw-r--r--arch/arm/mach-imx/mach-cpuimx27.c (renamed from arch/arm/mach-mx2/mach-cpuimx27.c)25
-rw-r--r--arch/arm/mach-imx/mach-imx27lite.c (renamed from arch/arm/mach-mx2/mach-imx27lite.c)11
-rw-r--r--arch/arm/mach-imx/mach-mx1ads.c (renamed from arch/arm/mach-mx1/mach-mx1ads.c)34
-rw-r--r--arch/arm/mach-imx/mach-mx21ads.c (renamed from arch/arm/mach-mx2/mach-mx21ads.c)58
-rw-r--r--arch/arm/mach-imx/mach-mx27_3ds.c (renamed from arch/arm/mach-mx2/mach-mx27_3ds.c)17
-rw-r--r--arch/arm/mach-imx/mach-mx27ads.c (renamed from arch/arm/mach-mx2/mach-mx27ads.c)76
-rw-r--r--arch/arm/mach-imx/mach-mxt_td60.c (renamed from arch/arm/mach-mx2/mach-mxt_td60.c)36
-rw-r--r--arch/arm/mach-imx/mach-pca100.c (renamed from arch/arm/mach-mx2/mach-pca100.c)23
-rw-r--r--arch/arm/mach-imx/mach-pcm038.c (renamed from arch/arm/mach-mx2/mach-pcm038.c)33
-rw-r--r--arch/arm/mach-imx/mach-scb9328.c (renamed from arch/arm/mach-mx1/mach-scb9328.c)21
-rw-r--r--arch/arm/mach-imx/mm-imx1.c (renamed from arch/arm/mach-mx1/generic.c)23
-rw-r--r--arch/arm/mach-imx/mm-imx21.c (renamed from arch/arm/mach-mx2/mm-imx21.c)5
-rw-r--r--arch/arm/mach-imx/mm-imx27.c (renamed from arch/arm/mach-mx2/mm-imx27.c)5
-rw-r--r--arch/arm/mach-imx/mx1-camera-fiq-ksym.c (renamed from arch/arm/mach-mx1/ksym_mx1.c)0
-rw-r--r--arch/arm/mach-imx/mx1-camera-fiq.S (renamed from arch/arm/mach-mx1/mx1_camera_fiq.S)0
-rw-r--r--arch/arm/mach-imx/pcm970-baseboard.c (renamed from arch/arm/mach-mx2/pcm970-baseboard.c)0
-rw-r--r--arch/arm/mach-imx/pm-imx27.c (renamed from arch/arm/mach-mx2/pm-imx27.c)0
-rw-r--r--arch/arm/mach-mx1/Kconfig19
-rw-r--r--arch/arm/mach-mx1/Makefile15
-rw-r--r--arch/arm/mach-mx1/Makefile.boot4
-rw-r--r--arch/arm/mach-mx1/crm_regs.h55
-rw-r--r--arch/arm/mach-mx1/devices.c242
-rw-r--r--arch/arm/mach-mx1/devices.h7
-rw-r--r--arch/arm/mach-mx2/serial.c141
-rw-r--r--arch/arm/mach-mx25/Kconfig5
-rw-r--r--arch/arm/mach-mx25/Makefile2
-rw-r--r--arch/arm/mach-mx25/devices-imx25.h38
-rw-r--r--arch/arm/mach-mx25/devices.c231
-rw-r--r--arch/arm/mach-mx25/devices.h12
-rw-r--r--arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c5
-rw-r--r--arch/arm/mach-mx25/mach-cpuimx25.c19
-rw-r--r--arch/arm/mach-mx25/mach-mx25_3ds.c (renamed from arch/arm/mach-mx25/mach-mx25pdk.c)21
-rw-r--r--arch/arm/mach-mx25/mm.c7
-rw-r--r--arch/arm/mach-mx3/Kconfig29
-rw-r--r--arch/arm/mach-mx3/Makefile2
-rw-r--r--arch/arm/mach-mx3/devices-imx31.h38
-rw-r--r--arch/arm/mach-mx3/devices-imx35.h32
-rw-r--r--arch/arm/mach-mx3/devices.c247
-rw-r--r--arch/arm/mach-mx3/devices.h13
-rw-r--r--arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c5
-rw-r--r--arch/arm/mach-mx3/mach-armadillo5x0.c17
-rw-r--r--arch/arm/mach-mx3/mach-cpuimx35.c17
-rw-r--r--arch/arm/mach-mx3/mach-kzm_arm11_01.c31
-rw-r--r--arch/arm/mach-mx3/mach-mx31_3ds.c64
-rw-r--r--arch/arm/mach-mx3/mach-mx31ads.c55
-rw-r--r--arch/arm/mach-mx3/mach-mx31lilly.c14
-rw-r--r--arch/arm/mach-mx3/mach-mx31lite.c17
-rw-r--r--arch/arm/mach-mx3/mach-mx31moboard.c95
-rw-r--r--arch/arm/mach-mx3/mach-mx35_3ds.c (renamed from arch/arm/mach-mx3/mach-mx35pdk.c)16
-rw-r--r--arch/arm/mach-mx3/mach-pcm037.c31
-rw-r--r--arch/arm/mach-mx3/mach-pcm037_eet.c7
-rw-r--r--arch/arm/mach-mx3/mach-pcm043.c25
-rw-r--r--arch/arm/mach-mx3/mach-qong.c16
-rw-r--r--arch/arm/mach-mx3/mm.c7
-rw-r--r--arch/arm/mach-mx3/mx31lilly-db.c14
-rw-r--r--arch/arm/mach-mx3/mx31lite-db.c15
-rw-r--r--arch/arm/mach-mx3/mx31moboard-devboard.c10
-rw-r--r--arch/arm/mach-mx3/mx31moboard-marxbot.c4
-rw-r--r--arch/arm/mach-mx3/mx31moboard-smartbot.c11
-rw-r--r--arch/arm/mach-mx5/devices.c2
-rw-r--r--arch/arm/mach-mx5/mm.c3
-rw-r--r--arch/arm/mach-mxc91231/crm_regs.h5
-rw-r--r--arch/arm/mach-mxc91231/devices.c2
-rw-r--r--arch/arm/mach-mxc91231/mm.c8
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c4
-rw-r--r--arch/arm/mach-omap2/board-ldp.c3
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c4
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c4
-rw-r--r--arch/arm/mach-omap2/board-omap3touchbook.c3
-rw-r--r--arch/arm/mach-orion5x/dns323-setup.c22
-rw-r--r--arch/arm/mach-s3c2440/mach-gta02.c76
-rw-r--r--arch/arm/mach-s3c64xx/clock.c6
-rw-r--r--arch/arm/mach-s5p6440/include/mach/irqs.h9
-rw-r--r--arch/arm/mach-s5p6442/include/mach/irqs.h5
-rw-r--r--arch/arm/mach-s5pc100/include/mach/irqs.h3
-rw-r--r--arch/arm/mach-s5pc100/include/mach/regs-gpio.h7
-rw-r--r--arch/arm/mach-s5pv210/include/mach/irqs.h14
-rw-r--r--arch/arm/mach-s5pv210/include/mach/regs-gpio.h14
-rw-r--r--arch/arm/mach-u300/i2c.c57
-rw-r--r--arch/arm/mach-u300/include/mach/irqs.h7
-rw-r--r--arch/arm/mach-ux500/board-mop500.c2
-rw-r--r--arch/arm/mach-ux500/clock.c2
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c4
-rw-r--r--arch/arm/mach-ux500/devices-db8500.c109
-rw-r--r--arch/arm/mach-ux500/include/mach/db8500-regs.h12
-rw-r--r--arch/arm/mach-ux500/include/mach/devices.h3
-rw-r--r--arch/arm/mach-ux500/ste-dma40-db8500.h154
-rw-r--r--arch/arm/plat-mxc/Kconfig10
-rw-r--r--arch/arm/plat-mxc/Makefile4
-rw-r--r--arch/arm/plat-mxc/audmux-v1.c4
-rw-r--r--arch/arm/plat-mxc/audmux-v2.c4
-rw-r--r--arch/arm/plat-mxc/devices.c33
-rw-r--r--arch/arm/plat-mxc/devices/Kconfig11
-rw-r--r--arch/arm/plat-mxc/devices/Makefile4
-rw-r--r--arch/arm/plat-mxc/devices/platform-imx-i2c.c29
-rw-r--r--arch/arm/plat-mxc/devices/platform-imx-uart.c60
-rw-r--r--arch/arm/plat-mxc/devices/platform-mxc_nand.c44
-rw-r--r--arch/arm/plat-mxc/devices/platform-spi_imx.c30
-rw-r--r--arch/arm/plat-mxc/ehci.c4
-rw-r--r--arch/arm/plat-mxc/include/mach/board-armadillo5x0.h15
-rw-r--r--arch/arm/plat-mxc/include/mach/board-eukrea_cpuimx27.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/board-kzmarm11.h39
-rw-r--r--arch/arm/plat-mxc/include/mach/board-mx21ads.h52
-rw-r--r--arch/arm/plat-mxc/include/mach/board-mx27ads.h344
-rw-r--r--arch/arm/plat-mxc/include/mach/board-mx27lite.h14
-rw-r--r--arch/arm/plat-mxc/include/mach/board-mx27pdk.h14
-rw-r--r--arch/arm/plat-mxc/include/mach/board-mx31_3ds.h59
-rw-r--r--arch/arm/plat-mxc/include/mach/board-mx31ads.h117
-rw-r--r--arch/arm/plat-mxc/include/mach/board-mx31lilly.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/board-mx31lite.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/board-mx31moboard.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/board-mx35pdk.h22
-rw-r--r--arch/arm/plat-mxc/include/mach/board-pcm037.h22
-rw-r--r--arch/arm/plat-mxc/include/mach/board-pcm038.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/board-pcm043.h22
-rw-r--r--arch/arm/plat-mxc/include/mach/board-qong.h17
-rw-r--r--arch/arm/plat-mxc/include/mach/devices-common.h42
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mxc91231.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/mx1.h28
-rw-r--r--arch/arm/plat-mxc/include/mach/mx25.h36
-rw-r--r--arch/arm/plat-mxc/include/mach/mx27.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/mx31.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/mx35.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/mx3_camera.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/mxc91231.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/mxc_nand.h6
-rw-r--r--arch/arm/plat-mxc/include/mach/system.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/timex.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/uncompress.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/vmalloc.h4
-rw-r--r--arch/arm/plat-mxc/irq.c3
-rw-r--r--arch/arm/plat-mxc/system.c4
-rw-r--r--arch/arm/plat-mxc/tzic.c2
-rw-r--r--arch/arm/plat-omap/gpio.c104
-rw-r--r--arch/arm/plat-s5p/Kconfig1
-rw-r--r--arch/arm/plat-s5p/clock.c1
-rw-r--r--arch/arm/plat-s5p/include/plat/irqs.h7
-rw-r--r--arch/arm/plat-s5p/irq-eint.c15
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-cfg.h23
-rw-r--r--arch/avr32/include/asm/scatterlist.h20
-rw-r--r--arch/blackfin/include/asm/scatterlist.h22
-rw-r--r--arch/blackfin/kernel/ptrace.c33
-rw-r--r--arch/cris/include/asm/scatterlist.h17
-rw-r--r--arch/frv/include/asm/cache.h2
-rw-r--r--arch/frv/include/asm/mem-layout.h4
-rw-r--r--arch/frv/include/asm/scatterlist.h40
-rw-r--r--arch/frv/kernel/ptrace.c20
-rw-r--r--arch/frv/kernel/sysctl.c18
-rw-r--r--arch/h8300/include/asm/scatterlist.h12
-rw-r--r--arch/ia64/Kconfig11
-rw-r--r--arch/ia64/include/asm/acpi.h1
-rw-r--r--arch/ia64/include/asm/scatterlist.h4
-rw-r--r--arch/ia64/include/asm/topology.h5
-rw-r--r--arch/ia64/kernel/pci-swiotlb.c2
-rw-r--r--arch/ia64/kernel/ptrace.c4
-rw-r--r--arch/ia64/kernel/smpboot.c11
-rw-r--r--arch/ia64/pci/pci.c5
-rw-r--r--arch/m32r/include/asm/scatterlist.h15
-rw-r--r--arch/m68k/Kconfig1
-rw-r--r--arch/m68k/amiga/config.c174
-rw-r--r--arch/m68k/amiga/platform.c116
-rw-r--r--arch/m68k/include/asm/amigayle.h6
-rw-r--r--arch/m68k/include/asm/atomic.h2
-rw-r--r--arch/m68k/include/asm/cache.h2
-rw-r--r--arch/m68k/include/asm/scatterlist.h16
-rw-r--r--arch/microblaze/include/asm/scatterlist.h2
-rw-r--r--arch/mips/include/asm/scatterlist.h22
-rw-r--r--arch/mn10300/include/asm/scatterlist.h39
-rw-r--r--arch/parisc/Kconfig3
-rw-r--r--arch/parisc/include/asm/cacheflush.h16
-rw-r--r--arch/parisc/include/asm/scatterlist.h20
-rw-r--r--arch/parisc/kernel/asm-offsets.c15
-rw-r--r--arch/parisc/kernel/entry.S52
-rw-r--r--arch/parisc/kernel/syscall.S32
-rw-r--r--arch/parisc/math-emu/decode_exc.c1
-rw-r--r--arch/parisc/mm/fault.c7
-rw-r--r--arch/powerpc/Kconfig3
-rw-r--r--arch/powerpc/include/asm/scatterlist.h28
-rw-r--r--arch/powerpc/include/asm/sfp-machine.h6
-rw-r--r--arch/powerpc/kernel/dma-swiotlb.c4
-rw-r--r--arch/powerpc/kernel/dma.c12
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c3
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c2
-rw-r--r--arch/powerpc/platforms/pseries/hvCall_inst.c10
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c376
-rw-r--r--arch/s390/Kconfig8
-rw-r--r--arch/s390/boot/compressed/Makefile5
-rw-r--r--arch/s390/boot/compressed/misc.c4
-rw-r--r--arch/s390/include/asm/atomic.h19
-rw-r--r--arch/s390/include/asm/ccwdev.h10
-rw-r--r--arch/s390/include/asm/scatterlist.h2
-rw-r--r--arch/s390/include/asm/sfp-util.h2
-rw-r--r--arch/s390/kernel/asm-offsets.c4
-rw-r--r--arch/s390/kernel/entry64.S2
-rw-r--r--arch/s390/kernel/kprobes.c3
-rw-r--r--arch/s390/kernel/setup.c2
-rw-r--r--arch/s390/kernel/smp.c6
-rw-r--r--arch/s390/kvm/Kconfig11
-rw-r--r--arch/s390/kvm/sie64a.S4
-rw-r--r--arch/s390/mm/cmm.c109
-rw-r--r--arch/score/include/asm/scatterlist.h2
-rw-r--r--arch/sh/Kconfig3
-rw-r--r--arch/sh/kernel/ptrace_32.c23
-rw-r--r--arch/sh/math-emu/sfp-util.h4
-rw-r--r--arch/sparc/Kconfig3
-rw-r--r--arch/sparc/include/asm/scatterlist.h5
-rw-r--r--arch/sparc/kernel/perf_event.c108
-rw-r--r--arch/sparc/math-emu/sfp-util_32.h6
-rw-r--r--arch/sparc/math-emu/sfp-util_64.h6
-rw-r--r--arch/x86/Kconfig7
-rw-r--r--arch/x86/boot/compressed/relocs.c4
-rw-r--r--arch/x86/include/asm/acpi.h2
-rw-r--r--arch/x86/include/asm/cpufeature.h7
-rw-r--r--arch/x86/include/asm/mce.h8
-rw-r--r--arch/x86/include/asm/perf_event_p4.h3
-rw-r--r--arch/x86/include/asm/rdc321x_defs.h12
-rw-r--r--arch/x86/include/asm/scatterlist.h5
-rw-r--r--arch/x86/include/asm/thread_info.h4
-rw-r--r--arch/x86/include/asm/topology.h26
-rw-r--r--arch/x86/kernel/acpi/boot.c19
-rw-r--r--arch/x86/kernel/acpi/sleep.c2
-rw-r--r--arch/x86/kernel/apic/apic.c41
-rw-r--r--arch/x86/kernel/cpu/common.c6
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c6
-rw-r--r--arch/x86/kernel/cpu/mcheck/Makefile2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-apei.c138
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-internal.h23
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c79
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c41
-rw-r--r--arch/x86/kernel/cpuid.c2
-rw-r--r--arch/x86/kernel/msr.c2
-rw-r--r--arch/x86/kernel/pci-swiotlb.c2
-rw-r--r--arch/x86/kernel/setup.c11
-rw-r--r--arch/x86/kernel/setup_percpu.c4
-rw-r--r--arch/x86/kernel/smpboot.c26
-rw-r--r--arch/x86/lguest/boot.c1
-rw-r--r--arch/x86/mm/numa_64.c9
-rw-r--r--arch/x86/mm/pat.c10
-rw-r--r--arch/x86/mm/pat_internal.h6
-rw-r--r--arch/x86/mm/pat_rbtree.c7
-rw-r--r--arch/x86/mm/pf_in.c2
-rw-r--r--arch/x86/mm/pgtable_32.c1
-rw-r--r--arch/x86/pci/acpi.c8
-rw-r--r--arch/xtensa/include/asm/scatterlist.h23
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig9
-rw-r--r--drivers/acpi/Makefile5
-rw-r--r--drivers/acpi/acpi_pad.c22
-rw-r--r--drivers/acpi/acpica/evxfevnt.c33
-rw-r--r--drivers/acpi/acpica/hwacpi.c20
-rw-r--r--drivers/acpi/apei/Kconfig30
-rw-r--r--drivers/acpi/apei/Makefile5
-rw-r--r--drivers/acpi/apei/apei-base.c593
-rw-r--r--drivers/acpi/apei/apei-internal.h114
-rw-r--r--drivers/acpi/apei/cper.c84
-rw-r--r--drivers/acpi/apei/einj.c548
-rw-r--r--drivers/acpi/apei/erst.c855
-rw-r--r--drivers/acpi/apei/ghes.c427
-rw-r--r--drivers/acpi/apei/hest.c173
-rw-r--r--drivers/acpi/atomicio.c360
-rw-r--r--drivers/acpi/ec.c3
-rw-r--r--drivers/acpi/hed.c112
-rw-r--r--drivers/acpi/hest.c139
-rw-r--r--drivers/acpi/osl.c9
-rw-r--r--drivers/acpi/pci_root.c67
-rw-r--r--drivers/acpi/processor_driver.c15
-rw-r--r--drivers/acpi/processor_idle.c58
-rw-r--r--drivers/acpi/sleep.c157
-rw-r--r--drivers/acpi/sleep.h2
-rw-r--r--drivers/acpi/tables.c4
-rw-r--r--drivers/acpi/video.c118
-rw-r--r--drivers/acpi/video_detect.c2
-rw-r--r--drivers/ata/Kconfig511
-rw-r--r--drivers/ata/Makefile83
-rw-r--r--drivers/ata/ata_generic.c2
-rw-r--r--drivers/ata/ata_piix.c4
-rw-r--r--drivers/ata/libata-core.c16
-rw-r--r--drivers/ata/libata-sff.c416
-rw-r--r--drivers/ata/pata_acpi.c2
-rw-r--r--drivers/ata/pata_ali.c5
-rw-r--r--drivers/ata/pata_amd.c2
-rw-r--r--drivers/ata/pata_artop.c2
-rw-r--r--drivers/ata/pata_atiixp.c4
-rw-r--r--drivers/ata/pata_atp867x.c2
-rw-r--r--drivers/ata/pata_bf54x.c2
-rw-r--r--drivers/ata/pata_cmd64x.c2
-rw-r--r--drivers/ata/pata_cs5520.c2
-rw-r--r--drivers/ata/pata_cs5530.c2
-rw-r--r--drivers/ata/pata_cs5535.c2
-rw-r--r--drivers/ata/pata_cs5536.c2
-rw-r--r--drivers/ata/pata_cypress.c2
-rw-r--r--drivers/ata/pata_efar.c4
-rw-r--r--drivers/ata/pata_hpt366.c2
-rw-r--r--drivers/ata/pata_hpt37x.c2
-rw-r--r--drivers/ata/pata_hpt3x2n.c2
-rw-r--r--drivers/ata/pata_hpt3x3.c2
-rw-r--r--drivers/ata/pata_icside.c2
-rw-r--r--drivers/ata/pata_it8213.c2
-rw-r--r--drivers/ata/pata_it821x.c2
-rw-r--r--drivers/ata/pata_jmicron.c2
-rw-r--r--drivers/ata/pata_macio.c2
-rw-r--r--drivers/ata/pata_marvell.c2
-rw-r--r--drivers/ata/pata_mpc52xx.c2
-rw-r--r--drivers/ata/pata_netcell.c2
-rw-r--r--drivers/ata/pata_ninja32.c2
-rw-r--r--drivers/ata/pata_ns87415.c2
-rw-r--r--drivers/ata/pata_octeon_cf.c18
-rw-r--r--drivers/ata/pata_oldpiix.c2
-rw-r--r--drivers/ata/pata_optidma.c2
-rw-r--r--drivers/ata/pata_pdc2027x.c2
-rw-r--r--drivers/ata/pata_pdc202xx_old.c2
-rw-r--r--drivers/ata/pata_piccolo.c2
-rw-r--r--drivers/ata/pata_radisys.c2
-rw-r--r--drivers/ata/pata_rdc.c4
-rw-r--r--drivers/ata/pata_sc1200.c2
-rw-r--r--drivers/ata/pata_scc.c4
-rw-r--r--drivers/ata/pata_sch.c2
-rw-r--r--drivers/ata/pata_serverworks.c2
-rw-r--r--drivers/ata/pata_sil680.c4
-rw-r--r--drivers/ata/pata_sis.c2
-rw-r--r--drivers/ata/pata_sl82c105.c2
-rw-r--r--drivers/ata/pata_triflex.c2
-rw-r--r--drivers/ata/pata_via.c2
-rw-r--r--drivers/ata/sata_mv.c4
-rw-r--r--drivers/ata/sata_nv.c8
-rw-r--r--drivers/ata/sata_qstor.c16
-rw-r--r--drivers/ata/sata_sil.c4
-rw-r--r--drivers/ata/sata_sis.c4
-rw-r--r--drivers/ata/sata_svw.c2
-rw-r--r--drivers/ata/sata_uli.c2
-rw-r--r--drivers/ata/sata_via.c8
-rw-r--r--drivers/ata/sata_vsc.c2
-rw-r--r--drivers/base/topology.c2
-rw-r--r--drivers/char/Kconfig7
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/agp/amd64-agp.c28
-rw-r--r--drivers/char/amiserial.c61
-rw-r--r--drivers/char/applicom.c11
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c15
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c468
-rw-r--r--drivers/char/ppdev.c4
-rw-r--r--drivers/char/ps3flash.c3
-rw-r--r--drivers/char/ramoops.c162
-rw-r--r--drivers/char/vt.c10
-rw-r--r--drivers/cpuidle/cpuidle.c12
-rw-r--r--drivers/cpuidle/cpuidle.h1
-rw-r--r--drivers/cpuidle/driver.c16
-rw-r--r--drivers/cpuidle/sysfs.c5
-rw-r--r--drivers/dma/Kconfig9
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/pl330.c866
-rw-r--r--drivers/edac/i5000_edac.c20
-rw-r--r--drivers/edac/i5400_edac.c20
-rw-r--r--drivers/edac/i82443bxgx_edac.c22
-rw-r--r--drivers/firewire/core-card.c22
-rw-r--r--drivers/firewire/core-cdev.c8
-rw-r--r--drivers/firewire/core-transaction.c96
-rw-r--r--drivers/firewire/core.h6
-rw-r--r--drivers/firewire/ohci.c188
-rw-r--r--drivers/firewire/ohci.h10
-rw-r--r--drivers/gpio/Kconfig37
-rw-r--r--drivers/gpio/Makefile5
-rw-r--r--drivers/gpio/cs5535-gpio.c2
-rw-r--r--drivers/gpio/gpiolib.c49
-rw-r--r--drivers/gpio/it8761e_gpio.c5
-rw-r--r--drivers/gpio/janz-ttl.c258
-rw-r--r--drivers/gpio/langwell_gpio.c83
-rw-r--r--drivers/gpio/max732x.c368
-rw-r--r--drivers/gpio/pca953x.c2
-rw-r--r--drivers/gpio/pl061.c2
-rw-r--r--drivers/gpio/rdc321x-gpio.c246
-rw-r--r--drivers/gpio/tc35892-gpio.c381
-rw-r--r--drivers/gpu/drm/drm_edid.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c5
-rw-r--r--drivers/hid/Kconfig8
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-gyration.c1
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-roccat-kone.c73
-rw-r--r--drivers/hid/hid-roccat-kone.h9
-rw-r--r--drivers/hid/hid-roccat.c428
-rw-r--r--drivers/hid/hid-roccat.h31
-rw-r--r--drivers/hwmon/Kconfig34
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/adm1031.c68
-rw-r--r--drivers/hwmon/applesmc.c186
-rw-r--r--drivers/hwmon/asus_atk0110.c7
-rw-r--r--drivers/hwmon/dme1737.c328
-rw-r--r--drivers/hwmon/emc1403.c344
-rw-r--r--drivers/hwmon/f71882fg.c170
-rw-r--r--drivers/hwmon/lm63.c16
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/lm90.c3
-rw-r--r--drivers/hwmon/ltc4245.c18
-rw-r--r--drivers/hwmon/tmp102.c321
-rw-r--r--drivers/hwmon/tmp401.c255
-rw-r--r--drivers/ide/gayle.c147
-rw-r--r--drivers/idle/Kconfig11
-rw-r--r--drivers/idle/Makefile1
-rwxr-xr-xdrivers/idle/intel_idle.c461
-rw-r--r--drivers/ieee1394/dv1394.c11
-rw-r--r--drivers/ieee1394/raw1394.c3
-rw-r--r--drivers/ieee1394/video1394.c5
-rw-r--r--drivers/infiniband/core/ucm.c11
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c25
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c771
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c6
-rw-r--r--drivers/input/joydev.c10
-rw-r--r--drivers/input/keyboard/amikbd.c97
-rw-r--r--drivers/input/misc/Kconfig10
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/max8925_onkey.c148
-rw-r--r--drivers/input/misc/twl4030-vibra.c2
-rw-r--r--drivers/input/misc/uinput.c4
-rw-r--r--drivers/input/mouse/amimouse.c98
-rw-r--r--drivers/input/touchscreen/Kconfig13
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/ads7846.c4
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c2
-rw-r--r--drivers/input/touchscreen/tps6507x-ts.c400
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c7
-rw-r--r--drivers/isdn/mISDN/timerdev.c2
-rw-r--r--drivers/leds/Kconfig19
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/leds-88pm860x.c11
-rw-r--r--drivers/leds/leds-gpio.c30
-rw-r--r--drivers/leds/leds-lp3944.c9
-rw-r--r--drivers/leds/leds-mc13783.c403
-rw-r--r--drivers/leds/leds-net5501.c94
-rw-r--r--drivers/leds/leds-ss4200.c2
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/message/i2o/i2o_config.c11
-rw-r--r--drivers/mfd/88pm860x-core.c6
-rw-r--r--drivers/mfd/88pm860x-i2c.c2
-rw-r--r--drivers/mfd/Kconfig87
-rw-r--r--drivers/mfd/Makefile13
-rw-r--r--drivers/mfd/ab3100-core.c99
-rw-r--r--drivers/mfd/ab3100-otp.c13
-rw-r--r--drivers/mfd/ab3550-core.c1401
-rw-r--r--drivers/mfd/ab4500-core.c209
-rw-r--r--drivers/mfd/ab8500-core.c444
-rw-r--r--drivers/mfd/ab8500-spi.c133
-rw-r--r--drivers/mfd/abx500-core.c157
-rw-r--r--drivers/mfd/da903x.c1
-rw-r--r--drivers/mfd/janz-cmodio.c304
-rw-r--r--drivers/mfd/max8925-core.c7
-rw-r--r--drivers/mfd/max8925-i2c.c2
-rw-r--r--drivers/mfd/mc13783-core.c4
-rw-r--r--drivers/mfd/menelaus.c3
-rw-r--r--drivers/mfd/mfd-core.c2
-rw-r--r--drivers/mfd/pcf50633-adc.c39
-rw-r--r--drivers/mfd/pcf50633-core.c348
-rw-r--r--drivers/mfd/pcf50633-irq.c318
-rw-r--r--drivers/mfd/rdc321x-southbridge.c123
-rw-r--r--drivers/mfd/t7l66xb.c3
-rw-r--r--drivers/mfd/tc35892.c347
-rw-r--r--drivers/mfd/timberdale.c156
-rw-r--r--drivers/mfd/timberdale.h16
-rw-r--r--drivers/mfd/tps65010.c2
-rw-r--r--drivers/mfd/tps6507x.c159
-rw-r--r--drivers/mfd/twl4030-irq.c11
-rw-r--r--drivers/mfd/wm831x-core.c112
-rw-r--r--drivers/mfd/wm831x-irq.c18
-rw-r--r--drivers/mfd/wm8350-i2c.c6
-rw-r--r--drivers/mfd/wm8400-core.c4
-rw-r--r--drivers/misc/lkdtm.c20
-rw-r--r--drivers/mmc/core/core.c3
-rw-r--r--drivers/mmc/core/sd_ops.c2
-rw-r--r--drivers/mmc/core/sdio_io.c30
-rw-r--r--drivers/mmc/host/Kconfig20
-rw-r--r--drivers/mmc/host/Makefile2
-rw-r--r--drivers/mmc/host/at91_mci.c2
-rw-r--r--drivers/mmc/host/atmel-mci.c64
-rw-r--r--drivers/mmc/host/au1xmmc.c2
-rw-r--r--drivers/mmc/host/bfin_sdh.c2
-rw-r--r--drivers/mmc/host/cb710-mmc.c2
-rw-r--r--drivers/mmc/host/davinci_mmc.c111
-rw-r--r--drivers/mmc/host/imxmmc.c2
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/msm_sdcc.c2
-rw-r--r--drivers/mmc/host/mvsdio.c2
-rw-r--r--drivers/mmc/host/mxcmmc.c2
-rw-r--r--drivers/mmc/host/omap.c64
-rw-r--r--drivers/mmc/host/omap_hsmmc.c279
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/s3cmci.c3
-rw-r--r--drivers/mmc/host/sdhci-of-core.c2
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c12
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c12
-rw-r--r--drivers/mmc/host/sdhci-pci.c2
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c26
-rw-r--r--drivers/mmc/host/sdhci-s3c.c10
-rw-r--r--drivers/mmc/host/sdhci-spear.c298
-rw-r--r--drivers/mmc/host/sdhci.c25
-rw-r--r--drivers/mmc/host/sdhci.h42
-rw-r--r--drivers/mmc/host/sdricoh_cs.c2
-rw-r--r--drivers/mmc/host/sh_mmcif.c965
-rw-r--r--drivers/mmc/host/tifm_sd.c2
-rw-r--r--drivers/mmc/host/tmio_mmc.c2
-rw-r--r--drivers/mmc/host/via-sdmmc.c2
-rw-r--r--drivers/mmc/host/wbsd.c2
-rw-r--r--drivers/mtd/ubi/cdev.c3
-rw-r--r--drivers/net/3c507.c3
-rw-r--r--drivers/net/benet/be_cmds.c2
-rw-r--r--drivers/net/benet/be_main.c2
-rw-r--r--drivers/net/can/Kconfig10
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/janz-ican3.c1830
-rw-r--r--drivers/net/cnic.c10
-rw-r--r--drivers/net/cnic_if.h4
-rw-r--r--drivers/net/fec.c28
-rw-r--r--drivers/net/hamradio/yam.c3
-rw-r--r--drivers/net/ll_temac.h5
-rw-r--r--drivers/net/ll_temac_main.c84
-rw-r--r--drivers/parport/parport_amiga.c64
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h17
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c77
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c8
-rw-r--r--drivers/pci/probe.c8
-rw-r--r--drivers/rapidio/Kconfig24
-rw-r--r--drivers/rapidio/Makefile4
-rw-r--r--drivers/rapidio/rio-scan.c424
-rw-r--r--drivers/rapidio/rio.c433
-rw-r--r--drivers/rapidio/rio.h44
-rw-r--r--drivers/rapidio/switches/Kconfig28
-rw-r--r--drivers/rapidio/switches/Makefile9
-rw-r--r--drivers/rapidio/switches/idtcps.c137
-rw-r--r--drivers/rapidio/switches/tsi500.c20
-rw-r--r--drivers/rapidio/switches/tsi568.c146
-rw-r--r--drivers/rapidio/switches/tsi57x.c315
-rw-r--r--drivers/regulator/ab3100.c35
-rw-r--r--drivers/regulator/tps6507x-regulator.c373
-rw-r--r--drivers/rtc/Kconfig7
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-ab3100.c41
-rw-r--r--drivers/rtc/rtc-ab8500.c363
-rw-r--r--drivers/rtc/rtc-m41t80.c6
-rw-r--r--drivers/s390/block/dasd.c23
-rw-r--r--drivers/s390/block/dasd_eckd.c1
-rw-r--r--drivers/s390/block/dasd_int.h1
-rw-r--r--drivers/s390/cio/ccwgroup.c7
-rw-r--r--drivers/s390/cio/ccwreq.c15
-rw-r--r--drivers/s390/cio/ioasm.h15
-rw-r--r--drivers/scsi/a2091.c245
-rw-r--r--drivers/scsi/a2091.h4
-rw-r--r--drivers/scsi/a3000.c256
-rw-r--r--drivers/scsi/a3000.h4
-rw-r--r--drivers/scsi/a4000t.c101
-rw-r--r--drivers/scsi/aacraid/commctrl.c4
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h29
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c3
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c684
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c5
-rw-r--r--drivers/scsi/bfa/bfa_core.c22
-rw-r--r--drivers/scsi/gvp11.c541
-rw-r--r--drivers/scsi/gvp11.h11
-rw-r--r--drivers/scsi/ipr.c221
-rw-r--r--drivers/scsi/ipr.h31
-rw-r--r--drivers/scsi/iscsi_tcp.c6
-rw-r--r--drivers/scsi/mvme147.c33
-rw-r--r--drivers/scsi/osst.c9
-rw-r--r--drivers/scsi/scsi_scan.c9
-rw-r--r--drivers/scsi/st.c1
-rw-r--r--drivers/serial/s5pv210.c8
-rw-r--r--drivers/sfi/sfi_acpi.c41
-rw-r--r--drivers/sfi/sfi_core.c105
-rw-r--r--drivers/sfi/sfi_core.h8
-rw-r--r--drivers/staging/go7007/saa7134-go7007.c8
-rw-r--r--drivers/staging/pohmelfs/inode.c10
-rw-r--r--drivers/telephony/ixj.c15
-rw-r--r--drivers/usb/gadget/printer.c2
-rw-r--r--drivers/vhost/net.c14
-rw-r--r--drivers/vhost/vhost.c57
-rw-r--r--drivers/video/backlight/88pm860x_bl.c2
-rw-r--r--drivers/video/backlight/Kconfig116
-rw-r--r--drivers/video/backlight/Makefile4
-rw-r--r--drivers/video/backlight/adp8860_bl.c838
-rw-r--r--drivers/video/backlight/adx_bl.c4
-rw-r--r--drivers/video/backlight/ep93xx_bl.c160
-rw-r--r--drivers/video/backlight/l4f00242t03.c11
-rw-r--r--drivers/video/backlight/max8925_bl.c1
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c47
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c190
-rw-r--r--drivers/video/backlight/s6e63m0.c920
-rw-r--r--drivers/video/backlight/s6e63m0_gamma.h266
-rw-r--r--drivers/video/bf54x-lq043fb.c7
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c7
-rw-r--r--drivers/video/fb_defio.c2
-rw-r--r--drivers/video/s3fb.c101
-rw-r--r--drivers/video/via/viafbdev.c11
-rw-r--r--drivers/watchdog/rdc321x_wdt.c53
-rw-r--r--fs/9p/vfs_file.c6
-rw-r--r--fs/adfs/dir.c2
-rw-r--r--fs/adfs/file.c2
-rw-r--r--fs/adfs/inode.c3
-rw-r--r--fs/affs/affs.h2
-rw-r--r--fs/affs/file.c4
-rw-r--r--fs/affs/namei.c2
-rw-r--r--fs/afs/internal.h2
-rw-r--r--fs/afs/write.c3
-rw-r--r--fs/aio.c71
-rw-r--r--fs/anon_inodes.c2
-rw-r--r--fs/attr.c50
-rw-r--r--fs/autofs/root.c1
-rw-r--r--fs/autofs4/dev-ioctl.c13
-rw-r--r--fs/bad_inode.c3
-rw-r--r--fs/bfs/dir.c2
-rw-r--r--fs/block_dev.c16
-rw-r--r--fs/btrfs/async-thread.c1
-rw-r--r--fs/btrfs/btrfs_inode.h3
-rw-r--r--fs/btrfs/ctree.c109
-rw-r--r--fs/btrfs/ctree.h165
-rw-r--r--fs/btrfs/delayed-ref.c101
-rw-r--r--fs/btrfs/delayed-ref.h3
-rw-r--r--fs/btrfs/disk-io.c169
-rw-r--r--fs/btrfs/disk-io.h4
-rw-r--r--fs/btrfs/extent-tree.c2255
-rw-r--r--fs/btrfs/extent_io.c85
-rw-r--r--fs/btrfs/extent_io.h14
-rw-r--r--fs/btrfs/file-item.c28
-rw-r--r--fs/btrfs/file.c169
-rw-r--r--fs/btrfs/inode-item.c27
-rw-r--r--fs/btrfs/inode.c1713
-rw-r--r--fs/btrfs/ioctl.c206
-rw-r--r--fs/btrfs/ordered-data.c82
-rw-r--r--fs/btrfs/ordered-data.h9
-rw-r--r--fs/btrfs/relocation.c1971
-rw-r--r--fs/btrfs/root-tree.c23
-rw-r--r--fs/btrfs/super.c30
-rw-r--r--fs/btrfs/transaction.c232
-rw-r--r--fs/btrfs/transaction.h24
-rw-r--r--fs/btrfs/tree-defrag.c7
-rw-r--r--fs/btrfs/tree-log.c241
-rw-r--r--fs/btrfs/tree-log.h2
-rw-r--r--fs/btrfs/volumes.c17
-rw-r--r--fs/btrfs/xattr.c12
-rw-r--r--fs/buffer.c123
-rw-r--r--fs/ceph/auth.c7
-rw-r--r--fs/ceph/auth.h6
-rw-r--r--fs/ceph/auth_none.c8
-rw-r--r--fs/ceph/auth_x.c12
-rw-r--r--fs/ceph/caps.c4
-rw-r--r--fs/ceph/ceph_fs.h21
-rw-r--r--fs/ceph/dir.c7
-rw-r--r--fs/ceph/export.c2
-rw-r--r--fs/ceph/file.c2
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/ceph/mds_client.c21
-rw-r--r--fs/ceph/messenger.c6
-rw-r--r--fs/ceph/messenger.h1
-rw-r--r--fs/ceph/mon_client.c5
-rw-r--r--fs/ceph/osd_client.c7
-rw-r--r--fs/ceph/osdmap.c2
-rw-r--r--fs/ceph/super.c12
-rw-r--r--fs/ceph/super.h3
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/file.c4
-rw-r--r--fs/coda/coda_int.h3
-rw-r--r--fs/coda/file.c4
-rw-r--r--fs/compat.c132
-rw-r--r--fs/configfs/inode.c9
-rw-r--r--fs/debugfs/file.c21
-rw-r--r--fs/direct-io.c123
-rw-r--r--fs/ecryptfs/file.c2
-rw-r--r--fs/ecryptfs/inode.c4
-rw-r--r--fs/exec.c195
-rw-r--r--fs/exofs/file.c7
-rw-r--r--fs/ext2/ext2.h3
-rw-r--r--fs/ext2/file.c7
-rw-r--r--fs/ext2/inode.c153
-rw-r--r--fs/ext2/super.c20
-rw-r--r--fs/ext3/dir.c2
-rw-r--r--fs/ext3/fsync.c4
-rw-r--r--fs/ext3/super.c38
-rw-r--r--fs/ext4/balloc.c5
-rw-r--r--fs/ext4/block_validity.c4
-rw-r--r--fs/ext4/dir.c26
-rw-r--r--fs/ext4/ext4.h169
-rw-r--r--fs/ext4/ext4_jbd2.h8
-rw-r--r--fs/ext4/extents.c417
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/fsync.c41
-rw-r--r--fs/ext4/ialloc.c89
-rw-r--r--fs/ext4/inode.c723
-rw-r--r--fs/ext4/ioctl.c27
-rw-r--r--fs/ext4/mballoc.c120
-rw-r--r--fs/ext4/migrate.c2
-rw-r--r--fs/ext4/move_extent.c13
-rw-r--r--fs/ext4/namei.c61
-rw-r--r--fs/ext4/resize.c3
-rw-r--r--fs/ext4/super.c117
-rw-r--r--fs/ext4/symlink.c2
-rw-r--r--fs/ext4/xattr.c39
-rw-r--r--fs/fat/fat.h6
-rw-r--r--fs/fat/file.c40
-rw-r--r--fs/fat/inode.c35
-rw-r--r--fs/file_table.c21
-rw-r--r--fs/freevxfs/vxfs_lookup.c2
-rw-r--r--fs/fscache/object-list.c2
-rw-r--r--fs/fuse/dev.c527
-rw-r--r--fs/fuse/dir.c5
-rw-r--r--fs/fuse/file.c48
-rw-r--r--fs/fuse/fuse_i.h6
-rw-r--r--fs/gfs2/aops.c8
-rw-r--r--fs/gfs2/file.c4
-rw-r--r--fs/gfs2/ops_inode.c5
-rw-r--r--fs/hostfs/hostfs_kern.c4
-rw-r--r--fs/hpfs/file.c4
-rw-r--r--fs/hpfs/hpfs_fn.h2
-rw-r--r--fs/hppfs/hppfs.c2
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/isofs/dir.c1
-rw-r--r--fs/jbd2/transaction.c5
-rw-r--r--fs/jffs2/file.c4
-rw-r--r--fs/jffs2/fs.c4
-rw-r--r--fs/jffs2/os-linux.h2
-rw-r--r--fs/jfs/file.c4
-rw-r--r--fs/jfs/jfs_inode.h2
-rw-r--r--fs/jfs/super.c16
-rw-r--r--fs/libfs.c108
-rw-r--r--fs/logfs/file.c4
-rw-r--r--fs/logfs/logfs.h2
-rw-r--r--fs/minix/dir.c7
-rw-r--r--fs/minix/file.c2
-rw-r--r--fs/minix/itree_v2.c27
-rw-r--r--fs/namei.c2
-rw-r--r--fs/ncpfs/dir.c1
-rw-r--r--fs/ncpfs/file.c2
-rw-r--r--fs/nfs/dir.c7
-rw-r--r--fs/nfs/file.c5
-rw-r--r--fs/nfs/write.c20
-rw-r--r--fs/nilfs2/file.c4
-rw-r--r--fs/nilfs2/nilfs.h2
-rw-r--r--fs/ntfs/dir.c5
-rw-r--r--fs/ntfs/file.c9
-rw-r--r--fs/ocfs2/file.c15
-rw-r--r--fs/ocfs2/super.c50
-rw-r--r--fs/omfs/file.c2
-rw-r--r--fs/pipe.c19
-rw-r--r--fs/proc/array.c4
-rw-r--r--fs/proc/base.c16
-rw-r--r--fs/proc/generic.c15
-rw-r--r--fs/proc/kcore.c2
-rw-r--r--fs/proc/root.c1
-rw-r--r--fs/qnx4/dir.c3
-rw-r--r--fs/quota/dquot.c201
-rw-r--r--fs/quota/quota.c4
-rw-r--r--fs/ramfs/file-mmu.c3
-rw-r--r--fs/ramfs/file-nommu.c9
-rw-r--r--fs/read_write.c17
-rw-r--r--fs/reiserfs/dir.c9
-rw-r--r--fs/reiserfs/file.c5
-rw-r--r--fs/reiserfs/super.c48
-rw-r--r--fs/smbfs/dir.c1
-rw-r--r--fs/smbfs/file.c3
-rw-r--r--fs/smbfs/inode.c2
-rw-r--r--fs/squashfs/Kconfig11
-rw-r--r--fs/squashfs/Makefile2
-rw-r--r--fs/squashfs/inode.c92
-rw-r--r--fs/squashfs/namei.c6
-rw-r--r--fs/squashfs/squashfs.h12
-rw-r--r--fs/squashfs/squashfs_fs.h76
-rw-r--r--fs/squashfs/squashfs_fs_i.h3
-rw-r--r--fs/squashfs/squashfs_fs_sb.h3
-rw-r--r--fs/squashfs/super.c30
-rw-r--r--fs/squashfs/symlink.c11
-rw-r--r--fs/squashfs/xattr.c323
-rw-r--r--fs/squashfs/xattr.h46
-rw-r--r--fs/squashfs/xattr_id.c100
-rw-r--r--fs/super.c17
-rw-r--r--fs/sync.c8
-rw-r--r--fs/sysfs/inode.c8
-rw-r--r--fs/sysv/dir.c2
-rw-r--r--fs/sysv/file.c2
-rw-r--r--fs/sysv/inode.c1
-rw-r--r--fs/ubifs/file.c17
-rw-r--r--fs/ubifs/ubifs.h4
-rw-r--r--fs/udf/balloc.c43
-rw-r--r--fs/udf/dir.c3
-rw-r--r--fs/udf/file.c28
-rw-r--r--fs/udf/ialloc.c21
-rw-r--r--fs/udf/inode.c5
-rw-r--r--fs/udf/namei.c20
-rw-r--r--fs/udf/super.c13
-rw-r--r--fs/udf/udfdecl.h1
-rw-r--r--fs/ufs/balloc.c24
-rw-r--r--fs/ufs/dir.c2
-rw-r--r--fs/ufs/file.c5
-rw-r--r--fs/ufs/ialloc.c13
-rw-r--r--fs/ufs/inode.c4
-rw-r--r--fs/ufs/namei.c16
-rw-r--r--fs/ufs/super.c112
-rw-r--r--fs/ufs/truncate.c20
-rw-r--r--fs/ufs/ufs_fs.h1
-rw-r--r--fs/xfs/linux-2.6/xfs_file.c10
-rw-r--r--include/acpi/acpi_bus.h2
-rw-r--r--include/acpi/acpi_drivers.h3
-rw-r--r--include/acpi/acpi_hest.h12
-rw-r--r--include/acpi/apei.h34
-rw-r--r--include/acpi/atomicio.h10
-rw-r--r--include/acpi/hed.h18
-rw-r--r--include/acpi/processor.h13
-rw-r--r--include/acpi/video.h16
-rw-r--r--include/asm-generic/dma-mapping-common.h20
-rw-r--r--include/asm-generic/gpio.h11
-rw-r--r--include/asm-generic/scatterlist.h17
-rw-r--r--include/asm-generic/topology.h3
-rw-r--r--include/asm-generic/vmlinux.lds.h8
-rw-r--r--include/linux/acpi.h3
-rw-r--r--include/linux/aio.h5
-rw-r--r--include/linux/amba/pl330.h45
-rw-r--r--include/linux/buffer_head.h11
-rw-r--r--include/linux/byteorder/big_endian.h3
-rw-r--r--include/linux/byteorder/little_endian.h3
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/compat.h4
-rw-r--r--include/linux/completion.h2
-rw-r--r--include/linux/cper.h314
-rw-r--r--include/linux/cpuidle.h8
-rw-r--r--include/linux/cpuset.h6
-rw-r--r--include/linux/cred.h1
-rw-r--r--include/linux/debugfs.h2
-rw-r--r--include/linux/dma-mapping.h25
-rw-r--r--include/linux/ext3_fs.h2
-rw-r--r--include/linux/fb.h5
-rw-r--r--include/linux/file.h1
-rw-r--r--include/linux/firewire.h5
-rw-r--r--include/linux/fs.h48
-rw-r--r--include/linux/ftrace_event.h103
-rw-r--r--include/linux/fuse.h5
-rw-r--r--include/linux/gpio.h5
-rw-r--r--include/linux/i2c/adp8860.h154
-rw-r--r--include/linux/i2c/max732x.h3
-rw-r--r--include/linux/i2c/pca953x.h2
-rw-r--r--include/linux/init_task.h13
-rw-r--r--include/linux/input.h12
-rw-r--r--include/linux/input/tps6507x-ts.h24
-rw-r--r--include/linux/joystick.h4
-rw-r--r--include/linux/kmod.h64
-rw-r--r--include/linux/lcd.h23
-rw-r--r--include/linux/leds.h12
-rw-r--r--include/linux/libata.h36
-rw-r--r--include/linux/memcontrol.h6
-rw-r--r--include/linux/mfd/88pm860x.h4
-rw-r--r--include/linux/mfd/ab4500.h262
-rw-r--r--include/linux/mfd/ab8500.h128
-rw-r--r--include/linux/mfd/abx500.h (renamed from include/linux/mfd/ab3100.h)134
-rw-r--r--include/linux/mfd/janz.h54
-rw-r--r--include/linux/mfd/mc13783.h66
-rw-r--r--include/linux/mfd/pcf50633/backlight.h51
-rw-r--r--include/linux/mfd/pcf50633/core.h4
-rw-r--r--include/linux/mfd/rdc321x.h26
-rw-r--r--include/linux/mfd/tc35892.h132
-rw-r--r--include/linux/mfd/tps6507x.h169
-rw-r--r--include/linux/mfd/wm831x/core.h5
-rw-r--r--include/linux/mmc/host.h2
-rw-r--r--include/linux/mmc/sdhci-spear.h42
-rw-r--r--include/linux/mmc/sdio_func.h3
-rw-r--r--include/linux/mmc/sh_mmcif.h39
-rw-r--r--include/linux/mmzone.h6
-rw-r--r--include/linux/notifier.h5
-rw-r--r--include/linux/page_cgroup.h5
-rw-r--r--include/linux/pci.h3
-rw-r--r--include/linux/perf_event.h28
-rw-r--r--include/linux/quota.h20
-rw-r--r--include/linux/quotaops.h114
-rw-r--r--include/linux/random.h28
-rw-r--r--include/linux/rio.h55
-rw-r--r--include/linux/rio_drv.h6
-rw-r--r--include/linux/rio_ids.h14
-rw-r--r--include/linux/rio_regs.h80
-rw-r--r--include/linux/sched.h17
-rw-r--r--include/linux/sdhci-pltfm.h35
-rw-r--r--include/linux/sem.h4
-rw-r--r--include/linux/sfi.h24
-rw-r--r--include/linux/slub_def.h11
-rw-r--r--include/linux/swap.h5
-rw-r--r--include/linux/swiotlb.h10
-rw-r--r--include/linux/syscalls.h57
-rw-r--r--include/linux/threads.h9
-rw-r--r--include/linux/topology.h112
-rw-r--r--include/linux/tracepoint.h98
-rw-r--r--include/linux/uinput.h10
-rw-r--r--include/linux/usb/audio-v2.h16
-rw-r--r--include/linux/uuid.h70
-rw-r--r--include/net/cls_cgroup.h2
-rw-r--r--include/net/sctp/structs.h2
-rw-r--r--include/net/sock.h20
-rw-r--r--include/trace/events/ext4.h100
-rw-r--r--include/trace/ftrace.h249
-rw-r--r--include/trace/syscall.h10
-rw-r--r--ipc/sem.c322
-rw-r--r--ipc/shm.c11
-rw-r--r--kernel/cgroup.c1
-rw-r--r--kernel/cpu.c103
-rw-r--r--kernel/cpuset.c20
-rw-r--r--kernel/cred.c60
-rw-r--r--kernel/exit.c40
-rw-r--r--kernel/fork.c51
-rw-r--r--kernel/hrtimer.c2
-rw-r--r--kernel/kmod.c193
-rw-r--r--kernel/mutex.c7
-rw-r--r--kernel/padata.c4
-rw-r--r--kernel/panic.c1
-rw-r--r--kernel/perf_event.c429
-rw-r--r--kernel/pid.c7
-rw-r--r--kernel/posix-cpu-timers.c12
-rw-r--r--kernel/posix-timers.c11
-rw-r--r--kernel/profile.c8
-rw-r--r--kernel/ptrace.c26
-rw-r--r--kernel/relay.c2
-rw-r--r--kernel/sched.c25
-rw-r--r--kernel/sched_debug.c10
-rw-r--r--kernel/signal.c23
-rw-r--r--kernel/smp.c2
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/sys.c6
-rw-r--r--kernel/timer.c17
-rw-r--r--kernel/trace/blktrace.c138
-rw-r--r--kernel/trace/ftrace.c7
-rw-r--r--kernel/trace/kmemtrace.c70
-rw-r--r--kernel/trace/ring_buffer.c19
-rw-r--r--kernel/trace/trace.c15
-rw-r--r--kernel/trace/trace.h9
-rw-r--r--kernel/trace/trace_branch.c8
-rw-r--r--kernel/trace/trace_event_perf.c185
-rw-r--r--kernel/trace/trace_events.c139
-rw-r--r--kernel/trace/trace_events_filter.c28
-rw-r--r--kernel/trace/trace_export.c16
-rw-r--r--kernel/trace/trace_functions_graph.c13
-rw-r--r--kernel/trace/trace_kprobe.c113
-rw-r--r--kernel/trace/trace_output.c137
-rw-r--r--kernel/trace/trace_output.h2
-rw-r--r--kernel/trace/trace_sched_switch.c20
-rw-r--r--kernel/trace/trace_sched_wakeup.c28
-rw-r--r--kernel/trace/trace_syscalls.c146
-rw-r--r--kernel/trace/trace_workqueue.c26
-rw-r--r--kernel/tracepoint.c91
-rw-r--r--kernel/workqueue.c9
-rw-r--r--lib/Kconfig.debug12
-rw-r--r--lib/Makefile3
-rw-r--r--lib/atomic64_test.c1
-rw-r--r--lib/cpu-notifier-error-inject.c63
-rw-r--r--lib/crc32.c2
-rw-r--r--lib/idr.c5
-rw-r--r--lib/radix-tree.c4
-rw-r--r--lib/random32.c38
-rw-r--r--lib/swiotlb.c31
-rw-r--r--lib/uuid.c53
-rw-r--r--mm/filemap.c43
-rw-r--r--mm/memcontrol.c689
-rw-r--r--mm/mempolicy.c3
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/nommu.c32
-rw-r--r--mm/oom_kill.c5
-rw-r--r--mm/page_alloc.c50
-rw-r--r--mm/shmem.c109
-rw-r--r--mm/slab.c47
-rw-r--r--mm/slub.c33
-rw-r--r--mm/swap.c1
-rw-r--r--mm/truncate.c10
-rw-r--r--net/core/datagram.c6
-rw-r--r--net/core/drop_monitor.c12
-rw-r--r--net/core/neighbour.c1
-rw-r--r--net/core/rtnetlink.c26
-rw-r--r--net/core/sock.c33
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/udp.c14
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/udp.c5
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/iucv/iucv.c9
-rw-r--r--net/netfilter/xt_TEE.c4
-rw-r--r--net/sunrpc/xprtsock.c29
-rw-r--r--samples/tracepoints/tp-samples-trace.h4
-rw-r--r--samples/tracepoints/tracepoint-probe-sample.c13
-rw-r--r--samples/tracepoints/tracepoint-probe-sample2.c7
-rw-r--r--scripts/gen_initramfs_list.sh1
-rw-r--r--scripts/kconfig/streamline_config.pl9
-rw-r--r--security/keys/internal.h1
-rw-r--r--security/keys/keyctl.c2
-rw-r--r--security/keys/process_keys.c3
-rw-r--r--security/keys/request_key.c32
-rw-r--r--sound/core/pcm_lib.c13
-rw-r--r--sound/core/pcm_native.c39
-rw-r--r--sound/mips/au1x00.c1
-rw-r--r--sound/oss/dmasound/dmasound_atari.c5
-rw-r--r--sound/pci/asihpi/hpi.h8
-rw-r--r--sound/pci/asihpi/hpi6000.c6
-rw-r--r--sound/pci/asihpi/hpi6205.c21
-rw-r--r--sound/pci/asihpi/hpi_internal.h5
-rw-r--r--sound/pci/asihpi/hpicmn.c38
-rw-r--r--sound/pci/asihpi/hpifunc.c17
-rw-r--r--sound/pci/asihpi/hpios.c23
-rw-r--r--sound/pci/asihpi/hpios.h9
-rw-r--r--sound/pci/aw2/aw2-alsa.c11
-rw-r--r--sound/pci/emu10k1/emufx.c36
-rw-r--r--sound/pci/hda/hda_intel.c11
-rw-r--r--sound/pci/hda/patch_conexant.c2
-rw-r--r--sound/pci/hda/patch_realtek.c84
-rw-r--r--sound/pci/hda/patch_sigmatel.c2
-rw-r--r--sound/soc/codecs/wm8350.c4
-rw-r--r--sound/soc/codecs/wm8400.c18
-rw-r--r--sound/soc/codecs/wm8990.c18
-rw-r--r--sound/soc/imx/imx-pcm-dma-mx2.c7
-rw-r--r--sound/soc/sh/siu_dai.c2
-rw-r--r--sound/usb/caiaq/control.c36
-rw-r--r--sound/usb/caiaq/device.c8
-rw-r--r--sound/usb/caiaq/input.c2
-rw-r--r--sound/usb/endpoint.c64
-rw-r--r--sound/usb/format.c24
-rw-r--r--sound/usb/format.h7
-rw-r--r--sound/usb/midi.c110
-rw-r--r--sound/usb/midi.h2
-rw-r--r--sound/usb/mixer.c2
-rw-r--r--sound/usb/pcm.c37
-rw-r--r--sound/usb/quirks-table.h11
-rw-r--r--sound/usb/quirks.c1
-rw-r--r--sound/usb/usbaudio.h1
-rw-r--r--tools/perf/Documentation/perf-stat.txt3
-rw-r--r--tools/perf/builtin-annotate.c61
-rw-r--r--tools/perf/builtin-probe.c10
-rw-r--r--tools/perf/builtin-record.c72
-rw-r--r--tools/perf/builtin-report.c64
-rw-r--r--tools/perf/builtin-stat.c18
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/perf.c25
-rw-r--r--tools/perf/util/abspath.c81
-rw-r--r--tools/perf/util/build-id.c22
-rw-r--r--tools/perf/util/build-id.h2
-rw-r--r--tools/perf/util/cache.h57
-rw-r--r--tools/perf/util/callchain.c1
-rw-r--r--tools/perf/util/callchain.h1
-rw-r--r--tools/perf/util/config.c461
-rw-r--r--tools/perf/util/debug.c2
-rw-r--r--tools/perf/util/exec_cmd.c6
-rw-r--r--tools/perf/util/exec_cmd.h1
-rw-r--r--tools/perf/util/header.c84
-rw-r--r--tools/perf/util/help.c30
-rw-r--r--tools/perf/util/hist.c42
-rw-r--r--tools/perf/util/hist.h24
-rw-r--r--tools/perf/util/newt.c151
-rw-r--r--tools/perf/util/path.c204
-rw-r--r--tools/perf/util/probe-finder.c33
-rw-r--r--tools/perf/util/probe-finder.h3
-rw-r--r--tools/perf/util/quote.c433
-rw-r--r--tools/perf/util/quote.h39
-rw-r--r--tools/perf/util/run-command.c90
-rw-r--r--tools/perf/util/run-command.h30
-rw-r--r--tools/perf/util/session.c8
-rw-r--r--tools/perf/util/session.h8
-rw-r--r--tools/perf/util/sigchain.c2
-rw-r--r--tools/perf/util/sigchain.h1
-rw-r--r--tools/perf/util/strbuf.c229
-rw-r--r--tools/perf/util/strbuf.h45
-rw-r--r--tools/perf/util/symbol.c52
-rw-r--r--tools/perf/util/symbol.h3
-rw-r--r--tools/perf/util/trace-event-read.c19
-rw-r--r--tools/perf/util/trace-event.h7
-rw-r--r--tools/perf/util/util.h177
-rw-r--r--tools/perf/util/wrapper.c110
-rw-r--r--usr/Makefile5
-rw-r--r--usr/initramfs_data.lzo.S29
1131 files changed, 44721 insertions, 18365 deletions
diff --git a/Documentation/ABI/testing/sysfs-firmware-sfi b/Documentation/ABI/testing/sysfs-firmware-sfi
new file mode 100644
index 000000000000..4be7d44aeacf
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-firmware-sfi
@@ -0,0 +1,15 @@
+What:		/sys/firmware/sfi/tables/
+Date:		May 2010
+Contact:	Len Brown <lenb@kernel.org>
+Description:
+		SFI defines a number of small static memory tables
+		so the kernel can get platform information from firmware.
+
+		The tables are defined in the latest SFI specification:
+		http://simplefirmware.org/documentation
+
+		While the tables are used by the kernel, user-space
+		can observe them this way:
+
+		# cd /sys/firmware/sfi/tables
+		# cat $TABLENAME > $TABLENAME.bin
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 2e435adfbd6b..98ce51796f71 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -639,6 +639,36 @@ is planned to completely remove virt_to_bus() and bus_to_virt() as
 they are entirely deprecated. Some ports already do not provide these
 as it is impossible to correctly support them.
 
+			Handling Errors
+
+DMA address space is limited on some architectures and an allocation
+failure can be determined by:
+
+- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0
+
+- checking the returned dma_addr_t of dma_map_single and dma_map_page
+  by using dma_mapping_error():
+
+	dma_addr_t dma_handle;
+
+	dma_handle = dma_map_single(dev, addr, size, direction);
+	if (dma_mapping_error(dev, dma_handle)) {
+		/*
+		 * reduce current DMA mapping usage,
+		 * delay and try again later or
+		 * reset driver.
+		 */
+	}
+
+Networking drivers must call dev_kfree_skb to free the socket buffer
+and return NETDEV_TX_OK if the DMA mapping fails on the transmit hook
+(ndo_start_xmit). This means that the socket buffer is just dropped in
+the failure case.
+
+SCSI drivers must return SCSI_MLQUEUE_HOST_BUSY if the DMA mapping
+fails in the queuecommand hook. This means that the SCSI subsystem
+passes the command to the driver again later.
+
 			Optimizing Unmap State Space Consumption
 
 On many platforms, dma_unmap_{single,page}() is simply a nop.
@@ -703,42 +733,25 @@ to "Closing".
 
 1) Struct scatterlist requirements.
 
-   Struct scatterlist must contain, at a minimum, the following
-   members:
-
-	struct page *page;
-	unsigned int offset;
-	unsigned int length;
-
-   The base address is specified by a "page+offset" pair.
-
-   Previous versions of struct scatterlist contained a "void *address"
-   field that was sometimes used instead of page+offset. As of Linux
-   2.5., page+offset is always used, and the "address" field has been
-   deleted.
-
-2) More to come...
-
-			Handling Errors
-
-DMA address space is limited on some architectures and an allocation
-failure can be determined by:
-
-- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0
-
-- checking the returned dma_addr_t of dma_map_single and dma_map_page
-  by using dma_mapping_error():
-
-	dma_addr_t dma_handle;
-
-	dma_handle = dma_map_single(dev, addr, size, direction);
-	if (dma_mapping_error(dev, dma_handle)) {
-		/*
-		 * reduce current DMA mapping usage,
-		 * delay and try again later or
-		 * reset driver.
-		 */
-	}
-
+   Don't invent the architecture specific struct scatterlist; just use
+   <asm-generic/scatterlist.h>. You need to enable
+   CONFIG_NEED_SG_DMA_LENGTH if the architecture supports IOMMUs
+   (including software IOMMU).
+
+2) ARCH_KMALLOC_MINALIGN
+
+   Architectures must ensure that kmalloc'ed buffer is
+   DMA-safe. Drivers and subsystems depend on it. If an architecture
+   isn't fully DMA-coherent (i.e. hardware doesn't ensure that data in
+   the CPU cache is identical to data in main memory),
+   ARCH_KMALLOC_MINALIGN must be set so that the memory allocator
+   makes sure that kmalloc'ed buffer doesn't share a cache line with
+   the others. See arch/arm/include/asm/cache.h as an example.
+
+   Note that ARCH_KMALLOC_MINALIGN is about DMA memory alignment
+   constraints. You don't need to worry about the architecture data
+   alignment constraints (e.g. the alignment constraints about 64-bit
+   objects).
 
 			Closing
 
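The "Handling Errors" text added above tells a network driver to drop the
packet and report success to the stack when the DMA mapping for a transmit
fails. As an illustration only (not part of the patch), a minimal sketch in
kernel C of that rule inside an ndo_start_xmit hook; foo_start_xmit,
struct foo_priv and priv->dma_dev are hypothetical names:

	#include <linux/netdevice.h>
	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	/* hypothetical per-device private data */
	struct foo_priv {
		struct device *dma_dev;
	};

	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);
		dma_addr_t dma_handle;

		dma_handle = dma_map_single(priv->dma_dev, skb->data,
					    skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dma_dev, dma_handle)) {
			/* mapping failed: drop the packet and return
			 * NETDEV_TX_OK so the stack does not retry it */
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		/* ... queue the descriptor and start the hardware ... */
		return NETDEV_TX_OK;
	}
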
diff --git a/Documentation/SubmittingDrivers b/Documentation/SubmittingDrivers
index 99e72a81fa2f..4947fd8fb182 100644
--- a/Documentation/SubmittingDrivers
+++ b/Documentation/SubmittingDrivers
@@ -130,6 +130,8 @@ Linux kernel master tree:
 	ftp.??.kernel.org:/pub/linux/kernel/...
 	?? == your country code, such as "us", "uk", "fr", etc.
 
+	http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git
+
 Linux kernel mailing list:
 	linux-kernel@vger.kernel.org
 	[mail majordomo@vger.kernel.org to subscribe]
@@ -160,3 +162,6 @@ How to NOT write kernel driver by Arjan van de Ven:
 
 Kernel Janitor:
 	http://janitor.kernelnewbies.org/
+
+GIT, Fast Version Control System:
+	http://git-scm.com/
diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/acpi/apei/einj.txt
new file mode 100644
index 000000000000..dfab71848dc8
--- /dev/null
+++ b/Documentation/acpi/apei/einj.txt
@@ -0,0 +1,59 @@
+			APEI Error INJection
+			~~~~~~~~~~~~~~~~~~~~
+
+EINJ provides a hardware error injection mechanism
+It is very useful for debugging and testing of other APEI and RAS features.
+
+To use EINJ, make sure the following are enabled in your kernel
+configuration:
+
+CONFIG_DEBUG_FS
+CONFIG_ACPI_APEI
+CONFIG_ACPI_APEI_EINJ
+
+The user interface of EINJ is debug file system, under the
+directory apei/einj. The following files are provided.
+
+- available_error_type
+  Reading this file returns the error injection capability of the
+  platform, that is, which error types are supported. The error type
+  definition is as follow, the left field is the error type value, the
+  right field is error description.
+
+    0x00000001	Processor Correctable
+    0x00000002	Processor Uncorrectable non-fatal
+    0x00000004	Processor Uncorrectable fatal
+    0x00000008	Memory Correctable
+    0x00000010	Memory Uncorrectable non-fatal
+    0x00000020	Memory Uncorrectable fatal
+    0x00000040	PCI Express Correctable
+    0x00000080	PCI Express Uncorrectable fatal
+    0x00000100	PCI Express Uncorrectable non-fatal
+    0x00000200	Platform Correctable
+    0x00000400	Platform Uncorrectable non-fatal
+    0x00000800	Platform Uncorrectable fatal
+
+  The format of file contents are as above, except there are only the
+  available error type lines.
+
+- error_type
+  This file is used to set the error type value. The error type value
+  is defined in "available_error_type" description.
+
+- error_inject
+  Write any integer to this file to trigger the error
+  injection. Before this, please specify all necessary error
+  parameters.
+
+- param1
+  This file is used to set the first error parameter value. Effect of
+  parameter depends on error_type specified. For memory error, this is
+  physical memory address.
+
+- param2
+  This file is used to set the second error parameter value. Effect of
+  parameter depends on error_type specified. For memory error, this is
+  physical memory address mask.
+
+For more information about EINJ, please refer to ACPI specification
+version 4.0, section 17.5.
diff --git a/Documentation/arm/Samsung-S3C24XX/GPIO.txt b/Documentation/arm/Samsung-S3C24XX/GPIO.txt
index 2af2cf39915f..816d6071669e 100644
--- a/Documentation/arm/Samsung-S3C24XX/GPIO.txt
+++ b/Documentation/arm/Samsung-S3C24XX/GPIO.txt
@@ -12,6 +12,8 @@ Introduction
 of the s3c2410 GPIO system, please read the Samsung provided
 data-sheet/users manual to find out the complete list.
 
+See Documentation/arm/Samsung/GPIO.txt for the core implemetation.
+
 
 GPIOLIB
 -------
@@ -24,8 +26,60 @@ GPIOLIB
 listed below will be removed (they may be marked as __deprecated
 in the near future).
 
-   - s3c2410_gpio_getpin
-   - s3c2410_gpio_setpin
+The following functions now either have a s3c_ specific variant
+or are merged into gpiolib. See the definitions in
+arch/arm/plat-samsung/include/plat/gpio-cfg.h:
+
+s3c2410_gpio_setpin()	gpio_set_value() or gpio_direction_output()
+s3c2410_gpio_getpin()	gpio_get_value() or gpio_direction_input()
+s3c2410_gpio_getirq()	gpio_to_irq()
+s3c2410_gpio_cfgpin()	s3c_gpio_cfgpin()
+s3c2410_gpio_getcfg()	s3c_gpio_getcfg()
+s3c2410_gpio_pullup()	s3c_gpio_setpull()
+
+
+GPIOLIB conversion
+------------------
+
+If you need to convert your board or driver to use gpiolib from the exiting
+s3c2410 api, then here are some notes on the process.
+
+1) If your board is exclusively using an GPIO, say to control peripheral
+   power, then it will require to claim the gpio with gpio_request() before
+   it can use it.
+
+   It is recommended to check the return value, with at least WARN_ON()
+   during initialisation.
+
+2) The s3c2410_gpio_cfgpin() can be directly replaced with s3c_gpio_cfgpin()
+   as they have the same arguments, and can either take the pin specific
+   values, or the more generic special-function-number arguments.
+
+3) s3c2410_gpio_pullup() changs have the problem that whilst the
+   s3c2410_gpio_pullup(x, 1) can be easily translated to the
+   s3c_gpio_setpull(x, S3C_GPIO_PULL_NONE), the s3c2410_gpio_pullup(x, 0)
+   are not so easy.
+
+   The s3c2410_gpio_pullup(x, 0) case enables the pull-up (or in the case
+   of some of the devices, a pull-down) and as such the new API distinguishes
+   between the UP and DOWN case. There is currently no 'just turn on' setting
+   which may be required if this becomes a problem.
+
+4) s3c2410_gpio_setpin() can be replaced by gpio_set_value(), the old call
+   does not implicitly configure the relevant gpio to output. The gpio
+   direction should be changed before using gpio_set_value().
+
+5) s3c2410_gpio_getpin() is replaceable by gpio_get_value() if the pin
+   has been set to input. It is currently unknown what the behaviour is
+   when using gpio_get_value() on an output pin (s3c2410_gpio_getpin
+   would return the value the pin is supposed to be outputting).
+
+6) s3c2410_gpio_getirq() should be directly replacable with the
+   gpio_to_irq() call.
+
+The s3c2410_gpio and gpio_ calls have always operated on the same gpio
+numberspace, so there is no problem with converting the gpio numbering
+between the calls.
 
 
 Headers
@@ -54,6 +108,11 @@ PIN Numbers
 eg S3C2410_GPA(0) or S3C2410_GPF(1). These defines are used to tell
 the GPIO functions which pin is to be used.
 
+With the conversion to gpiolib, there is no longer a direct conversion
+from gpio pin number to register base address as in earlier kernels. This
+is due to the number space required for newer SoCs where the later
+GPIOs are not contiguous.
+
 
 Configuring a pin
 -----------------
@@ -71,6 +130,8 @@ Configuring a pin
71 which would turn GPA(0) into the lowest Address line A0, and set 130 which would turn GPA(0) into the lowest Address line A0, and set
72 GPE(8) to be connected to the SDIO/MMC controller's SDDAT1 line. 131 GPE(8) to be connected to the SDIO/MMC controller's SDDAT1 line.
73 132
133 The s3c_gpio_cfgpin() call is a functional replacement for this call.
134
74 135
75Reading the current configuration 136Reading the current configuration
76--------------------------------- 137---------------------------------
@@ -82,6 +143,9 @@ Reading the current configuration
82 The return value will be from the same set of values which can be 143 The return value will be from the same set of values which can be
83 passed to s3c2410_gpio_cfgpin(). 144 passed to s3c2410_gpio_cfgpin().
84 145
146 The s3c_gpio_getcfg() call should be a functional replacement for
147 this call.
148
85 149
86Configuring a pull-up resistor 150Configuring a pull-up resistor
87------------------------------ 151------------------------------
@@ -95,6 +159,10 @@ Configuring a pull-up resistor
95 Where the to value is zero to set the pull-up off, and 1 to enable 159 Where the to value is zero to set the pull-up off, and 1 to enable
96 the specified pull-up. Any other values are currently undefined. 160 the specified pull-up. Any other values are currently undefined.
97 161
162 The s3c_gpio_setpull() offers similar functionality, but with the
163 ability to encode whether the pull is up or down. Currently there
164 is no 'just on' state, so up or down must be selected.
165
98 166
99Getting the state of a PIN 167Getting the state of a PIN
100-------------------------- 168--------------------------
@@ -106,6 +174,9 @@ Getting the state of a PIN
106 This will return either zero or non-zero. Do not count on this 174 This will return either zero or non-zero. Do not count on this
107 function returning 1 if the pin is set. 175 function returning 1 if the pin is set.
108 176
 177 This call is now implemented by the relevant gpiolib calls; convert
178 your board or driver to use gpiolib.
179
109 180
110Setting the state of a PIN 181Setting the state of a PIN
111-------------------------- 182--------------------------
@@ -117,6 +188,9 @@ Setting the state of a PIN
117 Which sets the given pin to the value. Use 0 to write 0, and 1 to 188 Which sets the given pin to the value. Use 0 to write 0, and 1 to
118 set the output to 1. 189 set the output to 1.
119 190
 191 This call is now implemented by the relevant gpiolib calls; convert
192 your board or driver to use gpiolib.
193
120 194
121Getting the IRQ number associated with a PIN 195Getting the IRQ number associated with a PIN
122-------------------------------------------- 196--------------------------------------------
@@ -128,6 +202,9 @@ Getting the IRQ number associated with a PIN
128 202
129 Note, not all pins have an IRQ. 203 Note, not all pins have an IRQ.
130 204
 205 This call is now implemented by the relevant gpiolib calls; convert
206 your board or driver to use gpiolib.
207
131 208
132Authour 209Authour
133------- 210-------
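As a rough illustration of the GPIOLIB conversion notes above, the sketch below shows how a hypothetical board file might claim and drive a power-control GPIO using gpiolib together with the s3c_gpio_* configuration calls. The pin S3C2410_GPA(0), the "peripheral power" label and the function name are assumptions made up for this example, not taken from any real board:

	#include <linux/kernel.h>
	#include <linux/gpio.h>
	#include <plat/gpio-cfg.h>

	static int __init myboard_power_gpio_init(void)
	{
		int ret;

		/* gpiolib requires the pin to be claimed before use */
		ret = gpio_request(S3C2410_GPA(0), "peripheral power");
		if (WARN_ON(ret))
			return ret;

		/* was: s3c2410_gpio_cfgpin(pin, <output configuration>) */
		s3c_gpio_cfgpin(S3C2410_GPA(0), S3C_GPIO_OUTPUT);

		/* was: s3c2410_gpio_pullup(pin, 1), i.e. pull disabled */
		s3c_gpio_setpull(S3C2410_GPA(0), S3C_GPIO_PULL_NONE);

		/*
		 * was: s3c2410_gpio_setpin(pin, 1); gpio_set_value() does
		 * not set the direction, so make the pin an output (with
		 * an initial value) first.
		 */
		gpio_direction_output(S3C2410_GPA(0), 1);

		return 0;
	}

Later writes can then use gpio_set_value(), and an interrupt number, where one exists, can be obtained with gpio_to_irq() in place of s3c2410_gpio_getirq().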
diff --git a/Documentation/arm/Samsung-S3C24XX/Overview.txt b/Documentation/arm/Samsung-S3C24XX/Overview.txt
index 081892df4fda..c12bfc1a00c9 100644
--- a/Documentation/arm/Samsung-S3C24XX/Overview.txt
+++ b/Documentation/arm/Samsung-S3C24XX/Overview.txt
@@ -8,10 +8,16 @@ Introduction
8 8
9 The Samsung S3C24XX range of ARM9 System-on-Chip CPUs are supported 9 The Samsung S3C24XX range of ARM9 System-on-Chip CPUs are supported
10 by the 's3c2410' architecture of ARM Linux. Currently the S3C2410, 10 by the 's3c2410' architecture of ARM Linux. Currently the S3C2410,
 11 S3C2412, S3C2413, S3C2440, S3C2442 and S3C2443 devices are supported. 11 S3C2412, S3C2413, S3C2416, S3C2440, S3C2442, S3C2443 and S3C2450 devices
12 are supported.
12 13
13 Support for the S3C2400 and S3C24A0 series are in progress. 14 Support for the S3C2400 and S3C24A0 series are in progress.
14 15
16 The S3C2416 and S3C2450 devices are very similar and S3C2450 support is
17 included under the arch/arm/mach-s3c2416 directory. Note, whilst core
18 support for these SoCs is in, work on some of the extra peripherals
19 and extra interrupts is still ongoing.
20
15 21
16Configuration 22Configuration
17------------- 23-------------
@@ -209,6 +215,13 @@ GPIO
209 Newer kernels carry GPIOLIB, and support is being moved towards 215 Newer kernels carry GPIOLIB, and support is being moved towards
210 this with some of the older support in line to be removed. 216 this with some of the older support in line to be removed.
211 217
218 As of v2.6.34, the move towards using gpiolib support is almost
 219 complete, and very few of the old calls are left.
220
221 See Documentation/arm/Samsung-S3C24XX/GPIO.txt for the S3C24XX specific
222 support and Documentation/arm/Samsung/GPIO.txt for the core Samsung
223 implementation.
224
212 225
213Clock Management 226Clock Management
214---------------- 227----------------
diff --git a/Documentation/arm/Samsung/GPIO.txt b/Documentation/arm/Samsung/GPIO.txt
new file mode 100644
index 000000000000..05850c62abeb
--- /dev/null
+++ b/Documentation/arm/Samsung/GPIO.txt
@@ -0,0 +1,42 @@
1 Samsung GPIO implementation
2 ===========================
3
4Introduction
5------------
6
7This outlines the Samsung GPIO implementation and the architecture
 8specific calls provided alongside the drivers/gpio core.
9
10
11S3C24XX (Legacy)
12----------------
13
14See Documentation/arm/Samsung-S3C24XX/GPIO.txt for more information
15about these devices. Their implementation is being brought into line
 16with the core Samsung implementation described in this document.
17
18
19GPIOLIB integration
20-------------------
21
22The gpio implementation uses gpiolib as much as possible, only providing
23specific calls for the items that require Samsung specific handling, such
24as pin special-function or pull resistor control.
25
26GPIO numbering is synchronised between the Samsung and gpiolib system.
27
28
29PIN configuration
30-----------------
31
 32Pin configuration is specific to the Samsung architecture, with each SoC
33registering the necessary information for the core gpio configuration
34implementation to configure pins as necessary.
35
 36The s3c_gpio_cfgpin() and s3c_gpio_setpull() calls provide the means for a
37driver or machine to change gpio configuration.
38
39See arch/arm/plat-samsung/include/plat/gpio-cfg.h for more information
40on these functions.
41
42
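As a small, illustrative example of the two calls above (the pin and function number are assumptions only, loosely modelled on an SDIO data line):

	#include <plat/gpio-cfg.h>

	static void example_cfg_pin(void)
	{
		/* route the pin to special function 2 and disable its pull resistor */
		s3c_gpio_cfgpin(S3C2410_GPE(8), S3C_GPIO_SFN(2));
		s3c_gpio_setpull(S3C2410_GPE(8), S3C_GPIO_PULL_NONE);
	}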
diff --git a/Documentation/arm/Samsung/Overview.txt b/Documentation/arm/Samsung/Overview.txt
index 7cced1fea9c3..c3094ea51aa7 100644
--- a/Documentation/arm/Samsung/Overview.txt
+++ b/Documentation/arm/Samsung/Overview.txt
@@ -13,9 +13,10 @@ Introduction
13 13
14 - S3C24XX: See Documentation/arm/Samsung-S3C24XX/Overview.txt for full list 14 - S3C24XX: See Documentation/arm/Samsung-S3C24XX/Overview.txt for full list
15 - S3C64XX: S3C6400 and S3C6410 15 - S3C64XX: S3C6400 and S3C6410
16 - S5PC6440 16 - S5P6440
17 17 - S5P6442
18 S5PC100 and S5PC110 support is currently being merged 18 - S5PC100
19 - S5PC110 / S5PV210
19 20
20 21
21S3C24XX Systems 22S3C24XX Systems
@@ -35,7 +36,10 @@ Configuration
35 unifying all the SoCs into one kernel. 36 unifying all the SoCs into one kernel.
36 37
37 s5p6440_defconfig - S5P6440 specific default configuration 38 s5p6440_defconfig - S5P6440 specific default configuration
39 s5p6442_defconfig - S5P6442 specific default configuration
38 s5pc100_defconfig - S5PC100 specific default configuration 40 s5pc100_defconfig - S5PC100 specific default configuration
41 s5pc110_defconfig - S5PC110 specific default configuration
42 s5pv210_defconfig - S5PV210 specific default configuration
39 43
40 44
41Layout 45Layout
@@ -50,18 +54,27 @@ Layout
50 specific information. It contains the base clock, GPIO and device definitions 54 specific information. It contains the base clock, GPIO and device definitions
51 to get the system running. 55 to get the system running.
52 56
53 plat-s3c is the s3c24xx/s3c64xx platform directory, although it is currently
54 involved in other builds this will be phased out once the relevant code is
55 moved elsewhere.
56
57 plat-s3c24xx is for s3c24xx specific builds, see the S3C24XX docs. 57 plat-s3c24xx is for s3c24xx specific builds, see the S3C24XX docs.
58 58
59 plat-s3c64xx is for the s3c64xx specific bits, see the S3C24XX docs. 59 plat-s5p is for s5p specific builds, and contains common support for the
60 S5P specific systems. Not all S5Ps use all the features in this directory
61 due to differences in the hardware.
62
63
64Layout changes
65--------------
66
67 The old plat-s3c and plat-s5pc1xx directories have been removed, with
68 support moved to either plat-samsung or plat-s5p as necessary. These moves
 69 were to simplify the include and dependency issues involved with having
70 so many different platform directories.
60 71
61 plat-s5p is for s5p specific builds, more to be added. 72 It was decided to remove plat-s5pc1xx as some of the support was already
 73 in plat-s5p or plat-samsung, with the S5PC110 support added with S5PV210,
 74 the only user was the S5PC100. The S5PC100 specific items were moved to
75 arch/arm/mach-s5pc100.
62 76
63 77
64 [ to finish ]
65 78
66 79
67Port Contributors 80Port Contributors
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 57444c2609fc..b34823ff1646 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -339,7 +339,7 @@ To mount a cgroup hierarchy with all available subsystems, type:
339The "xxx" is not interpreted by the cgroup code, but will appear in 339The "xxx" is not interpreted by the cgroup code, but will appear in
340/proc/mounts so may be any useful identifying string that you like. 340/proc/mounts so may be any useful identifying string that you like.
341 341
342To mount a cgroup hierarchy with just the cpuset and numtasks 342To mount a cgroup hierarchy with just the cpuset and memory
343subsystems, type: 343subsystems, type:
344# mount -t cgroup -o cpuset,memory hier1 /dev/cgroup 344# mount -t cgroup -o cpuset,memory hier1 /dev/cgroup
345 345
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 6cab1f29da4c..7781857dc940 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -1,18 +1,15 @@
1Memory Resource Controller 1Memory Resource Controller
2 2
3NOTE: The Memory Resource Controller has been generically been referred 3NOTE: The Memory Resource Controller has been generically been referred
4to as the memory controller in this document. Do not confuse memory controller 4 to as the memory controller in this document. Do not confuse memory
5used here with the memory controller that is used in hardware. 5 controller used here with the memory controller that is used in hardware.
6 6
7Salient features 7(For editors)
8 8In this document:
9a. Enable control of Anonymous, Page Cache (mapped and unmapped) and 9 When we mention a cgroup (cgroupfs's directory) with memory controller,
10 Swap Cache memory pages. 10 we call it "memory cgroup". When you see git-log and source code, you'll
 11b. The infrastructure allows easy addition of other types of memory to control 11 see that patch titles and function names tend to use "memcg".
12c. Provides *zero overhead* for non memory controller users 12 In this document, we avoid using it.
13d. Provides a double LRU: global memory pressure causes reclaim from the
14 global LRU; a cgroup on hitting a limit, reclaims from the per
15 cgroup LRU
16 13
17Benefits and Purpose of the memory controller 14Benefits and Purpose of the memory controller
18 15
@@ -33,6 +30,45 @@ d. A CD/DVD burner could control the amount of memory used by the
33e. There are several other use cases, find one or use the controller just 30e. There are several other use cases, find one or use the controller just
34 for fun (to learn and hack on the VM subsystem). 31 for fun (to learn and hack on the VM subsystem).
35 32
 33Current Status: linux-2.6.34-mmotm (development version of 2010/April)
34
35Features:
36 - accounting anonymous pages, file caches, swap caches usage and limiting them.
37 - private LRU and reclaim routine. (system's global LRU and private LRU
38 work independently from each other)
39 - optionally, memory+swap usage can be accounted and limited.
40 - hierarchical accounting
41 - soft limit
 42 - moving (recharging) an account when moving a task is selectable.
43 - usage threshold notifier
44 - oom-killer disable knob and oom-notifier
45 - Root cgroup has no limit controls.
46
47 Kernel memory and Hugepages are not under control yet. We just manage
48 pages on LRU. To add more controls, we have to take care of performance.
49
50Brief summary of control files.
51
52 tasks # attach a task(thread) and show list of threads
53 cgroup.procs # show list of processes
54 cgroup.event_control # an interface for event_fd()
55 memory.usage_in_bytes # show current memory(RSS+Cache) usage.
56 memory.memsw.usage_in_bytes # show current memory+Swap usage
57 memory.limit_in_bytes # set/show limit of memory usage
58 memory.memsw.limit_in_bytes # set/show limit of memory+Swap usage
 59 memory.failcnt # show the number of times memory usage hit limits
 60 memory.memsw.failcnt # show the number of times memory+Swap usage hit limits
61 memory.max_usage_in_bytes # show max memory usage recorded
 62 memory.memsw.max_usage_in_bytes # show max memory+Swap usage recorded
63 memory.soft_limit_in_bytes # set/show soft limit of memory usage
64 memory.stat # show various statistics
65 memory.use_hierarchy # set/show hierarchical account enabled
66 memory.force_empty # trigger forced move charge to parent
67 memory.swappiness # set/show swappiness parameter of vmscan
68 (See sysctl's vm.swappiness)
69 memory.move_charge_at_immigrate # set/show controls of moving charges
70 memory.oom_control # set/show oom controls.
71
361. History 721. History
37 73
38The memory controller has a long history. A request for comments for the memory 74The memory controller has a long history. A request for comments for the memory
@@ -106,14 +142,14 @@ the necessary data structures and check if the cgroup that is being charged
106is over its limit. If it is then reclaim is invoked on the cgroup. 142is over its limit. If it is then reclaim is invoked on the cgroup.
107More details can be found in the reclaim section of this document. 143More details can be found in the reclaim section of this document.
108If everything goes well, a page meta-data-structure called page_cgroup is 144If everything goes well, a page meta-data-structure called page_cgroup is
109allocated and associated with the page. This routine also adds the page to 145updated. page_cgroup has its own LRU on cgroup.
110the per cgroup LRU. 146(*) page_cgroup structure is allocated at boot/memory-hotplug time.
111 147
1122.2.1 Accounting details 1482.2.1 Accounting details
113 149
114All mapped anon pages (RSS) and cache pages (Page Cache) are accounted. 150All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
115(some pages which never be reclaimable and will not be on global LRU 151Some pages which are never reclaimable and will not be on the global LRU
116 are not accounted. we just accounts pages under usual vm management.) 152are not accounted. We just account pages under usual VM management.
117 153
118RSS pages are accounted at page_fault unless they've already been accounted 154RSS pages are accounted at page_fault unless they've already been accounted
119for earlier. A file page will be accounted for as Page Cache when it's 155for earlier. A file page will be accounted for as Page Cache when it's
@@ -121,12 +157,19 @@ inserted into inode (radix-tree). While it's mapped into the page tables of
121processes, duplicate accounting is carefully avoided. 157processes, duplicate accounting is carefully avoided.
122 158
123A RSS page is unaccounted when it's fully unmapped. A PageCache page is 159A RSS page is unaccounted when it's fully unmapped. A PageCache page is
124unaccounted when it's removed from radix-tree. 160unaccounted when it's removed from radix-tree. Even if RSS pages are fully
161unmapped (by kswapd), they may exist as SwapCache in the system until they
 162are really freed. Such SwapCaches are also accounted.
163A swapped-in page is not accounted until it's mapped.
164
 165Note: The kernel does swapin-readahead and reads multiple swaps at once.
 166This means swapped-in pages may contain pages for tasks other than the task
 167causing the page fault. So, we avoid accounting at swap-in I/O.
125 168
126At page migration, accounting information is kept. 169At page migration, accounting information is kept.
127 170
128Note: we just account pages-on-lru because our purpose is to control amount 171Note: we just account pages-on-LRU because our purpose is to control amount
129of used pages. not-on-lru pages are tend to be out-of-control from vm view. 172of used pages; not-on-LRU pages tend to be out-of-control from VM view.
130 173
1312.3 Shared Page Accounting 1742.3 Shared Page Accounting
132 175
@@ -143,6 +186,7 @@ caller of swapoff rather than the users of shmem.
143 186
144 187
1452.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP) 1882.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP)
189
146Swap Extension allows you to record charge for swap. A swapped-in page is 190Swap Extension allows you to record charge for swap. A swapped-in page is
147charged back to original page allocator if possible. 191charged back to original page allocator if possible.
148 192
@@ -150,13 +194,20 @@ When swap is accounted, following files are added.
150 - memory.memsw.usage_in_bytes. 194 - memory.memsw.usage_in_bytes.
151 - memory.memsw.limit_in_bytes. 195 - memory.memsw.limit_in_bytes.
152 196
153usage of mem+swap is limited by memsw.limit_in_bytes. 197memsw means memory+swap. Usage of memory+swap is limited by
198memsw.limit_in_bytes.
154 199
155* why 'mem+swap' rather than swap. 200Example: Assume a system with 4G of swap. A task which allocates 6G of memory
201(by mistake) under 2G memory limitation will use all swap.
202In this case, setting memsw.limit_in_bytes=3G will prevent bad use of swap.
203By using memsw limit, you can avoid system OOM which can be caused by swap
204shortage.
205
206* why 'memory+swap' rather than swap.
156The global LRU(kswapd) can swap out arbitrary pages. Swap-out means 207The global LRU(kswapd) can swap out arbitrary pages. Swap-out means
157to move account from memory to swap...there is no change in usage of 208to move account from memory to swap...there is no change in usage of
158mem+swap. In other words, when we want to limit the usage of swap without 209memory+swap. In other words, when we want to limit the usage of swap without
159affecting global LRU, mem+swap limit is better than just limiting swap from 210affecting global LRU, memory+swap limit is better than just limiting swap from
160OS point of view. 211OS point of view.
161 212
162* What happens when a cgroup hits memory.memsw.limit_in_bytes 213* What happens when a cgroup hits memory.memsw.limit_in_bytes
@@ -168,12 +219,12 @@ it by cgroup.
168 219
1692.5 Reclaim 2202.5 Reclaim
170 221
171Each cgroup maintains a per cgroup LRU that consists of an active 222Each cgroup maintains a per cgroup LRU which has the same structure as
172and inactive list. When a cgroup goes over its limit, we first try 223global VM. When a cgroup goes over its limit, we first try
173to reclaim memory from the cgroup so as to make space for the new 224to reclaim memory from the cgroup so as to make space for the new
174pages that the cgroup has touched. If the reclaim is unsuccessful, 225pages that the cgroup has touched. If the reclaim is unsuccessful,
175an OOM routine is invoked to select and kill the bulkiest task in the 226an OOM routine is invoked to select and kill the bulkiest task in the
176cgroup. 227cgroup. (See 10. OOM Control below.)
177 228
178The reclaim algorithm has not been modified for cgroups, except that 229The reclaim algorithm has not been modified for cgroups, except that
179pages that are selected for reclaiming come from the per cgroup LRU 230pages that are selected for reclaiming come from the per cgroup LRU
@@ -184,13 +235,22 @@ limits on the root cgroup.
184 235
185Note2: When panic_on_oom is set to "2", the whole system will panic. 236Note2: When panic_on_oom is set to "2", the whole system will panic.
186 237
1872. Locking 238When oom event notifier is registered, event will be delivered.
239(See oom_control section)
240
2412.6 Locking
188 242
189The memory controller uses the following hierarchy 243 lock_page_cgroup()/unlock_page_cgroup() should not be called under
244 mapping->tree_lock.
190 245
1911. zone->lru_lock is used for selecting pages to be isolated 246 Other lock order is as follows:
1922. mem->per_zone->lru_lock protects the per cgroup LRU (per zone) 247 PG_locked.
1933. lock_page_cgroup() is used to protect page->page_cgroup 248 mm->page_table_lock
249 zone->lru_lock
250 lock_page_cgroup.
251 In many cases, just lock_page_cgroup() is called.
252 per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by
 253 zone->lru_lock; it has no lock of its own.
194 254
1953. User Interface 2553. User Interface
196 256
@@ -199,6 +259,7 @@ The memory controller uses the following hierarchy
199a. Enable CONFIG_CGROUPS 259a. Enable CONFIG_CGROUPS
200b. Enable CONFIG_RESOURCE_COUNTERS 260b. Enable CONFIG_RESOURCE_COUNTERS
201c. Enable CONFIG_CGROUP_MEM_RES_CTLR 261c. Enable CONFIG_CGROUP_MEM_RES_CTLR
262d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension)
202 263
2031. Prepare the cgroups 2641. Prepare the cgroups
204# mkdir -p /cgroups 265# mkdir -p /cgroups
@@ -206,31 +267,28 @@ c. Enable CONFIG_CGROUP_MEM_RES_CTLR
206 267
2072. Make the new group and move bash into it 2682. Make the new group and move bash into it
208# mkdir /cgroups/0 269# mkdir /cgroups/0
209# echo $$ > /cgroups/0/tasks 270# echo $$ > /cgroups/0/tasks
210 271
 211Since now we're in the 0 cgroup, 272Now that we're in the 0 cgroup, we can alter the memory limit:
212We can alter the memory limit:
213# echo 4M > /cgroups/0/memory.limit_in_bytes 273# echo 4M > /cgroups/0/memory.limit_in_bytes
214 274
215NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo, 275NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
216mega or gigabytes. 276mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
277
217NOTE: We can write "-1" to reset the *.limit_in_bytes(unlimited). 278NOTE: We can write "-1" to reset the *.limit_in_bytes(unlimited).
218NOTE: We cannot set limits on the root cgroup any more. 279NOTE: We cannot set limits on the root cgroup any more.
219 280
220# cat /cgroups/0/memory.limit_in_bytes 281# cat /cgroups/0/memory.limit_in_bytes
2214194304 2824194304
222 283
223NOTE: The interface has now changed to display the usage in bytes
224instead of pages
225
226We can check the usage: 284We can check the usage:
227# cat /cgroups/0/memory.usage_in_bytes 285# cat /cgroups/0/memory.usage_in_bytes
2281216512 2861216512
229 287
230A successful write to this file does not guarantee a successful set of 288A successful write to this file does not guarantee a successful set of
231this limit to the value written into the file. This can be due to a 289this limit to the value written into the file. This can be due to a
232number of factors, such as rounding up to page boundaries or the total 290number of factors, such as rounding up to page boundaries or the total
233availability of memory on the system. The user is required to re-read 291availability of memory on the system. The user is required to re-read
234this file after a write to guarantee the value committed by the kernel. 292this file after a write to guarantee the value committed by the kernel.
235 293
236# echo 1 > memory.limit_in_bytes 294# echo 1 > memory.limit_in_bytes
@@ -245,15 +303,23 @@ caches, RSS and Active pages/Inactive pages are shown.
245 303
2464. Testing 3044. Testing
247 305
248Balbir posted lmbench, AIM9, LTP and vmmstress results [10] and [11]. 306For testing features and implementation, see memcg_test.txt.
249Apart from that v6 has been tested with several applications and regular 307
 250daily use. The controller has also been tested on the PPC64, x86_64 and 308Performance testing is also important. To see the memory controller's pure
 251UML platforms. 309overhead, testing on tmpfs will give you good numbers for its small overheads.
 310Example: do a kernel build on tmpfs.
311
 312Page-fault scalability is also important. When measuring a parallel
 313page-fault test, a multi-process test may be better than a multi-thread
 314test because the latter has noise from shared objects/status.
315
 316But the above two test extreme situations.
 317Trying your usual tests under the memory controller is always helpful.
252 318
2534.1 Troubleshooting 3194.1 Troubleshooting
254 320
255Sometimes a user might find that the application under a cgroup is 321Sometimes a user might find that the application under a cgroup is
256terminated. There are several causes for this: 322terminated by OOM killer. There are several causes for this:
257 323
2581. The cgroup limit is too low (just too low to do anything useful) 3241. The cgroup limit is too low (just too low to do anything useful)
2592. The user is using anonymous memory and swap is turned off or too low 3252. The user is using anonymous memory and swap is turned off or too low
@@ -261,6 +327,9 @@ terminated. There are several causes for this:
261A sync followed by echo 1 > /proc/sys/vm/drop_caches will help get rid of 327A sync followed by echo 1 > /proc/sys/vm/drop_caches will help get rid of
262some of the pages cached in the cgroup (page cache pages). 328some of the pages cached in the cgroup (page cache pages).
263 329
 330To know what happens, disable the OOM killer as described in "10. OOM Control"
 331(see below) and observe what happens; this will be helpful.
332
2644.2 Task migration 3334.2 Task migration
265 334
266When a task migrates from one cgroup to another, its charge is not 335When a task migrates from one cgroup to another, its charge is not
@@ -268,16 +337,19 @@ carried forward by default. The pages allocated from the original cgroup still
268remain charged to it, the charge is dropped when the page is freed or 337remain charged to it, the charge is dropped when the page is freed or
269reclaimed. 338reclaimed.
270 339
271Note: You can move charges of a task along with task migration. See 8. 340You can move charges of a task along with task migration.
341See 8. "Move charges at task migration"
272 342
2734.3 Removing a cgroup 3434.3 Removing a cgroup
274 344
275A cgroup can be removed by rmdir, but as discussed in sections 4.1 and 4.2, a 345A cgroup can be removed by rmdir, but as discussed in sections 4.1 and 4.2, a
276cgroup might have some charge associated with it, even though all 346cgroup might have some charge associated with it, even though all
277tasks have migrated away from it. 347tasks have migrated away from it. (because we charge against pages, not
278Such charges are freed(at default) or moved to its parent. When moved, 348against tasks.)
279both of RSS and CACHES are moved to parent. 349
280If both of them are busy, rmdir() returns -EBUSY. See 5.1 Also. 350Such charges are freed or moved to their parent. At moving, both of RSS
351and CACHES are moved to parent.
352rmdir() may return -EBUSY if freeing/moving fails. See 5.1 also.
281 353
282Charges recorded in swap information is not updated at removal of cgroup. 354Charges recorded in swap information is not updated at removal of cgroup.
283Recorded information is discarded and a cgroup which uses swap (swapcache) 355Recorded information is discarded and a cgroup which uses swap (swapcache)
@@ -293,10 +365,10 @@ will be charged as a new owner of it.
293 365
294 # echo 0 > memory.force_empty 366 # echo 0 > memory.force_empty
295 367
296 Almost all pages tracked by this memcg will be unmapped and freed. Some of 368 Almost all pages tracked by this memory cgroup will be unmapped and freed.
297 pages cannot be freed because it's locked or in-use. Such pages are moved 369 Some pages cannot be freed because they are locked or in-use. Such pages are
298 to parent and this cgroup will be empty. But this may return -EBUSY in 370 moved to parent and this cgroup will be empty. This may return -EBUSY if
299 some too busy case. 371 VM is too busy to free/move all pages immediately.
300 372
301 Typical use case of this interface is that calling this before rmdir(). 373 Typical use case of this interface is that calling this before rmdir().
302 Because rmdir() moves all pages to parent, some out-of-use page caches can be 374 Because rmdir() moves all pages to parent, some out-of-use page caches can be
@@ -306,19 +378,41 @@ will be charged as a new owner of it.
306 378
307memory.stat file includes following statistics 379memory.stat file includes following statistics
308 380
381# per-memory cgroup local status
309cache - # of bytes of page cache memory. 382cache - # of bytes of page cache memory.
310rss - # of bytes of anonymous and swap cache memory. 383rss - # of bytes of anonymous and swap cache memory.
384mapped_file - # of bytes of mapped file (includes tmpfs/shmem)
311pgpgin - # of pages paged in (equivalent to # of charging events). 385pgpgin - # of pages paged in (equivalent to # of charging events).
312pgpgout - # of pages paged out (equivalent to # of uncharging events). 386pgpgout - # of pages paged out (equivalent to # of uncharging events).
313active_anon - # of bytes of anonymous and swap cache memory on active 387swap - # of bytes of swap usage
314 lru list.
 315inactive_anon - # of bytes of anonymous memory and swap cache memory on 388inactive_anon - # of bytes of anonymous memory and swap cache memory on
 316 inactive lru list. 389 inactive LRU list.
 317active_file - # of bytes of file-backed memory on active lru list. 390active_anon - # of bytes of anonymous and swap cache memory on active
 318inactive_file - # of bytes of file-backed memory on inactive lru list. 391 LRU list.
392inactive_file - # of bytes of file-backed memory on inactive LRU list.
393active_file - # of bytes of file-backed memory on active LRU list.
319unevictable - # of bytes of memory that cannot be reclaimed (mlocked etc). 394unevictable - # of bytes of memory that cannot be reclaimed (mlocked etc).
320 395
321The following additional stats are dependent on CONFIG_DEBUG_VM. 396# status considering hierarchy (see memory.use_hierarchy settings)
397
398hierarchical_memory_limit - # of bytes of memory limit with regard to hierarchy
399 under which the memory cgroup is
400hierarchical_memsw_limit - # of bytes of memory+swap limit with regard to
401 hierarchy under which memory cgroup is.
402
403total_cache - sum of all children's "cache"
404total_rss - sum of all children's "rss"
 405total_mapped_file - sum of all children's "mapped_file"
406total_pgpgin - sum of all children's "pgpgin"
407total_pgpgout - sum of all children's "pgpgout"
408total_swap - sum of all children's "swap"
409total_inactive_anon - sum of all children's "inactive_anon"
410total_active_anon - sum of all children's "active_anon"
411total_inactive_file - sum of all children's "inactive_file"
412total_active_file - sum of all children's "active_file"
413total_unevictable - sum of all children's "unevictable"
414
415# The following additional stats are dependent on CONFIG_DEBUG_VM.
322 416
323inactive_ratio - VM internal parameter. (see mm/page_alloc.c) 417inactive_ratio - VM internal parameter. (see mm/page_alloc.c)
324recent_rotated_anon - VM internal parameter. (see mm/vmscan.c) 418recent_rotated_anon - VM internal parameter. (see mm/vmscan.c)
@@ -327,24 +421,37 @@ recent_scanned_anon - VM internal parameter. (see mm/vmscan.c)
327recent_scanned_file - VM internal parameter. (see mm/vmscan.c) 421recent_scanned_file - VM internal parameter. (see mm/vmscan.c)
328 422
329Memo: 423Memo:
330 recent_rotated means recent frequency of lru rotation. 424 recent_rotated means recent frequency of LRU rotation.
331 recent_scanned means recent # of scans to lru. 425 recent_scanned means recent # of scans to LRU.
332 showing for better debug please see the code for meanings. 426 showing for better debug please see the code for meanings.
333 427
334Note: 428Note:
335 Only anonymous and swap cache memory is listed as part of 'rss' stat. 429 Only anonymous and swap cache memory is listed as part of 'rss' stat.
336 This should not be confused with the true 'resident set size' or the 430 This should not be confused with the true 'resident set size' or the
337 amount of physical memory used by the cgroup. Per-cgroup rss 431 amount of physical memory used by the cgroup.
 338 accounting is not done yet. 432 'rss + mapped_file' will give you resident set size of cgroup.
 433 (Note: file and shmem may be shared among other cgroups. In that case,
 434 mapped_file is accounted only when the memory cgroup is owner of page
435 cache.)
339 436
3405.3 swappiness 4375.3 swappiness
341 Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
342 438
343 Following cgroups' swappiness can't be changed. 439Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
344 - root cgroup (uses /proc/sys/vm/swappiness).
345 - a cgroup which uses hierarchy and it has child cgroup.
346 - a cgroup which uses hierarchy and not the root of hierarchy.
347 440
441Following cgroups' swappiness can't be changed.
442- root cgroup (uses /proc/sys/vm/swappiness).
 443- a cgroup which uses hierarchy and has other cgroup(s) below it.
444- a cgroup which uses hierarchy and not the root of hierarchy.
445
4465.4 failcnt
447
448A memory cgroup provides memory.failcnt and memory.memsw.failcnt files.
 449This failcnt (== failure count) shows the number of times that a usage counter
450hit its limit. When a memory cgroup hits a limit, failcnt increases and
451memory under it will be reclaimed.
452
453You can reset failcnt by writing 0 to failcnt file.
454# echo 0 > .../memory.failcnt
348 455
3496. Hierarchy support 4566. Hierarchy support
350 457
@@ -363,13 +470,13 @@ hierarchy
363 470
364In the diagram above, with hierarchical accounting enabled, all memory 471In the diagram above, with hierarchical accounting enabled, all memory
365usage of e, is accounted to its ancestors up until the root (i.e, c and root), 472usage of e, is accounted to its ancestors up until the root (i.e, c and root),
366that has memory.use_hierarchy enabled. If one of the ancestors goes over its 473that has memory.use_hierarchy enabled. If one of the ancestors goes over its
367limit, the reclaim algorithm reclaims from the tasks in the ancestor and the 474limit, the reclaim algorithm reclaims from the tasks in the ancestor and the
368children of the ancestor. 475children of the ancestor.
369 476
3706.1 Enabling hierarchical accounting and reclaim 4776.1 Enabling hierarchical accounting and reclaim
371 478
372The memory controller by default disables the hierarchy feature. Support 479A memory cgroup by default disables the hierarchy feature. Support
373can be enabled by writing 1 to memory.use_hierarchy file of the root cgroup 480can be enabled by writing 1 to memory.use_hierarchy file of the root cgroup
374 481
375# echo 1 > memory.use_hierarchy 482# echo 1 > memory.use_hierarchy
@@ -379,10 +486,10 @@ The feature can be disabled by
379# echo 0 > memory.use_hierarchy 486# echo 0 > memory.use_hierarchy
380 487
381NOTE1: Enabling/disabling will fail if the cgroup already has other 488NOTE1: Enabling/disabling will fail if the cgroup already has other
382cgroups created below it. 489 cgroups created below it.
383 490
384NOTE2: When panic_on_oom is set to "2", the whole system will panic in 491NOTE2: When panic_on_oom is set to "2", the whole system will panic in
385case of an oom event in any cgroup. 492 case of an OOM event in any cgroup.
386 493
3877. Soft limits 4947. Soft limits
388 495
@@ -392,7 +499,7 @@ is to allow control groups to use as much of the memory as needed, provided
392a. There is no memory contention 499a. There is no memory contention
393b. They do not exceed their hard limit 500b. They do not exceed their hard limit
394 501
395When the system detects memory contention or low memory control groups 502When the system detects memory contention or low memory, control groups
396are pushed back to their soft limits. If the soft limit of each control 503are pushed back to their soft limits. If the soft limit of each control
397group is very high, they are pushed back as much as possible to make 504group is very high, they are pushed back as much as possible to make
398sure that one control group does not starve the others of memory. 505sure that one control group does not starve the others of memory.
@@ -406,7 +513,7 @@ it gets invoked from balance_pgdat (kswapd).
4067.1 Interface 5137.1 Interface
407 514
408Soft limits can be setup by using the following commands (in this example we 515Soft limits can be setup by using the following commands (in this example we
409assume a soft limit of 256 megabytes) 516assume a soft limit of 256 MiB)
410 517
411# echo 256M > memory.soft_limit_in_bytes 518# echo 256M > memory.soft_limit_in_bytes
412 519
@@ -442,7 +549,7 @@ Note: Charges are moved only when you move mm->owner, IOW, a leader of a thread
442Note: If we cannot find enough space for the task in the destination cgroup, we 549Note: If we cannot find enough space for the task in the destination cgroup, we
443 try to make space by reclaiming memory. Task migration may fail if we 550 try to make space by reclaiming memory. Task migration may fail if we
444 cannot make enough space. 551 cannot make enough space.
 445Note: It can take several seconds if you move charges in giga bytes order. 552Note: It can take several seconds if you move a large amount of charges.
446 553
447And if you want disable it again: 554And if you want disable it again:
448 555
@@ -451,21 +558,27 @@ And if you want disable it again:
4518.2 Type of charges which can be move 5588.2 Type of charges which can be move
452 559
453Each bits of move_charge_at_immigrate has its own meaning about what type of 560Each bits of move_charge_at_immigrate has its own meaning about what type of
 454charges should be moved. 561charges should be moved. But in any case, it must be noted that an account of
 562a page or a swap can be moved only when it is charged to the task's current (old)
563memory cgroup.
455 564
456 bit | what type of charges would be moved ? 565 bit | what type of charges would be moved ?
457 -----+------------------------------------------------------------------------ 566 -----+------------------------------------------------------------------------
458 0 | A charge of an anonymous page(or swap of it) used by the target task. 567 0 | A charge of an anonymous page(or swap of it) used by the target task.
459 | Those pages and swaps must be used only by the target task. You must 568 | Those pages and swaps must be used only by the target task. You must
460 | enable Swap Extension(see 2.4) to enable move of swap charges. 569 | enable Swap Extension(see 2.4) to enable move of swap charges.
461 570 -----+------------------------------------------------------------------------
462Note: Those pages and swaps must be charged to the old cgroup. 571 1 | A charge of file pages(normal file, tmpfs file(e.g. ipc shared memory)
463Note: More type of pages(e.g. file cache, shmem,) will be supported by other 572 | and swaps of tmpfs file) mmapped by the target task. Unlike the case of
464 bits in future. 573 | anonymous pages, file pages(and swaps) in the range mmapped by the task
574 | will be moved even if the task hasn't done page fault, i.e. they might
 575      | not be the task's "RSS", but another task's "RSS" that maps the same file.
576 | And mapcount of the page is ignored(the page can be moved even if
577 | page_mapcount(page) > 1). You must enable Swap Extension(see 2.4) to
578 | enable move of swap charges.
465 579
4668.3 TODO 5808.3 TODO
467 581
468- Add support for other types of pages(e.g. file cache, shmem, etc.).
469- Implement madvise(2) to let users decide the vma to be moved or not to be 582- Implement madvise(2) to let users decide the vma to be moved or not to be
470 moved. 583 moved.
471- All of moving charge operations are done under cgroup_mutex. It's not good 584- All of moving charge operations are done under cgroup_mutex. It's not good
@@ -473,22 +586,61 @@ Note: More type of pages(e.g. file cache, shmem,) will be supported by other
473 586
4749. Memory thresholds 5879. Memory thresholds
475 588
476Memory controler implements memory thresholds using cgroups notification 589Memory cgroup implements memory thresholds using cgroups notification
477API (see cgroups.txt). It allows to register multiple memory and memsw 590API (see cgroups.txt). It allows to register multiple memory and memsw
478thresholds and gets notifications when it crosses. 591thresholds and gets notifications when it crosses.
479 592
480To register a threshold application need: 593To register a threshold application need:
481 - create an eventfd using eventfd(2); 594- create an eventfd using eventfd(2);
482 - open memory.usage_in_bytes or memory.memsw.usage_in_bytes; 595- open memory.usage_in_bytes or memory.memsw.usage_in_bytes;
483 - write string like "<event_fd> <memory.usage_in_bytes> <threshold>" to 596- write string like "<event_fd> <fd of memory.usage_in_bytes> <threshold>" to
484 cgroup.event_control. 597 cgroup.event_control.
485 598
486Application will be notified through eventfd when memory usage crosses 599Application will be notified through eventfd when memory usage crosses
487threshold in any direction. 600threshold in any direction.
488 601
489It's applicable for root and non-root cgroup. 602It's applicable for root and non-root cgroup.
490 603
49110. TODO 60410. OOM Control
605
606memory.oom_control file is for OOM notification and other controls.
607
608Memory cgroup implements OOM notifier using cgroup notification
 609API (See cgroups.txt). It allows you to register multiple OOM notification
 610deliveries and to get a notification when an OOM happens.
611
 612To register a notifier, an application needs to:
613 - create an eventfd using eventfd(2)
614 - open memory.oom_control file
615 - write string like "<event_fd> <fd of memory.oom_control>" to
616 cgroup.event_control
617
 618The application will be notified through the eventfd when an OOM happens.
 619OOM notification doesn't work for the root cgroup.
620
 621You can disable the OOM-killer by writing "1" to the memory.oom_control file, as:
622
 623	# echo 1 > memory.oom_control
624
 625This operation is only allowed for the top cgroup of a sub-hierarchy.
 626If the OOM-killer is disabled, tasks under the cgroup will hang/sleep
 627in the memory cgroup's OOM-waitqueue when they request accountable memory.
628
 629To make them run again, you have to relax the memory cgroup's OOM status by
 630	* enlarging the limit or reducing usage.
 631To reduce usage,
 632	* kill some tasks.
 633	* move some tasks to another group with account migration.
 634	* remove some files (on tmpfs?)
 635
 636Then, the stopped tasks will work again.
637
 638When read, the current status of OOM is shown.
639 oom_kill_disable 0 or 1 (if 1, oom-killer is disabled)
640 under_oom 0 or 1 (if 1, the memory cgroup is under OOM, tasks may
641 be stopped.)
642
64311. TODO
492 644
4931. Add support for accounting huge pages (as a separate controller) 6451. Add support for accounting huge pages (as a separate controller)
4942. Make per-cgroup scanner reclaim not-shared pages first 6462. Make per-cgroup scanner reclaim not-shared pages first
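The threshold and OOM notification interfaces described in sections 9 and 10 above are small enough that a user-space sketch may help. The following is only an illustration under assumptions: the hierarchy is mounted at /cgroups, the group of interest is /cgroups/0, and a 32M usage threshold is used:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/eventfd.h>

	int main(void)
	{
		char cmd[64];
		uint64_t count;
		int efd = eventfd(0, 0);
		int usage = open("/cgroups/0/memory.usage_in_bytes", O_RDONLY);
		int ctrl = open("/cgroups/0/cgroup.event_control", O_WRONLY);

		if (efd < 0 || usage < 0 || ctrl < 0)
			return 1;

		/* "<event_fd> <fd of memory.usage_in_bytes> <threshold>" */
		snprintf(cmd, sizeof(cmd), "%d %d %llu",
			 efd, usage, 32ULL * 1024 * 1024);
		if (write(ctrl, cmd, strlen(cmd)) < 0)
			return 1;

		/*
		 * An OOM notifier (section 10) is registered the same way:
		 * open memory.oom_control and write
		 * "<event_fd> <fd of memory.oom_control>" to
		 * cgroup.event_control.
		 */

		/* read() blocks until the registered event fires */
		while (read(efd, &count, sizeof(count)) == sizeof(count))
			printf("memory threshold crossed, count=%llu\n",
			       (unsigned long long)count);
		return 0;
	}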
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index a86152ae2f6f..672be0109d02 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -646,3 +646,13 @@ Who: Thomas Gleixner <tglx@linutronix.de>
646 646
647---------------------------- 647----------------------------
648 648
649What: old ieee1394 subsystem (CONFIG_IEEE1394)
650When: 2.6.37
651Files: drivers/ieee1394/ except init_ohci1394_dma.c
652Why: superseded by drivers/firewire/ (CONFIG_FIREWIRE) which offers more
653 features, better performance, and better security, all with smaller
654 and more modern code base
655Who: Stefan Richter <stefanr@s5r6.in-berlin.de>
656
657----------------------------
658
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index af1608070cd5..96d4293607ec 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -380,7 +380,7 @@ prototypes:
380 int (*open) (struct inode *, struct file *); 380 int (*open) (struct inode *, struct file *);
381 int (*flush) (struct file *); 381 int (*flush) (struct file *);
382 int (*release) (struct inode *, struct file *); 382 int (*release) (struct inode *, struct file *);
383 int (*fsync) (struct file *, struct dentry *, int datasync); 383 int (*fsync) (struct file *, int datasync);
384 int (*aio_fsync) (struct kiocb *, int datasync); 384 int (*aio_fsync) (struct kiocb *, int datasync);
385 int (*fasync) (int, struct file *, int); 385 int (*fasync) (int, struct file *, int);
386 int (*lock) (struct file *, int, struct file_lock *); 386 int (*lock) (struct file *, int, struct file_lock *);
@@ -429,8 +429,9 @@ check_flags: no
429implementations. If your fs is not using generic_file_llseek, you 429implementations. If your fs is not using generic_file_llseek, you
430need to acquire and release the appropriate locks in your ->llseek(). 430need to acquire and release the appropriate locks in your ->llseek().
431For many filesystems, it is probably safe to acquire the inode 431For many filesystems, it is probably safe to acquire the inode
432mutex. Note some filesystems (i.e. remote ones) provide no 432mutex or just to use i_size_read() instead.
433protection for i_size so you will need to use the BKL. 433Note: this does not protect the file->f_pos against concurrent modifications
434since this is something the userspace has to take care about.
434 435
435Note: ext2_release() was *the* source of contention on fs-intensive 436Note: ext2_release() was *the* source of contention on fs-intensive
436loads and dropping BKL on ->release() helps to get rid of that (we still 437loads and dropping BKL on ->release() helps to get rid of that (we still
diff --git a/Documentation/filesystems/squashfs.txt b/Documentation/filesystems/squashfs.txt
index b324c033035a..203f7202cc9e 100644
--- a/Documentation/filesystems/squashfs.txt
+++ b/Documentation/filesystems/squashfs.txt
@@ -38,7 +38,8 @@ Hard link support: yes no
38Real inode numbers: yes no 38Real inode numbers: yes no
3932-bit uids/gids: yes no 3932-bit uids/gids: yes no
40File creation time: yes no 40File creation time: yes no
41Xattr and ACL support: no no 41Xattr support: yes no
42ACL support: no no
42 43
43Squashfs compresses data, inodes and directories. In addition, inode and 44Squashfs compresses data, inodes and directories. In addition, inode and
44directory data are highly compacted, and packed on byte boundaries. Each 45directory data are highly compacted, and packed on byte boundaries. Each
@@ -58,7 +59,7 @@ obtained from this site also.
583. SQUASHFS FILESYSTEM DESIGN 593. SQUASHFS FILESYSTEM DESIGN
59----------------------------- 60-----------------------------
60 61
61A squashfs filesystem consists of seven parts, packed together on a byte 62A squashfs filesystem consists of a maximum of eight parts, packed together on a byte
62alignment: 63alignment:
63 64
64 --------------- 65 ---------------
@@ -80,6 +81,9 @@ alignment:
80 |---------------| 81 |---------------|
81 | uid/gid | 82 | uid/gid |
82 | lookup table | 83 | lookup table |
84 |---------------|
85 | xattr |
86 | table |
83 --------------- 87 ---------------
84 88
85Compressed data blocks are written to the filesystem as files are read from 89Compressed data blocks are written to the filesystem as files are read from
@@ -192,6 +196,26 @@ This table is stored compressed into metadata blocks. A second index table is
192used to locate these. This second index table for speed of access (and because 196used to locate these. This second index table for speed of access (and because
193it is small) is read at mount time and cached in memory. 197it is small) is read at mount time and cached in memory.
194 198
1993.7 Xattr table
200---------------
201
202The xattr table contains extended attributes for each inode. The xattrs
203for each inode are stored in a list, each list entry containing a type,
204name and value field. The type field encodes the xattr prefix
205("user.", "trusted." etc) and it also encodes how the name/value fields
206should be interpreted. Currently the type indicates whether the value
207is stored inline (in which case the value field contains the xattr value),
208or if it is stored out of line (in which case the value field stores a
209reference to where the actual value is stored). This allows large values
 210to be stored out of line, improving scanning and lookup performance, and it
211also allows values to be de-duplicated, the value being stored once, and
 212all other occurrences holding an out of line reference to that value.
213
214The xattr lists are packed into compressed 8K metadata blocks.
215To reduce overhead in inodes, rather than storing the on-disk
216location of the xattr list inside each inode, a 32-bit xattr id
217is stored. This xattr id is mapped into the location of the xattr
218list using a second xattr id lookup table.
195 219
1964. TODOS AND OUTSTANDING ISSUES 2204. TODOS AND OUTSTANDING ISSUES
197------------------------------- 221-------------------------------
@@ -199,9 +223,7 @@ it is small) is read at mount time and cached in memory.
1994.1 Todo list 2234.1 Todo list
200------------- 224-------------
201 225
202Implement Xattr and ACL support. The Squashfs 4.0 filesystem layout has hooks 226Implement ACL support.
203for these but the code has not been written. Once the code has been written
204the existing layout should not require modification.
205 227
2064.2 Squashfs internal cache 2284.2 Squashfs internal cache
207--------------------------- 229---------------------------
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index b66858538df5..94677e7dcb13 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -401,11 +401,16 @@ otherwise noted.
401 started might not be in the page cache at the end of the 401 started might not be in the page cache at the end of the
402 walk). 402 walk).
403 403
404 truncate: called by the VFS to change the size of a file. The 404 truncate: Deprecated. This will not be called if ->setsize is defined.
405 Called by the VFS to change the size of a file. The
405 i_size field of the inode is set to the desired size by the 406 i_size field of the inode is set to the desired size by the
406 VFS before this method is called. This method is called by 407 VFS before this method is called. This method is called by
407 the truncate(2) system call and related functionality. 408 the truncate(2) system call and related functionality.
408 409
410 Note: ->truncate and vmtruncate are deprecated. Do not add new
411 instances/calls of these. Filesystems should be converted to do their
412 truncate sequence via ->setattr().
413
409 permission: called by the VFS to check for access rights on a POSIX-like 414 permission: called by the VFS to check for access rights on a POSIX-like
410 filesystem. 415 filesystem.
411 416
@@ -729,7 +734,7 @@ struct file_operations {
729 int (*open) (struct inode *, struct file *); 734 int (*open) (struct inode *, struct file *);
730 int (*flush) (struct file *); 735 int (*flush) (struct file *);
731 int (*release) (struct inode *, struct file *); 736 int (*release) (struct inode *, struct file *);
732 int (*fsync) (struct file *, struct dentry *, int datasync); 737 int (*fsync) (struct file *, int datasync);
733 int (*aio_fsync) (struct kiocb *, int datasync); 738 int (*aio_fsync) (struct kiocb *, int datasync);
734 int (*fasync) (int, struct file *, int); 739 int (*fasync) (int, struct file *, int);
735 int (*lock) (struct file *, int, struct file_lock *); 740 int (*lock) (struct file *, int, struct file_lock *);
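Both Locking and vfs.txt above now show the two-argument ->fsync() prototype (the struct dentry * parameter is gone). A minimal, hypothetical conversion might look like the following; the myfs_* names are invented for the example and the body is only a placeholder:

	#include <linux/fs.h>

	/* the file alone is enough to reach the inode via f_mapping->host */
	static int myfs_fsync(struct file *file, int datasync)
	{
		struct inode *inode = file->f_mapping->host;

		/* flush whatever myfs keeps dirty for this inode here */
		(void)inode;
		return 0;
	}

	static const struct file_operations myfs_file_operations = {
		.fsync	= myfs_fsync,
		/* other methods omitted */
	};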
diff --git a/Documentation/hwmon/dme1737 b/Documentation/hwmon/dme1737
index 001d2e70bc11..fc5df7654d63 100644
--- a/Documentation/hwmon/dme1737
+++ b/Documentation/hwmon/dme1737
@@ -9,11 +9,15 @@ Supported chips:
9 * SMSC SCH3112, SCH3114, SCH3116 9 * SMSC SCH3112, SCH3114, SCH3116
10 Prefix: 'sch311x' 10 Prefix: 'sch311x'
11 Addresses scanned: none, address read from Super-I/O config space 11 Addresses scanned: none, address read from Super-I/O config space
12 Datasheet: http://www.nuhorizons.com/FeaturedProducts/Volume1/SMSC/311x.pdf 12 Datasheet: Available on the Internet
13 * SMSC SCH5027 13 * SMSC SCH5027
14 Prefix: 'sch5027' 14 Prefix: 'sch5027'
15 Addresses scanned: I2C 0x2c, 0x2d, 0x2e 15 Addresses scanned: I2C 0x2c, 0x2d, 0x2e
16 Datasheet: Provided by SMSC upon request and under NDA 16 Datasheet: Provided by SMSC upon request and under NDA
17 * SMSC SCH5127
18 Prefix: 'sch5127'
19 Addresses scanned: none, address read from Super-I/O config space
20 Datasheet: Provided by SMSC upon request and under NDA
17 21
18Authors: 22Authors:
19 Juerg Haefliger <juergh@gmail.com> 23 Juerg Haefliger <juergh@gmail.com>
@@ -36,8 +40,8 @@ Description
36----------- 40-----------
37 41
38This driver implements support for the hardware monitoring capabilities of the 42This driver implements support for the hardware monitoring capabilities of the
39SMSC DME1737 and Asus A8000 (which are the same), SMSC SCH5027, and SMSC 43SMSC DME1737 and Asus A8000 (which are the same), SMSC SCH5027, SCH311x,
40SCH311x Super-I/O chips. These chips feature monitoring of 3 temp sensors 44and SCH5127 Super-I/O chips. These chips feature monitoring of 3 temp sensors
41temp[1-3] (2 remote diodes and 1 internal), 7 voltages in[0-6] (6 external and 45temp[1-3] (2 remote diodes and 1 internal), 7 voltages in[0-6] (6 external and
421 internal) and up to 6 fan speeds fan[1-6]. Additionally, the chips implement 461 internal) and up to 6 fan speeds fan[1-6]. Additionally, the chips implement
43up to 5 PWM outputs pwm[1-3,5-6] for controlling fan speeds both manually and 47up to 5 PWM outputs pwm[1-3,5-6] for controlling fan speeds both manually and
@@ -48,14 +52,14 @@ Fan[3-6] and pwm[3,5-6] are optional features and their availability depends on
48the configuration of the chip. The driver will detect which features are 52the configuration of the chip. The driver will detect which features are
49present during initialization and create the sysfs attributes accordingly. 53present during initialization and create the sysfs attributes accordingly.
50 54
51For the SCH311x, fan[1-3] and pwm[1-3] are always present and fan[4-6] and 55For the SCH311x and SCH5127, fan[1-3] and pwm[1-3] are always present and
52pwm[5-6] don't exist. 56fan[4-6] and pwm[5-6] don't exist.
53 57
54The hardware monitoring features of the DME1737, A8000, and SCH5027 are only 58The hardware monitoring features of the DME1737, A8000, and SCH5027 are only
55accessible via SMBus, while the SCH311x only provides access via the ISA bus. 59accessible via SMBus, while the SCH311x and SCH5127 only provide access via
56The driver will therefore register itself as an I2C client driver if it detects 60the ISA bus. The driver will therefore register itself as an I2C client driver
57a DME1737, A8000, or SCH5027 and as a platform driver if it detects a SCH311x 61if it detects a DME1737, A8000, or SCH5027 and as a platform driver if it
58chip. 62detects a SCH311x or SCH5127 chip.
59 63
60 64
61Voltage Monitoring 65Voltage Monitoring
@@ -76,7 +80,7 @@ DME1737, A8000:
76 in6: Vbat (+3.0V) 0V - 4.38V 80 in6: Vbat (+3.0V) 0V - 4.38V
77 81
78SCH311x: 82SCH311x:
79 in0: +2.5V 0V - 6.64V 83 in0: +2.5V 0V - 3.32V
80 in1: Vccp (processor core) 0V - 2V 84 in1: Vccp (processor core) 0V - 2V
81 in2: VCC (internal +3.3V) 0V - 4.38V 85 in2: VCC (internal +3.3V) 0V - 4.38V
82 in3: +5V 0V - 6.64V 86 in3: +5V 0V - 6.64V
@@ -93,6 +97,15 @@ SCH5027:
93 in5: VTR (+3.3V standby) 0V - 4.38V 97 in5: VTR (+3.3V standby) 0V - 4.38V
94 in6: Vbat (+3.0V) 0V - 4.38V 98 in6: Vbat (+3.0V) 0V - 4.38V
95 99
100SCH5127:
 101 in0: +2.5V 0V - 3.32V
102 in1: Vccp (processor core) 0V - 3V
103 in2: VCC (internal +3.3V) 0V - 4.38V
104 in3: V2_IN 0V - 1.5V
105 in4: V1_IN 0V - 1.5V
106 in5: VTR (+3.3V standby) 0V - 4.38V
107 in6: Vbat (+3.0V) 0V - 4.38V
108
96Each voltage input has associated min and max limits which trigger an alarm 109Each voltage input has associated min and max limits which trigger an alarm
97when crossed. 110when crossed.
98 111
@@ -293,3 +306,21 @@ pwm[1-3]_auto_point1_pwm RW Auto PWM pwm point. Auto_point1 is the
293pwm[1-3]_auto_point2_pwm RO Auto PWM pwm point. Auto_point2 is the 306pwm[1-3]_auto_point2_pwm RO Auto PWM pwm point. Auto_point2 is the
294 full-speed duty-cycle which is hard- 307 full-speed duty-cycle which is hard-
295 wired to 255 (100% duty-cycle). 308 wired to 255 (100% duty-cycle).
309
310Chip Differences
311----------------
312
313Feature dme1737 sch311x sch5027 sch5127
314-------------------------------------------------------
315temp[1-3]_offset yes yes
316vid yes
317zone3 yes yes yes
318zone[1-3]_hyst yes yes
319pwm min/off yes yes
320fan3 opt yes opt yes
321pwm3 opt yes opt yes
322fan4 opt opt
323fan5 opt opt
324pwm5 opt opt
325fan6 opt opt
326pwm6 opt opt
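
The voltage, fan and alarm values documented above are reported through the usual hwmon sysfs attributes (millivolts for in*, per Documentation/hwmon/sysfs-interface). A minimal userspace sketch, assuming the chip came up as hwmon0 and looking only at the in0 attributes from the voltage table, could look like this:

/*
 * Illustrative sketch: read the in0 voltage and its alarm flag for a
 * dme1737-family chip.  The hwmon0 path is an assumption; the attribute
 * names and millivolt scaling follow the hwmon sysfs interface.
 */
#include <stdio.h>

static long read_attr(const char *path)
{
	long val = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	long mv = read_attr("/sys/class/hwmon/hwmon0/device/in0_input");
	long alarm = read_attr("/sys/class/hwmon/hwmon0/device/in0_alarm");

	printf("in0 = %ld mV, alarm = %ld\n", mv, alarm);
	return 0;
}
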
diff --git a/Documentation/hwmon/lm63 b/Documentation/hwmon/lm63
index 31660bf97979..b9843eab1afb 100644
--- a/Documentation/hwmon/lm63
+++ b/Documentation/hwmon/lm63
@@ -7,6 +7,11 @@ Supported chips:
7 Addresses scanned: I2C 0x4c 7 Addresses scanned: I2C 0x4c
8 Datasheet: Publicly available at the National Semiconductor website 8 Datasheet: Publicly available at the National Semiconductor website
9 http://www.national.com/pf/LM/LM63.html 9 http://www.national.com/pf/LM/LM63.html
10 * National Semiconductor LM64
11 Prefix: 'lm64'
12 Addresses scanned: I2C 0x18 and 0x4e
13 Datasheet: Publicly available at the National Semiconductor website
14 http://www.national.com/pf/LM/LM64.html
10 15
11Author: Jean Delvare <khali@linux-fr.org> 16Author: Jean Delvare <khali@linux-fr.org>
12 17
@@ -55,3 +60,5 @@ The lm63 driver will not update its values more frequently than every
55second; reading them more often will do no harm, but will return 'old' 60second; reading them more often will do no harm, but will return 'old'
56values. 61values.
57 62
63The LM64 is effectively an LM63 with GPIO lines. The driver does not
64support these GPIO lines at present.
diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245
index 02838a47d862..86b5880d8502 100644
--- a/Documentation/hwmon/ltc4245
+++ b/Documentation/hwmon/ltc4245
@@ -72,9 +72,7 @@ in6_min_alarm 5v output undervoltage alarm
72in7_min_alarm 3v output undervoltage alarm 72in7_min_alarm 3v output undervoltage alarm
73in8_min_alarm Vee (-12v) output undervoltage alarm 73in8_min_alarm Vee (-12v) output undervoltage alarm
74 74
75in9_input GPIO #1 voltage data 75in9_input GPIO voltage data
76in10_input GPIO #2 voltage data
77in11_input GPIO #3 voltage data
78 76
79power1_input 12v power usage (mW) 77power1_input 12v power usage (mW)
80power2_input 5v power usage (mW) 78power2_input 5v power usage (mW)
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index 3de6b0bcb147..d4e2917c6f18 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -80,9 +80,9 @@ All entries (except name) are optional, and should only be created in a
80given driver if the chip has the feature. 80given driver if the chip has the feature.
81 81
82 82
83******** 83*********************
84* Name * 84* Global attributes *
85******** 85*********************
86 86
87name The chip name. 87name The chip name.
88 This should be a short, lowercase string, not containing 88 This should be a short, lowercase string, not containing
@@ -91,6 +91,13 @@ name The chip name.
91 I2C devices get this attribute created automatically. 91 I2C devices get this attribute created automatically.
92 RO 92 RO
93 93
94update_rate The rate at which the chip will update readings.
95 Unit: millisecond
96 RW
97 Some devices have a variable update rate. This attribute
98 can be used to change the update rate to the desired
99 frequency.
100
94 101
95************ 102************
96* Voltages * 103* Voltages *
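
The new update_rate attribute above is a plain sysfs file taking the interval in milliseconds. A short sketch of reading and changing it from userspace, assuming a hypothetical hwmon0 device with a variable rate (the 500 ms value is only an example):

/*
 * Illustrative sketch: query the current update interval and request a
 * 500 ms one.  Path and value are assumptions; not every chip exposes
 * this attribute.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/hwmon/hwmon0/device/update_rate";
	FILE *f = fopen(path, "r");
	long ms;

	if (f) {
		if (fscanf(f, "%ld", &ms) == 1)
			printf("current update rate: %ld ms\n", ms);
		fclose(f);
	}

	f = fopen(path, "w");
	if (f) {
		fprintf(f, "500\n");	/* request readings every 500 ms */
		fclose(f);
	}
	return 0;
}
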
diff --git a/Documentation/hwmon/tmp102 b/Documentation/hwmon/tmp102
new file mode 100644
index 000000000000..8454a7763122
--- /dev/null
+++ b/Documentation/hwmon/tmp102
@@ -0,0 +1,26 @@
1Kernel driver tmp102
2====================
3
4Supported chips:
5 * Texas Instruments TMP102
6 Prefix: 'tmp102'
7 Addresses scanned: none
8 Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp102.html
9
10Author:
11 Steven King <sfking@fdwdc.com>
12
13Description
14-----------
15
16The Texas Instruments TMP102 implements one temperature sensor. Limits can be
17set through the Overtemperature Shutdown register and Hysteresis register. The
18sensor is accurate to 0.5 degree over the range of -25 to +85 C, and to 1.0
19degree from -40 to +125 C. Resolution of the sensor is 0.0625 degree. The
20operating temperature has a minimum of -55 C and a maximum of +150 C.
21
22The TMP102 has a programmable update rate that can select between 8, 4, 1, and
230.5 Hz. (Currently the driver only supports the default of 4 Hz).
24
25The driver provides the common sysfs-interface for temperatures (see
26Documentation/hwmon/sysfs-interface under Temperatures).
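
The 0.0625 degree resolution quoted above corresponds to one LSB of the TMP102's 12-bit, left-justified temperature register (register layout taken from the TI datasheet, not from this patch). A small sketch of the scaling into the millidegrees Celsius reported through hwmon:

/*
 * Illustrative sketch: convert a raw TMP102 temperature register value
 * (12-bit two's complement in the upper bits, 0.0625 degC per LSB) to
 * millidegrees Celsius.
 */
#include <stdio.h>
#include <stdint.h>

static int tmp102_reg_to_mC(uint16_t reg)
{
	int16_t raw = (int16_t)reg >> 4;	/* arithmetic shift keeps the sign */

	return raw * 625 / 10;			/* 62.5 mdegC per LSB */
}

int main(void)
{
	printf("%d mC\n", tmp102_reg_to_mC(0x1900));	/* expect 25000 (+25.0 C) */
	printf("%d mC\n", tmp102_reg_to_mC(0xE700));	/* expect -25000 (-25.0 C) */
	return 0;
}
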
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b56ea860da21..1808f1157f30 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -145,11 +145,10 @@ and is between 256 and 4096 characters. It is defined in the file
145 145
146 acpi= [HW,ACPI,X86] 146 acpi= [HW,ACPI,X86]
147 Advanced Configuration and Power Interface 147 Advanced Configuration and Power Interface
148 Format: { force | off | ht | strict | noirq | rsdt } 148 Format: { force | off | strict | noirq | rsdt }
149 force -- enable ACPI if default was off 149 force -- enable ACPI if default was off
150 off -- disable ACPI if default was on 150 off -- disable ACPI if default was on
151 noirq -- do not use ACPI for IRQ routing 151 noirq -- do not use ACPI for IRQ routing
152 ht -- run only enough ACPI to enable Hyper Threading
153 strict -- Be less tolerant of platforms that are not 152 strict -- Be less tolerant of platforms that are not
154 strictly ACPI specification compliant. 153 strictly ACPI specification compliant.
155 rsdt -- prefer RSDT over (default) XSDT 154 rsdt -- prefer RSDT over (default) XSDT
@@ -758,6 +757,10 @@ and is between 256 and 4096 characters. It is defined in the file
758 Default value is 0. 757 Default value is 0.
759 Value can be changed at runtime via /selinux/enforce. 758 Value can be changed at runtime via /selinux/enforce.
760 759
760 erst_disable [ACPI]
761 Disable Error Record Serialization Table (ERST)
762 support.
763
761 ether= [HW,NET] Ethernet cards parameters 764 ether= [HW,NET] Ethernet cards parameters
762 This option is obsoleted by the "netdev=" option, which 765 This option is obsoleted by the "netdev=" option, which
763 has equivalent usage. See its documentation for details. 766 has equivalent usage. See its documentation for details.
@@ -852,6 +855,11 @@ and is between 256 and 4096 characters. It is defined in the file
852 hd= [EIDE] (E)IDE hard drive subsystem geometry 855 hd= [EIDE] (E)IDE hard drive subsystem geometry
853 Format: <cyl>,<head>,<sect> 856 Format: <cyl>,<head>,<sect>
854 857
858 hest_disable [ACPI]
859 Disable Hardware Error Source Table (HEST) support;
860 corresponding firmware-first mode error processing
861 logic will be disabled.
862
855 highmem=nn[KMG] [KNL,BOOT] forces the highmem zone to have an exact 863 highmem=nn[KMG] [KNL,BOOT] forces the highmem zone to have an exact
856 size of <nn>. This works even on boxes that have no 864 size of <nn>. This works even on boxes that have no
857 highmem otherwise. This also works to reduce highmem 865 highmem otherwise. This also works to reduce highmem
@@ -1252,6 +1260,8 @@ and is between 256 and 4096 characters. It is defined in the file
1252 * nohrst, nosrst, norst: suppress hard, soft 1260 * nohrst, nosrst, norst: suppress hard, soft
1253 and both resets. 1261 and both resets.
1254 1262
1263 * dump_id: dump IDENTIFY data.
1264
1255 If there are multiple matching configurations changing 1265 If there are multiple matching configurations changing
1256 the same attribute, the last one is used. 1266 the same attribute, the last one is used.
1257 1267
diff --git a/Documentation/vm/numa b/Documentation/vm/numa
index e93ad9425e2a..a200a386429d 100644
--- a/Documentation/vm/numa
+++ b/Documentation/vm/numa
@@ -1,41 +1,149 @@
1Started Nov 1999 by Kanoj Sarcar <kanoj@sgi.com> 1Started Nov 1999 by Kanoj Sarcar <kanoj@sgi.com>
2 2
3The intent of this file is to have an uptodate, running commentary 3What is NUMA?
4from different people about NUMA specific code in the Linux vm. 4
5 5This question can be answered from a couple of perspectives: the
6What is NUMA? It is an architecture where the memory access times 6hardware view and the Linux software view.
7for different regions of memory from a given processor varies 7
8according to the "distance" of the memory region from the processor. 8From the hardware perspective, a NUMA system is a computer platform that
9Each region of memory to which access times are the same from any 9comprises multiple components or assemblies each of which may contain 0
10cpu, is called a node. On such architectures, it is beneficial if 10or more CPUs, local memory, and/or IO buses. For brevity and to
11the kernel tries to minimize inter node communications. Schemes 11disambiguate the hardware view of these physical components/assemblies
12for this range from kernel text and read-only data replication 12from the software abstraction thereof, we'll call the components/assemblies
13across nodes, and trying to house all the data structures that 13'cells' in this document.
14key components of the kernel need on memory on that node. 14
15 15Each of the 'cells' may be viewed as an SMP [symmetric multi-processor] subset
16Currently, all the numa support is to provide efficient handling 16of the system--although some components necessary for a stand-alone SMP system
17of widely discontiguous physical memory, so architectures which 17may not be populated on any given cell. The cells of the NUMA system are
18are not NUMA but can have huge holes in the physical address space 18connected together with some sort of system interconnect--e.g., a crossbar or
19can use the same code. All this code is bracketed by CONFIG_DISCONTIGMEM. 19point-to-point link are common types of NUMA system interconnects. Both of
20 20these types of interconnects can be aggregated to create NUMA platforms with
21The initial port includes NUMAizing the bootmem allocator code by 21cells at multiple distances from other cells.
22encapsulating all the pieces of information into a bootmem_data_t 22
23structure. Node specific calls have been added to the allocator. 23For Linux, the NUMA platforms of interest are primarily what is known as Cache
24In theory, any platform which uses the bootmem allocator should 24Coherent NUMA or ccNUMA systems. With ccNUMA systems, all memory is visible
25be able to put the bootmem and mem_map data structures anywhere 25to and accessible from any CPU attached to any cell and cache coherency
26it deems best. 26is handled in hardware by the processor caches and/or the system interconnect.
27 27
28Each node's page allocation data structures have also been encapsulated 28Memory access time and effective memory bandwidth varies depending on how far
29into a pg_data_t. The bootmem_data_t is just one part of this. To 29away the cell containing the CPU or IO bus making the memory access is from the
30make the code look uniform between NUMA and regular UMA platforms, 30cell containing the target memory. For example, access to memory by CPUs
31UMA platforms have a statically allocated pg_data_t too (contig_page_data). 31attached to the same cell will experience faster access times and higher
32For the sake of uniformity, the function num_online_nodes() is also defined 32bandwidths than accesses to memory on other, remote cells. NUMA platforms
33for all platforms. As we run benchmarks, we might decide to NUMAize 33can have cells at multiple remote distances from any given cell.
34more variables like low_on_memory, nr_free_pages etc into the pg_data_t. 34
35 35Platform vendors don't build NUMA systems just to make software developers'
36The NUMA aware page allocation code currently tries to allocate pages 36lives interesting. Rather, this architecture is a means to provide scalable
37from different nodes in a round robin manner. This will be changed to 37memory bandwidth. However, to achieve scalable memory bandwidth, system and
38do concentratic circle search, starting from current node, once the 38application software must arrange for a large majority of the memory references
39NUMA port achieves more maturity. The call alloc_pages_node has been 39[cache misses] to be to "local" memory--memory on the same cell, if any--or
40added, so that drivers can make the call and not worry about whether 40to the closest cell with memory.
41it is running on a NUMA or UMA platform. 41
42This leads to the Linux software view of a NUMA system:
43
44Linux divides the system's hardware resources into multiple software
45abstractions called "nodes". Linux maps the nodes onto the physical cells
46of the hardware platform, abstracting away some of the details for some
47architectures. As with physical cells, software nodes may contain 0 or more
48CPUs, memory and/or IO buses. And, again, memory accesses to memory on
49"closer" nodes--nodes that map to closer cells--will generally experience
50faster access times and higher effective bandwidth than accesses to more
51remote cells.
52
53For some architectures, such as x86, Linux will "hide" any node representing a
54physical cell that has no memory attached, and reassign any CPUs attached to
55that cell to a node representing a cell that does have memory. Thus, on
56these architectures, one cannot assume that all CPUs that Linux associates with
57a given node will see the same local memory access times and bandwidth.
58
59In addition, for some architectures, again x86 is an example, Linux supports
60the emulation of additional nodes. For NUMA emulation, linux will carve up
61the existing nodes--or the system memory for non-NUMA platforms--into multiple
62nodes. Each emulated node will manage a fraction of the underlying cells'
63physical memory. NUMA emluation is useful for testing NUMA kernel and
64application features on non-NUMA platforms, and as a sort of memory resource
65management mechanism when used together with cpusets.
66[see Documentation/cgroups/cpusets.txt]
67
68For each node with memory, Linux constructs an independent memory management
69subsystem, complete with its own free page lists, in-use page lists, usage
70statistics and locks to mediate access. In addition, Linux constructs for
71each memory zone [one or more of DMA, DMA32, NORMAL, HIGH_MEMORY, MOVABLE],
72an ordered "zonelist". A zonelist specifies the zones/nodes to visit when a
73selected zone/node cannot satisfy the allocation request. This situation,
74when a zone has no available memory to satisfy a request, is called
75"overflow" or "fallback".
76
77Because some nodes contain multiple zones containing different types of
78memory, Linux must decide whether to order the zonelists such that allocations
79fall back to the same zone type on a different node, or to a different zone
80type on the same node. This is an important consideration because some zones,
81such as DMA or DMA32, represent relatively scarce resources. Linux chooses
82a default zonelist order based on the sizes of the various zone types relative
83to the total memory of the node and the total memory of the system. The
84default zonelist order may be overridden using the numa_zonelist_order kernel
85boot parameter or sysctl. [see Documentation/kernel-parameters.txt and
86Documentation/sysctl/vm.txt]
87
88By default, Linux will attempt to satisfy memory allocation requests from the
89node to which the CPU that executes the request is assigned. Specifically,
90Linux will attempt to allocate from the first node in the appropriate zonelist
91for the node where the request originates. This is called "local allocation."
92If the "local" node cannot satisfy the request, the kernel will examine other
93nodes' zones in the selected zonelist looking for the first zone in the list
94that can satisfy the request.
95
96Local allocation will tend to keep subsequent access to the allocated memory
97"local" to the underlying physical resources and off the system interconnect--
98as long as the task on whose behalf the kernel allocated some memory does not
99later migrate away from that memory. The Linux scheduler is aware of the
100NUMA topology of the platform--embodied in the "scheduling domains" data
101structures [see Documentation/scheduler/sched-domains.txt]--and the scheduler
102attempts to minimize task migration to distant scheduling domains. However,
103the scheduler does not take a task's NUMA footprint into account directly.
104Thus, under sufficient imbalance, tasks can migrate between nodes, remote
105from their initial node and kernel data structures.
106
107System administrators and application designers can restrict a task's migration
108to improve NUMA locality using various CPU affinity command line interfaces,
109such as taskset(1) and numactl(1), and program interfaces such as
110sched_setaffinity(2). Further, one can modify the kernel's default local
111allocation behavior using Linux NUMA memory policy.
112[see Documentation/vm/numa_memory_policy.]
113
114System administrators can restrict the CPUs and nodes' memories that a non-
115privileged user can specify in the scheduling or NUMA commands and functions
116using control groups and CPUsets. [see Documentation/cgroups/CPUsets.txt]
117
118On architectures that do not hide memoryless nodes, Linux will include only
119zones [nodes] with memory in the zonelists. This means that for a memoryless
120node the "local memory node"--the node of the first zone in CPU's node's
121zonelist--will not be the node itself. Rather, it will be the node that the
122kernel selected as the nearest node with memory when it built the zonelists.
123So, default, local allocations will succeed with the kernel supplying the
124closest available memory. This is a consequence of the same mechanism that
125allows such allocations to fallback to other nearby nodes when a node that
126does contain memory overflows.
127
128Some kernel allocations do not want or cannot tolerate this allocation fallback
129behavior. Rather they want to be sure they get memory from the specified node
130or get notified that the node has no free memory. This is usually the case when
131a subsystem allocates per CPU memory resources, for example.
132
133A typical model for making such an allocation is to obtain the node id of the
134node to which the "current CPU" is attached using one of the kernel's
135numa_node_id() or CPU_to_node() functions and then request memory from only
136the node id returned. When such an allocation fails, the requesting subsystem
137may revert to its own fallback path. The slab kernel memory allocator is an
138example of this. Or, the subsystem may choose to disable or not to enable
139itself on allocation failure. The kernel profiling subsystem is an example of
140this.
141
142If the architecture supports--does not hide--memoryless nodes, then CPUs
143attached to memoryless nodes would always incur the fallback path overhead
 144or some subsystems would fail to initialize if they attempted to allocate
145memory exclusively from a node without memory. To support such
146architectures transparently, kernel subsystems can use the numa_mem_id()
147or cpu_to_mem() function to locate the "local memory node" for the calling or
148specified CPU. Again, this is the same node from which default, local page
149allocations will be attempted.
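
A kernel-style sketch of the node-local allocation pattern described above. The structure and helper names here are hypothetical, and real subsystems add their own fallback handling, but cpu_to_node(), cpu_to_mem() and kmalloc_node() are the interfaces the text refers to:

/*
 * Illustrative sketch only: allocate from the node the CPU is attached
 * to, then fall back to the CPU's "local memory node", which can differ
 * on platforms with memoryless nodes.
 */
#include <linux/slab.h>
#include <linux/topology.h>

struct my_percpu_data {		/* hypothetical per-CPU payload */
	unsigned long counter;
};

static struct my_percpu_data *alloc_on_local_node(int cpu)
{
	int nid = cpu_to_node(cpu);
	struct my_percpu_data *p;

	p = kmalloc_node(sizeof(*p), GFP_KERNEL, nid);
	if (!p)
		p = kmalloc_node(sizeof(*p), GFP_KERNEL, cpu_to_mem(cpu));

	return p;
}
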
diff --git a/MAINTAINERS b/MAINTAINERS
index a8fe9b461e09..13608bd2e791 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -969,6 +969,18 @@ M: Wan ZongShun <mcuos.com@gmail.com>
969L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 969L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
970W: http://www.mcuos.com 970W: http://www.mcuos.com
971S: Maintained 971S: Maintained
972F: arch/arm/mach-w90x900/
973F: arch/arm/mach-nuc93x/
974F: drivers/input/keyboard/w90p910_keypad.c
975F: drivers/input/touchscreen/w90p910_ts.c
976F: drivers/watchdog/nuc900_wdt.c
977F: drivers/net/arm/w90p910_ether.c
978F: drivers/mtd/nand/w90p910_nand.c
979F: drivers/rtc/rtc-nuc900.c
980F: drivers/spi/spi_nuc900.c
981F: drivers/usb/host/ehci-w90x900.c
982F: drivers/video/nuc900fb.c
983F: drivers/sound/soc/nuc900/
972 984
973ARM/U300 MACHINE SUPPORT 985ARM/U300 MACHINE SUPPORT
974M: Linus Walleij <linus.walleij@stericsson.com> 986M: Linus Walleij <linus.walleij@stericsson.com>
@@ -2875,6 +2887,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input.git
2875S: Maintained 2887S: Maintained
2876F: drivers/input/ 2888F: drivers/input/
2877 2889
2890INTEL IDLE DRIVER
2891M: Len Brown <lenb@kernel.org>
2892L: linux-pm@lists.linux-foundation.org
2893T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6.git
2894S: Supported
2895F: drivers/idle/intel_idle.c
2896
2878INTEL FRAMEBUFFER DRIVER (excluding 810 and 815) 2897INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
2879M: Maik Broemme <mbroemme@plusserver.de> 2898M: Maik Broemme <mbroemme@plusserver.de>
2880L: linux-fbdev@vger.kernel.org 2899L: linux-fbdev@vger.kernel.org
@@ -4824,6 +4843,9 @@ W: http://www.ibm.com/developerworks/linux/linux390/
4824S: Supported 4843S: Supported
4825F: arch/s390/ 4844F: arch/s390/
4826F: drivers/s390/ 4845F: drivers/s390/
4846F: fs/partitions/ibm.c
4847F: Documentation/s390/
4848F: Documentation/DocBook/s390*
4827 4849
4828S390 NETWORK DRIVERS 4850S390 NETWORK DRIVERS
4829M: Ursula Braun <ursula.braun@de.ibm.com> 4851M: Ursula Braun <ursula.braun@de.ibm.com>
@@ -4992,6 +5014,12 @@ L: linux-mmc@vger.kernel.org
4992S: Maintained 5014S: Maintained
4993F: drivers/mmc/host/sdhci-s3c.c 5015F: drivers/mmc/host/sdhci-s3c.c
4994 5016
5017SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER
5018M: Viresh Kumar <viresh.kumar@st.com>
5019L: linux-mmc@vger.kernel.org
5020S: Maintained
5021F: drivers/mmc/host/sdhci-spear.c
5022
4995SECURITY SUBSYSTEM 5023SECURITY SUBSYSTEM
4996M: James Morris <jmorris@namei.org> 5024M: James Morris <jmorris@namei.org>
4997L: linux-security-module@vger.kernel.org (suggested Cc:) 5025L: linux-security-module@vger.kernel.org (suggested Cc:)
diff --git a/Makefile b/Makefile
index ebc8225f7a96..6e39ec701cbf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 34 3SUBLEVEL = 35
4EXTRAVERSION = 4EXTRAVERSION = -rc1
5NAME = Sheep on Meth 5NAME = Sheep on Meth
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 24efdfe277fc..3e2e540a0f2a 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -61,6 +61,9 @@ config ZONE_DMA
61config NEED_DMA_MAP_STATE 61config NEED_DMA_MAP_STATE
62 def_bool y 62 def_bool y
63 63
64config NEED_SG_DMA_LENGTH
65 def_bool y
66
64config GENERIC_ISA_DMA 67config GENERIC_ISA_DMA
65 bool 68 bool
66 default y 69 default y
diff --git a/arch/alpha/include/asm/scatterlist.h b/arch/alpha/include/asm/scatterlist.h
index 440747ca6349..5728c52a7412 100644
--- a/arch/alpha/include/asm/scatterlist.h
+++ b/arch/alpha/include/asm/scatterlist.h
@@ -1,24 +1,7 @@
1#ifndef _ALPHA_SCATTERLIST_H 1#ifndef _ALPHA_SCATTERLIST_H
2#define _ALPHA_SCATTERLIST_H 2#define _ALPHA_SCATTERLIST_H
3 3
4#include <asm/page.h> 4#include <asm-generic/scatterlist.h>
5#include <asm/types.h>
6
7struct scatterlist {
8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
12 unsigned int offset;
13
14 unsigned int length;
15
16 dma_addr_t dma_address;
17 __u32 dma_length;
18};
19
20#define sg_dma_address(sg) ((sg)->dma_address)
21#define sg_dma_len(sg) ((sg)->dma_length)
22 5
23#define ISA_DMA_THRESHOLD (~0UL) 6#define ISA_DMA_THRESHOLD (~0UL)
24 7
diff --git a/arch/alpha/math-emu/sfp-util.h b/arch/alpha/math-emu/sfp-util.h
index d4c6ae7fee47..f53707f77455 100644
--- a/arch/alpha/math-emu/sfp-util.h
+++ b/arch/alpha/math-emu/sfp-util.h
@@ -28,3 +28,8 @@ extern unsigned long __udiv_qrnnd (unsigned long *, unsigned long,
28#define UDIV_NEEDS_NORMALIZATION 1 28#define UDIV_NEEDS_NORMALIZATION 1
29 29
30#define abort() goto bad_insn 30#define abort() goto bad_insn
31
32#ifndef __LITTLE_ENDIAN
33#define __LITTLE_ENDIAN -1
34#endif
35#define __BYTE_ORDER __LITTLE_ENDIAN
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 64ba313724d2..ad81ece7f826 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -145,8 +145,8 @@ machine-$(CONFIG_ARCH_LOKI) := loki
145machine-$(CONFIG_ARCH_MMP) := mmp 145machine-$(CONFIG_ARCH_MMP) := mmp
146machine-$(CONFIG_ARCH_MSM) := msm 146machine-$(CONFIG_ARCH_MSM) := msm
147machine-$(CONFIG_ARCH_MV78XX0) := mv78xx0 147machine-$(CONFIG_ARCH_MV78XX0) := mv78xx0
148machine-$(CONFIG_ARCH_MX1) := mx1 148machine-$(CONFIG_ARCH_MX1) := imx
149machine-$(CONFIG_ARCH_MX2) := mx2 149machine-$(CONFIG_ARCH_MX2) := imx
150machine-$(CONFIG_ARCH_MX25) := mx25 150machine-$(CONFIG_ARCH_MX25) := mx25
151machine-$(CONFIG_ARCH_MX3) := mx3 151machine-$(CONFIG_ARCH_MX3) := mx3
152machine-$(CONFIG_ARCH_MX5) := mx5 152machine-$(CONFIG_ARCH_MX5) := mx5
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 9236475e7131..44cea2ddd22b 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -1,12 +1,14 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.34 3# Linux kernel version: 2.6.34
4# Sat May 22 03:17:31 2010 4# Fri May 28 19:15:48 2010
5# 5#
6CONFIG_ARM=y 6CONFIG_ARM=y
7CONFIG_HAVE_PWM=y 7CONFIG_HAVE_PWM=y
8CONFIG_SYS_SUPPORTS_APM_EMULATION=y 8CONFIG_SYS_SUPPORTS_APM_EMULATION=y
9CONFIG_GENERIC_GPIO=y 9CONFIG_GENERIC_GPIO=y
10CONFIG_GENERIC_TIME=y
11CONFIG_ARCH_USES_GETTIMEOFFSET=y
10CONFIG_HAVE_PROC_CPU=y 12CONFIG_HAVE_PROC_CPU=y
11CONFIG_NO_IOPORT=y 13CONFIG_NO_IOPORT=y
12CONFIG_GENERIC_HARDIRQS=y 14CONFIG_GENERIC_HARDIRQS=y
@@ -35,6 +37,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
35CONFIG_LOCALVERSION="" 37CONFIG_LOCALVERSION=""
36CONFIG_LOCALVERSION_AUTO=y 38CONFIG_LOCALVERSION_AUTO=y
37CONFIG_HAVE_KERNEL_GZIP=y 39CONFIG_HAVE_KERNEL_GZIP=y
40CONFIG_HAVE_KERNEL_LZMA=y
38CONFIG_HAVE_KERNEL_LZO=y 41CONFIG_HAVE_KERNEL_LZO=y
39CONFIG_KERNEL_GZIP=y 42CONFIG_KERNEL_GZIP=y
40# CONFIG_KERNEL_BZIP2 is not set 43# CONFIG_KERNEL_BZIP2 is not set
@@ -186,9 +189,11 @@ CONFIG_MMU=y
186# CONFIG_ARCH_INTEGRATOR is not set 189# CONFIG_ARCH_INTEGRATOR is not set
187# CONFIG_ARCH_REALVIEW is not set 190# CONFIG_ARCH_REALVIEW is not set
188# CONFIG_ARCH_VERSATILE is not set 191# CONFIG_ARCH_VERSATILE is not set
192# CONFIG_ARCH_VEXPRESS is not set
189# CONFIG_ARCH_AT91 is not set 193# CONFIG_ARCH_AT91 is not set
190# CONFIG_ARCH_BCMRING is not set 194# CONFIG_ARCH_BCMRING is not set
191# CONFIG_ARCH_CLPS711X is not set 195# CONFIG_ARCH_CLPS711X is not set
196# CONFIG_ARCH_CNS3XXX is not set
192# CONFIG_ARCH_GEMINI is not set 197# CONFIG_ARCH_GEMINI is not set
193# CONFIG_ARCH_EBSA110 is not set 198# CONFIG_ARCH_EBSA110 is not set
194# CONFIG_ARCH_EP93XX is not set 199# CONFIG_ARCH_EP93XX is not set
@@ -224,7 +229,7 @@ CONFIG_ARCH_S3C2410=y
224# CONFIG_ARCH_S3C64XX is not set 229# CONFIG_ARCH_S3C64XX is not set
225# CONFIG_ARCH_S5P6440 is not set 230# CONFIG_ARCH_S5P6440 is not set
226# CONFIG_ARCH_S5P6442 is not set 231# CONFIG_ARCH_S5P6442 is not set
227# CONFIG_ARCH_S5PC1XX is not set 232# CONFIG_ARCH_S5PC100 is not set
228# CONFIG_ARCH_S5PV210 is not set 233# CONFIG_ARCH_S5PV210 is not set
229# CONFIG_ARCH_SHARK is not set 234# CONFIG_ARCH_SHARK is not set
230# CONFIG_ARCH_LH7A40X is not set 235# CONFIG_ARCH_LH7A40X is not set
@@ -233,6 +238,7 @@ CONFIG_ARCH_S3C2410=y
233# CONFIG_ARCH_NOMADIK is not set 238# CONFIG_ARCH_NOMADIK is not set
234# CONFIG_ARCH_DAVINCI is not set 239# CONFIG_ARCH_DAVINCI is not set
235# CONFIG_ARCH_OMAP is not set 240# CONFIG_ARCH_OMAP is not set
241# CONFIG_PLAT_SPEAR is not set
236CONFIG_PLAT_SAMSUNG=y 242CONFIG_PLAT_SAMSUNG=y
237 243
238# 244#
@@ -243,11 +249,18 @@ CONFIG_S3C_BOOT_ERROR_RESET=y
243CONFIG_S3C_BOOT_UART_FORCE_FIFO=y 249CONFIG_S3C_BOOT_UART_FORCE_FIFO=y
244CONFIG_S3C_LOWLEVEL_UART_PORT=0 250CONFIG_S3C_LOWLEVEL_UART_PORT=0
245CONFIG_SAMSUNG_CLKSRC=y 251CONFIG_SAMSUNG_CLKSRC=y
252CONFIG_S3C_GPIO_CFG_S3C24XX=y
253CONFIG_S3C_GPIO_PULL_UPDOWN=y
254CONFIG_S3C_GPIO_PULL_UP=y
246CONFIG_SAMSUNG_GPIO_EXTRA=0 255CONFIG_SAMSUNG_GPIO_EXTRA=0
247CONFIG_S3C_GPIO_SPACE=0 256CONFIG_S3C_GPIO_SPACE=0
248CONFIG_S3C_ADC=y 257CONFIG_S3C_ADC=y
249CONFIG_S3C_DEV_HSMMC=y 258CONFIG_S3C_DEV_HSMMC=y
259CONFIG_S3C_DEV_HSMMC1=y
260CONFIG_S3C_DEV_HWMON=y
261CONFIG_S3C_DEV_FB=y
250CONFIG_S3C_DEV_USB_HOST=y 262CONFIG_S3C_DEV_USB_HOST=y
263CONFIG_S3C_DEV_WDT=y
251CONFIG_S3C_DEV_NAND=y 264CONFIG_S3C_DEV_NAND=y
252CONFIG_S3C_DMA=y 265CONFIG_S3C_DMA=y
253 266
@@ -260,6 +273,7 @@ CONFIG_PLAT_S3C24XX=y
260CONFIG_CPU_LLSERIAL_S3C2410=y 273CONFIG_CPU_LLSERIAL_S3C2410=y
261CONFIG_CPU_LLSERIAL_S3C2440=y 274CONFIG_CPU_LLSERIAL_S3C2440=y
262CONFIG_S3C2410_CLOCK=y 275CONFIG_S3C2410_CLOCK=y
276CONFIG_S3C2443_CLOCK=y
263CONFIG_S3C24XX_DCLK=y 277CONFIG_S3C24XX_DCLK=y
264CONFIG_S3C24XX_PWM=y 278CONFIG_S3C24XX_PWM=y
265CONFIG_S3C24XX_GPIO_EXTRA=128 279CONFIG_S3C24XX_GPIO_EXTRA=128
@@ -270,6 +284,7 @@ CONFIG_S3C2410_DMA=y
270# CONFIG_S3C2410_DMA_DEBUG is not set 284# CONFIG_S3C2410_DMA_DEBUG is not set
271CONFIG_MACH_SMDK=y 285CONFIG_MACH_SMDK=y
272CONFIG_S3C24XX_SIMTEC_AUDIO=y 286CONFIG_S3C24XX_SIMTEC_AUDIO=y
287CONFIG_S3C2410_SETUP_TS=y
273 288
274# 289#
275# S3C2400 Machines 290# S3C2400 Machines
@@ -289,6 +304,7 @@ CONFIG_ARCH_H1940=y
289# CONFIG_H1940BT is not set 304# CONFIG_H1940BT is not set
290CONFIG_PM_H1940=y 305CONFIG_PM_H1940=y
291CONFIG_MACH_N30=y 306CONFIG_MACH_N30=y
307CONFIG_MACH_N35=y
292CONFIG_ARCH_BAST=y 308CONFIG_ARCH_BAST=y
293CONFIG_MACH_OTOM=y 309CONFIG_MACH_OTOM=y
294CONFIG_MACH_AML_M5900=y 310CONFIG_MACH_AML_M5900=y
@@ -309,6 +325,13 @@ CONFIG_MACH_SMDK2413=y
309CONFIG_MACH_S3C2413=y 325CONFIG_MACH_S3C2413=y
310CONFIG_MACH_SMDK2412=y 326CONFIG_MACH_SMDK2412=y
311CONFIG_MACH_VSTMS=y 327CONFIG_MACH_VSTMS=y
328CONFIG_CPU_S3C2416=y
329CONFIG_S3C2416_DMA=y
330
331#
332# S3C2416 Machines
333#
334CONFIG_MACH_SMDK2416=y
312CONFIG_CPU_S3C2440=y 335CONFIG_CPU_S3C2440=y
313CONFIG_CPU_S3C2442=y 336CONFIG_CPU_S3C2442=y
314CONFIG_CPU_S3C244X=y 337CONFIG_CPU_S3C244X=y
@@ -320,9 +343,9 @@ CONFIG_S3C2440_DMA=y
320# S3C2440 and S3C2442 Machines 343# S3C2440 and S3C2442 Machines
321# 344#
322CONFIG_MACH_ANUBIS=y 345CONFIG_MACH_ANUBIS=y
323# CONFIG_MACH_NEO1973_GTA02 is not set 346CONFIG_MACH_NEO1973_GTA02=y
324CONFIG_MACH_OSIRIS=y 347CONFIG_MACH_OSIRIS=y
325# CONFIG_MACH_OSIRIS_DVS is not set 348CONFIG_MACH_OSIRIS_DVS=m
326CONFIG_MACH_RX3715=y 349CONFIG_MACH_RX3715=y
327CONFIG_ARCH_S3C2440=y 350CONFIG_ARCH_S3C2440=y
328CONFIG_MACH_NEXCODER_2440=y 351CONFIG_MACH_NEXCODER_2440=y
@@ -330,6 +353,7 @@ CONFIG_SMDK2440_CPU2440=y
330CONFIG_SMDK2440_CPU2442=y 353CONFIG_SMDK2440_CPU2442=y
331CONFIG_MACH_AT2440EVB=y 354CONFIG_MACH_AT2440EVB=y
332CONFIG_MACH_MINI2440=y 355CONFIG_MACH_MINI2440=y
356CONFIG_MACH_RX1950=y
333CONFIG_CPU_S3C2443=y 357CONFIG_CPU_S3C2443=y
334CONFIG_S3C2443_DMA=y 358CONFIG_S3C2443_DMA=y
335 359
@@ -410,6 +434,7 @@ CONFIG_ALIGNMENT_TRAP=y
410CONFIG_ZBOOT_ROM_TEXT=0x0 434CONFIG_ZBOOT_ROM_TEXT=0x0
411CONFIG_ZBOOT_ROM_BSS=0x0 435CONFIG_ZBOOT_ROM_BSS=0x0
412CONFIG_CMDLINE="root=/dev/hda1 ro init=/bin/bash console=ttySAC0" 436CONFIG_CMDLINE="root=/dev/hda1 ro init=/bin/bash console=ttySAC0"
437# CONFIG_CMDLINE_FORCE is not set
413# CONFIG_XIP_KERNEL is not set 438# CONFIG_XIP_KERNEL is not set
414# CONFIG_KEXEC is not set 439# CONFIG_KEXEC is not set
415 440
@@ -509,7 +534,9 @@ CONFIG_TCP_CONG_ILLINOIS=m
509# CONFIG_DEFAULT_BIC is not set 534# CONFIG_DEFAULT_BIC is not set
510CONFIG_DEFAULT_CUBIC=y 535CONFIG_DEFAULT_CUBIC=y
511# CONFIG_DEFAULT_HTCP is not set 536# CONFIG_DEFAULT_HTCP is not set
537# CONFIG_DEFAULT_HYBLA is not set
512# CONFIG_DEFAULT_VEGAS is not set 538# CONFIG_DEFAULT_VEGAS is not set
539# CONFIG_DEFAULT_VENO is not set
513# CONFIG_DEFAULT_WESTWOOD is not set 540# CONFIG_DEFAULT_WESTWOOD is not set
514# CONFIG_DEFAULT_RENO is not set 541# CONFIG_DEFAULT_RENO is not set
515CONFIG_DEFAULT_TCP_CONG="cubic" 542CONFIG_DEFAULT_TCP_CONG="cubic"
@@ -566,6 +593,16 @@ CONFIG_NF_CONNTRACK_TFTP=m
566CONFIG_NF_CT_NETLINK=m 593CONFIG_NF_CT_NETLINK=m
567# CONFIG_NETFILTER_TPROXY is not set 594# CONFIG_NETFILTER_TPROXY is not set
568CONFIG_NETFILTER_XTABLES=m 595CONFIG_NETFILTER_XTABLES=m
596
597#
598# Xtables combined modules
599#
600CONFIG_NETFILTER_XT_MARK=m
601CONFIG_NETFILTER_XT_CONNMARK=m
602
603#
604# Xtables targets
605#
569CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 606CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
570CONFIG_NETFILTER_XT_TARGET_CONNMARK=m 607CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
571# CONFIG_NETFILTER_XT_TARGET_CT is not set 608# CONFIG_NETFILTER_XT_TARGET_CT is not set
@@ -577,9 +614,14 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m
577CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 614CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
578# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set 615# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
579CONFIG_NETFILTER_XT_TARGET_RATEEST=m 616CONFIG_NETFILTER_XT_TARGET_RATEEST=m
617# CONFIG_NETFILTER_XT_TARGET_TEE is not set
580# CONFIG_NETFILTER_XT_TARGET_TRACE is not set 618# CONFIG_NETFILTER_XT_TARGET_TRACE is not set
581CONFIG_NETFILTER_XT_TARGET_TCPMSS=m 619CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
582# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set 620# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
621
622#
623# Xtables matches
624#
583CONFIG_NETFILTER_XT_MATCH_CLUSTER=m 625CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
584CONFIG_NETFILTER_XT_MATCH_COMMENT=m 626CONFIG_NETFILTER_XT_MATCH_COMMENT=m
585CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m 627CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
@@ -598,6 +640,7 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
598CONFIG_NETFILTER_XT_MATCH_MAC=m 640CONFIG_NETFILTER_XT_MATCH_MAC=m
599CONFIG_NETFILTER_XT_MATCH_MARK=m 641CONFIG_NETFILTER_XT_MATCH_MARK=m
600CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m 642CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
643# CONFIG_NETFILTER_XT_MATCH_OSF is not set
601CONFIG_NETFILTER_XT_MATCH_OWNER=m 644CONFIG_NETFILTER_XT_MATCH_OWNER=m
602CONFIG_NETFILTER_XT_MATCH_POLICY=m 645CONFIG_NETFILTER_XT_MATCH_POLICY=m
603CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m 646CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
@@ -605,7 +648,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
605CONFIG_NETFILTER_XT_MATCH_RATEEST=m 648CONFIG_NETFILTER_XT_MATCH_RATEEST=m
606CONFIG_NETFILTER_XT_MATCH_REALM=m 649CONFIG_NETFILTER_XT_MATCH_REALM=m
607CONFIG_NETFILTER_XT_MATCH_RECENT=m 650CONFIG_NETFILTER_XT_MATCH_RECENT=m
608# CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT is not set
609CONFIG_NETFILTER_XT_MATCH_SCTP=m 651CONFIG_NETFILTER_XT_MATCH_SCTP=m
610CONFIG_NETFILTER_XT_MATCH_STATE=m 652CONFIG_NETFILTER_XT_MATCH_STATE=m
611CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 653CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
@@ -613,7 +655,6 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
613CONFIG_NETFILTER_XT_MATCH_TCPMSS=m 655CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
614CONFIG_NETFILTER_XT_MATCH_TIME=m 656CONFIG_NETFILTER_XT_MATCH_TIME=m
615CONFIG_NETFILTER_XT_MATCH_U32=m 657CONFIG_NETFILTER_XT_MATCH_U32=m
616# CONFIG_NETFILTER_XT_MATCH_OSF is not set
617CONFIG_IP_VS=m 658CONFIG_IP_VS=m
618# CONFIG_IP_VS_IPV6 is not set 659# CONFIG_IP_VS_IPV6 is not set
619# CONFIG_IP_VS_DEBUG is not set 660# CONFIG_IP_VS_DEBUG is not set
@@ -713,6 +754,7 @@ CONFIG_IP6_NF_RAW=m
713# CONFIG_RDS is not set 754# CONFIG_RDS is not set
714# CONFIG_TIPC is not set 755# CONFIG_TIPC is not set
715# CONFIG_ATM is not set 756# CONFIG_ATM is not set
757# CONFIG_L2TP is not set
716# CONFIG_BRIDGE is not set 758# CONFIG_BRIDGE is not set
717# CONFIG_NET_DSA is not set 759# CONFIG_NET_DSA is not set
718# CONFIG_VLAN_8021Q is not set 760# CONFIG_VLAN_8021Q is not set
@@ -739,6 +781,7 @@ CONFIG_NET_CLS_ROUTE=y
739# CONFIG_IRDA is not set 781# CONFIG_IRDA is not set
740CONFIG_BT=m 782CONFIG_BT=m
741CONFIG_BT_L2CAP=m 783CONFIG_BT_L2CAP=m
784# CONFIG_BT_L2CAP_EXT_FEATURES is not set
742CONFIG_BT_SCO=m 785CONFIG_BT_SCO=m
743CONFIG_BT_RFCOMM=m 786CONFIG_BT_RFCOMM=m
744CONFIG_BT_RFCOMM_TTY=y 787CONFIG_BT_RFCOMM_TTY=y
@@ -775,6 +818,7 @@ CONFIG_CFG80211_WEXT=y
775CONFIG_WIRELESS_EXT_SYSFS=y 818CONFIG_WIRELESS_EXT_SYSFS=y
776# CONFIG_LIB80211 is not set 819# CONFIG_LIB80211 is not set
777CONFIG_MAC80211=m 820CONFIG_MAC80211=m
821CONFIG_MAC80211_HAS_RC=y
778CONFIG_MAC80211_RC_MINSTREL=y 822CONFIG_MAC80211_RC_MINSTREL=y
779# CONFIG_MAC80211_RC_DEFAULT_PID is not set 823# CONFIG_MAC80211_RC_DEFAULT_PID is not set
780CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y 824CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
@@ -785,6 +829,7 @@ CONFIG_MAC80211_LEDS=y
785# CONFIG_WIMAX is not set 829# CONFIG_WIMAX is not set
786# CONFIG_RFKILL is not set 830# CONFIG_RFKILL is not set
787# CONFIG_NET_9P is not set 831# CONFIG_NET_9P is not set
832# CONFIG_CAIF is not set
788 833
789# 834#
790# Device Drivers 835# Device Drivers
@@ -828,6 +873,7 @@ CONFIG_MTD_BLOCK=y
828# CONFIG_INFTL is not set 873# CONFIG_INFTL is not set
829# CONFIG_RFD_FTL is not set 874# CONFIG_RFD_FTL is not set
830# CONFIG_SSFDC is not set 875# CONFIG_SSFDC is not set
876# CONFIG_SM_FTL is not set
831# CONFIG_MTD_OOPS is not set 877# CONFIG_MTD_OOPS is not set
832 878
833# 879#
@@ -882,9 +928,12 @@ CONFIG_MTD_ROM=y
882# CONFIG_MTD_DOC2001 is not set 928# CONFIG_MTD_DOC2001 is not set
883# CONFIG_MTD_DOC2001PLUS is not set 929# CONFIG_MTD_DOC2001PLUS is not set
884CONFIG_MTD_NAND=y 930CONFIG_MTD_NAND=y
885# CONFIG_MTD_NAND_VERIFY_WRITE is not set 931CONFIG_MTD_NAND_ECC=y
886# CONFIG_MTD_NAND_ECC_SMC is not set 932# CONFIG_MTD_NAND_ECC_SMC is not set
933# CONFIG_MTD_NAND_VERIFY_WRITE is not set
934# CONFIG_MTD_SM_COMMON is not set
887# CONFIG_MTD_NAND_MUSEUM_IDS is not set 935# CONFIG_MTD_NAND_MUSEUM_IDS is not set
936CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR=0xFF108018
888# CONFIG_MTD_NAND_GPIO is not set 937# CONFIG_MTD_NAND_GPIO is not set
889CONFIG_MTD_NAND_IDS=y 938CONFIG_MTD_NAND_IDS=y
890CONFIG_MTD_NAND_S3C2410=y 939CONFIG_MTD_NAND_S3C2410=y
@@ -1149,6 +1198,7 @@ CONFIG_KEYBOARD_ATKBD=y
1149# CONFIG_QT2160 is not set 1198# CONFIG_QT2160 is not set
1150# CONFIG_KEYBOARD_LKKBD is not set 1199# CONFIG_KEYBOARD_LKKBD is not set
1151# CONFIG_KEYBOARD_GPIO is not set 1200# CONFIG_KEYBOARD_GPIO is not set
1201# CONFIG_KEYBOARD_TCA6416 is not set
1152# CONFIG_KEYBOARD_MATRIX is not set 1202# CONFIG_KEYBOARD_MATRIX is not set
1153# CONFIG_KEYBOARD_LM8323 is not set 1203# CONFIG_KEYBOARD_LM8323 is not set
1154# CONFIG_KEYBOARD_MAX7359 is not set 1204# CONFIG_KEYBOARD_MAX7359 is not set
@@ -1212,6 +1262,7 @@ CONFIG_INPUT_TOUCHSCREEN=y
1212# CONFIG_TOUCHSCREEN_AD7879_SPI is not set 1262# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
1213# CONFIG_TOUCHSCREEN_AD7879 is not set 1263# CONFIG_TOUCHSCREEN_AD7879 is not set
1214# CONFIG_TOUCHSCREEN_DYNAPRO is not set 1264# CONFIG_TOUCHSCREEN_DYNAPRO is not set
1265# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
1215# CONFIG_TOUCHSCREEN_EETI is not set 1266# CONFIG_TOUCHSCREEN_EETI is not set
1216# CONFIG_TOUCHSCREEN_FUJITSU is not set 1267# CONFIG_TOUCHSCREEN_FUJITSU is not set
1217# CONFIG_TOUCHSCREEN_S3C2410 is not set 1268# CONFIG_TOUCHSCREEN_S3C2410 is not set
@@ -1248,6 +1299,7 @@ CONFIG_TOUCHSCREEN_USB_NEXIO=y
1248# CONFIG_TOUCHSCREEN_TSC2007 is not set 1299# CONFIG_TOUCHSCREEN_TSC2007 is not set
1249# CONFIG_TOUCHSCREEN_W90X900 is not set 1300# CONFIG_TOUCHSCREEN_W90X900 is not set
1250CONFIG_INPUT_MISC=y 1301CONFIG_INPUT_MISC=y
1302# CONFIG_INPUT_AD714X is not set
1251CONFIG_INPUT_ATI_REMOTE=m 1303CONFIG_INPUT_ATI_REMOTE=m
1252CONFIG_INPUT_ATI_REMOTE2=m 1304CONFIG_INPUT_ATI_REMOTE2=m
1253CONFIG_INPUT_KEYSPAN_REMOTE=m 1305CONFIG_INPUT_KEYSPAN_REMOTE=m
@@ -1255,6 +1307,8 @@ CONFIG_INPUT_POWERMATE=m
1255CONFIG_INPUT_YEALINK=m 1307CONFIG_INPUT_YEALINK=m
1256CONFIG_INPUT_CM109=m 1308CONFIG_INPUT_CM109=m
1257CONFIG_INPUT_UINPUT=m 1309CONFIG_INPUT_UINPUT=m
1310# CONFIG_INPUT_PCF50633_PMU is not set
1311# CONFIG_INPUT_PCF8574 is not set
1258CONFIG_INPUT_GPIO_ROTARY_ENCODER=m 1312CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
1259 1313
1260# 1314#
@@ -1287,6 +1341,7 @@ CONFIG_SERIAL_NONSTANDARD=y
1287# CONFIG_MOXA_INTELLIO is not set 1341# CONFIG_MOXA_INTELLIO is not set
1288# CONFIG_MOXA_SMARTIO is not set 1342# CONFIG_MOXA_SMARTIO is not set
1289# CONFIG_N_HDLC is not set 1343# CONFIG_N_HDLC is not set
1344# CONFIG_N_GSM is not set
1290# CONFIG_RISCOM8 is not set 1345# CONFIG_RISCOM8 is not set
1291# CONFIG_SPECIALIX is not set 1346# CONFIG_SPECIALIX is not set
1292# CONFIG_STALDRV is not set 1347# CONFIG_STALDRV is not set
@@ -1324,6 +1379,8 @@ CONFIG_SERIAL_S3C2440=y
1324CONFIG_SERIAL_CORE=y 1379CONFIG_SERIAL_CORE=y
1325CONFIG_SERIAL_CORE_CONSOLE=y 1380CONFIG_SERIAL_CORE_CONSOLE=y
1326# CONFIG_SERIAL_TIMBERDALE is not set 1381# CONFIG_SERIAL_TIMBERDALE is not set
1382# CONFIG_SERIAL_ALTERA_JTAGUART is not set
1383# CONFIG_SERIAL_ALTERA_UART is not set
1327CONFIG_UNIX98_PTYS=y 1384CONFIG_UNIX98_PTYS=y
1328# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 1385# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
1329CONFIG_LEGACY_PTYS=y 1386CONFIG_LEGACY_PTYS=y
@@ -1439,7 +1496,16 @@ CONFIG_GPIOLIB=y
1439# AC97 GPIO expanders: 1496# AC97 GPIO expanders:
1440# 1497#
1441# CONFIG_W1 is not set 1498# CONFIG_W1 is not set
1442# CONFIG_POWER_SUPPLY is not set 1499CONFIG_POWER_SUPPLY=y
1500# CONFIG_POWER_SUPPLY_DEBUG is not set
1501# CONFIG_PDA_POWER is not set
1502# CONFIG_APM_POWER is not set
1503# CONFIG_TEST_POWER is not set
1504# CONFIG_BATTERY_DS2760 is not set
1505# CONFIG_BATTERY_DS2782 is not set
1506# CONFIG_BATTERY_BQ27x00 is not set
1507# CONFIG_BATTERY_MAX17040 is not set
1508# CONFIG_CHARGER_PCF50633 is not set
1443CONFIG_HWMON=y 1509CONFIG_HWMON=y
1444CONFIG_HWMON_VID=m 1510CONFIG_HWMON_VID=m
1445# CONFIG_HWMON_DEBUG_CHIP is not set 1511# CONFIG_HWMON_DEBUG_CHIP is not set
@@ -1499,6 +1565,7 @@ CONFIG_SENSORS_LM85=m
1499# CONFIG_SENSORS_SMSC47M192 is not set 1565# CONFIG_SENSORS_SMSC47M192 is not set
1500# CONFIG_SENSORS_SMSC47B397 is not set 1566# CONFIG_SENSORS_SMSC47B397 is not set
1501# CONFIG_SENSORS_ADS7828 is not set 1567# CONFIG_SENSORS_ADS7828 is not set
1568# CONFIG_SENSORS_ADS7871 is not set
1502# CONFIG_SENSORS_AMC6821 is not set 1569# CONFIG_SENSORS_AMC6821 is not set
1503# CONFIG_SENSORS_THMC50 is not set 1570# CONFIG_SENSORS_THMC50 is not set
1504# CONFIG_SENSORS_TMP401 is not set 1571# CONFIG_SENSORS_TMP401 is not set
@@ -1555,7 +1622,7 @@ CONFIG_MFD_SM501=y
1555# CONFIG_HTC_PASIC3 is not set 1622# CONFIG_HTC_PASIC3 is not set
1556# CONFIG_HTC_I2CPLD is not set 1623# CONFIG_HTC_I2CPLD is not set
1557# CONFIG_UCB1400_CORE is not set 1624# CONFIG_UCB1400_CORE is not set
1558# CONFIG_TPS65010 is not set 1625CONFIG_TPS65010=m
1559# CONFIG_TWL4030_CORE is not set 1626# CONFIG_TWL4030_CORE is not set
1560# CONFIG_MFD_TMIO is not set 1627# CONFIG_MFD_TMIO is not set
1561# CONFIG_MFD_T7L66XB is not set 1628# CONFIG_MFD_T7L66XB is not set
@@ -1568,8 +1635,10 @@ CONFIG_MFD_SM501=y
1568# CONFIG_MFD_WM831X is not set 1635# CONFIG_MFD_WM831X is not set
1569# CONFIG_MFD_WM8350_I2C is not set 1636# CONFIG_MFD_WM8350_I2C is not set
1570# CONFIG_MFD_WM8994 is not set 1637# CONFIG_MFD_WM8994 is not set
1571# CONFIG_MFD_PCF50633 is not set 1638CONFIG_MFD_PCF50633=y
1572# CONFIG_MFD_MC13783 is not set 1639# CONFIG_MFD_MC13783 is not set
1640# CONFIG_PCF50633_ADC is not set
1641CONFIG_PCF50633_GPIO=y
1573# CONFIG_AB3100_CORE is not set 1642# CONFIG_AB3100_CORE is not set
1574# CONFIG_EZX_PCAP is not set 1643# CONFIG_EZX_PCAP is not set
1575# CONFIG_AB4500_CORE is not set 1644# CONFIG_AB4500_CORE is not set
@@ -1685,6 +1754,7 @@ CONFIG_SND_S3C24XX_SOC_I2S=y
1685CONFIG_SND_S3C_I2SV2_SOC=m 1754CONFIG_SND_S3C_I2SV2_SOC=m
1686CONFIG_SND_S3C2412_SOC_I2S=m 1755CONFIG_SND_S3C2412_SOC_I2S=m
1687CONFIG_SND_S3C_SOC_AC97=m 1756CONFIG_SND_S3C_SOC_AC97=m
1757# CONFIG_SND_S3C24XX_SOC_NEO1973_GTA02_WM8753 is not set
1688CONFIG_SND_S3C24XX_SOC_JIVE_WM8750=m 1758CONFIG_SND_S3C24XX_SOC_JIVE_WM8750=m
1689CONFIG_SND_S3C24XX_SOC_SMDK2443_WM9710=m 1759CONFIG_SND_S3C24XX_SOC_SMDK2443_WM9710=m
1690CONFIG_SND_S3C24XX_SOC_LN2440SBC_ALC650=m 1760CONFIG_SND_S3C24XX_SOC_LN2440SBC_ALC650=m
@@ -1836,10 +1906,12 @@ CONFIG_USB_SERIAL_PL2303=y
1836# CONFIG_USB_SERIAL_TI is not set 1906# CONFIG_USB_SERIAL_TI is not set
1837# CONFIG_USB_SERIAL_CYBERJACK is not set 1907# CONFIG_USB_SERIAL_CYBERJACK is not set
1838# CONFIG_USB_SERIAL_XIRCOM is not set 1908# CONFIG_USB_SERIAL_XIRCOM is not set
1909CONFIG_USB_SERIAL_WWAN=m
1839CONFIG_USB_SERIAL_OPTION=m 1910CONFIG_USB_SERIAL_OPTION=m
1840# CONFIG_USB_SERIAL_OMNINET is not set 1911# CONFIG_USB_SERIAL_OMNINET is not set
1841# CONFIG_USB_SERIAL_OPTICON is not set 1912# CONFIG_USB_SERIAL_OPTICON is not set
1842# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set 1913# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
1914# CONFIG_USB_SERIAL_ZIO is not set
1843# CONFIG_USB_SERIAL_DEBUG is not set 1915# CONFIG_USB_SERIAL_DEBUG is not set
1844 1916
1845# 1917#
@@ -1991,6 +2063,7 @@ CONFIG_RTC_INTF_DEV=y
1991# CONFIG_RTC_DRV_BQ4802 is not set 2063# CONFIG_RTC_DRV_BQ4802 is not set
1992# CONFIG_RTC_DRV_RP5C01 is not set 2064# CONFIG_RTC_DRV_RP5C01 is not set
1993# CONFIG_RTC_DRV_V3020 is not set 2065# CONFIG_RTC_DRV_V3020 is not set
2066# CONFIG_RTC_DRV_PCF50633 is not set
1994 2067
1995# 2068#
1996# on-CPU RTC drivers 2069# on-CPU RTC drivers
@@ -1999,10 +2072,6 @@ CONFIG_RTC_DRV_S3C=y
1999# CONFIG_DMADEVICES is not set 2072# CONFIG_DMADEVICES is not set
2000# CONFIG_AUXDISPLAY is not set 2073# CONFIG_AUXDISPLAY is not set
2001# CONFIG_UIO is not set 2074# CONFIG_UIO is not set
2002
2003#
2004# TI VLYNQ
2005#
2006# CONFIG_STAGING is not set 2075# CONFIG_STAGING is not set
2007 2076
2008# 2077#
@@ -2274,6 +2343,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y
2274CONFIG_TRACING_SUPPORT=y 2343CONFIG_TRACING_SUPPORT=y
2275CONFIG_FTRACE=y 2344CONFIG_FTRACE=y
2276# CONFIG_FUNCTION_TRACER is not set 2345# CONFIG_FUNCTION_TRACER is not set
2346# CONFIG_IRQSOFF_TRACER is not set
2277# CONFIG_SCHED_TRACER is not set 2347# CONFIG_SCHED_TRACER is not set
2278# CONFIG_ENABLE_DEFAULT_TRACERS is not set 2348# CONFIG_ENABLE_DEFAULT_TRACERS is not set
2279# CONFIG_BOOT_TRACER is not set 2349# CONFIG_BOOT_TRACER is not set
@@ -2284,6 +2354,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
2284# CONFIG_KMEMTRACE is not set 2354# CONFIG_KMEMTRACE is not set
2285# CONFIG_WORKQUEUE_TRACER is not set 2355# CONFIG_WORKQUEUE_TRACER is not set
2286# CONFIG_BLK_DEV_IO_TRACE is not set 2356# CONFIG_BLK_DEV_IO_TRACE is not set
2357# CONFIG_ATOMIC64_SELFTEST is not set
2287# CONFIG_SAMPLES is not set 2358# CONFIG_SAMPLES is not set
2288CONFIG_HAVE_ARCH_KGDB=y 2359CONFIG_HAVE_ARCH_KGDB=y
2289# CONFIG_KGDB is not set 2360# CONFIG_KGDB is not set
diff --git a/arch/arm/configs/s3c6400_defconfig b/arch/arm/configs/s3c6400_defconfig
index a3a9993e5cd0..2b642386f030 100644
--- a/arch/arm/configs/s3c6400_defconfig
+++ b/arch/arm/configs/s3c6400_defconfig
@@ -1,11 +1,14 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.34 3# Linux kernel version: 2.6.34
4# Sat May 22 03:17:32 2010 4# Fri May 28 19:05:39 2010
5# 5#
6CONFIG_ARM=y 6CONFIG_ARM=y
7CONFIG_HAVE_PWM=y
7CONFIG_SYS_SUPPORTS_APM_EMULATION=y 8CONFIG_SYS_SUPPORTS_APM_EMULATION=y
8CONFIG_GENERIC_GPIO=y 9CONFIG_GENERIC_GPIO=y
10CONFIG_GENERIC_TIME=y
11CONFIG_ARCH_USES_GETTIMEOFFSET=y
9CONFIG_HAVE_PROC_CPU=y 12CONFIG_HAVE_PROC_CPU=y
10CONFIG_NO_IOPORT=y 13CONFIG_NO_IOPORT=y
11CONFIG_GENERIC_HARDIRQS=y 14CONFIG_GENERIC_HARDIRQS=y
@@ -34,6 +37,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
34CONFIG_LOCALVERSION="" 37CONFIG_LOCALVERSION=""
35CONFIG_LOCALVERSION_AUTO=y 38CONFIG_LOCALVERSION_AUTO=y
36CONFIG_HAVE_KERNEL_GZIP=y 39CONFIG_HAVE_KERNEL_GZIP=y
40CONFIG_HAVE_KERNEL_LZMA=y
37CONFIG_HAVE_KERNEL_LZO=y 41CONFIG_HAVE_KERNEL_LZO=y
38CONFIG_KERNEL_GZIP=y 42CONFIG_KERNEL_GZIP=y
39# CONFIG_KERNEL_BZIP2 is not set 43# CONFIG_KERNEL_BZIP2 is not set
@@ -179,9 +183,11 @@ CONFIG_MMU=y
179# CONFIG_ARCH_INTEGRATOR is not set 183# CONFIG_ARCH_INTEGRATOR is not set
180# CONFIG_ARCH_REALVIEW is not set 184# CONFIG_ARCH_REALVIEW is not set
181# CONFIG_ARCH_VERSATILE is not set 185# CONFIG_ARCH_VERSATILE is not set
186# CONFIG_ARCH_VEXPRESS is not set
182# CONFIG_ARCH_AT91 is not set 187# CONFIG_ARCH_AT91 is not set
183# CONFIG_ARCH_BCMRING is not set 188# CONFIG_ARCH_BCMRING is not set
184# CONFIG_ARCH_CLPS711X is not set 189# CONFIG_ARCH_CLPS711X is not set
190# CONFIG_ARCH_CNS3XXX is not set
185# CONFIG_ARCH_GEMINI is not set 191# CONFIG_ARCH_GEMINI is not set
186# CONFIG_ARCH_EBSA110 is not set 192# CONFIG_ARCH_EBSA110 is not set
187# CONFIG_ARCH_EP93XX is not set 193# CONFIG_ARCH_EP93XX is not set
@@ -217,7 +223,7 @@ CONFIG_MMU=y
217CONFIG_ARCH_S3C64XX=y 223CONFIG_ARCH_S3C64XX=y
218# CONFIG_ARCH_S5P6440 is not set 224# CONFIG_ARCH_S5P6440 is not set
219# CONFIG_ARCH_S5P6442 is not set 225# CONFIG_ARCH_S5P6442 is not set
220# CONFIG_ARCH_S5PC1XX is not set 226# CONFIG_ARCH_S5PC100 is not set
221# CONFIG_ARCH_S5PV210 is not set 227# CONFIG_ARCH_S5PV210 is not set
222# CONFIG_ARCH_SHARK is not set 228# CONFIG_ARCH_SHARK is not set
223# CONFIG_ARCH_LH7A40X is not set 229# CONFIG_ARCH_LH7A40X is not set
@@ -226,6 +232,7 @@ CONFIG_ARCH_S3C64XX=y
226# CONFIG_ARCH_NOMADIK is not set 232# CONFIG_ARCH_NOMADIK is not set
227# CONFIG_ARCH_DAVINCI is not set 233# CONFIG_ARCH_DAVINCI is not set
228# CONFIG_ARCH_OMAP is not set 234# CONFIG_ARCH_OMAP is not set
235# CONFIG_PLAT_SPEAR is not set
229CONFIG_PLAT_SAMSUNG=y 236CONFIG_PLAT_SAMSUNG=y
230 237
231# 238#
@@ -247,11 +254,17 @@ CONFIG_S3C_GPIO_TRACK=y
247# CONFIG_S3C_ADC is not set 254# CONFIG_S3C_ADC is not set
248CONFIG_S3C_DEV_HSMMC=y 255CONFIG_S3C_DEV_HSMMC=y
249CONFIG_S3C_DEV_HSMMC1=y 256CONFIG_S3C_DEV_HSMMC1=y
257CONFIG_S3C_DEV_HSMMC2=y
258CONFIG_S3C_DEV_HWMON=y
250CONFIG_S3C_DEV_I2C1=y 259CONFIG_S3C_DEV_I2C1=y
251CONFIG_S3C_DEV_FB=y 260CONFIG_S3C_DEV_FB=y
252CONFIG_S3C_DEV_USB_HOST=y 261CONFIG_S3C_DEV_USB_HOST=y
253CONFIG_S3C_DEV_USB_HSOTG=y 262CONFIG_S3C_DEV_USB_HSOTG=y
263CONFIG_S3C_DEV_WDT=y
254CONFIG_S3C_DEV_NAND=y 264CONFIG_S3C_DEV_NAND=y
265CONFIG_S3C_DEV_RTC=y
266CONFIG_SAMSUNG_DEV_ADC=y
267CONFIG_SAMSUNG_DEV_TS=y
255CONFIG_S3C_DMA=y 268CONFIG_S3C_DMA=y
256 269
257# 270#
@@ -260,7 +273,9 @@ CONFIG_S3C_DMA=y
260# CONFIG_SAMSUNG_PM_DEBUG is not set 273# CONFIG_SAMSUNG_PM_DEBUG is not set
261# CONFIG_S3C_PM_DEBUG_LED_SMDK is not set 274# CONFIG_S3C_PM_DEBUG_LED_SMDK is not set
262# CONFIG_SAMSUNG_PM_CHECK is not set 275# CONFIG_SAMSUNG_PM_CHECK is not set
276CONFIG_SAMSUNG_WAKEMASK=y
263CONFIG_PLAT_S3C64XX=y 277CONFIG_PLAT_S3C64XX=y
278CONFIG_CPU_S3C6400=y
264CONFIG_CPU_S3C6410=y 279CONFIG_CPU_S3C6410=y
265CONFIG_S3C64XX_DMA=y 280CONFIG_S3C64XX_DMA=y
266CONFIG_S3C64XX_SETUP_SDHCI=y 281CONFIG_S3C64XX_SETUP_SDHCI=y
@@ -268,15 +283,18 @@ CONFIG_S3C64XX_SETUP_I2C0=y
268CONFIG_S3C64XX_SETUP_I2C1=y 283CONFIG_S3C64XX_SETUP_I2C1=y
269CONFIG_S3C64XX_SETUP_FB_24BPP=y 284CONFIG_S3C64XX_SETUP_FB_24BPP=y
270CONFIG_S3C64XX_SETUP_SDHCI_GPIO=y 285CONFIG_S3C64XX_SETUP_SDHCI_GPIO=y
271# CONFIG_MACH_SMDK6400 is not set 286CONFIG_MACH_SMDK6400=y
272# CONFIG_MACH_ANW6410 is not set 287CONFIG_MACH_ANW6410=y
273CONFIG_MACH_SMDK6410=y 288CONFIG_MACH_SMDK6410=y
274CONFIG_SMDK6410_SD_CH0=y 289CONFIG_SMDK6410_SD_CH0=y
275# CONFIG_SMDK6410_SD_CH1 is not set 290# CONFIG_SMDK6410_SD_CH1 is not set
276# CONFIG_SMDK6410_WM1190_EV1 is not set 291# CONFIG_SMDK6410_WM1190_EV1 is not set
277# CONFIG_SMDK6410_WM1192_EV1 is not set 292# CONFIG_SMDK6410_WM1192_EV1 is not set
278# CONFIG_MACH_NCP is not set 293CONFIG_MACH_NCP=y
279# CONFIG_MACH_HMT is not set 294CONFIG_MACH_HMT=y
295CONFIG_MACH_SMARTQ=y
296CONFIG_MACH_SMARTQ5=y
297CONFIG_MACH_SMARTQ7=y
280 298
281# 299#
282# Processor Type 300# Processor Type
@@ -302,6 +320,7 @@ CONFIG_ARM_THUMB=y
302# CONFIG_CPU_DCACHE_DISABLE is not set 320# CONFIG_CPU_DCACHE_DISABLE is not set
303# CONFIG_CPU_BPREDICT_DISABLE is not set 321# CONFIG_CPU_BPREDICT_DISABLE is not set
304CONFIG_ARM_L1_CACHE_SHIFT=5 322CONFIG_ARM_L1_CACHE_SHIFT=5
323CONFIG_ARM_DMA_MEM_BUFFERABLE=y
305CONFIG_CPU_HAS_PMU=y 324CONFIG_CPU_HAS_PMU=y
306# CONFIG_ARM_ERRATA_411920 is not set 325# CONFIG_ARM_ERRATA_411920 is not set
307CONFIG_ARM_VIC=y 326CONFIG_ARM_VIC=y
@@ -352,6 +371,7 @@ CONFIG_ALIGNMENT_TRAP=y
352CONFIG_ZBOOT_ROM_TEXT=0 371CONFIG_ZBOOT_ROM_TEXT=0
353CONFIG_ZBOOT_ROM_BSS=0 372CONFIG_ZBOOT_ROM_BSS=0
354CONFIG_CMDLINE="console=ttySAC0,115200 root=/dev/ram init=/linuxrc initrd=0x51000000,6M ramdisk_size=6144" 373CONFIG_CMDLINE="console=ttySAC0,115200 root=/dev/ram init=/linuxrc initrd=0x51000000,6M ramdisk_size=6144"
374# CONFIG_CMDLINE_FORCE is not set
355# CONFIG_XIP_KERNEL is not set 375# CONFIG_XIP_KERNEL is not set
356# CONFIG_KEXEC is not set 376# CONFIG_KEXEC is not set
357 377
@@ -430,6 +450,7 @@ CONFIG_MTD=y
430# CONFIG_INFTL is not set 450# CONFIG_INFTL is not set
431# CONFIG_RFD_FTL is not set 451# CONFIG_RFD_FTL is not set
432# CONFIG_SSFDC is not set 452# CONFIG_SSFDC is not set
453# CONFIG_SM_FTL is not set
433# CONFIG_MTD_OOPS is not set 454# CONFIG_MTD_OOPS is not set
434 455
435# 456#
@@ -460,6 +481,9 @@ CONFIG_MTD_CFI_I2=y
460# 481#
461# Self-contained MTD device drivers 482# Self-contained MTD device drivers
462# 483#
484# CONFIG_MTD_DATAFLASH is not set
485# CONFIG_MTD_M25P80 is not set
486# CONFIG_MTD_SST25L is not set
463# CONFIG_MTD_SLRAM is not set 487# CONFIG_MTD_SLRAM is not set
464# CONFIG_MTD_PHRAM is not set 488# CONFIG_MTD_PHRAM is not set
465# CONFIG_MTD_MTDRAM is not set 489# CONFIG_MTD_MTDRAM is not set
@@ -472,9 +496,12 @@ CONFIG_MTD_CFI_I2=y
472# CONFIG_MTD_DOC2001 is not set 496# CONFIG_MTD_DOC2001 is not set
473# CONFIG_MTD_DOC2001PLUS is not set 497# CONFIG_MTD_DOC2001PLUS is not set
474CONFIG_MTD_NAND=y 498CONFIG_MTD_NAND=y
475# CONFIG_MTD_NAND_VERIFY_WRITE is not set 499CONFIG_MTD_NAND_ECC=y
476# CONFIG_MTD_NAND_ECC_SMC is not set 500# CONFIG_MTD_NAND_ECC_SMC is not set
501# CONFIG_MTD_NAND_VERIFY_WRITE is not set
502# CONFIG_MTD_SM_COMMON is not set
477# CONFIG_MTD_NAND_MUSEUM_IDS is not set 503# CONFIG_MTD_NAND_MUSEUM_IDS is not set
504CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR=0xFF108018
478# CONFIG_MTD_NAND_GPIO is not set 505# CONFIG_MTD_NAND_GPIO is not set
479CONFIG_MTD_NAND_IDS=y 506CONFIG_MTD_NAND_IDS=y
480CONFIG_MTD_NAND_S3C2410=y 507CONFIG_MTD_NAND_S3C2410=y
@@ -483,6 +510,7 @@ CONFIG_MTD_NAND_S3C2410=y
483# CONFIG_MTD_NAND_S3C2410_CLKSTOP is not set 510# CONFIG_MTD_NAND_S3C2410_CLKSTOP is not set
484# CONFIG_MTD_NAND_DISKONCHIP is not set 511# CONFIG_MTD_NAND_DISKONCHIP is not set
485# CONFIG_MTD_NAND_PLATFORM is not set 512# CONFIG_MTD_NAND_PLATFORM is not set
513# CONFIG_MTD_ALAUDA is not set
486# CONFIG_MTD_ONENAND is not set 514# CONFIG_MTD_ONENAND is not set
487 515
488# 516#
@@ -503,6 +531,7 @@ CONFIG_BLK_DEV_LOOP=y
503# 531#
504# DRBD disabled because PROC_FS, INET or CONNECTOR not selected 532# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
505# 533#
534# CONFIG_BLK_DEV_UB is not set
506CONFIG_BLK_DEV_RAM=y 535CONFIG_BLK_DEV_RAM=y
507CONFIG_BLK_DEV_RAM_COUNT=16 536CONFIG_BLK_DEV_RAM_COUNT=16
508CONFIG_BLK_DEV_RAM_SIZE=4096 537CONFIG_BLK_DEV_RAM_SIZE=4096
@@ -516,12 +545,14 @@ CONFIG_MISC_DEVICES=y
516# CONFIG_ISL29003 is not set 545# CONFIG_ISL29003 is not set
517# CONFIG_SENSORS_TSL2550 is not set 546# CONFIG_SENSORS_TSL2550 is not set
518# CONFIG_DS1682 is not set 547# CONFIG_DS1682 is not set
548# CONFIG_TI_DAC7512 is not set
519# CONFIG_C2PORT is not set 549# CONFIG_C2PORT is not set
520 550
521# 551#
522# EEPROM support 552# EEPROM support
523# 553#
524CONFIG_EEPROM_AT24=y 554CONFIG_EEPROM_AT24=y
555# CONFIG_EEPROM_AT25 is not set
525# CONFIG_EEPROM_LEGACY is not set 556# CONFIG_EEPROM_LEGACY is not set
526# CONFIG_EEPROM_MAX6875 is not set 557# CONFIG_EEPROM_MAX6875 is not set
527# CONFIG_EEPROM_93CX6 is not set 558# CONFIG_EEPROM_93CX6 is not set
@@ -569,6 +600,7 @@ CONFIG_KEYBOARD_ATKBD=y
569# CONFIG_QT2160 is not set 600# CONFIG_QT2160 is not set
570# CONFIG_KEYBOARD_LKKBD is not set 601# CONFIG_KEYBOARD_LKKBD is not set
571# CONFIG_KEYBOARD_GPIO is not set 602# CONFIG_KEYBOARD_GPIO is not set
603# CONFIG_KEYBOARD_TCA6416 is not set
572# CONFIG_KEYBOARD_MATRIX is not set 604# CONFIG_KEYBOARD_MATRIX is not set
573# CONFIG_KEYBOARD_MAX7359 is not set 605# CONFIG_KEYBOARD_MAX7359 is not set
574# CONFIG_KEYBOARD_NEWTON is not set 606# CONFIG_KEYBOARD_NEWTON is not set
@@ -635,9 +667,12 @@ CONFIG_SERIAL_SAMSUNG_UARTS=4
635# CONFIG_SERIAL_SAMSUNG_DEBUG is not set 667# CONFIG_SERIAL_SAMSUNG_DEBUG is not set
636CONFIG_SERIAL_SAMSUNG_CONSOLE=y 668CONFIG_SERIAL_SAMSUNG_CONSOLE=y
637CONFIG_SERIAL_S3C6400=y 669CONFIG_SERIAL_S3C6400=y
670# CONFIG_SERIAL_MAX3100 is not set
638CONFIG_SERIAL_CORE=y 671CONFIG_SERIAL_CORE=y
639CONFIG_SERIAL_CORE_CONSOLE=y 672CONFIG_SERIAL_CORE_CONSOLE=y
640# CONFIG_SERIAL_TIMBERDALE is not set 673# CONFIG_SERIAL_TIMBERDALE is not set
674# CONFIG_SERIAL_ALTERA_JTAGUART is not set
675# CONFIG_SERIAL_ALTERA_UART is not set
641CONFIG_UNIX98_PTYS=y 676CONFIG_UNIX98_PTYS=y
642# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 677# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
643CONFIG_LEGACY_PTYS=y 678CONFIG_LEGACY_PTYS=y
@@ -673,6 +708,7 @@ CONFIG_I2C_S3C2410=y
673# 708#
674# CONFIG_I2C_PARPORT_LIGHT is not set 709# CONFIG_I2C_PARPORT_LIGHT is not set
675# CONFIG_I2C_TAOS_EVM is not set 710# CONFIG_I2C_TAOS_EVM is not set
711# CONFIG_I2C_TINY_USB is not set
676 712
677# 713#
678# Other I2C/SMBus bus drivers 714# Other I2C/SMBus bus drivers
@@ -682,7 +718,24 @@ CONFIG_I2C_S3C2410=y
682# CONFIG_I2C_DEBUG_CORE is not set 718# CONFIG_I2C_DEBUG_CORE is not set
683# CONFIG_I2C_DEBUG_ALGO is not set 719# CONFIG_I2C_DEBUG_ALGO is not set
684# CONFIG_I2C_DEBUG_BUS is not set 720# CONFIG_I2C_DEBUG_BUS is not set
685# CONFIG_SPI is not set 721CONFIG_SPI=y
722# CONFIG_SPI_DEBUG is not set
723CONFIG_SPI_MASTER=y
724
725#
726# SPI Master Controller Drivers
727#
728CONFIG_SPI_BITBANG=m
729CONFIG_SPI_GPIO=m
730CONFIG_SPI_S3C64XX=m
731# CONFIG_SPI_XILINX is not set
732# CONFIG_SPI_DESIGNWARE is not set
733
734#
735# SPI Protocol Masters
736#
737# CONFIG_SPI_SPIDEV is not set
738# CONFIG_SPI_TLE62X0 is not set
686 739
687# 740#
688# PPS support 741# PPS support
@@ -714,6 +767,9 @@ CONFIG_GPIOLIB=y
714# 767#
715# SPI GPIO expanders: 768# SPI GPIO expanders:
716# 769#
770# CONFIG_GPIO_MAX7301 is not set
771# CONFIG_GPIO_MCP23S08 is not set
772# CONFIG_GPIO_MC33880 is not set
717 773
718# 774#
719# AC97 GPIO expanders: 775# AC97 GPIO expanders:
@@ -729,6 +785,7 @@ CONFIG_HWMON=y
729# 785#
730# CONFIG_SENSORS_AD7414 is not set 786# CONFIG_SENSORS_AD7414 is not set
731# CONFIG_SENSORS_AD7418 is not set 787# CONFIG_SENSORS_AD7418 is not set
788# CONFIG_SENSORS_ADCXX is not set
732# CONFIG_SENSORS_ADM1021 is not set 789# CONFIG_SENSORS_ADM1021 is not set
733# CONFIG_SENSORS_ADM1025 is not set 790# CONFIG_SENSORS_ADM1025 is not set
734# CONFIG_SENSORS_ADM1026 is not set 791# CONFIG_SENSORS_ADM1026 is not set
@@ -750,6 +807,7 @@ CONFIG_HWMON=y
750# CONFIG_SENSORS_GL520SM is not set 807# CONFIG_SENSORS_GL520SM is not set
751# CONFIG_SENSORS_IT87 is not set 808# CONFIG_SENSORS_IT87 is not set
752# CONFIG_SENSORS_LM63 is not set 809# CONFIG_SENSORS_LM63 is not set
810# CONFIG_SENSORS_LM70 is not set
753# CONFIG_SENSORS_LM73 is not set 811# CONFIG_SENSORS_LM73 is not set
754# CONFIG_SENSORS_LM75 is not set 812# CONFIG_SENSORS_LM75 is not set
755# CONFIG_SENSORS_LM77 is not set 813# CONFIG_SENSORS_LM77 is not set
@@ -764,6 +822,7 @@ CONFIG_HWMON=y
764# CONFIG_SENSORS_LTC4215 is not set 822# CONFIG_SENSORS_LTC4215 is not set
765# CONFIG_SENSORS_LTC4245 is not set 823# CONFIG_SENSORS_LTC4245 is not set
766# CONFIG_SENSORS_LM95241 is not set 824# CONFIG_SENSORS_LM95241 is not set
825# CONFIG_SENSORS_MAX1111 is not set
767# CONFIG_SENSORS_MAX1619 is not set 826# CONFIG_SENSORS_MAX1619 is not set
768# CONFIG_SENSORS_MAX6650 is not set 827# CONFIG_SENSORS_MAX6650 is not set
769# CONFIG_SENSORS_PC87360 is not set 828# CONFIG_SENSORS_PC87360 is not set
@@ -775,6 +834,7 @@ CONFIG_HWMON=y
775# CONFIG_SENSORS_SMSC47M192 is not set 834# CONFIG_SENSORS_SMSC47M192 is not set
776# CONFIG_SENSORS_SMSC47B397 is not set 835# CONFIG_SENSORS_SMSC47B397 is not set
777# CONFIG_SENSORS_ADS7828 is not set 836# CONFIG_SENSORS_ADS7828 is not set
837# CONFIG_SENSORS_ADS7871 is not set
778# CONFIG_SENSORS_AMC6821 is not set 838# CONFIG_SENSORS_AMC6821 is not set
779# CONFIG_SENSORS_THMC50 is not set 839# CONFIG_SENSORS_THMC50 is not set
780# CONFIG_SENSORS_TMP401 is not set 840# CONFIG_SENSORS_TMP401 is not set
@@ -788,9 +848,11 @@ CONFIG_HWMON=y
788# CONFIG_SENSORS_W83L786NG is not set 848# CONFIG_SENSORS_W83L786NG is not set
789# CONFIG_SENSORS_W83627HF is not set 849# CONFIG_SENSORS_W83627HF is not set
790# CONFIG_SENSORS_W83627EHF is not set 850# CONFIG_SENSORS_W83627EHF is not set
851# CONFIG_SENSORS_LIS3_SPI is not set
791# CONFIG_SENSORS_LIS3_I2C is not set 852# CONFIG_SENSORS_LIS3_I2C is not set
792# CONFIG_THERMAL is not set 853# CONFIG_THERMAL is not set
793# CONFIG_WATCHDOG is not set 854# CONFIG_WATCHDOG is not set
855CONFIG_HAVE_S3C2410_WATCHDOG=y
794CONFIG_SSB_POSSIBLE=y 856CONFIG_SSB_POSSIBLE=y
795 857
796# 858#
@@ -823,7 +885,10 @@ CONFIG_SSB_POSSIBLE=y
823# CONFIG_MFD_WM8350_I2C is not set 885# CONFIG_MFD_WM8350_I2C is not set
824# CONFIG_MFD_WM8994 is not set 886# CONFIG_MFD_WM8994 is not set
825# CONFIG_MFD_PCF50633 is not set 887# CONFIG_MFD_PCF50633 is not set
888# CONFIG_MFD_MC13783 is not set
826# CONFIG_AB3100_CORE is not set 889# CONFIG_AB3100_CORE is not set
890# CONFIG_EZX_PCAP is not set
891# CONFIG_AB4500_CORE is not set
827# CONFIG_REGULATOR is not set 892# CONFIG_REGULATOR is not set
828# CONFIG_MEDIA_SUPPORT is not set 893# CONFIG_MEDIA_SUPPORT is not set
829 894
@@ -832,8 +897,47 @@ CONFIG_SSB_POSSIBLE=y
832# 897#
833# CONFIG_VGASTATE is not set 898# CONFIG_VGASTATE is not set
834# CONFIG_VIDEO_OUTPUT_CONTROL is not set 899# CONFIG_VIDEO_OUTPUT_CONTROL is not set
835# CONFIG_FB is not set 900CONFIG_FB=y
836# CONFIG_BACKLIGHT_LCD_SUPPORT is not set 901# CONFIG_FIRMWARE_EDID is not set
902# CONFIG_FB_DDC is not set
903# CONFIG_FB_BOOT_VESA_SUPPORT is not set
904CONFIG_FB_CFB_FILLRECT=y
905CONFIG_FB_CFB_COPYAREA=y
906CONFIG_FB_CFB_IMAGEBLIT=y
907# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
908# CONFIG_FB_SYS_FILLRECT is not set
909# CONFIG_FB_SYS_COPYAREA is not set
910# CONFIG_FB_SYS_IMAGEBLIT is not set
911# CONFIG_FB_FOREIGN_ENDIAN is not set
912# CONFIG_FB_SYS_FOPS is not set
913# CONFIG_FB_SVGALIB is not set
914# CONFIG_FB_MACMODES is not set
915# CONFIG_FB_BACKLIGHT is not set
916# CONFIG_FB_MODE_HELPERS is not set
917# CONFIG_FB_TILEBLITTING is not set
918
919#
920# Frame buffer hardware drivers
921#
922# CONFIG_FB_S1D13XXX is not set
923CONFIG_FB_S3C=y
924# CONFIG_FB_S3C_DEBUG_REGWRITE is not set
925# CONFIG_FB_VIRTUAL is not set
926# CONFIG_FB_METRONOME is not set
927# CONFIG_FB_MB862XX is not set
928# CONFIG_FB_BROADSHEET is not set
929CONFIG_BACKLIGHT_LCD_SUPPORT=y
930CONFIG_LCD_CLASS_DEVICE=y
931# CONFIG_LCD_L4F00242T03 is not set
932# CONFIG_LCD_LMS283GF05 is not set
933CONFIG_LCD_LTV350QV=y
934# CONFIG_LCD_ILI9320 is not set
935# CONFIG_LCD_TDO24M is not set
936# CONFIG_LCD_VGG2432A4 is not set
937# CONFIG_LCD_PLATFORM is not set
938CONFIG_BACKLIGHT_CLASS_DEVICE=y
939CONFIG_BACKLIGHT_GENERIC=y
940CONFIG_BACKLIGHT_PWM=y
837 941
838# 942#
839# Display device support 943# Display device support
@@ -845,6 +949,8 @@ CONFIG_SSB_POSSIBLE=y
845# 949#
846# CONFIG_VGA_CONSOLE is not set 950# CONFIG_VGA_CONSOLE is not set
847CONFIG_DUMMY_CONSOLE=y 951CONFIG_DUMMY_CONSOLE=y
952# CONFIG_FRAMEBUFFER_CONSOLE is not set
953# CONFIG_LOGO is not set
848CONFIG_SOUND=y 954CONFIG_SOUND=y
849CONFIG_SOUND_OSS_CORE=y 955CONFIG_SOUND_OSS_CORE=y
850CONFIG_SOUND_OSS_CORE_PRECLAIM=y 956CONFIG_SOUND_OSS_CORE_PRECLAIM=y
@@ -873,10 +979,16 @@ CONFIG_SND_DRIVERS=y
873# CONFIG_SND_SERIAL_U16550 is not set 979# CONFIG_SND_SERIAL_U16550 is not set
874# CONFIG_SND_MPU401 is not set 980# CONFIG_SND_MPU401 is not set
875CONFIG_SND_ARM=y 981CONFIG_SND_ARM=y
982CONFIG_SND_SPI=y
983CONFIG_SND_USB=y
984# CONFIG_SND_USB_AUDIO is not set
985# CONFIG_SND_USB_UA101 is not set
986# CONFIG_SND_USB_CAIAQ is not set
876CONFIG_SND_SOC=m 987CONFIG_SND_SOC=m
877CONFIG_SND_SOC_AC97_BUS=y 988CONFIG_SND_SOC_AC97_BUS=y
878CONFIG_SND_S3C24XX_SOC=m 989CONFIG_SND_S3C24XX_SOC=m
879CONFIG_SND_S3C_SOC_AC97=m 990CONFIG_SND_S3C_SOC_AC97=m
991# CONFIG_SND_S3C64XX_SOC_WM8580 is not set
880CONFIG_SND_SOC_SMDK_WM9713=m 992CONFIG_SND_SOC_SMDK_WM9713=m
881CONFIG_SND_SOC_I2C_AND_SPI=m 993CONFIG_SND_SOC_I2C_AND_SPI=m
882# CONFIG_SND_SOC_ALL_CODECS is not set 994# CONFIG_SND_SOC_ALL_CODECS is not set
@@ -886,29 +998,197 @@ CONFIG_AC97_BUS=m
886CONFIG_HID_SUPPORT=y 998CONFIG_HID_SUPPORT=y
887CONFIG_HID=y 999CONFIG_HID=y
888# CONFIG_HIDRAW is not set 1000# CONFIG_HIDRAW is not set
1001
1002#
1003# USB Input Devices
1004#
1005CONFIG_USB_HID=y
889# CONFIG_HID_PID is not set 1006# CONFIG_HID_PID is not set
1007# CONFIG_USB_HIDDEV is not set
890 1008
891# 1009#
892# Special HID drivers 1010# Special HID drivers
893# 1011#
1012# CONFIG_HID_3M_PCT is not set
1013CONFIG_HID_A4TECH=y
1014CONFIG_HID_APPLE=y
1015CONFIG_HID_BELKIN=y
1016# CONFIG_HID_CANDO is not set
1017CONFIG_HID_CHERRY=y
1018CONFIG_HID_CHICONY=y
1019# CONFIG_HID_PRODIKEYS is not set
1020CONFIG_HID_CYPRESS=y
1021# CONFIG_HID_DRAGONRISE is not set
1022# CONFIG_HID_EGALAX is not set
1023CONFIG_HID_EZKEY=y
1024CONFIG_HID_KYE=y
1025# CONFIG_HID_GYRATION is not set
1026# CONFIG_HID_TWINHAN is not set
1027CONFIG_HID_KENSINGTON=y
1028CONFIG_HID_LOGITECH=y
1029# CONFIG_LOGITECH_FF is not set
1030# CONFIG_LOGIRUMBLEPAD2_FF is not set
1031# CONFIG_LOGIG940_FF is not set
1032CONFIG_HID_MICROSOFT=y
1033# CONFIG_HID_MOSART is not set
1034CONFIG_HID_MONTEREY=y
1035# CONFIG_HID_NTRIG is not set
1036# CONFIG_HID_ORTEK is not set
1037# CONFIG_HID_PANTHERLORD is not set
1038# CONFIG_HID_PETALYNX is not set
1039# CONFIG_HID_PICOLCD is not set
1040# CONFIG_HID_QUANTA is not set
1041# CONFIG_HID_ROCCAT_KONE is not set
1042# CONFIG_HID_SAMSUNG is not set
1043# CONFIG_HID_SONY is not set
1044# CONFIG_HID_STANTUM is not set
1045# CONFIG_HID_SUNPLUS is not set
1046# CONFIG_HID_GREENASIA is not set
1047# CONFIG_HID_SMARTJOYPLUS is not set
1048# CONFIG_HID_TOPSEED is not set
1049# CONFIG_HID_THRUSTMASTER is not set
1050# CONFIG_HID_ZEROPLUS is not set
1051# CONFIG_HID_ZYDACRON is not set
894CONFIG_USB_SUPPORT=y 1052CONFIG_USB_SUPPORT=y
895CONFIG_USB_ARCH_HAS_HCD=y 1053CONFIG_USB_ARCH_HAS_HCD=y
896CONFIG_USB_ARCH_HAS_OHCI=y 1054CONFIG_USB_ARCH_HAS_OHCI=y
897# CONFIG_USB_ARCH_HAS_EHCI is not set 1055# CONFIG_USB_ARCH_HAS_EHCI is not set
898# CONFIG_USB is not set 1056CONFIG_USB=y
1057# CONFIG_USB_DEBUG is not set
1058CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
899 1059
900# 1060#
901# Enable Host or Gadget support to see Inventra options 1061# Miscellaneous USB options
902# 1062#
1063CONFIG_USB_DEVICEFS=y
1064CONFIG_USB_DEVICE_CLASS=y
1065# CONFIG_USB_DYNAMIC_MINORS is not set
1066# CONFIG_USB_MON is not set
1067# CONFIG_USB_WUSB is not set
1068# CONFIG_USB_WUSB_CBAF is not set
1069
1070#
1071# USB Host Controller Drivers
1072#
1073# CONFIG_USB_C67X00_HCD is not set
1074# CONFIG_USB_OXU210HP_HCD is not set
1075# CONFIG_USB_ISP116X_HCD is not set
1076# CONFIG_USB_ISP1760_HCD is not set
1077# CONFIG_USB_ISP1362_HCD is not set
1078CONFIG_USB_OHCI_HCD=y
1079# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
1080# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
1081CONFIG_USB_OHCI_LITTLE_ENDIAN=y
1082# CONFIG_USB_SL811_HCD is not set
1083# CONFIG_USB_R8A66597_HCD is not set
1084# CONFIG_USB_HWA_HCD is not set
1085# CONFIG_USB_MUSB_HDRC is not set
1086
1087#
1088# USB Device Class drivers
1089#
1090CONFIG_USB_ACM=m
1091CONFIG_USB_PRINTER=m
1092# CONFIG_USB_WDM is not set
1093# CONFIG_USB_TMC is not set
903 1094
904# 1095#
905# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may 1096# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
906# 1097#
1098
1099#
1100# also be needed; see USB_STORAGE Help for more info
1101#
1102# CONFIG_USB_LIBUSUAL is not set
1103
1104#
1105# USB Imaging devices
1106#
1107# CONFIG_USB_MDC800 is not set
1108
1109#
1110# USB port drivers
1111#
1112CONFIG_USB_SERIAL=m
1113# CONFIG_USB_EZUSB is not set
1114CONFIG_USB_SERIAL_GENERIC=y
1115# CONFIG_USB_SERIAL_AIRCABLE is not set
1116# CONFIG_USB_SERIAL_ARK3116 is not set
1117# CONFIG_USB_SERIAL_BELKIN is not set
1118# CONFIG_USB_SERIAL_CH341 is not set
1119# CONFIG_USB_SERIAL_WHITEHEAT is not set
1120# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
1121# CONFIG_USB_SERIAL_CP210X is not set
1122# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
1123CONFIG_USB_SERIAL_EMPEG=m
1124CONFIG_USB_SERIAL_FTDI_SIO=m
1125# CONFIG_USB_SERIAL_FUNSOFT is not set
1126# CONFIG_USB_SERIAL_VISOR is not set
1127# CONFIG_USB_SERIAL_IPAQ is not set
1128# CONFIG_USB_SERIAL_IR is not set
1129# CONFIG_USB_SERIAL_EDGEPORT is not set
1130# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
1131# CONFIG_USB_SERIAL_GARMIN is not set
1132# CONFIG_USB_SERIAL_IPW is not set
1133# CONFIG_USB_SERIAL_IUU is not set
1134# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
1135# CONFIG_USB_SERIAL_KEYSPAN is not set
1136# CONFIG_USB_SERIAL_KLSI is not set
1137# CONFIG_USB_SERIAL_KOBIL_SCT is not set
1138# CONFIG_USB_SERIAL_MCT_U232 is not set
1139# CONFIG_USB_SERIAL_MOS7720 is not set
1140# CONFIG_USB_SERIAL_MOS7840 is not set
1141# CONFIG_USB_SERIAL_MOTOROLA is not set
1142# CONFIG_USB_SERIAL_NAVMAN is not set
1143CONFIG_USB_SERIAL_PL2303=m
1144# CONFIG_USB_SERIAL_OTI6858 is not set
1145# CONFIG_USB_SERIAL_QCAUX is not set
1146# CONFIG_USB_SERIAL_QUALCOMM is not set
1147# CONFIG_USB_SERIAL_SPCP8X5 is not set
1148# CONFIG_USB_SERIAL_HP4X is not set
1149# CONFIG_USB_SERIAL_SAFE is not set
1150# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
1151# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
1152# CONFIG_USB_SERIAL_SYMBOL is not set
1153# CONFIG_USB_SERIAL_TI is not set
1154# CONFIG_USB_SERIAL_CYBERJACK is not set
1155# CONFIG_USB_SERIAL_XIRCOM is not set
1156# CONFIG_USB_SERIAL_OPTION is not set
1157# CONFIG_USB_SERIAL_OMNINET is not set
1158# CONFIG_USB_SERIAL_OPTICON is not set
1159# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set
1160# CONFIG_USB_SERIAL_ZIO is not set
1161# CONFIG_USB_SERIAL_DEBUG is not set
1162
1163#
1164# USB Miscellaneous drivers
1165#
1166# CONFIG_USB_EMI62 is not set
1167# CONFIG_USB_EMI26 is not set
1168# CONFIG_USB_ADUTUX is not set
1169# CONFIG_USB_SEVSEG is not set
1170# CONFIG_USB_RIO500 is not set
1171# CONFIG_USB_LEGOTOWER is not set
1172# CONFIG_USB_LCD is not set
1173# CONFIG_USB_LED is not set
1174# CONFIG_USB_CYPRESS_CY7C63 is not set
1175# CONFIG_USB_CYTHERM is not set
1176# CONFIG_USB_IDMOUSE is not set
1177# CONFIG_USB_FTDI_ELAN is not set
1178# CONFIG_USB_APPLEDISPLAY is not set
1179# CONFIG_USB_LD is not set
1180# CONFIG_USB_TRANCEVIBRATOR is not set
1181# CONFIG_USB_IOWARRIOR is not set
1182# CONFIG_USB_TEST is not set
1183# CONFIG_USB_ISIGHTFW is not set
907# CONFIG_USB_GADGET is not set 1184# CONFIG_USB_GADGET is not set
908 1185
909# 1186#
910# OTG and related infrastructure 1187# OTG and related infrastructure
911# 1188#
1189# CONFIG_USB_GPIO_VBUS is not set
1190# CONFIG_USB_ULPI is not set
1191# CONFIG_NOP_USB_XCEIV is not set
912CONFIG_MMC=y 1192CONFIG_MMC=y
913CONFIG_MMC_DEBUG=y 1193CONFIG_MMC_DEBUG=y
914CONFIG_MMC_UNSAFE_RESUME=y 1194CONFIG_MMC_UNSAFE_RESUME=y
@@ -928,18 +1208,80 @@ CONFIG_MMC_SDHCI=y
928# CONFIG_MMC_SDHCI_PLTFM is not set 1208# CONFIG_MMC_SDHCI_PLTFM is not set
929CONFIG_MMC_SDHCI_S3C=y 1209CONFIG_MMC_SDHCI_S3C=y
930# CONFIG_MMC_SDHCI_S3C_DMA is not set 1210# CONFIG_MMC_SDHCI_S3C_DMA is not set
1211# CONFIG_MMC_SPI is not set
931# CONFIG_MEMSTICK is not set 1212# CONFIG_MEMSTICK is not set
932# CONFIG_NEW_LEDS is not set 1213# CONFIG_NEW_LEDS is not set
933# CONFIG_ACCESSIBILITY is not set 1214# CONFIG_ACCESSIBILITY is not set
934CONFIG_RTC_LIB=y 1215CONFIG_RTC_LIB=y
935# CONFIG_RTC_CLASS is not set 1216CONFIG_RTC_CLASS=y
1217CONFIG_RTC_HCTOSYS=y
1218CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
1219# CONFIG_RTC_DEBUG is not set
1220
1221#
1222# RTC interfaces
1223#
1224CONFIG_RTC_INTF_SYSFS=y
1225CONFIG_RTC_INTF_PROC=y
1226CONFIG_RTC_INTF_DEV=y
1227# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
1228# CONFIG_RTC_DRV_TEST is not set
1229
1230#
1231# I2C RTC drivers
1232#
1233# CONFIG_RTC_DRV_DS1307 is not set
1234# CONFIG_RTC_DRV_DS1374 is not set
1235# CONFIG_RTC_DRV_DS1672 is not set
1236# CONFIG_RTC_DRV_MAX6900 is not set
1237# CONFIG_RTC_DRV_RS5C372 is not set
1238# CONFIG_RTC_DRV_ISL1208 is not set
1239# CONFIG_RTC_DRV_X1205 is not set
1240# CONFIG_RTC_DRV_PCF8563 is not set
1241# CONFIG_RTC_DRV_PCF8583 is not set
1242# CONFIG_RTC_DRV_M41T80 is not set
1243# CONFIG_RTC_DRV_BQ32K is not set
1244# CONFIG_RTC_DRV_S35390A is not set
1245# CONFIG_RTC_DRV_FM3130 is not set
1246# CONFIG_RTC_DRV_RX8581 is not set
1247# CONFIG_RTC_DRV_RX8025 is not set
1248
1249#
1250# SPI RTC drivers
1251#
1252# CONFIG_RTC_DRV_M41T94 is not set
1253# CONFIG_RTC_DRV_DS1305 is not set
1254# CONFIG_RTC_DRV_DS1390 is not set
1255# CONFIG_RTC_DRV_MAX6902 is not set
1256# CONFIG_RTC_DRV_R9701 is not set
1257# CONFIG_RTC_DRV_RS5C348 is not set
1258# CONFIG_RTC_DRV_DS3234 is not set
1259# CONFIG_RTC_DRV_PCF2123 is not set
1260
1261#
1262# Platform RTC drivers
1263#
1264# CONFIG_RTC_DRV_CMOS is not set
1265# CONFIG_RTC_DRV_DS1286 is not set
1266# CONFIG_RTC_DRV_DS1511 is not set
1267# CONFIG_RTC_DRV_DS1553 is not set
1268# CONFIG_RTC_DRV_DS1742 is not set
1269# CONFIG_RTC_DRV_STK17TA8 is not set
1270# CONFIG_RTC_DRV_M48T86 is not set
1271# CONFIG_RTC_DRV_M48T35 is not set
1272# CONFIG_RTC_DRV_M48T59 is not set
1273# CONFIG_RTC_DRV_MSM6242 is not set
1274# CONFIG_RTC_DRV_BQ4802 is not set
1275# CONFIG_RTC_DRV_RP5C01 is not set
1276# CONFIG_RTC_DRV_V3020 is not set
1277
1278#
1279# on-CPU RTC drivers
1280#
1281CONFIG_RTC_DRV_S3C=y
936# CONFIG_DMADEVICES is not set 1282# CONFIG_DMADEVICES is not set
937# CONFIG_AUXDISPLAY is not set 1283# CONFIG_AUXDISPLAY is not set
938# CONFIG_UIO is not set 1284# CONFIG_UIO is not set
939
940#
941# TI VLYNQ
942#
943# CONFIG_STAGING is not set 1285# CONFIG_STAGING is not set
944 1286
945# 1287#
@@ -1033,7 +1375,46 @@ CONFIG_ROMFS_ON_BLOCK=y
1033# 1375#
1034# CONFIG_PARTITION_ADVANCED is not set 1376# CONFIG_PARTITION_ADVANCED is not set
1035CONFIG_MSDOS_PARTITION=y 1377CONFIG_MSDOS_PARTITION=y
1036# CONFIG_NLS is not set 1378CONFIG_NLS=y
1379CONFIG_NLS_DEFAULT="iso8859-1"
1380# CONFIG_NLS_CODEPAGE_437 is not set
1381# CONFIG_NLS_CODEPAGE_737 is not set
1382# CONFIG_NLS_CODEPAGE_775 is not set
1383# CONFIG_NLS_CODEPAGE_850 is not set
1384# CONFIG_NLS_CODEPAGE_852 is not set
1385# CONFIG_NLS_CODEPAGE_855 is not set
1386# CONFIG_NLS_CODEPAGE_857 is not set
1387# CONFIG_NLS_CODEPAGE_860 is not set
1388# CONFIG_NLS_CODEPAGE_861 is not set
1389# CONFIG_NLS_CODEPAGE_862 is not set
1390# CONFIG_NLS_CODEPAGE_863 is not set
1391# CONFIG_NLS_CODEPAGE_864 is not set
1392# CONFIG_NLS_CODEPAGE_865 is not set
1393# CONFIG_NLS_CODEPAGE_866 is not set
1394# CONFIG_NLS_CODEPAGE_869 is not set
1395# CONFIG_NLS_CODEPAGE_936 is not set
1396# CONFIG_NLS_CODEPAGE_950 is not set
1397# CONFIG_NLS_CODEPAGE_932 is not set
1398# CONFIG_NLS_CODEPAGE_949 is not set
1399# CONFIG_NLS_CODEPAGE_874 is not set
1400# CONFIG_NLS_ISO8859_8 is not set
1401# CONFIG_NLS_CODEPAGE_1250 is not set
1402# CONFIG_NLS_CODEPAGE_1251 is not set
1403# CONFIG_NLS_ASCII is not set
1404# CONFIG_NLS_ISO8859_1 is not set
1405# CONFIG_NLS_ISO8859_2 is not set
1406# CONFIG_NLS_ISO8859_3 is not set
1407# CONFIG_NLS_ISO8859_4 is not set
1408# CONFIG_NLS_ISO8859_5 is not set
1409# CONFIG_NLS_ISO8859_6 is not set
1410# CONFIG_NLS_ISO8859_7 is not set
1411# CONFIG_NLS_ISO8859_9 is not set
1412# CONFIG_NLS_ISO8859_13 is not set
1413# CONFIG_NLS_ISO8859_14 is not set
1414# CONFIG_NLS_ISO8859_15 is not set
1415# CONFIG_NLS_KOI8_R is not set
1416# CONFIG_NLS_KOI8_U is not set
1417# CONFIG_NLS_UTF8 is not set
1037 1418
1038# 1419#
1039# Kernel hacking 1420# Kernel hacking
@@ -1096,6 +1477,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y
1096CONFIG_TRACING_SUPPORT=y 1477CONFIG_TRACING_SUPPORT=y
1097CONFIG_FTRACE=y 1478CONFIG_FTRACE=y
1098# CONFIG_FUNCTION_TRACER is not set 1479# CONFIG_FUNCTION_TRACER is not set
1480# CONFIG_IRQSOFF_TRACER is not set
1099# CONFIG_SCHED_TRACER is not set 1481# CONFIG_SCHED_TRACER is not set
1100# CONFIG_ENABLE_DEFAULT_TRACERS is not set 1482# CONFIG_ENABLE_DEFAULT_TRACERS is not set
1101# CONFIG_BOOT_TRACER is not set 1483# CONFIG_BOOT_TRACER is not set
@@ -1106,6 +1488,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
1106# CONFIG_KMEMTRACE is not set 1488# CONFIG_KMEMTRACE is not set
1107# CONFIG_WORKQUEUE_TRACER is not set 1489# CONFIG_WORKQUEUE_TRACER is not set
1108# CONFIG_BLK_DEV_IO_TRACE is not set 1490# CONFIG_BLK_DEV_IO_TRACE is not set
1491# CONFIG_ATOMIC64_SELFTEST is not set
1109# CONFIG_SAMPLES is not set 1492# CONFIG_SAMPLES is not set
1110CONFIG_HAVE_ARCH_KGDB=y 1493CONFIG_HAVE_ARCH_KGDB=y
1111# CONFIG_KGDB is not set 1494# CONFIG_KGDB is not set
diff --git a/arch/arm/configs/s5p6440_defconfig b/arch/arm/configs/s5p6440_defconfig
index 619bfab3ab39..532e987beb4d 100644
--- a/arch/arm/configs/s5p6440_defconfig
+++ b/arch/arm/configs/s5p6440_defconfig
@@ -1,11 +1,13 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.34 3# Linux kernel version: 2.6.34
4# Sat May 22 03:18:18 2010 4# Wed May 26 19:04:32 2010
5# 5#
6CONFIG_ARM=y 6CONFIG_ARM=y
7CONFIG_SYS_SUPPORTS_APM_EMULATION=y 7CONFIG_SYS_SUPPORTS_APM_EMULATION=y
8CONFIG_GENERIC_GPIO=y 8CONFIG_GENERIC_GPIO=y
9CONFIG_GENERIC_TIME=y
10CONFIG_ARCH_USES_GETTIMEOFFSET=y
9CONFIG_HAVE_PROC_CPU=y 11CONFIG_HAVE_PROC_CPU=y
10CONFIG_NO_IOPORT=y 12CONFIG_NO_IOPORT=y
11CONFIG_GENERIC_HARDIRQS=y 13CONFIG_GENERIC_HARDIRQS=y
@@ -33,6 +35,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
33CONFIG_LOCALVERSION="" 35CONFIG_LOCALVERSION=""
34CONFIG_LOCALVERSION_AUTO=y 36CONFIG_LOCALVERSION_AUTO=y
35CONFIG_HAVE_KERNEL_GZIP=y 37CONFIG_HAVE_KERNEL_GZIP=y
38CONFIG_HAVE_KERNEL_LZMA=y
36CONFIG_HAVE_KERNEL_LZO=y 39CONFIG_HAVE_KERNEL_LZO=y
37CONFIG_KERNEL_GZIP=y 40CONFIG_KERNEL_GZIP=y
38# CONFIG_KERNEL_BZIP2 is not set 41# CONFIG_KERNEL_BZIP2 is not set
@@ -178,9 +181,11 @@ CONFIG_MMU=y
178# CONFIG_ARCH_INTEGRATOR is not set 181# CONFIG_ARCH_INTEGRATOR is not set
179# CONFIG_ARCH_REALVIEW is not set 182# CONFIG_ARCH_REALVIEW is not set
180# CONFIG_ARCH_VERSATILE is not set 183# CONFIG_ARCH_VERSATILE is not set
184# CONFIG_ARCH_VEXPRESS is not set
181# CONFIG_ARCH_AT91 is not set 185# CONFIG_ARCH_AT91 is not set
182# CONFIG_ARCH_BCMRING is not set 186# CONFIG_ARCH_BCMRING is not set
183# CONFIG_ARCH_CLPS711X is not set 187# CONFIG_ARCH_CLPS711X is not set
188# CONFIG_ARCH_CNS3XXX is not set
184# CONFIG_ARCH_GEMINI is not set 189# CONFIG_ARCH_GEMINI is not set
185# CONFIG_ARCH_EBSA110 is not set 190# CONFIG_ARCH_EBSA110 is not set
186# CONFIG_ARCH_EP93XX is not set 191# CONFIG_ARCH_EP93XX is not set
@@ -216,7 +221,7 @@ CONFIG_MMU=y
216# CONFIG_ARCH_S3C64XX is not set 221# CONFIG_ARCH_S3C64XX is not set
217CONFIG_ARCH_S5P6440=y 222CONFIG_ARCH_S5P6440=y
218# CONFIG_ARCH_S5P6442 is not set 223# CONFIG_ARCH_S5P6442 is not set
219# CONFIG_ARCH_S5PC1XX is not set 224# CONFIG_ARCH_S5PC100 is not set
220# CONFIG_ARCH_S5PV210 is not set 225# CONFIG_ARCH_S5PV210 is not set
221# CONFIG_ARCH_SHARK is not set 226# CONFIG_ARCH_SHARK is not set
222# CONFIG_ARCH_LH7A40X is not set 227# CONFIG_ARCH_LH7A40X is not set
@@ -225,6 +230,7 @@ CONFIG_ARCH_S5P6440=y
225# CONFIG_ARCH_NOMADIK is not set 230# CONFIG_ARCH_NOMADIK is not set
226# CONFIG_ARCH_DAVINCI is not set 231# CONFIG_ARCH_DAVINCI is not set
227# CONFIG_ARCH_OMAP is not set 232# CONFIG_ARCH_OMAP is not set
233# CONFIG_PLAT_SPEAR is not set
228CONFIG_PLAT_SAMSUNG=y 234CONFIG_PLAT_SAMSUNG=y
229 235
230# 236#
@@ -240,10 +246,15 @@ CONFIG_SAMSUNG_GPIOLIB_4BIT=y
240CONFIG_S3C_GPIO_CFG_S3C24XX=y 246CONFIG_S3C_GPIO_CFG_S3C24XX=y
241CONFIG_S3C_GPIO_CFG_S3C64XX=y 247CONFIG_S3C_GPIO_CFG_S3C64XX=y
242CONFIG_S3C_GPIO_PULL_UPDOWN=y 248CONFIG_S3C_GPIO_PULL_UPDOWN=y
249CONFIG_S5P_GPIO_DRVSTR=y
243CONFIG_SAMSUNG_GPIO_EXTRA=0 250CONFIG_SAMSUNG_GPIO_EXTRA=0
244CONFIG_S3C_GPIO_SPACE=0 251CONFIG_S3C_GPIO_SPACE=0
245CONFIG_S3C_GPIO_TRACK=y 252CONFIG_S3C_GPIO_TRACK=y
246# CONFIG_S3C_ADC is not set 253# CONFIG_S3C_ADC is not set
254CONFIG_S3C_DEV_WDT=y
255CONFIG_SAMSUNG_DEV_ADC=y
256CONFIG_SAMSUNG_DEV_TS=y
257CONFIG_S3C_PL330_DMA=y
247 258
248# 259#
249# Power management 260# Power management
@@ -276,10 +287,12 @@ CONFIG_ARM_THUMB=y
276# CONFIG_CPU_DCACHE_DISABLE is not set 287# CONFIG_CPU_DCACHE_DISABLE is not set
277# CONFIG_CPU_BPREDICT_DISABLE is not set 288# CONFIG_CPU_BPREDICT_DISABLE is not set
278CONFIG_ARM_L1_CACHE_SHIFT=5 289CONFIG_ARM_L1_CACHE_SHIFT=5
290CONFIG_ARM_DMA_MEM_BUFFERABLE=y
279CONFIG_CPU_HAS_PMU=y 291CONFIG_CPU_HAS_PMU=y
280# CONFIG_ARM_ERRATA_411920 is not set 292# CONFIG_ARM_ERRATA_411920 is not set
281CONFIG_ARM_VIC=y 293CONFIG_ARM_VIC=y
282CONFIG_ARM_VIC_NR=2 294CONFIG_ARM_VIC_NR=2
295CONFIG_PL330=y
283 296
284# 297#
285# Bus support 298# Bus support
@@ -326,6 +339,7 @@ CONFIG_ALIGNMENT_TRAP=y
326CONFIG_ZBOOT_ROM_TEXT=0 339CONFIG_ZBOOT_ROM_TEXT=0
327CONFIG_ZBOOT_ROM_BSS=0 340CONFIG_ZBOOT_ROM_BSS=0
328CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc" 341CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc"
342# CONFIG_CMDLINE_FORCE is not set
329# CONFIG_XIP_KERNEL is not set 343# CONFIG_XIP_KERNEL is not set
330# CONFIG_KEXEC is not set 344# CONFIG_KEXEC is not set
331 345
@@ -490,7 +504,9 @@ CONFIG_MOUSE_PS2_TRACKPOINT=y
490CONFIG_INPUT_TOUCHSCREEN=y 504CONFIG_INPUT_TOUCHSCREEN=y
491# CONFIG_TOUCHSCREEN_AD7879 is not set 505# CONFIG_TOUCHSCREEN_AD7879 is not set
492# CONFIG_TOUCHSCREEN_DYNAPRO is not set 506# CONFIG_TOUCHSCREEN_DYNAPRO is not set
507# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
493# CONFIG_TOUCHSCREEN_FUJITSU is not set 508# CONFIG_TOUCHSCREEN_FUJITSU is not set
509# CONFIG_TOUCHSCREEN_S3C2410 is not set
494# CONFIG_TOUCHSCREEN_GUNZE is not set 510# CONFIG_TOUCHSCREEN_GUNZE is not set
495# CONFIG_TOUCHSCREEN_ELO is not set 511# CONFIG_TOUCHSCREEN_ELO is not set
496# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set 512# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
@@ -546,6 +562,8 @@ CONFIG_SERIAL_S3C6400=y
546CONFIG_SERIAL_CORE=y 562CONFIG_SERIAL_CORE=y
547CONFIG_SERIAL_CORE_CONSOLE=y 563CONFIG_SERIAL_CORE_CONSOLE=y
548# CONFIG_SERIAL_TIMBERDALE is not set 564# CONFIG_SERIAL_TIMBERDALE is not set
565# CONFIG_SERIAL_ALTERA_JTAGUART is not set
566# CONFIG_SERIAL_ALTERA_UART is not set
549CONFIG_UNIX98_PTYS=y 567CONFIG_UNIX98_PTYS=y
550# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 568# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
551CONFIG_LEGACY_PTYS=y 569CONFIG_LEGACY_PTYS=y
@@ -593,6 +611,7 @@ CONFIG_GPIOLIB=y
593# CONFIG_HWMON is not set 611# CONFIG_HWMON is not set
594# CONFIG_THERMAL is not set 612# CONFIG_THERMAL is not set
595# CONFIG_WATCHDOG is not set 613# CONFIG_WATCHDOG is not set
614CONFIG_HAVE_S3C2410_WATCHDOG=y
596CONFIG_SSB_POSSIBLE=y 615CONFIG_SSB_POSSIBLE=y
597 616
598# 617#
@@ -649,10 +668,6 @@ CONFIG_RTC_LIB=y
649# CONFIG_DMADEVICES is not set 668# CONFIG_DMADEVICES is not set
650# CONFIG_AUXDISPLAY is not set 669# CONFIG_AUXDISPLAY is not set
651# CONFIG_UIO is not set 670# CONFIG_UIO is not set
652
653#
654# TI VLYNQ
655#
656# CONFIG_STAGING is not set 671# CONFIG_STAGING is not set
657 672
658# 673#
@@ -850,6 +865,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y
850CONFIG_TRACING_SUPPORT=y 865CONFIG_TRACING_SUPPORT=y
851CONFIG_FTRACE=y 866CONFIG_FTRACE=y
852# CONFIG_FUNCTION_TRACER is not set 867# CONFIG_FUNCTION_TRACER is not set
868# CONFIG_IRQSOFF_TRACER is not set
853# CONFIG_SCHED_TRACER is not set 869# CONFIG_SCHED_TRACER is not set
854# CONFIG_ENABLE_DEFAULT_TRACERS is not set 870# CONFIG_ENABLE_DEFAULT_TRACERS is not set
855# CONFIG_BOOT_TRACER is not set 871# CONFIG_BOOT_TRACER is not set
@@ -860,6 +876,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
860# CONFIG_KMEMTRACE is not set 876# CONFIG_KMEMTRACE is not set
861# CONFIG_WORKQUEUE_TRACER is not set 877# CONFIG_WORKQUEUE_TRACER is not set
862# CONFIG_BLK_DEV_IO_TRACE is not set 878# CONFIG_BLK_DEV_IO_TRACE is not set
879# CONFIG_ATOMIC64_SELFTEST is not set
863# CONFIG_SAMPLES is not set 880# CONFIG_SAMPLES is not set
864CONFIG_HAVE_ARCH_KGDB=y 881CONFIG_HAVE_ARCH_KGDB=y
865# CONFIG_KGDB is not set 882# CONFIG_KGDB is not set
diff --git a/arch/arm/configs/s5p6442_defconfig b/arch/arm/configs/s5p6442_defconfig
index d7ea27509cf4..068219b360f5 100644
--- a/arch/arm/configs/s5p6442_defconfig
+++ b/arch/arm/configs/s5p6442_defconfig
@@ -1,11 +1,13 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.34 3# Linux kernel version: 2.6.34
4# Sat May 22 03:18:19 2010 4# Wed May 26 19:04:34 2010
5# 5#
6CONFIG_ARM=y 6CONFIG_ARM=y
7CONFIG_SYS_SUPPORTS_APM_EMULATION=y 7CONFIG_SYS_SUPPORTS_APM_EMULATION=y
8CONFIG_GENERIC_GPIO=y 8CONFIG_GENERIC_GPIO=y
9CONFIG_GENERIC_TIME=y
10CONFIG_ARCH_USES_GETTIMEOFFSET=y
9CONFIG_HAVE_PROC_CPU=y 11CONFIG_HAVE_PROC_CPU=y
10CONFIG_NO_IOPORT=y 12CONFIG_NO_IOPORT=y
11CONFIG_GENERIC_HARDIRQS=y 13CONFIG_GENERIC_HARDIRQS=y
@@ -33,6 +35,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
33CONFIG_LOCALVERSION="" 35CONFIG_LOCALVERSION=""
34CONFIG_LOCALVERSION_AUTO=y 36CONFIG_LOCALVERSION_AUTO=y
35CONFIG_HAVE_KERNEL_GZIP=y 37CONFIG_HAVE_KERNEL_GZIP=y
38CONFIG_HAVE_KERNEL_LZMA=y
36CONFIG_HAVE_KERNEL_LZO=y 39CONFIG_HAVE_KERNEL_LZO=y
37CONFIG_KERNEL_GZIP=y 40CONFIG_KERNEL_GZIP=y
38# CONFIG_KERNEL_BZIP2 is not set 41# CONFIG_KERNEL_BZIP2 is not set
@@ -178,9 +181,11 @@ CONFIG_MMU=y
178# CONFIG_ARCH_INTEGRATOR is not set 181# CONFIG_ARCH_INTEGRATOR is not set
179# CONFIG_ARCH_REALVIEW is not set 182# CONFIG_ARCH_REALVIEW is not set
180# CONFIG_ARCH_VERSATILE is not set 183# CONFIG_ARCH_VERSATILE is not set
184# CONFIG_ARCH_VEXPRESS is not set
181# CONFIG_ARCH_AT91 is not set 185# CONFIG_ARCH_AT91 is not set
182# CONFIG_ARCH_BCMRING is not set 186# CONFIG_ARCH_BCMRING is not set
183# CONFIG_ARCH_CLPS711X is not set 187# CONFIG_ARCH_CLPS711X is not set
188# CONFIG_ARCH_CNS3XXX is not set
184# CONFIG_ARCH_GEMINI is not set 189# CONFIG_ARCH_GEMINI is not set
185# CONFIG_ARCH_EBSA110 is not set 190# CONFIG_ARCH_EBSA110 is not set
186# CONFIG_ARCH_EP93XX is not set 191# CONFIG_ARCH_EP93XX is not set
@@ -216,7 +221,7 @@ CONFIG_MMU=y
216# CONFIG_ARCH_S3C64XX is not set 221# CONFIG_ARCH_S3C64XX is not set
217# CONFIG_ARCH_S5P6440 is not set 222# CONFIG_ARCH_S5P6440 is not set
218CONFIG_ARCH_S5P6442=y 223CONFIG_ARCH_S5P6442=y
219# CONFIG_ARCH_S5PC1XX is not set 224# CONFIG_ARCH_S5PC100 is not set
220# CONFIG_ARCH_S5PV210 is not set 225# CONFIG_ARCH_S5PV210 is not set
221# CONFIG_ARCH_SHARK is not set 226# CONFIG_ARCH_SHARK is not set
222# CONFIG_ARCH_LH7A40X is not set 227# CONFIG_ARCH_LH7A40X is not set
@@ -225,6 +230,7 @@ CONFIG_ARCH_S5P6442=y
225# CONFIG_ARCH_NOMADIK is not set 230# CONFIG_ARCH_NOMADIK is not set
226# CONFIG_ARCH_DAVINCI is not set 231# CONFIG_ARCH_DAVINCI is not set
227# CONFIG_ARCH_OMAP is not set 232# CONFIG_ARCH_OMAP is not set
233# CONFIG_PLAT_SPEAR is not set
228CONFIG_PLAT_SAMSUNG=y 234CONFIG_PLAT_SAMSUNG=y
229 235
230# 236#
@@ -240,10 +246,12 @@ CONFIG_SAMSUNG_GPIOLIB_4BIT=y
240CONFIG_S3C_GPIO_CFG_S3C24XX=y 246CONFIG_S3C_GPIO_CFG_S3C24XX=y
241CONFIG_S3C_GPIO_CFG_S3C64XX=y 247CONFIG_S3C_GPIO_CFG_S3C64XX=y
242CONFIG_S3C_GPIO_PULL_UPDOWN=y 248CONFIG_S3C_GPIO_PULL_UPDOWN=y
249CONFIG_S5P_GPIO_DRVSTR=y
243CONFIG_SAMSUNG_GPIO_EXTRA=0 250CONFIG_SAMSUNG_GPIO_EXTRA=0
244CONFIG_S3C_GPIO_SPACE=0 251CONFIG_S3C_GPIO_SPACE=0
245CONFIG_S3C_GPIO_TRACK=y 252CONFIG_S3C_GPIO_TRACK=y
246# CONFIG_S3C_ADC is not set 253# CONFIG_S3C_ADC is not set
254CONFIG_S3C_PL330_DMA=y
247 255
248# 256#
249# Power management 257# Power management
@@ -276,10 +284,12 @@ CONFIG_ARM_THUMB=y
276# CONFIG_CPU_DCACHE_DISABLE is not set 284# CONFIG_CPU_DCACHE_DISABLE is not set
277# CONFIG_CPU_BPREDICT_DISABLE is not set 285# CONFIG_CPU_BPREDICT_DISABLE is not set
278CONFIG_ARM_L1_CACHE_SHIFT=5 286CONFIG_ARM_L1_CACHE_SHIFT=5
287CONFIG_ARM_DMA_MEM_BUFFERABLE=y
279CONFIG_CPU_HAS_PMU=y 288CONFIG_CPU_HAS_PMU=y
280# CONFIG_ARM_ERRATA_411920 is not set 289# CONFIG_ARM_ERRATA_411920 is not set
281CONFIG_ARM_VIC=y 290CONFIG_ARM_VIC=y
282CONFIG_ARM_VIC_NR=2 291CONFIG_ARM_VIC_NR=2
292CONFIG_PL330=y
283 293
284# 294#
285# Bus support 295# Bus support
@@ -326,6 +336,7 @@ CONFIG_ALIGNMENT_TRAP=y
326CONFIG_ZBOOT_ROM_TEXT=0 336CONFIG_ZBOOT_ROM_TEXT=0
327CONFIG_ZBOOT_ROM_BSS=0 337CONFIG_ZBOOT_ROM_BSS=0
328CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc" 338CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc"
339# CONFIG_CMDLINE_FORCE is not set
329# CONFIG_XIP_KERNEL is not set 340# CONFIG_XIP_KERNEL is not set
330# CONFIG_KEXEC is not set 341# CONFIG_KEXEC is not set
331 342
@@ -471,6 +482,7 @@ CONFIG_INPUT_EVDEV=y
471CONFIG_INPUT_TOUCHSCREEN=y 482CONFIG_INPUT_TOUCHSCREEN=y
472# CONFIG_TOUCHSCREEN_AD7879 is not set 483# CONFIG_TOUCHSCREEN_AD7879 is not set
473# CONFIG_TOUCHSCREEN_DYNAPRO is not set 484# CONFIG_TOUCHSCREEN_DYNAPRO is not set
485# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
474# CONFIG_TOUCHSCREEN_FUJITSU is not set 486# CONFIG_TOUCHSCREEN_FUJITSU is not set
475# CONFIG_TOUCHSCREEN_GUNZE is not set 487# CONFIG_TOUCHSCREEN_GUNZE is not set
476# CONFIG_TOUCHSCREEN_ELO is not set 488# CONFIG_TOUCHSCREEN_ELO is not set
@@ -525,6 +537,8 @@ CONFIG_SERIAL_S5PV210=y
525CONFIG_SERIAL_CORE=y 537CONFIG_SERIAL_CORE=y
526CONFIG_SERIAL_CORE_CONSOLE=y 538CONFIG_SERIAL_CORE_CONSOLE=y
527# CONFIG_SERIAL_TIMBERDALE is not set 539# CONFIG_SERIAL_TIMBERDALE is not set
540# CONFIG_SERIAL_ALTERA_JTAGUART is not set
541# CONFIG_SERIAL_ALTERA_UART is not set
528CONFIG_UNIX98_PTYS=y 542CONFIG_UNIX98_PTYS=y
529# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 543# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
530CONFIG_LEGACY_PTYS=y 544CONFIG_LEGACY_PTYS=y
@@ -624,10 +638,6 @@ CONFIG_RTC_LIB=y
624# CONFIG_DMADEVICES is not set 638# CONFIG_DMADEVICES is not set
625# CONFIG_AUXDISPLAY is not set 639# CONFIG_AUXDISPLAY is not set
626# CONFIG_UIO is not set 640# CONFIG_UIO is not set
627
628#
629# TI VLYNQ
630#
631# CONFIG_STAGING is not set 641# CONFIG_STAGING is not set
632 642
633# 643#
@@ -836,6 +846,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y
836CONFIG_TRACING_SUPPORT=y 846CONFIG_TRACING_SUPPORT=y
837CONFIG_FTRACE=y 847CONFIG_FTRACE=y
838# CONFIG_FUNCTION_TRACER is not set 848# CONFIG_FUNCTION_TRACER is not set
849# CONFIG_IRQSOFF_TRACER is not set
839# CONFIG_SCHED_TRACER is not set 850# CONFIG_SCHED_TRACER is not set
840# CONFIG_ENABLE_DEFAULT_TRACERS is not set 851# CONFIG_ENABLE_DEFAULT_TRACERS is not set
841# CONFIG_BOOT_TRACER is not set 852# CONFIG_BOOT_TRACER is not set
@@ -846,6 +857,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
846# CONFIG_KMEMTRACE is not set 857# CONFIG_KMEMTRACE is not set
847# CONFIG_WORKQUEUE_TRACER is not set 858# CONFIG_WORKQUEUE_TRACER is not set
848# CONFIG_BLK_DEV_IO_TRACE is not set 859# CONFIG_BLK_DEV_IO_TRACE is not set
860# CONFIG_ATOMIC64_SELFTEST is not set
849# CONFIG_SAMPLES is not set 861# CONFIG_SAMPLES is not set
850CONFIG_HAVE_ARCH_KGDB=y 862CONFIG_HAVE_ARCH_KGDB=y
851# CONFIG_KGDB is not set 863# CONFIG_KGDB is not set
diff --git a/arch/arm/configs/s5pc100_defconfig b/arch/arm/configs/s5pc100_defconfig
index 2053be6c9af1..ebc6245b9fca 100644
--- a/arch/arm/configs/s5pc100_defconfig
+++ b/arch/arm/configs/s5pc100_defconfig
@@ -1,12 +1,14 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.30 3# Linux kernel version: 2.6.34
4# Wed Jul 1 15:53:07 2009 4# Wed May 26 19:04:35 2010
5# 5#
6CONFIG_ARM=y 6CONFIG_ARM=y
7CONFIG_SYS_SUPPORTS_APM_EMULATION=y 7CONFIG_SYS_SUPPORTS_APM_EMULATION=y
8CONFIG_GENERIC_GPIO=y 8CONFIG_GENERIC_GPIO=y
9CONFIG_MMU=y 9CONFIG_GENERIC_TIME=y
10CONFIG_ARCH_USES_GETTIMEOFFSET=y
11CONFIG_HAVE_PROC_CPU=y
10CONFIG_NO_IOPORT=y 12CONFIG_NO_IOPORT=y
11CONFIG_GENERIC_HARDIRQS=y 13CONFIG_GENERIC_HARDIRQS=y
12CONFIG_STACKTRACE_SUPPORT=y 14CONFIG_STACKTRACE_SUPPORT=y
@@ -18,7 +20,9 @@ CONFIG_GENERIC_IRQ_PROBE=y
18CONFIG_RWSEM_GENERIC_SPINLOCK=y 20CONFIG_RWSEM_GENERIC_SPINLOCK=y
19CONFIG_GENERIC_HWEIGHT=y 21CONFIG_GENERIC_HWEIGHT=y
20CONFIG_GENERIC_CALIBRATE_DELAY=y 22CONFIG_GENERIC_CALIBRATE_DELAY=y
23CONFIG_NEED_DMA_MAP_STATE=y
21CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 24CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
25CONFIG_ARM_L1_CACHE_SHIFT_6=y
22CONFIG_VECTORS_BASE=0xffff0000 26CONFIG_VECTORS_BASE=0xffff0000
23CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 27CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
24CONFIG_CONSTRUCTORS=y 28CONFIG_CONSTRUCTORS=y
@@ -31,6 +35,13 @@ CONFIG_BROKEN_ON_SMP=y
31CONFIG_INIT_ENV_ARG_LIMIT=32 35CONFIG_INIT_ENV_ARG_LIMIT=32
32CONFIG_LOCALVERSION="" 36CONFIG_LOCALVERSION=""
33CONFIG_LOCALVERSION_AUTO=y 37CONFIG_LOCALVERSION_AUTO=y
38CONFIG_HAVE_KERNEL_GZIP=y
39CONFIG_HAVE_KERNEL_LZMA=y
40CONFIG_HAVE_KERNEL_LZO=y
41CONFIG_KERNEL_GZIP=y
42# CONFIG_KERNEL_BZIP2 is not set
43# CONFIG_KERNEL_LZMA is not set
44# CONFIG_KERNEL_LZO is not set
34CONFIG_SWAP=y 45CONFIG_SWAP=y
35# CONFIG_SYSVIPC is not set 46# CONFIG_SYSVIPC is not set
36# CONFIG_BSD_PROCESS_ACCT is not set 47# CONFIG_BSD_PROCESS_ACCT is not set
@@ -38,14 +49,15 @@ CONFIG_SWAP=y
38# 49#
39# RCU Subsystem 50# RCU Subsystem
40# 51#
41CONFIG_CLASSIC_RCU=y 52CONFIG_TREE_RCU=y
42# CONFIG_TREE_RCU is not set 53# CONFIG_TREE_PREEMPT_RCU is not set
43# CONFIG_PREEMPT_RCU is not set 54# CONFIG_TINY_RCU is not set
55# CONFIG_RCU_TRACE is not set
56CONFIG_RCU_FANOUT=32
57# CONFIG_RCU_FANOUT_EXACT is not set
44# CONFIG_TREE_RCU_TRACE is not set 58# CONFIG_TREE_RCU_TRACE is not set
45# CONFIG_PREEMPT_RCU_TRACE is not set
46# CONFIG_IKCONFIG is not set 59# CONFIG_IKCONFIG is not set
47CONFIG_LOG_BUF_SHIFT=17 60CONFIG_LOG_BUF_SHIFT=17
48# CONFIG_GROUP_SCHED is not set
49# CONFIG_CGROUPS is not set 61# CONFIG_CGROUPS is not set
50CONFIG_SYSFS_DEPRECATED=y 62CONFIG_SYSFS_DEPRECATED=y
51CONFIG_SYSFS_DEPRECATED_V2=y 63CONFIG_SYSFS_DEPRECATED_V2=y
@@ -59,6 +71,7 @@ CONFIG_INITRAMFS_SOURCE=""
59CONFIG_RD_GZIP=y 71CONFIG_RD_GZIP=y
60CONFIG_RD_BZIP2=y 72CONFIG_RD_BZIP2=y
61CONFIG_RD_LZMA=y 73CONFIG_RD_LZMA=y
74CONFIG_RD_LZO=y
62CONFIG_CC_OPTIMIZE_FOR_SIZE=y 75CONFIG_CC_OPTIMIZE_FOR_SIZE=y
63CONFIG_SYSCTL=y 76CONFIG_SYSCTL=y
64CONFIG_ANON_INODES=y 77CONFIG_ANON_INODES=y
@@ -80,19 +93,21 @@ CONFIG_TIMERFD=y
80CONFIG_EVENTFD=y 93CONFIG_EVENTFD=y
81CONFIG_SHMEM=y 94CONFIG_SHMEM=y
82CONFIG_AIO=y 95CONFIG_AIO=y
96CONFIG_HAVE_PERF_EVENTS=y
97CONFIG_PERF_USE_VMALLOC=y
83 98
84# 99#
85# Performance Counters 100# Kernel Performance Events And Counters
86# 101#
102# CONFIG_PERF_EVENTS is not set
103# CONFIG_PERF_COUNTERS is not set
87CONFIG_VM_EVENT_COUNTERS=y 104CONFIG_VM_EVENT_COUNTERS=y
88CONFIG_SLUB_DEBUG=y 105CONFIG_SLUB_DEBUG=y
89# CONFIG_STRIP_ASM_SYMS is not set
90CONFIG_COMPAT_BRK=y 106CONFIG_COMPAT_BRK=y
91# CONFIG_SLAB is not set 107# CONFIG_SLAB is not set
92CONFIG_SLUB=y 108CONFIG_SLUB=y
93# CONFIG_SLOB is not set 109# CONFIG_SLOB is not set
94# CONFIG_PROFILING is not set 110# CONFIG_PROFILING is not set
95# CONFIG_MARKERS is not set
96CONFIG_HAVE_OPROFILE=y 111CONFIG_HAVE_OPROFILE=y
97# CONFIG_KPROBES is not set 112# CONFIG_KPROBES is not set
98CONFIG_HAVE_KPROBES=y 113CONFIG_HAVE_KPROBES=y
@@ -122,25 +137,56 @@ CONFIG_LBDAF=y
122# IO Schedulers 137# IO Schedulers
123# 138#
124CONFIG_IOSCHED_NOOP=y 139CONFIG_IOSCHED_NOOP=y
125CONFIG_IOSCHED_AS=y
126CONFIG_IOSCHED_DEADLINE=y 140CONFIG_IOSCHED_DEADLINE=y
127CONFIG_IOSCHED_CFQ=y 141CONFIG_IOSCHED_CFQ=y
128# CONFIG_DEFAULT_AS is not set
129# CONFIG_DEFAULT_DEADLINE is not set 142# CONFIG_DEFAULT_DEADLINE is not set
130CONFIG_DEFAULT_CFQ=y 143CONFIG_DEFAULT_CFQ=y
131# CONFIG_DEFAULT_NOOP is not set 144# CONFIG_DEFAULT_NOOP is not set
132CONFIG_DEFAULT_IOSCHED="cfq" 145CONFIG_DEFAULT_IOSCHED="cfq"
146# CONFIG_INLINE_SPIN_TRYLOCK is not set
147# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
148# CONFIG_INLINE_SPIN_LOCK is not set
149# CONFIG_INLINE_SPIN_LOCK_BH is not set
150# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
151# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
152# CONFIG_INLINE_SPIN_UNLOCK is not set
153# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
154# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
155# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
156# CONFIG_INLINE_READ_TRYLOCK is not set
157# CONFIG_INLINE_READ_LOCK is not set
158# CONFIG_INLINE_READ_LOCK_BH is not set
159# CONFIG_INLINE_READ_LOCK_IRQ is not set
160# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
161# CONFIG_INLINE_READ_UNLOCK is not set
162# CONFIG_INLINE_READ_UNLOCK_BH is not set
163# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
164# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
165# CONFIG_INLINE_WRITE_TRYLOCK is not set
166# CONFIG_INLINE_WRITE_LOCK is not set
167# CONFIG_INLINE_WRITE_LOCK_BH is not set
168# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
169# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
170# CONFIG_INLINE_WRITE_UNLOCK is not set
171# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
172# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
173# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
174# CONFIG_MUTEX_SPIN_ON_OWNER is not set
133# CONFIG_FREEZER is not set 175# CONFIG_FREEZER is not set
134 176
135# 177#
136# System Type 178# System Type
137# 179#
180CONFIG_MMU=y
138# CONFIG_ARCH_AAEC2000 is not set 181# CONFIG_ARCH_AAEC2000 is not set
139# CONFIG_ARCH_INTEGRATOR is not set 182# CONFIG_ARCH_INTEGRATOR is not set
140# CONFIG_ARCH_REALVIEW is not set 183# CONFIG_ARCH_REALVIEW is not set
141# CONFIG_ARCH_VERSATILE is not set 184# CONFIG_ARCH_VERSATILE is not set
185# CONFIG_ARCH_VEXPRESS is not set
142# CONFIG_ARCH_AT91 is not set 186# CONFIG_ARCH_AT91 is not set
187# CONFIG_ARCH_BCMRING is not set
143# CONFIG_ARCH_CLPS711X is not set 188# CONFIG_ARCH_CLPS711X is not set
189# CONFIG_ARCH_CNS3XXX is not set
144# CONFIG_ARCH_GEMINI is not set 190# CONFIG_ARCH_GEMINI is not set
145# CONFIG_ARCH_EBSA110 is not set 191# CONFIG_ARCH_EBSA110 is not set
146# CONFIG_ARCH_EP93XX is not set 192# CONFIG_ARCH_EP93XX is not set
@@ -156,6 +202,7 @@ CONFIG_DEFAULT_IOSCHED="cfq"
156# CONFIG_ARCH_IXP2000 is not set 202# CONFIG_ARCH_IXP2000 is not set
157# CONFIG_ARCH_IXP4XX is not set 203# CONFIG_ARCH_IXP4XX is not set
158# CONFIG_ARCH_L7200 is not set 204# CONFIG_ARCH_L7200 is not set
205# CONFIG_ARCH_DOVE is not set
159# CONFIG_ARCH_KIRKWOOD is not set 206# CONFIG_ARCH_KIRKWOOD is not set
160# CONFIG_ARCH_LOKI is not set 207# CONFIG_ARCH_LOKI is not set
161# CONFIG_ARCH_MV78XX0 is not set 208# CONFIG_ARCH_MV78XX0 is not set
@@ -164,39 +211,64 @@ CONFIG_DEFAULT_IOSCHED="cfq"
164# CONFIG_ARCH_KS8695 is not set 211# CONFIG_ARCH_KS8695 is not set
165# CONFIG_ARCH_NS9XXX is not set 212# CONFIG_ARCH_NS9XXX is not set
166# CONFIG_ARCH_W90X900 is not set 213# CONFIG_ARCH_W90X900 is not set
214# CONFIG_ARCH_NUC93X is not set
167# CONFIG_ARCH_PNX4008 is not set 215# CONFIG_ARCH_PNX4008 is not set
168# CONFIG_ARCH_PXA is not set 216# CONFIG_ARCH_PXA is not set
169# CONFIG_ARCH_MSM is not set 217# CONFIG_ARCH_MSM is not set
218# CONFIG_ARCH_SHMOBILE is not set
170# CONFIG_ARCH_RPC is not set 219# CONFIG_ARCH_RPC is not set
171# CONFIG_ARCH_SA1100 is not set 220# CONFIG_ARCH_SA1100 is not set
172# CONFIG_ARCH_S3C2410 is not set 221# CONFIG_ARCH_S3C2410 is not set
173# CONFIG_ARCH_S3C64XX is not set 222# CONFIG_ARCH_S3C64XX is not set
223# CONFIG_ARCH_S5P6440 is not set
224# CONFIG_ARCH_S5P6442 is not set
174CONFIG_ARCH_S5PC100=y 225CONFIG_ARCH_S5PC100=y
226# CONFIG_ARCH_S5PV210 is not set
175# CONFIG_ARCH_SHARK is not set 227# CONFIG_ARCH_SHARK is not set
176# CONFIG_ARCH_LH7A40X is not set 228# CONFIG_ARCH_LH7A40X is not set
177# CONFIG_ARCH_U300 is not set 229# CONFIG_ARCH_U300 is not set
230# CONFIG_ARCH_U8500 is not set
231# CONFIG_ARCH_NOMADIK is not set
178# CONFIG_ARCH_DAVINCI is not set 232# CONFIG_ARCH_DAVINCI is not set
179# CONFIG_ARCH_OMAP is not set 233# CONFIG_ARCH_OMAP is not set
180CONFIG_PLAT_S3C=y 234# CONFIG_PLAT_SPEAR is not set
235CONFIG_PLAT_SAMSUNG=y
181 236
182# 237#
183# Boot options 238# Boot options
184# 239#
185# CONFIG_S3C_BOOT_ERROR_RESET is not set 240# CONFIG_S3C_BOOT_ERROR_RESET is not set
186CONFIG_S3C_BOOT_UART_FORCE_FIFO=y 241CONFIG_S3C_BOOT_UART_FORCE_FIFO=y
242CONFIG_S3C_LOWLEVEL_UART_PORT=0
243CONFIG_SAMSUNG_CLKSRC=y
244CONFIG_SAMSUNG_IRQ_VIC_TIMER=y
245CONFIG_SAMSUNG_IRQ_UART=y
246CONFIG_SAMSUNG_GPIOLIB_4BIT=y
247CONFIG_S3C_GPIO_CFG_S3C24XX=y
248CONFIG_S3C_GPIO_CFG_S3C64XX=y
249CONFIG_S3C_GPIO_PULL_UPDOWN=y
250CONFIG_S5P_GPIO_DRVSTR=y
251CONFIG_SAMSUNG_GPIO_EXTRA=0
252CONFIG_S3C_GPIO_SPACE=0
253CONFIG_S3C_GPIO_TRACK=y
254# CONFIG_S3C_ADC is not set
255CONFIG_S3C_DEV_HSMMC=y
256CONFIG_S3C_DEV_HSMMC1=y
257CONFIG_S3C_DEV_HSMMC2=y
258CONFIG_S3C_DEV_I2C1=y
259CONFIG_S3C_DEV_FB=y
260CONFIG_S3C_PL330_DMA=y
187 261
188# 262#
189# Power management 263# Power management
190# 264#
191CONFIG_S3C_LOWLEVEL_UART_PORT=0 265CONFIG_PLAT_S5P=y
192CONFIG_S3C_GPIO_SPACE=0 266CONFIG_S5P_EXT_INT=y
193CONFIG_S3C_GPIO_TRACK=y
194CONFIG_S3C_GPIO_PULL_UPDOWN=y
195CONFIG_PLAT_S5PC1XX=y
196CONFIG_CPU_S5PC100_INIT=y
197CONFIG_CPU_S5PC100_CLOCK=y
198CONFIG_S5PC100_SETUP_I2C0=y
199CONFIG_CPU_S5PC100=y 267CONFIG_CPU_S5PC100=y
268CONFIG_S5PC100_SETUP_FB_24BPP=y
269CONFIG_S5PC100_SETUP_I2C1=y
270CONFIG_S5PC100_SETUP_SDHCI=y
271CONFIG_S5PC100_SETUP_SDHCI_GPIO=y
200CONFIG_MACH_SMDKC100=y 272CONFIG_MACH_SMDKC100=y
201 273
202# 274#
@@ -206,7 +278,7 @@ CONFIG_CPU_32v6K=y
206CONFIG_CPU_V7=y 278CONFIG_CPU_V7=y
207CONFIG_CPU_32v7=y 279CONFIG_CPU_32v7=y
208CONFIG_CPU_ABRT_EV7=y 280CONFIG_CPU_ABRT_EV7=y
209CONFIG_CPU_PABRT_IFAR=y 281CONFIG_CPU_PABRT_V7=y
210CONFIG_CPU_CACHE_V7=y 282CONFIG_CPU_CACHE_V7=y
211CONFIG_CPU_CACHE_VIPT=y 283CONFIG_CPU_CACHE_VIPT=y
212CONFIG_CPU_COPY_V6=y 284CONFIG_CPU_COPY_V6=y
@@ -224,11 +296,15 @@ CONFIG_ARM_THUMB=y
224# CONFIG_CPU_DCACHE_DISABLE is not set 296# CONFIG_CPU_DCACHE_DISABLE is not set
225# CONFIG_CPU_BPREDICT_DISABLE is not set 297# CONFIG_CPU_BPREDICT_DISABLE is not set
226CONFIG_HAS_TLS_REG=y 298CONFIG_HAS_TLS_REG=y
299CONFIG_ARM_L1_CACHE_SHIFT=6
300CONFIG_ARM_DMA_MEM_BUFFERABLE=y
301CONFIG_CPU_HAS_PMU=y
227# CONFIG_ARM_ERRATA_430973 is not set 302# CONFIG_ARM_ERRATA_430973 is not set
228# CONFIG_ARM_ERRATA_458693 is not set 303# CONFIG_ARM_ERRATA_458693 is not set
229# CONFIG_ARM_ERRATA_460075 is not set 304# CONFIG_ARM_ERRATA_460075 is not set
230CONFIG_ARM_VIC=y 305CONFIG_ARM_VIC=y
231CONFIG_ARM_VIC_NR=2 306CONFIG_ARM_VIC_NR=2
307CONFIG_PL330=y
232 308
233# 309#
234# Bus support 310# Bus support
@@ -244,8 +320,11 @@ CONFIG_VMSPLIT_3G=y
244# CONFIG_VMSPLIT_2G is not set 320# CONFIG_VMSPLIT_2G is not set
245# CONFIG_VMSPLIT_1G is not set 321# CONFIG_VMSPLIT_1G is not set
246CONFIG_PAGE_OFFSET=0xC0000000 322CONFIG_PAGE_OFFSET=0xC0000000
323CONFIG_PREEMPT_NONE=y
324# CONFIG_PREEMPT_VOLUNTARY is not set
247# CONFIG_PREEMPT is not set 325# CONFIG_PREEMPT is not set
248CONFIG_HZ=100 326CONFIG_HZ=100
327# CONFIG_THUMB2_KERNEL is not set
249CONFIG_AEABI=y 328CONFIG_AEABI=y
250CONFIG_OABI_COMPAT=y 329CONFIG_OABI_COMPAT=y
251# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set 330# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
@@ -258,12 +337,11 @@ CONFIG_FLATMEM_MANUAL=y
258CONFIG_FLATMEM=y 337CONFIG_FLATMEM=y
259CONFIG_FLAT_NODE_MEM_MAP=y 338CONFIG_FLAT_NODE_MEM_MAP=y
260CONFIG_PAGEFLAGS_EXTENDED=y 339CONFIG_PAGEFLAGS_EXTENDED=y
261CONFIG_SPLIT_PTLOCK_CPUS=4 340CONFIG_SPLIT_PTLOCK_CPUS=999999
262# CONFIG_PHYS_ADDR_T_64BIT is not set 341# CONFIG_PHYS_ADDR_T_64BIT is not set
263CONFIG_ZONE_DMA_FLAG=0 342CONFIG_ZONE_DMA_FLAG=0
264CONFIG_VIRT_TO_BUS=y 343CONFIG_VIRT_TO_BUS=y
265CONFIG_HAVE_MLOCK=y 344# CONFIG_KSM is not set
266CONFIG_HAVE_MLOCKED_PAGE_BIT=y
267CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 345CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
268CONFIG_ALIGNMENT_TRAP=y 346CONFIG_ALIGNMENT_TRAP=y
269# CONFIG_UACCESS_WITH_MEMCPY is not set 347# CONFIG_UACCESS_WITH_MEMCPY is not set
@@ -274,6 +352,7 @@ CONFIG_ALIGNMENT_TRAP=y
274CONFIG_ZBOOT_ROM_TEXT=0 352CONFIG_ZBOOT_ROM_TEXT=0
275CONFIG_ZBOOT_ROM_BSS=0 353CONFIG_ZBOOT_ROM_BSS=0
276CONFIG_CMDLINE="root=/dev/mtdblock2 rootfstype=cramfs init=/linuxrc console=ttySAC2,115200 mem=128M" 354CONFIG_CMDLINE="root=/dev/mtdblock2 rootfstype=cramfs init=/linuxrc console=ttySAC2,115200 mem=128M"
355# CONFIG_CMDLINE_FORCE is not set
277# CONFIG_XIP_KERNEL is not set 356# CONFIG_XIP_KERNEL is not set
278# CONFIG_KEXEC is not set 357# CONFIG_KEXEC is not set
279 358
@@ -317,6 +396,7 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y
317# Generic Driver Options 396# Generic Driver Options
318# 397#
319CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 398CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
399# CONFIG_DEVTMPFS is not set
320CONFIG_STANDALONE=y 400CONFIG_STANDALONE=y
321CONFIG_PREVENT_FIRMWARE_BUILD=y 401CONFIG_PREVENT_FIRMWARE_BUILD=y
322CONFIG_FW_LOADER=y 402CONFIG_FW_LOADER=y
@@ -331,6 +411,10 @@ CONFIG_BLK_DEV=y
331# CONFIG_BLK_DEV_COW_COMMON is not set 411# CONFIG_BLK_DEV_COW_COMMON is not set
332CONFIG_BLK_DEV_LOOP=y 412CONFIG_BLK_DEV_LOOP=y
333# CONFIG_BLK_DEV_CRYPTOLOOP is not set 413# CONFIG_BLK_DEV_CRYPTOLOOP is not set
414
415#
416# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
417#
334CONFIG_BLK_DEV_RAM=y 418CONFIG_BLK_DEV_RAM=y
335CONFIG_BLK_DEV_RAM_COUNT=16 419CONFIG_BLK_DEV_RAM_COUNT=16
336CONFIG_BLK_DEV_RAM_SIZE=8192 420CONFIG_BLK_DEV_RAM_SIZE=8192
@@ -338,9 +422,12 @@ CONFIG_BLK_DEV_RAM_SIZE=8192
338# CONFIG_CDROM_PKTCDVD is not set 422# CONFIG_CDROM_PKTCDVD is not set
339# CONFIG_MG_DISK is not set 423# CONFIG_MG_DISK is not set
340CONFIG_MISC_DEVICES=y 424CONFIG_MISC_DEVICES=y
425# CONFIG_AD525X_DPOT is not set
341# CONFIG_ICS932S401 is not set 426# CONFIG_ICS932S401 is not set
342# CONFIG_ENCLOSURE_SERVICES is not set 427# CONFIG_ENCLOSURE_SERVICES is not set
343# CONFIG_ISL29003 is not set 428# CONFIG_ISL29003 is not set
429# CONFIG_SENSORS_TSL2550 is not set
430# CONFIG_DS1682 is not set
344# CONFIG_C2PORT is not set 431# CONFIG_C2PORT is not set
345 432
346# 433#
@@ -350,18 +437,21 @@ CONFIG_EEPROM_AT24=y
350# CONFIG_EEPROM_LEGACY is not set 437# CONFIG_EEPROM_LEGACY is not set
351# CONFIG_EEPROM_MAX6875 is not set 438# CONFIG_EEPROM_MAX6875 is not set
352# CONFIG_EEPROM_93CX6 is not set 439# CONFIG_EEPROM_93CX6 is not set
440# CONFIG_IWMC3200TOP is not set
353CONFIG_HAVE_IDE=y 441CONFIG_HAVE_IDE=y
354# CONFIG_IDE is not set 442# CONFIG_IDE is not set
355 443
356# 444#
357# SCSI device support 445# SCSI device support
358# 446#
447CONFIG_SCSI_MOD=y
359# CONFIG_RAID_ATTRS is not set 448# CONFIG_RAID_ATTRS is not set
360# CONFIG_SCSI is not set 449# CONFIG_SCSI is not set
361# CONFIG_SCSI_DMA is not set 450# CONFIG_SCSI_DMA is not set
362# CONFIG_SCSI_NETLINK is not set 451# CONFIG_SCSI_NETLINK is not set
363# CONFIG_ATA is not set 452# CONFIG_ATA is not set
364# CONFIG_MD is not set 453# CONFIG_MD is not set
454# CONFIG_PHONE is not set
365 455
366# 456#
367# Input device support 457# Input device support
@@ -369,6 +459,7 @@ CONFIG_HAVE_IDE=y
369CONFIG_INPUT=y 459CONFIG_INPUT=y
370# CONFIG_INPUT_FF_MEMLESS is not set 460# CONFIG_INPUT_FF_MEMLESS is not set
371# CONFIG_INPUT_POLLDEV is not set 461# CONFIG_INPUT_POLLDEV is not set
462# CONFIG_INPUT_SPARSEKMAP is not set
372 463
373# 464#
374# Userland interfaces 465# Userland interfaces
@@ -385,13 +476,19 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
385# Input Device Drivers 476# Input Device Drivers
386# 477#
387CONFIG_INPUT_KEYBOARD=y 478CONFIG_INPUT_KEYBOARD=y
479# CONFIG_KEYBOARD_ADP5588 is not set
388CONFIG_KEYBOARD_ATKBD=y 480CONFIG_KEYBOARD_ATKBD=y
389# CONFIG_KEYBOARD_SUNKBD is not set 481# CONFIG_QT2160 is not set
390# CONFIG_KEYBOARD_LKKBD is not set 482# CONFIG_KEYBOARD_LKKBD is not set
391# CONFIG_KEYBOARD_XTKBD is not set 483# CONFIG_KEYBOARD_GPIO is not set
484# CONFIG_KEYBOARD_TCA6416 is not set
485# CONFIG_KEYBOARD_MATRIX is not set
486# CONFIG_KEYBOARD_MAX7359 is not set
392# CONFIG_KEYBOARD_NEWTON is not set 487# CONFIG_KEYBOARD_NEWTON is not set
488# CONFIG_KEYBOARD_OPENCORES is not set
393# CONFIG_KEYBOARD_STOWAWAY is not set 489# CONFIG_KEYBOARD_STOWAWAY is not set
394# CONFIG_KEYBOARD_GPIO is not set 490# CONFIG_KEYBOARD_SUNKBD is not set
491# CONFIG_KEYBOARD_XTKBD is not set
395CONFIG_INPUT_MOUSE=y 492CONFIG_INPUT_MOUSE=y
396CONFIG_MOUSE_PS2=y 493CONFIG_MOUSE_PS2=y
397CONFIG_MOUSE_PS2_ALPS=y 494CONFIG_MOUSE_PS2_ALPS=y
@@ -399,6 +496,7 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
399CONFIG_MOUSE_PS2_SYNAPTICS=y 496CONFIG_MOUSE_PS2_SYNAPTICS=y
400CONFIG_MOUSE_PS2_TRACKPOINT=y 497CONFIG_MOUSE_PS2_TRACKPOINT=y
401# CONFIG_MOUSE_PS2_ELANTECH is not set 498# CONFIG_MOUSE_PS2_ELANTECH is not set
499# CONFIG_MOUSE_PS2_SENTELIC is not set
402# CONFIG_MOUSE_PS2_TOUCHKIT is not set 500# CONFIG_MOUSE_PS2_TOUCHKIT is not set
403# CONFIG_MOUSE_SERIAL is not set 501# CONFIG_MOUSE_SERIAL is not set
404# CONFIG_MOUSE_APPLETOUCH is not set 502# CONFIG_MOUSE_APPLETOUCH is not set
@@ -418,6 +516,7 @@ CONFIG_SERIO=y
418CONFIG_SERIO_SERPORT=y 516CONFIG_SERIO_SERPORT=y
419CONFIG_SERIO_LIBPS2=y 517CONFIG_SERIO_LIBPS2=y
420# CONFIG_SERIO_RAW is not set 518# CONFIG_SERIO_RAW is not set
519# CONFIG_SERIO_ALTERA_PS2 is not set
421# CONFIG_GAMEPORT is not set 520# CONFIG_GAMEPORT is not set
422 521
423# 522#
@@ -444,11 +543,16 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4
444# Non-8250 serial port support 543# Non-8250 serial port support
445# 544#
446CONFIG_SERIAL_SAMSUNG=y 545CONFIG_SERIAL_SAMSUNG=y
447CONFIG_SERIAL_SAMSUNG_UARTS=3 546CONFIG_SERIAL_SAMSUNG_UARTS_4=y
547CONFIG_SERIAL_SAMSUNG_UARTS=4
448# CONFIG_SERIAL_SAMSUNG_DEBUG is not set 548# CONFIG_SERIAL_SAMSUNG_DEBUG is not set
449CONFIG_SERIAL_SAMSUNG_CONSOLE=y 549CONFIG_SERIAL_SAMSUNG_CONSOLE=y
550CONFIG_SERIAL_S3C6400=y
450CONFIG_SERIAL_CORE=y 551CONFIG_SERIAL_CORE=y
451CONFIG_SERIAL_CORE_CONSOLE=y 552CONFIG_SERIAL_CORE_CONSOLE=y
553# CONFIG_SERIAL_TIMBERDALE is not set
554# CONFIG_SERIAL_ALTERA_JTAGUART is not set
555# CONFIG_SERIAL_ALTERA_UART is not set
452CONFIG_UNIX98_PTYS=y 556CONFIG_UNIX98_PTYS=y
453# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 557# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
454CONFIG_LEGACY_PTYS=y 558CONFIG_LEGACY_PTYS=y
@@ -461,6 +565,7 @@ CONFIG_HW_RANDOM=y
461# CONFIG_TCG_TPM is not set 565# CONFIG_TCG_TPM is not set
462CONFIG_I2C=y 566CONFIG_I2C=y
463CONFIG_I2C_BOARDINFO=y 567CONFIG_I2C_BOARDINFO=y
568CONFIG_I2C_COMPAT=y
464CONFIG_I2C_CHARDEV=y 569CONFIG_I2C_CHARDEV=y
465CONFIG_I2C_HELPER_AUTO=y 570CONFIG_I2C_HELPER_AUTO=y
466 571
@@ -471,9 +576,11 @@ CONFIG_I2C_HELPER_AUTO=y
471# 576#
472# I2C system bus drivers (mostly embedded / system-on-chip) 577# I2C system bus drivers (mostly embedded / system-on-chip)
473# 578#
579# CONFIG_I2C_DESIGNWARE is not set
474# CONFIG_I2C_GPIO is not set 580# CONFIG_I2C_GPIO is not set
475# CONFIG_I2C_OCORES is not set 581# CONFIG_I2C_OCORES is not set
476# CONFIG_I2C_SIMTEC is not set 582# CONFIG_I2C_SIMTEC is not set
583# CONFIG_I2C_XILINX is not set
477 584
478# 585#
479# External I2C/SMBus adapter drivers 586# External I2C/SMBus adapter drivers
@@ -486,20 +593,15 @@ CONFIG_I2C_HELPER_AUTO=y
486# 593#
487# CONFIG_I2C_PCA_PLATFORM is not set 594# CONFIG_I2C_PCA_PLATFORM is not set
488# CONFIG_I2C_STUB is not set 595# CONFIG_I2C_STUB is not set
489
490#
491# Miscellaneous I2C Chip support
492#
493# CONFIG_DS1682 is not set
494# CONFIG_SENSORS_PCF8574 is not set
495# CONFIG_PCF8575 is not set
496# CONFIG_SENSORS_PCA9539 is not set
497# CONFIG_SENSORS_TSL2550 is not set
498# CONFIG_I2C_DEBUG_CORE is not set 596# CONFIG_I2C_DEBUG_CORE is not set
499# CONFIG_I2C_DEBUG_ALGO is not set 597# CONFIG_I2C_DEBUG_ALGO is not set
500# CONFIG_I2C_DEBUG_BUS is not set 598# CONFIG_I2C_DEBUG_BUS is not set
501# CONFIG_I2C_DEBUG_CHIP is not set
502# CONFIG_SPI is not set 599# CONFIG_SPI is not set
600
601#
602# PPS support
603#
604# CONFIG_PPS is not set
503CONFIG_ARCH_REQUIRE_GPIOLIB=y 605CONFIG_ARCH_REQUIRE_GPIOLIB=y
504CONFIG_GPIOLIB=y 606CONFIG_GPIOLIB=y
505# CONFIG_DEBUG_GPIO is not set 607# CONFIG_DEBUG_GPIO is not set
@@ -508,13 +610,16 @@ CONFIG_GPIOLIB=y
508# 610#
509# Memory mapped GPIO expanders: 611# Memory mapped GPIO expanders:
510# 612#
613# CONFIG_GPIO_IT8761E is not set
511 614
512# 615#
513# I2C GPIO expanders: 616# I2C GPIO expanders:
514# 617#
618# CONFIG_GPIO_MAX7300 is not set
515# CONFIG_GPIO_MAX732X is not set 619# CONFIG_GPIO_MAX732X is not set
516# CONFIG_GPIO_PCA953X is not set 620# CONFIG_GPIO_PCA953X is not set
517# CONFIG_GPIO_PCF857X is not set 621# CONFIG_GPIO_PCF857X is not set
622# CONFIG_GPIO_ADP5588 is not set
518 623
519# 624#
520# PCI GPIO expanders: 625# PCI GPIO expanders:
@@ -523,10 +628,19 @@ CONFIG_GPIOLIB=y
523# 628#
524# SPI GPIO expanders: 629# SPI GPIO expanders:
525# 630#
631
632#
633# AC97 GPIO expanders:
634#
526# CONFIG_W1 is not set 635# CONFIG_W1 is not set
527# CONFIG_POWER_SUPPLY is not set 636# CONFIG_POWER_SUPPLY is not set
528CONFIG_HWMON=y 637CONFIG_HWMON=y
529# CONFIG_HWMON_VID is not set 638# CONFIG_HWMON_VID is not set
639# CONFIG_HWMON_DEBUG_CHIP is not set
640
641#
642# Native drivers
643#
530# CONFIG_SENSORS_AD7414 is not set 644# CONFIG_SENSORS_AD7414 is not set
531# CONFIG_SENSORS_AD7418 is not set 645# CONFIG_SENSORS_AD7418 is not set
532# CONFIG_SENSORS_ADM1021 is not set 646# CONFIG_SENSORS_ADM1021 is not set
@@ -535,10 +649,11 @@ CONFIG_HWMON=y
535# CONFIG_SENSORS_ADM1029 is not set 649# CONFIG_SENSORS_ADM1029 is not set
536# CONFIG_SENSORS_ADM1031 is not set 650# CONFIG_SENSORS_ADM1031 is not set
537# CONFIG_SENSORS_ADM9240 is not set 651# CONFIG_SENSORS_ADM9240 is not set
652# CONFIG_SENSORS_ADT7411 is not set
538# CONFIG_SENSORS_ADT7462 is not set 653# CONFIG_SENSORS_ADT7462 is not set
539# CONFIG_SENSORS_ADT7470 is not set 654# CONFIG_SENSORS_ADT7470 is not set
540# CONFIG_SENSORS_ADT7473 is not set
541# CONFIG_SENSORS_ADT7475 is not set 655# CONFIG_SENSORS_ADT7475 is not set
656# CONFIG_SENSORS_ASC7621 is not set
542# CONFIG_SENSORS_ATXP1 is not set 657# CONFIG_SENSORS_ATXP1 is not set
543# CONFIG_SENSORS_DS1621 is not set 658# CONFIG_SENSORS_DS1621 is not set
544# CONFIG_SENSORS_F71805F is not set 659# CONFIG_SENSORS_F71805F is not set
@@ -549,6 +664,7 @@ CONFIG_HWMON=y
549# CONFIG_SENSORS_GL520SM is not set 664# CONFIG_SENSORS_GL520SM is not set
550# CONFIG_SENSORS_IT87 is not set 665# CONFIG_SENSORS_IT87 is not set
551# CONFIG_SENSORS_LM63 is not set 666# CONFIG_SENSORS_LM63 is not set
667# CONFIG_SENSORS_LM73 is not set
552# CONFIG_SENSORS_LM75 is not set 668# CONFIG_SENSORS_LM75 is not set
553# CONFIG_SENSORS_LM77 is not set 669# CONFIG_SENSORS_LM77 is not set
554# CONFIG_SENSORS_LM78 is not set 670# CONFIG_SENSORS_LM78 is not set
@@ -573,8 +689,10 @@ CONFIG_HWMON=y
573# CONFIG_SENSORS_SMSC47M192 is not set 689# CONFIG_SENSORS_SMSC47M192 is not set
574# CONFIG_SENSORS_SMSC47B397 is not set 690# CONFIG_SENSORS_SMSC47B397 is not set
575# CONFIG_SENSORS_ADS7828 is not set 691# CONFIG_SENSORS_ADS7828 is not set
692# CONFIG_SENSORS_AMC6821 is not set
576# CONFIG_SENSORS_THMC50 is not set 693# CONFIG_SENSORS_THMC50 is not set
577# CONFIG_SENSORS_TMP401 is not set 694# CONFIG_SENSORS_TMP401 is not set
695# CONFIG_SENSORS_TMP421 is not set
578# CONFIG_SENSORS_VT1211 is not set 696# CONFIG_SENSORS_VT1211 is not set
579# CONFIG_SENSORS_W83781D is not set 697# CONFIG_SENSORS_W83781D is not set
580# CONFIG_SENSORS_W83791D is not set 698# CONFIG_SENSORS_W83791D is not set
@@ -584,9 +702,8 @@ CONFIG_HWMON=y
584# CONFIG_SENSORS_W83L786NG is not set 702# CONFIG_SENSORS_W83L786NG is not set
585# CONFIG_SENSORS_W83627HF is not set 703# CONFIG_SENSORS_W83627HF is not set
586# CONFIG_SENSORS_W83627EHF is not set 704# CONFIG_SENSORS_W83627EHF is not set
587# CONFIG_HWMON_DEBUG_CHIP is not set 705# CONFIG_SENSORS_LIS3_I2C is not set
588# CONFIG_THERMAL is not set 706# CONFIG_THERMAL is not set
589# CONFIG_THERMAL_HWMON is not set
590# CONFIG_WATCHDOG is not set 707# CONFIG_WATCHDOG is not set
591CONFIG_SSB_POSSIBLE=y 708CONFIG_SSB_POSSIBLE=y
592 709
@@ -599,10 +716,12 @@ CONFIG_SSB_POSSIBLE=y
599# Multifunction device drivers 716# Multifunction device drivers
600# 717#
601# CONFIG_MFD_CORE is not set 718# CONFIG_MFD_CORE is not set
719# CONFIG_MFD_88PM860X is not set
602# CONFIG_MFD_SM501 is not set 720# CONFIG_MFD_SM501 is not set
603# CONFIG_MFD_ASIC3 is not set 721# CONFIG_MFD_ASIC3 is not set
604# CONFIG_HTC_EGPIO is not set 722# CONFIG_HTC_EGPIO is not set
605# CONFIG_HTC_PASIC3 is not set 723# CONFIG_HTC_PASIC3 is not set
724# CONFIG_HTC_I2CPLD is not set
606# CONFIG_TPS65010 is not set 725# CONFIG_TPS65010 is not set
607# CONFIG_TWL4030_CORE is not set 726# CONFIG_TWL4030_CORE is not set
608# CONFIG_MFD_TMIO is not set 727# CONFIG_MFD_TMIO is not set
@@ -610,10 +729,15 @@ CONFIG_SSB_POSSIBLE=y
610# CONFIG_MFD_TC6387XB is not set 729# CONFIG_MFD_TC6387XB is not set
611# CONFIG_MFD_TC6393XB is not set 730# CONFIG_MFD_TC6393XB is not set
612# CONFIG_PMIC_DA903X is not set 731# CONFIG_PMIC_DA903X is not set
732# CONFIG_PMIC_ADP5520 is not set
733# CONFIG_MFD_MAX8925 is not set
613# CONFIG_MFD_WM8400 is not set 734# CONFIG_MFD_WM8400 is not set
735# CONFIG_MFD_WM831X is not set
614# CONFIG_MFD_WM8350_I2C is not set 736# CONFIG_MFD_WM8350_I2C is not set
737# CONFIG_MFD_WM8994 is not set
615# CONFIG_MFD_PCF50633 is not set 738# CONFIG_MFD_PCF50633 is not set
616# CONFIG_AB3100_CORE is not set 739# CONFIG_AB3100_CORE is not set
740# CONFIG_REGULATOR is not set
617# CONFIG_MEDIA_SUPPORT is not set 741# CONFIG_MEDIA_SUPPORT is not set
618 742
619# 743#
@@ -637,7 +761,6 @@ CONFIG_DUMMY_CONSOLE=y
637# CONFIG_SOUND is not set 761# CONFIG_SOUND is not set
638CONFIG_HID_SUPPORT=y 762CONFIG_HID_SUPPORT=y
639CONFIG_HID=y 763CONFIG_HID=y
640CONFIG_HID_DEBUG=y
641# CONFIG_HIDRAW is not set 764# CONFIG_HIDRAW is not set
642# CONFIG_HID_PID is not set 765# CONFIG_HID_PID is not set
643 766
@@ -680,13 +803,12 @@ CONFIG_SDIO_UART=y
680CONFIG_MMC_SDHCI=y 803CONFIG_MMC_SDHCI=y
681# CONFIG_MMC_SDHCI_PLTFM is not set 804# CONFIG_MMC_SDHCI_PLTFM is not set
682# CONFIG_MEMSTICK is not set 805# CONFIG_MEMSTICK is not set
683# CONFIG_ACCESSIBILITY is not set
684# CONFIG_NEW_LEDS is not set 806# CONFIG_NEW_LEDS is not set
807# CONFIG_ACCESSIBILITY is not set
685CONFIG_RTC_LIB=y 808CONFIG_RTC_LIB=y
686# CONFIG_RTC_CLASS is not set 809# CONFIG_RTC_CLASS is not set
687# CONFIG_DMADEVICES is not set 810# CONFIG_DMADEVICES is not set
688# CONFIG_AUXDISPLAY is not set 811# CONFIG_AUXDISPLAY is not set
689# CONFIG_REGULATOR is not set
690# CONFIG_UIO is not set 812# CONFIG_UIO is not set
691# CONFIG_STAGING is not set 813# CONFIG_STAGING is not set
692 814
@@ -710,6 +832,7 @@ CONFIG_FS_POSIX_ACL=y
710# CONFIG_XFS_FS is not set 832# CONFIG_XFS_FS is not set
711# CONFIG_GFS2_FS is not set 833# CONFIG_GFS2_FS is not set
712# CONFIG_BTRFS_FS is not set 834# CONFIG_BTRFS_FS is not set
835# CONFIG_NILFS2_FS is not set
713CONFIG_FILE_LOCKING=y 836CONFIG_FILE_LOCKING=y
714CONFIG_FSNOTIFY=y 837CONFIG_FSNOTIFY=y
715CONFIG_DNOTIFY=y 838CONFIG_DNOTIFY=y
@@ -758,6 +881,7 @@ CONFIG_MISC_FILESYSTEMS=y
758# CONFIG_BEFS_FS is not set 881# CONFIG_BEFS_FS is not set
759# CONFIG_BFS_FS is not set 882# CONFIG_BFS_FS is not set
760# CONFIG_EFS_FS is not set 883# CONFIG_EFS_FS is not set
884# CONFIG_LOGFS is not set
761CONFIG_CRAMFS=y 885CONFIG_CRAMFS=y
762# CONFIG_SQUASHFS is not set 886# CONFIG_SQUASHFS is not set
763# CONFIG_VXFS_FS is not set 887# CONFIG_VXFS_FS is not set
@@ -772,7 +896,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y
772CONFIG_ROMFS_ON_BLOCK=y 896CONFIG_ROMFS_ON_BLOCK=y
773# CONFIG_SYSV_FS is not set 897# CONFIG_SYSV_FS is not set
774# CONFIG_UFS_FS is not set 898# CONFIG_UFS_FS is not set
775# CONFIG_NILFS2_FS is not set
776 899
777# 900#
778# Partition Types 901# Partition Types
@@ -789,6 +912,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
789CONFIG_ENABLE_MUST_CHECK=y 912CONFIG_ENABLE_MUST_CHECK=y
790CONFIG_FRAME_WARN=1024 913CONFIG_FRAME_WARN=1024
791CONFIG_MAGIC_SYSRQ=y 914CONFIG_MAGIC_SYSRQ=y
915# CONFIG_STRIP_ASM_SYMS is not set
792# CONFIG_UNUSED_SYMBOLS is not set 916# CONFIG_UNUSED_SYMBOLS is not set
793# CONFIG_DEBUG_FS is not set 917# CONFIG_DEBUG_FS is not set
794# CONFIG_HEADERS_CHECK is not set 918# CONFIG_HEADERS_CHECK is not set
@@ -826,11 +950,13 @@ CONFIG_DEBUG_MEMORY_INIT=y
826# CONFIG_DEBUG_LIST is not set 950# CONFIG_DEBUG_LIST is not set
827# CONFIG_DEBUG_SG is not set 951# CONFIG_DEBUG_SG is not set
828# CONFIG_DEBUG_NOTIFIERS is not set 952# CONFIG_DEBUG_NOTIFIERS is not set
953# CONFIG_DEBUG_CREDENTIALS is not set
829# CONFIG_BOOT_PRINTK_DELAY is not set 954# CONFIG_BOOT_PRINTK_DELAY is not set
830# CONFIG_RCU_TORTURE_TEST is not set 955# CONFIG_RCU_TORTURE_TEST is not set
831# CONFIG_RCU_CPU_STALL_DETECTOR is not set 956# CONFIG_RCU_CPU_STALL_DETECTOR is not set
832# CONFIG_BACKTRACE_SELF_TEST is not set 957# CONFIG_BACKTRACE_SELF_TEST is not set
833# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 958# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
959# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
834# CONFIG_FAULT_INJECTION is not set 960# CONFIG_FAULT_INJECTION is not set
835# CONFIG_LATENCYTOP is not set 961# CONFIG_LATENCYTOP is not set
836CONFIG_SYSCTL_SYSCALL_CHECK=y 962CONFIG_SYSCTL_SYSCALL_CHECK=y
@@ -839,6 +965,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y
839CONFIG_TRACING_SUPPORT=y 965CONFIG_TRACING_SUPPORT=y
840CONFIG_FTRACE=y 966CONFIG_FTRACE=y
841# CONFIG_FUNCTION_TRACER is not set 967# CONFIG_FUNCTION_TRACER is not set
968# CONFIG_IRQSOFF_TRACER is not set
842# CONFIG_SCHED_TRACER is not set 969# CONFIG_SCHED_TRACER is not set
843# CONFIG_ENABLE_DEFAULT_TRACERS is not set 970# CONFIG_ENABLE_DEFAULT_TRACERS is not set
844# CONFIG_BOOT_TRACER is not set 971# CONFIG_BOOT_TRACER is not set
@@ -849,6 +976,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
849# CONFIG_KMEMTRACE is not set 976# CONFIG_KMEMTRACE is not set
850# CONFIG_WORKQUEUE_TRACER is not set 977# CONFIG_WORKQUEUE_TRACER is not set
851# CONFIG_BLK_DEV_IO_TRACE is not set 978# CONFIG_BLK_DEV_IO_TRACE is not set
979# CONFIG_ATOMIC64_SELFTEST is not set
852# CONFIG_SAMPLES is not set 980# CONFIG_SAMPLES is not set
853CONFIG_HAVE_ARCH_KGDB=y 981CONFIG_HAVE_ARCH_KGDB=y
854# CONFIG_KGDB is not set 982# CONFIG_KGDB is not set
@@ -857,8 +985,9 @@ CONFIG_DEBUG_USER=y
857CONFIG_DEBUG_ERRORS=y 985CONFIG_DEBUG_ERRORS=y
858# CONFIG_DEBUG_STACK_USAGE is not set 986# CONFIG_DEBUG_STACK_USAGE is not set
859CONFIG_DEBUG_LL=y 987CONFIG_DEBUG_LL=y
988# CONFIG_EARLY_PRINTK is not set
860# CONFIG_DEBUG_ICEDCC is not set 989# CONFIG_DEBUG_ICEDCC is not set
861CONFIG_DEBUG_S3C_PORT=y 990# CONFIG_OC_ETM is not set
862CONFIG_DEBUG_S3C_UART=0 991CONFIG_DEBUG_S3C_UART=0
863 992
864# 993#
@@ -867,7 +996,11 @@ CONFIG_DEBUG_S3C_UART=0
867# CONFIG_KEYS is not set 996# CONFIG_KEYS is not set
868# CONFIG_SECURITY is not set 997# CONFIG_SECURITY is not set
869# CONFIG_SECURITYFS is not set 998# CONFIG_SECURITYFS is not set
870# CONFIG_SECURITY_FILE_CAPABILITIES is not set 999# CONFIG_DEFAULT_SECURITY_SELINUX is not set
1000# CONFIG_DEFAULT_SECURITY_SMACK is not set
1001# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
1002CONFIG_DEFAULT_SECURITY_DAC=y
1003CONFIG_DEFAULT_SECURITY=""
871# CONFIG_CRYPTO is not set 1004# CONFIG_CRYPTO is not set
872# CONFIG_BINARY_PRINTF is not set 1005# CONFIG_BINARY_PRINTF is not set
873 1006
@@ -884,8 +1017,10 @@ CONFIG_CRC32=y
884# CONFIG_CRC7 is not set 1017# CONFIG_CRC7 is not set
885# CONFIG_LIBCRC32C is not set 1018# CONFIG_LIBCRC32C is not set
886CONFIG_ZLIB_INFLATE=y 1019CONFIG_ZLIB_INFLATE=y
1020CONFIG_LZO_DECOMPRESS=y
887CONFIG_DECOMPRESS_GZIP=y 1021CONFIG_DECOMPRESS_GZIP=y
888CONFIG_DECOMPRESS_BZIP2=y 1022CONFIG_DECOMPRESS_BZIP2=y
889CONFIG_DECOMPRESS_LZMA=y 1023CONFIG_DECOMPRESS_LZMA=y
1024CONFIG_DECOMPRESS_LZO=y
890CONFIG_HAS_IOMEM=y 1025CONFIG_HAS_IOMEM=y
891CONFIG_HAS_DMA=y 1026CONFIG_HAS_DMA=y
diff --git a/arch/arm/configs/s5pc110_defconfig b/arch/arm/configs/s5pc110_defconfig
index 796cb78498c3..c4de360b0f69 100644
--- a/arch/arm/configs/s5pc110_defconfig
+++ b/arch/arm/configs/s5pc110_defconfig
@@ -1,11 +1,13 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.34 3# Linux kernel version: 2.6.34
4# Sat May 22 03:18:21 2010 4# Wed May 26 19:04:37 2010
5# 5#
6CONFIG_ARM=y 6CONFIG_ARM=y
7CONFIG_SYS_SUPPORTS_APM_EMULATION=y 7CONFIG_SYS_SUPPORTS_APM_EMULATION=y
8CONFIG_GENERIC_GPIO=y 8CONFIG_GENERIC_GPIO=y
9CONFIG_GENERIC_TIME=y
10CONFIG_ARCH_USES_GETTIMEOFFSET=y
9CONFIG_HAVE_PROC_CPU=y 11CONFIG_HAVE_PROC_CPU=y
10CONFIG_NO_IOPORT=y 12CONFIG_NO_IOPORT=y
11CONFIG_GENERIC_HARDIRQS=y 13CONFIG_GENERIC_HARDIRQS=y
@@ -35,6 +37,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
35CONFIG_LOCALVERSION="" 37CONFIG_LOCALVERSION=""
36CONFIG_LOCALVERSION_AUTO=y 38CONFIG_LOCALVERSION_AUTO=y
37CONFIG_HAVE_KERNEL_GZIP=y 39CONFIG_HAVE_KERNEL_GZIP=y
40CONFIG_HAVE_KERNEL_LZMA=y
38CONFIG_HAVE_KERNEL_LZO=y 41CONFIG_HAVE_KERNEL_LZO=y
39CONFIG_KERNEL_GZIP=y 42CONFIG_KERNEL_GZIP=y
40# CONFIG_KERNEL_BZIP2 is not set 43# CONFIG_KERNEL_BZIP2 is not set
@@ -180,9 +183,11 @@ CONFIG_MMU=y
180# CONFIG_ARCH_INTEGRATOR is not set 183# CONFIG_ARCH_INTEGRATOR is not set
181# CONFIG_ARCH_REALVIEW is not set 184# CONFIG_ARCH_REALVIEW is not set
182# CONFIG_ARCH_VERSATILE is not set 185# CONFIG_ARCH_VERSATILE is not set
186# CONFIG_ARCH_VEXPRESS is not set
183# CONFIG_ARCH_AT91 is not set 187# CONFIG_ARCH_AT91 is not set
184# CONFIG_ARCH_BCMRING is not set 188# CONFIG_ARCH_BCMRING is not set
185# CONFIG_ARCH_CLPS711X is not set 189# CONFIG_ARCH_CLPS711X is not set
190# CONFIG_ARCH_CNS3XXX is not set
186# CONFIG_ARCH_GEMINI is not set 191# CONFIG_ARCH_GEMINI is not set
187# CONFIG_ARCH_EBSA110 is not set 192# CONFIG_ARCH_EBSA110 is not set
188# CONFIG_ARCH_EP93XX is not set 193# CONFIG_ARCH_EP93XX is not set
@@ -218,7 +223,7 @@ CONFIG_MMU=y
218# CONFIG_ARCH_S3C64XX is not set 223# CONFIG_ARCH_S3C64XX is not set
219# CONFIG_ARCH_S5P6440 is not set 224# CONFIG_ARCH_S5P6440 is not set
220# CONFIG_ARCH_S5P6442 is not set 225# CONFIG_ARCH_S5P6442 is not set
221# CONFIG_ARCH_S5PC1XX is not set 226# CONFIG_ARCH_S5PC100 is not set
222CONFIG_ARCH_S5PV210=y 227CONFIG_ARCH_S5PV210=y
223# CONFIG_ARCH_SHARK is not set 228# CONFIG_ARCH_SHARK is not set
224# CONFIG_ARCH_LH7A40X is not set 229# CONFIG_ARCH_LH7A40X is not set
@@ -227,6 +232,7 @@ CONFIG_ARCH_S5PV210=y
227# CONFIG_ARCH_NOMADIK is not set 232# CONFIG_ARCH_NOMADIK is not set
228# CONFIG_ARCH_DAVINCI is not set 233# CONFIG_ARCH_DAVINCI is not set
229# CONFIG_ARCH_OMAP is not set 234# CONFIG_ARCH_OMAP is not set
235# CONFIG_PLAT_SPEAR is not set
230CONFIG_PLAT_SAMSUNG=y 236CONFIG_PLAT_SAMSUNG=y
231 237
232# 238#
@@ -242,16 +248,22 @@ CONFIG_SAMSUNG_GPIOLIB_4BIT=y
242CONFIG_S3C_GPIO_CFG_S3C24XX=y 248CONFIG_S3C_GPIO_CFG_S3C24XX=y
243CONFIG_S3C_GPIO_CFG_S3C64XX=y 249CONFIG_S3C_GPIO_CFG_S3C64XX=y
244CONFIG_S3C_GPIO_PULL_UPDOWN=y 250CONFIG_S3C_GPIO_PULL_UPDOWN=y
251CONFIG_S5P_GPIO_DRVSTR=y
245CONFIG_SAMSUNG_GPIO_EXTRA=0 252CONFIG_SAMSUNG_GPIO_EXTRA=0
246CONFIG_S3C_GPIO_SPACE=0 253CONFIG_S3C_GPIO_SPACE=0
247CONFIG_S3C_GPIO_TRACK=y 254CONFIG_S3C_GPIO_TRACK=y
248# CONFIG_S3C_ADC is not set 255# CONFIG_S3C_ADC is not set
256CONFIG_S3C_DEV_WDT=y
257CONFIG_S3C_PL330_DMA=y
249 258
250# 259#
251# Power management 260# Power management
252# 261#
253CONFIG_PLAT_S5P=y 262CONFIG_PLAT_S5P=y
263CONFIG_S5P_EXT_INT=y
254CONFIG_CPU_S5PV210=y 264CONFIG_CPU_S5PV210=y
265# CONFIG_MACH_AQUILA is not set
266# CONFIG_MACH_GONI is not set
255# CONFIG_MACH_SMDKV210 is not set 267# CONFIG_MACH_SMDKV210 is not set
256CONFIG_MACH_SMDKC110=y 268CONFIG_MACH_SMDKC110=y
257 269
@@ -281,12 +293,14 @@ CONFIG_ARM_THUMB=y
281# CONFIG_CPU_BPREDICT_DISABLE is not set 293# CONFIG_CPU_BPREDICT_DISABLE is not set
282CONFIG_HAS_TLS_REG=y 294CONFIG_HAS_TLS_REG=y
283CONFIG_ARM_L1_CACHE_SHIFT=6 295CONFIG_ARM_L1_CACHE_SHIFT=6
296CONFIG_ARM_DMA_MEM_BUFFERABLE=y
284CONFIG_CPU_HAS_PMU=y 297CONFIG_CPU_HAS_PMU=y
285# CONFIG_ARM_ERRATA_430973 is not set 298# CONFIG_ARM_ERRATA_430973 is not set
286# CONFIG_ARM_ERRATA_458693 is not set 299# CONFIG_ARM_ERRATA_458693 is not set
287# CONFIG_ARM_ERRATA_460075 is not set 300# CONFIG_ARM_ERRATA_460075 is not set
288CONFIG_ARM_VIC=y 301CONFIG_ARM_VIC=y
289CONFIG_ARM_VIC_NR=2 302CONFIG_ARM_VIC_NR=2
303CONFIG_PL330=y
290 304
291# 305#
292# Bus support 306# Bus support
@@ -335,6 +349,7 @@ CONFIG_ALIGNMENT_TRAP=y
335CONFIG_ZBOOT_ROM_TEXT=0 349CONFIG_ZBOOT_ROM_TEXT=0
336CONFIG_ZBOOT_ROM_BSS=0 350CONFIG_ZBOOT_ROM_BSS=0
337CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc" 351CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc"
352# CONFIG_CMDLINE_FORCE is not set
338# CONFIG_XIP_KERNEL is not set 353# CONFIG_XIP_KERNEL is not set
339# CONFIG_KEXEC is not set 354# CONFIG_KEXEC is not set
340 355
@@ -481,6 +496,7 @@ CONFIG_INPUT_EVDEV=y
481CONFIG_INPUT_TOUCHSCREEN=y 496CONFIG_INPUT_TOUCHSCREEN=y
482# CONFIG_TOUCHSCREEN_AD7879 is not set 497# CONFIG_TOUCHSCREEN_AD7879 is not set
483# CONFIG_TOUCHSCREEN_DYNAPRO is not set 498# CONFIG_TOUCHSCREEN_DYNAPRO is not set
499# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
484# CONFIG_TOUCHSCREEN_FUJITSU is not set 500# CONFIG_TOUCHSCREEN_FUJITSU is not set
485# CONFIG_TOUCHSCREEN_GUNZE is not set 501# CONFIG_TOUCHSCREEN_GUNZE is not set
486# CONFIG_TOUCHSCREEN_ELO is not set 502# CONFIG_TOUCHSCREEN_ELO is not set
@@ -536,6 +552,8 @@ CONFIG_SERIAL_S5PV210=y
536CONFIG_SERIAL_CORE=y 552CONFIG_SERIAL_CORE=y
537CONFIG_SERIAL_CORE_CONSOLE=y 553CONFIG_SERIAL_CORE_CONSOLE=y
538# CONFIG_SERIAL_TIMBERDALE is not set 554# CONFIG_SERIAL_TIMBERDALE is not set
555# CONFIG_SERIAL_ALTERA_JTAGUART is not set
556# CONFIG_SERIAL_ALTERA_UART is not set
539CONFIG_UNIX98_PTYS=y 557CONFIG_UNIX98_PTYS=y
540# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 558# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
541CONFIG_LEGACY_PTYS=y 559CONFIG_LEGACY_PTYS=y
@@ -583,6 +601,7 @@ CONFIG_GPIOLIB=y
583# CONFIG_HWMON is not set 601# CONFIG_HWMON is not set
584# CONFIG_THERMAL is not set 602# CONFIG_THERMAL is not set
585# CONFIG_WATCHDOG is not set 603# CONFIG_WATCHDOG is not set
604CONFIG_HAVE_S3C2410_WATCHDOG=y
586CONFIG_SSB_POSSIBLE=y 605CONFIG_SSB_POSSIBLE=y
587 606
588# 607#
@@ -635,10 +654,6 @@ CONFIG_RTC_LIB=y
635# CONFIG_DMADEVICES is not set 654# CONFIG_DMADEVICES is not set
636# CONFIG_AUXDISPLAY is not set 655# CONFIG_AUXDISPLAY is not set
637# CONFIG_UIO is not set 656# CONFIG_UIO is not set
638
639#
640# TI VLYNQ
641#
642# CONFIG_STAGING is not set 657# CONFIG_STAGING is not set
643 658
644# 659#
@@ -847,6 +862,8 @@ CONFIG_HAVE_FUNCTION_TRACER=y
847CONFIG_TRACING_SUPPORT=y 862CONFIG_TRACING_SUPPORT=y
848CONFIG_FTRACE=y 863CONFIG_FTRACE=y
849# CONFIG_FUNCTION_TRACER is not set 864# CONFIG_FUNCTION_TRACER is not set
865# CONFIG_IRQSOFF_TRACER is not set
866# CONFIG_PREEMPT_TRACER is not set
850# CONFIG_SCHED_TRACER is not set 867# CONFIG_SCHED_TRACER is not set
851# CONFIG_ENABLE_DEFAULT_TRACERS is not set 868# CONFIG_ENABLE_DEFAULT_TRACERS is not set
852# CONFIG_BOOT_TRACER is not set 869# CONFIG_BOOT_TRACER is not set
@@ -857,6 +874,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
857# CONFIG_KMEMTRACE is not set 874# CONFIG_KMEMTRACE is not set
858# CONFIG_WORKQUEUE_TRACER is not set 875# CONFIG_WORKQUEUE_TRACER is not set
859# CONFIG_BLK_DEV_IO_TRACE is not set 876# CONFIG_BLK_DEV_IO_TRACE is not set
877# CONFIG_ATOMIC64_SELFTEST is not set
860# CONFIG_SAMPLES is not set 878# CONFIG_SAMPLES is not set
861CONFIG_HAVE_ARCH_KGDB=y 879CONFIG_HAVE_ARCH_KGDB=y
862# CONFIG_KGDB is not set 880# CONFIG_KGDB is not set
diff --git a/arch/arm/configs/s5pv210_defconfig b/arch/arm/configs/s5pv210_defconfig
index 6831dab97d96..e2f5bce29828 100644
--- a/arch/arm/configs/s5pv210_defconfig
+++ b/arch/arm/configs/s5pv210_defconfig
@@ -1,11 +1,13 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.34 3# Linux kernel version: 2.6.34
4# Sat May 22 03:18:22 2010 4# Wed May 26 19:04:39 2010
5# 5#
6CONFIG_ARM=y 6CONFIG_ARM=y
7CONFIG_SYS_SUPPORTS_APM_EMULATION=y 7CONFIG_SYS_SUPPORTS_APM_EMULATION=y
8CONFIG_GENERIC_GPIO=y 8CONFIG_GENERIC_GPIO=y
9CONFIG_GENERIC_TIME=y
10CONFIG_ARCH_USES_GETTIMEOFFSET=y
9CONFIG_HAVE_PROC_CPU=y 11CONFIG_HAVE_PROC_CPU=y
10CONFIG_NO_IOPORT=y 12CONFIG_NO_IOPORT=y
11CONFIG_GENERIC_HARDIRQS=y 13CONFIG_GENERIC_HARDIRQS=y
@@ -35,6 +37,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
35CONFIG_LOCALVERSION="" 37CONFIG_LOCALVERSION=""
36CONFIG_LOCALVERSION_AUTO=y 38CONFIG_LOCALVERSION_AUTO=y
37CONFIG_HAVE_KERNEL_GZIP=y 39CONFIG_HAVE_KERNEL_GZIP=y
40CONFIG_HAVE_KERNEL_LZMA=y
38CONFIG_HAVE_KERNEL_LZO=y 41CONFIG_HAVE_KERNEL_LZO=y
39CONFIG_KERNEL_GZIP=y 42CONFIG_KERNEL_GZIP=y
40# CONFIG_KERNEL_BZIP2 is not set 43# CONFIG_KERNEL_BZIP2 is not set
@@ -180,9 +183,11 @@ CONFIG_MMU=y
180# CONFIG_ARCH_INTEGRATOR is not set 183# CONFIG_ARCH_INTEGRATOR is not set
181# CONFIG_ARCH_REALVIEW is not set 184# CONFIG_ARCH_REALVIEW is not set
182# CONFIG_ARCH_VERSATILE is not set 185# CONFIG_ARCH_VERSATILE is not set
186# CONFIG_ARCH_VEXPRESS is not set
183# CONFIG_ARCH_AT91 is not set 187# CONFIG_ARCH_AT91 is not set
184# CONFIG_ARCH_BCMRING is not set 188# CONFIG_ARCH_BCMRING is not set
185# CONFIG_ARCH_CLPS711X is not set 189# CONFIG_ARCH_CLPS711X is not set
190# CONFIG_ARCH_CNS3XXX is not set
186# CONFIG_ARCH_GEMINI is not set 191# CONFIG_ARCH_GEMINI is not set
187# CONFIG_ARCH_EBSA110 is not set 192# CONFIG_ARCH_EBSA110 is not set
188# CONFIG_ARCH_EP93XX is not set 193# CONFIG_ARCH_EP93XX is not set
@@ -218,7 +223,7 @@ CONFIG_MMU=y
218# CONFIG_ARCH_S3C64XX is not set 223# CONFIG_ARCH_S3C64XX is not set
219# CONFIG_ARCH_S5P6440 is not set 224# CONFIG_ARCH_S5P6440 is not set
220# CONFIG_ARCH_S5P6442 is not set 225# CONFIG_ARCH_S5P6442 is not set
221# CONFIG_ARCH_S5PC1XX is not set 226# CONFIG_ARCH_S5PC100 is not set
222CONFIG_ARCH_S5PV210=y 227CONFIG_ARCH_S5PV210=y
223# CONFIG_ARCH_SHARK is not set 228# CONFIG_ARCH_SHARK is not set
224# CONFIG_ARCH_LH7A40X is not set 229# CONFIG_ARCH_LH7A40X is not set
@@ -227,6 +232,7 @@ CONFIG_ARCH_S5PV210=y
227# CONFIG_ARCH_NOMADIK is not set 232# CONFIG_ARCH_NOMADIK is not set
228# CONFIG_ARCH_DAVINCI is not set 233# CONFIG_ARCH_DAVINCI is not set
229# CONFIG_ARCH_OMAP is not set 234# CONFIG_ARCH_OMAP is not set
235# CONFIG_PLAT_SPEAR is not set
230CONFIG_PLAT_SAMSUNG=y 236CONFIG_PLAT_SAMSUNG=y
231 237
232# 238#
@@ -242,16 +248,24 @@ CONFIG_SAMSUNG_GPIOLIB_4BIT=y
242CONFIG_S3C_GPIO_CFG_S3C24XX=y 248CONFIG_S3C_GPIO_CFG_S3C24XX=y
243CONFIG_S3C_GPIO_CFG_S3C64XX=y 249CONFIG_S3C_GPIO_CFG_S3C64XX=y
244CONFIG_S3C_GPIO_PULL_UPDOWN=y 250CONFIG_S3C_GPIO_PULL_UPDOWN=y
251CONFIG_S5P_GPIO_DRVSTR=y
245CONFIG_SAMSUNG_GPIO_EXTRA=0 252CONFIG_SAMSUNG_GPIO_EXTRA=0
246CONFIG_S3C_GPIO_SPACE=0 253CONFIG_S3C_GPIO_SPACE=0
247CONFIG_S3C_GPIO_TRACK=y 254CONFIG_S3C_GPIO_TRACK=y
248# CONFIG_S3C_ADC is not set 255# CONFIG_S3C_ADC is not set
256CONFIG_S3C_DEV_WDT=y
257CONFIG_SAMSUNG_DEV_ADC=y
258CONFIG_SAMSUNG_DEV_TS=y
259CONFIG_S3C_PL330_DMA=y
249 260
250# 261#
251# Power management 262# Power management
252# 263#
253CONFIG_PLAT_S5P=y 264CONFIG_PLAT_S5P=y
265CONFIG_S5P_EXT_INT=y
254CONFIG_CPU_S5PV210=y 266CONFIG_CPU_S5PV210=y
267# CONFIG_MACH_AQUILA is not set
268# CONFIG_MACH_GONI is not set
255CONFIG_MACH_SMDKV210=y 269CONFIG_MACH_SMDKV210=y
256# CONFIG_MACH_SMDKC110 is not set 270# CONFIG_MACH_SMDKC110 is not set
257 271
@@ -281,12 +295,14 @@ CONFIG_ARM_THUMB=y
281# CONFIG_CPU_BPREDICT_DISABLE is not set 295# CONFIG_CPU_BPREDICT_DISABLE is not set
282CONFIG_HAS_TLS_REG=y 296CONFIG_HAS_TLS_REG=y
283CONFIG_ARM_L1_CACHE_SHIFT=6 297CONFIG_ARM_L1_CACHE_SHIFT=6
298CONFIG_ARM_DMA_MEM_BUFFERABLE=y
284CONFIG_CPU_HAS_PMU=y 299CONFIG_CPU_HAS_PMU=y
285# CONFIG_ARM_ERRATA_430973 is not set 300# CONFIG_ARM_ERRATA_430973 is not set
286# CONFIG_ARM_ERRATA_458693 is not set 301# CONFIG_ARM_ERRATA_458693 is not set
287# CONFIG_ARM_ERRATA_460075 is not set 302# CONFIG_ARM_ERRATA_460075 is not set
288CONFIG_ARM_VIC=y 303CONFIG_ARM_VIC=y
289CONFIG_ARM_VIC_NR=2 304CONFIG_ARM_VIC_NR=2
305CONFIG_PL330=y
290 306
291# 307#
292# Bus support 308# Bus support
@@ -335,6 +351,7 @@ CONFIG_ALIGNMENT_TRAP=y
335CONFIG_ZBOOT_ROM_TEXT=0 351CONFIG_ZBOOT_ROM_TEXT=0
336CONFIG_ZBOOT_ROM_BSS=0 352CONFIG_ZBOOT_ROM_BSS=0
337CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc" 353CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc"
354# CONFIG_CMDLINE_FORCE is not set
338# CONFIG_XIP_KERNEL is not set 355# CONFIG_XIP_KERNEL is not set
339# CONFIG_KEXEC is not set 356# CONFIG_KEXEC is not set
340 357
@@ -481,7 +498,9 @@ CONFIG_INPUT_EVDEV=y
481CONFIG_INPUT_TOUCHSCREEN=y 498CONFIG_INPUT_TOUCHSCREEN=y
482# CONFIG_TOUCHSCREEN_AD7879 is not set 499# CONFIG_TOUCHSCREEN_AD7879 is not set
483# CONFIG_TOUCHSCREEN_DYNAPRO is not set 500# CONFIG_TOUCHSCREEN_DYNAPRO is not set
501# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
484# CONFIG_TOUCHSCREEN_FUJITSU is not set 502# CONFIG_TOUCHSCREEN_FUJITSU is not set
503# CONFIG_TOUCHSCREEN_S3C2410 is not set
485# CONFIG_TOUCHSCREEN_GUNZE is not set 504# CONFIG_TOUCHSCREEN_GUNZE is not set
486# CONFIG_TOUCHSCREEN_ELO is not set 505# CONFIG_TOUCHSCREEN_ELO is not set
487# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set 506# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
@@ -536,6 +555,8 @@ CONFIG_SERIAL_S5PV210=y
536CONFIG_SERIAL_CORE=y 555CONFIG_SERIAL_CORE=y
537CONFIG_SERIAL_CORE_CONSOLE=y 556CONFIG_SERIAL_CORE_CONSOLE=y
538# CONFIG_SERIAL_TIMBERDALE is not set 557# CONFIG_SERIAL_TIMBERDALE is not set
558# CONFIG_SERIAL_ALTERA_JTAGUART is not set
559# CONFIG_SERIAL_ALTERA_UART is not set
539CONFIG_UNIX98_PTYS=y 560CONFIG_UNIX98_PTYS=y
540# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 561# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
541CONFIG_LEGACY_PTYS=y 562CONFIG_LEGACY_PTYS=y
@@ -583,6 +604,7 @@ CONFIG_GPIOLIB=y
583# CONFIG_HWMON is not set 604# CONFIG_HWMON is not set
584# CONFIG_THERMAL is not set 605# CONFIG_THERMAL is not set
585# CONFIG_WATCHDOG is not set 606# CONFIG_WATCHDOG is not set
607CONFIG_HAVE_S3C2410_WATCHDOG=y
586CONFIG_SSB_POSSIBLE=y 608CONFIG_SSB_POSSIBLE=y
587 609
588# 610#
@@ -635,10 +657,6 @@ CONFIG_RTC_LIB=y
635# CONFIG_DMADEVICES is not set 657# CONFIG_DMADEVICES is not set
636# CONFIG_AUXDISPLAY is not set 658# CONFIG_AUXDISPLAY is not set
637# CONFIG_UIO is not set 659# CONFIG_UIO is not set
638
639#
640# TI VLYNQ
641#
642# CONFIG_STAGING is not set 660# CONFIG_STAGING is not set
643 661
644# 662#
@@ -847,6 +865,8 @@ CONFIG_HAVE_FUNCTION_TRACER=y
847CONFIG_TRACING_SUPPORT=y 865CONFIG_TRACING_SUPPORT=y
848CONFIG_FTRACE=y 866CONFIG_FTRACE=y
849# CONFIG_FUNCTION_TRACER is not set 867# CONFIG_FUNCTION_TRACER is not set
868# CONFIG_IRQSOFF_TRACER is not set
869# CONFIG_PREEMPT_TRACER is not set
850# CONFIG_SCHED_TRACER is not set 870# CONFIG_SCHED_TRACER is not set
851# CONFIG_ENABLE_DEFAULT_TRACERS is not set 871# CONFIG_ENABLE_DEFAULT_TRACERS is not set
852# CONFIG_BOOT_TRACER is not set 872# CONFIG_BOOT_TRACER is not set
@@ -857,6 +877,7 @@ CONFIG_BRANCH_PROFILE_NONE=y
857# CONFIG_KMEMTRACE is not set 877# CONFIG_KMEMTRACE is not set
858# CONFIG_WORKQUEUE_TRACER is not set 878# CONFIG_WORKQUEUE_TRACER is not set
859# CONFIG_BLK_DEV_IO_TRACE is not set 879# CONFIG_BLK_DEV_IO_TRACE is not set
880# CONFIG_ATOMIC64_SELFTEST is not set
860# CONFIG_SAMPLES is not set 881# CONFIG_SAMPLES is not set
861CONFIG_HAVE_ARCH_KGDB=y 882CONFIG_HAVE_ARCH_KGDB=y
862# CONFIG_KGDB is not set 883# CONFIG_KGDB is not set
diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h
index bcda59f39941..2f87870d9347 100644
--- a/arch/arm/include/asm/scatterlist.h
+++ b/arch/arm/include/asm/scatterlist.h
@@ -3,9 +3,6 @@
3 3
4#include <asm/memory.h> 4#include <asm/memory.h>
5#include <asm/types.h> 5#include <asm/types.h>
6
7#include <asm-generic/scatterlist.h> 6#include <asm-generic/scatterlist.h>
8 7
9#undef ARCH_HAS_SG_CHAIN
10
11#endif /* _ASMARM_SCATTERLIST_H */ 8#endif /* _ASMARM_SCATTERLIST_H */
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index abd04932917b..2ec3095ffb7b 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -17,6 +17,7 @@
17#include <linux/i2c.h> 17#include <linux/i2c.h>
18#include <linux/i2c/at24.h> 18#include <linux/i2c/at24.h>
19#include <linux/i2c/pca953x.h> 19#include <linux/i2c/pca953x.h>
20#include <linux/mfd/tps6507x.h>
20#include <linux/gpio.h> 21#include <linux/gpio.h>
21#include <linux/platform_device.h> 22#include <linux/platform_device.h>
22#include <linux/mtd/mtd.h> 23#include <linux/mtd/mtd.h>
@@ -24,6 +25,8 @@
24#include <linux/mtd/partitions.h> 25#include <linux/mtd/partitions.h>
25#include <linux/mtd/physmap.h> 26#include <linux/mtd/physmap.h>
26#include <linux/regulator/machine.h> 27#include <linux/regulator/machine.h>
28#include <linux/mfd/tps6507x.h>
29#include <linux/input/tps6507x-ts.h>
27 30
28#include <asm/mach-types.h> 31#include <asm/mach-types.h>
29#include <asm/mach/arch.h> 32#include <asm/mach/arch.h>
@@ -533,10 +536,24 @@ struct regulator_init_data tps65070_regulator_data[] = {
533 }, 536 },
534}; 537};
535 538
539static struct touchscreen_init_data tps6507x_touchscreen_data = {
540 .poll_period = 30, /* ms between touch samples */
541 .min_pressure = 0x30, /* minimum pressure to trigger touch */
542 .vref = 0, /* turn off vref when not using A/D */
543 .vendor = 0, /* /sys/class/input/input?/id/vendor */
544 .product = 65070, /* /sys/class/input/input?/id/product */
545 .version = 0x100, /* /sys/class/input/input?/id/version */
546};
547
548static struct tps6507x_board tps_board = {
549 .tps6507x_pmic_init_data = &tps65070_regulator_data[0],
550 .tps6507x_ts_init_data = &tps6507x_touchscreen_data,
551};
552
536static struct i2c_board_info __initdata da850evm_tps65070_info[] = { 553static struct i2c_board_info __initdata da850evm_tps65070_info[] = {
537 { 554 {
538 I2C_BOARD_INFO("tps6507x", 0x48), 555 I2C_BOARD_INFO("tps6507x", 0x48),
539 .platform_data = &tps65070_regulator_data[0], 556 .platform_data = &tps_board,
540 }, 557 },
541}; 558};
542 559
diff --git a/arch/arm/mach-davinci/include/mach/mmc.h b/arch/arm/mach-davinci/include/mach/mmc.h
index 5a85e24f3673..d4f1e9675069 100644
--- a/arch/arm/mach-davinci/include/mach/mmc.h
+++ b/arch/arm/mach-davinci/include/mach/mmc.h
@@ -22,6 +22,9 @@ struct davinci_mmc_config {
22 22
23 /* Version of the MMC/SD controller */ 23 /* Version of the MMC/SD controller */
24 u8 version; 24 u8 version;
25
26 /* Number of sg segments */
27 u8 nr_sg;
25}; 28};
26void davinci_setup_mmc(int module, struct davinci_mmc_config *config); 29void davinci_setup_mmc(int module, struct davinci_mmc_config *config);
27 30
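The new nr_sg field lets board code cap how many scatter-gather segments the DaVinci MMC driver may use per request. A hedged sketch of a board filling it in and handing the config to davinci_setup_mmc() (the board name and the value 16 are illustrative; the struct's other members are left at their defaults):

#include <linux/init.h>
#include <mach/mmc.h>

/* Hypothetical board code; only the field added in this hunk is set. */
static struct davinci_mmc_config myboard_mmc_config = {
        .nr_sg = 16,    /* limit the DMA scatter-gather list length */
};

static void __init myboard_mmc_init(void)
{
        davinci_setup_mmc(0, &myboard_mmc_config);
}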
diff --git a/arch/arm/mach-mx2/Kconfig b/arch/arm/mach-imx/Kconfig
index 3f756f4ad050..c5c0369bb481 100644
--- a/arch/arm/mach-mx2/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -1,42 +1,103 @@
1config IMX_HAVE_DMA_V1
2 bool
3
4if ARCH_MX1
5
6config SOC_IMX1
7 select CPU_ARM920T
8 select IMX_HAVE_DMA_V1
9 select IMX_HAVE_IOMUX_V1
10 bool
11
12comment "MX1 platforms:"
13config MACH_MXLADS
14 bool
15
16config ARCH_MX1ADS
17 bool "MX1ADS platform"
18 select MACH_MXLADS
19 select IMX_HAVE_PLATFORM_IMX_I2C
20 select IMX_HAVE_PLATFORM_IMX_UART
21 help
22 Say Y here if you are using Motorola MX1ADS/MXLADS boards
23
24config MACH_SCB9328
25 bool "Synertronixx scb9328"
26 select IMX_HAVE_PLATFORM_IMX_UART
27 help
28 Say Y here if you are using a Synertronixx scb9328 board
29
30endif
31
1if ARCH_MX2 32if ARCH_MX2
2 33
34config SOC_IMX21
35 select CPU_ARM926T
36 select ARCH_MXC_AUDMUX_V1
37 select IMX_HAVE_DMA_V1
38 select IMX_HAVE_IOMUX_V1
39 bool
40
41config SOC_IMX27
42 select CPU_ARM926T
43 select ARCH_MXC_AUDMUX_V1
44 select IMX_HAVE_DMA_V1
45 select IMX_HAVE_IOMUX_V1
46 bool
47
3choice 48choice
4 prompt "CPUs:" 49 prompt "CPUs:"
5 default MACH_MX21 50 default MACH_MX21
6 51
7config MACH_MX21 52config MACH_MX21
8 bool "i.MX21 support" 53 bool "i.MX21 support"
9 select ARCH_MXC_AUDMUX_V1 54 select SOC_IMX21
10 help 55 help
11 This enables support for Freescale's MX2 based i.MX21 processor. 56 This enables support for Freescale's MX2 based i.MX21 processor.
12 57
13config MACH_MX27 58config MACH_MX27
14 bool "i.MX27 support" 59 bool "i.MX27 support"
15 select ARCH_MXC_AUDMUX_V1 60 select SOC_IMX27
16 help 61 help
17 This enables support for Freescale's MX2 based i.MX27 processor. 62 This enables support for Freescale's MX2 based i.MX27 processor.
18 63
19endchoice 64endchoice
20 65
21comment "MX2 platforms:" 66endif
67
68if MACH_MX21
69
70comment "MX21 platforms:"
22 71
23config MACH_MX21ADS 72config MACH_MX21ADS
24 bool "MX21ADS platform" 73 bool "MX21ADS platform"
25 depends on MACH_MX21 74 select IMX_HAVE_PLATFORM_IMX_UART
75 select IMX_HAVE_PLATFORM_MXC_NAND
26 help 76 help
27 Include support for MX21ADS platform. This includes specific 77 Include support for MX21ADS platform. This includes specific
28 configurations for the board and its peripherals. 78 configurations for the board and its peripherals.
29 79
80endif
81
82if MACH_MX27
83
84comment "MX27 platforms:"
85
30config MACH_MX27ADS 86config MACH_MX27ADS
31 bool "MX27ADS platform" 87 bool "MX27ADS platform"
32 depends on MACH_MX27 88 select IMX_HAVE_PLATFORM_IMX_I2C
89 select IMX_HAVE_PLATFORM_IMX_UART
90 select IMX_HAVE_PLATFORM_MXC_NAND
33 help 91 help
34 Include support for MX27ADS platform. This includes specific 92 Include support for MX27ADS platform. This includes specific
35 configurations for the board and its peripherals. 93 configurations for the board and its peripherals.
36 94
37config MACH_PCM038 95config MACH_PCM038
38 bool "Phytec phyCORE-i.MX27 CPU module (pcm038)" 96 bool "Phytec phyCORE-i.MX27 CPU module (pcm038)"
39 depends on MACH_MX27 97 select IMX_HAVE_PLATFORM_IMX_I2C
98 select IMX_HAVE_PLATFORM_IMX_UART
99 select IMX_HAVE_PLATFORM_MXC_NAND
100 select IMX_HAVE_PLATFORM_SPI_IMX
40 select MXC_ULPI if USB_ULPI 101 select MXC_ULPI if USB_ULPI
41 help 102 help
42 Include support for phyCORE-i.MX27 (aka pcm038) platform. This 103 Include support for phyCORE-i.MX27 (aka pcm038) platform. This
@@ -58,8 +119,9 @@ endchoice
58 119
59config MACH_CPUIMX27 120config MACH_CPUIMX27
60 bool "Eukrea CPUIMX27 module" 121 bool "Eukrea CPUIMX27 module"
61 depends on MACH_MX27 122 select IMX_HAVE_PLATFORM_IMX_I2C
62 select MXC_ULPI if USB_ULPI 123 select IMX_HAVE_PLATFORM_IMX_UART
124 select IMX_HAVE_PLATFORM_MXC_NAND
63 help 125 help
64 Include support for Eukrea CPUIMX27 platform. This includes 126 Include support for Eukrea CPUIMX27 platform. This includes
65 specific configurations for the module and its peripherals. 127 specific configurations for the module and its peripherals.
@@ -86,6 +148,8 @@ choice
86config MACH_EUKREA_MBIMX27_BASEBOARD 148config MACH_EUKREA_MBIMX27_BASEBOARD
87 prompt "Eukrea MBIMX27 development board" 149 prompt "Eukrea MBIMX27 development board"
88 bool 150 bool
151 select IMX_HAVE_PLATFORM_IMX_UART
152 select IMX_HAVE_PLATFORM_SPI_IMX
89 help 153 help
90 This adds board specific devices that can be found on Eukrea's 154 This adds board specific devices that can be found on Eukrea's
91 MBIMX27 evaluation board. 155 MBIMX27 evaluation board.
@@ -94,21 +158,24 @@ endchoice
94 158
95config MACH_MX27_3DS 159config MACH_MX27_3DS
96 bool "MX27PDK platform" 160 bool "MX27PDK platform"
97 depends on MACH_MX27 161 select IMX_HAVE_PLATFORM_IMX_UART
98 help 162 help
99 Include support for MX27PDK platform. This includes specific 163 Include support for MX27PDK platform. This includes specific
100 configurations for the board and its peripherals. 164 configurations for the board and its peripherals.
101 165
102config MACH_IMX27LITE 166config MACH_IMX27LITE
103 bool "LogicPD MX27 LITEKIT platform" 167 bool "LogicPD MX27 LITEKIT platform"
104 depends on MACH_MX27 168 select IMX_HAVE_PLATFORM_IMX_UART
105 help 169 help
106 Include support for MX27 LITEKIT platform. This includes specific 170 Include support for MX27 LITEKIT platform. This includes specific
107 configurations for the board and its peripherals. 171 configurations for the board and its peripherals.
108 172
109config MACH_PCA100 173config MACH_PCA100
110 bool "Phytec phyCARD-s (pca100)" 174 bool "Phytec phyCARD-s (pca100)"
111 depends on MACH_MX27 175 select IMX_HAVE_PLATFORM_IMX_I2C
176 select IMX_HAVE_PLATFORM_IMX_UART
177 select IMX_HAVE_PLATFORM_MXC_NAND
178 select IMX_HAVE_PLATFORM_SPI_IMX
112 select MXC_ULPI if USB_ULPI 179 select MXC_ULPI if USB_ULPI
113 help 180 help
114 Include support for phyCARD-s (aka pca100) platform. This 181 Include support for phyCARD-s (aka pca100) platform. This
@@ -116,7 +183,9 @@ config MACH_PCA100
116 183
117config MACH_MXT_TD60 184config MACH_MXT_TD60
118 bool "Maxtrack i-MXT TD60" 185 bool "Maxtrack i-MXT TD60"
119 depends on MACH_MX27 186 select IMX_HAVE_PLATFORM_IMX_I2C
187 select IMX_HAVE_PLATFORM_IMX_UART
188 select IMX_HAVE_PLATFORM_MXC_NAND
120 help 189 help
121 Include support for i-MXT (aka td60) platform. This 190 Include support for i-MXT (aka td60) platform. This
122 includes specific configurations for the module and its peripherals. 191 includes specific configurations for the module and its peripherals.
diff --git a/arch/arm/mach-mx2/Makefile b/arch/arm/mach-imx/Makefile
index 27d496c3e5cb..46a9fdfbbd15 100644
--- a/arch/arm/mach-mx2/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -4,14 +4,24 @@
4 4
5# Object file lists. 5# Object file lists.
6 6
7obj-y := devices.o serial.o 7obj-y := devices.o
8 8
9obj-$(CONFIG_MACH_MX21) += clock_imx21.o mm-imx21.o 9obj-$(CONFIG_IMX_HAVE_DMA_V1) += dma-v1.o
10 10
11obj-$(CONFIG_MACH_MX27) += cpu_imx27.o pm-imx27.o 11obj-$(CONFIG_ARCH_MX1) += clock-imx1.o mm-imx1.o
12obj-$(CONFIG_MACH_MX27) += clock_imx27.o mm-imx27.o 12obj-$(CONFIG_MACH_MX21) += clock-imx21.o mm-imx21.o
13
14obj-$(CONFIG_MACH_MX27) += cpu-imx27.o pm-imx27.o
15obj-$(CONFIG_MACH_MX27) += clock-imx27.o mm-imx27.o
16
17# Support for CMOS sensor interface
18obj-$(CONFIG_MX1_VIDEO) += mx1-camera-fiq.o mx1-camera-fiq-ksym.o
19
20obj-$(CONFIG_ARCH_MX1ADS) += mach-mx1ads.o
21obj-$(CONFIG_MACH_SCB9328) += mach-scb9328.o
13 22
14obj-$(CONFIG_MACH_MX21ADS) += mach-mx21ads.o 23obj-$(CONFIG_MACH_MX21ADS) += mach-mx21ads.o
24
15obj-$(CONFIG_MACH_MX27ADS) += mach-mx27ads.o 25obj-$(CONFIG_MACH_MX27ADS) += mach-mx27ads.o
16obj-$(CONFIG_MACH_PCM038) += mach-pcm038.o 26obj-$(CONFIG_MACH_PCM038) += mach-pcm038.o
17obj-$(CONFIG_MACH_PCM970_BASEBOARD) += pcm970-baseboard.o 27obj-$(CONFIG_MACH_PCM970_BASEBOARD) += pcm970-baseboard.o
diff --git a/arch/arm/mach-mx2/Makefile.boot b/arch/arm/mach-imx/Makefile.boot
index e867398a8fdb..7988a85cf07d 100644
--- a/arch/arm/mach-mx2/Makefile.boot
+++ b/arch/arm/mach-imx/Makefile.boot
@@ -1,3 +1,7 @@
1zreladdr-$(CONFIG_ARCH_MX1) := 0x08008000
2params_phys-$(CONFIG_ARCH_MX1) := 0x08000100
3initrd_phys-$(CONFIG_ARCH_MX1) := 0x08800000
4
1zreladdr-$(CONFIG_MACH_MX21) := 0xC0008000 5zreladdr-$(CONFIG_MACH_MX21) := 0xC0008000
2params_phys-$(CONFIG_MACH_MX21) := 0xC0000100 6params_phys-$(CONFIG_MACH_MX21) := 0xC0000100
3initrd_phys-$(CONFIG_MACH_MX21) := 0xC0800000 7initrd_phys-$(CONFIG_MACH_MX21) := 0xC0800000
diff --git a/arch/arm/mach-mx1/clock.c b/arch/arm/mach-imx/clock-imx1.c
index 6cf2d4a7511d..c05096c38301 100644
--- a/arch/arm/mach-mx1/clock.c
+++ b/arch/arm/mach-imx/clock-imx1.c
@@ -2,18 +2,17 @@
2 * Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix 2 * Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License version 2 as
6 * the Free Software Foundation; either version 2 of the License, or 6 * published by the Free Software Foundation.
7 * (at your option) any later version.
8 * 7 *
9 * This program is distributed in the hope that it will be useful, 8 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 11 * GNU General Public License for more details.
13 * 12 *
14 * You should have received a copy of the GNU General Public License 13 * You should have received a copy of the GNU General Public License along
15 * along with this program; if not, write to the Free Software 14 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 15 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
17 */ 16 */
18 17
19#include <linux/kernel.h> 18#include <linux/kernel.h>
@@ -29,7 +28,41 @@
29#include <mach/clock.h> 28#include <mach/clock.h>
30#include <mach/hardware.h> 29#include <mach/hardware.h>
31#include <mach/common.h> 30#include <mach/common.h>
32#include "crm_regs.h" 31
32#define IO_ADDR_CCM(off) (MX1_IO_ADDRESS(MX1_CCM_BASE_ADDR + (off)))
33
34/* CCM register addresses */
35#define CCM_CSCR IO_ADDR_CCM(0x0)
36#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
37#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
38#define CCM_PCDR IO_ADDR_CCM(0x20)
39
40#define CCM_CSCR_CLKO_OFFSET 29
41#define CCM_CSCR_CLKO_MASK (0x7 << 29)
42#define CCM_CSCR_USB_OFFSET 26
43#define CCM_CSCR_USB_MASK (0x7 << 26)
44#define CCM_CSCR_OSC_EN_SHIFT 17
45#define CCM_CSCR_SYSTEM_SEL (1 << 16)
46#define CCM_CSCR_BCLK_OFFSET 10
47#define CCM_CSCR_BCLK_MASK (0xf << 10)
48#define CCM_CSCR_PRESC (1 << 15)
49
50#define CCM_PCDR_PCLK3_OFFSET 16
51#define CCM_PCDR_PCLK3_MASK (0x7f << 16)
52#define CCM_PCDR_PCLK2_OFFSET 4
53#define CCM_PCDR_PCLK2_MASK (0xf << 4)
54#define CCM_PCDR_PCLK1_OFFSET 0
55#define CCM_PCDR_PCLK1_MASK 0xf
56
57#define IO_ADDR_SCM(off) (MX1_IO_ADDRESS(MX1_SCM_BASE_ADDR + (off)))
58
59/* SCM register addresses */
60#define SCM_GCCR IO_ADDR_SCM(0xc)
61
62#define SCM_GCCR_DMA_CLK_EN_OFFSET 3
63#define SCM_GCCR_CSI_CLK_EN_OFFSET 2
64#define SCM_GCCR_MMA_CLK_EN_OFFSET 1
65#define SCM_GCCR_USBD_CLK_EN_OFFSET 0
33 66
34static int _clk_enable(struct clk *clk) 67static int _clk_enable(struct clk *clk)
35{ 68{
@@ -596,7 +629,8 @@ int __init mx1_clocks_init(unsigned long fref)
596 clk_enable(&hclk); 629 clk_enable(&hclk);
597 clk_enable(&fclk); 630 clk_enable(&fclk);
598 631
599 mxc_timer_init(&gpt_clk, IO_ADDRESS(TIM1_BASE_ADDR), TIM1_INT); 632 mxc_timer_init(&gpt_clk, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
633 MX1_TIM1_INT);
600 634
601 return 0; 635 return 0;
602} 636}
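The rewritten clock code drops crm_regs.h and instead defines statically mapped CCM/SCM register addresses plus field offsets and masks locally. A purely illustrative sketch of how such an OFFSET/MASK pair is typically consumed when computing a divider (the helper name and the __raw_readl() access are assumptions, not code from this file):

#include <linux/io.h>

/* Illustrative only: pull the BCLK divider field out of CSCR using
 * the CCM_CSCR_BCLK_* definitions added above. */
static unsigned int imx1_bclk_divider(void)
{
        unsigned int cscr = __raw_readl(CCM_CSCR);

        return ((cscr & CCM_CSCR_BCLK_MASK) >> CCM_CSCR_BCLK_OFFSET) + 1;
}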
diff --git a/arch/arm/mach-mx2/clock_imx21.c b/arch/arm/mach-imx/clock-imx21.c
index bb419ef4d133..bb419ef4d133 100644
--- a/arch/arm/mach-mx2/clock_imx21.c
+++ b/arch/arm/mach-imx/clock-imx21.c
diff --git a/arch/arm/mach-mx2/clock_imx27.c b/arch/arm/mach-imx/clock-imx27.c
index 5a1aa15c8a16..5a1aa15c8a16 100644
--- a/arch/arm/mach-mx2/clock_imx27.c
+++ b/arch/arm/mach-imx/clock-imx27.c
diff --git a/arch/arm/mach-mx2/cpu_imx27.c b/arch/arm/mach-imx/cpu-imx27.c
index d8d3b2d84dc5..d8d3b2d84dc5 100644
--- a/arch/arm/mach-mx2/cpu_imx27.c
+++ b/arch/arm/mach-imx/cpu-imx27.c
diff --git a/arch/arm/mach-imx/devices-imx1.h b/arch/arm/mach-imx/devices-imx1.h
new file mode 100644
index 000000000000..a8d94f078196
--- /dev/null
+++ b/arch/arm/mach-imx/devices-imx1.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (C) 2010 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#include <mach/mx1.h>
10#include <mach/devices-common.h>
11
12#define imx1_add_i2c_imx(pdata) \
13 imx_add_imx_i2c(0, MX1_I2C_BASE_ADDR, SZ_4K, MX1_INT_I2C, pdata)
14
15#define imx1_add_imx_uart0(pdata) \
16 imx_add_imx_uart_3irq(0, MX1_UART1_BASE_ADDR, 0xd0, MX1_INT_UART1RX, MX1_INT_UART1TX, MX1_INT_UART1RTS, pdata)
17#define imx1_add_imx_uart1(pdata) \
18 imx_add_imx_uart_3irq(0, MX1_UART2_BASE_ADDR, 0xd0, MX1_INT_UART2RX, MX1_INT_UART2TX, MX1_INT_UART2RTS, pdata)
diff --git a/arch/arm/mach-imx/devices-imx21.h b/arch/arm/mach-imx/devices-imx21.h
new file mode 100644
index 000000000000..42788e99d127
--- /dev/null
+++ b/arch/arm/mach-imx/devices-imx21.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (C) 2010 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#include <mach/mx21.h>
10#include <mach/devices-common.h>
11
12#define imx21_add_i2c_imx(pdata) \
13 imx_add_imx_i2c(0, MX2x_I2C_BASE_ADDR, SZ_4K, MX2x_INT_I2C, pdata)
14
15#define imx21_add_imx_uart0(pdata) \
16 imx_add_imx_uart_1irq(0, MX21_UART1_BASE_ADDR, SZ_4K, MX21_INT_UART1, pdata)
17#define imx21_add_imx_uart1(pdata) \
18 imx_add_imx_uart_1irq(1, MX21_UART2_BASE_ADDR, SZ_4K, MX21_INT_UART2, pdata)
19#define imx21_add_imx_uart2(pdata) \
20 imx_add_imx_uart_1irq(2, MX21_UART3_BASE_ADDR, SZ_4K, MX21_INT_UART3, pdata)
21#define imx21_add_imx_uart3(pdata) \
22 imx_add_imx_uart_1irq(3, MX21_UART4_BASE_ADDR, SZ_4K, MX21_INT_UART4, pdata)
23
24#define imx21_add_mxc_nand(pdata) \
25 imx_add_mxc_nand_v1(MX21_NFC_BASE_ADDR, MX21_INT_NANDFC, pdata)
26
27#define imx21_add_spi_imx0(pdata) \
28 imx_add_spi_imx(0, MX21_CSPI1_BASE_ADDR, SZ_4K, MX21_INT_CSPI1, pdata)
29#define imx21_add_spi_imx1(pdata) \
30 imx_add_spi_imx(1, MX21_CSPI2_BASE_ADDR, SZ_4K, MX21_INT_CSPI2, pdata)
diff --git a/arch/arm/mach-imx/devices-imx27.h b/arch/arm/mach-imx/devices-imx27.h
new file mode 100644
index 000000000000..65e7bb7ec2e8
--- /dev/null
+++ b/arch/arm/mach-imx/devices-imx27.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright (C) 2010 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#include <mach/mx27.h>
10#include <mach/devices-common.h>
11
12#define imx27_add_i2c_imx0(pdata) \
13 imx_add_imx_i2c(0, MX27_I2C1_BASE_ADDR, SZ_4K, MX27_INT_I2C1, pdata)
14#define imx27_add_i2c_imx1(pdata) \
15 imx_add_imx_i2c(1, MX27_I2C2_BASE_ADDR, SZ_4K, MX27_INT_I2C2, pdata)
16
17#define imx27_add_imx_uart0(pdata) \
18 imx_add_imx_uart_1irq(0, MX27_UART1_BASE_ADDR, SZ_4K, MX27_INT_UART1, pdata)
19#define imx27_add_imx_uart1(pdata) \
20 imx_add_imx_uart_1irq(1, MX27_UART2_BASE_ADDR, SZ_4K, MX27_INT_UART2, pdata)
21#define imx27_add_imx_uart2(pdata) \
22 imx_add_imx_uart_1irq(2, MX27_UART3_BASE_ADDR, SZ_4K, MX27_INT_UART3, pdata)
23#define imx27_add_imx_uart3(pdata) \
24 imx_add_imx_uart_1irq(3, MX27_UART4_BASE_ADDR, SZ_4K, MX27_INT_UART4, pdata)
25#define imx27_add_imx_uart4(pdata) \
26 imx_add_imx_uart_1irq(4, MX27_UART5_BASE_ADDR, SZ_4K, MX27_INT_UART5, pdata)
27#define imx27_add_imx_uart5(pdata) \
28 imx_add_imx_uart_1irq(5, MX27_UART6_BASE_ADDR, SZ_4K, MX27_INT_UART6, pdata)
29
30#define imx27_add_mxc_nand(pdata) \
31 imx_add_mxc_nand_v1(MX27_NFC_BASE_ADDR, MX27_INT_NANDFC, pdata)
32
33#define imx27_add_spi_imx0(pdata) \
34 imx_add_spi_imx(0, MX27_CSPI1_BASE_ADDR, SZ_4K, MX27_INT_CSPI1, pdata)
35#define imx27_add_spi_imx1(pdata) \
36 imx_add_spi_imx(1, MX27_CSPI2_BASE_ADDR, SZ_4K, MX27_INT_CSPI2, pdata)
37#define imx27_add_spi_imx2(pdata) \
38 imx_add_spi_imx(2, MX27_CSPI3_BASE_ADDR, SZ_4K, MX27_INT_CSPI3, pdata)
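These devices-imx*.h headers replace the old pattern of exporting one static platform_device per peripheral: a board file now includes its SoC header and calls the matching imx27_add_*() helper with its platform data, and the device is created and registered at init time. A hedged sketch of a board init built on the helpers declared above (the platform-data types come from mach/ headers that are not part of this diff, so their field names are assumptions here, as is the board name):

#include "devices-imx27.h"

/* Hypothetical board init using the registration helpers above. */
static const struct imxuart_platform_data myboard_uart_pdata = {
        .flags = IMXUART_HAVE_RTSCTS,   /* assumed flag from mach/imx-uart.h */
};

static const struct mxc_nand_platform_data myboard_nand_pdata = {
        .width  = 1,    /* 8-bit NAND bus */
        .hw_ecc = 1,
};

static void __init myboard_init(void)
{
        imx27_add_imx_uart0(&myboard_uart_pdata);
        imx27_add_mxc_nand(&myboard_nand_pdata);
}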
diff --git a/arch/arm/mach-mx2/devices.c b/arch/arm/mach-imx/devices.c
index 28caa21cb56e..9c271a752b84 100644
--- a/arch/arm/mach-mx2/devices.c
+++ b/arch/arm/mach-imx/devices.c
@@ -11,6 +11,9 @@
11 * 11 *
12 * Copyright 2006-2007 Freescale Semiconductor, Inc. All Rights Reserved. 12 * Copyright 2006-2007 Freescale Semiconductor, Inc. All Rights Reserved.
13 * Copyright 2008 Juergen Beisert, kernel@pengutronix.de 13 * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
14 * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
15 * Copyright (c) 2008 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
16 * Copyright (c) 2008 Darius Augulis <darius.augulis@teltonika.lt>
14 * 17 *
15 * This program is free software; you can redistribute it and/or 18 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License 19 * modify it under the terms of the GNU General Public License
@@ -32,6 +35,7 @@
32#include <linux/platform_device.h> 35#include <linux/platform_device.h>
33#include <linux/gpio.h> 36#include <linux/gpio.h>
34#include <linux/dma-mapping.h> 37#include <linux/dma-mapping.h>
38#include <linux/serial.h>
35 39
36#include <mach/irqs.h> 40#include <mach/irqs.h>
37#include <mach/hardware.h> 41#include <mach/hardware.h>
@@ -40,6 +44,150 @@
40 44
41#include "devices.h" 45#include "devices.h"
42 46
47#if defined(CONFIG_ARCH_MX1)
48static struct resource imx1_camera_resources[] = {
49 {
50 .start = 0x00224000,
51 .end = 0x00224010,
52 .flags = IORESOURCE_MEM,
53 }, {
54 .start = MX1_CSI_INT,
55 .end = MX1_CSI_INT,
56 .flags = IORESOURCE_IRQ,
57 },
58};
59
60static u64 imx1_camera_dmamask = DMA_BIT_MASK(32);
61
62struct platform_device imx1_camera_device = {
63 .name = "mx1-camera",
64 .id = 0, /* This is used to put cameras on this interface */
65 .dev = {
66 .dma_mask = &imx1_camera_dmamask,
67 .coherent_dma_mask = DMA_BIT_MASK(32),
68 },
69 .resource = imx1_camera_resources,
70 .num_resources = ARRAY_SIZE(imx1_camera_resources),
71};
72
73static struct resource imx_rtc_resources[] = {
74 {
75 .start = 0x00204000,
76 .end = 0x00204024,
77 .flags = IORESOURCE_MEM,
78 }, {
79 .start = MX1_RTC_INT,
80 .end = MX1_RTC_INT,
81 .flags = IORESOURCE_IRQ,
82 }, {
83 .start = MX1_RTC_SAMINT,
84 .end = MX1_RTC_SAMINT,
85 .flags = IORESOURCE_IRQ,
86 },
87};
88
89struct platform_device imx_rtc_device = {
90 .name = "rtc-imx",
91 .id = 0,
92 .resource = imx_rtc_resources,
93 .num_resources = ARRAY_SIZE(imx_rtc_resources),
94};
95
96static struct resource imx_wdt_resources[] = {
97 {
98 .start = 0x00201000,
99 .end = 0x00201008,
100 .flags = IORESOURCE_MEM,
101 }, {
102 .start = MX1_WDT_INT,
103 .end = MX1_WDT_INT,
104 .flags = IORESOURCE_IRQ,
105 },
106};
107
108struct platform_device imx_wdt_device = {
109 .name = "imx-wdt",
110 .id = 0,
111 .resource = imx_wdt_resources,
112 .num_resources = ARRAY_SIZE(imx_wdt_resources),
113};
114
115static struct resource imx_usb_resources[] = {
116 {
117 .start = 0x00212000,
118 .end = 0x00212148,
119 .flags = IORESOURCE_MEM,
120 }, {
121 .start = MX1_USBD_INT0,
122 .end = MX1_USBD_INT0,
123 .flags = IORESOURCE_IRQ,
124 }, {
125 .start = MX1_USBD_INT1,
126 .end = MX1_USBD_INT1,
127 .flags = IORESOURCE_IRQ,
128 }, {
129 .start = MX1_USBD_INT2,
130 .end = MX1_USBD_INT2,
131 .flags = IORESOURCE_IRQ,
132 }, {
133 .start = MX1_USBD_INT3,
134 .end = MX1_USBD_INT3,
135 .flags = IORESOURCE_IRQ,
136 }, {
137 .start = MX1_USBD_INT4,
138 .end = MX1_USBD_INT4,
139 .flags = IORESOURCE_IRQ,
140 }, {
141 .start = MX1_USBD_INT5,
142 .end = MX1_USBD_INT5,
143 .flags = IORESOURCE_IRQ,
144 }, {
145 .start = MX1_USBD_INT6,
146 .end = MX1_USBD_INT6,
147 .flags = IORESOURCE_IRQ,
148 },
149};
150
151struct platform_device imx_usb_device = {
152 .name = "imx_udc",
153 .id = 0,
154 .num_resources = ARRAY_SIZE(imx_usb_resources),
155 .resource = imx_usb_resources,
156};
157
158/* GPIO port description */
159static struct mxc_gpio_port imx_gpio_ports[] = {
160 {
161 .chip.label = "gpio-0",
162 .base = (void __iomem *)MX1_IO_ADDRESS(MX1_GPIO_BASE_ADDR),
163 .irq = MX1_GPIO_INT_PORTA,
164 .virtual_irq_start = MXC_GPIO_IRQ_START,
165 }, {
166 .chip.label = "gpio-1",
167 .base = (void __iomem *)MX1_IO_ADDRESS(MX1_GPIO_BASE_ADDR + 0x100),
168 .irq = MX1_GPIO_INT_PORTB,
169 .virtual_irq_start = MXC_GPIO_IRQ_START + 32,
170 }, {
171 .chip.label = "gpio-2",
172 .base = (void __iomem *)MX1_IO_ADDRESS(MX1_GPIO_BASE_ADDR + 0x200),
173 .irq = MX1_GPIO_INT_PORTC,
174 .virtual_irq_start = MXC_GPIO_IRQ_START + 64,
175 }, {
176 .chip.label = "gpio-3",
177 .base = (void __iomem *)MX1_IO_ADDRESS(MX1_GPIO_BASE_ADDR + 0x300),
178 .irq = MX1_GPIO_INT_PORTD,
179 .virtual_irq_start = MXC_GPIO_IRQ_START + 96,
180 }
181};
182
183int __init imx1_register_gpios(void)
184{
185 return mxc_gpio_init(imx_gpio_ports, ARRAY_SIZE(imx_gpio_ports));
186}
187#endif
188
189#if defined(CONFIG_MACH_MX21) || defined(CONFIG_MACH_MX27)
190
43#ifdef CONFIG_MACH_MX27 191#ifdef CONFIG_MACH_MX27
44static struct resource mx27_camera_resources[] = { 192static struct resource mx27_camera_resources[] = {
45 { 193 {
@@ -72,40 +220,6 @@ struct platform_device mx27_camera_device = {
72#endif 220#endif
73 221
74/* 222/*
75 * SPI master controller
76 *
77 * - i.MX1: 2 channel (slighly different register setting)
78 * - i.MX21: 2 channel
79 * - i.MX27: 3 channel
80 */
81#define DEFINE_IMX_SPI_DEVICE(n, baseaddr, irq) \
82 static struct resource mxc_spi_resources ## n[] = { \
83 { \
84 .start = baseaddr, \
85 .end = baseaddr + SZ_4K - 1, \
86 .flags = IORESOURCE_MEM, \
87 }, { \
88 .start = irq, \
89 .end = irq, \
90 .flags = IORESOURCE_IRQ, \
91 }, \
92 }; \
93 \
94 struct platform_device mxc_spi_device ## n = { \
95 .name = "spi_imx", \
96 .id = n, \
97 .num_resources = ARRAY_SIZE(mxc_spi_resources ## n), \
98 .resource = mxc_spi_resources ## n, \
99 }
100
101DEFINE_IMX_SPI_DEVICE(0, MX2x_CSPI1_BASE_ADDR, MX2x_INT_CSPI1);
102DEFINE_IMX_SPI_DEVICE(1, MX2x_CSPI2_BASE_ADDR, MX2x_INT_CSPI2);
103
104#ifdef CONFIG_MACH_MX27
105DEFINE_IMX_SPI_DEVICE(2, MX27_CSPI3_BASE_ADDR, MX27_INT_CSPI3);
106#endif
107
108/*
109 * General Purpose Timer 223 * General Purpose Timer
110 * - i.MX21: 3 timers 224 * - i.MX21: 3 timers
111 * - i.MX27: 6 timers 225 * - i.MX27: 6 timers
@@ -171,34 +285,6 @@ struct platform_device mxc_w1_master_device = {
171 .resource = mxc_w1_master_resources, 285 .resource = mxc_w1_master_resources,
172}; 286};
173 287
174#define DEFINE_MXC_NAND_DEVICE(pfx, baseaddr, irq) \
175 static struct resource pfx ## _nand_resources[] = { \
176 { \
177 .start = baseaddr, \
178 .end = baseaddr + SZ_4K - 1, \
179 .flags = IORESOURCE_MEM, \
180 }, { \
181 .start = irq, \
182 .end = irq, \
183 .flags = IORESOURCE_IRQ, \
184 }, \
185 }; \
186 \
187 struct platform_device pfx ## _nand_device = { \
188 .name = "mxc_nand", \
189 .id = 0, \
190 .num_resources = ARRAY_SIZE(pfx ## _nand_resources), \
191 .resource = pfx ## _nand_resources, \
192 }
193
194#ifdef CONFIG_MACH_MX21
195DEFINE_MXC_NAND_DEVICE(imx21, MX21_NFC_BASE_ADDR, MX21_INT_NANDFC);
196#endif
197
198#ifdef CONFIG_MACH_MX27
199DEFINE_MXC_NAND_DEVICE(imx27, MX27_NFC_BASE_ADDR, MX27_INT_NANDFC);
200#endif
201
202/* 288/*
203 * lcdc: 289 * lcdc:
204 * - i.MX1: the basic controller 290 * - i.MX1: the basic controller
@@ -249,32 +335,6 @@ struct platform_device mxc_fec_device = {
249}; 335};
250#endif 336#endif
251 337
252#define DEFINE_IMX_I2C_DEVICE(n, baseaddr, irq) \
253 static struct resource mxc_i2c_resources ## n[] = { \
254 { \
255 .start = baseaddr, \
256 .end = baseaddr + SZ_4K - 1, \
257 .flags = IORESOURCE_MEM, \
258 }, { \
259 .start = irq, \
260 .end = irq, \
261 .flags = IORESOURCE_IRQ, \
262 } \
263 }; \
264 \
265 struct platform_device mxc_i2c_device ## n = { \
266 .name = "imx-i2c", \
267 .id = n, \
268 .num_resources = ARRAY_SIZE(mxc_i2c_resources ## n), \
269 .resource = mxc_i2c_resources ## n, \
270 }
271
272DEFINE_IMX_I2C_DEVICE(0, MX2x_I2C_BASE_ADDR, MX2x_INT_I2C);
273
274#ifdef CONFIG_MACH_MX27
275DEFINE_IMX_I2C_DEVICE(1, MX27_I2C2_BASE_ADDR, MX27_INT_I2C2);
276#endif
277
278static struct resource mxc_pwm_resources[] = { 338static struct resource mxc_pwm_resources[] = {
279 { 339 {
280 .start = MX2x_PWM_BASE_ADDR, 340 .start = MX2x_PWM_BASE_ADDR,
@@ -485,26 +545,21 @@ DEFINE_IMX_SSI_DEVICE(1, 2, MX2x_SSI1_BASE_ADDR, MX2x_INT_SSI1);
485 545
486#ifdef CONFIG_MACH_MX21 546#ifdef CONFIG_MACH_MX21
487DEFINE_MXC_GPIO_PORTS(MX21, imx21); 547DEFINE_MXC_GPIO_PORTS(MX21, imx21);
548
549int __init imx21_register_gpios(void)
550{
551 return mxc_gpio_init(imx21_gpio_ports, ARRAY_SIZE(imx21_gpio_ports));
552}
488#endif 553#endif
489 554
490#ifdef CONFIG_MACH_MX27 555#ifdef CONFIG_MACH_MX27
491DEFINE_MXC_GPIO_PORTS(MX27, imx27); 556DEFINE_MXC_GPIO_PORTS(MX27, imx27);
492#endif
493 557
494int __init mxc_register_gpios(void) 558int __init imx27_register_gpios(void)
495{ 559{
496#ifdef CONFIG_MACH_MX21 560 return mxc_gpio_init(imx27_gpio_ports, ARRAY_SIZE(imx27_gpio_ports));
497 if (cpu_is_mx21())
498 return mxc_gpio_init(imx21_gpio_ports, ARRAY_SIZE(imx21_gpio_ports));
499 else
500#endif
501#ifdef CONFIG_MACH_MX27
502 if (cpu_is_mx27())
503 return mxc_gpio_init(imx27_gpio_ports, ARRAY_SIZE(imx27_gpio_ports));
504 else
505#endif
506 return 0;
507} 561}
562#endif
508 563
509#ifdef CONFIG_MACH_MX21 564#ifdef CONFIG_MACH_MX21
510static struct resource mx21_usbhc_resources[] = { 565static struct resource mx21_usbhc_resources[] = {
@@ -550,3 +605,5 @@ struct platform_device imx_kpp_device = {
550 .num_resources = ARRAY_SIZE(imx_kpp_resources), 605 .num_resources = ARRAY_SIZE(imx_kpp_resources),
551 .resource = imx_kpp_resources, 606 .resource = imx_kpp_resources,
552}; 607};
608
609#endif
diff --git a/arch/arm/mach-mx2/devices.h b/arch/arm/mach-imx/devices.h
index aefc87a7609e..efd4527506a5 100644
--- a/arch/arm/mach-mx2/devices.h
+++ b/arch/arm/mach-imx/devices.h
@@ -1,3 +1,11 @@
1#ifdef CONFIG_ARCH_MX1
2extern struct platform_device imx1_camera_device;
3extern struct platform_device imx_rtc_device;
4extern struct platform_device imx_wdt_device;
5extern struct platform_device imx_usb_device;
6#endif
7
8#if defined(CONFIG_MACH_MX21) || defined(CONFIG_MACH_MX27)
1extern struct platform_device mxc_gpt1; 9extern struct platform_device mxc_gpt1;
2extern struct platform_device mxc_gpt2; 10extern struct platform_device mxc_gpt2;
3#ifdef CONFIG_MACH_MX27 11#ifdef CONFIG_MACH_MX27
@@ -6,26 +14,10 @@ extern struct platform_device mxc_gpt4;
6extern struct platform_device mxc_gpt5; 14extern struct platform_device mxc_gpt5;
7#endif 15#endif
8extern struct platform_device mxc_wdt; 16extern struct platform_device mxc_wdt;
9extern struct platform_device mxc_uart_device0;
10extern struct platform_device mxc_uart_device1;
11extern struct platform_device mxc_uart_device2;
12extern struct platform_device mxc_uart_device3;
13extern struct platform_device mxc_uart_device4;
14extern struct platform_device mxc_uart_device5;
15extern struct platform_device mxc_w1_master_device; 17extern struct platform_device mxc_w1_master_device;
16#ifdef CONFIG_MACH_MX21
17extern struct platform_device imx21_nand_device;
18#endif
19#ifdef CONFIG_MACH_MX27
20extern struct platform_device imx27_nand_device;
21#endif
22extern struct platform_device mxc_fb_device; 18extern struct platform_device mxc_fb_device;
23extern struct platform_device mxc_fec_device; 19extern struct platform_device mxc_fec_device;
24extern struct platform_device mxc_pwm_device; 20extern struct platform_device mxc_pwm_device;
25extern struct platform_device mxc_i2c_device0;
26#ifdef CONFIG_MACH_MX27
27extern struct platform_device mxc_i2c_device1;
28#endif
29extern struct platform_device mxc_sdhc_device0; 21extern struct platform_device mxc_sdhc_device0;
30extern struct platform_device mxc_sdhc_device1; 22extern struct platform_device mxc_sdhc_device1;
31extern struct platform_device mxc_otg_udc_device; 23extern struct platform_device mxc_otg_udc_device;
@@ -33,12 +25,8 @@ extern struct platform_device mx27_camera_device;
33extern struct platform_device mxc_otg_host; 25extern struct platform_device mxc_otg_host;
34extern struct platform_device mxc_usbh1; 26extern struct platform_device mxc_usbh1;
35extern struct platform_device mxc_usbh2; 27extern struct platform_device mxc_usbh2;
36extern struct platform_device mxc_spi_device0;
37extern struct platform_device mxc_spi_device1;
38#ifdef CONFIG_MACH_MX27
39extern struct platform_device mxc_spi_device2;
40#endif
41extern struct platform_device mx21_usbhc_device; 28extern struct platform_device mx21_usbhc_device;
42extern struct platform_device imx_ssi_device0; 29extern struct platform_device imx_ssi_device0;
43extern struct platform_device imx_ssi_device1; 30extern struct platform_device imx_ssi_device1;
44extern struct platform_device imx_kpp_device; 31extern struct platform_device imx_kpp_device;
32#endif
diff --git a/arch/arm/plat-mxc/dma-mx1-mx2.c b/arch/arm/mach-imx/dma-v1.c
index e16014b0d13c..fd1d9197d06e 100644
--- a/arch/arm/plat-mxc/dma-mx1-mx2.c
+++ b/arch/arm/mach-imx/dma-v1.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/arm/plat-mxc/dma-mx1-mx2.c 2 * linux/arch/arm/plat-mxc/dma-v1.c
3 * 3 *
4 * i.MX DMA registration and IRQ dispatching 4 * i.MX DMA registration and IRQ dispatching
5 * 5 *
@@ -34,7 +34,7 @@
34#include <asm/system.h> 34#include <asm/system.h>
35#include <asm/irq.h> 35#include <asm/irq.h>
36#include <mach/hardware.h> 36#include <mach/hardware.h>
37#include <mach/dma-mx1-mx2.h> 37#include <mach/dma-v1.h>
38 38
39#define DMA_DCR 0x00 /* Control Register */ 39#define DMA_DCR 0x00 /* Control Register */
40#define DMA_DISR 0x04 /* Interrupt status Register */ 40#define DMA_DISR 0x04 /* Interrupt status Register */
diff --git a/arch/arm/mach-mx2/eukrea_mbimx27-baseboard.c b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
index d66a6c439583..27e7226ec9d4 100644
--- a/arch/arm/mach-mx2/eukrea_mbimx27-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
@@ -35,11 +35,11 @@
35#include <mach/imxfb.h> 35#include <mach/imxfb.h>
36#include <mach/hardware.h> 36#include <mach/hardware.h>
37#include <mach/mmc.h> 37#include <mach/mmc.h>
38#include <mach/imx-uart.h>
39#include <mach/spi.h> 38#include <mach/spi.h>
40#include <mach/ssi.h> 39#include <mach/ssi.h>
41#include <mach/audmux.h> 40#include <mach/audmux.h>
42 41
42#include "devices-imx27.h"
43#include "devices.h" 43#include "devices.h"
44 44
45static int eukrea_mbimx27_pins[] = { 45static int eukrea_mbimx27_pins[] = {
@@ -247,16 +247,8 @@ static struct platform_device eukrea_mbimx27_lcd_powerdev = {
247 .dev.platform_data = &eukrea_mbimx27_lcd_power_data, 247 .dev.platform_data = &eukrea_mbimx27_lcd_power_data,
248}; 248};
249 249
250static struct imxuart_platform_data uart_pdata[] = { 250static const struct imxuart_platform_data uart_pdata __initconst = {
251 { 251 .flags = IMXUART_HAVE_RTSCTS,
252 .flags = IMXUART_HAVE_RTSCTS,
253 },
254 {
255 .flags = IMXUART_HAVE_RTSCTS,
256 },
257 {
258 .flags = IMXUART_HAVE_RTSCTS,
259 },
260}; 252};
261 253
262#if defined(CONFIG_TOUCHSCREEN_ADS7846) \ 254#if defined(CONFIG_TOUCHSCREEN_ADS7846) \
@@ -305,7 +297,7 @@ static struct i2c_board_info eukrea_mbimx27_i2c_devices[] = {
305 297
306static int eukrea_mbimx27_spi_cs[] = {GPIO_PORTD | 28}; 298static int eukrea_mbimx27_spi_cs[] = {GPIO_PORTD | 28};
307 299
308static struct spi_imx_master eukrea_mbimx27_spi_0_data = { 300static const struct spi_imx_master eukrea_mbimx27_spi0_data __initconst = {
309 .chipselect = eukrea_mbimx27_spi_cs, 301 .chipselect = eukrea_mbimx27_spi_cs,
310 .num_chipselect = ARRAY_SIZE(eukrea_mbimx27_spi_cs), 302 .num_chipselect = ARRAY_SIZE(eukrea_mbimx27_spi_cs),
311}; 303};
@@ -353,10 +345,10 @@ void __init eukrea_mbimx27_baseboard_init(void)
353 ); 345 );
354#endif 346#endif
355 347
356 mxc_register_device(&mxc_uart_device1, &uart_pdata[0]); 348 imx27_add_imx_uart1(&uart_pdata);
357 mxc_register_device(&mxc_uart_device2, &uart_pdata[1]); 349 imx27_add_imx_uart2(&uart_pdata);
358#if !defined(MACH_EUKREA_CPUIMX27_USEUART4) 350#if !defined(MACH_EUKREA_CPUIMX27_USEUART4)
359 mxc_register_device(&mxc_uart_device3, &uart_pdata[2]); 351 imx27_add_imx_uart3(&uart_pdata);
360#endif 352#endif
361 353
362 mxc_register_device(&mxc_fb_device, &eukrea_mbimx27_fb_data); 354 mxc_register_device(&mxc_fb_device, &eukrea_mbimx27_fb_data);
@@ -377,7 +369,7 @@ void __init eukrea_mbimx27_baseboard_init(void)
377#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE) 369#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
378 /* SPI_CS0 init */ 370 /* SPI_CS0 init */
379 mxc_gpio_mode(GPIO_PORTD | 28 | GPIO_GPIO | GPIO_OUT); 371 mxc_gpio_mode(GPIO_PORTD | 28 | GPIO_GPIO | GPIO_OUT);
380 mxc_register_device(&mxc_spi_device0, &eukrea_mbimx27_spi_0_data); 372 imx27_add_spi_imx0(&eukrea_mbimx27_spi0_data);
381 spi_register_board_info(eukrea_mbimx27_spi_board_info, 373 spi_register_board_info(eukrea_mbimx27_spi_board_info,
382 ARRAY_SIZE(eukrea_mbimx27_spi_board_info)); 374 ARRAY_SIZE(eukrea_mbimx27_spi_board_info));
383#endif 375#endif
diff --git a/arch/arm/mach-imx/include/mach/dma-mx1-mx2.h b/arch/arm/mach-imx/include/mach/dma-mx1-mx2.h
new file mode 100644
index 000000000000..df5f522da6b3
--- /dev/null
+++ b/arch/arm/mach-imx/include/mach/dma-mx1-mx2.h
@@ -0,0 +1,10 @@
1#ifndef __MACH_DMA_MX1_MX2_H__
2#define __MACH_DMA_MX1_MX2_H__
3/*
4 * Don't use this header in new code, it will go away when all users are
5 * converted to mach/dma-v1.h
6 */
7
8#include <mach/dma-v1.h>
9
10#endif /* ifndef __MACH_DMA_MX1_MX2_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/dma-mx1-mx2.h b/arch/arm/mach-imx/include/mach/dma-v1.h
index 7c4870bd5a21..287431cc13e5 100644
--- a/arch/arm/plat-mxc/include/mach/dma-mx1-mx2.h
+++ b/arch/arm/mach-imx/include/mach/dma-v1.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/arm/plat-mxc/include/mach/dma-mx1-mx2.h 2 * linux/arch/arm/mach-imx/include/mach/dma-v1.h
3 * 3 *
4 * i.MX DMA registration and IRQ dispatching 4 * i.MX DMA registration and IRQ dispatching
5 * 5 *
@@ -22,8 +22,10 @@
22 * MA 02110-1301, USA. 22 * MA 02110-1301, USA.
23 */ 23 */
24 24
25#ifndef __ASM_ARCH_MXC_DMA_H 25#ifndef __MACH_DMA_V1_H__
26#define __ASM_ARCH_MXC_DMA_H 26#define __MACH_DMA_V1_H__
27
28#define imx_has_dma_v1() (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
27 29
28#define IMX_DMA_CHANNELS 16 30#define IMX_DMA_CHANNELS 16
29 31
@@ -102,4 +104,4 @@ enum imx_dma_prio {
102 104
103int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio); 105int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio);
104 106
105#endif /* _ASM_ARCH_MXC_DMA_H */ 107#endif /* __MACH_DMA_V1_H__ */
diff --git a/arch/arm/mach-mx2/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index 55291787d5d1..2a135449e52c 100644
--- a/arch/arm/mach-mx2/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -38,13 +38,12 @@
38#include <mach/board-eukrea_cpuimx27.h> 38#include <mach/board-eukrea_cpuimx27.h>
39#include <mach/common.h> 39#include <mach/common.h>
40#include <mach/hardware.h> 40#include <mach/hardware.h>
41#include <mach/i2c.h>
42#include <mach/iomux-mx27.h> 41#include <mach/iomux-mx27.h>
43#include <mach/imx-uart.h>
44#include <mach/mxc_nand.h> 42#include <mach/mxc_nand.h>
45#include <mach/mxc_ehci.h> 43#include <mach/mxc_ehci.h>
46#include <mach/ulpi.h> 44#include <mach/ulpi.h>
47 45
46#include "devices-imx27.h"
48#include "devices.h" 47#include "devices.h"
49 48
50static int eukrea_cpuimx27_pins[] = { 49static int eukrea_cpuimx27_pins[] = {
@@ -146,15 +145,12 @@ static struct platform_device eukrea_cpuimx27_nor_mtd_device = {
146 .resource = &eukrea_cpuimx27_flash_resource, 145 .resource = &eukrea_cpuimx27_flash_resource,
147}; 146};
148 147
149static struct imxuart_platform_data uart_pdata[] = { 148static const struct imxuart_platform_data uart_pdata __initconst = {
150 { 149 .flags = IMXUART_HAVE_RTSCTS,
151 .flags = IMXUART_HAVE_RTSCTS,
152 }, {
153 .flags = IMXUART_HAVE_RTSCTS,
154 },
155}; 150};
156 151
157static struct mxc_nand_platform_data eukrea_cpuimx27_nand_board_info = { 152static const struct mxc_nand_platform_data
153cpuimx27_nand_board_info __initconst = {
158 .width = 1, 154 .width = 1,
159 .hw_ecc = 1, 155 .hw_ecc = 1,
160}; 156};
@@ -166,7 +162,7 @@ static struct platform_device *platform_devices[] __initdata = {
166 &mxc_w1_master_device, 162 &mxc_w1_master_device,
167}; 163};
168 164
169static struct imxi2c_platform_data eukrea_cpuimx27_i2c_1_data = { 165static const struct imxi2c_platform_data cpuimx27_i2c1_data __initconst = {
170 .bitrate = 100000, 166 .bitrate = 100000,
171}; 167};
172 168
@@ -256,15 +252,14 @@ static void __init eukrea_cpuimx27_init(void)
256 mxc_gpio_setup_multiple_pins(eukrea_cpuimx27_pins, 252 mxc_gpio_setup_multiple_pins(eukrea_cpuimx27_pins,
257 ARRAY_SIZE(eukrea_cpuimx27_pins), "CPUIMX27"); 253 ARRAY_SIZE(eukrea_cpuimx27_pins), "CPUIMX27");
258 254
259 mxc_register_device(&mxc_uart_device0, &uart_pdata[0]); 255 imx27_add_imx_uart0(&uart_pdata);
260 256
261 mxc_register_device(&imx27_nand_device, 257 imx27_add_mxc_nand(&cpuimx27_nand_board_info);
262 &eukrea_cpuimx27_nand_board_info);
263 258
264 i2c_register_board_info(0, eukrea_cpuimx27_i2c_devices, 259 i2c_register_board_info(0, eukrea_cpuimx27_i2c_devices,
265 ARRAY_SIZE(eukrea_cpuimx27_i2c_devices)); 260 ARRAY_SIZE(eukrea_cpuimx27_i2c_devices));
266 261
267 mxc_register_device(&mxc_i2c_device0, &eukrea_cpuimx27_i2c_1_data); 262 imx27_add_i2c_imx1(&cpuimx27_i2c1_data);
268 263
269 platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); 264 platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
270 265
@@ -274,7 +269,7 @@ static void __init eukrea_cpuimx27_init(void)
274#endif 269#endif
275#if defined(MACH_EUKREA_CPUIMX27_USEUART4) 270#if defined(MACH_EUKREA_CPUIMX27_USEUART4)
276 /* in which case UART4 is also used for Bluetooth */ 271 /* in which case UART4 is also used for Bluetooth */
277 mxc_register_device(&mxc_uart_device3, &uart_pdata[1]); 272 imx27_add_imx_uart3(&uart_pdata);
278#endif 273#endif
279 274
280#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE) 275#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
diff --git a/arch/arm/mach-mx2/mach-imx27lite.c b/arch/arm/mach-imx/mach-imx27lite.c
index b5710bf18b96..22a2b5d91213 100644
--- a/arch/arm/mach-mx2/mach-imx27lite.c
+++ b/arch/arm/mach-imx/mach-imx27lite.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 15 */
20 16
21#include <linux/platform_device.h> 17#include <linux/platform_device.h>
@@ -26,10 +22,9 @@
26#include <asm/mach/map.h> 22#include <asm/mach/map.h>
27#include <mach/hardware.h> 23#include <mach/hardware.h>
28#include <mach/common.h> 24#include <mach/common.h>
29#include <mach/imx-uart.h>
30#include <mach/iomux-mx27.h> 25#include <mach/iomux-mx27.h>
31#include <mach/board-mx27lite.h>
32 26
27#include "devices-imx27.h"
33#include "devices.h" 28#include "devices.h"
34 29
35static unsigned int mx27lite_pins[] = { 30static unsigned int mx27lite_pins[] = {
@@ -59,7 +54,7 @@ static unsigned int mx27lite_pins[] = {
59 PF23_AIN_FEC_TX_EN, 54 PF23_AIN_FEC_TX_EN,
60}; 55};
61 56
62static struct imxuart_platform_data uart_pdata = { 57static const struct imxuart_platform_data uart_pdata __initconst = {
63 .flags = IMXUART_HAVE_RTSCTS, 58 .flags = IMXUART_HAVE_RTSCTS,
64}; 59};
65 60
@@ -71,7 +66,7 @@ static void __init mx27lite_init(void)
71{ 66{
72 mxc_gpio_setup_multiple_pins(mx27lite_pins, ARRAY_SIZE(mx27lite_pins), 67 mxc_gpio_setup_multiple_pins(mx27lite_pins, ARRAY_SIZE(mx27lite_pins),
73 "imx27lite"); 68 "imx27lite");
74 mxc_register_device(&mxc_uart_device0, &uart_pdata); 69 imx27_add_imx_uart0(&uart_pdata);
75 platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); 70 platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
76} 71}
77 72
diff --git a/arch/arm/mach-mx1/mach-mx1ads.c b/arch/arm/mach-imx/mach-mx1ads.c
index 51f3cfd83db2..77a760cfadc0 100644
--- a/arch/arm/mach-mx1/mach-mx1ads.c
+++ b/arch/arm/mach-imx/mach-mx1ads.c
@@ -26,10 +26,10 @@
26#include <mach/common.h> 26#include <mach/common.h>
27#include <mach/hardware.h> 27#include <mach/hardware.h>
28#include <mach/i2c.h> 28#include <mach/i2c.h>
29#include <mach/imx-uart.h>
30#include <mach/iomux-mx1.h> 29#include <mach/iomux-mx1.h>
31#include <mach/irqs.h> 30#include <mach/irqs.h>
32 31
32#include "devices-imx1.h"
33#include "devices.h" 33#include "devices.h"
34 34
35static int mx1ads_pins[] = { 35static int mx1ads_pins[] = {
@@ -58,12 +58,12 @@ static int mx1ads_pins[] = {
58 * UARTs platform data 58 * UARTs platform data
59 */ 59 */
60 60
61static struct imxuart_platform_data uart_pdata[] = { 61static const struct imxuart_platform_data uart0_pdata __initconst = {
62 { 62 .flags = IMXUART_HAVE_RTSCTS,
63 .flags = IMXUART_HAVE_RTSCTS, 63};
64 }, { 64
65 .flags = IMXUART_HAVE_RTSCTS, 65static const struct imxuart_platform_data uart1_pdata __initconst = {
66 }, 66 .flags = IMXUART_HAVE_RTSCTS,
67}; 67};
68 68
69/* 69/*
@@ -75,8 +75,8 @@ static struct physmap_flash_data mx1ads_flash_data = {
75}; 75};
76 76
77static struct resource flash_resource = { 77static struct resource flash_resource = {
78 .start = IMX_CS0_PHYS, 78 .start = MX1_CS0_PHYS,
79 .end = IMX_CS0_PHYS + SZ_32M - 1, 79 .end = MX1_CS0_PHYS + SZ_32M - 1,
80 .flags = IORESOURCE_MEM, 80 .flags = IORESOURCE_MEM,
81}; 81};
82 82
@@ -98,7 +98,7 @@ static struct pcf857x_platform_data pcf857x_data[] = {
98 } 98 }
99}; 99};
100 100
101static struct imxi2c_platform_data mx1ads_i2c_data = { 101static const struct imxi2c_platform_data mx1ads_i2c_data __initconst = {
102 .bitrate = 100000, 102 .bitrate = 100000,
103}; 103};
104 104
@@ -121,8 +121,8 @@ static void __init mx1ads_init(void)
121 ARRAY_SIZE(mx1ads_pins), "mx1ads"); 121 ARRAY_SIZE(mx1ads_pins), "mx1ads");
122 122
123 /* UART */ 123 /* UART */
124 mxc_register_device(&imx_uart1_device, &uart_pdata[0]); 124 imx1_add_imx_uart0(&uart0_pdata);
125 mxc_register_device(&imx_uart2_device, &uart_pdata[1]); 125 imx1_add_imx_uart1(&uart1_pdata);
126 126
127 /* Physmap flash */ 127 /* Physmap flash */
128 mxc_register_device(&flash_device, &mx1ads_flash_data); 128 mxc_register_device(&flash_device, &mx1ads_flash_data);
@@ -131,7 +131,7 @@ static void __init mx1ads_init(void)
131 i2c_register_board_info(0, mx1ads_i2c_devices, 131 i2c_register_board_info(0, mx1ads_i2c_devices,
132 ARRAY_SIZE(mx1ads_i2c_devices)); 132 ARRAY_SIZE(mx1ads_i2c_devices));
133 133
134 mxc_register_device(&imx_i2c_device, &mx1ads_i2c_data); 134 imx1_add_i2c_imx(&mx1ads_i2c_data);
135} 135}
136 136
137static void __init mx1ads_timer_init(void) 137static void __init mx1ads_timer_init(void)
@@ -145,8 +145,8 @@ struct sys_timer mx1ads_timer = {
145 145
146MACHINE_START(MX1ADS, "Freescale MX1ADS") 146MACHINE_START(MX1ADS, "Freescale MX1ADS")
147 /* Maintainer: Sascha Hauer, Pengutronix */ 147 /* Maintainer: Sascha Hauer, Pengutronix */
148 .phys_io = IMX_IO_PHYS, 148 .phys_io = MX1_IO_BASE_ADDR,
149 .io_pg_offst = (IMX_IO_BASE >> 18) & 0xfffc, 149 .io_pg_offst = (MX1_IO_BASE_ADDR_VIRT >> 18) & 0xfffc,
150 .boot_params = MX1_PHYS_OFFSET + 0x100, 150 .boot_params = MX1_PHYS_OFFSET + 0x100,
151 .map_io = mx1_map_io, 151 .map_io = mx1_map_io,
152 .init_irq = mx1_init_irq, 152 .init_irq = mx1_init_irq,
@@ -155,8 +155,8 @@ MACHINE_START(MX1ADS, "Freescale MX1ADS")
155MACHINE_END 155MACHINE_END
156 156
157MACHINE_START(MXLADS, "Freescale MXLADS") 157MACHINE_START(MXLADS, "Freescale MXLADS")
158 .phys_io = IMX_IO_PHYS, 158 .phys_io = MX1_IO_BASE_ADDR,
159 .io_pg_offst = (IMX_IO_BASE >> 18) & 0xfffc, 159 .io_pg_offst = (MX1_IO_BASE_ADDR_VIRT >> 18) & 0xfffc,
160 .boot_params = MX1_PHYS_OFFSET + 0x100, 160 .boot_params = MX1_PHYS_OFFSET + 0x100,
161 .map_io = mx1_map_io, 161 .map_io = mx1_map_io,
162 .init_irq = mx1_init_irq, 162 .init_irq = mx1_init_irq,
diff --git a/arch/arm/mach-mx2/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
index 113e58d7cb40..96d7f8189f32 100644
--- a/arch/arm/mach-mx2/mach-mx21ads.c
+++ b/arch/arm/mach-imx/mach-mx21ads.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 15 */
20 16
21#include <linux/platform_device.h> 17#include <linux/platform_device.h>
@@ -28,15 +24,49 @@
28#include <asm/mach/arch.h> 24#include <asm/mach/arch.h>
29#include <asm/mach/time.h> 25#include <asm/mach/time.h>
30#include <asm/mach/map.h> 26#include <asm/mach/map.h>
31#include <mach/imx-uart.h>
32#include <mach/imxfb.h> 27#include <mach/imxfb.h>
33#include <mach/iomux-mx21.h> 28#include <mach/iomux-mx21.h>
34#include <mach/mxc_nand.h> 29#include <mach/mxc_nand.h>
35#include <mach/mmc.h> 30#include <mach/mmc.h>
36#include <mach/board-mx21ads.h>
37 31
32#include "devices-imx21.h"
38#include "devices.h" 33#include "devices.h"
39 34
35/*
36 * Memory-mapped I/O on MX21ADS base board
37 */
38#define MX21ADS_MMIO_BASE_ADDR 0xf5000000
39#define MX21ADS_MMIO_SIZE SZ_16M
40
41#define MX21ADS_REG_ADDR(offset) (void __force __iomem *) \
42 (MX21ADS_MMIO_BASE_ADDR + (offset))
43
44#define MX21ADS_CS8900A_IRQ IRQ_GPIOE(11)
45#define MX21ADS_CS8900A_IOBASE_REG MX21ADS_REG_ADDR(0x000000)
46#define MX21ADS_ST16C255_IOBASE_REG MX21ADS_REG_ADDR(0x200000)
47#define MX21ADS_VERSION_REG MX21ADS_REG_ADDR(0x400000)
48#define MX21ADS_IO_REG MX21ADS_REG_ADDR(0x800000)
49
50/* MX21ADS_IO_REG bit definitions */
51#define MX21ADS_IO_SD_WP 0x0001 /* read */
52#define MX21ADS_IO_TP6 0x0001 /* write */
53#define MX21ADS_IO_SW_SEL 0x0002 /* read */
54#define MX21ADS_IO_TP7 0x0002 /* write */
55#define MX21ADS_IO_RESET_E_UART 0x0004
56#define MX21ADS_IO_RESET_BASE 0x0008
57#define MX21ADS_IO_CSI_CTL2 0x0010
58#define MX21ADS_IO_CSI_CTL1 0x0020
59#define MX21ADS_IO_CSI_CTL0 0x0040
60#define MX21ADS_IO_UART1_EN 0x0080
61#define MX21ADS_IO_UART4_EN 0x0100
62#define MX21ADS_IO_LCDON 0x0200
63#define MX21ADS_IO_IRDA_EN 0x0400
64#define MX21ADS_IO_IRDA_FIR_SEL 0x0800
65#define MX21ADS_IO_IRDA_MD0_B 0x1000
66#define MX21ADS_IO_IRDA_MD1 0x2000
67#define MX21ADS_IO_LED4_ON 0x4000
68#define MX21ADS_IO_LED3_ON 0x8000
69
40static unsigned int mx21ads_pins[] = { 70static unsigned int mx21ads_pins[] = {
41 71
42 /* CS8900A */ 72 /* CS8900A */
@@ -133,14 +163,13 @@ static struct platform_device mx21ads_nor_mtd_device = {
133 .resource = &mx21ads_flash_resource, 163 .resource = &mx21ads_flash_resource,
134}; 164};
135 165
136static struct imxuart_platform_data uart_pdata = { 166static const struct imxuart_platform_data uart_pdata_rts __initconst = {
137 .flags = IMXUART_HAVE_RTSCTS, 167 .flags = IMXUART_HAVE_RTSCTS,
138}; 168};
139 169
140static struct imxuart_platform_data uart_norts_pdata = { 170static const struct imxuart_platform_data uart_pdata_norts __initconst = {
141}; 171};
142 172
143
144static int mx21ads_fb_init(struct platform_device *pdev) 173static int mx21ads_fb_init(struct platform_device *pdev)
145{ 174{
146 u16 tmp; 175 u16 tmp;
@@ -227,7 +256,8 @@ static struct imxmmc_platform_data mx21ads_sdhc_pdata = {
227 .exit = mx21ads_sdhc_exit, 256 .exit = mx21ads_sdhc_exit,
228}; 257};
229 258
230static struct mxc_nand_platform_data mx21ads_nand_board_info = { 259static const struct mxc_nand_platform_data
260mx21ads_nand_board_info __initconst = {
231 .width = 1, 261 .width = 1,
232 .hw_ecc = 1, 262 .hw_ecc = 1,
233}; 263};
@@ -263,12 +293,12 @@ static void __init mx21ads_board_init(void)
263 mxc_gpio_setup_multiple_pins(mx21ads_pins, ARRAY_SIZE(mx21ads_pins), 293 mxc_gpio_setup_multiple_pins(mx21ads_pins, ARRAY_SIZE(mx21ads_pins),
264 "mx21ads"); 294 "mx21ads");
265 295
266 mxc_register_device(&mxc_uart_device0, &uart_pdata); 296 imx21_add_imx_uart0(&uart_pdata_rts);
267 mxc_register_device(&mxc_uart_device2, &uart_norts_pdata); 297 imx21_add_imx_uart2(&uart_pdata_norts);
268 mxc_register_device(&mxc_uart_device3, &uart_pdata); 298 imx21_add_imx_uart3(&uart_pdata_rts);
269 mxc_register_device(&mxc_fb_device, &mx21ads_fb_data); 299 mxc_register_device(&mxc_fb_device, &mx21ads_fb_data);
270 mxc_register_device(&mxc_sdhc_device0, &mx21ads_sdhc_pdata); 300 mxc_register_device(&mxc_sdhc_device0, &mx21ads_sdhc_pdata);
271 mxc_register_device(&imx21_nand_device, &mx21ads_nand_board_info); 301 imx21_add_mxc_nand(&mx21ads_nand_board_info);
272 302
273 platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); 303 platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
274} 304}
diff --git a/arch/arm/mach-mx2/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c
index b2f4e0db3fb3..e2a82bab012b 100644
--- a/arch/arm/mach-mx2/mach-mx27_3ds.c
+++ b/arch/arm/mach-imx/mach-mx27_3ds.c
@@ -12,10 +12,12 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 */
16 * You should have received a copy of the GNU General Public License 16
17 * along with this program; if not, write to the Free Software 17/*
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * This machine is known as:
19 * - i.MX27 3-Stack Development System
20 * - i.MX27 Platform Development Kit (i.MX27 PDK)
19 */ 21 */
20 22
21#include <linux/platform_device.h> 23#include <linux/platform_device.h>
@@ -25,10 +27,9 @@
25#include <asm/mach/time.h> 27#include <asm/mach/time.h>
26#include <mach/hardware.h> 28#include <mach/hardware.h>
27#include <mach/common.h> 29#include <mach/common.h>
28#include <mach/imx-uart.h>
29#include <mach/iomux-mx27.h> 30#include <mach/iomux-mx27.h>
30#include <mach/board-mx27pdk.h>
31 31
32#include "devices-imx27.h"
32#include "devices.h" 33#include "devices.h"
33 34
34static unsigned int mx27pdk_pins[] = { 35static unsigned int mx27pdk_pins[] = {
@@ -58,7 +59,7 @@ static unsigned int mx27pdk_pins[] = {
58 PF23_AIN_FEC_TX_EN, 59 PF23_AIN_FEC_TX_EN,
59}; 60};
60 61
61static struct imxuart_platform_data uart_pdata = { 62static const struct imxuart_platform_data uart_pdata __initconst = {
62 .flags = IMXUART_HAVE_RTSCTS, 63 .flags = IMXUART_HAVE_RTSCTS,
63}; 64};
64 65
@@ -70,7 +71,7 @@ static void __init mx27pdk_init(void)
70{ 71{
71 mxc_gpio_setup_multiple_pins(mx27pdk_pins, ARRAY_SIZE(mx27pdk_pins), 72 mxc_gpio_setup_multiple_pins(mx27pdk_pins, ARRAY_SIZE(mx27pdk_pins),
72 "mx27pdk"); 73 "mx27pdk");
73 mxc_register_device(&mxc_uart_device0, &uart_pdata); 74 imx27_add_imx_uart0(&uart_pdata);
74 platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); 75 platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
75} 76}
76 77
diff --git a/arch/arm/mach-mx2/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c
index 6ce323669e58..9c77da98a10e 100644
--- a/arch/arm/mach-mx2/mach-mx27ads.c
+++ b/arch/arm/mach-imx/mach-mx27ads.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 15 */
20 16
21#include <linux/platform_device.h> 17#include <linux/platform_device.h>
@@ -32,16 +28,44 @@
32#include <asm/mach/time.h> 28#include <asm/mach/time.h>
33#include <asm/mach/map.h> 29#include <asm/mach/map.h>
34#include <mach/gpio.h> 30#include <mach/gpio.h>
35#include <mach/imx-uart.h>
36#include <mach/iomux-mx27.h> 31#include <mach/iomux-mx27.h>
37#include <mach/board-mx27ads.h>
38#include <mach/mxc_nand.h> 32#include <mach/mxc_nand.h>
39#include <mach/i2c.h>
40#include <mach/imxfb.h> 33#include <mach/imxfb.h>
41#include <mach/mmc.h> 34#include <mach/mmc.h>
42 35
36#include "devices-imx27.h"
43#include "devices.h" 37#include "devices.h"
44 38
39/*
40 * Base address of PBC controller, CS4
41 */
42#define PBC_BASE_ADDRESS 0xf4300000
43#define PBC_REG_ADDR(offset) (void __force __iomem *) \
44 (PBC_BASE_ADDRESS + (offset))
45
46/* When the PBC address connection is fixed in h/w, defined as 1 */
47#define PBC_ADDR_SH 0
48
49/* Offsets for the PBC Controller register */
50/*
51 * PBC Board version register offset
52 */
53#define PBC_VERSION_REG PBC_REG_ADDR(0x00000 >> PBC_ADDR_SH)
54/*
55 * PBC Board control register 1 set address.
56 */
57#define PBC_BCTRL1_SET_REG PBC_REG_ADDR(0x00008 >> PBC_ADDR_SH)
58/*
59 * PBC Board control register 1 clear address.
60 */
61#define PBC_BCTRL1_CLEAR_REG PBC_REG_ADDR(0x0000C >> PBC_ADDR_SH)
62
63/* PBC Board Control Register 1 bit definitions */
64#define PBC_BCTRL1_LCDON 0x0800 /* Enable the LCD */
65
66/* to determine the correct external crystal reference */
67#define CKIH_27MHZ_BIT_SET (1 << 3)
68
45static unsigned int mx27ads_pins[] = { 69static unsigned int mx27ads_pins[] = {
46 /* UART0 */ 70 /* UART0 */
47 PE12_PF_UART1_TXD, 71 PE12_PF_UART1_TXD,
@@ -141,7 +165,8 @@ static unsigned int mx27ads_pins[] = {
141 PB9_PF_SD2_CLK, 165 PB9_PF_SD2_CLK,
142}; 166};
143 167
144static struct mxc_nand_platform_data mx27ads_nand_board_info = { 168static const struct mxc_nand_platform_data
169mx27ads_nand_board_info __initconst = {
145 .width = 1, 170 .width = 1,
146 .hw_ecc = 1, 171 .hw_ecc = 1,
147}; 172};
@@ -168,7 +193,7 @@ static struct platform_device mx27ads_nor_mtd_device = {
168 .resource = &mx27ads_flash_resource, 193 .resource = &mx27ads_flash_resource,
169}; 194};
170 195
171static struct imxi2c_platform_data mx27ads_i2c_data = { 196static const struct imxi2c_platform_data mx27ads_i2c1_data __initconst = {
172 .bitrate = 100000, 197 .bitrate = 100000,
173}; 198};
174 199
@@ -263,20 +288,8 @@ static struct platform_device *platform_devices[] __initdata = {
263 &mxc_w1_master_device, 288 &mxc_w1_master_device,
264}; 289};
265 290
266static struct imxuart_platform_data uart_pdata[] = { 291static const struct imxuart_platform_data uart_pdata __initconst = {
267 { 292 .flags = IMXUART_HAVE_RTSCTS,
268 .flags = IMXUART_HAVE_RTSCTS,
269 }, {
270 .flags = IMXUART_HAVE_RTSCTS,
271 }, {
272 .flags = IMXUART_HAVE_RTSCTS,
273 }, {
274 .flags = IMXUART_HAVE_RTSCTS,
275 }, {
276 .flags = IMXUART_HAVE_RTSCTS,
277 }, {
278 .flags = IMXUART_HAVE_RTSCTS,
279 },
280}; 293};
281 294
282static void __init mx27ads_board_init(void) 295static void __init mx27ads_board_init(void)
@@ -284,18 +297,18 @@ static void __init mx27ads_board_init(void)
284 mxc_gpio_setup_multiple_pins(mx27ads_pins, ARRAY_SIZE(mx27ads_pins), 297 mxc_gpio_setup_multiple_pins(mx27ads_pins, ARRAY_SIZE(mx27ads_pins),
285 "mx27ads"); 298 "mx27ads");
286 299
287 mxc_register_device(&mxc_uart_device0, &uart_pdata[0]); 300 imx27_add_imx_uart0(&uart_pdata);
288 mxc_register_device(&mxc_uart_device1, &uart_pdata[1]); 301 imx27_add_imx_uart1(&uart_pdata);
289 mxc_register_device(&mxc_uart_device2, &uart_pdata[2]); 302 imx27_add_imx_uart2(&uart_pdata);
290 mxc_register_device(&mxc_uart_device3, &uart_pdata[3]); 303 imx27_add_imx_uart3(&uart_pdata);
291 mxc_register_device(&mxc_uart_device4, &uart_pdata[4]); 304 imx27_add_imx_uart4(&uart_pdata);
292 mxc_register_device(&mxc_uart_device5, &uart_pdata[5]); 305 imx27_add_imx_uart5(&uart_pdata);
293 mxc_register_device(&imx27_nand_device, &mx27ads_nand_board_info); 306 imx27_add_mxc_nand(&mx27ads_nand_board_info);
294 307
295 /* only the i2c master 1 is used on this CPU card */ 308 /* only the i2c master 1 is used on this CPU card */
296 i2c_register_board_info(1, mx27ads_i2c_devices, 309 i2c_register_board_info(1, mx27ads_i2c_devices,
297 ARRAY_SIZE(mx27ads_i2c_devices)); 310 ARRAY_SIZE(mx27ads_i2c_devices));
298 mxc_register_device(&mxc_i2c_device1, &mx27ads_i2c_data); 311 imx27_add_i2c_imx1(&mx27ads_i2c1_data);
299 mxc_register_device(&mxc_fb_device, &mx27ads_fb_data); 312 mxc_register_device(&mxc_fb_device, &mx27ads_fb_data);
300 mxc_register_device(&mxc_sdhc_device0, &sdhc1_pdata); 313 mxc_register_device(&mxc_sdhc_device0, &sdhc1_pdata);
301 mxc_register_device(&mxc_sdhc_device1, &sdhc2_pdata); 314 mxc_register_device(&mxc_sdhc_device1, &sdhc2_pdata);
@@ -342,4 +355,3 @@ MACHINE_START(MX27ADS, "Freescale i.MX27ADS")
342 .init_machine = mx27ads_board_init, 355 .init_machine = mx27ads_board_init,
343 .timer = &mx27ads_timer, 356 .timer = &mx27ads_timer,
344MACHINE_END 357MACHINE_END
345
diff --git a/arch/arm/mach-mx2/mach-mxt_td60.c b/arch/arm/mach-imx/mach-mxt_td60.c
index bc3855992677..a3a1e452d4c5 100644
--- a/arch/arm/mach-mx2/mach-mxt_td60.c
+++ b/arch/arm/mach-imx/mach-mxt_td60.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 15 */
20 16
21#include <linux/platform_device.h> 17#include <linux/platform_device.h>
@@ -32,14 +28,13 @@
32#include <asm/mach/time.h> 28#include <asm/mach/time.h>
33#include <asm/mach/map.h> 29#include <asm/mach/map.h>
34#include <linux/gpio.h> 30#include <linux/gpio.h>
35#include <mach/imx-uart.h>
36#include <mach/iomux-mx27.h> 31#include <mach/iomux-mx27.h>
37#include <mach/mxc_nand.h> 32#include <mach/mxc_nand.h>
38#include <mach/i2c.h>
39#include <linux/i2c/pca953x.h> 33#include <linux/i2c/pca953x.h>
40#include <mach/imxfb.h> 34#include <mach/imxfb.h>
41#include <mach/mmc.h> 35#include <mach/mmc.h>
42 36
37#include "devices-imx27.h"
43#include "devices.h" 38#include "devices.h"
44 39
45static unsigned int mxt_td60_pins[] __initdata = { 40static unsigned int mxt_td60_pins[] __initdata = {
@@ -128,12 +123,13 @@ static unsigned int mxt_td60_pins[] __initdata = {
128 PB9_PF_SD2_CLK, 123 PB9_PF_SD2_CLK,
129}; 124};
130 125
131static struct mxc_nand_platform_data mxt_td60_nand_board_info = { 126static const struct mxc_nand_platform_data
127mxt_td60_nand_board_info __initconst = {
132 .width = 1, 128 .width = 1,
133 .hw_ecc = 1, 129 .hw_ecc = 1,
134}; 130};
135 131
136static struct imxi2c_platform_data mxt_td60_i2c_data = { 132static const struct imxi2c_platform_data mxt_td60_i2c0_data __initconst = {
137 .bitrate = 100000, 133 .bitrate = 100000,
138}; 134};
139 135
@@ -173,7 +169,7 @@ static struct i2c_board_info mxt_td60_i2c_devices[] = {
173 }, 169 },
174}; 170};
175 171
176static struct imxi2c_platform_data mxt_td60_i2c2_data = { 172static const struct imxi2c_platform_data mxt_td60_i2c1_data __initconst = {
177 .bitrate = 100000, 173 .bitrate = 100000,
178}; 174};
179 175
@@ -239,14 +235,8 @@ static struct platform_device *platform_devices[] __initdata = {
239 &mxc_fec_device, 235 &mxc_fec_device,
240}; 236};
241 237
242static struct imxuart_platform_data uart_pdata[] = { 238static const struct imxuart_platform_data uart_pdata __initconst = {
243 { 239 .flags = IMXUART_HAVE_RTSCTS,
244 .flags = IMXUART_HAVE_RTSCTS,
245 }, {
246 .flags = IMXUART_HAVE_RTSCTS,
247 }, {
248 .flags = IMXUART_HAVE_RTSCTS,
249 },
250}; 240};
251 241
252static void __init mxt_td60_board_init(void) 242static void __init mxt_td60_board_init(void)
@@ -254,10 +244,10 @@ static void __init mxt_td60_board_init(void)
254 mxc_gpio_setup_multiple_pins(mxt_td60_pins, ARRAY_SIZE(mxt_td60_pins), 244 mxc_gpio_setup_multiple_pins(mxt_td60_pins, ARRAY_SIZE(mxt_td60_pins),
255 "MXT_TD60"); 245 "MXT_TD60");
256 246
257 mxc_register_device(&mxc_uart_device0, &uart_pdata[0]); 247 imx27_add_imx_uart0(&uart_pdata);
258 mxc_register_device(&mxc_uart_device1, &uart_pdata[1]); 248 imx27_add_imx_uart1(&uart_pdata);
259 mxc_register_device(&mxc_uart_device2, &uart_pdata[2]); 249 imx27_add_imx_uart2(&uart_pdata);
260 mxc_register_device(&imx27_nand_device, &mxt_td60_nand_board_info); 250 imx27_add_mxc_nand(&mxt_td60_nand_board_info);
261 251
262 i2c_register_board_info(0, mxt_td60_i2c_devices, 252 i2c_register_board_info(0, mxt_td60_i2c_devices,
263 ARRAY_SIZE(mxt_td60_i2c_devices)); 253 ARRAY_SIZE(mxt_td60_i2c_devices));
@@ -265,8 +255,8 @@ static void __init mxt_td60_board_init(void)
265 i2c_register_board_info(1, mxt_td60_i2c2_devices, 255 i2c_register_board_info(1, mxt_td60_i2c2_devices,
266 ARRAY_SIZE(mxt_td60_i2c2_devices)); 256 ARRAY_SIZE(mxt_td60_i2c2_devices));
267 257
268 mxc_register_device(&mxc_i2c_device0, &mxt_td60_i2c_data); 258 imx27_add_i2c_imx0(&mxt_td60_i2c0_data);
269 mxc_register_device(&mxc_i2c_device1, &mxt_td60_i2c2_data); 259 imx27_add_i2c_imx1(&mxt_td60_i2c1_data);
270 mxc_register_device(&mxc_fb_device, &mxt_td60_fb_data); 260 mxc_register_device(&mxc_fb_device, &mxt_td60_fb_data);
271 mxc_register_device(&mxc_sdhc_device0, &sdhc1_pdata); 261 mxc_register_device(&mxc_sdhc_device0, &sdhc1_pdata);
272 262
diff --git a/arch/arm/mach-mx2/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c
index 2164b7f96ef2..6c92deaf468f 100644
--- a/arch/arm/mach-mx2/mach-pca100.c
+++ b/arch/arm/mach-imx/mach-pca100.c
@@ -36,12 +36,7 @@
36#include <mach/common.h> 36#include <mach/common.h>
37#include <mach/hardware.h> 37#include <mach/hardware.h>
38#include <mach/iomux-mx27.h> 38#include <mach/iomux-mx27.h>
39#include <mach/i2c.h>
40#include <asm/mach/time.h> 39#include <asm/mach/time.h>
41#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
42#include <mach/spi.h>
43#endif
44#include <mach/imx-uart.h>
45#include <mach/audmux.h> 40#include <mach/audmux.h>
46#include <mach/ssi.h> 41#include <mach/ssi.h>
47#include <mach/mxc_nand.h> 42#include <mach/mxc_nand.h>
@@ -51,6 +46,7 @@
51#include <mach/ulpi.h> 46#include <mach/ulpi.h>
52#include <mach/imxfb.h> 47#include <mach/imxfb.h>
53 48
49#include "devices-imx27.h"
54#include "devices.h" 50#include "devices.h"
55 51
56#define OTG_PHY_CS_GPIO (GPIO_PORTB + 23) 52#define OTG_PHY_CS_GPIO (GPIO_PORTB + 23)
@@ -166,11 +162,12 @@ static int pca100_pins[] = {
166 GPIO_PORTE | 5 | GPIO_GPIO | GPIO_IN, /* GPIO2_IRQ */ 162 GPIO_PORTE | 5 | GPIO_GPIO | GPIO_IN, /* GPIO2_IRQ */
167}; 163};
168 164
169static struct imxuart_platform_data uart_pdata = { 165static const struct imxuart_platform_data uart_pdata __initconst = {
170 .flags = IMXUART_HAVE_RTSCTS, 166 .flags = IMXUART_HAVE_RTSCTS,
171}; 167};
172 168
173static struct mxc_nand_platform_data pca100_nand_board_info = { 169static const struct mxc_nand_platform_data
170pca100_nand_board_info __initconst = {
174 .width = 1, 171 .width = 1,
175 .hw_ecc = 1, 172 .hw_ecc = 1,
176}; 173};
@@ -181,7 +178,7 @@ static struct platform_device *platform_devices[] __initdata = {
181 &mxc_wdt, 178 &mxc_wdt,
182}; 179};
183 180
184static struct imxi2c_platform_data pca100_i2c_1_data = { 181static const struct imxi2c_platform_data pca100_i2c1_data __initconst = {
185 .bitrate = 100000, 182 .bitrate = 100000,
186}; 183};
187 184
@@ -224,7 +221,7 @@ static struct spi_board_info pca100_spi_board_info[] __initdata = {
224 221
225static int pca100_spi_cs[] = {SPI1_SS0, SPI1_SS1}; 222static int pca100_spi_cs[] = {SPI1_SS0, SPI1_SS1};
226 223
227static struct spi_imx_master pca100_spi_0_data = { 224static const struct spi_imx_master pca100_spi0_data __initconst = {
228 .chipselect = pca100_spi_cs, 225 .chipselect = pca100_spi_cs,
229 .num_chipselect = ARRAY_SIZE(pca100_spi_cs), 226 .num_chipselect = ARRAY_SIZE(pca100_spi_cs),
230}; 227};
@@ -394,24 +391,24 @@ static void __init pca100_init(void)
394 391
395 mxc_register_device(&imx_ssi_device0, &pca100_ssi_pdata); 392 mxc_register_device(&imx_ssi_device0, &pca100_ssi_pdata);
396 393
397 mxc_register_device(&mxc_uart_device0, &uart_pdata); 394 imx27_add_imx_uart0(&uart_pdata);
398 395
399 mxc_register_device(&mxc_sdhc_device1, &sdhc_pdata); 396 mxc_register_device(&mxc_sdhc_device1, &sdhc_pdata);
400 397
401 mxc_register_device(&imx27_nand_device, &pca100_nand_board_info); 398 imx27_add_mxc_nand(&pca100_nand_board_info);
402 399
403 /* only the i2c master 1 is used on this CPU card */ 400 /* only the i2c master 1 is used on this CPU card */
404 i2c_register_board_info(1, pca100_i2c_devices, 401 i2c_register_board_info(1, pca100_i2c_devices,
405 ARRAY_SIZE(pca100_i2c_devices)); 402 ARRAY_SIZE(pca100_i2c_devices));
406 403
407 mxc_register_device(&mxc_i2c_device1, &pca100_i2c_1_data); 404 imx27_add_i2c_imx1(&pca100_i2c1_data);
408 405
409#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE) 406#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
410 mxc_gpio_mode(GPIO_PORTD | 28 | GPIO_GPIO | GPIO_IN); 407 mxc_gpio_mode(GPIO_PORTD | 28 | GPIO_GPIO | GPIO_IN);
411 mxc_gpio_mode(GPIO_PORTD | 27 | GPIO_GPIO | GPIO_IN); 408 mxc_gpio_mode(GPIO_PORTD | 27 | GPIO_GPIO | GPIO_IN);
412 spi_register_board_info(pca100_spi_board_info, 409 spi_register_board_info(pca100_spi_board_info,
413 ARRAY_SIZE(pca100_spi_board_info)); 410 ARRAY_SIZE(pca100_spi_board_info));
414	mxc_register_device(&mxc_spi_device0, &pca100_spi_0_data); 411	imx27_add_spi_imx0(&pca100_spi0_data);
415#endif 412#endif
416 413
417 gpio_request(OTG_PHY_CS_GPIO, "usb-otg-cs"); 414 gpio_request(OTG_PHY_CS_GPIO, "usb-otg-cs");
diff --git a/arch/arm/mach-mx2/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c
index 36c89431679a..9212e8f37001 100644
--- a/arch/arm/mach-mx2/mach-pcm038.c
+++ b/arch/arm/mach-imx/mach-pcm038.c
@@ -35,14 +35,12 @@
35#include <mach/board-pcm038.h> 35#include <mach/board-pcm038.h>
36#include <mach/common.h> 36#include <mach/common.h>
37#include <mach/hardware.h> 37#include <mach/hardware.h>
38#include <mach/i2c.h>
39#include <mach/iomux-mx27.h> 38#include <mach/iomux-mx27.h>
40#include <mach/imx-uart.h>
41#include <mach/mxc_nand.h> 39#include <mach/mxc_nand.h>
42#include <mach/spi.h>
43#include <mach/mxc_ehci.h> 40#include <mach/mxc_ehci.h>
44#include <mach/ulpi.h> 41#include <mach/ulpi.h>
45 42
43#include "devices-imx27.h"
46#include "devices.h" 44#include "devices.h"
47 45
48static int pcm038_pins[] = { 46static int pcm038_pins[] = {
@@ -162,17 +160,12 @@ static struct platform_device pcm038_nor_mtd_device = {
162 .resource = &pcm038_flash_resource, 160 .resource = &pcm038_flash_resource,
163}; 161};
164 162
165static struct imxuart_platform_data uart_pdata[] = { 163static const struct imxuart_platform_data uart_pdata __initconst = {
166 { 164 .flags = IMXUART_HAVE_RTSCTS,
167 .flags = IMXUART_HAVE_RTSCTS,
168 }, {
169 .flags = IMXUART_HAVE_RTSCTS,
170 }, {
171 .flags = IMXUART_HAVE_RTSCTS,
172 },
173}; 165};
174 166
175static struct mxc_nand_platform_data pcm038_nand_board_info = { 167static const struct mxc_nand_platform_data
168pcm038_nand_board_info __initconst = {
176 .width = 1, 169 .width = 1,
177 .hw_ecc = 1, 170 .hw_ecc = 1,
178}; 171};
@@ -192,7 +185,7 @@ static void __init pcm038_init_sram(void)
192 mx27_setup_weimcs(1, 0x0000d843, 0x22252521, 0x22220a00); 185 mx27_setup_weimcs(1, 0x0000d843, 0x22252521, 0x22220a00);
193} 186}
194 187
195static struct imxi2c_platform_data pcm038_i2c_1_data = { 188static const struct imxi2c_platform_data pcm038_i2c1_data __initconst = {
196 .bitrate = 100000, 189 .bitrate = 100000,
197}; 190};
198 191
@@ -215,7 +208,7 @@ static struct i2c_board_info pcm038_i2c_devices[] = {
215 208
216static int pcm038_spi_cs[] = {GPIO_PORTD + 28}; 209static int pcm038_spi_cs[] = {GPIO_PORTD + 28};
217 210
218static struct spi_imx_master pcm038_spi_0_data = { 211static const struct spi_imx_master pcm038_spi0_data __initconst = {
219 .chipselect = pcm038_spi_cs, 212 .chipselect = pcm038_spi_cs,
220 .num_chipselect = ARRAY_SIZE(pcm038_spi_cs), 213 .num_chipselect = ARRAY_SIZE(pcm038_spi_cs),
221}; 214};
@@ -305,18 +298,18 @@ static void __init pcm038_init(void)
305 298
306 pcm038_init_sram(); 299 pcm038_init_sram();
307 300
308 mxc_register_device(&mxc_uart_device0, &uart_pdata[0]); 301 imx27_add_imx_uart0(&uart_pdata);
309 mxc_register_device(&mxc_uart_device1, &uart_pdata[1]); 302 imx27_add_imx_uart1(&uart_pdata);
310 mxc_register_device(&mxc_uart_device2, &uart_pdata[2]); 303 imx27_add_imx_uart2(&uart_pdata);
311 304
312 mxc_gpio_mode(PE16_AF_OWIRE); 305 mxc_gpio_mode(PE16_AF_OWIRE);
313 mxc_register_device(&imx27_nand_device, &pcm038_nand_board_info); 306 imx27_add_mxc_nand(&pcm038_nand_board_info);
314 307
315 /* only the i2c master 1 is used on this CPU card */ 308 /* only the i2c master 1 is used on this CPU card */
316 i2c_register_board_info(1, pcm038_i2c_devices, 309 i2c_register_board_info(1, pcm038_i2c_devices,
317 ARRAY_SIZE(pcm038_i2c_devices)); 310 ARRAY_SIZE(pcm038_i2c_devices));
318 311
319 mxc_register_device(&mxc_i2c_device1, &pcm038_i2c_1_data); 312 imx27_add_i2c_imx1(&pcm038_i2c1_data);
320 313
321 /* PE18 for user-LED D40 */ 314 /* PE18 for user-LED D40 */
322 mxc_gpio_mode(GPIO_PORTE | 18 | GPIO_GPIO | GPIO_OUT); 315 mxc_gpio_mode(GPIO_PORTE | 18 | GPIO_GPIO | GPIO_OUT);
@@ -326,7 +319,7 @@ static void __init pcm038_init(void)
326 /* MC13783 IRQ */ 319 /* MC13783 IRQ */
327 mxc_gpio_mode(GPIO_PORTB | 23 | GPIO_GPIO | GPIO_IN); 320 mxc_gpio_mode(GPIO_PORTB | 23 | GPIO_GPIO | GPIO_IN);
328 321
329 mxc_register_device(&mxc_spi_device0, &pcm038_spi_0_data); 322 imx27_add_spi_imx0(&pcm038_spi0_data);
330 spi_register_board_info(pcm038_spi_board_info, 323 spi_register_board_info(pcm038_spi_board_info,
331 ARRAY_SIZE(pcm038_spi_board_info)); 324 ARRAY_SIZE(pcm038_spi_board_info));
332 325
diff --git a/arch/arm/mach-mx1/mach-scb9328.c b/arch/arm/mach-imx/mach-scb9328.c
index 7587a7a12460..88bf0d1e26e6 100644
--- a/arch/arm/mach-mx1/mach-scb9328.c
+++ b/arch/arm/mach-imx/mach-scb9328.c
@@ -22,17 +22,17 @@
22#include <mach/common.h> 22#include <mach/common.h>
23#include <mach/hardware.h> 23#include <mach/hardware.h>
24#include <mach/irqs.h> 24#include <mach/irqs.h>
25#include <mach/imx-uart.h>
26#include <mach/iomux-mx1.h> 25#include <mach/iomux-mx1.h>
27 26
27#include "devices-imx1.h"
28#include "devices.h" 28#include "devices.h"
29 29
30/* 30/*
31 * This scb9328 has a 32MiB flash 31 * This scb9328 has a 32MiB flash
32 */ 32 */
33static struct resource flash_resource = { 33static struct resource flash_resource = {
34 .start = IMX_CS0_PHYS, 34 .start = MX1_CS0_PHYS,
35 .end = IMX_CS0_PHYS + (32 * 1024 * 1024) - 1, 35 .end = MX1_CS0_PHYS + (32 * 1024 * 1024) - 1,
36 .flags = IORESOURCE_MEM, 36 .flags = IORESOURCE_MEM,
37}; 37};
38 38
@@ -70,13 +70,13 @@ static struct dm9000_plat_data dm9000_platdata = {
70static struct resource dm9000x_resources[] = { 70static struct resource dm9000x_resources[] = {
71 { 71 {
72 .name = "address area", 72 .name = "address area",
73 .start = IMX_CS5_PHYS, 73 .start = MX1_CS5_PHYS,
74 .end = IMX_CS5_PHYS + 1, 74 .end = MX1_CS5_PHYS + 1,
75 .flags = IORESOURCE_MEM, /* address access */ 75 .flags = IORESOURCE_MEM, /* address access */
76 }, { 76 }, {
77 .name = "data area", 77 .name = "data area",
78 .start = IMX_CS5_PHYS + 4, 78 .start = MX1_CS5_PHYS + 4,
79 .end = IMX_CS5_PHYS + 5, 79 .end = MX1_CS5_PHYS + 5,
80 .flags = IORESOURCE_MEM, /* data access */ 80 .flags = IORESOURCE_MEM, /* data access */
81 }, { 81 }, {
82 .start = IRQ_GPIOC(3), 82 .start = IRQ_GPIOC(3),
@@ -108,14 +108,13 @@ static int uart1_mxc_init(struct platform_device *pdev)
108 ARRAY_SIZE(mxc_uart1_pins), "UART1"); 108 ARRAY_SIZE(mxc_uart1_pins), "UART1");
109} 109}
110 110
111static int uart1_mxc_exit(struct platform_device *pdev) 111static void uart1_mxc_exit(struct platform_device *pdev)
112{ 112{
113 mxc_gpio_release_multiple_pins(mxc_uart1_pins, 113 mxc_gpio_release_multiple_pins(mxc_uart1_pins,
114 ARRAY_SIZE(mxc_uart1_pins)); 114 ARRAY_SIZE(mxc_uart1_pins));
115 return 0;
116} 115}
117 116
118static struct imxuart_platform_data uart_pdata = { 117static const struct imxuart_platform_data uart_pdata __initconst = {
119 .init = uart1_mxc_init, 118 .init = uart1_mxc_init,
120 .exit = uart1_mxc_exit, 119 .exit = uart1_mxc_exit,
121 .flags = IMXUART_HAVE_RTSCTS, 120 .flags = IMXUART_HAVE_RTSCTS,
@@ -131,7 +130,7 @@ static struct platform_device *devices[] __initdata = {
131 */ 130 */
132static void __init scb9328_init(void) 131static void __init scb9328_init(void)
133{ 132{
134 mxc_register_device(&imx_uart1_device, &uart_pdata); 133 imx1_add_imx_uart0(&uart_pdata);
135 134
136 printk(KERN_INFO"Scb9328: Adding devices\n"); 135 printk(KERN_INFO"Scb9328: Adding devices\n");
137 platform_add_devices(devices, ARRAY_SIZE(devices)); 136 platform_add_devices(devices, ARRAY_SIZE(devices));
diff --git a/arch/arm/mach-mx1/generic.c b/arch/arm/mach-imx/mm-imx1.c
index 7f9fc1034c08..117ebf6bc951 100644
--- a/arch/arm/mach-mx1/generic.c
+++ b/arch/arm/mach-imx/mm-imx1.c
@@ -3,7 +3,7 @@
3 * Created: april 20th, 2004 3 * Created: april 20th, 2004
4 * Copyright: Synertronixx GmbH 4 * Copyright: Synertronixx GmbH
5 * 5 *
6 * Common code for i.MX machines 6 * Common code for i.MX1 machines
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -14,11 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */ 17 */
23#include <linux/kernel.h> 18#include <linux/kernel.h>
24#include <linux/init.h> 19#include <linux/init.h>
@@ -31,23 +26,25 @@
31 26
32static struct map_desc imx_io_desc[] __initdata = { 27static struct map_desc imx_io_desc[] __initdata = {
33 { 28 {
34 .virtual = IMX_IO_BASE, 29 .virtual = MX1_IO_BASE_ADDR_VIRT,
35 .pfn = __phys_to_pfn(IMX_IO_PHYS), 30 .pfn = __phys_to_pfn(MX1_IO_BASE_ADDR),
36 .length = IMX_IO_SIZE, 31 .length = MX1_IO_SIZE,
37 .type = MT_DEVICE 32 .type = MT_DEVICE
38 } 33 }
39}; 34};
40 35
41void __init mx1_map_io(void) 36void __init mx1_map_io(void)
42{ 37{
43 mxc_set_cpu_type(MXC_CPU_MX1); 38 mxc_set_cpu_type(MXC_CPU_MX1);
44 mxc_arch_reset_init(IO_ADDRESS(WDT_BASE_ADDR)); 39 mxc_arch_reset_init(MX1_IO_ADDRESS(MX1_WDT_BASE_ADDR));
45 40
46 iotable_init(imx_io_desc, ARRAY_SIZE(imx_io_desc)); 41 iotable_init(imx_io_desc, ARRAY_SIZE(imx_io_desc));
47} 42}
48 43
44int imx1_register_gpios(void);
45
49void __init mx1_init_irq(void) 46void __init mx1_init_irq(void)
50{ 47{
51 mxc_init_irq(IO_ADDRESS(AVIC_BASE_ADDR)); 48 imx1_register_gpios();
49 mxc_init_irq(MX1_IO_ADDRESS(MX1_AVIC_BASE_ADDR));
52} 50}
53
diff --git a/arch/arm/mach-mx2/mm-imx21.c b/arch/arm/mach-imx/mm-imx21.c
index 64134314d012..68aa5d2ecdb1 100644
--- a/arch/arm/mach-mx2/mm-imx21.c
+++ b/arch/arm/mach-imx/mm-imx21.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/arm/mach-mx2/mm-imx21.c 2 * arch/arm/mach-imx/mm-imx21.c
3 * 3 *
4 * Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de) 4 * Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
5 * 5 *
@@ -77,7 +77,10 @@ void __init mx21_map_io(void)
77 iotable_init(imx21_io_desc, ARRAY_SIZE(imx21_io_desc)); 77 iotable_init(imx21_io_desc, ARRAY_SIZE(imx21_io_desc));
78} 78}
79 79
80int imx21_register_gpios(void);
81
80void __init mx21_init_irq(void) 82void __init mx21_init_irq(void)
81{ 83{
84 imx21_register_gpios();
82 mxc_init_irq(MX21_IO_ADDRESS(MX21_AVIC_BASE_ADDR)); 85 mxc_init_irq(MX21_IO_ADDRESS(MX21_AVIC_BASE_ADDR));
83} 86}
diff --git a/arch/arm/mach-mx2/mm-imx27.c b/arch/arm/mach-imx/mm-imx27.c
index 3366ed44cfd5..bcedce9c87dd 100644
--- a/arch/arm/mach-mx2/mm-imx27.c
+++ b/arch/arm/mach-imx/mm-imx27.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/arm/mach-mx2/mm-imx27.c 2 * arch/arm/mach-imx/mm-imx27.c
3 * 3 *
4 * Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de) 4 * Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
5 * 5 *
@@ -77,7 +77,10 @@ void __init mx27_map_io(void)
77 iotable_init(imx27_io_desc, ARRAY_SIZE(imx27_io_desc)); 77 iotable_init(imx27_io_desc, ARRAY_SIZE(imx27_io_desc));
78} 78}
79 79
80int imx27_register_gpios(void);
81
80void __init mx27_init_irq(void) 82void __init mx27_init_irq(void)
81{ 83{
84 imx27_register_gpios();
82 mxc_init_irq(MX27_IO_ADDRESS(MX27_AVIC_BASE_ADDR)); 85 mxc_init_irq(MX27_IO_ADDRESS(MX27_AVIC_BASE_ADDR));
83} 86}
diff --git a/arch/arm/mach-mx1/ksym_mx1.c b/arch/arm/mach-imx/mx1-camera-fiq-ksym.c
index b09ee12a4ff0..b09ee12a4ff0 100644
--- a/arch/arm/mach-mx1/ksym_mx1.c
+++ b/arch/arm/mach-imx/mx1-camera-fiq-ksym.c
diff --git a/arch/arm/mach-mx1/mx1_camera_fiq.S b/arch/arm/mach-imx/mx1-camera-fiq.S
index 9c69aa65bf17..9c69aa65bf17 100644
--- a/arch/arm/mach-mx1/mx1_camera_fiq.S
+++ b/arch/arm/mach-imx/mx1-camera-fiq.S
diff --git a/arch/arm/mach-mx2/pcm970-baseboard.c b/arch/arm/mach-imx/pcm970-baseboard.c
index f490a406d57e..f490a406d57e 100644
--- a/arch/arm/mach-mx2/pcm970-baseboard.c
+++ b/arch/arm/mach-imx/pcm970-baseboard.c
diff --git a/arch/arm/mach-mx2/pm-imx27.c b/arch/arm/mach-imx/pm-imx27.c
index afc17ce0bb54..afc17ce0bb54 100644
--- a/arch/arm/mach-mx2/pm-imx27.c
+++ b/arch/arm/mach-imx/pm-imx27.c
diff --git a/arch/arm/mach-mx1/Kconfig b/arch/arm/mach-mx1/Kconfig
deleted file mode 100644
index eb7660f5d4b7..000000000000
--- a/arch/arm/mach-mx1/Kconfig
+++ /dev/null
@@ -1,19 +0,0 @@
1if ARCH_MX1
2
3comment "MX1 platforms:"
4
5config MACH_MXLADS
6 bool
7
8config ARCH_MX1ADS
9 bool "MX1ADS platform"
10 select MACH_MXLADS
11 help
12 Say Y here if you are using Motorola MX1ADS/MXLADS boards
13
14config MACH_SCB9328
15 bool "Synertronixx scb9328"
16 help
17 Say Y here if you are using a Synertronixx scb9328 board
18
19endif
diff --git a/arch/arm/mach-mx1/Makefile b/arch/arm/mach-mx1/Makefile
deleted file mode 100644
index fc2ddf82441b..000000000000
--- a/arch/arm/mach-mx1/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
1#
2# Makefile for the linux kernel.
3#
4
5# Object file lists.
6
7EXTRA_CFLAGS += -DIMX_NEEDS_DEPRECATED_SYMBOLS
8obj-y += generic.o clock.o devices.o
9
10# Support for CMOS sensor interface
11obj-$(CONFIG_MX1_VIDEO) += ksym_mx1.o mx1_camera_fiq.o
12
13# Specific board support
14obj-$(CONFIG_ARCH_MX1ADS) += mach-mx1ads.o
15obj-$(CONFIG_MACH_SCB9328) += mach-scb9328.o
diff --git a/arch/arm/mach-mx1/Makefile.boot b/arch/arm/mach-mx1/Makefile.boot
deleted file mode 100644
index 8ed1492288a2..000000000000
--- a/arch/arm/mach-mx1/Makefile.boot
+++ /dev/null
@@ -1,4 +0,0 @@
1 zreladdr-y := 0x08008000
2params_phys-y := 0x08000100
3initrd_phys-y := 0x08800000
4
diff --git a/arch/arm/mach-mx1/crm_regs.h b/arch/arm/mach-mx1/crm_regs.h
deleted file mode 100644
index 22e866ff0c09..000000000000
--- a/arch/arm/mach-mx1/crm_regs.h
+++ /dev/null
@@ -1,55 +0,0 @@
1/*
2 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 * Copyright (c) 2008 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
4 *
5 * This file may be distributed under the terms of the GNU General
6 * Public License, version 2.
7 */
8
9#ifndef __ARCH_ARM_MACH_MX1_CRM_REGS_H__
10#define __ARCH_ARM_MACH_MX1_CRM_REGS_H__
11
12#define CCM_BASE IO_ADDRESS(CCM_BASE_ADDR)
13#define SCM_BASE IO_ADDRESS(SCM_BASE_ADDR)
14
15/* CCM register addresses */
16#define CCM_CSCR (CCM_BASE + 0x0)
17#define CCM_MPCTL0 (CCM_BASE + 0x4)
18#define CCM_MPCTL1 (CCM_BASE + 0x8)
19#define CCM_SPCTL0 (CCM_BASE + 0xC)
20#define CCM_SPCTL1 (CCM_BASE + 0x10)
21#define CCM_PCDR (CCM_BASE + 0x20)
22
23#define CCM_CSCR_CLKO_OFFSET 29
24#define CCM_CSCR_CLKO_MASK (0x7 << 29)
25#define CCM_CSCR_USB_OFFSET 26
26#define CCM_CSCR_USB_MASK (0x7 << 26)
27#define CCM_CSCR_SPLL_RESTART (1 << 22)
28#define CCM_CSCR_MPLL_RESTART (1 << 21)
29#define CCM_CSCR_OSC_EN_SHIFT 17
30#define CCM_CSCR_SYSTEM_SEL (1 << 16)
31#define CCM_CSCR_BCLK_OFFSET 10
32#define CCM_CSCR_BCLK_MASK (0xF << 10)
33#define CCM_CSCR_PRESC (1 << 15)
34#define CCM_CSCR_SPEN (1 << 1)
35#define CCM_CSCR_MPEN (1 << 0)
36
37#define CCM_PCDR_PCLK3_OFFSET 16
38#define CCM_PCDR_PCLK3_MASK (0x7F << 16)
39#define CCM_PCDR_PCLK2_OFFSET 4
40#define CCM_PCDR_PCLK2_MASK (0xF << 4)
41#define CCM_PCDR_PCLK1_OFFSET 0
42#define CCM_PCDR_PCLK1_MASK 0xF
43
44/* SCM register addresses */
45#define SCM_SIDR (SCM_BASE + 0x0)
46#define SCM_FMCR (SCM_BASE + 0x4)
47#define SCM_GPCR (SCM_BASE + 0x8)
48#define SCM_GCCR (SCM_BASE + 0xC)
49
50#define SCM_GCCR_DMA_CLK_EN_OFFSET 3
51#define SCM_GCCR_CSI_CLK_EN_OFFSET 2
52#define SCM_GCCR_MMA_CLK_EN_OFFSET 1
53#define SCM_GCCR_USBD_CLK_EN_OFFSET 0
54
55#endif /* __ARCH_ARM_MACH_MX2_CRM_REGS_H__ */
diff --git a/arch/arm/mach-mx1/devices.c b/arch/arm/mach-mx1/devices.c
deleted file mode 100644
index b6be29d1cb08..000000000000
--- a/arch/arm/mach-mx1/devices.c
+++ /dev/null
@@ -1,242 +0,0 @@
1/*
2 * Copyright 2006-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
4 * Copyright (c) 2008 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
5 * Copyright (c) 2008 Darius Augulis <darius.augulis@teltonika.lt>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor,
19 * Boston, MA 02110-1301, USA.
20 */
21
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/platform_device.h>
25#include <linux/gpio.h>
26#include <mach/irqs.h>
27#include <mach/hardware.h>
28
29#include "devices.h"
30
31static struct resource imx_csi_resources[] = {
32 {
33 .start = 0x00224000,
34 .end = 0x00224010,
35 .flags = IORESOURCE_MEM,
36 }, {
37 .start = CSI_INT,
38 .end = CSI_INT,
39 .flags = IORESOURCE_IRQ,
40 },
41};
42
43static u64 imx_csi_dmamask = 0xffffffffUL;
44
45struct platform_device imx_csi_device = {
46 .name = "mx1-camera",
47 .id = 0, /* This is used to put cameras on this interface */
48 .dev = {
49 .dma_mask = &imx_csi_dmamask,
50 .coherent_dma_mask = 0xffffffff,
51 },
52 .resource = imx_csi_resources,
53 .num_resources = ARRAY_SIZE(imx_csi_resources),
54};
55
56static struct resource imx_i2c_resources[] = {
57 {
58 .start = 0x00217000,
59 .end = 0x00217010,
60 .flags = IORESOURCE_MEM,
61 }, {
62 .start = I2C_INT,
63 .end = I2C_INT,
64 .flags = IORESOURCE_IRQ,
65 },
66};
67
68struct platform_device imx_i2c_device = {
69 .name = "imx-i2c",
70 .id = 0,
71 .resource = imx_i2c_resources,
72 .num_resources = ARRAY_SIZE(imx_i2c_resources),
73};
74
75static struct resource imx_uart1_resources[] = {
76 {
77 .start = UART1_BASE_ADDR,
78 .end = UART1_BASE_ADDR + 0xD0,
79 .flags = IORESOURCE_MEM,
80 }, {
81 .start = UART1_MINT_RX,
82 .end = UART1_MINT_RX,
83 .flags = IORESOURCE_IRQ,
84 }, {
85 .start = UART1_MINT_TX,
86 .end = UART1_MINT_TX,
87 .flags = IORESOURCE_IRQ,
88 }, {
89 .start = UART1_MINT_RTS,
90 .end = UART1_MINT_RTS,
91 .flags = IORESOURCE_IRQ,
92 },
93};
94
95struct platform_device imx_uart1_device = {
96 .name = "imx-uart",
97 .id = 0,
98 .num_resources = ARRAY_SIZE(imx_uart1_resources),
99 .resource = imx_uart1_resources,
100};
101
102static struct resource imx_uart2_resources[] = {
103 {
104 .start = UART2_BASE_ADDR,
105 .end = UART2_BASE_ADDR + 0xD0,
106 .flags = IORESOURCE_MEM,
107 }, {
108 .start = UART2_MINT_RX,
109 .end = UART2_MINT_RX,
110 .flags = IORESOURCE_IRQ,
111 }, {
112 .start = UART2_MINT_TX,
113 .end = UART2_MINT_TX,
114 .flags = IORESOURCE_IRQ,
115 }, {
116 .start = UART2_MINT_RTS,
117 .end = UART2_MINT_RTS,
118 .flags = IORESOURCE_IRQ,
119 },
120};
121
122struct platform_device imx_uart2_device = {
123 .name = "imx-uart",
124 .id = 1,
125 .num_resources = ARRAY_SIZE(imx_uart2_resources),
126 .resource = imx_uart2_resources,
127};
128
129static struct resource imx_rtc_resources[] = {
130 {
131 .start = 0x00204000,
132 .end = 0x00204024,
133 .flags = IORESOURCE_MEM,
134 }, {
135 .start = RTC_INT,
136 .end = RTC_INT,
137 .flags = IORESOURCE_IRQ,
138 }, {
139 .start = RTC_SAMINT,
140 .end = RTC_SAMINT,
141 .flags = IORESOURCE_IRQ,
142 },
143};
144
145struct platform_device imx_rtc_device = {
146 .name = "rtc-imx",
147 .id = 0,
148 .resource = imx_rtc_resources,
149 .num_resources = ARRAY_SIZE(imx_rtc_resources),
150};
151
152static struct resource imx_wdt_resources[] = {
153 {
154 .start = 0x00201000,
155 .end = 0x00201008,
156 .flags = IORESOURCE_MEM,
157 }, {
158 .start = WDT_INT,
159 .end = WDT_INT,
160 .flags = IORESOURCE_IRQ,
161 },
162};
163
164struct platform_device imx_wdt_device = {
165 .name = "imx-wdt",
166 .id = 0,
167 .resource = imx_wdt_resources,
168 .num_resources = ARRAY_SIZE(imx_wdt_resources),
169};
170
171static struct resource imx_usb_resources[] = {
172 {
173 .start = 0x00212000,
174 .end = 0x00212148,
175 .flags = IORESOURCE_MEM,
176 }, {
177 .start = USBD_INT0,
178 .end = USBD_INT0,
179 .flags = IORESOURCE_IRQ,
180 }, {
181 .start = USBD_INT1,
182 .end = USBD_INT1,
183 .flags = IORESOURCE_IRQ,
184 }, {
185 .start = USBD_INT2,
186 .end = USBD_INT2,
187 .flags = IORESOURCE_IRQ,
188 }, {
189 .start = USBD_INT3,
190 .end = USBD_INT3,
191 .flags = IORESOURCE_IRQ,
192 }, {
193 .start = USBD_INT4,
194 .end = USBD_INT4,
195 .flags = IORESOURCE_IRQ,
196 }, {
197 .start = USBD_INT5,
198 .end = USBD_INT5,
199 .flags = IORESOURCE_IRQ,
200 }, {
201 .start = USBD_INT6,
202 .end = USBD_INT6,
203 .flags = IORESOURCE_IRQ,
204 },
205};
206
207struct platform_device imx_usb_device = {
208 .name = "imx_udc",
209 .id = 0,
210 .num_resources = ARRAY_SIZE(imx_usb_resources),
211 .resource = imx_usb_resources,
212};
213
214/* GPIO port description */
215static struct mxc_gpio_port imx_gpio_ports[] = {
216 {
217 .chip.label = "gpio-0",
218 .base = (void __iomem *)IO_ADDRESS(GPIO_BASE_ADDR),
219 .irq = GPIO_INT_PORTA,
220 .virtual_irq_start = MXC_GPIO_IRQ_START,
221 }, {
222 .chip.label = "gpio-1",
223 .base = (void __iomem *)IO_ADDRESS(GPIO_BASE_ADDR + 0x100),
224 .irq = GPIO_INT_PORTB,
225 .virtual_irq_start = MXC_GPIO_IRQ_START + 32,
226 }, {
227 .chip.label = "gpio-2",
228 .base = (void __iomem *)IO_ADDRESS(GPIO_BASE_ADDR + 0x200),
229 .irq = GPIO_INT_PORTC,
230 .virtual_irq_start = MXC_GPIO_IRQ_START + 64,
231 }, {
232 .chip.label = "gpio-3",
233 .base = (void __iomem *)IO_ADDRESS(GPIO_BASE_ADDR + 0x300),
234 .irq = GPIO_INT_PORTD,
235 .virtual_irq_start = MXC_GPIO_IRQ_START + 96,
236 }
237};
238
239int __init mxc_register_gpios(void)
240{
241 return mxc_gpio_init(imx_gpio_ports, ARRAY_SIZE(imx_gpio_ports));
242}
diff --git a/arch/arm/mach-mx1/devices.h b/arch/arm/mach-mx1/devices.h
deleted file mode 100644
index 0da5d7cce3a2..000000000000
--- a/arch/arm/mach-mx1/devices.h
+++ /dev/null
@@ -1,7 +0,0 @@
1extern struct platform_device imx_csi_device;
2extern struct platform_device imx_i2c_device;
3extern struct platform_device imx_uart1_device;
4extern struct platform_device imx_uart2_device;
5extern struct platform_device imx_rtc_device;
6extern struct platform_device imx_wdt_device;
7extern struct platform_device imx_usb_device;
diff --git a/arch/arm/mach-mx2/serial.c b/arch/arm/mach-mx2/serial.c
deleted file mode 100644
index 1c0c835b2252..000000000000
--- a/arch/arm/mach-mx2/serial.c
+++ /dev/null
@@ -1,141 +0,0 @@
1/*
2 * Copyright 2006-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
17 * MA 02110-1301, USA.
18 */
19
20#include <linux/module.h>
21#include <linux/platform_device.h>
22#include <linux/serial.h>
23#include <mach/hardware.h>
24#include <mach/imx-uart.h>
25#include "devices.h"
26
27static struct resource uart0[] = {
28 {
29 .start = MX2x_UART1_BASE_ADDR,
30 .end = MX2x_UART1_BASE_ADDR + 0x0B5,
31 .flags = IORESOURCE_MEM,
32 }, {
33 .start = MX2x_INT_UART1,
34 .end = MX2x_INT_UART1,
35 .flags = IORESOURCE_IRQ,
36 },
37};
38
39struct platform_device mxc_uart_device0 = {
40 .name = "imx-uart",
41 .id = 0,
42 .resource = uart0,
43 .num_resources = ARRAY_SIZE(uart0),
44};
45
46static struct resource uart1[] = {
47 {
48 .start = MX2x_UART2_BASE_ADDR,
49 .end = MX2x_UART2_BASE_ADDR + 0x0B5,
50 .flags = IORESOURCE_MEM,
51 }, {
52 .start = MX2x_INT_UART2,
53 .end = MX2x_INT_UART2,
54 .flags = IORESOURCE_IRQ,
55 },
56};
57
58struct platform_device mxc_uart_device1 = {
59 .name = "imx-uart",
60 .id = 1,
61 .resource = uart1,
62 .num_resources = ARRAY_SIZE(uart1),
63};
64
65static struct resource uart2[] = {
66 {
67 .start = MX2x_UART3_BASE_ADDR,
68 .end = MX2x_UART3_BASE_ADDR + 0x0B5,
69 .flags = IORESOURCE_MEM,
70 }, {
71 .start = MX2x_INT_UART3,
72 .end = MX2x_INT_UART3,
73 .flags = IORESOURCE_IRQ,
74 },
75};
76
77struct platform_device mxc_uart_device2 = {
78 .name = "imx-uart",
79 .id = 2,
80 .resource = uart2,
81 .num_resources = ARRAY_SIZE(uart2),
82};
83
84static struct resource uart3[] = {
85 {
86 .start = MX2x_UART4_BASE_ADDR,
87 .end = MX2x_UART4_BASE_ADDR + 0x0B5,
88 .flags = IORESOURCE_MEM,
89 }, {
90 .start = MX2x_INT_UART4,
91 .end = MX2x_INT_UART4,
92 .flags = IORESOURCE_IRQ,
93 },
94};
95
96struct platform_device mxc_uart_device3 = {
97 .name = "imx-uart",
98 .id = 3,
99 .resource = uart3,
100 .num_resources = ARRAY_SIZE(uart3),
101};
102
103#ifdef CONFIG_MACH_MX27
104static struct resource uart4[] = {
105 {
106 .start = MX27_UART5_BASE_ADDR,
107 .end = MX27_UART5_BASE_ADDR + 0x0B5,
108 .flags = IORESOURCE_MEM,
109 }, {
110 .start = MX27_INT_UART5,
111 .end = MX27_INT_UART5,
112 .flags = IORESOURCE_IRQ,
113 },
114};
115
116struct platform_device mxc_uart_device4 = {
117 .name = "imx-uart",
118 .id = 4,
119 .resource = uart4,
120 .num_resources = ARRAY_SIZE(uart4),
121};
122
123static struct resource uart5[] = {
124 {
125 .start = MX27_UART6_BASE_ADDR,
126 .end = MX27_UART6_BASE_ADDR + 0x0B5,
127 .flags = IORESOURCE_MEM,
128 }, {
129 .start = MX27_INT_UART6,
130 .end = MX27_INT_UART6,
131 .flags = IORESOURCE_IRQ,
132 },
133};
134
135struct platform_device mxc_uart_device5 = {
136 .name = "imx-uart",
137 .id = 5,
138 .resource = uart5,
139 .num_resources = ARRAY_SIZE(uart5),
140};
141#endif
diff --git a/arch/arm/mach-mx25/Kconfig b/arch/arm/mach-mx25/Kconfig
index 3a6668eebf9a..67e0b54218ae 100644
--- a/arch/arm/mach-mx25/Kconfig
+++ b/arch/arm/mach-mx25/Kconfig
@@ -4,9 +4,14 @@ comment "MX25 platforms:"
4 4
5config MACH_MX25_3DS 5config MACH_MX25_3DS
6 bool "Support MX25PDK (3DS) Platform" 6 bool "Support MX25PDK (3DS) Platform"
7 select IMX_HAVE_PLATFORM_IMX_UART
8 select IMX_HAVE_PLATFORM_MXC_NAND
7 9
8config MACH_EUKREA_CPUIMX25 10config MACH_EUKREA_CPUIMX25
9 bool "Support Eukrea CPUIMX25 Platform" 11 bool "Support Eukrea CPUIMX25 Platform"
12 select IMX_HAVE_PLATFORM_IMX_I2C
13 select IMX_HAVE_PLATFORM_IMX_UART
14 select IMX_HAVE_PLATFORM_MXC_NAND
10 select MXC_ULPI if USB_ULPI 15 select MXC_ULPI if USB_ULPI
11 16
12choice 17choice
diff --git a/arch/arm/mach-mx25/Makefile b/arch/arm/mach-mx25/Makefile
index 83ab5d805104..87ffb9c2f90a 100644
--- a/arch/arm/mach-mx25/Makefile
+++ b/arch/arm/mach-mx25/Makefile
@@ -1,5 +1,5 @@
1obj-y := mm.o devices.o 1obj-y := mm.o devices.o
2obj-$(CONFIG_ARCH_MX25) += clock.o 2obj-$(CONFIG_ARCH_MX25) += clock.o
3obj-$(CONFIG_MACH_MX25_3DS) += mach-mx25pdk.o 3obj-$(CONFIG_MACH_MX25_3DS) += mach-mx25_3ds.o
4obj-$(CONFIG_MACH_EUKREA_CPUIMX25) += mach-cpuimx25.o 4obj-$(CONFIG_MACH_EUKREA_CPUIMX25) += mach-cpuimx25.o
5obj-$(CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD) += eukrea_mbimxsd-baseboard.o 5obj-$(CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD) += eukrea_mbimxsd-baseboard.o
diff --git a/arch/arm/mach-mx25/devices-imx25.h b/arch/arm/mach-mx25/devices-imx25.h
new file mode 100644
index 000000000000..2025cb947fcf
--- /dev/null
+++ b/arch/arm/mach-mx25/devices-imx25.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright (C) 2010 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#include <mach/mx25.h>
10#include <mach/devices-common.h>
11
12#define imx25_add_imx_i2c0(pdata) \
13 imx_add_imx_i2c(0, MX25_I2C1_BASE_ADDR, SZ_16K, MX25_INT_I2C1, pdata)
14#define imx25_add_imx_i2c1(pdata) \
15 imx_add_imx_i2c(1, MX25_I2C2_BASE_ADDR, SZ_16K, MX25_INT_I2C2, pdata)
16#define imx25_add_imx_i2c2(pdata) \
17 imx_add_imx_i2c(2, MX25_I2C3_BASE_ADDR, SZ_16K, MX25_INT_I2C3, pdata)
18
19#define imx25_add_imx_uart0(pdata) \
20 imx_add_imx_uart_1irq(0, MX25_UART1_BASE_ADDR, SZ_16K, MX25_INT_UART1, pdata)
21#define imx25_add_imx_uart1(pdata) \
22 imx_add_imx_uart_1irq(1, MX25_UART2_BASE_ADDR, SZ_16K, MX25_INT_UART2, pdata)
23#define imx25_add_imx_uart2(pdata) \
24 imx_add_imx_uart_1irq(2, MX25_UART3_BASE_ADDR, SZ_16K, MX25_INT_UART3, pdata)
25#define imx25_add_imx_uart3(pdata) \
26 imx_add_imx_uart_1irq(3, MX25_UART4_BASE_ADDR, SZ_16K, MX25_INT_UART4, pdata)
27#define imx25_add_imx_uart4(pdata) \
28 imx_add_imx_uart_1irq(4, MX25_UART5_BASE_ADDR, SZ_16K, MX25_INT_UART5, pdata)
29
30#define imx25_add_mxc_nand(pdata) \
31 imx_add_mxc_nand_v21(MX25_NFC_BASE_ADDR, MX25_INT_NANDFC, pdata)
32
33#define imx25_add_spi_imx0(pdata) \
34 imx_add_spi_imx(0, MX25_CSPI1_BASE_ADDR, SZ_16K, MX25_INT_CSPI1, pdata)
35#define imx25_add_spi_imx1(pdata) \
36 imx_add_spi_imx(1, MX25_CSPI2_BASE_ADDR, SZ_16K, MX25_INT_CSPI2, pdata)
37#define imx25_add_spi_imx2(pdata) \
38 imx_add_spi_imx(2, MX25_CSPI3_BASE_ADDR, SZ_16K, MX25_INT_CSPI3, pdata)
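
(For context before the board-file conversions further down: a minimal sketch of how these new imx25_add_* helpers are consumed. It is illustrative only — the board name and platform-data values are invented, and it assumes the imxuart/mxc_nand platform-data declarations become visible through <mach/devices-common.h>, which is what the cpuimx25 and mx25pdk hunks below rely on when they drop <mach/imx-uart.h>.)

#include <linux/init.h>
#include <mach/mxc_nand.h>

#include "devices-imx25.h"

/* Platform data can live in .init.rodata; the imx25_add_* helpers copy what
 * they need while registering the platform device dynamically. */
static const struct imxuart_platform_data example_uart_pdata __initconst = {
	.flags = IMXUART_HAVE_RTSCTS,
};

static const struct mxc_nand_platform_data example_nand_pdata __initconst = {
	.width = 1,
	.hw_ecc = 1,
};

static void __init example_board_init(void)
{
	/* Replaces the old mxc_register_device(&mxc_uart_device0, ...) calls
	 * against the static platform_device instances removed in this series. */
	imx25_add_imx_uart0(&example_uart_pdata);
	imx25_add_mxc_nand(&example_nand_pdata);
	imx25_add_imx_i2c0(NULL);	/* NULL pdata: driver defaults */
}
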
diff --git a/arch/arm/mach-mx25/devices.c b/arch/arm/mach-mx25/devices.c
index 82d3e53f01f2..3468eb15b236 100644
--- a/arch/arm/mach-mx25/devices.c
+++ b/arch/arm/mach-mx25/devices.c
@@ -22,101 +22,6 @@
22#include <mach/mx25.h> 22#include <mach/mx25.h>
23#include <mach/irqs.h> 23#include <mach/irqs.h>
24 24
25static struct resource uart0[] = {
26 {
27 .start = 0x43f90000,
28 .end = 0x43f93fff,
29 .flags = IORESOURCE_MEM,
30 }, {
31 .start = 45,
32 .end = 45,
33 .flags = IORESOURCE_IRQ,
34 },
35};
36
37struct platform_device mxc_uart_device0 = {
38 .name = "imx-uart",
39 .id = 0,
40 .resource = uart0,
41 .num_resources = ARRAY_SIZE(uart0),
42};
43
44static struct resource uart1[] = {
45 {
46 .start = 0x43f94000,
47 .end = 0x43f97fff,
48 .flags = IORESOURCE_MEM,
49 }, {
50 .start = 32,
51 .end = 32,
52 .flags = IORESOURCE_IRQ,
53 },
54};
55
56struct platform_device mxc_uart_device1 = {
57 .name = "imx-uart",
58 .id = 1,
59 .resource = uart1,
60 .num_resources = ARRAY_SIZE(uart1),
61};
62
63static struct resource uart2[] = {
64 {
65 .start = 0x5000c000,
66 .end = 0x5000ffff,
67 .flags = IORESOURCE_MEM,
68 }, {
69 .start = 18,
70 .end = 18,
71 .flags = IORESOURCE_IRQ,
72 },
73};
74
75struct platform_device mxc_uart_device2 = {
76 .name = "imx-uart",
77 .id = 2,
78 .resource = uart2,
79 .num_resources = ARRAY_SIZE(uart2),
80};
81
82static struct resource uart3[] = {
83 {
84 .start = 0x50008000,
85 .end = 0x5000bfff,
86 .flags = IORESOURCE_MEM,
87 }, {
88 .start = 5,
89 .end = 5,
90 .flags = IORESOURCE_IRQ,
91 },
92};
93
94struct platform_device mxc_uart_device3 = {
95 .name = "imx-uart",
96 .id = 3,
97 .resource = uart3,
98 .num_resources = ARRAY_SIZE(uart3),
99};
100
101static struct resource uart4[] = {
102 {
103 .start = 0x5002c000,
104 .end = 0x5002ffff,
105 .flags = IORESOURCE_MEM,
106 }, {
107 .start = 40,
108 .end = 40,
109 .flags = IORESOURCE_IRQ,
110 },
111};
112
113struct platform_device mxc_uart_device4 = {
114 .name = "imx-uart",
115 .id = 4,
116 .resource = uart4,
117 .num_resources = ARRAY_SIZE(uart4),
118};
119
120static u64 otg_dmamask = DMA_BIT_MASK(32); 25static u64 otg_dmamask = DMA_BIT_MASK(32);
121 26
122static struct resource mxc_otg_resources[] = { 27static struct resource mxc_otg_resources[] = {
@@ -179,63 +84,6 @@ struct platform_device mxc_usbh2 = {
179 .num_resources = ARRAY_SIZE(mxc_usbh2_resources), 84 .num_resources = ARRAY_SIZE(mxc_usbh2_resources),
180}; 85};
181 86
182static struct resource mxc_spi_resources0[] = {
183 {
184 .start = 0x43fa4000,
185 .end = 0x43fa7fff,
186 .flags = IORESOURCE_MEM,
187 }, {
188 .start = 14,
189 .end = 14,
190 .flags = IORESOURCE_IRQ,
191 },
192};
193
194struct platform_device mxc_spi_device0 = {
195 .name = "spi_imx",
196 .id = 0,
197 .num_resources = ARRAY_SIZE(mxc_spi_resources0),
198 .resource = mxc_spi_resources0,
199};
200
201static struct resource mxc_spi_resources1[] = {
202 {
203 .start = 0x50010000,
204 .end = 0x50013fff,
205 .flags = IORESOURCE_MEM,
206 }, {
207 .start = 13,
208 .end = 13,
209 .flags = IORESOURCE_IRQ,
210 },
211};
212
213struct platform_device mxc_spi_device1 = {
214 .name = "spi_imx",
215 .id = 1,
216 .num_resources = ARRAY_SIZE(mxc_spi_resources1),
217 .resource = mxc_spi_resources1,
218};
219
220static struct resource mxc_spi_resources2[] = {
221 {
222 .start = 0x50004000,
223 .end = 0x50007fff,
224 .flags = IORESOURCE_MEM,
225 }, {
226 .start = 0,
227 .end = 0,
228 .flags = IORESOURCE_IRQ,
229 },
230};
231
232struct platform_device mxc_spi_device2 = {
233 .name = "spi_imx",
234 .id = 2,
235 .num_resources = ARRAY_SIZE(mxc_spi_resources2),
236 .resource = mxc_spi_resources2,
237};
238
239static struct resource mxc_pwm_resources0[] = { 87static struct resource mxc_pwm_resources0[] = {
240 { 88 {
241 .start = 0x53fe0000, 89 .start = 0x53fe0000,
@@ -331,63 +179,6 @@ struct platform_device mxc_pwm_device3 = {
331 .resource = mxc_pwm_resources3, 179 .resource = mxc_pwm_resources3,
332}; 180};
333 181
334static struct resource mxc_i2c_1_resources[] = {
335 {
336 .start = 0x43f80000,
337 .end = 0x43f83fff,
338 .flags = IORESOURCE_MEM,
339 }, {
340 .start = 3,
341 .end = 3,
342 .flags = IORESOURCE_IRQ,
343 }
344};
345
346struct platform_device mxc_i2c_device0 = {
347 .name = "imx-i2c",
348 .id = 0,
349 .num_resources = ARRAY_SIZE(mxc_i2c_1_resources),
350 .resource = mxc_i2c_1_resources,
351};
352
353static struct resource mxc_i2c_2_resources[] = {
354 {
355 .start = 0x43f98000,
356 .end = 0x43f9bfff,
357 .flags = IORESOURCE_MEM,
358 }, {
359 .start = 4,
360 .end = 4,
361 .flags = IORESOURCE_IRQ,
362 }
363};
364
365struct platform_device mxc_i2c_device1 = {
366 .name = "imx-i2c",
367 .id = 1,
368 .num_resources = ARRAY_SIZE(mxc_i2c_2_resources),
369 .resource = mxc_i2c_2_resources,
370};
371
372static struct resource mxc_i2c_3_resources[] = {
373 {
374 .start = 0x43f84000,
375 .end = 0x43f87fff,
376 .flags = IORESOURCE_MEM,
377 }, {
378 .start = 10,
379 .end = 10,
380 .flags = IORESOURCE_IRQ,
381 }
382};
383
384struct platform_device mxc_i2c_device2 = {
385 .name = "imx-i2c",
386 .id = 2,
387 .num_resources = ARRAY_SIZE(mxc_i2c_3_resources),
388 .resource = mxc_i2c_3_resources,
389};
390
391static struct mxc_gpio_port imx_gpio_ports[] = { 182static struct mxc_gpio_port imx_gpio_ports[] = {
392 { 183 {
393 .chip.label = "gpio-0", 184 .chip.label = "gpio-0",
@@ -412,7 +203,7 @@ static struct mxc_gpio_port imx_gpio_ports[] = {
412 } 203 }
413}; 204};
414 205
415int __init mxc_register_gpios(void) 206int __init imx25_register_gpios(void)
416{ 207{
417 return mxc_gpio_init(imx_gpio_ports, ARRAY_SIZE(imx_gpio_ports)); 208 return mxc_gpio_init(imx_gpio_ports, ARRAY_SIZE(imx_gpio_ports));
418} 209}
@@ -437,26 +228,6 @@ struct platform_device mx25_fec_device = {
437 .resource = mx25_fec_resources, 228 .resource = mx25_fec_resources,
438}; 229};
439 230
440static struct resource mxc_nand_resources[] = {
441 {
442 .start = MX25_NFC_BASE_ADDR,
443 .end = MX25_NFC_BASE_ADDR + 0x1fff,
444 .flags = IORESOURCE_MEM,
445 },
446 {
447 .start = MX25_INT_NANDFC,
448 .end = MX25_INT_NANDFC,
449 .flags = IORESOURCE_IRQ,
450 },
451};
452
453struct platform_device mxc_nand_device = {
454 .name = "mxc_nand",
455 .id = 0,
456 .num_resources = ARRAY_SIZE(mxc_nand_resources),
457 .resource = mxc_nand_resources,
458};
459
460static struct resource mx25_rtc_resources[] = { 231static struct resource mx25_rtc_resources[] = {
461 { 232 {
462 .start = MX25_DRYICE_BASE_ADDR, 233 .start = MX25_DRYICE_BASE_ADDR,
diff --git a/arch/arm/mach-mx25/devices.h b/arch/arm/mach-mx25/devices.h
index 00e29f57a596..4aceb68e35a7 100644
--- a/arch/arm/mach-mx25/devices.h
+++ b/arch/arm/mach-mx25/devices.h
@@ -1,24 +1,12 @@
1extern struct platform_device mxc_uart_device0;
2extern struct platform_device mxc_uart_device1;
3extern struct platform_device mxc_uart_device2;
4extern struct platform_device mxc_uart_device3;
5extern struct platform_device mxc_uart_device4;
6extern struct platform_device mxc_otg; 1extern struct platform_device mxc_otg;
7extern struct platform_device otg_udc_device; 2extern struct platform_device otg_udc_device;
8extern struct platform_device mxc_usbh2; 3extern struct platform_device mxc_usbh2;
9extern struct platform_device mxc_spi_device0;
10extern struct platform_device mxc_spi_device1;
11extern struct platform_device mxc_spi_device2;
12extern struct platform_device mxc_pwm_device0; 4extern struct platform_device mxc_pwm_device0;
13extern struct platform_device mxc_pwm_device1; 5extern struct platform_device mxc_pwm_device1;
14extern struct platform_device mxc_pwm_device2; 6extern struct platform_device mxc_pwm_device2;
15extern struct platform_device mxc_pwm_device3; 7extern struct platform_device mxc_pwm_device3;
16extern struct platform_device mxc_keypad_device; 8extern struct platform_device mxc_keypad_device;
17extern struct platform_device mxc_i2c_device0;
18extern struct platform_device mxc_i2c_device1;
19extern struct platform_device mxc_i2c_device2;
20extern struct platform_device mx25_fec_device; 9extern struct platform_device mx25_fec_device;
21extern struct platform_device mxc_nand_device;
22extern struct platform_device mx25_rtc_device; 10extern struct platform_device mx25_rtc_device;
23extern struct platform_device mx25_fb_device; 11extern struct platform_device mx25_fb_device;
24extern struct platform_device mxc_wdt; 12extern struct platform_device mxc_wdt;
diff --git a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
index e0f0dfda4d01..f07b1f95ac76 100644
--- a/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
+++ b/arch/arm/mach-mx25/eukrea_mbimxsd-baseboard.c
@@ -38,6 +38,7 @@
38#include <mach/ssi.h> 38#include <mach/ssi.h>
39#include <mach/audmux.h> 39#include <mach/audmux.h>
40 40
41#include "devices-imx25.h"
41#include "devices.h" 42#include "devices.h"
42 43
43static struct pad_desc eukrea_mbimxsd_pads[] = { 44static struct pad_desc eukrea_mbimxsd_pads[] = {
@@ -195,7 +196,7 @@ static struct platform_device *platform_devices[] __initdata = {
195 &eukrea_mbimxsd_lcd_powerdev, 196 &eukrea_mbimxsd_lcd_powerdev,
196}; 197};
197 198
198static struct imxuart_platform_data uart_pdata = { 199static const struct imxuart_platform_data uart_pdata __initconst = {
199 .flags = IMXUART_HAVE_RTSCTS, 200 .flags = IMXUART_HAVE_RTSCTS,
200}; 201};
201 202
@@ -237,7 +238,7 @@ void __init eukrea_mbimxsd_baseboard_init(void)
237 ); 238 );
238#endif 239#endif
239 240
240 mxc_register_device(&mxc_uart_device1, &uart_pdata); 241 imx25_add_imx_uart1(&uart_pdata);
241 mxc_register_device(&mx25_fb_device, &eukrea_mximxsd_fb_pdata); 242 mxc_register_device(&mx25_fb_device, &eukrea_mximxsd_fb_pdata);
242 mxc_register_device(&imx_ssi_device0, &eukrea_mbimxsd_ssi_pdata); 243 mxc_register_device(&imx_ssi_device0, &eukrea_mbimxsd_ssi_pdata);
243 244
diff --git a/arch/arm/mach-mx25/mach-cpuimx25.c b/arch/arm/mach-mx25/mach-cpuimx25.c
index 4796484830a0..d39f9ccd4be0 100644
--- a/arch/arm/mach-mx25/mach-cpuimx25.c
+++ b/arch/arm/mach-mx25/mach-cpuimx25.c
@@ -37,18 +37,17 @@
37#include <asm/memory.h> 37#include <asm/memory.h>
38#include <asm/mach/map.h> 38#include <asm/mach/map.h>
39#include <mach/common.h> 39#include <mach/common.h>
40#include <mach/imx-uart.h>
41#include <mach/i2c.h>
42#include <mach/mx25.h> 40#include <mach/mx25.h>
43#include <mach/mxc_nand.h> 41#include <mach/mxc_nand.h>
44#include <mach/imxfb.h> 42#include <mach/imxfb.h>
45#include <mach/mxc_ehci.h> 43#include <mach/mxc_ehci.h>
46#include <mach/ulpi.h> 44#include <mach/ulpi.h>
45#include <mach/iomux-mx25.h>
47 46
47#include "devices-imx25.h"
48#include "devices.h" 48#include "devices.h"
49#include <mach/iomux-mx25.h>
50 49
51static struct imxuart_platform_data uart_pdata = { 50static const struct imxuart_platform_data uart_pdata __initconst = {
52 .flags = IMXUART_HAVE_RTSCTS, 51 .flags = IMXUART_HAVE_RTSCTS,
53}; 52};
54 53
@@ -72,13 +71,15 @@ static struct fec_platform_data mx25_fec_pdata = {
72 .phy = PHY_INTERFACE_MODE_RMII, 71 .phy = PHY_INTERFACE_MODE_RMII,
73}; 72};
74 73
75static struct mxc_nand_platform_data eukrea_cpuimx25_nand_board_info = { 74static const struct mxc_nand_platform_data
75eukrea_cpuimx25_nand_board_info __initconst = {
76 .width = 1, 76 .width = 1,
77 .hw_ecc = 1, 77 .hw_ecc = 1,
78 .flash_bbt = 1, 78 .flash_bbt = 1,
79}; 79};
80 80
81static struct imxi2c_platform_data eukrea_cpuimx25_i2c_1_data = { 81static const struct imxi2c_platform_data
82eukrea_cpuimx25_i2c0_data __initconst = {
82 .bitrate = 100000, 83 .bitrate = 100000,
83}; 84};
84 85
@@ -125,14 +126,14 @@ static void __init eukrea_cpuimx25_init(void)
125 ARRAY_SIZE(eukrea_cpuimx25_pads))) 126 ARRAY_SIZE(eukrea_cpuimx25_pads)))
126 printk(KERN_ERR "error setting cpuimx25 pads !\n"); 127 printk(KERN_ERR "error setting cpuimx25 pads !\n");
127 128
128 mxc_register_device(&mxc_uart_device0, &uart_pdata); 129 imx25_add_imx_uart0(&uart_pdata);
129 mxc_register_device(&mxc_nand_device, &eukrea_cpuimx25_nand_board_info); 130 imx25_add_mxc_nand(&eukrea_cpuimx25_nand_board_info);
130 mxc_register_device(&mx25_rtc_device, NULL); 131 mxc_register_device(&mx25_rtc_device, NULL);
131 mxc_register_device(&mx25_fec_device, &mx25_fec_pdata); 132 mxc_register_device(&mx25_fec_device, &mx25_fec_pdata);
132 133
133 i2c_register_board_info(0, eukrea_cpuimx25_i2c_devices, 134 i2c_register_board_info(0, eukrea_cpuimx25_i2c_devices,
134 ARRAY_SIZE(eukrea_cpuimx25_i2c_devices)); 135 ARRAY_SIZE(eukrea_cpuimx25_i2c_devices));
135 mxc_register_device(&mxc_i2c_device0, &eukrea_cpuimx25_i2c_1_data); 136 imx25_add_imx_i2c0(&eukrea_cpuimx25_i2c0_data);
136 137
137#if defined(CONFIG_USB_ULPI) 138#if defined(CONFIG_USB_ULPI)
138 if (otg_mode_host) { 139 if (otg_mode_host) {
diff --git a/arch/arm/mach-mx25/mach-mx25pdk.c b/arch/arm/mach-mx25/mach-mx25_3ds.c
index ba3fbef1c41f..62bc21f11a71 100644
--- a/arch/arm/mach-mx25/mach-mx25pdk.c
+++ b/arch/arm/mach-mx25/mach-mx25_3ds.c
@@ -16,6 +16,12 @@
16 * Boston, MA 02110-1301, USA. 16 * Boston, MA 02110-1301, USA.
17 */ 17 */
18 18
19/*
20 * This machine is known as:
21 * - i.MX25 3-Stack Development System
22 * - i.MX25 Platform Development Kit (i.MX25 PDK)
23 */
24
19#include <linux/types.h> 25#include <linux/types.h>
20#include <linux/init.h> 26#include <linux/init.h>
21#include <linux/delay.h> 27#include <linux/delay.h>
@@ -33,14 +39,14 @@
33#include <asm/memory.h> 39#include <asm/memory.h>
34#include <asm/mach/map.h> 40#include <asm/mach/map.h>
35#include <mach/common.h> 41#include <mach/common.h>
36#include <mach/imx-uart.h>
37#include <mach/mx25.h> 42#include <mach/mx25.h>
38#include <mach/mxc_nand.h>
39#include <mach/imxfb.h> 43#include <mach/imxfb.h>
40#include "devices.h"
41#include <mach/iomux-mx25.h> 44#include <mach/iomux-mx25.h>
42 45
43static struct imxuart_platform_data uart_pdata = { 46#include "devices-imx25.h"
47#include "devices.h"
48
49static const struct imxuart_platform_data uart_pdata __initconst = {
44 .flags = IMXUART_HAVE_RTSCTS, 50 .flags = IMXUART_HAVE_RTSCTS,
45}; 51};
46 52
@@ -114,7 +120,8 @@ static void __init mx25pdk_fec_reset(void)
114 gpio_set_value(FEC_RESET_B_GPIO, 1); 120 gpio_set_value(FEC_RESET_B_GPIO, 1);
115} 121}
116 122
117static struct mxc_nand_platform_data mx25pdk_nand_board_info = { 123static const struct mxc_nand_platform_data
124mx25pdk_nand_board_info __initconst = {
118 .width = 1, 125 .width = 1,
119 .hw_ecc = 1, 126 .hw_ecc = 1,
120 .flash_bbt = 1, 127 .flash_bbt = 1,
@@ -177,9 +184,9 @@ static void __init mx25pdk_init(void)
177 mxc_iomux_v3_setup_multiple_pads(mx25pdk_pads, 184 mxc_iomux_v3_setup_multiple_pads(mx25pdk_pads,
178 ARRAY_SIZE(mx25pdk_pads)); 185 ARRAY_SIZE(mx25pdk_pads));
179 186
180 mxc_register_device(&mxc_uart_device0, &uart_pdata); 187 imx25_add_imx_uart0(&uart_pdata);
181 mxc_register_device(&mxc_usbh2, NULL); 188 mxc_register_device(&mxc_usbh2, NULL);
182 mxc_register_device(&mxc_nand_device, &mx25pdk_nand_board_info); 189 imx25_add_mxc_nand(&mx25pdk_nand_board_info);
183 mxc_register_device(&mx25_rtc_device, NULL); 190 mxc_register_device(&mx25_rtc_device, NULL);
184 mxc_register_device(&mx25_fb_device, &mx25pdk_fb_pdata); 191 mxc_register_device(&mx25_fb_device, &mx25pdk_fb_pdata);
185 mxc_register_device(&mxc_wdt, NULL); 192 mxc_register_device(&mxc_wdt, NULL);
diff --git a/arch/arm/mach-mx25/mm.c b/arch/arm/mach-mx25/mm.c
index a7e587ff3e9e..593e14545f5a 100644
--- a/arch/arm/mach-mx25/mm.c
+++ b/arch/arm/mach-mx25/mm.c
@@ -14,10 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 17 */
22 18
23#include <linux/mm.h> 19#include <linux/mm.h>
@@ -69,8 +65,11 @@ void __init mx25_map_io(void)
69 iotable_init(mxc_io_desc, ARRAY_SIZE(mxc_io_desc)); 65 iotable_init(mxc_io_desc, ARRAY_SIZE(mxc_io_desc));
70} 66}
71 67
68int imx25_register_gpios(void);
69
72void __init mx25_init_irq(void) 70void __init mx25_init_irq(void)
73{ 71{
72 imx25_register_gpios();
74 mxc_init_irq((void __iomem *)MX25_AVIC_BASE_ADDR_VIRT); 73 mxc_init_irq((void __iomem *)MX25_AVIC_BASE_ADDR_VIRT);
75} 74}
76 75
diff --git a/arch/arm/mach-mx3/Kconfig b/arch/arm/mach-mx3/Kconfig
index b09e9a94adf1..a11112afde5e 100644
--- a/arch/arm/mach-mx3/Kconfig
+++ b/arch/arm/mach-mx3/Kconfig
@@ -15,6 +15,8 @@ comment "MX3 platforms:"
15config MACH_MX31ADS 15config MACH_MX31ADS
16 bool "Support MX31ADS platforms" 16 bool "Support MX31ADS platforms"
17 select ARCH_MX31 17 select ARCH_MX31
18 select IMX_HAVE_PLATFORM_IMX_I2C
19 select IMX_HAVE_PLATFORM_IMX_UART
18 default y 20 default y
19 help 21 help
20 Include support for MX31ADS platform. This includes specific 22 Include support for MX31ADS platform. This includes specific
@@ -34,6 +36,9 @@ config MACH_MX31ADS_WM1133_EV1
34config MACH_PCM037 36config MACH_PCM037
35 bool "Support Phytec pcm037 (i.MX31) platforms" 37 bool "Support Phytec pcm037 (i.MX31) platforms"
36 select ARCH_MX31 38 select ARCH_MX31
39 select IMX_HAVE_PLATFORM_IMX_I2C
40 select IMX_HAVE_PLATFORM_IMX_UART
41 select IMX_HAVE_PLATFORM_MXC_NAND
37 select MXC_ULPI if USB_ULPI 42 select MXC_ULPI if USB_ULPI
38 help 43 help
39 Include support for Phytec pcm037 platform. This includes 44 Include support for Phytec pcm037 platform. This includes
@@ -42,6 +47,7 @@ config MACH_PCM037
42config MACH_PCM037_EET 47config MACH_PCM037_EET
43 bool "Support pcm037 EET board extensions" 48 bool "Support pcm037 EET board extensions"
44 depends on MACH_PCM037 49 depends on MACH_PCM037
50 select IMX_HAVE_PLATFORM_SPI_IMX
45 help 51 help
46 Add support for PCM037 EET baseboard extensions. If you are using the 52 Add support for PCM037 EET baseboard extensions. If you are using the
47 OLED display with EET, use "video=mx3fb:CMEL-OLED" kernel 53 OLED display with EET, use "video=mx3fb:CMEL-OLED" kernel
@@ -51,6 +57,9 @@ config MACH_MX31LITE
51 bool "Support MX31 LITEKIT (LogicPD)" 57 bool "Support MX31 LITEKIT (LogicPD)"
52 select ARCH_MX31 58 select ARCH_MX31
53 select MXC_ULPI if USB_ULPI 59 select MXC_ULPI if USB_ULPI
60 select IMX_HAVE_PLATFORM_IMX_UART
61 select IMX_HAVE_PLATFORM_MXC_NAND
62 select IMX_HAVE_PLATFORM_SPI_IMX
54 help 63 help
55 Include support for MX31 LITEKIT platform. This includes specific 64 Include support for MX31 LITEKIT platform. This includes specific
56 configurations for the board and its peripherals. 65 configurations for the board and its peripherals.
@@ -58,6 +67,9 @@ config MACH_MX31LITE
58config MACH_MX31_3DS 67config MACH_MX31_3DS
59 bool "Support MX31PDK (3DS)" 68 bool "Support MX31PDK (3DS)"
60 select ARCH_MX31 69 select ARCH_MX31
70 select IMX_HAVE_PLATFORM_IMX_UART
71 select IMX_HAVE_PLATFORM_MXC_NAND
72 select IMX_HAVE_PLATFORM_SPI_IMX
61 help 73 help
62 Include support for MX31PDK (3DS) platform. This includes specific 74 Include support for MX31PDK (3DS) platform. This includes specific
63 configurations for the board and its peripherals. 75 configurations for the board and its peripherals.
@@ -74,6 +86,9 @@ config MACH_MX31_3DS_MXC_NAND_USE_BBT
74config MACH_MX31MOBOARD 86config MACH_MX31MOBOARD
75 bool "Support mx31moboard platforms (EPFL Mobots group)" 87 bool "Support mx31moboard platforms (EPFL Mobots group)"
76 select ARCH_MX31 88 select ARCH_MX31
89 select IMX_HAVE_PLATFORM_IMX_I2C
90 select IMX_HAVE_PLATFORM_IMX_UART
91 select IMX_HAVE_PLATFORM_SPI_IMX
77 select MXC_ULPI if USB_ULPI 92 select MXC_ULPI if USB_ULPI
78 help 93 help
79 Include support for mx31moboard platform. This includes specific 94 Include support for mx31moboard platform. This includes specific
@@ -82,6 +97,8 @@ config MACH_MX31MOBOARD
82config MACH_MX31LILLY 97config MACH_MX31LILLY
83 bool "Support MX31 LILLY-1131 platforms (INCO startec)" 98 bool "Support MX31 LILLY-1131 platforms (INCO startec)"
84 select ARCH_MX31 99 select ARCH_MX31
100 select IMX_HAVE_PLATFORM_IMX_UART
101 select IMX_HAVE_PLATFORM_SPI_IMX
85 select MXC_ULPI if USB_ULPI 102 select MXC_ULPI if USB_ULPI
86 help 103 help
87 Include support for mx31 based LILLY1131 modules. This includes 104 Include support for mx31 based LILLY1131 modules. This includes
@@ -90,6 +107,7 @@ config MACH_MX31LILLY
90config MACH_QONG 107config MACH_QONG
91 bool "Support Dave/DENX QongEVB-LITE platform" 108 bool "Support Dave/DENX QongEVB-LITE platform"
92 select ARCH_MX31 109 select ARCH_MX31
110 select IMX_HAVE_PLATFORM_IMX_UART
93 help 111 help
94 Include support for Dave/DENX QongEVB-LITE platform. This includes 112 Include support for Dave/DENX QongEVB-LITE platform. This includes
95 specific configurations for the board and its peripherals. 113 specific configurations for the board and its peripherals.
@@ -97,6 +115,9 @@ config MACH_QONG
97config MACH_PCM043 115config MACH_PCM043
98 bool "Support Phytec pcm043 (i.MX35) platforms" 116 bool "Support Phytec pcm043 (i.MX35) platforms"
99 select ARCH_MX35 117 select ARCH_MX35
118 select IMX_HAVE_PLATFORM_IMX_I2C
119 select IMX_HAVE_PLATFORM_IMX_UART
120 select IMX_HAVE_PLATFORM_MXC_NAND
100 select MXC_ULPI if USB_ULPI 121 select MXC_ULPI if USB_ULPI
101 help 122 help
102 Include support for Phytec pcm043 platform. This includes 123 Include support for Phytec pcm043 platform. This includes
@@ -105,6 +126,9 @@ config MACH_PCM043
105config MACH_ARMADILLO5X0 126config MACH_ARMADILLO5X0
106 bool "Support Atmark Armadillo-500 Development Base Board" 127 bool "Support Atmark Armadillo-500 Development Base Board"
107 select ARCH_MX31 128 select ARCH_MX31
129 select IMX_HAVE_PLATFORM_IMX_I2C
130 select IMX_HAVE_PLATFORM_IMX_UART
131 select IMX_HAVE_PLATFORM_MXC_NAND
108 select MXC_ULPI if USB_ULPI 132 select MXC_ULPI if USB_ULPI
109 help 133 help
110 Include support for Atmark Armadillo-500 platform. This includes 134 Include support for Atmark Armadillo-500 platform. This includes
@@ -113,6 +137,7 @@ config MACH_ARMADILLO5X0
113config MACH_MX35_3DS 137config MACH_MX35_3DS
114 bool "Support MX35PDK platform" 138 bool "Support MX35PDK platform"
115 select ARCH_MX35 139 select ARCH_MX35
140 select IMX_HAVE_PLATFORM_IMX_UART
116 default n 141 default n
117 help 142 help
118 Include support for MX35PDK platform. This includes specific 143 Include support for MX35PDK platform. This includes specific
@@ -121,6 +146,7 @@ config MACH_MX35_3DS
121config MACH_KZM_ARM11_01 146config MACH_KZM_ARM11_01
122 bool "Support KZM-ARM11-01(Kyoto Microcomputer)" 147 bool "Support KZM-ARM11-01(Kyoto Microcomputer)"
123 select ARCH_MX31 148 select ARCH_MX31
149 select IMX_HAVE_PLATFORM_IMX_UART
124 help 150 help
125 Include support for KZM-ARM11-01. This includes specific 151 Include support for KZM-ARM11-01. This includes specific
126 configurations for the board and its peripherals. 152 configurations for the board and its peripherals.
@@ -128,6 +154,9 @@ config MACH_KZM_ARM11_01
128config MACH_EUKREA_CPUIMX35 154config MACH_EUKREA_CPUIMX35
129 bool "Support Eukrea CPUIMX35 Platform" 155 bool "Support Eukrea CPUIMX35 Platform"
130 select ARCH_MX35 156 select ARCH_MX35
157 select IMX_HAVE_PLATFORM_IMX_UART
158 select IMX_HAVE_PLATFORM_IMX_I2C
159 select IMX_HAVE_PLATFORM_MXC_NAND
131 select MXC_ULPI if USB_ULPI 160 select MXC_ULPI if USB_ULPI
132 help 161 help
133 Include support for Eukrea CPUIMX35 platform. This includes 162 Include support for Eukrea CPUIMX35 platform. This includes
diff --git a/arch/arm/mach-mx3/Makefile b/arch/arm/mach-mx3/Makefile
index ef68ff55a7b6..54bc935acdc6 100644
--- a/arch/arm/mach-mx3/Makefile
+++ b/arch/arm/mach-mx3/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_MACH_MX31MOBOARD) += mach-mx31moboard.o mx31moboard-devboard.o \
22obj-$(CONFIG_MACH_QONG) += mach-qong.o 22obj-$(CONFIG_MACH_QONG) += mach-qong.o
23obj-$(CONFIG_MACH_PCM043) += mach-pcm043.o 23obj-$(CONFIG_MACH_PCM043) += mach-pcm043.o
24obj-$(CONFIG_MACH_ARMADILLO5X0) += mach-armadillo5x0.o 24obj-$(CONFIG_MACH_ARMADILLO5X0) += mach-armadillo5x0.o
25obj-$(CONFIG_MACH_MX35_3DS) += mach-mx35pdk.o 25obj-$(CONFIG_MACH_MX35_3DS) += mach-mx35_3ds.o
26obj-$(CONFIG_MACH_KZM_ARM11_01) += mach-kzm_arm11_01.o 26obj-$(CONFIG_MACH_KZM_ARM11_01) += mach-kzm_arm11_01.o
27obj-$(CONFIG_MACH_EUKREA_CPUIMX35) += mach-cpuimx35.o 27obj-$(CONFIG_MACH_EUKREA_CPUIMX35) += mach-cpuimx35.o
28obj-$(CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD) += eukrea_mbimxsd-baseboard.o 28obj-$(CONFIG_MACH_EUKREA_MBIMXSD_BASEBOARD) += eukrea_mbimxsd-baseboard.o
diff --git a/arch/arm/mach-mx3/devices-imx31.h b/arch/arm/mach-mx3/devices-imx31.h
new file mode 100644
index 000000000000..3b1a44a20585
--- /dev/null
+++ b/arch/arm/mach-mx3/devices-imx31.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright (C) 2010 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#include <mach/mx31.h>
10#include <mach/devices-common.h>
11
12#define imx31_add_imx_i2c0(pdata) \
13 imx_add_imx_i2c(0, MX31_I2C1_BASE_ADDR, SZ_4K, MX31_INT_I2C1, pdata)
14#define imx31_add_imx_i2c1(pdata) \
15 imx_add_imx_i2c(1, MX31_I2C2_BASE_ADDR, SZ_4K, MX31_INT_I2C2, pdata)
16#define imx31_add_imx_i2c2(pdata) \
17 imx_add_imx_i2c(2, MX31_I2C3_BASE_ADDR, SZ_4K, MX31_INT_I2C3, pdata)
18
19#define imx31_add_imx_uart0(pdata) \
20 imx_add_imx_uart_1irq(0, MX31_UART1_BASE_ADDR, SZ_16K, MX31_INT_UART1, pdata)
21#define imx31_add_imx_uart1(pdata) \
22 imx_add_imx_uart_1irq(1, MX31_UART2_BASE_ADDR, SZ_16K, MX31_INT_UART2, pdata)
23#define imx31_add_imx_uart2(pdata) \
24 imx_add_imx_uart_1irq(2, MX31_UART3_BASE_ADDR, SZ_16K, MX31_INT_UART3, pdata)
25#define imx31_add_imx_uart3(pdata) \
26 imx_add_imx_uart_1irq(3, MX31_UART4_BASE_ADDR, SZ_16K, MX31_INT_UART4, pdata)
27#define imx31_add_imx_uart4(pdata) \
28 imx_add_imx_uart_1irq(4, MX31_UART5_BASE_ADDR, SZ_16K, MX31_INT_UART5, pdata)
29
30#define imx31_add_mxc_nand(pdata) \
31 imx_add_mxc_nand_v1(MX31_NFC_BASE_ADDR, MX31_INT_NANDFC, pdata)
32
33#define imx31_add_spi_imx0(pdata) \
34 imx_add_spi_imx(0, MX31_CSPI1_BASE_ADDR, SZ_4K, MX31_INT_CSPI1, pdata)
35#define imx31_add_spi_imx1(pdata) \
36 imx_add_spi_imx(1, MX31_CSPI2_BASE_ADDR, SZ_4K, MX31_INT_CSPI2, pdata)
37#define imx31_add_spi_imx2(pdata) \
38 imx_add_spi_imx(2, MX31_CSPI3_BASE_ADDR, SZ_4K, MX31_INT_CSPI3, pdata)
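
(The spi_imx helpers declared above take the controller's platform data as their argument, but none of the hunks in this series converts a board's SPI registration yet, so the sketch below is only a guess at the intended use. It assumes the spi_imx_master structure from <mach/spi.h> used by the existing spi_imx driver; the board name and chip-select values are placeholders and depend on the board's actual wiring.)

#include <linux/kernel.h>
#include <linux/init.h>
#include <mach/spi.h>

#include "devices-imx31.h"

/* Placeholder chip-select table; real boards pick internal CS numbers or
 * GPIO-driven chip selects here. The array must outlive init because the
 * driver keeps the pointer. */
static int example_spi0_cs[] = { 0, 1 };

static const struct spi_imx_master example_spi0_pdata __initconst = {
	.chipselect	= example_spi0_cs,
	.num_chipselect	= ARRAY_SIZE(example_spi0_cs),
};

static void __init example_mx31_board_init(void)
{
	/* CSPI1 on i.MX31, registered through the new dynamic helper. */
	imx31_add_spi_imx0(&example_spi0_pdata);
}
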
diff --git a/arch/arm/mach-mx3/devices-imx35.h b/arch/arm/mach-mx3/devices-imx35.h
new file mode 100644
index 000000000000..536d9b9a250b
--- /dev/null
+++ b/arch/arm/mach-mx3/devices-imx35.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright (C) 2010 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#include <mach/mx35.h>
10#include <mach/devices-common.h>
11
12#define imx35_add_imx_i2c0(pdata) \
13 imx_add_imx_i2c(0, MX35_I2C1_BASE_ADDR, SZ_4K, MX35_INT_I2C1, pdata)
14#define imx35_add_imx_i2c1(pdata) \
15 imx_add_imx_i2c(1, MX35_I2C2_BASE_ADDR, SZ_4K, MX35_INT_I2C2, pdata)
16#define imx35_add_imx_i2c2(pdata) \
17 imx_add_imx_i2c(2, MX35_I2C3_BASE_ADDR, SZ_4K, MX35_INT_I2C3, pdata)
18
19#define imx35_add_imx_uart0(pdata) \
20 imx_add_imx_uart_1irq(0, MX35_UART1_BASE_ADDR, SZ_16K, MX35_INT_UART1, pdata)
21#define imx35_add_imx_uart1(pdata) \
22 imx_add_imx_uart_1irq(1, MX35_UART2_BASE_ADDR, SZ_16K, MX35_INT_UART2, pdata)
23#define imx35_add_imx_uart2(pdata) \
24 imx_add_imx_uart_1irq(2, MX35_UART3_BASE_ADDR, SZ_16K, MX35_INT_UART3, pdata)
25
26#define imx35_add_mxc_nand(pdata) \
27 imx_add_mxc_nand_v21(MX35_NFC_BASE_ADDR, MX35_INT_NANDFC, pdata)
28
29#define imx35_add_spi_imx0(pdata) \
30 imx_add_spi_imx(0, MX35_CSPI1_BASE_ADDR, SZ_4K, MX35_INT_CSPI1, pdata)
31#define imx35_add_spi_imx1(pdata) \
32 imx_add_spi_imx(1, MX35_CSPI2_BASE_ADDR, SZ_4K, MX35_INT_CSPI2, pdata)
diff --git a/arch/arm/mach-mx3/devices.c b/arch/arm/mach-mx3/devices.c
index db7acd6e9101..a4fd1a26fc91 100644
--- a/arch/arm/mach-mx3/devices.c
+++ b/arch/arm/mach-mx3/devices.c
@@ -25,108 +25,10 @@
25#include <mach/hardware.h> 25#include <mach/hardware.h>
26#include <mach/irqs.h> 26#include <mach/irqs.h>
27#include <mach/common.h> 27#include <mach/common.h>
28#include <mach/imx-uart.h>
29#include <mach/mx3_camera.h> 28#include <mach/mx3_camera.h>
30 29
31#include "devices.h" 30#include "devices.h"
32 31
33static struct resource uart0[] = {
34 {
35 .start = UART1_BASE_ADDR,
36 .end = UART1_BASE_ADDR + 0x0B5,
37 .flags = IORESOURCE_MEM,
38 }, {
39 .start = MXC_INT_UART1,
40 .end = MXC_INT_UART1,
41 .flags = IORESOURCE_IRQ,
42 },
43};
44
45struct platform_device mxc_uart_device0 = {
46 .name = "imx-uart",
47 .id = 0,
48 .resource = uart0,
49 .num_resources = ARRAY_SIZE(uart0),
50};
51
52static struct resource uart1[] = {
53 {
54 .start = UART2_BASE_ADDR,
55 .end = UART2_BASE_ADDR + 0x0B5,
56 .flags = IORESOURCE_MEM,
57 }, {
58 .start = MXC_INT_UART2,
59 .end = MXC_INT_UART2,
60 .flags = IORESOURCE_IRQ,
61 },
62};
63
64struct platform_device mxc_uart_device1 = {
65 .name = "imx-uart",
66 .id = 1,
67 .resource = uart1,
68 .num_resources = ARRAY_SIZE(uart1),
69};
70
71static struct resource uart2[] = {
72 {
73 .start = UART3_BASE_ADDR,
74 .end = UART3_BASE_ADDR + 0x0B5,
75 .flags = IORESOURCE_MEM,
76 }, {
77 .start = MXC_INT_UART3,
78 .end = MXC_INT_UART3,
79 .flags = IORESOURCE_IRQ,
80 },
81};
82
83struct platform_device mxc_uart_device2 = {
84 .name = "imx-uart",
85 .id = 2,
86 .resource = uart2,
87 .num_resources = ARRAY_SIZE(uart2),
88};
89
90#ifdef CONFIG_ARCH_MX31
91static struct resource uart3[] = {
92 {
93 .start = UART4_BASE_ADDR,
94 .end = UART4_BASE_ADDR + 0x0B5,
95 .flags = IORESOURCE_MEM,
96 }, {
97 .start = MXC_INT_UART4,
98 .end = MXC_INT_UART4,
99 .flags = IORESOURCE_IRQ,
100 },
101};
102
103struct platform_device mxc_uart_device3 = {
104 .name = "imx-uart",
105 .id = 3,
106 .resource = uart3,
107 .num_resources = ARRAY_SIZE(uart3),
108};
109
110static struct resource uart4[] = {
111 {
112 .start = UART5_BASE_ADDR,
113 .end = UART5_BASE_ADDR + 0x0B5,
114 .flags = IORESOURCE_MEM,
115 }, {
116 .start = MXC_INT_UART5,
117 .end = MXC_INT_UART5,
118 .flags = IORESOURCE_IRQ,
119 },
120};
121
122struct platform_device mxc_uart_device4 = {
123 .name = "imx-uart",
124 .id = 4,
125 .resource = uart4,
126 .num_resources = ARRAY_SIZE(uart4),
127};
128#endif /* CONFIG_ARCH_MX31 */
129
130/* GPIO port description */ 32/* GPIO port description */
131static struct mxc_gpio_port imx_gpio_ports[] = { 33static struct mxc_gpio_port imx_gpio_ports[] = {
132 { 34 {
@@ -147,7 +49,7 @@ static struct mxc_gpio_port imx_gpio_ports[] = {
147 } 49 }
148}; 50};
149 51
150int __init mxc_register_gpios(void) 52int __init imx3x_register_gpios(void)
151{ 53{
152 return mxc_gpio_init(imx_gpio_ports, ARRAY_SIZE(imx_gpio_ports)); 54 return mxc_gpio_init(imx_gpio_ports, ARRAY_SIZE(imx_gpio_ports));
153} 55}
@@ -167,82 +69,6 @@ struct platform_device mxc_w1_master_device = {
167 .resource = mxc_w1_master_resources, 69 .resource = mxc_w1_master_resources,
168}; 70};
169 71
170static struct resource mxc_nand_resources[] = {
171 {
172 .start = 0, /* runtime dependent */
173 .end = 0,
174 .flags = IORESOURCE_MEM,
175 }, {
176 .start = MXC_INT_NANDFC,
177 .end = MXC_INT_NANDFC,
178 .flags = IORESOURCE_IRQ,
179 },
180};
181
182struct platform_device mxc_nand_device = {
183 .name = "mxc_nand",
184 .id = 0,
185 .num_resources = ARRAY_SIZE(mxc_nand_resources),
186 .resource = mxc_nand_resources,
187};
188
189static struct resource mxc_i2c0_resources[] = {
190 {
191 .start = I2C_BASE_ADDR,
192 .end = I2C_BASE_ADDR + SZ_4K - 1,
193 .flags = IORESOURCE_MEM,
194 }, {
195 .start = MXC_INT_I2C,
196 .end = MXC_INT_I2C,
197 .flags = IORESOURCE_IRQ,
198 },
199};
200
201struct platform_device mxc_i2c_device0 = {
202 .name = "imx-i2c",
203 .id = 0,
204 .num_resources = ARRAY_SIZE(mxc_i2c0_resources),
205 .resource = mxc_i2c0_resources,
206};
207
208static struct resource mxc_i2c1_resources[] = {
209 {
210 .start = I2C2_BASE_ADDR,
211 .end = I2C2_BASE_ADDR + SZ_4K - 1,
212 .flags = IORESOURCE_MEM,
213 }, {
214 .start = MXC_INT_I2C2,
215 .end = MXC_INT_I2C2,
216 .flags = IORESOURCE_IRQ,
217 },
218};
219
220struct platform_device mxc_i2c_device1 = {
221 .name = "imx-i2c",
222 .id = 1,
223 .num_resources = ARRAY_SIZE(mxc_i2c1_resources),
224 .resource = mxc_i2c1_resources,
225};
226
227static struct resource mxc_i2c2_resources[] = {
228 {
229 .start = I2C3_BASE_ADDR,
230 .end = I2C3_BASE_ADDR + SZ_4K - 1,
231 .flags = IORESOURCE_MEM,
232 }, {
233 .start = MXC_INT_I2C3,
234 .end = MXC_INT_I2C3,
235 .flags = IORESOURCE_IRQ,
236 },
237};
238
239struct platform_device mxc_i2c_device2 = {
240 .name = "imx-i2c",
241 .id = 2,
242 .num_resources = ARRAY_SIZE(mxc_i2c2_resources),
243 .resource = mxc_i2c2_resources,
244};
245
246#ifdef CONFIG_ARCH_MX31 72#ifdef CONFIG_ARCH_MX31
247static struct resource mxcsdhc0_resources[] = { 73static struct resource mxcsdhc0_resources[] = {
248 { 74 {
@@ -455,68 +281,7 @@ struct platform_device mxc_usbh2 = {
455 .num_resources = ARRAY_SIZE(mxc_usbh2_resources), 281 .num_resources = ARRAY_SIZE(mxc_usbh2_resources),
456}; 282};
457 283
458/* 284#if defined(CONFIG_ARCH_MX35)
459 * SPI master controller
460 * 3 channels
461 */
462static struct resource mxc_spi_0_resources[] = {
463 {
464 .start = CSPI1_BASE_ADDR,
465 .end = CSPI1_BASE_ADDR + SZ_4K - 1,
466 .flags = IORESOURCE_MEM,
467 }, {
468 .start = MXC_INT_CSPI1,
469 .end = MXC_INT_CSPI1,
470 .flags = IORESOURCE_IRQ,
471 },
472};
473
474static struct resource mxc_spi_1_resources[] = {
475 {
476 .start = CSPI2_BASE_ADDR,
477 .end = CSPI2_BASE_ADDR + SZ_4K - 1,
478 .flags = IORESOURCE_MEM,
479 }, {
480 .start = MXC_INT_CSPI2,
481 .end = MXC_INT_CSPI2,
482 .flags = IORESOURCE_IRQ,
483 },
484};
485
486static struct resource mxc_spi_2_resources[] = {
487 {
488 .start = CSPI3_BASE_ADDR,
489 .end = CSPI3_BASE_ADDR + SZ_4K - 1,
490 .flags = IORESOURCE_MEM,
491 }, {
492 .start = MXC_INT_CSPI3,
493 .end = MXC_INT_CSPI3,
494 .flags = IORESOURCE_IRQ,
495 },
496};
497
498struct platform_device mxc_spi_device0 = {
499 .name = "spi_imx",
500 .id = 0,
501 .num_resources = ARRAY_SIZE(mxc_spi_0_resources),
502 .resource = mxc_spi_0_resources,
503};
504
505struct platform_device mxc_spi_device1 = {
506 .name = "spi_imx",
507 .id = 1,
508 .num_resources = ARRAY_SIZE(mxc_spi_1_resources),
509 .resource = mxc_spi_1_resources,
510};
511
512struct platform_device mxc_spi_device2 = {
513 .name = "spi_imx",
514 .id = 2,
515 .num_resources = ARRAY_SIZE(mxc_spi_2_resources),
516 .resource = mxc_spi_2_resources,
517};
518
519#ifdef CONFIG_ARCH_MX35
520static struct resource mxc_fec_resources[] = { 285static struct resource mxc_fec_resources[] = {
521 { 286 {
522 .start = MXC_FEC_BASE_ADDR, 287 .start = MXC_FEC_BASE_ADDR,
@@ -628,16 +393,15 @@ struct platform_device imx_kpp_device = {
628 393
629static int __init mx3_devices_init(void) 394static int __init mx3_devices_init(void)
630{ 395{
396#if defined(CONFIG_ARCH_MX31)
631 if (cpu_is_mx31()) { 397 if (cpu_is_mx31()) {
632 mxc_nand_resources[0].start = MX31_NFC_BASE_ADDR;
633 mxc_nand_resources[0].end = MX31_NFC_BASE_ADDR + 0xfff;
634 imx_wdt_resources[0].start = MX31_WDOG_BASE_ADDR; 398 imx_wdt_resources[0].start = MX31_WDOG_BASE_ADDR;
635 imx_wdt_resources[0].end = MX31_WDOG_BASE_ADDR + 0x3fff; 399 imx_wdt_resources[0].end = MX31_WDOG_BASE_ADDR + 0x3fff;
636 mxc_register_device(&mxc_rnga_device, NULL); 400 mxc_register_device(&mxc_rnga_device, NULL);
637 } 401 }
402#endif
403#if defined(CONFIG_ARCH_MX35)
638 if (cpu_is_mx35()) { 404 if (cpu_is_mx35()) {
639 mxc_nand_resources[0].start = MX35_NFC_BASE_ADDR;
640 mxc_nand_resources[0].end = MX35_NFC_BASE_ADDR + 0x1fff;
641 otg_resources[0].start = MX35_OTG_BASE_ADDR; 405 otg_resources[0].start = MX35_OTG_BASE_ADDR;
642 otg_resources[0].end = MX35_OTG_BASE_ADDR + 0x1ff; 406 otg_resources[0].end = MX35_OTG_BASE_ADDR + 0x1ff;
643 otg_resources[1].start = MXC_INT_USBOTG; 407 otg_resources[1].start = MXC_INT_USBOTG;
@@ -653,6 +417,7 @@ static int __init mx3_devices_init(void)
653 imx_wdt_resources[0].start = MX35_WDOG_BASE_ADDR; 417 imx_wdt_resources[0].start = MX35_WDOG_BASE_ADDR;
654 imx_wdt_resources[0].end = MX35_WDOG_BASE_ADDR + 0x3fff; 418 imx_wdt_resources[0].end = MX35_WDOG_BASE_ADDR + 0x3fff;
655 } 419 }
420#endif
656 421
657 return 0; 422 return 0;
658} 423}
diff --git a/arch/arm/mach-mx3/devices.h b/arch/arm/mach-mx3/devices.h
index 2c3c8646a29e..e5535234839f 100644
--- a/arch/arm/mach-mx3/devices.h
+++ b/arch/arm/mach-mx3/devices.h
@@ -1,14 +1,4 @@
1
2extern struct platform_device mxc_uart_device0;
3extern struct platform_device mxc_uart_device1;
4extern struct platform_device mxc_uart_device2;
5extern struct platform_device mxc_uart_device3;
6extern struct platform_device mxc_uart_device4;
7extern struct platform_device mxc_w1_master_device; 1extern struct platform_device mxc_w1_master_device;
8extern struct platform_device mxc_nand_device;
9extern struct platform_device mxc_i2c_device0;
10extern struct platform_device mxc_i2c_device1;
11extern struct platform_device mxc_i2c_device2;
12extern struct platform_device mx3_ipu; 2extern struct platform_device mx3_ipu;
13extern struct platform_device mx3_fb; 3extern struct platform_device mx3_fb;
14extern struct platform_device mx3_camera; 4extern struct platform_device mx3_camera;
@@ -20,9 +10,6 @@ extern struct platform_device mxc_otg_host;
20extern struct platform_device mxc_usbh1; 10extern struct platform_device mxc_usbh1;
21extern struct platform_device mxc_usbh2; 11extern struct platform_device mxc_usbh2;
22extern struct platform_device mxc_rnga_device; 12extern struct platform_device mxc_rnga_device;
23extern struct platform_device mxc_spi_device0;
24extern struct platform_device mxc_spi_device1;
25extern struct platform_device mxc_spi_device2;
26extern struct platform_device imx_ssi_device0; 13extern struct platform_device imx_ssi_device0;
27extern struct platform_device imx_ssi_device1; 14extern struct platform_device imx_ssi_device1;
28extern struct platform_device imx_ssi_device1; 15extern struct platform_device imx_ssi_device1;
diff --git a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
index 678597852443..368a603accfe 100644
--- a/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
+++ b/arch/arm/mach-mx3/eukrea_mbimxsd-baseboard.c
@@ -46,6 +46,7 @@
46#include <mach/audmux.h> 46#include <mach/audmux.h>
47#include <mach/ssi.h> 47#include <mach/ssi.h>
48 48
49#include "devices-imx35.h"
49#include "devices.h" 50#include "devices.h"
50 51
51static const struct fb_videomode fb_modedb[] = { 52static const struct fb_videomode fb_modedb[] = {
@@ -196,7 +197,7 @@ static struct platform_device *platform_devices[] __initdata = {
196 &eukrea_mbimxsd_lcd_powerdev, 197 &eukrea_mbimxsd_lcd_powerdev,
197}; 198};
198 199
199static struct imxuart_platform_data uart_pdata = { 200static const struct imxuart_platform_data uart_pdata __initconst = {
200 .flags = IMXUART_HAVE_RTSCTS, 201 .flags = IMXUART_HAVE_RTSCTS,
201}; 202};
202 203
@@ -238,7 +239,7 @@ void __init eukrea_mbimxsd_baseboard_init(void)
238 ); 239 );
239#endif 240#endif
240 241
241 mxc_register_device(&mxc_uart_device1, &uart_pdata); 242 imx35_add_imx_uart1(&uart_pdata);
242 mxc_register_device(&mx3_ipu, &mx3_ipu_data); 243 mxc_register_device(&mx3_ipu, &mx3_ipu_data);
243 mxc_register_device(&mx3_fb, &mx3fb_pdata); 244 mxc_register_device(&mx3_fb, &mx3fb_pdata);
244 245
diff --git a/arch/arm/mach-mx3/mach-armadillo5x0.c b/arch/arm/mach-mx3/mach-armadillo5x0.c
index 5f72ec91af2d..96aadcadb4ff 100644
--- a/arch/arm/mach-mx3/mach-armadillo5x0.c
+++ b/arch/arm/mach-mx3/mach-armadillo5x0.c
@@ -48,16 +48,14 @@
48#include <asm/mach/map.h> 48#include <asm/mach/map.h>
49 49
50#include <mach/common.h> 50#include <mach/common.h>
51#include <mach/imx-uart.h>
52#include <mach/iomux-mx3.h> 51#include <mach/iomux-mx3.h>
53#include <mach/board-armadillo5x0.h>
54#include <mach/mmc.h> 52#include <mach/mmc.h>
55#include <mach/ipu.h> 53#include <mach/ipu.h>
56#include <mach/mx3fb.h> 54#include <mach/mx3fb.h>
57#include <mach/mxc_nand.h>
58#include <mach/mxc_ehci.h> 55#include <mach/mxc_ehci.h>
59#include <mach/ulpi.h> 56#include <mach/ulpi.h>
60 57
58#include "devices-imx31.h"
61#include "devices.h" 59#include "devices.h"
62#include "crm_regs.h" 60#include "crm_regs.h"
63 61
@@ -301,7 +299,8 @@ static struct platform_device armadillo5x0_button_device = {
301/* 299/*
302 * NAND Flash 300 * NAND Flash
303 */ 301 */
304static struct mxc_nand_platform_data armadillo5x0_nand_flash_pdata = { 302static const struct mxc_nand_platform_data
303armadillo5x0_nand_board_info __initconst = {
305 .width = 1, 304 .width = 1,
306 .hw_ecc = 1, 305 .hw_ecc = 1,
307}; 306};
@@ -493,13 +492,12 @@ static struct platform_device armadillo5x0_smc911x_device = {
493}; 492};
494 493
495/* UART device data */ 494/* UART device data */
496static struct imxuart_platform_data uart_pdata = { 495static const struct imxuart_platform_data uart_pdata __initconst = {
497 .flags = IMXUART_HAVE_RTSCTS, 496 .flags = IMXUART_HAVE_RTSCTS,
498}; 497};
499 498
500static struct platform_device *devices[] __initdata = { 499static struct platform_device *devices[] __initdata = {
501 &armadillo5x0_smc911x_device, 500 &armadillo5x0_smc911x_device,
502 &mxc_i2c_device1,
503 &armadillo5x0_button_device, 501 &armadillo5x0_button_device,
504}; 502};
505 503
@@ -512,10 +510,11 @@ static void __init armadillo5x0_init(void)
512 ARRAY_SIZE(armadillo5x0_pins), "armadillo5x0"); 510 ARRAY_SIZE(armadillo5x0_pins), "armadillo5x0");
513 511
514 platform_add_devices(devices, ARRAY_SIZE(devices)); 512 platform_add_devices(devices, ARRAY_SIZE(devices));
513 imx31_add_imx_i2c1(NULL);
515 514
516 /* Register UART */ 515 /* Register UART */
517 mxc_register_device(&mxc_uart_device0, &uart_pdata); 516 imx31_add_imx_uart0(&uart_pdata);
518 mxc_register_device(&mxc_uart_device1, &uart_pdata); 517 imx31_add_imx_uart1(&uart_pdata);
519 518
520 /* SMSC9118 IRQ pin */ 519 /* SMSC9118 IRQ pin */
521 gpio_direction_input(MX31_PIN_GPIO1_0); 520 gpio_direction_input(MX31_PIN_GPIO1_0);
@@ -532,7 +531,7 @@ static void __init armadillo5x0_init(void)
532 &armadillo5x0_nor_flash_pdata); 531 &armadillo5x0_nor_flash_pdata);
533 532
534 /* Register NAND Flash */ 533 /* Register NAND Flash */
535 mxc_register_device(&mxc_nand_device, &armadillo5x0_nand_flash_pdata); 534 imx31_add_mxc_nand(&armadillo5x0_nand_board_info);
536 535
537 /* set NAND page size to 2k if not configured via boot mode pins */ 536 /* set NAND page size to 2k if not configured via boot mode pins */
538 __raw_writel(__raw_readl(MXC_CCM_RCSR) | (1 << 30), MXC_CCM_RCSR); 537 __raw_writel(__raw_readl(MXC_CCM_RCSR) | (1 << 30), MXC_CCM_RCSR);
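
Two side effects of the conversion are visible in the armadillo5x0 hunks. Platform data that is only read while the board registers its devices is now const and __initconst, so it is placed in .init.rodata and freed with the rest of the init sections after boot; this is safe only because the imx31_add_*() helpers copy the data rather than keep a pointer to it. And &mxc_i2c_device1 drops out of the static devices[] array because the I2C controller is no longer a statically defined platform_device; it is created at run time by imx31_add_imx_i2c1(NULL) after platform_add_devices(). A minimal illustration of the first point, with made-up names:

#include <linux/init.h>
#include <mach/mxc_nand.h>	/* struct mxc_nand_platform_data */
#include "devices-imx31.h"	/* imx31_add_mxc_nand() */

static const struct mxc_nand_platform_data
example_nand_board_info __initconst = {
	.width	= 1,	/* 8-bit NAND bus */
	.hw_ecc	= 1,	/* use the controller's hardware ECC */
};

static void __init example_board_init(void)
{
	/* the helper copies the data, so the .init.rodata copy may be discarded */
	imx31_add_mxc_nand(&example_nand_board_info);
}
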
diff --git a/arch/arm/mach-mx3/mach-cpuimx35.c b/arch/arm/mach-mx3/mach-cpuimx35.c
index 55caa5cb8bc7..4f6146d31328 100644
--- a/arch/arm/mach-mx3/mach-cpuimx35.c
+++ b/arch/arm/mach-mx3/mach-cpuimx35.c
@@ -40,20 +40,20 @@
40#include <mach/board-eukrea_cpuimx35.h> 40#include <mach/board-eukrea_cpuimx35.h>
41#include <mach/hardware.h> 41#include <mach/hardware.h>
42#include <mach/common.h> 42#include <mach/common.h>
43#include <mach/imx-uart.h>
44#include <mach/i2c.h>
45#include <mach/iomux-mx35.h> 43#include <mach/iomux-mx35.h>
46#include <mach/mxc_nand.h> 44#include <mach/mxc_nand.h>
47#include <mach/mxc_ehci.h> 45#include <mach/mxc_ehci.h>
48#include <mach/ulpi.h> 46#include <mach/ulpi.h>
49 47
48#include "devices-imx35.h"
50#include "devices.h" 49#include "devices.h"
51 50
52static struct imxuart_platform_data uart_pdata = { 51static const struct imxuart_platform_data uart_pdata __initconst = {
53 .flags = IMXUART_HAVE_RTSCTS, 52 .flags = IMXUART_HAVE_RTSCTS,
54}; 53};
55 54
56static struct imxi2c_platform_data eukrea_cpuimx35_i2c_1_data = { 55static const struct imxi2c_platform_data
56eukrea_cpuimx35_i2c0_data __initconst = {
57 .bitrate = 50000, 57 .bitrate = 50000,
58}; 58};
59 59
@@ -134,7 +134,8 @@ static struct pad_desc eukrea_cpuimx35_pads[] = {
134 MX35_PAD_ATA_DA2__GPIO3_2, 134 MX35_PAD_ATA_DA2__GPIO3_2,
135}; 135};
136 136
137static struct mxc_nand_platform_data pcm037_nand_board_info = { 137static const struct mxc_nand_platform_data
138eukrea_cpuimx35_nand_board_info __initconst = {
138 .width = 1, 139 .width = 1,
139 .hw_ecc = 1, 140 .hw_ecc = 1,
140 .flash_bbt = 1, 141 .flash_bbt = 1,
@@ -181,12 +182,12 @@ static void __init mxc_board_init(void)
181 182
182 platform_add_devices(devices, ARRAY_SIZE(devices)); 183 platform_add_devices(devices, ARRAY_SIZE(devices));
183 184
184 mxc_register_device(&mxc_uart_device0, &uart_pdata); 185 imx35_add_imx_uart0(&uart_pdata);
185 mxc_register_device(&mxc_nand_device, &pcm037_nand_board_info); 186 imx35_add_mxc_nand(&eukrea_cpuimx35_nand_board_info);
186 187
187 i2c_register_board_info(0, eukrea_cpuimx35_i2c_devices, 188 i2c_register_board_info(0, eukrea_cpuimx35_i2c_devices,
188 ARRAY_SIZE(eukrea_cpuimx35_i2c_devices)); 189 ARRAY_SIZE(eukrea_cpuimx35_i2c_devices));
189 mxc_register_device(&mxc_i2c_device0, &eukrea_cpuimx35_i2c_1_data); 190 imx35_add_imx_i2c0(&eukrea_cpuimx35_i2c0_data);
190 191
191#if defined(CONFIG_USB_ULPI) 192#if defined(CONFIG_USB_ULPI)
192 if (otg_mode_host) { 193 if (otg_mode_host) {
diff --git a/arch/arm/mach-mx3/mach-kzm_arm11_01.c b/arch/arm/mach-mx3/mach-kzm_arm11_01.c
index f085d5d1a6de..5b23e416d6c7 100644
--- a/arch/arm/mach-mx3/mach-kzm_arm11_01.c
+++ b/arch/arm/mach-mx3/mach-kzm_arm11_01.c
@@ -16,10 +16,6 @@
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 19 */
24 20
25#include <linux/gpio.h> 21#include <linux/gpio.h>
@@ -37,13 +33,12 @@
37#include <asm/mach/map.h> 33#include <asm/mach/map.h>
38#include <asm/mach/time.h> 34#include <asm/mach/time.h>
39 35
40#include <mach/board-kzmarm11.h>
41#include <mach/clock.h> 36#include <mach/clock.h>
42#include <mach/common.h> 37#include <mach/common.h>
43#include <mach/imx-uart.h>
44#include <mach/iomux-mx3.h> 38#include <mach/iomux-mx3.h>
45#include <mach/memory.h> 39#include <mach/memory.h>
46 40
41#include "devices-imx31.h"
47#include "devices.h" 42#include "devices.h"
48 43
49#define KZM_ARM11_IO_ADDRESS(x) ( \ 44#define KZM_ARM11_IO_ADDRESS(x) ( \
@@ -51,6 +46,23 @@
51 IMX_IO_ADDRESS(x, MX31_CS5) ?: \ 46 IMX_IO_ADDRESS(x, MX31_CS5) ?: \
52 MX31_IO_ADDRESS(x)) 47 MX31_IO_ADDRESS(x))
53 48
49/*
50 * KZM-ARM11-01 Board Control Registers on FPGA
51 */
52#define KZM_ARM11_CTL1 (MX31_CS4_BASE_ADDR + 0x1000)
53#define KZM_ARM11_CTL2 (MX31_CS4_BASE_ADDR + 0x1001)
54#define KZM_ARM11_RSW1 (MX31_CS4_BASE_ADDR + 0x1002)
55#define KZM_ARM11_BACK_LIGHT (MX31_CS4_BASE_ADDR + 0x1004)
56#define KZM_ARM11_FPGA_REV (MX31_CS4_BASE_ADDR + 0x1008)
57#define KZM_ARM11_7SEG_LED (MX31_CS4_BASE_ADDR + 0x1010)
58#define KZM_ARM11_LEDS (MX31_CS4_BASE_ADDR + 0x1020)
59#define KZM_ARM11_DIPSW2 (MX31_CS4_BASE_ADDR + 0x1003)
60
61/*
62 * External UART for touch panel on FPGA
63 */
64#define KZM_ARM11_16550 (MX31_CS4_BASE_ADDR + 0x1050)
65
54#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE) 66#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
55/* 67/*
56 * KZM-ARM11-01 has an external UART on FPGA 68 * KZM-ARM11-01 has an external UART on FPGA
@@ -173,15 +185,14 @@ static inline int kzm_init_smsc9118(void)
173#endif 185#endif
174 186
175#if defined(CONFIG_SERIAL_IMX) || defined(CONFIG_SERIAL_IMX_MODULE) 187#if defined(CONFIG_SERIAL_IMX) || defined(CONFIG_SERIAL_IMX_MODULE)
176static struct imxuart_platform_data uart_pdata = { 188static const struct imxuart_platform_data uart_pdata __initconst = {
177 .flags = IMXUART_HAVE_RTSCTS, 189 .flags = IMXUART_HAVE_RTSCTS,
178}; 190};
179 191
180static void __init kzm_init_imx_uart(void) 192static void __init kzm_init_imx_uart(void)
181{ 193{
182 mxc_register_device(&mxc_uart_device0, &uart_pdata); 194 imx31_add_imx_uart0(&uart_pdata);
183 195 imx31_add_imx_uart1(&uart_pdata);
184 mxc_register_device(&mxc_uart_device1, &uart_pdata);
185} 196}
186#else 197#else
187static inline void kzm_init_imx_uart(void) 198static inline void kzm_init_imx_uart(void)
diff --git a/arch/arm/mach-mx3/mach-mx31_3ds.c b/arch/arm/mach-mx3/mach-mx31_3ds.c
index 7e8d09ab9e6c..d4d9e7a1f735 100644
--- a/arch/arm/mach-mx3/mach-mx31_3ds.c
+++ b/arch/arm/mach-mx3/mach-mx31_3ds.c
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 13 */
18 14
19#include <linux/delay.h> 15#include <linux/delay.h>
@@ -37,19 +33,46 @@
37#include <asm/memory.h> 33#include <asm/memory.h>
38#include <asm/mach/map.h> 34#include <asm/mach/map.h>
39#include <mach/common.h> 35#include <mach/common.h>
40#include <mach/board-mx31_3ds.h>
41#include <mach/imx-uart.h>
42#include <mach/iomux-mx3.h> 36#include <mach/iomux-mx3.h>
43#include <mach/mxc_nand.h> 37
44#include <mach/spi.h> 38#include "devices-imx31.h"
45#include "devices.h" 39#include "devices.h"
46 40
47/*! 41/* Definitions for components on the Debug board */
48 * @file mx31_3ds.c 42
49 * 43/* Base address of CPLD controller on the Debug board */
50 * @brief This file contains the board-specific initialization routines. 44#define DEBUG_BASE_ADDRESS CS5_IO_ADDRESS(MX3x_CS5_BASE_ADDR)
51 * 45
52 * @ingroup System 46/* LAN9217 ethernet base address */
47#define LAN9217_BASE_ADDR MX3x_CS5_BASE_ADDR
48
49/* CPLD config and interrupt base address */
50#define CPLD_ADDR (DEBUG_BASE_ADDRESS + 0x20000)
51
52/* status, interrupt */
53#define CPLD_INT_STATUS_REG (CPLD_ADDR + 0x10)
54#define CPLD_INT_MASK_REG (CPLD_ADDR + 0x38)
55#define CPLD_INT_RESET_REG (CPLD_ADDR + 0x20)
56/* magic word for debug CPLD */
57#define CPLD_MAGIC_NUMBER1_REG (CPLD_ADDR + 0x40)
58#define CPLD_MAGIC_NUMBER2_REG (CPLD_ADDR + 0x48)
59/* CPLD code version */
60#define CPLD_CODE_VER_REG (CPLD_ADDR + 0x50)
61/* magic word for debug CPLD */
62#define CPLD_MAGIC_NUMBER3_REG (CPLD_ADDR + 0x58)
63
64/* CPLD IRQ line for external uart, external ethernet etc */
65#define EXPIO_PARENT_INT IOMUX_TO_IRQ(MX31_PIN_GPIO1_1)
66
67#define MXC_EXP_IO_BASE (MXC_BOARD_IRQ_START)
68#define MXC_IRQ_TO_EXPIO(irq) ((irq) - MXC_EXP_IO_BASE)
69
70#define EXPIO_INT_ENET (MXC_EXP_IO_BASE + 0)
71
72#define MXC_MAX_EXP_IO_LINES 16
73
74/*
75 * This file contains the board-specific initialization routines.
53 */ 76 */
54 77
55static int mx31_3ds_pins[] = { 78static int mx31_3ds_pins[] = {
@@ -145,7 +168,7 @@ static int spi1_internal_chipselect[] = {
145 MXC_SPI_CS(2), 168 MXC_SPI_CS(2),
146}; 169};
147 170
148static struct spi_imx_master spi1_pdata = { 171static const struct spi_imx_master spi1_pdata __initconst = {
149 .chipselect = spi1_internal_chipselect, 172 .chipselect = spi1_internal_chipselect,
150 .num_chipselect = ARRAY_SIZE(spi1_internal_chipselect), 173 .num_chipselect = ARRAY_SIZE(spi1_internal_chipselect),
151}; 174};
@@ -165,7 +188,8 @@ static struct spi_board_info mx31_3ds_spi_devs[] __initdata = {
165/* 188/*
166 * NAND Flash 189 * NAND Flash
167 */ 190 */
168static struct mxc_nand_platform_data imx31_3ds_nand_flash_pdata = { 191static const struct mxc_nand_platform_data
192mx31_3ds_nand_board_info __initconst = {
169 .width = 1, 193 .width = 1,
170 .hw_ecc = 1, 194 .hw_ecc = 1,
171#ifdef MACH_MX31_3DS_MXC_NAND_USE_BBT 195#ifdef MACH_MX31_3DS_MXC_NAND_USE_BBT
@@ -225,7 +249,7 @@ static struct fsl_usb2_platform_data usbotg_pdata = {
225 .phy_mode = FSL_USB2_PHY_ULPI, 249 .phy_mode = FSL_USB2_PHY_ULPI,
226}; 250};
227 251
228static struct imxuart_platform_data uart_pdata = { 252static const struct imxuart_platform_data uart_pdata __initconst = {
229 .flags = IMXUART_HAVE_RTSCTS, 253 .flags = IMXUART_HAVE_RTSCTS,
230}; 254};
231 255
@@ -407,10 +431,10 @@ static void __init mxc_board_init(void)
407 mxc_iomux_setup_multiple_pins(mx31_3ds_pins, ARRAY_SIZE(mx31_3ds_pins), 431 mxc_iomux_setup_multiple_pins(mx31_3ds_pins, ARRAY_SIZE(mx31_3ds_pins),
408 "mx31_3ds"); 432 "mx31_3ds");
409 433
410 mxc_register_device(&mxc_uart_device0, &uart_pdata); 434 imx31_add_imx_uart0(&uart_pdata);
411 mxc_register_device(&mxc_nand_device, &imx31_3ds_nand_flash_pdata); 435 imx31_add_mxc_nand(&mx31_3ds_nand_board_info);
412 436
413 mxc_register_device(&mxc_spi_device1, &spi1_pdata); 437 imx31_add_spi_imx0(&spi1_pdata);
414 spi_register_board_info(mx31_3ds_spi_devs, 438 spi_register_board_info(mx31_3ds_spi_devs,
415 ARRAY_SIZE(mx31_3ds_spi_devs)); 439 ARRAY_SIZE(mx31_3ds_spi_devs));
416 440
diff --git a/arch/arm/mach-mx3/mach-mx31ads.c b/arch/arm/mach-mx3/mach-mx31ads.c
index b3d1a1895c20..94b3e7c42404 100644
--- a/arch/arm/mach-mx3/mach-mx31ads.c
+++ b/arch/arm/mach-mx3/mach-mx31ads.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 15 */
20 16
21#include <linux/types.h> 17#include <linux/types.h>
@@ -33,8 +29,6 @@
33#include <asm/memory.h> 29#include <asm/memory.h>
34#include <asm/mach/map.h> 30#include <asm/mach/map.h>
35#include <mach/common.h> 31#include <mach/common.h>
36#include <mach/board-mx31ads.h>
37#include <mach/imx-uart.h>
38#include <mach/iomux-mx3.h> 32#include <mach/iomux-mx3.h>
39 33
40#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1 34#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1
@@ -43,14 +37,45 @@
43#include <linux/mfd/wm8350/pmic.h> 37#include <linux/mfd/wm8350/pmic.h>
44#endif 38#endif
45 39
40#include "devices-imx31.h"
46#include "devices.h" 41#include "devices.h"
47 42
48/*! 43/* Base address of PBC controller */
49 * @file mx31ads.c 44#define PBC_BASE_ADDRESS MX31_CS4_BASE_ADDR_VIRT
50 * 45/* Offsets for the PBC Controller register */
51 * @brief This file contains the board-specific initialization routines. 46
52 * 47/* PBC Board interrupt status register */
53 * @ingroup System 48#define PBC_INTSTATUS 0x000016
49
50/* PBC Board interrupt current status register */
51#define PBC_INTCURR_STATUS 0x000018
52
53/* PBC Interrupt mask register set address */
54#define PBC_INTMASK_SET 0x00001A
55
56/* PBC Interrupt mask register clear address */
57#define PBC_INTMASK_CLEAR 0x00001C
58
59/* External UART A */
60#define PBC_SC16C652_UARTA 0x010000
61
62/* External UART B */
63#define PBC_SC16C652_UARTB 0x010010
64
65#define PBC_INTSTATUS_REG (PBC_INTSTATUS + PBC_BASE_ADDRESS)
66#define PBC_INTMASK_SET_REG (PBC_INTMASK_SET + PBC_BASE_ADDRESS)
67#define PBC_INTMASK_CLEAR_REG (PBC_INTMASK_CLEAR + PBC_BASE_ADDRESS)
68#define EXPIO_PARENT_INT IOMUX_TO_IRQ(MX31_PIN_GPIO1_4)
69
70#define MXC_EXP_IO_BASE (MXC_BOARD_IRQ_START)
71#define MXC_IRQ_TO_EXPIO(irq) ((irq) - MXC_EXP_IO_BASE)
72
73#define EXPIO_INT_XUART_INTA (MXC_EXP_IO_BASE + 10)
74#define EXPIO_INT_XUART_INTB (MXC_EXP_IO_BASE + 11)
75
76#define MXC_MAX_EXP_IO_LINES 16
77/*
78 * This file contains the board-specific initialization routines.
54 */ 79 */
55 80
56#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE) 81#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
@@ -98,7 +123,7 @@ static inline int mxc_init_extuart(void)
98#endif 123#endif
99 124
100#if defined(CONFIG_SERIAL_IMX) || defined(CONFIG_SERIAL_IMX_MODULE) 125#if defined(CONFIG_SERIAL_IMX) || defined(CONFIG_SERIAL_IMX_MODULE)
101static struct imxuart_platform_data uart_pdata = { 126static const struct imxuart_platform_data uart_pdata __initconst = {
102 .flags = IMXUART_HAVE_RTSCTS, 127 .flags = IMXUART_HAVE_RTSCTS,
103}; 128};
104 129
@@ -112,7 +137,7 @@ static unsigned int uart_pins[] = {
112static inline void mxc_init_imx_uart(void) 137static inline void mxc_init_imx_uart(void)
113{ 138{
114 mxc_iomux_setup_multiple_pins(uart_pins, ARRAY_SIZE(uart_pins), "uart-0"); 139 mxc_iomux_setup_multiple_pins(uart_pins, ARRAY_SIZE(uart_pins), "uart-0");
115 mxc_register_device(&mxc_uart_device0, &uart_pdata); 140 imx31_add_imx_uart0(&uart_pdata);
116} 141}
117#else /* !SERIAL_IMX */ 142#else /* !SERIAL_IMX */
118static inline void mxc_init_imx_uart(void) 143static inline void mxc_init_imx_uart(void)
@@ -475,7 +500,7 @@ static void mxc_init_i2c(void)
475 mxc_iomux_mode(IOMUX_MODE(MX31_PIN_CSPI2_MOSI, IOMUX_CONFIG_ALT1)); 500 mxc_iomux_mode(IOMUX_MODE(MX31_PIN_CSPI2_MOSI, IOMUX_CONFIG_ALT1));
476 mxc_iomux_mode(IOMUX_MODE(MX31_PIN_CSPI2_MISO, IOMUX_CONFIG_ALT1)); 501 mxc_iomux_mode(IOMUX_MODE(MX31_PIN_CSPI2_MISO, IOMUX_CONFIG_ALT1));
477 502
478 mxc_register_device(&mxc_i2c_device1, NULL); 503 imx31_add_imx_i2c1(NULL);
479} 504}
480#else 505#else
481static void mxc_init_i2c(void) 506static void mxc_init_i2c(void)
diff --git a/arch/arm/mach-mx3/mach-mx31lilly.c b/arch/arm/mach-mx3/mach-mx31lilly.c
index 46bf57c44372..84942cf41b63 100644
--- a/arch/arm/mach-mx3/mach-mx31lilly.c
+++ b/arch/arm/mach-mx3/mach-mx31lilly.c
@@ -18,10 +18,6 @@
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */ 21 */
26 22
27#include <linux/types.h> 23#include <linux/types.h>
@@ -46,10 +42,10 @@
46#include <mach/common.h> 42#include <mach/common.h>
47#include <mach/iomux-mx3.h> 43#include <mach/iomux-mx3.h>
48#include <mach/board-mx31lilly.h> 44#include <mach/board-mx31lilly.h>
49#include <mach/spi.h>
50#include <mach/mxc_ehci.h> 45#include <mach/mxc_ehci.h>
51#include <mach/ulpi.h> 46#include <mach/ulpi.h>
52 47
48#include "devices-imx31.h"
53#include "devices.h" 49#include "devices.h"
54 50
55/* 51/*
@@ -257,12 +253,12 @@ static int spi_internal_chipselect[] = {
257 MXC_SPI_CS(2), 253 MXC_SPI_CS(2),
258}; 254};
259 255
260static struct spi_imx_master spi0_pdata = { 256static const struct spi_imx_master spi0_pdata __initconst = {
261 .chipselect = spi_internal_chipselect, 257 .chipselect = spi_internal_chipselect,
262 .num_chipselect = ARRAY_SIZE(spi_internal_chipselect), 258 .num_chipselect = ARRAY_SIZE(spi_internal_chipselect),
263}; 259};
264 260
265static struct spi_imx_master spi1_pdata = { 261static const struct spi_imx_master spi1_pdata __initconst = {
266 .chipselect = spi_internal_chipselect, 262 .chipselect = spi_internal_chipselect,
267 .num_chipselect = ARRAY_SIZE(spi_internal_chipselect), 263 .num_chipselect = ARRAY_SIZE(spi_internal_chipselect),
268}; 264};
@@ -315,8 +311,8 @@ static void __init mx31lilly_board_init(void)
315 mxc_iomux_alloc_pin(MX31_PIN_CSPI2_SS1__SS1, "SPI2_SS1"); 311 mxc_iomux_alloc_pin(MX31_PIN_CSPI2_SS1__SS1, "SPI2_SS1");
316 mxc_iomux_alloc_pin(MX31_PIN_CSPI2_SS2__SS2, "SPI2_SS2"); 312 mxc_iomux_alloc_pin(MX31_PIN_CSPI2_SS2__SS2, "SPI2_SS2");
317 313
318 mxc_register_device(&mxc_spi_device0, &spi0_pdata); 314 imx31_add_spi_imx0(&spi0_pdata);
319 mxc_register_device(&mxc_spi_device1, &spi1_pdata); 315 imx31_add_spi_imx1(&spi1_pdata);
320 spi_register_board_info(&mc13783_dev, 1); 316 spi_register_board_info(&mc13783_dev, 1);
321 317
322 platform_add_devices(devices, ARRAY_SIZE(devices)); 318 platform_add_devices(devices, ARRAY_SIZE(devices));
diff --git a/arch/arm/mach-mx3/mach-mx31lite.c b/arch/arm/mach-mx3/mach-mx31lite.c
index 2b6d11400877..da236c497d2a 100644
--- a/arch/arm/mach-mx3/mach-mx31lite.c
+++ b/arch/arm/mach-mx3/mach-mx31lite.c
@@ -13,10 +13,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 16 */
21 17
22#include <linux/types.h> 18#include <linux/types.h>
@@ -42,14 +38,12 @@
42#include <mach/hardware.h> 38#include <mach/hardware.h>
43#include <mach/common.h> 39#include <mach/common.h>
44#include <mach/board-mx31lite.h> 40#include <mach/board-mx31lite.h>
45#include <mach/imx-uart.h>
46#include <mach/iomux-mx3.h> 41#include <mach/iomux-mx3.h>
47#include <mach/irqs.h> 42#include <mach/irqs.h>
48#include <mach/mxc_nand.h>
49#include <mach/spi.h>
50#include <mach/mxc_ehci.h> 43#include <mach/mxc_ehci.h>
51#include <mach/ulpi.h> 44#include <mach/ulpi.h>
52 45
46#include "devices-imx31.h"
53#include "devices.h" 47#include "devices.h"
54 48
55/* 49/*
@@ -69,7 +63,8 @@ static unsigned int mx31lite_pins[] = {
69 MX31_PIN_CSPI2_SS2__SS2, 63 MX31_PIN_CSPI2_SS2__SS2,
70}; 64};
71 65
72static struct mxc_nand_platform_data mx31lite_nand_board_info = { 66static const struct mxc_nand_platform_data
67mx31lite_nand_board_info __initconst = {
73 .width = 1, 68 .width = 1,
74 .hw_ecc = 1, 69 .hw_ecc = 1,
75}; 70};
@@ -112,7 +107,7 @@ static int spi_internal_chipselect[] = {
112 MXC_SPI_CS(0), 107 MXC_SPI_CS(0),
113}; 108};
114 109
115static struct spi_imx_master spi1_pdata = { 110static const struct spi_imx_master spi1_pdata __initconst = {
116 .chipselect = spi_internal_chipselect, 111 .chipselect = spi_internal_chipselect,
117 .num_chipselect = ARRAY_SIZE(spi_internal_chipselect), 112 .num_chipselect = ARRAY_SIZE(spi_internal_chipselect),
118}; 113};
@@ -253,9 +248,9 @@ static void __init mxc_board_init(void)
253 248
254 /* NOR and NAND flash */ 249 /* NOR and NAND flash */
255 platform_device_register(&physmap_flash_device); 250 platform_device_register(&physmap_flash_device);
256 mxc_register_device(&mxc_nand_device, &mx31lite_nand_board_info); 251 imx31_add_mxc_nand(&mx31lite_nand_board_info);
257 252
258 mxc_register_device(&mxc_spi_device1, &spi1_pdata); 253 imx31_add_spi_imx1(&spi1_pdata);
259 spi_register_board_info(&mc13783_spi_dev, 1); 254 spi_register_board_info(&mc13783_spi_dev, 1);
260 255
261#if defined(CONFIG_USB_ULPI) 256#if defined(CONFIG_USB_ULPI)
diff --git a/arch/arm/mach-mx3/mach-mx31moboard.c b/arch/arm/mach-mx3/mach-mx31moboard.c
index 33a8d35498a7..67776bc61c33 100644
--- a/arch/arm/mach-mx3/mach-mx31moboard.c
+++ b/arch/arm/mach-mx3/mach-mx31moboard.c
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 13 */
18 14
19#include <linux/delay.h> 15#include <linux/delay.h>
@@ -42,16 +38,15 @@
42#include <mach/board-mx31moboard.h> 38#include <mach/board-mx31moboard.h>
43#include <mach/common.h> 39#include <mach/common.h>
44#include <mach/hardware.h> 40#include <mach/hardware.h>
45#include <mach/imx-uart.h>
46#include <mach/iomux-mx3.h> 41#include <mach/iomux-mx3.h>
47#include <mach/ipu.h> 42#include <mach/ipu.h>
48#include <mach/i2c.h>
49#include <mach/mmc.h> 43#include <mach/mmc.h>
50#include <mach/mxc_ehci.h> 44#include <mach/mxc_ehci.h>
51#include <mach/mx3_camera.h> 45#include <mach/mx3_camera.h>
52#include <mach/spi.h> 46#include <mach/spi.h>
53#include <mach/ulpi.h> 47#include <mach/ulpi.h>
54 48
49#include "devices-imx31.h"
55#include "devices.h" 50#include "devices.h"
56 51
57static unsigned int moboard_pins[] = { 52static unsigned int moboard_pins[] = {
@@ -130,24 +125,36 @@ static struct platform_device mx31moboard_flash = {
130 125
131static int moboard_uart0_init(struct platform_device *pdev) 126static int moboard_uart0_init(struct platform_device *pdev)
132{ 127{
133 gpio_request(IOMUX_TO_GPIO(MX31_PIN_CTS1), "uart0-cts-hack"); 128 int ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_CTS1), "uart0-cts-hack");
134 gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_CTS1), 0); 129 if (ret)
135 return 0; 130 return ret;
131
132 ret = gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_CTS1), 0);
133 if (ret)
134 gpio_free(IOMUX_TO_GPIO(MX31_PIN_CTS1));
135
136 return ret;
137}
138
139static void moboard_uart0_exit(struct platform_device *pdev)
140{
141 gpio_free(IOMUX_TO_GPIO(MX31_PIN_CTS1));
136} 142}
137 143
138static struct imxuart_platform_data uart0_pdata = { 144static const struct imxuart_platform_data uart0_pdata __initconst = {
139 .init = moboard_uart0_init, 145 .init = moboard_uart0_init,
146 .exit = moboard_uart0_exit,
140}; 147};
141 148
142static struct imxuart_platform_data uart4_pdata = { 149static const struct imxuart_platform_data uart4_pdata __initconst = {
143 .flags = IMXUART_HAVE_RTSCTS, 150 .flags = IMXUART_HAVE_RTSCTS,
144}; 151};
145 152
146static struct imxi2c_platform_data moboard_i2c0_pdata = { 153static const struct imxi2c_platform_data moboard_i2c0_data __initconst = {
147 .bitrate = 400000, 154 .bitrate = 400000,
148}; 155};
149 156
150static struct imxi2c_platform_data moboard_i2c1_pdata = { 157static const struct imxi2c_platform_data moboard_i2c1_data __initconst = {
151 .bitrate = 100000, 158 .bitrate = 100000,
152}; 159};
153 160
@@ -156,7 +163,7 @@ static int moboard_spi1_cs[] = {
156 MXC_SPI_CS(2), 163 MXC_SPI_CS(2),
157}; 164};
158 165
159static struct spi_imx_master moboard_spi1_master = { 166static const struct spi_imx_master moboard_spi1_pdata __initconst = {
160 .chipselect = moboard_spi1_cs, 167 .chipselect = moboard_spi1_cs,
161 .num_chipselect = ARRAY_SIZE(moboard_spi1_cs), 168 .num_chipselect = ARRAY_SIZE(moboard_spi1_cs),
162}; 169};
@@ -220,11 +227,54 @@ static struct mc13783_regulator_init_data moboard_regulators[] = {
220 }, 227 },
221}; 228};
222 229
230static struct mc13783_led_platform_data moboard_led[] = {
231 {
232 .id = MC13783_LED_R1,
233 .name = "coreboard-led-4:red",
234 .max_current = 2,
235 },
236 {
237 .id = MC13783_LED_G1,
238 .name = "coreboard-led-4:green",
239 .max_current = 2,
240 },
241 {
242 .id = MC13783_LED_B1,
243 .name = "coreboard-led-4:blue",
244 .max_current = 2,
245 },
246 {
247 .id = MC13783_LED_R2,
248 .name = "coreboard-led-5:red",
249 .max_current = 3,
250 },
251 {
252 .id = MC13783_LED_G2,
253 .name = "coreboard-led-5:green",
254 .max_current = 3,
255 },
256 {
257 .id = MC13783_LED_B2,
258 .name = "coreboard-led-5:blue",
259 .max_current = 3,
260 },
261};
262
263static struct mc13783_leds_platform_data moboard_leds = {
264 .num_leds = ARRAY_SIZE(moboard_led),
265 .led = moboard_led,
266 .flags = MC13783_LED_SLEWLIMTC,
267 .abmode = MC13783_LED_AB_DISABLED,
268 .tc1_period = MC13783_LED_PERIOD_10MS,
269 .tc2_period = MC13783_LED_PERIOD_10MS,
270};
271
223static struct mc13783_platform_data moboard_pmic = { 272static struct mc13783_platform_data moboard_pmic = {
224 .regulators = moboard_regulators, 273 .regulators = moboard_regulators,
225 .num_regulators = ARRAY_SIZE(moboard_regulators), 274 .num_regulators = ARRAY_SIZE(moboard_regulators),
275 .leds = &moboard_leds,
226 .flags = MC13783_USE_REGULATOR | MC13783_USE_RTC | 276 .flags = MC13783_USE_REGULATOR | MC13783_USE_RTC |
227 MC13783_USE_ADC, 277 MC13783_USE_ADC | MC13783_USE_LED,
228}; 278};
229 279
230static struct spi_board_info moboard_spi_board_info[] __initdata = { 280static struct spi_board_info moboard_spi_board_info[] __initdata = {
@@ -243,7 +293,7 @@ static int moboard_spi2_cs[] = {
243 MXC_SPI_CS(1), 293 MXC_SPI_CS(1),
244}; 294};
245 295
246static struct spi_imx_master moboard_spi2_master = { 296static const struct spi_imx_master moboard_spi2_pdata __initconst = {
247 .chipselect = moboard_spi2_cs, 297 .chipselect = moboard_spi2_cs,
248 .num_chipselect = ARRAY_SIZE(moboard_spi2_cs), 298 .num_chipselect = ARRAY_SIZE(moboard_spi2_cs),
249}; 299};
@@ -456,15 +506,14 @@ static void __init mxc_board_init(void)
456 506
457 platform_add_devices(devices, ARRAY_SIZE(devices)); 507 platform_add_devices(devices, ARRAY_SIZE(devices));
458 508
459 mxc_register_device(&mxc_uart_device0, &uart0_pdata); 509 imx31_add_imx_uart0(&uart0_pdata);
460 510 imx31_add_imx_uart4(&uart4_pdata);
461 mxc_register_device(&mxc_uart_device4, &uart4_pdata);
462 511
463 mxc_register_device(&mxc_i2c_device0, &moboard_i2c0_pdata); 512 imx31_add_imx_i2c0(&moboard_i2c0_data);
464 mxc_register_device(&mxc_i2c_device1, &moboard_i2c1_pdata); 513 imx31_add_imx_i2c1(&moboard_i2c1_data);
465 514
466 mxc_register_device(&mxc_spi_device1, &moboard_spi1_master); 515 imx31_add_spi_imx1(&moboard_spi1_pdata);
467 mxc_register_device(&mxc_spi_device2, &moboard_spi2_master); 516 imx31_add_spi_imx2(&moboard_spi2_pdata);
468 517
469 gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3), "pmic-irq"); 518 gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3), "pmic-irq");
470 gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3)); 519 gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
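
Beyond the mechanical conversion, the mx31moboard hunk fixes moboard_uart0_init(): the gpio_request() and gpio_direction_output() return values are now checked and propagated, and a new moboard_uart0_exit() releases the GPIO again. These hooks are invoked by the i.MX serial driver around probe and remove, so a failing .init must leave nothing claimed. A hedged sketch of that consumer side follows; the function names are made up and this is not the merged driver code.

#include <linux/platform_device.h>
#include <mach/imx-uart.h>

static int example_uart_probe(struct platform_device *pdev)
{
	const struct imxuart_platform_data *pdata = pdev->dev.platform_data;
	int ret;

	if (pdata && pdata->init) {
		ret = pdata->init(pdev);	/* board-specific pin/GPIO setup */
		if (ret)
			return ret;		/* .init cleaned up after itself */
	}

	/* ... map registers, request the IRQ, register the port ... */
	return 0;
}

static int example_uart_remove(struct platform_device *pdev)
{
	const struct imxuart_platform_data *pdata = pdev->dev.platform_data;

	if (pdata && pdata->exit)
		pdata->exit(pdev);		/* undo what .init claimed */
	return 0;
}
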
diff --git a/arch/arm/mach-mx3/mach-mx35pdk.c b/arch/arm/mach-mx3/mach-mx35_3ds.c
index bcac84d4dca4..1c30d7212f17 100644
--- a/arch/arm/mach-mx3/mach-mx35pdk.c
+++ b/arch/arm/mach-mx3/mach-mx35_3ds.c
@@ -12,10 +12,12 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 */
16 * You should have received a copy of the GNU General Public License 16
17 * along with this program; if not, write to the Free Software 17/*
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * This machine is known as:
19 * - i.MX35 3-Stack Development System
20 * - i.MX35 Platform Development Kit (i.MX35 PDK)
19 */ 21 */
20 22
21#include <linux/types.h> 23#include <linux/types.h>
@@ -32,12 +34,12 @@
32 34
33#include <mach/hardware.h> 35#include <mach/hardware.h>
34#include <mach/common.h> 36#include <mach/common.h>
35#include <mach/imx-uart.h>
36#include <mach/iomux-mx35.h> 37#include <mach/iomux-mx35.h>
37 38
39#include "devices-imx35.h"
38#include "devices.h" 40#include "devices.h"
39 41
40static struct imxuart_platform_data uart_pdata = { 42static const struct imxuart_platform_data uart_pdata __initconst = {
41 .flags = IMXUART_HAVE_RTSCTS, 43 .flags = IMXUART_HAVE_RTSCTS,
42}; 44};
43 45
@@ -90,7 +92,7 @@ static void __init mxc_board_init(void)
90 92
91 platform_add_devices(devices, ARRAY_SIZE(devices)); 93 platform_add_devices(devices, ARRAY_SIZE(devices));
92 94
93 mxc_register_device(&mxc_uart_device0, &uart_pdata); 95 imx35_add_imx_uart0(&uart_pdata);
94 96
95 mxc_register_device(&mxc_otg_udc_device, &usb_pdata); 97 mxc_register_device(&mxc_otg_udc_device, &usb_pdata);
96} 98}
diff --git a/arch/arm/mach-mx3/mach-pcm037.c b/arch/arm/mach-mx3/mach-pcm037.c
index bb6c056854e9..8a292dd1a714 100644
--- a/arch/arm/mach-mx3/mach-pcm037.c
+++ b/arch/arm/mach-mx3/mach-pcm037.c
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 13 */
18 14
19#include <linux/types.h> 15#include <linux/types.h>
@@ -43,20 +39,17 @@
43#include <asm/mach/arch.h> 39#include <asm/mach/arch.h>
44#include <asm/mach/time.h> 40#include <asm/mach/time.h>
45#include <asm/mach/map.h> 41#include <asm/mach/map.h>
46#include <mach/board-pcm037.h>
47#include <mach/common.h> 42#include <mach/common.h>
48#include <mach/hardware.h> 43#include <mach/hardware.h>
49#include <mach/i2c.h>
50#include <mach/imx-uart.h>
51#include <mach/iomux-mx3.h> 44#include <mach/iomux-mx3.h>
52#include <mach/ipu.h> 45#include <mach/ipu.h>
53#include <mach/mmc.h> 46#include <mach/mmc.h>
54#include <mach/mx3_camera.h> 47#include <mach/mx3_camera.h>
55#include <mach/mx3fb.h> 48#include <mach/mx3fb.h>
56#include <mach/mxc_nand.h>
57#include <mach/mxc_ehci.h> 49#include <mach/mxc_ehci.h>
58#include <mach/ulpi.h> 50#include <mach/ulpi.h>
59 51
52#include "devices-imx31.h"
60#include "devices.h" 53#include "devices.h"
61#include "pcm037.h" 54#include "pcm037.h"
62 55
@@ -225,7 +218,7 @@ static struct platform_device pcm037_flash = {
225 .num_resources = 1, 218 .num_resources = 1,
226}; 219};
227 220
228static struct imxuart_platform_data uart_pdata = { 221static const struct imxuart_platform_data uart_pdata __initconst = {
229 .flags = IMXUART_HAVE_RTSCTS, 222 .flags = IMXUART_HAVE_RTSCTS,
230}; 223};
231 224
@@ -279,16 +272,17 @@ static struct platform_device pcm037_sram_device = {
279 .resource = &pcm038_sram_resource, 272 .resource = &pcm038_sram_resource,
280}; 273};
281 274
282static struct mxc_nand_platform_data pcm037_nand_board_info = { 275static const struct mxc_nand_platform_data
276pcm037_nand_board_info __initconst = {
283 .width = 1, 277 .width = 1,
284 .hw_ecc = 1, 278 .hw_ecc = 1,
285}; 279};
286 280
287static struct imxi2c_platform_data pcm037_i2c_1_data = { 281static const struct imxi2c_platform_data pcm037_i2c1_data __initconst = {
288 .bitrate = 100000, 282 .bitrate = 100000,
289}; 283};
290 284
291static struct imxi2c_platform_data pcm037_i2c_2_data = { 285static const struct imxi2c_platform_data pcm037_i2c2_data __initconst = {
292 .bitrate = 20000, 286 .bitrate = 20000,
293}; 287};
294 288
@@ -615,9 +609,10 @@ static void __init mxc_board_init(void)
615 609
616 platform_add_devices(devices, ARRAY_SIZE(devices)); 610 platform_add_devices(devices, ARRAY_SIZE(devices));
617 611
618 mxc_register_device(&mxc_uart_device0, &uart_pdata); 612 imx31_add_imx_uart0(&uart_pdata);
619 mxc_register_device(&mxc_uart_device1, &uart_pdata); 613 /* XXX: shouldn't this have .flags = 0 (i.e. no RTSCTS) on PCM037_EET? */
620 mxc_register_device(&mxc_uart_device2, &uart_pdata); 614 imx31_add_imx_uart1(&uart_pdata);
615 imx31_add_imx_uart2(&uart_pdata);
621 616
622 mxc_register_device(&mxc_w1_master_device, NULL); 617 mxc_register_device(&mxc_w1_master_device, NULL);
623 618
@@ -635,10 +630,10 @@ static void __init mxc_board_init(void)
635 i2c_register_board_info(1, pcm037_i2c_devices, 630 i2c_register_board_info(1, pcm037_i2c_devices,
636 ARRAY_SIZE(pcm037_i2c_devices)); 631 ARRAY_SIZE(pcm037_i2c_devices));
637 632
638 mxc_register_device(&mxc_i2c_device1, &pcm037_i2c_1_data); 633 imx31_add_imx_i2c1(&pcm037_i2c1_data);
639 mxc_register_device(&mxc_i2c_device2, &pcm037_i2c_2_data); 634 imx31_add_imx_i2c2(&pcm037_i2c2_data);
640 635
641 mxc_register_device(&mxc_nand_device, &pcm037_nand_board_info); 636 imx31_add_mxc_nand(&pcm037_nand_board_info);
642 mxc_register_device(&mxcsdhc_device0, &sdhc_pdata); 637 mxc_register_device(&mxcsdhc_device0, &sdhc_pdata);
643 mxc_register_device(&mx3_ipu, &mx3_ipu_data); 638 mxc_register_device(&mx3_ipu, &mx3_ipu_data);
644 mxc_register_device(&mx3_fb, &mx3fb_pdata); 639 mxc_register_device(&mx3_fb, &mx3fb_pdata);
diff --git a/arch/arm/mach-mx3/mach-pcm037_eet.c b/arch/arm/mach-mx3/mach-pcm037_eet.c
index 8d386000fc40..c8b98218efee 100644
--- a/arch/arm/mach-mx3/mach-pcm037_eet.c
+++ b/arch/arm/mach-mx3/mach-pcm037_eet.c
@@ -13,9 +13,6 @@
13#include <linux/spi/spi.h> 13#include <linux/spi/spi.h>
14 14
15#include <mach/common.h> 15#include <mach/common.h>
16#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
17#include <mach/spi.h>
18#endif
19#include <mach/iomux-mx3.h> 16#include <mach/iomux-mx3.h>
20 17
21#include <asm/mach-types.h> 18#include <asm/mach-types.h>
@@ -64,7 +61,7 @@ static struct spi_board_info pcm037_spi_dev[] = {
64#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE) 61#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
65static int pcm037_spi1_cs[] = {MXC_SPI_CS(1), IOMUX_TO_GPIO(MX31_PIN_KEY_COL7)}; 62static int pcm037_spi1_cs[] = {MXC_SPI_CS(1), IOMUX_TO_GPIO(MX31_PIN_KEY_COL7)};
66 63
67struct spi_imx_master pcm037_spi1_master = { 64static const struct spi_imx_master pcm037_spi1_pdata __initconst = {
68 .chipselect = pcm037_spi1_cs, 65 .chipselect = pcm037_spi1_cs,
69 .num_chipselect = ARRAY_SIZE(pcm037_spi1_cs), 66 .num_chipselect = ARRAY_SIZE(pcm037_spi1_cs),
70}; 67};
@@ -184,7 +181,7 @@ static int eet_init_devices(void)
184 /* SPI */ 181 /* SPI */
185 spi_register_board_info(pcm037_spi_dev, ARRAY_SIZE(pcm037_spi_dev)); 182 spi_register_board_info(pcm037_spi_dev, ARRAY_SIZE(pcm037_spi_dev));
186#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE) 183#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
187 mxc_register_device(&mxc_spi_device0, &pcm037_spi1_master); 184 imx35_add_spi_imx0(&pcm037_spi1_pdata);
188#endif 185#endif
189 186
190 platform_device_register(&pcm037_gpio_keys_device); 187 platform_device_register(&pcm037_gpio_keys_device);
diff --git a/arch/arm/mach-mx3/mach-pcm043.c b/arch/arm/mach-mx3/mach-pcm043.c
index 8071b7281c4b..b92f624c755e 100644
--- a/arch/arm/mach-mx3/mach-pcm043.c
+++ b/arch/arm/mach-mx3/mach-pcm043.c
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 13 */
18 14
19#include <linux/types.h> 15#include <linux/types.h>
@@ -40,19 +36,15 @@
40 36
41#include <mach/hardware.h> 37#include <mach/hardware.h>
42#include <mach/common.h> 38#include <mach/common.h>
43#include <mach/imx-uart.h>
44#if defined CONFIG_I2C_IMX || defined CONFIG_I2C_IMX_MODULE
45#include <mach/i2c.h>
46#endif
47#include <mach/iomux-mx35.h> 39#include <mach/iomux-mx35.h>
48#include <mach/ipu.h> 40#include <mach/ipu.h>
49#include <mach/mx3fb.h> 41#include <mach/mx3fb.h>
50#include <mach/mxc_nand.h>
51#include <mach/mxc_ehci.h> 42#include <mach/mxc_ehci.h>
52#include <mach/ulpi.h> 43#include <mach/ulpi.h>
53#include <mach/audmux.h> 44#include <mach/audmux.h>
54#include <mach/ssi.h> 45#include <mach/ssi.h>
55 46
47#include "devices-imx35.h"
56#include "devices.h" 48#include "devices.h"
57 49
58static const struct fb_videomode fb_modedb[] = { 50static const struct fb_videomode fb_modedb[] = {
@@ -122,12 +114,12 @@ static struct platform_device pcm043_flash = {
122 .num_resources = 1, 114 .num_resources = 1,
123}; 115};
124 116
125static struct imxuart_platform_data uart_pdata = { 117static const struct imxuart_platform_data uart_pdata __initconst = {
126 .flags = IMXUART_HAVE_RTSCTS, 118 .flags = IMXUART_HAVE_RTSCTS,
127}; 119};
128 120
129#if defined CONFIG_I2C_IMX || defined CONFIG_I2C_IMX_MODULE 121#if defined CONFIG_I2C_IMX || defined CONFIG_I2C_IMX_MODULE
130static struct imxi2c_platform_data pcm043_i2c_1_data = { 122static const struct imxi2c_platform_data pcm043_i2c0_data __initconst = {
131 .bitrate = 50000, 123 .bitrate = 50000,
132}; 124};
133 125
@@ -304,7 +296,8 @@ static struct imx_ssi_platform_data pcm043_ssi_pdata = {
304 .flags = IMX_SSI_USE_AC97, 296 .flags = IMX_SSI_USE_AC97,
305}; 297};
306 298
307static struct mxc_nand_platform_data pcm037_nand_board_info = { 299static const struct mxc_nand_platform_data
300pcm037_nand_board_info __initconst = {
308 .width = 1, 301 .width = 1,
309 .hw_ecc = 1, 302 .hw_ecc = 1,
310}; 303};
@@ -363,17 +356,17 @@ static void __init mxc_board_init(void)
363 356
364 platform_add_devices(devices, ARRAY_SIZE(devices)); 357 platform_add_devices(devices, ARRAY_SIZE(devices));
365 358
366 mxc_register_device(&mxc_uart_device0, &uart_pdata); 359 imx35_add_imx_uart0(&uart_pdata);
367 mxc_register_device(&mxc_nand_device, &pcm037_nand_board_info); 360 imx35_add_mxc_nand(&pcm037_nand_board_info);
368 mxc_register_device(&imx_ssi_device0, &pcm043_ssi_pdata); 361 mxc_register_device(&imx_ssi_device0, &pcm043_ssi_pdata);
369 362
370 mxc_register_device(&mxc_uart_device1, &uart_pdata); 363 imx35_add_imx_uart1(&uart_pdata);
371 364
372#if defined CONFIG_I2C_IMX || defined CONFIG_I2C_IMX_MODULE 365#if defined CONFIG_I2C_IMX || defined CONFIG_I2C_IMX_MODULE
373 i2c_register_board_info(0, pcm043_i2c_devices, 366 i2c_register_board_info(0, pcm043_i2c_devices,
374 ARRAY_SIZE(pcm043_i2c_devices)); 367 ARRAY_SIZE(pcm043_i2c_devices));
375 368
376 mxc_register_device(&mxc_i2c_device0, &pcm043_i2c_1_data); 369 imx35_add_imx_i2c0(&pcm043_i2c0_data);
377#endif 370#endif
378 371
379 mxc_register_device(&mx3_ipu, &mx3_ipu_data); 372 mxc_register_device(&mx3_ipu, &mx3_ipu_data);
diff --git a/arch/arm/mach-mx3/mach-qong.c b/arch/arm/mach-mx3/mach-qong.c
index e5b5b8323a17..d44ac70222a5 100644
--- a/arch/arm/mach-mx3/mach-qong.c
+++ b/arch/arm/mach-mx3/mach-qong.c
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 13 */
18 14
19#include <linux/types.h> 15#include <linux/types.h>
@@ -34,9 +30,9 @@
34#include <mach/common.h> 30#include <mach/common.h>
35#include <asm/page.h> 31#include <asm/page.h>
36#include <asm/setup.h> 32#include <asm/setup.h>
37#include <mach/board-qong.h>
38#include <mach/imx-uart.h>
39#include <mach/iomux-mx3.h> 33#include <mach/iomux-mx3.h>
34
35#include "devices-imx31.h"
40#include "devices.h" 36#include "devices.h"
41 37
42/* FPGA defines */ 38/* FPGA defines */
@@ -62,7 +58,7 @@
62 * This file contains the board-specific initialization routines. 58 * This file contains the board-specific initialization routines.
63 */ 59 */
64 60
65static struct imxuart_platform_data uart_pdata = { 61static const struct imxuart_platform_data uart_pdata __initconst = {
66 .flags = IMXUART_HAVE_RTSCTS, 62 .flags = IMXUART_HAVE_RTSCTS,
67}; 63};
68 64
@@ -73,11 +69,11 @@ static int uart_pins[] = {
73 MX31_PIN_RXD1__RXD1 69 MX31_PIN_RXD1__RXD1
74}; 70};
75 71
76static inline void mxc_init_imx_uart(void) 72static inline void __init mxc_init_imx_uart(void)
77{ 73{
78 mxc_iomux_setup_multiple_pins(uart_pins, ARRAY_SIZE(uart_pins), 74 mxc_iomux_setup_multiple_pins(uart_pins, ARRAY_SIZE(uart_pins),
79 "uart-0"); 75 "uart-0");
80 mxc_register_device(&mxc_uart_device0, &uart_pdata); 76 imx31_add_imx_uart0(&uart_pdata);
81} 77}
82 78
83static struct resource dnet_resources[] = { 79static struct resource dnet_resources[] = {
@@ -116,7 +112,7 @@ static struct physmap_flash_data qong_flash_data = {
116 112
117static struct resource qong_flash_resource = { 113static struct resource qong_flash_resource = {
118 .start = MX31_CS0_BASE_ADDR, 114 .start = MX31_CS0_BASE_ADDR,
119 .end = MX31_CS0_BASE_ADDR + QONG_NOR_SIZE - 1, 115 .end = MX31_CS0_BASE_ADDR + SZ_128M - 1,
120 .flags = IORESOURCE_MEM, 116 .flags = IORESOURCE_MEM,
121}; 117};
122 118
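
The qong hunk also adds __init to mxc_init_imx_uart(). This follows from uart_pdata becoming __initconst: a function that references init-section data, or calls helpers that are only usable during init such as imx31_add_imx_uart0(), should itself live in .init.text, otherwise the build warns about a section mismatch and the reference would dangle once the init memory is freed. Minimal illustration with assumed names:

static const struct imxuart_platform_data example_uart_pdata __initconst = {
	.flags = IMXUART_HAVE_RTSCTS,
};

/* __init keeps the caller in .init.text, matching the data it references */
static void __init example_init_imx_uart(void)
{
	imx31_add_imx_uart0(&example_uart_pdata);
}
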
diff --git a/arch/arm/mach-mx3/mm.c b/arch/arm/mach-mx3/mm.c
index 6858a4f9806c..a378fba49a8b 100644
--- a/arch/arm/mach-mx3/mm.c
+++ b/arch/arm/mach-mx3/mm.c
@@ -14,10 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 17 */
22 18
23#include <linux/mm.h> 19#include <linux/mm.h>
@@ -97,8 +93,11 @@ void __init mx35_map_io(void)
97} 93}
98#endif 94#endif
99 95
96int imx3x_register_gpios(void);
97
100void __init mx31_init_irq(void) 98void __init mx31_init_irq(void)
101{ 99{
100 imx3x_register_gpios();
102 mxc_init_irq(IO_ADDRESS(AVIC_BASE_ADDR)); 101 mxc_init_irq(IO_ADDRESS(AVIC_BASE_ADDR));
103} 102}
104 103
diff --git a/arch/arm/mach-mx3/mx31lilly-db.c b/arch/arm/mach-mx3/mx31lilly-db.c
index 7aebd74a12e8..827fd3c80201 100644
--- a/arch/arm/mach-mx3/mx31lilly-db.c
+++ b/arch/arm/mach-mx3/mx31lilly-db.c
@@ -18,10 +18,6 @@
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */ 21 */
26 22
27#include <linux/kernel.h> 23#include <linux/kernel.h>
@@ -36,13 +32,13 @@
36 32
37#include <mach/hardware.h> 33#include <mach/hardware.h>
38#include <mach/common.h> 34#include <mach/common.h>
39#include <mach/imx-uart.h>
40#include <mach/iomux-mx3.h> 35#include <mach/iomux-mx3.h>
41#include <mach/board-mx31lilly.h> 36#include <mach/board-mx31lilly.h>
42#include <mach/mmc.h> 37#include <mach/mmc.h>
43#include <mach/mx3fb.h> 38#include <mach/mx3fb.h>
44#include <mach/ipu.h> 39#include <mach/ipu.h>
45 40
41#include "devices-imx31.h"
46#include "devices.h" 42#include "devices.h"
47 43
48/* 44/*
@@ -96,7 +92,7 @@ static unsigned int lilly_db_board_pins[] __initdata = {
96}; 92};
97 93
98/* UART */ 94/* UART */
99static struct imxuart_platform_data uart_pdata __initdata = { 95static const struct imxuart_platform_data uart_pdata __initconst = {
100 .flags = IMXUART_HAVE_RTSCTS, 96 .flags = IMXUART_HAVE_RTSCTS,
101}; 97};
102 98
@@ -217,9 +213,9 @@ void __init mx31lilly_db_init(void)
217 mxc_iomux_setup_multiple_pins(lilly_db_board_pins, 213 mxc_iomux_setup_multiple_pins(lilly_db_board_pins,
218 ARRAY_SIZE(lilly_db_board_pins), 214 ARRAY_SIZE(lilly_db_board_pins),
219 "development board pins"); 215 "development board pins");
220 mxc_register_device(&mxc_uart_device0, &uart_pdata); 216 imx31_add_imx_uart0(&uart_pdata);
221 mxc_register_device(&mxc_uart_device1, &uart_pdata); 217 imx31_add_imx_uart1(&uart_pdata);
222 mxc_register_device(&mxc_uart_device2, &uart_pdata); 218 imx31_add_imx_uart2(&uart_pdata);
223 mxc_register_device(&mxcsdhc_device0, &mmc_pdata); 219 mxc_register_device(&mxcsdhc_device0, &mmc_pdata);
224 mx31lilly_init_fb(); 220 mx31lilly_init_fb();
225} 221}
diff --git a/arch/arm/mach-mx3/mx31lite-db.c b/arch/arm/mach-mx3/mx31lite-db.c
index 5f05bfbec380..7b0e74e275ba 100644
--- a/arch/arm/mach-mx3/mx31lite-db.c
+++ b/arch/arm/mach-mx3/mx31lite-db.c
@@ -18,10 +18,6 @@
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */ 21 */
26 22
27#include <linux/kernel.h> 23#include <linux/kernel.h>
@@ -37,12 +33,11 @@
37 33
38#include <mach/hardware.h> 34#include <mach/hardware.h>
39#include <mach/common.h> 35#include <mach/common.h>
40#include <mach/imx-uart.h>
41#include <mach/iomux-mx3.h> 36#include <mach/iomux-mx3.h>
42#include <mach/board-mx31lite.h> 37#include <mach/board-mx31lite.h>
43#include <mach/mmc.h> 38#include <mach/mmc.h>
44#include <mach/spi.h>
45 39
40#include "devices-imx31.h"
46#include "devices.h" 41#include "devices.h"
47 42
48/* 43/*
@@ -76,7 +71,7 @@ static unsigned int litekit_db_board_pins[] __initdata = {
76}; 71};
77 72
78/* UART */ 73/* UART */
79static struct imxuart_platform_data uart_pdata __initdata = { 74static const struct imxuart_platform_data uart_pdata __initconst = {
80 .flags = IMXUART_HAVE_RTSCTS, 75 .flags = IMXUART_HAVE_RTSCTS,
81}; 76};
82 77
@@ -161,7 +156,7 @@ static int spi_internal_chipselect[] = {
161 MXC_SPI_CS(2), 156 MXC_SPI_CS(2),
162}; 157};
163 158
164static struct spi_imx_master spi0_pdata = { 159static const struct spi_imx_master spi0_pdata __initconst = {
165 .chipselect = spi_internal_chipselect, 160 .chipselect = spi_internal_chipselect,
166 .num_chipselect = ARRAY_SIZE(spi_internal_chipselect), 161 .num_chipselect = ARRAY_SIZE(spi_internal_chipselect),
167}; 162};
@@ -201,9 +196,9 @@ void __init mx31lite_db_init(void)
201 mxc_iomux_setup_multiple_pins(litekit_db_board_pins, 196 mxc_iomux_setup_multiple_pins(litekit_db_board_pins,
202 ARRAY_SIZE(litekit_db_board_pins), 197 ARRAY_SIZE(litekit_db_board_pins),
203 "development board pins"); 198 "development board pins");
204 mxc_register_device(&mxc_uart_device0, &uart_pdata); 199 imx31_add_imx_uart0(&uart_pdata);
205 mxc_register_device(&mxcsdhc_device0, &mmc_pdata); 200 mxc_register_device(&mxcsdhc_device0, &mmc_pdata);
206 mxc_register_device(&mxc_spi_device0, &spi0_pdata); 201 imx31_add_spi_imx0(&spi0_pdata);
207 platform_device_register(&litekit_led_device); 202 platform_device_register(&litekit_led_device);
208 mxc_register_device(&imx_wdt_device0, NULL); 203 mxc_register_device(&imx_wdt_device0, NULL);
209 mxc_register_device(&imx_rtc_device0, NULL); 204 mxc_register_device(&imx_rtc_device0, NULL);
diff --git a/arch/arm/mach-mx3/mx31moboard-devboard.c b/arch/arm/mach-mx3/mx31moboard-devboard.c
index 582299cb2c08..fc395a7a8599 100644
--- a/arch/arm/mach-mx3/mx31moboard-devboard.c
+++ b/arch/arm/mach-mx3/mx31moboard-devboard.c
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 13 */
18 14
19#include <linux/gpio.h> 15#include <linux/gpio.h>
@@ -27,13 +23,13 @@
27#include <linux/usb/otg.h> 23#include <linux/usb/otg.h>
28 24
29#include <mach/common.h> 25#include <mach/common.h>
30#include <mach/imx-uart.h>
31#include <mach/iomux-mx3.h> 26#include <mach/iomux-mx3.h>
32#include <mach/hardware.h> 27#include <mach/hardware.h>
33#include <mach/mmc.h> 28#include <mach/mmc.h>
34#include <mach/mxc_ehci.h> 29#include <mach/mxc_ehci.h>
35#include <mach/ulpi.h> 30#include <mach/ulpi.h>
36 31
32#include "devices-imx31.h"
37#include "devices.h" 33#include "devices.h"
38 34
39static unsigned int devboard_pins[] = { 35static unsigned int devboard_pins[] = {
@@ -56,7 +52,7 @@ static unsigned int devboard_pins[] = {
56 MX31_PIN_RI_DCE1__GPIO2_10, MX31_PIN_DCD_DCE1__GPIO2_11, 52 MX31_PIN_RI_DCE1__GPIO2_10, MX31_PIN_DCD_DCE1__GPIO2_11,
57}; 53};
58 54
59static struct imxuart_platform_data uart_pdata = { 55static const struct imxuart_platform_data uart_pdata __initconst = {
60 .flags = IMXUART_HAVE_RTSCTS, 56 .flags = IMXUART_HAVE_RTSCTS,
61}; 57};
62 58
@@ -230,7 +226,7 @@ void __init mx31moboard_devboard_init(void)
230 mxc_iomux_setup_multiple_pins(devboard_pins, ARRAY_SIZE(devboard_pins), 226 mxc_iomux_setup_multiple_pins(devboard_pins, ARRAY_SIZE(devboard_pins),
231 "devboard"); 227 "devboard");
232 228
233 mxc_register_device(&mxc_uart_device1, &uart_pdata); 229 imx31_add_imx_uart1(&uart_pdata);
234 230
235 mxc_register_device(&mxcsdhc_device1, &sdhc2_pdata); 231 mxc_register_device(&mxcsdhc_device1, &sdhc2_pdata);
236 232
diff --git a/arch/arm/mach-mx3/mx31moboard-marxbot.c b/arch/arm/mach-mx3/mx31moboard-marxbot.c
index 4930f8c27e66..0551eb39d97e 100644
--- a/arch/arm/mach-mx3/mx31moboard-marxbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-marxbot.c
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 13 */
18 14
19#include <linux/delay.h> 15#include <linux/delay.h>
diff --git a/arch/arm/mach-mx3/mx31moboard-smartbot.c b/arch/arm/mach-mx3/mx31moboard-smartbot.c
index 293eea6d9d97..40c3e7564cb6 100644
--- a/arch/arm/mach-mx3/mx31moboard-smartbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-smartbot.c
@@ -10,10 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 13 */
18 14
19#include <linux/delay.h> 15#include <linux/delay.h>
@@ -30,7 +26,6 @@
30 26
31#include <mach/common.h> 27#include <mach/common.h>
32#include <mach/hardware.h> 28#include <mach/hardware.h>
33#include <mach/imx-uart.h>
34#include <mach/iomux-mx3.h> 29#include <mach/iomux-mx3.h>
35#include <mach/board-mx31moboard.h> 30#include <mach/board-mx31moboard.h>
36#include <mach/mxc_ehci.h> 31#include <mach/mxc_ehci.h>
@@ -38,6 +33,7 @@
38 33
39#include <media/soc_camera.h> 34#include <media/soc_camera.h>
40 35
36#include "devices-imx31.h"
41#include "devices.h" 37#include "devices.h"
42 38
43static unsigned int smartbot_pins[] = { 39static unsigned int smartbot_pins[] = {
@@ -59,7 +55,7 @@ static unsigned int smartbot_pins[] = {
59 MX31_PIN_RI_DCE1__GPIO2_10, MX31_PIN_DCD_DCE1__GPIO2_11, 55 MX31_PIN_RI_DCE1__GPIO2_10, MX31_PIN_DCD_DCE1__GPIO2_11,
60}; 56};
61 57
62static struct imxuart_platform_data uart_pdata = { 58static const struct imxuart_platform_data uart_pdata __initconst = {
63 .flags = IMXUART_HAVE_RTSCTS, 59 .flags = IMXUART_HAVE_RTSCTS,
64}; 60};
65 61
@@ -183,8 +179,7 @@ void __init mx31moboard_smartbot_init(int board)
183 mxc_iomux_setup_multiple_pins(smartbot_pins, ARRAY_SIZE(smartbot_pins), 179 mxc_iomux_setup_multiple_pins(smartbot_pins, ARRAY_SIZE(smartbot_pins),
184 "smartbot"); 180 "smartbot");
185 181
186 mxc_register_device(&mxc_uart_device1, &uart_pdata); 182 imx31_add_imx_uart1(&uart_pdata);
187
188 183
189 switch (board) { 184 switch (board) {
190 case MX31SMARTBOT: 185 case MX31SMARTBOT:
diff --git a/arch/arm/mach-mx5/devices.c b/arch/arm/mach-mx5/devices.c
index ede4fcbc7e80..fa118646c99e 100644
--- a/arch/arm/mach-mx5/devices.c
+++ b/arch/arm/mach-mx5/devices.c
@@ -253,7 +253,7 @@ static struct mxc_gpio_port mxc_gpio_ports[] = {
253 }, 253 },
254}; 254};
255 255
256int __init mxc_register_gpios(void) 256int __init imx51_register_gpios(void)
257{ 257{
258 return mxc_gpio_init(mxc_gpio_ports, ARRAY_SIZE(mxc_gpio_ports)); 258 return mxc_gpio_init(mxc_gpio_ports, ARRAY_SIZE(mxc_gpio_ports));
259} 259}
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c
index b7677ef80cc4..2f79722508cf 100644
--- a/arch/arm/mach-mx5/mm.c
+++ b/arch/arm/mach-mx5/mm.c
@@ -65,6 +65,8 @@ void __init mx51_map_io(void)
65 iotable_init(mxc_io_desc, ARRAY_SIZE(mxc_io_desc)); 65 iotable_init(mxc_io_desc, ARRAY_SIZE(mxc_io_desc));
66} 66}
67 67
68int imx51_register_gpios(void);
69
68void __init mx51_init_irq(void) 70void __init mx51_init_irq(void)
69{ 71{
70 unsigned long tzic_addr; 72 unsigned long tzic_addr;
@@ -79,5 +81,6 @@ void __init mx51_init_irq(void)
79 if (!tzic_virt) 81 if (!tzic_virt)
80 panic("unable to map TZIC interrupt controller\n"); 82 panic("unable to map TZIC interrupt controller\n");
81 83
84 imx51_register_gpios();
82 tzic_init_irq(tzic_virt); 85 tzic_init_irq(tzic_virt);
83} 86}
diff --git a/arch/arm/mach-mxc91231/crm_regs.h b/arch/arm/mach-mxc91231/crm_regs.h
index ce4f59058189..b989baccd675 100644
--- a/arch/arm/mach-mxc91231/crm_regs.h
+++ b/arch/arm/mach-mxc91231/crm_regs.h
@@ -11,11 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 */ 14 */
20 15
21#ifndef _ARCH_ARM_MACH_MXC91231_CRM_REGS_H_ 16#ifndef _ARCH_ARM_MACH_MXC91231_CRM_REGS_H_
diff --git a/arch/arm/mach-mxc91231/devices.c b/arch/arm/mach-mxc91231/devices.c
index 353bd977b393..027af4f0d18a 100644
--- a/arch/arm/mach-mxc91231/devices.c
+++ b/arch/arm/mach-mxc91231/devices.c
@@ -135,7 +135,7 @@ static struct mxc_gpio_port mxc_gpio_ports[] = {
135 }, 135 },
136}; 136};
137 137
138int __init mxc_register_gpios(void) 138int __init mxc91231_register_gpios(void)
139{ 139{
140 return mxc_gpio_init(mxc_gpio_ports, ARRAY_SIZE(mxc_gpio_ports)); 140 return mxc_gpio_init(mxc_gpio_ports, ARRAY_SIZE(mxc_gpio_ports));
141} 141}
diff --git a/arch/arm/mach-mxc91231/mm.c b/arch/arm/mach-mxc91231/mm.c
index 6becda3ff331..aeccfd755fee 100644
--- a/arch/arm/mach-mxc91231/mm.c
+++ b/arch/arm/mach-mxc91231/mm.c
@@ -15,11 +15,6 @@
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details. 17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */ 18 */
24 19
25#include <linux/mm.h> 20#include <linux/mm.h>
@@ -88,7 +83,10 @@ void __init mxc91231_map_io(void)
88 iotable_init(mxc_io_desc, ARRAY_SIZE(mxc_io_desc)); 83 iotable_init(mxc_io_desc, ARRAY_SIZE(mxc_io_desc));
89} 84}
90 85
86int mxc91231_register_gpios(void);
87
91void __init mxc91231_init_irq(void) 88void __init mxc91231_init_irq(void)
92{ 89{
90 mxc91231_register_gpios();
93 mxc_init_irq(MXC91231_IO_ADDRESS(MXC91231_AVIC_BASE_ADDR)); 91 mxc_init_irq(MXC91231_IO_ADDRESS(MXC91231_AVIC_BASE_ADDR));
94} 92}
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index e7d629b3c76a..f474a80b8867 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -137,9 +137,7 @@ static void ads7846_dev_init(void)
137 } 137 }
138 138
139 gpio_direction_input(ts_gpio); 139 gpio_direction_input(ts_gpio);
140 140 gpio_set_debounce(ts_gpio, 310);
141 omap_set_gpio_debounce(ts_gpio, 1);
142 omap_set_gpio_debounce_time(ts_gpio, 0xa);
143} 141}
144 142
145static int ads7846_get_pendown_state(void) 143static int ads7846_get_pendown_state(void)
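This and the following OMAP board files drop the OMAP-private debounce helpers in favour of the generic gpiolib call, which takes a time in microseconds: the old raw value 0xa in the roughly 31 us-per-step OMAP debounce register corresponds to 0xa * 31 = 310 us. The resulting idiom, using the names from this file:

	gpio_direction_input(ts_gpio);
	gpio_set_debounce(ts_gpio, 310);	/* microseconds; was the raw value 0xa */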
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 5fcb52e71298..fefd7e6e9779 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -209,8 +209,7 @@ static void ads7846_dev_init(void)
209 } 209 }
210 210
211 gpio_direction_input(ts_gpio); 211 gpio_direction_input(ts_gpio);
212 omap_set_gpio_debounce(ts_gpio, 1); 212 gpio_set_debounce(ts_gpio, 310);
213 omap_set_gpio_debounce_time(ts_gpio, 0xa);
214} 213}
215 214
216static int ads7846_get_pendown_state(void) 215static int ads7846_get_pendown_state(void)
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 81bba194b030..b95261013812 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -579,9 +579,7 @@ static void ads7846_dev_init(void)
579 printk(KERN_ERR "can't get ads7846 pen down GPIO\n"); 579 printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
580 580
581 gpio_direction_input(OMAP3_EVM_TS_GPIO); 581 gpio_direction_input(OMAP3_EVM_TS_GPIO);
582 582 gpio_set_debounce(OMAP3_EVM_TS_GPIO, 310);
583 omap_set_gpio_debounce(OMAP3_EVM_TS_GPIO, 1);
584 omap_set_gpio_debounce_time(OMAP3_EVM_TS_GPIO, 0xa);
585} 583}
586 584
587static int ads7846_get_pendown_state(void) 585static int ads7846_get_pendown_state(void)
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 395d049bf010..db06dc910ba7 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -130,8 +130,8 @@ static struct platform_device pandora_keys_gpio = {
130static void __init pandora_keys_gpio_init(void) 130static void __init pandora_keys_gpio_init(void)
131{ 131{
132 /* set debounce time for GPIO banks 4 and 6 */ 132 /* set debounce time for GPIO banks 4 and 6 */
133 omap_set_gpio_debounce_time(32 * 3, GPIO_DEBOUNCE_TIME); 133 gpio_set_debounce(32 * 3, GPIO_DEBOUNCE_TIME);
134 omap_set_gpio_debounce_time(32 * 5, GPIO_DEBOUNCE_TIME); 134 gpio_set_debounce(32 * 5, GPIO_DEBOUNCE_TIME);
135} 135}
136 136
137static int board_keymap[] = { 137static int board_keymap[] = {
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 2504d41f923e..2f5f8233dd5b 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -328,8 +328,7 @@ static void __init omap3_ads7846_init(void)
328 } 328 }
329 329
330 gpio_direction_input(OMAP3_TS_GPIO); 330 gpio_direction_input(OMAP3_TS_GPIO);
331 omap_set_gpio_debounce(OMAP3_TS_GPIO, 1); 331 gpio_set_debounce(OMAP3_TS_GPIO, 310);
332 omap_set_gpio_debounce_time(OMAP3_TS_GPIO, 0xa);
333} 332}
334 333
335static struct ads7846_platform_data ads7846_config = { 334static struct ads7846_platform_data ads7846_config = {
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index 685f34a9634b..fe0de1698edc 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -240,22 +240,23 @@ error_fail:
240 240
241#define ORION_BLINK_HALF_PERIOD 100 /* ms */ 241#define ORION_BLINK_HALF_PERIOD 100 /* ms */
242 242
243static int dns323_gpio_blink_set(unsigned gpio, 243static int dns323_gpio_blink_set(unsigned gpio, int state,
244 unsigned long *delay_on, unsigned long *delay_off) 244 unsigned long *delay_on, unsigned long *delay_off)
245{ 245{
246 static int value = 0;
247 246
248 if (!*delay_on && !*delay_off) 247 if (delay_on && delay_off && !*delay_on && !*delay_off)
249 *delay_on = *delay_off = ORION_BLINK_HALF_PERIOD; 248 *delay_on = *delay_off = ORION_BLINK_HALF_PERIOD;
250 249
251 if (ORION_BLINK_HALF_PERIOD == *delay_on 250 switch(state) {
252 && ORION_BLINK_HALF_PERIOD == *delay_off) { 251 case GPIO_LED_NO_BLINK_LOW:
253 value = !value; 252 case GPIO_LED_NO_BLINK_HIGH:
254 orion_gpio_set_blink(gpio, value); 253 orion_gpio_set_blink(gpio, 0);
255 return 0; 254 gpio_set_value(gpio, state);
255 break;
256 case GPIO_LED_BLINK:
257 orion_gpio_set_blink(gpio, 1);
256 } 258 }
257 259 return 0;
258 return -EINVAL;
259} 260}
260 261
261static struct gpio_led dns323_leds[] = { 262static struct gpio_led dns323_leds[] = {
@@ -263,6 +264,7 @@ static struct gpio_led dns323_leds[] = {
263 .name = "power:blue", 264 .name = "power:blue",
264 .gpio = DNS323_GPIO_LED_POWER2, 265 .gpio = DNS323_GPIO_LED_POWER2,
265 .default_trigger = "timer", 266 .default_trigger = "timer",
267 .active_low = 1,
266 }, { 268 }, {
267 .name = "right:amber", 269 .name = "right:amber",
268 .gpio = DNS323_GPIO_LED_RIGHT_AMBER, 270 .gpio = DNS323_GPIO_LED_RIGHT_AMBER,
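dns323_gpio_blink_set() now follows the three-state blink_set contract of the gpio_led framework (GPIO_LED_NO_BLINK_LOW, GPIO_LED_NO_BLINK_HIGH, GPIO_LED_BLINK), so the cached toggle value is no longer needed and the power LED gains an explicit active_low. A sketch of how the callback is plugged into the LED platform data; the surrounding struct is not part of this hunk:

	static struct gpio_led_platform_data dns323_led_data = {
		.num_leds	= ARRAY_SIZE(dns323_leds),
		.leds		= dns323_leds,
		.gpio_blink_set	= dns323_gpio_blink_set,
	};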
diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c
index 45799c608d8f..9e39faa283b9 100644
--- a/arch/arm/mach-s3c2440/mach-gta02.c
+++ b/arch/arm/mach-s3c2440/mach-gta02.c
@@ -49,7 +49,6 @@
49#include <linux/io.h> 49#include <linux/io.h>
50 50
51#include <linux/i2c.h> 51#include <linux/i2c.h>
52#include <linux/backlight.h>
53#include <linux/regulator/machine.h> 52#include <linux/regulator/machine.h>
54 53
55#include <linux/mfd/pcf50633/core.h> 54#include <linux/mfd/pcf50633/core.h>
@@ -57,6 +56,7 @@
57#include <linux/mfd/pcf50633/adc.h> 56#include <linux/mfd/pcf50633/adc.h>
58#include <linux/mfd/pcf50633/gpio.h> 57#include <linux/mfd/pcf50633/gpio.h>
59#include <linux/mfd/pcf50633/pmic.h> 58#include <linux/mfd/pcf50633/pmic.h>
59#include <linux/mfd/pcf50633/backlight.h>
60 60
61#include <asm/mach/arch.h> 61#include <asm/mach/arch.h>
62#include <asm/mach/map.h> 62#include <asm/mach/map.h>
@@ -254,6 +254,12 @@ static char *gta02_batteries[] = {
254 "battery", 254 "battery",
255}; 255};
256 256
257static struct pcf50633_bl_platform_data gta02_backlight_data = {
258 .default_brightness = 0x3f,
259 .default_brightness_limit = 0,
260 .ramp_time = 5,
261};
262
257struct pcf50633_platform_data gta02_pcf_pdata = { 263struct pcf50633_platform_data gta02_pcf_pdata = {
258 .resumers = { 264 .resumers = {
259 [0] = PCF50633_INT1_USBINS | 265 [0] = PCF50633_INT1_USBINS |
@@ -271,6 +277,8 @@ struct pcf50633_platform_data gta02_pcf_pdata = {
271 277
272 .charger_reference_current_ma = 1000, 278 .charger_reference_current_ma = 1000,
273 279
280 .backlight_data = &gta02_backlight_data,
281
274 .reg_init_data = { 282 .reg_init_data = {
275 [PCF50633_REGULATOR_AUTO] = { 283 [PCF50633_REGULATOR_AUTO] = {
276 .constraints = { 284 .constraints = {
@@ -478,71 +486,6 @@ static struct s3c2410_udc_mach_info gta02_udc_cfg = {
478 486
479}; 487};
480 488
481
482
483static void gta02_bl_set_intensity(int intensity)
484{
485 struct pcf50633 *pcf = gta02_pcf;
486 int old_intensity = pcf50633_reg_read(pcf, PCF50633_REG_LEDOUT);
487
488 /* We map 8-bit intensity to 6-bit intensity in hardware. */
489 intensity >>= 2;
490
491 /*
492 * This can happen during, eg, print of panic on blanked console,
493 * but we can't service i2c without interrupts active, so abort.
494 */
495 if (in_atomic()) {
496 printk(KERN_ERR "gta02_bl_set_intensity called while atomic\n");
497 return;
498 }
499
500 old_intensity = pcf50633_reg_read(pcf, PCF50633_REG_LEDOUT);
501 if (intensity == old_intensity)
502 return;
503
504 /* We can't do this anywhere else. */
505 pcf50633_reg_write(pcf, PCF50633_REG_LEDDIM, 5);
506
507 if (!(pcf50633_reg_read(pcf, PCF50633_REG_LEDENA) & 3))
508 old_intensity = 0;
509
510 /*
511 * The PCF50633 cannot handle LEDOUT = 0 (datasheet p60)
512 * if seen, you have to re-enable the LED unit.
513 */
514 if (!intensity || !old_intensity)
515 pcf50633_reg_write(pcf, PCF50633_REG_LEDENA, 0);
516
517 /* Illegal to set LEDOUT to 0. */
518 if (!intensity)
519 pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_LEDOUT, 0x3f, 2);
520 else
521 pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_LEDOUT, 0x3f,
522 intensity);
523
524 if (intensity)
525 pcf50633_reg_write(pcf, PCF50633_REG_LEDENA, 2);
526
527}
528
529static struct generic_bl_info gta02_bl_info = {
530 .name = "gta02-bl",
531 .max_intensity = 0xff,
532 .default_intensity = 0xff,
533 .set_bl_intensity = gta02_bl_set_intensity,
534};
535
536static struct platform_device gta02_bl_dev = {
537 .name = "generic-bl",
538 .id = 1,
539 .dev = {
540 .platform_data = &gta02_bl_info,
541 },
542};
543
544
545
546/* USB */ 489/* USB */
547static struct s3c2410_hcd_info gta02_usb_info __initdata = { 490static struct s3c2410_hcd_info gta02_usb_info __initdata = {
548 .port[0] = { 491 .port[0] = {
@@ -579,7 +522,6 @@ static struct platform_device *gta02_devices[] __initdata = {
579/* These guys DO need to be children of PMU. */ 522/* These guys DO need to be children of PMU. */
580 523
581static struct platform_device *gta02_devices_pmu_children[] = { 524static struct platform_device *gta02_devices_pmu_children[] = {
582 &gta02_bl_dev,
583}; 525};
584 526
585 527
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index 7a4138beb665..fbd85a9b7bbf 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -259,6 +259,12 @@ static struct clk init_clocks[] = {
259 .enable = s3c64xx_hclk_ctrl, 259 .enable = s3c64xx_hclk_ctrl,
260 .ctrlbit = S3C_CLKCON_HCLK_HSMMC2, 260 .ctrlbit = S3C_CLKCON_HCLK_HSMMC2,
261 }, { 261 }, {
262 .name = "otg",
263 .id = -1,
264 .parent = &clk_h,
265 .enable = s3c64xx_hclk_ctrl,
266 .ctrlbit = S3C_CLKCON_HCLK_USB,
267 }, {
262 .name = "timers", 268 .name = "timers",
263 .id = -1, 269 .id = -1,
264 .parent = &clk_p, 270 .parent = &clk_p,
diff --git a/arch/arm/mach-s5p6440/include/mach/irqs.h b/arch/arm/mach-s5p6440/include/mach/irqs.h
index a4b9b40d18f2..911854d9ad42 100644
--- a/arch/arm/mach-s5p6440/include/mach/irqs.h
+++ b/arch/arm/mach-s5p6440/include/mach/irqs.h
@@ -72,7 +72,14 @@
72#define S5P_IRQ_EINT_BASE (S5P_IRQ_VIC1(31) + 6) 72#define S5P_IRQ_EINT_BASE (S5P_IRQ_VIC1(31) + 6)
73 73
74#define S5P_EINT(x) ((x) + S5P_IRQ_EINT_BASE) 74#define S5P_EINT(x) ((x) + S5P_IRQ_EINT_BASE)
75#define IRQ_EINT(x) S5P_EINT(x) 75
76#define S5P_EINT_BASE1 (S5P_IRQ_EINT_BASE)
77/*
78 * S5P6440 has 0-15 external interrupts in group 0. Only these can be used
 79 * to wake up from sleep. If a request is mistakenly beyond this range, the large
 80 * return value for the irq number should be an indication that something is amiss.
81 */
82#define S5P_EINT_BASE2 (0xf0000000)
76 83
77/* 84/*
78 * Next the external interrupt groups. These are similar to the IRQ_EINT(x) 85 * Next the external interrupt groups. These are similar to the IRQ_EINT(x)
diff --git a/arch/arm/mach-s5p6442/include/mach/irqs.h b/arch/arm/mach-s5p6442/include/mach/irqs.h
index da665809f6e4..02c23749c023 100644
--- a/arch/arm/mach-s5p6442/include/mach/irqs.h
+++ b/arch/arm/mach-s5p6442/include/mach/irqs.h
@@ -77,8 +77,9 @@
77 77
78#define S5P_IRQ_EINT_BASE (IRQ_VIC_END + 1) 78#define S5P_IRQ_EINT_BASE (IRQ_VIC_END + 1)
79 79
80#define IRQ_EINT(x) ((x) < 16 ? S5P_IRQ_VIC0(x) : \ 80#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0))
81 (S5P_IRQ_EINT_BASE + (x)-16)) 81#define S5P_EINT_BASE2 (S5P_IRQ_EINT_BASE)
82
82/* Set the default NR_IRQS */ 83/* Set the default NR_IRQS */
83 84
84#define NR_IRQS (IRQ_EINT(31) + 1) 85#define NR_IRQS (IRQ_EINT(31) + 1)
diff --git a/arch/arm/mach-s5pc100/include/mach/irqs.h b/arch/arm/mach-s5pc100/include/mach/irqs.h
index 15066df3ced9..28aa551dc3a8 100644
--- a/arch/arm/mach-s5pc100/include/mach/irqs.h
+++ b/arch/arm/mach-s5pc100/include/mach/irqs.h
@@ -100,9 +100,6 @@
100#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0)) 100#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0))
101#define S5P_EINT_BASE2 (IRQ_VIC_END + 1) 101#define S5P_EINT_BASE2 (IRQ_VIC_END + 1)
102 102
103#define IRQ_EINT(x) ((x) < 16 ? S5P_IRQ_VIC0(x) : \
104 (S5P_EINT_BASE2 + (x) - 16))
105
106#define S3C_IRQ_GPIO_BASE (IRQ_EINT(31) + 1) 103#define S3C_IRQ_GPIO_BASE (IRQ_EINT(31) + 1)
107#define S3C_IRQ_GPIO(x) (S3C_IRQ_GPIO_BASE + (x)) 104#define S3C_IRQ_GPIO(x) (S3C_IRQ_GPIO_BASE + (x))
108 105
diff --git a/arch/arm/mach-s5pc100/include/mach/regs-gpio.h b/arch/arm/mach-s5pc100/include/mach/regs-gpio.h
index 763edebdd577..dd6295e1251d 100644
--- a/arch/arm/mach-s5pc100/include/mach/regs-gpio.h
+++ b/arch/arm/mach-s5pc100/include/mach/regs-gpio.h
@@ -60,12 +60,9 @@
60#define S5PC100EINT30PEND (S5P_VA_GPIO + 0xF40) 60#define S5PC100EINT30PEND (S5P_VA_GPIO + 0xF40)
61#define S5P_EINT_PEND(x) (S5PC100EINT30PEND + ((x) * 0x4)) 61#define S5P_EINT_PEND(x) (S5PC100EINT30PEND + ((x) * 0x4))
62 62
63#define eint_offset(irq) ((irq) < IRQ_EINT16_31 ? ((irq) - IRQ_EINT(0)) : \ 63#define EINT_REG_NR(x) (EINT_OFFSET(x) >> 3)
64 (((irq) - S5P_EINT_BASE2)))
65 64
66#define EINT_REG_NR(x) (eint_offset(x) >> 3) 65#define eint_irq_to_bit(irq) (1 << (EINT_OFFSET(irq) & 0x7))
67
68#define eint_irq_to_bit(irq) (1 << (eint_offset(irq) & 0x7))
69 66
70/* values for S5P_EXTINT0 */ 67/* values for S5P_EXTINT0 */
71#define S5P_EXTINT_LOWLEV (0x00) 68#define S5P_EXTINT_LOWLEV (0x00)
diff --git a/arch/arm/mach-s5pv210/include/mach/irqs.h b/arch/arm/mach-s5pv210/include/mach/irqs.h
index 92fc6c7fc064..96895378ea27 100644
--- a/arch/arm/mach-s5pv210/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv210/include/mach/irqs.h
@@ -118,22 +118,12 @@
118#define IRQ_MDNIE3 S5P_IRQ_VIC3(8) 118#define IRQ_MDNIE3 S5P_IRQ_VIC3(8)
119#define IRQ_VIC_END S5P_IRQ_VIC3(31) 119#define IRQ_VIC_END S5P_IRQ_VIC3(31)
120 120
121#define S5P_EINT_16_31_BASE (IRQ_VIC_END + 1) 121#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0))
122 122#define S5P_EINT_BASE2 (IRQ_VIC_END + 1)
123#define EINT_MODE S3C_GPIO_SFN(0xf)
124
125#define IRQ_EINT(x) ((x) < 16 ? ((x) + S5P_IRQ_VIC0(0)) \
126 : ((x) + S5P_EINT_16_31_BASE))
127 123
128/* Set the default NR_IRQS */ 124/* Set the default NR_IRQS */
129
130#define NR_IRQS (IRQ_EINT(31) + 1) 125#define NR_IRQS (IRQ_EINT(31) + 1)
131 126
132#define EINT_GPIO_0(x) S5PV210_GPH0(x)
133#define EINT_GPIO_1(x) S5PV210_GPH1(x)
134#define EINT_GPIO_2(x) S5PV210_GPH2(x)
135#define EINT_GPIO_3(x) S5PV210_GPH3(x)
136
137/* Compatibility */ 127/* Compatibility */
138#define IRQ_LCD_FIFO IRQ_LCD0 128#define IRQ_LCD_FIFO IRQ_LCD0
139#define IRQ_LCD_VSYNC IRQ_LCD1 129#define IRQ_LCD_VSYNC IRQ_LCD1
diff --git a/arch/arm/mach-s5pv210/include/mach/regs-gpio.h b/arch/arm/mach-s5pv210/include/mach/regs-gpio.h
index 6d068091c36c..49e029b4978a 100644
--- a/arch/arm/mach-s5pv210/include/mach/regs-gpio.h
+++ b/arch/arm/mach-s5pv210/include/mach/regs-gpio.h
@@ -27,12 +27,9 @@
27#define S5PV210_EINT30PEND (S5P_VA_GPIO + 0xF40) 27#define S5PV210_EINT30PEND (S5P_VA_GPIO + 0xF40)
28#define S5P_EINT_PEND(x) (S5PV210_EINT30PEND + ((x) * 0x4)) 28#define S5P_EINT_PEND(x) (S5PV210_EINT30PEND + ((x) * 0x4))
29 29
30#define eint_offset(irq) ((irq) < IRQ_EINT16_31 ? ((irq) - IRQ_EINT(0)) \ 30#define EINT_REG_NR(x) (EINT_OFFSET(x) >> 3)
31 : ((irq) - S5P_EINT_16_31_BASE))
32 31
33#define EINT_REG_NR(x) (eint_offset(x) >> 3) 32#define eint_irq_to_bit(irq) (1 << (EINT_OFFSET(irq) & 0x7))
34
35#define eint_irq_to_bit(irq) (1 << (eint_offset(irq) & 0x7))
36 33
37/* values for S5P_EXTINT0 */ 34/* values for S5P_EXTINT0 */
38#define S5P_EXTINT_LOWLEV (0x00) 35#define S5P_EXTINT_LOWLEV (0x00)
@@ -41,4 +38,11 @@
41#define S5P_EXTINT_RISEEDGE (0x03) 38#define S5P_EXTINT_RISEEDGE (0x03)
42#define S5P_EXTINT_BOTHEDGE (0x04) 39#define S5P_EXTINT_BOTHEDGE (0x04)
43 40
41#define EINT_MODE S3C_GPIO_SFN(0xf)
42
43#define EINT_GPIO_0(x) S5PV210_GPH0(x)
44#define EINT_GPIO_1(x) S5PV210_GPH1(x)
45#define EINT_GPIO_2(x) S5PV210_GPH2(x)
46#define EINT_GPIO_3(x) S5PV210_GPH3(x)
47
44#endif /* __ASM_ARCH_REGS_GPIO_H */ 48#endif /* __ASM_ARCH_REGS_GPIO_H */
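The per-SoC IRQ_EINT()/eint_offset() macros removed in the S5P headers above are replaced by common plat-s5p definitions built on S5P_EINT_BASE1/S5P_EINT_BASE2 and EINT_OFFSET(). A sketch of the assumed common definitions (they live in plat-s5p and are not part of this diff):

	#define IRQ_EINT(x)	((x) < 16 ? ((x) + S5P_EINT_BASE1) \
					  : ((x) - 16 + S5P_EINT_BASE2))

	#define EINT_OFFSET(irq)	((irq) < S5P_EINT_BASE2 ? \
					 ((irq) - S5P_EINT_BASE1) : \
					 ((irq) - S5P_EINT_BASE2 + 16))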
diff --git a/arch/arm/mach-u300/i2c.c b/arch/arm/mach-u300/i2c.c
index c73ed06b6065..f0394baa11fa 100644
--- a/arch/arm/mach-u300/i2c.c
+++ b/arch/arm/mach-u300/i2c.c
@@ -9,7 +9,7 @@
9 */ 9 */
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/i2c.h> 11#include <linux/i2c.h>
12#include <linux/mfd/ab3100.h> 12#include <linux/mfd/abx500.h>
13#include <linux/regulator/machine.h> 13#include <linux/regulator/machine.h>
14#include <linux/amba/bus.h> 14#include <linux/amba/bus.h>
15#include <mach/irqs.h> 15#include <mach/irqs.h>
@@ -46,6 +46,7 @@
46/* BUCK SLEEP 0xAC: 1.05V, Not used, SLEEP_A and B, Not used */ 46/* BUCK SLEEP 0xAC: 1.05V, Not used, SLEEP_A and B, Not used */
47#define BUCK_SLEEP_SETTING 0xAC 47#define BUCK_SLEEP_SETTING 0xAC
48 48
49#ifdef CONFIG_AB3100_CORE
49static struct regulator_consumer_supply supply_ldo_c[] = { 50static struct regulator_consumer_supply supply_ldo_c[] = {
50 { 51 {
51 .dev_name = "ab3100-codec", 52 .dev_name = "ab3100-codec",
@@ -253,14 +254,68 @@ static struct ab3100_platform_data ab3100_plf_data = {
253 LDO_D_SETTING, 254 LDO_D_SETTING,
254 }, 255 },
255}; 256};
257#endif
258
259#ifdef CONFIG_AB3550_CORE
260static struct abx500_init_settings ab3550_init_settings[] = {
261 {
262 .bank = 0,
263 .reg = AB3550_IMR1,
264 .setting = 0xff
265 },
266 {
267 .bank = 0,
268 .reg = AB3550_IMR2,
269 .setting = 0xff
270 },
271 {
272 .bank = 0,
273 .reg = AB3550_IMR3,
274 .setting = 0xff
275 },
276 {
277 .bank = 0,
278 .reg = AB3550_IMR4,
279 .setting = 0xff
280 },
281 {
282 .bank = 0,
283 .reg = AB3550_IMR5,
284 /* The two most significant bits are not used */
285 .setting = 0x3f
286 },
287};
288
289static struct ab3550_platform_data ab3550_plf_data = {
290 .irq = {
291 .base = IRQ_AB3550_BASE,
292 .count = (IRQ_AB3550_END - IRQ_AB3550_BASE + 1),
293 },
294 .dev_data = {
295 },
296 .init_settings = ab3550_init_settings,
297 .init_settings_sz = ARRAY_SIZE(ab3550_init_settings),
298};
299#endif
256 300
257static struct i2c_board_info __initdata bus0_i2c_board_info[] = { 301static struct i2c_board_info __initdata bus0_i2c_board_info[] = {
302#if defined(CONFIG_AB3550_CORE)
303 {
304 .type = "ab3550",
305 .addr = 0x4A,
306 .irq = IRQ_U300_IRQ0_EXT,
307 .platform_data = &ab3550_plf_data,
308 },
309#elif defined(CONFIG_AB3100_CORE)
258 { 310 {
259 .type = "ab3100", 311 .type = "ab3100",
260 .addr = 0x48, 312 .addr = 0x48,
261 .irq = IRQ_U300_IRQ0_EXT, 313 .irq = IRQ_U300_IRQ0_EXT,
262 .platform_data = &ab3100_plf_data, 314 .platform_data = &ab3100_plf_data,
263 }, 315 },
316#else
317 { },
318#endif
264}; 319};
265 320
266static struct i2c_board_info __initdata bus1_i2c_board_info[] = { 321static struct i2c_board_info __initdata bus1_i2c_board_info[] = {
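Only one of the AB3100/AB3550 entries is compiled into bus0_i2c_board_info[], depending on the Kconfig selection; the table itself is still handed to the I2C core in the usual way, for example (bus number 0 assumed for this sketch):

	i2c_register_board_info(0, bus0_i2c_board_info,
				ARRAY_SIZE(bus0_i2c_board_info));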
diff --git a/arch/arm/mach-u300/include/mach/irqs.h b/arch/arm/mach-u300/include/mach/irqs.h
index a6867b12773e..09b1b28fa8fd 100644
--- a/arch/arm/mach-u300/include/mach/irqs.h
+++ b/arch/arm/mach-u300/include/mach/irqs.h
@@ -109,6 +109,13 @@
109#define U300_NR_IRQS 48 109#define U300_NR_IRQS 48
110#endif 110#endif
111 111
112#ifdef CONFIG_AB3550_CORE
113#define IRQ_AB3550_BASE (U300_NR_IRQS)
114#define IRQ_AB3550_END (IRQ_AB3550_BASE + 37)
115
116#define NR_IRQS (IRQ_AB3550_END + 1)
117#else
112#define NR_IRQS U300_NR_IRQS 118#define NR_IRQS U300_NR_IRQS
119#endif
113 120
114#endif 121#endif
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 072196c57263..bb8d7b771817 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -50,7 +50,7 @@ struct pl022_config_chip ab4500_chip_info = {
50 50
51static struct spi_board_info u8500_spi_devices[] = { 51static struct spi_board_info u8500_spi_devices[] = {
52 { 52 {
53 .modalias = "ab4500", 53 .modalias = "ab8500",
54 .controller_data = &ab4500_chip_info, 54 .controller_data = &ab4500_chip_info,
55 .max_speed_hz = 12000000, 55 .max_speed_hz = 12000000,
56 .bus_num = 0, 56 .bus_num = 0,
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index 1b2c9890e8b4..6544855af2f1 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -411,7 +411,7 @@ static struct clk_lookup u8500_common_clks[] = {
411 CLK(apetraceclk, "apetrace", NULL), 411 CLK(apetraceclk, "apetrace", NULL),
412 CLK(mcdeclk, "mcde", NULL), 412 CLK(mcdeclk, "mcde", NULL),
413 CLK(ipi2clk, "ipi2", NULL), 413 CLK(ipi2clk, "ipi2", NULL),
414 CLK(dmaclk, "dma40", NULL), 414 CLK(dmaclk, "dma40.0", NULL),
415 CLK(b2r2clk, "b2r2", NULL), 415 CLK(b2r2clk, "b2r2", NULL),
416 CLK(tvclk, "tv", NULL), 416 CLK(tvclk, "tv", NULL),
417}; 417};
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index d04299f3b6b5..f21c444edd99 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -32,6 +32,7 @@ static struct platform_device *platform_devs[] __initdata = {
32 &u8500_gpio_devs[6], 32 &u8500_gpio_devs[6],
33 &u8500_gpio_devs[7], 33 &u8500_gpio_devs[7],
34 &u8500_gpio_devs[8], 34 &u8500_gpio_devs[8],
35 &u8500_dma40_device,
35}; 36};
36 37
37/* minimum static i/o mapping required to boot U8500 platforms */ 38/* minimum static i/o mapping required to boot U8500 platforms */
@@ -71,6 +72,9 @@ void __init u8500_init_devices(void)
71{ 72{
72 ux500_init_devices(); 73 ux500_init_devices();
73 74
75 if (cpu_is_u8500ed())
76 dma40_u8500ed_fixup();
77
74 /* Register the platform devices */ 78 /* Register the platform devices */
75 platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs)); 79 platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
76 80
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
index 20334236afce..822903421943 100644
--- a/arch/arm/mach-ux500/devices-db8500.c
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -12,9 +12,13 @@
12#include <linux/gpio.h> 12#include <linux/gpio.h>
13#include <linux/amba/bus.h> 13#include <linux/amba/bus.h>
14 14
15#include <plat/ste_dma40.h>
16
15#include <mach/hardware.h> 17#include <mach/hardware.h>
16#include <mach/setup.h> 18#include <mach/setup.h>
17 19
20#include "ste-dma40-db8500.h"
21
18static struct nmk_gpio_platform_data u8500_gpio_data[] = { 22static struct nmk_gpio_platform_data u8500_gpio_data[] = {
19 GPIO_DATA("GPIO-0-31", 0), 23 GPIO_DATA("GPIO-0-31", 0),
20 GPIO_DATA("GPIO-32-63", 32), /* 37..63 not routed to pin */ 24 GPIO_DATA("GPIO-32-63", 32), /* 37..63 not routed to pin */
@@ -105,3 +109,108 @@ struct platform_device u8500_i2c4_device = {
105 .resource = u8500_i2c4_resources, 109 .resource = u8500_i2c4_resources,
106 .num_resources = ARRAY_SIZE(u8500_i2c4_resources), 110 .num_resources = ARRAY_SIZE(u8500_i2c4_resources),
107}; 111};
112
113static struct resource dma40_resources[] = {
114 [0] = {
115 .start = U8500_DMA_BASE,
116 .end = U8500_DMA_BASE + SZ_4K - 1,
117 .flags = IORESOURCE_MEM,
118 .name = "base",
119 },
120 [1] = {
121 .start = U8500_DMA_LCPA_BASE,
122 .end = U8500_DMA_LCPA_BASE + SZ_4K - 1,
123 .flags = IORESOURCE_MEM,
124 .name = "lcpa",
125 },
126 [2] = {
127 .start = U8500_DMA_LCLA_BASE,
128 .end = U8500_DMA_LCLA_BASE + 16 * 1024 - 1,
129 .flags = IORESOURCE_MEM,
130 .name = "lcla",
131 },
132 [3] = {
133 .start = IRQ_DMA,
134 .end = IRQ_DMA,
135 .flags = IORESOURCE_IRQ}
136};
137
 138/* Default configuration for physical memcpy */
139struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
140 .channel_type = (STEDMA40_CHANNEL_IN_PHY_MODE |
141 STEDMA40_LOW_PRIORITY_CHANNEL |
142 STEDMA40_PCHAN_BASIC_MODE),
143 .dir = STEDMA40_MEM_TO_MEM,
144
145 .src_info.endianess = STEDMA40_LITTLE_ENDIAN,
146 .src_info.data_width = STEDMA40_BYTE_WIDTH,
147 .src_info.psize = STEDMA40_PSIZE_PHY_1,
148
149 .dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
150 .dst_info.data_width = STEDMA40_BYTE_WIDTH,
151 .dst_info.psize = STEDMA40_PSIZE_PHY_1,
152
153};
154/* Default configuration for logical memcpy */
155struct stedma40_chan_cfg dma40_memcpy_conf_log = {
156 .channel_type = (STEDMA40_CHANNEL_IN_LOG_MODE |
157 STEDMA40_LOW_PRIORITY_CHANNEL |
158 STEDMA40_LCHAN_SRC_LOG_DST_LOG |
159 STEDMA40_NO_TIM_FOR_LINK),
160 .dir = STEDMA40_MEM_TO_MEM,
161
162 .src_info.endianess = STEDMA40_LITTLE_ENDIAN,
163 .src_info.data_width = STEDMA40_BYTE_WIDTH,
164 .src_info.psize = STEDMA40_PSIZE_LOG_1,
165
166 .dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
167 .dst_info.data_width = STEDMA40_BYTE_WIDTH,
168 .dst_info.psize = STEDMA40_PSIZE_LOG_1,
169
170};
171
172/*
173 * Mapping between destination event lines and physical device address.
 174 * The event line is tied to a device and therefore the address is constant.
175 */
176static const dma_addr_t dma40_tx_map[STEDMA40_NR_DEV];
177
178/* Mapping between source event lines and physical device address */
179static const dma_addr_t dma40_rx_map[STEDMA40_NR_DEV];
180
181/* Reserved event lines for memcpy only */
182static int dma40_memcpy_event[] = {
183 STEDMA40_MEMCPY_TX_1,
184 STEDMA40_MEMCPY_TX_2,
185 STEDMA40_MEMCPY_TX_3,
186 STEDMA40_MEMCPY_TX_4,
187};
188
189static struct stedma40_platform_data dma40_plat_data = {
190 .dev_len = STEDMA40_NR_DEV,
191 .dev_rx = dma40_rx_map,
192 .dev_tx = dma40_tx_map,
193 .memcpy = dma40_memcpy_event,
194 .memcpy_len = ARRAY_SIZE(dma40_memcpy_event),
195 .memcpy_conf_phy = &dma40_memcpy_conf_phy,
196 .memcpy_conf_log = &dma40_memcpy_conf_log,
197 .llis_per_log = 8,
198};
199
200struct platform_device u8500_dma40_device = {
201 .dev = {
202 .platform_data = &dma40_plat_data,
203 },
204 .name = "dma40",
205 .id = 0,
206 .num_resources = ARRAY_SIZE(dma40_resources),
207 .resource = dma40_resources
208};
209
210void dma40_u8500ed_fixup(void)
211{
212 dma40_plat_data.memcpy = NULL;
213 dma40_plat_data.memcpy_len = 0;
214 dma40_resources[0].start = U8500_DMA_BASE_ED;
215 dma40_resources[0].end = U8500_DMA_BASE_ED + SZ_4K - 1;
216}
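The DMA40 resources are given names ("base", "lcpa", "lcla") so the driver can look them up independently of their order, and dma40_u8500ed_fixup() swaps in the ED silicon base address and disables the memcpy event lines when u8500_init_devices() detects ED hardware (see cpu-db8500.c above). A hedged sketch of the named-resource lookup the dma40 driver presumably performs:

	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
	if (!res)
		return -EINVAL;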
diff --git a/arch/arm/mach-ux500/include/mach/db8500-regs.h b/arch/arm/mach-ux500/include/mach/db8500-regs.h
index 9169e1e382a3..85fc6a80b386 100644
--- a/arch/arm/mach-ux500/include/mach/db8500-regs.h
+++ b/arch/arm/mach-ux500/include/mach/db8500-regs.h
@@ -7,6 +7,18 @@
7#ifndef __MACH_DB8500_REGS_H 7#ifndef __MACH_DB8500_REGS_H
8#define __MACH_DB8500_REGS_H 8#define __MACH_DB8500_REGS_H
9 9
10/* Base address and bank offsets for ESRAM */
11#define U8500_ESRAM_BASE 0x40000000
12#define U8500_ESRAM_BANK_SIZE 0x00020000
13#define U8500_ESRAM_BANK0 U8500_ESRAM_BASE
14#define U8500_ESRAM_BANK1 (U8500_ESRAM_BASE + U8500_ESRAM_BANK_SIZE)
15#define U8500_ESRAM_BANK2 (U8500_ESRAM_BANK1 + U8500_ESRAM_BANK_SIZE)
16#define U8500_ESRAM_BANK3 (U8500_ESRAM_BANK2 + U8500_ESRAM_BANK_SIZE)
17#define U8500_ESRAM_BANK4 (U8500_ESRAM_BANK3 + U8500_ESRAM_BANK_SIZE)
18/* Use bank 4 for DMA LCLA and LCPA */
19#define U8500_DMA_LCLA_BASE U8500_ESRAM_BANK4
20#define U8500_DMA_LCPA_BASE (U8500_ESRAM_BANK4 + 0x4000)
21
10#define U8500_PER3_BASE 0x80000000 22#define U8500_PER3_BASE 0x80000000
11#define U8500_STM_BASE 0x80100000 23#define U8500_STM_BASE 0x80100000
12#define U8500_STM_REG_BASE (U8500_STM_BASE + 0xF000) 24#define U8500_STM_REG_BASE (U8500_STM_BASE + 0xF000)
diff --git a/arch/arm/mach-ux500/include/mach/devices.h b/arch/arm/mach-ux500/include/mach/devices.h
index 0422af00a56e..c2b2f2574947 100644
--- a/arch/arm/mach-ux500/include/mach/devices.h
+++ b/arch/arm/mach-ux500/include/mach/devices.h
@@ -25,5 +25,8 @@ extern struct platform_device ux500_i2c3_device;
25 25
26extern struct platform_device u8500_i2c0_device; 26extern struct platform_device u8500_i2c0_device;
27extern struct platform_device u8500_i2c4_device; 27extern struct platform_device u8500_i2c4_device;
28extern struct platform_device u8500_dma40_device;
29
30void dma40_u8500ed_fixup(void);
28 31
29#endif 32#endif
diff --git a/arch/arm/mach-ux500/ste-dma40-db8500.h b/arch/arm/mach-ux500/ste-dma40-db8500.h
new file mode 100644
index 000000000000..e7016278dfa9
--- /dev/null
+++ b/arch/arm/mach-ux500/ste-dma40-db8500.h
@@ -0,0 +1,154 @@
1/*
 2 * arch/arm/mach-ux500/ste-dma40-db8500.h
3 * DB8500-SoC-specific configuration for DMA40
4 *
5 * Copyright (C) ST-Ericsson 2007-2010
6 * License terms: GNU General Public License (GPL) version 2
7 * Author: Per Friden <per.friden@stericsson.com>
8 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
9 */
10#ifndef STE_DMA40_DB8500_H
11#define STE_DMA40_DB8500_H
12
13#define STEDMA40_NR_DEV 64
14
15enum dma_src_dev_type {
16 STEDMA40_DEV_SPI0_RX = 0,
17 STEDMA40_DEV_SD_MMC0_RX = 1,
18 STEDMA40_DEV_SD_MMC1_RX = 2,
19 STEDMA40_DEV_SD_MMC2_RX = 3,
20 STEDMA40_DEV_I2C1_RX = 4,
21 STEDMA40_DEV_I2C3_RX = 5,
22 STEDMA40_DEV_I2C2_RX = 6,
23 STEDMA40_DEV_I2C4_RX = 7, /* Only on V1 */
24 STEDMA40_DEV_SSP0_RX = 8,
25 STEDMA40_DEV_SSP1_RX = 9,
26 STEDMA40_DEV_MCDE_RX = 10,
27 STEDMA40_DEV_UART2_RX = 11,
28 STEDMA40_DEV_UART1_RX = 12,
29 STEDMA40_DEV_UART0_RX = 13,
30 STEDMA40_DEV_MSP2_RX = 14,
31 STEDMA40_DEV_I2C0_RX = 15,
32 STEDMA40_DEV_USB_OTG_IEP_8 = 16,
33 STEDMA40_DEV_USB_OTG_IEP_1_9 = 17,
34 STEDMA40_DEV_USB_OTG_IEP_2_10 = 18,
35 STEDMA40_DEV_USB_OTG_IEP_3_11 = 19,
36 STEDMA40_DEV_SLIM0_CH0_RX_HSI_RX_CH0 = 20,
37 STEDMA40_DEV_SLIM0_CH1_RX_HSI_RX_CH1 = 21,
38 STEDMA40_DEV_SLIM0_CH2_RX_HSI_RX_CH2 = 22,
39 STEDMA40_DEV_SLIM0_CH3_RX_HSI_RX_CH3 = 23,
40 STEDMA40_DEV_SRC_SXA0_RX_TX = 24,
41 STEDMA40_DEV_SRC_SXA1_RX_TX = 25,
42 STEDMA40_DEV_SRC_SXA2_RX_TX = 26,
43 STEDMA40_DEV_SRC_SXA3_RX_TX = 27,
44 STEDMA40_DEV_SD_MM2_RX = 28,
45 STEDMA40_DEV_SD_MM0_RX = 29,
46 STEDMA40_DEV_MSP1_RX = 30,
47 /*
48 * This channel is either SlimBus or MSP,
49 * never both at the same time.
50 */
51 STEDMA40_SLIM0_CH0_RX = 31,
52 STEDMA40_DEV_MSP0_RX = 31,
53 STEDMA40_DEV_SD_MM1_RX = 32,
54 STEDMA40_DEV_SPI2_RX = 33,
55 STEDMA40_DEV_I2C3_RX2 = 34,
56 STEDMA40_DEV_SPI1_RX = 35,
57 STEDMA40_DEV_USB_OTG_IEP_4_12 = 36,
58 STEDMA40_DEV_USB_OTG_IEP_5_13 = 37,
59 STEDMA40_DEV_USB_OTG_IEP_6_14 = 38,
60 STEDMA40_DEV_USB_OTG_IEP_7_15 = 39,
61 STEDMA40_DEV_SPI3_RX = 40,
62 STEDMA40_DEV_SD_MM3_RX = 41,
63 STEDMA40_DEV_SD_MM4_RX = 42,
64 STEDMA40_DEV_SD_MM5_RX = 43,
65 STEDMA40_DEV_SRC_SXA4_RX_TX = 44,
66 STEDMA40_DEV_SRC_SXA5_RX_TX = 45,
67 STEDMA40_DEV_SRC_SXA6_RX_TX = 46,
68 STEDMA40_DEV_SRC_SXA7_RX_TX = 47,
69 STEDMA40_DEV_CAC1_RX = 48,
70 /* RX channels 49 and 50 are unused */
71 STEDMA40_DEV_MSHC_RX = 51,
72 STEDMA40_DEV_SLIM1_CH0_RX_HSI_RX_CH4 = 52,
73 STEDMA40_DEV_SLIM1_CH1_RX_HSI_RX_CH5 = 53,
74 STEDMA40_DEV_SLIM1_CH2_RX_HSI_RX_CH6 = 54,
75 STEDMA40_DEV_SLIM1_CH3_RX_HSI_RX_CH7 = 55,
76 /* RX channels 56 thru 60 are unused */
77 STEDMA40_DEV_CAC0_RX = 61,
78 /* RX channels 62 and 63 are unused */
79};
80
81enum dma_dest_dev_type {
82 STEDMA40_DEV_SPI0_TX = 0,
83 STEDMA40_DEV_SD_MMC0_TX = 1,
84 STEDMA40_DEV_SD_MMC1_TX = 2,
85 STEDMA40_DEV_SD_MMC2_TX = 3,
86 STEDMA40_DEV_I2C1_TX = 4,
87 STEDMA40_DEV_I2C3_TX = 5,
88 STEDMA40_DEV_I2C2_TX = 6,
89 STEDMA50_DEV_I2C4_TX = 7, /* Only on V1 */
90 STEDMA40_DEV_SSP0_TX = 8,
91 STEDMA40_DEV_SSP1_TX = 9,
92 /* TX channel 10 is unused */
93 STEDMA40_DEV_UART2_TX = 11,
94 STEDMA40_DEV_UART1_TX = 12,
 95 STEDMA40_DEV_UART0_TX = 13,
96 STEDMA40_DEV_MSP2_TX = 14,
97 STEDMA40_DEV_I2C0_TX = 15,
98 STEDMA40_DEV_USB_OTG_OEP_8 = 16,
99 STEDMA40_DEV_USB_OTG_OEP_1_9 = 17,
 100 STEDMA40_DEV_USB_OTG_OEP_2_10 = 18,
101 STEDMA40_DEV_USB_OTG_OEP_3_11 = 19,
102 STEDMA40_DEV_SLIM0_CH0_TX_HSI_TX_CH0 = 20,
103 STEDMA40_DEV_SLIM0_CH1_TX_HSI_TX_CH1 = 21,
104 STEDMA40_DEV_SLIM0_CH2_TX_HSI_TX_CH2 = 22,
105 STEDMA40_DEV_SLIM0_CH3_TX_HSI_TX_CH3 = 23,
106 STEDMA40_DEV_DST_SXA0_RX_TX = 24,
107 STEDMA40_DEV_DST_SXA1_RX_TX = 25,
108 STEDMA40_DEV_DST_SXA2_RX_TX = 26,
109 STEDMA40_DEV_DST_SXA3_RX_TX = 27,
110 STEDMA40_DEV_SD_MM2_TX = 28,
111 STEDMA40_DEV_SD_MM0_TX = 29,
112 STEDMA40_DEV_MSP1_TX = 30,
113 /*
114 * This channel is either SlimBus or MSP,
115 * never both at the same time.
116 */
117 STEDMA40_SLIM0_CH0_TX = 31,
118 STEDMA40_DEV_MSP0_TX = 31,
119 STEDMA40_DEV_SD_MM1_TX = 32,
120 STEDMA40_DEV_SPI2_TX = 33,
121 /* Secondary I2C3 channel */
122 STEDMA40_DEV_I2C3_TX2 = 34,
123 STEDMA40_DEV_SPI1_TX = 35,
124 STEDMA40_DEV_USB_OTG_OEP_4_12 = 36,
125 STEDMA40_DEV_USB_OTG_OEP_5_13 = 37,
126 STEDMA40_DEV_USB_OTG_OEP_6_14 = 38,
127 STEDMA40_DEV_USB_OTG_OEP_7_15 = 39,
128 STEDMA40_DEV_SPI3_TX = 40,
129 STEDMA40_DEV_SD_MM3_TX = 41,
130 STEDMA40_DEV_SD_MM4_TX = 42,
131 STEDMA40_DEV_SD_MM5_TX = 43,
132 STEDMA40_DEV_DST_SXA4_RX_TX = 44,
133 STEDMA40_DEV_DST_SXA5_RX_TX = 45,
134 STEDMA40_DEV_DST_SXA6_RX_TX = 46,
135 STEDMA40_DEV_DST_SXA7_RX_TX = 47,
136 STEDMA40_DEV_CAC1_TX = 48,
137 STEDMA40_DEV_CAC1_TX_HAC1_TX = 49,
138 STEDMA40_DEV_HAC1_TX = 50,
139 STEDMA40_MEMXCPY_TX_0 = 51,
140 STEDMA40_DEV_SLIM1_CH0_TX_HSI_TX_CH4 = 52,
141 STEDMA40_DEV_SLIM1_CH1_TX_HSI_TX_CH5 = 53,
142 STEDMA40_DEV_SLIM1_CH2_TX_HSI_TX_CH6 = 54,
143 STEDMA40_DEV_SLIM1_CH3_TX_HSI_TX_CH7 = 55,
144 STEDMA40_MEMCPY_TX_1 = 56,
145 STEDMA40_MEMCPY_TX_2 = 57,
146 STEDMA40_MEMCPY_TX_3 = 58,
147 STEDMA40_MEMCPY_TX_4 = 59,
148 STEDMA40_MEMCPY_TX_5 = 60,
149 STEDMA40_DEV_CAC0_TX = 61,
150 STEDMA40_DEV_CAC0_TX_HAC0_TX = 62,
151 STEDMA40_DEV_HAC0_TX = 63,
152};
153
154#endif
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
index 902ba9e42c5b..20b2e79e54f2 100644
--- a/arch/arm/plat-mxc/Kconfig
+++ b/arch/arm/plat-mxc/Kconfig
@@ -1,5 +1,7 @@
1if ARCH_MXC 1if ARCH_MXC
2 2
3source "arch/arm/plat-mxc/devices/Kconfig"
4
3menu "Freescale MXC Implementations" 5menu "Freescale MXC Implementations"
4 6
5choice 7choice
@@ -8,15 +10,12 @@ choice
8 10
9config ARCH_MX1 11config ARCH_MX1
10 bool "MX1-based" 12 bool "MX1-based"
11 select CPU_ARM920T 13 select SOC_IMX1
12 select IMX_HAVE_IOMUX_V1
13 help 14 help
14 This enables support for systems based on the Freescale i.MX1 family 15 This enables support for systems based on the Freescale i.MX1 family
15 16
16config ARCH_MX2 17config ARCH_MX2
17 bool "MX2-based" 18 bool "MX2-based"
18 select CPU_ARM926T
19 select IMX_HAVE_IOMUX_V1
20 help 19 help
21 This enables support for systems based on the Freescale i.MX2 family 20 This enables support for systems based on the Freescale i.MX2 family
22 21
@@ -49,8 +48,7 @@ config ARCH_MX5
49 48
50endchoice 49endchoice
51 50
52source "arch/arm/mach-mx1/Kconfig" 51source "arch/arm/mach-imx/Kconfig"
53source "arch/arm/mach-mx2/Kconfig"
54source "arch/arm/mach-mx3/Kconfig" 52source "arch/arm/mach-mx3/Kconfig"
55source "arch/arm/mach-mx25/Kconfig" 53source "arch/arm/mach-mx25/Kconfig"
56source "arch/arm/mach-mxc91231/Kconfig" 54source "arch/arm/mach-mxc91231/Kconfig"
diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
index 895bc3c5e0c0..c7506a80eb31 100644
--- a/arch/arm/plat-mxc/Makefile
+++ b/arch/arm/plat-mxc/Makefile
@@ -8,8 +8,6 @@ obj-y := irq.o clock.o gpio.o time.o devices.o cpu.o system.o
8# MX51 uses the TZIC interrupt controller, older platforms use AVIC (irq.o) 8# MX51 uses the TZIC interrupt controller, older platforms use AVIC (irq.o)
9obj-$(CONFIG_MXC_TZIC) += tzic.o 9obj-$(CONFIG_MXC_TZIC) += tzic.o
10 10
11obj-$(CONFIG_ARCH_MX1) += dma-mx1-mx2.o
12obj-$(CONFIG_ARCH_MX2) += dma-mx1-mx2.o
13obj-$(CONFIG_IMX_HAVE_IOMUX_V1) += iomux-v1.o 11obj-$(CONFIG_IMX_HAVE_IOMUX_V1) += iomux-v1.o
14obj-$(CONFIG_ARCH_MXC_IOMUX_V3) += iomux-v3.o 12obj-$(CONFIG_ARCH_MXC_IOMUX_V3) += iomux-v3.o
15obj-$(CONFIG_MXC_PWM) += pwm.o 13obj-$(CONFIG_MXC_PWM) += pwm.o
@@ -21,3 +19,5 @@ ifdef CONFIG_SND_IMX_SOC
21obj-y += ssi-fiq.o 19obj-y += ssi-fiq.o
22obj-y += ssi-fiq-ksym.o 20obj-y += ssi-fiq-ksym.o
23endif 21endif
22
23obj-y += devices/
diff --git a/arch/arm/plat-mxc/audmux-v1.c b/arch/arm/plat-mxc/audmux-v1.c
index b62917ca3f95..1180bef7664b 100644
--- a/arch/arm/plat-mxc/audmux-v1.c
+++ b/arch/arm/plat-mxc/audmux-v1.c
@@ -13,10 +13,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 16 */
21 17
22#include <linux/module.h> 18#include <linux/module.h>
diff --git a/arch/arm/plat-mxc/audmux-v2.c b/arch/arm/plat-mxc/audmux-v2.c
index ab94d78a927f..f9e7cdbd0005 100644
--- a/arch/arm/plat-mxc/audmux-v2.c
+++ b/arch/arm/plat-mxc/audmux-v2.c
@@ -13,10 +13,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 16 */
21 17
22#include <linux/module.h> 18#include <linux/module.h>
diff --git a/arch/arm/plat-mxc/devices.c b/arch/arm/plat-mxc/devices.c
index 56f2fb5cc456..735776d84956 100644
--- a/arch/arm/plat-mxc/devices.c
+++ b/arch/arm/plat-mxc/devices.c
@@ -18,6 +18,7 @@
18 18
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/err.h>
21#include <linux/platform_device.h> 22#include <linux/platform_device.h>
22#include <mach/common.h> 23#include <mach/common.h>
23 24
@@ -35,3 +36,35 @@ int __init mxc_register_device(struct platform_device *pdev, void *data)
35 return ret; 36 return ret;
36} 37}
37 38
39struct platform_device *__init imx_add_platform_device(const char *name, int id,
40 const struct resource *res, unsigned int num_resources,
41 const void *data, size_t size_data)
42{
43 int ret = -ENOMEM;
44 struct platform_device *pdev;
45
46 pdev = platform_device_alloc(name, id);
47 if (!pdev)
48 goto err;
49
50 if (res) {
51 ret = platform_device_add_resources(pdev, res, num_resources);
52 if (ret)
53 goto err;
54 }
55
56 if (data) {
57 ret = platform_device_add_data(pdev, data, size_data);
58 if (ret)
59 goto err;
60 }
61
62 ret = platform_device_add(pdev);
63 if (ret) {
64err:
65 platform_device_put(pdev);
66 return ERR_PTR(ret);
67 }
68
69 return pdev;
70}
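imx_add_platform_device() copies both the resource array and the platform data and returns an ERR_PTR() value on failure rather than NULL, which is what allows board files to hand in const __initconst data. A minimal usage sketch, assuming res[] and uart_pdata are defined along the lines of the helpers added below:

	struct platform_device *pdev;

	pdev = imx_add_platform_device("imx-uart", 0, res, ARRAY_SIZE(res),
				       &uart_pdata, sizeof(uart_pdata));
	if (IS_ERR(pdev))
		pr_err("registering imx-uart failed: %ld\n", PTR_ERR(pdev));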
diff --git a/arch/arm/plat-mxc/devices/Kconfig b/arch/arm/plat-mxc/devices/Kconfig
new file mode 100644
index 000000000000..09230f8c802a
--- /dev/null
+++ b/arch/arm/plat-mxc/devices/Kconfig
@@ -0,0 +1,11 @@
1config IMX_HAVE_PLATFORM_IMX_I2C
2 bool
3
4config IMX_HAVE_PLATFORM_IMX_UART
5 bool
6
7config IMX_HAVE_PLATFORM_MXC_NAND
8 bool
9
10config IMX_HAVE_PLATFORM_SPI_IMX
11 bool
diff --git a/arch/arm/plat-mxc/devices/Makefile b/arch/arm/plat-mxc/devices/Makefile
new file mode 100644
index 000000000000..5ecbb244d210
--- /dev/null
+++ b/arch/arm/plat-mxc/devices/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX_I2C) += platform-imx-i2c.o
2obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX_UART) += platform-imx-uart.o
3obj-$(CONFIG_IMX_HAVE_PLATFORM_MXC_NAND) += platform-mxc_nand.o
4obj-$(CONFIG_IMX_HAVE_PLATFORM_SPI_IMX) += platform-spi_imx.o
diff --git a/arch/arm/plat-mxc/devices/platform-imx-i2c.c b/arch/arm/plat-mxc/devices/platform-imx-i2c.c
new file mode 100644
index 000000000000..d0af9f7d8aed
--- /dev/null
+++ b/arch/arm/plat-mxc/devices/platform-imx-i2c.c
@@ -0,0 +1,29 @@
1/*
2 * Copyright (C) 2009-2010 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#include <mach/devices-common.h>
10
11struct platform_device *__init imx_add_imx_i2c(int id,
12 resource_size_t iobase, resource_size_t iosize, int irq,
13 const struct imxi2c_platform_data *pdata)
14{
15 struct resource res[] = {
16 {
17 .start = iobase,
18 .end = iobase + iosize - 1,
19 .flags = IORESOURCE_MEM,
20 }, {
21 .start = irq,
22 .end = irq,
23 .flags = IORESOURCE_IRQ,
24 },
25 };
26
27 return imx_add_platform_device("imx-i2c", id, res, ARRAY_SIZE(res),
28 pdata, sizeof(*pdata));
29}
diff --git a/arch/arm/plat-mxc/devices/platform-imx-uart.c b/arch/arm/plat-mxc/devices/platform-imx-uart.c
new file mode 100644
index 000000000000..fa3dff1433e8
--- /dev/null
+++ b/arch/arm/plat-mxc/devices/platform-imx-uart.c
@@ -0,0 +1,60 @@
1/*
2 * Copyright (C) 2009-2010 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#include <mach/devices-common.h>
10
11struct platform_device *__init imx_add_imx_uart_3irq(int id,
12 resource_size_t iobase, resource_size_t iosize,
13 resource_size_t irqrx, resource_size_t irqtx,
14 resource_size_t irqrts,
15 const struct imxuart_platform_data *pdata)
16{
17 struct resource res[] = {
18 {
19 .start = iobase,
20 .end = iobase + iosize - 1,
21 .flags = IORESOURCE_MEM,
22 }, {
23 .start = irqrx,
24 .end = irqrx,
25 .flags = IORESOURCE_IRQ,
26 }, {
27 .start = irqtx,
28 .end = irqtx,
29 .flags = IORESOURCE_IRQ,
30 }, {
31 .start = irqrts,
 32 .end = irqrts,
33 .flags = IORESOURCE_IRQ,
34 },
35 };
36
37 return imx_add_platform_device("imx-uart", id, res, ARRAY_SIZE(res),
38 pdata, sizeof(*pdata));
39}
40
41struct platform_device *__init imx_add_imx_uart_1irq(int id,
42 resource_size_t iobase, resource_size_t iosize,
43 resource_size_t irq,
44 const struct imxuart_platform_data *pdata)
45{
46 struct resource res[] = {
47 {
48 .start = iobase,
49 .end = iobase + iosize - 1,
50 .flags = IORESOURCE_MEM,
51 }, {
52 .start = irq,
53 .end = irq,
54 .flags = IORESOURCE_IRQ,
55 },
56 };
57
58 return imx_add_platform_device("imx-uart", id, res, ARRAY_SIZE(res),
59 pdata, sizeof(*pdata));
60}
diff --git a/arch/arm/plat-mxc/devices/platform-mxc_nand.c b/arch/arm/plat-mxc/devices/platform-mxc_nand.c
new file mode 100644
index 000000000000..1c286418d123
--- /dev/null
+++ b/arch/arm/plat-mxc/devices/platform-mxc_nand.c
@@ -0,0 +1,44 @@
1/*
2 * Copyright (C) 2009-2010 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#include <asm/sizes.h>
10#include <mach/devices-common.h>
11
12static struct platform_device *__init imx_add_mxc_nand(resource_size_t iobase,
13 int irq, const struct mxc_nand_platform_data *pdata,
14 resource_size_t iosize)
15{
16 static int id = 0;
17
18 struct resource res[] = {
19 {
20 .start = iobase,
21 .end = iobase + iosize - 1,
22 .flags = IORESOURCE_MEM,
23 }, {
24 .start = irq,
25 .end = irq,
26 .flags = IORESOURCE_IRQ,
27 },
28 };
29
30 return imx_add_platform_device("mxc_nand", id++, res, ARRAY_SIZE(res),
31 pdata, sizeof(*pdata));
32}
33
34struct platform_device *__init imx_add_mxc_nand_v1(resource_size_t iobase,
35 int irq, const struct mxc_nand_platform_data *pdata)
36{
37 return imx_add_mxc_nand(iobase, irq, pdata, SZ_4K);
38}
39
40struct platform_device *__init imx_add_mxc_nand_v21(resource_size_t iobase,
41 int irq, const struct mxc_nand_platform_data *pdata)
42{
43 return imx_add_mxc_nand(iobase, irq, pdata, SZ_8K);
44}
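The NAND helpers differ only in the register window size (4 KiB for the v1 controller, 8 KiB for v21), so a board supplies just the base address, interrupt and platform data. A hedged usage sketch; the base-address and interrupt macros here are assumptions, not taken from this patch:

	static const struct mxc_nand_platform_data board_nand_pdata __initconst = {
		.width	= 1,
		.hw_ecc	= 1,
	};

	imx_add_mxc_nand_v1(MX21_NFC_BASE_ADDR, MX21_INT_NANDFC, &board_nand_pdata);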
diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c
new file mode 100644
index 000000000000..2831a6d3eb4b
--- /dev/null
+++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c
@@ -0,0 +1,30 @@
1/*
2 * Copyright (C) 2009-2010 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#include <asm/sizes.h>
10#include <mach/devices-common.h>
11
12struct platform_device *__init imx_add_spi_imx(int id,
13 resource_size_t iobase, resource_size_t iosize, int irq,
14 const struct spi_imx_master *pdata)
15{
16 struct resource res[] = {
17 {
18 .start = iobase,
19 .end = iobase + iosize - 1,
20 .flags = IORESOURCE_MEM,
21 }, {
22 .start = irq,
23 .end = irq,
24 .flags = IORESOURCE_IRQ,
25 },
26 };
27
28 return imx_add_platform_device("spi_imx", id, res, ARRAY_SIZE(res),
29 pdata, sizeof(*pdata));
30}
diff --git a/arch/arm/plat-mxc/ehci.c b/arch/arm/plat-mxc/ehci.c
index 618479258bb6..35a064ff02ba 100644
--- a/arch/arm/plat-mxc/ehci.c
+++ b/arch/arm/plat-mxc/ehci.c
@@ -11,10 +11,6 @@
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details. 13 * for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software Foundation,
17 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */ 14 */
19 15
20#include <linux/platform_device.h> 16#include <linux/platform_device.h>
diff --git a/arch/arm/plat-mxc/include/mach/board-armadillo5x0.h b/arch/arm/plat-mxc/include/mach/board-armadillo5x0.h
deleted file mode 100644
index 0376c133c9f4..000000000000
--- a/arch/arm/plat-mxc/include/mach/board-armadillo5x0.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>.
3 * All Rights Reserved.
4 */
5
6/*
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef __ASM_ARCH_MXC_BOARD_ARMADILLO5X0_H__
13#define __ASM_ARCH_MXC_BOARD_ARMADILLO5X0_H__
14
15#endif
diff --git a/arch/arm/plat-mxc/include/mach/board-eukrea_cpuimx27.h b/arch/arm/plat-mxc/include/mach/board-eukrea_cpuimx27.h
index a1fd5830af48..45b2fb8bed61 100644
--- a/arch/arm/plat-mxc/include/mach/board-eukrea_cpuimx27.h
+++ b/arch/arm/plat-mxc/include/mach/board-eukrea_cpuimx27.h
@@ -25,7 +25,7 @@
25#ifndef __ASSEMBLY__ 25#ifndef __ASSEMBLY__
26/* 26/*
27 * This CPU module needs a baseboard to work. After basic initializing 27 * This CPU module needs a baseboard to work. After basic initializing
28 * its own devices, it calls baseboard's init function. 28 * its own devices, it calls the baseboard's init function.
29 * TODO: Add your own baseboard init function and call it from 29 * TODO: Add your own baseboard init function and call it from
30 * inside eukrea_cpuimx27_init(). 30 * inside eukrea_cpuimx27_init().
31 * 31 *
diff --git a/arch/arm/plat-mxc/include/mach/board-kzmarm11.h b/arch/arm/plat-mxc/include/mach/board-kzmarm11.h
deleted file mode 100644
index 93cc66f104c7..000000000000
--- a/arch/arm/plat-mxc/include/mach/board-kzmarm11.h
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Copyright (C) 2009 Yoichi Yuasa <yuasa@linux-mips.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __ARM_ARCH_BOARD_KZM_ARM11_H
19#define __ARM_ARCH_BOARD_KZM_ARM11_H
20
21/*
22 * KZM-ARM11-01 Board Control Registers on FPGA
23 */
24#define KZM_ARM11_CTL1 (MX31_CS4_BASE_ADDR + 0x1000)
25#define KZM_ARM11_CTL2 (MX31_CS4_BASE_ADDR + 0x1001)
26#define KZM_ARM11_RSW1 (MX31_CS4_BASE_ADDR + 0x1002)
27#define KZM_ARM11_BACK_LIGHT (MX31_CS4_BASE_ADDR + 0x1004)
28#define KZM_ARM11_FPGA_REV (MX31_CS4_BASE_ADDR + 0x1008)
29#define KZM_ARM11_7SEG_LED (MX31_CS4_BASE_ADDR + 0x1010)
30#define KZM_ARM11_LEDS (MX31_CS4_BASE_ADDR + 0x1020)
31#define KZM_ARM11_DIPSW2 (MX31_CS4_BASE_ADDR + 0x1003)
32
33/*
34 * External UART for touch panel on FPGA
35 */
36#define KZM_ARM11_16550 (MX31_CS4_BASE_ADDR + 0x1050)
37
38#endif /* __ARM_ARCH_BOARD_KZM_ARM11_H */
39
diff --git a/arch/arm/plat-mxc/include/mach/board-mx21ads.h b/arch/arm/plat-mxc/include/mach/board-mx21ads.h
deleted file mode 100644
index 0cf4fa29510c..000000000000
--- a/arch/arm/plat-mxc/include/mach/board-mx21ads.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 */
4
5/*
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
9 *
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
12 */
13
14#ifndef __ASM_ARCH_MXC_BOARD_MX21ADS_H__
15#define __ASM_ARCH_MXC_BOARD_MX21ADS_H__
16
17/*
18 * Memory-mapped I/O on MX21ADS base board
19 */
20#define MX21ADS_MMIO_BASE_ADDR 0xF5000000
21#define MX21ADS_MMIO_SIZE SZ_16M
22
23#define MX21ADS_REG_ADDR(offset) (void __force __iomem *) \
24 (MX21ADS_MMIO_BASE_ADDR + (offset))
25
26#define MX21ADS_CS8900A_IRQ IRQ_GPIOE(11)
27#define MX21ADS_CS8900A_IOBASE_REG MX21ADS_REG_ADDR(0x000000)
28#define MX21ADS_ST16C255_IOBASE_REG MX21ADS_REG_ADDR(0x200000)
29#define MX21ADS_VERSION_REG MX21ADS_REG_ADDR(0x400000)
30#define MX21ADS_IO_REG MX21ADS_REG_ADDR(0x800000)
31
32/* MX21ADS_IO_REG bit definitions */
33#define MX21ADS_IO_SD_WP 0x0001 /* read */
34#define MX21ADS_IO_TP6 0x0001 /* write */
35#define MX21ADS_IO_SW_SEL 0x0002 /* read */
36#define MX21ADS_IO_TP7 0x0002 /* write */
37#define MX21ADS_IO_RESET_E_UART 0x0004
38#define MX21ADS_IO_RESET_BASE 0x0008
39#define MX21ADS_IO_CSI_CTL2 0x0010
40#define MX21ADS_IO_CSI_CTL1 0x0020
41#define MX21ADS_IO_CSI_CTL0 0x0040
42#define MX21ADS_IO_UART1_EN 0x0080
43#define MX21ADS_IO_UART4_EN 0x0100
44#define MX21ADS_IO_LCDON 0x0200
45#define MX21ADS_IO_IRDA_EN 0x0400
46#define MX21ADS_IO_IRDA_FIR_SEL 0x0800
47#define MX21ADS_IO_IRDA_MD0_B 0x1000
48#define MX21ADS_IO_IRDA_MD1 0x2000
49#define MX21ADS_IO_LED4_ON 0x4000
50#define MX21ADS_IO_LED3_ON 0x8000
51
52#endif /* __ASM_ARCH_MXC_BOARD_MX21ADS_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/board-mx27ads.h b/arch/arm/plat-mxc/include/mach/board-mx27ads.h
deleted file mode 100644
index 7776d230327f..000000000000
--- a/arch/arm/plat-mxc/include/mach/board-mx27ads.h
+++ /dev/null
@@ -1,344 +0,0 @@
1/*
2 * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 */
4
5/*
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
9 *
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
12 */
13
14#ifndef __ASM_ARCH_MXC_BOARD_MX27ADS_H__
15#define __ASM_ARCH_MXC_BOARD_MX27ADS_H__
16
17/* external interrupt multiplexer */
18#define MXC_EXP_IO_BASE (MXC_BOARD_IRQ_START)
19
20#define MXC_VIRTUAL_INTS_BASE (MXC_EXP_IO_BASE + MXC_MAX_EXP_IO_LINES)
21#define MXC_SDIO1_CARD_IRQ MXC_VIRTUAL_INTS_BASE
22#define MXC_SDIO2_CARD_IRQ (MXC_VIRTUAL_INTS_BASE + 1)
23#define MXC_SDIO3_CARD_IRQ (MXC_VIRTUAL_INTS_BASE + 2)
24
25#define MXC_MAX_BOARD_INTS (MXC_MAX_EXP_IO_LINES + \
26 MXC_MAX_VIRTUAL_INTS)
27
28/*
29 * @name Memory Size parameters
30 */
31
32/*
33 * Size of SDRAM memory
34 */
35#define SDRAM_MEM_SIZE SZ_128M
36
37/*
38 * PBC Controller parameters
39 */
40
41/*
42 * Base address of PBC controller, CS4
43 */
44#define PBC_BASE_ADDRESS 0xf4300000
45#define PBC_REG_ADDR(offset) (void __force __iomem *) \
46 (PBC_BASE_ADDRESS + (offset))
47
48/*
 49 * PBC Interrupt name definitions
50 */
51#define PBC_GPIO1_0 0
52#define PBC_GPIO1_1 1
53#define PBC_GPIO1_2 2
54#define PBC_GPIO1_3 3
55#define PBC_GPIO1_4 4
56#define PBC_GPIO1_5 5
57
58#define PBC_INTR_MAX_NUM 6
59#define PBC_INTR_SHARED_MAX_NUM 8
60
61/* When the PBC address connection is fixed in h/w, defined as 1 */
62#define PBC_ADDR_SH 0
63
64/* Offsets for the PBC Controller register */
65/*
66 * PBC Board version register offset
67 */
68#define PBC_VERSION_REG PBC_REG_ADDR(0x00000 >> PBC_ADDR_SH)
69/*
70 * PBC Board control register 1 set address.
71 */
72#define PBC_BCTRL1_SET_REG PBC_REG_ADDR(0x00008 >> PBC_ADDR_SH)
73/*
74 * PBC Board control register 1 clear address.
75 */
76#define PBC_BCTRL1_CLEAR_REG PBC_REG_ADDR(0x0000C >> PBC_ADDR_SH)
77/*
78 * PBC Board control register 2 set address.
79 */
80#define PBC_BCTRL2_SET_REG PBC_REG_ADDR(0x00010 >> PBC_ADDR_SH)
81/*
82 * PBC Board control register 2 clear address.
83 */
84#define PBC_BCTRL2_CLEAR_REG PBC_REG_ADDR(0x00014 >> PBC_ADDR_SH)
85/*
86 * PBC Board control register 3 set address.
87 */
88#define PBC_BCTRL3_SET_REG PBC_REG_ADDR(0x00018 >> PBC_ADDR_SH)
89/*
90 * PBC Board control register 3 clear address.
91 */
92#define PBC_BCTRL3_CLEAR_REG PBC_REG_ADDR(0x0001C >> PBC_ADDR_SH)
93/*
94 * PBC Board control register 3 set address.
95 */
96#define PBC_BCTRL4_SET_REG PBC_REG_ADDR(0x00020 >> PBC_ADDR_SH)
97/*
98 * PBC Board control register 4 clear address.
99 */
100#define PBC_BCTRL4_CLEAR_REG PBC_REG_ADDR(0x00024 >> PBC_ADDR_SH)
 101/*
102 * PBC Board status register 1.
103 */
104#define PBC_BSTAT1_REG PBC_REG_ADDR(0x00028 >> PBC_ADDR_SH)
105/*
106 * PBC Board interrupt status register.
107 */
108#define PBC_INTSTATUS_REG PBC_REG_ADDR(0x0002C >> PBC_ADDR_SH)
109/*
110 * PBC Board interrupt current status register.
111 */
112#define PBC_INTCURR_STATUS_REG PBC_REG_ADDR(0x00034 >> PBC_ADDR_SH)
113/*
114 * PBC Interrupt mask register set address.
115 */
116#define PBC_INTMASK_SET_REG PBC_REG_ADDR(0x00038 >> PBC_ADDR_SH)
117/*
118 * PBC Interrupt mask register clear address.
119 */
120#define PBC_INTMASK_CLEAR_REG PBC_REG_ADDR(0x0003C >> PBC_ADDR_SH)
121/*
122 * External UART A.
123 */
124#define PBC_SC16C652_UARTA_REG PBC_REG_ADDR(0x20000 >> PBC_ADDR_SH)
125/*
126 * UART 4 Expanding Signal Status.
127 */
128#define PBC_UART_STATUS_REG PBC_REG_ADDR(0x22000 >> PBC_ADDR_SH)
129/*
130 * UART 4 Expanding Signal Control Set.
131 */
132#define PBC_UCTRL_SET_REG PBC_REG_ADDR(0x24000 >> PBC_ADDR_SH)
133/*
134 * UART 4 Expanding Signal Control Clear.
135 */
136#define PBC_UCTRL_CLR_REG PBC_REG_ADDR(0x26000 >> PBC_ADDR_SH)
137/*
138 * Ethernet Controller IO base address.
139 */
140#define PBC_CS8900A_IOBASE_REG PBC_REG_ADDR(0x40000 >> PBC_ADDR_SH)
141/*
142 * Ethernet Controller Memory base address.
143 */
144#define PBC_CS8900A_MEMBASE_REG PBC_REG_ADDR(0x42000 >> PBC_ADDR_SH)
145/*
146 * Ethernet Controller DMA base address.
147 */
148#define PBC_CS8900A_DMABASE_REG PBC_REG_ADDR(0x44000 >> PBC_ADDR_SH)
149
150/* PBC Board Version Register bit definition */
151#define PBC_VERSION_ADS 0x8000 /* Bit15=1 means version for ads */
152#define PBC_VERSION_EVB_REVB 0x4000 /* BIT14=1 means version for evb revb */
153
154/* PBC Board Control Register 1 bit definitions */
155#define PBC_BCTRL1_ERST 0x0001 /* Ethernet Reset */
156#define PBC_BCTRL1_URST 0x0002 /* Reset External UART controller */
157#define PBC_BCTRL1_FRST 0x0004 /* FEC Reset */
158#define PBC_BCTRL1_ESLEEP 0x0010 /* Enable ethernet Sleep */
159#define PBC_BCTRL1_LCDON 0x0800 /* Enable the LCD */
160
161/* PBC Board Control Register 2 bit definitions */
162#define PBC_BCTRL2_VCC_EN 0x0004 /* Enable VCC */
163#define PBC_BCTRL2_VPP_EN 0x0008 /* Enable Vpp */
164#define PBC_BCTRL2_ATAFEC_EN 0X0010
165#define PBC_BCTRL2_ATAFEC_SEL 0X0020
166#define PBC_BCTRL2_ATA_EN 0X0040
167#define PBC_BCTRL2_IRDA_SD 0X0080
168#define PBC_BCTRL2_IRDA_EN 0X0100
169#define PBC_BCTRL2_CCTL10 0X0200
170#define PBC_BCTRL2_CCTL11 0X0400
171
172/* PBC Board Control Register 3 bit definitions */
173#define PBC_BCTRL3_HSH_EN 0X0020
174#define PBC_BCTRL3_FSH_MOD 0X0040
175#define PBC_BCTRL3_OTG_HS_EN 0X0080
176#define PBC_BCTRL3_OTG_VBUS_EN 0X0100
177#define PBC_BCTRL3_FSH_VBUS_EN 0X0200
178#define PBC_BCTRL3_USB_OTG_ON 0X0800
179#define PBC_BCTRL3_USB_FSH_ON 0X1000
180
181/* PBC Board Control Register 4 bit definitions */
182#define PBC_BCTRL4_REGEN_SEL 0X0001
183#define PBC_BCTRL4_USER_OFF 0X0002
184#define PBC_BCTRL4_VIB_EN 0X0004
185#define PBC_BCTRL4_PWRGT1_EN 0X0008
186#define PBC_BCTRL4_PWRGT2_EN 0X0010
187#define PBC_BCTRL4_STDBY_PRI 0X0020
188
189#ifndef __ASSEMBLY__
190/*
191 * Enumerations for SD cards and memory stick card. This corresponds to
192 * the card EN bits in the IMR: SD1_EN | MS_EN | SD3_EN | SD2_EN.
193 */
194enum mxc_card_no {
195 MXC_CARD_SD2 = 0,
196 MXC_CARD_SD3,
197 MXC_CARD_MS,
198 MXC_CARD_SD1,
199 MXC_CARD_MIN = MXC_CARD_SD2,
200 MXC_CARD_MAX = MXC_CARD_SD1,
201};
202#endif
203
204#define MXC_CPLD_VER_1_50 0x01
205
206/*
207 * PBC BSTAT Register bit definitions
208 */
209#define PBC_BSTAT_PRI_INT 0X0001
210#define PBC_BSTAT_USB_BYP 0X0002
211#define PBC_BSTAT_ATA_IOCS16 0X0004
212#define PBC_BSTAT_ATA_CBLID 0X0008
213#define PBC_BSTAT_ATA_DASP 0X0010
214#define PBC_BSTAT_PWR_RDY 0X0020
215#define PBC_BSTAT_SD3_WP 0X0100
216#define PBC_BSTAT_SD2_WP 0X0200
217#define PBC_BSTAT_SD1_WP 0X0400
218#define PBC_BSTAT_SD3_DET 0X0800
219#define PBC_BSTAT_SD2_DET 0X1000
220#define PBC_BSTAT_SD1_DET 0X2000
221#define PBC_BSTAT_MS_DET 0X4000
222#define PBC_BSTAT_SD3_DET_BIT 11
223#define PBC_BSTAT_SD2_DET_BIT 12
224#define PBC_BSTAT_SD1_DET_BIT 13
225#define PBC_BSTAT_MS_DET_BIT 14
226#define MXC_BSTAT_BIT(n) ((n == MXC_CARD_SD2) ? PBC_BSTAT_SD2_DET : \
227 ((n == MXC_CARD_SD3) ? PBC_BSTAT_SD3_DET : \
228 ((n == MXC_CARD_SD1) ? PBC_BSTAT_SD1_DET : \
229 ((n == MXC_CARD_MS) ? PBC_BSTAT_MS_DET : \
230 0x0))))
231
232/*
233 * PBC UART Control Register bit definitions
234 */
235#define PBC_UCTRL_DCE_DCD 0X0001
236#define PBC_UCTRL_DCE_DSR 0X0002
237#define PBC_UCTRL_DCE_RI 0X0004
238#define PBC_UCTRL_DTE_DTR 0X0100
239
240/*
241 * PBC UART Status Register bit definitions
242 */
243#define PBC_USTAT_DTE_DCD 0X0001
244#define PBC_USTAT_DTE_DSR 0X0002
245#define PBC_USTAT_DTE_RI 0X0004
246#define PBC_USTAT_DCE_DTR 0X0100
247
248/*
249 * PBC Interrupt mask register bit definitions
250 */
251#define PBC_INTR_SD3_R_EN_BIT 4
252#define PBC_INTR_SD2_R_EN_BIT 0
253#define PBC_INTR_SD1_R_EN_BIT 6
254#define PBC_INTR_MS_R_EN_BIT 5
255#define PBC_INTR_SD3_EN_BIT 13
256#define PBC_INTR_SD2_EN_BIT 12
257#define PBC_INTR_MS_EN_BIT 14
258#define PBC_INTR_SD1_EN_BIT 15
259
260#define PBC_INTR_SD2_R_EN 0x0001
261#define PBC_INTR_LOW_BAT 0X0002
262#define PBC_INTR_OTG_FSOVER 0X0004
263#define PBC_INTR_FSH_OVER 0X0008
264#define PBC_INTR_SD3_R_EN 0x0010
265#define PBC_INTR_MS_R_EN 0x0020
266#define PBC_INTR_SD1_R_EN 0x0040
267#define PBC_INTR_FEC_INT 0X0080
268#define PBC_INTR_ENET_INT 0X0100
269#define PBC_INTR_OTGFS_INT 0X0200
270#define PBC_INTR_XUART_INT 0X0400
271#define PBC_INTR_CCTL12 0X0800
272#define PBC_INTR_SD2_EN 0x1000
273#define PBC_INTR_SD3_EN 0x2000
274#define PBC_INTR_MS_EN 0x4000
275#define PBC_INTR_SD1_EN 0x8000
276
277
278
279/* For interrupts like xuart, enet etc */
280#define EXPIO_PARENT_INT IOMUX_TO_IRQ(MX27_PIN_TIN)
281#define MXC_MAX_EXP_IO_LINES 16
282
283/*
284 * This corresponds to PBC_INTMASK_SET_REG at offset 0x38.
285 *
286 */
287#define EXPIO_INT_LOW_BAT (MXC_EXP_IO_BASE + 1)
288#define EXPIO_INT_OTG_FS_OVR (MXC_EXP_IO_BASE + 2)
289#define EXPIO_INT_FSH_OVR (MXC_EXP_IO_BASE + 3)
290#define EXPIO_INT_RES4 (MXC_EXP_IO_BASE + 4)
291#define EXPIO_INT_RES5 (MXC_EXP_IO_BASE + 5)
292#define EXPIO_INT_RES6 (MXC_EXP_IO_BASE + 6)
293#define EXPIO_INT_FEC (MXC_EXP_IO_BASE + 7)
294#define EXPIO_INT_ENET_INT (MXC_EXP_IO_BASE + 8)
295#define EXPIO_INT_OTG_FS_INT (MXC_EXP_IO_BASE + 9)
296#define EXPIO_INT_XUART_INTA (MXC_EXP_IO_BASE + 10)
297#define EXPIO_INT_CCTL12_INT (MXC_EXP_IO_BASE + 11)
298#define EXPIO_INT_SD2_EN (MXC_EXP_IO_BASE + 12)
299#define EXPIO_INT_SD3_EN (MXC_EXP_IO_BASE + 13)
300#define EXPIO_INT_MS_EN (MXC_EXP_IO_BASE + 14)
301#define EXPIO_INT_SD1_EN (MXC_EXP_IO_BASE + 15)
302
303/*
304 * This is System IRQ used by CS8900A for interrupt generation
305 * taken from platform.h
306 */
307#define CS8900AIRQ EXPIO_INT_ENET_INT
308/* This is I/O Base address used to access registers of CS8900A on MXC ADS */
309#define CS8900A_BASE_ADDRESS (PBC_CS8900A_IOBASE_REG + 0x300)
310
311#define MXC_PMIC_INT_LINE IOMUX_TO_IRQ(MX27_PIN_TOUT)
312
313/*
314* This is used to detect if the CPLD version is for mx27 evb board rev-a
315*/
316#define PBC_CPLD_VERSION_IS_REVA() \
317 ((__raw_readw(PBC_VERSION_REG) & \
318 (PBC_VERSION_ADS | PBC_VERSION_EVB_REVB))\
319 == 0)
320
321/* This is used to activate or deactivate the ATA signals in the CPLD.
322 * It is hardware dependent.
323 */
324#define PBC_ATA_SIGNAL_ACTIVE() \
325 __raw_writew( \
326 PBC_BCTRL2_ATAFEC_EN|PBC_BCTRL2_ATAFEC_SEL|PBC_BCTRL2_ATA_EN, \
327 PBC_BCTRL2_CLEAR_REG)
328
329#define PBC_ATA_SIGNAL_INACTIVE() \
330 __raw_writew( \
331 PBC_BCTRL2_ATAFEC_EN|PBC_BCTRL2_ATAFEC_SEL|PBC_BCTRL2_ATA_EN, \
332 PBC_BCTRL2_SET_REG)
333
334#define MXC_BD_LED1 (1 << 5)
335#define MXC_BD_LED2 (1 << 6)
336#define MXC_BD_LED_ON(led) \
337 __raw_writew(led, PBC_BCTRL1_SET_REG)
338#define MXC_BD_LED_OFF(led) \
339 __raw_writew(led, PBC_BCTRL1_CLEAR_REG)
340
341/* to determine the correct external crystal reference */
342#define CKIH_27MHZ_BIT_SET (1 << 3)
343
344#endif /* __ASM_ARCH_MXC_BOARD_MX27ADS_H__ */
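
The removed mx27ads header above relies throughout on the PBC's paired set/clear registers: writing a mask to a *_SET_REG asserts those board-control bits and writing the same mask to the matching *_CLEAR_REG deasserts them, so callers never need a read-modify-write. A minimal sketch of how board code used the LED helpers, with the names taken from the removed header:

	MXC_BD_LED_ON(MXC_BD_LED1);	/* __raw_writew(1 << 5, PBC_BCTRL1_SET_REG) */
	MXC_BD_LED_OFF(MXC_BD_LED2);	/* __raw_writew(1 << 6, PBC_BCTRL1_CLEAR_REG) */
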
diff --git a/arch/arm/plat-mxc/include/mach/board-mx27lite.h b/arch/arm/plat-mxc/include/mach/board-mx27lite.h
deleted file mode 100644
index ea87551d2736..000000000000
--- a/arch/arm/plat-mxc/include/mach/board-mx27lite.h
+++ /dev/null
@@ -1,14 +0,0 @@
1/*
2 * Copyright 2009 Freescale Semiconductor, Inc. All Rights Reserved.
3 */
4
5/*
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __ASM_ARCH_MXC_BOARD_MX27LITE_H__
12#define __ASM_ARCH_MXC_BOARD_MX27LITE_H__
13
14#endif /* __ASM_ARCH_MXC_BOARD_MX27LITE_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/board-mx27pdk.h b/arch/arm/plat-mxc/include/mach/board-mx27pdk.h
deleted file mode 100644
index fec1bcfa9164..000000000000
--- a/arch/arm/plat-mxc/include/mach/board-mx27pdk.h
+++ /dev/null
@@ -1,14 +0,0 @@
1/*
2 * Copyright 2009 Freescale Semiconductor, Inc. All Rights Reserved.
3 */
4
5/*
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __ASM_ARCH_MXC_BOARD_MX27PDK_H__
12#define __ASM_ARCH_MXC_BOARD_MX27PDK_H__
13
14#endif /* __ASM_ARCH_MXC_BOARD_MX27PDK_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/board-mx31_3ds.h b/arch/arm/plat-mxc/include/mach/board-mx31_3ds.h
deleted file mode 100644
index da92933a233b..000000000000
--- a/arch/arm/plat-mxc/include/mach/board-mx31_3ds.h
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
3 */
4
5/*
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __ASM_ARCH_MXC_BOARD_MX31_3DS_H__
12#define __ASM_ARCH_MXC_BOARD_MX31_3DS_H__
13
14/* Definitions for components on the Debug board */
15
16/* Base address of CPLD controller on the Debug board */
17#define DEBUG_BASE_ADDRESS CS5_IO_ADDRESS(CS5_BASE_ADDR)
18
19/* LAN9217 ethernet base address */
20#define LAN9217_BASE_ADDR CS5_BASE_ADDR
21
22/* CPLD config and interrupt base address */
23#define CPLD_ADDR (DEBUG_BASE_ADDRESS + 0x20000)
24
25/* LED switches */
26#define CPLD_LED_REG (CPLD_ADDR + 0x00)
27/* buttons */
28#define CPLD_SWITCH_BUTTONS_REG (EXPIO_ADDR + 0x08)
29/* status, interrupt */
30#define CPLD_INT_STATUS_REG (CPLD_ADDR + 0x10)
31#define CPLD_INT_MASK_REG (CPLD_ADDR + 0x38)
32#define CPLD_INT_RESET_REG (CPLD_ADDR + 0x20)
33/* magic word for debug CPLD */
34#define CPLD_MAGIC_NUMBER1_REG (CPLD_ADDR + 0x40)
35#define CPLD_MAGIC_NUMBER2_REG (CPLD_ADDR + 0x48)
36/* CPLD code version */
37#define CPLD_CODE_VER_REG (CPLD_ADDR + 0x50)
38/* magic word for debug CPLD */
39#define CPLD_MAGIC_NUMBER3_REG (CPLD_ADDR + 0x58)
40/* module reset register */
41#define CPLD_MODULE_RESET_REG (CPLD_ADDR + 0x60)
42/* CPU ID and Personality ID */
43#define CPLD_MCU_BOARD_ID_REG (CPLD_ADDR + 0x68)
44
45/* CPLD IRQ line for external uart, external ethernet etc */
46#define EXPIO_PARENT_INT IOMUX_TO_IRQ(MX31_PIN_GPIO1_1)
47
48#define MXC_EXP_IO_BASE (MXC_BOARD_IRQ_START)
49#define MXC_IRQ_TO_EXPIO(irq) ((irq) - MXC_EXP_IO_BASE)
50
51#define EXPIO_INT_ENET (MXC_EXP_IO_BASE + 0)
52#define EXPIO_INT_XUART_A (MXC_EXP_IO_BASE + 1)
53#define EXPIO_INT_XUART_B (MXC_EXP_IO_BASE + 2)
54#define EXPIO_INT_BUTTON_A (MXC_EXP_IO_BASE + 3)
55#define EXPIO_INT_BUTTON_B (MXC_EXP_IO_BASE + 4)
56
57#define MXC_MAX_EXP_IO_LINES 16
58
59#endif /* __ASM_ARCH_MXC_BOARD_MX31_3DS_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/board-mx31ads.h b/arch/arm/plat-mxc/include/mach/board-mx31ads.h
deleted file mode 100644
index 095a199591c6..000000000000
--- a/arch/arm/plat-mxc/include/mach/board-mx31ads.h
+++ /dev/null
@@ -1,117 +0,0 @@
1/*
2 * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 */
4
5/*
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __ASM_ARCH_MXC_BOARD_MX31ADS_H__
12#define __ASM_ARCH_MXC_BOARD_MX31ADS_H__
13
14#include <mach/hardware.h>
15
16/* Base address of PBC controller */
17#define PBC_BASE_ADDRESS MX31_CS4_BASE_ADDR_VIRT
18/* Offsets for the PBC Controller register */
19
20/* PBC Board status register offset */
21#define PBC_BSTAT 0x000002
22
23/* PBC Board control register 1 set address */
24#define PBC_BCTRL1_SET 0x000004
25
26/* PBC Board control register 1 clear address */
27#define PBC_BCTRL1_CLEAR 0x000006
28
29/* PBC Board control register 2 set address */
30#define PBC_BCTRL2_SET 0x000008
31
32/* PBC Board control register 2 clear address */
33#define PBC_BCTRL2_CLEAR 0x00000A
34
35/* PBC Board control register 3 set address */
36#define PBC_BCTRL3_SET 0x00000C
37
38/* PBC Board control register 3 clear address */
39#define PBC_BCTRL3_CLEAR 0x00000E
40
41/* PBC Board control register 4 set address */
42#define PBC_BCTRL4_SET 0x000010
43
44/* PBC Board control register 4 clear address */
45#define PBC_BCTRL4_CLEAR 0x000012
46
47/* PBC Board status register 1 */
48#define PBC_BSTAT1 0x000014
49
50/* PBC Board interrupt status register */
51#define PBC_INTSTATUS 0x000016
52
53/* PBC Board interrupt current status register */
54#define PBC_INTCURR_STATUS 0x000018
55
56/* PBC Interrupt mask register set address */
57#define PBC_INTMASK_SET 0x00001A
58
59/* PBC Interrupt mask register clear address */
60#define PBC_INTMASK_CLEAR 0x00001C
61
62/* External UART A */
63#define PBC_SC16C652_UARTA 0x010000
64
65/* External UART B */
66#define PBC_SC16C652_UARTB 0x010010
67
68/* Ethernet Controller IO base address */
69#define PBC_CS8900A_IOBASE 0x020000
70
71/* Ethernet Controller Memory base address */
72#define PBC_CS8900A_MEMBASE 0x021000
73
74/* Ethernet Controller DMA base address */
75#define PBC_CS8900A_DMABASE 0x022000
76
77/* External chip select 0 */
78#define PBC_XCS0 0x040000
79
80/* LCD Display enable */
81#define PBC_LCD_EN_B 0x060000
82
83/* Code test debug enable */
84#define PBC_CODE_B 0x070000
85
86/* PSRAM memory select */
87#define PBC_PSRAM_B 0x5000000
88
89#define PBC_INTSTATUS_REG (PBC_INTSTATUS + PBC_BASE_ADDRESS)
90#define PBC_INTCURR_STATUS_REG (PBC_INTCURR_STATUS + PBC_BASE_ADDRESS)
91#define PBC_INTMASK_SET_REG (PBC_INTMASK_SET + PBC_BASE_ADDRESS)
92#define PBC_INTMASK_CLEAR_REG (PBC_INTMASK_CLEAR + PBC_BASE_ADDRESS)
93#define EXPIO_PARENT_INT IOMUX_TO_IRQ(MX31_PIN_GPIO1_4)
94
95#define MXC_EXP_IO_BASE (MXC_BOARD_IRQ_START)
96#define MXC_IRQ_TO_EXPIO(irq) ((irq) - MXC_EXP_IO_BASE)
97
98#define EXPIO_INT_LOW_BAT (MXC_EXP_IO_BASE + 0)
99#define EXPIO_INT_PB_IRQ (MXC_EXP_IO_BASE + 1)
100#define EXPIO_INT_OTG_FS_OVR (MXC_EXP_IO_BASE + 2)
101#define EXPIO_INT_FSH_OVR (MXC_EXP_IO_BASE + 3)
102#define EXPIO_INT_RES4 (MXC_EXP_IO_BASE + 4)
103#define EXPIO_INT_RES5 (MXC_EXP_IO_BASE + 5)
104#define EXPIO_INT_RES6 (MXC_EXP_IO_BASE + 6)
105#define EXPIO_INT_RES7 (MXC_EXP_IO_BASE + 7)
106#define EXPIO_INT_ENET_INT (MXC_EXP_IO_BASE + 8)
107#define EXPIO_INT_OTG_FS_INT (MXC_EXP_IO_BASE + 9)
108#define EXPIO_INT_XUART_INTA (MXC_EXP_IO_BASE + 10)
109#define EXPIO_INT_XUART_INTB (MXC_EXP_IO_BASE + 11)
110#define EXPIO_INT_SYNTH_IRQ (MXC_EXP_IO_BASE + 12)
111#define EXPIO_INT_CE_INT1 (MXC_EXP_IO_BASE + 13)
112#define EXPIO_INT_CE_INT2 (MXC_EXP_IO_BASE + 14)
113#define EXPIO_INT_RES15 (MXC_EXP_IO_BASE + 15)
114
115#define MXC_MAX_EXP_IO_LINES 16
116
117#endif /* __ASM_ARCH_MXC_BOARD_MX31ADS_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/board-mx31lilly.h b/arch/arm/plat-mxc/include/mach/board-mx31lilly.h
index eb5a5024622e..0df71bfefbb1 100644
--- a/arch/arm/plat-mxc/include/mach/board-mx31lilly.h
+++ b/arch/arm/plat-mxc/include/mach/board-mx31lilly.h
@@ -31,7 +31,7 @@ enum mx31lilly_boards {
31 31
32/* 32/*
33 * This CPU module needs a baseboard to work. After basic initializing 33 * This CPU module needs a baseboard to work. After basic initializing
34 * its own devices, it calls baseboard's init function. 34 * its own devices, it calls the baseboard's init function.
35 */ 35 */
36 36
37extern void mx31lilly_db_init(void); 37extern void mx31lilly_db_init(void);
diff --git a/arch/arm/plat-mxc/include/mach/board-mx31lite.h b/arch/arm/plat-mxc/include/mach/board-mx31lite.h
index 2b2da0367578..c1ad0ae807cc 100644
--- a/arch/arm/plat-mxc/include/mach/board-mx31lite.h
+++ b/arch/arm/plat-mxc/include/mach/board-mx31lite.h
@@ -32,7 +32,7 @@ enum mx31lite_boards {
32 32
33/* 33/*
34 * This CPU module needs a baseboard to work. After basic initializing 34 * This CPU module needs a baseboard to work. After basic initializing
35 * its own devices, it calls baseboard's init function. 35 * its own devices, it calls the baseboard's init function.
36 */ 36 */
37 37
38extern void mx31lite_db_init(void); 38extern void mx31lite_db_init(void);
diff --git a/arch/arm/plat-mxc/include/mach/board-mx31moboard.h b/arch/arm/plat-mxc/include/mach/board-mx31moboard.h
index 36ff3cedee1a..de14543891cf 100644
--- a/arch/arm/plat-mxc/include/mach/board-mx31moboard.h
+++ b/arch/arm/plat-mxc/include/mach/board-mx31moboard.h
@@ -31,7 +31,7 @@ enum mx31moboard_boards {
31 31
32/* 32/*
33 * This CPU module needs a baseboard to work. After basic initializing 33 * This CPU module needs a baseboard to work. After basic initializing
34 * its own devices, it calls baseboard's init function. 34 * its own devices, it calls the baseboard's init function.
35 */ 35 */
36 36
37extern void mx31moboard_devboard_init(void); 37extern void mx31moboard_devboard_init(void);
diff --git a/arch/arm/plat-mxc/include/mach/board-mx35pdk.h b/arch/arm/plat-mxc/include/mach/board-mx35pdk.h
deleted file mode 100644
index 383f1c04df06..000000000000
--- a/arch/arm/plat-mxc/include/mach/board-mx35pdk.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright 2009 Freescale Semiconductor, Inc. All Rights Reserved
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef __ASM_ARCH_MXC_BOARD_MX35PDK_H__
20#define __ASM_ARCH_MXC_BOARD_MX35PDK_H__
21
22#endif /* __ASM_ARCH_MXC_BOARD_MX35PDK_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/board-pcm037.h b/arch/arm/plat-mxc/include/mach/board-pcm037.h
deleted file mode 100644
index 13411709b13a..000000000000
--- a/arch/arm/plat-mxc/include/mach/board-pcm037.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright (C) 2008 Sascha Hauer, Pengutronix
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef __ASM_ARCH_MXC_BOARD_PCM037_H__
20#define __ASM_ARCH_MXC_BOARD_PCM037_H__
21
22#endif /* __ASM_ARCH_MXC_BOARD_PCM037_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/board-pcm038.h b/arch/arm/plat-mxc/include/mach/board-pcm038.h
index 410f9786ed22..6f371e35753d 100644
--- a/arch/arm/plat-mxc/include/mach/board-pcm038.h
+++ b/arch/arm/plat-mxc/include/mach/board-pcm038.h
@@ -22,7 +22,7 @@
22#ifndef __ASSEMBLY__ 22#ifndef __ASSEMBLY__
23/* 23/*
24 * This CPU module needs a baseboard to work. After basic initializing 24 * This CPU module needs a baseboard to work. After basic initializing
25 * its own devices, it calls baseboard's init function. 25 * its own devices, it calls the baseboard's init function.
26 * TODO: Add your own baseboard init function and call it from 26 * TODO: Add your own baseboard init function and call it from
27 * inside pcm038_init(). 27 * inside pcm038_init().
28 * 28 *
diff --git a/arch/arm/plat-mxc/include/mach/board-pcm043.h b/arch/arm/plat-mxc/include/mach/board-pcm043.h
deleted file mode 100644
index 1ac4e1682e5c..000000000000
--- a/arch/arm/plat-mxc/include/mach/board-pcm043.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright (C) 2008 Sascha Hauer, Pengutronix
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef __ASM_ARCH_MXC_BOARD_PCM043_H__
20#define __ASM_ARCH_MXC_BOARD_PCM043_H__
21
22#endif /* __ASM_ARCH_MXC_BOARD_PCM043_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/board-qong.h b/arch/arm/plat-mxc/include/mach/board-qong.h
deleted file mode 100644
index 6d88c7af4b23..000000000000
--- a/arch/arm/plat-mxc/include/mach/board-qong.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/*
2 * Copyright 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
3 */
4
5/*
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __ASM_ARCH_MXC_BOARD_QONG_H__
12#define __ASM_ARCH_MXC_BOARD_QONG_H__
13
14/* NOR FLASH */
15#define QONG_NOR_SIZE (128*1024*1024)
16
17#endif /* __ASM_ARCH_MXC_BOARD_QONG_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/devices-common.h b/arch/arm/plat-mxc/include/mach/devices-common.h
new file mode 100644
index 000000000000..05c8d3f0a08f
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/devices-common.h
@@ -0,0 +1,42 @@
1/*
2 * Copyright (C) 2009-2010 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#include <linux/kernel.h>
10#include <linux/platform_device.h>
11#include <linux/init.h>
12
13struct platform_device *imx_add_platform_device(const char *name, int id,
14 const struct resource *res, unsigned int num_resources,
15 const void *data, size_t size_data);
16
17#include <mach/i2c.h>
18struct platform_device *__init imx_add_imx_i2c(int id,
19 resource_size_t iobase, resource_size_t iosize, int irq,
20 const struct imxi2c_platform_data *pdata);
21
22#include <mach/imx-uart.h>
23struct platform_device *__init imx_add_imx_uart_3irq(int id,
24 resource_size_t iobase, resource_size_t iosize,
25 resource_size_t irqrx, resource_size_t irqtx,
26 resource_size_t irqrts,
27 const struct imxuart_platform_data *pdata);
28struct platform_device *__init imx_add_imx_uart_1irq(int id,
29 resource_size_t iobase, resource_size_t iosize,
30 resource_size_t irq,
31 const struct imxuart_platform_data *pdata);
32
33#include <mach/mxc_nand.h>
34struct platform_device *__init imx_add_mxc_nand_v1(resource_size_t iobase,
35 int irq, const struct mxc_nand_platform_data *pdata);
36struct platform_device *__init imx_add_mxc_nand_v21(resource_size_t iobase,
37 int irq, const struct mxc_nand_platform_data *pdata);
38
39#include <mach/spi.h>
40struct platform_device *__init imx_add_spi_imx(int id,
41 resource_size_t iobase, resource_size_t iosize, int irq,
42 const struct spi_imx_master *pdata);
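
The new devices-common.h declares the helpers for registering i.MX on-chip peripherals as platform devices at run time; board and SoC code passes in the base address, size, IRQ and optional platform data. A hedged sketch of a board file using two of the helpers directly (the UART address/IRQ macros and the platform-data flag are illustrative and should be checked against the SoC headers):

	static const struct imxuart_platform_data uart_pdata = {
		.flags = IMXUART_HAVE_RTSCTS,	/* assumed flag from mach/imx-uart.h */
	};

	static void __init mybrd_add_devices(void)
	{
		/* i.MX21/27-class UARTs expose a single combined IRQ */
		imx_add_imx_uart_1irq(0, MX27_UART1_BASE_ADDR, SZ_4K,
				      MX27_INT_UART1, &uart_pdata);
		/* no platform data needed for a default I2C setup */
		imx_add_imx_i2c(0, MX27_I2C1_BASE_ADDR, SZ_4K,
				MX27_INT_I2C1, NULL);
	}
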
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mxc91231.h b/arch/arm/plat-mxc/include/mach/iomux-mxc91231.h
index 3887f3fe29d4..15d59510f597 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mxc91231.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mxc91231.h
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 15 */
20 16
21#ifndef __MACH_IOMUX_MXC91231_H__ 17#ifndef __MACH_IOMUX_MXC91231_H__
diff --git a/arch/arm/plat-mxc/include/mach/mx1.h b/arch/arm/plat-mxc/include/mach/mx1.h
index 5eba7e6785de..641b24618239 100644
--- a/arch/arm/plat-mxc/include/mach/mx1.h
+++ b/arch/arm/plat-mxc/include/mach/mx1.h
@@ -91,24 +91,24 @@
91#define MX1_SIM_DATA_INT 16 91#define MX1_SIM_DATA_INT 16
92#define MX1_RTC_INT 17 92#define MX1_RTC_INT 17
93#define MX1_RTC_SAMINT 18 93#define MX1_RTC_SAMINT 18
94#define MX1_UART2_MINT_PFERR 19 94#define MX1_INT_UART2PFERR 19
95#define MX1_UART2_MINT_RTS 20 95#define MX1_INT_UART2RTS 20
96#define MX1_UART2_MINT_DTR 21 96#define MX1_INT_UART2DTR 21
97#define MX1_UART2_MINT_UARTC 22 97#define MX1_INT_UART2UARTC 22
98#define MX1_UART2_MINT_TX 23 98#define MX1_INT_UART2TX 23
99#define MX1_UART2_MINT_RX 24 99#define MX1_INT_UART2RX 24
100#define MX1_UART1_MINT_PFERR 25 100#define MX1_INT_UART1PFERR 25
101#define MX1_UART1_MINT_RTS 26 101#define MX1_INT_UART1RTS 26
102#define MX1_UART1_MINT_DTR 27 102#define MX1_INT_UART1DTR 27
103#define MX1_UART1_MINT_UARTC 28 103#define MX1_INT_UART1UARTC 28
104#define MX1_UART1_MINT_TX 29 104#define MX1_INT_UART1TX 29
105#define MX1_UART1_MINT_RX 30 105#define MX1_INT_UART1RX 30
106#define MX1_VOICE_DAC_INT 31 106#define MX1_VOICE_DAC_INT 31
107#define MX1_VOICE_ADC_INT 32 107#define MX1_VOICE_ADC_INT 32
108#define MX1_PEN_DATA_INT 33 108#define MX1_PEN_DATA_INT 33
109#define MX1_PWM_INT 34 109#define MX1_PWM_INT 34
110#define MX1_SDHC_INT 35 110#define MX1_SDHC_INT 35
111#define MX1_I2C_INT 39 111#define MX1_INT_I2C 39
112#define MX1_CSPI_INT 41 112#define MX1_CSPI_INT 41
113#define MX1_SSI_TX_INT 42 113#define MX1_SSI_TX_INT 42
114#define MX1_SSI_TX_ERR_INT 43 114#define MX1_SSI_TX_ERR_INT 43
@@ -245,7 +245,7 @@
245#define PEN_DATA_INT MX1_PEN_DATA_INT 245#define PEN_DATA_INT MX1_PEN_DATA_INT
246#define PWM_INT MX1_PWM_INT 246#define PWM_INT MX1_PWM_INT
247#define SDHC_INT MX1_SDHC_INT 247#define SDHC_INT MX1_SDHC_INT
248#define I2C_INT MX1_I2C_INT 248#define I2C_INT MX1_INT_I2C
249#define CSPI_INT MX1_CSPI_INT 249#define CSPI_INT MX1_CSPI_INT
250#define SSI_TX_INT MX1_SSI_TX_INT 250#define SSI_TX_INT MX1_SSI_TX_INT
251#define SSI_TX_ERR_INT MX1_SSI_TX_ERR_INT 251#define SSI_TX_ERR_INT MX1_SSI_TX_ERR_INT
diff --git a/arch/arm/plat-mxc/include/mach/mx25.h b/arch/arm/plat-mxc/include/mach/mx25.h
index 7516f2949afe..2f2aad1032c1 100644
--- a/arch/arm/plat-mxc/include/mach/mx25.h
+++ b/arch/arm/plat-mxc/include/mach/mx25.h
@@ -11,6 +11,10 @@
11#define MX25_AVIC_BASE_ADDR_VIRT 0xfc400000 11#define MX25_AVIC_BASE_ADDR_VIRT 0xfc400000
12#define MX25_AVIC_SIZE SZ_1M 12#define MX25_AVIC_SIZE SZ_1M
13 13
14#define MX25_I2C1_BASE_ADDR (MX25_AIPS1_BASE_ADDR + 0x80000)
15#define MX25_I2C3_BASE_ADDR (MX25_AIPS1_BASE_ADDR + 0x84000)
16#define MX25_I2C2_BASE_ADDR (MX25_AIPS1_BASE_ADDR + 0x98000)
17#define MX25_CSPI1_BASE_ADDR (MX25_AIPS1_BASE_ADDR + 0xa4000)
14#define MX25_IOMUXC_BASE_ADDR (MX25_AIPS1_BASE_ADDR + 0xac000) 18#define MX25_IOMUXC_BASE_ADDR (MX25_AIPS1_BASE_ADDR + 0xac000)
15 19
16#define MX25_CRM_BASE_ADDR (MX25_AIPS2_BASE_ADDR + 0x80000) 20#define MX25_CRM_BASE_ADDR (MX25_AIPS2_BASE_ADDR + 0x80000)
@@ -30,7 +34,12 @@
30#define MX25_UART1_BASE_ADDR 0x43f90000 34#define MX25_UART1_BASE_ADDR 0x43f90000
31#define MX25_UART2_BASE_ADDR 0x43f94000 35#define MX25_UART2_BASE_ADDR 0x43f94000
32#define MX25_AUDMUX_BASE_ADDR 0x43fb0000 36#define MX25_AUDMUX_BASE_ADDR 0x43fb0000
37#define MX25_UART3_BASE_ADDR 0x5000c000
38#define MX25_UART4_BASE_ADDR 0x50008000
39#define MX25_UART5_BASE_ADDR 0x5002c000
33 40
41#define MX25_CSPI3_BASE_ADDR 0x50004000
42#define MX25_CSPI2_BASE_ADDR 0x50010000
34#define MX25_FEC_BASE_ADDR 0x50038000 43#define MX25_FEC_BASE_ADDR 0x50038000
35#define MX25_SSI2_BASE_ADDR 0x50014000 44#define MX25_SSI2_BASE_ADDR 0x50014000
36#define MX25_SSI1_BASE_ADDR 0x50034000 45#define MX25_SSI1_BASE_ADDR 0x50034000
@@ -41,14 +50,25 @@
41#define MX25_OTG_BASE_ADDR 0x53ff4000 50#define MX25_OTG_BASE_ADDR 0x53ff4000
42#define MX25_CSI_BASE_ADDR 0x53ff8000 51#define MX25_CSI_BASE_ADDR 0x53ff8000
43 52
44#define MX25_INT_SSI2 11 53#define MX25_INT_CSPI3 0
45#define MX25_INT_SSI1 12 54#define MX25_INT_I2C1 3
46#define MX25_INT_CSI 17 55#define MX25_INT_I2C2 4
47#define MX25_INT_DRYICE 25 56#define MX25_INT_UART4 5
48#define MX25_INT_NANDFC 33 57#define MX25_INT_I2C3 10
49#define MX25_INT_LCDC 39 58#define MX25_INT_SSI2 11
50#define MX25_INT_KPP 24 59#define MX25_INT_SSI1 12
51#define MX25_INT_FEC 57 60#define MX25_INT_CSPI2 13
61#define MX25_INT_CSPI1 14
62#define MX25_INT_CSI 17
63#define MX25_INT_UART3 18
64#define MX25_INT_KPP 24
65#define MX25_INT_DRYICE 25
66#define MX25_INT_UART2 32
67#define MX25_INT_NANDFC 33
68#define MX25_INT_LCDC 39
69#define MX25_INT_UART5 40
70#define MX25_INT_UART1 45
71#define MX25_INT_FEC 57
52 72
53#if defined(IMX_NEEDS_DEPRECATED_SYMBOLS) 73#if defined(IMX_NEEDS_DEPRECATED_SYMBOLS)
54#define UART1_BASE_ADDR MX25_UART1_BASE_ADDR 74#define UART1_BASE_ADDR MX25_UART1_BASE_ADDR
diff --git a/arch/arm/plat-mxc/include/mach/mx27.h b/arch/arm/plat-mxc/include/mach/mx27.h
index bae9cd75beee..a8ab2e02a8ca 100644
--- a/arch/arm/plat-mxc/include/mach/mx27.h
+++ b/arch/arm/plat-mxc/include/mach/mx27.h
@@ -48,7 +48,7 @@
48#define MX27_CSPI2_BASE_ADDR (MX27_AIPI_BASE_ADDR + 0x0f000) 48#define MX27_CSPI2_BASE_ADDR (MX27_AIPI_BASE_ADDR + 0x0f000)
49#define MX27_SSI1_BASE_ADDR (MX27_AIPI_BASE_ADDR + 0x10000) 49#define MX27_SSI1_BASE_ADDR (MX27_AIPI_BASE_ADDR + 0x10000)
50#define MX27_SSI2_BASE_ADDR (MX27_AIPI_BASE_ADDR + 0x11000) 50#define MX27_SSI2_BASE_ADDR (MX27_AIPI_BASE_ADDR + 0x11000)
51#define MX27_I2C_BASE_ADDR (MX27_AIPI_BASE_ADDR + 0x12000) 51#define MX27_I2C1_BASE_ADDR (MX27_AIPI_BASE_ADDR + 0x12000)
52#define MX27_SDHC1_BASE_ADDR (MX27_AIPI_BASE_ADDR + 0x13000) 52#define MX27_SDHC1_BASE_ADDR (MX27_AIPI_BASE_ADDR + 0x13000)
53#define MX27_SDHC2_BASE_ADDR (MX27_AIPI_BASE_ADDR + 0x14000) 53#define MX27_SDHC2_BASE_ADDR (MX27_AIPI_BASE_ADDR + 0x14000)
54#define MX27_GPIO_BASE_ADDR (MX27_AIPI_BASE_ADDR + 0x15000) 54#define MX27_GPIO_BASE_ADDR (MX27_AIPI_BASE_ADDR + 0x15000)
@@ -150,7 +150,7 @@ static inline void mx27_setup_weimcs(size_t cs,
150#define MX27_INT_SDHC3 9 150#define MX27_INT_SDHC3 9
151#define MX27_INT_SDHC2 10 151#define MX27_INT_SDHC2 10
152#define MX27_INT_SDHC1 11 152#define MX27_INT_SDHC1 11
153#define MX27_INT_I2C 12 153#define MX27_INT_I2C1 12
154#define MX27_INT_SSI2 13 154#define MX27_INT_SSI2 13
155#define MX27_INT_SSI1 14 155#define MX27_INT_SSI1 14
156#define MX27_INT_CSPI2 15 156#define MX27_INT_CSPI2 15
diff --git a/arch/arm/plat-mxc/include/mach/mx31.h b/arch/arm/plat-mxc/include/mach/mx31.h
index fb90e119c2b5..afee3ab9d62e 100644
--- a/arch/arm/plat-mxc/include/mach/mx31.h
+++ b/arch/arm/plat-mxc/include/mach/mx31.h
@@ -23,7 +23,7 @@
23#define MX31_ETB_SLOT4_BASE_ADDR (MX31_AIPS1_BASE_ADDR + 0x10000) 23#define MX31_ETB_SLOT4_BASE_ADDR (MX31_AIPS1_BASE_ADDR + 0x10000)
24#define MX31_ETB_SLOT5_BASE_ADDR (MX31_AIPS1_BASE_ADDR + 0x14000) 24#define MX31_ETB_SLOT5_BASE_ADDR (MX31_AIPS1_BASE_ADDR + 0x14000)
25#define MX31_ECT_CTIO_BASE_ADDR (MX31_AIPS1_BASE_ADDR + 0x18000) 25#define MX31_ECT_CTIO_BASE_ADDR (MX31_AIPS1_BASE_ADDR + 0x18000)
26#define MX31_I2C_BASE_ADDR (MX31_AIPS1_BASE_ADDR + 0x80000) 26#define MX31_I2C1_BASE_ADDR (MX31_AIPS1_BASE_ADDR + 0x80000)
27#define MX31_I2C3_BASE_ADDR (MX31_AIPS1_BASE_ADDR + 0x84000) 27#define MX31_I2C3_BASE_ADDR (MX31_AIPS1_BASE_ADDR + 0x84000)
28#define MX31_OTG_BASE_ADDR (MX31_AIPS1_BASE_ADDR + 0x88000) 28#define MX31_OTG_BASE_ADDR (MX31_AIPS1_BASE_ADDR + 0x88000)
29#define MX31_ATA_BASE_ADDR (MX31_AIPS1_BASE_ADDR + 0x8c000) 29#define MX31_ATA_BASE_ADDR (MX31_AIPS1_BASE_ADDR + 0x8c000)
@@ -145,7 +145,7 @@ static inline void mx31_setup_weimcs(size_t cs,
145#define MX31_INT_FIRI 7 145#define MX31_INT_FIRI 7
146#define MX31_INT_MMC_SDHC2 8 146#define MX31_INT_MMC_SDHC2 8
147#define MX31_INT_MMC_SDHC1 9 147#define MX31_INT_MMC_SDHC1 9
148#define MX31_INT_I2C 10 148#define MX31_INT_I2C1 10
149#define MX31_INT_SSI2 11 149#define MX31_INT_SSI2 11
150#define MX31_INT_SSI1 12 150#define MX31_INT_SSI1 12
151#define MX31_INT_CSPI2 13 151#define MX31_INT_CSPI2 13
diff --git a/arch/arm/plat-mxc/include/mach/mx35.h b/arch/arm/plat-mxc/include/mach/mx35.h
index 526a55842ae5..cda60c715127 100644
--- a/arch/arm/plat-mxc/include/mach/mx35.h
+++ b/arch/arm/plat-mxc/include/mach/mx35.h
@@ -18,7 +18,7 @@
18#define MX35_ETB_SLOT4_BASE_ADDR (MX35_AIPS1_BASE_ADDR + 0x10000) 18#define MX35_ETB_SLOT4_BASE_ADDR (MX35_AIPS1_BASE_ADDR + 0x10000)
19#define MX35_ETB_SLOT5_BASE_ADDR (MX35_AIPS1_BASE_ADDR + 0x14000) 19#define MX35_ETB_SLOT5_BASE_ADDR (MX35_AIPS1_BASE_ADDR + 0x14000)
20#define MX35_ECT_CTIO_BASE_ADDR (MX35_AIPS1_BASE_ADDR + 0x18000) 20#define MX35_ECT_CTIO_BASE_ADDR (MX35_AIPS1_BASE_ADDR + 0x18000)
21#define MX35_I2C_BASE_ADDR (MX35_AIPS1_BASE_ADDR + 0x80000) 21#define MX35_I2C1_BASE_ADDR (MX35_AIPS1_BASE_ADDR + 0x80000)
22#define MX35_I2C3_BASE_ADDR (MX35_AIPS1_BASE_ADDR + 0x84000) 22#define MX35_I2C3_BASE_ADDR (MX35_AIPS1_BASE_ADDR + 0x84000)
23#define MX35_UART1_BASE_ADDR (MX35_AIPS1_BASE_ADDR + 0x90000) 23#define MX35_UART1_BASE_ADDR (MX35_AIPS1_BASE_ADDR + 0x90000)
24#define MX35_UART2_BASE_ADDR (MX35_AIPS1_BASE_ADDR + 0x94000) 24#define MX35_UART2_BASE_ADDR (MX35_AIPS1_BASE_ADDR + 0x94000)
@@ -123,7 +123,7 @@
123#define MX35_INT_MMC_SDHC1 7 123#define MX35_INT_MMC_SDHC1 7
124#define MX35_INT_MMC_SDHC2 8 124#define MX35_INT_MMC_SDHC2 8
125#define MX35_INT_MMC_SDHC3 9 125#define MX35_INT_MMC_SDHC3 9
126#define MX35_INT_I2C 10 126#define MX35_INT_I2C1 10
127#define MX35_INT_SSI1 11 127#define MX35_INT_SSI1 11
128#define MX35_INT_SSI2 12 128#define MX35_INT_SSI2 12
129#define MX35_INT_CSPI2 13 129#define MX35_INT_CSPI2 13
diff --git a/arch/arm/plat-mxc/include/mach/mx3_camera.h b/arch/arm/plat-mxc/include/mach/mx3_camera.h
index 36d7ff27b5e2..f226ee3777e1 100644
--- a/arch/arm/plat-mxc/include/mach/mx3_camera.h
+++ b/arch/arm/plat-mxc/include/mach/mx3_camera.h
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20 16
21#ifndef _MX3_CAMERA_H_ 17#ifndef _MX3_CAMERA_H_
diff --git a/arch/arm/plat-mxc/include/mach/mxc91231.h b/arch/arm/plat-mxc/include/mach/mxc91231.h
index 5182b986b785..0ca3101ebf36 100644
--- a/arch/arm/plat-mxc/include/mach/mxc91231.h
+++ b/arch/arm/plat-mxc/include/mach/mxc91231.h
@@ -13,10 +13,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 16 */
21#ifndef __MACH_MXC91231_H__ 17#ifndef __MACH_MXC91231_H__
22#define __MACH_MXC91231_H__ 18#define __MACH_MXC91231_H__
diff --git a/arch/arm/plat-mxc/include/mach/mxc_nand.h b/arch/arm/plat-mxc/include/mach/mxc_nand.h
index 2d74748c5db7..04c0d060d814 100644
--- a/arch/arm/plat-mxc/include/mach/mxc_nand.h
+++ b/arch/arm/plat-mxc/include/mach/mxc_nand.h
@@ -23,9 +23,9 @@
23#include <linux/mtd/partitions.h> 23#include <linux/mtd/partitions.h>
24 24
25struct mxc_nand_platform_data { 25struct mxc_nand_platform_data {
26 int width; /* data bus width in bytes */ 26 unsigned int width; /* data bus width in bytes */
27 int hw_ecc:1; /* 0 to suppress hardware ECC */ 27 unsigned int hw_ecc:1; /* 0 to suppress hardware ECC */
28 int flash_bbt:1; /* set to 1 to use a flash based bbt */ 28 unsigned int flash_bbt:1; /* set to 1 to use a flash based bbt */
29 struct mtd_partition *parts; /* partition table */ 29 struct mtd_partition *parts; /* partition table */
30 int nr_parts; /* size of parts */ 30 int nr_parts; /* size of parts */
31}; 31};
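
Widening the bitfields from int to unsigned int sidesteps a signedness trap: on common ABIs a plain int 1-bit bitfield can only hold 0 and -1, so assigning 1 and later testing the field against 1 misbehaves. A standalone illustration (ordinary C outside the kernel, names invented for the example):

	#include <stdio.h>

	struct before { int hw_ecc:1; };
	struct after  { unsigned int hw_ecc:1; };

	int main(void)
	{
		struct before b = { .hw_ecc = 1 };	/* typically stored as -1 */
		struct after  a = { .hw_ecc = 1 };	/* stored as 1 */
		printf("%d %d\n", b.hw_ecc, a.hw_ecc);	/* commonly prints "-1 1" */
		return 0;
	}
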
diff --git a/arch/arm/plat-mxc/include/mach/system.h b/arch/arm/plat-mxc/include/mach/system.h
index ef00199568de..4acd1143a9bd 100644
--- a/arch/arm/plat-mxc/include/mach/system.h
+++ b/arch/arm/plat-mxc/include/mach/system.h
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */ 15 */
20 16
21#ifndef __ASM_ARCH_MXC_SYSTEM_H__ 17#ifndef __ASM_ARCH_MXC_SYSTEM_H__
diff --git a/arch/arm/plat-mxc/include/mach/timex.h b/arch/arm/plat-mxc/include/mach/timex.h
index 024416ed11cd..2d9624697cc9 100644
--- a/arch/arm/plat-mxc/include/mach/timex.h
+++ b/arch/arm/plat-mxc/include/mach/timex.h
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 14 */
19 15
20#ifndef __ASM_ARCH_MXC_TIMEX_H__ 16#ifndef __ASM_ARCH_MXC_TIMEX_H__
diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h
index b6d3d0fddc48..d9bd37e4667a 100644
--- a/arch/arm/plat-mxc/include/mach/uncompress.h
+++ b/arch/arm/plat-mxc/include/mach/uncompress.h
@@ -13,10 +13,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 16 */
21#ifndef __ASM_ARCH_MXC_UNCOMPRESS_H__ 17#ifndef __ASM_ARCH_MXC_UNCOMPRESS_H__
22#define __ASM_ARCH_MXC_UNCOMPRESS_H__ 18#define __ASM_ARCH_MXC_UNCOMPRESS_H__
diff --git a/arch/arm/plat-mxc/include/mach/vmalloc.h b/arch/arm/plat-mxc/include/mach/vmalloc.h
index 44243a278434..ef6379c474be 100644
--- a/arch/arm/plat-mxc/include/mach/vmalloc.h
+++ b/arch/arm/plat-mxc/include/mach/vmalloc.h
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 14 */
19 15
20#ifndef __ASM_ARCH_MXC_VMALLOC_H__ 16#ifndef __ASM_ARCH_MXC_VMALLOC_H__
diff --git a/arch/arm/plat-mxc/irq.c b/arch/arm/plat-mxc/irq.c
index 778ddfe57d89..7331f2ace5fe 100644
--- a/arch/arm/plat-mxc/irq.c
+++ b/arch/arm/plat-mxc/irq.c
@@ -142,9 +142,6 @@ void __init mxc_init_irq(void __iomem *irqbase)
142 for (i = 0; i < 8; i++) 142 for (i = 0; i < 8; i++)
143 __raw_writel(0, avic_base + AVIC_NIPRIORITY(i)); 143 __raw_writel(0, avic_base + AVIC_NIPRIORITY(i));
144 144
145 /* init architectures chained interrupt handler */
146 mxc_register_gpios();
147
148#ifdef CONFIG_FIQ 145#ifdef CONFIG_FIQ
149 /* Initialize FIQ */ 146 /* Initialize FIQ */
150 init_FIQ(); 147 init_FIQ();
diff --git a/arch/arm/plat-mxc/system.c b/arch/arm/plat-mxc/system.c
index 97f42799fa58..925bce4607e7 100644
--- a/arch/arm/plat-mxc/system.c
+++ b/arch/arm/plat-mxc/system.c
@@ -14,10 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 17 */
22 18
23#include <linux/kernel.h> 19#include <linux/kernel.h>
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index 9b86d2a60d43..b3da9aad4295 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -145,8 +145,6 @@ void __init tzic_init_irq(void __iomem *irqbase)
145 set_irq_handler(i, handle_level_irq); 145 set_irq_handler(i, handle_level_irq);
146 set_irq_flags(i, IRQF_VALID); 146 set_irq_flags(i, IRQF_VALID);
147 } 147 }
148 mxc_register_gpios();
149
150 pr_info("TrustZone Interrupt Controller (TZIC) initialized\n"); 148 pr_info("TrustZone Interrupt Controller (TZIC) initialized\n");
151} 149}
152 150
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index dc2ac42d6319..393e9219a5b6 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -624,79 +624,58 @@ do { \
624 __raw_writel(l, base + reg); \ 624 __raw_writel(l, base + reg); \
625} while(0) 625} while(0)
626 626
627void omap_set_gpio_debounce(int gpio, int enable) 627/**
628 * _set_gpio_debounce - low level gpio debounce time
629 * @bank: the gpio bank we're acting upon
630 * @gpio: the gpio number on this @bank
631 * @debounce: debounce time to use
632 *
633 * OMAP's debounce time is in 31us steps so we need
634 * to convert and round up to the closest unit.
635 */
636static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
637 unsigned debounce)
628{ 638{
629 struct gpio_bank *bank; 639 void __iomem *reg = bank->base;
630 void __iomem *reg; 640 u32 val;
631 unsigned long flags; 641 u32 l;
632 u32 val, l = 1 << get_gpio_index(gpio); 642
643 if (debounce < 32)
644 debounce = 0x01;
645 else if (debounce > 7936)
646 debounce = 0xff;
647 else
648 debounce = (debounce / 0x1f) - 1;
633 649
634 if (cpu_class_is_omap1()) 650 l = 1 << get_gpio_index(gpio);
635 return;
636 651
637 bank = get_gpio_bank(gpio); 652 if (cpu_is_omap44xx())
638 reg = bank->base; 653 reg += OMAP4_GPIO_DEBOUNCINGTIME;
654 else
655 reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
656
657 __raw_writel(debounce, reg);
639 658
659 reg = bank->base;
640 if (cpu_is_omap44xx()) 660 if (cpu_is_omap44xx())
641 reg += OMAP4_GPIO_DEBOUNCENABLE; 661 reg += OMAP4_GPIO_DEBOUNCENABLE;
642 else 662 else
643 reg += OMAP24XX_GPIO_DEBOUNCE_EN; 663 reg += OMAP24XX_GPIO_DEBOUNCE_EN;
644 664
645 if (!(bank->mod_usage & l)) {
646 printk(KERN_ERR "GPIO %d not requested\n", gpio);
647 return;
648 }
649
650 spin_lock_irqsave(&bank->lock, flags);
651 val = __raw_readl(reg); 665 val = __raw_readl(reg);
652 666
653 if (enable && !(val & l)) 667 if (debounce) {
654 val |= l; 668 val |= l;
655 else if (!enable && (val & l)) 669 if (cpu_is_omap34xx() || cpu_is_omap44xx())
656 val &= ~l;
657 else
658 goto done;
659
660 if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
661 bank->dbck_enable_mask = val;
662 if (enable)
663 clk_enable(bank->dbck); 670 clk_enable(bank->dbck);
664 else 671 } else {
672 val &= ~l;
673 if (cpu_is_omap34xx() || cpu_is_omap44xx())
665 clk_disable(bank->dbck); 674 clk_disable(bank->dbck);
666 } 675 }
667 676
668 __raw_writel(val, reg); 677 __raw_writel(val, reg);
669done:
670 spin_unlock_irqrestore(&bank->lock, flags);
671} 678}
672EXPORT_SYMBOL(omap_set_gpio_debounce);
673
674void omap_set_gpio_debounce_time(int gpio, int enc_time)
675{
676 struct gpio_bank *bank;
677 void __iomem *reg;
678
679 if (cpu_class_is_omap1())
680 return;
681
682 bank = get_gpio_bank(gpio);
683 reg = bank->base;
684
685 if (!bank->mod_usage) {
686 printk(KERN_ERR "GPIO not requested\n");
687 return;
688 }
689
690 enc_time &= 0xff;
691
692 if (cpu_is_omap44xx())
693 reg += OMAP4_GPIO_DEBOUNCINGTIME;
694 else
695 reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
696
697 __raw_writel(enc_time, reg);
698}
699EXPORT_SYMBOL(omap_set_gpio_debounce_time);
700 679
701#ifdef CONFIG_ARCH_OMAP2PLUS 680#ifdef CONFIG_ARCH_OMAP2PLUS
702static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio, 681static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
@@ -1656,6 +1635,20 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
1656 return 0; 1635 return 0;
1657} 1636}
1658 1637
1638static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
1639 unsigned debounce)
1640{
1641 struct gpio_bank *bank;
1642 unsigned long flags;
1643
1644 bank = container_of(chip, struct gpio_bank, chip);
1645 spin_lock_irqsave(&bank->lock, flags);
1646 _set_gpio_debounce(bank, offset, debounce);
1647 spin_unlock_irqrestore(&bank->lock, flags);
1648
1649 return 0;
1650}
1651
1659static void gpio_set(struct gpio_chip *chip, unsigned offset, int value) 1652static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
1660{ 1653{
1661 struct gpio_bank *bank; 1654 struct gpio_bank *bank;
@@ -1909,6 +1902,7 @@ static int __init _omap_gpio_init(void)
1909 bank->chip.direction_input = gpio_input; 1902 bank->chip.direction_input = gpio_input;
1910 bank->chip.get = gpio_get; 1903 bank->chip.get = gpio_get;
1911 bank->chip.direction_output = gpio_output; 1904 bank->chip.direction_output = gpio_output;
1905 bank->chip.set_debounce = gpio_debounce;
1912 bank->chip.set = gpio_set; 1906 bank->chip.set = gpio_set;
1913 bank->chip.to_irq = gpio_2irq; 1907 bank->chip.to_irq = gpio_2irq;
1914 if (bank_is_mpuio(bank)) { 1908 if (bank_is_mpuio(bank)) {
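
The rework above folds the two exported omap_set_gpio_debounce*() calls into one _set_gpio_debounce() helper reached through gpiolib's new set_debounce hook, with the debounce time now given in microseconds and converted to the hardware's 31 us steps. A small user-space sketch of just that conversion, mirroring the clamping in the patch:

	/* Encode a debounce time in microseconds as the OMAP 31us-step register value. */
	static unsigned int omap_debounce_encode(unsigned int debounce_us)
	{
		if (debounce_us < 32)
			return 0x01;
		if (debounce_us > 7936)
			return 0xff;
		return (debounce_us / 0x1f) - 1;
	}

Consumers would normally reach this path through the generic GPIO API, e.g. gpio_set_debounce(gpio, 100), assuming that gpiolib wrapper is available in this kernel generation.
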
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
index 5cb2dd1da632..11d6a1bbd90d 100644
--- a/arch/arm/plat-s5p/Kconfig
+++ b/arch/arm/plat-s5p/Kconfig
@@ -29,3 +29,4 @@ config S5P_EXT_INT
29 bool 29 bool
30 help 30 help
31 Use the external interrupts (other than GPIO interrupts.) 31 Use the external interrupts (other than GPIO interrupts.)
32 Note: Do not choose this for S5P6440.
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-s5p/clock.c
index 24a931fd8d3b..b5e255265f20 100644
--- a/arch/arm/plat-s5p/clock.c
+++ b/arch/arm/plat-s5p/clock.c
@@ -148,6 +148,7 @@ static struct clk *s5p_clks[] __initdata = {
148 &clk_fout_vpll, 148 &clk_fout_vpll,
149 &clk_arm, 149 &clk_arm,
150 &clk_vpll, 150 &clk_vpll,
151 &clk_xusbxti,
151}; 152};
152 153
153void __init s5p_register_clocks(unsigned long xtal_freq) 154void __init s5p_register_clocks(unsigned long xtal_freq)
diff --git a/arch/arm/plat-s5p/include/plat/irqs.h b/arch/arm/plat-s5p/include/plat/irqs.h
index 9ff3d718be39..3fb3a3a17465 100644
--- a/arch/arm/plat-s5p/include/plat/irqs.h
+++ b/arch/arm/plat-s5p/include/plat/irqs.h
@@ -87,4 +87,11 @@
87#define IRQ_TIMER3 S5P_TIMER_IRQ(3) 87#define IRQ_TIMER3 S5P_TIMER_IRQ(3)
88#define IRQ_TIMER4 S5P_TIMER_IRQ(4) 88#define IRQ_TIMER4 S5P_TIMER_IRQ(4)
89 89
90#define IRQ_EINT(x) ((x) < 16 ? ((x) + S5P_EINT_BASE1) \
91 : ((x) - 16 + S5P_EINT_BASE2))
92
93#define EINT_OFFSET(irq) ((irq) < S5P_EINT_BASE2 ? \
94 ((irq) - S5P_EINT_BASE1) : \
95 ((irq) + 16 - S5P_EINT_BASE2))
96
90#endif /* __ASM_PLAT_S5P_IRQS_H */ 97#endif /* __ASM_PLAT_S5P_IRQS_H */
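
The two new macros give a reversible mapping between external-interrupt numbers and Linux IRQ numbers: EINT0..15 land in the range starting at S5P_EINT_BASE1 and EINT16..31 in the range starting at S5P_EINT_BASE2, both per-SoC constants defined elsewhere. A worked example with illustrative base values:

	/* Suppose S5P_EINT_BASE1 == 160 and S5P_EINT_BASE2 == 176 (made-up values). */
	IRQ_EINT(3);		/* ->  3 + 160       = 163 */
	IRQ_EINT(20);		/* -> 20 - 16 + 176  = 180 */
	EINT_OFFSET(163);	/* -> 163 - 160      =   3 */
	EINT_OFFSET(180);	/* -> 180 + 16 - 176 =  20 */
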
diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-s5p/irq-eint.c
index eaa70aa0127b..e56c8075df97 100644
--- a/arch/arm/plat-s5p/irq-eint.c
+++ b/arch/arm/plat-s5p/irq-eint.c
@@ -60,7 +60,7 @@ static void s5p_irq_eint_maskack(unsigned int irq)
60 60
61static int s5p_irq_eint_set_type(unsigned int irq, unsigned int type) 61static int s5p_irq_eint_set_type(unsigned int irq, unsigned int type)
62{ 62{
63 int offs = eint_offset(irq); 63 int offs = EINT_OFFSET(irq);
64 int shift; 64 int shift;
65 u32 ctrl, mask; 65 u32 ctrl, mask;
66 u32 newvalue = 0; 66 u32 newvalue = 0;
@@ -139,17 +139,16 @@ static struct irq_chip s5p_irq_eint = {
139 */ 139 */
140static inline void s5p_irq_demux_eint(unsigned int start) 140static inline void s5p_irq_demux_eint(unsigned int start)
141{ 141{
142 u32 status; 142 u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
143 u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start))); 143 u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
144 unsigned int irq; 144 unsigned int irq;
145 145
146 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
147 status &= ~mask; 146 status &= ~mask;
148 status &= 0xff; 147 status &= 0xff;
149 148
150 while (status) { 149 while (status) {
151 irq = fls(status); 150 irq = fls(status) - 1;
152 generic_handle_irq(irq - 1 + start); 151 generic_handle_irq(irq + start);
153 status &= ~(1 << irq); 152 status &= ~(1 << irq);
154 } 153 }
155} 154}
@@ -162,12 +161,18 @@ static void s5p_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
162 161
163static inline void s5p_irq_vic_eint_mask(unsigned int irq) 162static inline void s5p_irq_vic_eint_mask(unsigned int irq)
164{ 163{
164 void __iomem *base = get_irq_chip_data(irq);
165
165 s5p_irq_eint_mask(irq); 166 s5p_irq_eint_mask(irq);
167 writel(1 << EINT_OFFSET(irq), base + VIC_INT_ENABLE_CLEAR);
166} 168}
167 169
168static void s5p_irq_vic_eint_unmask(unsigned int irq) 170static void s5p_irq_vic_eint_unmask(unsigned int irq)
169{ 171{
172 void __iomem *base = get_irq_chip_data(irq);
173
170 s5p_irq_eint_unmask(irq); 174 s5p_irq_eint_unmask(irq);
175 writel(1 << EINT_OFFSET(irq), base + VIC_INT_ENABLE);
171} 176}
172 177
173static inline void s5p_irq_vic_eint_ack(unsigned int irq) 178static inline void s5p_irq_vic_eint_ack(unsigned int irq)
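
The demux change above is a correctness fix: fls() returns a 1-based bit index, so the old loop handled the right IRQ but then cleared the wrong bit (one above the pending one), which could leave the loop spinning on the same source. Subtracting 1 once keeps the handled IRQ and the cleared bit in step. A standalone sketch of the corrected pattern, using a local fls() stand-in and a placeholder handler:

	static int fls32(unsigned int x)	/* 1-based index of the highest set bit */
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	static void demux(unsigned int status, unsigned int start)
	{
		while (status) {
			unsigned int irq = fls32(status) - 1;	/* 0-based bit number */
			handle_irq(irq + start);	/* placeholder for generic_handle_irq() */
			status &= ~(1u << irq);		/* clear the bit just handled */
		}
	}
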
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index 34efdd2b032c..db4112c6f2be 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -43,6 +43,11 @@ struct s3c_gpio_chip;
43 * layouts. Provide a pointer to the vector control routine and provide any 43 * layouts. Provide a pointer to the vector control routine and provide any
44 * per-bank configuration information that other systems such as the 44 * per-bank configuration information that other systems such as the
45 * external interrupt code will need. 45 * external interrupt code will need.
46 *
47 * @sa s3c_gpio_cfgpin
48 * @sa s3c_gpio_getcfg
49 * @sa s3c_gpio_setpull
50 * @sa s3c_gpio_getpull
46 */ 51 */
47struct s3c_gpio_cfg { 52struct s3c_gpio_cfg {
48 unsigned int cfg_eint; 53 unsigned int cfg_eint;
@@ -70,11 +75,25 @@ struct s3c_gpio_cfg {
70/** 75/**
71 * s3c_gpio_cfgpin() - Change the GPIO function of a pin. 76 * s3c_gpio_cfgpin() - Change the GPIO function of a pin.
72 * @pin pin The pin number to configure. 77 * @pin pin The pin number to configure.
73 * @pin to The configuration for the pin's function. 78 * @to to The configuration for the pin's function.
74 * 79 *
75 * Configure which function is actually connected to the external 80 * Configure which function is actually connected to the external
76 * pin, such as a gpio input, output or some form of special function 81 * pin, such as a gpio input, output or some form of special function
77 * connected to an internal peripheral block. 82 * connected to an internal peripheral block.
83 *
84 * The @to parameter can be one of the generic S3C_GPIO_INPUT, S3C_GPIO_OUTPUT
85 * or S3C_GPIO_SFN() values; the helper will then generate the correct bit
86 * mask and shift for the configuration.
87 *
88 * If a bank of GPIOs all needs to be set to special-function 2, then
89 * the following code will work:
90 *
91 * for (gpio = start; gpio < end; gpio++)
92 * s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2));
93 *
94 * The @to parameter can also be a specific value already shifted to the
95 * correct position in the control register, although these are discouraged
96 * in newer kernels and are only being kept for compatibility.
78 */ 97 */
79extern int s3c_gpio_cfgpin(unsigned int pin, unsigned int to); 98extern int s3c_gpio_cfgpin(unsigned int pin, unsigned int to);
80 99
@@ -108,6 +127,8 @@ extern unsigned s3c_gpio_getcfg(unsigned int pin);
108 * This function sets the state of the pull-{up,down} resistor for the 127 * This function sets the state of the pull-{up,down} resistor for the
109 * specified pin. It will return 0 if successful, or a negative error 128 * specified pin. It will return 0 if successful, or a negative error
110 * code if the pin cannot support the requested pull setting. 129 * code if the pin cannot support the requested pull setting.
130 *
131 * @pull is one of S3C_GPIO_PULL_NONE, S3C_GPIO_PULL_DOWN or S3C_GPIO_PULL_UP.
111*/ 132*/
112extern int s3c_gpio_setpull(unsigned int pin, s3c_gpio_pull_t pull); 133extern int s3c_gpio_setpull(unsigned int pin, s3c_gpio_pull_t pull);
113 134
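
The expanded comments describe the usual pairing when bringing up a pin: set the pin function with s3c_gpio_cfgpin(), then the pull state with s3c_gpio_setpull(). A brief hedged sketch (the GPIO number macro is illustrative; real code would use the SoC's own gpio-nrs definitions):

	/* Put an example pin into special function 2 with its pull resistor disabled. */
	s3c_gpio_cfgpin(S3C2410_GPE(11), S3C_GPIO_SFN(2));
	s3c_gpio_setpull(S3C2410_GPE(11), S3C_GPIO_PULL_NONE);
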
diff --git a/arch/avr32/include/asm/scatterlist.h b/arch/avr32/include/asm/scatterlist.h
index 377320e3bd17..06394e5ead6c 100644
--- a/arch/avr32/include/asm/scatterlist.h
+++ b/arch/avr32/include/asm/scatterlist.h
@@ -1,25 +1,7 @@
1#ifndef __ASM_AVR32_SCATTERLIST_H 1#ifndef __ASM_AVR32_SCATTERLIST_H
2#define __ASM_AVR32_SCATTERLIST_H 2#define __ASM_AVR32_SCATTERLIST_H
3 3
4#include <asm/types.h> 4#include <asm-generic/scatterlist.h>
5
6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
11 unsigned int offset;
12 dma_addr_t dma_address;
13 unsigned int length;
14};
15
16/* These macros should be used after a pci_map_sg call has been done
17 * to get bus addresses of each of the SG entries and their lengths.
18 * You should only work with the number of sg entries pci_map_sg
19 * returns.
20 */
21#define sg_dma_address(sg) ((sg)->dma_address)
22#define sg_dma_len(sg) ((sg)->length)
23 5
24#define ISA_DMA_THRESHOLD (0xffffffff) 6#define ISA_DMA_THRESHOLD (0xffffffff)
25 7
diff --git a/arch/blackfin/include/asm/scatterlist.h b/arch/blackfin/include/asm/scatterlist.h
index 04f448711cd0..64d41d34ab0b 100644
--- a/arch/blackfin/include/asm/scatterlist.h
+++ b/arch/blackfin/include/asm/scatterlist.h
@@ -1,27 +1,7 @@
1#ifndef _BLACKFIN_SCATTERLIST_H 1#ifndef _BLACKFIN_SCATTERLIST_H
2#define _BLACKFIN_SCATTERLIST_H 2#define _BLACKFIN_SCATTERLIST_H
3 3
4#include <linux/mm.h> 4#include <asm-generic/scatterlist.h>
5
6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
11 unsigned int offset;
12 dma_addr_t dma_address;
13 unsigned int length;
14};
15
16/*
17 * These macros should be used after a pci_map_sg call has been done
18 * to get bus addresses of each of the SG entries and their lengths.
19 * You should only work with the number of sg entries pci_map_sg
20 * returns, or alternatively stop on the first sg_dma_len(sg) which
21 * is 0.
22 */
23#define sg_dma_address(sg) ((sg)->dma_address)
24#define sg_dma_len(sg) ((sg)->length)
25 5
26#define ISA_DMA_THRESHOLD (0xffffffff) 6#define ISA_DMA_THRESHOLD (0xffffffff)
27 7
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 43eb969405d1..6ec77685df52 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -292,28 +292,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
292 break; 292 break;
293 } 293 }
294 294
295#ifdef CONFIG_BINFMT_ELF_FDPIC
296 case PTRACE_GETFDPIC: {
297 unsigned long tmp = 0;
298
299 switch (addr) {
300 case_PTRACE_GETFDPIC_EXEC:
301 case PTRACE_GETFDPIC_EXEC:
302 tmp = child->mm->context.exec_fdpic_loadmap;
303 break;
304 case_PTRACE_GETFDPIC_INTERP:
305 case PTRACE_GETFDPIC_INTERP:
306 tmp = child->mm->context.interp_fdpic_loadmap;
307 break;
308 default:
309 break;
310 }
311
312 ret = put_user(tmp, datap);
313 break;
314 }
315#endif
316
317 /* when I and D space are separate, this will have to be fixed. */ 295 /* when I and D space are separate, this will have to be fixed. */
318 case PTRACE_POKEDATA: 296 case PTRACE_POKEDATA:
319 pr_debug("ptrace: PTRACE_PEEKDATA\n"); 297 pr_debug("ptrace: PTRACE_PEEKDATA\n");
@@ -357,8 +335,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
357 case PTRACE_PEEKUSR: 335 case PTRACE_PEEKUSR:
358 switch (addr) { 336 switch (addr) {
359#ifdef CONFIG_BINFMT_ELF_FDPIC /* backwards compat */ 337#ifdef CONFIG_BINFMT_ELF_FDPIC /* backwards compat */
360 case PT_FDPIC_EXEC: goto case_PTRACE_GETFDPIC_EXEC; 338 case PT_FDPIC_EXEC:
361 case PT_FDPIC_INTERP: goto case_PTRACE_GETFDPIC_INTERP; 339 request = PTRACE_GETFDPIC;
340 addr = PTRACE_GETFDPIC_EXEC;
341 goto case_default;
342 case PT_FDPIC_INTERP:
343 request = PTRACE_GETFDPIC;
344 addr = PTRACE_GETFDPIC_INTERP;
345 goto case_default;
362#endif 346#endif
363 default: 347 default:
364 ret = get_reg(child, addr, datap); 348 ret = get_reg(child, addr, datap);
@@ -385,6 +369,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
385 0, sizeof(struct pt_regs), 369 0, sizeof(struct pt_regs),
386 (const void __user *)data); 370 (const void __user *)data);
387 371
372 case_default:
388 default: 373 default:
389 ret = ptrace_request(child, request, addr, data); 374 ret = ptrace_request(child, request, addr, data);
390 break; 375 break;
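With the open-coded PTRACE_GETFDPIC handling gone, the legacy PT_FDPIC_EXEC/PT_FDPIC_INTERP PEEKUSR offsets are rewritten into the generic request and routed through ptrace_request() via the new case_default label. From userspace the generic request is used directly; a rough sketch only, with error handling omitted, and the header providing the PTRACE_GETFDPIC constants may differ between kernel and libc versions:

#include <sys/ptrace.h>

unsigned long loadmap;

/* Ask the kernel for the tracee's FDPIC exec loadmap pointer; the result
 * is written through the data argument, as in the removed code above. */
ptrace(PTRACE_GETFDPIC, pid, (void *)PTRACE_GETFDPIC_EXEC, &loadmap);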
diff --git a/arch/cris/include/asm/scatterlist.h b/arch/cris/include/asm/scatterlist.h
index faff53ad1f96..249a7842ff5f 100644
--- a/arch/cris/include/asm/scatterlist.h
+++ b/arch/cris/include/asm/scatterlist.h
@@ -1,22 +1,7 @@
1#ifndef __ASM_CRIS_SCATTERLIST_H 1#ifndef __ASM_CRIS_SCATTERLIST_H
2#define __ASM_CRIS_SCATTERLIST_H 2#define __ASM_CRIS_SCATTERLIST_H
3 3
4struct scatterlist { 4#include <asm-generic/scatterlist.h>
5#ifdef CONFIG_DEBUG_SG
6 unsigned long sg_magic;
7#endif
8 char * address; /* Location data is to be transferred to */
9 unsigned int length;
10
11 /* The following is i386 highmem junk - not used by us */
12 unsigned long page_link;
13 unsigned int offset;/* for highmem, page offset */
14
15};
16
17#define sg_dma_address(sg) ((sg)->address)
18#define sg_dma_len(sg) ((sg)->length)
19/* i386 junk */
20 5
21#define ISA_DMA_THRESHOLD (0x1fffffff) 6#define ISA_DMA_THRESHOLD (0x1fffffff)
22 7
diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
index 7dc0f0f85b7c..2797163b8f4f 100644
--- a/arch/frv/include/asm/cache.h
+++ b/arch/frv/include/asm/cache.h
@@ -17,8 +17,6 @@
17#define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT) 17#define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
18#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 18#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
19 19
20#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
21
22#define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) 20#define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
23#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) 21#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
24 22
diff --git a/arch/frv/include/asm/mem-layout.h b/arch/frv/include/asm/mem-layout.h
index 2947764fc0e0..ccae981876fa 100644
--- a/arch/frv/include/asm/mem-layout.h
+++ b/arch/frv/include/asm/mem-layout.h
@@ -35,8 +35,8 @@
35 * the slab must be aligned such that load- and store-double instructions don't 35 * the slab must be aligned such that load- and store-double instructions don't
36 * fault if used 36 * fault if used
37 */ 37 */
38#define ARCH_KMALLOC_MINALIGN 8 38#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
39#define ARCH_SLAB_MINALIGN 8 39#define ARCH_SLAB_MINALIGN L1_CACHE_BYTES
40 40
41/*****************************************************************************/ 41/*****************************************************************************/
42/* 42/*
diff --git a/arch/frv/include/asm/scatterlist.h b/arch/frv/include/asm/scatterlist.h
index 4bca8a28546c..1614bfd7e3a4 100644
--- a/arch/frv/include/asm/scatterlist.h
+++ b/arch/frv/include/asm/scatterlist.h
@@ -1,45 +1,7 @@
1#ifndef _ASM_SCATTERLIST_H 1#ifndef _ASM_SCATTERLIST_H
2#define _ASM_SCATTERLIST_H 2#define _ASM_SCATTERLIST_H
3 3
4#include <asm/types.h> 4#include <asm-generic/scatterlist.h>
5
6/*
7 * Drivers must set either ->address or (preferred) page and ->offset
8 * to indicate where data must be transferred to/from.
9 *
10 * Using page is recommended since it handles highmem data as well as
11 * low mem. ->address is restricted to data which has a virtual mapping, and
12 * it will go away in the future. Updating to page can be automated very
13 * easily -- something like
14 *
15 * sg->address = some_ptr;
16 *
17 * can be rewritten as
18 *
19 * sg_set_buf(sg, some_ptr, length);
20 *
21 * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens
22 */
23struct scatterlist {
24#ifdef CONFIG_DEBUG_SG
25 unsigned long sg_magic;
26#endif
27 unsigned long page_link;
28 unsigned int offset; /* for highmem, page offset */
29
30 dma_addr_t dma_address;
31 unsigned int length;
32};
33
34/*
35 * These macros should be used after a pci_map_sg call has been done
36 * to get bus addresses of each of the SG entries and their lengths.
37 * You should only work with the number of sg entries pci_map_sg
38 * returns, or alternatively stop on the first sg_dma_len(sg) which
39 * is 0.
40 */
41#define sg_dma_address(sg) ((sg)->dma_address)
42#define sg_dma_len(sg) ((sg)->length)
43 5
44#define ISA_DMA_THRESHOLD (0xffffffffUL) 6#define ISA_DMA_THRESHOLD (0xffffffffUL)
45 7
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index 60eeed3694c0..fac028936a04 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -344,26 +344,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
344 0, sizeof(child->thread.user->f), 344 0, sizeof(child->thread.user->f),
345 (const void __user *)data); 345 (const void __user *)data);
346 346
347 case PTRACE_GETFDPIC:
348 tmp = 0;
349 switch (addr) {
350 case PTRACE_GETFDPIC_EXEC:
351 tmp = child->mm->context.exec_fdpic_loadmap;
352 break;
353 case PTRACE_GETFDPIC_INTERP:
354 tmp = child->mm->context.interp_fdpic_loadmap;
355 break;
356 default:
357 break;
358 }
359
360 ret = 0;
361 if (put_user(tmp, (unsigned long *) data)) {
362 ret = -EFAULT;
363 break;
364 }
365 break;
366
367 default: 347 default:
368 ret = ptrace_request(child, request, addr, data); 348 ret = ptrace_request(child, request, addr, data);
369 break; 349 break;
diff --git a/arch/frv/kernel/sysctl.c b/arch/frv/kernel/sysctl.c
index 71abd1510a59..6c155d69da29 100644
--- a/arch/frv/kernel/sysctl.c
+++ b/arch/frv/kernel/sysctl.c
@@ -46,8 +46,9 @@ static void frv_change_dcache_mode(unsigned long newmode)
46/* 46/*
47 * handle requests to dynamically switch the write caching mode delivered by /proc 47 * handle requests to dynamically switch the write caching mode delivered by /proc
48 */ 48 */
49static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp, 49static int procctl_frv_cachemode(ctl_table *table, int write,
50 void __user *buffer, size_t *lenp, loff_t *ppos) 50 void __user *buffer, size_t *lenp,
51 loff_t *ppos)
51{ 52{
52 unsigned long hsr0; 53 unsigned long hsr0;
53 char buff[8]; 54 char buff[8];
@@ -84,7 +85,7 @@ static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
84 } 85 }
85 86
86 /* read the state */ 87 /* read the state */
87 if (filp->f_pos > 0) { 88 if (*ppos > 0) {
88 *lenp = 0; 89 *lenp = 0;
89 return 0; 90 return 0;
90 } 91 }
@@ -110,7 +111,7 @@ static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
110 return -EFAULT; 111 return -EFAULT;
111 112
112 *lenp = len; 113 *lenp = len;
113 filp->f_pos = len; 114 *ppos = len;
114 return 0; 115 return 0;
115 116
116} /* end procctl_frv_cachemode() */ 117} /* end procctl_frv_cachemode() */
@@ -120,8 +121,9 @@ static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp,
120 * permit the mm_struct the nominated process is using have its MMU context ID pinned 121 * permit the mm_struct the nominated process is using have its MMU context ID pinned
121 */ 122 */
122#ifdef CONFIG_MMU 123#ifdef CONFIG_MMU
123static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp, 124static int procctl_frv_pin_cxnr(ctl_table *table, int write,
124 void __user *buffer, size_t *lenp, loff_t *ppos) 125 void __user *buffer, size_t *lenp,
126 loff_t *ppos)
125{ 127{
126 pid_t pid; 128 pid_t pid;
127 char buff[16], *p; 129 char buff[16], *p;
@@ -150,7 +152,7 @@ static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp,
150 } 152 }
151 153
152 /* read the currently pinned CXN */ 154 /* read the currently pinned CXN */
153 if (filp->f_pos > 0) { 155 if (*ppos > 0) {
154 *lenp = 0; 156 *lenp = 0;
155 return 0; 157 return 0;
156 } 158 }
@@ -163,7 +165,7 @@ static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp,
163 return -EFAULT; 165 return -EFAULT;
164 166
165 *lenp = len; 167 *lenp = len;
166 filp->f_pos = len; 168 *ppos = len;
167 return 0; 169 return 0;
168 170
169} /* end procctl_frv_pin_cxnr() */ 171} /* end procctl_frv_pin_cxnr() */
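The two frv handlers above illustrate the tree-wide proc_handler signature change: the struct file argument is gone, so position tracking goes through the ppos pointer alone. A minimal sketch of a read-only handler written against the new signature (names are illustrative, not part of this patch):

static int example_proc_read(ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	char buf[8];
	size_t len;

	if (write)
		return -EPERM;

	/* second read: report EOF via *ppos instead of filp->f_pos */
	if (*ppos > 0) {
		*lenp = 0;
		return 0;
	}

	len = snprintf(buf, sizeof(buf), "ok\n");
	if (len > *lenp)
		len = *lenp;
	if (copy_to_user(buffer, buf, len))
		return -EFAULT;

	*lenp = len;
	*ppos = len;
	return 0;
}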
diff --git a/arch/h8300/include/asm/scatterlist.h b/arch/h8300/include/asm/scatterlist.h
index d3ecdd87ac90..de08a4a2cc1c 100644
--- a/arch/h8300/include/asm/scatterlist.h
+++ b/arch/h8300/include/asm/scatterlist.h
@@ -1,17 +1,7 @@
1#ifndef _H8300_SCATTERLIST_H 1#ifndef _H8300_SCATTERLIST_H
2#define _H8300_SCATTERLIST_H 2#define _H8300_SCATTERLIST_H
3 3
4#include <asm/types.h> 4#include <asm-generic/scatterlist.h>
5
6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
11 unsigned int offset;
12 dma_addr_t dma_address;
13 unsigned int length;
14};
15 5
16#define ISA_DMA_THRESHOLD (0xffffffff) 6#define ISA_DMA_THRESHOLD (0xffffffff)
17 7
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 9676100b83ee..95610820041e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -56,6 +56,9 @@ config MMU
56config NEED_DMA_MAP_STATE 56config NEED_DMA_MAP_STATE
57 def_bool y 57 def_bool y
58 58
59config NEED_SG_DMA_LENGTH
60 def_bool y
61
59config SWIOTLB 62config SWIOTLB
60 bool 63 bool
61 64
@@ -495,6 +498,14 @@ config HAVE_ARCH_NODEDATA_EXTENSION
495 def_bool y 498 def_bool y
496 depends on NUMA 499 depends on NUMA
497 500
501config USE_PERCPU_NUMA_NODE_ID
502 def_bool y
503 depends on NUMA
504
505config HAVE_MEMORYLESS_NODES
506 def_bool y
507 depends on NUMA
508
498config ARCH_PROC_KCORE_TEXT 509config ARCH_PROC_KCORE_TEXT
499 def_bool y 510 def_bool y
500 depends on PROC_KCORE 511 depends on PROC_KCORE
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 21adbd7f90f8..837dc82a013e 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -94,7 +94,6 @@ ia64_acpi_release_global_lock (unsigned int *lock)
94#define acpi_noirq 0 /* ACPI always enabled on IA64 */ 94#define acpi_noirq 0 /* ACPI always enabled on IA64 */
95#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */ 95#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
96#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */ 96#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
97#define acpi_ht 0 /* no HT-only mode on IA64 */
98#endif 97#endif
99#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */ 98#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
100static inline void disable_acpi(void) { } 99static inline void disable_acpi(void) { }
diff --git a/arch/ia64/include/asm/scatterlist.h b/arch/ia64/include/asm/scatterlist.h
index d8e98961dec7..f299a4fb25c8 100644
--- a/arch/ia64/include/asm/scatterlist.h
+++ b/arch/ia64/include/asm/scatterlist.h
@@ -1,6 +1,7 @@
1#ifndef _ASM_IA64_SCATTERLIST_H 1#ifndef _ASM_IA64_SCATTERLIST_H
2#define _ASM_IA64_SCATTERLIST_H 2#define _ASM_IA64_SCATTERLIST_H
3 3
4#include <asm-generic/scatterlist.h>
4/* 5/*
5 * It used to be that ISA_DMA_THRESHOLD had something to do with the 6 * It used to be that ISA_DMA_THRESHOLD had something to do with the
6 * DMA-limits of ISA-devices. Nowadays, its only remaining use (apart 7 * DMA-limits of ISA-devices. Nowadays, its only remaining use (apart
@@ -10,7 +11,6 @@
10 * that's 4GB - 1. 11 * that's 4GB - 1.
11 */ 12 */
12#define ISA_DMA_THRESHOLD 0xffffffff 13#define ISA_DMA_THRESHOLD 0xffffffff
13 14#define ARCH_HAS_SG_CHAIN
14#include <asm-generic/scatterlist.h>
15 15
16#endif /* _ASM_IA64_SCATTERLIST_H */ 16#endif /* _ASM_IA64_SCATTERLIST_H */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index d323071d0f91..09f646753d1a 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -26,11 +26,6 @@
26#define RECLAIM_DISTANCE 15 26#define RECLAIM_DISTANCE 15
27 27
28/* 28/*
29 * Returns the number of the node containing CPU 'cpu'
30 */
31#define cpu_to_node(cpu) (int)(cpu_to_node_map[cpu])
32
33/*
34 * Returns a bitmask of CPUs on Node 'node'. 29 * Returns a bitmask of CPUs on Node 'node'.
35 */ 30 */
36#define cpumask_of_node(node) ((node) == -1 ? \ 31#define cpumask_of_node(node) ((node) == -1 ? \
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 3095654f9ab3..d9485d952ed0 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -31,8 +31,6 @@ struct dma_map_ops swiotlb_dma_ops = {
31 .unmap_sg = swiotlb_unmap_sg_attrs, 31 .unmap_sg = swiotlb_unmap_sg_attrs,
32 .sync_single_for_cpu = swiotlb_sync_single_for_cpu, 32 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
33 .sync_single_for_device = swiotlb_sync_single_for_device, 33 .sync_single_for_device = swiotlb_sync_single_for_device,
34 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
35 .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
36 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 34 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
37 .sync_sg_for_device = swiotlb_sync_sg_for_device, 35 .sync_sg_for_device = swiotlb_sync_sg_for_device,
38 .dma_supported = swiotlb_dma_supported, 36 .dma_supported = swiotlb_dma_supported,
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 0dec7f702448..7c7909f9bc93 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -638,7 +638,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
638 */ 638 */
639 639
640 read_lock(&tasklist_lock); 640 read_lock(&tasklist_lock);
641 if (child->signal) { 641 if (child->sighand) {
642 spin_lock_irq(&child->sighand->siglock); 642 spin_lock_irq(&child->sighand->siglock);
643 if (child->state == TASK_STOPPED && 643 if (child->state == TASK_STOPPED &&
644 !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) { 644 !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
@@ -662,7 +662,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
662 * job control stop, so that SIGCONT can be used to wake it up. 662 * job control stop, so that SIGCONT can be used to wake it up.
663 */ 663 */
664 read_lock(&tasklist_lock); 664 read_lock(&tasklist_lock);
665 if (child->signal) { 665 if (child->sighand) {
666 spin_lock_irq(&child->sighand->siglock); 666 spin_lock_irq(&child->sighand->siglock);
667 if (child->state == TASK_TRACED && 667 if (child->state == TASK_TRACED &&
668 (child->signal->flags & SIGNAL_STOP_STOPPED)) { 668 (child->signal->flags & SIGNAL_STOP_STOPPED)) {
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index e5230b2ff2c5..6a1380e90f87 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -390,6 +390,14 @@ smp_callin (void)
390 390
391 fix_b0_for_bsp(); 391 fix_b0_for_bsp();
392 392
393#ifdef CONFIG_NUMA
394 /*
395 * numa_node_id() works after this.
396 */
397 set_numa_node(cpu_to_node_map[cpuid]);
398 set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
399#endif
400
393 ipi_call_lock_irq(); 401 ipi_call_lock_irq();
394 spin_lock(&vector_lock); 402 spin_lock(&vector_lock);
395 /* Setup the per cpu irq handling data structures */ 403 /* Setup the per cpu irq handling data structures */
@@ -632,6 +640,9 @@ void __devinit smp_prepare_boot_cpu(void)
632{ 640{
633 cpu_set(smp_processor_id(), cpu_online_map); 641 cpu_set(smp_processor_id(), cpu_online_map);
634 cpu_set(smp_processor_id(), cpu_callin_map); 642 cpu_set(smp_processor_id(), cpu_callin_map);
643#ifdef CONFIG_NUMA
644 set_numa_node(cpu_to_node_map[smp_processor_id()]);
645#endif
635 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 646 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
636 paravirt_post_smp_prepare_boot_cpu(); 647 paravirt_post_smp_prepare_boot_cpu();
637} 648}
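Calling set_numa_node() (and set_numa_mem()) this early means the per-CPU node id is valid for the rest of CPU bring-up, so generic code can rely on numa_node_id() instead of indexing cpu_to_node_map[] directly. A small illustrative sketch of the kind of caller this enables:

/* Illustrative only: allocate a page on the calling CPU's own node,
 * which is valid as soon as smp_callin() has run set_numa_node(). */
struct page *page = alloc_pages_node(numa_node_id(), GFP_KERNEL, 0);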
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 64aff520b899..aa2533ae7e9e 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -335,8 +335,11 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
335} 335}
336 336
337struct pci_bus * __devinit 337struct pci_bus * __devinit
338pci_acpi_scan_root(struct acpi_device *device, int domain, int bus) 338pci_acpi_scan_root(struct acpi_pci_root *root)
339{ 339{
340 struct acpi_device *device = root->device;
341 int domain = root->segment;
342 int bus = root->secondary.start;
340 struct pci_controller *controller; 343 struct pci_controller *controller;
341 unsigned int windows = 0; 344 unsigned int windows = 0;
342 struct pci_bus *pbus; 345 struct pci_bus *pbus;
diff --git a/arch/m32r/include/asm/scatterlist.h b/arch/m32r/include/asm/scatterlist.h
index 1ed372c73d0b..aeeddd8dac17 100644
--- a/arch/m32r/include/asm/scatterlist.h
+++ b/arch/m32r/include/asm/scatterlist.h
@@ -1,20 +1,7 @@
1#ifndef _ASM_M32R_SCATTERLIST_H 1#ifndef _ASM_M32R_SCATTERLIST_H
2#define _ASM_M32R_SCATTERLIST_H 2#define _ASM_M32R_SCATTERLIST_H
3 3
4#include <asm/types.h> 4#include <asm-generic/scatterlist.h>
5
6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 char * address; /* Location data is to be transferred to, NULL for
11 * highmem page */
12 unsigned long page_link;
13 unsigned int offset;/* for highmem, page offset */
14
15 dma_addr_t dma_address;
16 unsigned int length;
17};
18 5
19#define ISA_DMA_THRESHOLD (0x1fffffff) 6#define ISA_DMA_THRESHOLD (0x1fffffff)
20 7
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index b5da298ba61d..2e3737b92ffc 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -7,6 +7,7 @@ config M68K
7 default y 7 default y
8 select HAVE_AOUT 8 select HAVE_AOUT
9 select HAVE_IDE 9 select HAVE_IDE
10 select GENERIC_ATOMIC64
10 11
11config MMU 12config MMU
12 bool 13 bool
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index d2cc35d98532..b1577f741fa8 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -97,10 +97,6 @@ static void amiga_get_model(char *model);
97static void amiga_get_hardware_list(struct seq_file *m); 97static void amiga_get_hardware_list(struct seq_file *m);
98/* amiga specific timer functions */ 98/* amiga specific timer functions */
99static unsigned long amiga_gettimeoffset(void); 99static unsigned long amiga_gettimeoffset(void);
100static int a3000_hwclk(int, struct rtc_time *);
101static int a2000_hwclk(int, struct rtc_time *);
102static int amiga_set_clock_mmss(unsigned long);
103static unsigned int amiga_get_ss(void);
104extern void amiga_mksound(unsigned int count, unsigned int ticks); 100extern void amiga_mksound(unsigned int count, unsigned int ticks);
105static void amiga_reset(void); 101static void amiga_reset(void);
106extern void amiga_init_sound(void); 102extern void amiga_init_sound(void);
@@ -138,10 +134,6 @@ static struct {
138 } 134 }
139}; 135};
140 136
141static struct resource rtc_resource = {
142 .start = 0x00dc0000, .end = 0x00dcffff
143};
144
145static struct resource ram_resource[NUM_MEMINFO]; 137static struct resource ram_resource[NUM_MEMINFO];
146 138
147 139
@@ -387,15 +379,6 @@ void __init config_amiga(void)
387 mach_get_model = amiga_get_model; 379 mach_get_model = amiga_get_model;
388 mach_get_hardware_list = amiga_get_hardware_list; 380 mach_get_hardware_list = amiga_get_hardware_list;
389 mach_gettimeoffset = amiga_gettimeoffset; 381 mach_gettimeoffset = amiga_gettimeoffset;
390 if (AMIGAHW_PRESENT(A3000_CLK)) {
391 mach_hwclk = a3000_hwclk;
392 rtc_resource.name = "A3000 RTC";
393 request_resource(&iomem_resource, &rtc_resource);
394 } else /* if (AMIGAHW_PRESENT(A2000_CLK)) */ {
395 mach_hwclk = a2000_hwclk;
396 rtc_resource.name = "A2000 RTC";
397 request_resource(&iomem_resource, &rtc_resource);
398 }
399 382
400 /* 383 /*
401 * default MAX_DMA=0xffffffff on all machines. If we don't do so, the SCSI 384 * default MAX_DMA=0xffffffff on all machines. If we don't do so, the SCSI
@@ -404,8 +387,6 @@ void __init config_amiga(void)
404 */ 387 */
405 mach_max_dma_address = 0xffffffff; 388 mach_max_dma_address = 0xffffffff;
406 389
407 mach_set_clock_mmss = amiga_set_clock_mmss;
408 mach_get_ss = amiga_get_ss;
409 mach_reset = amiga_reset; 390 mach_reset = amiga_reset;
410#if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE) 391#if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
411 mach_beep = amiga_mksound; 392 mach_beep = amiga_mksound;
@@ -530,161 +511,6 @@ static unsigned long amiga_gettimeoffset(void)
530 return ticks + offset; 511 return ticks + offset;
531} 512}
532 513
533static int a3000_hwclk(int op, struct rtc_time *t)
534{
535 tod_3000.cntrl1 = TOD3000_CNTRL1_HOLD;
536
537 if (!op) { /* read */
538 t->tm_sec = tod_3000.second1 * 10 + tod_3000.second2;
539 t->tm_min = tod_3000.minute1 * 10 + tod_3000.minute2;
540 t->tm_hour = tod_3000.hour1 * 10 + tod_3000.hour2;
541 t->tm_mday = tod_3000.day1 * 10 + tod_3000.day2;
542 t->tm_wday = tod_3000.weekday;
543 t->tm_mon = tod_3000.month1 * 10 + tod_3000.month2 - 1;
544 t->tm_year = tod_3000.year1 * 10 + tod_3000.year2;
545 if (t->tm_year <= 69)
546 t->tm_year += 100;
547 } else {
548 tod_3000.second1 = t->tm_sec / 10;
549 tod_3000.second2 = t->tm_sec % 10;
550 tod_3000.minute1 = t->tm_min / 10;
551 tod_3000.minute2 = t->tm_min % 10;
552 tod_3000.hour1 = t->tm_hour / 10;
553 tod_3000.hour2 = t->tm_hour % 10;
554 tod_3000.day1 = t->tm_mday / 10;
555 tod_3000.day2 = t->tm_mday % 10;
556 if (t->tm_wday != -1)
557 tod_3000.weekday = t->tm_wday;
558 tod_3000.month1 = (t->tm_mon + 1) / 10;
559 tod_3000.month2 = (t->tm_mon + 1) % 10;
560 if (t->tm_year >= 100)
561 t->tm_year -= 100;
562 tod_3000.year1 = t->tm_year / 10;
563 tod_3000.year2 = t->tm_year % 10;
564 }
565
566 tod_3000.cntrl1 = TOD3000_CNTRL1_FREE;
567
568 return 0;
569}
570
571static int a2000_hwclk(int op, struct rtc_time *t)
572{
573 int cnt = 5;
574
575 tod_2000.cntrl1 = TOD2000_CNTRL1_HOLD;
576
577 while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt) {
578 tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
579 udelay(70);
580 tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;
581 --cnt;
582 }
583
584 if (!cnt)
585 printk(KERN_INFO "hwclk: timed out waiting for RTC (0x%x)\n",
586 tod_2000.cntrl1);
587
588 if (!op) { /* read */
589 t->tm_sec = tod_2000.second1 * 10 + tod_2000.second2;
590 t->tm_min = tod_2000.minute1 * 10 + tod_2000.minute2;
591 t->tm_hour = (tod_2000.hour1 & 3) * 10 + tod_2000.hour2;
592 t->tm_mday = tod_2000.day1 * 10 + tod_2000.day2;
593 t->tm_wday = tod_2000.weekday;
594 t->tm_mon = tod_2000.month1 * 10 + tod_2000.month2 - 1;
595 t->tm_year = tod_2000.year1 * 10 + tod_2000.year2;
596 if (t->tm_year <= 69)
597 t->tm_year += 100;
598
599 if (!(tod_2000.cntrl3 & TOD2000_CNTRL3_24HMODE)) {
600 if (!(tod_2000.hour1 & TOD2000_HOUR1_PM) && t->tm_hour == 12)
601 t->tm_hour = 0;
602 else if ((tod_2000.hour1 & TOD2000_HOUR1_PM) && t->tm_hour != 12)
603 t->tm_hour += 12;
604 }
605 } else {
606 tod_2000.second1 = t->tm_sec / 10;
607 tod_2000.second2 = t->tm_sec % 10;
608 tod_2000.minute1 = t->tm_min / 10;
609 tod_2000.minute2 = t->tm_min % 10;
610 if (tod_2000.cntrl3 & TOD2000_CNTRL3_24HMODE)
611 tod_2000.hour1 = t->tm_hour / 10;
612 else if (t->tm_hour >= 12)
613 tod_2000.hour1 = TOD2000_HOUR1_PM +
614 (t->tm_hour - 12) / 10;
615 else
616 tod_2000.hour1 = t->tm_hour / 10;
617 tod_2000.hour2 = t->tm_hour % 10;
618 tod_2000.day1 = t->tm_mday / 10;
619 tod_2000.day2 = t->tm_mday % 10;
620 if (t->tm_wday != -1)
621 tod_2000.weekday = t->tm_wday;
622 tod_2000.month1 = (t->tm_mon + 1) / 10;
623 tod_2000.month2 = (t->tm_mon + 1) % 10;
624 if (t->tm_year >= 100)
625 t->tm_year -= 100;
626 tod_2000.year1 = t->tm_year / 10;
627 tod_2000.year2 = t->tm_year % 10;
628 }
629
630 tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
631
632 return 0;
633}
634
635static int amiga_set_clock_mmss(unsigned long nowtime)
636{
637 short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
638
639 if (AMIGAHW_PRESENT(A3000_CLK)) {
640 tod_3000.cntrl1 = TOD3000_CNTRL1_HOLD;
641
642 tod_3000.second1 = real_seconds / 10;
643 tod_3000.second2 = real_seconds % 10;
644 tod_3000.minute1 = real_minutes / 10;
645 tod_3000.minute2 = real_minutes % 10;
646
647 tod_3000.cntrl1 = TOD3000_CNTRL1_FREE;
648 } else /* if (AMIGAHW_PRESENT(A2000_CLK)) */ {
649 int cnt = 5;
650
651 tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;
652
653 while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt) {
654 tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
655 udelay(70);
656 tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD;
657 --cnt;
658 }
659
660 if (!cnt)
661 printk(KERN_INFO "set_clock_mmss: timed out waiting for RTC (0x%x)\n", tod_2000.cntrl1);
662
663 tod_2000.second1 = real_seconds / 10;
664 tod_2000.second2 = real_seconds % 10;
665 tod_2000.minute1 = real_minutes / 10;
666 tod_2000.minute2 = real_minutes % 10;
667
668 tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD;
669 }
670
671 return 0;
672}
673
674static unsigned int amiga_get_ss(void)
675{
676 unsigned int s;
677
678 if (AMIGAHW_PRESENT(A3000_CLK)) {
679 tod_3000.cntrl1 = TOD3000_CNTRL1_HOLD;
680 s = tod_3000.second1 * 10 + tod_3000.second2;
681 tod_3000.cntrl1 = TOD3000_CNTRL1_FREE;
682 } else /* if (AMIGAHW_PRESENT(A2000_CLK)) */ {
683 s = tod_2000.second1 * 10 + tod_2000.second2;
684 }
685 return s;
686}
687
688static NORET_TYPE void amiga_reset(void) 514static NORET_TYPE void amiga_reset(void)
689 ATTRIB_NORET; 515 ATTRIB_NORET;
690 516
diff --git a/arch/m68k/amiga/platform.c b/arch/m68k/amiga/platform.c
index 38f18bf14737..7fd8b41723ea 100644
--- a/arch/m68k/amiga/platform.c
+++ b/arch/m68k/amiga/platform.c
@@ -11,6 +11,7 @@
11#include <linux/zorro.h> 11#include <linux/zorro.h>
12 12
13#include <asm/amigahw.h> 13#include <asm/amigahw.h>
14#include <asm/amigayle.h>
14 15
15 16
16#ifdef CONFIG_ZORRO 17#ifdef CONFIG_ZORRO
@@ -55,11 +56,77 @@ static int __init amiga_init_bus(void)
55 56
56subsys_initcall(amiga_init_bus); 57subsys_initcall(amiga_init_bus);
57 58
58#endif /* CONFIG_ZORRO */ 59
60static int z_dev_present(zorro_id id)
61{
62 unsigned int i;
63
64 for (i = 0; i < zorro_num_autocon; i++)
65 if (zorro_autocon[i].rom.er_Manufacturer == ZORRO_MANUF(id) &&
66 zorro_autocon[i].rom.er_Product == ZORRO_PROD(id))
67 return 1;
68
69 return 0;
70}
71
72#else /* !CONFIG_ZORRO */
73
74static inline int z_dev_present(zorro_id id) { return 0; }
75
76#endif /* !CONFIG_ZORRO */
77
78
79static const struct resource a3000_scsi_resource __initconst = {
80 .start = 0xdd0000,
81 .end = 0xdd00ff,
82 .flags = IORESOURCE_MEM,
83};
84
85
86static const struct resource a4000t_scsi_resource __initconst = {
87 .start = 0xdd0000,
88 .end = 0xdd0fff,
89 .flags = IORESOURCE_MEM,
90};
91
92
93static const struct resource a1200_ide_resource __initconst = {
94 .start = 0xda0000,
95 .end = 0xda1fff,
96 .flags = IORESOURCE_MEM,
97};
98
99static const struct gayle_ide_platform_data a1200_ide_pdata __initconst = {
100 .base = 0xda0000,
101 .irqport = 0xda9000,
102 .explicit_ack = 1,
103};
104
105
106static const struct resource a4000_ide_resource __initconst = {
107 .start = 0xdd2000,
108 .end = 0xdd3fff,
109 .flags = IORESOURCE_MEM,
110};
111
112static const struct gayle_ide_platform_data a4000_ide_pdata __initconst = {
113 .base = 0xdd2020,
114 .irqport = 0xdd3020,
115 .explicit_ack = 0,
116};
117
118
119static const struct resource amiga_rtc_resource __initconst = {
120 .start = 0x00dc0000,
121 .end = 0x00dcffff,
122 .flags = IORESOURCE_MEM,
123};
59 124
60 125
61static int __init amiga_init_devices(void) 126static int __init amiga_init_devices(void)
62{ 127{
128 struct platform_device *pdev;
129
63 if (!MACH_IS_AMIGA) 130 if (!MACH_IS_AMIGA)
64 return -ENODEV; 131 return -ENODEV;
65 132
@@ -77,6 +144,53 @@ static int __init amiga_init_devices(void)
77 if (AMIGAHW_PRESENT(AMI_FLOPPY)) 144 if (AMIGAHW_PRESENT(AMI_FLOPPY))
78 platform_device_register_simple("amiga-floppy", -1, NULL, 0); 145 platform_device_register_simple("amiga-floppy", -1, NULL, 0);
79 146
147 if (AMIGAHW_PRESENT(A3000_SCSI))
148 platform_device_register_simple("amiga-a3000-scsi", -1,
149 &a3000_scsi_resource, 1);
150
151 if (AMIGAHW_PRESENT(A4000_SCSI))
152 platform_device_register_simple("amiga-a4000t-scsi", -1,
153 &a4000t_scsi_resource, 1);
154
155 if (AMIGAHW_PRESENT(A1200_IDE) ||
156 z_dev_present(ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE)) {
157 pdev = platform_device_register_simple("amiga-gayle-ide", -1,
158 &a1200_ide_resource, 1);
159 platform_device_add_data(pdev, &a1200_ide_pdata,
160 sizeof(a1200_ide_pdata));
161 }
162
163 if (AMIGAHW_PRESENT(A4000_IDE)) {
164 pdev = platform_device_register_simple("amiga-gayle-ide", -1,
165 &a4000_ide_resource, 1);
166 platform_device_add_data(pdev, &a4000_ide_pdata,
167 sizeof(a4000_ide_pdata));
168 }
169
170
171 /* other I/O hardware */
172 if (AMIGAHW_PRESENT(AMI_KEYBOARD))
173 platform_device_register_simple("amiga-keyboard", -1, NULL, 0);
174
175 if (AMIGAHW_PRESENT(AMI_MOUSE))
176 platform_device_register_simple("amiga-mouse", -1, NULL, 0);
177
178 if (AMIGAHW_PRESENT(AMI_SERIAL))
179 platform_device_register_simple("amiga-serial", -1, NULL, 0);
180
181 if (AMIGAHW_PRESENT(AMI_PARALLEL))
182 platform_device_register_simple("amiga-parallel", -1, NULL, 0);
183
184
185 /* real time clocks */
186 if (AMIGAHW_PRESENT(A2000_CLK))
187 platform_device_register_simple("rtc-msm6242", -1,
188 &amiga_rtc_resource, 1);
189
190 if (AMIGAHW_PRESENT(A3000_CLK))
191 platform_device_register_simple("rtc-rp5c01", -1,
192 &amiga_rtc_resource, 1);
193
80 return 0; 194 return 0;
81} 195}
82 196
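The registration pattern above pairs platform_device_register_simple() with platform_device_add_data(), so the matching driver sees both the MMIO resource and a copy of the gayle_ide_platform_data. A hedged sketch of the consuming side (names are illustrative and not taken from the actual amiga-gayle-ide driver):

static int __devinit example_gayle_probe(struct platform_device *pdev)
{
	const struct gayle_ide_platform_data *pdata = pdev->dev.platform_data;
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!pdata || !res)
		return -ENODEV;

	/* pdata->base, pdata->irqport and pdata->explicit_ack describe the
	 * controller; res is the memory window registered above. */
	return 0;
}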
diff --git a/arch/m68k/include/asm/amigayle.h b/arch/m68k/include/asm/amigayle.h
index bb5a6aa329f3..a01453d9c231 100644
--- a/arch/m68k/include/asm/amigayle.h
+++ b/arch/m68k/include/asm/amigayle.h
@@ -104,4 +104,10 @@ struct GAYLE {
104#define GAYLE_CFG_250NS 0x00 104#define GAYLE_CFG_250NS 0x00
105#define GAYLE_CFG_720NS 0x0c 105#define GAYLE_CFG_720NS 0x0c
106 106
107struct gayle_ide_platform_data {
108 unsigned long base;
109 unsigned long irqport;
110 int explicit_ack; /* A1200 IDE needs explicit ack */
111};
112
107#endif /* asm-m68k/amigayle.h */ 113#endif /* asm-m68k/amigayle.h */
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 8d29145ebb27..eab36dcacf6c 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -3,3 +3,5 @@
3#else 3#else
4#include "atomic_mm.h" 4#include "atomic_mm.h"
5#endif 5#endif
6
7#include <asm-generic/atomic64.h>
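Selecting GENERIC_ATOMIC64 and pulling in <asm-generic/atomic64.h> gives m68k the spinlock-backed atomic64_t fallback, so 64-bit atomics can be used the same way as on other architectures. A minimal usage sketch with illustrative names:

static atomic64_t example_counter = ATOMIC64_INIT(0);

static void example_bump(void)
{
	atomic64_inc(&example_counter);
	if (atomic64_read(&example_counter) > 1000)
		atomic64_set(&example_counter, 0);
}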
diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
index fed3fd30de7e..ecafbe1718c3 100644
--- a/arch/m68k/include/asm/cache.h
+++ b/arch/m68k/include/asm/cache.h
@@ -8,4 +8,6 @@
8#define L1_CACHE_SHIFT 4 8#define L1_CACHE_SHIFT 4
9#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT) 9#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
10 10
11#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
12
11#endif 13#endif
diff --git a/arch/m68k/include/asm/scatterlist.h b/arch/m68k/include/asm/scatterlist.h
index e27ad902b1cf..175da06c6b95 100644
--- a/arch/m68k/include/asm/scatterlist.h
+++ b/arch/m68k/include/asm/scatterlist.h
@@ -1,23 +1,9 @@
1#ifndef _M68K_SCATTERLIST_H 1#ifndef _M68K_SCATTERLIST_H
2#define _M68K_SCATTERLIST_H 2#define _M68K_SCATTERLIST_H
3 3
4#include <linux/types.h> 4#include <asm-generic/scatterlist.h>
5
6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
11 unsigned int offset;
12 unsigned int length;
13
14 dma_addr_t dma_address; /* A place to hang host-specific addresses at. */
15};
16 5
17/* This is bogus and should go away. */ 6/* This is bogus and should go away. */
18#define ISA_DMA_THRESHOLD (0x00ffffff) 7#define ISA_DMA_THRESHOLD (0x00ffffff)
19 8
20#define sg_dma_address(sg) ((sg)->dma_address)
21#define sg_dma_len(sg) ((sg)->length)
22
23#endif /* !(_M68K_SCATTERLIST_H) */ 9#endif /* !(_M68K_SCATTERLIST_H) */
diff --git a/arch/microblaze/include/asm/scatterlist.h b/arch/microblaze/include/asm/scatterlist.h
index 35d786fe93ae..dc4a8900cc80 100644
--- a/arch/microblaze/include/asm/scatterlist.h
+++ b/arch/microblaze/include/asm/scatterlist.h
@@ -1 +1,3 @@
1#include <asm-generic/scatterlist.h> 1#include <asm-generic/scatterlist.h>
2
3#define ISA_DMA_THRESHOLD (~0UL)
diff --git a/arch/mips/include/asm/scatterlist.h b/arch/mips/include/asm/scatterlist.h
index 83d69fe17c9f..9af65e79be36 100644
--- a/arch/mips/include/asm/scatterlist.h
+++ b/arch/mips/include/asm/scatterlist.h
@@ -1,27 +1,7 @@
1#ifndef __ASM_SCATTERLIST_H 1#ifndef __ASM_SCATTERLIST_H
2#define __ASM_SCATTERLIST_H 2#define __ASM_SCATTERLIST_H
3 3
4#include <asm/types.h> 4#include <asm-generic/scatterlist.h>
5
6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
11 unsigned int offset;
12 dma_addr_t dma_address;
13 unsigned int length;
14};
15
16/*
17 * These macros should be used after a pci_map_sg call has been done
18 * to get bus addresses of each of the SG entries and their lengths.
19 * You should only work with the number of sg entries pci_map_sg
20 * returns, or alternatively stop on the first sg_dma_len(sg) which
21 * is 0.
22 */
23#define sg_dma_address(sg) ((sg)->dma_address)
24#define sg_dma_len(sg) ((sg)->length)
25 5
26#define ISA_DMA_THRESHOLD (0x00ffffffUL) 6#define ISA_DMA_THRESHOLD (0x00ffffffUL)
27 7
diff --git a/arch/mn10300/include/asm/scatterlist.h b/arch/mn10300/include/asm/scatterlist.h
index 67535901b9ff..7bd00b9e030d 100644
--- a/arch/mn10300/include/asm/scatterlist.h
+++ b/arch/mn10300/include/asm/scatterlist.h
@@ -11,45 +11,8 @@
11#ifndef _ASM_SCATTERLIST_H 11#ifndef _ASM_SCATTERLIST_H
12#define _ASM_SCATTERLIST_H 12#define _ASM_SCATTERLIST_H
13 13
14#include <asm/types.h> 14#include <asm-generic/scatterlist.h>
15
16/*
17 * Drivers must set either ->address or (preferred) page and ->offset
18 * to indicate where data must be transferred to/from.
19 *
20 * Using page is recommended since it handles highmem data as well as
21 * low mem. ->address is restricted to data which has a virtual mapping, and
22 * it will go away in the future. Updating to page can be automated very
23 * easily -- something like
24 *
25 * sg->address = some_ptr;
26 *
27 * can be rewritten as
28 *
29 * sg_set_page(virt_to_page(some_ptr));
30 * sg->offset = (unsigned long) some_ptr & ~PAGE_MASK;
31 *
32 * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens
33 */
34struct scatterlist {
35#ifdef CONFIG_DEBUG_SG
36 unsigned long sg_magic;
37#endif
38 unsigned long page_link;
39 unsigned int offset; /* for highmem, page offset */
40 dma_addr_t dma_address;
41 unsigned int length;
42};
43 15
44#define ISA_DMA_THRESHOLD (0x00ffffff) 16#define ISA_DMA_THRESHOLD (0x00ffffff)
45 17
46/*
47 * These macros should be used after a pci_map_sg call has been done
48 * to get bus addresses of each of the SG entries and their lengths.
49 * You should only work with the number of sg entries pci_map_sg
50 * returns.
51 */
52#define sg_dma_address(sg) ((sg)->dma_address)
53#define sg_dma_len(sg) ((sg)->length)
54
55#endif /* _ASM_SCATTERLIST_H */ 18#endif /* _ASM_SCATTERLIST_H */
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 9c4da3d63bfb..05a366a5c4d5 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -98,6 +98,9 @@ config STACKTRACE_SUPPORT
98config NEED_DMA_MAP_STATE 98config NEED_DMA_MAP_STATE
99 def_bool y 99 def_bool y
100 100
101config NEED_SG_DMA_LENGTH
102 def_bool y
103
101config ISA_DMA_API 104config ISA_DMA_API
102 bool 105 bool
103 106
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 477277739da5..4556d820128a 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -2,6 +2,7 @@
2#define _PARISC_CACHEFLUSH_H 2#define _PARISC_CACHEFLUSH_H
3 3
4#include <linux/mm.h> 4#include <linux/mm.h>
5#include <linux/uaccess.h>
5 6
6/* The usual comment is "Caches aren't brain-dead on the <architecture>". 7/* The usual comment is "Caches aren't brain-dead on the <architecture>".
7 * Unfortunately, that doesn't apply to PA-RISC. */ 8 * Unfortunately, that doesn't apply to PA-RISC. */
@@ -125,11 +126,20 @@ static inline void *kmap(struct page *page)
125 126
126#define kunmap(page) kunmap_parisc(page_address(page)) 127#define kunmap(page) kunmap_parisc(page_address(page))
127 128
128#define kmap_atomic(page, idx) page_address(page) 129static inline void *kmap_atomic(struct page *page, enum km_type idx)
130{
131 pagefault_disable();
132 return page_address(page);
133}
129 134
130#define kunmap_atomic(addr, idx) kunmap_parisc(addr) 135static inline void kunmap_atomic(void *addr, enum km_type idx)
136{
137 kunmap_parisc(addr);
138 pagefault_enable();
139}
131 140
132#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn)) 141#define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx)
142#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
133#define kmap_atomic_to_page(ptr) virt_to_page(ptr) 143#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
134#endif 144#endif
135 145
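Turning the parisc kmap_atomic()/kunmap_atomic() macros into inlines that disable and re-enable page faults brings them in line with the generic atomic-kmap contract: no faulting or sleeping between the map and unmap. Typical usage, sketched here only for illustration:

/* Copy a few bytes out of a page under an atomic mapping; KM_USER0 is
 * the conventional slot for short, non-nested mappings like this. */
void *vaddr = kmap_atomic(page, KM_USER0);
memcpy(buf, vaddr + offset, len);
kunmap_atomic(vaddr, KM_USER0);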
diff --git a/arch/parisc/include/asm/scatterlist.h b/arch/parisc/include/asm/scatterlist.h
index 62269b31ebf4..2c3b79b54b28 100644
--- a/arch/parisc/include/asm/scatterlist.h
+++ b/arch/parisc/include/asm/scatterlist.h
@@ -3,25 +3,9 @@
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5#include <asm/types.h> 5#include <asm/types.h>
6 6#include <asm-generic/scatterlist.h>
7struct scatterlist {
8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
12 unsigned int offset;
13
14 unsigned int length;
15
16 /* an IOVA can be 64-bits on some PA-Risc platforms. */
17 dma_addr_t iova; /* I/O Virtual Address */
18 __u32 iova_length; /* bytes mapped */
19};
20
21#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
22#define sg_dma_address(sg) ((sg)->iova)
23#define sg_dma_len(sg) ((sg)->iova_length)
24 7
25#define ISA_DMA_THRESHOLD (~0UL) 8#define ISA_DMA_THRESHOLD (~0UL)
9#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
26 10
27#endif /* _ASM_PARISC_SCATTERLIST_H */ 11#endif /* _ASM_PARISC_SCATTERLIST_H */
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index ec787b411e9a..dcd55103a4bb 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -45,8 +45,12 @@
45#else 45#else
46#define FRAME_SIZE 64 46#define FRAME_SIZE 64
47#endif 47#endif
48#define FRAME_ALIGN 64
48 49
49#define align(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y))) 50/* Add FRAME_SIZE to the size x and align it to y. All definitions
51 * that use align_frame will include space for a frame.
52 */
53#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
50 54
51int main(void) 55int main(void)
52{ 56{
@@ -146,7 +150,8 @@ int main(void)
146 DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior)); 150 DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
147 BLANK(); 151 BLANK();
148 DEFINE(TASK_SZ, sizeof(struct task_struct)); 152 DEFINE(TASK_SZ, sizeof(struct task_struct));
149 DEFINE(TASK_SZ_ALGN, align(sizeof(struct task_struct), 64)); 153 /* TASK_SZ_ALGN includes space for a stack frame. */
154 DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
150 BLANK(); 155 BLANK();
151 DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0])); 156 DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
152 DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1])); 157 DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
@@ -233,7 +238,8 @@ int main(void)
233 DEFINE(PT_ISR, offsetof(struct pt_regs, isr)); 238 DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
234 DEFINE(PT_IOR, offsetof(struct pt_regs, ior)); 239 DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
235 DEFINE(PT_SIZE, sizeof(struct pt_regs)); 240 DEFINE(PT_SIZE, sizeof(struct pt_regs));
236 DEFINE(PT_SZ_ALGN, align(sizeof(struct pt_regs), 64)); 241 /* PT_SZ_ALGN includes space for a stack frame. */
242 DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
237 BLANK(); 243 BLANK();
238 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 244 DEFINE(TI_TASK, offsetof(struct thread_info, task));
239 DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); 245 DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
@@ -242,7 +248,8 @@ int main(void)
242 DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit)); 248 DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
243 DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); 249 DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
244 DEFINE(THREAD_SZ, sizeof(struct thread_info)); 250 DEFINE(THREAD_SZ, sizeof(struct thread_info));
245 DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64)); 251 /* THREAD_SZ_ALGN includes space for a stack frame. */
252 DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
246 BLANK(); 253 BLANK();
247 DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base)); 254 DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
248 DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride)); 255 DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
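As a quick sanity check of align_frame(): with the 32-bit FRAME_SIZE of 64 and a hypothetical structure size of 360 bytes, (x)+(y)-1 = 423 and 423 % 64 = 39, so align_frame(360, 64) = 360 + 64 + 63 - 39 = 448, i.e. the size rounded up to 384 plus one 64-byte frame, which is exactly the "includes space for a stack frame" behaviour the new comments describe.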
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 3a44f7f704fa..6337adef30f6 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -364,32 +364,6 @@
364 .align 32 364 .align 32
365 .endm 365 .endm
366 366
367 /* The following are simple 32 vs 64 bit instruction
368 * abstractions for the macros */
369 .macro EXTR reg1,start,length,reg2
370#ifdef CONFIG_64BIT
371 extrd,u \reg1,32+(\start),\length,\reg2
372#else
373 extrw,u \reg1,\start,\length,\reg2
374#endif
375 .endm
376
377 .macro DEP reg1,start,length,reg2
378#ifdef CONFIG_64BIT
379 depd \reg1,32+(\start),\length,\reg2
380#else
381 depw \reg1,\start,\length,\reg2
382#endif
383 .endm
384
385 .macro DEPI val,start,length,reg
386#ifdef CONFIG_64BIT
387 depdi \val,32+(\start),\length,\reg
388#else
389 depwi \val,\start,\length,\reg
390#endif
391 .endm
392
393 /* In LP64, the space contains part of the upper 32 bits of the 367 /* In LP64, the space contains part of the upper 32 bits of the
394 * fault. We have to extract this and place it in the va, 368 * fault. We have to extract this and place it in the va,
395 * zeroing the corresponding bits in the space register */ 369 * zeroing the corresponding bits in the space register */
@@ -442,19 +416,19 @@
442 */ 416 */
443 .macro L2_ptep pmd,pte,index,va,fault 417 .macro L2_ptep pmd,pte,index,va,fault
444#if PT_NLEVELS == 3 418#if PT_NLEVELS == 3
445 EXTR \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index 419 extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
446#else 420#else
447 EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index 421 extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
448#endif 422#endif
449 DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */ 423 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
450 copy %r0,\pte 424 copy %r0,\pte
451 ldw,s \index(\pmd),\pmd 425 ldw,s \index(\pmd),\pmd
452 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault 426 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
453 DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ 427 dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
454 copy \pmd,%r9 428 copy \pmd,%r9
455 SHLREG %r9,PxD_VALUE_SHIFT,\pmd 429 SHLREG %r9,PxD_VALUE_SHIFT,\pmd
456 EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index 430 extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
457 DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */ 431 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
458 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd 432 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
459 LDREG %r0(\pmd),\pte /* pmd is now pte */ 433 LDREG %r0(\pmd),\pte /* pmd is now pte */
460 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault 434 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
@@ -605,7 +579,7 @@
605 depdi 0,31,32,\tmp 579 depdi 0,31,32,\tmp
606#endif 580#endif
607 copy \va,\tmp1 581 copy \va,\tmp1
608 DEPI 0,31,23,\tmp1 582 depi 0,31,23,\tmp1
609 cmpb,COND(<>),n \tmp,\tmp1,\fault 583 cmpb,COND(<>),n \tmp,\tmp1,\fault
610 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot 584 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
611 depd,z \prot,8,7,\prot 585 depd,z \prot,8,7,\prot
@@ -997,13 +971,6 @@ intr_restore:
997 971
998 rfi 972 rfi
999 nop 973 nop
1000 nop
1001 nop
1002 nop
1003 nop
1004 nop
1005 nop
1006 nop
1007 974
1008#ifndef CONFIG_PREEMPT 975#ifndef CONFIG_PREEMPT
1009# define intr_do_preempt intr_restore 976# define intr_do_preempt intr_restore
@@ -2076,9 +2043,10 @@ syscall_restore:
2076 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */ 2043 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
2077 2044
2078 /* NOTE: We use rsm/ssm pair to make this operation atomic */ 2045 /* NOTE: We use rsm/ssm pair to make this operation atomic */
2046 LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
2079 rsm PSW_SM_I, %r0 2047 rsm PSW_SM_I, %r0
2080 LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */ 2048 copy %r1,%r30 /* Restore user sp */
2081 mfsp %sr3,%r1 /* Get users space id */ 2049 mfsp %sr3,%r1 /* Get user space id */
2082 mtsp %r1,%sr7 /* Restore sr7 */ 2050 mtsp %r1,%sr7 /* Restore sr7 */
2083 ssm PSW_SM_I, %r0 2051 ssm PSW_SM_I, %r0
2084 2052
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index f5f96021caa0..68e75ce838d6 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -47,18 +47,17 @@ ENTRY(linux_gateway_page)
47 KILL_INSN 47 KILL_INSN
48 .endr 48 .endr
49 49
50 /* ADDRESS 0xb0 to 0xb4, lws uses 1 insns for entry */ 50 /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */
51 /* Light-weight-syscall entry must always be located at 0xb0 */ 51 /* Light-weight-syscall entry must always be located at 0xb0 */
52 /* WARNING: Keep this number updated with table size changes */ 52 /* WARNING: Keep this number updated with table size changes */
53#define __NR_lws_entries (2) 53#define __NR_lws_entries (2)
54 54
55lws_entry: 55lws_entry:
56 /* Unconditional branch to lws_start, located on the 56 gate lws_start, %r0 /* increase privilege */
57 same gateway page */ 57 depi 3, 31, 2, %r31 /* Ensure we return into user mode. */
58 b,n lws_start
59 58
60 /* Fill from 0xb4 to 0xe0 */ 59 /* Fill from 0xb8 to 0xe0 */
61 .rept 11 60 .rept 10
62 KILL_INSN 61 KILL_INSN
63 .endr 62 .endr
64 63
@@ -423,9 +422,6 @@ tracesys_sigexit:
423 422
424 *********************************************************/ 423 *********************************************************/
425lws_start: 424lws_start:
426 /* Gate and ensure we return to userspace */
427 gate .+8, %r0
428 depi 3, 31, 2, %r31 /* Ensure we return to userspace */
429 425
430#ifdef CONFIG_64BIT 426#ifdef CONFIG_64BIT
431 /* FIXME: If we are a 64-bit kernel just 427 /* FIXME: If we are a 64-bit kernel just
@@ -442,7 +438,7 @@ lws_start:
442#endif 438#endif
443 439
444 /* Is the lws entry number valid? */ 440 /* Is the lws entry number valid? */
445 comiclr,>>= __NR_lws_entries, %r20, %r0 441 comiclr,>> __NR_lws_entries, %r20, %r0
446 b,n lws_exit_nosys 442 b,n lws_exit_nosys
447 443
448 /* WARNING: Trashing sr2 and sr3 */ 444 /* WARNING: Trashing sr2 and sr3 */
@@ -473,7 +469,7 @@ lws_exit:
473 /* now reset the lowest bit of sp if it was set */ 469 /* now reset the lowest bit of sp if it was set */
474 xor %r30,%r1,%r30 470 xor %r30,%r1,%r30
475#endif 471#endif
476 be,n 0(%sr3, %r31) 472 be,n 0(%sr7, %r31)
477 473
478 474
479 475
@@ -529,7 +525,6 @@ lws_compare_and_swap32:
529#endif 525#endif
530 526
531lws_compare_and_swap: 527lws_compare_and_swap:
532#ifdef CONFIG_SMP
533 /* Load start of lock table */ 528 /* Load start of lock table */
534 ldil L%lws_lock_start, %r20 529 ldil L%lws_lock_start, %r20
535 ldo R%lws_lock_start(%r20), %r28 530 ldo R%lws_lock_start(%r20), %r28
@@ -572,8 +567,6 @@ cas_wouldblock:
572 ldo 2(%r0), %r28 /* 2nd case */ 567 ldo 2(%r0), %r28 /* 2nd case */
573 b lws_exit /* Contended... */ 568 b lws_exit /* Contended... */
574 ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ 569 ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
575#endif
576/* CONFIG_SMP */
577 570
578 /* 571 /*
579 prev = *addr; 572 prev = *addr;
@@ -601,13 +594,11 @@ cas_action:
6011: ldw 0(%sr3,%r26), %r28 5941: ldw 0(%sr3,%r26), %r28
602 sub,<> %r28, %r25, %r0 595 sub,<> %r28, %r25, %r0
6032: stw %r24, 0(%sr3,%r26) 5962: stw %r24, 0(%sr3,%r26)
604#ifdef CONFIG_SMP
605 /* Free lock */ 597 /* Free lock */
606 stw %r20, 0(%sr2,%r20) 598 stw %r20, 0(%sr2,%r20)
607# if ENABLE_LWS_DEBUG 599#if ENABLE_LWS_DEBUG
608 /* Clear thread register indicator */ 600 /* Clear thread register indicator */
609 stw %r0, 4(%sr2,%r20) 601 stw %r0, 4(%sr2,%r20)
610# endif
611#endif 602#endif
612 /* Return to userspace, set no error */ 603 /* Return to userspace, set no error */
613 b lws_exit 604 b lws_exit
@@ -615,12 +606,10 @@ cas_action:
615 606
6163: 6073:
617 /* Error occurred on load or store */ 608 /* Error occurred on load or store */
618#ifdef CONFIG_SMP
619 /* Free lock */ 609 /* Free lock */
620 stw %r20, 0(%sr2,%r20) 610 stw %r20, 0(%sr2,%r20)
621# if ENABLE_LWS_DEBUG 611#if ENABLE_LWS_DEBUG
622 stw %r0, 4(%sr2,%r20) 612 stw %r0, 4(%sr2,%r20)
623# endif
624#endif 613#endif
625 b lws_exit 614 b lws_exit
626 ldo -EFAULT(%r0),%r21 /* set errno */ 615 ldo -EFAULT(%r0),%r21 /* set errno */
@@ -672,7 +661,6 @@ ENTRY(sys_call_table64)
672END(sys_call_table64) 661END(sys_call_table64)
673#endif 662#endif
674 663
675#ifdef CONFIG_SMP
676 /* 664 /*
677 All light-weight-syscall atomic operations 665 All light-weight-syscall atomic operations
678 will use this set of locks 666 will use this set of locks
@@ -694,8 +682,6 @@ ENTRY(lws_lock_start)
694 .endr 682 .endr
695END(lws_lock_start) 683END(lws_lock_start)
696 .previous 684 .previous
697#endif
698/* CONFIG_SMP for lws_lock_start */
699 685
700.end 686.end
701 687
diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c
index 3ca1c6149218..27a7492ddb0d 100644
--- a/arch/parisc/math-emu/decode_exc.c
+++ b/arch/parisc/math-emu/decode_exc.c
@@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
342 return SIGNALCODE(SIGFPE, FPE_FLTINV); 342 return SIGNALCODE(SIGFPE, FPE_FLTINV);
343 case DIVISIONBYZEROEXCEPTION: 343 case DIVISIONBYZEROEXCEPTION:
344 update_trap_counts(Fpu_register, aflags, bflags, trap_counts); 344 update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
345 Clear_excp_register(exception_index);
345 return SIGNALCODE(SIGFPE, FPE_FLTDIV); 346 return SIGNALCODE(SIGFPE, FPE_FLTDIV);
346 case INEXACTEXCEPTION: 347 case INEXACTEXCEPTION:
347 update_trap_counts(Fpu_register, aflags, bflags, trap_counts); 348 update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index c6afbfc95770..18162ce4261e 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -264,8 +264,7 @@ no_context:
264 264
265 out_of_memory: 265 out_of_memory:
266 up_read(&mm->mmap_sem); 266 up_read(&mm->mmap_sem);
267 printk(KERN_CRIT "VM: killing process %s\n", current->comm); 267 if (!user_mode(regs))
268 if (user_mode(regs)) 268 goto no_context;
269 do_group_exit(SIGKILL); 269 pagefault_out_of_memory();
270 goto no_context;
271} 270}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c4c4549c22bb..66a315e06dce 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -663,6 +663,9 @@ config ZONE_DMA
663config NEED_DMA_MAP_STATE 663config NEED_DMA_MAP_STATE
664 def_bool (PPC64 || NOT_COHERENT_CACHE) 664 def_bool (PPC64 || NOT_COHERENT_CACHE)
665 665
666config NEED_SG_DMA_LENGTH
667 def_bool y
668
666config GENERIC_ISA_DMA 669config GENERIC_ISA_DMA
667 bool 670 bool
668 depends on PPC64 || POWER4 || 6xx && !CPM2 671 depends on PPC64 || POWER4 || 6xx && !CPM2
diff --git a/arch/powerpc/include/asm/scatterlist.h b/arch/powerpc/include/asm/scatterlist.h
index 912bf597870f..34cc78fd0ef4 100644
--- a/arch/powerpc/include/asm/scatterlist.h
+++ b/arch/powerpc/include/asm/scatterlist.h
@@ -9,38 +9,12 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#ifdef __KERNEL__
13#include <linux/types.h>
14#include <asm/dma.h> 12#include <asm/dma.h>
15 13#include <asm-generic/scatterlist.h>
16struct scatterlist {
17#ifdef CONFIG_DEBUG_SG
18 unsigned long sg_magic;
19#endif
20 unsigned long page_link;
21 unsigned int offset;
22 unsigned int length;
23
24 /* For TCE or SWIOTLB support */
25 dma_addr_t dma_address;
26 u32 dma_length;
27};
28
29/*
30 * These macros should be used after a dma_map_sg call has been done
31 * to get bus addresses of each of the SG entries and their lengths.
32 * You should only work with the number of sg entries pci_map_sg
33 * returns, or alternatively stop on the first sg_dma_len(sg) which
34 * is 0.
35 */
36#define sg_dma_address(sg) ((sg)->dma_address)
37#define sg_dma_len(sg) ((sg)->dma_length)
38 14
39#ifdef __powerpc64__ 15#ifdef __powerpc64__
40#define ISA_DMA_THRESHOLD (~0UL) 16#define ISA_DMA_THRESHOLD (~0UL)
41#endif 17#endif
42
43#define ARCH_HAS_SG_CHAIN 18#define ARCH_HAS_SG_CHAIN
44 19
45#endif /* __KERNEL__ */
46#endif /* _ASM_POWERPC_SCATTERLIST_H */ 20#endif /* _ASM_POWERPC_SCATTERLIST_H */
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
index 8b8fab91ad1e..3a7a67a0d006 100644
--- a/arch/powerpc/include/asm/sfp-machine.h
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -353,6 +353,12 @@
353#define abort() \ 353#define abort() \
354 return 0 354 return 0
355 355
356#ifdef __BIG_ENDIAN
357#define __BYTE_ORDER __BIG_ENDIAN
358#else
359#define __BYTE_ORDER __LITTLE_ENDIAN
360#endif
361
356/* Exception flags. */ 362/* Exception flags. */
357#define EFLAG_INVALID (1 << (31 - 2)) 363#define EFLAG_INVALID (1 << (31 - 2))
358#define EFLAG_OVERFLOW (1 << (31 - 3)) 364#define EFLAG_OVERFLOW (1 << (31 - 3))
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 4ff4da2c238b..e7fe218b8697 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -39,8 +39,8 @@ struct dma_map_ops swiotlb_dma_ops = {
39 .dma_supported = swiotlb_dma_supported, 39 .dma_supported = swiotlb_dma_supported,
40 .map_page = swiotlb_map_page, 40 .map_page = swiotlb_map_page,
41 .unmap_page = swiotlb_unmap_page, 41 .unmap_page = swiotlb_unmap_page,
42 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, 42 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
43 .sync_single_range_for_device = swiotlb_sync_single_range_for_device, 43 .sync_single_for_device = swiotlb_sync_single_for_device,
44 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 44 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
45 .sync_sg_for_device = swiotlb_sync_sg_for_device, 45 .sync_sg_for_device = swiotlb_sync_sg_for_device,
46 .mapping_error = swiotlb_dma_mapping_error, 46 .mapping_error = swiotlb_dma_mapping_error,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 6c1df5757cd6..8d1de6f31d5a 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -127,11 +127,11 @@ static inline void dma_direct_sync_sg(struct device *dev,
127 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); 127 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
128} 128}
129 129
130static inline void dma_direct_sync_single_range(struct device *dev, 130static inline void dma_direct_sync_single(struct device *dev,
131 dma_addr_t dma_handle, unsigned long offset, size_t size, 131 dma_addr_t dma_handle, size_t size,
132 enum dma_data_direction direction) 132 enum dma_data_direction direction)
133{ 133{
134 __dma_sync(bus_to_virt(dma_handle+offset), size, direction); 134 __dma_sync(bus_to_virt(dma_handle), size, direction);
135} 135}
136#endif 136#endif
137 137
@@ -144,8 +144,8 @@ struct dma_map_ops dma_direct_ops = {
144 .map_page = dma_direct_map_page, 144 .map_page = dma_direct_map_page,
145 .unmap_page = dma_direct_unmap_page, 145 .unmap_page = dma_direct_unmap_page,
146#ifdef CONFIG_NOT_COHERENT_CACHE 146#ifdef CONFIG_NOT_COHERENT_CACHE
147 .sync_single_range_for_cpu = dma_direct_sync_single_range, 147 .sync_single_for_cpu = dma_direct_sync_single,
148 .sync_single_range_for_device = dma_direct_sync_single_range, 148 .sync_single_for_device = dma_direct_sync_single,
149 .sync_sg_for_cpu = dma_direct_sync_sg, 149 .sync_sg_for_cpu = dma_direct_sync_sg,
150 .sync_sg_for_device = dma_direct_sync_sg, 150 .sync_sg_for_device = dma_direct_sync_sg,
151#endif 151#endif
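dma_direct_sync_single_range() loses its offset argument along with the sync_single_range_* dma_map_ops, which are being replaced by plain sync_single_*; callers fold the offset into the bus address themselves. A sketch of the driver-facing calls (names illustrative):

#include <linux/dma-mapping.h>

/* Sketch: 'handle' is the dma_addr_t from dma_map_single(); the partial
 * region to sync is addressed by adding the offset to the handle. */
static void example_sync(struct device *dev, dma_addr_t handle,
			 unsigned long offset, size_t len)
{
	dma_sync_single_for_cpu(dev, handle + offset, len, DMA_FROM_DEVICE);
	/* ... CPU examines the data the device just wrote ... */
	dma_sync_single_for_device(dev, handle + offset, len, DMA_FROM_DEVICE);
}
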
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 5c2808252516..1a40da92154c 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1849,8 +1849,7 @@ out:
1849 return ret; 1849 return ret;
1850} 1850}
1851 1851
1852static int spufs_mfc_fsync(struct file *file, struct dentry *dentry, 1852static int spufs_mfc_fsync(struct file *file, int datasync)
1853 int datasync)
1854{ 1853{
1855 return spufs_mfc_flush(file, NULL); 1854 return spufs_mfc_flush(file, NULL);
1856} 1855}
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index fc1b1c42b1dc..e5e5f823d687 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -251,7 +251,7 @@ const struct file_operations spufs_context_fops = {
251 .llseek = dcache_dir_lseek, 251 .llseek = dcache_dir_lseek,
252 .read = generic_read_dir, 252 .read = generic_read_dir,
253 .readdir = dcache_readdir, 253 .readdir = dcache_readdir,
254 .fsync = simple_sync_file, 254 .fsync = noop_fsync,
255}; 255};
256EXPORT_SYMBOL_GPL(spufs_context_fops); 256EXPORT_SYMBOL_GPL(spufs_context_fops);
257 257
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index 1fefae76e295..e19ff021e711 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -102,7 +102,7 @@ static const struct file_operations hcall_inst_seq_fops = {
102#define CPU_NAME_BUF_SIZE 32 102#define CPU_NAME_BUF_SIZE 32
103 103
104 104
105static void probe_hcall_entry(unsigned long opcode, unsigned long *args) 105static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long *args)
106{ 106{
107 struct hcall_stats *h; 107 struct hcall_stats *h;
108 108
@@ -114,7 +114,7 @@ static void probe_hcall_entry(unsigned long opcode, unsigned long *args)
114 h->purr_start = mfspr(SPRN_PURR); 114 h->purr_start = mfspr(SPRN_PURR);
115} 115}
116 116
117static void probe_hcall_exit(unsigned long opcode, unsigned long retval, 117static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long retval,
118 unsigned long *retbuf) 118 unsigned long *retbuf)
119{ 119{
120 struct hcall_stats *h; 120 struct hcall_stats *h;
@@ -140,11 +140,11 @@ static int __init hcall_inst_init(void)
140 if (!firmware_has_feature(FW_FEATURE_LPAR)) 140 if (!firmware_has_feature(FW_FEATURE_LPAR))
141 return 0; 141 return 0;
142 142
143 if (register_trace_hcall_entry(probe_hcall_entry)) 143 if (register_trace_hcall_entry(probe_hcall_entry, NULL))
144 return -EINVAL; 144 return -EINVAL;
145 145
146 if (register_trace_hcall_exit(probe_hcall_exit)) { 146 if (register_trace_hcall_exit(probe_hcall_exit, NULL)) {
147 unregister_trace_hcall_entry(probe_hcall_entry); 147 unregister_trace_hcall_entry(probe_hcall_entry, NULL);
148 return -EINVAL; 148 return -EINVAL;
149 } 149 }
150 150
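Both probes gain a leading private-data argument because the tracepoint API now threads the pointer supplied at registration time through to the probe; hvCall_inst passes NULL. A minimal sketch (function names are illustrative):

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/trace.h>

/* Probes now receive the pointer supplied at registration time as their
 * first argument; hvCall_inst.c registers with NULL. */
static void example_probe(void *priv, unsigned long opcode, unsigned long *args)
{
	/* 'priv' == the second argument of register_trace_hcall_entry() */
}

static int __init example_probe_init(void)
{
	return register_trace_hcall_entry(example_probe, NULL) ? -EINVAL : 0;
}
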
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 6a1fde0d22b0..cd37e49e7034 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -1,6 +1,15 @@
1/* 1/*
2 * Freescale MPC85xx/MPC86xx RapidIO support 2 * Freescale MPC85xx/MPC86xx RapidIO support
3 * 3 *
4 * Copyright 2009 Sysgo AG
5 * Thomas Moll <thomas.moll@sysgo.com>
6 * - fixed maintenance access routines, check for aligned access
7 *
8 * Copyright 2009 Integrated Device Technology, Inc.
9 * Alex Bounine <alexandre.bounine@idt.com>
10 * - Added Port-Write message handling
11 * - Added Machine Check exception handling
12 *
4 * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc. 13 * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc.
5 * Zhang Wei <wei.zhang@freescale.com> 14 * Zhang Wei <wei.zhang@freescale.com>
6 * 15 *
@@ -24,19 +33,30 @@
24#include <linux/of_platform.h> 33#include <linux/of_platform.h>
25#include <linux/delay.h> 34#include <linux/delay.h>
26#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/kfifo.h>
27 37
28#include <asm/io.h> 38#include <asm/io.h>
39#include <asm/machdep.h>
40#include <asm/uaccess.h>
41
42#undef DEBUG_PW /* Port-Write debugging */
29 43
30/* RapidIO definition irq, which read from OF-tree */ 44/* RapidIO definition irq, which read from OF-tree */
31#define IRQ_RIO_BELL(m) (((struct rio_priv *)(m->priv))->bellirq) 45#define IRQ_RIO_BELL(m) (((struct rio_priv *)(m->priv))->bellirq)
32#define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq) 46#define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq)
33#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) 47#define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq)
48#define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq)
34 49
35#define RIO_ATMU_REGS_OFFSET 0x10c00 50#define RIO_ATMU_REGS_OFFSET 0x10c00
36#define RIO_P_MSG_REGS_OFFSET 0x11000 51#define RIO_P_MSG_REGS_OFFSET 0x11000
37#define RIO_S_MSG_REGS_OFFSET 0x13000 52#define RIO_S_MSG_REGS_OFFSET 0x13000
38#define RIO_ESCSR 0x158 53#define RIO_ESCSR 0x158
39#define RIO_CCSR 0x15c 54#define RIO_CCSR 0x15c
55#define RIO_LTLEDCSR 0x0608
56#define RIO_LTLEDCSR_IER 0x80000000
57#define RIO_LTLEDCSR_PRT 0x01000000
58#define RIO_LTLEECSR 0x060c
59#define RIO_EPWISR 0x10010
40#define RIO_ISR_AACR 0x10120 60#define RIO_ISR_AACR 0x10120
41#define RIO_ISR_AACR_AA 0x1 /* Accept All ID */ 61#define RIO_ISR_AACR_AA 0x1 /* Accept All ID */
42#define RIO_MAINT_WIN_SIZE 0x400000 62#define RIO_MAINT_WIN_SIZE 0x400000
@@ -55,6 +75,18 @@
55#define RIO_MSG_ISR_QFI 0x00000010 75#define RIO_MSG_ISR_QFI 0x00000010
56#define RIO_MSG_ISR_DIQI 0x00000001 76#define RIO_MSG_ISR_DIQI 0x00000001
57 77
78#define RIO_IPWMR_SEN 0x00100000
79#define RIO_IPWMR_QFIE 0x00000100
80#define RIO_IPWMR_EIE 0x00000020
81#define RIO_IPWMR_CQ 0x00000002
82#define RIO_IPWMR_PWE 0x00000001
83
84#define RIO_IPWSR_QF 0x00100000
85#define RIO_IPWSR_TE 0x00000080
86#define RIO_IPWSR_QFI 0x00000010
87#define RIO_IPWSR_PWD 0x00000008
88#define RIO_IPWSR_PWB 0x00000004
89
58#define RIO_MSG_DESC_SIZE 32 90#define RIO_MSG_DESC_SIZE 32
59#define RIO_MSG_BUFFER_SIZE 4096 91#define RIO_MSG_BUFFER_SIZE 4096
60#define RIO_MIN_TX_RING_SIZE 2 92#define RIO_MIN_TX_RING_SIZE 2
@@ -121,7 +153,7 @@ struct rio_msg_regs {
121 u32 pad10[26]; 153 u32 pad10[26];
122 u32 pwmr; 154 u32 pwmr;
123 u32 pwsr; 155 u32 pwsr;
124 u32 pad11; 156 u32 epwqbar;
125 u32 pwqbar; 157 u32 pwqbar;
126}; 158};
127 159
@@ -160,6 +192,14 @@ struct rio_msg_rx_ring {
160 void *dev_id; 192 void *dev_id;
161}; 193};
162 194
195struct rio_port_write_msg {
196 void *virt;
197 dma_addr_t phys;
198 u32 msg_count;
199 u32 err_count;
200 u32 discard_count;
201};
202
163struct rio_priv { 203struct rio_priv {
164 struct device *dev; 204 struct device *dev;
165 void __iomem *regs_win; 205 void __iomem *regs_win;
@@ -172,11 +212,64 @@ struct rio_priv {
172 struct rio_dbell_ring dbell_ring; 212 struct rio_dbell_ring dbell_ring;
173 struct rio_msg_tx_ring msg_tx_ring; 213 struct rio_msg_tx_ring msg_tx_ring;
174 struct rio_msg_rx_ring msg_rx_ring; 214 struct rio_msg_rx_ring msg_rx_ring;
215 struct rio_port_write_msg port_write_msg;
175 int bellirq; 216 int bellirq;
176 int txirq; 217 int txirq;
177 int rxirq; 218 int rxirq;
219 int pwirq;
220 struct work_struct pw_work;
221 struct kfifo pw_fifo;
222 spinlock_t pw_fifo_lock;
178}; 223};
179 224
225#define __fsl_read_rio_config(x, addr, err, op) \
226 __asm__ __volatile__( \
227 "1: "op" %1,0(%2)\n" \
228 " eieio\n" \
229 "2:\n" \
230 ".section .fixup,\"ax\"\n" \
231 "3: li %1,-1\n" \
232 " li %0,%3\n" \
233 " b 2b\n" \
234 ".section __ex_table,\"a\"\n" \
235 " .align 2\n" \
236 " .long 1b,3b\n" \
237 ".text" \
238 : "=r" (err), "=r" (x) \
239 : "b" (addr), "i" (-EFAULT), "0" (err))
240
241static void __iomem *rio_regs_win;
242
243static int (*saved_mcheck_exception)(struct pt_regs *regs);
244
245static int fsl_rio_mcheck_exception(struct pt_regs *regs)
246{
247 const struct exception_table_entry *entry = NULL;
248 unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK);
249
250 if (reason & MCSR_BUS_RBERR) {
251 reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
252 if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
253 /* Check if we are prepared to handle this fault */
254 entry = search_exception_tables(regs->nip);
255 if (entry) {
256 pr_debug("RIO: %s - MC Exception handled\n",
257 __func__);
258 out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
259 0);
260 regs->msr |= MSR_RI;
261 regs->nip = entry->fixup;
262 return 1;
263 }
264 }
265 }
266
267 if (saved_mcheck_exception)
268 return saved_mcheck_exception(regs);
269 else
270 return cur_cpu_spec->machine_check(regs);
271}
272
180/** 273/**
181 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message 274 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
182 * @mport: RapidIO master port info 275 * @mport: RapidIO master port info
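The machine-check hook added above pairs with the __fsl_read_rio_config() fixup macro: a maintenance read that triggers a bus error is steered to its exception-table fixup instead of taking the kernel down. Condensed from the hunk (debug output and the LTLEDCSR clearing trimmed; rio_bus_error_pending() is a placeholder for the MCSR_BUS_RBERR / RIO_LTLEDCSR tests):

static int fsl_rio_mcheck_sketch(struct pt_regs *regs)
{
	const struct exception_table_entry *entry;

	if (rio_bus_error_pending()) {			/* placeholder for the MCSR/LTLEDCSR checks */
		entry = search_exception_tables(regs->nip);
		if (entry) {				/* access was wrapped by __fsl_read_rio_config */
			regs->msr |= MSR_RI;		/* mark the exception recoverable */
			regs->nip = entry->fixup;	/* resume in the fixup stub; the read returns -EFAULT */
			return 1;			/* handled */
		}
	}
	return saved_mcheck_exception ? saved_mcheck_exception(regs)
				      : cur_cpu_spec->machine_check(regs);
}
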
@@ -277,27 +370,44 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
277{ 370{
278 struct rio_priv *priv = mport->priv; 371 struct rio_priv *priv = mport->priv;
279 u8 *data; 372 u8 *data;
373 u32 rval, err = 0;
280 374
281 pr_debug 375 pr_debug
282 ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n", 376 ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
283 index, destid, hopcount, offset, len); 377 index, destid, hopcount, offset, len);
378
379 /* 16MB maintenance window possible */
380 /* allow only aligned access to maintenance registers */
381 if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
382 return -EINVAL;
383
284 out_be32(&priv->maint_atmu_regs->rowtar, 384 out_be32(&priv->maint_atmu_regs->rowtar,
285 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9)); 385 (destid << 22) | (hopcount << 12) | (offset >> 12));
386 out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
286 387
287 data = (u8 *) priv->maint_win + offset; 388 data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
288 switch (len) { 389 switch (len) {
289 case 1: 390 case 1:
290 *val = in_8((u8 *) data); 391 __fsl_read_rio_config(rval, data, err, "lbz");
291 break; 392 break;
292 case 2: 393 case 2:
293 *val = in_be16((u16 *) data); 394 __fsl_read_rio_config(rval, data, err, "lhz");
294 break; 395 break;
295 default: 396 case 4:
296 *val = in_be32((u32 *) data); 397 __fsl_read_rio_config(rval, data, err, "lwz");
297 break; 398 break;
399 default:
400 return -EINVAL;
298 } 401 }
299 402
300 return 0; 403 if (err) {
404 pr_debug("RIO: cfg_read error %d for %x:%x:%x\n",
405 err, destid, hopcount, offset);
406 }
407
408 *val = rval;
409
410 return err;
301} 411}
302 412
303/** 413/**
@@ -322,10 +432,17 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
322 pr_debug 432 pr_debug
323 ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", 433 ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
324 index, destid, hopcount, offset, len, val); 434 index, destid, hopcount, offset, len, val);
435
436 /* 16MB maintenance windows possible */
437 /* allow only aligned access to maintenance registers */
438 if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
439 return -EINVAL;
440
325 out_be32(&priv->maint_atmu_regs->rowtar, 441 out_be32(&priv->maint_atmu_regs->rowtar,
326 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9)); 442 (destid << 22) | (hopcount << 12) | (offset >> 12));
443 out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
327 444
328 data = (u8 *) priv->maint_win + offset; 445 data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
329 switch (len) { 446 switch (len) {
330 case 1: 447 case 1:
331 out_8((u8 *) data, val); 448 out_8((u8 *) data, val);
@@ -333,9 +450,11 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
333 case 2: 450 case 2:
334 out_be16((u16 *) data, val); 451 out_be16((u16 *) data, val);
335 break; 452 break;
336 default: 453 case 4:
337 out_be32((u32 *) data, val); 454 out_be32((u32 *) data, val);
338 break; 455 break;
456 default:
457 return -EINVAL;
339 } 458 }
340 459
341 return 0; 460 return 0;
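Both maintenance accessors now reject out-of-range and misaligned requests before programming the ATMU window, and only 1-, 2- and 4-byte widths are accepted. The check they share, pulled out into a hypothetical helper for clarity (maint_access_ok() does not exist in the driver):

#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical helper: the validation both accessors now perform up front. */
static bool maint_access_ok(u32 offset, int len)
{
	if (len != 1 && len != 2 && len != 4)
		return false;			/* unsupported width: -EINVAL */
	if (offset > (0x1000000 - len))
		return false;			/* beyond the 16MB maintenance space */
	return IS_ALIGNED(offset, len);		/* naturally aligned accesses only */
}
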
@@ -930,6 +1049,223 @@ static int fsl_rio_doorbell_init(struct rio_mport *mport)
930 return rc; 1049 return rc;
931} 1050}
932 1051
1052/**
1053 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
1054 * @irq: Linux interrupt number
1055 * @dev_instance: Pointer to interrupt-specific data
1056 *
1057 * Handles port write interrupts. Parses a list of registered
1058 * port write event handlers and executes a matching event handler.
1059 */
1060static irqreturn_t
1061fsl_rio_port_write_handler(int irq, void *dev_instance)
1062{
1063 u32 ipwmr, ipwsr;
1064 struct rio_mport *port = (struct rio_mport *)dev_instance;
1065 struct rio_priv *priv = port->priv;
1066 u32 epwisr, tmp;
1067
1068 ipwmr = in_be32(&priv->msg_regs->pwmr);
1069 ipwsr = in_be32(&priv->msg_regs->pwsr);
1070
1071 epwisr = in_be32(priv->regs_win + RIO_EPWISR);
1072 if (epwisr & 0x80000000) {
1073 tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
1074 pr_info("RIO_LTLEDCSR = 0x%x\n", tmp);
1075 out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
1076 }
1077
1078 if (!(epwisr & 0x00000001))
1079 return IRQ_HANDLED;
1080
1081#ifdef DEBUG_PW
1082 pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
1083 if (ipwsr & RIO_IPWSR_QF)
1084 pr_debug(" QF");
1085 if (ipwsr & RIO_IPWSR_TE)
1086 pr_debug(" TE");
1087 if (ipwsr & RIO_IPWSR_QFI)
1088 pr_debug(" QFI");
1089 if (ipwsr & RIO_IPWSR_PWD)
1090 pr_debug(" PWD");
1091 if (ipwsr & RIO_IPWSR_PWB)
1092 pr_debug(" PWB");
1093 pr_debug(" )\n");
1094#endif
1095 out_be32(&priv->msg_regs->pwsr,
1096 ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
1097
1098 if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
1099 priv->port_write_msg.err_count++;
1100 pr_info("RIO: Port-Write Transaction Err (%d)\n",
1101 priv->port_write_msg.err_count);
1102 }
1103 if (ipwsr & RIO_IPWSR_PWD) {
1104 priv->port_write_msg.discard_count++;
1105 pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
1106 priv->port_write_msg.discard_count);
1107 }
1108
1109 /* Schedule deferred processing if PW was received */
1110 if (ipwsr & RIO_IPWSR_QFI) {
1111 /* Save PW message (if there is room in FIFO),
1112 * otherwise discard it.
1113 */
1114 if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) {
1115 priv->port_write_msg.msg_count++;
1116 kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt,
1117 RIO_PW_MSG_SIZE);
1118 } else {
1119 priv->port_write_msg.discard_count++;
1120 pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
1121 priv->port_write_msg.discard_count);
1122 }
1123 schedule_work(&priv->pw_work);
1124 }
1125
1126 /* Issue Clear Queue command. This allows another
1127 * port-write to be received.
1128 */
1129 out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
1130
1131 return IRQ_HANDLED;
1132}
1133
1134static void fsl_pw_dpc(struct work_struct *work)
1135{
1136 struct rio_priv *priv = container_of(work, struct rio_priv, pw_work);
1137 unsigned long flags;
1138 u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];
1139
1140 /*
1141 * Process port-write messages
1142 */
1143 spin_lock_irqsave(&priv->pw_fifo_lock, flags);
1144 while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer,
1145 RIO_PW_MSG_SIZE)) {
1146 /* Process one message */
1147 spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
1148#ifdef DEBUG_PW
1149 {
1150 u32 i;
1151 pr_debug("%s : Port-Write Message:", __func__);
1152 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
1153 if ((i%4) == 0)
1154 pr_debug("\n0x%02x: 0x%08x", i*4,
1155 msg_buffer[i]);
1156 else
1157 pr_debug(" 0x%08x", msg_buffer[i]);
1158 }
1159 pr_debug("\n");
1160 }
1161#endif
1162 /* Pass the port-write message to RIO core for processing */
1163 rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
1164 spin_lock_irqsave(&priv->pw_fifo_lock, flags);
1165 }
1166 spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
1167}
1168
1169/**
1170 * fsl_rio_pw_enable - enable/disable port-write interface init
1171 * @mport: Master port implementing the port write unit
1172 * @enable: 1=enable; 0=disable port-write message handling
1173 */
1174static int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
1175{
1176 struct rio_priv *priv = mport->priv;
1177 u32 rval;
1178
1179 rval = in_be32(&priv->msg_regs->pwmr);
1180
1181 if (enable)
1182 rval |= RIO_IPWMR_PWE;
1183 else
1184 rval &= ~RIO_IPWMR_PWE;
1185
1186 out_be32(&priv->msg_regs->pwmr, rval);
1187
1188 return 0;
1189}
1190
1191/**
1192 * fsl_rio_port_write_init - MPC85xx port write interface init
1193 * @mport: Master port implementing the port write unit
1194 *
1195 * Initializes port write unit hardware and DMA buffer
1196 * ring. Called from fsl_rio_setup(). Returns %0 on success
1197 * or %-ENOMEM on failure.
1198 */
1199static int fsl_rio_port_write_init(struct rio_mport *mport)
1200{
1201 struct rio_priv *priv = mport->priv;
1202 int rc = 0;
1203
1204 /* Following configurations require a disabled port write controller */
1205 out_be32(&priv->msg_regs->pwmr,
1206 in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE);
1207
1208 /* Initialize port write */
1209 priv->port_write_msg.virt = dma_alloc_coherent(priv->dev,
1210 RIO_PW_MSG_SIZE,
1211 &priv->port_write_msg.phys, GFP_KERNEL);
1212 if (!priv->port_write_msg.virt) {
1213 pr_err("RIO: unable allocate port write queue\n");
1214 return -ENOMEM;
1215 }
1216
1217 priv->port_write_msg.err_count = 0;
1218 priv->port_write_msg.discard_count = 0;
1219
1220 /* Point dequeue/enqueue pointers at first entry */
1221 out_be32(&priv->msg_regs->epwqbar, 0);
1222 out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys);
1223
1224 pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
1225 in_be32(&priv->msg_regs->epwqbar),
1226 in_be32(&priv->msg_regs->pwqbar));
1227
1228 /* Clear interrupt status IPWSR */
1229 out_be32(&priv->msg_regs->pwsr,
1230 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
1231
1232 /* Configure port write contoller for snooping enable all reporting,
1233 clear queue full */
1234 out_be32(&priv->msg_regs->pwmr,
1235 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
1236
1237
1238 /* Hook up port-write handler */
1239 rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0,
1240 "port-write", (void *)mport);
1241 if (rc < 0) {
1242 pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
1243 goto err_out;
1244 }
1245
1246 INIT_WORK(&priv->pw_work, fsl_pw_dpc);
1247 spin_lock_init(&priv->pw_fifo_lock);
1248 if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
1249 pr_err("FIFO allocation failed\n");
1250 rc = -ENOMEM;
1251 goto err_out_irq;
1252 }
1253
1254 pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
1255 in_be32(&priv->msg_regs->pwmr),
1256 in_be32(&priv->msg_regs->pwsr));
1257
1258 return rc;
1259
1260err_out_irq:
1261 free_irq(IRQ_RIO_PW(mport), (void *)mport);
1262err_out:
1263 dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE,
1264 priv->port_write_msg.virt,
1265 priv->port_write_msg.phys);
1266 return rc;
1267}
1268
933static char *cmdline = NULL; 1269static char *cmdline = NULL;
934 1270
935static int fsl_rio_get_hdid(int index) 1271static int fsl_rio_get_hdid(int index)
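The port-write machinery added in the hunk above splits work between interrupt and process context: the ISR copies each message into a kfifo and schedules a work item, and the work item drains the FIFO under pw_fifo_lock, handing each message to rio_inb_pwrite_handler(). A stripped-down sketch of that producer/consumer shape (PW_MSG_SIZE and the pw_* names are placeholders for the driver's RIO_PW_MSG_SIZE and rio_priv fields):

#include <linux/kfifo.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

#define PW_MSG_SIZE	64		/* placeholder for RIO_PW_MSG_SIZE */

static struct kfifo pw_fifo;
static DEFINE_SPINLOCK(pw_lock);
static struct work_struct pw_work;

/* interrupt context: stash the message and kick the worker */
static void pw_produce(const void *msg)
{
	if (kfifo_avail(&pw_fifo) >= PW_MSG_SIZE)
		kfifo_in(&pw_fifo, msg, PW_MSG_SIZE);	/* else: drop and count a discard */
	schedule_work(&pw_work);
}

/* process context: drain one message at a time, parsing outside the lock */
static void pw_consume(struct work_struct *work)
{
	unsigned char msg[PW_MSG_SIZE];
	unsigned long flags;

	spin_lock_irqsave(&pw_lock, flags);
	while (kfifo_out(&pw_fifo, msg, PW_MSG_SIZE) == PW_MSG_SIZE) {
		spin_unlock_irqrestore(&pw_lock, flags);
		/* hand 'msg' to the consumer, e.g. rio_inb_pwrite_handler() */
		spin_lock_irqsave(&pw_lock, flags);
	}
	spin_unlock_irqrestore(&pw_lock, flags);
}

static int pw_setup(void)
{
	INIT_WORK(&pw_work, pw_consume);
	return kfifo_alloc(&pw_fifo, PW_MSG_SIZE * 32, GFP_KERNEL);
}
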
@@ -1057,7 +1393,7 @@ int fsl_rio_setup(struct of_device *dev)
1057 dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n", 1393 dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n",
1058 law_start, law_size); 1394 law_start, law_size);
1059 1395
1060 ops = kmalloc(sizeof(struct rio_ops), GFP_KERNEL); 1396 ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
1061 if (!ops) { 1397 if (!ops) {
1062 rc = -ENOMEM; 1398 rc = -ENOMEM;
1063 goto err_ops; 1399 goto err_ops;
@@ -1067,6 +1403,7 @@ int fsl_rio_setup(struct of_device *dev)
1067 ops->cread = fsl_rio_config_read; 1403 ops->cread = fsl_rio_config_read;
1068 ops->cwrite = fsl_rio_config_write; 1404 ops->cwrite = fsl_rio_config_write;
1069 ops->dsend = fsl_rio_doorbell_send; 1405 ops->dsend = fsl_rio_doorbell_send;
1406 ops->pwenable = fsl_rio_pw_enable;
1070 1407
1071 port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); 1408 port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
1072 if (!port) { 1409 if (!port) {
@@ -1089,11 +1426,12 @@ int fsl_rio_setup(struct of_device *dev)
1089 port->iores.flags = IORESOURCE_MEM; 1426 port->iores.flags = IORESOURCE_MEM;
1090 port->iores.name = "rio_io_win"; 1427 port->iores.name = "rio_io_win";
1091 1428
1429 priv->pwirq = irq_of_parse_and_map(dev->node, 0);
1092 priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); 1430 priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2);
1093 priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); 1431 priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3);
1094 priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4); 1432 priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4);
1095 dev_info(&dev->dev, "bellirq: %d, txirq: %d, rxirq %d\n", priv->bellirq, 1433 dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n",
1096 priv->txirq, priv->rxirq); 1434 priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq);
1097 1435
1098 rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); 1436 rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
1099 rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0); 1437 rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
@@ -1109,6 +1447,7 @@ int fsl_rio_setup(struct of_device *dev)
1109 rio_register_mport(port); 1447 rio_register_mport(port);
1110 1448
1111 priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); 1449 priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
1450 rio_regs_win = priv->regs_win;
1112 1451
1113 /* Probe the master port phy type */ 1452 /* Probe the master port phy type */
1114 ccsr = in_be32(priv->regs_win + RIO_CCSR); 1453 ccsr = in_be32(priv->regs_win + RIO_CCSR);
@@ -1166,7 +1505,8 @@ int fsl_rio_setup(struct of_device *dev)
1166 1505
1167 /* Configure maintenance transaction window */ 1506 /* Configure maintenance transaction window */
1168 out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12); 1507 out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12);
1169 out_be32(&priv->maint_atmu_regs->rowar, 0x80077015); /* 4M */ 1508 out_be32(&priv->maint_atmu_regs->rowar,
1509 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1));
1170 1510
1171 priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE); 1511 priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE);
1172 1512
@@ -1175,6 +1515,12 @@ int fsl_rio_setup(struct of_device *dev)
1175 (law_start + RIO_MAINT_WIN_SIZE) >> 12); 1515 (law_start + RIO_MAINT_WIN_SIZE) >> 12);
1176 out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */ 1516 out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */
1177 fsl_rio_doorbell_init(port); 1517 fsl_rio_doorbell_init(port);
1518 fsl_rio_port_write_init(port);
1519
1520 saved_mcheck_exception = ppc_md.machine_check_exception;
1521 ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
1522 /* Ensure that RFXE is set */
1523 mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
1178 1524
1179 return 0; 1525 return 0;
1180err: 1526err:
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 79d0ca086820..bee1c0f794cf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -102,6 +102,7 @@ config S390
102 select HAVE_KERNEL_GZIP 102 select HAVE_KERNEL_GZIP
103 select HAVE_KERNEL_BZIP2 103 select HAVE_KERNEL_BZIP2
104 select HAVE_KERNEL_LZMA 104 select HAVE_KERNEL_LZMA
105 select HAVE_KERNEL_LZO
105 select ARCH_INLINE_SPIN_TRYLOCK 106 select ARCH_INLINE_SPIN_TRYLOCK
106 select ARCH_INLINE_SPIN_TRYLOCK_BH 107 select ARCH_INLINE_SPIN_TRYLOCK_BH
107 select ARCH_INLINE_SPIN_LOCK 108 select ARCH_INLINE_SPIN_LOCK
@@ -479,13 +480,6 @@ config CMM
479 Everybody who wants to run Linux under VM should select this 480 Everybody who wants to run Linux under VM should select this
480 option. 481 option.
481 482
482config CMM_PROC
483 bool "/proc interface to cooperative memory management"
484 depends on CMM
485 help
486 Select this option to enable the /proc interface to the
487 cooperative memory management.
488
489config CMM_IUCV 483config CMM_IUCV
490 bool "IUCV special message interface to cooperative memory management" 484 bool "IUCV special message interface to cooperative memory management"
491 depends on CMM && (SMSGIUCV=y || CMM=SMSGIUCV) 485 depends on CMM && (SMSGIUCV=y || CMM=SMSGIUCV)
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 6e4a67ad07e1..1c999f726a58 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -7,7 +7,7 @@
7BITS := $(if $(CONFIG_64BIT),64,31) 7BITS := $(if $(CONFIG_64BIT),64,31)
8 8
9targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \ 9targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
10 vmlinux.bin.lzma misc.o piggy.o sizes.h head$(BITS).o 10 vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o sizes.h head$(BITS).o
11 11
12KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 12KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
13KBUILD_CFLAGS += $(cflags-y) 13KBUILD_CFLAGS += $(cflags-y)
@@ -47,6 +47,7 @@ vmlinux.bin.all-y := $(obj)/vmlinux.bin
47suffix-$(CONFIG_KERNEL_GZIP) := gz 47suffix-$(CONFIG_KERNEL_GZIP) := gz
48suffix-$(CONFIG_KERNEL_BZIP2) := bz2 48suffix-$(CONFIG_KERNEL_BZIP2) := bz2
49suffix-$(CONFIG_KERNEL_LZMA) := lzma 49suffix-$(CONFIG_KERNEL_LZMA) := lzma
50suffix-$(CONFIG_KERNEL_LZO) := lzo
50 51
51$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) 52$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
52 $(call if_changed,gzip) 53 $(call if_changed,gzip)
@@ -54,6 +55,8 @@ $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
54 $(call if_changed,bzip2) 55 $(call if_changed,bzip2)
55$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) 56$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
56 $(call if_changed,lzma) 57 $(call if_changed,lzma)
58$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
59 $(call if_changed,lzo)
57 60
58LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T 61LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
59$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) 62$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 14e0479d3888..0851eb1e919e 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -50,6 +50,10 @@ static unsigned long free_mem_end_ptr;
50#include "../../../../lib/decompress_unlzma.c" 50#include "../../../../lib/decompress_unlzma.c"
51#endif 51#endif
52 52
53#ifdef CONFIG_KERNEL_LZO
54#include "../../../../lib/decompress_unlzo.c"
55#endif
56
53extern _sclp_print_early(const char *); 57extern _sclp_print_early(const char *);
54 58
55int puts(const char *s) 59int puts(const char *s)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 451bfbb9db3d..76daea117181 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,6 +15,7 @@
15 15
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <asm/system.h>
18 19
19#define ATOMIC_INIT(i) { (i) } 20#define ATOMIC_INIT(i) { (i) }
20 21
@@ -274,6 +275,7 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
274static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) 275static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
275{ 276{
276 long long c, old; 277 long long c, old;
278
277 c = atomic64_read(v); 279 c = atomic64_read(v);
278 for (;;) { 280 for (;;) {
279 if (unlikely(c == u)) 281 if (unlikely(c == u))
@@ -286,6 +288,23 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
286 return c != u; 288 return c != u;
287} 289}
288 290
291static inline long long atomic64_dec_if_positive(atomic64_t *v)
292{
293 long long c, old, dec;
294
295 c = atomic64_read(v);
296 for (;;) {
297 dec = c - 1;
298 if (unlikely(dec < 0))
299 break;
300 old = atomic64_cmpxchg((v), c, dec);
301 if (likely(old == c))
302 break;
303 c = old;
304 }
305 return dec;
306}
307
289#define atomic64_add(_i, _v) atomic64_add_return(_i, _v) 308#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
290#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0) 309#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
291#define atomic64_inc(_v) atomic64_add_return(1, _v) 310#define atomic64_inc(_v) atomic64_add_return(1, _v)
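The new atomic64_dec_if_positive() is the standard compare-and-swap retry loop: read, compute, try to swap, re-read on contention, and refuse the decrement if it would go negative. The same idiom on a 32-bit atomic_t, as a self-contained sketch:

#include <linux/compiler.h>
#include <asm/atomic.h>

/* Sketch: decrement v only if the result stays non-negative; returns the
 * decremented value, or a negative number if the decrement was refused. */
static inline int atomic_dec_if_positive_sketch(atomic_t *v)
{
	int c, old, dec;

	c = atomic_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;			/* would go negative: give up */
		old = atomic_cmpxchg(v, c, dec);
		if (likely(old == c))
			break;			/* swap succeeded */
		c = old;			/* lost a race: retry with the fresh value */
	}
	return dec;
}
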
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index f4bd346a52d3..1c0030f9b890 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -91,6 +91,14 @@ struct ccw_device {
91 void (*handler) (struct ccw_device *, unsigned long, struct irb *); 91 void (*handler) (struct ccw_device *, unsigned long, struct irb *);
92}; 92};
93 93
94/*
95 * Possible CIO actions triggered by the unit check handler.
96 */
97enum uc_todo {
98 UC_TODO_RETRY,
99 UC_TODO_RETRY_ON_NEW_PATH,
100 UC_TODO_STOP
101};
94 102
95/** 103/**
96 * struct ccw driver - device driver for channel attached devices 104 * struct ccw driver - device driver for channel attached devices
@@ -107,6 +115,7 @@ struct ccw_device {
107 * @freeze: callback for freezing during hibernation snapshotting 115 * @freeze: callback for freezing during hibernation snapshotting
108 * @thaw: undo work done in @freeze 116 * @thaw: undo work done in @freeze
109 * @restore: callback for restoring after hibernation 117 * @restore: callback for restoring after hibernation
118 * @uc_handler: callback for unit check handler
110 * @driver: embedded device driver structure 119 * @driver: embedded device driver structure
111 * @name: device driver name 120 * @name: device driver name
112 */ 121 */
@@ -124,6 +133,7 @@ struct ccw_driver {
124 int (*freeze)(struct ccw_device *); 133 int (*freeze)(struct ccw_device *);
125 int (*thaw) (struct ccw_device *); 134 int (*thaw) (struct ccw_device *);
126 int (*restore)(struct ccw_device *); 135 int (*restore)(struct ccw_device *);
136 enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *);
127 struct device_driver driver; 137 struct device_driver driver;
128 char *name; 138 char *name;
129}; 139};
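The new uc_handler callback lets a ccw driver decide how CIO should react to a unit check by returning one of the uc_todo values above. A hypothetical driver wiring it up (the sense-data test is illustrative only):

#include <asm/ccwdev.h>

/* Sketch (hypothetical driver): ask CIO to retry a unit-checked request
 * on another channel path instead of failing the I/O outright. */
static enum uc_todo example_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	if (irb->ecw[0])			/* illustrative sense-data test only */
		return UC_TODO_RETRY_ON_NEW_PATH;
	return UC_TODO_RETRY;
}

static struct ccw_driver example_driver = {
	.name	    = "example",
	.uc_handler = example_uc_handler,
	/* .ids, .probe, .set_online, ... as for any ccw driver */
};
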
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h
index 35d786fe93ae..be44d94cba54 100644
--- a/arch/s390/include/asm/scatterlist.h
+++ b/arch/s390/include/asm/scatterlist.h
@@ -1 +1,3 @@
1#define ISA_DMA_THRESHOLD (~0UL)
2
1#include <asm-generic/scatterlist.h> 3#include <asm-generic/scatterlist.h>
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
index 7d43fee17e32..0addc6466d95 100644
--- a/arch/s390/include/asm/sfp-util.h
+++ b/arch/s390/include/asm/sfp-util.h
@@ -73,3 +73,5 @@ extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int,
73#define UDIV_NEEDS_NORMALIZATION 0 73#define UDIV_NEEDS_NORMALIZATION 0
74 74
75#define abort() return 0 75#define abort() return 0
76
77#define __BYTE_ORDER __BIG_ENDIAN
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index d9b490a2716e..5232278d79ad 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -132,8 +132,6 @@ int main(void)
132 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); 132 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
133 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); 133 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
134 DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func)); 134 DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
135 DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook));
136 DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
137 DEFINE(__LC_IRB, offsetof(struct _lowcore, irb)); 135 DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
138 DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area)); 136 DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
139 DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area)); 137 DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
@@ -154,6 +152,8 @@ int main(void)
154 DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area)); 152 DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
155 DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); 153 DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
156 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); 154 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
155 DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook));
156 DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
157#endif /* CONFIG_32BIT */ 157#endif /* CONFIG_32BIT */
158 return 0; 158 return 0;
159} 159}
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 178d92536d90..e7192e1cb678 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -65,7 +65,7 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
65 ltgr %r3,%r3 65 ltgr %r3,%r3
66 jz 0f 66 jz 0f
67 basr %r14,%r3 67 basr %r14,%r3
68 0: 680:
69#endif 69#endif
70 .endm 70 .endm
71 71
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 3d34eef5a2c3..2a3d2bf6f083 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -63,6 +63,8 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
63 case 0x0b: /* bsm */ 63 case 0x0b: /* bsm */
64 case 0x83: /* diag */ 64 case 0x83: /* diag */
65 case 0x44: /* ex */ 65 case 0x44: /* ex */
66 case 0xac: /* stnsm */
67 case 0xad: /* stosm */
66 return -EINVAL; 68 return -EINVAL;
67 } 69 }
68 switch (*(__u16 *) instruction) { 70 switch (*(__u16 *) instruction) {
@@ -72,6 +74,7 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
72 case 0xb258: /* bsg */ 74 case 0xb258: /* bsg */
73 case 0xb218: /* pc */ 75 case 0xb218: /* pc */
74 case 0xb228: /* pt */ 76 case 0xb228: /* pt */
77 case 0xb98d: /* epsw */
75 return -EINVAL; 78 return -EINVAL;
76 } 79 }
77 return 0; 80 return 0;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7d893248d265..c8e8e1354e1d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -401,7 +401,6 @@ setup_lowcore(void)
401 lc->io_new_psw.mask = psw_kernel_bits; 401 lc->io_new_psw.mask = psw_kernel_bits;
402 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; 402 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
403 lc->clock_comparator = -1ULL; 403 lc->clock_comparator = -1ULL;
404 lc->cmf_hpp = -1ULL;
405 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; 404 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
406 lc->async_stack = (unsigned long) 405 lc->async_stack = (unsigned long)
407 __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; 406 __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
@@ -418,6 +417,7 @@ setup_lowcore(void)
418 __ctl_set_bit(14, 29); 417 __ctl_set_bit(14, 29);
419 } 418 }
420#else 419#else
420 lc->cmf_hpp = -1ULL;
421 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; 421 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
422#endif 422#endif
423 lc->sync_enter_timer = S390_lowcore.sync_enter_timer; 423 lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index e4d98de83dd8..541053ed234e 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -944,21 +944,21 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
944 struct cpu *c = &per_cpu(cpu_devices, cpu); 944 struct cpu *c = &per_cpu(cpu_devices, cpu);
945 struct sys_device *s = &c->sysdev; 945 struct sys_device *s = &c->sysdev;
946 struct s390_idle_data *idle; 946 struct s390_idle_data *idle;
947 int err = 0;
947 948
948 switch (action) { 949 switch (action) {
949 case CPU_ONLINE: 950 case CPU_ONLINE:
950 case CPU_ONLINE_FROZEN: 951 case CPU_ONLINE_FROZEN:
951 idle = &per_cpu(s390_idle, cpu); 952 idle = &per_cpu(s390_idle, cpu);
952 memset(idle, 0, sizeof(struct s390_idle_data)); 953 memset(idle, 0, sizeof(struct s390_idle_data));
953 if (sysfs_create_group(&s->kobj, &cpu_online_attr_group)) 954 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
954 return NOTIFY_BAD;
955 break; 955 break;
956 case CPU_DEAD: 956 case CPU_DEAD:
957 case CPU_DEAD_FROZEN: 957 case CPU_DEAD_FROZEN:
958 sysfs_remove_group(&s->kobj, &cpu_online_attr_group); 958 sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
959 break; 959 break;
960 } 960 }
961 return NOTIFY_OK; 961 return notifier_from_errno(err);
962} 962}
963 963
964static struct notifier_block __cpuinitdata smp_cpu_nb = { 964static struct notifier_block __cpuinitdata smp_cpu_nb = {
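Returning notifier_from_errno(err) instead of NOTIFY_BAD lets the hotplug core see the real errno. The pattern, sketched with a hypothetical setup helper:

#include <linux/notifier.h>
#include <linux/cpu.h>

static int example_online_setup(void)
{
	return 0;			/* stand-in for the real per-cpu setup */
}

/* notifier_from_errno(0) is NOTIFY_OK; a negative errno is encoded so the
 * caller can recover it with notifier_to_errno(). */
static int example_cpu_notify(struct notifier_block *nb, unsigned long action,
			      void *hcpu)
{
	int err = 0;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		err = example_online_setup();
		break;
	}
	return notifier_from_errno(err);
}
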
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 2f4b687cc7fa..a7251580891c 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -33,17 +33,6 @@ config KVM
33 33
34 If unsure, say N. 34 If unsure, say N.
35 35
36config KVM_AWARE_CMF
37 depends on KVM
38 bool "KVM aware sampling"
39 ---help---
40 This option enhances the sampling data from the CPU Measurement
41 Facility with additional information, that allows to distinguish
42 guest(s) and host when using the kernel based virtual machine
43 functionality.
44
45 If unsure, say N.
46
47# OK, it's a little counter-intuitive to do this, but it puts it neatly under 36# OK, it's a little counter-intuitive to do this, but it puts it neatly under
48# the virtualization menu. 37# the virtualization menu.
49source drivers/vhost/Kconfig 38source drivers/vhost/Kconfig
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
index 31646bd0e469..7e9d30d567b0 100644
--- a/arch/s390/kvm/sie64a.S
+++ b/arch/s390/kvm/sie64a.S
@@ -32,12 +32,10 @@ SPI_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
32 32
33 33
34 .macro SPP newpp 34 .macro SPP newpp
35#ifdef CONFIG_KVM_AWARE_CMF
36 tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP 35 tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
37 jz 0f 36 jz 0f
38 .insn s,0xb2800000,\newpp 37 .insn s,0xb2800000,\newpp
39 0: 380:
40#endif
41 .endm 39 .endm
42 40
43sie_irq_handler: 41sie_irq_handler:
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index f87b34731e1d..eb6a2ef5f82e 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -1,11 +1,9 @@
1/* 1/*
2 * arch/s390/mm/cmm.c 2 * Collaborative memory management interface.
3 * 3 *
4 * S390 version 4 * Copyright IBM Corp 2003,2010
5 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
7 * 6 *
8 * Collaborative memory management interface.
9 */ 7 */
10 8
11#include <linux/errno.h> 9#include <linux/errno.h>
@@ -20,9 +18,9 @@
20#include <linux/kthread.h> 18#include <linux/kthread.h>
21#include <linux/oom.h> 19#include <linux/oom.h>
22#include <linux/suspend.h> 20#include <linux/suspend.h>
21#include <linux/uaccess.h>
23 22
24#include <asm/pgalloc.h> 23#include <asm/pgalloc.h>
25#include <asm/uaccess.h>
26#include <asm/diag.h> 24#include <asm/diag.h>
27 25
28static char *sender = "VMRMSVM"; 26static char *sender = "VMRMSVM";
@@ -53,14 +51,14 @@ static struct cmm_page_array *cmm_timed_page_list;
53static DEFINE_SPINLOCK(cmm_lock); 51static DEFINE_SPINLOCK(cmm_lock);
54 52
55static struct task_struct *cmm_thread_ptr; 53static struct task_struct *cmm_thread_ptr;
56static wait_queue_head_t cmm_thread_wait; 54static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
57static struct timer_list cmm_timer; 55static DEFINE_TIMER(cmm_timer, NULL, 0, 0);
58 56
59static void cmm_timer_fn(unsigned long); 57static void cmm_timer_fn(unsigned long);
60static void cmm_set_timer(void); 58static void cmm_set_timer(void);
61 59
62static long 60static long cmm_alloc_pages(long nr, long *counter,
63cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list) 61 struct cmm_page_array **list)
64{ 62{
65 struct cmm_page_array *pa, *npa; 63 struct cmm_page_array *pa, *npa;
66 unsigned long addr; 64 unsigned long addr;
@@ -99,8 +97,7 @@ cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list)
99 return nr; 97 return nr;
100} 98}
101 99
102static long 100static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
103cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
104{ 101{
105 struct cmm_page_array *pa; 102 struct cmm_page_array *pa;
106 unsigned long addr; 103 unsigned long addr;
@@ -140,11 +137,10 @@ static int cmm_oom_notify(struct notifier_block *self,
140} 137}
141 138
142static struct notifier_block cmm_oom_nb = { 139static struct notifier_block cmm_oom_nb = {
143 .notifier_call = cmm_oom_notify 140 .notifier_call = cmm_oom_notify,
144}; 141};
145 142
146static int 143static int cmm_thread(void *dummy)
147cmm_thread(void *dummy)
148{ 144{
149 int rc; 145 int rc;
150 146
@@ -170,7 +166,7 @@ cmm_thread(void *dummy)
170 cmm_timed_pages_target = cmm_timed_pages; 166 cmm_timed_pages_target = cmm_timed_pages;
171 } else if (cmm_timed_pages_target < cmm_timed_pages) { 167 } else if (cmm_timed_pages_target < cmm_timed_pages) {
172 cmm_free_pages(1, &cmm_timed_pages, 168 cmm_free_pages(1, &cmm_timed_pages,
173 &cmm_timed_page_list); 169 &cmm_timed_page_list);
174 } 170 }
175 if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer)) 171 if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
176 cmm_set_timer(); 172 cmm_set_timer();
@@ -178,14 +174,12 @@ cmm_thread(void *dummy)
178 return 0; 174 return 0;
179} 175}
180 176
181static void 177static void cmm_kick_thread(void)
182cmm_kick_thread(void)
183{ 178{
184 wake_up(&cmm_thread_wait); 179 wake_up(&cmm_thread_wait);
185} 180}
186 181
187static void 182static void cmm_set_timer(void)
188cmm_set_timer(void)
189{ 183{
190 if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) { 184 if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
191 if (timer_pending(&cmm_timer)) 185 if (timer_pending(&cmm_timer))
@@ -202,8 +196,7 @@ cmm_set_timer(void)
202 add_timer(&cmm_timer); 196 add_timer(&cmm_timer);
203} 197}
204 198
205static void 199static void cmm_timer_fn(unsigned long ignored)
206cmm_timer_fn(unsigned long ignored)
207{ 200{
208 long nr; 201 long nr;
209 202
@@ -216,57 +209,49 @@ cmm_timer_fn(unsigned long ignored)
216 cmm_set_timer(); 209 cmm_set_timer();
217} 210}
218 211
219void 212static void cmm_set_pages(long nr)
220cmm_set_pages(long nr)
221{ 213{
222 cmm_pages_target = nr; 214 cmm_pages_target = nr;
223 cmm_kick_thread(); 215 cmm_kick_thread();
224} 216}
225 217
226long 218static long cmm_get_pages(void)
227cmm_get_pages(void)
228{ 219{
229 return cmm_pages; 220 return cmm_pages;
230} 221}
231 222
232void 223static void cmm_add_timed_pages(long nr)
233cmm_add_timed_pages(long nr)
234{ 224{
235 cmm_timed_pages_target += nr; 225 cmm_timed_pages_target += nr;
236 cmm_kick_thread(); 226 cmm_kick_thread();
237} 227}
238 228
239long 229static long cmm_get_timed_pages(void)
240cmm_get_timed_pages(void)
241{ 230{
242 return cmm_timed_pages; 231 return cmm_timed_pages;
243} 232}
244 233
245void 234static void cmm_set_timeout(long nr, long seconds)
246cmm_set_timeout(long nr, long seconds)
247{ 235{
248 cmm_timeout_pages = nr; 236 cmm_timeout_pages = nr;
249 cmm_timeout_seconds = seconds; 237 cmm_timeout_seconds = seconds;
250 cmm_set_timer(); 238 cmm_set_timer();
251} 239}
252 240
253static int 241static int cmm_skip_blanks(char *cp, char **endp)
254cmm_skip_blanks(char *cp, char **endp)
255{ 242{
256 char *str; 243 char *str;
257 244
258 for (str = cp; *str == ' ' || *str == '\t'; str++); 245 for (str = cp; *str == ' ' || *str == '\t'; str++)
246 ;
259 *endp = str; 247 *endp = str;
260 return str != cp; 248 return str != cp;
261} 249}
262 250
263#ifdef CONFIG_CMM_PROC
264
265static struct ctl_table cmm_table[]; 251static struct ctl_table cmm_table[];
266 252
267static int 253static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,
268cmm_pages_handler(ctl_table *ctl, int write, 254 size_t *lenp, loff_t *ppos)
269 void __user *buffer, size_t *lenp, loff_t *ppos)
270{ 255{
271 char buf[16], *p; 256 char buf[16], *p;
272 long nr; 257 long nr;
@@ -305,9 +290,8 @@ cmm_pages_handler(ctl_table *ctl, int write,
305 return 0; 290 return 0;
306} 291}
307 292
308static int 293static int cmm_timeout_handler(ctl_table *ctl, int write, void __user *buffer,
309cmm_timeout_handler(ctl_table *ctl, int write, 294 size_t *lenp, loff_t *ppos)
310 void __user *buffer, size_t *lenp, loff_t *ppos)
311{ 295{
312 char buf[64], *p; 296 char buf[64], *p;
313 long nr, seconds; 297 long nr, seconds;
@@ -370,12 +354,10 @@ static struct ctl_table cmm_dir_table[] = {
370 }, 354 },
371 { } 355 { }
372}; 356};
373#endif
374 357
375#ifdef CONFIG_CMM_IUCV 358#ifdef CONFIG_CMM_IUCV
376#define SMSG_PREFIX "CMM" 359#define SMSG_PREFIX "CMM"
377static void 360static void cmm_smsg_target(const char *from, char *msg)
378cmm_smsg_target(const char *from, char *msg)
379{ 361{
380 long nr, seconds; 362 long nr, seconds;
381 363
@@ -445,16 +427,13 @@ static struct notifier_block cmm_power_notifier = {
445 .notifier_call = cmm_power_event, 427 .notifier_call = cmm_power_event,
446}; 428};
447 429
448static int 430static int cmm_init(void)
449cmm_init (void)
450{ 431{
451 int rc = -ENOMEM; 432 int rc = -ENOMEM;
452 433
453#ifdef CONFIG_CMM_PROC
454 cmm_sysctl_header = register_sysctl_table(cmm_dir_table); 434 cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
455 if (!cmm_sysctl_header) 435 if (!cmm_sysctl_header)
456 goto out_sysctl; 436 goto out_sysctl;
457#endif
458#ifdef CONFIG_CMM_IUCV 437#ifdef CONFIG_CMM_IUCV
459 rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target); 438 rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
460 if (rc < 0) 439 if (rc < 0)
@@ -466,8 +445,6 @@ cmm_init (void)
466 rc = register_pm_notifier(&cmm_power_notifier); 445 rc = register_pm_notifier(&cmm_power_notifier);
467 if (rc) 446 if (rc)
468 goto out_pm; 447 goto out_pm;
469 init_waitqueue_head(&cmm_thread_wait);
470 init_timer(&cmm_timer);
471 cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread"); 448 cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
472 rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0; 449 rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
473 if (rc) 450 if (rc)
@@ -483,36 +460,26 @@ out_oom_notify:
483 smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target); 460 smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
484out_smsg: 461out_smsg:
485#endif 462#endif
486#ifdef CONFIG_CMM_PROC
487 unregister_sysctl_table(cmm_sysctl_header); 463 unregister_sysctl_table(cmm_sysctl_header);
488out_sysctl: 464out_sysctl:
489#endif 465 del_timer_sync(&cmm_timer);
490 return rc; 466 return rc;
491} 467}
468module_init(cmm_init);
492 469
493static void 470static void cmm_exit(void)
494cmm_exit(void)
495{ 471{
496 kthread_stop(cmm_thread_ptr);
497 unregister_pm_notifier(&cmm_power_notifier);
498 unregister_oom_notifier(&cmm_oom_nb);
499 cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
500 cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
501#ifdef CONFIG_CMM_PROC
502 unregister_sysctl_table(cmm_sysctl_header); 472 unregister_sysctl_table(cmm_sysctl_header);
503#endif
504#ifdef CONFIG_CMM_IUCV 473#ifdef CONFIG_CMM_IUCV
505 smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target); 474 smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
506#endif 475#endif
476 unregister_pm_notifier(&cmm_power_notifier);
477 unregister_oom_notifier(&cmm_oom_nb);
478 kthread_stop(cmm_thread_ptr);
479 del_timer_sync(&cmm_timer);
480 cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
481 cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
507} 482}
508
509module_init(cmm_init);
510module_exit(cmm_exit); 483module_exit(cmm_exit);
511 484
512EXPORT_SYMBOL(cmm_set_pages);
513EXPORT_SYMBOL(cmm_get_pages);
514EXPORT_SYMBOL(cmm_add_timed_pages);
515EXPORT_SYMBOL(cmm_get_timed_pages);
516EXPORT_SYMBOL(cmm_set_timeout);
517
518MODULE_LICENSE("GPL"); 485MODULE_LICENSE("GPL");
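cmm_thread_wait and cmm_timer are now initialized at compile time, which is why the init_waitqueue_head()/init_timer() calls disappear from cmm_init() and cmm_exit() gains a del_timer_sync(). The static-initializer idiom, sketched with illustrative names (this kernel's DEFINE_TIMER() takes function, expires and data arguments; cmm.c itself passes NULL and installs cmm_timer_fn before arming the timer):

#include <linux/wait.h>
#include <linux/timer.h>

static void example_timer_fn(unsigned long data);

/* Compile-time initialization replaces the runtime init_* calls. */
static DECLARE_WAIT_QUEUE_HEAD(example_wait);
static DEFINE_TIMER(example_timer, example_timer_fn, 0, 0);

static void example_timer_fn(unsigned long data)
{
	wake_up(&example_wait);		/* kick a thread sleeping on example_wait */
}
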
diff --git a/arch/score/include/asm/scatterlist.h b/arch/score/include/asm/scatterlist.h
index 9f533b8362c7..4fa1a6658215 100644
--- a/arch/score/include/asm/scatterlist.h
+++ b/arch/score/include/asm/scatterlist.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_SCORE_SCATTERLIST_H 1#ifndef _ASM_SCORE_SCATTERLIST_H
2#define _ASM_SCORE_SCATTERLIST_H 2#define _ASM_SCORE_SCATTERLIST_H
3 3
4#define ISA_DMA_THRESHOLD (~0UL)
5
4#include <asm-generic/scatterlist.h> 6#include <asm-generic/scatterlist.h>
5 7
6#endif /* _ASM_SCORE_SCATTERLIST_H */ 8#endif /* _ASM_SCORE_SCATTERLIST_H */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 0e318c905eea..c5ee4ce60b57 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -186,6 +186,9 @@ config DMA_NONCOHERENT
186config NEED_DMA_MAP_STATE 186config NEED_DMA_MAP_STATE
187 def_bool DMA_NONCOHERENT 187 def_bool DMA_NONCOHERENT
188 188
189config NEED_SG_DMA_LENGTH
190 def_bool y
191
189source "init/Kconfig" 192source "init/Kconfig"
190 193
191source "kernel/Kconfig.freezer" 194source "kernel/Kconfig.freezer"
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index d4104ce9fe53..6c4bbba2a675 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -436,29 +436,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
436 0, sizeof(struct pt_dspregs), 436 0, sizeof(struct pt_dspregs),
437 (const void __user *)data); 437 (const void __user *)data);
438#endif 438#endif
439#ifdef CONFIG_BINFMT_ELF_FDPIC
440 case PTRACE_GETFDPIC: {
441 unsigned long tmp = 0;
442
443 switch (addr) {
444 case PTRACE_GETFDPIC_EXEC:
445 tmp = child->mm->context.exec_fdpic_loadmap;
446 break;
447 case PTRACE_GETFDPIC_INTERP:
448 tmp = child->mm->context.interp_fdpic_loadmap;
449 break;
450 default:
451 break;
452 }
453
454 ret = 0;
455 if (put_user(tmp, datap)) {
456 ret = -EFAULT;
457 break;
458 }
459 break;
460 }
461#endif
462 default: 439 default:
463 ret = ptrace_request(child, request, addr, data); 440 ret = ptrace_request(child, request, addr, data);
464 break; 441 break;
diff --git a/arch/sh/math-emu/sfp-util.h b/arch/sh/math-emu/sfp-util.h
index e8526021892f..8ae1bd310ad0 100644
--- a/arch/sh/math-emu/sfp-util.h
+++ b/arch/sh/math-emu/sfp-util.h
@@ -66,3 +66,7 @@
66 } while (0) 66 } while (0)
67 67
68#define abort() return 0 68#define abort() return 0
69
70#define __BYTE_ORDER __LITTLE_ENDIAN
71
72
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index d6781ce687e2..6f1470baa314 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -133,6 +133,9 @@ config ZONE_DMA
133config NEED_DMA_MAP_STATE 133config NEED_DMA_MAP_STATE
134 def_bool y 134 def_bool y
135 135
136config NEED_SG_DMA_LENGTH
137 def_bool y
138
136config GENERIC_ISA_DMA 139config GENERIC_ISA_DMA
137 bool 140 bool
138 default y if SPARC32 141 default y if SPARC32
diff --git a/arch/sparc/include/asm/scatterlist.h b/arch/sparc/include/asm/scatterlist.h
index d1120257b033..433e45f05fd4 100644
--- a/arch/sparc/include/asm/scatterlist.h
+++ b/arch/sparc/include/asm/scatterlist.h
@@ -1,8 +1,9 @@
1#ifndef _SPARC_SCATTERLIST_H 1#ifndef _SPARC_SCATTERLIST_H
2#define _SPARC_SCATTERLIST_H 2#define _SPARC_SCATTERLIST_H
3 3
4#define sg_dma_len(sg) ((sg)->dma_length)
5
6#include <asm-generic/scatterlist.h> 4#include <asm-generic/scatterlist.h>
7 5
6#define ISA_DMA_THRESHOLD (~0UL)
7#define ARCH_HAS_SG_CHAIN
8
8#endif /* !(_SPARC_SCATTERLIST_H) */ 9#endif /* !(_SPARC_SCATTERLIST_H) */
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 34ce49f80eac..0ec92c8861dd 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -92,6 +92,8 @@ struct cpu_hw_events {
92 92
93 /* Enabled/disable state. */ 93 /* Enabled/disable state. */
94 int enabled; 94 int enabled;
95
96 unsigned int group_flag;
95}; 97};
96DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; 98DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
97 99
@@ -981,53 +983,6 @@ static int collect_events(struct perf_event *group, int max_count,
981 return n; 983 return n;
982} 984}
983 985
984static void event_sched_in(struct perf_event *event)
985{
986 event->state = PERF_EVENT_STATE_ACTIVE;
987 event->oncpu = smp_processor_id();
988 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
989 if (is_software_event(event))
990 event->pmu->enable(event);
991}
992
993int hw_perf_group_sched_in(struct perf_event *group_leader,
994 struct perf_cpu_context *cpuctx,
995 struct perf_event_context *ctx)
996{
997 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
998 struct perf_event *sub;
999 int n0, n;
1000
1001 if (!sparc_pmu)
1002 return 0;
1003
1004 n0 = cpuc->n_events;
1005 n = collect_events(group_leader, perf_max_events - n0,
1006 &cpuc->event[n0], &cpuc->events[n0],
1007 &cpuc->current_idx[n0]);
1008 if (n < 0)
1009 return -EAGAIN;
1010 if (check_excludes(cpuc->event, n0, n))
1011 return -EINVAL;
1012 if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0))
1013 return -EAGAIN;
1014 cpuc->n_events = n0 + n;
1015 cpuc->n_added += n;
1016
1017 cpuctx->active_oncpu += n;
1018 n = 1;
1019 event_sched_in(group_leader);
1020 list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
1021 if (sub->state != PERF_EVENT_STATE_OFF) {
1022 event_sched_in(sub);
1023 n++;
1024 }
1025 }
1026 ctx->nr_active += n;
1027
1028 return 1;
1029}
1030
1031static int sparc_pmu_enable(struct perf_event *event) 986static int sparc_pmu_enable(struct perf_event *event)
1032{ 987{
1033 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 988 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -1045,11 +1000,20 @@ static int sparc_pmu_enable(struct perf_event *event)
1045 cpuc->events[n0] = event->hw.event_base; 1000 cpuc->events[n0] = event->hw.event_base;
1046 cpuc->current_idx[n0] = PIC_NO_INDEX; 1001 cpuc->current_idx[n0] = PIC_NO_INDEX;
1047 1002
1003 /*
1004 * If group events scheduling transaction was started,
 1005 * skip the schedulability test here, it will be performed
 1006 * at commit time (->commit_txn) as a whole
1007 */
1008 if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
1009 goto nocheck;
1010
1048 if (check_excludes(cpuc->event, n0, 1)) 1011 if (check_excludes(cpuc->event, n0, 1))
1049 goto out; 1012 goto out;
1050 if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1)) 1013 if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
1051 goto out; 1014 goto out;
1052 1015
1016nocheck:
1053 cpuc->n_events++; 1017 cpuc->n_events++;
1054 cpuc->n_added++; 1018 cpuc->n_added++;
1055 1019
@@ -1129,11 +1093,61 @@ static int __hw_perf_event_init(struct perf_event *event)
1129 return 0; 1093 return 0;
1130} 1094}
1131 1095
1096/*
1097 * Start group events scheduling transaction
1098 * Set the flag to make pmu::enable() not perform the
1099 * schedulability test, it will be performed at commit time
1100 */
1101static void sparc_pmu_start_txn(const struct pmu *pmu)
1102{
1103 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1104
1105 cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
1106}
1107
1108/*
1109 * Stop group events scheduling transaction
1110 * Clear the flag and pmu::enable() will perform the
1111 * schedulability test.
1112 */
1113static void sparc_pmu_cancel_txn(const struct pmu *pmu)
1114{
1115 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1116
1117 cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
1118}
1119
1120/*
1121 * Commit group events scheduling transaction
1122 * Perform the group schedulability test as a whole
1123 * Return 0 if success
1124 */
1125static int sparc_pmu_commit_txn(const struct pmu *pmu)
1126{
1127 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1128 int n;
1129
1130 if (!sparc_pmu)
1131 return -EINVAL;
1132
1133 cpuc = &__get_cpu_var(cpu_hw_events);
1134 n = cpuc->n_events;
1135 if (check_excludes(cpuc->event, 0, n))
1136 return -EINVAL;
1137 if (sparc_check_constraints(cpuc->event, cpuc->events, n))
1138 return -EAGAIN;
1139
1140 return 0;
1141}
1142
1132static const struct pmu pmu = { 1143static const struct pmu pmu = {
1133 .enable = sparc_pmu_enable, 1144 .enable = sparc_pmu_enable,
1134 .disable = sparc_pmu_disable, 1145 .disable = sparc_pmu_disable,
1135 .read = sparc_pmu_read, 1146 .read = sparc_pmu_read,
1136 .unthrottle = sparc_pmu_unthrottle, 1147 .unthrottle = sparc_pmu_unthrottle,
1148 .start_txn = sparc_pmu_start_txn,
1149 .cancel_txn = sparc_pmu_cancel_txn,
1150 .commit_txn = sparc_pmu_commit_txn,
1137}; 1151};
1138 1152
1139const struct pmu *hw_perf_event_init(struct perf_event *event) 1153const struct pmu *hw_perf_event_init(struct perf_event *event)
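Editor's note: the sparc perf_event.c change above drops hw_perf_group_sched_in() in favour of the start_txn/commit_txn/cancel_txn callbacks: pmu->enable() skips the per-event schedulability test while a transaction is open, and the whole group is validated once at commit. A toy userspace model of that flow (made-up names, nothing here is kernel code):

#include <stdio.h>

#define TXN_STARTED 0x1

struct toy_pmu {
    unsigned int group_flag;
    int n_events;
    int capacity;
};

static int toy_add_event(struct toy_pmu *p)
{
    if (!(p->group_flag & TXN_STARTED) && p->n_events + 1 > p->capacity)
        return -1;                      /* per-event check only outside a txn */
    p->n_events++;
    return 0;
}

static void toy_start_txn(struct toy_pmu *p)  { p->group_flag |= TXN_STARTED; }
static void toy_cancel_txn(struct toy_pmu *p) { p->group_flag &= ~TXN_STARTED; }

static int toy_commit_txn(struct toy_pmu *p)
{
    return p->n_events <= p->capacity ? 0 : -1;   /* whole-group check */
}

int main(void)
{
    struct toy_pmu p = { .capacity = 2 };

    toy_start_txn(&p);
    toy_add_event(&p);
    toy_add_event(&p);
    toy_add_event(&p);                  /* accepted for now, checked at commit */
    printf("commit: %d\n", toy_commit_txn(&p));   /* fails: 3 events > capacity 2 */
    toy_cancel_txn(&p);
    return 0;
}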
diff --git a/arch/sparc/math-emu/sfp-util_32.h b/arch/sparc/math-emu/sfp-util_32.h
index 0ea35afbb914..d1b2aff3c259 100644
--- a/arch/sparc/math-emu/sfp-util_32.h
+++ b/arch/sparc/math-emu/sfp-util_32.h
@@ -107,3 +107,9 @@
107 107
108#define abort() \ 108#define abort() \
109 return 0 109 return 0
110
111#ifdef __BIG_ENDIAN
112#define __BYTE_ORDER __BIG_ENDIAN
113#else
114#define __BYTE_ORDER __LITTLE_ENDIAN
115#endif
diff --git a/arch/sparc/math-emu/sfp-util_64.h b/arch/sparc/math-emu/sfp-util_64.h
index d17c9bc72181..425d3cf01af4 100644
--- a/arch/sparc/math-emu/sfp-util_64.h
+++ b/arch/sparc/math-emu/sfp-util_64.h
@@ -112,3 +112,9 @@
112 112
113#define abort() \ 113#define abort() \
114 return 0 114 return 0
115
116#ifdef __BIG_ENDIAN
117#define __BYTE_ORDER __BIG_ENDIAN
118#else
119#define __BYTE_ORDER __LITTLE_ENDIAN
120#endif
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e0c619c55b4e..dcb0593b4a66 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -109,6 +109,9 @@ config SBUS
109config NEED_DMA_MAP_STATE 109config NEED_DMA_MAP_STATE
110 def_bool (X86_64 || DMAR || DMA_API_DEBUG) 110 def_bool (X86_64 || DMAR || DMA_API_DEBUG)
111 111
112config NEED_SG_DMA_LENGTH
113 def_bool y
114
112config GENERIC_ISA_DMA 115config GENERIC_ISA_DMA
113 def_bool y 116 def_bool y
114 117
@@ -1703,6 +1706,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
1703 def_bool X86_64 1706 def_bool X86_64
1704 depends on NUMA 1707 depends on NUMA
1705 1708
1709config USE_PERCPU_NUMA_NODE_ID
1710 def_bool X86_64
1711 depends on NUMA
1712
1706menu "Power management and ACPI options" 1713menu "Power management and ACPI options"
1707 1714
1708config ARCH_HIBERNATION_HEADER 1715config ARCH_HIBERNATION_HEADER
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index 7b1aaa20c7b5..89bbf4e4d05d 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -195,11 +195,11 @@ static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym)
195 195
196 196
197 197
198#if __BYTE_ORDER == __LITTLE_ENDIAN 198#if BYTE_ORDER == LITTLE_ENDIAN
199#define le16_to_cpu(val) (val) 199#define le16_to_cpu(val) (val)
200#define le32_to_cpu(val) (val) 200#define le32_to_cpu(val) (val)
201#endif 201#endif
202#if __BYTE_ORDER == __BIG_ENDIAN 202#if BYTE_ORDER == BIG_ENDIAN
203#define le16_to_cpu(val) bswap_16(val) 203#define le16_to_cpu(val) bswap_16(val)
204#define le32_to_cpu(val) bswap_32(val) 204#define le32_to_cpu(val) bswap_32(val)
205#endif 205#endif
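Editor's note: relocs.c is built and run on the host, and the hunk above switches it from the double-underscore __BYTE_ORDER macros to the plain BYTE_ORDER spelling. On glibc with default feature macros both come from <endian.h>; a small host-side check along the same lines (assuming a glibc-style <endian.h> and <byteswap.h>):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <endian.h>
#include <byteswap.h>

#if BYTE_ORDER == LITTLE_ENDIAN
# define le32_to_cpu(x) (x)
#elif BYTE_ORDER == BIG_ENDIAN
# define le32_to_cpu(x) bswap_32(x)
#else
# error "unknown byte order"
#endif

int main(void)
{
    unsigned int v = 0x11223344u;
    printf("raw 0x%08x, little-endian value 0x%08x\n", v, le32_to_cpu(v));
    return 0;
}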
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 56f462cf22d2..aa2c39d968fc 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -85,7 +85,6 @@ extern int acpi_ioapic;
85extern int acpi_noirq; 85extern int acpi_noirq;
86extern int acpi_strict; 86extern int acpi_strict;
87extern int acpi_disabled; 87extern int acpi_disabled;
88extern int acpi_ht;
89extern int acpi_pci_disabled; 88extern int acpi_pci_disabled;
90extern int acpi_skip_timer_override; 89extern int acpi_skip_timer_override;
91extern int acpi_use_timer_override; 90extern int acpi_use_timer_override;
@@ -97,7 +96,6 @@ void acpi_pic_sci_set_trigger(unsigned int, u16);
97static inline void disable_acpi(void) 96static inline void disable_acpi(void)
98{ 97{
99 acpi_disabled = 1; 98 acpi_disabled = 1;
100 acpi_ht = 0;
101 acpi_pci_disabled = 1; 99 acpi_pci_disabled = 1;
102 acpi_noirq = 1; 100 acpi_noirq = 1;
103} 101}
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index dca9c545f44e..468145914389 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -332,6 +332,7 @@ static __always_inline __pure bool __static_cpu_has(u8 bit)
332#endif 332#endif
333} 333}
334 334
335#if __GNUC__ >= 4
335#define static_cpu_has(bit) \ 336#define static_cpu_has(bit) \
336( \ 337( \
337 __builtin_constant_p(boot_cpu_has(bit)) ? \ 338 __builtin_constant_p(boot_cpu_has(bit)) ? \
@@ -340,6 +341,12 @@ static __always_inline __pure bool __static_cpu_has(u8 bit)
340 __static_cpu_has(bit) : \ 341 __static_cpu_has(bit) : \
341 boot_cpu_has(bit) \ 342 boot_cpu_has(bit) \
342) 343)
344#else
345/*
346 * gcc 3.x is too stupid to do the static test; fall back to dynamic.
347 */
348#define static_cpu_has(bit) boot_cpu_has(bit)
349#endif
343 350
344#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ 351#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
345 352
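Editor's note: the cpufeature.h hunk fences static_cpu_has() off for gcc 4 and newer and falls back to the plain boot_cpu_has() runtime test on older compilers, since the static variant leans on the alternatives self-patching machinery. A hedged sketch of the same compiler-gated dispatch, with stand-in functions rather than the real alternatives code:

#include <stdio.h>
#include <stdbool.h>

static bool cpu_feature_runtime(int bit) { return bit == 1; }  /* pretend feature 1 is set */
static bool cpu_feature_static(int bit)  { return bit == 1; }  /* stand-in for patched code */

#if defined(__GNUC__) && __GNUC__ >= 4
# define cpu_has(bit) (__builtin_constant_p(bit) ? cpu_feature_static(bit) \
                                                 : cpu_feature_runtime(bit))
#else
# define cpu_has(bit) cpu_feature_runtime(bit)    /* old gcc: dynamic test only */
#endif

int main(void)
{
    printf("%d %d\n", cpu_has(1), cpu_has(2));
    return 0;
}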
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 6c3fdd631ed3..f32a4301c4d4 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -225,5 +225,13 @@ extern void mcheck_intel_therm_init(void);
225static inline void mcheck_intel_therm_init(void) { } 225static inline void mcheck_intel_therm_init(void) { }
226#endif 226#endif
227 227
228/*
229 * Used by APEI to report memory error via /dev/mcelog
230 */
231
232struct cper_sec_mem_err;
233extern void apei_mce_report_mem_error(int corrected,
234 struct cper_sec_mem_err *mem_err);
235
228#endif /* __KERNEL__ */ 236#endif /* __KERNEL__ */
229#endif /* _ASM_X86_MCE_H */ 237#endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index b05400a542ff..64a8ebff06fc 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -89,7 +89,8 @@
89 P4_CCCR_ENABLE) 89 P4_CCCR_ENABLE)
90 90
91/* HT mask */ 91/* HT mask */
92#define P4_CCCR_MASK_HT (P4_CCCR_MASK | P4_CCCR_THREAD_ANY) 92#define P4_CCCR_MASK_HT \
93 (P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY)
93 94
94#define P4_GEN_ESCR_EMASK(class, name, bit) \ 95#define P4_GEN_ESCR_EMASK(class, name, bit) \
95 class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT) 96 class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
diff --git a/arch/x86/include/asm/rdc321x_defs.h b/arch/x86/include/asm/rdc321x_defs.h
deleted file mode 100644
index c8e9c8bed3d0..000000000000
--- a/arch/x86/include/asm/rdc321x_defs.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#define PFX "rdc321x: "
2
3/* General purpose configuration and data registers */
4#define RDC3210_CFGREG_ADDR 0x0CF8
5#define RDC3210_CFGREG_DATA 0x0CFC
6
7#define RDC321X_GPIO_CTRL_REG1 0x48
8#define RDC321X_GPIO_CTRL_REG2 0x84
9#define RDC321X_GPIO_DATA_REG1 0x4c
10#define RDC321X_GPIO_DATA_REG2 0x88
11
12#define RDC321X_MAX_GPIO 58
diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h
index 75af592677ec..fb0b1874396f 100644
--- a/arch/x86/include/asm/scatterlist.h
+++ b/arch/x86/include/asm/scatterlist.h
@@ -1,8 +1,9 @@
1#ifndef _ASM_X86_SCATTERLIST_H 1#ifndef _ASM_X86_SCATTERLIST_H
2#define _ASM_X86_SCATTERLIST_H 2#define _ASM_X86_SCATTERLIST_H
3 3
4#define ISA_DMA_THRESHOLD (0x00ffffff)
5
6#include <asm-generic/scatterlist.h> 4#include <asm-generic/scatterlist.h>
7 5
6#define ISA_DMA_THRESHOLD (0x00ffffff)
7#define ARCH_HAS_SG_CHAIN
8
8#endif /* _ASM_X86_SCATTERLIST_H */ 9#endif /* _ASM_X86_SCATTERLIST_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 62ba9400cc43..f0b6e5dbc5a0 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -239,8 +239,8 @@ static inline struct thread_info *current_thread_info(void)
239#define TS_USEDFPU 0x0001 /* FPU was used by this task 239#define TS_USEDFPU 0x0001 /* FPU was used by this task
240 this quantum (SMP) */ 240 this quantum (SMP) */
241#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ 241#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
242#define TS_POLLING 0x0004 /* true if in idle loop 242#define TS_POLLING 0x0004 /* idle task polling need_resched,
243 and not sleeping */ 243 skip sending interrupt */
244#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ 244#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
245 245
246#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) 246#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index c5087d796587..21899cc31e52 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -53,33 +53,29 @@
53extern int cpu_to_node_map[]; 53extern int cpu_to_node_map[];
54 54
55/* Returns the number of the node containing CPU 'cpu' */ 55/* Returns the number of the node containing CPU 'cpu' */
56static inline int cpu_to_node(int cpu) 56static inline int __cpu_to_node(int cpu)
57{ 57{
58 return cpu_to_node_map[cpu]; 58 return cpu_to_node_map[cpu];
59} 59}
60#define early_cpu_to_node(cpu) cpu_to_node(cpu) 60#define early_cpu_to_node __cpu_to_node
61#define cpu_to_node __cpu_to_node
61 62
62#else /* CONFIG_X86_64 */ 63#else /* CONFIG_X86_64 */
63 64
64/* Mappings between logical cpu number and node number */ 65/* Mappings between logical cpu number and node number */
65DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); 66DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
66 67
67/* Returns the number of the current Node. */
68DECLARE_PER_CPU(int, node_number);
69#define numa_node_id() percpu_read(node_number)
70
71#ifdef CONFIG_DEBUG_PER_CPU_MAPS 68#ifdef CONFIG_DEBUG_PER_CPU_MAPS
72extern int cpu_to_node(int cpu); 69/*
70 * override generic percpu implementation of cpu_to_node
71 */
72extern int __cpu_to_node(int cpu);
73#define cpu_to_node __cpu_to_node
74
73extern int early_cpu_to_node(int cpu); 75extern int early_cpu_to_node(int cpu);
74 76
75#else /* !CONFIG_DEBUG_PER_CPU_MAPS */ 77#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
76 78
77/* Returns the number of the node containing CPU 'cpu' */
78static inline int cpu_to_node(int cpu)
79{
80 return per_cpu(x86_cpu_to_node_map, cpu);
81}
82
83/* Same function but used if called before per_cpu areas are setup */ 79/* Same function but used if called before per_cpu areas are setup */
84static inline int early_cpu_to_node(int cpu) 80static inline int early_cpu_to_node(int cpu)
85{ 81{
@@ -170,6 +166,10 @@ static inline int numa_node_id(void)
170{ 166{
171 return 0; 167 return 0;
172} 168}
169/*
170 * indicate override:
171 */
172#define numa_node_id numa_node_id
173 173
174static inline int early_cpu_to_node(int cpu) 174static inline int early_cpu_to_node(int cpu)
175{ 175{
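Editor's note: the topology.h rework renames the arch helper to __cpu_to_node() and lets generic per-CPU numa_node code supply numa_node_id(), with "#define numa_node_id numa_node_id" marking the spots where an arch-specific version stays. The sketch below shows only that override convention; the assumption that the generic header tests #ifndef numa_node_id, and the generic_percpu_numa_node() name, are mine and not taken from the patch.

#include <stdio.h>

static inline int numa_node_id(void) { return 0; }   /* "arch" override */
#define numa_node_id numa_node_id                     /* mark it as overridden */

#ifndef numa_node_id
/* what a generic per-CPU based fallback could look like; not compiled here */
static inline int numa_node_id(void) { return generic_percpu_numa_node(); }
#endif

int main(void)
{
    printf("running on node %d\n", numa_node_id());
    return 0;
}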
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 488be461a380..60cc4058ed5f 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -63,7 +63,6 @@ EXPORT_SYMBOL(acpi_disabled);
63int acpi_noirq; /* skip ACPI IRQ initialization */ 63int acpi_noirq; /* skip ACPI IRQ initialization */
64int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */ 64int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */
65EXPORT_SYMBOL(acpi_pci_disabled); 65EXPORT_SYMBOL(acpi_pci_disabled);
66int acpi_ht __initdata = 1; /* enable HT */
67 66
68int acpi_lapic; 67int acpi_lapic;
69int acpi_ioapic; 68int acpi_ioapic;
@@ -1501,9 +1500,8 @@ void __init acpi_boot_table_init(void)
1501 1500
1502 /* 1501 /*
1503 * If acpi_disabled, bail out 1502 * If acpi_disabled, bail out
1504 * One exception: acpi=ht continues far enough to enumerate LAPICs
1505 */ 1503 */
1506 if (acpi_disabled && !acpi_ht) 1504 if (acpi_disabled)
1507 return; 1505 return;
1508 1506
1509 /* 1507 /*
@@ -1534,9 +1532,8 @@ int __init early_acpi_boot_init(void)
1534{ 1532{
1535 /* 1533 /*
1536 * If acpi_disabled, bail out 1534 * If acpi_disabled, bail out
1537 * One exception: acpi=ht continues far enough to enumerate LAPICs
1538 */ 1535 */
1539 if (acpi_disabled && !acpi_ht) 1536 if (acpi_disabled)
1540 return 1; 1537 return 1;
1541 1538
1542 /* 1539 /*
@@ -1554,9 +1551,8 @@ int __init acpi_boot_init(void)
1554 1551
1555 /* 1552 /*
1556 * If acpi_disabled, bail out 1553 * If acpi_disabled, bail out
1557 * One exception: acpi=ht continues far enough to enumerate LAPICs
1558 */ 1554 */
1559 if (acpi_disabled && !acpi_ht) 1555 if (acpi_disabled)
1560 return 1; 1556 return 1;
1561 1557
1562 acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); 1558 acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
@@ -1591,21 +1587,12 @@ static int __init parse_acpi(char *arg)
1591 /* acpi=force to over-ride black-list */ 1587 /* acpi=force to over-ride black-list */
1592 else if (strcmp(arg, "force") == 0) { 1588 else if (strcmp(arg, "force") == 0) {
1593 acpi_force = 1; 1589 acpi_force = 1;
1594 acpi_ht = 1;
1595 acpi_disabled = 0; 1590 acpi_disabled = 0;
1596 } 1591 }
1597 /* acpi=strict disables out-of-spec workarounds */ 1592 /* acpi=strict disables out-of-spec workarounds */
1598 else if (strcmp(arg, "strict") == 0) { 1593 else if (strcmp(arg, "strict") == 0) {
1599 acpi_strict = 1; 1594 acpi_strict = 1;
1600 } 1595 }
1601 /* Limit ACPI just to boot-time to enable HT */
1602 else if (strcmp(arg, "ht") == 0) {
1603 if (!acpi_force) {
1604 printk(KERN_WARNING "acpi=ht will be removed in Linux-2.6.35\n");
1605 disable_acpi();
1606 }
1607 acpi_ht = 1;
1608 }
1609 /* acpi=rsdt use RSDT instead of XSDT */ 1596 /* acpi=rsdt use RSDT instead of XSDT */
1610 else if (strcmp(arg, "rsdt") == 0) { 1597 else if (strcmp(arg, "rsdt") == 0) {
1611 acpi_rsdt_forced = 1; 1598 acpi_rsdt_forced = 1;
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index f9961034e557..82e508677b91 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -162,8 +162,6 @@ static int __init acpi_sleep_setup(char *str)
162#endif 162#endif
163 if (strncmp(str, "old_ordering", 12) == 0) 163 if (strncmp(str, "old_ordering", 12) == 0)
164 acpi_old_suspend_ordering(); 164 acpi_old_suspend_ordering();
165 if (strncmp(str, "sci_force_enable", 16) == 0)
166 acpi_set_sci_en_on_resume();
167 str = strchr(str, ','); 165 str = strchr(str, ',');
168 if (str != NULL) 166 if (str != NULL)
169 str += strspn(str, ", \t"); 167 str += strspn(str, ", \t");
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index e5a4a1e01618..c02cc692985c 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -51,6 +51,7 @@
51#include <asm/smp.h> 51#include <asm/smp.h>
52#include <asm/mce.h> 52#include <asm/mce.h>
53#include <asm/kvm_para.h> 53#include <asm/kvm_para.h>
54#include <asm/tsc.h>
54 55
55unsigned int num_processors; 56unsigned int num_processors;
56 57
@@ -1151,8 +1152,13 @@ static void __cpuinit lapic_setup_esr(void)
1151 */ 1152 */
1152void __cpuinit setup_local_APIC(void) 1153void __cpuinit setup_local_APIC(void)
1153{ 1154{
1154 unsigned int value; 1155 unsigned int value, queued;
1155 int i, j; 1156 int i, j, acked = 0;
1157 unsigned long long tsc = 0, ntsc;
1158 long long max_loops = cpu_khz;
1159
1160 if (cpu_has_tsc)
1161 rdtscll(tsc);
1156 1162
1157 if (disable_apic) { 1163 if (disable_apic) {
1158 arch_disable_smp_support(); 1164 arch_disable_smp_support();
@@ -1204,13 +1210,32 @@ void __cpuinit setup_local_APIC(void)
1204 * the interrupt. Hence a vector might get locked. It was noticed 1210 * the interrupt. Hence a vector might get locked. It was noticed
1205 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR. 1211 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
1206 */ 1212 */
1207 for (i = APIC_ISR_NR - 1; i >= 0; i--) { 1213 do {
1208 value = apic_read(APIC_ISR + i*0x10); 1214 queued = 0;
1209 for (j = 31; j >= 0; j--) { 1215 for (i = APIC_ISR_NR - 1; i >= 0; i--)
1210 if (value & (1<<j)) 1216 queued |= apic_read(APIC_IRR + i*0x10);
1211 ack_APIC_irq(); 1217
1218 for (i = APIC_ISR_NR - 1; i >= 0; i--) {
1219 value = apic_read(APIC_ISR + i*0x10);
1220 for (j = 31; j >= 0; j--) {
1221 if (value & (1<<j)) {
1222 ack_APIC_irq();
1223 acked++;
1224 }
1225 }
1212 } 1226 }
1213 } 1227 if (acked > 256) {
1228 printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n",
1229 acked);
1230 break;
1231 }
1232 if (cpu_has_tsc) {
1233 rdtscll(ntsc);
1234 max_loops = (cpu_khz << 10) - (ntsc - tsc);
1235 } else
1236 max_loops--;
1237 } while (queued && max_loops > 0);
1238 WARN_ON(max_loops <= 0);
1214 1239
1215 /* 1240 /*
1216 * Now that we are all set up, enable the APIC 1241 * Now that we are all set up, enable the APIC
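Editor's note: the setup_local_APIC() hunk turns the one-shot ISR ack pass into a loop that keeps issuing EOIs while the IRR still shows pending interrupts, bounded both by an absolute ack count and by a TSC-derived loop budget. A toy model of that bounded drain loop, with plain counters standing in for the APIC registers:

#include <stdio.h>

static int pending = 5;                 /* pretend 5 stale interrupts */

static int any_pending(void) { return pending > 0; }
static void ack_one(void)    { if (pending > 0) pending--; }

int main(void)
{
    long max_loops = 1000;              /* the kernel derives this budget from the TSC */
    int acked = 0, queued;

    do {
        queued = any_pending();         /* like re-reading the IRR */
        while (any_pending()) {         /* like walking the ISR and issuing EOIs */
            ack_one();
            acked++;
        }
        if (acked > 256) {              /* same absolute safety valve as the patch */
            fprintf(stderr, "still pending after %d EOIs\n", acked);
            break;
        }
    } while (queued && --max_loops > 0);

    printf("acked %d, %d left pending\n", acked, pending);
    return 0;
}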
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cc83a002786e..68e4a6f2211e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1121,9 +1121,9 @@ void __cpuinit cpu_init(void)
1121 oist = &per_cpu(orig_ist, cpu); 1121 oist = &per_cpu(orig_ist, cpu);
1122 1122
1123#ifdef CONFIG_NUMA 1123#ifdef CONFIG_NUMA
1124 if (cpu != 0 && percpu_read(node_number) == 0 && 1124 if (cpu != 0 && percpu_read(numa_node) == 0 &&
1125 cpu_to_node(cpu) != NUMA_NO_NODE) 1125 early_cpu_to_node(cpu) != NUMA_NO_NODE)
1126 percpu_write(node_number, cpu_to_node(cpu)); 1126 set_numa_node(early_cpu_to_node(cpu));
1127#endif 1127#endif
1128 1128
1129 me = current; 1129 me = current;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 6f3dc8fbbfdc..7ec2123838e6 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1497,8 +1497,8 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
1497 * simply keep the boost-disable flag in sync with the current global 1497 * simply keep the boost-disable flag in sync with the current global
1498 * state. 1498 * state.
1499 */ 1499 */
1500static int __cpuinit cpb_notify(struct notifier_block *nb, unsigned long action, 1500static int cpb_notify(struct notifier_block *nb, unsigned long action,
1501 void *hcpu) 1501 void *hcpu)
1502{ 1502{
1503 unsigned cpu = (long)hcpu; 1503 unsigned cpu = (long)hcpu;
1504 u32 lo, hi; 1504 u32 lo, hi;
@@ -1528,7 +1528,7 @@ static int __cpuinit cpb_notify(struct notifier_block *nb, unsigned long action,
1528 return NOTIFY_OK; 1528 return NOTIFY_OK;
1529} 1529}
1530 1530
1531static struct notifier_block __cpuinitdata cpb_nb = { 1531static struct notifier_block cpb_nb = {
1532 .notifier_call = cpb_notify, 1532 .notifier_call = cpb_notify,
1533}; 1533};
1534 1534
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index 4ac6d48fe11b..bb34b03af252 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
7obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o 7obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o
8 8
9obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o 9obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
10
11obj-$(CONFIG_ACPI_APEI) += mce-apei.o
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
new file mode 100644
index 000000000000..745b54f9be89
--- /dev/null
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -0,0 +1,138 @@
1/*
2 * Bridge between MCE and APEI
3 *
 4 * On some machines, corrected memory errors are reported via APEI
5 * generic hardware error source (GHES) instead of corrected Machine
6 * Check. These corrected memory errors can be reported to user space
 7 * through /dev/mcelog by faking a corrected Machine Check, so that
8 * the error memory page can be offlined by /sbin/mcelog if the error
9 * count for one page is beyond the threshold.
10 *
11 * For fatal MCE, save MCE record into persistent storage via ERST, so
12 * that the MCE record can be logged after reboot via ERST.
13 *
14 * Copyright 2010 Intel Corp.
15 * Author: Huang Ying <ying.huang@intel.com>
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License version
19 * 2 as published by the Free Software Foundation.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
29 */
30
31#include <linux/kernel.h>
32#include <linux/acpi.h>
33#include <linux/cper.h>
34#include <acpi/apei.h>
35#include <asm/mce.h>
36
37#include "mce-internal.h"
38
39void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
40{
41 struct mce m;
42
43 /* Only corrected MC is reported */
44 if (!corrected)
45 return;
46
47 mce_setup(&m);
48 m.bank = 1;
49 /* Fake a memory read corrected error with unknown channel */
50 m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
51 m.addr = mem_err->physical_addr;
52 mce_log(&m);
53 mce_notify_irq();
54}
55EXPORT_SYMBOL_GPL(apei_mce_report_mem_error);
56
57#define CPER_CREATOR_MCE \
58 UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \
59 0x64, 0x90, 0xb8, 0x9d)
60#define CPER_SECTION_TYPE_MCE \
61 UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \
62 0x04, 0x4a, 0x38, 0xfc)
63
64/*
65 * CPER specification (in UEFI specification 2.3 appendix N) requires
 66 * records to be byte-packed.
67 */
68struct cper_mce_record {
69 struct cper_record_header hdr;
70 struct cper_section_descriptor sec_hdr;
71 struct mce mce;
72} __packed;
73
74int apei_write_mce(struct mce *m)
75{
76 struct cper_mce_record rcd;
77
78 memset(&rcd, 0, sizeof(rcd));
79 memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
80 rcd.hdr.revision = CPER_RECORD_REV;
81 rcd.hdr.signature_end = CPER_SIG_END;
82 rcd.hdr.section_count = 1;
83 rcd.hdr.error_severity = CPER_SER_FATAL;
84 /* timestamp, platform_id, partition_id are all invalid */
85 rcd.hdr.validation_bits = 0;
86 rcd.hdr.record_length = sizeof(rcd);
87 rcd.hdr.creator_id = CPER_CREATOR_MCE;
88 rcd.hdr.notification_type = CPER_NOTIFY_MCE;
89 rcd.hdr.record_id = cper_next_record_id();
90 rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR;
91
92 rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd;
93 rcd.sec_hdr.section_length = sizeof(rcd.mce);
94 rcd.sec_hdr.revision = CPER_SEC_REV;
 95 /* fru_id and fru_text are invalid */
96 rcd.sec_hdr.validation_bits = 0;
97 rcd.sec_hdr.flags = CPER_SEC_PRIMARY;
98 rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
99 rcd.sec_hdr.section_severity = CPER_SER_FATAL;
100
101 memcpy(&rcd.mce, m, sizeof(*m));
102
103 return erst_write(&rcd.hdr);
104}
105
106ssize_t apei_read_mce(struct mce *m, u64 *record_id)
107{
108 struct cper_mce_record rcd;
109 ssize_t len;
110
111 len = erst_read_next(&rcd.hdr, sizeof(rcd));
112 if (len <= 0)
113 return len;
 114 /* Cannot skip other records in storage via ERST unless we clear them */
115 else if (len != sizeof(rcd) ||
116 uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE)) {
117 if (printk_ratelimit())
118 pr_warning(
119 "MCE-APEI: Can not skip the unknown record in ERST");
120 return -EIO;
121 }
122
123 memcpy(m, &rcd.mce, sizeof(*m));
124 *record_id = rcd.hdr.record_id;
125
126 return sizeof(*m);
127}
128
129/* Check whether there is record in ERST */
130int apei_check_mce(void)
131{
132 return erst_get_record_count();
133}
134
135int apei_clear_mce(u64 record_id)
136{
137 return erst_clear(record_id);
138}
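Editor's note: apei_write_mce() above lays the CPER header, section descriptor and struct mce out back to back in one packed record, deriving section_offset and section_length from the struct itself so they cannot drift out of sync. The following compilable illustration uses made-up field names to show just that layout trick:

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdint.h>

struct hdr     { char signature[4]; uint32_t record_length; } __attribute__((packed));
struct sdesc   { uint32_t section_offset; uint32_t section_length; } __attribute__((packed));
struct payload { uint64_t status; uint64_t addr; } __attribute__((packed));

struct record {
    struct hdr     hdr;
    struct sdesc   sec;
    struct payload mce;
} __attribute__((packed));

int main(void)
{
    struct record rcd;

    memset(&rcd, 0, sizeof(rcd));
    memcpy(rcd.hdr.signature, "CPER", 4);
    rcd.hdr.record_length  = sizeof(rcd);
    rcd.sec.section_offset = offsetof(struct record, mce);   /* never drifts */
    rcd.sec.section_length = sizeof(rcd.mce);

    printf("record %zu bytes, payload at offset %u, length %u\n",
           sizeof(rcd), rcd.sec.section_offset, rcd.sec.section_length);
    return 0;
}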
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 32996f9fab67..fefcc69ee8b5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -28,3 +28,26 @@ extern int mce_ser;
28 28
29extern struct mce_bank *mce_banks; 29extern struct mce_bank *mce_banks;
30 30
31#ifdef CONFIG_ACPI_APEI
32int apei_write_mce(struct mce *m);
33ssize_t apei_read_mce(struct mce *m, u64 *record_id);
34int apei_check_mce(void);
35int apei_clear_mce(u64 record_id);
36#else
37static inline int apei_write_mce(struct mce *m)
38{
39 return -EINVAL;
40}
41static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
42{
43 return 0;
44}
45static inline int apei_check_mce(void)
46{
47 return 0;
48}
49static inline int apei_clear_mce(u64 record_id)
50{
51 return -EINVAL;
52}
53#endif
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 7a355ddcc64b..707165dbc203 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -264,7 +264,7 @@ static void wait_for_panic(void)
264 264
265static void mce_panic(char *msg, struct mce *final, char *exp) 265static void mce_panic(char *msg, struct mce *final, char *exp)
266{ 266{
267 int i; 267 int i, apei_err = 0;
268 268
269 if (!fake_panic) { 269 if (!fake_panic) {
270 /* 270 /*
@@ -287,8 +287,11 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
287 struct mce *m = &mcelog.entry[i]; 287 struct mce *m = &mcelog.entry[i];
288 if (!(m->status & MCI_STATUS_VAL)) 288 if (!(m->status & MCI_STATUS_VAL))
289 continue; 289 continue;
290 if (!(m->status & MCI_STATUS_UC)) 290 if (!(m->status & MCI_STATUS_UC)) {
291 print_mce(m); 291 print_mce(m);
292 if (!apei_err)
293 apei_err = apei_write_mce(m);
294 }
292 } 295 }
293 /* Now print uncorrected but with the final one last */ 296 /* Now print uncorrected but with the final one last */
294 for (i = 0; i < MCE_LOG_LEN; i++) { 297 for (i = 0; i < MCE_LOG_LEN; i++) {
@@ -297,11 +300,17 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
297 continue; 300 continue;
298 if (!(m->status & MCI_STATUS_UC)) 301 if (!(m->status & MCI_STATUS_UC))
299 continue; 302 continue;
300 if (!final || memcmp(m, final, sizeof(struct mce))) 303 if (!final || memcmp(m, final, sizeof(struct mce))) {
301 print_mce(m); 304 print_mce(m);
305 if (!apei_err)
306 apei_err = apei_write_mce(m);
307 }
302 } 308 }
303 if (final) 309 if (final) {
304 print_mce(final); 310 print_mce(final);
311 if (!apei_err)
312 apei_err = apei_write_mce(final);
313 }
305 if (cpu_missing) 314 if (cpu_missing)
306 printk(KERN_EMERG "Some CPUs didn't answer in synchronization\n"); 315 printk(KERN_EMERG "Some CPUs didn't answer in synchronization\n");
307 print_mce_tail(); 316 print_mce_tail();
@@ -1493,6 +1502,43 @@ static void collect_tscs(void *data)
1493 rdtscll(cpu_tsc[smp_processor_id()]); 1502 rdtscll(cpu_tsc[smp_processor_id()]);
1494} 1503}
1495 1504
1505static int mce_apei_read_done;
1506
1507/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
1508static int __mce_read_apei(char __user **ubuf, size_t usize)
1509{
1510 int rc;
1511 u64 record_id;
1512 struct mce m;
1513
1514 if (usize < sizeof(struct mce))
1515 return -EINVAL;
1516
1517 rc = apei_read_mce(&m, &record_id);
1518 /* Error or no more MCE record */
1519 if (rc <= 0) {
1520 mce_apei_read_done = 1;
1521 return rc;
1522 }
1523 rc = -EFAULT;
1524 if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
1525 return rc;
1526 /*
 1527 * Ideally we would clear the record only after it has
 1528 * been flushed to disk or sent over the network by
 1529 * /sbin/mcelog, but we have no interface to support that now,
1530 * so just clear it to avoid duplication.
1531 */
1532 rc = apei_clear_mce(record_id);
1533 if (rc) {
1534 mce_apei_read_done = 1;
1535 return rc;
1536 }
1537 *ubuf += sizeof(struct mce);
1538
1539 return 0;
1540}
1541
1496static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, 1542static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
1497 loff_t *off) 1543 loff_t *off)
1498{ 1544{
@@ -1506,15 +1552,19 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
1506 return -ENOMEM; 1552 return -ENOMEM;
1507 1553
1508 mutex_lock(&mce_read_mutex); 1554 mutex_lock(&mce_read_mutex);
1555
1556 if (!mce_apei_read_done) {
1557 err = __mce_read_apei(&buf, usize);
1558 if (err || buf != ubuf)
1559 goto out;
1560 }
1561
1509 next = rcu_dereference_check_mce(mcelog.next); 1562 next = rcu_dereference_check_mce(mcelog.next);
1510 1563
1511 /* Only supports full reads right now */ 1564 /* Only supports full reads right now */
1512 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { 1565 err = -EINVAL;
1513 mutex_unlock(&mce_read_mutex); 1566 if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
1514 kfree(cpu_tsc); 1567 goto out;
1515
1516 return -EINVAL;
1517 }
1518 1568
1519 err = 0; 1569 err = 0;
1520 prev = 0; 1570 prev = 0;
@@ -1562,10 +1612,15 @@ timeout:
1562 memset(&mcelog.entry[i], 0, sizeof(struct mce)); 1612 memset(&mcelog.entry[i], 0, sizeof(struct mce));
1563 } 1613 }
1564 } 1614 }
1615
1616 if (err)
1617 err = -EFAULT;
1618
1619out:
1565 mutex_unlock(&mce_read_mutex); 1620 mutex_unlock(&mce_read_mutex);
1566 kfree(cpu_tsc); 1621 kfree(cpu_tsc);
1567 1622
1568 return err ? -EFAULT : buf - ubuf; 1623 return err ? err : buf - ubuf;
1569} 1624}
1570 1625
1571static unsigned int mce_poll(struct file *file, poll_table *wait) 1626static unsigned int mce_poll(struct file *file, poll_table *wait)
@@ -1573,6 +1628,8 @@ static unsigned int mce_poll(struct file *file, poll_table *wait)
1573 poll_wait(file, &mce_wait, wait); 1628 poll_wait(file, &mce_wait, wait);
1574 if (rcu_dereference_check_mce(mcelog.next)) 1629 if (rcu_dereference_check_mce(mcelog.next))
1575 return POLLIN | POLLRDNORM; 1630 return POLLIN | POLLRDNORM;
1631 if (!mce_apei_read_done && apei_check_mce())
1632 return POLLIN | POLLRDNORM;
1576 return 0; 1633 return 0;
1577} 1634}
1578 1635
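Editor's note: the mce.c changes make the /dev/mcelog read path drain MCE records saved in APEI/ERST first, clearing each record as it is copied out, and only then fall through to the in-memory log. A toy two-source reader showing the same ordering, with plain arrays standing in for ERST and the mcelog buffer:

#include <stdio.h>
#include <string.h>

struct rec { int id; };

static struct rec persistent[2] = { {101}, {102} };   /* pretend ERST contents */
static int n_persistent = 2;
static struct rec ring[3] = { {1}, {2}, {3} };        /* pretend in-memory log */

static int read_persistent(struct rec *out)
{
    if (n_persistent == 0)
        return 0;                       /* nothing left: switch sources */
    *out = persistent[0];
    memmove(persistent, persistent + 1, --n_persistent * sizeof(*out));
    return 1;                           /* record is cleared once it is read */
}

int main(void)
{
    struct rec r;

    while (read_persistent(&r))
        printf("persistent record %d\n", r.id);
    for (int i = 0; i < 3; i++)
        printf("in-memory record %d\n", ring[i].id);
    return 0;
}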
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 81c499eceb21..e1a0a3bf9716 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -190,7 +190,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
190 mutex_unlock(&therm_cpu_lock); 190 mutex_unlock(&therm_cpu_lock);
191 break; 191 break;
192 } 192 }
193 return err ? NOTIFY_BAD : NOTIFY_OK; 193 return notifier_from_errno(err);
194} 194}
195 195
196static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata = 196static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index fd4db0db3708..c77586061bcb 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1717,7 +1717,11 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski
1717 */ 1717 */
1718 regs->bp = rewind_frame_pointer(skip + 1); 1718 regs->bp = rewind_frame_pointer(skip + 1);
1719 regs->cs = __KERNEL_CS; 1719 regs->cs = __KERNEL_CS;
1720 local_save_flags(regs->flags); 1720 /*
1721 * We abuse bit 3 to pass exact information, see perf_misc_flags
1722 * and the comment with PERF_EFLAGS_EXACT.
1723 */
1724 regs->flags = 0;
1721} 1725}
1722 1726
1723unsigned long perf_instruction_pointer(struct pt_regs *regs) 1727unsigned long perf_instruction_pointer(struct pt_regs *regs)
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 424fc8de68e4..ae85d69644d1 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -465,15 +465,21 @@ out:
465 return rc; 465 return rc;
466} 466}
467 467
468static inline void p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) 468static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
469{ 469{
470 unsigned long dummy; 470 int overflow = 0;
471 u32 low, high;
471 472
472 rdmsrl(hwc->config_base + hwc->idx, dummy); 473 rdmsr(hwc->config_base + hwc->idx, low, high);
473 if (dummy & P4_CCCR_OVF) { 474
475 /* we need to check high bit for unflagged overflows */
476 if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) {
477 overflow = 1;
474 (void)checking_wrmsrl(hwc->config_base + hwc->idx, 478 (void)checking_wrmsrl(hwc->config_base + hwc->idx,
475 ((u64)dummy) & ~P4_CCCR_OVF); 479 ((u64)low) & ~P4_CCCR_OVF);
476 } 480 }
481
482 return overflow;
477} 483}
478 484
479static inline void p4_pmu_disable_event(struct perf_event *event) 485static inline void p4_pmu_disable_event(struct perf_event *event)
@@ -584,21 +590,15 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
584 590
585 WARN_ON_ONCE(hwc->idx != idx); 591 WARN_ON_ONCE(hwc->idx != idx);
586 592
587 /* 593 /* it might be unflagged overflow */
588 * FIXME: Redundant call, actually not needed 594 handled = p4_pmu_clear_cccr_ovf(hwc);
589 * but just to check if we're screwed
590 */
591 p4_pmu_clear_cccr_ovf(hwc);
592 595
593 val = x86_perf_event_update(event); 596 val = x86_perf_event_update(event);
594 if (val & (1ULL << (x86_pmu.cntval_bits - 1))) 597 if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
595 continue; 598 continue;
596 599
597 /* 600 /* event overflow for sure */
598 * event overflow 601 data.period = event->hw.last_period;
599 */
600 handled = 1;
601 data.period = event->hw.last_period;
602 602
603 if (!x86_perf_event_set_period(event)) 603 if (!x86_perf_event_set_period(event))
604 continue; 604 continue;
@@ -670,7 +670,7 @@ static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
670 670
671/* 671/*
672 * ESCR address hashing is tricky, ESCRs are not sequential 672 * ESCR address hashing is tricky, ESCRs are not sequential
673 * in memory but all starts from MSR_P4_BSU_ESCR0 (0x03e0) and 673 * in memory but all starts from MSR_P4_BSU_ESCR0 (0x03a0) and
674 * the metric between any ESCRs is laid in range [0xa0,0xe1] 674 * the metric between any ESCRs is laid in range [0xa0,0xe1]
675 * 675 *
676 * so we make ~70% filled hashtable 676 * so we make ~70% filled hashtable
@@ -735,8 +735,9 @@ static int p4_get_escr_idx(unsigned int addr)
735{ 735{
736 unsigned int idx = P4_ESCR_MSR_IDX(addr); 736 unsigned int idx = P4_ESCR_MSR_IDX(addr);
737 737
738 if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE || 738 if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE ||
739 !p4_escr_table[idx])) { 739 !p4_escr_table[idx] ||
740 p4_escr_table[idx] != addr)) {
740 WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr); 741 WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
741 return -1; 742 return -1;
742 } 743 }
@@ -762,7 +763,7 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign
762{ 763{
763 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 764 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
764 unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)]; 765 unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)];
765 int cpu = raw_smp_processor_id(); 766 int cpu = smp_processor_id();
766 struct hw_perf_event *hwc; 767 struct hw_perf_event *hwc;
767 struct p4_event_bind *bind; 768 struct p4_event_bind *bind;
768 unsigned int i, thread, num; 769 unsigned int i, thread, num;
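Editor's note: in the p4 hunks, p4_pmu_clear_cccr_ovf() now reports whether an overflow happened, treating a cleared high status bit as an unflagged overflow in addition to the explicit OVF flag, and the IRQ handler uses that result instead of relying only on the counter sign test. A rough sketch of that two-condition check with toy bit layouts (the real CCCR and counter semantics are more involved than this):

#include <stdio.h>
#include <stdint.h>

#define CCCR_OVF (1u << 31)            /* stand-in for P4_CCCR_OVF */

static int counter_overflowed(uint32_t cccr_low, uint32_t cccr_high)
{
    /* flagged overflow, or a cleared high bit hinting at an unflagged one */
    return (cccr_low & CCCR_OVF) || !(cccr_high & (1u << 31));
}

int main(void)
{
    printf("%d %d %d\n",
           counter_overflowed(CCCR_OVF, 1u << 31),   /* explicitly flagged   */
           counter_overflowed(0,        0),          /* unflagged overflow   */
           counter_overflowed(0,        1u << 31));  /* no overflow detected */
    return 0;
}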
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 8b862d5900fe..1b7b31ab7d86 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -170,7 +170,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
170 cpuid_device_destroy(cpu); 170 cpuid_device_destroy(cpu);
171 break; 171 break;
172 } 172 }
173 return err ? NOTIFY_BAD : NOTIFY_OK; 173 return notifier_from_errno(err);
174} 174}
175 175
176static struct notifier_block __refdata cpuid_class_cpu_notifier = 176static struct notifier_block __refdata cpuid_class_cpu_notifier =
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 4d4468e9f47c..7bf2dc4c8f70 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -230,7 +230,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
230 msr_device_destroy(cpu); 230 msr_device_destroy(cpu);
231 break; 231 break;
232 } 232 }
233 return err ? NOTIFY_BAD : NOTIFY_OK; 233 return notifier_from_errno(err);
234} 234}
235 235
236static struct notifier_block __refdata msr_class_cpu_notifier = { 236static struct notifier_block __refdata msr_class_cpu_notifier = {
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 7d2829dde20e..a5bc528d4328 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -31,8 +31,6 @@ static struct dma_map_ops swiotlb_dma_ops = {
31 .free_coherent = swiotlb_free_coherent, 31 .free_coherent = swiotlb_free_coherent,
32 .sync_single_for_cpu = swiotlb_sync_single_for_cpu, 32 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
33 .sync_single_for_device = swiotlb_sync_single_for_device, 33 .sync_single_for_device = swiotlb_sync_single_for_device,
34 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
35 .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
36 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 34 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
37 .sync_sg_for_device = swiotlb_sync_sg_for_device, 35 .sync_sg_for_device = swiotlb_sync_sg_for_device,
38 .map_sg = swiotlb_map_sg_attrs, 36 .map_sg = swiotlb_map_sg_attrs,
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index e8029896309a..b4ae4acbd031 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -676,6 +676,17 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
676 DMI_MATCH(DMI_BOARD_NAME, "DG45FC"), 676 DMI_MATCH(DMI_BOARD_NAME, "DG45FC"),
677 }, 677 },
678 }, 678 },
679 /*
680 * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so
681 * match on the product name.
682 */
683 {
684 .callback = dmi_low_memory_corruption,
685 .ident = "Phoenix BIOS",
686 .matches = {
687 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
688 },
689 },
679#endif 690#endif
680 {} 691 {}
681}; 692};
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ef6370b00e70..a867940a6dfc 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -265,10 +265,10 @@ void __init setup_per_cpu_areas(void)
265 265
266#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA) 266#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
267 /* 267 /*
268 * make sure boot cpu node_number is right, when boot cpu is on the 268 * make sure boot cpu numa_node is right, when boot cpu is on the
269 * node that doesn't have mem installed 269 * node that doesn't have mem installed
270 */ 270 */
271 per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id); 271 set_cpu_numa_node(boot_cpu_id, early_cpu_to_node(boot_cpu_id));
272#endif 272#endif
273 273
274 /* Setup node to cpumask map */ 274 /* Setup node to cpumask map */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 763d815e27a0..37462f1ddba5 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1215,9 +1215,17 @@ __init void prefill_possible_map(void)
1215 if (!num_processors) 1215 if (!num_processors)
1216 num_processors = 1; 1216 num_processors = 1;
1217 1217
1218 if (setup_possible_cpus == -1) 1218 i = setup_max_cpus ?: 1;
1219 possible = num_processors + disabled_cpus; 1219 if (setup_possible_cpus == -1) {
1220 else 1220 possible = num_processors;
1221#ifdef CONFIG_HOTPLUG_CPU
1222 if (setup_max_cpus)
1223 possible += disabled_cpus;
1224#else
1225 if (possible > i)
1226 possible = i;
1227#endif
1228 } else
1221 possible = setup_possible_cpus; 1229 possible = setup_possible_cpus;
1222 1230
1223 total_cpus = max_t(int, possible, num_processors + disabled_cpus); 1231 total_cpus = max_t(int, possible, num_processors + disabled_cpus);
@@ -1230,11 +1238,23 @@ __init void prefill_possible_map(void)
1230 possible = nr_cpu_ids; 1238 possible = nr_cpu_ids;
1231 } 1239 }
1232 1240
1241#ifdef CONFIG_HOTPLUG_CPU
1242 if (!setup_max_cpus)
1243#endif
1244 if (possible > i) {
1245 printk(KERN_WARNING
1246 "%d Processors exceeds max_cpus limit of %u\n",
1247 possible, setup_max_cpus);
1248 possible = i;
1249 }
1250
1233 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", 1251 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
1234 possible, max_t(int, possible - num_processors, 0)); 1252 possible, max_t(int, possible - num_processors, 0));
1235 1253
1236 for (i = 0; i < possible; i++) 1254 for (i = 0; i < possible; i++)
1237 set_cpu_possible(i, true); 1255 set_cpu_possible(i, true);
1256 for (; i < NR_CPUS; i++)
1257 set_cpu_possible(i, false);
1238 1258
1239 nr_cpu_ids = possible; 1259 nr_cpu_ids = possible;
1240} 1260}
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 2bdf628066bd..9257510b4836 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1390,7 +1390,6 @@ __init void lguest_init(void)
1390#endif 1390#endif
1391#ifdef CONFIG_ACPI 1391#ifdef CONFIG_ACPI
1392 acpi_disabled = 1; 1392 acpi_disabled = 1;
1393 acpi_ht = 0;
1394#endif 1393#endif
1395 1394
1396 /* 1395 /*
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 8948f47fde05..a7bcc23ef96c 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -33,9 +33,6 @@ int numa_off __initdata;
33static unsigned long __initdata nodemap_addr; 33static unsigned long __initdata nodemap_addr;
34static unsigned long __initdata nodemap_size; 34static unsigned long __initdata nodemap_size;
35 35
36DEFINE_PER_CPU(int, node_number) = 0;
37EXPORT_PER_CPU_SYMBOL(node_number);
38
39/* 36/*
40 * Map cpu index to node index 37 * Map cpu index to node index
41 */ 38 */
@@ -809,7 +806,7 @@ void __cpuinit numa_set_node(int cpu, int node)
809 per_cpu(x86_cpu_to_node_map, cpu) = node; 806 per_cpu(x86_cpu_to_node_map, cpu) = node;
810 807
811 if (node != NUMA_NO_NODE) 808 if (node != NUMA_NO_NODE)
812 per_cpu(node_number, cpu) = node; 809 set_cpu_numa_node(cpu, node);
813} 810}
814 811
815void __cpuinit numa_clear_node(int cpu) 812void __cpuinit numa_clear_node(int cpu)
@@ -867,7 +864,7 @@ void __cpuinit numa_remove_cpu(int cpu)
867 numa_set_cpumask(cpu, 0); 864 numa_set_cpumask(cpu, 0);
868} 865}
869 866
870int cpu_to_node(int cpu) 867int __cpu_to_node(int cpu)
871{ 868{
872 if (early_per_cpu_ptr(x86_cpu_to_node_map)) { 869 if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
873 printk(KERN_WARNING 870 printk(KERN_WARNING
@@ -877,7 +874,7 @@ int cpu_to_node(int cpu)
877 } 874 }
878 return per_cpu(x86_cpu_to_node_map, cpu); 875 return per_cpu(x86_cpu_to_node_map, cpu);
879} 876}
880EXPORT_SYMBOL(cpu_to_node); 877EXPORT_SYMBOL(__cpu_to_node);
881 878
882/* 879/*
883 * Same function as cpu_to_node() but used if called before the 880 * Same function as cpu_to_node() but used if called before the
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index bbe5502ee1cb..acc15b23b743 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -336,6 +336,7 @@ int free_memtype(u64 start, u64 end)
336{ 336{
337 int err = -EINVAL; 337 int err = -EINVAL;
338 int is_range_ram; 338 int is_range_ram;
339 struct memtype *entry;
339 340
340 if (!pat_enabled) 341 if (!pat_enabled)
341 return 0; 342 return 0;
@@ -355,17 +356,20 @@ int free_memtype(u64 start, u64 end)
355 } 356 }
356 357
357 spin_lock(&memtype_lock); 358 spin_lock(&memtype_lock);
358 err = rbt_memtype_erase(start, end); 359 entry = rbt_memtype_erase(start, end);
359 spin_unlock(&memtype_lock); 360 spin_unlock(&memtype_lock);
360 361
361 if (err) { 362 if (!entry) {
362 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n", 363 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
363 current->comm, current->pid, start, end); 364 current->comm, current->pid, start, end);
365 return -EINVAL;
364 } 366 }
365 367
368 kfree(entry);
369
366 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end); 370 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
367 371
368 return err; 372 return 0;
369} 373}
370 374
371 375
diff --git a/arch/x86/mm/pat_internal.h b/arch/x86/mm/pat_internal.h
index 4f39eefa3e61..77e5ba153fac 100644
--- a/arch/x86/mm/pat_internal.h
+++ b/arch/x86/mm/pat_internal.h
@@ -28,15 +28,15 @@ static inline char *cattr_name(unsigned long flags)
28#ifdef CONFIG_X86_PAT 28#ifdef CONFIG_X86_PAT
29extern int rbt_memtype_check_insert(struct memtype *new, 29extern int rbt_memtype_check_insert(struct memtype *new,
30 unsigned long *new_type); 30 unsigned long *new_type);
31extern int rbt_memtype_erase(u64 start, u64 end); 31extern struct memtype *rbt_memtype_erase(u64 start, u64 end);
32extern struct memtype *rbt_memtype_lookup(u64 addr); 32extern struct memtype *rbt_memtype_lookup(u64 addr);
33extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos); 33extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos);
34#else 34#else
35static inline int rbt_memtype_check_insert(struct memtype *new, 35static inline int rbt_memtype_check_insert(struct memtype *new,
36 unsigned long *new_type) 36 unsigned long *new_type)
37{ return 0; } 37{ return 0; }
38static inline int rbt_memtype_erase(u64 start, u64 end) 38static inline struct memtype *rbt_memtype_erase(u64 start, u64 end)
39{ return 0; } 39{ return NULL; }
40static inline struct memtype *rbt_memtype_lookup(u64 addr) 40static inline struct memtype *rbt_memtype_lookup(u64 addr)
41{ return NULL; } 41{ return NULL; }
42static inline int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos) 42static inline int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index 07de4cb8cc30..f537087bb740 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -231,16 +231,17 @@ int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
231 return err; 231 return err;
232} 232}
233 233
234int rbt_memtype_erase(u64 start, u64 end) 234struct memtype *rbt_memtype_erase(u64 start, u64 end)
235{ 235{
236 struct memtype *data; 236 struct memtype *data;
237 237
238 data = memtype_rb_exact_match(&memtype_rbroot, start, end); 238 data = memtype_rb_exact_match(&memtype_rbroot, start, end);
239 if (!data) 239 if (!data)
240 return -EINVAL; 240 goto out;
241 241
242 rb_erase(&data->rb, &memtype_rbroot); 242 rb_erase(&data->rb, &memtype_rbroot);
243 return 0; 243out:
244 return data;
244} 245}
245 246
246struct memtype *rbt_memtype_lookup(u64 addr) 247struct memtype *rbt_memtype_lookup(u64 addr)
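Editor's note: the PAT changes shift ownership: rbt_memtype_erase() now only unlinks the node and hands it back, and free_memtype() decides whether to warn or kfree() it, keeping allocation and release in one place. A simplified userspace version of that erase-returns-node pattern, using a linked list in place of the rbtree:

#include <stdio.h>
#include <stdlib.h>

struct memtype { unsigned long start, end; struct memtype *next; };

static struct memtype *head;

static struct memtype *erase(unsigned long start, unsigned long end)
{
    struct memtype **pp;

    for (pp = &head; *pp; pp = &(*pp)->next) {
        if ((*pp)->start == start && (*pp)->end == end) {
            struct memtype *found = *pp;
            *pp = found->next;          /* unlink only, no free here */
            return found;
        }
    }
    return NULL;
}

static int free_memtype(unsigned long start, unsigned long end)
{
    struct memtype *entry = erase(start, end);

    if (!entry) {
        fprintf(stderr, "freeing invalid memtype %lx-%lx\n", start, end);
        return -1;
    }
    free(entry);                        /* the caller owns the node now */
    return 0;
}

int main(void)
{
    head = malloc(sizeof(*head));
    *head = (struct memtype){ .start = 0x1000, .end = 0x2000 };
    printf("%d %d\n", free_memtype(0x1000, 0x2000), free_memtype(0x3000, 0x4000));
    return 0;
}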
diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
index df3d5c861cda..308e32570d84 100644
--- a/arch/x86/mm/pf_in.c
+++ b/arch/x86/mm/pf_in.c
@@ -34,7 +34,7 @@
34/* IA32 Manual 3, 2-1 */ 34/* IA32 Manual 3, 2-1 */
35static unsigned char prefix_codes[] = { 35static unsigned char prefix_codes[] = {
36 0xF0, 0xF2, 0xF3, 0x2E, 0x36, 0x3E, 0x26, 0x64, 36 0xF0, 0xF2, 0xF3, 0x2E, 0x36, 0x3E, 0x26, 0x64,
37 0x65, 0x2E, 0x3E, 0x66, 0x67 37 0x65, 0x66, 0x67
38}; 38};
39/* IA32 Manual 3, 3-432*/ 39/* IA32 Manual 3, 3-432*/
40static unsigned int reg_rop[] = { 40static unsigned int reg_rop[] = {
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 792854003ed3..cac718499256 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -9,7 +9,6 @@
9#include <linux/pagemap.h> 9#include <linux/pagemap.h>
10#include <linux/spinlock.h> 10#include <linux/spinlock.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/quicklist.h>
13 12
14#include <asm/system.h> 13#include <asm/system.h>
15#include <asm/pgtable.h> 14#include <asm/pgtable.h>
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 31930fd30ea9..2ec04c424a62 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -207,10 +207,9 @@ get_current_resources(struct acpi_device *device, int busnum,
207 if (!info.res) 207 if (!info.res)
208 goto res_alloc_fail; 208 goto res_alloc_fail;
209 209
210 info.name = kmalloc(16, GFP_KERNEL); 210 info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
211 if (!info.name) 211 if (!info.name)
212 goto name_alloc_fail; 212 goto name_alloc_fail;
213 sprintf(info.name, "PCI Bus %04x:%02x", domain, busnum);
214 213
215 info.res_num = 0; 214 info.res_num = 0;
216 acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource, 215 acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
@@ -224,8 +223,11 @@ res_alloc_fail:
224 return; 223 return;
225} 224}
226 225
227struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int domain, int busnum) 226struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
228{ 227{
228 struct acpi_device *device = root->device;
229 int domain = root->segment;
230 int busnum = root->secondary.start;
229 struct pci_bus *bus; 231 struct pci_bus *bus;
230 struct pci_sysdata *sd; 232 struct pci_sysdata *sd;
231 int node; 233 int node;
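Editor's note: the pci/acpi.c hunk replaces a fixed-size kmalloc() plus sprintf() with kasprintf(), which sizes and fills the buffer in one call, so the hand-counted 16-byte allocation disappears. The userspace equivalent with asprintf() (a GNU extension) looks like this:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    char *name;

    if (asprintf(&name, "PCI Bus %04x:%02x", 0, 5) < 0)
        return 1;                       /* allocation failed */
    puts(name);
    free(name);
    return 0;
}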
diff --git a/arch/xtensa/include/asm/scatterlist.h b/arch/xtensa/include/asm/scatterlist.h
index 810080bb0a2b..b1f9fdc1d5ba 100644
--- a/arch/xtensa/include/asm/scatterlist.h
+++ b/arch/xtensa/include/asm/scatterlist.h
@@ -11,28 +11,7 @@
11#ifndef _XTENSA_SCATTERLIST_H 11#ifndef _XTENSA_SCATTERLIST_H
12#define _XTENSA_SCATTERLIST_H 12#define _XTENSA_SCATTERLIST_H
13 13
14#include <asm/types.h> 14#include <asm-generic/scatterlist.h>
15
16struct scatterlist {
17#ifdef CONFIG_DEBUG_SG
18 unsigned long sg_magic;
19#endif
20 unsigned long page_link;
21 unsigned int offset;
22 dma_addr_t dma_address;
23 unsigned int length;
24};
25
26/*
27 * These macros should be used after a pci_map_sg call has been done
28 * to get bus addresses of each of the SG entries and their lengths.
29 * You should only work with the number of sg entries pci_map_sg
30 * returns, or alternatively stop on the first sg_dma_len(sg) which
31 * is 0.
32 */
33#define sg_dma_address(sg) ((sg)->dma_address)
34#define sg_dma_len(sg) ((sg)->length)
35
36 15
37#define ISA_DMA_THRESHOLD (~0UL) 16#define ISA_DMA_THRESHOLD (~0UL)
38 17
diff --git a/drivers/Makefile b/drivers/Makefile
index f42a03029b7c..91874e048552 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PCI) += pci/
10obj-$(CONFIG_PARISC) += parisc/ 10obj-$(CONFIG_PARISC) += parisc/
11obj-$(CONFIG_RAPIDIO) += rapidio/ 11obj-$(CONFIG_RAPIDIO) += rapidio/
12obj-y += video/ 12obj-y += video/
13obj-y += idle/
13obj-$(CONFIG_ACPI) += acpi/ 14obj-$(CONFIG_ACPI) += acpi/
14obj-$(CONFIG_SFI) += sfi/ 15obj-$(CONFIG_SFI) += sfi/
15# PnP must come after ACPI since it will eventually need to check if acpi 16# PnP must come after ACPI since it will eventually need to check if acpi
@@ -91,7 +92,6 @@ obj-$(CONFIG_EISA) += eisa/
91obj-y += lguest/ 92obj-y += lguest/
92obj-$(CONFIG_CPU_FREQ) += cpufreq/ 93obj-$(CONFIG_CPU_FREQ) += cpufreq/
93obj-$(CONFIG_CPU_IDLE) += cpuidle/ 94obj-$(CONFIG_CPU_IDLE) += cpuidle/
94obj-y += idle/
95obj-$(CONFIG_MMC) += mmc/ 95obj-$(CONFIG_MMC) += mmc/
96obj-$(CONFIG_MEMSTICK) += memstick/ 96obj-$(CONFIG_MEMSTICK) += memstick/
97obj-$(CONFIG_NEW_LEDS) += leds/ 97obj-$(CONFIG_NEW_LEDS) += leds/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 93d2c7971df6..746411518802 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -360,4 +360,13 @@ config ACPI_SBS
360 To compile this driver as a module, choose M here: 360 To compile this driver as a module, choose M here:
361 the modules will be called sbs and sbshc. 361 the modules will be called sbs and sbshc.
362 362
363config ACPI_HED
364 tristate "Hardware Error Device"
365 help
366 This driver supports the Hardware Error Device (PNP0C33),
367 which is used to report some hardware errors notified via
368 SCI, mainly the corrected errors.
369
370source "drivers/acpi/apei/Kconfig"
371
363endif # ACPI 372endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index a8d8998dd5c5..6ee33169e1dc 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,7 +19,7 @@ obj-y += acpi.o \
19 19
20# All the builtin files are in the "acpi." module_param namespace. 20# All the builtin files are in the "acpi." module_param namespace.
21acpi-y += osl.o utils.o reboot.o 21acpi-y += osl.o utils.o reboot.o
22acpi-y += hest.o 22acpi-y += atomicio.o
23 23
24# sleep related files 24# sleep related files
25acpi-y += wakeup.o 25acpi-y += wakeup.o
@@ -59,6 +59,7 @@ obj-$(CONFIG_ACPI_BATTERY) += battery.o
59obj-$(CONFIG_ACPI_SBS) += sbshc.o 59obj-$(CONFIG_ACPI_SBS) += sbshc.o
60obj-$(CONFIG_ACPI_SBS) += sbs.o 60obj-$(CONFIG_ACPI_SBS) += sbs.o
61obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o 61obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
62obj-$(CONFIG_ACPI_HED) += hed.o
62 63
63# processor has its own "processor." module_param namespace 64# processor has its own "processor." module_param namespace
64processor-y := processor_driver.o processor_throttling.o 65processor-y := processor_driver.o processor_throttling.o
@@ -66,3 +67,5 @@ processor-y += processor_idle.o processor_thermal.o
66processor-$(CONFIG_CPU_FREQ) += processor_perflib.o 67processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
67 68
68obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o 69obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
70
71obj-$(CONFIG_ACPI_APEI) += apei/
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 62122134693b..d269a8f3329c 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -43,6 +43,10 @@ static DEFINE_MUTEX(isolated_cpus_lock);
43#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) 43#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
44#define CPUID5_ECX_INTERRUPT_BREAK (0x2) 44#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
45static unsigned long power_saving_mwait_eax; 45static unsigned long power_saving_mwait_eax;
46
47static unsigned char tsc_detected_unstable;
48static unsigned char tsc_marked_unstable;
49
46static void power_saving_mwait_init(void) 50static void power_saving_mwait_init(void)
47{ 51{
48 unsigned int eax, ebx, ecx, edx; 52 unsigned int eax, ebx, ecx, edx;
@@ -87,8 +91,8 @@ static void power_saving_mwait_init(void)
87 91
88 /*FALL THROUGH*/ 92 /*FALL THROUGH*/
89 default: 93 default:
90 /* TSC could halt in idle, so notify users */ 94 /* TSC could halt in idle */
91 mark_tsc_unstable("TSC halts in idle"); 95 tsc_detected_unstable = 1;
92 } 96 }
93#endif 97#endif
94} 98}
@@ -168,16 +172,14 @@ static int power_saving_thread(void *data)
168 172
169 do_sleep = 0; 173 do_sleep = 0;
170 174
171 current_thread_info()->status &= ~TS_POLLING;
172 /*
173 * TS_POLLING-cleared state must be visible before we test
174 * NEED_RESCHED:
175 */
176 smp_mb();
177
178 expire_time = jiffies + HZ * (100 - idle_pct) / 100; 175 expire_time = jiffies + HZ * (100 - idle_pct) / 100;
179 176
180 while (!need_resched()) { 177 while (!need_resched()) {
178 if (tsc_detected_unstable && !tsc_marked_unstable) {
179 /* TSC could halt in idle, so notify users */
180 mark_tsc_unstable("TSC halts in idle");
181 tsc_marked_unstable = 1;
182 }
181 local_irq_disable(); 183 local_irq_disable();
182 cpu = smp_processor_id(); 184 cpu = smp_processor_id();
183 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, 185 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
@@ -200,8 +202,6 @@ static int power_saving_thread(void *data)
200 } 202 }
201 } 203 }
202 204
203 current_thread_info()->status |= TS_POLLING;
204
205 /* 205 /*
206 * current sched_rt has threshold for rt task running time. 206 * current sched_rt has threshold for rt task running time.
207 * When a rt task uses 95% CPU time, the rt thread will be 207 * When a rt task uses 95% CPU time, the rt thread will be
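The acpi_pad change above defers mark_tsc_unstable() from the CPUID-detection path into the idle loop, remembering the detection in tsc_detected_unstable and using tsc_marked_unstable so the notification happens only once. A minimal standalone sketch of that detect-now, act-later pattern (plain C, not the kernel APIs):

#include <stdio.h>
#include <stdbool.h>

static bool condition_detected;	/* set where acting would be inappropriate */
static bool condition_handled;	/* ensures the expensive action runs once */

static void detection_path(void)
{
	/* e.g. feature parsing: only record the fact, do not act yet */
	condition_detected = true;
}

static void worker_loop_iteration(void)
{
	if (condition_detected && !condition_handled) {
		/* safe context: perform the one-time notification */
		printf("condition handled\n");
		condition_handled = true;
	}
	/* ... rest of the loop body ... */
}

int main(void)
{
	detection_path();
	worker_loop_iteration();
	worker_loop_iteration();	/* the action is not repeated */
	return 0;
}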
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 7c7bbb4d402c..d5a5efc043bf 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -69,7 +69,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
69 69
70acpi_status acpi_enable(void) 70acpi_status acpi_enable(void)
71{ 71{
72 acpi_status status = AE_OK; 72 acpi_status status;
73 73
74 ACPI_FUNCTION_TRACE(acpi_enable); 74 ACPI_FUNCTION_TRACE(acpi_enable);
75 75
@@ -84,21 +84,30 @@ acpi_status acpi_enable(void)
84 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { 84 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
85 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 85 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
86 "System is already in ACPI mode\n")); 86 "System is already in ACPI mode\n"));
87 } else { 87 return_ACPI_STATUS(AE_OK);
88 /* Transition to ACPI mode */ 88 }
89 89
90 status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI); 90 /* Transition to ACPI mode */
91 if (ACPI_FAILURE(status)) {
92 ACPI_ERROR((AE_INFO,
93 "Could not transition to ACPI mode"));
94 return_ACPI_STATUS(status);
95 }
96 91
97 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 92 status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
98 "Transition to ACPI mode successful\n")); 93 if (ACPI_FAILURE(status)) {
94 ACPI_ERROR((AE_INFO,
95 "Could not transition to ACPI mode"));
96 return_ACPI_STATUS(status);
99 } 97 }
100 98
101 return_ACPI_STATUS(status); 99 /* Sanity check that transition succeeded */
100
101 if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) {
102 ACPI_ERROR((AE_INFO,
103 "Hardware did not enter ACPI mode"));
104 return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
105 }
106
107 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
108 "Transition to ACPI mode successful\n"));
109
110 return_ACPI_STATUS(AE_OK);
102} 111}
103 112
104ACPI_EXPORT_SYMBOL(acpi_enable) 113ACPI_EXPORT_SYMBOL(acpi_enable)
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 679a112a7d26..b44274a0b62c 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -63,7 +63,6 @@ acpi_status acpi_hw_set_mode(u32 mode)
63{ 63{
64 64
65 acpi_status status; 65 acpi_status status;
66 u32 retry;
67 66
68 ACPI_FUNCTION_TRACE(hw_set_mode); 67 ACPI_FUNCTION_TRACE(hw_set_mode);
69 68
@@ -125,24 +124,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
125 return_ACPI_STATUS(status); 124 return_ACPI_STATUS(status);
126 } 125 }
127 126
128 /* 127 return_ACPI_STATUS(AE_OK);
129 * Some hardware takes a LONG time to switch modes. Give them 3 sec to
130 * do so, but allow faster systems to proceed more quickly.
131 */
132 retry = 3000;
133 while (retry) {
134 if (acpi_hw_get_mode() == mode) {
135 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
136 "Mode %X successfully enabled\n",
137 mode));
138 return_ACPI_STATUS(AE_OK);
139 }
140 acpi_os_stall(1000);
141 retry--;
142 }
143
144 ACPI_ERROR((AE_INFO, "Hardware did not change modes"));
145 return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
146} 128}
147 129
148/******************************************************************************* 130/*******************************************************************************
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
new file mode 100644
index 000000000000..f8c668f27b5a
--- /dev/null
+++ b/drivers/acpi/apei/Kconfig
@@ -0,0 +1,30 @@
1config ACPI_APEI
2 bool "ACPI Platform Error Interface (APEI)"
3 depends on X86
4 help
5	  APEI allows reporting errors (for example from the chipset)
6	  to the operating system. This especially improves NMI
7	  handling. In addition it supports error serialization and
8	  error injection.
9
10config ACPI_APEI_GHES
11 tristate "APEI Generic Hardware Error Source"
12 depends on ACPI_APEI && X86
13 select ACPI_HED
14 help
15	  Generic Hardware Error Source provides a way to report
16	  platform hardware errors (such as those from the chipset).
17	  It works in so-called "Firmware First" mode, that is,
18	  hardware errors are reported to the firmware first and then
19	  forwarded to Linux by the firmware. This way, some
20	  non-standard hardware error registers or non-standard
21	  hardware links can be checked by the firmware to produce
22	  more valuable hardware error information for Linux.
23
24config ACPI_APEI_EINJ
25 tristate "APEI Error INJection (EINJ)"
26 depends on ACPI_APEI && DEBUG_FS
27 help
28	  EINJ provides a hardware error injection mechanism; it is
29	  mainly used for debugging and testing the other parts of
30	  APEI and some other RAS features.
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
new file mode 100644
index 000000000000..b13b03a17789
--- /dev/null
+++ b/drivers/acpi/apei/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_ACPI_APEI) += apei.o
2obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
3obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
4
5apei-y := apei-base.o hest.o cper.o erst.o
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
new file mode 100644
index 000000000000..db3946e9c66b
--- /dev/null
+++ b/drivers/acpi/apei/apei-base.c
@@ -0,0 +1,593 @@
1/*
2 * apei-base.c - ACPI Platform Error Interface (APEI) supporting
3 * infrastructure
4 *
5 * APEI allows reporting errors (for example from the chipset) to
6 * the operating system. This improves NMI handling especially. In
7 * addition it supports error serialization and error injection.
8 *
9 * For more information about APEI, please refer to ACPI Specification
10 * version 4.0, chapter 17.
11 *
12 * This file contains common functions used by more than one APEI
13 * table, including the framework of the interpreter for ERST and
14 * EINJ, and resource management for APEI registers.
15 *
16 * Copyright (C) 2009, Intel Corp.
17 * Author: Huang Ying <ying.huang@intel.com>
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License version
21 * 2 as published by the Free Software Foundation.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/init.h>
36#include <linux/acpi.h>
37#include <linux/io.h>
38#include <linux/kref.h>
39#include <linux/rculist.h>
40#include <linux/interrupt.h>
41#include <linux/debugfs.h>
42#include <acpi/atomicio.h>
43
44#include "apei-internal.h"
45
46#define APEI_PFX "APEI: "
47
48/*
49 * APEI ERST (Error Record Serialization Table) and EINJ (Error
50 * INJection) interpreter framework.
51 */
52
53#define APEI_EXEC_PRESERVE_REGISTER 0x1
54
55void apei_exec_ctx_init(struct apei_exec_context *ctx,
56 struct apei_exec_ins_type *ins_table,
57 u32 instructions,
58 struct acpi_whea_header *action_table,
59 u32 entries)
60{
61 ctx->ins_table = ins_table;
62 ctx->instructions = instructions;
63 ctx->action_table = action_table;
64 ctx->entries = entries;
65}
66EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
67
68int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
69{
70 int rc;
71
72 rc = acpi_atomic_read(val, &entry->register_region);
73 if (rc)
74 return rc;
75 *val >>= entry->register_region.bit_offset;
76 *val &= entry->mask;
77
78 return 0;
79}
80
81int apei_exec_read_register(struct apei_exec_context *ctx,
82 struct acpi_whea_header *entry)
83{
84 int rc;
85 u64 val = 0;
86
87 rc = __apei_exec_read_register(entry, &val);
88 if (rc)
89 return rc;
90 ctx->value = val;
91
92 return 0;
93}
94EXPORT_SYMBOL_GPL(apei_exec_read_register);
95
96int apei_exec_read_register_value(struct apei_exec_context *ctx,
97 struct acpi_whea_header *entry)
98{
99 int rc;
100
101 rc = apei_exec_read_register(ctx, entry);
102 if (rc)
103 return rc;
104 ctx->value = (ctx->value == entry->value);
105
106 return 0;
107}
108EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
109
110int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
111{
112 int rc;
113
114 val &= entry->mask;
115 val <<= entry->register_region.bit_offset;
116 if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
117 u64 valr = 0;
118 rc = acpi_atomic_read(&valr, &entry->register_region);
119 if (rc)
120 return rc;
121 valr &= ~(entry->mask << entry->register_region.bit_offset);
122 val |= valr;
123 }
124 rc = acpi_atomic_write(val, &entry->register_region);
125
126 return rc;
127}
128
129int apei_exec_write_register(struct apei_exec_context *ctx,
130 struct acpi_whea_header *entry)
131{
132 return __apei_exec_write_register(entry, ctx->value);
133}
134EXPORT_SYMBOL_GPL(apei_exec_write_register);
135
136int apei_exec_write_register_value(struct apei_exec_context *ctx,
137 struct acpi_whea_header *entry)
138{
139 int rc;
140
141 ctx->value = entry->value;
142 rc = apei_exec_write_register(ctx, entry);
143
144 return rc;
145}
146EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
147
148int apei_exec_noop(struct apei_exec_context *ctx,
149 struct acpi_whea_header *entry)
150{
151 return 0;
152}
153EXPORT_SYMBOL_GPL(apei_exec_noop);
154
155/*
156 * Interpret the specified action. Go through the whole action table
157 * and execute all instructions belonging to the action.
158 */
159int apei_exec_run(struct apei_exec_context *ctx, u8 action)
160{
161 int rc;
162 u32 i, ip;
163 struct acpi_whea_header *entry;
164 apei_exec_ins_func_t run;
165
166 ctx->ip = 0;
167
168 /*
169 * "ip" is the instruction pointer of current instruction,
170 * "ctx->ip" specifies the next instruction to executed,
171 * instruction "run" function may change the "ctx->ip" to
172 * implement "goto" semantics.
173 */
174rewind:
175 ip = 0;
176 for (i = 0; i < ctx->entries; i++) {
177 entry = &ctx->action_table[i];
178 if (entry->action != action)
179 continue;
180 if (ip == ctx->ip) {
181 if (entry->instruction >= ctx->instructions ||
182 !ctx->ins_table[entry->instruction].run) {
183 pr_warning(FW_WARN APEI_PFX
184 "Invalid action table, unknown instruction type: %d\n",
185 entry->instruction);
186 return -EINVAL;
187 }
188 run = ctx->ins_table[entry->instruction].run;
189 rc = run(ctx, entry);
190 if (rc < 0)
191 return rc;
192 else if (rc != APEI_EXEC_SET_IP)
193 ctx->ip++;
194 }
195 ip++;
196 if (ctx->ip < ip)
197 goto rewind;
198 }
199
200 return 0;
201}
202EXPORT_SYMBOL_GPL(apei_exec_run);
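To make the ctx->ip convention above concrete: an instruction's run callback implements a jump by writing the target instruction index into ctx->ip and returning APEI_EXEC_SET_IP, so that apei_exec_run() does not advance the pointer itself. A short sketch (this is the pattern the ERST GOTO instruction later in this patch uses):

/* Illustrative only: jump to the instruction index passed in ctx->value. */
static int demo_exec_goto(struct apei_exec_context *ctx,
			  struct acpi_whea_header *entry)
{
	ctx->ip = ctx->value;
	return APEI_EXEC_SET_IP;	/* tells apei_exec_run() to skip ctx->ip++ */
}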
203
204typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
205 struct acpi_whea_header *entry,
206 void *data);
207
208static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
209 apei_exec_entry_func_t func,
210 void *data,
211 int *end)
212{
213 u8 ins;
214 int i, rc;
215 struct acpi_whea_header *entry;
216 struct apei_exec_ins_type *ins_table = ctx->ins_table;
217
218 for (i = 0; i < ctx->entries; i++) {
219 entry = ctx->action_table + i;
220 ins = entry->instruction;
221 if (end)
222 *end = i;
223 if (ins >= ctx->instructions || !ins_table[ins].run) {
224 pr_warning(FW_WARN APEI_PFX
225 "Invalid action table, unknown instruction type: %d\n",
226 ins);
227 return -EINVAL;
228 }
229 rc = func(ctx, entry, data);
230 if (rc)
231 return rc;
232 }
233
234 return 0;
235}
236
237static int pre_map_gar_callback(struct apei_exec_context *ctx,
238 struct acpi_whea_header *entry,
239 void *data)
240{
241 u8 ins = entry->instruction;
242
243 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
244 return acpi_pre_map_gar(&entry->register_region);
245
246 return 0;
247}
248
249/*
250 * Pre-map all GARs in the action table to make it possible to access
251 * them from the NMI handler.
252 */
253int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
254{
255 int rc, end;
256
257 rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
258 NULL, &end);
259 if (rc) {
260 struct apei_exec_context ctx_unmap;
261 memcpy(&ctx_unmap, ctx, sizeof(*ctx));
262 ctx_unmap.entries = end;
263 apei_exec_post_unmap_gars(&ctx_unmap);
264 }
265
266 return rc;
267}
268EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);
269
270static int post_unmap_gar_callback(struct apei_exec_context *ctx,
271 struct acpi_whea_header *entry,
272 void *data)
273{
274 u8 ins = entry->instruction;
275
276 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
277 acpi_post_unmap_gar(&entry->register_region);
278
279 return 0;
280}
281
282/* Post-unmap all GAR in action table. */
283int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
284{
285 return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
286 NULL, NULL);
287}
288EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
289
290/*
291 * Resource management for GARs in APEI
292 */
293struct apei_res {
294 struct list_head list;
295 unsigned long start;
296 unsigned long end;
297};
298
299/* Collect all resources requested, to avoid conflict */
300struct apei_resources apei_resources_all = {
301 .iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
302 .ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
303};
304
305static int apei_res_add(struct list_head *res_list,
306 unsigned long start, unsigned long size)
307{
308 struct apei_res *res, *resn, *res_ins = NULL;
309 unsigned long end = start + size;
310
311 if (end <= start)
312 return 0;
313repeat:
314 list_for_each_entry_safe(res, resn, res_list, list) {
315 if (res->start > end || res->end < start)
316 continue;
317 else if (end <= res->end && start >= res->start) {
318 kfree(res_ins);
319 return 0;
320 }
321 list_del(&res->list);
322 res->start = start = min(res->start, start);
323 res->end = end = max(res->end, end);
324 kfree(res_ins);
325 res_ins = res;
326 goto repeat;
327 }
328
329 if (res_ins)
330 list_add(&res_ins->list, res_list);
331 else {
332 res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
333 if (!res_ins)
334 return -ENOMEM;
335 res_ins->start = start;
336 res_ins->end = end;
337 list_add(&res_ins->list, res_list);
338 }
339
340 return 0;
341}
342
343static int apei_res_sub(struct list_head *res_list1,
344 struct list_head *res_list2)
345{
346 struct apei_res *res1, *resn1, *res2, *res;
347 res1 = list_entry(res_list1->next, struct apei_res, list);
348 resn1 = list_entry(res1->list.next, struct apei_res, list);
349 while (&res1->list != res_list1) {
350 list_for_each_entry(res2, res_list2, list) {
351 if (res1->start >= res2->end ||
352 res1->end <= res2->start)
353 continue;
354 else if (res1->end <= res2->end &&
355 res1->start >= res2->start) {
356 list_del(&res1->list);
357 kfree(res1);
358 break;
359 } else if (res1->end > res2->end &&
360 res1->start < res2->start) {
361 res = kmalloc(sizeof(*res), GFP_KERNEL);
362 if (!res)
363 return -ENOMEM;
364 res->start = res2->end;
365 res->end = res1->end;
366 res1->end = res2->start;
367 list_add(&res->list, &res1->list);
368 resn1 = res;
369 } else {
370 if (res1->start < res2->start)
371 res1->end = res2->start;
372 else
373 res1->start = res2->end;
374 }
375 }
376 res1 = resn1;
377 resn1 = list_entry(resn1->list.next, struct apei_res, list);
378 }
379
380 return 0;
381}
382
383static void apei_res_clean(struct list_head *res_list)
384{
385 struct apei_res *res, *resn;
386
387 list_for_each_entry_safe(res, resn, res_list, list) {
388 list_del(&res->list);
389 kfree(res);
390 }
391}
392
393void apei_resources_fini(struct apei_resources *resources)
394{
395 apei_res_clean(&resources->iomem);
396 apei_res_clean(&resources->ioport);
397}
398EXPORT_SYMBOL_GPL(apei_resources_fini);
399
400static int apei_resources_merge(struct apei_resources *resources1,
401 struct apei_resources *resources2)
402{
403 int rc;
404 struct apei_res *res;
405
406 list_for_each_entry(res, &resources2->iomem, list) {
407 rc = apei_res_add(&resources1->iomem, res->start,
408 res->end - res->start);
409 if (rc)
410 return rc;
411 }
412 list_for_each_entry(res, &resources2->ioport, list) {
413 rc = apei_res_add(&resources1->ioport, res->start,
414 res->end - res->start);
415 if (rc)
416 return rc;
417 }
418
419 return 0;
420}
421
422/*
423 * EINJ has two groups of GARs (EINJ table entry and trigger table
424 * entry), so common resources are subtracted from the trigger table
425 * resources before the second request.
426 */
427int apei_resources_sub(struct apei_resources *resources1,
428 struct apei_resources *resources2)
429{
430 int rc;
431
432 rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
433 if (rc)
434 return rc;
435 return apei_res_sub(&resources1->ioport, &resources2->ioport);
436}
437EXPORT_SYMBOL_GPL(apei_resources_sub);
438
439/*
440 * The IO memory/port resource management mechanism is used to check
441 * whether the memory/port areas used by GARs conflict with normal
442 * memory or the IO memory/ports of devices.
443 */
444int apei_resources_request(struct apei_resources *resources,
445 const char *desc)
446{
447 struct apei_res *res, *res_bak;
448 struct resource *r;
449
450 apei_resources_sub(resources, &apei_resources_all);
451
452 list_for_each_entry(res, &resources->iomem, list) {
453 r = request_mem_region(res->start, res->end - res->start,
454 desc);
455 if (!r) {
456 pr_err(APEI_PFX
457 "Can not request iomem region <%016llx-%016llx> for GARs.\n",
458 (unsigned long long)res->start,
459 (unsigned long long)res->end);
460 res_bak = res;
461 goto err_unmap_iomem;
462 }
463 }
464
465 list_for_each_entry(res, &resources->ioport, list) {
466 r = request_region(res->start, res->end - res->start, desc);
467 if (!r) {
468 pr_err(APEI_PFX
469 "Can not request ioport region <%016llx-%016llx> for GARs.\n",
470 (unsigned long long)res->start,
471 (unsigned long long)res->end);
472 res_bak = res;
473 goto err_unmap_ioport;
474 }
475 }
476
477 apei_resources_merge(&apei_resources_all, resources);
478
479 return 0;
480err_unmap_ioport:
481 list_for_each_entry(res, &resources->ioport, list) {
482 if (res == res_bak)
483 break;
484 release_mem_region(res->start, res->end - res->start);
485 }
486 res_bak = NULL;
487err_unmap_iomem:
488 list_for_each_entry(res, &resources->iomem, list) {
489 if (res == res_bak)
490 break;
491 release_region(res->start, res->end - res->start);
492 }
493 return -EINVAL;
494}
495EXPORT_SYMBOL_GPL(apei_resources_request);
496
497void apei_resources_release(struct apei_resources *resources)
498{
499 struct apei_res *res;
500
501 list_for_each_entry(res, &resources->iomem, list)
502 release_mem_region(res->start, res->end - res->start);
503 list_for_each_entry(res, &resources->ioport, list)
504 release_region(res->start, res->end - res->start);
505
506 apei_resources_sub(&apei_resources_all, resources);
507}
508EXPORT_SYMBOL_GPL(apei_resources_release);
509
510static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
511{
512 u32 width, space_id;
513
514 width = reg->bit_width;
515 space_id = reg->space_id;
516 /* Handle possible alignment issues */
517 memcpy(paddr, &reg->address, sizeof(*paddr));
518 if (!*paddr) {
519 pr_warning(FW_BUG APEI_PFX
520 "Invalid physical address in GAR [0x%llx/%u/%u]\n",
521 *paddr, width, space_id);
522 return -EINVAL;
523 }
524
525 if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
526 pr_warning(FW_BUG APEI_PFX
527 "Invalid bit width in GAR [0x%llx/%u/%u]\n",
528 *paddr, width, space_id);
529 return -EINVAL;
530 }
531
532 if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
533 space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
534 pr_warning(FW_BUG APEI_PFX
535 "Invalid address space type in GAR [0x%llx/%u/%u]\n",
536 *paddr, width, space_id);
537 return -EINVAL;
538 }
539
540 return 0;
541}
542
543static int collect_res_callback(struct apei_exec_context *ctx,
544 struct acpi_whea_header *entry,
545 void *data)
546{
547 struct apei_resources *resources = data;
548 struct acpi_generic_address *reg = &entry->register_region;
549 u8 ins = entry->instruction;
550 u64 paddr;
551 int rc;
552
553 if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
554 return 0;
555
556 rc = apei_check_gar(reg, &paddr);
557 if (rc)
558 return rc;
559
560 switch (reg->space_id) {
561 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
562 return apei_res_add(&resources->iomem, paddr,
563 reg->bit_width / 8);
564 case ACPI_ADR_SPACE_SYSTEM_IO:
565 return apei_res_add(&resources->ioport, paddr,
566 reg->bit_width / 8);
567 default:
568 return -EINVAL;
569 }
570}
571
572/*
573 * The same register may be referenced by the GARs of multiple
574 * instructions, so resources are collected before they are requested.
575 */
576int apei_exec_collect_resources(struct apei_exec_context *ctx,
577 struct apei_resources *resources)
578{
579 return apei_exec_for_each_entry(ctx, collect_res_callback,
580 resources, NULL);
581}
582EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
583
584struct dentry *apei_get_debugfs_dir(void)
585{
586 static struct dentry *dapei;
587
588 if (!dapei)
589 dapei = debugfs_create_dir("apei", NULL);
590
591 return dapei;
592}
593EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
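Putting the pieces of apei-base.c together: a table driver built on this framework binds its instruction table to the ACPI action table, collects and requests the GAR resources, pre-maps them, and then runs actions. The following is a condensed sketch using only the helpers introduced above; the instruction table contents and the MY_ACTION value are placeholders, and the teardown is folded into the same function for brevity.

static struct apei_exec_ins_type demo_ins_type[] = {
	[0] = {
		.flags	= APEI_EXEC_INS_ACCESS_REGISTER,
		.run	= apei_exec_read_register,
	},
	/* ... further instruction types as defined by the table ... */
};

static int demo_table_run(struct acpi_whea_header *action_table, u32 entries)
{
	struct apei_exec_context ctx;
	struct apei_resources res;
	int rc;

	apei_exec_ctx_init(&ctx, demo_ins_type, ARRAY_SIZE(demo_ins_type),
			   action_table, entries);

	apei_resources_init(&res);
	rc = apei_exec_collect_resources(&ctx, &res);
	if (rc)
		goto err_fini;
	rc = apei_resources_request(&res, "APEI demo");
	if (rc)
		goto err_fini;
	rc = apei_exec_pre_map_gars(&ctx);
	if (rc)
		goto err_release;

	rc = apei_exec_run(&ctx, MY_ACTION);	/* MY_ACTION: placeholder action code */

	apei_exec_post_unmap_gars(&ctx);
err_release:
	apei_resources_release(&res);
err_fini:
	apei_resources_fini(&res);
	return rc;
}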
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
new file mode 100644
index 000000000000..18df1e940276
--- /dev/null
+++ b/drivers/acpi/apei/apei-internal.h
@@ -0,0 +1,114 @@
1/*
2 * apei-internal.h - ACPI Platform Error Interface internal
3 * definitions.
4 */
5
6#ifndef APEI_INTERNAL_H
7#define APEI_INTERNAL_H
8
9#include <linux/cper.h>
10
11struct apei_exec_context;
12
13typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
14 struct acpi_whea_header *entry);
15
16#define APEI_EXEC_INS_ACCESS_REGISTER 0x0001
17
18struct apei_exec_ins_type {
19 u32 flags;
20 apei_exec_ins_func_t run;
21};
22
23struct apei_exec_context {
24 u32 ip;
25 u64 value;
26 u64 var1;
27 u64 var2;
28 u64 src_base;
29 u64 dst_base;
30 struct apei_exec_ins_type *ins_table;
31 u32 instructions;
32 struct acpi_whea_header *action_table;
33 u32 entries;
34};
35
36void apei_exec_ctx_init(struct apei_exec_context *ctx,
37 struct apei_exec_ins_type *ins_table,
38 u32 instructions,
39 struct acpi_whea_header *action_table,
40 u32 entries);
41
42static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx,
43 u64 input)
44{
45 ctx->value = input;
46}
47
48static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
49{
50 return ctx->value;
51}
52
53int apei_exec_run(struct apei_exec_context *ctx, u8 action);
54
55/* Common instruction implementation */
56
57/* IP has been set in instruction function */
58#define APEI_EXEC_SET_IP 1
59
60int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
61int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
62int apei_exec_read_register(struct apei_exec_context *ctx,
63 struct acpi_whea_header *entry);
64int apei_exec_read_register_value(struct apei_exec_context *ctx,
65 struct acpi_whea_header *entry);
66int apei_exec_write_register(struct apei_exec_context *ctx,
67 struct acpi_whea_header *entry);
68int apei_exec_write_register_value(struct apei_exec_context *ctx,
69 struct acpi_whea_header *entry);
70int apei_exec_noop(struct apei_exec_context *ctx,
71 struct acpi_whea_header *entry);
72int apei_exec_pre_map_gars(struct apei_exec_context *ctx);
73int apei_exec_post_unmap_gars(struct apei_exec_context *ctx);
74
75struct apei_resources {
76 struct list_head iomem;
77 struct list_head ioport;
78};
79
80static inline void apei_resources_init(struct apei_resources *resources)
81{
82 INIT_LIST_HEAD(&resources->iomem);
83 INIT_LIST_HEAD(&resources->ioport);
84}
85
86void apei_resources_fini(struct apei_resources *resources);
87int apei_resources_sub(struct apei_resources *resources1,
88 struct apei_resources *resources2);
89int apei_resources_request(struct apei_resources *resources,
90 const char *desc);
91void apei_resources_release(struct apei_resources *resources);
92int apei_exec_collect_resources(struct apei_exec_context *ctx,
93 struct apei_resources *resources);
94
95struct dentry;
96struct dentry *apei_get_debugfs_dir(void);
97
98#define apei_estatus_for_each_section(estatus, section) \
99 for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
100 (void *)section - (void *)estatus < estatus->data_length; \
101 section = (void *)(section+1) + section->error_data_length)
102
103static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
104{
105 if (estatus->raw_data_length)
106 return estatus->raw_data_offset + \
107 estatus->raw_data_length;
108 else
109 return sizeof(*estatus) + estatus->data_length;
110}
111
112int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
113int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
114#endif
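The apei_estatus_for_each_section() macro and apei_estatus_len() above are the intended way to walk a generic error status block. A brief sketch of a consumer that counts the sections and reports the total length; the estatus pointer is assumed to reference a block already validated with apei_estatus_check().

static void demo_walk_estatus(struct acpi_hest_generic_status *estatus)
{
	struct acpi_hest_generic_data *section;
	unsigned int n = 0;

	apei_estatus_for_each_section(estatus, section) {
		/* section->error_data_length bytes of payload follow each header */
		n++;
	}
	pr_info("estatus: %u section(s), %u bytes total\n",
		n, apei_estatus_len(estatus));
}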
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
new file mode 100644
index 000000000000..f4cf2fc4c8c1
--- /dev/null
+++ b/drivers/acpi/apei/cper.c
@@ -0,0 +1,84 @@
1/*
2 * UEFI Common Platform Error Record (CPER) support
3 *
4 * Copyright (C) 2010, Intel Corp.
5 * Author: Huang Ying <ying.huang@intel.com>
6 *
7 * CPER is the format used to describe platform hardware errors by
8 * various APEI tables, such as ERST, BERT and HEST.
9 *
10 * For more information about CPER, please refer to Appendix N of UEFI
11 * Specification version 2.3.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License version
15 * 2 as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/time.h>
30#include <linux/cper.h>
31#include <linux/acpi.h>
32
33/*
34 * The CPER record ID needs to be unique even across reboots, because
35 * the record ID is used as an index for ERST storage, and CPER records
36 * from multiple boots may co-exist in ERST.
37 */
38u64 cper_next_record_id(void)
39{
40 static atomic64_t seq;
41
42 if (!atomic64_read(&seq))
43 atomic64_set(&seq, ((u64)get_seconds()) << 32);
44
45 return atomic64_inc_return(&seq);
46}
47EXPORT_SYMBOL_GPL(cper_next_record_id);
48
49int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
50{
51 if (estatus->data_length &&
52 estatus->data_length < sizeof(struct acpi_hest_generic_data))
53 return -EINVAL;
54 if (estatus->raw_data_length &&
55 estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
56 return -EINVAL;
57
58 return 0;
59}
60EXPORT_SYMBOL_GPL(apei_estatus_check_header);
61
62int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
63{
64 struct acpi_hest_generic_data *gdata;
65 unsigned int data_len, gedata_len;
66 int rc;
67
68 rc = apei_estatus_check_header(estatus);
69 if (rc)
70 return rc;
71 data_len = estatus->data_length;
72 gdata = (struct acpi_hest_generic_data *)(estatus + 1);
73 while (data_len > sizeof(*gdata)) {
74 gedata_len = gdata->error_data_length;
75 if (gedata_len > data_len - sizeof(*gdata))
76 return -EINVAL;
77 data_len -= gedata_len + sizeof(*gdata);
78 }
79 if (data_len)
80 return -EINVAL;
81
82 return 0;
83}
84EXPORT_SYMBOL_GPL(apei_estatus_check);
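cper_next_record_id() above seeds the sequence with the boot time shifted into the upper 32 bits, so IDs from different boots do not collide while the low half simply counts records within a boot. A hedged illustration of how such an ID decomposes (plain userspace C, with an arbitrary example timestamp):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t seed = (uint64_t)1279000000 << 32;	/* get_seconds() value at boot */
	uint64_t id = seed + 3;				/* third record of this boot */

	printf("boot seconds: %u, sequence: %u\n",
	       (unsigned)(id >> 32), (unsigned)id);
	return 0;
}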
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
new file mode 100644
index 000000000000..465c885938ee
--- /dev/null
+++ b/drivers/acpi/apei/einj.c
@@ -0,0 +1,548 @@
1/*
2 * APEI Error INJection support
3 *
4 * EINJ provides a hardware error injection mechanism, which is useful
5 * for debugging and testing of other APEI and RAS features.
6 *
7 * For more information about EINJ, please refer to ACPI Specification
8 * version 4.0, section 17.5.
9 *
10 * Copyright 2009-2010 Intel Corp.
11 * Author: Huang Ying <ying.huang@intel.com>
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License version
15 * 2 as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/io.h>
31#include <linux/debugfs.h>
32#include <linux/seq_file.h>
33#include <linux/nmi.h>
34#include <linux/delay.h>
35#include <acpi/acpi.h>
36
37#include "apei-internal.h"
38
39#define EINJ_PFX "EINJ: "
40
41#define SPIN_UNIT 100 /* 100ns */
42/* Firmware should respond within 1 millisecond */
43#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
44
45/*
46 * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
47 * EINJ table through an unpublished extension. Use with caution as
48 * most will ignore the parameter and make their own choice of address
49 * for error injection.
50 */
51struct einj_parameter {
52 u64 type;
53 u64 reserved1;
54 u64 reserved2;
55 u64 param1;
56 u64 param2;
57};
58
59#define EINJ_OP_BUSY 0x1
60#define EINJ_STATUS_SUCCESS 0x0
61#define EINJ_STATUS_FAIL 0x1
62#define EINJ_STATUS_INVAL 0x2
63
64#define EINJ_TAB_ENTRY(tab) \
65 ((struct acpi_whea_header *)((char *)(tab) + \
66 sizeof(struct acpi_table_einj)))
67
68static struct acpi_table_einj *einj_tab;
69
70static struct apei_resources einj_resources;
71
72static struct apei_exec_ins_type einj_ins_type[] = {
73 [ACPI_EINJ_READ_REGISTER] = {
74 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
75 .run = apei_exec_read_register,
76 },
77 [ACPI_EINJ_READ_REGISTER_VALUE] = {
78 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
79 .run = apei_exec_read_register_value,
80 },
81 [ACPI_EINJ_WRITE_REGISTER] = {
82 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
83 .run = apei_exec_write_register,
84 },
85 [ACPI_EINJ_WRITE_REGISTER_VALUE] = {
86 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
87 .run = apei_exec_write_register_value,
88 },
89 [ACPI_EINJ_NOOP] = {
90 .flags = 0,
91 .run = apei_exec_noop,
92 },
93};
94
95/*
96 * Prevent the EINJ interpreter from running simultaneously, because the
97 * corresponding firmware implementation may not work properly when
98 * invoked simultaneously.
99 */
100static DEFINE_MUTEX(einj_mutex);
101
102static struct einj_parameter *einj_param;
103
104static void einj_exec_ctx_init(struct apei_exec_context *ctx)
105{
106 apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
107 EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
108}
109
110static int __einj_get_available_error_type(u32 *type)
111{
112 struct apei_exec_context ctx;
113 int rc;
114
115 einj_exec_ctx_init(&ctx);
116 rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
117 if (rc)
118 return rc;
119 *type = apei_exec_ctx_get_output(&ctx);
120
121 return 0;
122}
123
124/* Get error injection capabilities of the platform */
125static int einj_get_available_error_type(u32 *type)
126{
127 int rc;
128
129 mutex_lock(&einj_mutex);
130 rc = __einj_get_available_error_type(type);
131 mutex_unlock(&einj_mutex);
132
133 return rc;
134}
135
136static int einj_timedout(u64 *t)
137{
138 if ((s64)*t < SPIN_UNIT) {
139 pr_warning(FW_WARN EINJ_PFX
140 "Firmware does not respond in time\n");
141 return 1;
142 }
143 *t -= SPIN_UNIT;
144 ndelay(SPIN_UNIT);
145 touch_nmi_watchdog();
146 return 0;
147}
148
149static u64 einj_get_parameter_address(void)
150{
151 int i;
152 u64 paddr = 0;
153 struct acpi_whea_header *entry;
154
155 entry = EINJ_TAB_ENTRY(einj_tab);
156 for (i = 0; i < einj_tab->entries; i++) {
157 if (entry->action == ACPI_EINJ_SET_ERROR_TYPE &&
158 entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
159 entry->register_region.space_id ==
160 ACPI_ADR_SPACE_SYSTEM_MEMORY)
161 memcpy(&paddr, &entry->register_region.address,
162 sizeof(paddr));
163 entry++;
164 }
165
166 return paddr;
167}
168
169/* Sanity check the trigger error action table */
170static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
171{
172 if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger))
173 return -EINVAL;
174 if (trigger_tab->table_size > PAGE_SIZE ||
175 trigger_tab->table_size <= trigger_tab->header_size)
176 return -EINVAL;
177 if (trigger_tab->entry_count !=
178 (trigger_tab->table_size - trigger_tab->header_size) /
179 sizeof(struct acpi_einj_entry))
180 return -EINVAL;
181
182 return 0;
183}
184
185/* Execute instructions in trigger error action table */
186static int __einj_error_trigger(u64 trigger_paddr)
187{
188 struct acpi_einj_trigger *trigger_tab = NULL;
189 struct apei_exec_context trigger_ctx;
190 struct apei_resources trigger_resources;
191 struct acpi_whea_header *trigger_entry;
192 struct resource *r;
193 u32 table_size;
194 int rc = -EIO;
195
196 r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
197 "APEI EINJ Trigger Table");
198 if (!r) {
199 pr_err(EINJ_PFX
200 "Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
201 (unsigned long long)trigger_paddr,
202 (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
203 goto out;
204 }
205 trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
206 if (!trigger_tab) {
207 pr_err(EINJ_PFX "Failed to map trigger table!\n");
208 goto out_rel_header;
209 }
210 rc = einj_check_trigger_header(trigger_tab);
211 if (rc) {
212 pr_warning(FW_BUG EINJ_PFX
213 "The trigger error action table is invalid\n");
214 goto out_rel_header;
215 }
216 rc = -EIO;
217 table_size = trigger_tab->table_size;
218 r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
219 table_size - sizeof(*trigger_tab),
220 "APEI EINJ Trigger Table");
221 if (!r) {
222 pr_err(EINJ_PFX
223"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
224 (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
225 (unsigned long long)trigger_paddr + table_size);
226 goto out_rel_header;
227 }
228 iounmap(trigger_tab);
229 trigger_tab = ioremap_cache(trigger_paddr, table_size);
230 if (!trigger_tab) {
231 pr_err(EINJ_PFX "Failed to map trigger table!\n");
232 goto out_rel_entry;
233 }
234 trigger_entry = (struct acpi_whea_header *)
235 ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
236 apei_resources_init(&trigger_resources);
237 apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
238 ARRAY_SIZE(einj_ins_type),
239 trigger_entry, trigger_tab->entry_count);
240 rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
241 if (rc)
242 goto out_fini;
243 rc = apei_resources_sub(&trigger_resources, &einj_resources);
244 if (rc)
245 goto out_fini;
246 rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
247 if (rc)
248 goto out_fini;
249 rc = apei_exec_pre_map_gars(&trigger_ctx);
250 if (rc)
251 goto out_release;
252
253 rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR);
254
255 apei_exec_post_unmap_gars(&trigger_ctx);
256out_release:
257 apei_resources_release(&trigger_resources);
258out_fini:
259 apei_resources_fini(&trigger_resources);
260out_rel_entry:
261 release_mem_region(trigger_paddr + sizeof(*trigger_tab),
262 table_size - sizeof(*trigger_tab));
263out_rel_header:
264 release_mem_region(trigger_paddr, sizeof(*trigger_tab));
265out:
266 if (trigger_tab)
267 iounmap(trigger_tab);
268
269 return rc;
270}
271
272static int __einj_error_inject(u32 type, u64 param1, u64 param2)
273{
274 struct apei_exec_context ctx;
275 u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
276 int rc;
277
278 einj_exec_ctx_init(&ctx);
279
280 rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
281 if (rc)
282 return rc;
283 apei_exec_ctx_set_input(&ctx, type);
284 rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
285 if (rc)
286 return rc;
287 if (einj_param) {
288 writeq(param1, &einj_param->param1);
289 writeq(param2, &einj_param->param2);
290 }
291 rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
292 if (rc)
293 return rc;
294 for (;;) {
295 rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS);
296 if (rc)
297 return rc;
298 val = apei_exec_ctx_get_output(&ctx);
299 if (!(val & EINJ_OP_BUSY))
300 break;
301 if (einj_timedout(&timeout))
302 return -EIO;
303 }
304 rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS);
305 if (rc)
306 return rc;
307 val = apei_exec_ctx_get_output(&ctx);
308 if (val != EINJ_STATUS_SUCCESS)
309 return -EBUSY;
310
311 rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE);
312 if (rc)
313 return rc;
314 trigger_paddr = apei_exec_ctx_get_output(&ctx);
315 rc = __einj_error_trigger(trigger_paddr);
316 if (rc)
317 return rc;
318 rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);
319
320 return rc;
321}
322
323/* Inject the specified hardware error */
324static int einj_error_inject(u32 type, u64 param1, u64 param2)
325{
326 int rc;
327
328 mutex_lock(&einj_mutex);
329 rc = __einj_error_inject(type, param1, param2);
330 mutex_unlock(&einj_mutex);
331
332 return rc;
333}
334
335static u32 error_type;
336static u64 error_param1;
337static u64 error_param2;
338static struct dentry *einj_debug_dir;
339
340static int available_error_type_show(struct seq_file *m, void *v)
341{
342 int rc;
343 u32 available_error_type = 0;
344
345 rc = einj_get_available_error_type(&available_error_type);
346 if (rc)
347 return rc;
348 if (available_error_type & 0x0001)
349 seq_printf(m, "0x00000001\tProcessor Correctable\n");
350 if (available_error_type & 0x0002)
351 seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n");
352 if (available_error_type & 0x0004)
353 seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n");
354 if (available_error_type & 0x0008)
355 seq_printf(m, "0x00000008\tMemory Correctable\n");
356 if (available_error_type & 0x0010)
357 seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n");
358 if (available_error_type & 0x0020)
359 seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n");
360 if (available_error_type & 0x0040)
361 seq_printf(m, "0x00000040\tPCI Express Correctable\n");
362 if (available_error_type & 0x0080)
363 seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n");
364 if (available_error_type & 0x0100)
365 seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n");
366 if (available_error_type & 0x0200)
367 seq_printf(m, "0x00000200\tPlatform Correctable\n");
368 if (available_error_type & 0x0400)
369 seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n");
370 if (available_error_type & 0x0800)
371 seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n");
372
373 return 0;
374}
375
376static int available_error_type_open(struct inode *inode, struct file *file)
377{
378 return single_open(file, available_error_type_show, NULL);
379}
380
381static const struct file_operations available_error_type_fops = {
382 .open = available_error_type_open,
383 .read = seq_read,
384 .llseek = seq_lseek,
385 .release = single_release,
386};
387
388static int error_type_get(void *data, u64 *val)
389{
390 *val = error_type;
391
392 return 0;
393}
394
395static int error_type_set(void *data, u64 val)
396{
397 int rc;
398 u32 available_error_type = 0;
399
400 /* Only one error type can be specified */
401 if (val & (val - 1))
402 return -EINVAL;
403 rc = einj_get_available_error_type(&available_error_type);
404 if (rc)
405 return rc;
406 if (!(val & available_error_type))
407 return -EINVAL;
408 error_type = val;
409
410 return 0;
411}
412
413DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get,
414 error_type_set, "0x%llx\n");
415
416static int error_inject_set(void *data, u64 val)
417{
418 if (!error_type)
419 return -EINVAL;
420
421 return einj_error_inject(error_type, error_param1, error_param2);
422}
423
424DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
425 error_inject_set, "%llu\n");
426
427static int einj_check_table(struct acpi_table_einj *einj_tab)
428{
429 if (einj_tab->header_length != sizeof(struct acpi_table_einj))
430 return -EINVAL;
431 if (einj_tab->header.length < sizeof(struct acpi_table_einj))
432 return -EINVAL;
433 if (einj_tab->entries !=
434 (einj_tab->header.length - sizeof(struct acpi_table_einj)) /
435 sizeof(struct acpi_einj_entry))
436 return -EINVAL;
437
438 return 0;
439}
440
441static int __init einj_init(void)
442{
443 int rc;
444 u64 param_paddr;
445 acpi_status status;
446 struct dentry *fentry;
447 struct apei_exec_context ctx;
448
449 if (acpi_disabled)
450 return -ENODEV;
451
452 status = acpi_get_table(ACPI_SIG_EINJ, 0,
453 (struct acpi_table_header **)&einj_tab);
454 if (status == AE_NOT_FOUND) {
455 pr_info(EINJ_PFX "Table is not found!\n");
456 return -ENODEV;
457 } else if (ACPI_FAILURE(status)) {
458 const char *msg = acpi_format_exception(status);
459 pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
460 return -EINVAL;
461 }
462
463 rc = einj_check_table(einj_tab);
464 if (rc) {
465 pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n");
466 return -EINVAL;
467 }
468
469 rc = -ENOMEM;
470 einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
471 if (!einj_debug_dir)
472 goto err_cleanup;
473 fentry = debugfs_create_file("available_error_type", S_IRUSR,
474 einj_debug_dir, NULL,
475 &available_error_type_fops);
476 if (!fentry)
477 goto err_cleanup;
478 fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
479 einj_debug_dir, NULL, &error_type_fops);
480 if (!fentry)
481 goto err_cleanup;
482 fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
483 einj_debug_dir, &error_param1);
484 if (!fentry)
485 goto err_cleanup;
486 fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
487 einj_debug_dir, &error_param2);
488 if (!fentry)
489 goto err_cleanup;
490 fentry = debugfs_create_file("error_inject", S_IWUSR,
491 einj_debug_dir, NULL, &error_inject_fops);
492 if (!fentry)
493 goto err_cleanup;
494
495 apei_resources_init(&einj_resources);
496 einj_exec_ctx_init(&ctx);
497 rc = apei_exec_collect_resources(&ctx, &einj_resources);
498 if (rc)
499 goto err_fini;
500 rc = apei_resources_request(&einj_resources, "APEI EINJ");
501 if (rc)
502 goto err_fini;
503 rc = apei_exec_pre_map_gars(&ctx);
504 if (rc)
505 goto err_release;
506 param_paddr = einj_get_parameter_address();
507 if (param_paddr) {
508 einj_param = ioremap(param_paddr, sizeof(*einj_param));
509 rc = -ENOMEM;
510 if (!einj_param)
511 goto err_unmap;
512 }
513
514 pr_info(EINJ_PFX "Error INJection is initialized.\n");
515
516 return 0;
517
518err_unmap:
519 apei_exec_post_unmap_gars(&ctx);
520err_release:
521 apei_resources_release(&einj_resources);
522err_fini:
523 apei_resources_fini(&einj_resources);
524err_cleanup:
525 debugfs_remove_recursive(einj_debug_dir);
526
527 return rc;
528}
529
530static void __exit einj_exit(void)
531{
532 struct apei_exec_context ctx;
533
534 if (einj_param)
535 iounmap(einj_param);
536 einj_exec_ctx_init(&ctx);
537 apei_exec_post_unmap_gars(&ctx);
538 apei_resources_release(&einj_resources);
539 apei_resources_fini(&einj_resources);
540 debugfs_remove_recursive(einj_debug_dir);
541}
542
543module_init(einj_init);
544module_exit(einj_exit);
545
546MODULE_AUTHOR("Huang Ying");
547MODULE_DESCRIPTION("APEI Error INJection support");
548MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
new file mode 100644
index 000000000000..2ebc39115507
--- /dev/null
+++ b/drivers/acpi/apei/erst.c
@@ -0,0 +1,855 @@
1/*
2 * APEI Error Record Serialization Table support
3 *
4 * ERST is a way provided by APEI to save and retrieve hardware error
5 * information to and from a persistent store.
6 *
7 * For more information about ERST, please refer to ACPI Specification
8 * version 4.0, section 17.4.
9 *
10 * Copyright 2010 Intel Corp.
11 * Author: Huang Ying <ying.huang@intel.com>
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License version
15 * 2 as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/io.h>
32#include <linux/acpi.h>
33#include <linux/uaccess.h>
34#include <linux/cper.h>
35#include <linux/nmi.h>
36#include <acpi/apei.h>
37
38#include "apei-internal.h"
39
40#define ERST_PFX "ERST: "
41
42/* ERST command status */
43#define ERST_STATUS_SUCCESS 0x0
44#define ERST_STATUS_NOT_ENOUGH_SPACE 0x1
45#define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2
46#define ERST_STATUS_FAILED 0x3
47#define ERST_STATUS_RECORD_STORE_EMPTY 0x4
48#define ERST_STATUS_RECORD_NOT_FOUND 0x5
49
50#define ERST_TAB_ENTRY(tab) \
51 ((struct acpi_whea_header *)((char *)(tab) + \
52 sizeof(struct acpi_table_erst)))
53
54#define SPIN_UNIT 100 /* 100ns */
55/* Firmware should respond within 1 millisecond */
56#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
57#define FIRMWARE_MAX_STALL 50 /* 50us */
58
59int erst_disable;
60EXPORT_SYMBOL_GPL(erst_disable);
61
62static struct acpi_table_erst *erst_tab;
63
64/* ERST Error Log Address Range attributes */
65#define ERST_RANGE_RESERVED 0x0001
66#define ERST_RANGE_NVRAM 0x0002
67#define ERST_RANGE_SLOW 0x0004
68
69/*
70 * ERST Error Log Address Range, used as buffer for reading/writing
71 * error records.
72 */
73static struct erst_erange {
74 u64 base;
75 u64 size;
76 void __iomem *vaddr;
77 u32 attr;
78} erst_erange;
79
80/*
81 * Prevent the ERST interpreter from running simultaneously, because the
82 * corresponding firmware implementation may not work properly when
83 * invoked simultaneously.
84 *
85 * It is also used to provide exclusive access to the ERST Error Log
86 * Address Range.
87 */
88static DEFINE_SPINLOCK(erst_lock);
89
90static inline int erst_errno(int command_status)
91{
92 switch (command_status) {
93 case ERST_STATUS_SUCCESS:
94 return 0;
95 case ERST_STATUS_HARDWARE_NOT_AVAILABLE:
96 return -ENODEV;
97 case ERST_STATUS_NOT_ENOUGH_SPACE:
98 return -ENOSPC;
99 case ERST_STATUS_RECORD_STORE_EMPTY:
100 case ERST_STATUS_RECORD_NOT_FOUND:
101 return -ENOENT;
102 default:
103 return -EINVAL;
104 }
105}
106
107static int erst_timedout(u64 *t, u64 spin_unit)
108{
109 if ((s64)*t < spin_unit) {
110 pr_warning(FW_WARN ERST_PFX
111 "Firmware does not respond in time\n");
112 return 1;
113 }
114 *t -= spin_unit;
115 ndelay(spin_unit);
116 touch_nmi_watchdog();
117 return 0;
118}
119
120static int erst_exec_load_var1(struct apei_exec_context *ctx,
121 struct acpi_whea_header *entry)
122{
123 return __apei_exec_read_register(entry, &ctx->var1);
124}
125
126static int erst_exec_load_var2(struct apei_exec_context *ctx,
127 struct acpi_whea_header *entry)
128{
129 return __apei_exec_read_register(entry, &ctx->var2);
130}
131
132static int erst_exec_store_var1(struct apei_exec_context *ctx,
133 struct acpi_whea_header *entry)
134{
135 return __apei_exec_write_register(entry, ctx->var1);
136}
137
138static int erst_exec_add(struct apei_exec_context *ctx,
139 struct acpi_whea_header *entry)
140{
141 ctx->var1 += ctx->var2;
142 return 0;
143}
144
145static int erst_exec_subtract(struct apei_exec_context *ctx,
146 struct acpi_whea_header *entry)
147{
148 ctx->var1 -= ctx->var2;
149 return 0;
150}
151
152static int erst_exec_add_value(struct apei_exec_context *ctx,
153 struct acpi_whea_header *entry)
154{
155 int rc;
156 u64 val;
157
158 rc = __apei_exec_read_register(entry, &val);
159 if (rc)
160 return rc;
161 val += ctx->value;
162 rc = __apei_exec_write_register(entry, val);
163 return rc;
164}
165
166static int erst_exec_subtract_value(struct apei_exec_context *ctx,
167 struct acpi_whea_header *entry)
168{
169 int rc;
170 u64 val;
171
172 rc = __apei_exec_read_register(entry, &val);
173 if (rc)
174 return rc;
175 val -= ctx->value;
176 rc = __apei_exec_write_register(entry, val);
177 return rc;
178}
179
180static int erst_exec_stall(struct apei_exec_context *ctx,
181 struct acpi_whea_header *entry)
182{
183 u64 stall_time;
184
185 if (ctx->value > FIRMWARE_MAX_STALL) {
186 if (!in_nmi())
187 pr_warning(FW_WARN ERST_PFX
188 "Too long stall time for stall instruction: %llx.\n",
189 ctx->value);
190 stall_time = FIRMWARE_MAX_STALL;
191 } else
192 stall_time = ctx->value;
193 udelay(stall_time);
194 return 0;
195}
196
197static int erst_exec_stall_while_true(struct apei_exec_context *ctx,
198 struct acpi_whea_header *entry)
199{
200 int rc;
201 u64 val;
202 u64 timeout = FIRMWARE_TIMEOUT;
203 u64 stall_time;
204
205 if (ctx->var1 > FIRMWARE_MAX_STALL) {
206 if (!in_nmi())
207 pr_warning(FW_WARN ERST_PFX
208 "Too long stall time for stall while true instruction: %llx.\n",
209 ctx->var1);
210 stall_time = FIRMWARE_MAX_STALL;
211 } else
212 stall_time = ctx->var1;
213
214 for (;;) {
215 rc = __apei_exec_read_register(entry, &val);
216 if (rc)
217 return rc;
218 if (val != ctx->value)
219 break;
220 if (erst_timedout(&timeout, stall_time * NSEC_PER_USEC))
221 return -EIO;
222 }
223 return 0;
224}
225
226static int erst_exec_skip_next_instruction_if_true(
227 struct apei_exec_context *ctx,
228 struct acpi_whea_header *entry)
229{
230 int rc;
231 u64 val;
232
233 rc = __apei_exec_read_register(entry, &val);
234 if (rc)
235 return rc;
236 if (val == ctx->value) {
237 ctx->ip += 2;
238 return APEI_EXEC_SET_IP;
239 }
240
241 return 0;
242}
243
244static int erst_exec_goto(struct apei_exec_context *ctx,
245 struct acpi_whea_header *entry)
246{
247 ctx->ip = ctx->value;
248 return APEI_EXEC_SET_IP;
249}
250
251static int erst_exec_set_src_address_base(struct apei_exec_context *ctx,
252 struct acpi_whea_header *entry)
253{
254 return __apei_exec_read_register(entry, &ctx->src_base);
255}
256
257static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx,
258 struct acpi_whea_header *entry)
259{
260 return __apei_exec_read_register(entry, &ctx->dst_base);
261}
262
263static int erst_exec_move_data(struct apei_exec_context *ctx,
264 struct acpi_whea_header *entry)
265{
266 int rc;
267 u64 offset;
268
269 rc = __apei_exec_read_register(entry, &offset);
270 if (rc)
271 return rc;
272 memmove((void *)ctx->dst_base + offset,
273 (void *)ctx->src_base + offset,
274 ctx->var2);
275
276 return 0;
277}
278
279static struct apei_exec_ins_type erst_ins_type[] = {
280 [ACPI_ERST_READ_REGISTER] = {
281 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
282 .run = apei_exec_read_register,
283 },
284 [ACPI_ERST_READ_REGISTER_VALUE] = {
285 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
286 .run = apei_exec_read_register_value,
287 },
288 [ACPI_ERST_WRITE_REGISTER] = {
289 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
290 .run = apei_exec_write_register,
291 },
292 [ACPI_ERST_WRITE_REGISTER_VALUE] = {
293 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
294 .run = apei_exec_write_register_value,
295 },
296 [ACPI_ERST_NOOP] = {
297 .flags = 0,
298 .run = apei_exec_noop,
299 },
300 [ACPI_ERST_LOAD_VAR1] = {
301 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
302 .run = erst_exec_load_var1,
303 },
304 [ACPI_ERST_LOAD_VAR2] = {
305 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
306 .run = erst_exec_load_var2,
307 },
308 [ACPI_ERST_STORE_VAR1] = {
309 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
310 .run = erst_exec_store_var1,
311 },
312 [ACPI_ERST_ADD] = {
313 .flags = 0,
314 .run = erst_exec_add,
315 },
316 [ACPI_ERST_SUBTRACT] = {
317 .flags = 0,
318 .run = erst_exec_subtract,
319 },
320 [ACPI_ERST_ADD_VALUE] = {
321 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
322 .run = erst_exec_add_value,
323 },
324 [ACPI_ERST_SUBTRACT_VALUE] = {
325 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
326 .run = erst_exec_subtract_value,
327 },
328 [ACPI_ERST_STALL] = {
329 .flags = 0,
330 .run = erst_exec_stall,
331 },
332 [ACPI_ERST_STALL_WHILE_TRUE] = {
333 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
334 .run = erst_exec_stall_while_true,
335 },
336 [ACPI_ERST_SKIP_NEXT_IF_TRUE] = {
337 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
338 .run = erst_exec_skip_next_instruction_if_true,
339 },
340 [ACPI_ERST_GOTO] = {
341 .flags = 0,
342 .run = erst_exec_goto,
343 },
344 [ACPI_ERST_SET_SRC_ADDRESS_BASE] = {
345 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
346 .run = erst_exec_set_src_address_base,
347 },
348 [ACPI_ERST_SET_DST_ADDRESS_BASE] = {
349 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
350 .run = erst_exec_set_dst_address_base,
351 },
352 [ACPI_ERST_MOVE_DATA] = {
353 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
354 .run = erst_exec_move_data,
355 },
356};
357
358static inline void erst_exec_ctx_init(struct apei_exec_context *ctx)
359{
360 apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type),
361 ERST_TAB_ENTRY(erst_tab), erst_tab->entries);
362}
363
364static int erst_get_erange(struct erst_erange *range)
365{
366 struct apei_exec_context ctx;
367 int rc;
368
369 erst_exec_ctx_init(&ctx);
370 rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE);
371 if (rc)
372 return rc;
373 range->base = apei_exec_ctx_get_output(&ctx);
374 rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH);
375 if (rc)
376 return rc;
377 range->size = apei_exec_ctx_get_output(&ctx);
378 rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES);
379 if (rc)
380 return rc;
381 range->attr = apei_exec_ctx_get_output(&ctx);
382
383 return 0;
384}
385
386static ssize_t __erst_get_record_count(void)
387{
388 struct apei_exec_context ctx;
389 int rc;
390
391 erst_exec_ctx_init(&ctx);
392 rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT);
393 if (rc)
394 return rc;
395 return apei_exec_ctx_get_output(&ctx);
396}
397
398ssize_t erst_get_record_count(void)
399{
400 ssize_t count;
401 unsigned long flags;
402
403 if (erst_disable)
404 return -ENODEV;
405
406 spin_lock_irqsave(&erst_lock, flags);
407 count = __erst_get_record_count();
408 spin_unlock_irqrestore(&erst_lock, flags);
409
410 return count;
411}
412EXPORT_SYMBOL_GPL(erst_get_record_count);
413
414static int __erst_get_next_record_id(u64 *record_id)
415{
416 struct apei_exec_context ctx;
417 int rc;
418
419 erst_exec_ctx_init(&ctx);
420 rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID);
421 if (rc)
422 return rc;
423 *record_id = apei_exec_ctx_get_output(&ctx);
424
425 return 0;
426}
427
428/*
429 * Get the record ID of an existing error record on the persistent
430 * storage. If there is no error record on the persistent storage, the
431 * returned record_id is APEI_ERST_INVALID_RECORD_ID.
432 */
433int erst_get_next_record_id(u64 *record_id)
434{
435 int rc;
436 unsigned long flags;
437
438 if (erst_disable)
439 return -ENODEV;
440
441 spin_lock_irqsave(&erst_lock, flags);
442 rc = __erst_get_next_record_id(record_id);
443 spin_unlock_irqrestore(&erst_lock, flags);
444
445 return rc;
446}
447EXPORT_SYMBOL_GPL(erst_get_next_record_id);
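erst_get_record_count() and erst_get_next_record_id() above are the exported entry points another kernel component would use to see what the firmware currently holds. A minimal, hedged sketch; it assumes APEI_ERST_INVALID_RECORD_ID from <acpi/apei.h> marks the empty case as described in the comment above, and it deliberately stops after one ID since a real consumer would read and clear the record before asking again.

static void demo_peek_erst(void)
{
	ssize_t count = erst_get_record_count();
	u64 record_id;

	if (count <= 0)
		return;
	pr_info("ERST holds %zd record(s)\n", count);

	if (!erst_get_next_record_id(&record_id) &&
	    record_id != APEI_ERST_INVALID_RECORD_ID)
		pr_info("one stored record id: 0x%llx\n", record_id);
	/* read and clear the record here before requesting the next ID */
}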
448
449static int __erst_write_to_storage(u64 offset)
450{
451 struct apei_exec_context ctx;
452 u64 timeout = FIRMWARE_TIMEOUT;
453 u64 val;
454 int rc;
455
456 erst_exec_ctx_init(&ctx);
457 rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE);
458 if (rc)
459 return rc;
460 apei_exec_ctx_set_input(&ctx, offset);
461 rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
462 if (rc)
463 return rc;
464 rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
465 if (rc)
466 return rc;
467 for (;;) {
468 rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
469 if (rc)
470 return rc;
471 val = apei_exec_ctx_get_output(&ctx);
472 if (!val)
473 break;
474 if (erst_timedout(&timeout, SPIN_UNIT))
475 return -EIO;
476 }
477 rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
478 if (rc)
479 return rc;
480 val = apei_exec_ctx_get_output(&ctx);
481 rc = apei_exec_run(&ctx, ACPI_ERST_END);
482 if (rc)
483 return rc;
484
485 return erst_errno(val);
486}
487
488static int __erst_read_from_storage(u64 record_id, u64 offset)
489{
490 struct apei_exec_context ctx;
491 u64 timeout = FIRMWARE_TIMEOUT;
492 u64 val;
493 int rc;
494
495 erst_exec_ctx_init(&ctx);
496 rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ);
497 if (rc)
498 return rc;
499 apei_exec_ctx_set_input(&ctx, offset);
500 rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
501 if (rc)
502 return rc;
503 apei_exec_ctx_set_input(&ctx, record_id);
504 rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
505 if (rc)
506 return rc;
507 rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
508 if (rc)
509 return rc;
510 for (;;) {
511 rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
512 if (rc)
513 return rc;
514 val = apei_exec_ctx_get_output(&ctx);
515 if (!val)
516 break;
517 if (erst_timedout(&timeout, SPIN_UNIT))
518 return -EIO;
519 }
520 rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
521 if (rc)
522 return rc;
523 val = apei_exec_ctx_get_output(&ctx);
524 rc = apei_exec_run(&ctx, ACPI_ERST_END);
525 if (rc)
526 return rc;
527
528 return erst_errno(val);
529}
530
531static int __erst_clear_from_storage(u64 record_id)
532{
533 struct apei_exec_context ctx;
534 u64 timeout = FIRMWARE_TIMEOUT;
535 u64 val;
536 int rc;
537
538 erst_exec_ctx_init(&ctx);
539 rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR);
540 if (rc)
541 return rc;
542 apei_exec_ctx_set_input(&ctx, record_id);
543 rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
544 if (rc)
545 return rc;
546 rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
547 if (rc)
548 return rc;
549 for (;;) {
550 rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
551 if (rc)
552 return rc;
553 val = apei_exec_ctx_get_output(&ctx);
554 if (!val)
555 break;
556 if (erst_timedout(&timeout, SPIN_UNIT))
557 return -EIO;
558 }
559 rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
560 if (rc)
561 return rc;
562 val = apei_exec_ctx_get_output(&ctx);
563 rc = apei_exec_run(&ctx, ACPI_ERST_END);
564 if (rc)
565 return rc;
566
567 return erst_errno(val);
568}
569
570/* NVRAM ERST Error Log Address Range is not supported yet */
571static void pr_unimpl_nvram(void)
572{
573 if (printk_ratelimit())
574 pr_warning(ERST_PFX
575 "NVRAM ERST Log Address Range is not implemented yet\n");
576}
577
578static int __erst_write_to_nvram(const struct cper_record_header *record)
579{
580 /* do not print message, because printk is not safe for NMI */
581 return -ENOSYS;
582}
583
584static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset)
585{
586 pr_unimpl_nvram();
587 return -ENOSYS;
588}
589
590static int __erst_clear_from_nvram(u64 record_id)
591{
592 pr_unimpl_nvram();
593 return -ENOSYS;
594}
595
596int erst_write(const struct cper_record_header *record)
597{
598 int rc;
599 unsigned long flags;
600 struct cper_record_header *rcd_erange;
601
602 if (erst_disable)
603 return -ENODEV;
604
605 if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE))
606 return -EINVAL;
607
608 if (erst_erange.attr & ERST_RANGE_NVRAM) {
609 if (!spin_trylock_irqsave(&erst_lock, flags))
610 return -EBUSY;
611 rc = __erst_write_to_nvram(record);
612 spin_unlock_irqrestore(&erst_lock, flags);
613 return rc;
614 }
615
616 if (record->record_length > erst_erange.size)
617 return -EINVAL;
618
619 if (!spin_trylock_irqsave(&erst_lock, flags))
620 return -EBUSY;
621 memcpy(erst_erange.vaddr, record, record->record_length);
622 rcd_erange = erst_erange.vaddr;
623 /* signature for serialization system */
624 memcpy(&rcd_erange->persistence_information, "ER", 2);
625
626 rc = __erst_write_to_storage(0);
627 spin_unlock_irqrestore(&erst_lock, flags);
628
629 return rc;
630}
631EXPORT_SYMBOL_GPL(erst_write);
632
633static int __erst_read_to_erange(u64 record_id, u64 *offset)
634{
635 int rc;
636
637 if (erst_erange.attr & ERST_RANGE_NVRAM)
638 return __erst_read_to_erange_from_nvram(
639 record_id, offset);
640
641 rc = __erst_read_from_storage(record_id, 0);
642 if (rc)
643 return rc;
644 *offset = 0;
645
646 return 0;
647}
648
649static ssize_t __erst_read(u64 record_id, struct cper_record_header *record,
650 size_t buflen)
651{
652 int rc;
653 u64 offset, len = 0;
654 struct cper_record_header *rcd_tmp;
655
656 rc = __erst_read_to_erange(record_id, &offset);
657 if (rc)
658 return rc;
659 rcd_tmp = erst_erange.vaddr + offset;
660 len = rcd_tmp->record_length;
661 if (len <= buflen)
662 memcpy(record, rcd_tmp, len);
663
664 return len;
665}
666
667/*
668 * If the return value > buflen, the buffer is not big enough;
669 * if the return value < 0, something went wrong;
670 * otherwise the return value is the record length.
671 */
672ssize_t erst_read(u64 record_id, struct cper_record_header *record,
673 size_t buflen)
674{
675 ssize_t len;
676 unsigned long flags;
677
678 if (erst_disable)
679 return -ENODEV;
680
681 spin_lock_irqsave(&erst_lock, flags);
682 len = __erst_read(record_id, record, buflen);
683 spin_unlock_irqrestore(&erst_lock, flags);
684 return len;
685}
686EXPORT_SYMBOL_GPL(erst_read);
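
Because erst_read() reports the full record length even when the record does not fit, a caller is expected to compare the return value against its buffer size and retry with a larger buffer. A hedged sketch of such a caller, not part of this patch (the helper name and the 4096-byte first guess are illustrative, and <linux/slab.h> is assumed to be included):

	static int example_dump_record(u64 record_id)
	{
		struct cper_record_header *buf;
		ssize_t len;

		buf = kmalloc(4096, GFP_KERNEL);	/* arbitrary first guess */
		if (!buf)
			return -ENOMEM;
		len = erst_read(record_id, buf, 4096);
		if (len > 4096)
			pr_info("record needs %zd bytes, retry with a larger buffer\n", len);
		else if (len >= 0)
			pr_info("read a CPER record of %zd bytes\n", len);
		kfree(buf);
		return len < 0 ? len : 0;
	}
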
687
688/*
689 * If the return value > buflen, the buffer is not big enough;
690 * if the return value == 0, there are no more records to read;
691 * if the return value < 0, something went wrong;
692 * otherwise the return value is the record length.
693 */
694ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
695{
696 int rc;
697 ssize_t len;
698 unsigned long flags;
699 u64 record_id;
700
701 if (erst_disable)
702 return -ENODEV;
703
704 spin_lock_irqsave(&erst_lock, flags);
705 rc = __erst_get_next_record_id(&record_id);
706 if (rc) {
707 spin_unlock_irqrestore(&erst_lock, flags);
708 return rc;
709 }
710 /* no more record */
711 if (record_id == APEI_ERST_INVALID_RECORD_ID) {
712 spin_unlock_irqrestore(&erst_lock, flags);
713 return 0;
714 }
715
716 len = __erst_read(record_id, record, buflen);
717 spin_unlock_irqrestore(&erst_lock, flags);
718
719 return len;
720}
721EXPORT_SYMBOL_GPL(erst_read_next);
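
erst_read_next() combines the ID lookup and the read, so draining the whole store is a single loop over its tristate return value. A minimal sketch, not part of this patch (the buffer size and the consume/clear step are illustrative assumptions; <linux/slab.h> is assumed to be included):

	static void example_drain_erst(void)
	{
		struct cper_record_header *rcd;
		ssize_t len;

		rcd = kmalloc(0x10000, GFP_KERNEL);	/* illustrative upper bound */
		if (!rcd)
			return;
		for (;;) {
			len = erst_read_next(rcd, 0x10000);
			if (len <= 0)		/* 0: no more records, <0: error */
				break;
			if (len > 0x10000)	/* larger than our buffer; give up in this sketch */
				break;
			/* consume rcd here, then optionally erst_clear() its record ID */
		}
		kfree(rcd);
	}
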
722
723int erst_clear(u64 record_id)
724{
725 int rc;
726 unsigned long flags;
727
728 if (erst_disable)
729 return -ENODEV;
730
731 spin_lock_irqsave(&erst_lock, flags);
732 if (erst_erange.attr & ERST_RANGE_NVRAM)
733 rc = __erst_clear_from_nvram(record_id);
734 else
735 rc = __erst_clear_from_storage(record_id);
736 spin_unlock_irqrestore(&erst_lock, flags);
737
738 return rc;
739}
740EXPORT_SYMBOL_GPL(erst_clear);
741
742static int __init setup_erst_disable(char *str)
743{
744 erst_disable = 1;
745 return 0;
746}
747
748__setup("erst_disable", setup_erst_disable);
749
750static int erst_check_table(struct acpi_table_erst *erst_tab)
751{
752 if (erst_tab->header_length != sizeof(struct acpi_table_erst))
753 return -EINVAL;
754 if (erst_tab->header.length < sizeof(struct acpi_table_erst))
755 return -EINVAL;
756 if (erst_tab->entries !=
757 (erst_tab->header.length - sizeof(struct acpi_table_erst)) /
758 sizeof(struct acpi_erst_entry))
759 return -EINVAL;
760
761 return 0;
762}
763
764static int __init erst_init(void)
765{
766 int rc = 0;
767 acpi_status status;
768 struct apei_exec_context ctx;
769 struct apei_resources erst_resources;
770 struct resource *r;
771
772 if (acpi_disabled)
773 goto err;
774
775 if (erst_disable) {
776 pr_info(ERST_PFX
777 "Error Record Serialization Table (ERST) support is disabled.\n");
778 goto err;
779 }
780
781 status = acpi_get_table(ACPI_SIG_ERST, 0,
782 (struct acpi_table_header **)&erst_tab);
783 if (status == AE_NOT_FOUND) {
784 pr_err(ERST_PFX "Table is not found!\n");
785 goto err;
786 } else if (ACPI_FAILURE(status)) {
787 const char *msg = acpi_format_exception(status);
788 pr_err(ERST_PFX "Failed to get table, %s\n", msg);
789 rc = -EINVAL;
790 goto err;
791 }
792
793 rc = erst_check_table(erst_tab);
794 if (rc) {
795 pr_err(FW_BUG ERST_PFX "ERST table is invalid\n");
796 goto err;
797 }
798
799 apei_resources_init(&erst_resources);
800 erst_exec_ctx_init(&ctx);
801 rc = apei_exec_collect_resources(&ctx, &erst_resources);
802 if (rc)
803 goto err_fini;
804 rc = apei_resources_request(&erst_resources, "APEI ERST");
805 if (rc)
806 goto err_fini;
807 rc = apei_exec_pre_map_gars(&ctx);
808 if (rc)
809 goto err_release;
810 rc = erst_get_erange(&erst_erange);
811 if (rc) {
812 if (rc == -ENODEV)
813 pr_info(ERST_PFX
814 "The corresponding hardware device or firmware implementation "
815 "is not available.\n");
816 else
817 pr_err(ERST_PFX
818 "Failed to get Error Log Address Range.\n");
819 goto err_unmap_reg;
820 }
821
822 r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
823 if (!r) {
824 pr_err(ERST_PFX
825 "Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n",
826 (unsigned long long)erst_erange.base,
827 (unsigned long long)erst_erange.base + erst_erange.size);
828 rc = -EIO;
829 goto err_unmap_reg;
830 }
831 rc = -ENOMEM;
832 erst_erange.vaddr = ioremap_cache(erst_erange.base,
833 erst_erange.size);
834 if (!erst_erange.vaddr)
835 goto err_release_erange;
836
837 pr_info(ERST_PFX
838 "Error Record Serialization Table (ERST) support is initialized.\n");
839
840 return 0;
841
842err_release_erange:
843 release_mem_region(erst_erange.base, erst_erange.size);
844err_unmap_reg:
845 apei_exec_post_unmap_gars(&ctx);
846err_release:
847 apei_resources_release(&erst_resources);
848err_fini:
849 apei_resources_fini(&erst_resources);
850err:
851 erst_disable = 1;
852 return rc;
853}
854
855device_initcall(erst_init);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
new file mode 100644
index 000000000000..fd0cc016a099
--- /dev/null
+++ b/drivers/acpi/apei/ghes.c
@@ -0,0 +1,427 @@
1/*
2 * APEI Generic Hardware Error Source support
3 *
4 * Generic Hardware Error Source provides a way to report platform
5 * hardware errors (such as those from the chipset). It works in so-called
6 * "Firmware First" mode, that is, hardware errors are reported to
7 * firmware first, then reported to Linux by the firmware. This way,
8 * some non-standard hardware error registers or non-standard hardware
9 * links can be checked by the firmware to produce more hardware error
10 * information for Linux.
11 *
12 * For more information about Generic Hardware Error Source, please
13 * refer to ACPI Specification version 4.0, section 17.3.2.6
14 *
15 * Now, only SCI notification type and memory errors are
16 * supported. More notification types and hardware error types will be
17 * added later.
18 *
19 * Copyright 2010 Intel Corp.
20 * Author: Huang Ying <ying.huang@intel.com>
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License version
24 * 2 as published by the Free Software Foundation;
25 *
26 * This program is distributed in the hope that it will be useful,
27 * but WITHOUT ANY WARRANTY; without even the implied warranty of
28 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
29 * GNU General Public License for more details.
30 *
31 * You should have received a copy of the GNU General Public License
32 * along with this program; if not, write to the Free Software
33 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
34 */
35
36#include <linux/kernel.h>
37#include <linux/module.h>
38#include <linux/init.h>
39#include <linux/acpi.h>
40#include <linux/io.h>
41#include <linux/interrupt.h>
42#include <linux/cper.h>
43#include <linux/kdebug.h>
44#include <acpi/apei.h>
45#include <acpi/atomicio.h>
46#include <acpi/hed.h>
47#include <asm/mce.h>
48
49#include "apei-internal.h"
50
51#define GHES_PFX "GHES: "
52
53#define GHES_ESTATUS_MAX_SIZE 65536
54
55/*
56 * One struct ghes is created for each generic hardware error
57 * source.
58 *
59 * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
60 * handler. Handler for one generic hardware error source is only
61 * triggered after the previous one is done, so the handler can use
62 * struct ghes without locking.
63 *
64 * estatus: memory buffer for error status block, allocated during
65 * HEST parsing.
66 */
67#define GHES_TO_CLEAR 0x0001
68
69struct ghes {
70 struct acpi_hest_generic *generic;
71 struct acpi_hest_generic_status *estatus;
72 struct list_head list;
73 u64 buffer_paddr;
74 unsigned long flags;
75};
76
77/*
78 * Error source lists, one list for each notification method. The
79 * members in lists are struct ghes.
80 *
81 * The list members are only added in HEST parsing and deleted during
82 * module_exit, that is, single-threaded. So no lock is needed for
83 * that.
84 *
85 * But mutual exclusion is needed between member adding/deleting
86 * and the timer/IRQ/SCI/NMI handlers, which may traverse the list. RCU is
87 * used for that.
88 */
89static LIST_HEAD(ghes_sci);
90
91static struct ghes *ghes_new(struct acpi_hest_generic *generic)
92{
93 struct ghes *ghes;
94 unsigned int error_block_length;
95 int rc;
96
97 ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
98 if (!ghes)
99 return ERR_PTR(-ENOMEM);
100 ghes->generic = generic;
101 INIT_LIST_HEAD(&ghes->list);
102 rc = acpi_pre_map_gar(&generic->error_status_address);
103 if (rc)
104 goto err_free;
105 error_block_length = generic->error_block_length;
106 if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
107 pr_warning(FW_WARN GHES_PFX
108 "Error status block length is too long: %u for "
109 "generic hardware error source: %d.\n",
110 error_block_length, generic->header.source_id);
111 error_block_length = GHES_ESTATUS_MAX_SIZE;
112 }
113 ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
114 if (!ghes->estatus) {
115 rc = -ENOMEM;
116 goto err_unmap;
117 }
118
119 return ghes;
120
121err_unmap:
122 acpi_post_unmap_gar(&generic->error_status_address);
123err_free:
124 kfree(ghes);
125 return ERR_PTR(rc);
126}
127
128static void ghes_fini(struct ghes *ghes)
129{
130 kfree(ghes->estatus);
131 acpi_post_unmap_gar(&ghes->generic->error_status_address);
132}
133
134enum {
135 GHES_SER_NO = 0x0,
136 GHES_SER_CORRECTED = 0x1,
137 GHES_SER_RECOVERABLE = 0x2,
138 GHES_SER_PANIC = 0x3,
139};
140
141static inline int ghes_severity(int severity)
142{
143 switch (severity) {
144 case CPER_SER_INFORMATIONAL:
145 return GHES_SER_NO;
146 case CPER_SER_CORRECTED:
147 return GHES_SER_CORRECTED;
148 case CPER_SER_RECOVERABLE:
149 return GHES_SER_RECOVERABLE;
150 case CPER_SER_FATAL:
151 return GHES_SER_PANIC;
152 default:
153 /* Unknown, go panic */
154 return GHES_SER_PANIC;
155 }
156}
157
158/* SCI handler runs in a work queue, so ioremap can be used here */
159static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
160 int from_phys)
161{
162 void *vaddr;
163
164 vaddr = ioremap_cache(paddr, len);
165 if (!vaddr)
166 return -ENOMEM;
167 if (from_phys)
168 memcpy(buffer, vaddr, len);
169 else
170 memcpy(vaddr, buffer, len);
171 iounmap(vaddr);
172
173 return 0;
174}
175
176static int ghes_read_estatus(struct ghes *ghes, int silent)
177{
178 struct acpi_hest_generic *g = ghes->generic;
179 u64 buf_paddr;
180 u32 len;
181 int rc;
182
183 rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
184 if (rc) {
185 if (!silent && printk_ratelimit())
186 pr_warning(FW_WARN GHES_PFX
187"Failed to read error status block address for hardware error source: %d.\n",
188 g->header.source_id);
189 return -EIO;
190 }
191 if (!buf_paddr)
192 return -ENOENT;
193
194 rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
195 sizeof(*ghes->estatus), 1);
196 if (rc)
197 return rc;
198 if (!ghes->estatus->block_status)
199 return -ENOENT;
200
201 ghes->buffer_paddr = buf_paddr;
202 ghes->flags |= GHES_TO_CLEAR;
203
204 rc = -EIO;
205 len = apei_estatus_len(ghes->estatus);
206 if (len < sizeof(*ghes->estatus))
207 goto err_read_block;
208 if (len > ghes->generic->error_block_length)
209 goto err_read_block;
210 if (apei_estatus_check_header(ghes->estatus))
211 goto err_read_block;
212 rc = ghes_copy_tofrom_phys(ghes->estatus + 1,
213 buf_paddr + sizeof(*ghes->estatus),
214 len - sizeof(*ghes->estatus), 1);
215 if (rc)
216 return rc;
217 if (apei_estatus_check(ghes->estatus))
218 goto err_read_block;
219 rc = 0;
220
221err_read_block:
222 if (rc && !silent)
223 pr_warning(FW_WARN GHES_PFX
224 "Failed to read error status block!\n");
225 return rc;
226}
227
228static void ghes_clear_estatus(struct ghes *ghes)
229{
230 ghes->estatus->block_status = 0;
231 if (!(ghes->flags & GHES_TO_CLEAR))
232 return;
233 ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
234 sizeof(ghes->estatus->block_status), 0);
235 ghes->flags &= ~GHES_TO_CLEAR;
236}
237
238static void ghes_do_proc(struct ghes *ghes)
239{
240 int ser, processed = 0;
241 struct acpi_hest_generic_data *gdata;
242
243 ser = ghes_severity(ghes->estatus->error_severity);
244 apei_estatus_for_each_section(ghes->estatus, gdata) {
245#ifdef CONFIG_X86_MCE
246 if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
247 CPER_SEC_PLATFORM_MEM)) {
248 apei_mce_report_mem_error(
249 ser == GHES_SER_CORRECTED,
250 (struct cper_sec_mem_err *)(gdata+1));
251 processed = 1;
252 }
253#endif
254 }
255
256 if (!processed && printk_ratelimit())
257 pr_warning(GHES_PFX
258 "Unknown error record from generic hardware error source: %d\n",
259 ghes->generic->header.source_id);
260}
261
262static int ghes_proc(struct ghes *ghes)
263{
264 int rc;
265
266 rc = ghes_read_estatus(ghes, 0);
267 if (rc)
268 goto out;
269 ghes_do_proc(ghes);
270
271out:
272 ghes_clear_estatus(ghes);
273 return 0;
274}
275
276static int ghes_notify_sci(struct notifier_block *this,
277 unsigned long event, void *data)
278{
279 struct ghes *ghes;
280 int ret = NOTIFY_DONE;
281
282 rcu_read_lock();
283 list_for_each_entry_rcu(ghes, &ghes_sci, list) {
284 if (!ghes_proc(ghes))
285 ret = NOTIFY_OK;
286 }
287 rcu_read_unlock();
288
289 return ret;
290}
291
292static struct notifier_block ghes_notifier_sci = {
293 .notifier_call = ghes_notify_sci,
294};
295
296static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data)
297{
298 struct acpi_hest_generic *generic;
299 struct ghes *ghes = NULL;
300 int rc = 0;
301
302 if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
303 return 0;
304
305 generic = (struct acpi_hest_generic *)hest_hdr;
306 if (!generic->enabled)
307 return 0;
308
309 if (generic->error_block_length <
310 sizeof(struct acpi_hest_generic_status)) {
311 pr_warning(FW_BUG GHES_PFX
312"Invalid error block length: %u for generic hardware error source: %d\n",
313 generic->error_block_length,
314 generic->header.source_id);
315 goto err;
316 }
317 if (generic->records_to_preallocate == 0) {
318 pr_warning(FW_BUG GHES_PFX
319"Invalid records to preallocate: %u for generic hardware error source: %d\n",
320 generic->records_to_preallocate,
321 generic->header.source_id);
322 goto err;
323 }
324 ghes = ghes_new(generic);
325 if (IS_ERR(ghes)) {
326 rc = PTR_ERR(ghes);
327 ghes = NULL;
328 goto err;
329 }
330 switch (generic->notify.type) {
331 case ACPI_HEST_NOTIFY_POLLED:
332 pr_warning(GHES_PFX
333"Generic hardware error source: %d notified via POLL is not supported!\n",
334 generic->header.source_id);
335 break;
336 case ACPI_HEST_NOTIFY_EXTERNAL:
337 case ACPI_HEST_NOTIFY_LOCAL:
338 pr_warning(GHES_PFX
339"Generic hardware error source: %d notified via IRQ is not supported!\n",
340 generic->header.source_id);
341 break;
342 case ACPI_HEST_NOTIFY_SCI:
343 if (list_empty(&ghes_sci))
344 register_acpi_hed_notifier(&ghes_notifier_sci);
345 list_add_rcu(&ghes->list, &ghes_sci);
346 break;
347 case ACPI_HEST_NOTIFY_NMI:
348 pr_warning(GHES_PFX
349"Generic hardware error source: %d notified via NMI is not supported!\n",
350 generic->header.source_id);
351 break;
352 default:
353 pr_warning(FW_WARN GHES_PFX
354 "Unknown notification type: %u for generic hardware error source: %d\n",
355 generic->notify.type, generic->header.source_id);
356 break;
357 }
358
359 return 0;
360err:
361 if (ghes)
362 ghes_fini(ghes);
363 return rc;
364}
365
366static void ghes_cleanup(void)
367{
368 struct ghes *ghes, *nghes;
369
370 if (!list_empty(&ghes_sci))
371 unregister_acpi_hed_notifier(&ghes_notifier_sci);
372
373 synchronize_rcu();
374
375 list_for_each_entry_safe(ghes, nghes, &ghes_sci, list) {
376 list_del(&ghes->list);
377 ghes_fini(ghes);
378 kfree(ghes);
379 }
380}
381
382static int __init ghes_init(void)
383{
384 int rc;
385
386 if (acpi_disabled)
387 return -ENODEV;
388
389 if (hest_disable) {
390 pr_info(GHES_PFX "HEST is not enabled!\n");
391 return -EINVAL;
392 }
393
394 rc = apei_hest_parse(hest_ghes_parse, NULL);
395 if (rc) {
396 pr_err(GHES_PFX
397 "Error during parsing HEST generic hardware error sources.\n");
398 goto err_cleanup;
399 }
400
401 if (list_empty(&ghes_sci)) {
402 pr_info(GHES_PFX
403 "No functional generic hardware error sources.\n");
404 rc = -ENODEV;
405 goto err_cleanup;
406 }
407
408 pr_info(GHES_PFX
409 "Generic Hardware Error Source support is initialized.\n");
410
411 return 0;
412err_cleanup:
413 ghes_cleanup();
414 return rc;
415}
416
417static void __exit ghes_exit(void)
418{
419 ghes_cleanup();
420}
421
422module_init(ghes_init);
423module_exit(ghes_exit);
424
425MODULE_AUTHOR("Huang Ying");
426MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
427MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
new file mode 100644
index 000000000000..e7f40d362cb3
--- /dev/null
+++ b/drivers/acpi/apei/hest.c
@@ -0,0 +1,173 @@
1/*
2 * APEI Hardware Error Source Table support
3 *
4 * HEST describes error sources in detail and communicates operational
5 * parameters (i.e. severity levels, masking bits, and threshold
6 * values) to Linux as necessary. It also allows the BIOS to report
7 * non-standard error sources to Linux (for example, chipset-specific
8 * error registers).
9 *
10 * For more information about HEST, please refer to ACPI Specification
11 * version 4.0, section 17.3.2.
12 *
13 * Copyright 2009 Intel Corp.
14 * Author: Huang Ying <ying.huang@intel.com>
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License version
18 * 2 as published by the Free Software Foundation;
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 */
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/acpi.h>
34#include <linux/kdebug.h>
35#include <linux/highmem.h>
36#include <linux/io.h>
37#include <acpi/apei.h>
38
39#include "apei-internal.h"
40
41#define HEST_PFX "HEST: "
42
43int hest_disable;
44EXPORT_SYMBOL_GPL(hest_disable);
45
46/* HEST table parsing */
47
48static struct acpi_table_hest *hest_tab;
49
50static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data)
51{
52 return 0;
53}
54
55static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
56 [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */
57 [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1,
58 [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi),
59 [ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root),
60 [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer),
61 [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge),
62 [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic),
63};
64
65static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
66{
67 u16 hest_type = hest_hdr->type;
68 int len;
69
70 if (hest_type >= ACPI_HEST_TYPE_RESERVED)
71 return 0;
72
73 len = hest_esrc_len_tab[hest_type];
74
75 if (hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) {
76 struct acpi_hest_ia_corrected *cmc;
77 cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
78 len = sizeof(*cmc) + cmc->num_hardware_banks *
79 sizeof(struct acpi_hest_ia_error_bank);
80 } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) {
81 struct acpi_hest_ia_machine_check *mc;
82 mc = (struct acpi_hest_ia_machine_check *)hest_hdr;
83 len = sizeof(*mc) + mc->num_hardware_banks *
84 sizeof(struct acpi_hest_ia_error_bank);
85 }
86 BUG_ON(len == -1);
87
88 return len;
89}
90
91int apei_hest_parse(apei_hest_func_t func, void *data)
92{
93 struct acpi_hest_header *hest_hdr;
94 int i, rc, len;
95
96 if (hest_disable)
97 return -EINVAL;
98
99 hest_hdr = (struct acpi_hest_header *)(hest_tab + 1);
100 for (i = 0; i < hest_tab->error_source_count; i++) {
101 len = hest_esrc_len(hest_hdr);
102 if (!len) {
103 pr_warning(FW_WARN HEST_PFX
104 "Unknown or unused hardware error source "
105 "type: %d for hardware error source: %d.\n",
106 hest_hdr->type, hest_hdr->source_id);
107 return -EINVAL;
108 }
109 if ((void *)hest_hdr + len >
110 (void *)hest_tab + hest_tab->header.length) {
111 pr_warning(FW_BUG HEST_PFX
112 "Table contents overflow for hardware error source: %d.\n",
113 hest_hdr->source_id);
114 return -EINVAL;
115 }
116
117 rc = func(hest_hdr, data);
118 if (rc)
119 return rc;
120
121 hest_hdr = (void *)hest_hdr + len;
122 }
123
124 return 0;
125}
126EXPORT_SYMBOL_GPL(apei_hest_parse);
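
apei_hest_parse() walks every error source entry and hands it to the callback, aborting the walk on the first non-zero return. A hedged sketch of a callback that counts enabled Generic Error Sources, not part of this patch (the function name is illustrative):

	static int example_count_ghes(struct acpi_hest_header *hest_hdr, void *data)
	{
		struct acpi_hest_generic *generic;
		int *count = data;

		if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
			return 0;
		generic = (struct acpi_hest_generic *)hest_hdr;
		if (generic->enabled)
			(*count)++;
		return 0;		/* returning non-zero would stop the walk */
	}

	/* usage: int n = 0; apei_hest_parse(example_count_ghes, &n); */
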
127
128static int __init setup_hest_disable(char *str)
129{
130 hest_disable = 1;
131 return 0;
132}
133
134__setup("hest_disable", setup_hest_disable);
135
136static int __init hest_init(void)
137{
138 acpi_status status;
139 int rc = -ENODEV;
140
141 if (acpi_disabled)
142 goto err;
143
144 if (hest_disable) {
145 pr_info(HEST_PFX "HEST table parsing is disabled.\n");
146 goto err;
147 }
148
149 status = acpi_get_table(ACPI_SIG_HEST, 0,
150 (struct acpi_table_header **)&hest_tab);
151 if (status == AE_NOT_FOUND) {
152 pr_info(HEST_PFX "Table is not found!\n");
153 goto err;
154 } else if (ACPI_FAILURE(status)) {
155 const char *msg = acpi_format_exception(status);
156 pr_err(HEST_PFX "Failed to get table, %s\n", msg);
157 rc = -EINVAL;
158 goto err;
159 }
160
161 rc = apei_hest_parse(hest_void_parse, NULL);
162 if (rc)
163 goto err;
164
165 pr_info(HEST_PFX "HEST table parsing is initialized.\n");
166
167 return 0;
168err:
169 hest_disable = 1;
170 return rc;
171}
172
173subsys_initcall(hest_init);
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
new file mode 100644
index 000000000000..814b19249616
--- /dev/null
+++ b/drivers/acpi/atomicio.c
@@ -0,0 +1,360 @@
1/*
2 * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
3 * accessing in atomic context.
4 *
5 * This is used by the NMI handler to access IO memory areas, because
6 * ioremap/iounmap cannot be used in an NMI handler. The IO memory area
7 * is pre-mapped in process context and accessed in NMI handler.
8 *
9 * Copyright (C) 2009-2010, Intel Corp.
10 * Author: Huang Ying <ying.huang@intel.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License version
14 * 2 as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/acpi.h>
30#include <linux/io.h>
31#include <linux/kref.h>
32#include <linux/rculist.h>
33#include <linux/interrupt.h>
34#include <acpi/atomicio.h>
35
36#define ACPI_PFX "ACPI: "
37
38static LIST_HEAD(acpi_iomaps);
39/*
40 * Used for mutual exclusion between writers of the acpi_iomaps list;
41 * for synchronization between readers and writers, RCU is used.
42 */
43static DEFINE_SPINLOCK(acpi_iomaps_lock);
44
45struct acpi_iomap {
46 struct list_head list;
47 void __iomem *vaddr;
48 unsigned long size;
49 phys_addr_t paddr;
50 struct kref ref;
51};
52
53/* acpi_iomaps_lock or RCU read lock must be held before calling */
54static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
55 unsigned long size)
56{
57 struct acpi_iomap *map;
58
59 list_for_each_entry_rcu(map, &acpi_iomaps, list) {
60 if (map->paddr + map->size >= paddr + size &&
61 map->paddr <= paddr)
62 return map;
63 }
64 return NULL;
65}
66
67/*
68 * Atomic "ioremap" used by NMI handler, if the specified IO memory
69 * area is not pre-mapped, NULL will be returned.
70 *
71 * acpi_iomaps_lock or RCU read lock must be held before calling
72 */
73static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
74 unsigned long size)
75{
76 struct acpi_iomap *map;
77
78 map = __acpi_find_iomap(paddr, size);
79 if (map)
80 return map->vaddr + (paddr - map->paddr);
81 else
82 return NULL;
83}
84
85/* acpi_iomaps_lock must be held before calling */
86static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
87 unsigned long size)
88{
89 struct acpi_iomap *map;
90
91 map = __acpi_find_iomap(paddr, size);
92 if (map) {
93 kref_get(&map->ref);
94 return map->vaddr + (paddr - map->paddr);
95 } else
96 return NULL;
97}
98
99/*
100 * Used to pre-map the specified IO memory area. First try to find
101 * whether the area is already pre-mapped; if it is, increase the
102 * reference count (in __acpi_try_ioremap) and return; otherwise, do
103 * the real ioremap, and add the mapping into acpi_iomaps list.
104 */
105static void __iomem *acpi_pre_map(phys_addr_t paddr,
106 unsigned long size)
107{
108 void __iomem *vaddr;
109 struct acpi_iomap *map;
110 unsigned long pg_sz, flags;
111 phys_addr_t pg_off;
112
113 spin_lock_irqsave(&acpi_iomaps_lock, flags);
114 vaddr = __acpi_try_ioremap(paddr, size);
115 spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
116 if (vaddr)
117 return vaddr;
118
119 pg_off = paddr & PAGE_MASK;
120 pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
121 vaddr = ioremap(pg_off, pg_sz);
122 if (!vaddr)
123 return NULL;
124 map = kmalloc(sizeof(*map), GFP_KERNEL);
125 if (!map)
126 goto err_unmap;
127 INIT_LIST_HEAD(&map->list);
128 map->paddr = pg_off;
129 map->size = pg_sz;
130 map->vaddr = vaddr;
131 kref_init(&map->ref);
132
133 spin_lock_irqsave(&acpi_iomaps_lock, flags);
134 vaddr = __acpi_try_ioremap(paddr, size);
135 if (vaddr) {
136 spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
137 iounmap(map->vaddr);
138 kfree(map);
139 return vaddr;
140 }
141 list_add_tail_rcu(&map->list, &acpi_iomaps);
142 spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
143
144 return vaddr + (paddr - pg_off);
145err_unmap:
146 iounmap(vaddr);
147 return NULL;
148}
149
150/* acpi_iomaps_lock must be held before calling */
151static void __acpi_kref_del_iomap(struct kref *ref)
152{
153 struct acpi_iomap *map;
154
155 map = container_of(ref, struct acpi_iomap, ref);
156 list_del_rcu(&map->list);
157}
158
159/*
160 * Used to post-unmap the specified IO memory area. The iounmap is
161 * done only if the reference count goes to zero.
162 */
163static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
164{
165 struct acpi_iomap *map;
166 unsigned long flags;
167 int del;
168
169 spin_lock_irqsave(&acpi_iomaps_lock, flags);
170 map = __acpi_find_iomap(paddr, size);
171 BUG_ON(!map);
172 del = kref_put(&map->ref, __acpi_kref_del_iomap);
173 spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
174
175 if (!del)
176 return;
177
178 synchronize_rcu();
179 iounmap(map->vaddr);
180 kfree(map);
181}
182
183/* In NMI handler, should set silent = 1 */
184static int acpi_check_gar(struct acpi_generic_address *reg,
185 u64 *paddr, int silent)
186{
187 u32 width, space_id;
188
189 width = reg->bit_width;
190 space_id = reg->space_id;
191 /* Handle possible alignment issues */
192 memcpy(paddr, &reg->address, sizeof(*paddr));
193 if (!*paddr) {
194 if (!silent)
195 pr_warning(FW_BUG ACPI_PFX
196 "Invalid physical address in GAR [0x%llx/%u/%u]\n",
197 *paddr, width, space_id);
198 return -EINVAL;
199 }
200
201 if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
202 if (!silent)
203 pr_warning(FW_BUG ACPI_PFX
204 "Invalid bit width in GAR [0x%llx/%u/%u]\n",
205 *paddr, width, space_id);
206 return -EINVAL;
207 }
208
209 if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
210 space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
211 if (!silent)
212 pr_warning(FW_BUG ACPI_PFX
213 "Invalid address space type in GAR [0x%llx/%u/%u]\n",
214 *paddr, width, space_id);
215 return -EINVAL;
216 }
217
218 return 0;
219}
220
221/* Pre-map, working on GAR */
222int acpi_pre_map_gar(struct acpi_generic_address *reg)
223{
224 u64 paddr;
225 void __iomem *vaddr;
226 int rc;
227
228 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
229 return 0;
230
231 rc = acpi_check_gar(reg, &paddr, 0);
232 if (rc)
233 return rc;
234
235 vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
236 if (!vaddr)
237 return -EIO;
238
239 return 0;
240}
241EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
242
243/* Post-unmap, working on GAR */
244int acpi_post_unmap_gar(struct acpi_generic_address *reg)
245{
246 u64 paddr;
247 int rc;
248
249 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
250 return 0;
251
252 rc = acpi_check_gar(reg, &paddr, 0);
253 if (rc)
254 return rc;
255
256 acpi_post_unmap(paddr, reg->bit_width / 8);
257
258 return 0;
259}
260EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
261
262/*
263 * Can be used in atomic (including NMI) or process context. RCU read
264 * lock can only be released after accessing the IO memory area.
265 */
266static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
267{
268 void __iomem *addr;
269
270 rcu_read_lock();
271 addr = __acpi_ioremap_fast(paddr, width);
272 switch (width) {
273 case 8:
274 *val = readb(addr);
275 break;
276 case 16:
277 *val = readw(addr);
278 break;
279 case 32:
280 *val = readl(addr);
281 break;
282 case 64:
283 *val = readq(addr);
284 break;
285 default:
286 return -EINVAL;
287 }
288 rcu_read_unlock();
289
290 return 0;
291}
292
293static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
294{
295 void __iomem *addr;
296
297 rcu_read_lock();
298 addr = __acpi_ioremap_fast(paddr, width);
299 switch (width) {
300 case 8:
301 writeb(val, addr);
302 break;
303 case 16:
304 writew(val, addr);
305 break;
306 case 32:
307 writel(val, addr);
308 break;
309 case 64:
310 writeq(val, addr);
311 break;
312 default:
313 return -EINVAL;
314 }
315 rcu_read_unlock();
316
317 return 0;
318}
319
320/* GAR accessing in atomic (including NMI) or process context */
321int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
322{
323 u64 paddr;
324 int rc;
325
326 rc = acpi_check_gar(reg, &paddr, 1);
327 if (rc)
328 return rc;
329
330 *val = 0;
331 switch (reg->space_id) {
332 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
333 return acpi_atomic_read_mem(paddr, val, reg->bit_width);
334 case ACPI_ADR_SPACE_SYSTEM_IO:
335 return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
336 default:
337 return -EINVAL;
338 }
339}
340EXPORT_SYMBOL_GPL(acpi_atomic_read);
341
342int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
343{
344 u64 paddr;
345 int rc;
346
347 rc = acpi_check_gar(reg, &paddr, 1);
348 if (rc)
349 return rc;
350
351 switch (reg->space_id) {
352 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
353 return acpi_atomic_write_mem(paddr, val, reg->bit_width);
354 case ACPI_ADR_SPACE_SYSTEM_IO:
355 return acpi_os_write_port(paddr, val, reg->bit_width);
356 default:
357 return -EINVAL;
358 }
359}
360EXPORT_SYMBOL_GPL(acpi_atomic_write);
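
The intended pairing is: pre-map the GAR from process context (where ioremap may sleep), access it with acpi_atomic_read()/acpi_atomic_write() from atomic or NMI context, then unmap it from process context again. A minimal sketch under those assumptions (reg is assumed to point at an already validated struct acpi_generic_address; the fragment itself is illustrative only):

	u64 val;

	/* process context: set-up, may sleep in ioremap */
	if (acpi_pre_map_gar(reg))
		return;

	/* later, possibly from IRQ or NMI context */
	acpi_atomic_read(&val, reg);

	/* process context again: tear-down */
	acpi_post_unmap_gar(reg);
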
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f2234db85da0..e61d4f8e62a5 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1027,10 +1027,9 @@ int __init acpi_ec_ecdt_probe(void)
1027 /* Don't trust ECDT, which comes from ASUSTek */ 1027 /* Don't trust ECDT, which comes from ASUSTek */
1028 if (!EC_FLAGS_VALIDATE_ECDT) 1028 if (!EC_FLAGS_VALIDATE_ECDT)
1029 goto install; 1029 goto install;
1030 saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); 1030 saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
1031 if (!saved_ec) 1031 if (!saved_ec)
1032 return -ENOMEM; 1032 return -ENOMEM;
1033 memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec));
1034 /* fall through */ 1033 /* fall through */
1035 } 1034 }
1036 1035
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
new file mode 100644
index 000000000000..d0c1967f7597
--- /dev/null
+++ b/drivers/acpi/hed.c
@@ -0,0 +1,112 @@
1/*
2 * ACPI Hardware Error Device (PNP0C33) Driver
3 *
4 * Copyright (C) 2010, Intel Corp.
5 * Author: Huang Ying <ying.huang@intel.com>
6 *
7 * ACPI Hardware Error Device is used to report some hardware errors
8 * notified via SCI, mainly the corrected errors.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version
12 * 2 as published by the Free Software Foundation;
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/acpi.h>
28#include <acpi/acpi_bus.h>
29#include <acpi/acpi_drivers.h>
30#include <acpi/hed.h>
31
32static struct acpi_device_id acpi_hed_ids[] = {
33 {"PNP0C33", 0},
34 {"", 0},
35};
36MODULE_DEVICE_TABLE(acpi, acpi_hed_ids);
37
38static acpi_handle hed_handle;
39
40static BLOCKING_NOTIFIER_HEAD(acpi_hed_notify_list);
41
42int register_acpi_hed_notifier(struct notifier_block *nb)
43{
44 return blocking_notifier_chain_register(&acpi_hed_notify_list, nb);
45}
46EXPORT_SYMBOL_GPL(register_acpi_hed_notifier);
47
48void unregister_acpi_hed_notifier(struct notifier_block *nb)
49{
50 blocking_notifier_chain_unregister(&acpi_hed_notify_list, nb);
51}
52EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier);
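
A consumer registers a notifier block and gets called for every hardware-error SCI forwarded through HED; ghes.c above does exactly this with ghes_notifier_sci. A reduced sketch, not part of this patch (the names are illustrative and <linux/notifier.h> is assumed to be included):

	static int example_hed_event(struct notifier_block *nb,
				     unsigned long event, void *data)
	{
		/* an SCI arrived on the Hardware Error Device; poll error sources here */
		return NOTIFY_OK;
	}

	static struct notifier_block example_hed_nb = {
		.notifier_call = example_hed_event,
	};

	/* usage: register_acpi_hed_notifier(&example_hed_nb); */
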
53
54/*
55 * An SCI that reports a hardware error is forwarded to the listeners
56 * of HED; it is used by HEST Generic Hardware Error Sources with the
57 * SCI notify type.
58 */
59static void acpi_hed_notify(struct acpi_device *device, u32 event)
60{
61 blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL);
62}
63
64static int __devinit acpi_hed_add(struct acpi_device *device)
65{
66 /* Only one hardware error device */
67 if (hed_handle)
68 return -EINVAL;
69 hed_handle = device->handle;
70 return 0;
71}
72
73static int __devexit acpi_hed_remove(struct acpi_device *device, int type)
74{
75 hed_handle = NULL;
76 return 0;
77}
78
79static struct acpi_driver acpi_hed_driver = {
80 .name = "hardware_error_device",
81 .class = "hardware_error",
82 .ids = acpi_hed_ids,
83 .ops = {
84 .add = acpi_hed_add,
85 .remove = acpi_hed_remove,
86 .notify = acpi_hed_notify,
87 },
88};
89
90static int __init acpi_hed_init(void)
91{
92 if (acpi_disabled)
93 return -ENODEV;
94
95 if (acpi_bus_register_driver(&acpi_hed_driver) < 0)
96 return -ENODEV;
97
98 return 0;
99}
100
101static void __exit acpi_hed_exit(void)
102{
103 acpi_bus_unregister_driver(&acpi_hed_driver);
104}
105
106module_init(acpi_hed_init);
107module_exit(acpi_hed_exit);
108
109ACPI_MODULE_NAME("hed");
110MODULE_AUTHOR("Huang Ying");
111MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
112MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c
deleted file mode 100644
index 1c527a192872..000000000000
--- a/drivers/acpi/hest.c
+++ /dev/null
@@ -1,139 +0,0 @@
1#include <linux/acpi.h>
2#include <linux/pci.h>
3
4#define PREFIX "ACPI: "
5
6static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p)
7{
8 return sizeof(*p) +
9 (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
10}
11
12static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p)
13{
14 return sizeof(*p) +
15 (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
16}
17
18static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p)
19{
20 return sizeof(*p);
21}
22
23static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p)
24{
25 return sizeof(*p);
26}
27
28static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci)
29{
30 return (0 == pci_domain_nr(pci->bus) &&
31 p->bus == pci->bus->number &&
32 p->device == PCI_SLOT(pci->devfn) &&
33 p->function == PCI_FUNC(pci->devfn));
34}
35
36static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first)
37{
38 struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header);
39 unsigned long rc=0;
40 u8 pcie_type = 0;
41 u8 bridge = 0;
42 switch (type) {
43 case ACPI_HEST_TYPE_AER_ROOT_PORT:
44 rc = sizeof(struct acpi_hest_aer_root);
45 pcie_type = PCI_EXP_TYPE_ROOT_PORT;
46 break;
47 case ACPI_HEST_TYPE_AER_ENDPOINT:
48 rc = sizeof(struct acpi_hest_aer);
49 pcie_type = PCI_EXP_TYPE_ENDPOINT;
50 break;
51 case ACPI_HEST_TYPE_AER_BRIDGE:
52 rc = sizeof(struct acpi_hest_aer_bridge);
53 if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE)
54 bridge = 1;
55 break;
56 }
57
58 if (p->flags & ACPI_HEST_GLOBAL) {
59 if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge)
60 *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
61 }
62 else
63 if (hest_match_pci(p, pci))
64 *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
65 return rc;
66}
67
68static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci)
69{
70 struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader;
71 void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */
72 struct acpi_hest_header *hdr = p;
73
74 int i;
75 int firmware_first = 0;
76 static unsigned char printed_unused = 0;
77 static unsigned char printed_reserved = 0;
78
79 for (i=0, hdr=p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) {
80 switch (hdr->type) {
81 case ACPI_HEST_TYPE_IA32_CHECK:
82 p += parse_acpi_hest_ia_machine_check(p);
83 break;
84 case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK:
85 p += parse_acpi_hest_ia_corrected(p);
86 break;
87 case ACPI_HEST_TYPE_IA32_NMI:
88 p += parse_acpi_hest_ia_nmi(p);
89 break;
90 /* These three should never appear */
91 case ACPI_HEST_TYPE_NOT_USED3:
92 case ACPI_HEST_TYPE_NOT_USED4:
93 case ACPI_HEST_TYPE_NOT_USED5:
94 if (!printed_unused) {
95 printk(KERN_DEBUG PREFIX
96 "HEST Error Source list contains an obsolete type (%d).\n", hdr->type);
97 printed_unused = 1;
98 }
99 break;
100 case ACPI_HEST_TYPE_AER_ROOT_PORT:
101 case ACPI_HEST_TYPE_AER_ENDPOINT:
102 case ACPI_HEST_TYPE_AER_BRIDGE:
103 p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first);
104 break;
105 case ACPI_HEST_TYPE_GENERIC_ERROR:
106 p += parse_acpi_hest_generic(p);
107 break;
108 /* These should never appear either */
109 case ACPI_HEST_TYPE_RESERVED:
110 default:
111 if (!printed_reserved) {
112 printk(KERN_DEBUG PREFIX
113 "HEST Error Source list contains a reserved type (%d).\n", hdr->type);
114 printed_reserved = 1;
115 }
116 break;
117 }
118 }
119 return firmware_first;
120}
121
122int acpi_hest_firmware_first_pci(struct pci_dev *pci)
123{
124 acpi_status status = AE_NOT_FOUND;
125 struct acpi_table_header *hest = NULL;
126
127 if (acpi_disabled)
128 return 0;
129
130 status = acpi_get_table(ACPI_SIG_HEST, 1, &hest);
131
132 if (ACPI_SUCCESS(status)) {
133 if (acpi_hest_firmware_first(hest, pci)) {
134 return 1;
135 }
136 }
137 return 0;
138}
139EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 4bc1c4178f50..78418ce4fc78 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1207,6 +1207,15 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
1207EXPORT_SYMBOL(acpi_check_mem_region); 1207EXPORT_SYMBOL(acpi_check_mem_region);
1208 1208
1209/* 1209/*
1210 * Let drivers know whether the resource checks are effective
1211 */
1212int acpi_resources_are_enforced(void)
1213{
1214 return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
1215}
1216EXPORT_SYMBOL(acpi_resources_are_enforced);
1217
1218/*
1210 * Acquire a spinlock. 1219 * Acquire a spinlock.
1211 * 1220 *
1212 * handle is a pointer to the spinlock_t. 1221 * handle is a pointer to the spinlock_t.
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index aefce33f2a09..4eac59393edc 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -120,7 +120,8 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
120 struct acpi_pci_root *root; 120 struct acpi_pci_root *root;
121 121
122 list_for_each_entry(root, &acpi_pci_roots, node) 122 list_for_each_entry(root, &acpi_pci_roots, node)
123 if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus)) 123 if ((root->segment == (u16) seg) &&
124 (root->secondary.start == (u16) bus))
124 return root->device->handle; 125 return root->device->handle;
125 return NULL; 126 return NULL;
126} 127}
@@ -154,7 +155,7 @@ EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
154static acpi_status 155static acpi_status
155get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) 156get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
156{ 157{
157 int *busnr = data; 158 struct resource *res = data;
158 struct acpi_resource_address64 address; 159 struct acpi_resource_address64 address;
159 160
160 if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 && 161 if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
@@ -164,28 +165,27 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
164 165
165 acpi_resource_to_address64(resource, &address); 166 acpi_resource_to_address64(resource, &address);
166 if ((address.address_length > 0) && 167 if ((address.address_length > 0) &&
167 (address.resource_type == ACPI_BUS_NUMBER_RANGE)) 168 (address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
168 *busnr = address.minimum; 169 res->start = address.minimum;
170 res->end = address.minimum + address.address_length - 1;
171 }
169 172
170 return AE_OK; 173 return AE_OK;
171} 174}
172 175
173static acpi_status try_get_root_bridge_busnr(acpi_handle handle, 176static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
174 unsigned long long *bus) 177 struct resource *res)
175{ 178{
176 acpi_status status; 179 acpi_status status;
177 int busnum;
178 180
179 busnum = -1; 181 res->start = -1;
180 status = 182 status =
181 acpi_walk_resources(handle, METHOD_NAME__CRS, 183 acpi_walk_resources(handle, METHOD_NAME__CRS,
182 get_root_bridge_busnr_callback, &busnum); 184 get_root_bridge_busnr_callback, res);
183 if (ACPI_FAILURE(status)) 185 if (ACPI_FAILURE(status))
184 return status; 186 return status;
185 /* Check if we really get a bus number from _CRS */ 187 if (res->start == -1)
186 if (busnum == -1)
187 return AE_ERROR; 188 return AE_ERROR;
188 *bus = busnum;
189 return AE_OK; 189 return AE_OK;
190} 190}
191 191
@@ -429,34 +429,47 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
429 struct acpi_device *child; 429 struct acpi_device *child;
430 u32 flags, base_flags; 430 u32 flags, base_flags;
431 431
432 root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
433 if (!root)
434 return -ENOMEM;
435
432 segment = 0; 436 segment = 0;
433 status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL, 437 status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
434 &segment); 438 &segment);
435 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 439 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
436 printk(KERN_ERR PREFIX "can't evaluate _SEG\n"); 440 printk(KERN_ERR PREFIX "can't evaluate _SEG\n");
437 return -ENODEV; 441 result = -ENODEV;
442 goto end;
438 } 443 }
439 444
440 /* Check _CRS first, then _BBN. If no _BBN, default to zero. */ 445 /* Check _CRS first, then _BBN. If no _BBN, default to zero. */
441 bus = 0; 446 root->secondary.flags = IORESOURCE_BUS;
442 status = try_get_root_bridge_busnr(device->handle, &bus); 447 status = try_get_root_bridge_busnr(device->handle, &root->secondary);
443 if (ACPI_FAILURE(status)) { 448 if (ACPI_FAILURE(status)) {
449 /*
450 * We need both the start and end of the downstream bus range
451 * to interpret _CBA (MMCONFIG base address), so it really is
452 * supposed to be in _CRS. If we don't find it there, all we
453 * can do is assume [_BBN-0xFF] or [0-0xFF].
454 */
455 root->secondary.end = 0xFF;
456 printk(KERN_WARNING FW_BUG PREFIX
457 "no secondary bus range in _CRS\n");
444 status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus); 458 status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
445 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 459 if (ACPI_SUCCESS(status))
446 printk(KERN_ERR PREFIX 460 root->secondary.start = bus;
447 "no bus number in _CRS and can't evaluate _BBN\n"); 461 else if (status == AE_NOT_FOUND)
448 return -ENODEV; 462 root->secondary.start = 0;
463 else {
464 printk(KERN_ERR PREFIX "can't evaluate _BBN\n");
465 result = -ENODEV;
466 goto end;
449 } 467 }
450 } 468 }
451 469
452 root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
453 if (!root)
454 return -ENOMEM;
455
456 INIT_LIST_HEAD(&root->node); 470 INIT_LIST_HEAD(&root->node);
457 root->device = device; 471 root->device = device;
458 root->segment = segment & 0xFFFF; 472 root->segment = segment & 0xFFFF;
459 root->bus_nr = bus & 0xFF;
460 strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME); 473 strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
461 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); 474 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
462 device->driver_data = root; 475 device->driver_data = root;
@@ -475,9 +488,9 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
475 /* TBD: Locking */ 488 /* TBD: Locking */
476 list_add_tail(&root->node, &acpi_pci_roots); 489 list_add_tail(&root->node, &acpi_pci_roots);
477 490
478 printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n", 491 printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n",
479 acpi_device_name(device), acpi_device_bid(device), 492 acpi_device_name(device), acpi_device_bid(device),
480 root->segment, root->bus_nr); 493 root->segment, &root->secondary);
481 494
482 /* 495 /*
483 * Scan the Root Bridge 496 * Scan the Root Bridge
@@ -486,11 +499,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
486 * PCI namespace does not get created until this call is made (and 499 * PCI namespace does not get created until this call is made (and
487 * thus the root bridge's pci_dev does not exist). 500 * thus the root bridge's pci_dev does not exist).
488 */ 501 */
489 root->bus = pci_acpi_scan_root(device, segment, bus); 502 root->bus = pci_acpi_scan_root(root);
490 if (!root->bus) { 503 if (!root->bus) {
491 printk(KERN_ERR PREFIX 504 printk(KERN_ERR PREFIX
492 "Bus %04x:%02x not present in PCI namespace\n", 505 "Bus %04x:%02x not present in PCI namespace\n",
493 root->segment, root->bus_nr); 506 root->segment, (unsigned int)root->secondary.start);
494 result = -ENODEV; 507 result = -ENODEV;
495 goto end; 508 goto end;
496 } 509 }
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 5675d9747e87..b1034a9ada4e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -616,7 +616,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
616 acpi_processor_get_limit_info(pr); 616 acpi_processor_get_limit_info(pr);
617 617
618 618
619 acpi_processor_power_init(pr, device); 619 if (cpuidle_get_driver() == &acpi_idle_driver)
620 acpi_processor_power_init(pr, device);
620 621
621 pr->cdev = thermal_cooling_device_register("Processor", device, 622 pr->cdev = thermal_cooling_device_register("Processor", device,
622 &processor_cooling_ops); 623 &processor_cooling_ops);
@@ -920,9 +921,14 @@ static int __init acpi_processor_init(void)
920 if (!acpi_processor_dir) 921 if (!acpi_processor_dir)
921 return -ENOMEM; 922 return -ENOMEM;
922#endif 923#endif
923 result = cpuidle_register_driver(&acpi_idle_driver); 924
924 if (result < 0) 925 if (!cpuidle_register_driver(&acpi_idle_driver)) {
925 goto out_proc; 926 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
927 acpi_idle_driver.name);
928 } else {
929 printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s",
930 cpuidle_get_driver()->name);
931 }
926 932
927 result = acpi_bus_register_driver(&acpi_processor_driver); 933 result = acpi_bus_register_driver(&acpi_processor_driver);
928 if (result < 0) 934 if (result < 0)
@@ -941,7 +947,6 @@ static int __init acpi_processor_init(void)
941out_cpuidle: 947out_cpuidle:
942 cpuidle_unregister_driver(&acpi_idle_driver); 948 cpuidle_unregister_driver(&acpi_idle_driver);
943 949
944out_proc:
945#ifdef CONFIG_ACPI_PROCFS 950#ifdef CONFIG_ACPI_PROCFS
946 remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); 951 remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
947#endif 952#endif
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index c3817e1f32c7..2e8c27d48f2b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -727,19 +727,9 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
727 break; 727 break;
728 } 728 }
729 729
730 if (pr->power.states[i].promotion.state) 730 seq_puts(seq, "promotion[--] ");
731 seq_printf(seq, "promotion[C%zd] ", 731
732 (pr->power.states[i].promotion.state - 732 seq_puts(seq, "demotion[--] ");
733 pr->power.states));
734 else
735 seq_puts(seq, "promotion[--] ");
736
737 if (pr->power.states[i].demotion.state)
738 seq_printf(seq, "demotion[C%zd] ",
739 (pr->power.states[i].demotion.state -
740 pr->power.states));
741 else
742 seq_puts(seq, "demotion[--] ");
743 733
744 seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n", 734 seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
745 pr->power.states[i].latency, 735 pr->power.states[i].latency,
@@ -869,6 +859,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
869 struct acpi_processor *pr; 859 struct acpi_processor *pr;
870 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 860 struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
871 ktime_t kt1, kt2; 861 ktime_t kt1, kt2;
862 s64 idle_time_ns;
872 s64 idle_time; 863 s64 idle_time;
873 s64 sleep_ticks = 0; 864 s64 sleep_ticks = 0;
874 865
@@ -881,6 +872,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
881 return(acpi_idle_enter_c1(dev, state)); 872 return(acpi_idle_enter_c1(dev, state));
882 873
883 local_irq_disable(); 874 local_irq_disable();
875
884 if (cx->entry_method != ACPI_CSTATE_FFH) { 876 if (cx->entry_method != ACPI_CSTATE_FFH) {
885 current_thread_info()->status &= ~TS_POLLING; 877 current_thread_info()->status &= ~TS_POLLING;
886 /* 878 /*
@@ -888,12 +880,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
888 * NEED_RESCHED: 880 * NEED_RESCHED:
889 */ 881 */
890 smp_mb(); 882 smp_mb();
891 }
892 883
893 if (unlikely(need_resched())) { 884 if (unlikely(need_resched())) {
894 current_thread_info()->status |= TS_POLLING; 885 current_thread_info()->status |= TS_POLLING;
895 local_irq_enable(); 886 local_irq_enable();
896 return 0; 887 return 0;
888 }
897 } 889 }
898 890
899 /* 891 /*
@@ -910,15 +902,18 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
910 sched_clock_idle_sleep_event(); 902 sched_clock_idle_sleep_event();
911 acpi_idle_do_entry(cx); 903 acpi_idle_do_entry(cx);
912 kt2 = ktime_get_real(); 904 kt2 = ktime_get_real();
913 idle_time = ktime_to_us(ktime_sub(kt2, kt1)); 905 idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
906 idle_time = idle_time_ns;
907 do_div(idle_time, NSEC_PER_USEC);
914 908
915 sleep_ticks = us_to_pm_timer_ticks(idle_time); 909 sleep_ticks = us_to_pm_timer_ticks(idle_time);
916 910
917 /* Tell the scheduler how much we idled: */ 911 /* Tell the scheduler how much we idled: */
918 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 912 sched_clock_idle_wakeup_event(idle_time_ns);
919 913
920 local_irq_enable(); 914 local_irq_enable();
921 current_thread_info()->status |= TS_POLLING; 915 if (cx->entry_method != ACPI_CSTATE_FFH)
916 current_thread_info()->status |= TS_POLLING;
922 917
923 cx->usage++; 918 cx->usage++;
924 919
@@ -943,6 +938,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
943 struct acpi_processor *pr; 938 struct acpi_processor *pr;
944 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 939 struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
945 ktime_t kt1, kt2; 940 ktime_t kt1, kt2;
941 s64 idle_time_ns;
946 s64 idle_time; 942 s64 idle_time;
947 s64 sleep_ticks = 0; 943 s64 sleep_ticks = 0;
948 944
@@ -968,6 +964,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
968 } 964 }
969 965
970 local_irq_disable(); 966 local_irq_disable();
967
971 if (cx->entry_method != ACPI_CSTATE_FFH) { 968 if (cx->entry_method != ACPI_CSTATE_FFH) {
972 current_thread_info()->status &= ~TS_POLLING; 969 current_thread_info()->status &= ~TS_POLLING;
973 /* 970 /*
@@ -975,12 +972,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
975 * NEED_RESCHED: 972 * NEED_RESCHED:
976 */ 973 */
977 smp_mb(); 974 smp_mb();
978 }
979 975
980 if (unlikely(need_resched())) { 976 if (unlikely(need_resched())) {
981 current_thread_info()->status |= TS_POLLING; 977 current_thread_info()->status |= TS_POLLING;
982 local_irq_enable(); 978 local_irq_enable();
983 return 0; 979 return 0;
980 }
984 } 981 }
985 982
986 acpi_unlazy_tlb(smp_processor_id()); 983 acpi_unlazy_tlb(smp_processor_id());
@@ -1025,14 +1022,17 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1025 spin_unlock(&c3_lock); 1022 spin_unlock(&c3_lock);
1026 } 1023 }
1027 kt2 = ktime_get_real(); 1024 kt2 = ktime_get_real();
1028 idle_time = ktime_to_us(ktime_sub(kt2, kt1)); 1025 idle_time_ns = ktime_to_us(ktime_sub(kt2, kt1));
1026 idle_time = idle_time_ns;
1027 do_div(idle_time, NSEC_PER_USEC);
1029 1028
1030 sleep_ticks = us_to_pm_timer_ticks(idle_time); 1029 sleep_ticks = us_to_pm_timer_ticks(idle_time);
1031 /* Tell the scheduler how much we idled: */ 1030 /* Tell the scheduler how much we idled: */
1032 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 1031 sched_clock_idle_wakeup_event(idle_time_ns);
1033 1032
1034 local_irq_enable(); 1033 local_irq_enable();
1035 current_thread_info()->status |= TS_POLLING; 1034 if (cx->entry_method != ACPI_CSTATE_FFH)
1035 current_thread_info()->status |= TS_POLLING;
1036 1036
1037 cx->usage++; 1037 cx->usage++;
1038 1038
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index baa76bbf244a..4ab2275b4461 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -80,22 +80,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
80 80
81#ifdef CONFIG_ACPI_SLEEP 81#ifdef CONFIG_ACPI_SLEEP
82static u32 acpi_target_sleep_state = ACPI_STATE_S0; 82static u32 acpi_target_sleep_state = ACPI_STATE_S0;
83/*
84 * According to the ACPI specification the BIOS should make sure that ACPI is
85 * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
86 * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
87 * on such systems during resume. Unfortunately that doesn't help in
88 * particularly pathological cases in which SCI_EN has to be set directly on
89 * resume, although the specification states very clearly that this flag is
90 * owned by the hardware. The set_sci_en_on_resume variable will be set in such
91 * cases.
92 */
93static bool set_sci_en_on_resume;
94
95void __init acpi_set_sci_en_on_resume(void)
96{
97 set_sci_en_on_resume = true;
98}
99 83
100/* 84/*
101 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the 85 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
@@ -253,11 +237,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
253 break; 237 break;
254 } 238 }
255 239
256 /* If ACPI is not enabled by the BIOS, we need to enable it here. */ 240 /* This violates the spec but is required for bug compatibility. */
257 if (set_sci_en_on_resume) 241 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
258 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
259 else
260 acpi_enable();
261 242
262 /* Reprogram control registers and execute _BFS */ 243 /* Reprogram control registers and execute _BFS */
263 acpi_leave_sleep_state_prep(acpi_state); 244 acpi_leave_sleep_state_prep(acpi_state);
@@ -346,12 +327,6 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
346 return 0; 327 return 0;
347} 328}
348 329
349static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
350{
351 set_sci_en_on_resume = true;
352 return 0;
353}
354
355static struct dmi_system_id __initdata acpisleep_dmi_table[] = { 330static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
356 { 331 {
357 .callback = init_old_suspend_ordering, 332 .callback = init_old_suspend_ordering,
@@ -370,22 +345,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
370 }, 345 },
371 }, 346 },
372 { 347 {
373 .callback = init_set_sci_en_on_resume,
374 .ident = "Apple MacBook 1,1",
375 .matches = {
376 DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
377 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
378 },
379 },
380 {
381 .callback = init_set_sci_en_on_resume,
382 .ident = "Apple MacMini 1,1",
383 .matches = {
384 DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
385 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
386 },
387 },
388 {
389 .callback = init_old_suspend_ordering, 348 .callback = init_old_suspend_ordering,
390 .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", 349 .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
391 .matches = { 350 .matches = {
@@ -394,94 +353,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
394 }, 353 },
395 }, 354 },
396 { 355 {
397 .callback = init_set_sci_en_on_resume,
398 .ident = "Toshiba Satellite L300",
399 .matches = {
400 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
401 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
402 },
403 },
404 {
405 .callback = init_set_sci_en_on_resume,
406 .ident = "Hewlett-Packard HP G7000 Notebook PC",
407 .matches = {
408 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
409 DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
410 },
411 },
412 {
413 .callback = init_set_sci_en_on_resume,
414 .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC",
415 .matches = {
416 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
417 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"),
418 },
419 },
420 {
421 .callback = init_set_sci_en_on_resume,
422 .ident = "Hewlett-Packard Pavilion dv4",
423 .matches = {
424 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
425 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"),
426 },
427 },
428 {
429 .callback = init_set_sci_en_on_resume,
430 .ident = "Hewlett-Packard Pavilion dv7",
431 .matches = {
432 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
433 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"),
434 },
435 },
436 {
437 .callback = init_set_sci_en_on_resume,
438 .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
439 .matches = {
440 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
441 DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
442 },
443 },
444 {
445 .callback = init_set_sci_en_on_resume,
446 .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
447 .matches = {
448 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
449 DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"),
450 },
451 },
452 {
453 .callback = init_set_sci_en_on_resume,
454 .ident = "Lenovo ThinkPad T410",
455 .matches = {
456 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
457 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
458 },
459 },
460 {
461 .callback = init_set_sci_en_on_resume,
462 .ident = "Lenovo ThinkPad T510",
463 .matches = {
464 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
465 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
466 },
467 },
468 {
469 .callback = init_set_sci_en_on_resume,
470 .ident = "Lenovo ThinkPad W510",
471 .matches = {
472 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
473 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
474 },
475 },
476 {
477 .callback = init_set_sci_en_on_resume,
478 .ident = "Lenovo ThinkPad X201[s]",
479 .matches = {
480 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
481 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
482 },
483 },
484 {
485 .callback = init_old_suspend_ordering, 356 .callback = init_old_suspend_ordering,
486 .ident = "Panasonic CF51-2L", 357 .ident = "Panasonic CF51-2L",
487 .matches = { 358 .matches = {
@@ -490,30 +361,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
490 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), 361 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
491 }, 362 },
492 }, 363 },
493 {
494 .callback = init_set_sci_en_on_resume,
495 .ident = "Dell Studio 1558",
496 .matches = {
497 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
498 DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
499 },
500 },
501 {
502 .callback = init_set_sci_en_on_resume,
503 .ident = "Dell Studio 1557",
504 .matches = {
505 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
506 DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
507 },
508 },
509 {
510 .callback = init_set_sci_en_on_resume,
511 .ident = "Dell Studio 1555",
512 .matches = {
513 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
514 DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
515 },
516 },
517 {}, 364 {},
518}; 365};
519#endif /* CONFIG_SUSPEND */ 366#endif /* CONFIG_SUSPEND */
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 8a8f3b3382a6..25b8bd149284 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -1,6 +1,6 @@
1 1
2extern u8 sleep_states[]; 2extern u8 sleep_states[];
3extern int acpi_suspend (u32 state); 3extern int acpi_suspend(u32 state);
4 4
5extern void acpi_enable_wakeup_device_prep(u8 sleep_state); 5extern void acpi_enable_wakeup_device_prep(u8 sleep_state);
6extern void acpi_enable_wakeup_device(u8 sleep_state); 6extern void acpi_enable_wakeup_device(u8 sleep_state);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 8a0ed2800e63..f336bca7c450 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
213 unsigned long table_end; 213 unsigned long table_end;
214 acpi_size tbl_size; 214 acpi_size tbl_size;
215 215
216 if (acpi_disabled && !acpi_ht) 216 if (acpi_disabled)
217 return -ENODEV; 217 return -ENODEV;
218 218
219 if (!handler) 219 if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
280 struct acpi_table_header *table = NULL; 280 struct acpi_table_header *table = NULL;
281 acpi_size tbl_size; 281 acpi_size tbl_size;
282 282
283 if (acpi_disabled && !acpi_ht) 283 if (acpi_disabled)
284 return -ENODEV; 284 return -ENODEV;
285 285
286 if (!handler) 286 if (!handler)
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index a0c93b321482..9865d46f49a8 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -45,6 +45,7 @@
45#include <acpi/acpi_bus.h> 45#include <acpi/acpi_bus.h>
46#include <acpi/acpi_drivers.h> 46#include <acpi/acpi_drivers.h>
47#include <linux/suspend.h> 47#include <linux/suspend.h>
48#include <acpi/video.h>
48 49
49#define PREFIX "ACPI: " 50#define PREFIX "ACPI: "
50 51
@@ -65,11 +66,6 @@
65 66
66#define MAX_NAME_LEN 20 67#define MAX_NAME_LEN 20
67 68
68#define ACPI_VIDEO_DISPLAY_CRT 1
69#define ACPI_VIDEO_DISPLAY_TV 2
70#define ACPI_VIDEO_DISPLAY_DVI 3
71#define ACPI_VIDEO_DISPLAY_LCD 4
72
73#define _COMPONENT ACPI_VIDEO_COMPONENT 69#define _COMPONENT ACPI_VIDEO_COMPONENT
74ACPI_MODULE_NAME("video"); 70ACPI_MODULE_NAME("video");
75 71
@@ -1007,11 +1003,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
1007 result = acpi_video_init_brightness(device); 1003 result = acpi_video_init_brightness(device);
1008 if (result) 1004 if (result)
1009 return; 1005 return;
1010 name = kzalloc(MAX_NAME_LEN, GFP_KERNEL); 1006 name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
1011 if (!name) 1007 if (!name)
1012 return; 1008 return;
1009 count++;
1013 1010
1014 sprintf(name, "acpi_video%d", count++);
1015 memset(&props, 0, sizeof(struct backlight_properties)); 1011 memset(&props, 0, sizeof(struct backlight_properties));
1016 props.max_brightness = device->brightness->count - 3; 1012 props.max_brightness = device->brightness->count - 3;
1017 device->backlight = backlight_device_register(name, NULL, device, 1013 device->backlight = backlight_device_register(name, NULL, device,
@@ -1067,10 +1063,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
1067 if (device->cap._DCS && device->cap._DSS) { 1063 if (device->cap._DCS && device->cap._DSS) {
1068 static int count; 1064 static int count;
1069 char *name; 1065 char *name;
1070 name = kzalloc(MAX_NAME_LEN, GFP_KERNEL); 1066 name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
1071 if (!name) 1067 if (!name)
1072 return; 1068 return;
1073 sprintf(name, "acpi_video%d", count++); 1069 count++;
1074 device->output_dev = video_output_register(name, 1070 device->output_dev = video_output_register(name,
1075 NULL, device, &acpi_output_properties); 1071 NULL, device, &acpi_output_properties);
1076 kfree(name); 1072 kfree(name);
@@ -1748,11 +1744,27 @@ acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id
1748} 1744}
1749 1745
1750static int 1746static int
1747acpi_video_get_device_type(struct acpi_video_bus *video,
1748 unsigned long device_id)
1749{
1750 struct acpi_video_enumerated_device *ids;
1751 int i;
1752
1753 for (i = 0; i < video->attached_count; i++) {
1754 ids = &video->attached_array[i];
1755 if ((ids->value.int_val & 0xffff) == device_id)
1756 return ids->value.int_val;
1757 }
1758
1759 return 0;
1760}
1761
1762static int
1751acpi_video_bus_get_one_device(struct acpi_device *device, 1763acpi_video_bus_get_one_device(struct acpi_device *device,
1752 struct acpi_video_bus *video) 1764 struct acpi_video_bus *video)
1753{ 1765{
1754 unsigned long long device_id; 1766 unsigned long long device_id;
1755 int status; 1767 int status, device_type;
1756 struct acpi_video_device *data; 1768 struct acpi_video_device *data;
1757 struct acpi_video_device_attrib* attribute; 1769 struct acpi_video_device_attrib* attribute;
1758 1770
@@ -1797,8 +1809,25 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
1797 } 1809 }
1798 if(attribute->bios_can_detect) 1810 if(attribute->bios_can_detect)
1799 data->flags.bios = 1; 1811 data->flags.bios = 1;
1800 } else 1812 } else {
1801 data->flags.unknown = 1; 1813 /* Check for legacy IDs */
1814 device_type = acpi_video_get_device_type(video,
1815 device_id);
1816 /* Ignore bits 16 and 18-20 */
1817 switch (device_type & 0xffe2ffff) {
1818 case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
1819 data->flags.crt = 1;
1820 break;
1821 case ACPI_VIDEO_DISPLAY_LEGACY_PANEL:
1822 data->flags.lcd = 1;
1823 break;
1824 case ACPI_VIDEO_DISPLAY_LEGACY_TV:
1825 data->flags.tvout = 1;
1826 break;
1827 default:
1828 data->flags.unknown = 1;
1829 }
1830 }
1802 1831
1803 acpi_video_device_bind(video, data); 1832 acpi_video_device_bind(video, data);
1804 acpi_video_device_find_cap(data); 1833 acpi_video_device_find_cap(data);
@@ -2032,6 +2061,71 @@ out:
2032 return result; 2061 return result;
2033} 2062}
2034 2063
2064int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
2065 void **edid)
2066{
2067 struct acpi_video_bus *video;
2068 struct acpi_video_device *video_device;
2069 union acpi_object *buffer = NULL;
2070 acpi_status status;
2071 int i, length;
2072
2073 if (!device || !acpi_driver_data(device))
2074 return -EINVAL;
2075
2076 video = acpi_driver_data(device);
2077
2078 for (i = 0; i < video->attached_count; i++) {
2079 video_device = video->attached_array[i].bind_info;
2080 length = 256;
2081
2082 if (!video_device)
2083 continue;
2084
2085 if (type) {
2086 switch (type) {
2087 case ACPI_VIDEO_DISPLAY_CRT:
2088 if (!video_device->flags.crt)
2089 continue;
2090 break;
2091 case ACPI_VIDEO_DISPLAY_TV:
2092 if (!video_device->flags.tvout)
2093 continue;
2094 break;
2095 case ACPI_VIDEO_DISPLAY_DVI:
2096 if (!video_device->flags.dvi)
2097 continue;
2098 break;
2099 case ACPI_VIDEO_DISPLAY_LCD:
2100 if (!video_device->flags.lcd)
2101 continue;
2102 break;
2103 }
2104 } else if (video_device->device_id != device_id) {
2105 continue;
2106 }
2107
2108 status = acpi_video_device_EDID(video_device, &buffer, length);
2109
2110 if (ACPI_FAILURE(status) || !buffer ||
2111 buffer->type != ACPI_TYPE_BUFFER) {
2112 length = 128;
2113 status = acpi_video_device_EDID(video_device, &buffer,
2114 length);
2115 if (ACPI_FAILURE(status) || !buffer ||
2116 buffer->type != ACPI_TYPE_BUFFER) {
2117 continue;
2118 }
2119 }
2120
2121 *edid = buffer->buffer.pointer;
2122 return length;
2123 }
2124
2125 return -ENODEV;
2126}
2127EXPORT_SYMBOL(acpi_video_get_edid);
2128
2035static int 2129static int
2036acpi_video_bus_get_devices(struct acpi_video_bus *video, 2130acpi_video_bus_get_devices(struct acpi_video_bus *video,
2037 struct acpi_device *device) 2131 struct acpi_device *device)
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index fc2f26b9b407..c5fef01b3c95 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str)
250 ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR; 250 ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
251 if (!strcmp("video", str)) 251 if (!strcmp("video", str))
252 acpi_video_support |= 252 acpi_video_support |=
253 ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO; 253 ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
254 } 254 }
255 return 1; 255 return 1;
256} 256}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index e68541f662b9..73f883333a0d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -57,6 +57,8 @@ config SATA_PMP
57 This option adds support for SATA Port Multipliers 57 This option adds support for SATA Port Multipliers
58 (the SATA version of an ethernet hub, or SAS expander). 58 (the SATA version of an ethernet hub, or SAS expander).
59 59
60comment "Controllers with non-SFF native interface"
61
60config SATA_AHCI 62config SATA_AHCI
61 tristate "AHCI SATA support" 63 tristate "AHCI SATA support"
62 depends on PCI 64 depends on PCI
@@ -73,11 +75,12 @@ config SATA_AHCI_PLATFORM
73 75
74 If unsure, say N. 76 If unsure, say N.
75 77
76config SATA_SIL24 78config SATA_FSL
77 tristate "Silicon Image 3124/3132 SATA support" 79 tristate "Freescale 3.0Gbps SATA support"
78 depends on PCI 80 depends on FSL_SOC
79 help 81 help
80 This option enables support for Silicon Image 3124/3132 Serial ATA. 82 This option enables support for Freescale 3.0Gbps SATA controller.
83 It can be found on MPC837x and MPC8315.
81 84
82 If unsure, say N. 85 If unsure, say N.
83 86
@@ -87,12 +90,11 @@ config SATA_INIC162X
87 help 90 help
88 This option enables support for Initio 162x Serial ATA. 91 This option enables support for Initio 162x Serial ATA.
89 92
90config SATA_FSL 93config SATA_SIL24
91 tristate "Freescale 3.0Gbps SATA support" 94 tristate "Silicon Image 3124/3132 SATA support"
92 depends on FSL_SOC 95 depends on PCI
93 help 96 help
94 This option enables support for Freescale 3.0Gbps SATA controller. 97 This option enables support for Silicon Image 3124/3132 Serial ATA.
95 It can be found on MPC837x and MPC8315.
96 98
97 If unsure, say N. 99 If unsure, say N.
98 100
@@ -116,15 +118,65 @@ config ATA_SFF
116 118
117if ATA_SFF 119if ATA_SFF
118 120
119config SATA_SVW 121comment "SFF controllers with custom DMA interface"
120 tristate "ServerWorks Frodo / Apple K2 SATA support" 122
123config PDC_ADMA
124 tristate "Pacific Digital ADMA support"
121 depends on PCI 125 depends on PCI
122 help 126 help
123 This option enables support for Broadcom/Serverworks/Apple K2 127 This option enables support for Pacific Digital ADMA controllers
124 SATA support. 128
129 If unsure, say N.
130
131config PATA_MPC52xx
132 tristate "Freescale MPC52xx SoC internal IDE"
133 depends on PPC_MPC52xx && PPC_BESTCOMM
134 select PPC_BESTCOMM_ATA
135 help
136 This option enables support for integrated IDE controller
137 of the Freescale MPC52xx SoC.
138
139 If unsure, say N.
140
141config PATA_OCTEON_CF
142 tristate "OCTEON Boot Bus Compact Flash support"
143 depends on CPU_CAVIUM_OCTEON
144 help
145 This option enables a polled compact flash driver for use with
146 compact flash cards attached to the OCTEON boot bus.
147
148 If unsure, say N.
149
150config SATA_QSTOR
151 tristate "Pacific Digital SATA QStor support"
152 depends on PCI
153 help
154 This option enables support for Pacific Digital Serial ATA QStor.
155
156 If unsure, say N.
157
158config SATA_SX4
159 tristate "Promise SATA SX4 support (Experimental)"
160 depends on PCI && EXPERIMENTAL
161 help
162 This option enables support for Promise Serial ATA SX4.
125 163
126 If unsure, say N. 164 If unsure, say N.
127 165
166config ATA_BMDMA
167 bool "ATA BMDMA support"
168 default y
169 help
170 This option adds support for SFF ATA controllers with BMDMA
171 capability. BMDMA stands for bus-master DMA and the
172 de-facto DMA interface for SFF controllers.
173
174 If unuser, say Y.
175
176if ATA_BMDMA
177
178comment "SATA SFF controllers with BMDMA"
179
128config ATA_PIIX 180config ATA_PIIX
129 tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support" 181 tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support"
130 depends on PCI 182 depends on PCI
@@ -152,22 +204,6 @@ config SATA_NV
152 204
153 If unsure, say N. 205 If unsure, say N.
154 206
155config PDC_ADMA
156 tristate "Pacific Digital ADMA support"
157 depends on PCI
158 help
159 This option enables support for Pacific Digital ADMA controllers
160
161 If unsure, say N.
162
163config SATA_QSTOR
164 tristate "Pacific Digital SATA QStor support"
165 depends on PCI
166 help
167 This option enables support for Pacific Digital Serial ATA QStor.
168
169 If unsure, say N.
170
171config SATA_PROMISE 207config SATA_PROMISE
172 tristate "Promise SATA TX2/TX4 support" 208 tristate "Promise SATA TX2/TX4 support"
173 depends on PCI 209 depends on PCI
@@ -176,14 +212,6 @@ config SATA_PROMISE
176 212
177 If unsure, say N. 213 If unsure, say N.
178 214
179config SATA_SX4
180 tristate "Promise SATA SX4 support (Experimental)"
181 depends on PCI && EXPERIMENTAL
182 help
183 This option enables support for Promise Serial ATA SX4.
184
185 If unsure, say N.
186
187config SATA_SIL 215config SATA_SIL
188 tristate "Silicon Image SATA support" 216 tristate "Silicon Image SATA support"
189 depends on PCI 217 depends on PCI
@@ -203,6 +231,15 @@ config SATA_SIS
203 enable the PATA_SIS driver in the config. 231 enable the PATA_SIS driver in the config.
204 If unsure, say N. 232 If unsure, say N.
205 233
234config SATA_SVW
235 tristate "ServerWorks Frodo / Apple K2 SATA support"
236 depends on PCI
237 help
238 This option enables support for Broadcom/Serverworks/Apple K2
239 SATA support.
240
241 If unsure, say N.
242
206config SATA_ULI 243config SATA_ULI
207 tristate "ULi Electronics SATA support" 244 tristate "ULi Electronics SATA support"
208 depends on PCI 245 depends on PCI
@@ -227,14 +264,7 @@ config SATA_VITESSE
227 264
228 If unsure, say N. 265 If unsure, say N.
229 266
230config PATA_ACPI 267comment "PATA SFF controllers with BMDMA"
231 tristate "ACPI firmware driver for PATA"
232 depends on ATA_ACPI
233 help
234 This option enables an ACPI method driver which drives
235 motherboard PATA controller interfaces through the ACPI
236 firmware in the BIOS. This driver can sometimes handle
237 otherwise unsupported hardware.
238 268
239config PATA_ALI 269config PATA_ALI
240 tristate "ALi PATA support" 270 tristate "ALi PATA support"
@@ -262,40 +292,30 @@ config PATA_ARTOP
262 292
263 If unsure, say N. 293 If unsure, say N.
264 294
265config PATA_ATP867X 295config PATA_ATIIXP
266 tristate "ARTOP/Acard ATP867X PATA support" 296 tristate "ATI PATA support"
267 depends on PCI 297 depends on PCI
268 help 298 help
269 This option enables support for ARTOP/Acard ATP867X PATA 299 This option enables support for the ATI ATA interfaces
270 controllers. 300 found on the many ATI chipsets.
271
272 If unsure, say N.
273
274config PATA_AT32
275 tristate "Atmel AVR32 PATA support (Experimental)"
276 depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
277 help
278 This option enables support for the IDE devices on the
279 Atmel AT32AP platform.
280 301
281 If unsure, say N. 302 If unsure, say N.
282 303
283config PATA_ATIIXP 304config PATA_ATP867X
284 tristate "ATI PATA support" 305 tristate "ARTOP/Acard ATP867X PATA support"
285 depends on PCI 306 depends on PCI
286 help 307 help
287 This option enables support for the ATI ATA interfaces 308 This option enables support for ARTOP/Acard ATP867X PATA
288 found on the many ATI chipsets. 309 controllers.
289 310
290 If unsure, say N. 311 If unsure, say N.
291 312
292config PATA_CMD640_PCI 313config PATA_BF54X
293 tristate "CMD640 PCI PATA support (Experimental)" 314 tristate "Blackfin 54x ATAPI support"
294 depends on PCI && EXPERIMENTAL 315 depends on BF542 || BF548 || BF549
295 help 316 help
296 This option enables support for the CMD640 PCI IDE 317 This option enables support for the built-in ATAPI controller on
297 interface chip. Only the primary channel is currently 318 Blackfin 54x family chips.
298 supported.
299 319
300 If unsure, say N. 320 If unsure, say N.
301 321
@@ -362,15 +382,6 @@ config PATA_EFAR
362 382
363 If unsure, say N. 383 If unsure, say N.
364 384
365config ATA_GENERIC
366 tristate "Generic ATA support"
367 depends on PCI
368 help
369 This option enables support for generic BIOS configured
370 ATA controllers via the new ATA layer
371
372 If unsure, say N.
373
374config PATA_HPT366 385config PATA_HPT366
375 tristate "HPT 366/368 PATA support" 386 tristate "HPT 366/368 PATA support"
376 depends on PCI 387 depends on PCI
@@ -415,12 +426,20 @@ config PATA_HPT3X3_DMA
415 controllers. Enable with care as there are still some 426 controllers. Enable with care as there are still some
416 problems with DMA on this chipset. 427 problems with DMA on this chipset.
417 428
418config PATA_ISAPNP 429config PATA_ICSIDE
419 tristate "ISA Plug and Play PATA support" 430 tristate "Acorn ICS PATA support"
420 depends on ISAPNP 431 depends on ARM && ARCH_ACORN
421 help 432 help
422 This option enables support for ISA plug & play ATA 433 On Acorn systems, say Y here if you wish to use the ICS PATA
423 controllers such as those found on old soundcards. 434 interface card. This is not required for ICS partition support.
435 If you are unsure, say N to this.
436
437config PATA_IT8213
438 tristate "IT8213 PATA support (Experimental)"
439 depends on PCI && EXPERIMENTAL
440 help
441 This option enables support for the ITE 821 PATA
442 controllers via the new ATA layer.
424 443
425 If unsure, say N. 444 If unsure, say N.
426 445
@@ -434,15 +453,6 @@ config PATA_IT821X
434 453
435 If unsure, say N. 454 If unsure, say N.
436 455
437config PATA_IT8213
438 tristate "IT8213 PATA support (Experimental)"
439 depends on PCI && EXPERIMENTAL
440 help
441 This option enables support for the ITE 821 PATA
442 controllers via the new ATA layer.
443
444 If unsure, say N.
445
446config PATA_JMICRON 456config PATA_JMICRON
447 tristate "JMicron PATA support" 457 tristate "JMicron PATA support"
448 depends on PCI 458 depends on PCI
@@ -452,23 +462,14 @@ config PATA_JMICRON
452 462
453 If unsure, say N. 463 If unsure, say N.
454 464
455config PATA_LEGACY 465config PATA_MACIO
456 tristate "Legacy ISA PATA support (Experimental)" 466 tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE"
457 depends on (ISA || PCI) && EXPERIMENTAL 467 depends on PPC_PMAC
458 help
459 This option enables support for ISA/VLB/PCI bus legacy PATA
460 ports and allows them to be accessed via the new ATA layer.
461
462 If unsure, say N.
463
464config PATA_TRIFLEX
465 tristate "Compaq Triflex PATA support"
466 depends on PCI
467 help 468 help
468 Enable support for the Compaq 'Triflex' IDE controller as found 469 Most IDE capable PowerMacs have IDE busses driven by a variant
469 on many Compaq Pentium-Pro systems, via the new ATA layer. 470 of this controller which is part of the Apple chipset used on
470 471 most PowerMac models. Some models have multiple busses using
471 If unsure, say N. 472 different chipsets, though generally, MacIO is one of them.
472 473
473config PATA_MARVELL 474config PATA_MARVELL
474 tristate "Marvell PATA support via legacy mode" 475 tristate "Marvell PATA support via legacy mode"
@@ -481,32 +482,6 @@ config PATA_MARVELL
481 482
482 If unsure, say N. 483 If unsure, say N.
483 484
484config PATA_MPC52xx
485 tristate "Freescale MPC52xx SoC internal IDE"
486 depends on PPC_MPC52xx && PPC_BESTCOMM
487 select PPC_BESTCOMM_ATA
488 help
489 This option enables support for integrated IDE controller
490 of the Freescale MPC52xx SoC.
491
492 If unsure, say N.
493
494config PATA_MPIIX
495 tristate "Intel PATA MPIIX support"
496 depends on PCI
497 help
498 This option enables support for MPIIX PATA support.
499
500 If unsure, say N.
501
502config PATA_OLDPIIX
503 tristate "Intel PATA old PIIX support"
504 depends on PCI
505 help
506 This option enables support for early PIIX PATA support.
507
508 If unsure, say N.
509
510config PATA_NETCELL 485config PATA_NETCELL
511 tristate "NETCELL Revolution RAID support" 486 tristate "NETCELL Revolution RAID support"
512 depends on PCI 487 depends on PCI
@@ -525,15 +500,6 @@ config PATA_NINJA32
525 500
526 If unsure, say N. 501 If unsure, say N.
527 502
528config PATA_NS87410
529 tristate "Nat Semi NS87410 PATA support"
530 depends on PCI
531 help
532 This option enables support for the National Semiconductor
533 NS87410 PCI-IDE controller.
534
535 If unsure, say N.
536
537config PATA_NS87415 503config PATA_NS87415
538 tristate "Nat Semi NS87415 PATA support" 504 tristate "Nat Semi NS87415 PATA support"
539 depends on PCI 505 depends on PCI
@@ -543,12 +509,11 @@ config PATA_NS87415
543 509
544 If unsure, say N. 510 If unsure, say N.
545 511
546config PATA_OPTI 512config PATA_OLDPIIX
547 tristate "OPTI621/6215 PATA support (Very Experimental)" 513 tristate "Intel PATA old PIIX support"
548 depends on PCI && EXPERIMENTAL 514 depends on PCI
549 help 515 help
550 This option enables full PIO support for the early Opti ATA 516 This option enables support for early PIIX PATA support.
551 controllers found on some old motherboards.
552 517
553 If unsure, say N. 518 If unsure, say N.
554 519
@@ -562,24 +527,6 @@ config PATA_OPTIDMA
562 527
563 If unsure, say N. 528 If unsure, say N.
564 529
565config PATA_PALMLD
566 tristate "Palm LifeDrive PATA support"
567 depends on MACH_PALMLD
568 help
569 This option enables support for Palm LifeDrive's internal ATA
570 port via the new ATA layer.
571
572 If unsure, say N.
573
574config PATA_PCMCIA
575 tristate "PCMCIA PATA support"
576 depends on PCMCIA
577 help
578 This option enables support for PCMCIA ATA interfaces, including
579 compact flash card adapters via the new ATA layer.
580
581 If unsure, say N.
582
583config PATA_PDC2027X 530config PATA_PDC2027X
584 tristate "Promise PATA 2027x support" 531 tristate "Promise PATA 2027x support"
585 depends on PCI 532 depends on PCI
@@ -597,12 +544,6 @@ config PATA_PDC_OLD
597 544
598 If unsure, say N. 545 If unsure, say N.
599 546
600config PATA_QDI
601 tristate "QDI VLB PATA support"
602 depends on ISA
603 help
604 Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
605
606config PATA_RADISYS 547config PATA_RADISYS
607 tristate "RADISYS 82600 PATA support (Experimental)" 548 tristate "RADISYS 82600 PATA support (Experimental)"
608 depends on PCI && EXPERIMENTAL 549 depends on PCI && EXPERIMENTAL
@@ -612,15 +553,6 @@ config PATA_RADISYS
612 553
613 If unsure, say N. 554 If unsure, say N.
614 555
615config PATA_RB532
616 tristate "RouterBoard 532 PATA CompactFlash support"
617 depends on MIKROTIK_RB532
618 help
619 This option enables support for the RouterBoard 532
620 PATA CompactFlash controller.
621
622 If unsure, say N.
623
624config PATA_RDC 556config PATA_RDC
625 tristate "RDC PATA support" 557 tristate "RDC PATA support"
626 depends on PCI 558 depends on PCI
@@ -631,21 +563,30 @@ config PATA_RDC
631 563
632 If unsure, say N. 564 If unsure, say N.
633 565
634config PATA_RZ1000 566config PATA_SC1200
635 tristate "PC Tech RZ1000 PATA support" 567 tristate "SC1200 PATA support"
636 depends on PCI 568 depends on PCI
637 help 569 help
638 This option enables basic support for the PC Tech RZ1000/1 570 This option enables support for the NatSemi/AMD SC1200 SoC
639 PATA controllers via the new ATA layer 571 companion chip used with the Geode processor family.
640 572
641 If unsure, say N. 573 If unsure, say N.
642 574
643config PATA_SC1200 575config PATA_SCC
644 tristate "SC1200 PATA support" 576 tristate "Toshiba's Cell Reference Set IDE support"
577 depends on PCI && PPC_CELLEB
578 help
579 This option enables support for the built-in IDE controller on
580 Toshiba Cell Reference Board.
581
582 If unsure, say N.
583
584config PATA_SCH
585 tristate "Intel SCH PATA support"
645 depends on PCI 586 depends on PCI
646 help 587 help
647 This option enables support for the NatSemi/AMD SC1200 SoC 588 This option enables support for Intel SCH PATA on the Intel
648 companion chip used with the Geode processor family. 589 SCH (US15W, US15L, UL11L) series host controllers.
649 590
650 If unsure, say N. 591 If unsure, say N.
651 592
@@ -683,6 +624,15 @@ config PATA_TOSHIBA
683 624
684 If unsure, say N. 625 If unsure, say N.
685 626
627config PATA_TRIFLEX
628 tristate "Compaq Triflex PATA support"
629 depends on PCI
630 help
631 Enable support for the Compaq 'Triflex' IDE controller as found
632 on many Compaq Pentium-Pro systems, via the new ATA layer.
633
634 If unsure, say N.
635
686config PATA_VIA 636config PATA_VIA
687 tristate "VIA PATA support" 637 tristate "VIA PATA support"
688 depends on PCI 638 depends on PCI
@@ -701,12 +651,99 @@ config PATA_WINBOND
701 651
702 If unsure, say N. 652 If unsure, say N.
703 653
704config PATA_WINBOND_VLB 654endif # ATA_BMDMA
705 tristate "Winbond W83759A VLB PATA support (Experimental)" 655
706 depends on ISA && EXPERIMENTAL 656comment "PIO-only SFF controllers"
657
658config PATA_AT32
659 tristate "Atmel AVR32 PATA support (Experimental)"
660 depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL
707 help 661 help
708 Support for the Winbond W83759A controller on Vesa Local Bus 662 This option enables support for the IDE devices on the
709 systems. 663 Atmel AT32AP platform.
664
665 If unsure, say N.
666
667config PATA_AT91
668 tristate "PATA support for AT91SAM9260"
669 depends on ARM && ARCH_AT91
670 help
671 This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
672
673 If unsure, say N.
674
675config PATA_CMD640_PCI
676 tristate "CMD640 PCI PATA support (Experimental)"
677 depends on PCI && EXPERIMENTAL
678 help
679 This option enables support for the CMD640 PCI IDE
680 interface chip. Only the primary channel is currently
681 supported.
682
683 If unsure, say N.
684
685config PATA_ISAPNP
686 tristate "ISA Plug and Play PATA support"
687 depends on ISAPNP
688 help
689 This option enables support for ISA plug & play ATA
690 controllers such as those found on old soundcards.
691
692 If unsure, say N.
693
694config PATA_IXP4XX_CF
695 tristate "IXP4XX Compact Flash support"
696 depends on ARCH_IXP4XX
697 help
698 This option enables support for a Compact Flash connected on
699 the ixp4xx expansion bus. This driver had been written for
700 Loft/Avila boards in mind but can work with others.
701
702 If unsure, say N.
703
704config PATA_MPIIX
705 tristate "Intel PATA MPIIX support"
706 depends on PCI
707 help
708 This option enables support for MPIIX PATA support.
709
710 If unsure, say N.
711
712config PATA_NS87410
713 tristate "Nat Semi NS87410 PATA support"
714 depends on PCI
715 help
716 This option enables support for the National Semiconductor
717 NS87410 PCI-IDE controller.
718
719 If unsure, say N.
720
721config PATA_OPTI
722 tristate "OPTI621/6215 PATA support (Very Experimental)"
723 depends on PCI && EXPERIMENTAL
724 help
725 This option enables full PIO support for the early Opti ATA
726 controllers found on some old motherboards.
727
728 If unsure, say N.
729
730config PATA_PALMLD
731 tristate "Palm LifeDrive PATA support"
732 depends on MACH_PALMLD
733 help
734 This option enables support for Palm LifeDrive's internal ATA
735 port via the new ATA layer.
736
737 If unsure, say N.
738
739config PATA_PCMCIA
740 tristate "PCMCIA PATA support"
741 depends on PCMCIA
742 help
743 This option enables support for PCMCIA ATA interfaces, including
744 compact flash card adapters via the new ATA layer.
745
746 If unsure, say N.
710 747
711config HAVE_PATA_PLATFORM 748config HAVE_PATA_PLATFORM
712 bool 749 bool
@@ -725,14 +762,6 @@ config PATA_PLATFORM
725 762
726 If unsure, say N. 763 If unsure, say N.
727 764
728config PATA_AT91
729 tristate "PATA support for AT91SAM9260"
730 depends on ARM && ARCH_AT91
731 help
732 This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
733
734 If unsure, say N.
735
736config PATA_OF_PLATFORM 765config PATA_OF_PLATFORM
737 tristate "OpenFirmware platform device PATA support" 766 tristate "OpenFirmware platform device PATA support"
738 depends on PATA_PLATFORM && PPC_OF 767 depends on PATA_PLATFORM && PPC_OF
@@ -743,69 +772,65 @@ config PATA_OF_PLATFORM
743 772
744 If unsure, say N. 773 If unsure, say N.
745 774
746config PATA_ICSIDE 775config PATA_QDI
747 tristate "Acorn ICS PATA support" 776 tristate "QDI VLB PATA support"
748 depends on ARM && ARCH_ACORN 777 depends on ISA
749 help 778 help
750 On Acorn systems, say Y here if you wish to use the ICS PATA 779 Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
751 interface card. This is not required for ICS partition support.
752 If you are unsure, say N to this.
753 780
754config PATA_IXP4XX_CF 781config PATA_RB532
755 tristate "IXP4XX Compact Flash support" 782 tristate "RouterBoard 532 PATA CompactFlash support"
756 depends on ARCH_IXP4XX 783 depends on MIKROTIK_RB532
757 help 784 help
758 This option enables support for a Compact Flash connected on 785 This option enables support for the RouterBoard 532
759 the ixp4xx expansion bus. This driver had been written for 786 PATA CompactFlash controller.
760 Loft/Avila boards in mind but can work with others.
761 787
762 If unsure, say N. 788 If unsure, say N.
763 789
764config PATA_OCTEON_CF 790config PATA_RZ1000
765 tristate "OCTEON Boot Bus Compact Flash support" 791 tristate "PC Tech RZ1000 PATA support"
766 depends on CPU_CAVIUM_OCTEON 792 depends on PCI
767 help 793 help
768 This option enables a polled compact flash driver for use with 794 This option enables basic support for the PC Tech RZ1000/1
769 compact flash cards attached to the OCTEON boot bus. 795 PATA controllers via the new ATA layer
770 796
771 If unsure, say N. 797 If unsure, say N.
772 798
773config PATA_SCC 799config PATA_WINBOND_VLB
774 tristate "Toshiba's Cell Reference Set IDE support" 800 tristate "Winbond W83759A VLB PATA support (Experimental)"
775 depends on PCI && PPC_CELLEB 801 depends on ISA && EXPERIMENTAL
776 help 802 help
777 This option enables support for the built-in IDE controller on 803 Support for the Winbond W83759A controller on Vesa Local Bus
778 Toshiba Cell Reference Board. 804 systems.
779 805
780 If unsure, say N. 806comment "Generic fallback / legacy drivers"
781 807
782config PATA_SCH 808config PATA_ACPI
783 tristate "Intel SCH PATA support" 809 tristate "ACPI firmware driver for PATA"
784 depends on PCI 810 depends on ATA_ACPI && ATA_BMDMA
785 help 811 help
786 This option enables support for Intel SCH PATA on the Intel 812 This option enables an ACPI method driver which drives
787 SCH (US15W, US15L, UL11L) series host controllers. 813 motherboard PATA controller interfaces through the ACPI
788 814 firmware in the BIOS. This driver can sometimes handle
789 If unsure, say N. 815 otherwise unsupported hardware.
790 816
791config PATA_BF54X 817config ATA_GENERIC
792 tristate "Blackfin 54x ATAPI support" 818 tristate "Generic ATA support"
793 depends on BF542 || BF548 || BF549 819 depends on PCI && ATA_BMDMA
794 help 820 help
795 This option enables support for the built-in ATAPI controller on 821 This option enables support for generic BIOS configured
796 Blackfin 54x family chips. 822 ATA controllers via the new ATA layer
797 823
798 If unsure, say N. 824 If unsure, say N.
799 825
800config PATA_MACIO 826config PATA_LEGACY
801 tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE" 827 tristate "Legacy ISA PATA support (Experimental)"
802 depends on PPC_PMAC 828 depends on (ISA || PCI) && EXPERIMENTAL
803 help 829 help
804 Most IDE capable PowerMacs have IDE busses driven by a variant 830 This option enables support for ISA/VLB/PCI bus legacy PATA
805 of this controller which is part of the Apple chipset used on 831 ports and allows them to be accessed via the new ATA layer.
806 most PowerMac models. Some models have multiple busses using
807 different chipsets, though generally, MacIO is one of them.
808 832
833 If unsure, say N.
809 834
810endif # ATA_SFF 835endif # ATA_SFF
811endif # ATA 836endif # ATA
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index d0a93c4ad3ec..7ef89d73df63 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -1,33 +1,39 @@
1 1
2obj-$(CONFIG_ATA) += libata.o 2obj-$(CONFIG_ATA) += libata.o
3 3
4# non-SFF interface
4obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o 5obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o
5obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o 6obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
6obj-$(CONFIG_SATA_SVW) += sata_svw.o 7obj-$(CONFIG_SATA_FSL) += sata_fsl.o
8obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
9obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
10
11# SFF w/ custom DMA
12obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
13obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
14obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
15obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
16obj-$(CONFIG_SATA_SX4) += sata_sx4.o
17
18# SFF SATA w/ BMDMA
7obj-$(CONFIG_ATA_PIIX) += ata_piix.o 19obj-$(CONFIG_ATA_PIIX) += ata_piix.o
20obj-$(CONFIG_SATA_MV) += sata_mv.o
21obj-$(CONFIG_SATA_NV) += sata_nv.o
8obj-$(CONFIG_SATA_PROMISE) += sata_promise.o 22obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
9obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
10obj-$(CONFIG_SATA_SIL) += sata_sil.o 23obj-$(CONFIG_SATA_SIL) += sata_sil.o
11obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
12obj-$(CONFIG_SATA_VIA) += sata_via.o
13obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
14obj-$(CONFIG_SATA_SIS) += sata_sis.o 24obj-$(CONFIG_SATA_SIS) += sata_sis.o
15obj-$(CONFIG_SATA_SX4) += sata_sx4.o 25obj-$(CONFIG_SATA_SVW) += sata_svw.o
16obj-$(CONFIG_SATA_NV) += sata_nv.o
17obj-$(CONFIG_SATA_ULI) += sata_uli.o 26obj-$(CONFIG_SATA_ULI) += sata_uli.o
18obj-$(CONFIG_SATA_MV) += sata_mv.o 27obj-$(CONFIG_SATA_VIA) += sata_via.o
19obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o 28obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
20obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
21obj-$(CONFIG_SATA_FSL) += sata_fsl.o
22obj-$(CONFIG_PATA_MACIO) += pata_macio.o
23 29
30# SFF PATA w/ BMDMA
24obj-$(CONFIG_PATA_ALI) += pata_ali.o 31obj-$(CONFIG_PATA_ALI) += pata_ali.o
25obj-$(CONFIG_PATA_AMD) += pata_amd.o 32obj-$(CONFIG_PATA_AMD) += pata_amd.o
26obj-$(CONFIG_PATA_ARTOP) += pata_artop.o 33obj-$(CONFIG_PATA_ARTOP) += pata_artop.o
27obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o
28obj-$(CONFIG_PATA_AT32) += pata_at32.o
29obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o 34obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o
30obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o 35obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o
36obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
31obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o 37obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o
32obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o 38obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o
33obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o 39obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o
@@ -39,47 +45,50 @@ obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o
39obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o 45obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
40obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o 46obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
41obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o 47obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
42obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o 48obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
43obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
44obj-$(CONFIG_PATA_IT8213) += pata_it8213.o 49obj-$(CONFIG_PATA_IT8213) += pata_it8213.o
50obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
45obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o 51obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
52obj-$(CONFIG_PATA_MACIO) += pata_macio.o
53obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
46obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o 54obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
47obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o 55obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o
48obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
49obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o 56obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o
50obj-$(CONFIG_PATA_OPTI) += pata_opti.o
51obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
52obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
53obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
54obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
55obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o 57obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o
56obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o 58obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
57obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
58obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o 59obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o
59obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o 60obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
60obj-$(CONFIG_PATA_QDI) += pata_qdi.o
61obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o 61obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
62obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
63obj-$(CONFIG_PATA_RDC) += pata_rdc.o 62obj-$(CONFIG_PATA_RDC) += pata_rdc.o
64obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
65obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o 63obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
64obj-$(CONFIG_PATA_SCC) += pata_scc.o
65obj-$(CONFIG_PATA_SCH) += pata_sch.o
66obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o 66obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
67obj-$(CONFIG_PATA_SIL680) += pata_sil680.o 67obj-$(CONFIG_PATA_SIL680) += pata_sil680.o
68obj-$(CONFIG_PATA_SIS) += pata_sis.o
68obj-$(CONFIG_PATA_TOSHIBA) += pata_piccolo.o 69obj-$(CONFIG_PATA_TOSHIBA) += pata_piccolo.o
70obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
69obj-$(CONFIG_PATA_VIA) += pata_via.o 71obj-$(CONFIG_PATA_VIA) += pata_via.o
70obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o 72obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
71obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o 73
72obj-$(CONFIG_PATA_SIS) += pata_sis.o 74# SFF PIO only
73obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o 75obj-$(CONFIG_PATA_AT32) += pata_at32.o
76obj-$(CONFIG_PATA_AT91) += pata_at91.o
77obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
78obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
74obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o 79obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o
75obj-$(CONFIG_PATA_SCC) += pata_scc.o 80obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
76obj-$(CONFIG_PATA_SCH) += pata_sch.o 81obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
77obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o 82obj-$(CONFIG_PATA_OPTI) += pata_opti.o
78obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o 83obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
84obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o
79obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o 85obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
80obj-$(CONFIG_PATA_AT91) += pata_at91.o
81obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o 86obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
82obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o 87obj-$(CONFIG_PATA_QDI) += pata_qdi.o
88obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
89obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
90obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o
91
83# Should be last but two libata driver 92# Should be last but two libata driver
84obj-$(CONFIG_PATA_ACPI) += pata_acpi.o 93obj-$(CONFIG_PATA_ACPI) += pata_acpi.o
85# Should be last but one libata driver 94# Should be last but one libata driver
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 33fb614f9784..573158a9668d 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -155,7 +155,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
155 return rc; 155 return rc;
156 pcim_pin_device(dev); 156 pcim_pin_device(dev);
157 } 157 }
158 return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL, 0); 158 return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, NULL, 0);
159} 159}
160 160
161static struct pci_device_id ata_generic[] = { 161static struct pci_device_id ata_generic[] = {
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index ec52fc618763..7409f98d2ae6 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1589,7 +1589,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
1589 hpriv->map = piix_init_sata_map(pdev, port_info, 1589 hpriv->map = piix_init_sata_map(pdev, port_info,
1590 piix_map_db_table[ent->driver_data]); 1590 piix_map_db_table[ent->driver_data]);
1591 1591
1592 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 1592 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
1593 if (rc) 1593 if (rc)
1594 return rc; 1594 return rc;
1595 host->private_data = hpriv; 1595 host->private_data = hpriv;
@@ -1626,7 +1626,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
1626 host->flags |= ATA_HOST_PARALLEL_SCAN; 1626 host->flags |= ATA_HOST_PARALLEL_SCAN;
1627 1627
1628 pci_set_master(pdev); 1628 pci_set_master(pdev);
1629 return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht); 1629 return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &piix_sht);
1630} 1630}
1631 1631
1632static void piix_remove_one(struct pci_dev *pdev) 1632static void piix_remove_one(struct pci_dev *pdev)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c47373f01f89..06b7e49e039c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -160,6 +160,10 @@ int libata_allow_tpm = 0;
160module_param_named(allow_tpm, libata_allow_tpm, int, 0444); 160module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
161MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)"); 161MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
162 162
163static int atapi_an;
164module_param(atapi_an, int, 0444);
165MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
166
163MODULE_AUTHOR("Jeff Garzik"); 167MODULE_AUTHOR("Jeff Garzik");
164MODULE_DESCRIPTION("Library module for ATA devices"); 168MODULE_DESCRIPTION("Library module for ATA devices");
165MODULE_LICENSE("GPL"); 169MODULE_LICENSE("GPL");
@@ -2122,6 +2126,14 @@ retry:
2122 goto err_out; 2126 goto err_out;
2123 } 2127 }
2124 2128
2129 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
2130 ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, "
2131 "class=%d may_fallback=%d tried_spinup=%d\n",
2132 class, may_fallback, tried_spinup);
2133 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
2134 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
2135 }
2136
2125 /* Falling back doesn't make sense if ID data was read 2137 /* Falling back doesn't make sense if ID data was read
2126 * successfully at least once. 2138 * successfully at least once.
2127 */ 2139 */
@@ -2510,7 +2522,8 @@ int ata_dev_configure(struct ata_device *dev)
2510 * to enable ATAPI AN to discern between PHY status 2522 * to enable ATAPI AN to discern between PHY status
2511 * changed notifications and ATAPI ANs. 2523 * changed notifications and ATAPI ANs.
2512 */ 2524 */
2513 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2525 if (atapi_an &&
2526 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2514 (!sata_pmp_attached(ap) || 2527 (!sata_pmp_attached(ap) ||
2515 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2528 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2516 unsigned int err_mask; 2529 unsigned int err_mask;
@@ -6372,6 +6385,7 @@ static int __init ata_parse_force_one(char **cur,
6372 { "3.0Gbps", .spd_limit = 2 }, 6385 { "3.0Gbps", .spd_limit = 2 },
6373 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 6386 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6374 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 6387 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6388 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6375 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 6389 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6376 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 6390 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6377 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 6391 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 19ddf924944f..efa4a18cfb9d 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -63,7 +63,6 @@ const struct ata_port_operations ata_sff_port_ops = {
63 .sff_tf_read = ata_sff_tf_read, 63 .sff_tf_read = ata_sff_tf_read,
64 .sff_exec_command = ata_sff_exec_command, 64 .sff_exec_command = ata_sff_exec_command,
65 .sff_data_xfer = ata_sff_data_xfer, 65 .sff_data_xfer = ata_sff_data_xfer,
66 .sff_irq_clear = ata_sff_irq_clear,
67 .sff_drain_fifo = ata_sff_drain_fifo, 66 .sff_drain_fifo = ata_sff_drain_fifo,
68 67
69 .lost_interrupt = ata_sff_lost_interrupt, 68 .lost_interrupt = ata_sff_lost_interrupt,
@@ -395,33 +394,12 @@ void ata_sff_irq_on(struct ata_port *ap)
395 ata_sff_set_devctl(ap, ap->ctl); 394 ata_sff_set_devctl(ap, ap->ctl);
396 ata_wait_idle(ap); 395 ata_wait_idle(ap);
397 396
398 ap->ops->sff_irq_clear(ap); 397 if (ap->ops->sff_irq_clear)
398 ap->ops->sff_irq_clear(ap);
399} 399}
400EXPORT_SYMBOL_GPL(ata_sff_irq_on); 400EXPORT_SYMBOL_GPL(ata_sff_irq_on);
401 401
402/** 402/**
403 * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt.
404 * @ap: Port associated with this ATA transaction.
405 *
406 * Clear interrupt and error flags in DMA status register.
407 *
408 * May be used as the irq_clear() entry in ata_port_operations.
409 *
410 * LOCKING:
411 * spin_lock_irqsave(host lock)
412 */
413void ata_sff_irq_clear(struct ata_port *ap)
414{
415 void __iomem *mmio = ap->ioaddr.bmdma_addr;
416
417 if (!mmio)
418 return;
419
420 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
421}
422EXPORT_SYMBOL_GPL(ata_sff_irq_clear);
423
424/**
425 * ata_sff_tf_load - send taskfile registers to host controller 403 * ata_sff_tf_load - send taskfile registers to host controller
426 * @ap: Port to which output is sent 404 * @ap: Port to which output is sent
427 * @tf: ATA taskfile register set 405 * @tf: ATA taskfile register set
@@ -820,11 +798,15 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
820 case ATAPI_PROT_NODATA: 798 case ATAPI_PROT_NODATA:
821 ap->hsm_task_state = HSM_ST_LAST; 799 ap->hsm_task_state = HSM_ST_LAST;
822 break; 800 break;
801#ifdef CONFIG_ATA_BMDMA
823 case ATAPI_PROT_DMA: 802 case ATAPI_PROT_DMA:
824 ap->hsm_task_state = HSM_ST_LAST; 803 ap->hsm_task_state = HSM_ST_LAST;
825 /* initiate bmdma */ 804 /* initiate bmdma */
826 ap->ops->bmdma_start(qc); 805 ap->ops->bmdma_start(qc);
827 break; 806 break;
807#endif /* CONFIG_ATA_BMDMA */
808 default:
809 BUG();
828 } 810 }
829} 811}
830 812
@@ -1491,27 +1473,27 @@ bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
1491} 1473}
1492EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); 1474EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
1493 1475
1494/** 1476static unsigned int ata_sff_idle_irq(struct ata_port *ap)
1495 * ata_sff_host_intr - Handle host interrupt for given (port, task)
1496 * @ap: Port on which interrupt arrived (possibly...)
1497 * @qc: Taskfile currently active in engine
1498 *
1499 * Handle host interrupt for given queued command. Currently,
1500 * only DMA interrupts are handled. All other commands are
1501 * handled via polling with interrupts disabled (nIEN bit).
1502 *
1503 * LOCKING:
1504 * spin_lock_irqsave(host lock)
1505 *
1506 * RETURNS:
1507 * One if interrupt was handled, zero if not (shared irq).
1508 */
1509unsigned int ata_sff_host_intr(struct ata_port *ap,
1510 struct ata_queued_cmd *qc)
1511{ 1477{
1512 struct ata_eh_info *ehi = &ap->link.eh_info; 1478 ap->stats.idle_irq++;
1513 u8 status, host_stat = 0; 1479
1514 bool bmdma_stopped = false; 1480#ifdef ATA_IRQ_TRAP
1481 if ((ap->stats.idle_irq % 1000) == 0) {
1482 ap->ops->sff_check_status(ap);
1483 if (ap->ops->sff_irq_clear)
1484 ap->ops->sff_irq_clear(ap);
1485 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
1486 return 1;
1487 }
1488#endif
1489 return 0; /* irq not handled */
1490}
1491
1492static unsigned int __ata_sff_port_intr(struct ata_port *ap,
1493 struct ata_queued_cmd *qc,
1494 bool hsmv_on_idle)
1495{
1496 u8 status;
1515 1497
1516 VPRINTK("ata%u: protocol %d task_state %d\n", 1498 VPRINTK("ata%u: protocol %d task_state %d\n",
1517 ap->print_id, qc->tf.protocol, ap->hsm_task_state); 1499 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
@@ -1528,90 +1510,56 @@ unsigned int ata_sff_host_intr(struct ata_port *ap,
1528 * need to check ata_is_atapi(qc->tf.protocol) again. 1510 * need to check ata_is_atapi(qc->tf.protocol) again.
1529 */ 1511 */
1530 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 1512 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1531 goto idle_irq; 1513 return ata_sff_idle_irq(ap);
1532 break;
1533 case HSM_ST_LAST:
1534 if (qc->tf.protocol == ATA_PROT_DMA ||
1535 qc->tf.protocol == ATAPI_PROT_DMA) {
1536 /* check status of DMA engine */
1537 host_stat = ap->ops->bmdma_status(ap);
1538 VPRINTK("ata%u: host_stat 0x%X\n",
1539 ap->print_id, host_stat);
1540
1541 /* if it's not our irq... */
1542 if (!(host_stat & ATA_DMA_INTR))
1543 goto idle_irq;
1544
1545 /* before we do anything else, clear DMA-Start bit */
1546 ap->ops->bmdma_stop(qc);
1547 bmdma_stopped = true;
1548
1549 if (unlikely(host_stat & ATA_DMA_ERR)) {
1550 /* error when transfering data to/from memory */
1551 qc->err_mask |= AC_ERR_HOST_BUS;
1552 ap->hsm_task_state = HSM_ST_ERR;
1553 }
1554 }
1555 break; 1514 break;
1556 case HSM_ST: 1515 case HSM_ST:
1516 case HSM_ST_LAST:
1557 break; 1517 break;
1558 default: 1518 default:
1559 goto idle_irq; 1519 return ata_sff_idle_irq(ap);
1560 } 1520 }
1561 1521
1562
1563 /* check main status, clearing INTRQ if needed */ 1522 /* check main status, clearing INTRQ if needed */
1564 status = ata_sff_irq_status(ap); 1523 status = ata_sff_irq_status(ap);
1565 if (status & ATA_BUSY) { 1524 if (status & ATA_BUSY) {
1566 if (bmdma_stopped) { 1525 if (hsmv_on_idle) {
1567 /* BMDMA engine is already stopped, we're screwed */ 1526 /* BMDMA engine is already stopped, we're screwed */
1568 qc->err_mask |= AC_ERR_HSM; 1527 qc->err_mask |= AC_ERR_HSM;
1569 ap->hsm_task_state = HSM_ST_ERR; 1528 ap->hsm_task_state = HSM_ST_ERR;
1570 } else 1529 } else
1571 goto idle_irq; 1530 return ata_sff_idle_irq(ap);
1572 } 1531 }
1573 1532
1574 /* clear irq events */ 1533 /* clear irq events */
1575 ap->ops->sff_irq_clear(ap); 1534 if (ap->ops->sff_irq_clear)
1535 ap->ops->sff_irq_clear(ap);
1576 1536
1577 ata_sff_hsm_move(ap, qc, status, 0); 1537 ata_sff_hsm_move(ap, qc, status, 0);
1578 1538
1579 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
1580 qc->tf.protocol == ATAPI_PROT_DMA))
1581 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
1582
1583 return 1; /* irq handled */ 1539 return 1; /* irq handled */
1584
1585idle_irq:
1586 ap->stats.idle_irq++;
1587
1588#ifdef ATA_IRQ_TRAP
1589 if ((ap->stats.idle_irq % 1000) == 0) {
1590 ap->ops->sff_check_status(ap);
1591 ap->ops->sff_irq_clear(ap);
1592 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
1593 return 1;
1594 }
1595#endif
1596 return 0; /* irq not handled */
1597} 1540}
1598EXPORT_SYMBOL_GPL(ata_sff_host_intr);
1599 1541
1600/** 1542/**
1601 * ata_sff_interrupt - Default ATA host interrupt handler 1543 * ata_sff_port_intr - Handle SFF port interrupt
1602 * @irq: irq line (unused) 1544 * @ap: Port on which interrupt arrived (possibly...)
1603 * @dev_instance: pointer to our ata_host information structure 1545 * @qc: Taskfile currently active in engine
1604 * 1546 *
1605 * Default interrupt handler for PCI IDE devices. Calls 1547 * Handle port interrupt for given queued command.
1606 * ata_sff_host_intr() for each port that is not disabled.
1607 * 1548 *
1608 * LOCKING: 1549 * LOCKING:
1609 * Obtains host lock during operation. 1550 * spin_lock_irqsave(host lock)
1610 * 1551 *
1611 * RETURNS: 1552 * RETURNS:
1612 * IRQ_NONE or IRQ_HANDLED. 1553 * One if interrupt was handled, zero if not (shared irq).
1613 */ 1554 */
1614irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) 1555unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1556{
1557 return __ata_sff_port_intr(ap, qc, false);
1558}
1559EXPORT_SYMBOL_GPL(ata_sff_port_intr);
1560
1561static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
1562 unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
1615{ 1563{
1616 struct ata_host *host = dev_instance; 1564 struct ata_host *host = dev_instance;
1617 bool retried = false; 1565 bool retried = false;
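ata_sff_host_intr() is gone: the taskfile-only half survives as ata_sff_port_intr(), a thin wrapper around __ata_sff_port_intr() with hsmv_on_idle fixed to false, the BMDMA half reappears below as ata_bmdma_port_intr(), and __ata_sff_interrupt() lets both top-level handlers share the host-walking loop by taking the per-port handler as a parameter. A driver that dispatches interrupts itself now calls the per-port helper directly, roughly as sketched here; the function name is made up, and the host lock is assumed held as the LOCKING notes require:

	/* sketch: per-port dispatch from a driver's own interrupt path */
	static unsigned int foo_port_intr(struct ata_port *ap)
	{
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

		/* only commands that are not being polled belong to the irq path */
		if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
			return ata_sff_port_intr(ap, qc);

		return 0;	/* nothing active, not our interrupt */
	}
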
@@ -1631,7 +1579,7 @@ retry:
1631 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1579 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1632 if (qc) { 1580 if (qc) {
1633 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) 1581 if (!(qc->tf.flags & ATA_TFLAG_POLLING))
1634 handled |= ata_sff_host_intr(ap, qc); 1582 handled |= port_intr(ap, qc);
1635 else 1583 else
1636 polling |= 1 << i; 1584 polling |= 1 << i;
1637 } else 1585 } else
@@ -1658,7 +1606,8 @@ retry:
1658 1606
1659 if (idle & (1 << i)) { 1607 if (idle & (1 << i)) {
1660 ap->ops->sff_check_status(ap); 1608 ap->ops->sff_check_status(ap);
1661 ap->ops->sff_irq_clear(ap); 1609 if (ap->ops->sff_irq_clear)
1610 ap->ops->sff_irq_clear(ap);
1662 } else { 1611 } else {
1663 /* clear INTRQ and check if BUSY cleared */ 1612 /* clear INTRQ and check if BUSY cleared */
1664 if (!(ap->ops->sff_check_status(ap) & ATA_BUSY)) 1613 if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
@@ -1680,6 +1629,25 @@ retry:
1680 1629
1681 return IRQ_RETVAL(handled); 1630 return IRQ_RETVAL(handled);
1682} 1631}
1632
1633/**
1634 * ata_sff_interrupt - Default SFF ATA host interrupt handler
1635 * @irq: irq line (unused)
1636 * @dev_instance: pointer to our ata_host information structure
1637 *
1638 * Default interrupt handler for PCI IDE devices. Calls
1639 * ata_sff_port_intr() for each port that is not disabled.
1640 *
1641 * LOCKING:
1642 * Obtains host lock during operation.
1643 *
1644 * RETURNS:
1645 * IRQ_NONE or IRQ_HANDLED.
1646 */
1647irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1648{
1649 return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
1650}
1683EXPORT_SYMBOL_GPL(ata_sff_interrupt); 1651EXPORT_SYMBOL_GPL(ata_sff_interrupt);
1684 1652
1685/** 1653/**
@@ -1717,7 +1685,7 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
1717 status); 1685 status);
1718 /* Run the host interrupt logic as if the interrupt had not been 1686 /* Run the host interrupt logic as if the interrupt had not been
1719 lost */ 1687 lost */
1720 ata_sff_host_intr(ap, qc); 1688 ata_sff_port_intr(ap, qc);
1721} 1689}
1722EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt); 1690EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
1723 1691
@@ -1744,7 +1712,8 @@ void ata_sff_freeze(struct ata_port *ap)
1744 */ 1712 */
1745 ap->ops->sff_check_status(ap); 1713 ap->ops->sff_check_status(ap);
1746 1714
1747 ap->ops->sff_irq_clear(ap); 1715 if (ap->ops->sff_irq_clear)
1716 ap->ops->sff_irq_clear(ap);
1748} 1717}
1749EXPORT_SYMBOL_GPL(ata_sff_freeze); 1718EXPORT_SYMBOL_GPL(ata_sff_freeze);
1750 1719
@@ -1761,7 +1730,8 @@ void ata_sff_thaw(struct ata_port *ap)
1761{ 1730{
1762 /* clear & re-enable interrupts */ 1731 /* clear & re-enable interrupts */
1763 ap->ops->sff_check_status(ap); 1732 ap->ops->sff_check_status(ap);
1764 ap->ops->sff_irq_clear(ap); 1733 if (ap->ops->sff_irq_clear)
1734 ap->ops->sff_irq_clear(ap);
1765 ata_sff_irq_on(ap); 1735 ata_sff_irq_on(ap);
1766} 1736}
1767EXPORT_SYMBOL_GPL(ata_sff_thaw); 1737EXPORT_SYMBOL_GPL(ata_sff_thaw);
@@ -2349,13 +2319,13 @@ int ata_pci_sff_init_host(struct ata_host *host)
2349EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); 2319EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2350 2320
2351/** 2321/**
2352 * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host 2322 * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
2353 * @pdev: target PCI device 2323 * @pdev: target PCI device
2354 * @ppi: array of port_info, must be enough for two ports 2324 * @ppi: array of port_info, must be enough for two ports
2355 * @r_host: out argument for the initialized ATA host 2325 * @r_host: out argument for the initialized ATA host
2356 * 2326 *
2357 * Helper to allocate ATA host for @pdev, acquire all native PCI 2327 * Helper to allocate PIO-only SFF ATA host for @pdev, acquire
2358 * resources and initialize it accordingly in one go. 2328 * all PCI resources and initialize it accordingly in one go.
2359 * 2329 *
2360 * LOCKING: 2330 * LOCKING:
2361 * Inherited from calling layer (may sleep). 2331 * Inherited from calling layer (may sleep).
@@ -2385,9 +2355,6 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2385 if (rc) 2355 if (rc)
2386 goto err_out; 2356 goto err_out;
2387 2357
2388 /* init DMA related stuff */
2389 ata_pci_bmdma_init(host);
2390
2391 devres_remove_group(&pdev->dev, NULL); 2358 devres_remove_group(&pdev->dev, NULL);
2392 *r_host = host; 2359 *r_host = host;
2393 return 0; 2360 return 0;
@@ -2492,8 +2459,21 @@ out:
2492} 2459}
2493EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); 2460EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
2494 2461
2462static const struct ata_port_info *ata_sff_find_valid_pi(
2463 const struct ata_port_info * const *ppi)
2464{
2465 int i;
2466
2467 /* look up the first valid port_info */
2468 for (i = 0; i < 2 && ppi[i]; i++)
2469 if (ppi[i]->port_ops != &ata_dummy_port_ops)
2470 return ppi[i];
2471
2472 return NULL;
2473}
2474
2495/** 2475/**
2496 * ata_pci_sff_init_one - Initialize/register PCI IDE host controller 2476 * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
2497 * @pdev: Controller to be initialized 2477 * @pdev: Controller to be initialized
2498 * @ppi: array of port_info, must be enough for two ports 2478 * @ppi: array of port_info, must be enough for two ports
2499 * @sht: scsi_host_template to use when registering the host 2479 * @sht: scsi_host_template to use when registering the host
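The lookup of the first usable entry in the port_info array is factored out into ata_sff_find_valid_pi() so that the PIO-only and BMDMA probe helpers can share it. For reference, a caller typically hands in an array like the following; the masks are purely illustrative, and the ata_dummy_port_info slot for a disabled second channel is the same trick several of the converted drivers below use (pata_atiixp, pata_piccolo):

	/* sketch: port_info array passed to ata_pci_{sff,bmdma}_init_one() */
	static const struct ata_port_info foo_port_info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &ata_bmdma_port_ops,
	};
	static const struct ata_port_info *foo_ppi[] = { &foo_port_info,
							 &ata_dummy_port_info };
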
@@ -2502,11 +2482,7 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
2502 * 2482 *
2503 * This is a helper function which can be called from a driver's 2483 * This is a helper function which can be called from a driver's
2504 * xxx_init_one() probe function if the hardware uses traditional 2484 * xxx_init_one() probe function if the hardware uses traditional
2505 * IDE taskfile registers. 2485 * IDE taskfile registers and is PIO only.
2506 *
2507 * This function calls pci_enable_device(), reserves its register
2508 * regions, sets the dma mask, enables bus master mode, and calls
2509 * ata_device_add()
2510 * 2486 *
2511 * ASSUMPTION: 2487 * ASSUMPTION:
2512 * Nobody makes a single channel controller that appears solely as 2488 * Nobody makes a single channel controller that appears solely as
@@ -2523,20 +2499,13 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
2523 struct scsi_host_template *sht, void *host_priv, int hflag) 2499 struct scsi_host_template *sht, void *host_priv, int hflag)
2524{ 2500{
2525 struct device *dev = &pdev->dev; 2501 struct device *dev = &pdev->dev;
2526 const struct ata_port_info *pi = NULL; 2502 const struct ata_port_info *pi;
2527 struct ata_host *host = NULL; 2503 struct ata_host *host = NULL;
2528 int i, rc; 2504 int rc;
2529 2505
2530 DPRINTK("ENTER\n"); 2506 DPRINTK("ENTER\n");
2531 2507
2532 /* look up the first valid port_info */ 2508 pi = ata_sff_find_valid_pi(ppi);
2533 for (i = 0; i < 2 && ppi[i]; i++) {
2534 if (ppi[i]->port_ops != &ata_dummy_port_ops) {
2535 pi = ppi[i];
2536 break;
2537 }
2538 }
2539
2540 if (!pi) { 2509 if (!pi) {
2541 dev_printk(KERN_ERR, &pdev->dev, 2510 dev_printk(KERN_ERR, &pdev->dev,
2542 "no valid port_info specified\n"); 2511 "no valid port_info specified\n");
@@ -2557,7 +2526,6 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
2557 host->private_data = host_priv; 2526 host->private_data = host_priv;
2558 host->flags |= hflag; 2527 host->flags |= hflag;
2559 2528
2560 pci_set_master(pdev);
2561 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); 2529 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2562out: 2530out:
2563 if (rc == 0) 2531 if (rc == 0)
@@ -2571,6 +2539,12 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
2571 2539
2572#endif /* CONFIG_PCI */ 2540#endif /* CONFIG_PCI */
2573 2541
2542/*
2543 * BMDMA support
2544 */
2545
2546#ifdef CONFIG_ATA_BMDMA
2547
2574const struct ata_port_operations ata_bmdma_port_ops = { 2548const struct ata_port_operations ata_bmdma_port_ops = {
2575 .inherits = &ata_sff_port_ops, 2549 .inherits = &ata_sff_port_ops,
2576 2550
@@ -2580,6 +2554,7 @@ const struct ata_port_operations ata_bmdma_port_ops = {
2580 .qc_prep = ata_bmdma_qc_prep, 2554 .qc_prep = ata_bmdma_qc_prep,
2581 .qc_issue = ata_bmdma_qc_issue, 2555 .qc_issue = ata_bmdma_qc_issue,
2582 2556
2557 .sff_irq_clear = ata_bmdma_irq_clear,
2583 .bmdma_setup = ata_bmdma_setup, 2558 .bmdma_setup = ata_bmdma_setup,
2584 .bmdma_start = ata_bmdma_start, 2559 .bmdma_start = ata_bmdma_start,
2585 .bmdma_stop = ata_bmdma_stop, 2560 .bmdma_stop = ata_bmdma_stop,
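From this point on, the BMDMA half of the library is compiled only under CONFIG_ATA_BMDMA, and ata_bmdma_port_ops now carries .sff_irq_clear = ata_bmdma_irq_clear itself, so only BMDMA-capable ports ever poke the DMA status register. BMDMA drivers keep inheriting exactly as before and pick the hook up for free; a rough sketch with hypothetical timing callbacks:

	/* sketch: a BMDMA driver inherits the irq_clear hook automatically */
	static void foo_set_piomode(struct ata_port *ap, struct ata_device *adev);
	static void foo_set_dmamode(struct ata_port *ap, struct ata_device *adev);

	static struct ata_port_operations foo_bmdma_port_ops = {
		.inherits	= &ata_bmdma_port_ops,	/* brings ata_bmdma_irq_clear */
		.set_piomode	= foo_set_piomode,	/* hypothetical */
		.set_dmamode	= foo_set_dmamode,	/* hypothetical */
	};
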
@@ -2804,6 +2779,75 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2804EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue); 2779EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
2805 2780
2806/** 2781/**
2782 * ata_bmdma_port_intr - Handle BMDMA port interrupt
2783 * @ap: Port on which interrupt arrived (possibly...)
2784 * @qc: Taskfile currently active in engine
2785 *
2786 * Handle port interrupt for given queued command.
2787 *
2788 * LOCKING:
2789 * spin_lock_irqsave(host lock)
2790 *
2791 * RETURNS:
2792 * One if interrupt was handled, zero if not (shared irq).
2793 */
2794unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
2795{
2796 struct ata_eh_info *ehi = &ap->link.eh_info;
2797 u8 host_stat = 0;
2798 bool bmdma_stopped = false;
2799 unsigned int handled;
2800
2801 if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
2802 /* check status of DMA engine */
2803 host_stat = ap->ops->bmdma_status(ap);
2804 VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
2805
2806 /* if it's not our irq... */
2807 if (!(host_stat & ATA_DMA_INTR))
2808 return ata_sff_idle_irq(ap);
2809
2810 /* before we do anything else, clear DMA-Start bit */
2811 ap->ops->bmdma_stop(qc);
2812 bmdma_stopped = true;
2813
2814 if (unlikely(host_stat & ATA_DMA_ERR)) {
2815 			/* error when transferring data to/from memory */
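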
2816 qc->err_mask |= AC_ERR_HOST_BUS;
2817 ap->hsm_task_state = HSM_ST_ERR;
2818 }
2819 }
2820
2821 handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
2822
2823 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
2824 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2825
2826 return handled;
2827}
2828EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
2829
2830/**
2831 * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
2832 * @irq: irq line (unused)
2833 * @dev_instance: pointer to our ata_host information structure
2834 *
2835 * Default interrupt handler for PCI IDE devices. Calls
2836 * ata_bmdma_port_intr() for each port that is not disabled.
2837 *
2838 * LOCKING:
2839 * Obtains host lock during operation.
2840 *
2841 * RETURNS:
2842 * IRQ_NONE or IRQ_HANDLED.
2843 */
2844irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
2845{
2846 return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
2847}
2848EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
2849
2850/**
2807 * ata_bmdma_error_handler - Stock error handler for BMDMA controller 2851 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
2808 * @ap: port to handle error for 2852 * @ap: port to handle error for
2809 * 2853 *
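ata_bmdma_port_intr() reinstates the DMA-engine checks that were cut out of the SFF path above, and ata_bmdma_interrupt() is the matching top-level handler built on __ata_sff_interrupt(). The practical consequence for drivers that activate their host by hand is visible throughout the rest of this patch: a BMDMA-capable host must now register the BMDMA handler. A condensed sketch, with hypothetical names and the headers assumed as before:

	/* sketch: pick the right top-level handler when activating the host */
	static int foo_activate(struct ata_host *host, struct pci_dev *pdev,
				struct scsi_host_template *sht)
	{
		/* BMDMA-capable host: ata_bmdma_interrupt, not ata_sff_interrupt */
		return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
					 IRQF_SHARED, sht);
	}
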
@@ -2848,7 +2892,8 @@ void ata_bmdma_error_handler(struct ata_port *ap)
2848 /* if we're gonna thaw, make sure IRQ is clear */ 2892 /* if we're gonna thaw, make sure IRQ is clear */
2849 if (thaw) { 2893 if (thaw) {
2850 ap->ops->sff_check_status(ap); 2894 ap->ops->sff_check_status(ap);
2851 ap->ops->sff_irq_clear(ap); 2895 if (ap->ops->sff_irq_clear)
2896 ap->ops->sff_irq_clear(ap);
2852 } 2897 }
2853 } 2898 }
2854 2899
@@ -2882,6 +2927,28 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
2882EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); 2927EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
2883 2928
2884/** 2929/**
2930 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
2931 * @ap: Port associated with this ATA transaction.
2932 *
2933 * Clear interrupt and error flags in DMA status register.
2934 *
2935 * May be used as the irq_clear() entry in ata_port_operations.
2936 *
2937 * LOCKING:
2938 * spin_lock_irqsave(host lock)
2939 */
2940void ata_bmdma_irq_clear(struct ata_port *ap)
2941{
2942 void __iomem *mmio = ap->ioaddr.bmdma_addr;
2943
2944 if (!mmio)
2945 return;
2946
2947 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
2948}
2949EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
2950
2951/**
2885 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction 2952 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2886 * @qc: Info associated with this ATA transaction. 2953 * @qc: Info associated with this ATA transaction.
2887 * 2954 *
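ata_bmdma_irq_clear() is the old ata_sff_irq_clear() body verbatim, renamed and relocated into the BMDMA section; drivers that called the SFF name directly (sata_sil, sata_nv below) are switched over to it. A driver that wraps the clear in its own logic keeps the same shape, roughly as in this sketch modelled on the nv_adma_irq_clear() change further down; the private-data type and flag are made up for illustration:

	/* sketch: driver-private irq_clear falling back to the BMDMA helper */
	struct foo_port_priv {
		bool legacy_mode;	/* hypothetical */
	};

	static void foo_irq_clear(struct ata_port *ap)
	{
		struct foo_port_priv *pp = ap->private_data;

		if (pp->legacy_mode) {
			ata_bmdma_irq_clear(ap);
			return;
		}
		/* controller-specific acknowledgement would go here */
	}
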
@@ -3137,7 +3204,100 @@ void ata_pci_bmdma_init(struct ata_host *host)
3137} 3204}
3138EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); 3205EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
3139 3206
3207/**
3208 * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
3209 * @pdev: target PCI device
3210 * @ppi: array of port_info, must be enough for two ports
3211 * @r_host: out argument for the initialized ATA host
3212 *
3213 * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
3214 * resources and initialize it accordingly in one go.
3215 *
3216 * LOCKING:
3217 * Inherited from calling layer (may sleep).
3218 *
3219 * RETURNS:
3220 * 0 on success, -errno otherwise.
3221 */
3222int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
3223 const struct ata_port_info * const * ppi,
3224 struct ata_host **r_host)
3225{
3226 int rc;
3227
3228 rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
3229 if (rc)
3230 return rc;
3231
3232 ata_pci_bmdma_init(*r_host);
3233 return 0;
3234}
3235EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
3236
3237/**
3238 * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
3239 * @pdev: Controller to be initialized
3240 * @ppi: array of port_info, must be enough for two ports
3241 * @sht: scsi_host_template to use when registering the host
3242 * @host_priv: host private_data
3243 * @hflags: host flags
3244 *
3245 * This function is similar to ata_pci_sff_init_one() but also
3246 * takes care of BMDMA initialization.
3247 *
3248 * LOCKING:
3249 * Inherited from PCI layer (may sleep).
3250 *
3251 * RETURNS:
3252 * Zero on success, negative errno-based value on error.
3253 */
3254int ata_pci_bmdma_init_one(struct pci_dev *pdev,
3255 const struct ata_port_info * const * ppi,
3256 struct scsi_host_template *sht, void *host_priv,
3257 int hflags)
3258{
3259 struct device *dev = &pdev->dev;
3260 const struct ata_port_info *pi;
3261 struct ata_host *host = NULL;
3262 int rc;
3263
3264 DPRINTK("ENTER\n");
3265
3266 pi = ata_sff_find_valid_pi(ppi);
3267 if (!pi) {
3268 dev_printk(KERN_ERR, &pdev->dev,
3269 "no valid port_info specified\n");
3270 return -EINVAL;
3271 }
3272
3273 if (!devres_open_group(dev, NULL, GFP_KERNEL))
3274 return -ENOMEM;
3275
3276 rc = pcim_enable_device(pdev);
3277 if (rc)
3278 goto out;
3279
3280 /* prepare and activate BMDMA host */
3281 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
3282 if (rc)
3283 goto out;
3284 host->private_data = host_priv;
3285 host->flags |= hflags;
3286
3287 pci_set_master(pdev);
3288 rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
3289 out:
3290 if (rc == 0)
3291 devres_remove_group(&pdev->dev, NULL);
3292 else
3293 devres_release_group(&pdev->dev, NULL);
3294
3295 return rc;
3296}
3297EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
3298
3140#endif /* CONFIG_PCI */ 3299#endif /* CONFIG_PCI */
3300#endif /* CONFIG_ATA_BMDMA */
3141 3301
3142/** 3302/**
3143 * ata_sff_port_init - Initialize SFF/BMDMA ATA port 3303 * ata_sff_port_init - Initialize SFF/BMDMA ATA port
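ata_pci_bmdma_prepare_host() and ata_pci_bmdma_init_one() are the BMDMA counterparts of the SFF probe helpers: they re-add the ata_pci_bmdma_init() and pci_set_master() steps that were just dropped from the PIO-only path and hand ata_bmdma_interrupt to ata_pci_sff_activate_host(). Nearly every pata driver converted below collapses its probe to a single call; a hypothetical probe would look like this (driver name, masks and template are placeholders):

	/* sketch: probe of a hypothetical BMDMA-capable PCI PATA driver */
	#include <linux/pci.h>
	#include <linux/libata.h>

	static struct scsi_host_template foo_sht = {
		ATA_BMDMA_SHT("pata_foo"),
	};

	static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		static const struct ata_port_info info = {
			.flags		= ATA_FLAG_SLAVE_POSS,
			.pio_mask	= ATA_PIO4,
			.mwdma_mask	= ATA_MWDMA2,
			.udma_mask	= ATA_UDMA5,
			.port_ops	= &ata_bmdma_port_ops,
		};
		const struct ata_port_info *ppi[] = { &info, NULL };

		/* enables the device, maps BARs, sets up BMDMA and bus mastering,
		 * then activates the host with ata_bmdma_interrupt */
		return ata_pci_bmdma_init_one(pdev, ppi, &foo_sht, NULL, 0);
	}
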
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 066b9f301ed5..c8d47034d5e9 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -260,7 +260,7 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
260 return rc; 260 return rc;
261 pcim_pin_device(pdev); 261 pcim_pin_device(pdev);
262 } 262 }
263 return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL, 0); 263 return ata_pci_bmdma_init_one(pdev, ppi, &pacpi_sht, NULL, 0);
264} 264}
265 265
266static const struct pci_device_id pacpi_pci_tbl[] = { 266static const struct pci_device_id pacpi_pci_tbl[] = {
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index f306e10c748d..794ec6e3275d 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -583,7 +583,10 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
583 ppi[0] = &info_20_udma; 583 ppi[0] = &info_20_udma;
584 } 584 }
585 585
586 return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0); 586 if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask)
587 return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0);
588 else
589 return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0);
587} 590}
588 591
589#ifdef CONFIG_PM 592#ifdef CONFIG_PM
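pata_ali is the one conversion above that is not purely mechanical: when the chip revision forces the PIO-only port_info (no mwdma or udma mask), the driver stays on ata_pci_sff_init_one() so no BMDMA resources are claimed, and only the DMA-capable configurations move to ata_pci_bmdma_init_one(). Any driver whose DMA capability is only known at probe time could use the same test; a condensed, purely illustrative sketch:

	/* sketch: choose the probe helper once DMA capability is known */
	static int foo_finish_probe(struct pci_dev *pdev,
				    const struct ata_port_info * const *ppi,
				    struct scsi_host_template *sht)
	{
		/* PIO-only configuration: stay on the SFF-only helper */
		if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask)
			return ata_pci_sff_init_one(pdev, ppi, sht, NULL, 0);

		return ata_pci_bmdma_init_one(pdev, ppi, sht, NULL, 0);
	}
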
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index d95eca9c547e..620a07cabe31 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -574,7 +574,7 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
574 } 574 }
575 575
576 /* And fire it up */ 576 /* And fire it up */
577 return ata_pci_sff_init_one(pdev, ppi, &amd_sht, hpriv, 0); 577 return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0);
578} 578}
579 579
580#ifdef CONFIG_PM 580#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 4d066d6c30fa..ba43f0f8c880 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -421,7 +421,7 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
421 421
422 BUG_ON(ppi[0] == NULL); 422 BUG_ON(ppi[0] == NULL);
423 423
424 return ata_pci_sff_init_one(pdev, ppi, &artop_sht, NULL, 0); 424 return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0);
425} 425}
426 426
427static const struct pci_device_id artop_pci_tbl[] = { 427static const struct pci_device_id artop_pci_tbl[] = {
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 44d88b380ddd..43755616dc5a 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -246,8 +246,8 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
246 if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i])) 246 if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i]))
247 ppi[i] = &ata_dummy_port_info; 247 ppi[i] = &ata_dummy_port_info;
248 248
249 return ata_pci_sff_init_one(pdev, ppi, &atiixp_sht, NULL, 249 return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
250 ATA_HOST_PARALLEL_SCAN); 250 ATA_HOST_PARALLEL_SCAN);
251} 251}
252 252
253static const struct pci_device_id atiixp[] = { 253static const struct pci_device_id atiixp[] = {
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index bb6e0746e07d..95295935dd95 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -525,7 +525,7 @@ static int atp867x_init_one(struct pci_dev *pdev,
525 525
526 pci_set_master(pdev); 526 pci_set_master(pdev);
527 527
528 rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt, 528 rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
529 IRQF_SHARED, &atp867x_sht); 529 IRQF_SHARED, &atp867x_sht);
530 if (rc) 530 if (rc)
531 dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n"); 531 dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n");
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 6422cfd13d0d..9cae65de750e 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1214,7 +1214,7 @@ static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf,
1214 * bfin_irq_clear - Clear ATAPI interrupt. 1214 * bfin_irq_clear - Clear ATAPI interrupt.
1215 * @ap: Port associated with this ATA transaction. 1215 * @ap: Port associated with this ATA transaction.
1216 * 1216 *
1217 * Note: Original code is ata_sff_irq_clear(). 1217 * Note: Original code is ata_bmdma_irq_clear().
1218 */ 1218 */
1219 1219
1220static void bfin_irq_clear(struct ata_port *ap) 1220static void bfin_irq_clear(struct ata_port *ap)
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 4c81a71b8877..9f5da1c7454b 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -367,7 +367,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
367 pci_write_config_byte(pdev, UDIDETCR0, 0xF0); 367 pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
368#endif 368#endif
369 369
370 return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL, 0); 370 return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0);
371} 371}
372 372
373#ifdef CONFIG_PM 373#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 17c5f346ff01..030952f1f97c 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -221,7 +221,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
221 continue; 221 continue;
222 222
223 rc = devm_request_irq(&pdev->dev, irq[ap->port_no], 223 rc = devm_request_irq(&pdev->dev, irq[ap->port_no],
224 ata_sff_interrupt, 0, DRV_NAME, host); 224 ata_bmdma_interrupt, 0, DRV_NAME, host);
225 if (rc) 225 if (rc)
226 return rc; 226 return rc;
227 227
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index e809a4233a81..f792330f0d8e 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -324,7 +324,7 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
324 ppi[1] = &info_palmax_secondary; 324 ppi[1] = &info_palmax_secondary;
325 325
326 /* Now kick off ATA set up */ 326 /* Now kick off ATA set up */
327 return ata_pci_sff_init_one(pdev, ppi, &cs5530_sht, NULL, 0); 327 return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0);
328} 328}
329 329
330#ifdef CONFIG_PM 330#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index a02e6459fdcc..03a93186aa19 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -198,7 +198,7 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
198 rdmsr(ATAC_CH0D1_PIO, timings, dummy); 198 rdmsr(ATAC_CH0D1_PIO, timings, dummy);
199 if (CS5535_BAD_PIO(timings)) 199 if (CS5535_BAD_PIO(timings))
200 wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0); 200 wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0);
201 return ata_pci_sff_init_one(dev, ppi, &cs5535_sht, NULL, 0); 201 return ata_pci_bmdma_init_one(dev, ppi, &cs5535_sht, NULL, 0);
202} 202}
203 203
204static const struct pci_device_id cs5535[] = { 204static const struct pci_device_id cs5535[] = {
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 914ae3506ff5..21ee23f89e88 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -260,7 +260,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
260 return -ENODEV; 260 return -ENODEV;
261 } 261 }
262 262
263 return ata_pci_sff_init_one(dev, ppi, &cs5536_sht, NULL, 0); 263 return ata_pci_bmdma_init_one(dev, ppi, &cs5536_sht, NULL, 0);
264} 264}
265 265
266static const struct pci_device_id cs5536[] = { 266static const struct pci_device_id cs5536[] = {
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 0fcc096b8dac..6d915b063d93 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -138,7 +138,7 @@ static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *i
138 if (PCI_FUNC(pdev->devfn) != 1) 138 if (PCI_FUNC(pdev->devfn) != 1)
139 return -ENODEV; 139 return -ENODEV;
140 140
141 return ata_pci_sff_init_one(pdev, ppi, &cy82c693_sht, NULL, 0); 141 return ata_pci_bmdma_init_one(pdev, ppi, &cy82c693_sht, NULL, 0);
142} 142}
143 143
144static const struct pci_device_id cy82c693[] = { 144static const struct pci_device_id cy82c693[] = {
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 3bac0e079691..a08834758ea2 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -277,8 +277,8 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
277 dev_printk(KERN_DEBUG, &pdev->dev, 277 dev_printk(KERN_DEBUG, &pdev->dev,
278 "version " DRV_VERSION "\n"); 278 "version " DRV_VERSION "\n");
279 279
280 return ata_pci_sff_init_one(pdev, ppi, &efar_sht, NULL, 280 return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL,
281 ATA_HOST_PARALLEL_SCAN); 281 ATA_HOST_PARALLEL_SCAN);
282} 282}
283 283
284static const struct pci_device_id efar_pci_tbl[] = { 284static const struct pci_device_id efar_pci_tbl[] = {
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 8580eb3cd54d..7688868557b9 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -361,7 +361,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
361 break; 361 break;
362 } 362 }
363 /* Now kick off ATA set up */ 363 /* Now kick off ATA set up */
364 return ata_pci_sff_init_one(dev, ppi, &hpt36x_sht, hpriv, 0); 364 return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0);
365} 365}
366 366
367#ifdef CONFIG_PM 367#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 98b498b6907c..9ae4c0830577 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -987,7 +987,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
987 } 987 }
988 988
989 /* Now kick off ATA set up */ 989 /* Now kick off ATA set up */
990 return ata_pci_sff_init_one(dev, ppi, &hpt37x_sht, private_data, 0); 990 return ata_pci_bmdma_init_one(dev, ppi, &hpt37x_sht, private_data, 0);
991} 991}
992 992
993static const struct pci_device_id hpt37x[] = { 993static const struct pci_device_id hpt37x[] = {
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 8b95aeba0e74..32f3463216b8 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -548,7 +548,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
548 outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c); 548 outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
549 549
550 /* Now kick off ATA set up */ 550 /* Now kick off ATA set up */
551 return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0); 551 return ata_pci_bmdma_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0);
552} 552}
553 553
554static const struct pci_device_id hpt3x2n[] = { 554static const struct pci_device_id hpt3x2n[] = {
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 727a81ce4c9f..b63d5e2d4628 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -248,7 +248,7 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
248 ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd"); 248 ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
249 } 249 }
250 pci_set_master(pdev); 250 pci_set_master(pdev);
251 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 251 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
252 IRQF_SHARED, &hpt3x3_sht); 252 IRQF_SHARED, &hpt3x3_sht);
253} 253}
254 254
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index b56e8f722d20..9f2889fe43b2 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -470,7 +470,7 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
470 pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]); 470 pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]);
471 } 471 }
472 472
473 return ata_host_activate(host, ec->irq, ata_sff_interrupt, 0, 473 return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0,
474 &pata_icside_sht); 474 &pata_icside_sht);
475} 475}
476 476
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index f971f0de88e6..4d142a2ab8fd 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -273,7 +273,7 @@ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *en
273 dev_printk(KERN_DEBUG, &pdev->dev, 273 dev_printk(KERN_DEBUG, &pdev->dev,
274 "version " DRV_VERSION "\n"); 274 "version " DRV_VERSION "\n");
275 275
276 return ata_pci_sff_init_one(pdev, ppi, &it8213_sht, NULL, 0); 276 return ata_pci_bmdma_init_one(pdev, ppi, &it8213_sht, NULL, 0);
277} 277}
278 278
279static const struct pci_device_id it8213_pci_tbl[] = { 279static const struct pci_device_id it8213_pci_tbl[] = {
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 2bd2b002d14a..bf88f71a21f4 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -933,7 +933,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
933 else 933 else
934 ppi[0] = &info_smart; 934 ppi[0] = &info_smart;
935 } 935 }
936 return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL, 0); 936 return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0);
937} 937}
938 938
939#ifdef CONFIG_PM 939#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 565e01e6ac7c..cb3babbb7035 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -144,7 +144,7 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
144 }; 144 };
145 const struct ata_port_info *ppi[] = { &info, NULL }; 145 const struct ata_port_info *ppi[] = { &info, NULL };
146 146
147 return ata_pci_sff_init_one(pdev, ppi, &jmicron_sht, NULL, 0); 147 return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
148} 148}
149 149
150static const struct pci_device_id jmicron_pci_tbl[] = { 150static const struct pci_device_id jmicron_pci_tbl[] = {
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index b5b48e703cb7..76640ac76888 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -1110,7 +1110,7 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
1110 1110
1111 /* Start it up */ 1111 /* Start it up */
1112 priv->irq = irq; 1112 priv->irq = irq;
1113 return ata_host_activate(priv->host, irq, ata_sff_interrupt, 0, 1113 return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
1114 &pata_macio_sht); 1114 &pata_macio_sht);
1115} 1115}
1116 1116
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index e8ca02e5a71d..dd38083dcbeb 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -153,7 +153,7 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i
153 return -ENODEV; 153 return -ENODEV;
154 } 154 }
155#endif 155#endif
156 return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL, 0); 156 return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0);
157} 157}
158 158
159static const struct pci_device_id marvell_pci_tbl[] = { 159static const struct pci_device_id marvell_pci_tbl[] = {
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 36afe2c1c747..f087ab55b1df 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -659,7 +659,7 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
659 ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs); 659 ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs);
660 660
661 /* activate host */ 661 /* activate host */
662 return ata_host_activate(host, priv->ata_irq, ata_sff_interrupt, 0, 662 return ata_host_activate(host, priv->ata_irq, ata_bmdma_interrupt, 0,
663 &mpc52xx_ata_sht); 663 &mpc52xx_ata_sht);
664} 664}
665 665
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 94f979a7f4f7..3eb921c746a1 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -82,7 +82,7 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e
82 ata_pci_bmdma_clear_simplex(pdev); 82 ata_pci_bmdma_clear_simplex(pdev);
83 83
84 /* And let the library code do the work */ 84 /* And let the library code do the work */
85 return ata_pci_sff_init_one(pdev, port_info, &netcell_sht, NULL, 0); 85 return ata_pci_bmdma_init_one(pdev, port_info, &netcell_sht, NULL, 0);
86} 86}
87 87
88static const struct pci_device_id netcell_pci_tbl[] = { 88static const struct pci_device_id netcell_pci_tbl[] = {
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index dd53a66b19e3..cc50bd09aa26 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -149,7 +149,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
149 149
150 ninja32_program(base); 150 ninja32_program(base);
151 /* FIXME: Should we disable them at remove ? */ 151 /* FIXME: Should we disable them at remove ? */
152 return ata_host_activate(host, dev->irq, ata_sff_interrupt, 152 return ata_host_activate(host, dev->irq, ata_bmdma_interrupt,
153 IRQF_SHARED, &ninja32_sht); 153 IRQF_SHARED, &ninja32_sht);
154} 154}
155 155
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index fdbba2d76d3e..605f198f958c 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -380,7 +380,7 @@ static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *e
380 380
381 ns87415_fixup(pdev); 381 ns87415_fixup(pdev);
382 382
383 return ata_pci_sff_init_one(pdev, ppi, &ns87415_sht, NULL, 0); 383 return ata_pci_bmdma_init_one(pdev, ppi, &ns87415_sht, NULL, 0);
384} 384}
385 385
386static const struct pci_device_id ns87415_pci_tbl[] = { 386static const struct pci_device_id ns87415_pci_tbl[] = {
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 3001109352ea..06ddd91ffeda 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -750,20 +750,6 @@ static void octeon_cf_dev_config(struct ata_device *dev)
750} 750}
751 751
752/* 752/*
753 * Trap if driver tries to do standard bmdma commands. They are not
754 * supported.
755 */
756static void unreachable_qc(struct ata_queued_cmd *qc)
757{
758 BUG();
759}
760
761static u8 unreachable_port(struct ata_port *ap)
762{
763 BUG();
764}
765
766/*
767 * We don't do ATAPI DMA so return 0. 753 * We don't do ATAPI DMA so return 0.
768 */ 754 */
769static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc) 755static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
@@ -804,10 +790,6 @@ static struct ata_port_operations octeon_cf_ops = {
804 .sff_dev_select = octeon_cf_dev_select, 790 .sff_dev_select = octeon_cf_dev_select,
805 .sff_irq_on = octeon_cf_irq_on, 791 .sff_irq_on = octeon_cf_irq_on,
806 .sff_irq_clear = octeon_cf_irq_clear, 792 .sff_irq_clear = octeon_cf_irq_clear,
807 .bmdma_setup = unreachable_qc,
808 .bmdma_start = unreachable_qc,
809 .bmdma_stop = unreachable_qc,
810 .bmdma_status = unreachable_port,
811 .cable_detect = ata_cable_40wire, 793 .cable_detect = ata_cable_40wire,
812 .set_piomode = octeon_cf_set_piomode, 794 .set_piomode = octeon_cf_set_piomode,
813 .set_dmamode = octeon_cf_set_dmamode, 795 .set_dmamode = octeon_cf_set_dmamode,
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 988ef2627be3..b811c1636204 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -248,7 +248,7 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e
248 dev_printk(KERN_DEBUG, &pdev->dev, 248 dev_printk(KERN_DEBUG, &pdev->dev,
249 "version " DRV_VERSION "\n"); 249 "version " DRV_VERSION "\n");
250 250
251 return ata_pci_sff_init_one(pdev, ppi, &oldpiix_sht, NULL, 0); 251 return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0);
252} 252}
253 253
254static const struct pci_device_id oldpiix_pci_tbl[] = { 254static const struct pci_device_id oldpiix_pci_tbl[] = {
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 76b7d12b1e8d..0852cd07de08 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -429,7 +429,7 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
429 if (optiplus_with_udma(dev)) 429 if (optiplus_with_udma(dev))
430 ppi[0] = &info_82c700_udma; 430 ppi[0] = &info_82c700_udma;
431 431
432 return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL, 0); 432 return ata_pci_bmdma_init_one(dev, ppi, &optidma_sht, NULL, 0);
433} 433}
434 434
435static const struct pci_device_id optidma[] = { 435static const struct pci_device_id optidma[] = {
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 09f1f22c0307..b18351122525 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -754,7 +754,7 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
754 return -EIO; 754 return -EIO;
755 755
756 pci_set_master(pdev); 756 pci_set_master(pdev);
757 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 757 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
758 IRQF_SHARED, &pdc2027x_sht); 758 IRQF_SHARED, &pdc2027x_sht);
759} 759}
760 760
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index fa1e2f3bc0fd..c39f213e1bbc 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -337,7 +337,7 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id
337 return -ENODEV; 337 return -ENODEV;
338 } 338 }
339 } 339 }
340 return ata_pci_sff_init_one(dev, ppi, &pdc202xx_sht, NULL, 0); 340 return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0);
341} 341}
342 342
343static const struct pci_device_id pdc202xx[] = { 343static const struct pci_device_id pdc202xx[] = {
diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c
index 981615414849..cb01bf9496fe 100644
--- a/drivers/ata/pata_piccolo.c
+++ b/drivers/ata/pata_piccolo.c
@@ -95,7 +95,7 @@ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id
95 }; 95 };
96 const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; 96 const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
97 /* Just one port for the moment */ 97 /* Just one port for the moment */
98 return ata_pci_sff_init_one(dev, ppi, &tosh_sht, NULL, 0); 98 return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0);
99} 99}
100 100
101static struct pci_device_id ata_tosh[] = { 101static struct pci_device_id ata_tosh[] = {
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index a5fa388e5398..8574b31f1773 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -227,7 +227,7 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e
227 dev_printk(KERN_DEBUG, &pdev->dev, 227 dev_printk(KERN_DEBUG, &pdev->dev,
228 "version " DRV_VERSION "\n"); 228 "version " DRV_VERSION "\n");
229 229
230 return ata_pci_sff_init_one(pdev, ppi, &radisys_sht, NULL, 0); 230 return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0);
231} 231}
232 232
233static const struct pci_device_id radisys_pci_tbl[] = { 233static const struct pci_device_id radisys_pci_tbl[] = {
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 37092cfd7bc6..5fbe9b166c69 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -344,7 +344,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
344 */ 344 */
345 pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg); 345 pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg);
346 346
347 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 347 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
348 if (rc) 348 if (rc)
349 return rc; 349 return rc;
350 host->private_data = hpriv; 350 host->private_data = hpriv;
@@ -354,7 +354,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
354 host->flags |= ATA_HOST_PARALLEL_SCAN; 354 host->flags |= ATA_HOST_PARALLEL_SCAN;
355 355
356 pci_set_master(pdev); 356 pci_set_master(pdev);
357 return ata_pci_sff_activate_host(host, ata_sff_interrupt, &rdc_sht); 357 return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht);
358} 358}
359 359
360static void rdc_remove_one(struct pci_dev *pdev) 360static void rdc_remove_one(struct pci_dev *pdev)
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 6b5b63a2fd8e..e2c18257adff 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -237,7 +237,7 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
237 }; 237 };
238 const struct ata_port_info *ppi[] = { &info, NULL }; 238 const struct ata_port_info *ppi[] = { &info, NULL };
239 239
240 return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL, 0); 240 return ata_pci_bmdma_init_one(dev, ppi, &sc1200_sht, NULL, 0);
241} 241}
242 242
243static const struct pci_device_id sc1200[] = { 243static const struct pci_device_id sc1200[] = {
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 6f6193b707cb..d9db3f8d60ef 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -875,7 +875,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes)
875 * scc_irq_clear - Clear PCI IDE BMDMA interrupt. 875 * scc_irq_clear - Clear PCI IDE BMDMA interrupt.
876 * @ap: Port associated with this ATA transaction. 876 * @ap: Port associated with this ATA transaction.
877 * 877 *
878 * Note: Original code is ata_sff_irq_clear(). 878 * Note: Original code is ata_bmdma_irq_clear().
879 */ 879 */
880 880
881static void scc_irq_clear (struct ata_port *ap) 881static void scc_irq_clear (struct ata_port *ap)
@@ -1105,7 +1105,7 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1105 if (rc) 1105 if (rc)
1106 return rc; 1106 return rc;
1107 1107
1108 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 1108 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
1109 IRQF_SHARED, &scc_sht); 1109 IRQF_SHARED, &scc_sht);
1110} 1110}
1111 1111
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index 86b3d0133c7c..e97b32f03a6e 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -179,7 +179,7 @@ static int __devinit sch_init_one(struct pci_dev *pdev,
179 dev_printk(KERN_DEBUG, &pdev->dev, 179 dev_printk(KERN_DEBUG, &pdev->dev,
180 "version " DRV_VERSION "\n"); 180 "version " DRV_VERSION "\n");
181 181
182 return ata_pci_sff_init_one(pdev, ppi, &sch_sht, NULL, 0); 182 return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0);
183} 183}
184 184
185static int __init sch_init(void) 185static int __init sch_init(void)
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 43ea389df2b3..86dd714e3e1d 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -460,7 +460,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
460 if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) 460 if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
461 ata_pci_bmdma_clear_simplex(pdev); 461 ata_pci_bmdma_clear_simplex(pdev);
462 462
463 return ata_pci_sff_init_one(pdev, ppi, &serverworks_sht, NULL, 0); 463 return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
464} 464}
465 465
466#ifdef CONFIG_PM 466#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 43faf106f647..d3190d7ec304 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -374,11 +374,11 @@ static int __devinit sil680_init_one(struct pci_dev *pdev,
374 ata_sff_std_ports(&host->ports[1]->ioaddr); 374 ata_sff_std_ports(&host->ports[1]->ioaddr);
375 375
376 /* Register & activate */ 376 /* Register & activate */
377 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 377 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
378 IRQF_SHARED, &sil680_sht); 378 IRQF_SHARED, &sil680_sht);
379 379
380use_ioports: 380use_ioports:
381 return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL, 0); 381 return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0);
382} 382}
383 383
384#ifdef CONFIG_PM 384#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index b6708032f321..60cea13cccce 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -826,7 +826,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
826 826
827 sis_fixup(pdev, chipset); 827 sis_fixup(pdev, chipset);
828 828
829 return ata_pci_sff_init_one(pdev, ppi, &sis_sht, chipset, 0); 829 return ata_pci_bmdma_init_one(pdev, ppi, &sis_sht, chipset, 0);
830} 830}
831 831
832#ifdef CONFIG_PM 832#ifdef CONFIG_PM
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 733b042a7469..98548f640c8e 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -316,7 +316,7 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id
316 val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; 316 val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
317 pci_write_config_dword(dev, 0x40, val); 317 pci_write_config_dword(dev, 0x40, val);
318 318
319 return ata_pci_sff_init_one(dev, ppi, &sl82c105_sht, NULL, 0); 319 return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0);
320} 320}
321 321
322static const struct pci_device_id sl82c105[] = { 322static const struct pci_device_id sl82c105[] = {
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 48f50600ed2a..0d1f89e571dd 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -201,7 +201,7 @@ static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
201 if (!printed_version++) 201 if (!printed_version++)
202 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); 202 dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
203 203
204 return ata_pci_sff_init_one(dev, ppi, &triflex_sht, NULL, 0); 204 return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0);
205} 205}
206 206
207static const struct pci_device_id triflex[] = { 207static const struct pci_device_id triflex[] = {
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 7e3e0a5598b7..5e659885de16 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -627,7 +627,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
627 } 627 }
628 628
629 /* We have established the device type, now fire it up */ 629 /* We have established the device type, now fire it up */
630 return ata_pci_sff_init_one(pdev, ppi, &via_sht, (void *)config, 0); 630 return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0);
631} 631}
632 632
633#ifdef CONFIG_PM 633#ifdef CONFIG_PM
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index f3471bc949d3..a476cd99b95d 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -675,8 +675,6 @@ static struct ata_port_operations mv5_ops = {
675 .freeze = mv_eh_freeze, 675 .freeze = mv_eh_freeze,
676 .thaw = mv_eh_thaw, 676 .thaw = mv_eh_thaw,
677 .hardreset = mv_hardreset, 677 .hardreset = mv_hardreset,
678 .error_handler = ata_std_error_handler, /* avoid SFF EH */
679 .post_internal_cmd = ATA_OP_NULL,
680 678
681 .scr_read = mv5_scr_read, 679 .scr_read = mv5_scr_read,
682 .scr_write = mv5_scr_write, 680 .scr_write = mv5_scr_write,
@@ -2813,7 +2811,7 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2813 } else if (!edma_was_enabled) { 2811 } else if (!edma_was_enabled) {
2814 struct ata_queued_cmd *qc = mv_get_active_qc(ap); 2812 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2815 if (qc) 2813 if (qc)
2816 ata_sff_host_intr(ap, qc); 2814 ata_bmdma_port_intr(ap, qc);
2817 else 2815 else
2818 mv_unexpected_intr(ap, edma_was_enabled); 2816 mv_unexpected_intr(ap, edma_was_enabled);
2819 } 2817 }
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index baa8f0d2c86f..6fd114784116 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -920,7 +920,7 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
920 } 920 }
921 921
922 /* handle interrupt */ 922 /* handle interrupt */
923 return ata_sff_host_intr(ap, qc); 923 return ata_bmdma_port_intr(ap, qc);
924} 924}
925 925
926static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) 926static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
@@ -1100,7 +1100,7 @@ static void nv_adma_irq_clear(struct ata_port *ap)
1100 u32 notifier_clears[2]; 1100 u32 notifier_clears[2];
1101 1101
1102 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { 1102 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1103 ata_sff_irq_clear(ap); 1103 ata_bmdma_irq_clear(ap);
1104 return; 1104 return;
1105 } 1105 }
1106 1106
@@ -1505,7 +1505,7 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1505 1505
1506 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1506 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1507 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { 1507 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1508 handled += ata_sff_host_intr(ap, qc); 1508 handled += ata_bmdma_port_intr(ap, qc);
1509 } else { 1509 } else {
1510 /* 1510 /*
1511 * No request pending? Clear interrupt status 1511 * No request pending? Clear interrupt status
@@ -2430,7 +2430,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2430 2430
2431 ppi[0] = &nv_port_info[type]; 2431 ppi[0] = &nv_port_info[type];
2432 ipriv = ppi[0]->private_data; 2432 ipriv = ppi[0]->private_data;
2433 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 2433 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2434 if (rc) 2434 if (rc)
2435 return rc; 2435 return rc;
2436 2436
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index d533b3d20ca1..daeebf19a6a9 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -120,8 +120,6 @@ static void qs_host_stop(struct ata_host *host);
120static void qs_qc_prep(struct ata_queued_cmd *qc); 120static void qs_qc_prep(struct ata_queued_cmd *qc);
121static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); 121static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
122static int qs_check_atapi_dma(struct ata_queued_cmd *qc); 122static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
123static void qs_bmdma_stop(struct ata_queued_cmd *qc);
124static u8 qs_bmdma_status(struct ata_port *ap);
125static void qs_freeze(struct ata_port *ap); 123static void qs_freeze(struct ata_port *ap);
126static void qs_thaw(struct ata_port *ap); 124static void qs_thaw(struct ata_port *ap);
127static int qs_prereset(struct ata_link *link, unsigned long deadline); 125static int qs_prereset(struct ata_link *link, unsigned long deadline);
@@ -137,8 +135,6 @@ static struct ata_port_operations qs_ata_ops = {
137 .inherits = &ata_sff_port_ops, 135 .inherits = &ata_sff_port_ops,
138 136
139 .check_atapi_dma = qs_check_atapi_dma, 137 .check_atapi_dma = qs_check_atapi_dma,
140 .bmdma_stop = qs_bmdma_stop,
141 .bmdma_status = qs_bmdma_status,
142 .qc_prep = qs_qc_prep, 138 .qc_prep = qs_qc_prep,
143 .qc_issue = qs_qc_issue, 139 .qc_issue = qs_qc_issue,
144 140
@@ -190,16 +186,6 @@ static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
190 return 1; /* ATAPI DMA not supported */ 186 return 1; /* ATAPI DMA not supported */
191} 187}
192 188
193static void qs_bmdma_stop(struct ata_queued_cmd *qc)
194{
195 /* nothing */
196}
197
198static u8 qs_bmdma_status(struct ata_port *ap)
199{
200 return 0;
201}
202
203static inline void qs_enter_reg_mode(struct ata_port *ap) 189static inline void qs_enter_reg_mode(struct ata_port *ap)
204{ 190{
205 u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); 191 u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
@@ -454,7 +440,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host)
454 if (!pp || pp->state != qs_state_mmio) 440 if (!pp || pp->state != qs_state_mmio)
455 continue; 441 continue;
456 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) 442 if (!(qc->tf.flags & ATA_TFLAG_POLLING))
457 handled |= ata_sff_host_intr(ap, qc); 443 handled |= ata_sff_port_intr(ap, qc);
458 } 444 }
459 return handled; 445 return handled;
460} 446}
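
sata_qstor never uses the BMDMA engine, so its empty ->bmdma_stop()/->bmdma_status() stubs can simply go away: hooks left unset in an ata_port_operations initializer are filled in from the template named in .inherits. A minimal sketch of the trimmed table (the foo_* names are hypothetical, not the real qs_ata_ops):

#include <linux/libata.h>

static int foo_check_atapi_dma(struct ata_queued_cmd *qc);	/* hypothetical */
static void foo_qc_prep(struct ata_queued_cmd *qc);		/* hypothetical */
static unsigned int foo_qc_issue(struct ata_queued_cmd *qc);	/* hypothetical */

static struct ata_port_operations foo_ops = {
	.inherits	= &ata_sff_port_ops,	/* unset hooks come from here */
	.check_atapi_dma = foo_check_atapi_dma,
	.qc_prep	= foo_qc_prep,
	.qc_issue	= foo_qc_issue,
	/* no .bmdma_stop / .bmdma_status: do-nothing stubs add nothing */
};
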
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 2dda312b6b9a..3a4f84219719 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -503,7 +503,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
503 goto err_hsm; 503 goto err_hsm;
504 504
505 /* ack bmdma irq events */ 505 /* ack bmdma irq events */
506 ata_sff_irq_clear(ap); 506 ata_bmdma_irq_clear(ap);
507 507
508 /* kick HSM in the ass */ 508 /* kick HSM in the ass */
509 ata_sff_hsm_move(ap, qc, status, 0); 509 ata_sff_hsm_move(ap, qc, status, 0);
@@ -584,7 +584,7 @@ static void sil_thaw(struct ata_port *ap)
584 584
585 /* clear IRQ */ 585 /* clear IRQ */
586 ap->ops->sff_check_status(ap); 586 ap->ops->sff_check_status(ap);
587 ata_sff_irq_clear(ap); 587 ata_bmdma_irq_clear(ap);
588 588
589 /* turn on SATA IRQ if supported */ 589 /* turn on SATA IRQ if supported */
590 if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ)) 590 if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index f8a91bfd66a8..2bfe3ae03976 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -279,7 +279,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
279 break; 279 break;
280 } 280 }
281 281
282 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 282 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
283 if (rc) 283 if (rc)
284 return rc; 284 return rc;
285 285
@@ -308,7 +308,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
308 308
309 pci_set_master(pdev); 309 pci_set_master(pdev);
310 pci_intx(pdev, 1); 310 pci_intx(pdev, 1);
311 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 311 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
312 IRQF_SHARED, &sis_sht); 312 IRQF_SHARED, &sis_sht);
313} 313}
314 314
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 101fd6a19829..7d9db4aaf07e 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -502,7 +502,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
502 writel(0x0, mmio_base + K2_SATA_SIM_OFFSET); 502 writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
503 503
504 pci_set_master(pdev); 504 pci_set_master(pdev);
505 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 505 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
506 IRQF_SHARED, &k2_sata_sht); 506 IRQF_SHARED, &k2_sata_sht);
507} 507}
508 508
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index d8dac17dc2c8..b8578c32d344 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -242,7 +242,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
242 242
243 pci_set_master(pdev); 243 pci_set_master(pdev);
244 pci_intx(pdev, 1); 244 pci_intx(pdev, 1);
245 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 245 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
246 IRQF_SHARED, &uli_sht); 246 IRQF_SHARED, &uli_sht);
247} 247}
248 248
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 08f65492cc81..101d8c219caf 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -308,7 +308,7 @@ static void svia_noop_freeze(struct ata_port *ap)
308 * certain way. Leave it alone and just clear pending IRQ. 308 * certain way. Leave it alone and just clear pending IRQ.
309 */ 309 */
310 ap->ops->sff_check_status(ap); 310 ap->ops->sff_check_status(ap);
311 ata_sff_irq_clear(ap); 311 ata_bmdma_irq_clear(ap);
312} 312}
313 313
314/** 314/**
@@ -463,7 +463,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
463 struct ata_host *host; 463 struct ata_host *host;
464 int rc; 464 int rc;
465 465
466 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 466 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
467 if (rc) 467 if (rc)
468 return rc; 468 return rc;
469 *r_host = host; 469 *r_host = host;
@@ -520,7 +520,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
520 struct ata_host *host; 520 struct ata_host *host;
521 int i, rc; 521 int i, rc;
522 522
523 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 523 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
524 if (rc) 524 if (rc)
525 return rc; 525 return rc;
526 *r_host = host; 526 *r_host = host;
@@ -628,7 +628,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
628 svia_configure(pdev); 628 svia_configure(pdev);
629 629
630 pci_set_master(pdev); 630 pci_set_master(pdev);
631 return ata_host_activate(host, pdev->irq, ata_sff_interrupt, 631 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
632 IRQF_SHARED, &svia_sht); 632 IRQF_SHARED, &svia_sht);
633} 633}
634 634
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 2107952ebff1..b777176ff494 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -245,7 +245,7 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap)
245 245
246 qc = ata_qc_from_tag(ap, ap->link.active_tag); 246 qc = ata_qc_from_tag(ap, ap->link.active_tag);
247 if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING))) 247 if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
248 handled = ata_sff_host_intr(ap, qc); 248 handled = ata_bmdma_port_intr(ap, qc);
249 249
250 /* We received an interrupt during a polled command, 250 /* We received an interrupt during a polled command,
251 * or some other spurious condition. Interrupt reporting 251 * or some other spurious condition. Interrupt reporting
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index bf6b13206d00..9fc630ce1ddb 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -162,7 +162,7 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
162 topology_remove_dev(cpu); 162 topology_remove_dev(cpu);
163 break; 163 break;
164 } 164 }
165 return rc ? NOTIFY_BAD : NOTIFY_OK; 165 return notifier_from_errno(rc);
166} 166}
167 167
168static int __cpuinit topology_sysfs_init(void) 168static int __cpuinit topology_sysfs_init(void)
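
notifier_from_errno() maps 0 to NOTIFY_OK and a negative errno to NOTIFY_BAD with the error value encoded in the return, which is why the explicit `rc ? NOTIFY_BAD : NOTIFY_OK` can be dropped. A small sketch of a hotplug callback using it (foo_add_cpu() is a made-up helper, stubbed here):

#include <linux/cpu.h>
#include <linux/notifier.h>

static int foo_add_cpu(unsigned int cpu)	/* hypothetical, may fail */
{
	return 0;
}

static int foo_cpu_callback(struct notifier_block *nfb,
			    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int rc = 0;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		rc = foo_add_cpu(cpu);

	/* 0 -> NOTIFY_OK, -errno -> NOTIFY_BAD carrying the errno */
	return notifier_from_errno(rc);
}
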
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index e21175be25d0..f09fc0e2062d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1121,5 +1121,12 @@ config DEVPORT
1121 1121
1122source "drivers/s390/char/Kconfig" 1122source "drivers/s390/char/Kconfig"
1123 1123
1124config RAMOOPS
1125 tristate "Log panic/oops to a RAM buffer"
1126 default n
1127 help
1128 This enables panic and oops messages to be logged to a circular
1129 buffer in RAM where it can be read back at some later point.
1130
1124endmenu 1131endmenu
1125 1132
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d39be4cf1f5d..88d6eac69754 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -108,6 +108,7 @@ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
108obj-$(CONFIG_TCG_TPM) += tpm/ 108obj-$(CONFIG_TCG_TPM) += tpm/
109 109
110obj-$(CONFIG_PS3_FLASH) += ps3flash.o 110obj-$(CONFIG_PS3_FLASH) += ps3flash.o
111obj-$(CONFIG_RAMOOPS) += ramoops.o
111 112
112obj-$(CONFIG_JS_RTC) += js-rtc.o 113obj-$(CONFIG_JS_RTC) += js-rtc.o
113js-rtc-y = rtc.o 114js-rtc-y = rtc.o
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 67ea3a60de74..70312da4c968 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -384,7 +384,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
384{ 384{
385 u32 httfea,baseaddr,enuscr; 385 u32 httfea,baseaddr,enuscr;
386 struct pci_dev *dev1; 386 struct pci_dev *dev1;
387 int i; 387 int i, ret;
388 unsigned size = amd64_fetch_size(); 388 unsigned size = amd64_fetch_size();
389 389
390 dev_info(&pdev->dev, "setting up ULi AGP\n"); 390 dev_info(&pdev->dev, "setting up ULi AGP\n");
@@ -400,15 +400,18 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
400 400
401 if (i == ARRAY_SIZE(uli_sizes)) { 401 if (i == ARRAY_SIZE(uli_sizes)) {
402 dev_info(&pdev->dev, "no ULi size found for %d\n", size); 402 dev_info(&pdev->dev, "no ULi size found for %d\n", size);
403 return -ENODEV; 403 ret = -ENODEV;
404 goto put;
404 } 405 }
405 406
406 /* shadow x86-64 registers into ULi registers */ 407 /* shadow x86-64 registers into ULi registers */
407 pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea); 408 pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
408 409
409 /* if x86-64 aperture base is beyond 4G, exit here */ 410 /* if x86-64 aperture base is beyond 4G, exit here */
410 if ((httfea & 0x7fff) >> (32 - 25)) 411 if ((httfea & 0x7fff) >> (32 - 25)) {
411 return -ENODEV; 412 ret = -ENODEV;
413 goto put;
414 }
412 415
413 httfea = (httfea& 0x7fff) << 25; 416 httfea = (httfea& 0x7fff) << 25;
414 417
@@ -420,9 +423,10 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
420 enuscr= httfea+ (size * 1024 * 1024) - 1; 423 enuscr= httfea+ (size * 1024 * 1024) - 1;
421 pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea); 424 pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
422 pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr); 425 pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
423 426 ret = 0;
427put:
424 pci_dev_put(dev1); 428 pci_dev_put(dev1);
425 return 0; 429 return ret;
426} 430}
427 431
428 432
@@ -441,7 +445,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
441{ 445{
442 u32 tmp, apbase, apbar, aplimit; 446 u32 tmp, apbase, apbar, aplimit;
443 struct pci_dev *dev1; 447 struct pci_dev *dev1;
444 int i; 448 int i, ret;
445 unsigned size = amd64_fetch_size(); 449 unsigned size = amd64_fetch_size();
446 450
447 dev_info(&pdev->dev, "setting up Nforce3 AGP\n"); 451 dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
@@ -458,7 +462,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
458 462
459 if (i == ARRAY_SIZE(nforce3_sizes)) { 463 if (i == ARRAY_SIZE(nforce3_sizes)) {
460 dev_info(&pdev->dev, "no NForce3 size found for %d\n", size); 464 dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
461 return -ENODEV; 465 ret = -ENODEV;
466 goto put;
462 } 467 }
463 468
464 pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp); 469 pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
@@ -472,7 +477,8 @@ static int nforce3_agp_init(struct pci_dev *pdev)
472 /* if x86-64 aperture base is beyond 4G, exit here */ 477 /* if x86-64 aperture base is beyond 4G, exit here */
473 if ( (apbase & 0x7fff) >> (32 - 25) ) { 478 if ( (apbase & 0x7fff) >> (32 - 25) ) {
474 dev_info(&pdev->dev, "aperture base > 4G\n"); 479 dev_info(&pdev->dev, "aperture base > 4G\n");
475 return -ENODEV; 480 ret = -ENODEV;
481 goto put;
476 } 482 }
477 483
478 apbase = (apbase & 0x7fff) << 25; 484 apbase = (apbase & 0x7fff) << 25;
@@ -488,9 +494,11 @@ static int nforce3_agp_init(struct pci_dev *pdev)
488 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase); 494 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
489 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit); 495 pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
490 496
497 ret = 0;
498put:
491 pci_dev_put(dev1); 499 pci_dev_put(dev1);
492 500
493 return 0; 501 return ret;
494} 502}
495 503
496static int __devinit agp_amd64_probe(struct pci_dev *pdev, 504static int __devinit agp_amd64_probe(struct pci_dev *pdev,
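
Both the ULi and NForce3 paths used to return early after pci_get_device() and leak the reference on dev1; funnelling every exit through one `put:` label makes pci_dev_put() unconditional. The same single-exit pattern in isolation (the device ID, register offsets and sanity check below are illustrative only):

#include <linux/pci.h>

static int foo_companion_init(struct pci_dev *pdev)
{
	struct pci_dev *dev1;
	u32 apbase;
	int ret;

	dev1 = pci_get_device(PCI_VENDOR_ID_AMD, 0x1234, NULL);	/* example ID */
	if (!dev1)
		return -ENODEV;

	pci_read_config_dword(dev1, 0x10, &apbase);	/* example register */
	if (apbase & 0x1) {				/* example sanity check */
		ret = -ENODEV;
		goto put;		/* error path still drops the reference */
	}

	pci_write_config_dword(dev1, 0x14, apbase);	/* example programming */
	ret = 0;
put:
	pci_dev_put(dev1);		/* runs on success and on every error */
	return ret;
}
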
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 56b27671adc4..4f8d60c25a98 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -84,6 +84,7 @@ static char *serial_version = "4.30";
84#include <linux/smp_lock.h> 84#include <linux/smp_lock.h>
85#include <linux/init.h> 85#include <linux/init.h>
86#include <linux/bitops.h> 86#include <linux/bitops.h>
87#include <linux/platform_device.h>
87 88
88#include <asm/setup.h> 89#include <asm/setup.h>
89 90
@@ -1954,29 +1955,16 @@ static const struct tty_operations serial_ops = {
1954/* 1955/*
1955 * The serial driver boot-time initialization code! 1956 * The serial driver boot-time initialization code!
1956 */ 1957 */
1957static int __init rs_init(void) 1958static int __init amiga_serial_probe(struct platform_device *pdev)
1958{ 1959{
1959 unsigned long flags; 1960 unsigned long flags;
1960 struct serial_state * state; 1961 struct serial_state * state;
1961 int error; 1962 int error;
1962 1963
1963 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_SERIAL))
1964 return -ENODEV;
1965
1966 serial_driver = alloc_tty_driver(1); 1964 serial_driver = alloc_tty_driver(1);
1967 if (!serial_driver) 1965 if (!serial_driver)
1968 return -ENOMEM; 1966 return -ENOMEM;
1969 1967
1970 /*
1971 * We request SERDAT and SERPER only, because the serial registers are
1972 * too spreaded over the custom register space
1973 */
1974 if (!request_mem_region(CUSTOM_PHYSADDR+0x30, 4,
1975 "amiserial [Paula]")) {
1976 error = -EBUSY;
1977 goto fail_put_tty_driver;
1978 }
1979
1980 IRQ_ports = NULL; 1968 IRQ_ports = NULL;
1981 1969
1982 show_serial_version(); 1970 show_serial_version();
@@ -1998,7 +1986,7 @@ static int __init rs_init(void)
1998 1986
1999 error = tty_register_driver(serial_driver); 1987 error = tty_register_driver(serial_driver);
2000 if (error) 1988 if (error)
2001 goto fail_release_mem_region; 1989 goto fail_put_tty_driver;
2002 1990
2003 state = rs_table; 1991 state = rs_table;
2004 state->magic = SSTATE_MAGIC; 1992 state->magic = SSTATE_MAGIC;
@@ -2050,23 +2038,24 @@ static int __init rs_init(void)
2050 ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */ 2038 ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */
2051 ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */ 2039 ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */
2052 2040
2041 platform_set_drvdata(pdev, state);
2042
2053 return 0; 2043 return 0;
2054 2044
2055fail_free_irq: 2045fail_free_irq:
2056 free_irq(IRQ_AMIGA_TBE, state); 2046 free_irq(IRQ_AMIGA_TBE, state);
2057fail_unregister: 2047fail_unregister:
2058 tty_unregister_driver(serial_driver); 2048 tty_unregister_driver(serial_driver);
2059fail_release_mem_region:
2060 release_mem_region(CUSTOM_PHYSADDR+0x30, 4);
2061fail_put_tty_driver: 2049fail_put_tty_driver:
2062 put_tty_driver(serial_driver); 2050 put_tty_driver(serial_driver);
2063 return error; 2051 return error;
2064} 2052}
2065 2053
2066static __exit void rs_exit(void) 2054static int __exit amiga_serial_remove(struct platform_device *pdev)
2067{ 2055{
2068 int error; 2056 int error;
2069 struct async_struct *info = rs_table[0].info; 2057 struct serial_state *state = platform_get_drvdata(pdev);
2058 struct async_struct *info = state->info;
2070 2059
2071 /* printk("Unloading %s: version %s\n", serial_name, serial_version); */ 2060 /* printk("Unloading %s: version %s\n", serial_name, serial_version); */
2072 tasklet_kill(&info->tlet); 2061 tasklet_kill(&info->tlet);
@@ -2075,19 +2064,38 @@ static __exit void rs_exit(void)
2075 error); 2064 error);
2076 put_tty_driver(serial_driver); 2065 put_tty_driver(serial_driver);
2077 2066
2078 if (info) { 2067 rs_table[0].info = NULL;
2079 rs_table[0].info = NULL; 2068 kfree(info);
2080 kfree(info);
2081 }
2082 2069
2083 free_irq(IRQ_AMIGA_TBE, rs_table); 2070 free_irq(IRQ_AMIGA_TBE, rs_table);
2084 free_irq(IRQ_AMIGA_RBF, rs_table); 2071 free_irq(IRQ_AMIGA_RBF, rs_table);
2085 2072
2086 release_mem_region(CUSTOM_PHYSADDR+0x30, 4); 2073 platform_set_drvdata(pdev, NULL);
2074
2075 return error;
2076}
2077
2078static struct platform_driver amiga_serial_driver = {
2079 .remove = __exit_p(amiga_serial_remove),
2080 .driver = {
2081 .name = "amiga-serial",
2082 .owner = THIS_MODULE,
2083 },
2084};
2085
2086static int __init amiga_serial_init(void)
2087{
2088 return platform_driver_probe(&amiga_serial_driver, amiga_serial_probe);
2089}
2090
2091module_init(amiga_serial_init);
2092
2093static void __exit amiga_serial_exit(void)
2094{
2095 platform_driver_unregister(&amiga_serial_driver);
2087} 2096}
2088 2097
2089module_init(rs_init) 2098module_exit(amiga_serial_exit);
2090module_exit(rs_exit)
2091 2099
2092 2100
2093#if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE) 2101#if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE)
@@ -2154,3 +2162,4 @@ console_initcall(amiserial_console_init);
2154#endif /* CONFIG_SERIAL_CONSOLE && !MODULE */ 2162#endif /* CONFIG_SERIAL_CONSOLE && !MODULE */
2155 2163
2156MODULE_LICENSE("GPL"); 2164MODULE_LICENSE("GPL");
2165MODULE_ALIAS("platform:amiga-serial");
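
The amiserial conversion replaces a self-probing module init with a platform driver bound to the "amiga-serial" device; platform_driver_probe() fits because there is exactly one, non-hotpluggable device and the probe routine can stay in __init memory. Reduced to its skeleton (the foo names are placeholders, not the amiserial symbols):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init foo_probe(struct platform_device *pdev)
{
	/* claim resources and set up the hardware described by pdev */
	return 0;
}

static int __exit foo_remove(struct platform_device *pdev)
{
	/* undo everything foo_probe() did */
	return 0;
}

static struct platform_driver foo_driver = {
	.remove	= __exit_p(foo_remove),
	.driver	= {
		.name	= "foo-serial",	/* must match the platform device name */
		.owner	= THIS_MODULE,
	},
};

static int __init foo_init(void)
{
	/* probe is __init, so use platform_driver_probe(), not _register() */
	return platform_driver_probe(&foo_driver, foo_probe);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	platform_driver_unregister(&foo_driver);
}
module_exit(foo_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:foo-serial");
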
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 63313a33ba5f..f4ae0e0fb631 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -703,14 +703,9 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
703 /* In general, the device is only openable by root anyway, so we're not 703 /* In general, the device is only openable by root anyway, so we're not
704 particularly concerned that bogus ioctls can flood the console. */ 704 particularly concerned that bogus ioctls can flood the console. */
705 705
706 adgl = kmalloc(sizeof(struct st_ram_io), GFP_KERNEL); 706 adgl = memdup_user(argp, sizeof(struct st_ram_io));
707 if (!adgl) 707 if (IS_ERR(adgl))
708 return -ENOMEM; 708 return PTR_ERR(adgl);
709
710 if (copy_from_user(adgl, argp, sizeof(struct st_ram_io))) {
711 kfree(adgl);
712 return -EFAULT;
713 }
714 709
715 lock_kernel(); 710 lock_kernel();
716 IndexCard = adgl->num_card-1; 711 IndexCard = adgl->num_card-1;
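
memdup_user() folds the kmalloc() + copy_from_user() + error-unwind sequence into a single call that returns either the new kernel copy or an ERR_PTR (-ENOMEM or -EFAULT). A minimal sketch of the pattern (struct foo_args and the ioctl body are invented for illustration):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct foo_args {		/* hypothetical ioctl argument block */
	int card;
	int len;
};

static long foo_ioctl_copy(void __user *argp)
{
	struct foo_args *args;
	long ret = 0;

	args = memdup_user(argp, sizeof(*args));	/* allocate + copy */
	if (IS_ERR(args))
		return PTR_ERR(args);		/* -ENOMEM or -EFAULT */

	/* ... operate on args->card / args->len ... */

	kfree(args);				/* the caller still frees it */
	return ret;
}
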
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c6ad4234378d..4f3f8c9ec262 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2505,12 +2505,11 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2505 return rv; 2505 return rv;
2506 } 2506 }
2507 2507
2508 printk(KERN_INFO 2508 dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
2509 "ipmi: Found new BMC (man_id: 0x%6.6x, " 2509 "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2510 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", 2510 bmc->id.manufacturer_id,
2511 bmc->id.manufacturer_id, 2511 bmc->id.product_id,
2512 bmc->id.product_id, 2512 bmc->id.device_id);
2513 bmc->id.device_id);
2514 } 2513 }
2515 2514
2516 /* 2515 /*
@@ -4037,8 +4036,8 @@ static void ipmi_request_event(void)
4037 4036
4038static struct timer_list ipmi_timer; 4037static struct timer_list ipmi_timer;
4039 4038
4040/* Call every ~100 ms. */ 4039/* Call every ~1000 ms. */
4041#define IPMI_TIMEOUT_TIME 100 4040#define IPMI_TIMEOUT_TIME 1000
4042 4041
4043/* How many jiffies does it take to get to the timeout time. */ 4042/* How many jiffies does it take to get to the timeout time. */
4044#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) 4043#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
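
The base poll period for the message handler grows from 100 ms to 1000 ms; the jiffies macro below it needs no change because it is derived from IPMI_TIMEOUT_TIME. For reference, the same millisecond-to-jiffies conversion in a small sketch (the FOO_* names are placeholders), with msecs_to_jiffies() as the equivalent helper:

#include <linux/jiffies.h>
#include <linux/timer.h>

#define FOO_TIMEOUT_MS		1000
#define FOO_TIMEOUT_JIFFIES	((FOO_TIMEOUT_MS * HZ) / 1000)	/* == HZ here */

static void foo_arm_timer(struct timer_list *t)
{
	/* msecs_to_jiffies() performs the same conversion at run time */
	mod_timer(t, jiffies + msecs_to_jiffies(FOO_TIMEOUT_MS));
}
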
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 47ffe4a90a95..35603dd4e6c5 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -107,6 +107,14 @@ enum si_type {
107}; 107};
108static char *si_to_str[] = { "kcs", "smic", "bt" }; 108static char *si_to_str[] = { "kcs", "smic", "bt" };
109 109
110enum ipmi_addr_src {
111 SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
112 SI_PCI, SI_DEVICETREE, SI_DEFAULT
113};
114static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
115 "ACPI", "SMBIOS", "PCI",
116 "device-tree", "default" };
117
110#define DEVICE_NAME "ipmi_si" 118#define DEVICE_NAME "ipmi_si"
111 119
112static struct platform_driver ipmi_driver = { 120static struct platform_driver ipmi_driver = {
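
Turning addr_source from a free-form string into an enum with a parallel name table keeps the value cheap to compare while still printable; the only invariant is that the table stays index-aligned with the enum. The pattern in miniature (foo_* names invented):

enum foo_src {
	FOO_INVALID = 0, FOO_HOTMOD, FOO_ACPI, FOO_PCI, FOO_DEFAULT
};

static const char * const foo_src_to_str[] = {
	NULL, "hotmod", "ACPI", "PCI", "default"	/* indexed by enum foo_src */
};

/* usage: printk(KERN_INFO "probing via %s\n", foo_src_to_str[src]); */
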
@@ -188,7 +196,7 @@ struct smi_info {
188 int (*irq_setup)(struct smi_info *info); 196 int (*irq_setup)(struct smi_info *info);
189 void (*irq_cleanup)(struct smi_info *info); 197 void (*irq_cleanup)(struct smi_info *info);
190 unsigned int io_size; 198 unsigned int io_size;
191 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */ 199 enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
192 void (*addr_source_cleanup)(struct smi_info *info); 200 void (*addr_source_cleanup)(struct smi_info *info);
193 void *addr_source_data; 201 void *addr_source_data;
194 202
@@ -300,6 +308,7 @@ static int num_max_busy_us;
300 308
301static int unload_when_empty = 1; 309static int unload_when_empty = 1;
302 310
311static int add_smi(struct smi_info *smi);
303static int try_smi_init(struct smi_info *smi); 312static int try_smi_init(struct smi_info *smi);
304static void cleanup_one_si(struct smi_info *to_clean); 313static void cleanup_one_si(struct smi_info *to_clean);
305 314
@@ -314,9 +323,14 @@ static void deliver_recv_msg(struct smi_info *smi_info,
314{ 323{
315 /* Deliver the message to the upper layer with the lock 324 /* Deliver the message to the upper layer with the lock
316 released. */ 325 released. */
317 spin_unlock(&(smi_info->si_lock)); 326
318 ipmi_smi_msg_received(smi_info->intf, msg); 327 if (smi_info->run_to_completion) {
319 spin_lock(&(smi_info->si_lock)); 328 ipmi_smi_msg_received(smi_info->intf, msg);
329 } else {
330 spin_unlock(&(smi_info->si_lock));
331 ipmi_smi_msg_received(smi_info->intf, msg);
332 spin_lock(&(smi_info->si_lock));
333 }
320} 334}
321 335
322static void return_hosed_msg(struct smi_info *smi_info, int cCode) 336static void return_hosed_msg(struct smi_info *smi_info, int cCode)
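
When run_to_completion is set the interface runs single-threaded (panic-time flushing), so deliver_recv_msg() may keep si_lock across the upcall; otherwise it still drops and retakes the lock so the message handler can call back into the driver without deadlocking. The shape of that conditional lock drop, with invented foo types and a stub upcall:

#include <linux/spinlock.h>

struct foo_msg;					/* opaque message, hypothetical */

struct foo_info {				/* hypothetical driver state */
	spinlock_t lock;
	bool run_to_completion;
	void *intf;
};

static void foo_msg_received(void *intf, struct foo_msg *msg)
{
	/* stub standing in for the upper-layer delivery call */
}

static void foo_deliver(struct foo_info *info, struct foo_msg *msg)
{
	if (info->run_to_completion) {
		/* single-threaded context: safe to call out with the lock held */
		foo_msg_received(info->intf, msg);
	} else {
		/* normal path: drop the lock so the upcall may re-enter us */
		spin_unlock(&info->lock);
		foo_msg_received(info->intf, msg);
		spin_lock(&info->lock);
	}
}
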
@@ -445,6 +459,9 @@ static inline void disable_si_irq(struct smi_info *smi_info)
445 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 459 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
446 start_disable_irq(smi_info); 460 start_disable_irq(smi_info);
447 smi_info->interrupt_disabled = 1; 461 smi_info->interrupt_disabled = 1;
462 if (!atomic_read(&smi_info->stop_operation))
463 mod_timer(&smi_info->si_timer,
464 jiffies + SI_TIMEOUT_JIFFIES);
448 } 465 }
449} 466}
450 467
@@ -576,9 +593,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
576 smi_info->handlers->get_result(smi_info->si_sm, msg, 3); 593 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
577 if (msg[2] != 0) { 594 if (msg[2] != 0) {
578 /* Error clearing flags */ 595 /* Error clearing flags */
579 printk(KERN_WARNING 596 dev_warn(smi_info->dev,
580 "ipmi_si: Error clearing flags: %2.2x\n", 597 "Error clearing flags: %2.2x\n", msg[2]);
581 msg[2]);
582 } 598 }
583 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ) 599 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
584 start_enable_irq(smi_info); 600 start_enable_irq(smi_info);
@@ -670,9 +686,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
670 /* We got the flags from the SMI, now handle them. */ 686 /* We got the flags from the SMI, now handle them. */
671 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 687 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
672 if (msg[2] != 0) { 688 if (msg[2] != 0) {
673 printk(KERN_WARNING 689 dev_warn(smi_info->dev, "Could not enable interrupts"
674 "ipmi_si: Could not enable interrupts" 690 ", failed get, using polled mode.\n");
675 ", failed get, using polled mode.\n");
676 smi_info->si_state = SI_NORMAL; 691 smi_info->si_state = SI_NORMAL;
677 } else { 692 } else {
678 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 693 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -693,11 +708,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
693 708
694 /* We got the flags from the SMI, now handle them. */ 709 /* We got the flags from the SMI, now handle them. */
695 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 710 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
696 if (msg[2] != 0) { 711 if (msg[2] != 0)
697 printk(KERN_WARNING 712 dev_warn(smi_info->dev, "Could not enable interrupts"
698 "ipmi_si: Could not enable interrupts" 713 ", failed set, using polled mode.\n");
699 ", failed set, using polled mode.\n"); 714 else
700 } 715 smi_info->interrupt_disabled = 0;
701 smi_info->si_state = SI_NORMAL; 716 smi_info->si_state = SI_NORMAL;
702 break; 717 break;
703 } 718 }
@@ -709,9 +724,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
709 /* We got the flags from the SMI, now handle them. */ 724 /* We got the flags from the SMI, now handle them. */
710 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 725 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
711 if (msg[2] != 0) { 726 if (msg[2] != 0) {
712 printk(KERN_WARNING 727 dev_warn(smi_info->dev, "Could not disable interrupts"
713 "ipmi_si: Could not disable interrupts" 728 ", failed get.\n");
714 ", failed get.\n");
715 smi_info->si_state = SI_NORMAL; 729 smi_info->si_state = SI_NORMAL;
716 } else { 730 } else {
717 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 731 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -733,9 +747,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
733 /* We got the flags from the SMI, now handle them. */ 747 /* We got the flags from the SMI, now handle them. */
734 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 748 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
735 if (msg[2] != 0) { 749 if (msg[2] != 0) {
736 printk(KERN_WARNING 750 dev_warn(smi_info->dev, "Could not disable interrupts"
737 "ipmi_si: Could not disable interrupts" 751 ", failed set.\n");
738 ", failed set.\n");
739 } 752 }
740 smi_info->si_state = SI_NORMAL; 753 smi_info->si_state = SI_NORMAL;
741 break; 754 break;
@@ -877,6 +890,11 @@ static void sender(void *send_info,
877 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); 890 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
878#endif 891#endif
879 892
893 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
894
895 if (smi_info->thread)
896 wake_up_process(smi_info->thread);
897
880 if (smi_info->run_to_completion) { 898 if (smi_info->run_to_completion) {
881 /* 899 /*
882 * If we are running to completion, then throw it in 900 * If we are running to completion, then throw it in
@@ -997,6 +1015,8 @@ static int ipmi_thread(void *data)
997 ; /* do nothing */ 1015 ; /* do nothing */
998 else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) 1016 else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
999 schedule(); 1017 schedule();
1018 else if (smi_result == SI_SM_IDLE)
1019 schedule_timeout_interruptible(100);
1000 else 1020 else
1001 schedule_timeout_interruptible(0); 1021 schedule_timeout_interruptible(0);
1002 } 1022 }
@@ -1039,6 +1059,7 @@ static void smi_timeout(unsigned long data)
1039 unsigned long flags; 1059 unsigned long flags;
1040 unsigned long jiffies_now; 1060 unsigned long jiffies_now;
1041 long time_diff; 1061 long time_diff;
1062 long timeout;
1042#ifdef DEBUG_TIMING 1063#ifdef DEBUG_TIMING
1043 struct timeval t; 1064 struct timeval t;
1044#endif 1065#endif
@@ -1059,9 +1080,9 @@ static void smi_timeout(unsigned long data)
1059 1080
1060 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 1081 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1061 /* Running with interrupts, only do long timeouts. */ 1082 /* Running with interrupts, only do long timeouts. */
1062 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 1083 timeout = jiffies + SI_TIMEOUT_JIFFIES;
1063 smi_inc_stat(smi_info, long_timeouts); 1084 smi_inc_stat(smi_info, long_timeouts);
1064 goto do_add_timer; 1085 goto do_mod_timer;
1065 } 1086 }
1066 1087
1067 /* 1088 /*
@@ -1070,14 +1091,15 @@ static void smi_timeout(unsigned long data)
1070 */ 1091 */
1071 if (smi_result == SI_SM_CALL_WITH_DELAY) { 1092 if (smi_result == SI_SM_CALL_WITH_DELAY) {
1072 smi_inc_stat(smi_info, short_timeouts); 1093 smi_inc_stat(smi_info, short_timeouts);
1073 smi_info->si_timer.expires = jiffies + 1; 1094 timeout = jiffies + 1;
1074 } else { 1095 } else {
1075 smi_inc_stat(smi_info, long_timeouts); 1096 smi_inc_stat(smi_info, long_timeouts);
1076 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 1097 timeout = jiffies + SI_TIMEOUT_JIFFIES;
1077 } 1098 }
1078 1099
1079 do_add_timer: 1100 do_mod_timer:
1080 add_timer(&(smi_info->si_timer)); 1101 if (smi_result != SI_SM_IDLE)
1102 mod_timer(&(smi_info->si_timer), timeout);
1081} 1103}
1082 1104
1083static irqreturn_t si_irq_handler(int irq, void *data) 1105static irqreturn_t si_irq_handler(int irq, void *data)
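
smi_timeout() now computes the new expiry into a local and finishes with a single mod_timer(), skipped entirely when the state machine is idle; unlike the old expires-then-add_timer() sequence, mod_timer() re-arms safely whether or not the timer is already pending. A cut-down sketch of the same flow (the foo names and period are illustrative):

#include <linux/jiffies.h>
#include <linux/timer.h>

#define FOO_TIMEOUT_JIFFIES	(HZ / 10)	/* example period */

enum { FOO_IDLE, FOO_BUSY };

struct foo_info {				/* hypothetical driver state */
	struct timer_list timer;
};

static int foo_run_state_machine(struct foo_info *info)
{
	return FOO_IDLE;			/* stub for the real state machine */
}

static void foo_timeout(unsigned long data)	/* pre-4.15 timer callback form */
{
	struct foo_info *info = (struct foo_info *)data;
	unsigned long timeout = jiffies + FOO_TIMEOUT_JIFFIES;

	if (foo_run_state_machine(info) != FOO_IDLE)
		mod_timer(&info->timer, timeout);	/* re-arm, pending or not */
	/* idle: fall through without re-arming; the next request restarts it */
}
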
@@ -1144,10 +1166,10 @@ static int smi_start_processing(void *send_info,
1144 new_smi->thread = kthread_run(ipmi_thread, new_smi, 1166 new_smi->thread = kthread_run(ipmi_thread, new_smi,
1145 "kipmi%d", new_smi->intf_num); 1167 "kipmi%d", new_smi->intf_num);
1146 if (IS_ERR(new_smi->thread)) { 1168 if (IS_ERR(new_smi->thread)) {
1147 printk(KERN_NOTICE "ipmi_si_intf: Could not start" 1169 dev_notice(new_smi->dev, "Could not start"
1148 " kernel thread due to error %ld, only using" 1170 " kernel thread due to error %ld, only using"
1149 " timers to drive the interface\n", 1171 " timers to drive the interface\n",
1150 PTR_ERR(new_smi->thread)); 1172 PTR_ERR(new_smi->thread));
1151 new_smi->thread = NULL; 1173 new_smi->thread = NULL;
1152 } 1174 }
1153 } 1175 }
@@ -1308,14 +1330,13 @@ static int std_irq_setup(struct smi_info *info)
1308 DEVICE_NAME, 1330 DEVICE_NAME,
1309 info); 1331 info);
1310 if (rv) { 1332 if (rv) {
1311 printk(KERN_WARNING 1333 dev_warn(info->dev, "%s unable to claim interrupt %d,"
1312 "ipmi_si: %s unable to claim interrupt %d," 1334 " running polled\n",
1313 " running polled\n", 1335 DEVICE_NAME, info->irq);
1314 DEVICE_NAME, info->irq);
1315 info->irq = 0; 1336 info->irq = 0;
1316 } else { 1337 } else {
1317 info->irq_cleanup = std_irq_cleanup; 1338 info->irq_cleanup = std_irq_cleanup;
1318 printk(" Using irq %d\n", info->irq); 1339 dev_info(info->dev, "Using irq %d\n", info->irq);
1319 } 1340 }
1320 1341
1321 return rv; 1342 return rv;
@@ -1406,8 +1427,8 @@ static int port_setup(struct smi_info *info)
1406 info->io.outputb = port_outl; 1427 info->io.outputb = port_outl;
1407 break; 1428 break;
1408 default: 1429 default:
1409 printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", 1430 dev_warn(info->dev, "Invalid register size: %d\n",
1410 info->io.regsize); 1431 info->io.regsize);
1411 return -EINVAL; 1432 return -EINVAL;
1412 } 1433 }
1413 1434
@@ -1529,8 +1550,8 @@ static int mem_setup(struct smi_info *info)
1529 break; 1550 break;
1530#endif 1551#endif
1531 default: 1552 default:
1532 printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", 1553 dev_warn(info->dev, "Invalid register size: %d\n",
1533 info->io.regsize); 1554 info->io.regsize);
1534 return -EINVAL; 1555 return -EINVAL;
1535 } 1556 }
1536 1557
@@ -1755,7 +1776,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
1755 goto out; 1776 goto out;
1756 } 1777 }
1757 1778
1758 info->addr_source = "hotmod"; 1779 info->addr_source = SI_HOTMOD;
1759 info->si_type = si_type; 1780 info->si_type = si_type;
1760 info->io.addr_data = addr; 1781 info->io.addr_data = addr;
1761 info->io.addr_type = addr_space; 1782 info->io.addr_type = addr_space;
@@ -1777,7 +1798,9 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
1777 info->irq_setup = std_irq_setup; 1798 info->irq_setup = std_irq_setup;
1778 info->slave_addr = ipmb; 1799 info->slave_addr = ipmb;
1779 1800
1780 try_smi_init(info); 1801 if (!add_smi(info))
1802 if (try_smi_init(info))
1803 cleanup_one_si(info);
1781 } else { 1804 } else {
1782 /* remove */ 1805 /* remove */
1783 struct smi_info *e, *tmp_e; 1806 struct smi_info *e, *tmp_e;
@@ -1813,7 +1836,8 @@ static __devinit void hardcode_find_bmc(void)
1813 if (!info) 1836 if (!info)
1814 return; 1837 return;
1815 1838
1816 info->addr_source = "hardcoded"; 1839 info->addr_source = SI_HARDCODED;
1840 printk(KERN_INFO PFX "probing via hardcoded address\n");
1817 1841
1818 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { 1842 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1819 info->si_type = SI_KCS; 1843 info->si_type = SI_KCS;
@@ -1822,8 +1846,7 @@ static __devinit void hardcode_find_bmc(void)
1822 } else if (strcmp(si_type[i], "bt") == 0) { 1846 } else if (strcmp(si_type[i], "bt") == 0) {
1823 info->si_type = SI_BT; 1847 info->si_type = SI_BT;
1824 } else { 1848 } else {
1825 printk(KERN_WARNING 1849 printk(KERN_WARNING PFX "Interface type specified "
1826 "ipmi_si: Interface type specified "
1827 "for interface %d, was invalid: %s\n", 1850 "for interface %d, was invalid: %s\n",
1828 i, si_type[i]); 1851 i, si_type[i]);
1829 kfree(info); 1852 kfree(info);
@@ -1841,11 +1864,9 @@ static __devinit void hardcode_find_bmc(void)
1841 info->io.addr_data = addrs[i]; 1864 info->io.addr_data = addrs[i];
1842 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 1865 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1843 } else { 1866 } else {
1844 printk(KERN_WARNING 1867 printk(KERN_WARNING PFX "Interface type specified "
1845 "ipmi_si: Interface type specified " 1868 "for interface %d, but port and address were "
1846 "for interface %d, " 1869 "not set or set to zero.\n", i);
1847 "but port and address were not set or "
1848 "set to zero.\n", i);
1849 kfree(info); 1870 kfree(info);
1850 continue; 1871 continue;
1851 } 1872 }
@@ -1863,7 +1884,9 @@ static __devinit void hardcode_find_bmc(void)
1863 info->irq_setup = std_irq_setup; 1884 info->irq_setup = std_irq_setup;
1864 info->slave_addr = slave_addrs[i]; 1885 info->slave_addr = slave_addrs[i];
1865 1886
1866 try_smi_init(info); 1887 if (!add_smi(info))
1888 if (try_smi_init(info))
1889 cleanup_one_si(info);
1867 } 1890 }
1868} 1891}
1869 1892
@@ -1923,15 +1946,13 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
1923 &ipmi_acpi_gpe, 1946 &ipmi_acpi_gpe,
1924 info); 1947 info);
1925 if (status != AE_OK) { 1948 if (status != AE_OK) {
1926 printk(KERN_WARNING 1949 dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
1927 "ipmi_si: %s unable to claim ACPI GPE %d," 1950 " running polled\n", DEVICE_NAME, info->irq);
1928 " running polled\n",
1929 DEVICE_NAME, info->irq);
1930 info->irq = 0; 1951 info->irq = 0;
1931 return -EINVAL; 1952 return -EINVAL;
1932 } else { 1953 } else {
1933 info->irq_cleanup = acpi_gpe_irq_cleanup; 1954 info->irq_cleanup = acpi_gpe_irq_cleanup;
1934 printk(" Using ACPI GPE %d\n", info->irq); 1955 dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
1935 return 0; 1956 return 0;
1936 } 1957 }
1937} 1958}
@@ -1989,8 +2010,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
1989 u8 addr_space; 2010 u8 addr_space;
1990 2011
1991 if (spmi->IPMIlegacy != 1) { 2012 if (spmi->IPMIlegacy != 1) {
1992 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); 2013 printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1993 return -ENODEV; 2014 return -ENODEV;
1994 } 2015 }
1995 2016
1996 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 2017 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
@@ -2000,11 +2021,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
2000 2021
2001 info = kzalloc(sizeof(*info), GFP_KERNEL); 2022 info = kzalloc(sizeof(*info), GFP_KERNEL);
2002 if (!info) { 2023 if (!info) {
2003 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n"); 2024 printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
2004 return -ENOMEM; 2025 return -ENOMEM;
2005 } 2026 }
2006 2027
2007 info->addr_source = "SPMI"; 2028 info->addr_source = SI_SPMI;
2029 printk(KERN_INFO PFX "probing via SPMI\n");
2008 2030
2009 /* Figure out the interface type. */ 2031 /* Figure out the interface type. */
2010 switch (spmi->InterfaceType) { 2032 switch (spmi->InterfaceType) {
@@ -2018,8 +2040,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
2018 info->si_type = SI_BT; 2040 info->si_type = SI_BT;
2019 break; 2041 break;
2020 default: 2042 default:
2021 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n", 2043 printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
2022 spmi->InterfaceType); 2044 spmi->InterfaceType);
2023 kfree(info); 2045 kfree(info);
2024 return -EIO; 2046 return -EIO;
2025 } 2047 }
@@ -2055,13 +2077,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi)
2055 info->io.addr_type = IPMI_IO_ADDR_SPACE; 2077 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2056 } else { 2078 } else {
2057 kfree(info); 2079 kfree(info);
2058 printk(KERN_WARNING 2080 printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
2059 "ipmi_si: Unknown ACPI I/O Address type\n");
2060 return -EIO; 2081 return -EIO;
2061 } 2082 }
2062 info->io.addr_data = spmi->addr.address; 2083 info->io.addr_data = spmi->addr.address;
2063 2084
2064 try_smi_init(info); 2085 add_smi(info);
2065 2086
2066 return 0; 2087 return 0;
2067} 2088}
@@ -2093,6 +2114,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2093{ 2114{
2094 struct acpi_device *acpi_dev; 2115 struct acpi_device *acpi_dev;
2095 struct smi_info *info; 2116 struct smi_info *info;
2117 struct resource *res;
2096 acpi_handle handle; 2118 acpi_handle handle;
2097 acpi_status status; 2119 acpi_status status;
2098 unsigned long long tmp; 2120 unsigned long long tmp;
@@ -2105,7 +2127,8 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2105 if (!info) 2127 if (!info)
2106 return -ENOMEM; 2128 return -ENOMEM;
2107 2129
2108 info->addr_source = "ACPI"; 2130 info->addr_source = SI_ACPI;
2131 printk(KERN_INFO PFX "probing via ACPI\n");
2109 2132
2110 handle = acpi_dev->handle; 2133 handle = acpi_dev->handle;
2111 2134
@@ -2125,22 +2148,26 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2125 info->si_type = SI_BT; 2148 info->si_type = SI_BT;
2126 break; 2149 break;
2127 default: 2150 default:
2128 dev_info(&dev->dev, "unknown interface type %lld\n", tmp); 2151 dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
2129 goto err_free; 2152 goto err_free;
2130 } 2153 }
2131 2154
2132 if (pnp_port_valid(dev, 0)) { 2155 res = pnp_get_resource(dev, IORESOURCE_IO, 0);
2156 if (res) {
2133 info->io_setup = port_setup; 2157 info->io_setup = port_setup;
2134 info->io.addr_type = IPMI_IO_ADDR_SPACE; 2158 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2135 info->io.addr_data = pnp_port_start(dev, 0);
2136 } else if (pnp_mem_valid(dev, 0)) {
2137 info->io_setup = mem_setup;
2138 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2139 info->io.addr_data = pnp_mem_start(dev, 0);
2140 } else { 2159 } else {
2160 res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
2161 if (res) {
2162 info->io_setup = mem_setup;
2163 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2164 }
2165 }
2166 if (!res) {
2141 dev_err(&dev->dev, "no I/O or memory address\n"); 2167 dev_err(&dev->dev, "no I/O or memory address\n");
2142 goto err_free; 2168 goto err_free;
2143 } 2169 }
2170 info->io.addr_data = res->start;
2144 2171
2145 info->io.regspacing = DEFAULT_REGSPACING; 2172 info->io.regspacing = DEFAULT_REGSPACING;
2146 info->io.regsize = DEFAULT_REGSPACING; 2173 info->io.regsize = DEFAULT_REGSPACING;
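
Instead of the pnp_port_valid()/pnp_port_start() and pnp_mem_valid()/pnp_mem_start() pairs, the probe now asks the PNP core for the struct resource itself and decides between port and memory I/O by which lookup succeeds; the start address comes from res->start either way. The lookup in isolation (struct foo_io and its fields are invented):

#include <linux/ioport.h>
#include <linux/pnp.h>

struct foo_io {			/* hypothetical address description */
	bool		is_mem;
	resource_size_t	addr;
};

static int foo_get_addr(struct pnp_dev *dev, struct foo_io *io)
{
	struct resource *res;

	res = pnp_get_resource(dev, IORESOURCE_IO, 0);
	if (res) {
		io->is_mem = false;		/* port I/O window */
	} else {
		res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
		if (!res)
			return -EINVAL;		/* neither resource present */
		io->is_mem = true;		/* memory-mapped window */
	}
	io->addr = res->start;
	return 0;
}
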
@@ -2156,10 +2183,14 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2156 info->irq_setup = std_irq_setup; 2183 info->irq_setup = std_irq_setup;
2157 } 2184 }
2158 2185
2159 info->dev = &acpi_dev->dev; 2186 info->dev = &dev->dev;
2160 pnp_set_drvdata(dev, info); 2187 pnp_set_drvdata(dev, info);
2161 2188
2162 return try_smi_init(info); 2189 dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
2190 res, info->io.regsize, info->io.regspacing,
2191 info->irq);
2192
2193 return add_smi(info);
2163 2194
2164err_free: 2195err_free:
2165 kfree(info); 2196 kfree(info);
@@ -2264,12 +2295,12 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2264 2295
2265 info = kzalloc(sizeof(*info), GFP_KERNEL); 2296 info = kzalloc(sizeof(*info), GFP_KERNEL);
2266 if (!info) { 2297 if (!info) {
2267 printk(KERN_ERR 2298 printk(KERN_ERR PFX "Could not allocate SI data\n");
2268 "ipmi_si: Could not allocate SI data\n");
2269 return; 2299 return;
2270 } 2300 }
2271 2301
2272 info->addr_source = "SMBIOS"; 2302 info->addr_source = SI_SMBIOS;
2303 printk(KERN_INFO PFX "probing via SMBIOS\n");
2273 2304
2274 switch (ipmi_data->type) { 2305 switch (ipmi_data->type) {
2275 case 0x01: /* KCS */ 2306 case 0x01: /* KCS */
@@ -2299,8 +2330,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2299 2330
2300 default: 2331 default:
2301 kfree(info); 2332 kfree(info);
2302 printk(KERN_WARNING 2333 printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
2303 "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2304 ipmi_data->addr_space); 2334 ipmi_data->addr_space);
2305 return; 2335 return;
2306 } 2336 }
@@ -2318,7 +2348,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2318 if (info->irq) 2348 if (info->irq)
2319 info->irq_setup = std_irq_setup; 2349 info->irq_setup = std_irq_setup;
2320 2350
2321 try_smi_init(info); 2351 add_smi(info);
2322} 2352}
2323 2353
2324static void __devinit dmi_find_bmc(void) 2354static void __devinit dmi_find_bmc(void)
@@ -2368,7 +2398,8 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2368 if (!info) 2398 if (!info)
2369 return -ENOMEM; 2399 return -ENOMEM;
2370 2400
2371 info->addr_source = "PCI"; 2401 info->addr_source = SI_PCI;
2402 dev_info(&pdev->dev, "probing via PCI");
2372 2403
2373 switch (class_type) { 2404 switch (class_type) {
2374 case PCI_ERMC_CLASSCODE_TYPE_SMIC: 2405 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
@@ -2385,15 +2416,13 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2385 2416
2386 default: 2417 default:
2387 kfree(info); 2418 kfree(info);
2388 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n", 2419 dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
2389 pci_name(pdev), class_type);
2390 return -ENOMEM; 2420 return -ENOMEM;
2391 } 2421 }
2392 2422
2393 rv = pci_enable_device(pdev); 2423 rv = pci_enable_device(pdev);
2394 if (rv) { 2424 if (rv) {
2395 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n", 2425 dev_err(&pdev->dev, "couldn't enable PCI device\n");
2396 pci_name(pdev));
2397 kfree(info); 2426 kfree(info);
2398 return rv; 2427 return rv;
2399 } 2428 }
@@ -2421,7 +2450,11 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2421 info->dev = &pdev->dev; 2450 info->dev = &pdev->dev;
2422 pci_set_drvdata(pdev, info); 2451 pci_set_drvdata(pdev, info);
2423 2452
2424 return try_smi_init(info); 2453 dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
2454 &pdev->resource[0], info->io.regsize, info->io.regspacing,
2455 info->irq);
2456
2457 return add_smi(info);
2425} 2458}
2426 2459
2427static void __devexit ipmi_pci_remove(struct pci_dev *pdev) 2460static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
@@ -2473,7 +2506,7 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
2473 int ret; 2506 int ret;
2474 int proplen; 2507 int proplen;
2475 2508
2476 dev_info(&dev->dev, PFX "probing via device tree\n"); 2509 dev_info(&dev->dev, "probing via device tree\n");
2477 2510
2478 ret = of_address_to_resource(np, 0, &resource); 2511 ret = of_address_to_resource(np, 0, &resource);
2479 if (ret) { 2512 if (ret) {
@@ -2503,12 +2536,12 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
2503 2536
2504 if (!info) { 2537 if (!info) {
2505 dev_err(&dev->dev, 2538 dev_err(&dev->dev,
2506 PFX "could not allocate memory for OF probe\n"); 2539 "could not allocate memory for OF probe\n");
2507 return -ENOMEM; 2540 return -ENOMEM;
2508 } 2541 }
2509 2542
2510 info->si_type = (enum si_type) match->data; 2543 info->si_type = (enum si_type) match->data;
2511 info->addr_source = "device-tree"; 2544 info->addr_source = SI_DEVICETREE;
2512 info->irq_setup = std_irq_setup; 2545 info->irq_setup = std_irq_setup;
2513 2546
2514 if (resource.flags & IORESOURCE_IO) { 2547 if (resource.flags & IORESOURCE_IO) {
@@ -2528,13 +2561,13 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
2528 info->irq = irq_of_parse_and_map(dev->dev.of_node, 0); 2561 info->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
2529 info->dev = &dev->dev; 2562 info->dev = &dev->dev;
2530 2563
2531 dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n", 2564 dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
2532 info->io.addr_data, info->io.regsize, info->io.regspacing, 2565 info->io.addr_data, info->io.regsize, info->io.regspacing,
2533 info->irq); 2566 info->irq);
2534 2567
2535 dev_set_drvdata(&dev->dev, info); 2568 dev_set_drvdata(&dev->dev, info);
2536 2569
2537 return try_smi_init(info); 2570 return add_smi(info);
2538} 2571}
2539 2572
2540static int __devexit ipmi_of_remove(struct of_device *dev) 2573static int __devexit ipmi_of_remove(struct of_device *dev)
@@ -2643,9 +2676,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
2643 2676
2644 rv = wait_for_msg_done(smi_info); 2677 rv = wait_for_msg_done(smi_info);
2645 if (rv) { 2678 if (rv) {
2646 printk(KERN_WARNING 2679 printk(KERN_WARNING PFX "Error getting response from get"
2647 "ipmi_si: Error getting response from get global," 2680 " global enables command, the event buffer is not"
2648 " enables command, the event buffer is not"
2649 " enabled.\n"); 2681 " enabled.\n");
2650 goto out; 2682 goto out;
2651 } 2683 }
@@ -2657,10 +2689,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
2657 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || 2689 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
2658 resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || 2690 resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
2659 resp[2] != 0) { 2691 resp[2] != 0) {
2660 printk(KERN_WARNING 2692 printk(KERN_WARNING PFX "Invalid return from get global"
2661 "ipmi_si: Invalid return from get global" 2693 " enables command, cannot enable the event buffer.\n");
2662 " enables command, cannot enable the event"
2663 " buffer.\n");
2664 rv = -EINVAL; 2694 rv = -EINVAL;
2665 goto out; 2695 goto out;
2666 } 2696 }
@@ -2676,9 +2706,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
2676 2706
2677 rv = wait_for_msg_done(smi_info); 2707 rv = wait_for_msg_done(smi_info);
2678 if (rv) { 2708 if (rv) {
2679 printk(KERN_WARNING 2709 printk(KERN_WARNING PFX "Error getting response from set"
2680 "ipmi_si: Error getting response from set global," 2710 " global, enables command, the event buffer is not"
2681 " enables command, the event buffer is not"
2682 " enabled.\n"); 2711 " enabled.\n");
2683 goto out; 2712 goto out;
2684 } 2713 }
@@ -2689,10 +2718,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
2689 if (resp_len < 3 || 2718 if (resp_len < 3 ||
2690 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || 2719 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
2691 resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { 2720 resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
2692 printk(KERN_WARNING 2721 printk(KERN_WARNING PFX "Invalid return from get global,"
2693 "ipmi_si: Invalid return from get global," 2722 "enables command, not enable the event buffer.\n");
2694 "enables command, not enable the event"
2695 " buffer.\n");
2696 rv = -EINVAL; 2723 rv = -EINVAL;
2697 goto out; 2724 goto out;
2698 } 2725 }
@@ -2951,7 +2978,7 @@ static __devinit void default_find_bmc(void)
2951 if (!info) 2978 if (!info)
2952 return; 2979 return;
2953 2980
2954 info->addr_source = NULL; 2981 info->addr_source = SI_DEFAULT;
2955 2982
2956 info->si_type = ipmi_defaults[i].type; 2983 info->si_type = ipmi_defaults[i].type;
2957 info->io_setup = port_setup; 2984 info->io_setup = port_setup;
@@ -2963,14 +2990,16 @@ static __devinit void default_find_bmc(void)
2963 info->io.regsize = DEFAULT_REGSPACING; 2990 info->io.regsize = DEFAULT_REGSPACING;
2964 info->io.regshift = 0; 2991 info->io.regshift = 0;
2965 2992
2966 if (try_smi_init(info) == 0) { 2993 if (add_smi(info) == 0) {
2967 /* Found one... */ 2994 if ((try_smi_init(info)) == 0) {
2968 printk(KERN_INFO "ipmi_si: Found default %s state" 2995 /* Found one... */
2969 " machine at %s address 0x%lx\n", 2996 printk(KERN_INFO PFX "Found default %s"
2970 si_to_str[info->si_type], 2997 " state machine at %s address 0x%lx\n",
2971 addr_space_to_str[info->io.addr_type], 2998 si_to_str[info->si_type],
2972 info->io.addr_data); 2999 addr_space_to_str[info->io.addr_type],
2973 return; 3000 info->io.addr_data);
3001 } else
3002 cleanup_one_si(info);
2974 } 3003 }
2975 } 3004 }
2976} 3005}
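
Detection is now split into two phases: add_smi() only checks for duplicates and queues the interface on the global list under smi_infos_lock, while try_smi_init() does the heavy initialization later, and callers unwind a failed init with cleanup_one_si(). The register-then-initialize split, reduced to a sketch with invented foo names:

#include <linux/list.h>
#include <linux/mutex.h>

struct foo_info {			/* hypothetical per-interface record */
	struct list_head link;
};

static LIST_HEAD(foo_list);
static DEFINE_MUTEX(foo_list_lock);

static bool foo_is_duplicate(struct foo_info *info)
{
	return false;			/* stub for the real duplicate check */
}

static int foo_add(struct foo_info *info)
{
	int rv = 0;

	mutex_lock(&foo_list_lock);
	if (foo_is_duplicate(info))
		rv = -EBUSY;
	else
		list_add_tail(&info->link, &foo_list);	/* record it; init later */
	mutex_unlock(&foo_list_lock);
	return rv;
}

/* callers then do: if (!foo_add(info) && foo_init(info)) foo_cleanup(info); */
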
@@ -2989,34 +3018,48 @@ static int is_new_interface(struct smi_info *info)
2989 return 1; 3018 return 1;
2990} 3019}
2991 3020
2992static int try_smi_init(struct smi_info *new_smi) 3021static int add_smi(struct smi_info *new_smi)
2993{ 3022{
2994 int rv; 3023 int rv = 0;
2995 int i;
2996
2997 if (new_smi->addr_source) {
2998 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2999 " machine at %s address 0x%lx, slave address 0x%x,"
3000 " irq %d\n",
3001 new_smi->addr_source,
3002 si_to_str[new_smi->si_type],
3003 addr_space_to_str[new_smi->io.addr_type],
3004 new_smi->io.addr_data,
3005 new_smi->slave_addr, new_smi->irq);
3006 }
3007 3024
3025 printk(KERN_INFO PFX "Adding %s-specified %s state machine",
3026 ipmi_addr_src_to_str[new_smi->addr_source],
3027 si_to_str[new_smi->si_type]);
3008 mutex_lock(&smi_infos_lock); 3028 mutex_lock(&smi_infos_lock);
3009 if (!is_new_interface(new_smi)) { 3029 if (!is_new_interface(new_smi)) {
3010 printk(KERN_WARNING "ipmi_si: duplicate interface\n"); 3030 printk(KERN_CONT PFX "duplicate interface\n");
3011 rv = -EBUSY; 3031 rv = -EBUSY;
3012 goto out_err; 3032 goto out_err;
3013 } 3033 }
3014 3034
3035 printk(KERN_CONT "\n");
3036
3015 /* So we know not to free it unless we have allocated one. */ 3037 /* So we know not to free it unless we have allocated one. */
3016 new_smi->intf = NULL; 3038 new_smi->intf = NULL;
3017 new_smi->si_sm = NULL; 3039 new_smi->si_sm = NULL;
3018 new_smi->handlers = NULL; 3040 new_smi->handlers = NULL;
3019 3041
3042 list_add_tail(&new_smi->link, &smi_infos);
3043
3044out_err:
3045 mutex_unlock(&smi_infos_lock);
3046 return rv;
3047}
3048
3049static int try_smi_init(struct smi_info *new_smi)
3050{
3051 int rv = 0;
3052 int i;
3053
3054 printk(KERN_INFO PFX "Trying %s-specified %s state"
3055 " machine at %s address 0x%lx, slave address 0x%x,"
3056 " irq %d\n",
3057 ipmi_addr_src_to_str[new_smi->addr_source],
3058 si_to_str[new_smi->si_type],
3059 addr_space_to_str[new_smi->io.addr_type],
3060 new_smi->io.addr_data,
3061 new_smi->slave_addr, new_smi->irq);
3062
3020 switch (new_smi->si_type) { 3063 switch (new_smi->si_type) {
3021 case SI_KCS: 3064 case SI_KCS:
3022 new_smi->handlers = &kcs_smi_handlers; 3065 new_smi->handlers = &kcs_smi_handlers;
@@ -3039,7 +3082,8 @@ static int try_smi_init(struct smi_info *new_smi)
3039 /* Allocate the state machine's data and initialize it. */ 3082 /* Allocate the state machine's data and initialize it. */
3040 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); 3083 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
3041 if (!new_smi->si_sm) { 3084 if (!new_smi->si_sm) {
3042 printk(KERN_ERR "Could not allocate state machine memory\n"); 3085 printk(KERN_ERR PFX
3086 "Could not allocate state machine memory\n");
3043 rv = -ENOMEM; 3087 rv = -ENOMEM;
3044 goto out_err; 3088 goto out_err;
3045 } 3089 }
@@ -3049,7 +3093,7 @@ static int try_smi_init(struct smi_info *new_smi)
3049 /* Now that we know the I/O size, we can set up the I/O. */ 3093 /* Now that we know the I/O size, we can set up the I/O. */
3050 rv = new_smi->io_setup(new_smi); 3094 rv = new_smi->io_setup(new_smi);
3051 if (rv) { 3095 if (rv) {
3052 printk(KERN_ERR "Could not set up I/O space\n"); 3096 printk(KERN_ERR PFX "Could not set up I/O space\n");
3053 goto out_err; 3097 goto out_err;
3054 } 3098 }
3055 3099
@@ -3059,8 +3103,7 @@ static int try_smi_init(struct smi_info *new_smi)
3059 /* Do low-level detection first. */ 3103 /* Do low-level detection first. */
3060 if (new_smi->handlers->detect(new_smi->si_sm)) { 3104 if (new_smi->handlers->detect(new_smi->si_sm)) {
3061 if (new_smi->addr_source) 3105 if (new_smi->addr_source)
3062 printk(KERN_INFO "ipmi_si: Interface detection" 3106 printk(KERN_INFO PFX "Interface detection failed\n");
3063 " failed\n");
3064 rv = -ENODEV; 3107 rv = -ENODEV;
3065 goto out_err; 3108 goto out_err;
3066 } 3109 }
@@ -3072,7 +3115,7 @@ static int try_smi_init(struct smi_info *new_smi)
3072 rv = try_get_dev_id(new_smi); 3115 rv = try_get_dev_id(new_smi);
3073 if (rv) { 3116 if (rv) {
3074 if (new_smi->addr_source) 3117 if (new_smi->addr_source)
3075 printk(KERN_INFO "ipmi_si: There appears to be no BMC" 3118 printk(KERN_INFO PFX "There appears to be no BMC"
3076 " at this location\n"); 3119 " at this location\n");
3077 goto out_err; 3120 goto out_err;
3078 } 3121 }
@@ -3088,7 +3131,7 @@ static int try_smi_init(struct smi_info *new_smi)
3088 for (i = 0; i < SI_NUM_STATS; i++) 3131 for (i = 0; i < SI_NUM_STATS; i++)
3089 atomic_set(&new_smi->stats[i], 0); 3132 atomic_set(&new_smi->stats[i], 0);
3090 3133
3091 new_smi->interrupt_disabled = 0; 3134 new_smi->interrupt_disabled = 1;
3092 atomic_set(&new_smi->stop_operation, 0); 3135 atomic_set(&new_smi->stop_operation, 0);
3093 new_smi->intf_num = smi_num; 3136 new_smi->intf_num = smi_num;
3094 smi_num++; 3137 smi_num++;
@@ -3114,9 +3157,8 @@ static int try_smi_init(struct smi_info *new_smi)
3114 new_smi->pdev = platform_device_alloc("ipmi_si", 3157 new_smi->pdev = platform_device_alloc("ipmi_si",
3115 new_smi->intf_num); 3158 new_smi->intf_num);
3116 if (!new_smi->pdev) { 3159 if (!new_smi->pdev) {
3117 printk(KERN_ERR 3160 printk(KERN_ERR PFX
3118 "ipmi_si_intf:" 3161 "Unable to allocate platform device\n");
3119 " Unable to allocate platform device\n");
3120 goto out_err; 3162 goto out_err;
3121 } 3163 }
3122 new_smi->dev = &new_smi->pdev->dev; 3164 new_smi->dev = &new_smi->pdev->dev;
@@ -3124,9 +3166,8 @@ static int try_smi_init(struct smi_info *new_smi)
3124 3166
3125 rv = platform_device_add(new_smi->pdev); 3167 rv = platform_device_add(new_smi->pdev);
3126 if (rv) { 3168 if (rv) {
3127 printk(KERN_ERR 3169 printk(KERN_ERR PFX
3128 "ipmi_si_intf:" 3170 "Unable to register system interface device:"
3129 " Unable to register system interface device:"
3130 " %d\n", 3171 " %d\n",
3131 rv); 3172 rv);
3132 goto out_err; 3173 goto out_err;
@@ -3141,9 +3182,8 @@ static int try_smi_init(struct smi_info *new_smi)
3141 "bmc", 3182 "bmc",
3142 new_smi->slave_addr); 3183 new_smi->slave_addr);
3143 if (rv) { 3184 if (rv) {
3144 printk(KERN_ERR 3185 dev_err(new_smi->dev, "Unable to register device: error %d\n",
3145 "ipmi_si: Unable to register device: error %d\n", 3186 rv);
3146 rv);
3147 goto out_err_stop_timer; 3187 goto out_err_stop_timer;
3148 } 3188 }
3149 3189
@@ -3151,9 +3191,7 @@ static int try_smi_init(struct smi_info *new_smi)
3151 type_file_read_proc, 3191 type_file_read_proc,
3152 new_smi); 3192 new_smi);
3153 if (rv) { 3193 if (rv) {
3154 printk(KERN_ERR 3194 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3155 "ipmi_si: Unable to create proc entry: %d\n",
3156 rv);
3157 goto out_err_stop_timer; 3195 goto out_err_stop_timer;
3158 } 3196 }
3159 3197
@@ -3161,9 +3199,7 @@ static int try_smi_init(struct smi_info *new_smi)
3161 stat_file_read_proc, 3199 stat_file_read_proc,
3162 new_smi); 3200 new_smi);
3163 if (rv) { 3201 if (rv) {
3164 printk(KERN_ERR 3202 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3165 "ipmi_si: Unable to create proc entry: %d\n",
3166 rv);
3167 goto out_err_stop_timer; 3203 goto out_err_stop_timer;
3168 } 3204 }
3169 3205
@@ -3171,18 +3207,12 @@ static int try_smi_init(struct smi_info *new_smi)
3171 param_read_proc, 3207 param_read_proc,
3172 new_smi); 3208 new_smi);
3173 if (rv) { 3209 if (rv) {
3174 printk(KERN_ERR 3210 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3175 "ipmi_si: Unable to create proc entry: %d\n",
3176 rv);
3177 goto out_err_stop_timer; 3211 goto out_err_stop_timer;
3178 } 3212 }
3179 3213
3180 list_add_tail(&new_smi->link, &smi_infos); 3214 dev_info(new_smi->dev, "IPMI %s interface initialized\n",
3181 3215 si_to_str[new_smi->si_type]);
3182 mutex_unlock(&smi_infos_lock);
3183
3184 printk(KERN_INFO "IPMI %s interface initialized\n",
3185 si_to_str[new_smi->si_type]);
3186 3216
3187 return 0; 3217 return 0;
3188 3218
@@ -3191,11 +3221,17 @@ static int try_smi_init(struct smi_info *new_smi)
3191 wait_for_timer_and_thread(new_smi); 3221 wait_for_timer_and_thread(new_smi);
3192 3222
3193 out_err: 3223 out_err:
3194 if (new_smi->intf) 3224 new_smi->interrupt_disabled = 1;
3225
3226 if (new_smi->intf) {
3195 ipmi_unregister_smi(new_smi->intf); 3227 ipmi_unregister_smi(new_smi->intf);
3228 new_smi->intf = NULL;
3229 }
3196 3230
3197 if (new_smi->irq_cleanup) 3231 if (new_smi->irq_cleanup) {
3198 new_smi->irq_cleanup(new_smi); 3232 new_smi->irq_cleanup(new_smi);
3233 new_smi->irq_cleanup = NULL;
3234 }
3199 3235
3200 /* 3236 /*
3201 * Wait until we know that we are out of any interrupt 3237 * Wait until we know that we are out of any interrupt
@@ -3208,18 +3244,21 @@ static int try_smi_init(struct smi_info *new_smi)
3208 if (new_smi->handlers) 3244 if (new_smi->handlers)
3209 new_smi->handlers->cleanup(new_smi->si_sm); 3245 new_smi->handlers->cleanup(new_smi->si_sm);
3210 kfree(new_smi->si_sm); 3246 kfree(new_smi->si_sm);
3247 new_smi->si_sm = NULL;
3211 } 3248 }
3212 if (new_smi->addr_source_cleanup) 3249 if (new_smi->addr_source_cleanup) {
3213 new_smi->addr_source_cleanup(new_smi); 3250 new_smi->addr_source_cleanup(new_smi);
3214 if (new_smi->io_cleanup) 3251 new_smi->addr_source_cleanup = NULL;
3252 }
3253 if (new_smi->io_cleanup) {
3215 new_smi->io_cleanup(new_smi); 3254 new_smi->io_cleanup(new_smi);
3255 new_smi->io_cleanup = NULL;
3256 }
3216 3257
3217 if (new_smi->dev_registered) 3258 if (new_smi->dev_registered) {
3218 platform_device_unregister(new_smi->pdev); 3259 platform_device_unregister(new_smi->pdev);
3219 3260 new_smi->dev_registered = 0;
3220 kfree(new_smi); 3261 }
3221
3222 mutex_unlock(&smi_infos_lock);
3223 3262
3224 return rv; 3263 return rv;
3225} 3264}
@@ -3229,6 +3268,8 @@ static __devinit int init_ipmi_si(void)
3229 int i; 3268 int i;
3230 char *str; 3269 char *str;
3231 int rv; 3270 int rv;
3271 struct smi_info *e;
3272 enum ipmi_addr_src type = SI_INVALID;
3232 3273
3233 if (initialized) 3274 if (initialized)
3234 return 0; 3275 return 0;
@@ -3237,9 +3278,7 @@ static __devinit int init_ipmi_si(void)
3237 /* Register the device drivers. */ 3278 /* Register the device drivers. */
3238 rv = driver_register(&ipmi_driver.driver); 3279 rv = driver_register(&ipmi_driver.driver);
3239 if (rv) { 3280 if (rv) {
3240 printk(KERN_ERR 3281 printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
3241 "init_ipmi_si: Unable to register driver: %d\n",
3242 rv);
3243 return rv; 3282 return rv;
3244 } 3283 }
3245 3284
@@ -3263,38 +3302,81 @@ static __devinit int init_ipmi_si(void)
3263 3302
3264 hardcode_find_bmc(); 3303 hardcode_find_bmc();
3265 3304
3266#ifdef CONFIG_DMI 3305 /* If the user gave us a device, they presumably want us to use it */
3267 dmi_find_bmc(); 3306 mutex_lock(&smi_infos_lock);
3268#endif 3307 if (!list_empty(&smi_infos)) {
3308 mutex_unlock(&smi_infos_lock);
3309 return 0;
3310 }
3311 mutex_unlock(&smi_infos_lock);
3269 3312
3270#ifdef CONFIG_ACPI 3313#ifdef CONFIG_PCI
3271 spmi_find_bmc(); 3314 rv = pci_register_driver(&ipmi_pci_driver);
3315 if (rv)
3316 printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
3272#endif 3317#endif
3318
3273#ifdef CONFIG_ACPI 3319#ifdef CONFIG_ACPI
3274 pnp_register_driver(&ipmi_pnp_driver); 3320 pnp_register_driver(&ipmi_pnp_driver);
3275#endif 3321#endif
3276 3322
3277#ifdef CONFIG_PCI 3323#ifdef CONFIG_DMI
3278 rv = pci_register_driver(&ipmi_pci_driver); 3324 dmi_find_bmc();
3279 if (rv) 3325#endif
3280 printk(KERN_ERR 3326
3281 "init_ipmi_si: Unable to register PCI driver: %d\n", 3327#ifdef CONFIG_ACPI
3282 rv); 3328 spmi_find_bmc();
3283#endif 3329#endif
3284 3330
3285#ifdef CONFIG_PPC_OF 3331#ifdef CONFIG_PPC_OF
3286 of_register_platform_driver(&ipmi_of_platform_driver); 3332 of_register_platform_driver(&ipmi_of_platform_driver);
3287#endif 3333#endif
3288 3334
 3335	/* We prefer devices with interrupts, but in the case of a machine
 3336	   with multiple BMCs we assume that there will be several instances
 3337	   of a given type, so if we succeed in registering one type, also
 3338	   try to register everything else of the same type */
3339
3340 mutex_lock(&smi_infos_lock);
3341 list_for_each_entry(e, &smi_infos, link) {
3342 /* Try to register a device if it has an IRQ and we either
3343 haven't successfully registered a device yet or this
3344 device has the same type as one we successfully registered */
3345 if (e->irq && (!type || e->addr_source == type)) {
3346 if (!try_smi_init(e)) {
3347 type = e->addr_source;
3348 }
3349 }
3350 }
3351
3352 /* type will only have been set if we successfully registered an si */
3353 if (type) {
3354 mutex_unlock(&smi_infos_lock);
3355 return 0;
3356 }
3357
3358 /* Fall back to the preferred device */
3359
3360 list_for_each_entry(e, &smi_infos, link) {
3361 if (!e->irq && (!type || e->addr_source == type)) {
3362 if (!try_smi_init(e)) {
3363 type = e->addr_source;
3364 }
3365 }
3366 }
3367 mutex_unlock(&smi_infos_lock);
3368
3369 if (type)
3370 return 0;
3371
3289 if (si_trydefaults) { 3372 if (si_trydefaults) {
3290 mutex_lock(&smi_infos_lock); 3373 mutex_lock(&smi_infos_lock);
3291 if (list_empty(&smi_infos)) { 3374 if (list_empty(&smi_infos)) {
3292 /* No BMC was found, try defaults. */ 3375 /* No BMC was found, try defaults. */
3293 mutex_unlock(&smi_infos_lock); 3376 mutex_unlock(&smi_infos_lock);
3294 default_find_bmc(); 3377 default_find_bmc();
3295 } else { 3378 } else
3296 mutex_unlock(&smi_infos_lock); 3379 mutex_unlock(&smi_infos_lock);
3297 }
3298 } 3380 }
3299 3381
3300 mutex_lock(&smi_infos_lock); 3382 mutex_lock(&smi_infos_lock);
@@ -3308,8 +3390,8 @@ static __devinit int init_ipmi_si(void)
3308 of_unregister_platform_driver(&ipmi_of_platform_driver); 3390 of_unregister_platform_driver(&ipmi_of_platform_driver);
3309#endif 3391#endif
3310 driver_unregister(&ipmi_driver.driver); 3392 driver_unregister(&ipmi_driver.driver);
3311 printk(KERN_WARNING 3393 printk(KERN_WARNING PFX
3312 "ipmi_si: Unable to find any System Interface(s)\n"); 3394 "Unable to find any System Interface(s)\n");
3313 return -ENODEV; 3395 return -ENODEV;
3314 } else { 3396 } else {
3315 mutex_unlock(&smi_infos_lock); 3397 mutex_unlock(&smi_infos_lock);
@@ -3320,7 +3402,7 @@ module_init(init_ipmi_si);
3320 3402
3321static void cleanup_one_si(struct smi_info *to_clean) 3403static void cleanup_one_si(struct smi_info *to_clean)
3322{ 3404{
3323 int rv; 3405 int rv = 0;
3324 unsigned long flags; 3406 unsigned long flags;
3325 3407
3326 if (!to_clean) 3408 if (!to_clean)
@@ -3364,14 +3446,16 @@ static void cleanup_one_si(struct smi_info *to_clean)
3364 schedule_timeout_uninterruptible(1); 3446 schedule_timeout_uninterruptible(1);
3365 } 3447 }
3366 3448
3367 rv = ipmi_unregister_smi(to_clean->intf); 3449 if (to_clean->intf)
3450 rv = ipmi_unregister_smi(to_clean->intf);
3451
3368 if (rv) { 3452 if (rv) {
3369 printk(KERN_ERR 3453 printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
3370 "ipmi_si: Unable to unregister device: errno=%d\n",
3371 rv); 3454 rv);
3372 } 3455 }
3373 3456
3374 to_clean->handlers->cleanup(to_clean->si_sm); 3457 if (to_clean->handlers)
3458 to_clean->handlers->cleanup(to_clean->si_sm);
3375 3459
3376 kfree(to_clean->si_sm); 3460 kfree(to_clean->si_sm);
3377 3461
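
For orientation, the init_ipmi_si() rework above replaces unconditional registration during bus probing with a selection pass over the discovered interfaces. A sketch of the resulting policy, reconstructed from the hunks above rather than copied from the kernel source:

	/*
	 * Assumed registration order after this change:
	 *
	 *  1. hardcode_find_bmc(): if the user supplied a device, skip the
	 *     bus scans entirely and use it.
	 *  2. Otherwise run the bus scans (PCI, PNP/ACPI, DMI, SPMI, OF) to
	 *     populate the smi_infos list.
	 *  3. Pass 1: try_smi_init() every interface that has an IRQ; once
	 *     one address source succeeds, keep registering only entries of
	 *     that same source.
	 *  4. Pass 2: only if pass 1 registered nothing, repeat for the
	 *     interfaces without an IRQ.
	 *  5. If the list is still empty and si_trydefaults is set, fall
	 *     back to default_find_bmc().
	 */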
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index fdd37543aa79..02abfddce45a 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -287,12 +287,10 @@ static int register_device (int minor, struct pp_struct *pp)
287 char *name; 287 char *name;
288 int fl; 288 int fl;
289 289
290 name = kmalloc (strlen (CHRDEV) + 3, GFP_KERNEL); 290 name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
291 if (name == NULL) 291 if (name == NULL)
292 return -ENOMEM; 292 return -ENOMEM;
293 293
294 sprintf (name, CHRDEV "%x", minor);
295
296 port = parport_find_number (minor); 294 port = parport_find_number (minor);
297 if (!port) { 295 if (!port) {
298 printk (KERN_WARNING "%s: no associated port!\n", name); 296 printk (KERN_WARNING "%s: no associated port!\n", name);
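
The ppdev hunk above is the stock kmalloc()-plus-sprintf() to kasprintf() conversion. As a reminder of the pattern, with generic names rather than ppdev's:

	/*
	 * kasprintf() measures, allocates and formats in a single call;
	 * the result is still released with kfree().
	 */
	name = kasprintf(GFP_KERNEL, "%s%x", prefix, minor);
	if (!name)
		return -ENOMEM;
	/* ... use name ... */
	kfree(name);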
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index 606048b72bcf..85c004a518ee 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -305,8 +305,7 @@ static int ps3flash_flush(struct file *file, fl_owner_t id)
305 return ps3flash_writeback(ps3flash_dev); 305 return ps3flash_writeback(ps3flash_dev);
306} 306}
307 307
308static int ps3flash_fsync(struct file *file, struct dentry *dentry, 308static int ps3flash_fsync(struct file *file, int datasync)
309 int datasync)
310{ 309{
311 return ps3flash_writeback(ps3flash_dev); 310 return ps3flash_writeback(ps3flash_dev);
312} 311}
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
new file mode 100644
index 000000000000..74f00b5ffa36
--- /dev/null
+++ b/drivers/char/ramoops.c
@@ -0,0 +1,162 @@
1/*
2 * RAM Oops/Panic logger
3 *
4 * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/kmsg_dump.h>
25#include <linux/time.h>
26#include <linux/io.h>
27#include <linux/ioport.h>
28
29#define RAMOOPS_KERNMSG_HDR "===="
30#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval))
31
32#define RECORD_SIZE 4096
33
34static ulong mem_address;
35module_param(mem_address, ulong, 0400);
36MODULE_PARM_DESC(mem_address,
37 "start of reserved RAM used to store oops/panic logs");
38
39static ulong mem_size;
40module_param(mem_size, ulong, 0400);
41MODULE_PARM_DESC(mem_size,
42 "size of reserved RAM used to store oops/panic logs");
43
44static int dump_oops = 1;
45module_param(dump_oops, int, 0600);
46MODULE_PARM_DESC(dump_oops,
47 "set to 1 to dump oopses, 0 to only dump panics (default 1)");
48
49static struct ramoops_context {
50 struct kmsg_dumper dump;
51 void *virt_addr;
52 phys_addr_t phys_addr;
53 unsigned long size;
54 int count;
55 int max_count;
56} oops_cxt;
57
58static void ramoops_do_dump(struct kmsg_dumper *dumper,
59 enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
60 const char *s2, unsigned long l2)
61{
62 struct ramoops_context *cxt = container_of(dumper,
63 struct ramoops_context, dump);
64 unsigned long s1_start, s2_start;
65 unsigned long l1_cpy, l2_cpy;
66 int res;
67 char *buf;
68 struct timeval timestamp;
69
70 /* Only dump oopses if dump_oops is set */
71 if (reason == KMSG_DUMP_OOPS && !dump_oops)
72 return;
73
74 buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
75 memset(buf, '\0', RECORD_SIZE);
76 res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
77 buf += res;
78 do_gettimeofday(&timestamp);
79 res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec);
80 buf += res;
81
82 l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE));
83 l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy);
84
85 s2_start = l2 - l2_cpy;
86 s1_start = l1 - l1_cpy;
87
88 memcpy(buf, s1 + s1_start, l1_cpy);
89 memcpy(buf + l1_cpy, s2 + s2_start, l2_cpy);
90
91 cxt->count = (cxt->count + 1) % cxt->max_count;
92}
93
94static int __init ramoops_init(void)
95{
96 struct ramoops_context *cxt = &oops_cxt;
97 int err = -EINVAL;
98
99 if (!mem_size) {
100 printk(KERN_ERR "ramoops: invalid size specification");
101 goto fail3;
102 }
103
104 rounddown_pow_of_two(mem_size);
105
106 if (mem_size < RECORD_SIZE) {
107 printk(KERN_ERR "ramoops: size too small");
108 goto fail3;
109 }
110
111 cxt->max_count = mem_size / RECORD_SIZE;
112 cxt->count = 0;
113 cxt->size = mem_size;
114 cxt->phys_addr = mem_address;
115
116 if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
117 printk(KERN_ERR "ramoops: request mem region failed");
118 err = -EINVAL;
119 goto fail3;
120 }
121
122 cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size);
123 if (!cxt->virt_addr) {
124 printk(KERN_ERR "ramoops: ioremap failed");
125 goto fail2;
126 }
127
128 cxt->dump.dump = ramoops_do_dump;
129 err = kmsg_dump_register(&cxt->dump);
130 if (err) {
131 printk(KERN_ERR "ramoops: registering kmsg dumper failed");
132 goto fail1;
133 }
134
135 return 0;
136
137fail1:
138 iounmap(cxt->virt_addr);
139fail2:
140 release_mem_region(cxt->phys_addr, cxt->size);
141fail3:
142 return err;
143}
144
145static void __exit ramoops_exit(void)
146{
147 struct ramoops_context *cxt = &oops_cxt;
148
149 if (kmsg_dump_unregister(&cxt->dump) < 0)
150 printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper");
151
152 iounmap(cxt->virt_addr);
153 release_mem_region(cxt->phys_addr, cxt->size);
154}
155
156
157module_init(ramoops_init);
158module_exit(ramoops_exit);
159
160MODULE_LICENSE("GPL");
161MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
162MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");
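
ramoops is a new driver, so a short usage note may help: mem_address and mem_size describe a reserved physical RAM region (for example, loading with mem_address=0x8f000000 mem_size=0x100000 — placeholder values; the region must be kept out of the kernel's normal allocator), dump_oops=0 restricts dumping to panics, and after a reboot the records can be read back from that same physical region. A sketch of the record layout the dump routine above writes, reconstructed from the code rather than from any documentation:

	/*
	 * One RECORD_SIZE (4096-byte) slot as filled by ramoops_do_dump():
	 *
	 *   "===="              RAMOOPS_KERNMSG_HDR
	 *   "<sec>.<usec>\n"     timestamp from do_gettimeofday()
	 *   <tail of the log>    the newest l1_cpy + l2_cpy bytes of the
	 *                        kmsg buffers, truncated to fit the slot
	 *
	 * Slots are reused round-robin: cxt->count wraps at
	 * mem_size / RECORD_SIZE.
	 */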
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index bd1d1164fec5..7cdb6ee569cd 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -3967,13 +3967,9 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
3967 font.charcount = op->charcount; 3967 font.charcount = op->charcount;
3968 font.height = op->height; 3968 font.height = op->height;
3969 font.width = op->width; 3969 font.width = op->width;
3970 font.data = kmalloc(size, GFP_KERNEL); 3970 font.data = memdup_user(op->data, size);
3971 if (!font.data) 3971 if (IS_ERR(font.data))
3972 return -ENOMEM; 3972 return PTR_ERR(font.data);
3973 if (copy_from_user(font.data, op->data, size)) {
3974 kfree(font.data);
3975 return -EFAULT;
3976 }
3977 acquire_console_sem(); 3973 acquire_console_sem();
3978 if (vc->vc_sw->con_font_set) 3974 if (vc->vc_sw->con_font_set)
3979 rc = vc->vc_sw->con_font_set(vc, &font, op->flags); 3975 rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
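
The con_font_set() hunk above is the standard memdup_user() conversion; the general shape of the pattern, with generic names:

	void *buf;

	/*
	 * memdup_user() allocates and copies in one step and returns an
	 * ERR_PTR() on failure, so both the -ENOMEM and -EFAULT cases are
	 * propagated through PTR_ERR() instead of being open-coded.
	 */
	buf = memdup_user(user_ptr, len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... use buf ... */
	kfree(buf);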
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 12fdd3987a36..199488576a05 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -156,7 +156,7 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
156 156
157 if (dev->enabled) 157 if (dev->enabled)
158 return 0; 158 return 0;
159 if (!cpuidle_curr_driver || !cpuidle_curr_governor) 159 if (!cpuidle_get_driver() || !cpuidle_curr_governor)
160 return -EIO; 160 return -EIO;
161 if (!dev->state_count) 161 if (!dev->state_count)
162 return -EINVAL; 162 return -EINVAL;
@@ -207,7 +207,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
207{ 207{
208 if (!dev->enabled) 208 if (!dev->enabled)
209 return; 209 return;
210 if (!cpuidle_curr_driver || !cpuidle_curr_governor) 210 if (!cpuidle_get_driver() || !cpuidle_curr_governor)
211 return; 211 return;
212 212
213 dev->enabled = 0; 213 dev->enabled = 0;
@@ -271,10 +271,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
271{ 271{
272 int ret; 272 int ret;
273 struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); 273 struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
274 struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
274 275
275 if (!sys_dev) 276 if (!sys_dev)
276 return -EINVAL; 277 return -EINVAL;
277 if (!try_module_get(cpuidle_curr_driver->owner)) 278 if (!try_module_get(cpuidle_driver->owner))
278 return -EINVAL; 279 return -EINVAL;
279 280
280 init_completion(&dev->kobj_unregister); 281 init_completion(&dev->kobj_unregister);
@@ -284,7 +285,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
284 per_cpu(cpuidle_devices, dev->cpu) = dev; 285 per_cpu(cpuidle_devices, dev->cpu) = dev;
285 list_add(&dev->device_list, &cpuidle_detected_devices); 286 list_add(&dev->device_list, &cpuidle_detected_devices);
286 if ((ret = cpuidle_add_sysfs(sys_dev))) { 287 if ((ret = cpuidle_add_sysfs(sys_dev))) {
287 module_put(cpuidle_curr_driver->owner); 288 module_put(cpuidle_driver->owner);
288 return ret; 289 return ret;
289 } 290 }
290 291
@@ -325,6 +326,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
325void cpuidle_unregister_device(struct cpuidle_device *dev) 326void cpuidle_unregister_device(struct cpuidle_device *dev)
326{ 327{
327 struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); 328 struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
329 struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
328 330
329 if (dev->registered == 0) 331 if (dev->registered == 0)
330 return; 332 return;
@@ -340,7 +342,7 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
340 342
341 cpuidle_resume_and_unlock(); 343 cpuidle_resume_and_unlock();
342 344
343 module_put(cpuidle_curr_driver->owner); 345 module_put(cpuidle_driver->owner);
344} 346}
345 347
346EXPORT_SYMBOL_GPL(cpuidle_unregister_device); 348EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 9476ba33ee2c..33e50d556f17 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -9,7 +9,6 @@
9 9
10/* For internal use only */ 10/* For internal use only */
11extern struct cpuidle_governor *cpuidle_curr_governor; 11extern struct cpuidle_governor *cpuidle_curr_governor;
12extern struct cpuidle_driver *cpuidle_curr_driver;
13extern struct list_head cpuidle_governors; 12extern struct list_head cpuidle_governors;
14extern struct list_head cpuidle_detected_devices; 13extern struct list_head cpuidle_detected_devices;
15extern struct mutex cpuidle_lock; 14extern struct mutex cpuidle_lock;
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 2257004fe33d..fd1601e3d125 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -14,7 +14,7 @@
14 14
15#include "cpuidle.h" 15#include "cpuidle.h"
16 16
17struct cpuidle_driver *cpuidle_curr_driver; 17static struct cpuidle_driver *cpuidle_curr_driver;
18DEFINE_SPINLOCK(cpuidle_driver_lock); 18DEFINE_SPINLOCK(cpuidle_driver_lock);
19 19
20/** 20/**
@@ -40,13 +40,25 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
40EXPORT_SYMBOL_GPL(cpuidle_register_driver); 40EXPORT_SYMBOL_GPL(cpuidle_register_driver);
41 41
42/** 42/**
43 * cpuidle_get_driver - return the current driver
44 */
45struct cpuidle_driver *cpuidle_get_driver(void)
46{
47 return cpuidle_curr_driver;
48}
49EXPORT_SYMBOL_GPL(cpuidle_get_driver);
50
51/**
43 * cpuidle_unregister_driver - unregisters a driver 52 * cpuidle_unregister_driver - unregisters a driver
44 * @drv: the driver 53 * @drv: the driver
45 */ 54 */
46void cpuidle_unregister_driver(struct cpuidle_driver *drv) 55void cpuidle_unregister_driver(struct cpuidle_driver *drv)
47{ 56{
48 if (!drv) 57 if (drv != cpuidle_curr_driver) {
58 WARN(1, "invalid cpuidle_unregister_driver(%s)\n",
59 drv->name);
49 return; 60 return;
61 }
50 62
51 spin_lock(&cpuidle_driver_lock); 63 spin_lock(&cpuidle_driver_lock);
52 cpuidle_curr_driver = NULL; 64 cpuidle_curr_driver = NULL;
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 0ba9c8b8ee74..0310ffaec9df 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -47,10 +47,11 @@ static ssize_t show_current_driver(struct sysdev_class *class,
47 char *buf) 47 char *buf)
48{ 48{
49 ssize_t ret; 49 ssize_t ret;
50 struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
50 51
51 spin_lock(&cpuidle_driver_lock); 52 spin_lock(&cpuidle_driver_lock);
52 if (cpuidle_curr_driver) 53 if (cpuidle_driver)
53 ret = sprintf(buf, "%s\n", cpuidle_curr_driver->name); 54 ret = sprintf(buf, "%s\n", cpuidle_driver->name);
54 else 55 else
55 ret = sprintf(buf, "none\n"); 56 ret = sprintf(buf, "none\n");
56 spin_unlock(&cpuidle_driver_lock); 57 spin_unlock(&cpuidle_driver_lock);
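
With cpuidle_curr_driver now static to driver.c, other code reaches the active driver only through the new accessor. A minimal sketch of a caller (the function below is invented for illustration and is not part of this patch):

	static void report_cpuidle_driver(void)
	{
		struct cpuidle_driver *drv = cpuidle_get_driver();

		if (drv)
			pr_info("cpuidle driver in use: %s\n", drv->name);
		else
			pr_info("no cpuidle driver registered\n");
	}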
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 1b8877922fb0..9e01e96fee94 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -166,6 +166,15 @@ config TIMB_DMA
166config ARCH_HAS_ASYNC_TX_FIND_CHANNEL 166config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
167 bool 167 bool
168 168
169config PL330_DMA
170 tristate "DMA API Driver for PL330"
171 select DMA_ENGINE
172 depends on PL330
173 help
174 Select if your platform has one or more PL330 DMACs.
175 You need to provide platform specific settings via
176 platform_data for a dma-pl330 device.
177
169config DMA_ENGINE 178config DMA_ENGINE
170 bool 179 bool
171 180
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 20881426c1ac..0fe5ebbfda5d 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -22,3 +22,4 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
22obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ 22obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
23obj-$(CONFIG_TIMB_DMA) += timb_dma.o 23obj-$(CONFIG_TIMB_DMA) += timb_dma.o
24obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o 24obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
25obj-$(CONFIG_PL330_DMA) += pl330.o
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
new file mode 100644
index 000000000000..7c50f6dfd3f4
--- /dev/null
+++ b/drivers/dma/pl330.c
@@ -0,0 +1,866 @@
1/* linux/drivers/dma/pl330.c
2 *
3 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/io.h>
13#include <linux/init.h>
14#include <linux/slab.h>
15#include <linux/module.h>
16#include <linux/dmaengine.h>
17#include <linux/interrupt.h>
18#include <linux/amba/bus.h>
19#include <linux/amba/pl330.h>
20
21#define NR_DEFAULT_DESC 16
22
23enum desc_status {
24 /* In the DMAC pool */
25 FREE,
26 /*
 27	 * Allocated to some channel during prep_xxx
28 * Also may be sitting on the work_list.
29 */
30 PREP,
31 /*
32 * Sitting on the work_list and already submitted
33 * to the PL330 core. Not more than two descriptors
34 * of a channel can be BUSY at any time.
35 */
36 BUSY,
37 /*
38 * Sitting on the channel work_list but xfer done
39 * by PL330 core
40 */
41 DONE,
42};
43
44struct dma_pl330_chan {
45 /* Schedule desc completion */
46 struct tasklet_struct task;
47
48 /* DMA-Engine Channel */
49 struct dma_chan chan;
50
51 /* Last completed cookie */
52 dma_cookie_t completed;
53
54 /* List of to be xfered descriptors */
55 struct list_head work_list;
56
57 /* Pointer to the DMAC that manages this channel,
58 * NULL if the channel is available to be acquired.
59 * As the parent, this DMAC also provides descriptors
60 * to the channel.
61 */
62 struct dma_pl330_dmac *dmac;
63
64 /* To protect channel manipulation */
65 spinlock_t lock;
66
 67	/* Token of a hardware channel thread of PL330 DMAC.
68 * NULL if the channel is available to be acquired.
69 */
70 void *pl330_chid;
71};
72
73struct dma_pl330_dmac {
74 struct pl330_info pif;
75
76 /* DMA-Engine Device */
77 struct dma_device ddma;
78
79 /* Pool of descriptors available for the DMAC's channels */
80 struct list_head desc_pool;
81 /* To protect desc_pool manipulation */
82 spinlock_t pool_lock;
83
84 /* Peripheral channels connected to this DMAC */
85 struct dma_pl330_chan peripherals[0]; /* keep at end */
86};
87
88struct dma_pl330_desc {
89 /* To attach to a queue as child */
90 struct list_head node;
91
92 /* Descriptor for the DMA Engine API */
93 struct dma_async_tx_descriptor txd;
94
95 /* Xfer for PL330 core */
96 struct pl330_xfer px;
97
98 struct pl330_reqcfg rqcfg;
99 struct pl330_req req;
100
101 enum desc_status status;
102
103 /* The channel which currently holds this desc */
104 struct dma_pl330_chan *pchan;
105};
106
107static inline struct dma_pl330_chan *
108to_pchan(struct dma_chan *ch)
109{
110 if (!ch)
111 return NULL;
112
113 return container_of(ch, struct dma_pl330_chan, chan);
114}
115
116static inline struct dma_pl330_desc *
117to_desc(struct dma_async_tx_descriptor *tx)
118{
119 return container_of(tx, struct dma_pl330_desc, txd);
120}
121
122static inline void free_desc_list(struct list_head *list)
123{
124 struct dma_pl330_dmac *pdmac;
125 struct dma_pl330_desc *desc;
126 struct dma_pl330_chan *pch;
127 unsigned long flags;
128
129 if (list_empty(list))
130 return;
131
132 /* Finish off the work list */
133 list_for_each_entry(desc, list, node) {
134 dma_async_tx_callback callback;
135 void *param;
136
137 /* All desc in a list belong to same channel */
138 pch = desc->pchan;
139 callback = desc->txd.callback;
140 param = desc->txd.callback_param;
141
142 if (callback)
143 callback(param);
144
145 desc->pchan = NULL;
146 }
147
148 pdmac = pch->dmac;
149
150 spin_lock_irqsave(&pdmac->pool_lock, flags);
151 list_splice_tail_init(list, &pdmac->desc_pool);
152 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
153}
154
155static inline void fill_queue(struct dma_pl330_chan *pch)
156{
157 struct dma_pl330_desc *desc;
158 int ret;
159
160 list_for_each_entry(desc, &pch->work_list, node) {
161
162 /* If already submitted */
163 if (desc->status == BUSY)
164 break;
165
166 ret = pl330_submit_req(pch->pl330_chid,
167 &desc->req);
168 if (!ret) {
169 desc->status = BUSY;
170 break;
171 } else if (ret == -EAGAIN) {
172 /* QFull or DMAC Dying */
173 break;
174 } else {
175 /* Unacceptable request */
176 desc->status = DONE;
177 dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
178 __func__, __LINE__, desc->txd.cookie);
179 tasklet_schedule(&pch->task);
180 }
181 }
182}
183
184static void pl330_tasklet(unsigned long data)
185{
186 struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
187 struct dma_pl330_desc *desc, *_dt;
188 unsigned long flags;
189 LIST_HEAD(list);
190
191 spin_lock_irqsave(&pch->lock, flags);
192
193 /* Pick up ripe tomatoes */
194 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
195 if (desc->status == DONE) {
196 pch->completed = desc->txd.cookie;
197 list_move_tail(&desc->node, &list);
198 }
199
200 /* Try to submit a req imm. next to the last completed cookie */
201 fill_queue(pch);
202
203 /* Make sure the PL330 Channel thread is active */
204 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
205
206 spin_unlock_irqrestore(&pch->lock, flags);
207
208 free_desc_list(&list);
209}
210
211static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
212{
213 struct dma_pl330_desc *desc = token;
214 struct dma_pl330_chan *pch = desc->pchan;
215 unsigned long flags;
216
217 /* If desc aborted */
218 if (!pch)
219 return;
220
221 spin_lock_irqsave(&pch->lock, flags);
222
223 desc->status = DONE;
224
225 spin_unlock_irqrestore(&pch->lock, flags);
226
227 tasklet_schedule(&pch->task);
228}
229
230static int pl330_alloc_chan_resources(struct dma_chan *chan)
231{
232 struct dma_pl330_chan *pch = to_pchan(chan);
233 struct dma_pl330_dmac *pdmac = pch->dmac;
234 unsigned long flags;
235
236 spin_lock_irqsave(&pch->lock, flags);
237
238 pch->completed = chan->cookie = 1;
239
240 pch->pl330_chid = pl330_request_channel(&pdmac->pif);
241 if (!pch->pl330_chid) {
242 spin_unlock_irqrestore(&pch->lock, flags);
243 return 0;
244 }
245
246 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
247
248 spin_unlock_irqrestore(&pch->lock, flags);
249
250 return 1;
251}
252
253static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
254{
255 struct dma_pl330_chan *pch = to_pchan(chan);
256 struct dma_pl330_desc *desc;
257 unsigned long flags;
258
259 /* Only supports DMA_TERMINATE_ALL */
260 if (cmd != DMA_TERMINATE_ALL)
261 return -ENXIO;
262
263 spin_lock_irqsave(&pch->lock, flags);
264
265 /* FLUSH the PL330 Channel thread */
266 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
267
268 /* Mark all desc done */
269 list_for_each_entry(desc, &pch->work_list, node)
270 desc->status = DONE;
271
272 spin_unlock_irqrestore(&pch->lock, flags);
273
274 pl330_tasklet((unsigned long) pch);
275
276 return 0;
277}
278
279static void pl330_free_chan_resources(struct dma_chan *chan)
280{
281 struct dma_pl330_chan *pch = to_pchan(chan);
282 unsigned long flags;
283
284 spin_lock_irqsave(&pch->lock, flags);
285
286 tasklet_kill(&pch->task);
287
288 pl330_release_channel(pch->pl330_chid);
289 pch->pl330_chid = NULL;
290
291 spin_unlock_irqrestore(&pch->lock, flags);
292}
293
294static enum dma_status
295pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
296 struct dma_tx_state *txstate)
297{
298 struct dma_pl330_chan *pch = to_pchan(chan);
299 dma_cookie_t last_done, last_used;
300 int ret;
301
302 last_done = pch->completed;
303 last_used = chan->cookie;
304
305 ret = dma_async_is_complete(cookie, last_done, last_used);
306
307 dma_set_tx_state(txstate, last_done, last_used, 0);
308
309 return ret;
310}
311
312static void pl330_issue_pending(struct dma_chan *chan)
313{
314 pl330_tasklet((unsigned long) to_pchan(chan));
315}
316
317/*
318 * We returned the last one of the circular list of descriptor(s)
319 * from prep_xxx, so the argument to submit corresponds to the last
320 * descriptor of the list.
321 */
322static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
323{
324 struct dma_pl330_desc *desc, *last = to_desc(tx);
325 struct dma_pl330_chan *pch = to_pchan(tx->chan);
326 dma_cookie_t cookie;
327 unsigned long flags;
328
329 spin_lock_irqsave(&pch->lock, flags);
330
331 /* Assign cookies to all nodes */
332 cookie = tx->chan->cookie;
333
334 while (!list_empty(&last->node)) {
335 desc = list_entry(last->node.next, struct dma_pl330_desc, node);
336
337 if (++cookie < 0)
338 cookie = 1;
339 desc->txd.cookie = cookie;
340
341 list_move_tail(&desc->node, &pch->work_list);
342 }
343
344 if (++cookie < 0)
345 cookie = 1;
346 last->txd.cookie = cookie;
347
348 list_add_tail(&last->node, &pch->work_list);
349
350 tx->chan->cookie = cookie;
351
352 spin_unlock_irqrestore(&pch->lock, flags);
353
354 return cookie;
355}
356
357static inline void _init_desc(struct dma_pl330_desc *desc)
358{
359 desc->pchan = NULL;
360 desc->req.x = &desc->px;
361 desc->req.token = desc;
362 desc->rqcfg.swap = SWAP_NO;
363 desc->rqcfg.privileged = 0;
364 desc->rqcfg.insnaccess = 0;
365 desc->rqcfg.scctl = SCCTRL0;
366 desc->rqcfg.dcctl = DCCTRL0;
367 desc->req.cfg = &desc->rqcfg;
368 desc->req.xfer_cb = dma_pl330_rqcb;
369 desc->txd.tx_submit = pl330_tx_submit;
370
371 INIT_LIST_HEAD(&desc->node);
372}
373
374/* Returns the number of descriptors added to the DMAC pool */
375int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
376{
377 struct dma_pl330_desc *desc;
378 unsigned long flags;
379 int i;
380
381 if (!pdmac)
382 return 0;
383
384 desc = kmalloc(count * sizeof(*desc), flg);
385 if (!desc)
386 return 0;
387
388 spin_lock_irqsave(&pdmac->pool_lock, flags);
389
390 for (i = 0; i < count; i++) {
391 _init_desc(&desc[i]);
392 list_add_tail(&desc[i].node, &pdmac->desc_pool);
393 }
394
395 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
396
397 return count;
398}
399
400static struct dma_pl330_desc *
401pluck_desc(struct dma_pl330_dmac *pdmac)
402{
403 struct dma_pl330_desc *desc = NULL;
404 unsigned long flags;
405
406 if (!pdmac)
407 return NULL;
408
409 spin_lock_irqsave(&pdmac->pool_lock, flags);
410
411 if (!list_empty(&pdmac->desc_pool)) {
412 desc = list_entry(pdmac->desc_pool.next,
413 struct dma_pl330_desc, node);
414
415 list_del_init(&desc->node);
416
417 desc->status = PREP;
418 desc->txd.callback = NULL;
419 }
420
421 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
422
423 return desc;
424}
425
426static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
427{
428 struct dma_pl330_dmac *pdmac = pch->dmac;
429 struct dma_pl330_peri *peri = pch->chan.private;
430 struct dma_pl330_desc *desc;
431
432 /* Pluck one desc from the pool of DMAC */
433 desc = pluck_desc(pdmac);
434
435 /* If the DMAC pool is empty, alloc new */
436 if (!desc) {
437 if (!add_desc(pdmac, GFP_ATOMIC, 1))
438 return NULL;
439
440 /* Try again */
441 desc = pluck_desc(pdmac);
442 if (!desc) {
443 dev_err(pch->dmac->pif.dev,
444 "%s:%d ALERT!\n", __func__, __LINE__);
445 return NULL;
446 }
447 }
448
449 /* Initialize the descriptor */
450 desc->pchan = pch;
451 desc->txd.cookie = 0;
452 async_tx_ack(&desc->txd);
453
454 desc->req.rqtype = peri->rqtype;
455 desc->req.peri = peri->peri_id;
456
457 dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
458
459 return desc;
460}
461
462static inline void fill_px(struct pl330_xfer *px,
463 dma_addr_t dst, dma_addr_t src, size_t len)
464{
465 px->next = NULL;
466 px->bytes = len;
467 px->dst_addr = dst;
468 px->src_addr = src;
469}
470
471static struct dma_pl330_desc *
472__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
473 dma_addr_t src, size_t len)
474{
475 struct dma_pl330_desc *desc = pl330_get_desc(pch);
476
477 if (!desc) {
478 dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
479 __func__, __LINE__);
480 return NULL;
481 }
482
483 /*
 484	 * Ideally we should look out for reqs bigger than
485 * those that can be programmed with 256 bytes of
486 * MC buffer, but considering a req size is seldom
487 * going to be word-unaligned and more than 200MB,
488 * we take it easy.
 489	 * Also, should the limit be reached we'd rather
 490	 * have the platform increase MC buffer size than
 491	 * complicate this API driver.
492 */
493 fill_px(&desc->px, dst, src, len);
494
495 return desc;
496}
497
498/* Call after fixing burst size */
499static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
500{
501 struct dma_pl330_chan *pch = desc->pchan;
502 struct pl330_info *pi = &pch->dmac->pif;
503 int burst_len;
504
505 burst_len = pi->pcfg.data_bus_width / 8;
506 burst_len *= pi->pcfg.data_buf_dep;
507 burst_len >>= desc->rqcfg.brst_size;
508
509 /* src/dst_burst_len can't be more than 16 */
510 if (burst_len > 16)
511 burst_len = 16;
512
513 while (burst_len > 1) {
514 if (!(len % (burst_len << desc->rqcfg.brst_size)))
515 break;
516 burst_len--;
517 }
518
519 return burst_len;
520}
521
522static struct dma_async_tx_descriptor *
523pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
524 dma_addr_t src, size_t len, unsigned long flags)
525{
526 struct dma_pl330_desc *desc;
527 struct dma_pl330_chan *pch = to_pchan(chan);
528 struct dma_pl330_peri *peri = chan->private;
529 struct pl330_info *pi;
530 int burst;
531
532 if (unlikely(!pch || !len || !peri))
533 return NULL;
534
535 if (peri->rqtype != MEMTOMEM)
536 return NULL;
537
538 pi = &pch->dmac->pif;
539
540 desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
541 if (!desc)
542 return NULL;
543
544 desc->rqcfg.src_inc = 1;
545 desc->rqcfg.dst_inc = 1;
546
547 /* Select max possible burst size */
548 burst = pi->pcfg.data_bus_width / 8;
549
550 while (burst > 1) {
551 if (!(len % burst))
552 break;
553 burst /= 2;
554 }
555
556 desc->rqcfg.brst_size = 0;
557 while (burst != (1 << desc->rqcfg.brst_size))
558 desc->rqcfg.brst_size++;
559
560 desc->rqcfg.brst_len = get_burst_len(desc, len);
561
562 desc->txd.flags = flags;
563
564 return &desc->txd;
565}
566
567static struct dma_async_tx_descriptor *
568pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
569 unsigned int sg_len, enum dma_data_direction direction,
570 unsigned long flg)
571{
572 struct dma_pl330_desc *first, *desc = NULL;
573 struct dma_pl330_chan *pch = to_pchan(chan);
574 struct dma_pl330_peri *peri = chan->private;
575 struct scatterlist *sg;
576 unsigned long flags;
577 int i, burst_size;
578 dma_addr_t addr;
579
580 if (unlikely(!pch || !sgl || !sg_len))
581 return NULL;
582
583 /* Make sure the direction is consistent */
584 if ((direction == DMA_TO_DEVICE &&
585 peri->rqtype != MEMTODEV) ||
586 (direction == DMA_FROM_DEVICE &&
587 peri->rqtype != DEVTOMEM)) {
588 dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
589 __func__, __LINE__);
590 return NULL;
591 }
592
593 addr = peri->fifo_addr;
594 burst_size = peri->burst_sz;
595
596 first = NULL;
597
598 for_each_sg(sgl, sg, sg_len, i) {
599
600 desc = pl330_get_desc(pch);
601 if (!desc) {
602 struct dma_pl330_dmac *pdmac = pch->dmac;
603
604 dev_err(pch->dmac->pif.dev,
605 "%s:%d Unable to fetch desc\n",
606 __func__, __LINE__);
607 if (!first)
608 return NULL;
609
610 spin_lock_irqsave(&pdmac->pool_lock, flags);
611
612 while (!list_empty(&first->node)) {
613 desc = list_entry(first->node.next,
614 struct dma_pl330_desc, node);
615 list_move_tail(&desc->node, &pdmac->desc_pool);
616 }
617
618 list_move_tail(&first->node, &pdmac->desc_pool);
619
620 spin_unlock_irqrestore(&pdmac->pool_lock, flags);
621
622 return NULL;
623 }
624
625 if (!first)
626 first = desc;
627 else
628 list_add_tail(&desc->node, &first->node);
629
630 if (direction == DMA_TO_DEVICE) {
631 desc->rqcfg.src_inc = 1;
632 desc->rqcfg.dst_inc = 0;
633 fill_px(&desc->px,
634 addr, sg_dma_address(sg), sg_dma_len(sg));
635 } else {
636 desc->rqcfg.src_inc = 0;
637 desc->rqcfg.dst_inc = 1;
638 fill_px(&desc->px,
639 sg_dma_address(sg), addr, sg_dma_len(sg));
640 }
641
642 desc->rqcfg.brst_size = burst_size;
643 desc->rqcfg.brst_len = 1;
644 }
645
646 /* Return the last desc in the chain */
647 desc->txd.flags = flg;
648 return &desc->txd;
649}
650
651static irqreturn_t pl330_irq_handler(int irq, void *data)
652{
653 if (pl330_update(data))
654 return IRQ_HANDLED;
655 else
656 return IRQ_NONE;
657}
658
659static int __devinit
660pl330_probe(struct amba_device *adev, struct amba_id *id)
661{
662 struct dma_pl330_platdata *pdat;
663 struct dma_pl330_dmac *pdmac;
664 struct dma_pl330_chan *pch;
665 struct pl330_info *pi;
666 struct dma_device *pd;
667 struct resource *res;
668 int i, ret, irq;
669
670 pdat = adev->dev.platform_data;
671
672 if (!pdat || !pdat->nr_valid_peri) {
673 dev_err(&adev->dev, "platform data missing\n");
674 return -ENODEV;
675 }
676
677 /* Allocate a new DMAC and its Channels */
678 pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
679 + sizeof(*pdmac), GFP_KERNEL);
680 if (!pdmac) {
681 dev_err(&adev->dev, "unable to allocate mem\n");
682 return -ENOMEM;
683 }
684
685 pi = &pdmac->pif;
686 pi->dev = &adev->dev;
687 pi->pl330_data = NULL;
688 pi->mcbufsz = pdat->mcbuf_sz;
689
690 res = &adev->res;
691 request_mem_region(res->start, resource_size(res), "dma-pl330");
692
693 pi->base = ioremap(res->start, resource_size(res));
694 if (!pi->base) {
695 ret = -ENXIO;
696 goto probe_err1;
697 }
698
699 irq = adev->irq[0];
700 ret = request_irq(irq, pl330_irq_handler, 0,
701 dev_name(&adev->dev), pi);
702 if (ret)
703 goto probe_err2;
704
705 ret = pl330_add(pi);
706 if (ret)
707 goto probe_err3;
708
709 INIT_LIST_HEAD(&pdmac->desc_pool);
710 spin_lock_init(&pdmac->pool_lock);
711
712 /* Create a descriptor pool of default size */
713 if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
714 dev_warn(&adev->dev, "unable to allocate desc\n");
715
716 pd = &pdmac->ddma;
717 INIT_LIST_HEAD(&pd->channels);
718
719 /* Initialize channel parameters */
720 for (i = 0; i < pdat->nr_valid_peri; i++) {
721 struct dma_pl330_peri *peri = &pdat->peri[i];
722 pch = &pdmac->peripherals[i];
723
724 switch (peri->rqtype) {
725 case MEMTOMEM:
726 dma_cap_set(DMA_MEMCPY, pd->cap_mask);
727 break;
728 case MEMTODEV:
729 case DEVTOMEM:
730 dma_cap_set(DMA_SLAVE, pd->cap_mask);
731 break;
732 default:
733 dev_err(&adev->dev, "DEVTODEV Not Supported\n");
734 continue;
735 }
736
737 INIT_LIST_HEAD(&pch->work_list);
738 spin_lock_init(&pch->lock);
739 pch->pl330_chid = NULL;
740 pch->chan.private = peri;
741 pch->chan.device = pd;
742 pch->chan.chan_id = i;
743 pch->dmac = pdmac;
744
745 /* Add the channel to the DMAC list */
746 pd->chancnt++;
747 list_add_tail(&pch->chan.device_node, &pd->channels);
748 }
749
750 pd->dev = &adev->dev;
751
752 pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
753 pd->device_free_chan_resources = pl330_free_chan_resources;
754 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
755 pd->device_tx_status = pl330_tx_status;
756 pd->device_prep_slave_sg = pl330_prep_slave_sg;
757 pd->device_control = pl330_control;
758 pd->device_issue_pending = pl330_issue_pending;
759
760 ret = dma_async_device_register(pd);
761 if (ret) {
762 dev_err(&adev->dev, "unable to register DMAC\n");
763 goto probe_err4;
764 }
765
766 amba_set_drvdata(adev, pdmac);
767
768 dev_info(&adev->dev,
769 "Loaded driver for PL330 DMAC-%d\n", adev->periphid);
770 dev_info(&adev->dev,
771 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
772 pi->pcfg.data_buf_dep,
773 pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
774 pi->pcfg.num_peri, pi->pcfg.num_events);
775
776 return 0;
777
778probe_err4:
779 pl330_del(pi);
780probe_err3:
781 free_irq(irq, pi);
782probe_err2:
783 iounmap(pi->base);
784probe_err1:
785 release_mem_region(res->start, resource_size(res));
786 kfree(pdmac);
787
788 return ret;
789}
790
791static int __devexit pl330_remove(struct amba_device *adev)
792{
793 struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
794 struct dma_pl330_chan *pch, *_p;
795 struct pl330_info *pi;
796 struct resource *res;
797 int irq;
798
799 if (!pdmac)
800 return 0;
801
802 amba_set_drvdata(adev, NULL);
803
804 /* Idle the DMAC */
805 list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
806 chan.device_node) {
807
808 /* Remove the channel */
809 list_del(&pch->chan.device_node);
810
811 /* Flush the channel */
812 pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
813 pl330_free_chan_resources(&pch->chan);
814 }
815
816 pi = &pdmac->pif;
817
818 pl330_del(pi);
819
820 irq = adev->irq[0];
821 free_irq(irq, pi);
822
823 iounmap(pi->base);
824
825 res = &adev->res;
826 release_mem_region(res->start, resource_size(res));
827
828 kfree(pdmac);
829
830 return 0;
831}
832
833static struct amba_id pl330_ids[] = {
834 {
835 .id = 0x00041330,
836 .mask = 0x000fffff,
837 },
838 { 0, 0 },
839};
840
841static struct amba_driver pl330_driver = {
842 .drv = {
843 .owner = THIS_MODULE,
844 .name = "dma-pl330",
845 },
846 .id_table = pl330_ids,
847 .probe = pl330_probe,
848 .remove = pl330_remove,
849};
850
851static int __init pl330_init(void)
852{
853 return amba_driver_register(&pl330_driver);
854}
855module_init(pl330_init);
856
857static void __exit pl330_exit(void)
858{
859 amba_driver_unregister(&pl330_driver);
860 return;
861}
862module_exit(pl330_exit);
863
864MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
865MODULE_DESCRIPTION("API Driver for PL330 DMAC");
866MODULE_LICENSE("GPL");
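
To show where the new dma-pl330 driver sits in the stack, below is a rough client-side sketch that exercises its DMA_MEMCPY capability through the generic dmaengine API. Everything here is illustrative: the function name is invented, channel filtering is omitted, dst/src are assumed to be DMA-mapped already, and it presumes the platform data declared a MEMTOMEM peripheral so that a memcpy-capable channel exists.

	#include <linux/dmaengine.h>

	static int pl330_memcpy_demo(dma_addr_t dst, dma_addr_t src, size_t len)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		/* Grab any memcpy-capable channel; a real user would filter. */
		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return -ENODEV;

		tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
							  DMA_PREP_INTERRUPT);
		if (!tx) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		cookie = tx->tx_submit(tx);	/* ends up in pl330_tx_submit() */
		dma_async_issue_pending(chan);	/* kicks pl330_issue_pending() */

		/* Poll for completion; a real client would use tx->callback. */
		while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
							DMA_IN_PROGRESS)
			cpu_relax();

		dma_release_channel(chan);
		return 0;
	}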
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index adc10a2ac5f6..996c1bdb5a34 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -774,7 +774,7 @@ static void i5000_clear_error(struct mem_ctl_info *mci)
774static void i5000_check_error(struct mem_ctl_info *mci) 774static void i5000_check_error(struct mem_ctl_info *mci)
775{ 775{
776 struct i5000_error_info info; 776 struct i5000_error_info info;
777 debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 777 debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
778 i5000_get_error_info(mci, &info); 778 i5000_get_error_info(mci, &info);
779 i5000_process_error_info(mci, &info, 1); 779 i5000_process_error_info(mci, &info, 1);
780} 780}
@@ -1353,8 +1353,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1353 int num_dimms_per_channel; 1353 int num_dimms_per_channel;
1354 int num_csrows; 1354 int num_csrows;
1355 1355
1356 debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1356 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
1357 __func__, 1357 __FILE__, __func__,
1358 pdev->bus->number, 1358 pdev->bus->number,
1359 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1359 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1360 1360
@@ -1389,7 +1389,7 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1389 return -ENOMEM; 1389 return -ENOMEM;
1390 1390
1391 kobject_get(&mci->edac_mci_kobj); 1391 kobject_get(&mci->edac_mci_kobj);
1392 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 1392 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
1393 1393
1394 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1394 mci->dev = &pdev->dev; /* record ptr to the generic device */
1395 1395
@@ -1432,8 +1432,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1432 1432
1433 /* add this new MC control structure to EDAC's list of MCs */ 1433 /* add this new MC control structure to EDAC's list of MCs */
1434 if (edac_mc_add_mc(mci)) { 1434 if (edac_mc_add_mc(mci)) {
1435 debugf0("MC: " __FILE__ 1435 debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
1436 ": %s(): failed edac_mc_add_mc()\n", __func__); 1436 __FILE__, __func__);
1437 /* FIXME: perhaps some code should go here that disables error 1437 /* FIXME: perhaps some code should go here that disables error
1438 * reporting if we just enabled it 1438 * reporting if we just enabled it
1439 */ 1439 */
@@ -1478,7 +1478,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev,
1478{ 1478{
1479 int rc; 1479 int rc;
1480 1480
1481 debugf0("MC: " __FILE__ ": %s()\n", __func__); 1481 debugf0("MC: %s: %s()\n", __FILE__, __func__);
1482 1482
1483 /* wake up device */ 1483 /* wake up device */
1484 rc = pci_enable_device(pdev); 1484 rc = pci_enable_device(pdev);
@@ -1497,7 +1497,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
1497{ 1497{
1498 struct mem_ctl_info *mci; 1498 struct mem_ctl_info *mci;
1499 1499
1500 debugf0(__FILE__ ": %s()\n", __func__); 1500 debugf0("%s: %s()\n", __FILE__, __func__);
1501 1501
1502 if (i5000_pci) 1502 if (i5000_pci)
1503 edac_pci_release_generic_ctl(i5000_pci); 1503 edac_pci_release_generic_ctl(i5000_pci);
@@ -1544,7 +1544,7 @@ static int __init i5000_init(void)
1544{ 1544{
1545 int pci_rc; 1545 int pci_rc;
1546 1546
1547 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1547 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1548 1548
1549 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1549 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1550 opstate_init(); 1550 opstate_init();
@@ -1560,7 +1560,7 @@ static int __init i5000_init(void)
1560 */ 1560 */
1561static void __exit i5000_exit(void) 1561static void __exit i5000_exit(void)
1562{ 1562{
1563 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1563 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1564 pci_unregister_driver(&i5000_driver); 1564 pci_unregister_driver(&i5000_driver);
1565} 1565}
1566 1566
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index f99d10655ed4..010c1d6526f5 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -694,7 +694,7 @@ static void i5400_clear_error(struct mem_ctl_info *mci)
694static void i5400_check_error(struct mem_ctl_info *mci) 694static void i5400_check_error(struct mem_ctl_info *mci)
695{ 695{
696 struct i5400_error_info info; 696 struct i5400_error_info info;
697 debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 697 debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
698 i5400_get_error_info(mci, &info); 698 i5400_get_error_info(mci, &info);
699 i5400_process_error_info(mci, &info); 699 i5400_process_error_info(mci, &info);
700} 700}
@@ -1227,8 +1227,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1227 if (dev_idx >= ARRAY_SIZE(i5400_devs)) 1227 if (dev_idx >= ARRAY_SIZE(i5400_devs))
1228 return -EINVAL; 1228 return -EINVAL;
1229 1229
1230 debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1230 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
1231 __func__, 1231 __FILE__, __func__,
1232 pdev->bus->number, 1232 pdev->bus->number,
1233 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 1233 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1234 1234
@@ -1256,7 +1256,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1256 if (mci == NULL) 1256 if (mci == NULL)
1257 return -ENOMEM; 1257 return -ENOMEM;
1258 1258
1259 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 1259 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
1260 1260
1261 mci->dev = &pdev->dev; /* record ptr to the generic device */ 1261 mci->dev = &pdev->dev; /* record ptr to the generic device */
1262 1262
@@ -1299,8 +1299,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1299 1299
1300 /* add this new MC control structure to EDAC's list of MCs */ 1300 /* add this new MC control structure to EDAC's list of MCs */
1301 if (edac_mc_add_mc(mci)) { 1301 if (edac_mc_add_mc(mci)) {
1302 debugf0("MC: " __FILE__ 1302 debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
1303 ": %s(): failed edac_mc_add_mc()\n", __func__); 1303 __FILE__, __func__);
1304 /* FIXME: perhaps some code should go here that disables error 1304 /* FIXME: perhaps some code should go here that disables error
1305 * reporting if we just enabled it 1305 * reporting if we just enabled it
1306 */ 1306 */
@@ -1344,7 +1344,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev,
1344{ 1344{
1345 int rc; 1345 int rc;
1346 1346
1347 debugf0("MC: " __FILE__ ": %s()\n", __func__); 1347 debugf0("MC: %s: %s()\n", __FILE__, __func__);
1348 1348
1349 /* wake up device */ 1349 /* wake up device */
1350 rc = pci_enable_device(pdev); 1350 rc = pci_enable_device(pdev);
@@ -1363,7 +1363,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
1363{ 1363{
1364 struct mem_ctl_info *mci; 1364 struct mem_ctl_info *mci;
1365 1365
1366 debugf0(__FILE__ ": %s()\n", __func__); 1366 debugf0("%s: %s()\n", __FILE__, __func__);
1367 1367
1368 if (i5400_pci) 1368 if (i5400_pci)
1369 edac_pci_release_generic_ctl(i5400_pci); 1369 edac_pci_release_generic_ctl(i5400_pci);
@@ -1409,7 +1409,7 @@ static int __init i5400_init(void)
1409{ 1409{
1410 int pci_rc; 1410 int pci_rc;
1411 1411
1412 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1412 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1413 1413
1414 /* Ensure that the OPSTATE is set correctly for POLL or NMI */ 1414 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1415 opstate_init(); 1415 opstate_init();
@@ -1425,7 +1425,7 @@ static int __init i5400_init(void)
1425 */ 1425 */
1426static void __exit i5400_exit(void) 1426static void __exit i5400_exit(void)
1427{ 1427{
1428 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1428 debugf2("MC: %s: %s()\n", __FILE__, __func__);
1429 pci_unregister_driver(&i5400_driver); 1429 pci_unregister_driver(&i5400_driver);
1430} 1430}
1431 1431
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 2bf2c5051bfe..a2fa1feed724 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -178,7 +178,7 @@ static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
178{ 178{
179 struct i82443bxgx_edacmc_error_info info; 179 struct i82443bxgx_edacmc_error_info info;
180 180
181 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 181 debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
182 i82443bxgx_edacmc_get_error_info(mci, &info); 182 i82443bxgx_edacmc_get_error_info(mci, &info);
183 i82443bxgx_edacmc_process_error_info(mci, &info, 1); 183 i82443bxgx_edacmc_process_error_info(mci, &info, 1);
184} 184}
@@ -198,13 +198,13 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
198 for (index = 0; index < mci->nr_csrows; index++) { 198 for (index = 0; index < mci->nr_csrows; index++) {
199 csrow = &mci->csrows[index]; 199 csrow = &mci->csrows[index];
200 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar); 200 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
201 debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n", 201 debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
202 mci->mc_idx, __func__, index, drbar); 202 mci->mc_idx, __FILE__, __func__, index, drbar);
203 row_high_limit = ((u32) drbar << 23); 203 row_high_limit = ((u32) drbar << 23);
204 /* find the DRAM Chip Select Base address and mask */ 204 /* find the DRAM Chip Select Base address and mask */
205 debugf1("MC%d: " __FILE__ ": %s() Row=%d, " 205 debugf1("MC%d: %s: %s() Row=%d, "
206 "Boundry Address=%#0x, Last = %#0x \n", 206 "Boundry Address=%#0x, Last = %#0x\n",
207 mci->mc_idx, __func__, index, row_high_limit, 207 mci->mc_idx, __FILE__, __func__, index, row_high_limit,
208 row_high_limit_last); 208 row_high_limit_last);
209 209
210 /* 440GX goes to 2GB, represented with a DRB of 0. */ 210 /* 440GX goes to 2GB, represented with a DRB of 0. */
@@ -237,7 +237,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
237 enum mem_type mtype; 237 enum mem_type mtype;
238 enum edac_type edac_mode; 238 enum edac_type edac_mode;
239 239
240 debugf0("MC: " __FILE__ ": %s()\n", __func__); 240 debugf0("MC: %s: %s()\n", __FILE__, __func__);
241 241
242 /* Something is really hosed if PCI config space reads from 242 /* Something is really hosed if PCI config space reads from
243 * the MC aren't working. 243 * the MC aren't working.
@@ -250,7 +250,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
250 if (mci == NULL) 250 if (mci == NULL)
251 return -ENOMEM; 251 return -ENOMEM;
252 252
253 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 253 debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
254 mci->dev = &pdev->dev; 254 mci->dev = &pdev->dev;
255 mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR; 255 mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
256 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 256 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
@@ -336,7 +336,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
336 __func__); 336 __func__);
337 } 337 }
338 338
339 debugf3("MC: " __FILE__ ": %s(): success\n", __func__); 339 debugf3("MC: %s: %s(): success\n", __FILE__, __func__);
340 return 0; 340 return 0;
341 341
342fail: 342fail:
@@ -352,7 +352,7 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
352{ 352{
353 int rc; 353 int rc;
354 354
355 debugf0("MC: " __FILE__ ": %s()\n", __func__); 355 debugf0("MC: %s: %s()\n", __FILE__, __func__);
356 356
357 /* don't need to call pci_enable_device() */ 357 /* don't need to call pci_enable_device() */
358 rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data); 358 rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
@@ -367,7 +367,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
367{ 367{
368 struct mem_ctl_info *mci; 368 struct mem_ctl_info *mci;
369 369
370 debugf0(__FILE__ ": %s()\n", __func__); 370 debugf0("%s: %s()\n", __FILE__, __func__);
371 371
372 if (i82443bxgx_pci) 372 if (i82443bxgx_pci)
373 edac_pci_release_generic_ctl(i82443bxgx_pci); 373 edac_pci_release_generic_ctl(i82443bxgx_pci);
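
The three edac drivers above all get the same mechanical cleanup of their debug statements; side by side, with the effect that the format literal no longer differs per source file:

	/* Before: __FILE__ is pasted into the format string itself. */
	debugf0("MC: " __FILE__ ": %s()\n", __func__);

	/* After: __FILE__ is passed as an ordinary %s argument. */
	debugf0("MC: %s: %s()\n", __FILE__, __func__);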
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 5045156c5313..9dcb30466ec0 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -30,7 +30,6 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/spinlock.h> 32#include <linux/spinlock.h>
33#include <linux/timer.h>
34#include <linux/workqueue.h> 33#include <linux/workqueue.h>
35 34
36#include <asm/atomic.h> 35#include <asm/atomic.h>
@@ -63,7 +62,7 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
63#define BIB_CRC(v) ((v) << 0) 62#define BIB_CRC(v) ((v) << 0)
64#define BIB_CRC_LENGTH(v) ((v) << 16) 63#define BIB_CRC_LENGTH(v) ((v) << 16)
65#define BIB_INFO_LENGTH(v) ((v) << 24) 64#define BIB_INFO_LENGTH(v) ((v) << 24)
66 65#define BIB_BUS_NAME 0x31333934 /* "1394" */
67#define BIB_LINK_SPEED(v) ((v) << 0) 66#define BIB_LINK_SPEED(v) ((v) << 0)
68#define BIB_GENERATION(v) ((v) << 4) 67#define BIB_GENERATION(v) ((v) << 4)
69#define BIB_MAX_ROM(v) ((v) << 8) 68#define BIB_MAX_ROM(v) ((v) << 8)
@@ -73,7 +72,8 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
73#define BIB_BMC ((1) << 28) 72#define BIB_BMC ((1) << 28)
74#define BIB_ISC ((1) << 29) 73#define BIB_ISC ((1) << 29)
75#define BIB_CMC ((1) << 30) 74#define BIB_CMC ((1) << 30)
76#define BIB_IMC ((1) << 31) 75#define BIB_IRMC ((1) << 31)
76#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
77 77
78static void generate_config_rom(struct fw_card *card, __be32 *config_rom) 78static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
79{ 79{
@@ -91,18 +91,18 @@ static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
91 91
92 config_rom[0] = cpu_to_be32( 92 config_rom[0] = cpu_to_be32(
93 BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0)); 93 BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
94 config_rom[1] = cpu_to_be32(0x31333934); 94 config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
95 config_rom[2] = cpu_to_be32( 95 config_rom[2] = cpu_to_be32(
96 BIB_LINK_SPEED(card->link_speed) | 96 BIB_LINK_SPEED(card->link_speed) |
97 BIB_GENERATION(card->config_rom_generation++ % 14 + 2) | 97 BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
98 BIB_MAX_ROM(2) | 98 BIB_MAX_ROM(2) |
99 BIB_MAX_RECEIVE(card->max_receive) | 99 BIB_MAX_RECEIVE(card->max_receive) |
100 BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC); 100 BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
101 config_rom[3] = cpu_to_be32(card->guid >> 32); 101 config_rom[3] = cpu_to_be32(card->guid >> 32);
102 config_rom[4] = cpu_to_be32(card->guid); 102 config_rom[4] = cpu_to_be32(card->guid);
103 103
104 /* Generate root directory. */ 104 /* Generate root directory. */
105 config_rom[6] = cpu_to_be32(0x0c0083c0); /* node capabilities */ 105 config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
106 i = 7; 106 i = 7;
107 j = 7 + descriptor_count; 107 j = 7 + descriptor_count;
108 108
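For readers following the config-ROM hunk above: the first quadlets of the bus info block are plain OR-ed bit fields, and the change only names the constants (BIB_BUS_NAME, BIB_IRMC) without altering the layout. A minimal standalone sketch of the same packing (plain C, not kernel code; the BIB_MAX_RECEIVE shift and the example link_speed/generation/max_rec values are assumptions, since they are defined outside this hunk) is:

#include <stdint.h>
#include <stdio.h>

/* Same shifts as the BIB_* macros in the hunk above. */
#define BIB_CRC(v)         ((v) << 0)
#define BIB_CRC_LENGTH(v)  ((v) << 16)
#define BIB_INFO_LENGTH(v) ((v) << 24)
#define BIB_BUS_NAME       0x31333934	/* "1394" in ASCII */
#define BIB_LINK_SPEED(v)  ((v) << 0)
#define BIB_GENERATION(v)  ((v) << 4)
#define BIB_MAX_ROM(v)     ((v) << 8)
#define BIB_MAX_RECEIVE(v) ((v) << 12)	/* assumed shift; defined outside this hunk */
#define BIB_BMC            (1 << 28)
#define BIB_ISC            (1 << 29)
#define BIB_CMC            (1 << 30)
#define BIB_IRMC           (1u << 31)

int main(void)
{
	unsigned link_speed = 3;	/* hypothetical link speed code */
	unsigned generation = 2;	/* cycles through 2..15, as above */
	unsigned max_receive = 10;	/* hypothetical max_rec code */
	uint32_t rom[3];
	int i;

	rom[0] = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0);
	rom[1] = BIB_BUS_NAME;
	rom[2] = BIB_LINK_SPEED(link_speed) | BIB_GENERATION(generation) |
		 BIB_MAX_ROM(2) | BIB_MAX_RECEIVE(max_receive) |
		 BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC;

	for (i = 0; i < 3; i++)
		printf("config_rom[%d] = 0x%08x\n", i, (unsigned)rom[i]);
	return 0;
}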
@@ -407,13 +407,6 @@ static void fw_card_bm_work(struct work_struct *work)
407 fw_card_put(card); 407 fw_card_put(card);
408} 408}
409 409
410static void flush_timer_callback(unsigned long data)
411{
412 struct fw_card *card = (struct fw_card *)data;
413
414 fw_flush_transactions(card);
415}
416
417void fw_card_initialize(struct fw_card *card, 410void fw_card_initialize(struct fw_card *card,
418 const struct fw_card_driver *driver, 411 const struct fw_card_driver *driver,
419 struct device *device) 412 struct device *device)
@@ -432,8 +425,6 @@ void fw_card_initialize(struct fw_card *card,
432 init_completion(&card->done); 425 init_completion(&card->done);
433 INIT_LIST_HEAD(&card->transaction_list); 426 INIT_LIST_HEAD(&card->transaction_list);
434 spin_lock_init(&card->lock); 427 spin_lock_init(&card->lock);
435 setup_timer(&card->flush_timer,
436 flush_timer_callback, (unsigned long)card);
437 428
438 card->local_node = NULL; 429 card->local_node = NULL;
439 430
@@ -558,7 +549,6 @@ void fw_core_remove_card(struct fw_card *card)
558 wait_for_completion(&card->done); 549 wait_for_completion(&card->done);
559 550
560 WARN_ON(!list_empty(&card->transaction_list)); 551 WARN_ON(!list_empty(&card->transaction_list));
561 del_timer_sync(&card->flush_timer);
562} 552}
563EXPORT_SYMBOL(fw_core_remove_card); 553EXPORT_SYMBOL(fw_core_remove_card);
564 554
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 14a34d99eea2..5bf106b9d791 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -227,7 +227,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
227 list_add_tail(&client->link, &device->client_list); 227 list_add_tail(&client->link, &device->client_list);
228 mutex_unlock(&device->client_list_mutex); 228 mutex_unlock(&device->client_list_mutex);
229 229
230 return 0; 230 return nonseekable_open(inode, file);
231} 231}
232 232
233static void queue_event(struct client *client, struct event *event, 233static void queue_event(struct client *client, struct event *event,
@@ -1496,13 +1496,13 @@ static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
1496 1496
1497const struct file_operations fw_device_ops = { 1497const struct file_operations fw_device_ops = {
1498 .owner = THIS_MODULE, 1498 .owner = THIS_MODULE,
1499 .llseek = no_llseek,
1499 .open = fw_device_op_open, 1500 .open = fw_device_op_open,
1500 .read = fw_device_op_read, 1501 .read = fw_device_op_read,
1501 .unlocked_ioctl = fw_device_op_ioctl, 1502 .unlocked_ioctl = fw_device_op_ioctl,
1502 .poll = fw_device_op_poll,
1503 .release = fw_device_op_release,
1504 .mmap = fw_device_op_mmap, 1503 .mmap = fw_device_op_mmap,
1505 1504 .release = fw_device_op_release,
1505 .poll = fw_device_op_poll,
1506#ifdef CONFIG_COMPAT 1506#ifdef CONFIG_COMPAT
1507 .compat_ioctl = fw_device_op_compat_ioctl, 1507 .compat_ioctl = fw_device_op_compat_ioctl,
1508#endif 1508#endif
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 673b03f8b4ec..fdc33ff06dc1 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -81,7 +81,7 @@ static int close_transaction(struct fw_transaction *transaction,
81 spin_lock_irqsave(&card->lock, flags); 81 spin_lock_irqsave(&card->lock, flags);
82 list_for_each_entry(t, &card->transaction_list, link) { 82 list_for_each_entry(t, &card->transaction_list, link) {
83 if (t == transaction) { 83 if (t == transaction) {
84 list_del(&t->link); 84 list_del_init(&t->link);
85 card->tlabel_mask &= ~(1ULL << t->tlabel); 85 card->tlabel_mask &= ~(1ULL << t->tlabel);
86 break; 86 break;
87 } 87 }
@@ -89,6 +89,7 @@ static int close_transaction(struct fw_transaction *transaction,
89 spin_unlock_irqrestore(&card->lock, flags); 89 spin_unlock_irqrestore(&card->lock, flags);
90 90
91 if (&t->link != &card->transaction_list) { 91 if (&t->link != &card->transaction_list) {
92 del_timer_sync(&t->split_timeout_timer);
92 t->callback(card, rcode, NULL, 0, t->callback_data); 93 t->callback(card, rcode, NULL, 0, t->callback_data);
93 return 0; 94 return 0;
94 } 95 }
@@ -121,6 +122,31 @@ int fw_cancel_transaction(struct fw_card *card,
121} 122}
122EXPORT_SYMBOL(fw_cancel_transaction); 123EXPORT_SYMBOL(fw_cancel_transaction);
123 124
125static void split_transaction_timeout_callback(unsigned long data)
126{
127 struct fw_transaction *t = (struct fw_transaction *)data;
128 struct fw_card *card = t->card;
129 unsigned long flags;
130
131 spin_lock_irqsave(&card->lock, flags);
132 if (list_empty(&t->link)) {
133 spin_unlock_irqrestore(&card->lock, flags);
134 return;
135 }
136 list_del(&t->link);
137 card->tlabel_mask &= ~(1ULL << t->tlabel);
138 spin_unlock_irqrestore(&card->lock, flags);
139
140 card->driver->cancel_packet(card, &t->packet);
141
142 /*
143 * At this point cancel_packet will never call the transaction
144 * callback, since we just took the transaction out of the list.
145 * So do it here.
146 */
147 t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
148}
149
124static void transmit_complete_callback(struct fw_packet *packet, 150static void transmit_complete_callback(struct fw_packet *packet,
125 struct fw_card *card, int status) 151 struct fw_card *card, int status)
126{ 152{
@@ -229,6 +255,23 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
229 packet->payload_mapped = false; 255 packet->payload_mapped = false;
230} 256}
231 257
258static int allocate_tlabel(struct fw_card *card)
259{
260 int tlabel;
261
262 tlabel = card->current_tlabel;
263 while (card->tlabel_mask & (1ULL << tlabel)) {
264 tlabel = (tlabel + 1) & 0x3f;
265 if (tlabel == card->current_tlabel)
266 return -EBUSY;
267 }
268
269 card->current_tlabel = (tlabel + 1) & 0x3f;
270 card->tlabel_mask |= 1ULL << tlabel;
271
272 return tlabel;
273}
274
232/** 275/**
233 * This function provides low-level access to the IEEE1394 transaction 276 * This function provides low-level access to the IEEE1394 transaction
234 * logic. Most C programs would use either fw_read(), fw_write() or 277 * logic. Most C programs would use either fw_read(), fw_write() or
@@ -277,31 +320,26 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
277 int tlabel; 320 int tlabel;
278 321
279 /* 322 /*
280 * Bump the flush timer up 100ms first of all so we
281 * don't race with a flush timer callback.
282 */
283
284 mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
285
286 /*
287 * Allocate tlabel from the bitmap and put the transaction on 323 * Allocate tlabel from the bitmap and put the transaction on
288 * the list while holding the card spinlock. 324 * the list while holding the card spinlock.
289 */ 325 */
290 326
291 spin_lock_irqsave(&card->lock, flags); 327 spin_lock_irqsave(&card->lock, flags);
292 328
293 tlabel = card->current_tlabel; 329 tlabel = allocate_tlabel(card);
294 if (card->tlabel_mask & (1ULL << tlabel)) { 330 if (tlabel < 0) {
295 spin_unlock_irqrestore(&card->lock, flags); 331 spin_unlock_irqrestore(&card->lock, flags);
296 callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data); 332 callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
297 return; 333 return;
298 } 334 }
299 335
300 card->current_tlabel = (card->current_tlabel + 1) & 0x3f;
301 card->tlabel_mask |= (1ULL << tlabel);
302
303 t->node_id = destination_id; 336 t->node_id = destination_id;
304 t->tlabel = tlabel; 337 t->tlabel = tlabel;
338 t->card = card;
339 setup_timer(&t->split_timeout_timer,
340 split_transaction_timeout_callback, (unsigned long)t);
341 /* FIXME: start this timer later, relative to t->timestamp */
342 mod_timer(&t->split_timeout_timer, jiffies + DIV_ROUND_UP(HZ, 10));
305 t->callback = callback; 343 t->callback = callback;
306 t->callback_data = callback_data; 344 t->callback_data = callback_data;
307 345
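The allocate_tlabel() helper added above, and its use in fw_send_request(), walk the 64-entry transaction-label space starting at current_tlabel and give up only after a full lap. A minimal userspace sketch of the same search (the kernel does this under card->lock; locking is omitted here, and -1 stands in for -EBUSY) is:

#include <stdint.h>
#include <stdio.h>

struct card {
	uint64_t tlabel_mask;	/* one bit per label currently in flight */
	int current_tlabel;	/* where the next search starts */
};

/* Returns a free label in [0, 63], or -1 if all 64 are in use. */
static int allocate_tlabel(struct card *card)
{
	int tlabel = card->current_tlabel;

	while (card->tlabel_mask & (1ULL << tlabel)) {
		tlabel = (tlabel + 1) & 0x3f;
		if (tlabel == card->current_tlabel)
			return -1;
	}
	card->current_tlabel = (tlabel + 1) & 0x3f;
	card->tlabel_mask |= 1ULL << tlabel;
	return tlabel;
}

static void release_tlabel(struct card *card, int tlabel)
{
	card->tlabel_mask &= ~(1ULL << tlabel);	/* transaction completed */
}

int main(void)
{
	struct card card = { 0, 0 };
	int i, t;

	for (i = 0; i < 66; i++) {
		t = allocate_tlabel(&card);
		if (t < 0) {
			printf("request %d: no free tlabel\n", i);
			release_tlabel(&card, 5);	/* one transaction finishes */
		} else {
			printf("request %d: tlabel %d\n", i, t);
		}
	}
	return 0;
}

Running it shows labels 0..63 being handed out, a refusal once all 64 are busy, and reuse of a label as soon as its transaction is released.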
@@ -347,11 +385,13 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
347 struct transaction_callback_data d; 385 struct transaction_callback_data d;
348 struct fw_transaction t; 386 struct fw_transaction t;
349 387
388 init_timer_on_stack(&t.split_timeout_timer);
350 init_completion(&d.done); 389 init_completion(&d.done);
351 d.payload = payload; 390 d.payload = payload;
352 fw_send_request(card, &t, tcode, destination_id, generation, speed, 391 fw_send_request(card, &t, tcode, destination_id, generation, speed,
353 offset, payload, length, transaction_callback, &d); 392 offset, payload, length, transaction_callback, &d);
354 wait_for_completion(&d.done); 393 wait_for_completion(&d.done);
394 destroy_timer_on_stack(&t.split_timeout_timer);
355 395
356 return d.rcode; 396 return d.rcode;
357} 397}
@@ -394,30 +434,6 @@ void fw_send_phy_config(struct fw_card *card,
394 mutex_unlock(&phy_config_mutex); 434 mutex_unlock(&phy_config_mutex);
395} 435}
396 436
397void fw_flush_transactions(struct fw_card *card)
398{
399 struct fw_transaction *t, *next;
400 struct list_head list;
401 unsigned long flags;
402
403 INIT_LIST_HEAD(&list);
404 spin_lock_irqsave(&card->lock, flags);
405 list_splice_init(&card->transaction_list, &list);
406 card->tlabel_mask = 0;
407 spin_unlock_irqrestore(&card->lock, flags);
408
409 list_for_each_entry_safe(t, next, &list, link) {
410 card->driver->cancel_packet(card, &t->packet);
411
412 /*
413 * At this point cancel_packet will never call the
414 * transaction callback, since we just took all the
415 * transactions out of the list. So do it here.
416 */
417 t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
418 }
419}
420
421static struct fw_address_handler *lookup_overlapping_address_handler( 437static struct fw_address_handler *lookup_overlapping_address_handler(
422 struct list_head *list, unsigned long long offset, size_t length) 438 struct list_head *list, unsigned long long offset, size_t length)
423{ 439{
@@ -827,8 +843,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
827 spin_lock_irqsave(&card->lock, flags); 843 spin_lock_irqsave(&card->lock, flags);
828 list_for_each_entry(t, &card->transaction_list, link) { 844 list_for_each_entry(t, &card->transaction_list, link) {
829 if (t->node_id == source && t->tlabel == tlabel) { 845 if (t->node_id == source && t->tlabel == tlabel) {
830 list_del(&t->link); 846 list_del_init(&t->link);
831 card->tlabel_mask &= ~(1 << t->tlabel); 847 card->tlabel_mask &= ~(1ULL << t->tlabel);
832 break; 848 break;
833 } 849 }
834 } 850 }
@@ -869,6 +885,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
869 break; 885 break;
870 } 886 }
871 887
888 del_timer_sync(&t->split_timeout_timer);
889
872 /* 890 /*
873 * The response handler may be executed while the request handler 891 * The response handler may be executed while the request handler
874 * is still pending. Cancel the request handler. 892 * is still pending. Cancel the request handler.
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index fb0321300cce..0ecfcd95f4c5 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -27,7 +27,12 @@ struct fw_packet;
27#define PHY_LINK_ACTIVE 0x80 27#define PHY_LINK_ACTIVE 0x80
28#define PHY_CONTENDER 0x40 28#define PHY_CONTENDER 0x40
29#define PHY_BUS_RESET 0x40 29#define PHY_BUS_RESET 0x40
30#define PHY_EXTENDED_REGISTERS 0xe0
30#define PHY_BUS_SHORT_RESET 0x40 31#define PHY_BUS_SHORT_RESET 0x40
32#define PHY_INT_STATUS_BITS 0x3c
33#define PHY_ENABLE_ACCEL 0x02
34#define PHY_ENABLE_MULTI 0x01
35#define PHY_PAGE_SELECT 0xe0
31 36
32#define BANDWIDTH_AVAILABLE_INITIAL 4915 37#define BANDWIDTH_AVAILABLE_INITIAL 4915
33#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) 38#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
@@ -215,7 +220,6 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
215void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); 220void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
216void fw_fill_response(struct fw_packet *response, u32 *request_header, 221void fw_fill_response(struct fw_packet *response, u32 *request_header,
217 int rcode, void *payload, size_t length); 222 int rcode, void *payload, size_t length);
218void fw_flush_transactions(struct fw_card *card);
219void fw_send_phy_config(struct fw_card *card, 223void fw_send_phy_config(struct fw_card *card,
220 int node_id, int generation, int gap_count); 224 int node_id, int generation, int gap_count);
221 225
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index a3b083a7403a..9f627e758cfc 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -236,13 +236,15 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
236#define QUIRK_CYCLE_TIMER 1 236#define QUIRK_CYCLE_TIMER 1
237#define QUIRK_RESET_PACKET 2 237#define QUIRK_RESET_PACKET 2
238#define QUIRK_BE_HEADERS 4 238#define QUIRK_BE_HEADERS 4
239#define QUIRK_NO_1394A 8
239 240
240/* In case of multiple matches in ohci_quirks[], only the first one is used. */ 241/* In case of multiple matches in ohci_quirks[], only the first one is used. */
241static const struct { 242static const struct {
242 unsigned short vendor, device, flags; 243 unsigned short vendor, device, flags;
243} ohci_quirks[] = { 244} ohci_quirks[] = {
244 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER | 245 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
245 QUIRK_RESET_PACKET}, 246 QUIRK_RESET_PACKET |
247 QUIRK_NO_1394A},
246 {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, 248 {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
247 {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 249 {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
248 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 250 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
@@ -257,15 +259,16 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
257 ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER) 259 ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
258 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) 260 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
259 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) 261 ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
262 ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
260 ")"); 263 ")");
261 264
262#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
263
264#define OHCI_PARAM_DEBUG_AT_AR 1 265#define OHCI_PARAM_DEBUG_AT_AR 1
265#define OHCI_PARAM_DEBUG_SELFIDS 2 266#define OHCI_PARAM_DEBUG_SELFIDS 2
266#define OHCI_PARAM_DEBUG_IRQS 4 267#define OHCI_PARAM_DEBUG_IRQS 4
267#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ 268#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
268 269
270#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
271
269static int param_debug; 272static int param_debug;
270module_param_named(debug, param_debug, int, 0644); 273module_param_named(debug, param_debug, int, 0644);
271MODULE_PARM_DESC(debug, "Verbose logging (default = 0" 274MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
@@ -438,9 +441,10 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
438 441
439#else 442#else
440 443
441#define log_irqs(evt) 444#define param_debug 0
442#define log_selfids(node_id, generation, self_id_count, sid) 445static inline void log_irqs(u32 evt) {}
443#define log_ar_at_event(dir, speed, header, evt) 446static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
447static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}
444 448
445#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */ 449#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */
446 450
@@ -460,27 +464,71 @@ static inline void flush_writes(const struct fw_ohci *ohci)
460 reg_read(ohci, OHCI1394_Version); 464 reg_read(ohci, OHCI1394_Version);
461} 465}
462 466
463static int ohci_update_phy_reg(struct fw_card *card, int addr, 467static int read_phy_reg(struct fw_ohci *ohci, int addr)
464 int clear_bits, int set_bits)
465{ 468{
466 struct fw_ohci *ohci = fw_ohci(card); 469 u32 val;
467 u32 val, old; 470 int i;
468 471
469 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); 472 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
470 flush_writes(ohci); 473 for (i = 0; i < 10; i++) {
471 msleep(2); 474 val = reg_read(ohci, OHCI1394_PhyControl);
472 val = reg_read(ohci, OHCI1394_PhyControl); 475 if (val & OHCI1394_PhyControl_ReadDone)
473 if ((val & OHCI1394_PhyControl_ReadDone) == 0) { 476 return OHCI1394_PhyControl_ReadData(val);
474 fw_error("failed to set phy reg bits.\n"); 477
475 return -EBUSY; 478 msleep(1);
476 } 479 }
480 fw_error("failed to read phy reg\n");
481
482 return -EBUSY;
483}
484
485static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
486{
487 int i;
477 488
478 old = OHCI1394_PhyControl_ReadData(val);
479 old = (old & ~clear_bits) | set_bits;
480 reg_write(ohci, OHCI1394_PhyControl, 489 reg_write(ohci, OHCI1394_PhyControl,
481 OHCI1394_PhyControl_Write(addr, old)); 490 OHCI1394_PhyControl_Write(addr, val));
491 for (i = 0; i < 100; i++) {
492 val = reg_read(ohci, OHCI1394_PhyControl);
493 if (!(val & OHCI1394_PhyControl_WritePending))
494 return 0;
482 495
483 return 0; 496 msleep(1);
497 }
498 fw_error("failed to write phy reg\n");
499
500 return -EBUSY;
501}
502
503static int ohci_update_phy_reg(struct fw_card *card, int addr,
504 int clear_bits, int set_bits)
505{
506 struct fw_ohci *ohci = fw_ohci(card);
507 int ret;
508
509 ret = read_phy_reg(ohci, addr);
510 if (ret < 0)
511 return ret;
512
513 /*
514 * The interrupt status bits are cleared by writing a one bit.
515 * Avoid clearing them unless explicitly requested in set_bits.
516 */
517 if (addr == 5)
518 clear_bits |= PHY_INT_STATUS_BITS;
519
520 return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
521}
522
523static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
524{
525 int ret;
526
527 ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5);
528 if (ret < 0)
529 return ret;
530
531 return read_phy_reg(ohci, addr);
484} 532}
485 533
486static int ar_context_add_page(struct ar_context *ctx) 534static int ar_context_add_page(struct ar_context *ctx)
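The rewritten PHY accessors above replace the old fixed 2 ms sleep with a bounded poll on ReadDone/WritePending, and ohci_update_phy_reg() becomes a read-modify-write that always masks the write-one-to-clear interrupt status bits of register 5 so an update can never acknowledge pending events as a side effect. A standalone sketch of that write-back rule (constants taken from the core.h hunk above; the example register content is hypothetical) is:

#include <stdio.h>
#include <stdint.h>

#define PHY_INT_STATUS_BITS 0x3c	/* write-one-to-clear bits in PHY register 5 */
#define PHY_ENABLE_ACCEL    0x02

/* Value that ohci_update_phy_reg() writes back after reading 'old'. */
static uint8_t update_value(int addr, uint8_t old, uint8_t clear, uint8_t set)
{
	if (addr == 5)
		clear |= PHY_INT_STATUS_BITS;	/* keep status bits zero in the write */
	return (old & ~clear) | set;
}

int main(void)
{
	uint8_t old = 0x14;	/* hypothetical: two status bits currently pending */

	printf("naive  write-back: 0x%02x (would clear the pending bits)\n",
	       (unsigned)(old | PHY_ENABLE_ACCEL));
	printf("masked write-back: 0x%02x\n",
	       (unsigned)update_value(5, old, 0, PHY_ENABLE_ACCEL));
	return 0;
}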
@@ -1495,13 +1543,64 @@ static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
1495 memset(&dest[length], 0, CONFIG_ROM_SIZE - size); 1543 memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
1496} 1544}
1497 1545
1546static int configure_1394a_enhancements(struct fw_ohci *ohci)
1547{
1548 bool enable_1394a;
1549 int ret, clear, set, offset;
1550
1551 /* Check if the driver should configure link and PHY. */
1552 if (!(reg_read(ohci, OHCI1394_HCControlSet) &
1553 OHCI1394_HCControl_programPhyEnable))
1554 return 0;
1555
1556 /* Paranoia: check whether the PHY supports 1394a, too. */
1557 enable_1394a = false;
1558 ret = read_phy_reg(ohci, 2);
1559 if (ret < 0)
1560 return ret;
1561 if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
1562 ret = read_paged_phy_reg(ohci, 1, 8);
1563 if (ret < 0)
1564 return ret;
1565 if (ret >= 1)
1566 enable_1394a = true;
1567 }
1568
1569 if (ohci->quirks & QUIRK_NO_1394A)
1570 enable_1394a = false;
1571
1572 /* Configure PHY and link consistently. */
1573 if (enable_1394a) {
1574 clear = 0;
1575 set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
1576 } else {
1577 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
1578 set = 0;
1579 }
1580 ret = ohci_update_phy_reg(&ohci->card, 5, clear, set);
1581 if (ret < 0)
1582 return ret;
1583
1584 if (enable_1394a)
1585 offset = OHCI1394_HCControlSet;
1586 else
1587 offset = OHCI1394_HCControlClear;
1588 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
1589
1590 /* Clean up: configuration has been taken care of. */
1591 reg_write(ohci, OHCI1394_HCControlClear,
1592 OHCI1394_HCControl_programPhyEnable);
1593
1594 return 0;
1595}
1596
1498static int ohci_enable(struct fw_card *card, 1597static int ohci_enable(struct fw_card *card,
1499 const __be32 *config_rom, size_t length) 1598 const __be32 *config_rom, size_t length)
1500{ 1599{
1501 struct fw_ohci *ohci = fw_ohci(card); 1600 struct fw_ohci *ohci = fw_ohci(card);
1502 struct pci_dev *dev = to_pci_dev(card->device); 1601 struct pci_dev *dev = to_pci_dev(card->device);
1503 u32 lps; 1602 u32 lps;
1504 int i; 1603 int i, ret;
1505 1604
1506 if (software_reset(ohci)) { 1605 if (software_reset(ohci)) {
1507 fw_error("Failed to reset ohci card.\n"); 1606 fw_error("Failed to reset ohci card.\n");
@@ -1565,10 +1664,14 @@ static int ohci_enable(struct fw_card *card,
1565 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) 1664 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
1566 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); 1665 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
1567 1666
1667 ret = configure_1394a_enhancements(ohci);
1668 if (ret < 0)
1669 return ret;
1670
1568 /* Activate link_on bit and contender bit in our self ID packets.*/ 1671 /* Activate link_on bit and contender bit in our self ID packets.*/
1569 if (ohci_update_phy_reg(card, 4, 0, 1672 ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
1570 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0) 1673 if (ret < 0)
1571 return -EIO; 1674 return ret;
1572 1675
1573 /* 1676 /*
1574 * When the link is not yet enabled, the atomic config rom 1677 * When the link is not yet enabled, the atomic config rom
@@ -2304,7 +2407,7 @@ static const struct fw_card_driver ohci_driver = {
2304}; 2407};
2305 2408
2306#ifdef CONFIG_PPC_PMAC 2409#ifdef CONFIG_PPC_PMAC
2307static void ohci_pmac_on(struct pci_dev *dev) 2410static void pmac_ohci_on(struct pci_dev *dev)
2308{ 2411{
2309 if (machine_is(powermac)) { 2412 if (machine_is(powermac)) {
2310 struct device_node *ofn = pci_device_to_OF_node(dev); 2413 struct device_node *ofn = pci_device_to_OF_node(dev);
@@ -2316,7 +2419,7 @@ static void ohci_pmac_on(struct pci_dev *dev)
2316 } 2419 }
2317} 2420}
2318 2421
2319static void ohci_pmac_off(struct pci_dev *dev) 2422static void pmac_ohci_off(struct pci_dev *dev)
2320{ 2423{
2321 if (machine_is(powermac)) { 2424 if (machine_is(powermac)) {
2322 struct device_node *ofn = pci_device_to_OF_node(dev); 2425 struct device_node *ofn = pci_device_to_OF_node(dev);
@@ -2328,15 +2431,15 @@ static void ohci_pmac_off(struct pci_dev *dev)
2328 } 2431 }
2329} 2432}
2330#else 2433#else
2331#define ohci_pmac_on(dev) 2434static inline void pmac_ohci_on(struct pci_dev *dev) {}
2332#define ohci_pmac_off(dev) 2435static inline void pmac_ohci_off(struct pci_dev *dev) {}
2333#endif /* CONFIG_PPC_PMAC */ 2436#endif /* CONFIG_PPC_PMAC */
2334 2437
2335static int __devinit pci_probe(struct pci_dev *dev, 2438static int __devinit pci_probe(struct pci_dev *dev,
2336 const struct pci_device_id *ent) 2439 const struct pci_device_id *ent)
2337{ 2440{
2338 struct fw_ohci *ohci; 2441 struct fw_ohci *ohci;
2339 u32 bus_options, max_receive, link_speed, version; 2442 u32 bus_options, max_receive, link_speed, version, link_enh;
2340 u64 guid; 2443 u64 guid;
2341 int i, err, n_ir, n_it; 2444 int i, err, n_ir, n_it;
2342 size_t size; 2445 size_t size;
@@ -2349,7 +2452,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
2349 2452
2350 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); 2453 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
2351 2454
2352 ohci_pmac_on(dev); 2455 pmac_ohci_on(dev);
2353 2456
2354 err = pci_enable_device(dev); 2457 err = pci_enable_device(dev);
2355 if (err) { 2458 if (err) {
@@ -2389,6 +2492,23 @@ static int __devinit pci_probe(struct pci_dev *dev,
2389 if (param_quirks) 2492 if (param_quirks)
2390 ohci->quirks = param_quirks; 2493 ohci->quirks = param_quirks;
2391 2494
2495 /* TI OHCI-Lynx and compatible: set recommended configuration bits. */
2496 if (dev->vendor == PCI_VENDOR_ID_TI) {
2497 pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh);
2498
2499 /* adjust latency of ATx FIFO: use 1.7 KB threshold */
2500 link_enh &= ~TI_LinkEnh_atx_thresh_mask;
2501 link_enh |= TI_LinkEnh_atx_thresh_1_7K;
2502
2503 /* use priority arbitration for asynchronous responses */
2504 link_enh |= TI_LinkEnh_enab_unfair;
2505
2506 /* required for aPhyEnhanceEnable to work */
2507 link_enh |= TI_LinkEnh_enab_accel;
2508
2509 pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh);
2510 }
2511
2392 ar_context_init(&ohci->ar_request_ctx, ohci, 2512 ar_context_init(&ohci->ar_request_ctx, ohci,
2393 OHCI1394_AsReqRcvContextControlSet); 2513 OHCI1394_AsReqRcvContextControlSet);
2394 2514
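The TI-specific block above is a routine masked field update of a vendor config-space register: the ATx threshold is a multi-bit field, so it is cleared before the 1.7 KB value is OR-ed in, while enab_unfair and enab_accel are single enable bits. A tiny sketch of the same arithmetic (constants from the ohci.h hunk below; the power-on value is hypothetical) is:

#include <stdio.h>
#include <stdint.h>

#define TI_LinkEnh_enab_accel      0x00000002
#define TI_LinkEnh_enab_unfair     0x00000080
#define TI_LinkEnh_atx_thresh_mask 0x00003000
#define TI_LinkEnh_atx_thresh_1_7K 0x00001000

int main(void)
{
	uint32_t link_enh = 0x00002001;	/* hypothetical power-on value */

	link_enh &= ~TI_LinkEnh_atx_thresh_mask;	/* clear the threshold field */
	link_enh |= TI_LinkEnh_atx_thresh_1_7K;		/* select 1.7 KB threshold */
	link_enh |= TI_LinkEnh_enab_unfair;		/* priority arbitration */
	link_enh |= TI_LinkEnh_enab_accel;		/* needed by aPhyEnhanceEnable */

	printf("LinkEnh: 0x%08x\n", (unsigned)link_enh);
	return 0;
}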
@@ -2466,7 +2586,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
2466 pci_disable_device(dev); 2586 pci_disable_device(dev);
2467 fail_free: 2587 fail_free:
2468 kfree(&ohci->card); 2588 kfree(&ohci->card);
2469 ohci_pmac_off(dev); 2589 pmac_ohci_off(dev);
2470 fail: 2590 fail:
2471 if (err == -ENOMEM) 2591 if (err == -ENOMEM)
2472 fw_error("Out of memory\n"); 2592 fw_error("Out of memory\n");
@@ -2509,7 +2629,7 @@ static void pci_remove(struct pci_dev *dev)
2509 pci_release_region(dev, 0); 2629 pci_release_region(dev, 0);
2510 pci_disable_device(dev); 2630 pci_disable_device(dev);
2511 kfree(&ohci->card); 2631 kfree(&ohci->card);
2512 ohci_pmac_off(dev); 2632 pmac_ohci_off(dev);
2513 2633
2514 fw_notify("Removed fw-ohci device.\n"); 2634 fw_notify("Removed fw-ohci device.\n");
2515} 2635}
@@ -2530,7 +2650,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state)
2530 err = pci_set_power_state(dev, pci_choose_state(dev, state)); 2650 err = pci_set_power_state(dev, pci_choose_state(dev, state));
2531 if (err) 2651 if (err)
2532 fw_error("pci_set_power_state failed with %d\n", err); 2652 fw_error("pci_set_power_state failed with %d\n", err);
2533 ohci_pmac_off(dev); 2653 pmac_ohci_off(dev);
2534 2654
2535 return 0; 2655 return 0;
2536} 2656}
@@ -2540,7 +2660,7 @@ static int pci_resume(struct pci_dev *dev)
2540 struct fw_ohci *ohci = pci_get_drvdata(dev); 2660 struct fw_ohci *ohci = pci_get_drvdata(dev);
2541 int err; 2661 int err;
2542 2662
2543 ohci_pmac_on(dev); 2663 pmac_ohci_on(dev);
2544 pci_set_power_state(dev, PCI_D0); 2664 pci_set_power_state(dev, PCI_D0);
2545 pci_restore_state(dev); 2665 pci_restore_state(dev);
2546 err = pci_enable_device(dev); 2666 err = pci_enable_device(dev);
diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h
index ba492d85c516..3bc9a5d744eb 100644
--- a/drivers/firewire/ohci.h
+++ b/drivers/firewire/ohci.h
@@ -67,7 +67,7 @@
67#define OHCI1394_PhyControl_ReadDone 0x80000000 67#define OHCI1394_PhyControl_ReadDone 0x80000000
68#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16) 68#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16)
69#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000) 69#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000)
70#define OHCI1394_PhyControl_WriteDone 0x00004000 70#define OHCI1394_PhyControl_WritePending 0x00004000
71#define OHCI1394_IsochronousCycleTimer 0x0F0 71#define OHCI1394_IsochronousCycleTimer 0x0F0
72#define OHCI1394_AsReqFilterHiSet 0x100 72#define OHCI1394_AsReqFilterHiSet 0x100
73#define OHCI1394_AsReqFilterHiClear 0x104 73#define OHCI1394_AsReqFilterHiClear 0x104
@@ -154,4 +154,12 @@
154 154
155#define OHCI1394_phy_tcode 0xe 155#define OHCI1394_phy_tcode 0xe
156 156
157/* TI extensions */
158
159#define PCI_CFG_TI_LinkEnh 0xf4
160#define TI_LinkEnh_enab_accel 0x00000002
161#define TI_LinkEnh_enab_unfair 0x00000080
162#define TI_LinkEnh_atx_thresh_mask 0x00003000
163#define TI_LinkEnh_atx_thresh_1_7K 0x00001000
164
157#endif /* _FIREWIRE_OHCI_H */ 165#endif /* _FIREWIRE_OHCI_H */
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index fee678f74a19..724038dab4ca 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -139,6 +139,13 @@ config GPIO_MAX732X
139 Board setup code must specify the model to use, and the start 139 Board setup code must specify the model to use, and the start
140 number for these GPIOs. 140 number for these GPIOs.
141 141
142config GPIO_MAX732X_IRQ
143 bool "Interrupt controller support for MAX732x"
144 depends on GPIO_MAX732X=y && GENERIC_HARDIRQS
145 help
146 Say yes here to enable the max732x to be used as an interrupt
147 controller. It requires the driver to be built in the kernel.
148
142config GPIO_PCA953X 149config GPIO_PCA953X
143 tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports" 150 tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports"
144 depends on I2C 151 depends on I2C
@@ -188,6 +195,13 @@ config GPIO_PCF857X
188 This driver provides an in-kernel interface to those GPIOs using 195 This driver provides an in-kernel interface to those GPIOs using
189 platform-neutral GPIO calls. 196 platform-neutral GPIO calls.
190 197
198config GPIO_TC35892
199 bool "TC35892 GPIOs"
200 depends on MFD_TC35892
201 help
202 This enables support for the GPIOs found on the TC35892
203 I/O Expander.
204
191config GPIO_TWL4030 205config GPIO_TWL4030
192 tristate "TWL4030, TWL5030, and TPS659x0 GPIOs" 206 tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
193 depends on TWL4030_CORE 207 depends on TWL4030_CORE
@@ -264,10 +278,10 @@ config GPIO_BT8XX
264 If unsure, say N. 278 If unsure, say N.
265 279
266config GPIO_LANGWELL 280config GPIO_LANGWELL
267 bool "Intel Moorestown Platform Langwell GPIO support" 281 bool "Intel Langwell/Penwell GPIO support"
268 depends on PCI 282 depends on PCI
269 help 283 help
270 Say Y here to support Intel Moorestown platform GPIO. 284 Say Y here to support Intel Langwell/Penwell GPIO.
271 285
272config GPIO_TIMBERDALE 286config GPIO_TIMBERDALE
273 bool "Support for timberdale GPIO IP" 287 bool "Support for timberdale GPIO IP"
@@ -275,6 +289,15 @@ config GPIO_TIMBERDALE
275 ---help--- 289 ---help---
276 Add support for the GPIO IP in the timberdale FPGA. 290 Add support for the GPIO IP in the timberdale FPGA.
277 291
292config GPIO_RDC321X
293 tristate "RDC R-321x GPIO support"
294 depends on PCI && GPIOLIB
295 select MFD_CORE
296 select MFD_RDC321X
297 help
298 Support for the RDC R321x SoC GPIOs over southbridge
299 PCI configuration space.
300
278comment "SPI GPIO expanders:" 301comment "SPI GPIO expanders:"
279 302
280config GPIO_MAX7301 303config GPIO_MAX7301
@@ -310,4 +333,14 @@ config GPIO_UCB1400
310 To compile this driver as a module, choose M here: the 333 To compile this driver as a module, choose M here: the
311 module will be called ucb1400_gpio. 334 module will be called ucb1400_gpio.
312 335
336comment "MODULbus GPIO expanders:"
337
338config GPIO_JANZ_TTL
339 tristate "Janz VMOD-TTL Digital IO Module"
340 depends on MFD_JANZ_CMODIO
341 help
342 This enables support for the Janz VMOD-TTL Digital IO module.
343 This driver provides support for driving the pins in output
344 mode only. Input mode is not supported.
345
313endif 346endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 10f3f8d958b1..51c3cdd41b5a 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
16obj-$(CONFIG_GPIO_PCA953X) += pca953x.o 16obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
17obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o 17obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
18obj-$(CONFIG_GPIO_PL061) += pl061.o 18obj-$(CONFIG_GPIO_PL061) += pl061.o
19obj-$(CONFIG_GPIO_TC35892) += tc35892-gpio.o
19obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o 20obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
20obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o 21obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
21obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o 22obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
@@ -27,4 +28,6 @@ obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
27obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o 28obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
28obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o 29obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o
29obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o 30obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o
30obj-$(CONFIG_GPIO_SCH) += sch_gpio.o \ No newline at end of file 31obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
32obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
33obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index 0c3c498f2260..f73a1555e49d 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -197,7 +197,7 @@ static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
197 return 0; 197 return 0;
198} 198}
199 199
200static char *cs5535_gpio_names[] = { 200static const char * const cs5535_gpio_names[] = {
201 "GPIO0", "GPIO1", "GPIO2", "GPIO3", 201 "GPIO0", "GPIO1", "GPIO2", "GPIO3",
202 "GPIO4", "GPIO5", "GPIO6", "GPIO7", 202 "GPIO4", "GPIO5", "GPIO6", "GPIO7",
203 "GPIO8", "GPIO9", "GPIO10", "GPIO11", 203 "GPIO8", "GPIO9", "GPIO10", "GPIO11",
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index cae1b8c5b08c..3ca36542e338 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -722,7 +722,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
722 unsigned long flags; 722 unsigned long flags;
723 struct gpio_desc *desc; 723 struct gpio_desc *desc;
724 int status = -EINVAL; 724 int status = -EINVAL;
725 char *ioname = NULL; 725 const char *ioname = NULL;
726 726
727 /* can't export until sysfs is available ... */ 727 /* can't export until sysfs is available ... */
728 if (!gpio_class.p) { 728 if (!gpio_class.p) {
@@ -753,7 +753,7 @@ int gpio_export(unsigned gpio, bool direction_may_change)
753 struct device *dev; 753 struct device *dev;
754 754
755 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), 755 dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
756 desc, ioname ? ioname : "gpio%d", gpio); 756 desc, ioname ? ioname : "gpio%u", gpio);
757 if (!IS_ERR(dev)) { 757 if (!IS_ERR(dev)) {
758 status = sysfs_create_group(&dev->kobj, 758 status = sysfs_create_group(&dev->kobj,
759 &gpio_attr_group); 759 &gpio_attr_group);
@@ -1106,7 +1106,7 @@ unlock:
1106fail: 1106fail:
1107 /* failures here can mean systems won't boot... */ 1107 /* failures here can mean systems won't boot... */
1108 if (status) 1108 if (status)
1109 pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n", 1109 pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
1110 chip->base, chip->base + chip->ngpio - 1, 1110 chip->base, chip->base + chip->ngpio - 1,
1111 chip->label ? : "generic"); 1111 chip->label ? : "generic");
1112 return status; 1112 return status;
@@ -1447,6 +1447,49 @@ fail:
1447} 1447}
1448EXPORT_SYMBOL_GPL(gpio_direction_output); 1448EXPORT_SYMBOL_GPL(gpio_direction_output);
1449 1449
1450/**
1451 * gpio_set_debounce - sets @debounce time for a @gpio
1452 * @gpio: the gpio to set debounce time
1453 * @debounce: debounce time is microseconds
1454 */
1455int gpio_set_debounce(unsigned gpio, unsigned debounce)
1456{
1457 unsigned long flags;
1458 struct gpio_chip *chip;
1459 struct gpio_desc *desc = &gpio_desc[gpio];
1460 int status = -EINVAL;
1461
1462 spin_lock_irqsave(&gpio_lock, flags);
1463
1464 if (!gpio_is_valid(gpio))
1465 goto fail;
1466 chip = desc->chip;
1467 if (!chip || !chip->set || !chip->set_debounce)
1468 goto fail;
1469 gpio -= chip->base;
1470 if (gpio >= chip->ngpio)
1471 goto fail;
1472 status = gpio_ensure_requested(desc, gpio);
1473 if (status < 0)
1474 goto fail;
1475
1476 /* now we know the gpio is valid and chip won't vanish */
1477
1478 spin_unlock_irqrestore(&gpio_lock, flags);
1479
1480 might_sleep_if(extra_checks && chip->can_sleep);
1481
1482 return chip->set_debounce(chip, gpio, debounce);
1483
1484fail:
1485 spin_unlock_irqrestore(&gpio_lock, flags);
1486 if (status)
1487 pr_debug("%s: gpio-%d status %d\n",
1488 __func__, gpio, status);
1489
1490 return status;
1491}
1492EXPORT_SYMBOL_GPL(gpio_set_debounce);
1450 1493
1451/* I/O calls are only valid after configuration completed; the relevant 1494/* I/O calls are only valid after configuration completed; the relevant
1452 * "is this a valid GPIO" error checks should already have been done. 1495 * "is this a valid GPIO" error checks should already have been done.
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/it8761e_gpio.c
index 41a9388f2fde..48fc43c4bdd1 100644
--- a/drivers/gpio/it8761e_gpio.c
+++ b/drivers/gpio/it8761e_gpio.c
@@ -217,7 +217,10 @@ gpiochip_add_err:
217static void __exit it8761e_gpio_exit(void) 217static void __exit it8761e_gpio_exit(void)
218{ 218{
219 if (gpio_ba) { 219 if (gpio_ba) {
220 gpiochip_remove(&it8761e_gpio_chip); 220 int ret = gpiochip_remove(&it8761e_gpio_chip);
221
222 WARN(ret, "%s(): gpiochip_remove() failed, ret=%d\n",
223 __func__, ret);
221 224
222 release_region(gpio_ba, GPIO_IOSIZE); 225 release_region(gpio_ba, GPIO_IOSIZE);
223 gpio_ba = 0; 226 gpio_ba = 0;
diff --git a/drivers/gpio/janz-ttl.c b/drivers/gpio/janz-ttl.c
new file mode 100644
index 000000000000..813ac077e5d7
--- /dev/null
+++ b/drivers/gpio/janz-ttl.c
@@ -0,0 +1,258 @@
1/*
2 * Janz MODULbus VMOD-TTL GPIO Driver
3 *
4 * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/delay.h>
17#include <linux/platform_device.h>
18#include <linux/io.h>
19#include <linux/gpio.h>
20#include <linux/slab.h>
21
22#include <linux/mfd/janz.h>
23
24#define DRV_NAME "janz-ttl"
25
26#define PORTA_DIRECTION 0x23
27#define PORTB_DIRECTION 0x2B
28#define PORTC_DIRECTION 0x06
29#define PORTA_IOCTL 0x24
30#define PORTB_IOCTL 0x2C
31#define PORTC_IOCTL 0x07
32
33#define MASTER_INT_CTL 0x00
34#define MASTER_CONF_CTL 0x01
35
36#define CONF_PAE (1 << 2)
37#define CONF_PBE (1 << 7)
38#define CONF_PCE (1 << 4)
39
40struct ttl_control_regs {
41 __be16 portc;
42 __be16 portb;
43 __be16 porta;
44 __be16 control;
45};
46
47struct ttl_module {
48 struct gpio_chip gpio;
49
50 /* base address of registers */
51 struct ttl_control_regs __iomem *regs;
52
53 u8 portc_shadow;
54 u8 portb_shadow;
55 u8 porta_shadow;
56
57 spinlock_t lock;
58};
59
60static int ttl_get_value(struct gpio_chip *gpio, unsigned offset)
61{
62 struct ttl_module *mod = dev_get_drvdata(gpio->dev);
63 u8 *shadow;
64 int ret;
65
66 if (offset < 8) {
67 shadow = &mod->porta_shadow;
68 } else if (offset < 16) {
69 shadow = &mod->portb_shadow;
70 offset -= 8;
71 } else {
72 shadow = &mod->portc_shadow;
73 offset -= 16;
74 }
75
76 spin_lock(&mod->lock);
77 ret = *shadow & (1 << offset);
78 spin_unlock(&mod->lock);
79 return ret;
80}
81
82static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value)
83{
84 struct ttl_module *mod = dev_get_drvdata(gpio->dev);
85 void __iomem *port;
86 u8 *shadow;
87
88 if (offset < 8) {
89 port = &mod->regs->porta;
90 shadow = &mod->porta_shadow;
91 } else if (offset < 16) {
92 port = &mod->regs->portb;
93 shadow = &mod->portb_shadow;
94 offset -= 8;
95 } else {
96 port = &mod->regs->portc;
97 shadow = &mod->portc_shadow;
98 offset -= 16;
99 }
100
101 spin_lock(&mod->lock);
102 if (value)
103 *shadow |= (1 << offset);
104 else
105 *shadow &= ~(1 << offset);
106
107 iowrite16be(*shadow, port);
108 spin_unlock(&mod->lock);
109}
110
111static void __devinit ttl_write_reg(struct ttl_module *mod, u8 reg, u16 val)
112{
113 iowrite16be(reg, &mod->regs->control);
114 iowrite16be(val, &mod->regs->control);
115}
116
117static void __devinit ttl_setup_device(struct ttl_module *mod)
118{
119 /* reset the device to a known state */
120 iowrite16be(0x0000, &mod->regs->control);
121 iowrite16be(0x0001, &mod->regs->control);
122 iowrite16be(0x0000, &mod->regs->control);
123
124 /* put all ports in open-drain mode */
125 ttl_write_reg(mod, PORTA_IOCTL, 0x00ff);
126 ttl_write_reg(mod, PORTB_IOCTL, 0x00ff);
127 ttl_write_reg(mod, PORTC_IOCTL, 0x000f);
128
129 /* set all ports as outputs */
130 ttl_write_reg(mod, PORTA_DIRECTION, 0x0000);
131 ttl_write_reg(mod, PORTB_DIRECTION, 0x0000);
132 ttl_write_reg(mod, PORTC_DIRECTION, 0x0000);
133
134 /* set all ports to drive zeroes */
135 iowrite16be(0x0000, &mod->regs->porta);
136 iowrite16be(0x0000, &mod->regs->portb);
137 iowrite16be(0x0000, &mod->regs->portc);
138
139 /* enable all ports */
140 ttl_write_reg(mod, MASTER_CONF_CTL, CONF_PAE | CONF_PBE | CONF_PCE);
141}
142
143static int __devinit ttl_probe(struct platform_device *pdev)
144{
145 struct janz_platform_data *pdata;
146 struct device *dev = &pdev->dev;
147 struct ttl_module *mod;
148 struct gpio_chip *gpio;
149 struct resource *res;
150 int ret;
151
152 pdata = pdev->dev.platform_data;
153 if (!pdata) {
154 dev_err(dev, "no platform data\n");
155 ret = -ENXIO;
156 goto out_return;
157 }
158
159 mod = kzalloc(sizeof(*mod), GFP_KERNEL);
160 if (!mod) {
161 dev_err(dev, "unable to allocate private data\n");
162 ret = -ENOMEM;
163 goto out_return;
164 }
165
166 platform_set_drvdata(pdev, mod);
167 spin_lock_init(&mod->lock);
168
169 /* get access to the MODULbus registers for this module */
170 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
171 if (!res) {
172 dev_err(dev, "MODULbus registers not found\n");
173 ret = -ENODEV;
174 goto out_free_mod;
175 }
176
177 mod->regs = ioremap(res->start, resource_size(res));
178 if (!mod->regs) {
179 dev_err(dev, "MODULbus registers not ioremap\n");
180 ret = -ENOMEM;
181 goto out_free_mod;
182 }
183
184 ttl_setup_device(mod);
185
186 /* Initialize the GPIO data structures */
187 gpio = &mod->gpio;
188 gpio->dev = &pdev->dev;
189 gpio->label = pdev->name;
190 gpio->get = ttl_get_value;
191 gpio->set = ttl_set_value;
192 gpio->owner = THIS_MODULE;
193
194 /* request dynamic allocation */
195 gpio->base = -1;
196 gpio->ngpio = 20;
197
198 ret = gpiochip_add(gpio);
199 if (ret) {
200 dev_err(dev, "unable to add GPIO chip\n");
201 goto out_iounmap_regs;
202 }
203
204 dev_info(&pdev->dev, "module %d: registered GPIO device\n",
205 pdata->modno);
206 return 0;
207
208out_iounmap_regs:
209 iounmap(mod->regs);
210out_free_mod:
211 kfree(mod);
212out_return:
213 return ret;
214}
215
216static int __devexit ttl_remove(struct platform_device *pdev)
217{
218 struct ttl_module *mod = platform_get_drvdata(pdev);
219 struct device *dev = &pdev->dev;
220 int ret;
221
222 ret = gpiochip_remove(&mod->gpio);
223 if (ret) {
224 dev_err(dev, "unable to remove GPIO chip\n");
225 return ret;
226 }
227
228 iounmap(mod->regs);
229 kfree(mod);
230 return 0;
231}
232
233static struct platform_driver ttl_driver = {
234 .driver = {
235 .name = DRV_NAME,
236 .owner = THIS_MODULE,
237 },
238 .probe = ttl_probe,
239 .remove = __devexit_p(ttl_remove),
240};
241
242static int __init ttl_init(void)
243{
244 return platform_driver_register(&ttl_driver);
245}
246
247static void __exit ttl_exit(void)
248{
249 platform_driver_unregister(&ttl_driver);
250}
251
252MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
253MODULE_DESCRIPTION("Janz MODULbus VMOD-TTL Driver");
254MODULE_LICENSE("GPL");
255MODULE_ALIAS("platform:janz-ttl");
256
257module_init(ttl_init);
258module_exit(ttl_exit);
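The new janz-ttl driver above only supports output mode (per its Kconfig help), so it keeps one shadow byte per port, flips the requested bit in the shadow, and writes the whole shadow back with iowrite16be(); reads simply report the shadow. A standalone sketch of that shadow-register pattern (no MODULbus hardware; a plain variable stands in for the iowrite16be() target) is:

#include <stdio.h>
#include <stdint.h>

static uint16_t porta_reg;	/* stands in for mod->regs->porta */
static uint8_t  porta_shadow;	/* last value written, as in the driver */

static void ttl_set_bit(unsigned offset, int value)
{
	if (value)
		porta_shadow |= 1u << offset;
	else
		porta_shadow &= ~(1u << offset);

	porta_reg = porta_shadow;	/* iowrite16be(*shadow, port) in the driver */
}

static int ttl_get_bit(unsigned offset)
{
	/* No readback from the hardware; report the shadow. */
	return !!(porta_shadow & (1u << offset));
}

int main(void)
{
	ttl_set_bit(3, 1);
	ttl_set_bit(5, 1);
	ttl_set_bit(3, 0);
	printf("port A register now 0x%02x, pin 5 reads %d\n",
	       (unsigned)porta_reg, ttl_get_bit(5));
	return 0;
}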
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 00c3a14127af..8383a8d7f994 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -17,6 +17,7 @@
17 17
18/* Supports: 18/* Supports:
19 * Moorestown platform Langwell chip. 19 * Moorestown platform Langwell chip.
20 * Medfield platform Penwell chip.
20 */ 21 */
21 22
22#include <linux/module.h> 23#include <linux/module.h>
@@ -31,44 +32,65 @@
31#include <linux/gpio.h> 32#include <linux/gpio.h>
32#include <linux/slab.h> 33#include <linux/slab.h>
33 34
34struct lnw_gpio_register { 35/*
35 u32 GPLR[2]; 36 * Langwell chip has 64 pins and thus there are 2 32bit registers to control
36 u32 GPDR[2]; 37 * each feature, while Penwell chip has 96 pins for each block, and need 3 32bit
37 u32 GPSR[2]; 38 * registers to control them, so we only define the order here instead of a
38 u32 GPCR[2]; 39 * structure, to get a bit offset for a pin (use GPDR as an example):
39 u32 GRER[2]; 40 *
40 u32 GFER[2]; 41 * nreg = ngpio / 32;
41 u32 GEDR[2]; 42 * reg = offset / 32;
43 * bit = offset % 32;
44 * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4;
45 *
46 * so the bit of reg_addr is to control pin offset's GPDR feature
47*/
48
49enum GPIO_REG {
50 GPLR = 0, /* pin level read-only */
51 GPDR, /* pin direction */
52 GPSR, /* pin set */
53 GPCR, /* pin clear */
54 GRER, /* rising edge detect */
55 GFER, /* falling edge detect */
56 GEDR, /* edge detect result */
42}; 57};
43 58
44struct lnw_gpio { 59struct lnw_gpio {
45 struct gpio_chip chip; 60 struct gpio_chip chip;
46 struct lnw_gpio_register *reg_base; 61 void *reg_base;
47 spinlock_t lock; 62 spinlock_t lock;
48 unsigned irq_base; 63 unsigned irq_base;
49}; 64};
50 65
51static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset) 66static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
67 enum GPIO_REG reg_type)
52{ 68{
53 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); 69 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
70 unsigned nreg = chip->ngpio / 32;
54 u8 reg = offset / 32; 71 u8 reg = offset / 32;
55 void __iomem *gplr; 72 void __iomem *ptr;
73
74 ptr = (void __iomem *)(lnw->reg_base + reg_type * nreg * 4 + reg * 4);
75 return ptr;
76}
77
78static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset)
79{
80 void __iomem *gplr = gpio_reg(chip, offset, GPLR);
56 81
57 gplr = (void __iomem *)(&lnw->reg_base->GPLR[reg]);
58 return readl(gplr) & BIT(offset % 32); 82 return readl(gplr) & BIT(offset % 32);
59} 83}
60 84
61static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 85static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
62{ 86{
63 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
64 u8 reg = offset / 32;
65 void __iomem *gpsr, *gpcr; 87 void __iomem *gpsr, *gpcr;
66 88
67 if (value) { 89 if (value) {
68 gpsr = (void __iomem *)(&lnw->reg_base->GPSR[reg]); 90 gpsr = gpio_reg(chip, offset, GPSR);
69 writel(BIT(offset % 32), gpsr); 91 writel(BIT(offset % 32), gpsr);
70 } else { 92 } else {
71 gpcr = (void __iomem *)(&lnw->reg_base->GPCR[reg]); 93 gpcr = gpio_reg(chip, offset, GPCR);
72 writel(BIT(offset % 32), gpcr); 94 writel(BIT(offset % 32), gpcr);
73 } 95 }
74} 96}
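The comment block introduced above replaces the fixed two-register struct with an arithmetic rule, so the same code serves Langwell (64 pins, two 32-bit registers per feature) and Penwell (96 pins, three per feature). A quick standalone check of the offset formula, using GPDR and GEDR as examples, is:

#include <stdio.h>

enum GPIO_REG { GPLR = 0, GPDR, GPSR, GPCR, GRER, GFER, GEDR };

/* Byte offset of the 32-bit register holding pin 'offset', per the comment above. */
static unsigned reg_offset(unsigned ngpio, enum GPIO_REG type, unsigned offset)
{
	unsigned nreg = ngpio / 32;	/* registers per feature block */
	unsigned reg  = offset / 32;	/* which register within the block */

	return type * nreg * 4 + reg * 4;
}

int main(void)
{
	printf("Langwell pin 40 GPDR at base+0x%02x, bit %u\n",
	       reg_offset(64, GPDR, 40), 40 % 32);
	printf("Penwell  pin 40 GPDR at base+0x%02x, bit %u\n",
	       reg_offset(96, GPDR, 40), 40 % 32);
	printf("Penwell  pin 40 GEDR at base+0x%02x\n",
	       reg_offset(96, GEDR, 40));
	return 0;
}

The 64-pin and 96-pin cases land on different addresses for the same pin, which is exactly why the driver can no longer use a struct with hard-coded two-element arrays.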
@@ -76,12 +98,10 @@ static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
76static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 98static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
77{ 99{
78 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); 100 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
79 u8 reg = offset / 32; 101 void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
80 u32 value; 102 u32 value;
81 unsigned long flags; 103 unsigned long flags;
82 void __iomem *gpdr;
83 104
84 gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
85 spin_lock_irqsave(&lnw->lock, flags); 105 spin_lock_irqsave(&lnw->lock, flags);
86 value = readl(gpdr); 106 value = readl(gpdr);
87 value &= ~BIT(offset % 32); 107 value &= ~BIT(offset % 32);
@@ -94,12 +114,10 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip,
94 unsigned offset, int value) 114 unsigned offset, int value)
95{ 115{
96 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); 116 struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip);
97 u8 reg = offset / 32; 117 void __iomem *gpdr = gpio_reg(chip, offset, GPDR);
98 unsigned long flags; 118 unsigned long flags;
99 void __iomem *gpdr;
100 119
101 lnw_gpio_set(chip, offset, value); 120 lnw_gpio_set(chip, offset, value);
102 gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]);
103 spin_lock_irqsave(&lnw->lock, flags); 121 spin_lock_irqsave(&lnw->lock, flags);
104 value = readl(gpdr); 122 value = readl(gpdr);
105 value |= BIT(offset % 32);; 123 value |= BIT(offset % 32);;
@@ -118,11 +136,10 @@ static int lnw_irq_type(unsigned irq, unsigned type)
118{ 136{
119 struct lnw_gpio *lnw = get_irq_chip_data(irq); 137 struct lnw_gpio *lnw = get_irq_chip_data(irq);
120 u32 gpio = irq - lnw->irq_base; 138 u32 gpio = irq - lnw->irq_base;
121 u8 reg = gpio / 32;
122 unsigned long flags; 139 unsigned long flags;
123 u32 value; 140 u32 value;
124 void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]); 141 void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
125 void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]); 142 void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER);
126 143
127 if (gpio >= lnw->chip.ngpio) 144 if (gpio >= lnw->chip.ngpio)
128 return -EINVAL; 145 return -EINVAL;
@@ -158,8 +175,10 @@ static struct irq_chip lnw_irqchip = {
158 .set_type = lnw_irq_type, 175 .set_type = lnw_irq_type,
159}; 176};
160 177
161static struct pci_device_id lnw_gpio_ids[] = { 178static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */
162 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f) }, 179 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 },
180 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 },
181 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 },
163 { 0, } 182 { 0, }
164}; 183};
165MODULE_DEVICE_TABLE(pci, lnw_gpio_ids); 184MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
@@ -167,17 +186,17 @@ MODULE_DEVICE_TABLE(pci, lnw_gpio_ids);
167static void lnw_irq_handler(unsigned irq, struct irq_desc *desc) 186static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
168{ 187{
169 struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq); 188 struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq);
170 u32 reg, gpio; 189 u32 base, gpio;
171 void __iomem *gedr; 190 void __iomem *gedr;
172 u32 gedr_v; 191 u32 gedr_v;
173 192
174 /* check GPIO controller to check which pin triggered the interrupt */ 193 /* check GPIO controller to check which pin triggered the interrupt */
175 for (reg = 0; reg < lnw->chip.ngpio / 32; reg++) { 194 for (base = 0; base < lnw->chip.ngpio; base += 32) {
176 gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]); 195 gedr = gpio_reg(&lnw->chip, base, GEDR);
177 gedr_v = readl(gedr); 196 gedr_v = readl(gedr);
178 if (!gedr_v) 197 if (!gedr_v)
179 continue; 198 continue;
180 for (gpio = reg*32; gpio < reg*32+32; gpio++) 199 for (gpio = base; gpio < base + 32; gpio++)
181 if (gedr_v & BIT(gpio % 32)) { 200 if (gedr_v & BIT(gpio % 32)) {
182 pr_debug("pin %d triggered\n", gpio); 201 pr_debug("pin %d triggered\n", gpio);
183 generic_handle_irq(lnw->irq_base + gpio); 202 generic_handle_irq(lnw->irq_base + gpio);
@@ -245,7 +264,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
245 lnw->chip.set = lnw_gpio_set; 264 lnw->chip.set = lnw_gpio_set;
246 lnw->chip.to_irq = lnw_gpio_to_irq; 265 lnw->chip.to_irq = lnw_gpio_to_irq;
247 lnw->chip.base = gpio_base; 266 lnw->chip.base = gpio_base;
248 lnw->chip.ngpio = 64; 267 lnw->chip.ngpio = id->driver_data;
249 lnw->chip.can_sleep = 0; 268 lnw->chip.can_sleep = 0;
250 pci_set_drvdata(pdev, lnw); 269 pci_set_drvdata(pdev, lnw);
251 retval = gpiochip_add(&lnw->chip); 270 retval = gpiochip_add(&lnw->chip);
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index f7868243af89..9cad60f9e962 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -17,7 +17,8 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20 20#include <linux/interrupt.h>
21#include <linux/irq.h>
21#include <linux/i2c.h> 22#include <linux/i2c.h>
22#include <linux/i2c/max732x.h> 23#include <linux/i2c/max732x.h>
23 24
@@ -31,7 +32,8 @@
31 * - Open Drain I/O 32 * - Open Drain I/O
32 * 33 *
33 * designated by 'O', 'I' and 'P' individually according to MAXIM's 34 * designated by 'O', 'I' and 'P' individually according to MAXIM's
34 * datasheets. 35 * datasheets. 'I' and 'P' ports are interrupt capable, some with

36 * a dedicated interrupt mask.
35 * 37 *
36 * There are two groups of I/O ports, each group usually includes 38 * There are two groups of I/O ports, each group usually includes
37 * up to 8 I/O ports, and is accessed by a specific I2C address: 39 * up to 8 I/O ports, and is accessed by a specific I2C address:
@@ -44,7 +46,8 @@
44 * 46 *
45 * Within each group of ports, there are five known combinations of 47 * Within each group of ports, there are five known combinations of
46 * I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for 48 * I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for
47 * the detailed organization of these ports. 49 * the detailed organization of these ports. Only Group A is interrupt
50 * capable.
48 * 51 *
49 * GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16', 52 * GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16',
50 * and GPIOs from GROUP_A are numbered before those from GROUP_B 53 * and GPIOs from GROUP_A are numbered before those from GROUP_B
@@ -68,16 +71,47 @@
68#define GROUP_A(x) ((x) & 0xffff) /* I2C Addr: 0b'110xxxx */ 71#define GROUP_A(x) ((x) & 0xffff) /* I2C Addr: 0b'110xxxx */
69#define GROUP_B(x) ((x) << 16) /* I2C Addr: 0b'101xxxx */ 72#define GROUP_B(x) ((x) << 16) /* I2C Addr: 0b'101xxxx */
70 73
74#define INT_NONE 0x0 /* No interrupt capability */
75#define INT_NO_MASK 0x1 /* Has interrupts, no mask */
76#define INT_INDEP_MASK 0x2 /* Has interrupts, independent mask */
77#define INT_MERGED_MASK 0x3 /* Has interrupts, merged mask */
78
79#define INT_CAPS(x) (((uint64_t)(x)) << 32)
80
81enum {
82 MAX7319,
83 MAX7320,
84 MAX7321,
85 MAX7322,
86 MAX7323,
87 MAX7324,
88 MAX7325,
89 MAX7326,
90 MAX7327,
91};
92
93static uint64_t max732x_features[] = {
94 [MAX7319] = GROUP_A(IO_8I) | INT_CAPS(INT_MERGED_MASK),
95 [MAX7320] = GROUP_B(IO_8O),
96 [MAX7321] = GROUP_A(IO_8P) | INT_CAPS(INT_NO_MASK),
97 [MAX7322] = GROUP_A(IO_4I4O) | INT_CAPS(INT_MERGED_MASK),
98 [MAX7323] = GROUP_A(IO_4P4O) | INT_CAPS(INT_INDEP_MASK),
99 [MAX7324] = GROUP_A(IO_8I) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
100 [MAX7325] = GROUP_A(IO_8P) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
101 [MAX7326] = GROUP_A(IO_4I4O) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK),
102 [MAX7327] = GROUP_A(IO_4P4O) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK),
103};
104
71static const struct i2c_device_id max732x_id[] = { 105static const struct i2c_device_id max732x_id[] = {
72 { "max7319", GROUP_A(IO_8I) }, 106 { "max7319", MAX7319 },
73 { "max7320", GROUP_B(IO_8O) }, 107 { "max7320", MAX7320 },
74 { "max7321", GROUP_A(IO_8P) }, 108 { "max7321", MAX7321 },
75 { "max7322", GROUP_A(IO_4I4O) }, 109 { "max7322", MAX7322 },
76 { "max7323", GROUP_A(IO_4P4O) }, 110 { "max7323", MAX7323 },
77 { "max7324", GROUP_A(IO_8I) | GROUP_B(IO_8O) }, 111 { "max7324", MAX7324 },
78 { "max7325", GROUP_A(IO_8P) | GROUP_B(IO_8O) }, 112 { "max7325", MAX7325 },
79 { "max7326", GROUP_A(IO_4I4O) | GROUP_B(IO_8O) }, 113 { "max7326", MAX7326 },
80 { "max7327", GROUP_A(IO_4P4O) | GROUP_B(IO_8O) }, 114 { "max7327", MAX7327 },
81 { }, 115 { },
82}; 116};
83MODULE_DEVICE_TABLE(i2c, max732x_id); 117MODULE_DEVICE_TABLE(i2c, max732x_id);
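With the hunk above, the i2c_device_id table stores only an enum and the per-chip capabilities move into max732x_features[]: a 64-bit word that keeps the old GROUP_A/GROUP_B port layout in the low 32 bits and adds the interrupt capability at bit 32 via INT_CAPS(). A standalone sketch of packing and unpacking one entry (modelled on the MAX7325 line; IO_8P and IO_8O here are stand-in values, since the real layout codes are defined earlier in max732x.c) is:

#include <stdio.h>
#include <stdint.h>

/* Same packing as max732x.c. */
#define GROUP_A(x)  ((x) & 0xffff)		/* I2C addr 0b110xxxx */
#define GROUP_B(x)  ((x) << 16)			/* I2C addr 0b101xxxx */
#define INT_CAPS(x) (((uint64_t)(x)) << 32)
#define INT_NO_MASK 0x1

#define IO_8P 0x2222	/* stand-in layout codes; the real values are */
#define IO_8O 0x1111	/* defined earlier in max732x.c */

int main(void)
{
	uint64_t feat = GROUP_A(IO_8P) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK);

	printf("group A layout : 0x%04x\n", (unsigned)(feat & 0xffff));
	printf("group B layout : 0x%04x\n", (unsigned)((feat >> 16) & 0xffff));
	printf("irq features   : 0x%x\n",   (unsigned)(feat >> 32));
	return 0;
}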
@@ -96,9 +130,19 @@ struct max732x_chip {
96 130
97 struct mutex lock; 131 struct mutex lock;
98 uint8_t reg_out[2]; 132 uint8_t reg_out[2];
133
134#ifdef CONFIG_GPIO_MAX732X_IRQ
135 struct mutex irq_lock;
136 int irq_base;
137 uint8_t irq_mask;
138 uint8_t irq_mask_cur;
139 uint8_t irq_trig_raise;
140 uint8_t irq_trig_fall;
141 uint8_t irq_features;
142#endif
99}; 143};
100 144
101static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val) 145static int max732x_writeb(struct max732x_chip *chip, int group_a, uint8_t val)
102{ 146{
103 struct i2c_client *client; 147 struct i2c_client *client;
104 int ret; 148 int ret;
@@ -113,7 +157,7 @@ static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val)
113 return 0; 157 return 0;
114} 158}
115 159
116static int max732x_read(struct max732x_chip *chip, int group_a, uint8_t *val) 160static int max732x_readb(struct max732x_chip *chip, int group_a, uint8_t *val)
117{ 161{
118 struct i2c_client *client; 162 struct i2c_client *client;
119 int ret; 163 int ret;
@@ -142,7 +186,7 @@ static int max732x_gpio_get_value(struct gpio_chip *gc, unsigned off)
142 186
143 chip = container_of(gc, struct max732x_chip, gpio_chip); 187 chip = container_of(gc, struct max732x_chip, gpio_chip);
144 188
145 ret = max732x_read(chip, is_group_a(chip, off), &reg_val); 189 ret = max732x_readb(chip, is_group_a(chip, off), &reg_val);
146 if (ret < 0) 190 if (ret < 0)
147 return 0; 191 return 0;
148 192
@@ -162,7 +206,7 @@ static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
162 reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0]; 206 reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0];
163 reg_out = (val) ? reg_out | mask : reg_out & ~mask; 207 reg_out = (val) ? reg_out | mask : reg_out & ~mask;
164 208
165 ret = max732x_write(chip, is_group_a(chip, off), reg_out); 209 ret = max732x_writeb(chip, is_group_a(chip, off), reg_out);
166 if (ret < 0) 210 if (ret < 0)
167 goto out; 211 goto out;
168 212
@@ -188,6 +232,13 @@ static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
188 return -EACCES; 232 return -EACCES;
189 } 233 }
190 234
235 /*
236 * Open-drain pins must be set to high impedance (which is
237 * equivalent to output-high) to be turned into an input.
238 */
239 if ((mask & chip->dir_output))
240 max732x_gpio_set_value(gc, off, 1);
241
191 return 0; 242 return 0;
192} 243}
193 244
@@ -209,12 +260,278 @@ static int max732x_gpio_direction_output(struct gpio_chip *gc,
209 return 0; 260 return 0;
210} 261}
211 262
263#ifdef CONFIG_GPIO_MAX732X_IRQ
264static int max732x_writew(struct max732x_chip *chip, uint16_t val)
265{
266 int ret;
267
268 val = cpu_to_le16(val);
269
270 ret = i2c_master_send(chip->client_group_a, (char *)&val, 2);
271 if (ret < 0) {
272 dev_err(&chip->client_group_a->dev, "failed writing\n");
273 return ret;
274 }
275
276 return 0;
277}
278
279static int max732x_readw(struct max732x_chip *chip, uint16_t *val)
280{
281 int ret;
282
283 ret = i2c_master_recv(chip->client_group_a, (char *)val, 2);
284 if (ret < 0) {
285 dev_err(&chip->client_group_a->dev, "failed reading\n");
286 return ret;
287 }
288
289 *val = le16_to_cpu(*val);
290 return 0;
291}
292
293static void max732x_irq_update_mask(struct max732x_chip *chip)
294{
295 uint16_t msg;
296
297 if (chip->irq_mask == chip->irq_mask_cur)
298 return;
299
300 chip->irq_mask = chip->irq_mask_cur;
301
302 if (chip->irq_features == INT_NO_MASK)
303 return;
304
305 mutex_lock(&chip->lock);
306
307 switch (chip->irq_features) {
308 case INT_INDEP_MASK:
309 msg = (chip->irq_mask << 8) | chip->reg_out[0];
310 max732x_writew(chip, msg);
311 break;
312
313 case INT_MERGED_MASK:
314 msg = chip->irq_mask | chip->reg_out[0];
315 max732x_writeb(chip, 1, (uint8_t)msg);
316 break;
317 }
318
319 mutex_unlock(&chip->lock);
320}
321
322static int max732x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
323{
324 struct max732x_chip *chip;
325
326 chip = container_of(gc, struct max732x_chip, gpio_chip);
327 return chip->irq_base + off;
328}
329
330static void max732x_irq_mask(unsigned int irq)
331{
332 struct max732x_chip *chip = get_irq_chip_data(irq);
333
334 chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base));
335}
336
337static void max732x_irq_unmask(unsigned int irq)
338{
339 struct max732x_chip *chip = get_irq_chip_data(irq);
340
341 chip->irq_mask_cur |= 1 << (irq - chip->irq_base);
342}
343
344static void max732x_irq_bus_lock(unsigned int irq)
345{
346 struct max732x_chip *chip = get_irq_chip_data(irq);
347
348 mutex_lock(&chip->irq_lock);
349 chip->irq_mask_cur = chip->irq_mask;
350}
351
352static void max732x_irq_bus_sync_unlock(unsigned int irq)
353{
354 struct max732x_chip *chip = get_irq_chip_data(irq);
355
356 max732x_irq_update_mask(chip);
357 mutex_unlock(&chip->irq_lock);
358}
359
360static int max732x_irq_set_type(unsigned int irq, unsigned int type)
361{
362 struct max732x_chip *chip = get_irq_chip_data(irq);
363 uint16_t off = irq - chip->irq_base;
364 uint16_t mask = 1 << off;
365
366 if (!(mask & chip->dir_input)) {
367 dev_dbg(&chip->client->dev, "%s port %d is output only\n",
368 chip->client->name, off);
369 return -EACCES;
370 }
371
372 if (!(type & IRQ_TYPE_EDGE_BOTH)) {
373 dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
374 irq, type);
375 return -EINVAL;
376 }
377
378 if (type & IRQ_TYPE_EDGE_FALLING)
379 chip->irq_trig_fall |= mask;
380 else
381 chip->irq_trig_fall &= ~mask;
382
383 if (type & IRQ_TYPE_EDGE_RISING)
384 chip->irq_trig_raise |= mask;
385 else
386 chip->irq_trig_raise &= ~mask;
387
388 return max732x_gpio_direction_input(&chip->gpio_chip, off);
389}
390
391static struct irq_chip max732x_irq_chip = {
392 .name = "max732x",
393 .mask = max732x_irq_mask,
394 .unmask = max732x_irq_unmask,
395 .bus_lock = max732x_irq_bus_lock,
396 .bus_sync_unlock = max732x_irq_bus_sync_unlock,
397 .set_type = max732x_irq_set_type,
398};
399
400static uint8_t max732x_irq_pending(struct max732x_chip *chip)
401{
402 uint8_t cur_stat;
403 uint8_t old_stat;
404 uint8_t trigger;
405 uint8_t pending;
406 uint16_t status;
407 int ret;
408
409 ret = max732x_readw(chip, &status);
410 if (ret)
411 return 0;
412
413 trigger = status >> 8;
414 trigger &= chip->irq_mask;
415
416 if (!trigger)
417 return 0;
418
419 cur_stat = status & 0xFF;
420 cur_stat &= chip->irq_mask;
421
422 old_stat = cur_stat ^ trigger;
423
424 pending = (old_stat & chip->irq_trig_fall) |
425 (cur_stat & chip->irq_trig_raise);
426 pending &= trigger;
427
428 return pending;
429}
430
431static irqreturn_t max732x_irq_handler(int irq, void *devid)
432{
433 struct max732x_chip *chip = devid;
434 uint8_t pending;
435 uint8_t level;
436
437 pending = max732x_irq_pending(chip);
438
439 if (!pending)
440 return IRQ_HANDLED;
441
442 do {
443 level = __ffs(pending);
444 handle_nested_irq(level + chip->irq_base);
445
446 pending &= ~(1 << level);
447 } while (pending);
448
449 return IRQ_HANDLED;
450}
451
452static int max732x_irq_setup(struct max732x_chip *chip,
453 const struct i2c_device_id *id)
454{
455 struct i2c_client *client = chip->client;
456 struct max732x_platform_data *pdata = client->dev.platform_data;
457 int has_irq = max732x_features[id->driver_data] >> 32;
458 int ret;
459
460 if (pdata->irq_base && has_irq != INT_NONE) {
461 int lvl;
462
463 chip->irq_base = pdata->irq_base;
464 chip->irq_features = has_irq;
465 mutex_init(&chip->irq_lock);
466
467 for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
468 int irq = lvl + chip->irq_base;
469
470 if (!(chip->dir_input & (1 << lvl)))
471 continue;
472
473 set_irq_chip_data(irq, chip);
474 set_irq_chip_and_handler(irq, &max732x_irq_chip,
475 handle_edge_irq);
476 set_irq_nested_thread(irq, 1);
477#ifdef CONFIG_ARM
478 set_irq_flags(irq, IRQF_VALID);
479#else
480 set_irq_noprobe(irq);
481#endif
482 }
483
484 ret = request_threaded_irq(client->irq,
485 NULL,
486 max732x_irq_handler,
487 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
488 dev_name(&client->dev), chip);
489 if (ret) {
490 dev_err(&client->dev, "failed to request irq %d\n",
491 client->irq);
492 goto out_failed;
493 }
494
495 chip->gpio_chip.to_irq = max732x_gpio_to_irq;
496 }
497
498 return 0;
499
500out_failed:
501 chip->irq_base = 0;
502 return ret;
503}
504
505static void max732x_irq_teardown(struct max732x_chip *chip)
506{
507 if (chip->irq_base)
508 free_irq(chip->client->irq, chip);
509}
510#else /* CONFIG_GPIO_MAX732X_IRQ */
511static int max732x_irq_setup(struct max732x_chip *chip,
512 const struct i2c_device_id *id)
513{
514 struct i2c_client *client = chip->client;
515 struct max732x_platform_data *pdata = client->dev.platform_data;
516 int has_irq = max732x_features[id->driver_data] >> 32;
517
518 if (pdata->irq_base && has_irq != INT_NONE)
519 dev_warn(&client->dev, "interrupt support not compiled in\n");
520
521 return 0;
522}
523
524static void max732x_irq_teardown(struct max732x_chip *chip)
525{
526}
527#endif
528
212static int __devinit max732x_setup_gpio(struct max732x_chip *chip, 529static int __devinit max732x_setup_gpio(struct max732x_chip *chip,
213 const struct i2c_device_id *id, 530 const struct i2c_device_id *id,
214 unsigned gpio_start) 531 unsigned gpio_start)
215{ 532{
216 struct gpio_chip *gc = &chip->gpio_chip; 533 struct gpio_chip *gc = &chip->gpio_chip;
217 uint32_t id_data = id->driver_data; 534 uint32_t id_data = (uint32_t)max732x_features[id->driver_data];
218 int i, port = 0; 535 int i, port = 0;
219 536
220 for (i = 0; i < 16; i++, id_data >>= 2) { 537 for (i = 0; i < 16; i++, id_data >>= 2) {
@@ -285,14 +602,14 @@ static int __devinit max732x_probe(struct i2c_client *client,
285 switch (client->addr & 0x70) { 602 switch (client->addr & 0x70) {
286 case 0x60: 603 case 0x60:
287 chip->client_group_a = client; 604 chip->client_group_a = client;
288 if (nr_port > 7) { 605 if (nr_port > 8) {
289 c = i2c_new_dummy(client->adapter, addr_b); 606 c = i2c_new_dummy(client->adapter, addr_b);
290 chip->client_group_b = chip->client_dummy = c; 607 chip->client_group_b = chip->client_dummy = c;
291 } 608 }
292 break; 609 break;
293 case 0x50: 610 case 0x50:
294 chip->client_group_b = client; 611 chip->client_group_b = client;
295 if (nr_port > 7) { 612 if (nr_port > 8) {
296 c = i2c_new_dummy(client->adapter, addr_a); 613 c = i2c_new_dummy(client->adapter, addr_a);
297 chip->client_group_a = chip->client_dummy = c; 614 chip->client_group_a = chip->client_dummy = c;
298 } 615 }
@@ -306,9 +623,13 @@ static int __devinit max732x_probe(struct i2c_client *client,
306 623
307 mutex_init(&chip->lock); 624 mutex_init(&chip->lock);
308 625
309 max732x_read(chip, is_group_a(chip, 0), &chip->reg_out[0]); 626 max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
310 if (nr_port > 7) 627 if (nr_port > 8)
311 max732x_read(chip, is_group_a(chip, 8), &chip->reg_out[1]); 628 max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
629
630 ret = max732x_irq_setup(chip, id);
631 if (ret)
632 goto out_failed;
312 633
313 ret = gpiochip_add(&chip->gpio_chip); 634 ret = gpiochip_add(&chip->gpio_chip);
314 if (ret) 635 if (ret)
@@ -325,6 +646,7 @@ static int __devinit max732x_probe(struct i2c_client *client,
325 return 0; 646 return 0;
326 647
327out_failed: 648out_failed:
649 max732x_irq_teardown(chip);
328 kfree(chip); 650 kfree(chip);
329 return ret; 651 return ret;
330} 652}
@@ -352,6 +674,8 @@ static int __devexit max732x_remove(struct i2c_client *client)
352 return ret; 674 return ret;
353 } 675 }
354 676
677 max732x_irq_teardown(chip);
678
355 /* unregister any dummy i2c_client */ 679 /* unregister any dummy i2c_client */
356 if (chip->client_dummy) 680 if (chip->client_dummy)
357 i2c_unregister_device(chip->client_dummy); 681 i2c_unregister_device(chip->client_dummy);
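
The max732x changes above only enable interrupt handling when the part is interrupt capable and the board supplies a non-zero irq_base through its platform data. A minimal sketch of how a board file might wire that up, assuming the max732x platform-data header and the gpio_base/irq_base fields used by the probe code above (board name, bus number, address and IRQ numbers are hypothetical):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/i2c/max732x.h>	/* assumed location of max732x_platform_data */

/* hypothetical wiring: MAX7325 on I2C bus 1, /INT routed to IRQ 17 */
static struct max732x_platform_data board_max7325_pdata = {
	.gpio_base = 224,	/* first GPIO number handed to gpiolib */
	.irq_base  = 200,	/* first IRQ number; leaving this 0 disables IRQs */
};

static struct i2c_board_info board_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("max7325", 0x68),	/* GROUP_A address range */
		.platform_data = &board_max7325_pdata,
		.irq = 17,				/* expander /INT line */
	},
};

static void __init board_register_expander(void)
{
	i2c_register_board_info(1, board_i2c_devices,
				ARRAY_SIZE(board_i2c_devices));
}

With this in place, gpio_to_irq() on any input-capable expander line resolves to irq_base + offset, exactly as max732x_gpio_to_irq() above computes it.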
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index f156ab3bb6ed..a2b12aa1f2b9 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -73,7 +73,7 @@ struct pca953x_chip {
73 struct i2c_client *client; 73 struct i2c_client *client;
74 struct pca953x_platform_data *dyn_pdata; 74 struct pca953x_platform_data *dyn_pdata;
75 struct gpio_chip gpio_chip; 75 struct gpio_chip gpio_chip;
76 char **names; 76 const char *const *names;
77}; 77};
78 78
79static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val) 79static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
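
The pca953x hunk above is purely a const-correctness tightening of the names table. As a stand-alone illustration of what the stricter type buys (identifiers hypothetical): a 'const char *const *' pointer accepts a table of string literals without a cast and promises that neither the pointers nor the strings are modified, which 'char **' did not.

static const char *const board_gpio_names[] = {
	"led-power", "led-status", "button-reset",
};

static void use_names(void)
{
	/* accepted directly with the new 'const char *const *names' field */
	const char *const *names = board_gpio_names;

	/*
	 * The old 'char **names' field needed a cast here and would have
	 * allowed writes such as names[0][0] = 'X' on string literals.
	 */
	(void)names;
}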
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 105701a1f05b..ee568c8fcbd0 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -164,7 +164,7 @@ static int pl061_irq_type(unsigned irq, unsigned trigger)
164 unsigned long flags; 164 unsigned long flags;
165 u8 gpiois, gpioibe, gpioiev; 165 u8 gpiois, gpioibe, gpioiev;
166 166
167 if (offset < 0 || offset > PL061_GPIO_NR) 167 if (offset < 0 || offset >= PL061_GPIO_NR)
168 return -EINVAL; 168 return -EINVAL;
169 169
170 spin_lock_irqsave(&chip->irq_lock, flags); 170 spin_lock_irqsave(&chip->irq_lock, flags);
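
The pl061 hunk fixes an off-by-one bounds check: 'offset > PL061_GPIO_NR' still admitted offset == PL061_GPIO_NR, one past the last valid line. A condensed, stand-alone sketch of the failure mode (the array is hypothetical, not the driver's actual data layout):

#include <linux/errno.h>

#define PL061_GPIO_NR	8

static unsigned char trigger_type[PL061_GPIO_NR];

static int set_trigger(unsigned offset, unsigned char type)
{
	if (offset >= PL061_GPIO_NR)	/* '>' here would let offset == 8 through */
		return -EINVAL;

	trigger_type[offset] = type;	/* offset 8 would write out of bounds */
	return 0;
}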
diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/rdc321x-gpio.c
new file mode 100644
index 000000000000..2762698e0204
--- /dev/null
+++ b/drivers/gpio/rdc321x-gpio.c
@@ -0,0 +1,246 @@
1/*
2 * RDC321x GPIO driver
3 *
4 * Copyright (C) 2008, Volker Weiss <dev@tintuc.de>
5 * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 */
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/spinlock.h>
26#include <linux/platform_device.h>
27#include <linux/pci.h>
28#include <linux/gpio.h>
29#include <linux/mfd/rdc321x.h>
30#include <linux/slab.h>
31
32struct rdc321x_gpio {
33 spinlock_t lock;
34 struct pci_dev *sb_pdev;
35 u32 data_reg[2];
36 int reg1_ctrl_base;
37 int reg1_data_base;
38 int reg2_ctrl_base;
39 int reg2_data_base;
40 struct gpio_chip chip;
41};
42
43/* read GPIO pin */
44static int rdc_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
45{
46 struct rdc321x_gpio *gpch;
47 u32 value = 0;
48 int reg;
49
50 gpch = container_of(chip, struct rdc321x_gpio, chip);
51 reg = gpio < 32 ? gpch->reg1_data_base : gpch->reg2_data_base;
52
53 spin_lock(&gpch->lock);
54 pci_write_config_dword(gpch->sb_pdev, reg,
55 gpch->data_reg[gpio < 32 ? 0 : 1]);
56 pci_read_config_dword(gpch->sb_pdev, reg, &value);
57 spin_unlock(&gpch->lock);
58
59 return (1 << (gpio & 0x1f)) & value ? 1 : 0;
60}
61
62static void rdc_gpio_set_value_impl(struct gpio_chip *chip,
63 unsigned gpio, int value)
64{
65 struct rdc321x_gpio *gpch;
66 int reg = (gpio < 32) ? 0 : 1;
67
68 gpch = container_of(chip, struct rdc321x_gpio, chip);
69
70 if (value)
71 gpch->data_reg[reg] |= 1 << (gpio & 0x1f);
72 else
73 gpch->data_reg[reg] &= ~(1 << (gpio & 0x1f));
74
75 pci_write_config_dword(gpch->sb_pdev,
76 reg ? gpch->reg2_data_base : gpch->reg1_data_base,
77 gpch->data_reg[reg]);
78}
79
80/* set GPIO pin to value */
81static void rdc_gpio_set_value(struct gpio_chip *chip,
82 unsigned gpio, int value)
83{
84 struct rdc321x_gpio *gpch;
85
86 gpch = container_of(chip, struct rdc321x_gpio, chip);
87 spin_lock(&gpch->lock);
88 rdc_gpio_set_value_impl(chip, gpio, value);
89 spin_unlock(&gpch->lock);
90}
91
92static int rdc_gpio_config(struct gpio_chip *chip,
93 unsigned gpio, int value)
94{
95 struct rdc321x_gpio *gpch;
96 int err;
97 u32 reg;
98
99 gpch = container_of(chip, struct rdc321x_gpio, chip);
100
101 spin_lock(&gpch->lock);
102 err = pci_read_config_dword(gpch->sb_pdev, gpio < 32 ?
103 gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, &reg);
104 if (err)
105 goto unlock;
106
107 reg |= 1 << (gpio & 0x1f);
108
109 err = pci_write_config_dword(gpch->sb_pdev, gpio < 32 ?
110 gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, reg);
111 if (err)
112 goto unlock;
113
114 rdc_gpio_set_value_impl(chip, gpio, value);
115
116unlock:
117 spin_unlock(&gpch->lock);
118
119 return err;
120}
121
122/* configure GPIO pin as input */
123static int rdc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
124{
125 return rdc_gpio_config(chip, gpio, 1);
126}
127
128/*
129 * Cache the initial value of both GPIO data registers
130 */
131static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
132{
133 int err;
134 struct resource *r;
135 struct rdc321x_gpio *rdc321x_gpio_dev;
136 struct rdc321x_gpio_pdata *pdata;
137
138 pdata = pdev->dev.platform_data;
139 if (!pdata) {
140 dev_err(&pdev->dev, "no platform data supplied\n");
141 return -ENODEV;
142 }
143
144 rdc321x_gpio_dev = kzalloc(sizeof(struct rdc321x_gpio), GFP_KERNEL);
145 if (!rdc321x_gpio_dev) {
146 dev_err(&pdev->dev, "failed to allocate private data\n");
147 return -ENOMEM;
148 }
149
150 r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg1");
151 if (!r) {
152 dev_err(&pdev->dev, "failed to get gpio-reg1 resource\n");
153 err = -ENODEV;
154 goto out_free;
155 }
156
157 spin_lock_init(&rdc321x_gpio_dev->lock);
158 rdc321x_gpio_dev->sb_pdev = pdata->sb_pdev;
159 rdc321x_gpio_dev->reg1_ctrl_base = r->start;
160 rdc321x_gpio_dev->reg1_data_base = r->start + 0x4;
161
162 r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg2");
163 if (!r) {
164 dev_err(&pdev->dev, "failed to get gpio-reg2 resource\n");
165 err = -ENODEV;
166 goto out_free;
167 }
168
169 rdc321x_gpio_dev->reg2_ctrl_base = r->start;
170 rdc321x_gpio_dev->reg2_data_base = r->start + 0x4;
171
172 rdc321x_gpio_dev->chip.label = "rdc321x-gpio";
173 rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
174 rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
175 rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
176 rdc321x_gpio_dev->chip.set = rdc_gpio_set_value;
177 rdc321x_gpio_dev->chip.base = 0;
178 rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios;
179
180 platform_set_drvdata(pdev, rdc321x_gpio_dev);
181
182	/* This might not be what others (BIOS, bootloader, etc.) wrote
183	   to these registers before, but it is a good guess and still
184	   better than just assuming 0xffffffff. */
185 err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
186 rdc321x_gpio_dev->reg1_data_base,
187 &rdc321x_gpio_dev->data_reg[0]);
188 if (err)
189 goto out_drvdata;
190
191 err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
192 rdc321x_gpio_dev->reg2_data_base,
193 &rdc321x_gpio_dev->data_reg[1]);
194 if (err)
195 goto out_drvdata;
196
197 dev_info(&pdev->dev, "registering %d GPIOs\n",
198 rdc321x_gpio_dev->chip.ngpio);
199 return gpiochip_add(&rdc321x_gpio_dev->chip);
200
201out_drvdata:
202 platform_set_drvdata(pdev, NULL);
203out_free:
204 kfree(rdc321x_gpio_dev);
205 return err;
206}
207
208static int __devexit rdc321x_gpio_remove(struct platform_device *pdev)
209{
210 int ret;
211 struct rdc321x_gpio *rdc321x_gpio_dev = platform_get_drvdata(pdev);
212
213 ret = gpiochip_remove(&rdc321x_gpio_dev->chip);
214 if (ret)
215 dev_err(&pdev->dev, "failed to unregister chip\n");
216
217 kfree(rdc321x_gpio_dev);
218 platform_set_drvdata(pdev, NULL);
219
220 return ret;
221}
222
223static struct platform_driver rdc321x_gpio_driver = {
224 .driver.name = "rdc321x-gpio",
225 .driver.owner = THIS_MODULE,
226 .probe = rdc321x_gpio_probe,
227 .remove = __devexit_p(rdc321x_gpio_remove),
228};
229
230static int __init rdc321x_gpio_init(void)
231{
232 return platform_driver_register(&rdc321x_gpio_driver);
233}
234
235static void __exit rdc321x_gpio_exit(void)
236{
237 platform_driver_unregister(&rdc321x_gpio_driver);
238}
239
240module_init(rdc321x_gpio_init);
241module_exit(rdc321x_gpio_exit);
242
243MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
244MODULE_DESCRIPTION("RDC321x GPIO driver");
245MODULE_LICENSE("GPL");
246MODULE_ALIAS("platform:rdc321x-gpio");
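
The new rdc321x-gpio driver is probed as a platform device and, as its probe routine shows, expects the southbridge MFD parent to hand it a struct rdc321x_gpio_pdata plus two IORESOURCE_IO resources named "gpio-reg1" and "gpio-reg2". A hedged sketch of such a registration (register offsets and GPIO count are placeholders, and the real MFD parent may register the cell through a different mechanism):

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/mfd/rdc321x.h>	/* assumed home of struct rdc321x_gpio_pdata */

static struct resource rdc321x_gpio_resources[] = {
	{
		.name	= "gpio-reg1",
		.start	= 0x48,		/* placeholder PCI config offsets */
		.end	= 0x4f,
		.flags	= IORESOURCE_IO,
	}, {
		.name	= "gpio-reg2",
		.start	= 0x84,
		.end	= 0x8b,
		.flags	= IORESOURCE_IO,
	},
};

static struct rdc321x_gpio_pdata rdc321x_gpio_pdata = {
	.max_gpios = 58,	/* placeholder line count */
	/* .sb_pdev is filled in by the MFD parent once the PCI device is known */
};

static struct platform_device rdc321x_gpio_device = {
	.name		= "rdc321x-gpio",
	.id		= -1,
	.resource	= rdc321x_gpio_resources,
	.num_resources	= ARRAY_SIZE(rdc321x_gpio_resources),
	.dev		= {
		.platform_data = &rdc321x_gpio_pdata,
	},
};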
diff --git a/drivers/gpio/tc35892-gpio.c b/drivers/gpio/tc35892-gpio.c
new file mode 100644
index 000000000000..1be6288780de
--- /dev/null
+++ b/drivers/gpio/tc35892-gpio.c
@@ -0,0 +1,381 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License, version 2
5 * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson
6 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
7 */
8
9#include <linux/module.h>
10#include <linux/init.h>
11#include <linux/platform_device.h>
12#include <linux/slab.h>
13#include <linux/gpio.h>
14#include <linux/irq.h>
15#include <linux/interrupt.h>
16#include <linux/mfd/tc35892.h>
17
18/*
19 * These registers are modified under the irq bus lock and cached to avoid
20 * unnecessary writes in bus_sync_unlock.
21 */
22enum { REG_IBE, REG_IEV, REG_IS, REG_IE };
23
24#define CACHE_NR_REGS 4
25#define CACHE_NR_BANKS 3
26
27struct tc35892_gpio {
28 struct gpio_chip chip;
29 struct tc35892 *tc35892;
30 struct device *dev;
31 struct mutex irq_lock;
32
33 int irq_base;
34
35 /* Caches of interrupt control registers for bus_lock */
36 u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS];
37 u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS];
38};
39
40static inline struct tc35892_gpio *to_tc35892_gpio(struct gpio_chip *chip)
41{
42 return container_of(chip, struct tc35892_gpio, chip);
43}
44
45static int tc35892_gpio_get(struct gpio_chip *chip, unsigned offset)
46{
47 struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
48 struct tc35892 *tc35892 = tc35892_gpio->tc35892;
49 u8 reg = TC35892_GPIODATA0 + (offset / 8) * 2;
50 u8 mask = 1 << (offset % 8);
51 int ret;
52
53 ret = tc35892_reg_read(tc35892, reg);
54 if (ret < 0)
55 return ret;
56
57 return ret & mask;
58}
59
60static void tc35892_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
61{
62 struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
63 struct tc35892 *tc35892 = tc35892_gpio->tc35892;
64 u8 reg = TC35892_GPIODATA0 + (offset / 8) * 2;
65 unsigned pos = offset % 8;
66 u8 data[] = {!!val << pos, 1 << pos};
67
68 tc35892_block_write(tc35892, reg, ARRAY_SIZE(data), data);
69}
70
71static int tc35892_gpio_direction_output(struct gpio_chip *chip,
72 unsigned offset, int val)
73{
74 struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
75 struct tc35892 *tc35892 = tc35892_gpio->tc35892;
76 u8 reg = TC35892_GPIODIR0 + offset / 8;
77 unsigned pos = offset % 8;
78
79 tc35892_gpio_set(chip, offset, val);
80
81 return tc35892_set_bits(tc35892, reg, 1 << pos, 1 << pos);
82}
83
84static int tc35892_gpio_direction_input(struct gpio_chip *chip,
85 unsigned offset)
86{
87 struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
88 struct tc35892 *tc35892 = tc35892_gpio->tc35892;
89 u8 reg = TC35892_GPIODIR0 + offset / 8;
90 unsigned pos = offset % 8;
91
92 return tc35892_set_bits(tc35892, reg, 1 << pos, 0);
93}
94
95static int tc35892_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
96{
97 struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip);
98
99 return tc35892_gpio->irq_base + offset;
100}
101
102static struct gpio_chip template_chip = {
103 .label = "tc35892",
104 .owner = THIS_MODULE,
105 .direction_input = tc35892_gpio_direction_input,
106 .get = tc35892_gpio_get,
107 .direction_output = tc35892_gpio_direction_output,
108 .set = tc35892_gpio_set,
109 .to_irq = tc35892_gpio_to_irq,
110 .can_sleep = 1,
111};
112
113static int tc35892_gpio_irq_set_type(unsigned int irq, unsigned int type)
114{
115 struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
116 int offset = irq - tc35892_gpio->irq_base;
117 int regoffset = offset / 8;
118 int mask = 1 << (offset % 8);
119
120 if (type == IRQ_TYPE_EDGE_BOTH) {
121 tc35892_gpio->regs[REG_IBE][regoffset] |= mask;
122 return 0;
123 }
124
125 tc35892_gpio->regs[REG_IBE][regoffset] &= ~mask;
126
127 if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
128 tc35892_gpio->regs[REG_IS][regoffset] |= mask;
129 else
130 tc35892_gpio->regs[REG_IS][regoffset] &= ~mask;
131
132 if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
133 tc35892_gpio->regs[REG_IEV][regoffset] |= mask;
134 else
135 tc35892_gpio->regs[REG_IEV][regoffset] &= ~mask;
136
137 return 0;
138}
139
140static void tc35892_gpio_irq_lock(unsigned int irq)
141{
142 struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
143
144 mutex_lock(&tc35892_gpio->irq_lock);
145}
146
147static void tc35892_gpio_irq_sync_unlock(unsigned int irq)
148{
149 struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
150 struct tc35892 *tc35892 = tc35892_gpio->tc35892;
151 static const u8 regmap[] = {
152 [REG_IBE] = TC35892_GPIOIBE0,
153 [REG_IEV] = TC35892_GPIOIEV0,
154 [REG_IS] = TC35892_GPIOIS0,
155 [REG_IE] = TC35892_GPIOIE0,
156 };
157 int i, j;
158
159 for (i = 0; i < CACHE_NR_REGS; i++) {
160 for (j = 0; j < CACHE_NR_BANKS; j++) {
161 u8 old = tc35892_gpio->oldregs[i][j];
162 u8 new = tc35892_gpio->regs[i][j];
163
164 if (new == old)
165 continue;
166
167 tc35892_gpio->oldregs[i][j] = new;
168 tc35892_reg_write(tc35892, regmap[i] + j * 8, new);
169 }
170 }
171
172 mutex_unlock(&tc35892_gpio->irq_lock);
173}
174
175static void tc35892_gpio_irq_mask(unsigned int irq)
176{
177 struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
178 int offset = irq - tc35892_gpio->irq_base;
179 int regoffset = offset / 8;
180 int mask = 1 << (offset % 8);
181
182 tc35892_gpio->regs[REG_IE][regoffset] &= ~mask;
183}
184
185static void tc35892_gpio_irq_unmask(unsigned int irq)
186{
187 struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq);
188 int offset = irq - tc35892_gpio->irq_base;
189 int regoffset = offset / 8;
190 int mask = 1 << (offset % 8);
191
192 tc35892_gpio->regs[REG_IE][regoffset] |= mask;
193}
194
195static struct irq_chip tc35892_gpio_irq_chip = {
196 .name = "tc35892-gpio",
197 .bus_lock = tc35892_gpio_irq_lock,
198 .bus_sync_unlock = tc35892_gpio_irq_sync_unlock,
199 .mask = tc35892_gpio_irq_mask,
200 .unmask = tc35892_gpio_irq_unmask,
201 .set_type = tc35892_gpio_irq_set_type,
202};
203
204static irqreturn_t tc35892_gpio_irq(int irq, void *dev)
205{
206 struct tc35892_gpio *tc35892_gpio = dev;
207 struct tc35892 *tc35892 = tc35892_gpio->tc35892;
208 u8 status[CACHE_NR_BANKS];
209 int ret;
210 int i;
211
212 ret = tc35892_block_read(tc35892, TC35892_GPIOMIS0,
213 ARRAY_SIZE(status), status);
214 if (ret < 0)
215 return IRQ_NONE;
216
217 for (i = 0; i < ARRAY_SIZE(status); i++) {
218 unsigned int stat = status[i];
219 if (!stat)
220 continue;
221
222 while (stat) {
223 int bit = __ffs(stat);
224 int line = i * 8 + bit;
225
226 handle_nested_irq(tc35892_gpio->irq_base + line);
227 stat &= ~(1 << bit);
228 }
229
230 tc35892_reg_write(tc35892, TC35892_GPIOIC0 + i, status[i]);
231 }
232
233 return IRQ_HANDLED;
234}
235
236static int tc35892_gpio_irq_init(struct tc35892_gpio *tc35892_gpio)
237{
238 int base = tc35892_gpio->irq_base;
239 int irq;
240
241 for (irq = base; irq < base + tc35892_gpio->chip.ngpio; irq++) {
242 set_irq_chip_data(irq, tc35892_gpio);
243 set_irq_chip_and_handler(irq, &tc35892_gpio_irq_chip,
244 handle_simple_irq);
245 set_irq_nested_thread(irq, 1);
246#ifdef CONFIG_ARM
247 set_irq_flags(irq, IRQF_VALID);
248#else
249 set_irq_noprobe(irq);
250#endif
251 }
252
253 return 0;
254}
255
256static void tc35892_gpio_irq_remove(struct tc35892_gpio *tc35892_gpio)
257{
258 int base = tc35892_gpio->irq_base;
259 int irq;
260
261 for (irq = base; irq < base + tc35892_gpio->chip.ngpio; irq++) {
262#ifdef CONFIG_ARM
263 set_irq_flags(irq, 0);
264#endif
265 set_irq_chip_and_handler(irq, NULL, NULL);
266 set_irq_chip_data(irq, NULL);
267 }
268}
269
270static int __devinit tc35892_gpio_probe(struct platform_device *pdev)
271{
272 struct tc35892 *tc35892 = dev_get_drvdata(pdev->dev.parent);
273 struct tc35892_gpio_platform_data *pdata;
274 struct tc35892_gpio *tc35892_gpio;
275 int ret;
276 int irq;
277
278 pdata = tc35892->pdata->gpio;
279 if (!pdata)
280 return -ENODEV;
281
282 irq = platform_get_irq(pdev, 0);
283 if (irq < 0)
284 return irq;
285
286 tc35892_gpio = kzalloc(sizeof(struct tc35892_gpio), GFP_KERNEL);
287 if (!tc35892_gpio)
288 return -ENOMEM;
289
290 mutex_init(&tc35892_gpio->irq_lock);
291
292 tc35892_gpio->dev = &pdev->dev;
293 tc35892_gpio->tc35892 = tc35892;
294
295 tc35892_gpio->chip = template_chip;
296 tc35892_gpio->chip.ngpio = tc35892->num_gpio;
297 tc35892_gpio->chip.dev = &pdev->dev;
298 tc35892_gpio->chip.base = pdata->gpio_base;
299
300 tc35892_gpio->irq_base = tc35892->irq_base + TC35892_INT_GPIO(0);
301
302 /* Bring the GPIO module out of reset */
303 ret = tc35892_set_bits(tc35892, TC35892_RSTCTRL,
304 TC35892_RSTCTRL_GPIRST, 0);
305 if (ret < 0)
306 goto out_free;
307
308 ret = tc35892_gpio_irq_init(tc35892_gpio);
309 if (ret)
310 goto out_free;
311
312 ret = request_threaded_irq(irq, NULL, tc35892_gpio_irq, IRQF_ONESHOT,
313 "tc35892-gpio", tc35892_gpio);
314 if (ret) {
315 dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
316 goto out_removeirq;
317 }
318
319 ret = gpiochip_add(&tc35892_gpio->chip);
320 if (ret) {
321 dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
322 goto out_freeirq;
323 }
324
325 platform_set_drvdata(pdev, tc35892_gpio);
326
327 return 0;
328
329out_freeirq:
330 free_irq(irq, tc35892_gpio);
331out_removeirq:
332 tc35892_gpio_irq_remove(tc35892_gpio);
333out_free:
334 kfree(tc35892_gpio);
335 return ret;
336}
337
338static int __devexit tc35892_gpio_remove(struct platform_device *pdev)
339{
340 struct tc35892_gpio *tc35892_gpio = platform_get_drvdata(pdev);
341 int irq = platform_get_irq(pdev, 0);
342 int ret;
343
344 ret = gpiochip_remove(&tc35892_gpio->chip);
345 if (ret < 0) {
346 dev_err(tc35892_gpio->dev,
347 "unable to remove gpiochip: %d\n", ret);
348 return ret;
349 }
350
351 free_irq(irq, tc35892_gpio);
352 tc35892_gpio_irq_remove(tc35892_gpio);
353
354 platform_set_drvdata(pdev, NULL);
355 kfree(tc35892_gpio);
356
357 return 0;
358}
359
360static struct platform_driver tc35892_gpio_driver = {
361 .driver.name = "tc35892-gpio",
362 .driver.owner = THIS_MODULE,
363 .probe = tc35892_gpio_probe,
364 .remove = __devexit_p(tc35892_gpio_remove),
365};
366
367static int __init tc35892_gpio_init(void)
368{
369 return platform_driver_register(&tc35892_gpio_driver);
370}
371subsys_initcall(tc35892_gpio_init);
372
373static void __exit tc35892_gpio_exit(void)
374{
375 platform_driver_unregister(&tc35892_gpio_driver);
376}
377module_exit(tc35892_gpio_exit);
378
379MODULE_LICENSE("GPL v2");
380MODULE_DESCRIPTION("TC35892 GPIO driver");
381MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
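
Because the tc35892 driver above registers a nested (threaded) interrupt for every expander line and implements .to_irq, a client can treat an expander pin like any other GPIO interrupt source; the only constraint is that the handler must be threaded, since servicing it touches the I2C bus. A hedged consumer sketch (GPIO number and names are hypothetical):

#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>

#define EXPANDER_BUTTON_GPIO	200	/* hypothetical: pdata->gpio_base + offset */

static irqreturn_t button_thread_fn(int irq, void *data)
{
	/* thread context, so sleeping I2C transfers are allowed here */
	return IRQ_HANDLED;
}

static int __init button_init(void)
{
	int irq, ret;

	ret = gpio_request(EXPANDER_BUTTON_GPIO, "expander-button");
	if (ret)
		return ret;
	gpio_direction_input(EXPANDER_BUTTON_GPIO);

	irq = gpio_to_irq(EXPANDER_BUTTON_GPIO);	/* tc35892_gpio_to_irq() */
	if (irq < 0) {
		gpio_free(EXPANDER_BUTTON_GPIO);
		return irq;
	}

	ret = request_threaded_irq(irq, NULL, button_thread_fn,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   "expander-button", NULL);
	if (ret)
		gpio_free(EXPANDER_BUTTON_GPIO);
	return ret;
}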
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f569ae88ab38..c1981861bbbd 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -147,7 +147,10 @@ drm_edid_block_valid(u8 *raw_edid)
147 csum += raw_edid[i]; 147 csum += raw_edid[i];
148 if (csum) { 148 if (csum) {
149 DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); 149 DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
150 goto bad; 150
151 /* allow CEA to slide through, switches mangle this */
152 if (raw_edid[0] != 0x02)
153 goto bad;
151 } 154 }
152 155
153 /* per-block-type checks */ 156 /* per-block-type checks */
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7e663a79829f..266b0ff441af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -241,7 +241,8 @@ nouveau_connector_detect(struct drm_connector *connector)
241 if (nv_encoder && nv_connector->native_mode) { 241 if (nv_encoder && nv_connector->native_mode) {
242 unsigned status = connector_status_connected; 242 unsigned status = connector_status_connected;
243 243
244#ifdef CONFIG_ACPI 244#if defined(CONFIG_ACPI_BUTTON) || \
245 (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
245 if (!nouveau_ignorelid && !acpi_lid_open()) 246 if (!nouveau_ignorelid && !acpi_lid_open())
246 status = connector_status_unknown; 247 status = connector_status_unknown;
247#endif 248#endif
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 0616c96e4b67..704a25d04ac9 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -253,7 +253,11 @@ nv40_graph_init(struct drm_device *dev)
253 253
254 if (!dev_priv->engine.graph.ctxprog) { 254 if (!dev_priv->engine.graph.ctxprog) {
255 struct nouveau_grctx ctx = {}; 255 struct nouveau_grctx ctx = {};
256 uint32_t cp[256]; 256 uint32_t *cp;
257
258 cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
259 if (!cp)
260 return -ENOMEM;
257 261
258 ctx.dev = dev; 262 ctx.dev = dev;
259 ctx.mode = NOUVEAU_GRCTX_PROG; 263 ctx.mode = NOUVEAU_GRCTX_PROG;
@@ -265,6 +269,8 @@ nv40_graph_init(struct drm_device *dev)
265 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); 269 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
266 for (i = 0; i < ctx.ctxprog_len; i++) 270 for (i = 0; i < ctx.ctxprog_len; i++)
267 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); 271 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
272
273 kfree(cp);
268 } 274 }
269 275
270 /* No context present currently */ 276 /* No context present currently */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 03dd6c41dc19..f3f2827017ef 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -707,6 +707,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
707 break; 707 break;
708 case ATOM_DCPLL: 708 case ATOM_DCPLL:
709 case ATOM_PPLL_INVALID: 709 case ATOM_PPLL_INVALID:
710 default:
710 pll = &rdev->clock.dcpll; 711 pll = &rdev->clock.dcpll;
711 break; 712 break;
712 } 713 }
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 66a37fb75839..669feb689bfc 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -576,6 +576,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
576 */ 576 */
577int radeon_agp_init(struct radeon_device *rdev); 577int radeon_agp_init(struct radeon_device *rdev);
578void radeon_agp_resume(struct radeon_device *rdev); 578void radeon_agp_resume(struct radeon_device *rdev);
579void radeon_agp_suspend(struct radeon_device *rdev);
579void radeon_agp_fini(struct radeon_device *rdev); 580void radeon_agp_fini(struct radeon_device *rdev);
580 581
581 582
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 28e473f1f56f..f40dfb77f9b1 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev)
270 } 270 }
271#endif 271#endif
272} 272}
273
274void radeon_agp_suspend(struct radeon_device *rdev)
275{
276 radeon_agp_fini(rdev);
277}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 6e733fdc3349..24ea683f7cf5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -680,10 +680,18 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
680 uint8_t dac; 680 uint8_t dac;
681 union atom_supported_devices *supported_devices; 681 union atom_supported_devices *supported_devices;
682 int i, j, max_device; 682 int i, j, max_device;
683 struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; 683 struct bios_connector *bios_connectors;
684 size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
684 685
685 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) 686 bios_connectors = kzalloc(bc_size, GFP_KERNEL);
687 if (!bios_connectors)
688 return false;
689
690 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
691 &data_offset)) {
692 kfree(bios_connectors);
686 return false; 693 return false;
694 }
687 695
688 supported_devices = 696 supported_devices =
689 (union atom_supported_devices *)(ctx->bios + data_offset); 697 (union atom_supported_devices *)(ctx->bios + data_offset);
@@ -851,6 +859,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
851 859
852 radeon_link_encoder_connector(dev); 860 radeon_link_encoder_connector(dev);
853 861
862 kfree(bios_connectors);
854 return true; 863 return true;
855} 864}
856 865
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a20b612ffe75..fdc3fdf78acb 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -754,6 +754,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
754 /* evict remaining vram memory */ 754 /* evict remaining vram memory */
755 radeon_bo_evict_vram(rdev); 755 radeon_bo_evict_vram(rdev);
756 756
757 radeon_agp_suspend(rdev);
758
757 pci_save_state(dev->pdev); 759 pci_save_state(dev->pdev);
758 if (state.event == PM_EVENT_SUSPEND) { 760 if (state.event == PM_EVENT_SUSPEND) {
759 /* Shut down the device */ 761 /* Shut down the device */
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index cc5316dcf580..b3ba44c0a818 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
900 flags |= RADEON_FRONT; 900 flags |= RADEON_FRONT;
901 } 901 }
902 if (flags & (RADEON_DEPTH|RADEON_STENCIL)) { 902 if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
903 if (!dev_priv->have_z_offset) 903 if (!dev_priv->have_z_offset) {
904 printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n"); 904 printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
905 flags &= ~(RADEON_DEPTH | RADEON_STENCIL); 905 flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
906 }
906 } 907 }
907 908
908 if (flags & (RADEON_FRONT | RADEON_BACK)) { 909 if (flags & (RADEON_FRONT | RADEON_BACK)) {
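
The radeon_state hunk is a pure control-flow fix: without braces only the printk_once() was guarded by the have_z_offset test, while the line stripping RADEON_DEPTH | RADEON_STENCIL ran unconditionally despite its indentation, so those clear flags were always dropped. A stand-alone user-space illustration of that dangling-statement pattern:

#include <stdio.h>

int main(void)
{
	int have_z_offset = 1;
	unsigned flags = 0x3;		/* stands in for DEPTH | STENCIL */

	if (!have_z_offset)
		printf("illegal depth clear request\n");
		flags &= ~0x3u;		/* NOT part of the if: always executes */

	printf("flags = %#x\n", flags);	/* prints 0, not 0x3 */
	return 0;
}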
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 76ba59b9fea1..132278fa6240 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -347,6 +347,14 @@ config HID_QUANTA
347 ---help--- 347 ---help---
348 Support for Quanta Optical Touch dual-touch panels. 348 Support for Quanta Optical Touch dual-touch panels.
349 349
350config HID_ROCCAT
351 tristate "Roccat special event support"
352 depends on USB_HID
353 ---help---
354 Support for Roccat special events.
355 Say Y here if you have a Roccat mouse or keyboard and want OSD or
356 macro execution support.
357
350config HID_ROCCAT_KONE 358config HID_ROCCAT_KONE
351 tristate "Roccat Kone Mouse support" 359 tristate "Roccat Kone Mouse support"
352 depends on USB_HID 360 depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 22e47eaeea32..987fa0627367 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_HID_QUANTA) += hid-quanta.o
48obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o 48obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
49obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o 49obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
50obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o 50obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
51obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o
51obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o 52obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o
52obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o 53obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
53obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o 54obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e10e314d38cc..aa0f7dcabcd7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1301,6 +1301,7 @@ static const struct hid_device_id hid_blacklist[] = {
1301 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) }, 1301 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
1302 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, 1302 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
1303 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, 1303 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
1304 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
1304 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, 1305 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
1305 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, 1306 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
1306 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, 1307 { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 56f314fbd4f9..c94026768570 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -811,7 +811,7 @@ static const char *relatives[REL_MAX + 1] = {
811 [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc", 811 [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc",
812}; 812};
813 813
814static const char *absolutes[ABS_MAX + 1] = { 814static const char *absolutes[ABS_CNT] = {
815 [ABS_X] = "X", [ABS_Y] = "Y", 815 [ABS_X] = "X", [ABS_Y] = "Y",
816 [ABS_Z] = "Z", [ABS_RX] = "Rx", 816 [ABS_Z] = "Z", [ABS_RX] = "Rx",
817 [ABS_RY] = "Ry", [ABS_RZ] = "Rz", 817 [ABS_RY] = "Ry", [ABS_RZ] = "Rz",
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index 62416e6baeca..3975e039c3dd 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -73,6 +73,7 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field,
73static const struct hid_device_id gyration_devices[] = { 73static const struct hid_device_id gyration_devices[] = {
74 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, 74 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
75 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, 75 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
76 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
76 { } 77 { }
77}; 78};
78MODULE_DEVICE_TABLE(hid, gyration_devices); 79MODULE_DEVICE_TABLE(hid, gyration_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9776896cc4fc..6af77ed0b555 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -282,6 +282,7 @@
282#define USB_VENDOR_ID_GYRATION 0x0c16 282#define USB_VENDOR_ID_GYRATION 0x0c16
283#define USB_DEVICE_ID_GYRATION_REMOTE 0x0002 283#define USB_DEVICE_ID_GYRATION_REMOTE 0x0002
284#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003 284#define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003
285#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008
285 286
286#define USB_VENDOR_ID_HAPP 0x078b 287#define USB_VENDOR_ID_HAPP 0x078b
287#define USB_DEVICE_ID_UGCI_DRIVING 0x0010 288#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 66e694054ba2..17f2dc04f883 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -37,6 +37,7 @@
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include "hid-ids.h" 39#include "hid-ids.h"
40#include "hid-roccat.h"
40#include "hid-roccat-kone.h" 41#include "hid-roccat-kone.h"
41 42
42static void kone_set_settings_checksum(struct kone_settings *settings) 43static void kone_set_settings_checksum(struct kone_settings *settings)
@@ -263,7 +264,7 @@ static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
263 return 0; 264 return 0;
264} 265}
265 266
266static ssize_t kone_sysfs_read_settings(struct kobject *kobj, 267static ssize_t kone_sysfs_read_settings(struct file *fp, struct kobject *kobj,
267 struct bin_attribute *attr, char *buf, 268 struct bin_attribute *attr, char *buf,
268 loff_t off, size_t count) { 269 loff_t off, size_t count) {
269 struct device *dev = container_of(kobj, struct device, kobj); 270 struct device *dev = container_of(kobj, struct device, kobj);
@@ -287,7 +288,7 @@ static ssize_t kone_sysfs_read_settings(struct kobject *kobj,
287 * This function keeps values in kone_device up to date and assumes that in 288 * This function keeps values in kone_device up to date and assumes that in
288 * case of error the old data is still valid 289 * case of error the old data is still valid
289 */ 290 */
290static ssize_t kone_sysfs_write_settings(struct kobject *kobj, 291static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
291 struct bin_attribute *attr, char *buf, 292 struct bin_attribute *attr, char *buf,
292 loff_t off, size_t count) { 293 loff_t off, size_t count) {
293 struct device *dev = container_of(kobj, struct device, kobj); 294 struct device *dev = container_of(kobj, struct device, kobj);
@@ -342,31 +343,31 @@ static ssize_t kone_sysfs_read_profilex(struct kobject *kobj,
342 return count; 343 return count;
343} 344}
344 345
345static ssize_t kone_sysfs_read_profile1(struct kobject *kobj, 346static ssize_t kone_sysfs_read_profile1(struct file *fp, struct kobject *kobj,
346 struct bin_attribute *attr, char *buf, 347 struct bin_attribute *attr, char *buf,
347 loff_t off, size_t count) { 348 loff_t off, size_t count) {
348 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 1); 349 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 1);
349} 350}
350 351
351static ssize_t kone_sysfs_read_profile2(struct kobject *kobj, 352static ssize_t kone_sysfs_read_profile2(struct file *fp, struct kobject *kobj,
352 struct bin_attribute *attr, char *buf, 353 struct bin_attribute *attr, char *buf,
353 loff_t off, size_t count) { 354 loff_t off, size_t count) {
354 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 2); 355 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 2);
355} 356}
356 357
357static ssize_t kone_sysfs_read_profile3(struct kobject *kobj, 358static ssize_t kone_sysfs_read_profile3(struct file *fp, struct kobject *kobj,
358 struct bin_attribute *attr, char *buf, 359 struct bin_attribute *attr, char *buf,
359 loff_t off, size_t count) { 360 loff_t off, size_t count) {
360 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 3); 361 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 3);
361} 362}
362 363
363static ssize_t kone_sysfs_read_profile4(struct kobject *kobj, 364static ssize_t kone_sysfs_read_profile4(struct file *fp, struct kobject *kobj,
364 struct bin_attribute *attr, char *buf, 365 struct bin_attribute *attr, char *buf,
365 loff_t off, size_t count) { 366 loff_t off, size_t count) {
366 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 4); 367 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 4);
367} 368}
368 369
369static ssize_t kone_sysfs_read_profile5(struct kobject *kobj, 370static ssize_t kone_sysfs_read_profile5(struct file *fp, struct kobject *kobj,
370 struct bin_attribute *attr, char *buf, 371 struct bin_attribute *attr, char *buf,
371 loff_t off, size_t count) { 372 loff_t off, size_t count) {
372 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 5); 373 return kone_sysfs_read_profilex(kobj, attr, buf, off, count, 5);
@@ -404,31 +405,31 @@ static ssize_t kone_sysfs_write_profilex(struct kobject *kobj,
404 return sizeof(struct kone_profile); 405 return sizeof(struct kone_profile);
405} 406}
406 407
407static ssize_t kone_sysfs_write_profile1(struct kobject *kobj, 408static ssize_t kone_sysfs_write_profile1(struct file *fp, struct kobject *kobj,
408 struct bin_attribute *attr, char *buf, 409 struct bin_attribute *attr, char *buf,
409 loff_t off, size_t count) { 410 loff_t off, size_t count) {
410 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 1); 411 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 1);
411} 412}
412 413
413static ssize_t kone_sysfs_write_profile2(struct kobject *kobj, 414static ssize_t kone_sysfs_write_profile2(struct file *fp, struct kobject *kobj,
414 struct bin_attribute *attr, char *buf, 415 struct bin_attribute *attr, char *buf,
415 loff_t off, size_t count) { 416 loff_t off, size_t count) {
416 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 2); 417 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 2);
417} 418}
418 419
419static ssize_t kone_sysfs_write_profile3(struct kobject *kobj, 420static ssize_t kone_sysfs_write_profile3(struct file *fp, struct kobject *kobj,
420 struct bin_attribute *attr, char *buf, 421 struct bin_attribute *attr, char *buf,
421 loff_t off, size_t count) { 422 loff_t off, size_t count) {
422 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 3); 423 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 3);
423} 424}
424 425
425static ssize_t kone_sysfs_write_profile4(struct kobject *kobj, 426static ssize_t kone_sysfs_write_profile4(struct file *fp, struct kobject *kobj,
426 struct bin_attribute *attr, char *buf, 427 struct bin_attribute *attr, char *buf,
427 loff_t off, size_t count) { 428 loff_t off, size_t count) {
428 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 4); 429 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 4);
429} 430}
430 431
431static ssize_t kone_sysfs_write_profile5(struct kobject *kobj, 432static ssize_t kone_sysfs_write_profile5(struct file *fp, struct kobject *kobj,
432 struct bin_attribute *attr, char *buf, 433 struct bin_attribute *attr, char *buf,
433 loff_t off, size_t count) { 434 loff_t off, size_t count) {
434 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 5); 435 return kone_sysfs_write_profilex(kobj, attr, buf, off, count, 5);
@@ -849,6 +850,16 @@ static int kone_init_specials(struct hid_device *hdev)
849 "couldn't init struct kone_device\n"); 850 "couldn't init struct kone_device\n");
850 goto exit_free; 851 goto exit_free;
851 } 852 }
853
854 retval = roccat_connect(hdev);
855 if (retval < 0) {
856 dev_err(&hdev->dev, "couldn't init char dev\n");
857 /* be tolerant about not getting chrdev */
858 } else {
859 kone->roccat_claimed = 1;
860 kone->chrdev_minor = retval;
861 }
862
852 retval = kone_create_sysfs_attributes(intf); 863 retval = kone_create_sysfs_attributes(intf);
853 if (retval) { 864 if (retval) {
854 dev_err(&hdev->dev, "cannot create sysfs files\n"); 865 dev_err(&hdev->dev, "cannot create sysfs files\n");
@@ -868,10 +879,14 @@ exit_free:
868static void kone_remove_specials(struct hid_device *hdev) 879static void kone_remove_specials(struct hid_device *hdev)
869{ 880{
870 struct usb_interface *intf = to_usb_interface(hdev->dev.parent); 881 struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
882 struct kone_device *kone;
871 883
872 if (intf->cur_altsetting->desc.bInterfaceProtocol 884 if (intf->cur_altsetting->desc.bInterfaceProtocol
873 == USB_INTERFACE_PROTOCOL_MOUSE) { 885 == USB_INTERFACE_PROTOCOL_MOUSE) {
874 kone_remove_sysfs_attributes(intf); 886 kone_remove_sysfs_attributes(intf);
887 kone = hid_get_drvdata(hdev);
888 if (kone->roccat_claimed)
889 roccat_disconnect(kone->chrdev_minor);
875 kfree(hid_get_drvdata(hdev)); 890 kfree(hid_get_drvdata(hdev));
876 } 891 }
877} 892}
@@ -930,6 +945,37 @@ static void kone_keep_values_up_to_date(struct kone_device *kone,
930 } 945 }
931} 946}
932 947
948static void kone_report_to_chrdev(struct kone_device const *kone,
949 struct kone_mouse_event const *event)
950{
951 struct kone_roccat_report roccat_report;
952
953 switch (event->event) {
954 case kone_mouse_event_switch_profile:
955 case kone_mouse_event_switch_dpi:
956 case kone_mouse_event_osd_profile:
957 case kone_mouse_event_osd_dpi:
958 roccat_report.event = event->event;
959 roccat_report.value = event->value;
960 roccat_report.key = 0;
961 roccat_report_event(kone->chrdev_minor,
962 (uint8_t *)&roccat_report,
963 sizeof(struct kone_roccat_report));
964 break;
965 case kone_mouse_event_call_overlong_macro:
966 if (event->value == kone_keystroke_action_press) {
967 roccat_report.event = kone_mouse_event_call_overlong_macro;
968 roccat_report.value = kone->actual_profile;
969 roccat_report.key = event->macro_key;
970 roccat_report_event(kone->chrdev_minor,
971 (uint8_t *)&roccat_report,
972 sizeof(struct kone_roccat_report));
973 }
974 break;
975 }
976
977}
978
933/* 979/*
934 * Is called for keyboard- and mousepart. 980 * Is called for keyboard- and mousepart.
935 * Only mousepart gets informations about special events in its extended event 981 * Only mousepart gets informations about special events in its extended event
@@ -958,6 +1004,9 @@ static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
958 1004
959 kone_keep_values_up_to_date(kone, event); 1005 kone_keep_values_up_to_date(kone, event);
960 1006
1007 if (kone->roccat_claimed)
1008 kone_report_to_chrdev(kone, event);
1009
961 return 0; /* always do further processing */ 1010 return 0; /* always do further processing */
962} 1011}
963 1012
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
index b413b10a7f8a..003e6f81c195 100644
--- a/drivers/hid/hid-roccat-kone.h
+++ b/drivers/hid/hid-roccat-kone.h
@@ -189,6 +189,12 @@ enum kone_commands {
189 kone_command_firmware = 0xe5a 189 kone_command_firmware = 0xe5a
190}; 190};
191 191
192struct kone_roccat_report {
193 uint8_t event;
194 uint8_t value; /* holds dpi or profile value */
195 uint8_t key; /* macro key on overlong macro execution */
196};
197
192#pragma pack(pop) 198#pragma pack(pop)
193 199
194struct kone_device { 200struct kone_device {
@@ -219,6 +225,9 @@ struct kone_device {
219 * so it's read only once 225 * so it's read only once
220 */ 226 */
221 int firmware_version; 227 int firmware_version;
228
229 int roccat_claimed;
230 int chrdev_minor;
222}; 231};
223 232
224#endif 233#endif
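
With roccat_connect() and roccat_report_event() wired into the Kone driver above, userland receives struct kone_roccat_report records by reading the new roccat character device. A hypothetical user-space reader; the device node path below is an assumption, since the chardev major is allocated dynamically and the node name depends on udev:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct kone_roccat_report {
	uint8_t event;
	uint8_t value;	/* dpi or profile value */
	uint8_t key;	/* macro key on overlong macro execution */
};

int main(void)
{
	struct kone_roccat_report rep;
	int fd = open("/dev/roccatkone0", O_RDONLY);	/* assumed node name */

	if (fd < 0) {
		perror("open roccat chrdev");
		return 1;
	}

	while (read(fd, &rep, sizeof(rep)) == sizeof(rep))
		printf("event %u value %u key %u\n", rep.event, rep.value, rep.key);

	close(fd);
	return 0;
}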
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
new file mode 100644
index 000000000000..e05d48edb66f
--- /dev/null
+++ b/drivers/hid/hid-roccat.c
@@ -0,0 +1,428 @@
1/*
2 * Roccat driver for Linux
3 *
4 * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
5 */
6
7/*
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 */
13
14/*
15 * Module roccat is a char device used to report special events of roccat
16 * hardware to userland. These events include requests for on-screen-display of
17 * profile or dpi settings, or requests to execute macro sequences that are
18 * not stored in the device. The information in these events depends on the
19 * hid device implementation and contains data that is not available in a
20 * single hid event, which is why hidraw could not be used instead.
21 * It is inspired by hidraw, but uses only one circular buffer for all readers.
22 */
23
24#include <linux/cdev.h>
25#include <linux/poll.h>
26#include <linux/sched.h>
27
28#include "hid-roccat.h"
29
30#define ROCCAT_FIRST_MINOR 0
31#define ROCCAT_MAX_DEVICES 8
32
33/* should be a power of 2 for performance reasons */
34#define ROCCAT_CBUF_SIZE 16
35
36struct roccat_report {
37 uint8_t *value;
38 int len;
39};
40
41struct roccat_device {
42 unsigned int minor;
43 int open;
44 int exist;
45 wait_queue_head_t wait;
46 struct device *dev;
47 struct hid_device *hid;
48 struct list_head readers;
49 /* protects modifications of readers list */
50 struct mutex readers_lock;
51
52 /*
53 * circular_buffer has one writer and multiple readers with their own
54 * read pointers
55 */
56 struct roccat_report cbuf[ROCCAT_CBUF_SIZE];
57 int cbuf_end;
58 struct mutex cbuf_lock;
59};
60
61struct roccat_reader {
62 struct list_head node;
63 struct roccat_device *device;
64 int cbuf_start;
65};
66
67static int roccat_major;
68static struct class *roccat_class;
69static struct cdev roccat_cdev;
70
71static struct roccat_device *devices[ROCCAT_MAX_DEVICES];
72/* protects modifications of devices array */
73static DEFINE_MUTEX(devices_lock);
74
75static ssize_t roccat_read(struct file *file, char __user *buffer,
76 size_t count, loff_t *ppos)
77{
78 struct roccat_reader *reader = file->private_data;
79 struct roccat_device *device = reader->device;
80 struct roccat_report *report;
81 ssize_t retval = 0, len;
82 DECLARE_WAITQUEUE(wait, current);
83
84 mutex_lock(&device->cbuf_lock);
85
86 /* no data? */
87 if (reader->cbuf_start == device->cbuf_end) {
88 add_wait_queue(&device->wait, &wait);
89 set_current_state(TASK_INTERRUPTIBLE);
90
91 /* wait for data */
92 while (reader->cbuf_start == device->cbuf_end) {
93 if (file->f_flags & O_NONBLOCK) {
94 retval = -EAGAIN;
95 break;
96 }
97 if (signal_pending(current)) {
98 retval = -ERESTARTSYS;
99 break;
100 }
101 if (!device->exist) {
102 retval = -EIO;
103 break;
104 }
105
106 mutex_unlock(&device->cbuf_lock);
107 schedule();
108 mutex_lock(&device->cbuf_lock);
109 set_current_state(TASK_INTERRUPTIBLE);
110 }
111
112 set_current_state(TASK_RUNNING);
113 remove_wait_queue(&device->wait, &wait);
114 }
115
116 /* here we either have data or, if retval is set, a reason to return */
117 if (retval)
118 goto exit_unlock;
119
120 report = &device->cbuf[reader->cbuf_start];
121 /*
122 * If report is larger than requested amount of data, rest of report
123 * is lost!
124 */
125 len = report->len > count ? count : report->len;
126
127 if (copy_to_user(buffer, report->value, len)) {
128 retval = -EFAULT;
129 goto exit_unlock;
130 }
131 retval += len;
132 reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
133
134exit_unlock:
135 mutex_unlock(&device->cbuf_lock);
136 return retval;
137}
138
139static unsigned int roccat_poll(struct file *file, poll_table *wait)
140{
141 struct roccat_reader *reader = file->private_data;
142 poll_wait(file, &reader->device->wait, wait);
143 if (reader->cbuf_start != reader->device->cbuf_end)
144 return POLLIN | POLLRDNORM;
145 if (!reader->device->exist)
146 return POLLERR | POLLHUP;
147 return 0;
148}
149
150static int roccat_open(struct inode *inode, struct file *file)
151{
152 unsigned int minor = iminor(inode);
153 struct roccat_reader *reader;
154 struct roccat_device *device;
155 int error = 0;
156
157 reader = kzalloc(sizeof(struct roccat_reader), GFP_KERNEL);
158 if (!reader)
159 return -ENOMEM;
160
161 mutex_lock(&devices_lock);
162
163 device = devices[minor];
164 if (!device) {
165 printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
166 minor);
167 mutex_unlock(&devices_lock);
168 kfree(reader);
169 return -ENODEV;
170 }
171
172 mutex_lock(&device->readers_lock);
173
174 if (!device->open++) {
175 /* power on device on adding first reader */
176 if (device->hid->ll_driver->power) {
177 error = device->hid->ll_driver->power(device->hid,
178 PM_HINT_FULLON);
179 if (error < 0) {
180 --device->open;
181 goto exit_unlock;
182 }
183 }
184 error = device->hid->ll_driver->open(device->hid);
185 if (error < 0) {
186 if (device->hid->ll_driver->power)
187 device->hid->ll_driver->power(device->hid,
188 PM_HINT_NORMAL);
189 --device->open;
190 goto exit_unlock;
191 }
192 }
193
194 reader->device = device;
195 /* new reader doesn't get old events */
196 reader->cbuf_start = device->cbuf_end;
197
198 list_add_tail(&reader->node, &device->readers);
199 file->private_data = reader;
200
201exit_unlock:
202 mutex_unlock(&device->readers_lock);
203 mutex_unlock(&devices_lock);
204 return error;
205}
206
207static int roccat_release(struct inode *inode, struct file *file)
208{
209 unsigned int minor = iminor(inode);
210 struct roccat_reader *reader = file->private_data;
211 struct roccat_device *device;
212
213 mutex_lock(&devices_lock);
214
215 device = devices[minor];
216 if (!device) {
217 mutex_unlock(&devices_lock);
218 printk(KERN_EMERG "roccat device with minor %d doesn't exist\n",
219 minor);
220 return -ENODEV;
221 }
222
223 mutex_lock(&device->readers_lock);
224 list_del(&reader->node);
225 mutex_unlock(&device->readers_lock);
226 kfree(reader);
227
228 if (!--device->open) {
229 /* removing last reader */
230 if (device->exist) {
231 if (device->hid->ll_driver->power)
232 device->hid->ll_driver->power(device->hid,
233 PM_HINT_NORMAL);
234 device->hid->ll_driver->close(device->hid);
235 } else {
236 kfree(device);
237 }
238 }
239
240 mutex_unlock(&devices_lock);
241
242 return 0;
243}
244
245/*
246 * roccat_report_event() - output data to readers
247 * @minor: minor device number returned by roccat_connect()
248 * @data: pointer to data
249 * @len: size of data
250 *
251 * Return value is zero on success, a negative error code on failure.
252 *
253 * This is called from interrupt handler.
254 */
255int roccat_report_event(int minor, u8 const *data, int len)
256{
257 struct roccat_device *device;
258 struct roccat_reader *reader;
259 struct roccat_report *report;
260 uint8_t *new_value;
261
262 new_value = kmemdup(data, len, GFP_ATOMIC);
263 if (!new_value)
264 return -ENOMEM;
265
266 device = devices[minor];
267
268 report = &device->cbuf[device->cbuf_end];
269
270 /* passing NULL is safe */
271 kfree(report->value);
272
273 report->value = new_value;
274 report->len = len;
275 device->cbuf_end = (device->cbuf_end + 1) % ROCCAT_CBUF_SIZE;
276
277 list_for_each_entry(reader, &device->readers, node) {
278 /*
279 * As we already inserted one element, the buffer can't be
280 * empty. If start and end are equal, buffer is full and we
281 * increase start, so that slow reader misses one event, but
282 * gets the newer ones in the right order.
283 */
284 if (reader->cbuf_start == device->cbuf_end)
285 reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE;
286 }
287
288 wake_up_interruptible(&device->wait);
289 return 0;
290}
291EXPORT_SYMBOL_GPL(roccat_report_event);
292
293/*
294 * roccat_connect() - create a char device for special event output
295 * @hid: the hid device the char device should be connected to.
296 *
297 * Return value is the minor device number in the range
298 * [0, ROCCAT_MAX_DEVICES - 1] on success, a negative error code on failure.
299 */
300int roccat_connect(struct hid_device *hid)
301{
302 unsigned int minor;
303 struct roccat_device *device;
304 int temp;
305
306 device = kzalloc(sizeof(struct roccat_device), GFP_KERNEL);
307 if (!device)
308 return -ENOMEM;
309
310 mutex_lock(&devices_lock);
311
312 for (minor = 0; minor < ROCCAT_MAX_DEVICES; ++minor) {
313 if (devices[minor])
314 continue;
315 break;
316 }
317
318 if (minor < ROCCAT_MAX_DEVICES) {
319 devices[minor] = device;
320 } else {
321 mutex_unlock(&devices_lock);
322 kfree(device);
323 return -EINVAL;
324 }
325
326 device->dev = device_create(roccat_class, &hid->dev,
327 MKDEV(roccat_major, minor), NULL,
328 "%s%s%d", "roccat", hid->driver->name, minor);
329
330 if (IS_ERR(device->dev)) {
331 devices[minor] = NULL;
332 mutex_unlock(&devices_lock);
333 temp = PTR_ERR(device->dev);
334 kfree(device);
335 return temp;
336 }
337
338 mutex_unlock(&devices_lock);
339
340 init_waitqueue_head(&device->wait);
341 INIT_LIST_HEAD(&device->readers);
342 mutex_init(&device->readers_lock);
343 mutex_init(&device->cbuf_lock);
344 device->minor = minor;
345 device->hid = hid;
346 device->exist = 1;
347 device->cbuf_end = 0;
348
349 return minor;
350}
351EXPORT_SYMBOL_GPL(roccat_connect);
352
353/* roccat_disconnect() - remove char device from hid device
354 * @minor: the minor device number returned by roccat_connect()
355 */
356void roccat_disconnect(int minor)
357{
358 struct roccat_device *device;
359
360 mutex_lock(&devices_lock);
361 device = devices[minor];
362 devices[minor] = NULL;
363 mutex_unlock(&devices_lock);
364
365 device->exist = 0; /* TODO exist maybe not needed */
366
367 device_destroy(roccat_class, MKDEV(roccat_major, minor));
368
369 if (device->open) {
370 device->hid->ll_driver->close(device->hid);
371 wake_up_interruptible(&device->wait);
372 } else {
373 kfree(device);
374 }
375}
376EXPORT_SYMBOL_GPL(roccat_disconnect);
377
378static const struct file_operations roccat_ops = {
379 .owner = THIS_MODULE,
380 .read = roccat_read,
381 .poll = roccat_poll,
382 .open = roccat_open,
383 .release = roccat_release,
384};
385
386static int __init roccat_init(void)
387{
388 int retval;
389 dev_t dev_id;
390
391 retval = alloc_chrdev_region(&dev_id, ROCCAT_FIRST_MINOR,
392 ROCCAT_MAX_DEVICES, "roccat");
393
394 if (retval < 0) {
395 printk(KERN_WARNING "roccat: can't get major number\n");
396 return retval;
397 }
398
399 roccat_major = MAJOR(dev_id);
400
401 roccat_class = class_create(THIS_MODULE, "roccat");
402 if (IS_ERR(roccat_class)) {
403 retval = PTR_ERR(roccat_class);
404 unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
405 return retval;
406 }
407
408 cdev_init(&roccat_cdev, &roccat_ops);
409 cdev_add(&roccat_cdev, dev_id, ROCCAT_MAX_DEVICES);
410
411 return 0;
412}
413
414static void __exit roccat_exit(void)
415{
416 dev_t dev_id = MKDEV(roccat_major, 0);
417
418 cdev_del(&roccat_cdev);
419 class_destroy(roccat_class);
420 unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES);
421}
422
423module_init(roccat_init);
424module_exit(roccat_exit);
425
426MODULE_AUTHOR("Stefan Achatz");
427MODULE_DESCRIPTION("USB Roccat char device");
428MODULE_LICENSE("GPL v2");
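
The one-writer/many-readers ring buffer above is small enough to model outside the kernel. The sketch below mirrors the index arithmetic of roccat_report_event() and roccat_read(): the writer always advances cbuf_end, and any reader whose position collides with the new end is pushed forward one slot, so a slow reader drops its oldest event but keeps ordering. The names and the simplified single-threaded, single-reader setting are illustrative only.

/* Standalone model of the hid-roccat ring-buffer indexing. */
#include <stdio.h>

#define CBUF_SIZE 16	/* power of 2, as in the driver */

static int cbuf_end;		/* writer position */
static int reader_start;	/* one reader's position */
static int stored[CBUF_SIZE];

static void report_event(int value)
{
	stored[cbuf_end] = value;
	cbuf_end = (cbuf_end + 1) % CBUF_SIZE;
	/* buffer now full for this reader: skip its oldest event */
	if (reader_start == cbuf_end)
		reader_start = (reader_start + 1) % CBUF_SIZE;
}

static int read_event(int *value)
{
	if (reader_start == cbuf_end)
		return 0;	/* no data */
	*value = stored[reader_start];
	reader_start = (reader_start + 1) % CBUF_SIZE;
	return 1;
}

int main(void)
{
	int i, v;

	/* write more events than the buffer holds */
	for (i = 0; i < CBUF_SIZE + 4; i++)
		report_event(i);
	/* the reader now sees only the newest CBUF_SIZE - 1 events: 5..19 */
	while (read_event(&v))
		printf("%d\n", v);
	return 0;
}
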
diff --git a/drivers/hid/hid-roccat.h b/drivers/hid/hid-roccat.h
new file mode 100644
index 000000000000..d8aae0c1fa7e
--- /dev/null
+++ b/drivers/hid/hid-roccat.h
@@ -0,0 +1,31 @@
1#ifndef __HID_ROCCAT_H
2#define __HID_ROCCAT_H
3
4/*
5 * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
6 */
7
8/*
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 */
14
15#include <linux/hid.h>
16#include <linux/types.h>
17
18#if defined(CONFIG_HID_ROCCAT) || defined (CONFIG_HID_ROCCAT_MODULE)
19int roccat_connect(struct hid_device *hid);
20void roccat_disconnect(int minor);
21int roccat_report_event(int minor, u8 const *data, int len);
22#else
23static inline int roccat_connect(struct hid_device *hid) { return -1; }
24static inline void roccat_disconnect(int minor) {}
25static inline int roccat_report_event(int minor, u8 const *data, int len)
26{
27 return 0;
28}
29#endif
30
31#endif
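
The header above is the whole interface a device-specific HID driver gets: roccat_connect() at probe time, roccat_report_event() from the raw-event path, and roccat_disconnect() on removal. A hedged sketch of that usage pattern follows; the wrapper names and the my_dev structure are hypothetical, and only the three roccat_* calls come from the header.

/* Hypothetical consumer of the hid-roccat interface (sketch only). */
#include <linux/hid.h>
#include "hid-roccat.h"

struct my_dev {
	int roccat_claimed;
	int chrdev_minor;
};

/* called at the end of the HID driver's probe() */
static void my_connect_chrdev(struct hid_device *hdev, struct my_dev *my)
{
	int retval = roccat_connect(hdev);

	if (retval < 0) {
		dev_warn(&hdev->dev, "couldn't init char device\n");
	} else {
		my->roccat_claimed = 1;
		my->chrdev_minor = retval;
	}
}

/* called from the driver's raw_event() handler */
static void my_forward_event(struct my_dev *my, u8 const *data, int len)
{
	if (my->roccat_claimed)
		roccat_report_event(my->chrdev_minor, data, len);
}

/* called from the driver's remove() */
static void my_disconnect_chrdev(struct my_dev *my)
{
	if (my->roccat_claimed)
		roccat_disconnect(my->chrdev_minor);
}
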
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6a9ac754ca5d..e19cf8eb6ccf 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -447,13 +447,14 @@ config SENSORS_IT87
447 will be called it87. 447 will be called it87.
448 448
449config SENSORS_LM63 449config SENSORS_LM63
450 tristate "National Semiconductor LM63" 450 tristate "National Semiconductor LM63 and LM64"
451 depends on I2C 451 depends on I2C
452 help 452 help
453 If you say yes here you get support for the National Semiconductor 453 If you say yes here you get support for the National
454 LM63 remote diode digital temperature sensor with integrated fan 454 Semiconductor LM63 and LM64 remote diode digital temperature
455 control. Such chips are found on the Tyan S4882 (Thunder K8QS Pro) 455 sensors with integrated fan control. Such chips are found
456 motherboard, among others. 456 on the Tyan S4882 (Thunder K8QS Pro) motherboard, among
457 others.
457 458
458 This driver can also be built as a module. If so, the module 459 This driver can also be built as a module. If so, the module
459 will be called lm63. 460 will be called lm63.
@@ -492,7 +493,8 @@ config SENSORS_LM75
492 - NXP's LM75A 493 - NXP's LM75A
493 - ST Microelectronics STDS75 494 - ST Microelectronics STDS75
494 - TelCom (now Microchip) TCN75 495 - TelCom (now Microchip) TCN75
495 - Texas Instruments TMP100, TMP101, TMP75, TMP175, TMP275 496 - Texas Instruments TMP100, TMP101, TMP105, TMP75, TMP175,
497 TMP275
496 498
497 This driver supports driver model based binding through board 499 This driver supports driver model based binding through board
498 specific I2C device tables. 500 specific I2C device tables.
@@ -749,6 +751,16 @@ config SENSORS_DME1737
749 This driver can also be built as a module. If so, the module 751 This driver can also be built as a module. If so, the module
750 will be called dme1737. 752 will be called dme1737.
751 753
754config SENSORS_EMC1403
755 tristate "SMSC EMC1403 thermal sensor"
756 depends on I2C
757 help
758 If you say yes here you get support for the SMSC EMC1403
759 temperature monitoring chip.
760
761 Threshold values can be configured using sysfs.
762 Data from the different diodes are accessible via sysfs.
763
752config SENSORS_SMSC47M1 764config SENSORS_SMSC47M1
753 tristate "SMSC LPC47M10x and compatibles" 765 tristate "SMSC LPC47M10x and compatibles"
754 help 766 help
@@ -831,6 +843,16 @@ config SENSORS_THMC50
831 This driver can also be built as a module. If so, the module 843 This driver can also be built as a module. If so, the module
832 will be called thmc50. 844 will be called thmc50.
833 845
846config SENSORS_TMP102
847 tristate "Texas Instruments TMP102"
848 depends on I2C && EXPERIMENTAL
849 help
850 If you say yes here you get support for Texas Instruments TMP102
851 sensor chips.
852
853 This driver can also be built as a module. If so, the module
854 will be called tmp102.
855
834config SENSORS_TMP401 856config SENSORS_TMP401
835 tristate "Texas Instruments TMP401 and compatibles" 857 tristate "Texas Instruments TMP401 and compatibles"
836 depends on I2C && EXPERIMENTAL 858 depends on I2C && EXPERIMENTAL
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 86920fb34118..2138ceb1a713 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
41obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o 41obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
42obj-$(CONFIG_SENSORS_DME1737) += dme1737.o 42obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
43obj-$(CONFIG_SENSORS_DS1621) += ds1621.o 43obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
44obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o
44obj-$(CONFIG_SENSORS_F71805F) += f71805f.o 45obj-$(CONFIG_SENSORS_F71805F) += f71805f.o
45obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o 46obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o
46obj-$(CONFIG_SENSORS_F75375S) += f75375s.o 47obj-$(CONFIG_SENSORS_F75375S) += f75375s.o
@@ -90,6 +91,7 @@ obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
90obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o 91obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
91obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o 92obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o
92obj-$(CONFIG_SENSORS_THMC50) += thmc50.o 93obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
94obj-$(CONFIG_SENSORS_TMP102) += tmp102.o
93obj-$(CONFIG_SENSORS_TMP401) += tmp401.o 95obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
94obj-$(CONFIG_SENSORS_TMP421) += tmp421.o 96obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
95obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o 97obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 1644b92e7cc4..15c1a9616af3 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -36,6 +36,7 @@
36#define ADM1031_REG_FAN_DIV(nr) (0x20 + (nr)) 36#define ADM1031_REG_FAN_DIV(nr) (0x20 + (nr))
37#define ADM1031_REG_PWM (0x22) 37#define ADM1031_REG_PWM (0x22)
38#define ADM1031_REG_FAN_MIN(nr) (0x10 + (nr)) 38#define ADM1031_REG_FAN_MIN(nr) (0x10 + (nr))
39#define ADM1031_REG_FAN_FILTER (0x23)
39 40
40#define ADM1031_REG_TEMP_OFFSET(nr) (0x0d + (nr)) 41#define ADM1031_REG_TEMP_OFFSET(nr) (0x0d + (nr))
41#define ADM1031_REG_TEMP_MAX(nr) (0x14 + 4 * (nr)) 42#define ADM1031_REG_TEMP_MAX(nr) (0x14 + 4 * (nr))
@@ -61,6 +62,9 @@
61#define ADM1031_CONF2_TACH2_ENABLE 0x08 62#define ADM1031_CONF2_TACH2_ENABLE 0x08
62#define ADM1031_CONF2_TEMP_ENABLE(chan) (0x10 << (chan)) 63#define ADM1031_CONF2_TEMP_ENABLE(chan) (0x10 << (chan))
63 64
65#define ADM1031_UPDATE_RATE_MASK 0x1c
66#define ADM1031_UPDATE_RATE_SHIFT 2
67
64/* Addresses to scan */ 68/* Addresses to scan */
65static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 69static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
66 70
@@ -75,6 +79,7 @@ struct adm1031_data {
75 int chip_type; 79 int chip_type;
76 char valid; /* !=0 if following fields are valid */ 80 char valid; /* !=0 if following fields are valid */
77 unsigned long last_updated; /* In jiffies */ 81 unsigned long last_updated; /* In jiffies */
82 unsigned int update_rate; /* In milliseconds */
78 /* The chan_select_table contains the possible configurations for 83 /* The chan_select_table contains the possible configurations for
79 * auto fan control. 84 * auto fan control.
80 */ 85 */
@@ -738,6 +743,57 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
738static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13); 743static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
739static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14); 744static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
740 745
746/* Update Rate */
747static const unsigned int update_rates[] = {
748 16000, 8000, 4000, 2000, 1000, 500, 250, 125,
749};
750
751static ssize_t show_update_rate(struct device *dev,
752 struct device_attribute *attr, char *buf)
753{
754 struct i2c_client *client = to_i2c_client(dev);
755 struct adm1031_data *data = i2c_get_clientdata(client);
756
757 return sprintf(buf, "%u\n", data->update_rate);
758}
759
760static ssize_t set_update_rate(struct device *dev,
761 struct device_attribute *attr,
762 const char *buf, size_t count)
763{
764 struct i2c_client *client = to_i2c_client(dev);
765 struct adm1031_data *data = i2c_get_clientdata(client);
766 unsigned long val;
767 int i, err;
768 u8 reg;
769
770 err = strict_strtoul(buf, 10, &val);
771 if (err)
772 return err;
773
774 /* find the largest update rate in the table that does not exceed val */
775 for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) {
776 if (val >= update_rates[i])
777 break;
778 }
779 /* if not found, we point to the last entry (lowest update rate) */
780
781 /* set the new update rate while preserving other settings */
782 reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
783 reg &= ~ADM1031_UPDATE_RATE_MASK;
784 reg |= i << ADM1031_UPDATE_RATE_SHIFT;
785 adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
786
787 mutex_lock(&data->update_lock);
788 data->update_rate = update_rates[i];
789 mutex_unlock(&data->update_lock);
790
791 return count;
792}
793
794static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate,
795 set_update_rate);
796
741static struct attribute *adm1031_attributes[] = { 797static struct attribute *adm1031_attributes[] = {
742 &sensor_dev_attr_fan1_input.dev_attr.attr, 798 &sensor_dev_attr_fan1_input.dev_attr.attr,
743 &sensor_dev_attr_fan1_div.dev_attr.attr, 799 &sensor_dev_attr_fan1_div.dev_attr.attr,
@@ -774,6 +830,7 @@ static struct attribute *adm1031_attributes[] = {
774 830
775 &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr, 831 &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
776 832
833 &dev_attr_update_rate.attr,
777 &dev_attr_alarms.attr, 834 &dev_attr_alarms.attr,
778 835
779 NULL 836 NULL
@@ -900,6 +957,7 @@ static void adm1031_init_client(struct i2c_client *client)
900{ 957{
901 unsigned int read_val; 958 unsigned int read_val;
902 unsigned int mask; 959 unsigned int mask;
960 int i;
903 struct adm1031_data *data = i2c_get_clientdata(client); 961 struct adm1031_data *data = i2c_get_clientdata(client);
904 962
905 mask = (ADM1031_CONF2_PWM1_ENABLE | ADM1031_CONF2_TACH1_ENABLE); 963 mask = (ADM1031_CONF2_PWM1_ENABLE | ADM1031_CONF2_TACH1_ENABLE);
@@ -919,18 +977,24 @@ static void adm1031_init_client(struct i2c_client *client)
919 ADM1031_CONF1_MONITOR_ENABLE); 977 ADM1031_CONF1_MONITOR_ENABLE);
920 } 978 }
921 979
980 /* Read the chip's update rate */
981 mask = ADM1031_UPDATE_RATE_MASK;
982 read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
983 i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
984 data->update_rate = update_rates[i];
922} 985}
923 986
924static struct adm1031_data *adm1031_update_device(struct device *dev) 987static struct adm1031_data *adm1031_update_device(struct device *dev)
925{ 988{
926 struct i2c_client *client = to_i2c_client(dev); 989 struct i2c_client *client = to_i2c_client(dev);
927 struct adm1031_data *data = i2c_get_clientdata(client); 990 struct adm1031_data *data = i2c_get_clientdata(client);
991 unsigned long next_update;
928 int chan; 992 int chan;
929 993
930 mutex_lock(&data->update_lock); 994 mutex_lock(&data->update_lock);
931 995
932 if (time_after(jiffies, data->last_updated + HZ + HZ / 2) 996 next_update = data->last_updated + msecs_to_jiffies(data->update_rate);
933 || !data->valid) { 997 if (time_after(jiffies, next_update) || !data->valid) {
934 998
935 dev_dbg(&client->dev, "Starting adm1031 update\n"); 999 dev_dbg(&client->dev, "Starting adm1031 update\n");
936 for (chan = 0; 1000 for (chan = 0;
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index f085c18d2905..b6598aa557a0 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -148,6 +148,20 @@ static const char *temperature_sensors_sets[][41] = {
148/* Set 18: MacBook Pro 2,2 */ 148/* Set 18: MacBook Pro 2,2 */
149 { "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0", 149 { "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0",
150 "Th0H", "Th1H", "Tm0P", "Ts0P", NULL }, 150 "Th0H", "Th1H", "Tm0P", "Ts0P", NULL },
151/* Set 19: MacBook Pro 5,3 */
152 { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TG0D",
153 "TG0F", "TG0H", "TG0P", "TG0T", "TN0D", "TN0P", "TTF0", "Th2H",
154 "Tm0P", "Ts0P", "Ts0S", NULL },
155/* Set 20: MacBook Pro 5,4 */
156 { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TN0D",
157 "TN0P", "TTF0", "Th2H", "Ts0P", "Ts0S", NULL },
158/* Set 21: MacBook Pro 6,2 */
159 { "TB0T", "TB1T", "TB2T", "TC0C", "TC0D", "TC0P", "TC1C", "TG0D",
160 "TG0P", "TG0T", "TMCD", "TP0P", "TPCD", "Th1H", "Th2H", "Tm0P",
161 "Ts0P", "Ts0S", NULL },
162/* Set 22: MacBook Pro 7,1 */
163 { "TB0T", "TB1T", "TB2T", "TC0D", "TC0P", "TN0D", "TN0P", "TN0S",
164 "TN1D", "TN1F", "TN1G", "TN1S", "Th1H", "Ts0P", "Ts0S", NULL },
151}; 165};
152 166
153/* List of keys used to read/write fan speeds */ 167/* List of keys used to read/write fan speeds */
@@ -646,6 +660,17 @@ out:
646 return snprintf(sysfsbuf, PAGE_SIZE, "(%d,%d)\n", left, right); 660 return snprintf(sysfsbuf, PAGE_SIZE, "(%d,%d)\n", left, right);
647} 661}
648 662
663/* Displays sensor key as label */
664static ssize_t applesmc_show_sensor_label(struct device *dev,
665 struct device_attribute *devattr, char *sysfsbuf)
666{
667 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
668 const char *key =
669 temperature_sensors_sets[applesmc_temperature_set][attr->index];
670
671 return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", key);
672}
673
649/* Displays degree Celsius * 1000 */ 674/* Displays degree Celsius * 1000 */
650static ssize_t applesmc_show_temperature(struct device *dev, 675static ssize_t applesmc_show_temperature(struct device *dev,
651 struct device_attribute *devattr, char *sysfsbuf) 676 struct device_attribute *devattr, char *sysfsbuf)
@@ -1113,6 +1138,86 @@ static const struct attribute_group fan_attribute_groups[] = {
1113/* 1138/*
1114 * Temperature sensors sysfs entries. 1139 * Temperature sensors sysfs entries.
1115 */ 1140 */
1141static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO,
1142 applesmc_show_sensor_label, NULL, 0);
1143static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO,
1144 applesmc_show_sensor_label, NULL, 1);
1145static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO,
1146 applesmc_show_sensor_label, NULL, 2);
1147static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO,
1148 applesmc_show_sensor_label, NULL, 3);
1149static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO,
1150 applesmc_show_sensor_label, NULL, 4);
1151static SENSOR_DEVICE_ATTR(temp6_label, S_IRUGO,
1152 applesmc_show_sensor_label, NULL, 5);
1153static SENSOR_DEVICE_ATTR(temp7_label, S_IRUGO,
1154 applesmc_show_sensor_label, NULL, 6);
1155static SENSOR_DEVICE_ATTR(temp8_label, S_IRUGO,
1156 applesmc_show_sensor_label, NULL, 7);
1157static SENSOR_DEVICE_ATTR(temp9_label, S_IRUGO,
1158 applesmc_show_sensor_label, NULL, 8);
1159static SENSOR_DEVICE_ATTR(temp10_label, S_IRUGO,
1160 applesmc_show_sensor_label, NULL, 9);
1161static SENSOR_DEVICE_ATTR(temp11_label, S_IRUGO,
1162 applesmc_show_sensor_label, NULL, 10);
1163static SENSOR_DEVICE_ATTR(temp12_label, S_IRUGO,
1164 applesmc_show_sensor_label, NULL, 11);
1165static SENSOR_DEVICE_ATTR(temp13_label, S_IRUGO,
1166 applesmc_show_sensor_label, NULL, 12);
1167static SENSOR_DEVICE_ATTR(temp14_label, S_IRUGO,
1168 applesmc_show_sensor_label, NULL, 13);
1169static SENSOR_DEVICE_ATTR(temp15_label, S_IRUGO,
1170 applesmc_show_sensor_label, NULL, 14);
1171static SENSOR_DEVICE_ATTR(temp16_label, S_IRUGO,
1172 applesmc_show_sensor_label, NULL, 15);
1173static SENSOR_DEVICE_ATTR(temp17_label, S_IRUGO,
1174 applesmc_show_sensor_label, NULL, 16);
1175static SENSOR_DEVICE_ATTR(temp18_label, S_IRUGO,
1176 applesmc_show_sensor_label, NULL, 17);
1177static SENSOR_DEVICE_ATTR(temp19_label, S_IRUGO,
1178 applesmc_show_sensor_label, NULL, 18);
1179static SENSOR_DEVICE_ATTR(temp20_label, S_IRUGO,
1180 applesmc_show_sensor_label, NULL, 19);
1181static SENSOR_DEVICE_ATTR(temp21_label, S_IRUGO,
1182 applesmc_show_sensor_label, NULL, 20);
1183static SENSOR_DEVICE_ATTR(temp22_label, S_IRUGO,
1184 applesmc_show_sensor_label, NULL, 21);
1185static SENSOR_DEVICE_ATTR(temp23_label, S_IRUGO,
1186 applesmc_show_sensor_label, NULL, 22);
1187static SENSOR_DEVICE_ATTR(temp24_label, S_IRUGO,
1188 applesmc_show_sensor_label, NULL, 23);
1189static SENSOR_DEVICE_ATTR(temp25_label, S_IRUGO,
1190 applesmc_show_sensor_label, NULL, 24);
1191static SENSOR_DEVICE_ATTR(temp26_label, S_IRUGO,
1192 applesmc_show_sensor_label, NULL, 25);
1193static SENSOR_DEVICE_ATTR(temp27_label, S_IRUGO,
1194 applesmc_show_sensor_label, NULL, 26);
1195static SENSOR_DEVICE_ATTR(temp28_label, S_IRUGO,
1196 applesmc_show_sensor_label, NULL, 27);
1197static SENSOR_DEVICE_ATTR(temp29_label, S_IRUGO,
1198 applesmc_show_sensor_label, NULL, 28);
1199static SENSOR_DEVICE_ATTR(temp30_label, S_IRUGO,
1200 applesmc_show_sensor_label, NULL, 29);
1201static SENSOR_DEVICE_ATTR(temp31_label, S_IRUGO,
1202 applesmc_show_sensor_label, NULL, 30);
1203static SENSOR_DEVICE_ATTR(temp32_label, S_IRUGO,
1204 applesmc_show_sensor_label, NULL, 31);
1205static SENSOR_DEVICE_ATTR(temp33_label, S_IRUGO,
1206 applesmc_show_sensor_label, NULL, 32);
1207static SENSOR_DEVICE_ATTR(temp34_label, S_IRUGO,
1208 applesmc_show_sensor_label, NULL, 33);
1209static SENSOR_DEVICE_ATTR(temp35_label, S_IRUGO,
1210 applesmc_show_sensor_label, NULL, 34);
1211static SENSOR_DEVICE_ATTR(temp36_label, S_IRUGO,
1212 applesmc_show_sensor_label, NULL, 35);
1213static SENSOR_DEVICE_ATTR(temp37_label, S_IRUGO,
1214 applesmc_show_sensor_label, NULL, 36);
1215static SENSOR_DEVICE_ATTR(temp38_label, S_IRUGO,
1216 applesmc_show_sensor_label, NULL, 37);
1217static SENSOR_DEVICE_ATTR(temp39_label, S_IRUGO,
1218 applesmc_show_sensor_label, NULL, 38);
1219static SENSOR_DEVICE_ATTR(temp40_label, S_IRUGO,
1220 applesmc_show_sensor_label, NULL, 39);
1116static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, 1221static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
1117 applesmc_show_temperature, NULL, 0); 1222 applesmc_show_temperature, NULL, 0);
1118static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, 1223static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
@@ -1194,6 +1299,50 @@ static SENSOR_DEVICE_ATTR(temp39_input, S_IRUGO,
1194static SENSOR_DEVICE_ATTR(temp40_input, S_IRUGO, 1299static SENSOR_DEVICE_ATTR(temp40_input, S_IRUGO,
1195 applesmc_show_temperature, NULL, 39); 1300 applesmc_show_temperature, NULL, 39);
1196 1301
1302static struct attribute *label_attributes[] = {
1303 &sensor_dev_attr_temp1_label.dev_attr.attr,
1304 &sensor_dev_attr_temp2_label.dev_attr.attr,
1305 &sensor_dev_attr_temp3_label.dev_attr.attr,
1306 &sensor_dev_attr_temp4_label.dev_attr.attr,
1307 &sensor_dev_attr_temp5_label.dev_attr.attr,
1308 &sensor_dev_attr_temp6_label.dev_attr.attr,
1309 &sensor_dev_attr_temp7_label.dev_attr.attr,
1310 &sensor_dev_attr_temp8_label.dev_attr.attr,
1311 &sensor_dev_attr_temp9_label.dev_attr.attr,
1312 &sensor_dev_attr_temp10_label.dev_attr.attr,
1313 &sensor_dev_attr_temp11_label.dev_attr.attr,
1314 &sensor_dev_attr_temp12_label.dev_attr.attr,
1315 &sensor_dev_attr_temp13_label.dev_attr.attr,
1316 &sensor_dev_attr_temp14_label.dev_attr.attr,
1317 &sensor_dev_attr_temp15_label.dev_attr.attr,
1318 &sensor_dev_attr_temp16_label.dev_attr.attr,
1319 &sensor_dev_attr_temp17_label.dev_attr.attr,
1320 &sensor_dev_attr_temp18_label.dev_attr.attr,
1321 &sensor_dev_attr_temp19_label.dev_attr.attr,
1322 &sensor_dev_attr_temp20_label.dev_attr.attr,
1323 &sensor_dev_attr_temp21_label.dev_attr.attr,
1324 &sensor_dev_attr_temp22_label.dev_attr.attr,
1325 &sensor_dev_attr_temp23_label.dev_attr.attr,
1326 &sensor_dev_attr_temp24_label.dev_attr.attr,
1327 &sensor_dev_attr_temp25_label.dev_attr.attr,
1328 &sensor_dev_attr_temp26_label.dev_attr.attr,
1329 &sensor_dev_attr_temp27_label.dev_attr.attr,
1330 &sensor_dev_attr_temp28_label.dev_attr.attr,
1331 &sensor_dev_attr_temp29_label.dev_attr.attr,
1332 &sensor_dev_attr_temp30_label.dev_attr.attr,
1333 &sensor_dev_attr_temp31_label.dev_attr.attr,
1334 &sensor_dev_attr_temp32_label.dev_attr.attr,
1335 &sensor_dev_attr_temp33_label.dev_attr.attr,
1336 &sensor_dev_attr_temp34_label.dev_attr.attr,
1337 &sensor_dev_attr_temp35_label.dev_attr.attr,
1338 &sensor_dev_attr_temp36_label.dev_attr.attr,
1339 &sensor_dev_attr_temp37_label.dev_attr.attr,
1340 &sensor_dev_attr_temp38_label.dev_attr.attr,
1341 &sensor_dev_attr_temp39_label.dev_attr.attr,
1342 &sensor_dev_attr_temp40_label.dev_attr.attr,
1343 NULL
1344};
1345
1197static struct attribute *temperature_attributes[] = { 1346static struct attribute *temperature_attributes[] = {
1198 &sensor_dev_attr_temp1_input.dev_attr.attr, 1347 &sensor_dev_attr_temp1_input.dev_attr.attr,
1199 &sensor_dev_attr_temp2_input.dev_attr.attr, 1348 &sensor_dev_attr_temp2_input.dev_attr.attr,
@@ -1241,6 +1390,10 @@ static struct attribute *temperature_attributes[] = {
1241static const struct attribute_group temperature_attributes_group = 1390static const struct attribute_group temperature_attributes_group =
1242 { .attrs = temperature_attributes }; 1391 { .attrs = temperature_attributes };
1243 1392
1393static const struct attribute_group label_attributes_group = {
1394 .attrs = label_attributes
1395};
1396
1244/* Module stuff */ 1397/* Module stuff */
1245 1398
1246/* 1399/*
@@ -1363,6 +1516,14 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
1363 { .accelerometer = 0, .light = 0, .temperature_set = 17 }, 1516 { .accelerometer = 0, .light = 0, .temperature_set = 17 },
1364/* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */ 1517/* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */
1365 { .accelerometer = 1, .light = 1, .temperature_set = 18 }, 1518 { .accelerometer = 1, .light = 1, .temperature_set = 18 },
1519/* MacBook Pro 5,3: accelerometer, backlight and temperature set 19 */
1520 { .accelerometer = 1, .light = 1, .temperature_set = 19 },
1521/* MacBook Pro 5,4: accelerometer, backlight and temperature set 20 */
1522 { .accelerometer = 1, .light = 1, .temperature_set = 20 },
1523/* MacBook Pro 6,2: accelerometer, backlight and temperature set 21 */
1524 { .accelerometer = 1, .light = 1, .temperature_set = 21 },
1525/* MacBook Pro 7,1: accelerometer, backlight and temperature set 22 */
1526 { .accelerometer = 1, .light = 1, .temperature_set = 22 },
1366}; 1527};
1367 1528
1368/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". 1529/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1376,6 +1537,22 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1376 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1537 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1377 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") }, 1538 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") },
1378 &applesmc_dmi_data[7]}, 1539 &applesmc_dmi_data[7]},
1540 { applesmc_dmi_match, "Apple MacBook Pro 7", {
1541 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1542 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro7") },
1543 &applesmc_dmi_data[22]},
1544 { applesmc_dmi_match, "Apple MacBook Pro 5,4", {
1545 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1546 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4") },
1547 &applesmc_dmi_data[20]},
1548 { applesmc_dmi_match, "Apple MacBook Pro 5,3", {
1549 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1550 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3") },
1551 &applesmc_dmi_data[19]},
1552 { applesmc_dmi_match, "Apple MacBook Pro 6", {
1553 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1554 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6") },
1555 &applesmc_dmi_data[21]},
1379 { applesmc_dmi_match, "Apple MacBook Pro 5", { 1556 { applesmc_dmi_match, "Apple MacBook Pro 5", {
1380 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), 1557 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
1381 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5") }, 1558 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5") },
@@ -1518,7 +1695,8 @@ static int __init applesmc_init(void)
1518 for (i = 0; 1695 for (i = 0;
1519 temperature_sensors_sets[applesmc_temperature_set][i] != NULL; 1696 temperature_sensors_sets[applesmc_temperature_set][i] != NULL;
1520 i++) { 1697 i++) {
1521 if (temperature_attributes[i] == NULL) { 1698 if (temperature_attributes[i] == NULL ||
1699 label_attributes[i] == NULL) {
1522 printk(KERN_ERR "applesmc: More temperature sensors " 1700 printk(KERN_ERR "applesmc: More temperature sensors "
1523 "in temperature_sensors_sets (at least %i)" 1701 "in temperature_sensors_sets (at least %i)"
1524 "than available sysfs files in " 1702 "than available sysfs files in "
@@ -1530,6 +1708,10 @@ static int __init applesmc_init(void)
1530 temperature_attributes[i]); 1708 temperature_attributes[i]);
1531 if (ret) 1709 if (ret)
1532 goto out_temperature; 1710 goto out_temperature;
1711 ret = sysfs_create_file(&pdev->dev.kobj,
1712 label_attributes[i]);
1713 if (ret)
1714 goto out_temperature;
1533 } 1715 }
1534 1716
1535 if (applesmc_accelerometer) { 1717 if (applesmc_accelerometer) {
@@ -1580,6 +1762,7 @@ out_accelerometer:
1580 if (applesmc_accelerometer) 1762 if (applesmc_accelerometer)
1581 applesmc_release_accelerometer(); 1763 applesmc_release_accelerometer();
1582out_temperature: 1764out_temperature:
1765 sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group);
1583 sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group); 1766 sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
1584out_fans: 1767out_fans:
1585 while (fans_handled) 1768 while (fans_handled)
@@ -1609,6 +1792,7 @@ static void __exit applesmc_exit(void)
1609 } 1792 }
1610 if (applesmc_accelerometer) 1793 if (applesmc_accelerometer)
1611 applesmc_release_accelerometer(); 1794 applesmc_release_accelerometer();
1795 sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group);
1612 sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group); 1796 sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group);
1613 while (fans_handled) 1797 while (fans_handled)
1614 sysfs_remove_group(&pdev->dev.kobj, 1798 sysfs_remove_group(&pdev->dev.kobj,
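
With the tempN_label attributes added above, userspace can pair each SMC key name with its reading. The sketch below shows one way to do that; the sysfs base path of the applesmc platform device is an assumption, not something this patch defines.

/* Hypothetical userspace pairing of applesmc tempN_label and tempN_input. */
#include <stdio.h>

int main(void)
{
	/* platform device path is an assumption */
	const char *base = "/sys/devices/platform/applesmc.768";
	char path[256], label[16];
	int i, temp;

	for (i = 1; i <= 40; i++) {
		FILE *f;

		snprintf(path, sizeof(path), "%s/temp%d_label", base, i);
		f = fopen(path, "r");
		if (!f)
			break;	/* no more sensors in this temperature set */
		if (fscanf(f, "%15s", label) != 1)
			label[0] = '\0';
		fclose(f);

		snprintf(path, sizeof(path), "%s/temp%d_input", base, i);
		f = fopen(path, "r");
		if (!f)
			break;
		if (fscanf(f, "%d", &temp) == 1)
			printf("%s: %d.%03d degC\n", label, temp / 1000, temp % 1000);
		fclose(f);
	}
	return 0;
}
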
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 16c420240724..653db1bda934 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -1411,6 +1411,13 @@ static int __init atk0110_init(void)
1411{ 1411{
1412 int ret; 1412 int ret;
1413 1413
1414 /* Make sure it's safe to access the device through ACPI */
1415 if (!acpi_resources_are_enforced()) {
1416 pr_err("atk: Resources not safely usable due to "
1417 "acpi_enforce_resources kernel parameter\n");
1418 return -EBUSY;
1419 }
1420
1414 ret = acpi_bus_register_driver(&atk_driver); 1421 ret = acpi_bus_register_driver(&atk_driver);
1415 if (ret) 1422 if (ret)
1416 pr_info("atk: acpi_bus_register_driver failed: %d\n", ret); 1423 pr_info("atk: acpi_bus_register_driver failed: %d\n", ret);
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 823dd28a902c..980c17d5eeae 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -1,12 +1,14 @@
1/* 1/*
2 * dme1737.c - Driver for the SMSC DME1737, Asus A8000, SMSC SCH311x and 2 * dme1737.c - Driver for the SMSC DME1737, Asus A8000, SMSC SCH311x, SCH5027,
3 * SCH5027 Super-I/O chips integrated hardware monitoring features. 3 * and SCH5127 Super-I/O chips integrated hardware monitoring
4 * Copyright (c) 2007, 2008 Juerg Haefliger <juergh@gmail.com> 4 * features.
5 * Copyright (c) 2007, 2008, 2009, 2010 Juerg Haefliger <juergh@gmail.com>
5 * 6 *
6 * This driver is an I2C/ISA hybrid, meaning that it uses the I2C bus to access 7 * This driver is an I2C/ISA hybrid, meaning that it uses the I2C bus to access
7 * the chip registers if a DME1737, A8000, or SCH5027 is found and the ISA bus 8 * the chip registers if a DME1737, A8000, or SCH5027 is found and the ISA bus
8 * if a SCH311x chip is found. Both types of chips have very similar hardware 9 * if a SCH311x or SCH5127 chip is found. Both types of chips have very
9 * monitoring capabilities but differ in the way they can be accessed. 10 * similar hardware monitoring capabilities but differ in the way they can be
11 * accessed.
10 * 12 *
11 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 14 * it under the terms of the GNU General Public License as published by
@@ -57,7 +59,7 @@ MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC "
57/* Addresses to scan */ 59/* Addresses to scan */
58static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END}; 60static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END};
59 61
60enum chips { dme1737, sch5027, sch311x }; 62enum chips { dme1737, sch5027, sch311x, sch5127 };
61 63
62/* --------------------------------------------------------------------- 64/* ---------------------------------------------------------------------
63 * Registers 65 * Registers
@@ -164,10 +166,29 @@ static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23};
164#define DME1737_VERSTEP_MASK 0xf8 166#define DME1737_VERSTEP_MASK 0xf8
165#define SCH311X_DEVICE 0x8c 167#define SCH311X_DEVICE 0x8c
166#define SCH5027_VERSTEP 0x69 168#define SCH5027_VERSTEP 0x69
169#define SCH5127_DEVICE 0x8e
170
171/* Device ID values (global configuration register index 0x20) */
172#define DME1737_ID_1 0x77
173#define DME1737_ID_2 0x78
174#define SCH3112_ID 0x7c
175#define SCH3114_ID 0x7d
176#define SCH3116_ID 0x7f
177#define SCH5027_ID 0x89
178#define SCH5127_ID 0x86
167 179
168/* Length of ISA address segment */ 180/* Length of ISA address segment */
169#define DME1737_EXTENT 2 181#define DME1737_EXTENT 2
170 182
183/* chip-dependent features */
184#define HAS_TEMP_OFFSET (1 << 0) /* bit 0 */
185#define HAS_VID (1 << 1) /* bit 1 */
186#define HAS_ZONE3 (1 << 2) /* bit 2 */
187#define HAS_ZONE_HYST (1 << 3) /* bit 3 */
188#define HAS_PWM_MIN (1 << 4) /* bit 4 */
189#define HAS_FAN(ix) (1 << ((ix) + 5)) /* bits 5-10 */
190#define HAS_PWM(ix) (1 << ((ix) + 11)) /* bits 11-16 */
191
171/* --------------------------------------------------------------------- 192/* ---------------------------------------------------------------------
172 * Data structures and manipulation thereof 193 * Data structures and manipulation thereof
173 * --------------------------------------------------------------------- */ 194 * --------------------------------------------------------------------- */
@@ -187,8 +208,7 @@ struct dme1737_data {
187 208
188 u8 vid; 209 u8 vid;
189 u8 pwm_rr_en; 210 u8 pwm_rr_en;
190 u8 has_pwm; 211 u32 has_features;
191 u8 has_fan;
192 212
193 /* Register values */ 213 /* Register values */
194 u16 in[7]; 214 u16 in[7];
@@ -224,8 +244,11 @@ static const int IN_NOMINAL_SCH311x[] = {2500, 1500, 3300, 5000, 12000, 3300,
224 3300}; 244 3300};
225static const int IN_NOMINAL_SCH5027[] = {5000, 2250, 3300, 1125, 1125, 3300, 245static const int IN_NOMINAL_SCH5027[] = {5000, 2250, 3300, 1125, 1125, 3300,
226 3300}; 246 3300};
247static const int IN_NOMINAL_SCH5127[] = {2500, 2250, 3300, 1125, 1125, 3300,
248 3300};
227#define IN_NOMINAL(type) ((type) == sch311x ? IN_NOMINAL_SCH311x : \ 249#define IN_NOMINAL(type) ((type) == sch311x ? IN_NOMINAL_SCH311x : \
228 (type) == sch5027 ? IN_NOMINAL_SCH5027 : \ 250 (type) == sch5027 ? IN_NOMINAL_SCH5027 : \
251 (type) == sch5127 ? IN_NOMINAL_SCH5127 : \
229 IN_NOMINAL_DME1737) 252 IN_NOMINAL_DME1737)
230 253
231/* Voltage input 254/* Voltage input
@@ -568,7 +591,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
568 591
569 /* Sample register contents every 1 sec */ 592 /* Sample register contents every 1 sec */
570 if (time_after(jiffies, data->last_update + HZ) || !data->valid) { 593 if (time_after(jiffies, data->last_update + HZ) || !data->valid) {
571 if (data->type == dme1737) { 594 if (data->has_features & HAS_VID) {
572 data->vid = dme1737_read(data, DME1737_REG_VID) & 595 data->vid = dme1737_read(data, DME1737_REG_VID) &
573 0x3f; 596 0x3f;
574 } 597 }
@@ -599,7 +622,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
599 DME1737_REG_TEMP_MIN(ix)); 622 DME1737_REG_TEMP_MIN(ix));
600 data->temp_max[ix] = dme1737_read(data, 623 data->temp_max[ix] = dme1737_read(data,
601 DME1737_REG_TEMP_MAX(ix)); 624 DME1737_REG_TEMP_MAX(ix));
602 if (data->type != sch5027) { 625 if (data->has_features & HAS_TEMP_OFFSET) {
603 data->temp_offset[ix] = dme1737_read(data, 626 data->temp_offset[ix] = dme1737_read(data,
604 DME1737_REG_TEMP_OFFSET(ix)); 627 DME1737_REG_TEMP_OFFSET(ix));
605 } 628 }
@@ -626,7 +649,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
626 for (ix = 0; ix < ARRAY_SIZE(data->fan); ix++) { 649 for (ix = 0; ix < ARRAY_SIZE(data->fan); ix++) {
627 /* Skip reading registers if optional fans are not 650 /* Skip reading registers if optional fans are not
628 * present */ 651 * present */
629 if (!(data->has_fan & (1 << ix))) { 652 if (!(data->has_features & HAS_FAN(ix))) {
630 continue; 653 continue;
631 } 654 }
632 data->fan[ix] = dme1737_read(data, 655 data->fan[ix] = dme1737_read(data,
@@ -650,7 +673,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
650 for (ix = 0; ix < ARRAY_SIZE(data->pwm); ix++) { 673 for (ix = 0; ix < ARRAY_SIZE(data->pwm); ix++) {
651 /* Skip reading registers if optional PWMs are not 674 /* Skip reading registers if optional PWMs are not
652 * present */ 675 * present */
653 if (!(data->has_pwm & (1 << ix))) { 676 if (!(data->has_features & HAS_PWM(ix))) {
654 continue; 677 continue;
655 } 678 }
656 data->pwm[ix] = dme1737_read(data, 679 data->pwm[ix] = dme1737_read(data,
@@ -672,12 +695,24 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
672 695
673 /* Thermal zone registers */ 696 /* Thermal zone registers */
674 for (ix = 0; ix < ARRAY_SIZE(data->zone_low); ix++) { 697 for (ix = 0; ix < ARRAY_SIZE(data->zone_low); ix++) {
675 data->zone_low[ix] = dme1737_read(data, 698 /* Skip reading registers if zone3 is not present */
676 DME1737_REG_ZONE_LOW(ix)); 699 if ((ix == 2) && !(data->has_features & HAS_ZONE3)) {
677 data->zone_abs[ix] = dme1737_read(data, 700 continue;
678 DME1737_REG_ZONE_ABS(ix)); 701 }
702 /* sch5127 zone2 registers are special */
703 if ((ix == 1) && (data->type == sch5127)) {
704 data->zone_low[1] = dme1737_read(data,
705 DME1737_REG_ZONE_LOW(2));
706 data->zone_abs[1] = dme1737_read(data,
707 DME1737_REG_ZONE_ABS(2));
708 } else {
709 data->zone_low[ix] = dme1737_read(data,
710 DME1737_REG_ZONE_LOW(ix));
711 data->zone_abs[ix] = dme1737_read(data,
712 DME1737_REG_ZONE_ABS(ix));
713 }
679 } 714 }
680 if (data->type != sch5027) { 715 if (data->has_features & HAS_ZONE_HYST) {
681 for (ix = 0; ix < ARRAY_SIZE(data->zone_hyst); ix++) { 716 for (ix = 0; ix < ARRAY_SIZE(data->zone_hyst); ix++) {
682 data->zone_hyst[ix] = dme1737_read(data, 717 data->zone_hyst[ix] = dme1737_read(data,
683 DME1737_REG_ZONE_HYST(ix)); 718 DME1737_REG_ZONE_HYST(ix));
@@ -1594,10 +1629,6 @@ static struct attribute *dme1737_attr[] ={
1594 &sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr, 1629 &sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr,
1595 &sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr, 1630 &sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr,
1596 &sensor_dev_attr_zone2_auto_channels_temp.dev_attr.attr, 1631 &sensor_dev_attr_zone2_auto_channels_temp.dev_attr.attr,
1597 &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr,
1598 &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr,
1599 &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr,
1600 &sensor_dev_attr_zone3_auto_channels_temp.dev_attr.attr,
1601 NULL 1632 NULL
1602}; 1633};
1603 1634
@@ -1605,27 +1636,23 @@ static const struct attribute_group dme1737_group = {
1605 .attrs = dme1737_attr, 1636 .attrs = dme1737_attr,
1606}; 1637};
1607 1638
1608/* The following struct holds misc attributes, which are not available in all 1639/* The following struct holds temp offset attributes, which are not available
1609 * chips. Their creation depends on the chip type which is determined during 1640 * in all chips. The following chips support them:
1610 * module load. */ 1641 * DME1737, SCH311x */
1611static struct attribute *dme1737_misc_attr[] = { 1642static struct attribute *dme1737_temp_offset_attr[] = {
1612 /* Temperatures */
1613 &sensor_dev_attr_temp1_offset.dev_attr.attr, 1643 &sensor_dev_attr_temp1_offset.dev_attr.attr,
1614 &sensor_dev_attr_temp2_offset.dev_attr.attr, 1644 &sensor_dev_attr_temp2_offset.dev_attr.attr,
1615 &sensor_dev_attr_temp3_offset.dev_attr.attr, 1645 &sensor_dev_attr_temp3_offset.dev_attr.attr,
1616 /* Zones */
1617 &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr,
1618 &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr,
1619 &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr,
1620 NULL 1646 NULL
1621}; 1647};
1622 1648
1623static const struct attribute_group dme1737_misc_group = { 1649static const struct attribute_group dme1737_temp_offset_group = {
1624 .attrs = dme1737_misc_attr, 1650 .attrs = dme1737_temp_offset_attr,
1625}; 1651};
1626 1652
1627/* The following struct holds VID-related attributes. Their creation 1653/* The following struct holds VID related attributes, which are not available
1628 depends on the chip type which is determined during module load. */ 1654 * in all chips. The following chips support them:
1655 * DME1737 */
1629static struct attribute *dme1737_vid_attr[] = { 1656static struct attribute *dme1737_vid_attr[] = {
1630 &dev_attr_vrm.attr, 1657 &dev_attr_vrm.attr,
1631 &dev_attr_cpu0_vid.attr, 1658 &dev_attr_cpu0_vid.attr,
@@ -1636,6 +1663,36 @@ static const struct attribute_group dme1737_vid_group = {
1636 .attrs = dme1737_vid_attr, 1663 .attrs = dme1737_vid_attr,
1637}; 1664};
1638 1665
1666/* The following struct holds temp zone 3 related attributes, which are not
1667 * available in all chips. The following chips support them:
1668 * DME1737, SCH311x, SCH5027 */
1669static struct attribute *dme1737_zone3_attr[] = {
1670 &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr,
1671 &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr,
1672 &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr,
1673 &sensor_dev_attr_zone3_auto_channels_temp.dev_attr.attr,
1674 NULL
1675};
1676
1677static const struct attribute_group dme1737_zone3_group = {
1678 .attrs = dme1737_zone3_attr,
1679};
1680
1681
1682/* The following struct holds temp zone hysteresis related attributes, which
1683 * are not available in all chips. The following chips support them:
1684 * DME1737, SCH311x */
1685static struct attribute *dme1737_zone_hyst_attr[] = {
1686 &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr,
1687 &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr,
1688 &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr,
1689 NULL
1690};
1691
1692static const struct attribute_group dme1737_zone_hyst_group = {
1693 .attrs = dme1737_zone_hyst_attr,
1694};
1695
1639/* The following structs hold the PWM attributes, some of which are optional. 1696/* The following structs hold the PWM attributes, some of which are optional.
1640 * Their creation depends on the chip configuration which is determined during 1697 * Their creation depends on the chip configuration which is determined during
1641 * module load. */ 1698 * module load. */
@@ -1691,10 +1748,10 @@ static const struct attribute_group dme1737_pwm_group[] = {
1691 { .attrs = dme1737_pwm6_attr }, 1748 { .attrs = dme1737_pwm6_attr },
1692}; 1749};
1693 1750
1694/* The following struct holds misc PWM attributes, which are not available in 1751/* The following struct holds auto PWM min attributes, which are not available
1695 * all chips. Their creation depends on the chip type which is determined 1752 * in all chips. Their creation depends on the chip type which is determined
1696 * during module load. */ 1753 * during module load. */
1697static struct attribute *dme1737_pwm_misc_attr[] = { 1754static struct attribute *dme1737_auto_pwm_min_attr[] = {
1698 &sensor_dev_attr_pwm1_auto_pwm_min.dev_attr.attr, 1755 &sensor_dev_attr_pwm1_auto_pwm_min.dev_attr.attr,
1699 &sensor_dev_attr_pwm2_auto_pwm_min.dev_attr.attr, 1756 &sensor_dev_attr_pwm2_auto_pwm_min.dev_attr.attr,
1700 &sensor_dev_attr_pwm3_auto_pwm_min.dev_attr.attr, 1757 &sensor_dev_attr_pwm3_auto_pwm_min.dev_attr.attr,
@@ -1764,14 +1821,25 @@ static struct attribute *dme1737_zone_chmod_attr[] = {
1764 &sensor_dev_attr_zone2_auto_point1_temp.dev_attr.attr, 1821 &sensor_dev_attr_zone2_auto_point1_temp.dev_attr.attr,
1765 &sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr, 1822 &sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr,
1766 &sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr, 1823 &sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr,
1824 NULL
1825};
1826
1827static const struct attribute_group dme1737_zone_chmod_group = {
1828 .attrs = dme1737_zone_chmod_attr,
1829};
1830
1831
1832/* The permissions of the following zone 3 attributes are changed to read-
1833 * writeable if the chip is *not* locked. Otherwise they stay read-only. */
1834static struct attribute *dme1737_zone3_chmod_attr[] = {
1767 &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr, 1835 &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr,
1768 &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr, 1836 &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr,
1769 &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr, 1837 &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr,
1770 NULL 1838 NULL
1771}; 1839};
1772 1840
1773static const struct attribute_group dme1737_zone_chmod_group = { 1841static const struct attribute_group dme1737_zone3_chmod_group = {
1774 .attrs = dme1737_zone_chmod_attr, 1842 .attrs = dme1737_zone3_chmod_attr,
1775}; 1843};
1776 1844
1777/* The permissions of the following PWM attributes are changed to read- 1845/* The permissions of the following PWM attributes are changed to read-
@@ -1887,30 +1955,35 @@ static void dme1737_remove_files(struct device *dev)
1887 int ix; 1955 int ix;
1888 1956
1889 for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) { 1957 for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
1890 if (data->has_fan & (1 << ix)) { 1958 if (data->has_features & HAS_FAN(ix)) {
1891 sysfs_remove_group(&dev->kobj, 1959 sysfs_remove_group(&dev->kobj,
1892 &dme1737_fan_group[ix]); 1960 &dme1737_fan_group[ix]);
1893 } 1961 }
1894 } 1962 }
1895 1963
1896 for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) { 1964 for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
1897 if (data->has_pwm & (1 << ix)) { 1965 if (data->has_features & HAS_PWM(ix)) {
1898 sysfs_remove_group(&dev->kobj, 1966 sysfs_remove_group(&dev->kobj,
1899 &dme1737_pwm_group[ix]); 1967 &dme1737_pwm_group[ix]);
1900 if (data->type != sch5027 && ix < 3) { 1968 if ((data->has_features & HAS_PWM_MIN) && ix < 3) {
1901 sysfs_remove_file(&dev->kobj, 1969 sysfs_remove_file(&dev->kobj,
1902 dme1737_pwm_misc_attr[ix]); 1970 dme1737_auto_pwm_min_attr[ix]);
1903 } 1971 }
1904 } 1972 }
1905 } 1973 }
1906 1974
1907 if (data->type != sch5027) { 1975 if (data->has_features & HAS_TEMP_OFFSET) {
1908 sysfs_remove_group(&dev->kobj, &dme1737_misc_group); 1976 sysfs_remove_group(&dev->kobj, &dme1737_temp_offset_group);
1909 } 1977 }
1910 if (data->type == dme1737) { 1978 if (data->has_features & HAS_VID) {
1911 sysfs_remove_group(&dev->kobj, &dme1737_vid_group); 1979 sysfs_remove_group(&dev->kobj, &dme1737_vid_group);
1912 } 1980 }
1913 1981 if (data->has_features & HAS_ZONE3) {
1982 sysfs_remove_group(&dev->kobj, &dme1737_zone3_group);
1983 }
1984 if (data->has_features & HAS_ZONE_HYST) {
1985 sysfs_remove_group(&dev->kobj, &dme1737_zone_hyst_group);
1986 }
1914 sysfs_remove_group(&dev->kobj, &dme1737_group); 1987 sysfs_remove_group(&dev->kobj, &dme1737_group);
1915 1988
1916 if (!data->client) { 1989 if (!data->client) {
@@ -1934,23 +2007,31 @@ static int dme1737_create_files(struct device *dev)
1934 goto exit_remove; 2007 goto exit_remove;
1935 } 2008 }
1936 2009
1937 /* Create misc sysfs attributes */ 2010 /* Create chip-dependent sysfs attributes */
1938 if ((data->type != sch5027) && 2011 if ((data->has_features & HAS_TEMP_OFFSET) &&
1939 (err = sysfs_create_group(&dev->kobj, 2012 (err = sysfs_create_group(&dev->kobj,
1940 &dme1737_misc_group))) { 2013 &dme1737_temp_offset_group))) {
1941 goto exit_remove; 2014 goto exit_remove;
1942 } 2015 }
1943 2016 if ((data->has_features & HAS_VID) &&
1944 /* Create VID-related sysfs attributes */
1945 if ((data->type == dme1737) &&
1946 (err = sysfs_create_group(&dev->kobj, 2017 (err = sysfs_create_group(&dev->kobj,
1947 &dme1737_vid_group))) { 2018 &dme1737_vid_group))) {
1948 goto exit_remove; 2019 goto exit_remove;
1949 } 2020 }
2021 if ((data->has_features & HAS_ZONE3) &&
2022 (err = sysfs_create_group(&dev->kobj,
2023 &dme1737_zone3_group))) {
2024 goto exit_remove;
2025 }
2026 if ((data->has_features & HAS_ZONE_HYST) &&
2027 (err = sysfs_create_group(&dev->kobj,
2028 &dme1737_zone_hyst_group))) {
2029 goto exit_remove;
2030 }
1950 2031
1951 /* Create fan sysfs attributes */ 2032 /* Create fan sysfs attributes */
1952 for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) { 2033 for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
1953 if (data->has_fan & (1 << ix)) { 2034 if (data->has_features & HAS_FAN(ix)) {
1954 if ((err = sysfs_create_group(&dev->kobj, 2035 if ((err = sysfs_create_group(&dev->kobj,
1955 &dme1737_fan_group[ix]))) { 2036 &dme1737_fan_group[ix]))) {
1956 goto exit_remove; 2037 goto exit_remove;
@@ -1960,14 +2041,14 @@ static int dme1737_create_files(struct device *dev)
1960 2041
1961 /* Create PWM sysfs attributes */ 2042 /* Create PWM sysfs attributes */
1962 for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) { 2043 for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
1963 if (data->has_pwm & (1 << ix)) { 2044 if (data->has_features & HAS_PWM(ix)) {
1964 if ((err = sysfs_create_group(&dev->kobj, 2045 if ((err = sysfs_create_group(&dev->kobj,
1965 &dme1737_pwm_group[ix]))) { 2046 &dme1737_pwm_group[ix]))) {
1966 goto exit_remove; 2047 goto exit_remove;
1967 } 2048 }
1968 if (data->type != sch5027 && ix < 3 && 2049 if ((data->has_features & HAS_PWM_MIN) && ix < 3 &&
1969 (err = sysfs_create_file(&dev->kobj, 2050 (err = sysfs_create_file(&dev->kobj,
1970 dme1737_pwm_misc_attr[ix]))) { 2051 dme1737_auto_pwm_min_attr[ix]))) {
1971 goto exit_remove; 2052 goto exit_remove;
1972 } 2053 }
1973 } 2054 }
@@ -1983,21 +2064,30 @@ static int dme1737_create_files(struct device *dev)
1983 dme1737_chmod_group(dev, &dme1737_zone_chmod_group, 2064 dme1737_chmod_group(dev, &dme1737_zone_chmod_group,
1984 S_IRUGO | S_IWUSR); 2065 S_IRUGO | S_IWUSR);
1985 2066
1986 /* Change permissions of misc sysfs attributes */ 2067 /* Change permissions of chip-dependent sysfs attributes */
1987 if (data->type != sch5027) { 2068 if (data->has_features & HAS_TEMP_OFFSET) {
1988 dme1737_chmod_group(dev, &dme1737_misc_group, 2069 dme1737_chmod_group(dev, &dme1737_temp_offset_group,
2070 S_IRUGO | S_IWUSR);
2071 }
2072 if (data->has_features & HAS_ZONE3) {
2073 dme1737_chmod_group(dev, &dme1737_zone3_chmod_group,
2074 S_IRUGO | S_IWUSR);
2075 }
2076 if (data->has_features & HAS_ZONE_HYST) {
2077 dme1737_chmod_group(dev, &dme1737_zone_hyst_group,
1989 S_IRUGO | S_IWUSR); 2078 S_IRUGO | S_IWUSR);
1990 } 2079 }
1991 2080
1992 /* Change permissions of PWM sysfs attributes */ 2081 /* Change permissions of PWM sysfs attributes */
1993 for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_chmod_group); ix++) { 2082 for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_chmod_group); ix++) {
1994 if (data->has_pwm & (1 << ix)) { 2083 if (data->has_features & HAS_PWM(ix)) {
1995 dme1737_chmod_group(dev, 2084 dme1737_chmod_group(dev,
1996 &dme1737_pwm_chmod_group[ix], 2085 &dme1737_pwm_chmod_group[ix],
1997 S_IRUGO | S_IWUSR); 2086 S_IRUGO | S_IWUSR);
1998 if (data->type != sch5027 && ix < 3) { 2087 if ((data->has_features & HAS_PWM_MIN) &&
2088 ix < 3) {
1999 dme1737_chmod_file(dev, 2089 dme1737_chmod_file(dev,
2000 dme1737_pwm_misc_attr[ix], 2090 dme1737_auto_pwm_min_attr[ix],
2001 S_IRUGO | S_IWUSR); 2091 S_IRUGO | S_IWUSR);
2002 } 2092 }
2003 } 2093 }
@@ -2005,7 +2095,7 @@ static int dme1737_create_files(struct device *dev)
2005 2095
2006 /* Change permissions of pwm[1-3] if in manual mode */ 2096 /* Change permissions of pwm[1-3] if in manual mode */
2007 for (ix = 0; ix < 3; ix++) { 2097 for (ix = 0; ix < 3; ix++) {
2008 if ((data->has_pwm & (1 << ix)) && 2098 if ((data->has_features & HAS_PWM(ix)) &&
2009 (PWM_EN_FROM_REG(data->pwm_config[ix]) == 1)) { 2099 (PWM_EN_FROM_REG(data->pwm_config[ix]) == 1)) {
2010 dme1737_chmod_file(dev, 2100 dme1737_chmod_file(dev,
2011 dme1737_pwm_chmod_attr[ix], 2101 dme1737_pwm_chmod_attr[ix],
@@ -2052,20 +2142,20 @@ static int dme1737_init_device(struct device *dev)
2052 return -EFAULT; 2142 return -EFAULT;
2053 } 2143 }
2054 2144
2055 /* Determine which optional fan and pwm features are enabled/present */ 2145 /* Determine which optional fan and pwm features are enabled (only
2146 * valid for I2C devices) */
2056 if (client) { /* I2C chip */ 2147 if (client) { /* I2C chip */
2057 data->config2 = dme1737_read(data, DME1737_REG_CONFIG2); 2148 data->config2 = dme1737_read(data, DME1737_REG_CONFIG2);
2058 /* Check if optional fan3 input is enabled */ 2149 /* Check if optional fan3 input is enabled */
2059 if (data->config2 & 0x04) { 2150 if (data->config2 & 0x04) {
2060 data->has_fan |= (1 << 2); 2151 data->has_features |= HAS_FAN(2);
2061 } 2152 }
2062 2153
2063 /* Fan4 and pwm3 are only available if the client's I2C address 2154 /* Fan4 and pwm3 are only available if the client's I2C address
2064 * is the default 0x2e. Otherwise the I/Os associated with 2155 * is the default 0x2e. Otherwise the I/Os associated with
2065 * these functions are used for addr enable/select. */ 2156 * these functions are used for addr enable/select. */
2066 if (client->addr == 0x2e) { 2157 if (client->addr == 0x2e) {
2067 data->has_fan |= (1 << 3); 2158 data->has_features |= HAS_FAN(3) | HAS_PWM(2);
2068 data->has_pwm |= (1 << 2);
2069 } 2159 }
2070 2160
2071 /* Determine which of the optional fan[5-6] and pwm[5-6] 2161 /* Determine which of the optional fan[5-6] and pwm[5-6]
@@ -2077,26 +2167,40 @@ static int dme1737_init_device(struct device *dev)
2077 dev_warn(dev, "Failed to query Super-IO for optional " 2167 dev_warn(dev, "Failed to query Super-IO for optional "
2078 "features.\n"); 2168 "features.\n");
2079 } 2169 }
2080 } else { /* ISA chip */
2081 /* Fan3 and pwm3 are always available. Fan[4-5] and pwm[5-6]
2082 * don't exist in the ISA chip. */
2083 data->has_fan |= (1 << 2);
2084 data->has_pwm |= (1 << 2);
2085 } 2170 }
2086 2171
2087 /* Fan1, fan2, pwm1, and pwm2 are always present */ 2172 /* Fan[1-2] and pwm[1-2] are present in all chips */
2088 data->has_fan |= 0x03; 2173 data->has_features |= HAS_FAN(0) | HAS_FAN(1) | HAS_PWM(0) | HAS_PWM(1);
2089 data->has_pwm |= 0x03; 2174
2175 /* Chip-dependent features */
2176 switch (data->type) {
2177 case dme1737:
2178 data->has_features |= HAS_TEMP_OFFSET | HAS_VID | HAS_ZONE3 |
2179 HAS_ZONE_HYST | HAS_PWM_MIN;
2180 break;
2181 case sch311x:
2182 data->has_features |= HAS_TEMP_OFFSET | HAS_ZONE3 |
2183 HAS_ZONE_HYST | HAS_PWM_MIN | HAS_FAN(2) | HAS_PWM(2);
2184 break;
2185 case sch5027:
2186 data->has_features |= HAS_ZONE3;
2187 break;
2188 case sch5127:
2189 data->has_features |= HAS_FAN(2) | HAS_PWM(2);
2190 break;
2191 default:
2192 break;
2193 }
2090 2194
2091 dev_info(dev, "Optional features: pwm3=%s, pwm5=%s, pwm6=%s, " 2195 dev_info(dev, "Optional features: pwm3=%s, pwm5=%s, pwm6=%s, "
2092 "fan3=%s, fan4=%s, fan5=%s, fan6=%s.\n", 2196 "fan3=%s, fan4=%s, fan5=%s, fan6=%s.\n",
2093 (data->has_pwm & (1 << 2)) ? "yes" : "no", 2197 (data->has_features & HAS_PWM(2)) ? "yes" : "no",
2094 (data->has_pwm & (1 << 4)) ? "yes" : "no", 2198 (data->has_features & HAS_PWM(4)) ? "yes" : "no",
2095 (data->has_pwm & (1 << 5)) ? "yes" : "no", 2199 (data->has_features & HAS_PWM(5)) ? "yes" : "no",
2096 (data->has_fan & (1 << 2)) ? "yes" : "no", 2200 (data->has_features & HAS_FAN(2)) ? "yes" : "no",
2097 (data->has_fan & (1 << 3)) ? "yes" : "no", 2201 (data->has_features & HAS_FAN(3)) ? "yes" : "no",
2098 (data->has_fan & (1 << 4)) ? "yes" : "no", 2202 (data->has_features & HAS_FAN(4)) ? "yes" : "no",
2099 (data->has_fan & (1 << 5)) ? "yes" : "no"); 2203 (data->has_features & HAS_FAN(5)) ? "yes" : "no");
2100 2204
2101 reg = dme1737_read(data, DME1737_REG_TACH_PWM); 2205 reg = dme1737_read(data, DME1737_REG_TACH_PWM);
2102 /* Inform if fan-to-pwm mapping differs from the default */ 2206 /* Inform if fan-to-pwm mapping differs from the default */
@@ -2122,7 +2226,7 @@ static int dme1737_init_device(struct device *dev)
2122 for (ix = 0; ix < 3; ix++) { 2226 for (ix = 0; ix < 3; ix++) {
2123 data->pwm_config[ix] = dme1737_read(data, 2227 data->pwm_config[ix] = dme1737_read(data,
2124 DME1737_REG_PWM_CONFIG(ix)); 2228 DME1737_REG_PWM_CONFIG(ix));
2125 if ((data->has_pwm & (1 << ix)) && 2229 if ((data->has_features & HAS_PWM(ix)) &&
2126 (PWM_EN_FROM_REG(data->pwm_config[ix]) == -1)) { 2230 (PWM_EN_FROM_REG(data->pwm_config[ix]) == -1)) {
2127 dev_info(dev, "Switching pwm%d to " 2231 dev_info(dev, "Switching pwm%d to "
2128 "manual mode.\n", ix + 1); 2232 "manual mode.\n", ix + 1);
@@ -2142,7 +2246,7 @@ static int dme1737_init_device(struct device *dev)
2142 data->pwm_acz[2] = 4; /* pwm3 -> zone3 */ 2246 data->pwm_acz[2] = 4; /* pwm3 -> zone3 */
2143 2247
2144 /* Set VRM */ 2248 /* Set VRM */
2145 if (data->type == dme1737) { 2249 if (data->has_features & HAS_VID) {
2146 data->vrm = vid_which_vrm(); 2250 data->vrm = vid_which_vrm();
2147 } 2251 }
2148 2252
@@ -2163,10 +2267,10 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data)
2163 dme1737_sio_enter(sio_cip); 2267 dme1737_sio_enter(sio_cip);
2164 2268
2165 /* Check device ID 2269 /* Check device ID
2166 * The DME1737 can return either 0x78 or 0x77 as its device ID. 2270 * We currently know about two kinds of DME1737 and SCH5027. */
2167 * The SCH5027 returns 0x89 as its device ID. */
2168 reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20); 2271 reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20);
2169 if (!(reg == 0x77 || reg == 0x78 || reg == 0x89)) { 2272 if (!(reg == DME1737_ID_1 || reg == DME1737_ID_2 ||
2273 reg == SCH5027_ID)) {
2170 err = -ENODEV; 2274 err = -ENODEV;
2171 goto exit; 2275 goto exit;
2172 } 2276 }
@@ -2185,16 +2289,16 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data)
2185 * are enabled and available. Bits [3:2] of registers 0x43-0x46 are set 2289 * are enabled and available. Bits [3:2] of registers 0x43-0x46 are set
2186 * to '10' if the respective feature is enabled. */ 2290 * to '10' if the respective feature is enabled. */
2187 if ((inb(addr + 0x43) & 0x0c) == 0x08) { /* fan6 */ 2291 if ((inb(addr + 0x43) & 0x0c) == 0x08) { /* fan6 */
2188 data->has_fan |= (1 << 5); 2292 data->has_features |= HAS_FAN(5);
2189 } 2293 }
2190 if ((inb(addr + 0x44) & 0x0c) == 0x08) { /* pwm6 */ 2294 if ((inb(addr + 0x44) & 0x0c) == 0x08) { /* pwm6 */
2191 data->has_pwm |= (1 << 5); 2295 data->has_features |= HAS_PWM(5);
2192 } 2296 }
2193 if ((inb(addr + 0x45) & 0x0c) == 0x08) { /* fan5 */ 2297 if ((inb(addr + 0x45) & 0x0c) == 0x08) { /* fan5 */
2194 data->has_fan |= (1 << 4); 2298 data->has_features |= HAS_FAN(4);
2195 } 2299 }
2196 if ((inb(addr + 0x46) & 0x0c) == 0x08) { /* pwm5 */ 2300 if ((inb(addr + 0x46) & 0x0c) == 0x08) { /* pwm5 */
2197 data->has_pwm |= (1 << 4); 2301 data->has_features |= HAS_PWM(4);
2198 } 2302 }
2199 2303
2200exit: 2304exit:
@@ -2222,7 +2326,6 @@ static int dme1737_i2c_detect(struct i2c_client *client,
2222 if (company == DME1737_COMPANY_SMSC && 2326 if (company == DME1737_COMPANY_SMSC &&
2223 verstep == SCH5027_VERSTEP) { 2327 verstep == SCH5027_VERSTEP) {
2224 name = "sch5027"; 2328 name = "sch5027";
2225
2226 } else if (company == DME1737_COMPANY_SMSC && 2329 } else if (company == DME1737_COMPANY_SMSC &&
2227 (verstep & DME1737_VERSTEP_MASK) == DME1737_VERSTEP) { 2330 (verstep & DME1737_VERSTEP_MASK) == DME1737_VERSTEP) {
2228 name = "dme1737"; 2331 name = "dme1737";
@@ -2329,10 +2432,10 @@ static int __init dme1737_isa_detect(int sio_cip, unsigned short *addr)
2329 dme1737_sio_enter(sio_cip); 2432 dme1737_sio_enter(sio_cip);
2330 2433
2331 /* Check device ID 2434 /* Check device ID
2332 * We currently know about SCH3112 (0x7c), SCH3114 (0x7d), and 2435 * We currently know about SCH3112, SCH3114, SCH3116, and SCH5127 */
2333 * SCH3116 (0x7f). */
2334 reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20); 2436 reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20);
2335 if (!(reg == 0x7c || reg == 0x7d || reg == 0x7f)) { 2437 if (!(reg == SCH3112_ID || reg == SCH3114_ID || reg == SCH3116_ID ||
2438 reg == SCH5127_ID)) {
2336 err = -ENODEV; 2439 err = -ENODEV;
2337 goto exit; 2440 goto exit;
2338 } 2441 }
@@ -2424,23 +2527,42 @@ static int __devinit dme1737_isa_probe(struct platform_device *pdev)
2424 platform_set_drvdata(pdev, data); 2527 platform_set_drvdata(pdev, data);
2425 2528
2426 /* Skip chip detection if module is loaded with force_id parameter */ 2529 /* Skip chip detection if module is loaded with force_id parameter */
2427 if (!force_id) { 2530 switch (force_id) {
2531 case SCH3112_ID:
2532 case SCH3114_ID:
2533 case SCH3116_ID:
2534 data->type = sch311x;
2535 break;
2536 case SCH5127_ID:
2537 data->type = sch5127;
2538 break;
2539 default:
2428 company = dme1737_read(data, DME1737_REG_COMPANY); 2540 company = dme1737_read(data, DME1737_REG_COMPANY);
2429 device = dme1737_read(data, DME1737_REG_DEVICE); 2541 device = dme1737_read(data, DME1737_REG_DEVICE);
2430 2542
2431 if (!((company == DME1737_COMPANY_SMSC) && 2543 if ((company == DME1737_COMPANY_SMSC) &&
2432 (device == SCH311X_DEVICE))) { 2544 (device == SCH311X_DEVICE)) {
2545 data->type = sch311x;
2546 } else if ((company == DME1737_COMPANY_SMSC) &&
2547 (device == SCH5127_DEVICE)) {
2548 data->type = sch5127;
2549 } else {
2433 err = -ENODEV; 2550 err = -ENODEV;
2434 goto exit_kfree; 2551 goto exit_kfree;
2435 } 2552 }
2436 } 2553 }
2437 data->type = sch311x;
2438 2554
2439 /* Fill in the remaining client fields and initialize the mutex */ 2555 if (data->type == sch5127) {
2440 data->name = "sch311x"; 2556 data->name = "sch5127";
2557 } else {
2558 data->name = "sch311x";
2559 }
2560
2561 /* Initialize the mutex */
2441 mutex_init(&data->update_lock); 2562 mutex_init(&data->update_lock);
2442 2563
2443 dev_info(dev, "Found a SCH311x chip at 0x%04x\n", data->addr); 2564 dev_info(dev, "Found a %s chip at 0x%04x\n",
2565 data->type == sch5127 ? "SCH5127" : "SCH311x", data->addr);
2444 2566
2445 /* Initialize the chip */ 2567 /* Initialize the chip */
2446 if ((err = dme1737_init_device(dev))) { 2568 if ((err = dme1737_init_device(dev))) {
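Note: the dme1737 hunks above fold the old has_fan/has_pwm bitmaps and the scattered data->type checks into a single has_features word tested through HAS_*() macros. The macro definitions live in an earlier hunk of the patch that is not shown here; the sketch below is an assumed layout consistent with the tests above, not quoted from the patch.

/* Assumed feature-flag layout behind the HAS_*() tests above (sketch only;
 * the authoritative #defines are in a dme1737.c hunk not shown here). */
#define HAS_TEMP_OFFSET		(1 << 0)		/* bit 0 */
#define HAS_VID			(1 << 1)		/* bit 1 */
#define HAS_ZONE3		(1 << 2)		/* bit 2 */
#define HAS_ZONE_HYST		(1 << 3)		/* bit 3 */
#define HAS_PWM_MIN		(1 << 4)		/* bit 4 */
#define HAS_FAN(ix)		(1 << ((ix) + 5))	/* bits 5-10, fan1..fan6 */
#define HAS_PWM(ix)		(1 << ((ix) + 11))	/* bits 11-16, pwm1..pwm6 */

With an encoding like this, one u32 replaces has_fan, has_pwm, and the per-chip type comparisons in the sysfs setup and permission code.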
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
new file mode 100644
index 000000000000..0e4b5642638d
--- /dev/null
+++ b/drivers/hwmon/emc1403.c
@@ -0,0 +1,344 @@
1/*
2 * emc1403.c - SMSC Thermal Driver
3 *
4 * Copyright (C) 2008 Intel Corp
5 *
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
20 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
21 *
22 * TODO
23 * - cache alarm and critical limit registers
24 * - add emc1404 support
25 */
26
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/slab.h>
30#include <linux/i2c.h>
31#include <linux/hwmon.h>
32#include <linux/hwmon-sysfs.h>
33#include <linux/err.h>
34#include <linux/sysfs.h>
35#include <linux/mutex.h>
36
37#define THERMAL_PID_REG 0xfd
38#define THERMAL_SMSC_ID_REG 0xfe
39#define THERMAL_REVISION_REG 0xff
40
41struct thermal_data {
42 struct device *hwmon_dev;
43 struct mutex mutex;
44 /* Cache the hyst value so we don't keep re-reading it. In theory
45 we could cache it forever as nobody else should be writing it. */
46 u8 cached_hyst;
47 unsigned long hyst_valid;
48};
49
50static ssize_t show_temp(struct device *dev,
51 struct device_attribute *attr, char *buf)
52{
53 struct i2c_client *client = to_i2c_client(dev);
54 struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
55 int retval = i2c_smbus_read_byte_data(client, sda->index);
56
57 if (retval < 0)
58 return retval;
59 return sprintf(buf, "%d000\n", retval);
60}
61
62static ssize_t show_bit(struct device *dev,
63 struct device_attribute *attr, char *buf)
64{
65 struct i2c_client *client = to_i2c_client(dev);
66 struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
67 int retval = i2c_smbus_read_byte_data(client, sda->nr);
68
69 if (retval < 0)
70 return retval;
71 retval &= sda->index;
72 return sprintf(buf, "%d\n", retval ? 1 : 0);
73}
74
75static ssize_t store_temp(struct device *dev,
76 struct device_attribute *attr, const char *buf, size_t count)
77{
78 struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
79 struct i2c_client *client = to_i2c_client(dev);
80 unsigned long val;
81 int retval;
82
83 if (strict_strtoul(buf, 10, &val))
84 return -EINVAL;
85 retval = i2c_smbus_write_byte_data(client, sda->index,
86 DIV_ROUND_CLOSEST(val, 1000));
87 if (retval < 0)
88 return retval;
89 return count;
90}
91
92static ssize_t show_hyst(struct device *dev,
93 struct device_attribute *attr, char *buf)
94{
95 struct i2c_client *client = to_i2c_client(dev);
96 struct thermal_data *data = i2c_get_clientdata(client);
97 struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
98 int retval;
99 int hyst;
100
101 retval = i2c_smbus_read_byte_data(client, sda->index);
102 if (retval < 0)
103 return retval;
104
105 if (time_after(jiffies, data->hyst_valid)) {
106 hyst = i2c_smbus_read_byte_data(client, 0x21);
107 if (hyst < 0)
 108 return hyst;
109 data->cached_hyst = hyst;
110 data->hyst_valid = jiffies + HZ;
111 }
112 return sprintf(buf, "%d000\n", retval - data->cached_hyst);
113}
114
115static ssize_t store_hyst(struct device *dev,
116 struct device_attribute *attr, const char *buf, size_t count)
117{
118 struct i2c_client *client = to_i2c_client(dev);
119 struct thermal_data *data = i2c_get_clientdata(client);
120 struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
121 int retval;
122 int hyst;
123 unsigned long val;
124
125 if (strict_strtoul(buf, 10, &val))
126 return -EINVAL;
127
128 mutex_lock(&data->mutex);
129 retval = i2c_smbus_read_byte_data(client, sda->index);
130 if (retval < 0)
131 goto fail;
132
 133 hyst = retval * 1000 - val;
134 hyst = DIV_ROUND_CLOSEST(hyst, 1000);
135 if (hyst < 0 || hyst > 255) {
136 retval = -ERANGE;
137 goto fail;
138 }
139
140 retval = i2c_smbus_write_byte_data(client, 0x21, hyst);
141 if (retval == 0) {
142 retval = count;
143 data->cached_hyst = hyst;
144 data->hyst_valid = jiffies + HZ;
145 }
146fail:
147 mutex_unlock(&data->mutex);
148 return retval;
149}
150
151/*
152 * Sensors. We pass the actual i2c register to the methods.
153 */
154
155static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR,
156 show_temp, store_temp, 0x06);
157static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
158 show_temp, store_temp, 0x05);
159static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
160 show_temp, store_temp, 0x20);
161static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0x00);
162static SENSOR_DEVICE_ATTR_2(temp1_min_alarm, S_IRUGO,
163 show_bit, NULL, 0x36, 0x01);
164static SENSOR_DEVICE_ATTR_2(temp1_max_alarm, S_IRUGO,
165 show_bit, NULL, 0x35, 0x01);
166static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO,
167 show_bit, NULL, 0x37, 0x01);
168static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR,
169 show_hyst, store_hyst, 0x20);
170
171static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR,
172 show_temp, store_temp, 0x08);
173static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR,
174 show_temp, store_temp, 0x07);
175static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR,
176 show_temp, store_temp, 0x19);
177static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0x01);
178static SENSOR_DEVICE_ATTR_2(temp2_min_alarm, S_IRUGO,
179 show_bit, NULL, 0x36, 0x02);
180static SENSOR_DEVICE_ATTR_2(temp2_max_alarm, S_IRUGO,
181 show_bit, NULL, 0x35, 0x02);
182static SENSOR_DEVICE_ATTR_2(temp2_crit_alarm, S_IRUGO,
183 show_bit, NULL, 0x37, 0x02);
184static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO | S_IWUSR,
185 show_hyst, store_hyst, 0x19);
186
187static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR,
188 show_temp, store_temp, 0x16);
189static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR,
190 show_temp, store_temp, 0x15);
191static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO | S_IWUSR,
192 show_temp, store_temp, 0x1A);
193static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 0x23);
194static SENSOR_DEVICE_ATTR_2(temp3_min_alarm, S_IRUGO,
195 show_bit, NULL, 0x36, 0x04);
196static SENSOR_DEVICE_ATTR_2(temp3_max_alarm, S_IRUGO,
197 show_bit, NULL, 0x35, 0x04);
198static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO,
199 show_bit, NULL, 0x37, 0x04);
200static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR,
201 show_hyst, store_hyst, 0x1A);
202
203static struct attribute *mid_att_thermal[] = {
204 &sensor_dev_attr_temp1_min.dev_attr.attr,
205 &sensor_dev_attr_temp1_max.dev_attr.attr,
206 &sensor_dev_attr_temp1_crit.dev_attr.attr,
207 &sensor_dev_attr_temp1_input.dev_attr.attr,
208 &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
209 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
210 &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
211 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
212 &sensor_dev_attr_temp2_min.dev_attr.attr,
213 &sensor_dev_attr_temp2_max.dev_attr.attr,
214 &sensor_dev_attr_temp2_crit.dev_attr.attr,
215 &sensor_dev_attr_temp2_input.dev_attr.attr,
216 &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
217 &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
218 &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
219 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
220 &sensor_dev_attr_temp3_min.dev_attr.attr,
221 &sensor_dev_attr_temp3_max.dev_attr.attr,
222 &sensor_dev_attr_temp3_crit.dev_attr.attr,
223 &sensor_dev_attr_temp3_input.dev_attr.attr,
224 &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
225 &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
226 &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
227 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
228 NULL
229};
230
231static const struct attribute_group m_thermal_gr = {
232 .attrs = mid_att_thermal
233};
234
235static int emc1403_detect(struct i2c_client *client,
236 struct i2c_board_info *info)
237{
238 int id;
239 /* Check if thermal chip is SMSC and EMC1403 */
240
241 id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG);
242 if (id != 0x5d)
243 return -ENODEV;
244
245 /* Note: 0x25 is the 1404 which is very similar and this
246 driver could be extended */
247 id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
248 if (id != 0x21)
249 return -ENODEV;
250
251 id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
252 if (id != 0x01)
253 return -ENODEV;
254
255 strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
256 return 0;
257}
258
259static int emc1403_probe(struct i2c_client *client,
260 const struct i2c_device_id *id)
261{
262 int res;
263 struct thermal_data *data;
264
265 data = kzalloc(sizeof(struct thermal_data), GFP_KERNEL);
266 if (data == NULL) {
 267 dev_warn(&client->dev, "out of memory\n");
268 return -ENOMEM;
269 }
270
271 i2c_set_clientdata(client, data);
272 mutex_init(&data->mutex);
273 data->hyst_valid = jiffies - 1; /* Expired */
274
275 res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
276 if (res) {
277 dev_warn(&client->dev, "create group failed\n");
279 goto thermal_error1;
280 }
281 data->hwmon_dev = hwmon_device_register(&client->dev);
282 if (IS_ERR(data->hwmon_dev)) {
283 res = PTR_ERR(data->hwmon_dev);
284 dev_warn(&client->dev, "register hwmon dev failed\n");
285 goto thermal_error2;
286 }
287 dev_info(&client->dev, "EMC1403 Thermal chip found\n");
288 return res;
289
290thermal_error2:
291 sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
292thermal_error1:
293 kfree(data);
294 return res;
295}
296
297static int emc1403_remove(struct i2c_client *client)
298{
299 struct thermal_data *data = i2c_get_clientdata(client);
300
301 hwmon_device_unregister(data->hwmon_dev);
302 sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
303 kfree(data);
304 return 0;
305}
306
307static const unsigned short emc1403_address_list[] = {
308 0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END
309};
310
311static const struct i2c_device_id emc1403_idtable[] = {
312 { "emc1403", 0 },
313 { }
314};
315MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
316
317static struct i2c_driver sensor_emc1403 = {
318 .class = I2C_CLASS_HWMON,
319 .driver = {
320 .name = "emc1403",
321 },
322 .detect = emc1403_detect,
323 .probe = emc1403_probe,
324 .remove = emc1403_remove,
325 .id_table = emc1403_idtable,
326 .address_list = emc1403_address_list,
327};
328
329static int __init sensor_emc1403_init(void)
330{
331 return i2c_add_driver(&sensor_emc1403);
332}
333
334static void __exit sensor_emc1403_exit(void)
335{
336 i2c_del_driver(&sensor_emc1403);
337}
338
339module_init(sensor_emc1403_init);
340module_exit(sensor_emc1403_exit);
341
 342MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>");
343MODULE_DESCRIPTION("emc1403 Thermal Driver");
344MODULE_LICENSE("GPL v2");
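A quick worked example of the crit/hyst arithmetic in show_hyst()/store_hyst() above, using invented register values:

/* Illustrative only -- the register contents are made up for the example.
 * Assume temp1_crit register (0x20) = 85 degC and hysteresis register
 * (0x21) = 10 degC:
 *
 *   show_hyst():  85 - 10 = 75          -> prints "75000" (millidegrees C)
 *   store_hyst(): user writes "80000"   -> hyst = 85 * 1000 - 80000 = 5000
 *                 DIV_ROUND_CLOSEST(5000, 1000) = 5 -> written to 0x21
 */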
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index a95fa4256caa..537841ef44b9 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -856,21 +856,19 @@ static inline int superio_inb(int base, int reg)
856static int superio_inw(int base, int reg) 856static int superio_inw(int base, int reg)
857{ 857{
858 int val; 858 int val;
859 outb(reg++, base); 859 val = superio_inb(base, reg) << 8;
860 val = inb(base + 1) << 8; 860 val |= superio_inb(base, reg + 1);
861 outb(reg, base);
862 val |= inb(base + 1);
863 return val; 861 return val;
864} 862}
865 863
866static inline void superio_enter(int base) 864static inline void superio_enter(int base)
867{ 865{
868 /* according to the datasheet the key must be send twice! */ 866 /* according to the datasheet the key must be send twice! */
869 outb( SIO_UNLOCK_KEY, base); 867 outb(SIO_UNLOCK_KEY, base);
870 outb( SIO_UNLOCK_KEY, base); 868 outb(SIO_UNLOCK_KEY, base);
871} 869}
872 870
873static inline void superio_select( int base, int ld) 871static inline void superio_select(int base, int ld)
874{ 872{
875 outb(SIO_REG_LDSEL, base); 873 outb(SIO_REG_LDSEL, base);
876 outb(ld, base + 1); 874 outb(ld, base + 1);
@@ -905,10 +903,8 @@ static u16 f71882fg_read16(struct f71882fg_data *data, u8 reg)
905{ 903{
906 u16 val; 904 u16 val;
907 905
908 outb(reg++, data->addr + ADDR_REG_OFFSET); 906 val = f71882fg_read8(data, reg) << 8;
909 val = inb(data->addr + DATA_REG_OFFSET) << 8; 907 val |= f71882fg_read8(data, reg + 1);
910 outb(reg, data->addr + ADDR_REG_OFFSET);
911 val |= inb(data->addr + DATA_REG_OFFSET);
912 908
913 return val; 909 return val;
914} 910}
@@ -921,10 +917,8 @@ static void f71882fg_write8(struct f71882fg_data *data, u8 reg, u8 val)
921 917
922static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val) 918static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val)
923{ 919{
924 outb(reg++, data->addr + ADDR_REG_OFFSET); 920 f71882fg_write8(data, reg, val >> 8);
925 outb(val >> 8, data->addr + DATA_REG_OFFSET); 921 f71882fg_write8(data, reg + 1, val & 0xff);
926 outb(reg, data->addr + ADDR_REG_OFFSET);
927 outb(val & 255, data->addr + DATA_REG_OFFSET);
928} 922}
929 923
930static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr) 924static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
@@ -945,7 +939,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
945 mutex_lock(&data->update_lock); 939 mutex_lock(&data->update_lock);
946 940
947 /* Update once every 60 seconds */ 941 /* Update once every 60 seconds */
948 if ( time_after(jiffies, data->last_limits + 60 * HZ ) || 942 if (time_after(jiffies, data->last_limits + 60 * HZ) ||
949 !data->valid) { 943 !data->valid) {
950 if (data->type == f71882fg || data->type == f71889fg) { 944 if (data->type == f71882fg || data->type == f71889fg) {
951 data->in1_max = 945 data->in1_max =
@@ -1127,8 +1121,12 @@ static ssize_t store_fan_full_speed(struct device *dev,
1127 const char *buf, size_t count) 1121 const char *buf, size_t count)
1128{ 1122{
1129 struct f71882fg_data *data = dev_get_drvdata(dev); 1123 struct f71882fg_data *data = dev_get_drvdata(dev);
1130 int nr = to_sensor_dev_attr_2(devattr)->index; 1124 int err, nr = to_sensor_dev_attr_2(devattr)->index;
1131 long val = simple_strtol(buf, NULL, 10); 1125 long val;
1126
1127 err = strict_strtol(buf, 10, &val);
1128 if (err)
1129 return err;
1132 1130
1133 val = SENSORS_LIMIT(val, 23, 1500000); 1131 val = SENSORS_LIMIT(val, 23, 1500000);
1134 val = fan_to_reg(val); 1132 val = fan_to_reg(val);
@@ -1157,8 +1155,12 @@ static ssize_t store_fan_beep(struct device *dev, struct device_attribute
1157 *devattr, const char *buf, size_t count) 1155 *devattr, const char *buf, size_t count)
1158{ 1156{
1159 struct f71882fg_data *data = dev_get_drvdata(dev); 1157 struct f71882fg_data *data = dev_get_drvdata(dev);
1160 int nr = to_sensor_dev_attr_2(devattr)->index; 1158 int err, nr = to_sensor_dev_attr_2(devattr)->index;
1161 unsigned long val = simple_strtoul(buf, NULL, 10); 1159 unsigned long val;
1160
1161 err = strict_strtoul(buf, 10, &val);
1162 if (err)
1163 return err;
1162 1164
1163 mutex_lock(&data->update_lock); 1165 mutex_lock(&data->update_lock);
1164 data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP); 1166 data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP);
@@ -1206,7 +1208,14 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute
1206 *devattr, const char *buf, size_t count) 1208 *devattr, const char *buf, size_t count)
1207{ 1209{
1208 struct f71882fg_data *data = dev_get_drvdata(dev); 1210 struct f71882fg_data *data = dev_get_drvdata(dev);
1209 long val = simple_strtol(buf, NULL, 10) / 8; 1211 int err;
1212 long val;
1213
1214 err = strict_strtol(buf, 10, &val);
1215 if (err)
1216 return err;
1217
1218 val /= 8;
1210 val = SENSORS_LIMIT(val, 0, 255); 1219 val = SENSORS_LIMIT(val, 0, 255);
1211 1220
1212 mutex_lock(&data->update_lock); 1221 mutex_lock(&data->update_lock);
@@ -1233,8 +1242,12 @@ static ssize_t store_in_beep(struct device *dev, struct device_attribute
1233 *devattr, const char *buf, size_t count) 1242 *devattr, const char *buf, size_t count)
1234{ 1243{
1235 struct f71882fg_data *data = dev_get_drvdata(dev); 1244 struct f71882fg_data *data = dev_get_drvdata(dev);
1236 int nr = to_sensor_dev_attr_2(devattr)->index; 1245 int err, nr = to_sensor_dev_attr_2(devattr)->index;
1237 unsigned long val = simple_strtoul(buf, NULL, 10); 1246 unsigned long val;
1247
1248 err = strict_strtoul(buf, 10, &val);
1249 if (err)
1250 return err;
1238 1251
1239 mutex_lock(&data->update_lock); 1252 mutex_lock(&data->update_lock);
1240 data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP); 1253 data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
@@ -1299,8 +1312,14 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
1299 *devattr, const char *buf, size_t count) 1312 *devattr, const char *buf, size_t count)
1300{ 1313{
1301 struct f71882fg_data *data = dev_get_drvdata(dev); 1314 struct f71882fg_data *data = dev_get_drvdata(dev);
1302 int nr = to_sensor_dev_attr_2(devattr)->index; 1315 int err, nr = to_sensor_dev_attr_2(devattr)->index;
1303 long val = simple_strtol(buf, NULL, 10) / 1000; 1316 long val;
1317
1318 err = strict_strtol(buf, 10, &val);
1319 if (err)
1320 return err;
1321
1322 val /= 1000;
1304 val = SENSORS_LIMIT(val, 0, 255); 1323 val = SENSORS_LIMIT(val, 0, 255);
1305 1324
1306 mutex_lock(&data->update_lock); 1325 mutex_lock(&data->update_lock);
@@ -1333,10 +1352,16 @@ static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
1333 *devattr, const char *buf, size_t count) 1352 *devattr, const char *buf, size_t count)
1334{ 1353{
1335 struct f71882fg_data *data = dev_get_drvdata(dev); 1354 struct f71882fg_data *data = dev_get_drvdata(dev);
1336 int nr = to_sensor_dev_attr_2(devattr)->index; 1355 int err, nr = to_sensor_dev_attr_2(devattr)->index;
1337 long val = simple_strtol(buf, NULL, 10) / 1000;
1338 ssize_t ret = count; 1356 ssize_t ret = count;
1339 u8 reg; 1357 u8 reg;
1358 long val;
1359
1360 err = strict_strtol(buf, 10, &val);
1361 if (err)
1362 return err;
1363
1364 val /= 1000;
1340 1365
1341 mutex_lock(&data->update_lock); 1366 mutex_lock(&data->update_lock);
1342 1367
@@ -1372,8 +1397,14 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
1372 *devattr, const char *buf, size_t count) 1397 *devattr, const char *buf, size_t count)
1373{ 1398{
1374 struct f71882fg_data *data = dev_get_drvdata(dev); 1399 struct f71882fg_data *data = dev_get_drvdata(dev);
1375 int nr = to_sensor_dev_attr_2(devattr)->index; 1400 int err, nr = to_sensor_dev_attr_2(devattr)->index;
1376 long val = simple_strtol(buf, NULL, 10) / 1000; 1401 long val;
1402
1403 err = strict_strtol(buf, 10, &val);
1404 if (err)
1405 return err;
1406
1407 val /= 1000;
1377 val = SENSORS_LIMIT(val, 0, 255); 1408 val = SENSORS_LIMIT(val, 0, 255);
1378 1409
1379 mutex_lock(&data->update_lock); 1410 mutex_lock(&data->update_lock);
@@ -1427,8 +1458,12 @@ static ssize_t store_temp_beep(struct device *dev, struct device_attribute
1427 *devattr, const char *buf, size_t count) 1458 *devattr, const char *buf, size_t count)
1428{ 1459{
1429 struct f71882fg_data *data = dev_get_drvdata(dev); 1460 struct f71882fg_data *data = dev_get_drvdata(dev);
1430 int nr = to_sensor_dev_attr_2(devattr)->index; 1461 int err, nr = to_sensor_dev_attr_2(devattr)->index;
1431 unsigned long val = simple_strtoul(buf, NULL, 10); 1462 unsigned long val;
1463
1464 err = strict_strtoul(buf, 10, &val);
1465 if (err)
1466 return err;
1432 1467
1433 mutex_lock(&data->update_lock); 1468 mutex_lock(&data->update_lock);
1434 data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP); 1469 data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP);
@@ -1490,8 +1525,13 @@ static ssize_t store_pwm(struct device *dev,
1490 size_t count) 1525 size_t count)
1491{ 1526{
1492 struct f71882fg_data *data = dev_get_drvdata(dev); 1527 struct f71882fg_data *data = dev_get_drvdata(dev);
1493 int nr = to_sensor_dev_attr_2(devattr)->index; 1528 int err, nr = to_sensor_dev_attr_2(devattr)->index;
1494 long val = simple_strtol(buf, NULL, 10); 1529 long val;
1530
1531 err = strict_strtol(buf, 10, &val);
1532 if (err)
1533 return err;
1534
1495 val = SENSORS_LIMIT(val, 0, 255); 1535 val = SENSORS_LIMIT(val, 0, 255);
1496 1536
1497 mutex_lock(&data->update_lock); 1537 mutex_lock(&data->update_lock);
@@ -1551,8 +1591,12 @@ static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
1551 *devattr, const char *buf, size_t count) 1591 *devattr, const char *buf, size_t count)
1552{ 1592{
1553 struct f71882fg_data *data = dev_get_drvdata(dev); 1593 struct f71882fg_data *data = dev_get_drvdata(dev);
1554 int nr = to_sensor_dev_attr_2(devattr)->index; 1594 int err, nr = to_sensor_dev_attr_2(devattr)->index;
1555 long val = simple_strtol(buf, NULL, 10); 1595 long val;
1596
1597 err = strict_strtol(buf, 10, &val);
1598 if (err)
1599 return err;
1556 1600
1557 /* Special case for F8000 pwm channel 3 which only does auto mode */ 1601 /* Special case for F8000 pwm channel 3 which only does auto mode */
1558 if (data->type == f8000 && nr == 2 && val != 2) 1602 if (data->type == f8000 && nr == 2 && val != 2)
@@ -1626,9 +1670,14 @@ static ssize_t store_pwm_auto_point_pwm(struct device *dev,
1626 const char *buf, size_t count) 1670 const char *buf, size_t count)
1627{ 1671{
1628 struct f71882fg_data *data = dev_get_drvdata(dev); 1672 struct f71882fg_data *data = dev_get_drvdata(dev);
1629 int pwm = to_sensor_dev_attr_2(devattr)->index; 1673 int err, pwm = to_sensor_dev_attr_2(devattr)->index;
1630 int point = to_sensor_dev_attr_2(devattr)->nr; 1674 int point = to_sensor_dev_attr_2(devattr)->nr;
1631 long val = simple_strtol(buf, NULL, 10); 1675 long val;
1676
1677 err = strict_strtol(buf, 10, &val);
1678 if (err)
1679 return err;
1680
1632 val = SENSORS_LIMIT(val, 0, 255); 1681 val = SENSORS_LIMIT(val, 0, 255);
1633 1682
1634 mutex_lock(&data->update_lock); 1683 mutex_lock(&data->update_lock);
@@ -1674,10 +1723,16 @@ static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
1674 const char *buf, size_t count) 1723 const char *buf, size_t count)
1675{ 1724{
1676 struct f71882fg_data *data = dev_get_drvdata(dev); 1725 struct f71882fg_data *data = dev_get_drvdata(dev);
1677 int nr = to_sensor_dev_attr_2(devattr)->index; 1726 int err, nr = to_sensor_dev_attr_2(devattr)->index;
1678 int point = to_sensor_dev_attr_2(devattr)->nr; 1727 int point = to_sensor_dev_attr_2(devattr)->nr;
1679 long val = simple_strtol(buf, NULL, 10) / 1000;
1680 u8 reg; 1728 u8 reg;
1729 long val;
1730
1731 err = strict_strtol(buf, 10, &val);
1732 if (err)
1733 return err;
1734
1735 val /= 1000;
1681 1736
1682 mutex_lock(&data->update_lock); 1737 mutex_lock(&data->update_lock);
1683 data->pwm_auto_point_temp[nr][point] = 1738 data->pwm_auto_point_temp[nr][point] =
@@ -1716,8 +1771,12 @@ static ssize_t store_pwm_interpolate(struct device *dev,
1716 const char *buf, size_t count) 1771 const char *buf, size_t count)
1717{ 1772{
1718 struct f71882fg_data *data = dev_get_drvdata(dev); 1773 struct f71882fg_data *data = dev_get_drvdata(dev);
1719 int nr = to_sensor_dev_attr_2(devattr)->index; 1774 int err, nr = to_sensor_dev_attr_2(devattr)->index;
1720 unsigned long val = simple_strtoul(buf, NULL, 10); 1775 unsigned long val;
1776
1777 err = strict_strtoul(buf, 10, &val);
1778 if (err)
1779 return err;
1721 1780
1722 mutex_lock(&data->update_lock); 1781 mutex_lock(&data->update_lock);
1723 data->pwm_auto_point_mapping[nr] = 1782 data->pwm_auto_point_mapping[nr] =
@@ -1752,8 +1811,12 @@ static ssize_t store_pwm_auto_point_channel(struct device *dev,
1752 const char *buf, size_t count) 1811 const char *buf, size_t count)
1753{ 1812{
1754 struct f71882fg_data *data = dev_get_drvdata(dev); 1813 struct f71882fg_data *data = dev_get_drvdata(dev);
1755 int nr = to_sensor_dev_attr_2(devattr)->index; 1814 int err, nr = to_sensor_dev_attr_2(devattr)->index;
1756 long val = simple_strtol(buf, NULL, 10); 1815 long val;
1816
1817 err = strict_strtol(buf, 10, &val);
1818 if (err)
1819 return err;
1757 1820
1758 switch (val) { 1821 switch (val) {
1759 case 1: 1822 case 1:
@@ -1798,9 +1861,15 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
1798 const char *buf, size_t count) 1861 const char *buf, size_t count)
1799{ 1862{
1800 struct f71882fg_data *data = dev_get_drvdata(dev); 1863 struct f71882fg_data *data = dev_get_drvdata(dev);
1801 int pwm = to_sensor_dev_attr_2(devattr)->index; 1864 int err, pwm = to_sensor_dev_attr_2(devattr)->index;
1802 int point = to_sensor_dev_attr_2(devattr)->nr; 1865 int point = to_sensor_dev_attr_2(devattr)->nr;
1803 long val = simple_strtol(buf, NULL, 10) / 1000; 1866 long val;
1867
1868 err = strict_strtol(buf, 10, &val);
1869 if (err)
1870 return err;
1871
1872 val /= 1000;
1804 1873
1805 if (data->type == f71889fg) 1874 if (data->type == f71889fg)
1806 val = SENSORS_LIMIT(val, -128, 127); 1875 val = SENSORS_LIMIT(val, -128, 127);
@@ -2109,6 +2178,13 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2109 int err = -ENODEV; 2178 int err = -ENODEV;
2110 u16 devid; 2179 u16 devid;
2111 2180
2181 /* Don't step on other drivers' I/O space by accident */
2182 if (!request_region(sioaddr, 2, DRVNAME)) {
2183 printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
2184 (int)sioaddr);
2185 return -EBUSY;
2186 }
2187
2112 superio_enter(sioaddr); 2188 superio_enter(sioaddr);
2113 2189
2114 devid = superio_inw(sioaddr, SIO_REG_MANID); 2190 devid = superio_inw(sioaddr, SIO_REG_MANID);
@@ -2151,8 +2227,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2151 } 2227 }
2152 2228
2153 *address = superio_inw(sioaddr, SIO_REG_ADDR); 2229 *address = superio_inw(sioaddr, SIO_REG_ADDR);
2154 if (*address == 0) 2230 if (*address == 0) {
2155 {
2156 printk(KERN_WARNING DRVNAME ": Base address not set\n"); 2231 printk(KERN_WARNING DRVNAME ": Base address not set\n");
2157 goto exit; 2232 goto exit;
2158 } 2233 }
@@ -2164,6 +2239,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2164 (int)superio_inb(sioaddr, SIO_REG_DEVREV)); 2239 (int)superio_inb(sioaddr, SIO_REG_DEVREV));
2165exit: 2240exit:
2166 superio_exit(sioaddr); 2241 superio_exit(sioaddr);
2242 release_region(sioaddr, 2);
2167 return err; 2243 return err;
2168} 2244}
2169 2245
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index bf81aff7051d..776aeb3019d2 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -53,7 +53,7 @@
53 * Address is fully defined internally and cannot be changed. 53 * Address is fully defined internally and cannot be changed.
54 */ 54 */
55 55
56static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END }; 56static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
57 57
58/* 58/*
59 * The LM63 registers 59 * The LM63 registers
@@ -131,12 +131,15 @@ static struct lm63_data *lm63_update_device(struct device *dev);
131static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info); 131static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info);
132static void lm63_init_client(struct i2c_client *client); 132static void lm63_init_client(struct i2c_client *client);
133 133
134enum chips { lm63, lm64 };
135
134/* 136/*
135 * Driver data (common to all clients) 137 * Driver data (common to all clients)
136 */ 138 */
137 139
138static const struct i2c_device_id lm63_id[] = { 140static const struct i2c_device_id lm63_id[] = {
139 { "lm63", 0 }, 141 { "lm63", lm63 },
142 { "lm64", lm64 },
140 { } 143 { }
141}; 144};
142MODULE_DEVICE_TABLE(i2c, lm63_id); 145MODULE_DEVICE_TABLE(i2c, lm63_id);
@@ -422,6 +425,7 @@ static int lm63_detect(struct i2c_client *new_client,
422 struct i2c_adapter *adapter = new_client->adapter; 425 struct i2c_adapter *adapter = new_client->adapter;
423 u8 man_id, chip_id, reg_config1, reg_config2; 426 u8 man_id, chip_id, reg_config1, reg_config2;
424 u8 reg_alert_status, reg_alert_mask; 427 u8 reg_alert_status, reg_alert_mask;
428 int address = new_client->addr;
425 429
426 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) 430 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
427 return -ENODEV; 431 return -ENODEV;
@@ -439,7 +443,6 @@ static int lm63_detect(struct i2c_client *new_client,
439 LM63_REG_ALERT_MASK); 443 LM63_REG_ALERT_MASK);
440 444
441 if (man_id != 0x01 /* National Semiconductor */ 445 if (man_id != 0x01 /* National Semiconductor */
442 || chip_id != 0x41 /* LM63 */
443 || (reg_config1 & 0x18) != 0x00 446 || (reg_config1 & 0x18) != 0x00
444 || (reg_config2 & 0xF8) != 0x00 447 || (reg_config2 & 0xF8) != 0x00
445 || (reg_alert_status & 0x20) != 0x00 448 || (reg_alert_status & 0x20) != 0x00
@@ -450,7 +453,12 @@ static int lm63_detect(struct i2c_client *new_client,
450 return -ENODEV; 453 return -ENODEV;
451 } 454 }
452 455
453 strlcpy(info->type, "lm63", I2C_NAME_SIZE); 456 if (chip_id == 0x41 && address == 0x4c)
457 strlcpy(info->type, "lm63", I2C_NAME_SIZE);
458 else if (chip_id == 0x51 && (address == 0x18 || address == 0x4e))
459 strlcpy(info->type, "lm64", I2C_NAME_SIZE);
460 else
461 return -ENODEV;
454 462
455 return 0; 463 return 0;
456} 464}
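Detection matrix implied by the lm63 hunk above:

/* Summary of the detect logic added above:
 *   man_id 0x01, chip_id 0x41, address 0x4c          -> "lm63"
 *   man_id 0x01, chip_id 0x51, address 0x18 or 0x4e  -> "lm64"
 *   anything else                                    -> -ENODEV
 */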
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 8ae2cfe2d827..56463428a419 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -46,6 +46,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
46 tcn75, 46 tcn75,
47 tmp100, 47 tmp100,
48 tmp101, 48 tmp101,
49 tmp105,
49 tmp175, 50 tmp175,
50 tmp275, 51 tmp275,
51 tmp75, 52 tmp75,
@@ -220,6 +221,7 @@ static const struct i2c_device_id lm75_ids[] = {
220 { "tcn75", tcn75, }, 221 { "tcn75", tcn75, },
221 { "tmp100", tmp100, }, 222 { "tmp100", tmp100, },
222 { "tmp101", tmp101, }, 223 { "tmp101", tmp101, },
224 { "tmp105", tmp105, },
223 { "tmp175", tmp175, }, 225 { "tmp175", tmp175, },
224 { "tmp275", tmp275, }, 226 { "tmp275", tmp275, },
225 { "tmp75", tmp75, }, 227 { "tmp75", tmp75, },
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 7cc2708871ab..760ef72eea56 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -982,7 +982,8 @@ static struct lm90_data *lm90_update_device(struct device *dev)
982 982
983 mutex_lock(&data->update_lock); 983 mutex_lock(&data->update_lock);
984 984
985 if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) { 985 if (time_after(jiffies, data->last_updated + HZ / 2 + HZ / 10)
986 || !data->valid) {
986 u8 h, l; 987 u8 h, l;
987 988
988 dev_dbg(&client->dev, "Updating lm90 data.\n"); 989 dev_dbg(&client->dev, "Updating lm90 data.\n");
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 65c232a9d0c5..21d201befc2c 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -45,9 +45,7 @@ enum ltc4245_cmd {
45 LTC4245_VEEIN = 0x19, 45 LTC4245_VEEIN = 0x19,
46 LTC4245_VEESENSE = 0x1a, 46 LTC4245_VEESENSE = 0x1a,
47 LTC4245_VEEOUT = 0x1b, 47 LTC4245_VEEOUT = 0x1b,
48 LTC4245_GPIOADC1 = 0x1c, 48 LTC4245_GPIOADC = 0x1c,
49 LTC4245_GPIOADC2 = 0x1d,
50 LTC4245_GPIOADC3 = 0x1e,
51}; 49};
52 50
53struct ltc4245_data { 51struct ltc4245_data {
@@ -61,7 +59,7 @@ struct ltc4245_data {
61 u8 cregs[0x08]; 59 u8 cregs[0x08];
62 60
63 /* Voltage registers */ 61 /* Voltage registers */
64 u8 vregs[0x0f]; 62 u8 vregs[0x0d];
65}; 63};
66 64
67static struct ltc4245_data *ltc4245_update_device(struct device *dev) 65static struct ltc4245_data *ltc4245_update_device(struct device *dev)
@@ -86,7 +84,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
86 data->cregs[i] = val; 84 data->cregs[i] = val;
87 } 85 }
88 86
89 /* Read voltage registers -- 0x10 to 0x1f */ 87 /* Read voltage registers -- 0x10 to 0x1c */
90 for (i = 0; i < ARRAY_SIZE(data->vregs); i++) { 88 for (i = 0; i < ARRAY_SIZE(data->vregs); i++) {
91 val = i2c_smbus_read_byte_data(client, i+0x10); 89 val = i2c_smbus_read_byte_data(client, i+0x10);
92 if (unlikely(val < 0)) 90 if (unlikely(val < 0))
@@ -128,9 +126,7 @@ static int ltc4245_get_voltage(struct device *dev, u8 reg)
128 case LTC4245_VEEOUT: 126 case LTC4245_VEEOUT:
129 voltage = regval * -55; 127 voltage = regval * -55;
130 break; 128 break;
131 case LTC4245_GPIOADC1: 129 case LTC4245_GPIOADC:
132 case LTC4245_GPIOADC2:
133 case LTC4245_GPIOADC3:
134 voltage = regval * 10; 130 voltage = regval * 10;
135 break; 131 break;
136 default: 132 default:
@@ -297,9 +293,7 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2);
297LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2); 293LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2);
298 294
299/* GPIO voltages */ 295/* GPIO voltages */
300LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC1); 296LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC);
301LTC4245_VOLTAGE(in10_input, LTC4245_GPIOADC2);
302LTC4245_VOLTAGE(in11_input, LTC4245_GPIOADC3);
303 297
304/* Power Consumption (virtual) */ 298/* Power Consumption (virtual) */
305LTC4245_POWER(power1_input, LTC4245_12VSENSE); 299LTC4245_POWER(power1_input, LTC4245_12VSENSE);
@@ -342,8 +336,6 @@ static struct attribute *ltc4245_attributes[] = {
342 &sensor_dev_attr_in8_min_alarm.dev_attr.attr, 336 &sensor_dev_attr_in8_min_alarm.dev_attr.attr,
343 337
344 &sensor_dev_attr_in9_input.dev_attr.attr, 338 &sensor_dev_attr_in9_input.dev_attr.attr,
345 &sensor_dev_attr_in10_input.dev_attr.attr,
346 &sensor_dev_attr_in11_input.dev_attr.attr,
347 339
348 &sensor_dev_attr_power1_input.dev_attr.attr, 340 &sensor_dev_attr_power1_input.dev_attr.attr,
349 &sensor_dev_attr_power2_input.dev_attr.attr, 341 &sensor_dev_attr_power2_input.dev_attr.attr,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
new file mode 100644
index 000000000000..8013895a1faf
--- /dev/null
+++ b/drivers/hwmon/tmp102.c
@@ -0,0 +1,321 @@
1/* Texas Instruments TMP102 SMBus temperature sensor driver
2 *
3 * Copyright (C) 2010 Steven King <sfking@fdwdc.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/slab.h>
23#include <linux/i2c.h>
24#include <linux/hwmon.h>
25#include <linux/hwmon-sysfs.h>
26#include <linux/err.h>
27#include <linux/mutex.h>
28#include <linux/device.h>
29
30#define DRIVER_NAME "tmp102"
31
32#define TMP102_TEMP_REG 0x00
33#define TMP102_CONF_REG 0x01
34/* note: these bit definitions are byte swapped */
35#define TMP102_CONF_SD 0x0100
36#define TMP102_CONF_TM 0x0200
37#define TMP102_CONF_POL 0x0400
38#define TMP102_CONF_F0 0x0800
39#define TMP102_CONF_F1 0x1000
40#define TMP102_CONF_R0 0x2000
41#define TMP102_CONF_R1 0x4000
42#define TMP102_CONF_OS 0x8000
43#define TMP102_CONF_EM 0x0010
44#define TMP102_CONF_AL 0x0020
45#define TMP102_CONF_CR0 0x0040
46#define TMP102_CONF_CR1 0x0080
47#define TMP102_TLOW_REG 0x02
48#define TMP102_THIGH_REG 0x03
49
50struct tmp102 {
51 struct device *hwmon_dev;
52 struct mutex lock;
53 u16 config_orig;
54 unsigned long last_update;
55 int temp[3];
56};
57
58/* SMBus specifies low byte first, but the TMP102 returns high byte first,
59 * so we have to swab16 the values */
60static inline int tmp102_read_reg(struct i2c_client *client, u8 reg)
61{
62 int result = i2c_smbus_read_word_data(client, reg);
63 return result < 0 ? result : swab16(result);
64}
65
66static inline int tmp102_write_reg(struct i2c_client *client, u8 reg, u16 val)
67{
68 return i2c_smbus_write_word_data(client, reg, swab16(val));
69}
70
71/* convert left adjusted 13-bit TMP102 register value to milliCelsius */
72static inline int tmp102_reg_to_mC(s16 val)
73{
74 return ((val & ~0x01) * 1000) / 128;
75}
76
77/* convert milliCelsius to left adjusted 13-bit TMP102 register value */
78static inline u16 tmp102_mC_to_reg(int val)
79{
80 return (val * 128) / 1000;
81}
82
83static const u8 tmp102_reg[] = {
84 TMP102_TEMP_REG,
85 TMP102_TLOW_REG,
86 TMP102_THIGH_REG,
87};
88
89static struct tmp102 *tmp102_update_device(struct i2c_client *client)
90{
91 struct tmp102 *tmp102 = i2c_get_clientdata(client);
92
93 mutex_lock(&tmp102->lock);
94 if (time_after(jiffies, tmp102->last_update + HZ / 3)) {
95 int i;
96 for (i = 0; i < ARRAY_SIZE(tmp102->temp); ++i) {
97 int status = tmp102_read_reg(client, tmp102_reg[i]);
98 if (status > -1)
99 tmp102->temp[i] = tmp102_reg_to_mC(status);
100 }
101 tmp102->last_update = jiffies;
102 }
103 mutex_unlock(&tmp102->lock);
104 return tmp102;
105}
106
107static ssize_t tmp102_show_temp(struct device *dev,
108 struct device_attribute *attr,
109 char *buf)
110{
111 struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
112 struct tmp102 *tmp102 = tmp102_update_device(to_i2c_client(dev));
113
114 return sprintf(buf, "%d\n", tmp102->temp[sda->index]);
115}
116
117static ssize_t tmp102_set_temp(struct device *dev,
118 struct device_attribute *attr,
119 const char *buf, size_t count)
120{
121 struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
122 struct i2c_client *client = to_i2c_client(dev);
123 struct tmp102 *tmp102 = i2c_get_clientdata(client);
124 long val;
125 int status;
126
127 if (strict_strtol(buf, 10, &val) < 0)
128 return -EINVAL;
129 val = SENSORS_LIMIT(val, -256000, 255000);
130
131 mutex_lock(&tmp102->lock);
132 tmp102->temp[sda->index] = val;
133 status = tmp102_write_reg(client, tmp102_reg[sda->index],
134 tmp102_mC_to_reg(val));
135 mutex_unlock(&tmp102->lock);
136 return status ? : count;
137}
138
 139static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL, 0);
140
141static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, tmp102_show_temp,
142 tmp102_set_temp, 1);
143
144static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, tmp102_show_temp,
145 tmp102_set_temp, 2);
146
147static struct attribute *tmp102_attributes[] = {
148 &sensor_dev_attr_temp1_input.dev_attr.attr,
149 &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
150 &sensor_dev_attr_temp1_max.dev_attr.attr,
151 NULL
152};
153
154static const struct attribute_group tmp102_attr_group = {
155 .attrs = tmp102_attributes,
156};
157
158#define TMP102_CONFIG (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1)
159#define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL)
160
161static int __devinit tmp102_probe(struct i2c_client *client,
162 const struct i2c_device_id *id)
163{
164 struct tmp102 *tmp102;
165 int status;
166
167 if (!i2c_check_functionality(client->adapter,
168 I2C_FUNC_SMBUS_WORD_DATA)) {
 169 dev_err(&client->dev, "adapter doesn't support SMBus word "
170 "transactions\n");
171 return -ENODEV;
172 }
173
174 tmp102 = kzalloc(sizeof(*tmp102), GFP_KERNEL);
175 if (!tmp102) {
176 dev_dbg(&client->dev, "kzalloc failed\n");
177 return -ENOMEM;
178 }
179 i2c_set_clientdata(client, tmp102);
180
181 status = tmp102_read_reg(client, TMP102_CONF_REG);
182 if (status < 0) {
183 dev_err(&client->dev, "error reading config register\n");
184 goto fail_free;
185 }
186 tmp102->config_orig = status;
187 status = tmp102_write_reg(client, TMP102_CONF_REG, TMP102_CONFIG);
188 if (status < 0) {
189 dev_err(&client->dev, "error writing config register\n");
190 goto fail_restore_config;
191 }
192 status = tmp102_read_reg(client, TMP102_CONF_REG);
193 if (status < 0) {
194 dev_err(&client->dev, "error reading config register\n");
195 goto fail_restore_config;
196 }
197 status &= ~TMP102_CONFIG_RD_ONLY;
198 if (status != TMP102_CONFIG) {
199 dev_err(&client->dev, "config settings did not stick\n");
200 status = -ENODEV;
201 goto fail_restore_config;
202 }
203 tmp102->last_update = jiffies - HZ;
204 mutex_init(&tmp102->lock);
205
206 status = sysfs_create_group(&client->dev.kobj, &tmp102_attr_group);
207 if (status) {
208 dev_dbg(&client->dev, "could not create sysfs files\n");
209 goto fail_restore_config;
210 }
211 tmp102->hwmon_dev = hwmon_device_register(&client->dev);
212 if (IS_ERR(tmp102->hwmon_dev)) {
213 dev_dbg(&client->dev, "unable to register hwmon device\n");
214 status = PTR_ERR(tmp102->hwmon_dev);
215 goto fail_remove_sysfs;
216 }
217
218 dev_info(&client->dev, "initialized\n");
219
220 return 0;
221
222fail_remove_sysfs:
223 sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group);
224fail_restore_config:
225 tmp102_write_reg(client, TMP102_CONF_REG, tmp102->config_orig);
226fail_free:
227 i2c_set_clientdata(client, NULL);
228 kfree(tmp102);
229
230 return status;
231}
232
233static int __devexit tmp102_remove(struct i2c_client *client)
234{
235 struct tmp102 *tmp102 = i2c_get_clientdata(client);
236
237 hwmon_device_unregister(tmp102->hwmon_dev);
238 sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group);
239
240 /* Stop monitoring if device was stopped originally */
241 if (tmp102->config_orig & TMP102_CONF_SD) {
242 int config;
243
244 config = tmp102_read_reg(client, TMP102_CONF_REG);
245 if (config >= 0)
246 tmp102_write_reg(client, TMP102_CONF_REG,
247 config | TMP102_CONF_SD);
248 }
249
250 i2c_set_clientdata(client, NULL);
251 kfree(tmp102);
252
253 return 0;
254}
255
256#ifdef CONFIG_PM
257static int tmp102_suspend(struct device *dev)
258{
259 struct i2c_client *client = to_i2c_client(dev);
260 int config;
261
262 config = tmp102_read_reg(client, TMP102_CONF_REG);
263 if (config < 0)
264 return config;
265
266 config |= TMP102_CONF_SD;
267 return tmp102_write_reg(client, TMP102_CONF_REG, config);
268}
269
270static int tmp102_resume(struct device *dev)
271{
272 struct i2c_client *client = to_i2c_client(dev);
273 int config;
274
275 config = tmp102_read_reg(client, TMP102_CONF_REG);
276 if (config < 0)
277 return config;
278
279 config &= ~TMP102_CONF_SD;
280 return tmp102_write_reg(client, TMP102_CONF_REG, config);
281}
282
283static const struct dev_pm_ops tmp102_dev_pm_ops = {
284 .suspend = tmp102_suspend,
285 .resume = tmp102_resume,
286};
287
288#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops)
289#else
290#define TMP102_DEV_PM_OPS NULL
291#endif /* CONFIG_PM */
292
293static const struct i2c_device_id tmp102_id[] = {
294 { "tmp102", 0 },
295 { }
296};
297MODULE_DEVICE_TABLE(i2c, tmp102_id);
298
299static struct i2c_driver tmp102_driver = {
300 .driver.name = DRIVER_NAME,
301 .driver.pm = TMP102_DEV_PM_OPS,
302 .probe = tmp102_probe,
303 .remove = __devexit_p(tmp102_remove),
304 .id_table = tmp102_id,
305};
306
307static int __init tmp102_init(void)
308{
309 return i2c_add_driver(&tmp102_driver);
310}
311module_init(tmp102_init);
312
313static void __exit tmp102_exit(void)
314{
315 i2c_del_driver(&tmp102_driver);
316}
317module_exit(tmp102_exit);
318
319MODULE_AUTHOR("Steven King <sfking@fdwdc.com>");
320MODULE_DESCRIPTION("Texas Instruments TMP102 temperature sensor driver");
321MODULE_LICENSE("GPL");
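A worked round-trip of the tmp102 conversion helpers above (extended 13-bit mode, illustrative value):

/* Worked example for tmp102_mC_to_reg()/tmp102_reg_to_mC():
 *   25 degC: tmp102_mC_to_reg(25000) = 25000 * 128 / 1000 = 3200 = 0x0c80
 *   back:    tmp102_reg_to_mC(0x0c80) = (0x0c80 & ~1) * 1000 / 128 = 25000
 * The chip returns the high byte first over SMBus, hence the swab16() in
 * tmp102_read_reg() and tmp102_write_reg().
 */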
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index d14a1af9f550..ad8d535235c5 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -92,17 +92,6 @@ static const u8 TMP411_TEMP_HIGHEST_LSB[2] = { 0x33, 0x37 };
92#define TMP411_DEVICE_ID 0x12 92#define TMP411_DEVICE_ID 0x12
93 93
94/* 94/*
95 * Functions declarations
96 */
97
98static int tmp401_probe(struct i2c_client *client,
99 const struct i2c_device_id *id);
100static int tmp401_detect(struct i2c_client *client,
101 struct i2c_board_info *info);
102static int tmp401_remove(struct i2c_client *client);
103static struct tmp401_data *tmp401_update_device(struct device *dev);
104
105/*
106 * Driver data (common to all clients) 95 * Driver data (common to all clients)
107 */ 96 */
108 97
@@ -113,18 +102,6 @@ static const struct i2c_device_id tmp401_id[] = {
113}; 102};
114MODULE_DEVICE_TABLE(i2c, tmp401_id); 103MODULE_DEVICE_TABLE(i2c, tmp401_id);
115 104
116static struct i2c_driver tmp401_driver = {
117 .class = I2C_CLASS_HWMON,
118 .driver = {
119 .name = "tmp401",
120 },
121 .probe = tmp401_probe,
122 .remove = tmp401_remove,
123 .id_table = tmp401_id,
124 .detect = tmp401_detect,
125 .address_list = normal_i2c,
126};
127
128/* 105/*
129 * Client data (each client gets its own) 106 * Client data (each client gets its own)
130 */ 107 */
@@ -194,6 +171,71 @@ static u8 tmp401_crit_temp_to_register(long temp, u8 config)
194 return (temp + 500) / 1000; 171 return (temp + 500) / 1000;
195} 172}
196 173
174static struct tmp401_data *tmp401_update_device_reg16(
175 struct i2c_client *client, struct tmp401_data *data)
176{
177 int i;
178
179 for (i = 0; i < 2; i++) {
180 /*
181 * High byte must be read first immediately followed
182 * by the low byte
183 */
184 data->temp[i] = i2c_smbus_read_byte_data(client,
185 TMP401_TEMP_MSB[i]) << 8;
186 data->temp[i] |= i2c_smbus_read_byte_data(client,
187 TMP401_TEMP_LSB[i]);
188 data->temp_low[i] = i2c_smbus_read_byte_data(client,
189 TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8;
190 data->temp_low[i] |= i2c_smbus_read_byte_data(client,
191 TMP401_TEMP_LOW_LIMIT_LSB[i]);
192 data->temp_high[i] = i2c_smbus_read_byte_data(client,
193 TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8;
194 data->temp_high[i] |= i2c_smbus_read_byte_data(client,
195 TMP401_TEMP_HIGH_LIMIT_LSB[i]);
196 data->temp_crit[i] = i2c_smbus_read_byte_data(client,
197 TMP401_TEMP_CRIT_LIMIT[i]);
198
199 if (data->kind == tmp411) {
200 data->temp_lowest[i] = i2c_smbus_read_byte_data(client,
201 TMP411_TEMP_LOWEST_MSB[i]) << 8;
202 data->temp_lowest[i] |= i2c_smbus_read_byte_data(
203 client, TMP411_TEMP_LOWEST_LSB[i]);
204
205 data->temp_highest[i] = i2c_smbus_read_byte_data(
206 client, TMP411_TEMP_HIGHEST_MSB[i]) << 8;
207 data->temp_highest[i] |= i2c_smbus_read_byte_data(
208 client, TMP411_TEMP_HIGHEST_LSB[i]);
209 }
210 }
211 return data;
212}
213
214static struct tmp401_data *tmp401_update_device(struct device *dev)
215{
216 struct i2c_client *client = to_i2c_client(dev);
217 struct tmp401_data *data = i2c_get_clientdata(client);
218
219 mutex_lock(&data->update_lock);
220
221 if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
222 data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS);
223 data->config = i2c_smbus_read_byte_data(client,
224 TMP401_CONFIG_READ);
225 tmp401_update_device_reg16(client, data);
226
227 data->temp_crit_hyst = i2c_smbus_read_byte_data(client,
228 TMP401_TEMP_CRIT_HYST);
229
230 data->last_updated = jiffies;
231 data->valid = 1;
232 }
233
234 mutex_unlock(&data->update_lock);
235
236 return data;
237}
238
197static ssize_t show_temp_value(struct device *dev, 239static ssize_t show_temp_value(struct device *dev,
198 struct device_attribute *devattr, char *buf) 240 struct device_attribute *devattr, char *buf)
199{ 241{
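
tmp401_update_device_reg16() relies on the chip latching a 16-bit temperature value when its high byte is read, which is why every low-byte read immediately follows the matching high-byte read. The same pattern in isolation, as a rough sketch; read_reg16() and the reg_msb/reg_lsb parameters are illustrative names, not part of the driver:

    /* Sketch: read one latched 16-bit register pair, high byte first.
     * reg_msb/reg_lsb stand in for a pair such as TMP401_TEMP_MSB[i] and
     * TMP401_TEMP_LSB[i]; SMBus error returns are ignored, as above. */
    static u16 read_reg16(struct i2c_client *client, u8 reg_msb, u8 reg_lsb)
    {
            u16 val;

            val = i2c_smbus_read_byte_data(client, reg_msb) << 8;   /* latches LSB */
            val |= i2c_smbus_read_byte_data(client, reg_lsb);
            return val;
    }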
@@ -420,30 +462,36 @@ static ssize_t reset_temp_history(struct device *dev,
420} 462}
421 463
422static struct sensor_device_attribute tmp401_attr[] = { 464static struct sensor_device_attribute tmp401_attr[] = {
423 SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0), 465 SENSOR_ATTR(temp1_input, S_IRUGO, show_temp_value, NULL, 0),
424 SENSOR_ATTR(temp1_min, 0644, show_temp_min, store_temp_min, 0), 466 SENSOR_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp_min,
425 SENSOR_ATTR(temp1_max, 0644, show_temp_max, store_temp_max, 0), 467 store_temp_min, 0),
426 SENSOR_ATTR(temp1_crit, 0644, show_temp_crit, store_temp_crit, 0), 468 SENSOR_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
427 SENSOR_ATTR(temp1_crit_hyst, 0644, show_temp_crit_hyst, 469 store_temp_max, 0),
470 SENSOR_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit,
471 store_temp_crit, 0),
472 SENSOR_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_crit_hyst,
428 store_temp_crit_hyst, 0), 473 store_temp_crit_hyst, 0),
429 SENSOR_ATTR(temp1_min_alarm, 0444, show_status, NULL, 474 SENSOR_ATTR(temp1_min_alarm, S_IRUGO, show_status, NULL,
430 TMP401_STATUS_LOCAL_LOW), 475 TMP401_STATUS_LOCAL_LOW),
431 SENSOR_ATTR(temp1_max_alarm, 0444, show_status, NULL, 476 SENSOR_ATTR(temp1_max_alarm, S_IRUGO, show_status, NULL,
432 TMP401_STATUS_LOCAL_HIGH), 477 TMP401_STATUS_LOCAL_HIGH),
433 SENSOR_ATTR(temp1_crit_alarm, 0444, show_status, NULL, 478 SENSOR_ATTR(temp1_crit_alarm, S_IRUGO, show_status, NULL,
434 TMP401_STATUS_LOCAL_CRIT), 479 TMP401_STATUS_LOCAL_CRIT),
435 SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1), 480 SENSOR_ATTR(temp2_input, S_IRUGO, show_temp_value, NULL, 1),
436 SENSOR_ATTR(temp2_min, 0644, show_temp_min, store_temp_min, 1), 481 SENSOR_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_min,
437 SENSOR_ATTR(temp2_max, 0644, show_temp_max, store_temp_max, 1), 482 store_temp_min, 1),
438 SENSOR_ATTR(temp2_crit, 0644, show_temp_crit, store_temp_crit, 1), 483 SENSOR_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
439 SENSOR_ATTR(temp2_crit_hyst, 0444, show_temp_crit_hyst, NULL, 1), 484 store_temp_max, 1),
440 SENSOR_ATTR(temp2_fault, 0444, show_status, NULL, 485 SENSOR_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit,
486 store_temp_crit, 1),
487 SENSOR_ATTR(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 1),
488 SENSOR_ATTR(temp2_fault, S_IRUGO, show_status, NULL,
441 TMP401_STATUS_REMOTE_OPEN), 489 TMP401_STATUS_REMOTE_OPEN),
442 SENSOR_ATTR(temp2_min_alarm, 0444, show_status, NULL, 490 SENSOR_ATTR(temp2_min_alarm, S_IRUGO, show_status, NULL,
443 TMP401_STATUS_REMOTE_LOW), 491 TMP401_STATUS_REMOTE_LOW),
444 SENSOR_ATTR(temp2_max_alarm, 0444, show_status, NULL, 492 SENSOR_ATTR(temp2_max_alarm, S_IRUGO, show_status, NULL,
445 TMP401_STATUS_REMOTE_HIGH), 493 TMP401_STATUS_REMOTE_HIGH),
446 SENSOR_ATTR(temp2_crit_alarm, 0444, show_status, NULL, 494 SENSOR_ATTR(temp2_crit_alarm, S_IRUGO, show_status, NULL,
447 TMP401_STATUS_REMOTE_CRIT), 495 TMP401_STATUS_REMOTE_CRIT),
448}; 496};
449 497
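
The permission changes in this hunk (and in the tmp411 hunk below) are purely notational: the octal literals are replaced by the <linux/stat.h> macros that expand to the same values, so no sysfs mode actually changes. As a sanity check, assuming BUILD_BUG_ON() from <linux/kernel.h>; the helper name is hypothetical:

    /* Illustrative only: these checks would break the build if the
     * macro replacements changed any mode. */
    static inline void tmp401_mode_check(void)
    {
            BUILD_BUG_ON(S_IRUGO != 0444);                  /* read-only attrs   */
            BUILD_BUG_ON(S_IWUSR != 0200);                  /* write-only attrs  */
            BUILD_BUG_ON((S_IWUSR | S_IRUGO) != 0644);      /* read-write attrs  */
    }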
@@ -455,11 +503,11 @@ static struct sensor_device_attribute tmp401_attr[] = {
455 * and remote channels. 503 * and remote channels.
456 */ 504 */
457static struct sensor_device_attribute tmp411_attr[] = { 505static struct sensor_device_attribute tmp411_attr[] = {
458 SENSOR_ATTR(temp1_highest, 0444, show_temp_highest, NULL, 0), 506 SENSOR_ATTR(temp1_highest, S_IRUGO, show_temp_highest, NULL, 0),
459 SENSOR_ATTR(temp1_lowest, 0444, show_temp_lowest, NULL, 0), 507 SENSOR_ATTR(temp1_lowest, S_IRUGO, show_temp_lowest, NULL, 0),
460 SENSOR_ATTR(temp2_highest, 0444, show_temp_highest, NULL, 1), 508 SENSOR_ATTR(temp2_highest, S_IRUGO, show_temp_highest, NULL, 1),
461 SENSOR_ATTR(temp2_lowest, 0444, show_temp_lowest, NULL, 1), 509 SENSOR_ATTR(temp2_lowest, S_IRUGO, show_temp_lowest, NULL, 1),
462 SENSOR_ATTR(temp_reset_history, 0200, NULL, reset_temp_history, 0), 510 SENSOR_ATTR(temp_reset_history, S_IWUSR, NULL, reset_temp_history, 0),
463}; 511};
464 512
465/* 513/*
@@ -529,6 +577,27 @@ static int tmp401_detect(struct i2c_client *client,
529 return 0; 577 return 0;
530} 578}
531 579
580static int tmp401_remove(struct i2c_client *client)
581{
582 struct tmp401_data *data = i2c_get_clientdata(client);
583 int i;
584
585 if (data->hwmon_dev)
586 hwmon_device_unregister(data->hwmon_dev);
587
588 for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++)
589 device_remove_file(&client->dev, &tmp401_attr[i].dev_attr);
590
591 if (data->kind == tmp411) {
592 for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++)
593 device_remove_file(&client->dev,
594 &tmp411_attr[i].dev_attr);
595 }
596
597 kfree(data);
598 return 0;
599}
600
532static int tmp401_probe(struct i2c_client *client, 601static int tmp401_probe(struct i2c_client *client,
533 const struct i2c_device_id *id) 602 const struct i2c_device_id *id)
534{ 603{
@@ -581,91 +650,17 @@ exit_remove:
581 return err; 650 return err;
582} 651}
583 652
584static int tmp401_remove(struct i2c_client *client) 653static struct i2c_driver tmp401_driver = {
585{ 654 .class = I2C_CLASS_HWMON,
586 struct tmp401_data *data = i2c_get_clientdata(client); 655 .driver = {
587 int i; 656 .name = "tmp401",
588 657 },
589 if (data->hwmon_dev) 658 .probe = tmp401_probe,
590 hwmon_device_unregister(data->hwmon_dev); 659 .remove = tmp401_remove,
591 660 .id_table = tmp401_id,
592 for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++) 661 .detect = tmp401_detect,
593 device_remove_file(&client->dev, &tmp401_attr[i].dev_attr); 662 .address_list = normal_i2c,
594 663};
595 if (data->kind == tmp411) {
596 for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++)
597 device_remove_file(&client->dev,
598 &tmp411_attr[i].dev_attr);
599 }
600
601 kfree(data);
602 return 0;
603}
604
605static struct tmp401_data *tmp401_update_device_reg16(
606 struct i2c_client *client, struct tmp401_data *data)
607{
608 int i;
609
610 for (i = 0; i < 2; i++) {
611 /*
612 * High byte must be read first immediately followed
613 * by the low byte
614 */
615 data->temp[i] = i2c_smbus_read_byte_data(client,
616 TMP401_TEMP_MSB[i]) << 8;
617 data->temp[i] |= i2c_smbus_read_byte_data(client,
618 TMP401_TEMP_LSB[i]);
619 data->temp_low[i] = i2c_smbus_read_byte_data(client,
620 TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8;
621 data->temp_low[i] |= i2c_smbus_read_byte_data(client,
622 TMP401_TEMP_LOW_LIMIT_LSB[i]);
623 data->temp_high[i] = i2c_smbus_read_byte_data(client,
624 TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8;
625 data->temp_high[i] |= i2c_smbus_read_byte_data(client,
626 TMP401_TEMP_HIGH_LIMIT_LSB[i]);
627 data->temp_crit[i] = i2c_smbus_read_byte_data(client,
628 TMP401_TEMP_CRIT_LIMIT[i]);
629
630 if (data->kind == tmp411) {
631 data->temp_lowest[i] = i2c_smbus_read_byte_data(client,
632 TMP411_TEMP_LOWEST_MSB[i]) << 8;
633 data->temp_lowest[i] |= i2c_smbus_read_byte_data(
634 client, TMP411_TEMP_LOWEST_LSB[i]);
635
636 data->temp_highest[i] = i2c_smbus_read_byte_data(
637 client, TMP411_TEMP_HIGHEST_MSB[i]) << 8;
638 data->temp_highest[i] |= i2c_smbus_read_byte_data(
639 client, TMP411_TEMP_HIGHEST_LSB[i]);
640 }
641 }
642 return data;
643}
644
645static struct tmp401_data *tmp401_update_device(struct device *dev)
646{
647 struct i2c_client *client = to_i2c_client(dev);
648 struct tmp401_data *data = i2c_get_clientdata(client);
649
650 mutex_lock(&data->update_lock);
651
652 if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
653 data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS);
654 data->config = i2c_smbus_read_byte_data(client,
655 TMP401_CONFIG_READ);
656 tmp401_update_device_reg16(client, data);
657
658 data->temp_crit_hyst = i2c_smbus_read_byte_data(client,
659 TMP401_TEMP_CRIT_HYST);
660
661 data->last_updated = jiffies;
662 data->valid = 1;
663 }
664
665 mutex_unlock(&data->update_lock);
666
667 return data;
668}
669 664
670static int __init tmp401_init(void) 665static int __init tmp401_init(void)
671{ 666{
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index b9e517de6a82..3feaa26410be 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/zorro.h> 17#include <linux/zorro.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/platform_device.h>
19 20
20#include <asm/setup.h> 21#include <asm/setup.h>
21#include <asm/amigahw.h> 22#include <asm/amigahw.h>
@@ -24,15 +25,6 @@
24 25
25 26
26 /* 27 /*
27 * Bases of the IDE interfaces
28 */
29
30#define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */
31#define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */
32
33#define GAYLE_IDEREG_SIZE 0x2000
34
35 /*
36 * Offsets from one of the above bases 28 * Offsets from one of the above bases
37 */ 29 */
38 30
@@ -68,20 +60,20 @@ MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
68 60
69static int gayle_test_irq(ide_hwif_t *hwif) 61static int gayle_test_irq(ide_hwif_t *hwif)
70{ 62{
71 unsigned char ch; 63 unsigned char ch;
72 64
73 ch = z_readb(hwif->io_ports.irq_addr); 65 ch = z_readb(hwif->io_ports.irq_addr);
74 if (!(ch & GAYLE_IRQ_IDE)) 66 if (!(ch & GAYLE_IRQ_IDE))
75 return 0; 67 return 0;
76 return 1; 68 return 1;
77} 69}
78 70
79static void gayle_a1200_clear_irq(ide_drive_t *drive) 71static void gayle_a1200_clear_irq(ide_drive_t *drive)
80{ 72{
81 ide_hwif_t *hwif = drive->hwif; 73 ide_hwif_t *hwif = drive->hwif;
82 74
83 (void)z_readb(hwif->io_ports.status_addr); 75 (void)z_readb(hwif->io_ports.status_addr);
84 z_writeb(0x7c, hwif->io_ports.irq_addr); 76 z_writeb(0x7c, hwif->io_ports.irq_addr);
85} 77}
86 78
87static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base, 79static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base,
@@ -122,64 +114,89 @@ static const struct ide_port_info gayle_port_info = {
122 * Probe for a Gayle IDE interface (and optionally for an IDE doubler) 114 * Probe for a Gayle IDE interface (and optionally for an IDE doubler)
123 */ 115 */
124 116
125static int __init gayle_init(void) 117static int __init amiga_gayle_ide_probe(struct platform_device *pdev)
126{ 118{
127 unsigned long phys_base, res_start, res_n; 119 struct resource *res;
128 unsigned long base, ctrlport, irqport; 120 struct gayle_ide_platform_data *pdata;
129 int a4000, i, rc; 121 unsigned long base, ctrlport, irqport;
130 struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS]; 122 unsigned int i;
131 struct ide_port_info d = gayle_port_info; 123 int error;
132 124 struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS];
133 if (!MACH_IS_AMIGA) 125 struct ide_port_info d = gayle_port_info;
134 return -ENODEV; 126 struct ide_host *host;
135 127
136 if ((a4000 = AMIGAHW_PRESENT(A4000_IDE)) || AMIGAHW_PRESENT(A1200_IDE)) 128 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
137 goto found; 129 if (!res)
138 130 return -ENODEV;
139#ifdef CONFIG_ZORRO 131
140 if (zorro_find_device(ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE, 132 if (!request_mem_region(res->start, resource_size(res), "IDE"))
141 NULL)) 133 return -EBUSY;
142 goto found; 134
143#endif 135 pdata = pdev->dev.platform_data;
144 return -ENODEV; 136 pr_info("ide: Gayle IDE controller (A%u style%s)\n",
145 137 pdata->explicit_ack ? 1200 : 4000,
146found: 138 ide_doubler ? ", IDE doubler" : "");
147 printk(KERN_INFO "ide: Gayle IDE controller (A%d style%s)\n", 139
148 a4000 ? 4000 : 1200, 140 base = (unsigned long)ZTWO_VADDR(pdata->base);
149 ide_doubler ? ", IDE doubler" : ""); 141 ctrlport = 0;
150 142 irqport = (unsigned long)ZTWO_VADDR(pdata->irqport);
151 if (a4000) { 143 if (pdata->explicit_ack)
152 phys_base = GAYLE_BASE_4000; 144 d.port_ops = &gayle_a1200_port_ops;
153 irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000); 145 else
154 d.port_ops = &gayle_a4000_port_ops; 146 d.port_ops = &gayle_a4000_port_ops;
155 } else { 147
156 phys_base = GAYLE_BASE_1200; 148 for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++, base += GAYLE_NEXT_PORT) {
157 irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_1200); 149 if (GAYLE_HAS_CONTROL_REG)
158 d.port_ops = &gayle_a1200_port_ops; 150 ctrlport = base + GAYLE_CONTROL;
151
152 gayle_setup_ports(&hw[i], base, ctrlport, irqport);
153 hws[i] = &hw[i];
159 } 154 }
160 155
161 res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1); 156 error = ide_host_add(&d, hws, i, &host);
162 res_n = GAYLE_IDEREG_SIZE; 157 if (error)
158 goto out;
163 159
164 if (!request_mem_region(res_start, res_n, "IDE")) 160 platform_set_drvdata(pdev, host);
165 return -EBUSY; 161 return 0;
166 162
167 for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) { 163out:
168 base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT); 164 release_mem_region(res->start, resource_size(res));
169 ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0; 165 return error;
166}
167
168static int __exit amiga_gayle_ide_remove(struct platform_device *pdev)
169{
170 struct ide_host *host = platform_get_drvdata(pdev);
171 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
172
173 ide_host_remove(host);
174 release_mem_region(res->start, resource_size(res));
175 return 0;
176}
170 177
171 gayle_setup_ports(&hw[i], base, ctrlport, irqport); 178static struct platform_driver amiga_gayle_ide_driver = {
179 .remove = __exit_p(amiga_gayle_ide_remove),
180 .driver = {
181 .name = "amiga-gayle-ide",
182 .owner = THIS_MODULE,
183 },
184};
172 185
173 hws[i] = &hw[i]; 186static int __init amiga_gayle_ide_init(void)
174 } 187{
188 return platform_driver_probe(&amiga_gayle_ide_driver,
189 amiga_gayle_ide_probe);
190}
175 191
176 rc = ide_host_add(&d, hws, i, NULL); 192module_init(amiga_gayle_ide_init);
177 if (rc)
178 release_mem_region(res_start, res_n);
179 193
180 return rc; 194static void __exit amiga_gayle_ide_exit(void)
195{
196 platform_driver_unregister(&amiga_gayle_ide_driver);
181} 197}
182 198
183module_init(gayle_init); 199module_exit(amiga_gayle_ide_exit);
184 200
185MODULE_LICENSE("GPL"); 201MODULE_LICENSE("GPL");
202MODULE_ALIAS("platform:amiga-gayle-ide");
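
The rewritten probe takes its register addresses from platform data instead of the removed GAYLE_BASE_* constants, and the MODULE_ALIAS above lets the driver bind to a platform device named "amiga-gayle-ide" registered by the Amiga platform setup code. A sketch of the platform data the probe expects, inferred only from the pdata->base, pdata->irqport and pdata->explicit_ack accesses shown above; the real definition lives in the m68k platform headers and its exact types may differ:

    /* Inferred from the probe above; field names match the accesses
     * shown, the types are an assumption. */
    struct gayle_ide_platform_data {
            unsigned long   base;           /* physical base of the IDE registers */
            unsigned long   irqport;        /* physical address of the IRQ register */
            int             explicit_ack;   /* A1200-style interrupt ack required */
    };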
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig
index f15e90a453d1..fb5c5186d4aa 100644
--- a/drivers/idle/Kconfig
+++ b/drivers/idle/Kconfig
@@ -1,3 +1,14 @@
1config INTEL_IDLE
2 tristate "Cpuidle Driver for Intel Processors"
3 depends on CPU_IDLE
4 depends on X86
5 depends on CPU_SUP_INTEL
6 depends on EXPERIMENTAL
7 help
8 Enable intel_idle, a cpuidle driver that includes knowledge of
9 native Intel hardware idle features. The acpi_idle driver
10 can be configured at the same time, in order to handle
11 processors intel_idle does not support.
1 12
2menu "Memory power savings" 13menu "Memory power savings"
3depends on X86_64 14depends on X86_64
diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile
index 5f68fc377e21..23d295cf10f2 100644
--- a/drivers/idle/Makefile
+++ b/drivers/idle/Makefile
@@ -1,2 +1,3 @@
1obj-$(CONFIG_I7300_IDLE) += i7300_idle.o 1obj-$(CONFIG_I7300_IDLE) += i7300_idle.o
2obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
2 3
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
new file mode 100755
index 000000000000..54f0fb4cd5d2
--- /dev/null
+++ b/drivers/idle/intel_idle.c
@@ -0,0 +1,461 @@
1/*
2 * intel_idle.c - native hardware idle loop for modern Intel processors
3 *
4 * Copyright (c) 2010, Intel Corporation.
5 * Len Brown <len.brown@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20
21/*
22 * intel_idle is a cpuidle driver that loads on specific Intel processors
23 * in lieu of the legacy ACPI processor_idle driver. The intent is to
24 * make Linux more efficient on these processors, as intel_idle knows
25 * more than ACPI does, and to make Linux more immune to ACPI BIOS bugs.
26 */
27
28/*
29 * Design Assumptions
30 *
31 * All CPUs have the same idle states as the boot CPU
32 *
33 * Chipset BM_STS (bus master status) bit is a NOP
34 * for preventing entry into deep C-states
35 */
36
37/*
38 * Known limitations
39 *
40 * The driver currently initializes for each online CPU upon modprobe.
41 * It is unaware of processors subsequently hot-added to the system.
42 * This means that if you boot with maxcpus=n and later online
43 * processors above n, those processors will use C1 only.
44 *
45 * ACPI has a .suspend hack to turn off deep C-states during suspend
46 * to avoid complications with the lapic timer workaround.
47 * Suspend has not shown issues so far, but the same workaround may be needed here.
48 *
49 * There is currently no kernel-based automatic probing/loading mechanism
50 * if the driver is built as a module.
51 */
52
53/* un-comment DEBUG to enable pr_debug() statements */
54#define DEBUG
55
56#include <linux/kernel.h>
57#include <linux/cpuidle.h>
58#include <linux/clockchips.h>
59#include <linux/hrtimer.h> /* ktime_get_real() */
60#include <trace/events/power.h>
61#include <linux/sched.h>
62
63#define INTEL_IDLE_VERSION "0.4"
64#define PREFIX "intel_idle: "
65
66#define MWAIT_SUBSTATE_MASK (0xf)
67#define MWAIT_CSTATE_MASK (0xf)
68#define MWAIT_SUBSTATE_SIZE (4)
69#define MWAIT_MAX_NUM_CSTATES 8
70#define CPUID_MWAIT_LEAF (5)
71#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
72#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
73
74static struct cpuidle_driver intel_idle_driver = {
75 .name = "intel_idle",
76 .owner = THIS_MODULE,
77};
78/* intel_idle.max_cstate=0 disables driver */
79static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;
80static int power_policy = 7; /* 0 = max perf; 15 = max powersave */
81
82static unsigned int substates;
83static int (*choose_substate)(int);
84
85/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
86static unsigned int lapic_timer_reliable_states;
87
88static struct cpuidle_device *intel_idle_cpuidle_devices;
89static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
90
91static struct cpuidle_state *cpuidle_state_table;
92
93/*
94 * States are indexed by the cstate number,
95 * which is also the index into the MWAIT hint array.
96 * Thus C0 is a dummy.
97 */
98static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
99 { /* MWAIT C0 */ },
100 { /* MWAIT C1 */
101 .name = "NHM-C1",
102 .desc = "MWAIT 0x00",
103 .driver_data = (void *) 0x00,
104 .flags = CPUIDLE_FLAG_TIME_VALID,
105 .exit_latency = 3,
106 .power_usage = 1000,
107 .target_residency = 6,
108 .enter = &intel_idle },
109 { /* MWAIT C2 */
110 .name = "NHM-C3",
111 .desc = "MWAIT 0x10",
112 .driver_data = (void *) 0x10,
113 .flags = CPUIDLE_FLAG_TIME_VALID,
114 .exit_latency = 20,
115 .power_usage = 500,
116 .target_residency = 80,
117 .enter = &intel_idle },
118 { /* MWAIT C3 */
119 .name = "NHM-C6",
120 .desc = "MWAIT 0x20",
121 .driver_data = (void *) 0x20,
122 .flags = CPUIDLE_FLAG_TIME_VALID,
123 .exit_latency = 200,
124 .power_usage = 350,
125 .target_residency = 800,
126 .enter = &intel_idle },
127};
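
The array index is the MWAIT C-state number and .driver_data is the raw MWAIT hint; intel_idle() below recovers the index from the hint, so the marketing names in the strings need not match the table slot. A worked mapping, using the formula from intel_idle():

    /* cstate = ((hint >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1
     *   hint 0x00: ((0x00 >> 4) & 0xf) + 1 = 1  -> slot 1, "NHM-C1"
     *   hint 0x10: ((0x10 >> 4) & 0xf) + 1 = 2  -> slot 2, "NHM-C3"
     *   hint 0x20: ((0x20 >> 4) & 0xf) + 1 = 3  -> slot 3, "NHM-C6"
     * Slot 0 (C0) is the running state and stays a dummy entry. */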
128
129static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
130 { /* MWAIT C0 */ },
131 { /* MWAIT C1 */
132 .name = "ATM-C1",
133 .desc = "MWAIT 0x00",
134 .driver_data = (void *) 0x00,
135 .flags = CPUIDLE_FLAG_TIME_VALID,
136 .exit_latency = 1,
137 .power_usage = 1000,
138 .target_residency = 4,
139 .enter = &intel_idle },
140 { /* MWAIT C2 */
141 .name = "ATM-C2",
142 .desc = "MWAIT 0x10",
143 .driver_data = (void *) 0x10,
144 .flags = CPUIDLE_FLAG_TIME_VALID,
145 .exit_latency = 20,
146 .power_usage = 500,
147 .target_residency = 80,
148 .enter = &intel_idle },
149 { /* MWAIT C3 */ },
150 { /* MWAIT C4 */
151 .name = "ATM-C4",
152 .desc = "MWAIT 0x30",
153 .driver_data = (void *) 0x30,
154 .flags = CPUIDLE_FLAG_TIME_VALID,
155 .exit_latency = 100,
156 .power_usage = 250,
157 .target_residency = 400,
158 .enter = &intel_idle },
159 { /* MWAIT C5 */ },
160 { /* MWAIT C6 */
161 .name = "ATM-C6",
162 .desc = "MWAIT 0x40",
163 .driver_data = (void *) 0x40,
164 .flags = CPUIDLE_FLAG_TIME_VALID,
165 .exit_latency = 200,
166 .power_usage = 150,
167 .target_residency = 800,
168 .enter = NULL }, /* disabled */
169};
170
171/*
172 * choose_tunable_substate()
173 *
174 * Run-time decision on which C-state substate to invoke
175 * If power_policy = 0, choose shallowest substate (0)
176 * If power_policy = 15, choose deepest substate
177 * If power_policy = middle, choose middle substate etc.
178 */
179static int choose_tunable_substate(int cstate)
180{
181 unsigned int num_substates;
182 unsigned int substate_choice;
183
184 power_policy &= 0xF; /* valid range: 0-15 */
185 cstate &= 7; /* valid range: 0-7 */
186
187 num_substates = (substates >> ((cstate) * 4)) & MWAIT_SUBSTATE_MASK;
188
189 if (num_substates <= 1)
190 return 0;
191
192 substate_choice = ((power_policy + (power_policy + 1) *
193 (num_substates - 1)) / 16);
194
195 return substate_choice;
196}
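
A worked example of the interpolation above, assuming a C-state that advertises four sub-states (num_substates = 4):

    /*   power_policy =  0: ( 0 +  1 * 3) / 16 = 0   (shallowest sub-state)
     *   power_policy =  7: ( 7 +  8 * 3) / 16 = 1
     *   power_policy = 15: (15 + 16 * 3) / 16 = 3   (deepest sub-state)
     * Integer division keeps the result within 0 .. num_substates - 1. */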
197
198/*
199 * choose_zero_substate()
200 */
201static int choose_zero_substate(int cstate)
202{
203 return 0;
204}
205
206/**
207 * intel_idle
208 * @dev: cpuidle_device
209 * @state: cpuidle state
210 *
211 */
212static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
213{
214 unsigned long ecx = 1; /* break on interrupt flag */
215 unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
216 unsigned int cstate;
217 ktime_t kt_before, kt_after;
218 s64 usec_delta;
219 int cpu = smp_processor_id();
220
221 cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
222
223 eax = eax + (choose_substate)(cstate);
224
225 local_irq_disable();
226
227 if (!(lapic_timer_reliable_states & (1 << (cstate))))
228 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
229
230 kt_before = ktime_get_real();
231
232 stop_critical_timings();
233#ifndef MODULE
234 trace_power_start(POWER_CSTATE, (eax >> 4) + 1);
235#endif
236 if (!need_resched()) {
237
238 __monitor((void *)&current_thread_info()->flags, 0, 0);
239 smp_mb();
240 if (!need_resched())
241 __mwait(eax, ecx);
242 }
243
244 start_critical_timings();
245
246 kt_after = ktime_get_real();
247 usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before));
248
249 local_irq_enable();
250
251 if (!(lapic_timer_reliable_states & (1 << (cstate))))
252 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
253
254 return usec_delta;
255}
256
257/*
258 * intel_idle_probe()
259 */
260static int intel_idle_probe(void)
261{
262 unsigned int eax, ebx, ecx, edx;
263
264 if (max_cstate == 0) {
265 pr_debug(PREFIX "disabled\n");
266 return -EPERM;
267 }
268
269 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
270 return -ENODEV;
271
272 if (!boot_cpu_has(X86_FEATURE_MWAIT))
273 return -ENODEV;
274
275 if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
276 return -ENODEV;
277
278 cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
279
280 if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
281 !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
282 return -ENODEV;
283#ifdef DEBUG
284 if (substates == 0) /* can over-ride via modparam */
285#endif
286 substates = edx;
287
288 pr_debug(PREFIX "MWAIT substates: 0x%x\n", substates);
289
290 if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
291 lapic_timer_reliable_states = 0xFFFFFFFF;
292
293 if (boot_cpu_data.x86 != 6) /* family 6 */
294 return -ENODEV;
295
296 switch (boot_cpu_data.x86_model) {
297
298 case 0x1A: /* Core i7, Xeon 5500 series */
299 case 0x1E: /* Core i7 and i5 Processor - Lynnfield Jasper Forest */
300 case 0x1F: /* Core i7 and i5 Processor - Nehalem */
301 case 0x2E: /* Nehalem-EX Xeon */
302 lapic_timer_reliable_states = (1 << 1); /* C1 */
303
304 case 0x25: /* Westmere */
305 case 0x2C: /* Westmere */
306 cpuidle_state_table = nehalem_cstates;
307 choose_substate = choose_tunable_substate;
308 break;
309
310 case 0x1C: /* 28 - Atom Processor */
311 lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
312 cpuidle_state_table = atom_cstates;
313 choose_substate = choose_zero_substate;
314 break;
315#ifdef FUTURE_USE
316 case 0x17: /* 23 - Core 2 Duo */
317 lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */
318#endif
319
320 default:
321 pr_debug(PREFIX "does not run on family %d model %d\n",
322 boot_cpu_data.x86, boot_cpu_data.x86_model);
323 return -ENODEV;
324 }
325
326 pr_debug(PREFIX "v" INTEL_IDLE_VERSION
327 " model 0x%X\n", boot_cpu_data.x86_model);
328
329 pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
330 lapic_timer_reliable_states);
331 return 0;
332}
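
The sub-state counts come from CPUID leaf 5 (CPUID_MWAIT_LEAF): EDX packs one 4-bit count per C-state, which both the probe above and intel_idle_cpuidle_devices_init() below unpack with the same shift-and-mask. A minimal decode using the macros defined at the top of this file; the example EDX value is made up purely for illustration:

    /* Sketch: dump MWAIT sub-state counts from a CPUID.05H:EDX value,
     * e.g. decode_mwait_substates(0x00112220) -- a hypothetical value,
     * not real hardware data. */
    static void decode_mwait_substates(unsigned int edx)
    {
            int cstate;

            for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; cstate++) {
                    unsigned int n = (edx >> (cstate * MWAIT_SUBSTATE_SIZE))
                                            & MWAIT_SUBSTATE_MASK;
                    if (n)
                            pr_debug(PREFIX "C%d: %u MWAIT sub-state(s)\n",
                                     cstate, n);
            }
    }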
333
334/*
335 * intel_idle_cpuidle_devices_uninit()
336 * unregister, free cpuidle_devices
337 */
338static void intel_idle_cpuidle_devices_uninit(void)
339{
340 int i;
341 struct cpuidle_device *dev;
342
343 for_each_online_cpu(i) {
344 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
345 cpuidle_unregister_device(dev);
346 }
347
348 free_percpu(intel_idle_cpuidle_devices);
349 return;
350}
351/*
352 * intel_idle_cpuidle_devices_init()
353 * allocate, initialize, register cpuidle_devices
354 */
355static int intel_idle_cpuidle_devices_init(void)
356{
357 int i, cstate;
358 struct cpuidle_device *dev;
359
360 intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
361 if (intel_idle_cpuidle_devices == NULL)
362 return -ENOMEM;
363
364 for_each_online_cpu(i) {
365 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
366
367 dev->state_count = 1;
368
369 for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
370 int num_substates;
371
372 if (cstate > max_cstate) {
373 printk(PREFIX "max_cstate %d reached\n",
374 max_cstate);
375 break;
376 }
377
378 /* does the state exist in CPUID.MWAIT? */
379 num_substates = (substates >> ((cstate) * 4))
380 & MWAIT_SUBSTATE_MASK;
381 if (num_substates == 0)
382 continue;
383 /* is the state not enabled? */
384 if (cpuidle_state_table[cstate].enter == NULL) {
385 /* does the driver not know about the state? */
386 if (*cpuidle_state_table[cstate].name == '\0')
387 pr_debug(PREFIX "unaware of model 0x%x"
388 " MWAIT %d please"
389 " contact lenb@kernel.org",
390 boot_cpu_data.x86_model, cstate);
391 continue;
392 }
393
394 if ((cstate > 2) &&
395 !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
396 mark_tsc_unstable("TSC halts in idle"
397 " states deeper than C2");
398
399 dev->states[dev->state_count] = /* structure copy */
400 cpuidle_state_table[cstate];
401
402 dev->state_count += 1;
403 }
404
405 dev->cpu = i;
406 if (cpuidle_register_device(dev)) {
407 pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
408 i);
409 intel_idle_cpuidle_devices_uninit();
410 return -EIO;
411 }
412 }
413
414 return 0;
415}
416
417
418static int __init intel_idle_init(void)
419{
420 int retval;
421
422 retval = intel_idle_probe();
423 if (retval)
424 return retval;
425
426 retval = cpuidle_register_driver(&intel_idle_driver);
427 if (retval) {
428 printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
429 cpuidle_get_driver()->name);
430 return retval;
431 }
432
433 retval = intel_idle_cpuidle_devices_init();
434 if (retval) {
435 cpuidle_unregister_driver(&intel_idle_driver);
436 return retval;
437 }
438
439 return 0;
440}
441
442static void __exit intel_idle_exit(void)
443{
444 intel_idle_cpuidle_devices_uninit();
445 cpuidle_unregister_driver(&intel_idle_driver);
446
447 return;
448}
449
450module_init(intel_idle_init);
451module_exit(intel_idle_exit);
452
453module_param(power_policy, int, 0644);
454module_param(max_cstate, int, 0444);
455#ifdef DEBUG
456module_param(substates, int, 0444);
457#endif
458
459MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
460MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
461MODULE_LICENSE("GPL");
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 9fd4a0d3206e..adaefabc40e9 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -1824,7 +1824,7 @@ static int dv1394_open(struct inode *inode, struct file *file)
1824 "and will not be available in the new firewire driver stack. " 1824 "and will not be available in the new firewire driver stack. "
1825 "Try libraw1394 based programs instead.\n", current->comm); 1825 "Try libraw1394 based programs instead.\n", current->comm);
1826 1826
1827 return 0; 1827 return nonseekable_open(inode, file);
1828} 1828}
1829 1829
1830 1830
@@ -2153,17 +2153,18 @@ static struct cdev dv1394_cdev;
2153static const struct file_operations dv1394_fops= 2153static const struct file_operations dv1394_fops=
2154{ 2154{
2155 .owner = THIS_MODULE, 2155 .owner = THIS_MODULE,
2156 .poll = dv1394_poll, 2156 .poll = dv1394_poll,
2157 .unlocked_ioctl = dv1394_ioctl, 2157 .unlocked_ioctl = dv1394_ioctl,
2158#ifdef CONFIG_COMPAT 2158#ifdef CONFIG_COMPAT
2159 .compat_ioctl = dv1394_compat_ioctl, 2159 .compat_ioctl = dv1394_compat_ioctl,
2160#endif 2160#endif
2161 .mmap = dv1394_mmap, 2161 .mmap = dv1394_mmap,
2162 .open = dv1394_open, 2162 .open = dv1394_open,
2163 .write = dv1394_write, 2163 .write = dv1394_write,
2164 .read = dv1394_read, 2164 .read = dv1394_read,
2165 .release = dv1394_release, 2165 .release = dv1394_release,
2166 .fasync = dv1394_fasync, 2166 .fasync = dv1394_fasync,
2167 .llseek = no_llseek,
2167}; 2168};
2168 2169
2169 2170
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 8aa56ac07e29..b563d5e9fa2e 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2834,7 +2834,7 @@ static int raw1394_open(struct inode *inode, struct file *file)
2834 2834
2835 file->private_data = fi; 2835 file->private_data = fi;
2836 2836
2837 return 0; 2837 return nonseekable_open(inode, file);
2838} 2838}
2839 2839
2840static int raw1394_release(struct inode *inode, struct file *file) 2840static int raw1394_release(struct inode *inode, struct file *file)
@@ -3035,6 +3035,7 @@ static const struct file_operations raw1394_fops = {
3035 .poll = raw1394_poll, 3035 .poll = raw1394_poll,
3036 .open = raw1394_open, 3036 .open = raw1394_open,
3037 .release = raw1394_release, 3037 .release = raw1394_release,
3038 .llseek = no_llseek,
3038}; 3039};
3039 3040
3040static int __init init_raw1394(void) 3041static int __init init_raw1394(void)
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 949064a05675..a42bd6893bcf 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1239,7 +1239,7 @@ static int video1394_open(struct inode *inode, struct file *file)
1239 ctx->current_ctx = NULL; 1239 ctx->current_ctx = NULL;
1240 file->private_data = ctx; 1240 file->private_data = ctx;
1241 1241
1242 return 0; 1242 return nonseekable_open(inode, file);
1243} 1243}
1244 1244
1245static int video1394_release(struct inode *inode, struct file *file) 1245static int video1394_release(struct inode *inode, struct file *file)
@@ -1287,7 +1287,8 @@ static const struct file_operations video1394_fops=
1287 .poll = video1394_poll, 1287 .poll = video1394_poll,
1288 .mmap = video1394_mmap, 1288 .mmap = video1394_mmap,
1289 .open = video1394_open, 1289 .open = video1394_open,
1290 .release = video1394_release 1290 .release = video1394_release,
1291 .llseek = no_llseek,
1291}; 1292};
1292 1293
1293/*** HOTPLUG STUFF **********************************************************/ 1294/*** HOTPLUG STUFF **********************************************************/
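
The three ieee1394 changes above follow one pattern for character devices whose file position is meaningless: open() returns through nonseekable_open(), and the file_operations gains .llseek = no_llseek so lseek() fails instead of silently "succeeding". A minimal sketch of the pair on a generic driver; my_open/my_fops are hypothetical names:

    #include <linux/fs.h>
    #include <linux/module.h>

    /* Hypothetical driver showing the same conversion. */
    static int my_open(struct inode *inode, struct file *file)
    {
            /* ... driver-specific setup ... */
            return nonseekable_open(inode, file);   /* mark the file non-seekable */
    }

    static const struct file_operations my_fops = {
            .owner  = THIS_MODULE,
            .open   = my_open,
            .llseek = no_llseek,    /* lseek() now returns -ESPIPE */
    };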
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 46474842cfe9..08f948df8fa9 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -706,14 +706,9 @@ static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len)
706 if (!len) 706 if (!len)
707 return 0; 707 return 0;
708 708
709 data = kmalloc(len, GFP_KERNEL); 709 data = memdup_user((void __user *)(unsigned long)src, len);
710 if (!data) 710 if (IS_ERR(data))
711 return -ENOMEM; 711 return PTR_ERR(data);
712
713 if (copy_from_user(data, (void __user *)(unsigned long)src, len)) {
714 kfree(data);
715 return -EFAULT;
716 }
717 712
718 *dest = data; 713 *dest = data;
719 return 0; 714 return 0;
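
memdup_user() folds the kmalloc()/copy_from_user()/kfree() dance into one call: it returns the freshly allocated kernel copy on success and an ERR_PTR() (-ENOMEM or -EFAULT) on failure, which is why the error path above switches from a plain errno to PTR_ERR(data). A minimal sketch of the idiom with hypothetical names:

    #include <linux/err.h>
    #include <linux/string.h>       /* memdup_user() */
    #include <linux/uaccess.h>

    /* Hypothetical helper showing the memdup_user() pattern adopted
     * above; the caller owns the returned buffer and must kfree() it. */
    static int copy_request(const void __user *ubuf, size_t len, void **out)
    {
            void *buf;

            buf = memdup_user(ubuf, len);   /* kmalloc() + copy_from_user() */
            if (IS_ERR(buf))
                    return PTR_ERR(buf);    /* -ENOMEM or -EFAULT */

            *out = buf;
            return 0;
    }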
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 07cae552cafb..e571e60ecb88 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -847,7 +847,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb,
847 ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu); 847 ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu);
848 if (!create_comp_task(pool, cpu)) { 848 if (!create_comp_task(pool, cpu)) {
849 ehca_gen_err("Can't create comp_task for cpu: %x", cpu); 849 ehca_gen_err("Can't create comp_task for cpu: %x", cpu);
850 return NOTIFY_BAD; 850 return notifier_from_errno(-ENOMEM);
851 } 851 }
852 break; 852 break;
853 case CPU_UP_CANCELED: 853 case CPU_UP_CANCELED:
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 755470440ef1..edef8527eb34 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -144,10 +144,11 @@ static ssize_t dev_counters_read(struct file *file, char __user *buf,
144 size_t count, loff_t *ppos) 144 size_t count, loff_t *ppos)
145{ 145{
146 u64 *counters; 146 u64 *counters;
147 size_t avail;
147 struct qib_devdata *dd = private2dd(file); 148 struct qib_devdata *dd = private2dd(file);
148 149
149 return simple_read_from_buffer(buf, count, ppos, counters, 150 avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters);
150 dd->f_read_cntrs(dd, *ppos, NULL, &counters)); 151 return simple_read_from_buffer(buf, count, ppos, counters, avail);
151} 152}
152 153
153/* read the per-device counters */ 154/* read the per-device counters */
@@ -155,10 +156,11 @@ static ssize_t dev_names_read(struct file *file, char __user *buf,
155 size_t count, loff_t *ppos) 156 size_t count, loff_t *ppos)
156{ 157{
157 char *names; 158 char *names;
159 size_t avail;
158 struct qib_devdata *dd = private2dd(file); 160 struct qib_devdata *dd = private2dd(file);
159 161
160 return simple_read_from_buffer(buf, count, ppos, names, 162 avail = dd->f_read_cntrs(dd, *ppos, &names, NULL);
161 dd->f_read_cntrs(dd, *ppos, &names, NULL)); 163 return simple_read_from_buffer(buf, count, ppos, names, avail);
162} 164}
163 165
164static const struct file_operations cntr_ops[] = { 166static const struct file_operations cntr_ops[] = {
@@ -176,10 +178,11 @@ static ssize_t portnames_read(struct file *file, char __user *buf,
176 size_t count, loff_t *ppos) 178 size_t count, loff_t *ppos)
177{ 179{
178 char *names; 180 char *names;
181 size_t avail;
179 struct qib_devdata *dd = private2dd(file); 182 struct qib_devdata *dd = private2dd(file);
180 183
181 return simple_read_from_buffer(buf, count, ppos, names, 184 avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL);
182 dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL)); 185 return simple_read_from_buffer(buf, count, ppos, names, avail);
183} 186}
184 187
185/* read the per-port counters for port 1 (pidx 0) */ 188/* read the per-port counters for port 1 (pidx 0) */
@@ -187,10 +190,11 @@ static ssize_t portcntrs_1_read(struct file *file, char __user *buf,
187 size_t count, loff_t *ppos) 190 size_t count, loff_t *ppos)
188{ 191{
189 u64 *counters; 192 u64 *counters;
193 size_t avail;
190 struct qib_devdata *dd = private2dd(file); 194 struct qib_devdata *dd = private2dd(file);
191 195
192 return simple_read_from_buffer(buf, count, ppos, counters, 196 avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters);
193 dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters)); 197 return simple_read_from_buffer(buf, count, ppos, counters, avail);
194} 198}
195 199
196/* read the per-port counters for port 2 (pidx 1) */ 200/* read the per-port counters for port 2 (pidx 1) */
@@ -198,10 +202,11 @@ static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
198 size_t count, loff_t *ppos) 202 size_t count, loff_t *ppos)
199{ 203{
200 u64 *counters; 204 u64 *counters;
205 size_t avail;
201 struct qib_devdata *dd = private2dd(file); 206 struct qib_devdata *dd = private2dd(file);
202 207
203 return simple_read_from_buffer(buf, count, ppos, counters, 208 avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters);
204 dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters)); 209 return simple_read_from_buffer(buf, count, ppos, counters, avail);
205} 210}
206 211
207static const struct file_operations portcntr_ops[] = { 212static const struct file_operations portcntr_ops[] = {
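
The qib_fs changes sequence the buffer-filling call before simple_read_from_buffer(): in the old code the buffer pointer (counters or names) was read in the same argument list as the dd->f_read_cntrs()/f_read_portcntrs() call that was supposed to fill it, and C does not define the evaluation order of function arguments, so the pointer could be passed while still uninitialized. A rough sketch of the corrected pattern, with hypothetical fill_names()/my_names_read() helpers and stand-in data:

    #include <linux/fs.h>

    static char example_names[] = "port1\nport2\n";  /* stand-in data */

    /* Hypothetical fill step: sets *namep and returns the valid length. */
    static size_t fill_names(char **namep)
    {
            *namep = example_names;
            return sizeof(example_names) - 1;
    }

    /* read() handler showing the corrected ordering. */
    static ssize_t my_names_read(struct file *file, char __user *buf,
                                 size_t count, loff_t *ppos)
    {
            char *names;
            size_t avail;

            avail = fill_names(&names);     /* fill first ... */
            return simple_read_from_buffer(buf, count, ppos, names, avail);
    }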
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 7b6549fd429b..1eadadc13da8 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -3475,14 +3475,6 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
3475 struct qib_devdata *dd; 3475 struct qib_devdata *dd;
3476 int ret; 3476 int ret;
3477 3477
3478#ifndef CONFIG_PCI_MSI
3479 qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
3480 "work if CONFIG_PCI_MSI is not enabled\n",
3481 ent->device);
3482 dd = ERR_PTR(-ENODEV);
3483 goto bail;
3484#endif
3485
3486 dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) + 3478 dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
3487 sizeof(struct qib_chip_specific)); 3479 sizeof(struct qib_chip_specific));
3488 if (IS_ERR(dd)) 3480 if (IS_ERR(dd))
@@ -3554,10 +3546,6 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
3554 if (qib_mini_init) 3546 if (qib_mini_init)
3555 goto bail; 3547 goto bail;
3556 3548
3557#ifndef CONFIG_PCI_MSI
3558 qib_dev_err(dd, "PCI_MSI not configured, NO interrupts\n");
3559#endif
3560
3561 if (qib_pcie_params(dd, 8, NULL, NULL)) 3549 if (qib_pcie_params(dd, 8, NULL, NULL))
3562 qib_dev_err(dd, "Failed to setup PCIe or interrupts; " 3550 qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
3563 "continuing anyway\n"); 3551 "continuing anyway\n");
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 2c24eab35b54..503992d9c5ce 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -42,9 +42,6 @@
42#include <linux/jiffies.h> 42#include <linux/jiffies.h>
43#include <rdma/ib_verbs.h> 43#include <rdma/ib_verbs.h>
44#include <rdma/ib_smi.h> 44#include <rdma/ib_smi.h>
45#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
46#include <linux/dca.h>
47#endif
48 45
49#include "qib.h" 46#include "qib.h"
50#include "qib_7322_regs.h" 47#include "qib_7322_regs.h"
@@ -114,40 +111,18 @@ static ushort qib_singleport;
114module_param_named(singleport, qib_singleport, ushort, S_IRUGO); 111module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
115MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space"); 112MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
116 113
117
118/*
119 * Setup QMH7342 receive and transmit parameters, necessary because
120 * each bay, Mez connector, and IB port need different tuning, beyond
121 * what the switch and HCA can do automatically.
122 * It's expected to be done by cat'ing files to the modules file,
123 * rather than setting up as a module parameter.
124 * It's a "write-only" file, returns 0 when read back.
125 * The unit, port, bay (if given), and values MUST be done as a single write.
126 * The unit, port, and bay must precede the values to be effective.
127 */
128static int setup_qmh_params(const char *, struct kernel_param *);
129static unsigned dummy_qmh_params;
130module_param_call(qmh_serdes_setup, setup_qmh_params, param_get_uint,
131 &dummy_qmh_params, S_IWUSR | S_IRUGO);
132
133/* similarly for QME7342, but it's simpler */
134static int setup_qme_params(const char *, struct kernel_param *);
135static unsigned dummy_qme_params;
136module_param_call(qme_serdes_setup, setup_qme_params, param_get_uint,
137 &dummy_qme_params, S_IWUSR | S_IRUGO);
138
139#define MAX_ATTEN_LEN 64 /* plenty for any real system */ 114#define MAX_ATTEN_LEN 64 /* plenty for any real system */
140/* for read back, default index is ~5m copper cable */ 115/* for read back, default index is ~5m copper cable */
141static char cable_atten_list[MAX_ATTEN_LEN] = "10"; 116static char txselect_list[MAX_ATTEN_LEN] = "10";
142static struct kparam_string kp_cable_atten = { 117static struct kparam_string kp_txselect = {
143 .string = cable_atten_list, 118 .string = txselect_list,
144 .maxlen = MAX_ATTEN_LEN 119 .maxlen = MAX_ATTEN_LEN
145}; 120};
146static int setup_cable_atten(const char *, struct kernel_param *); 121static int setup_txselect(const char *, struct kernel_param *);
147module_param_call(cable_atten, setup_cable_atten, param_get_string, 122module_param_call(txselect, setup_txselect, param_get_string,
148 &kp_cable_atten, S_IWUSR | S_IRUGO); 123 &kp_txselect, S_IWUSR | S_IRUGO);
149MODULE_PARM_DESC(cable_atten, \ 124MODULE_PARM_DESC(txselect, \
150 "cable attenuation indices for cables with invalid EEPROM"); 125 "Tx serdes indices (for no QSFP or invalid QSFP data)");
151 126
152#define BOARD_QME7342 5 127#define BOARD_QME7342 5
153#define BOARD_QMH7342 6 128#define BOARD_QMH7342 6
@@ -540,12 +515,6 @@ struct qib_chip_specific {
540 u32 lastbuf_for_pio; 515 u32 lastbuf_for_pio;
541 u32 stay_in_freeze; 516 u32 stay_in_freeze;
542 u32 recovery_ports_initted; 517 u32 recovery_ports_initted;
543#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
544 u32 dca_ctrl;
545 int rhdr_cpu[18];
546 int sdma_cpu[2];
547 u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
548#endif
549 struct msix_entry *msix_entries; 518 struct msix_entry *msix_entries;
550 void **msix_arg; 519 void **msix_arg;
551 unsigned long *sendchkenable; 520 unsigned long *sendchkenable;
@@ -574,11 +543,12 @@ struct vendor_txdds_ent {
574static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); 543static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
575 544
576#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ 545#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
546#define TXDDS_EXTRA_SZ 11 /* number of extra tx settings entries */
577#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ 547#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
578 548
579#define H1_FORCE_VAL 8 549#define H1_FORCE_VAL 8
580#define H1_FORCE_QME 1 /* may be overridden via setup_qme_params() */ 550#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
581#define H1_FORCE_QMH 7 /* may be overridden via setup_qmh_params() */ 551#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */
582 552
583/* The static and dynamic registers are paired, and the pairs indexed by spd */ 553/* The static and dynamic registers are paired, and the pairs indexed by spd */
584#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \ 554#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
@@ -590,15 +560,6 @@ static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
590#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */ 560#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
591#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */ 561#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
592 562
593static const struct txdds_ent qmh_sdr_txdds = { 11, 0, 5, 6 };
594static const struct txdds_ent qmh_ddr_txdds = { 7, 0, 2, 8 };
595static const struct txdds_ent qmh_qdr_txdds = { 0, 1, 3, 10 };
596
597/* this is used for unknown mez cards also */
598static const struct txdds_ent qme_sdr_txdds = { 11, 0, 4, 4 };
599static const struct txdds_ent qme_ddr_txdds = { 7, 0, 2, 7 };
600static const struct txdds_ent qme_qdr_txdds = { 0, 1, 12, 11 };
601
602struct qib_chippport_specific { 563struct qib_chippport_specific {
603 u64 __iomem *kpregbase; 564 u64 __iomem *kpregbase;
604 u64 __iomem *cpregbase; 565 u64 __iomem *cpregbase;
@@ -637,12 +598,8 @@ struct qib_chippport_specific {
637 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR. 598 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
638 * entry zero is unused, to simplify indexing 599 * entry zero is unused, to simplify indexing
639 */ 600 */
640 u16 h1_val; 601 u8 h1_val;
641 u8 amp[SERDES_CHANS]; 602 u8 no_eep; /* txselect table index to use if no qsfp info */
642 u8 pre[SERDES_CHANS];
643 u8 mainv[SERDES_CHANS];
644 u8 post[SERDES_CHANS];
645 u8 no_eep; /* attenuation index to use if no qsfp info */
646 u8 ipg_tries; 603 u8 ipg_tries;
647 u8 ibmalfusesnap; 604 u8 ibmalfusesnap;
648 struct qib_qsfp_data qsfp_data; 605 struct qib_qsfp_data qsfp_data;
@@ -676,52 +633,6 @@ static struct {
676 SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 }, 633 SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
677}; 634};
678 635
679#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
680static const struct dca_reg_map {
681 int shadow_inx;
682 int lsb;
683 u64 mask;
684 u16 regno;
685} dca_rcvhdr_reg_map[] = {
686 { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
687 ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
688 { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
689 ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
690 { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
691 ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
692 { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
693 ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
694 { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
695 ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
696 { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
697 ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
698 { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
699 ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
700 { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
701 ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
702 { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
703 ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
704 { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
705 ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
706 { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
707 ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
708 { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
709 ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
710 { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
711 ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
712 { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
713 ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
714 { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
715 ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
716 { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
717 ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
718 { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
719 ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
720 { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
721 ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
722};
723#endif
724
725/* ibcctrl bits */ 636/* ibcctrl bits */
726#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 637#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
727/* cycle through TS1/TS2 till OK */ 638/* cycle through TS1/TS2 till OK */
@@ -2572,95 +2483,6 @@ static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2572 qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink); 2483 qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2573} 2484}
2574 2485
2575#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2576static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd)
2577{
2578 struct qib_devdata *dd = rcd->dd;
2579 struct qib_chip_specific *cspec = dd->cspec;
2580 int cpu = get_cpu();
2581
2582 if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2583 const struct dca_reg_map *rmp;
2584
2585 cspec->rhdr_cpu[rcd->ctxt] = cpu;
2586 rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2587 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2588 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2589 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2590 qib_write_kreg(dd, rmp->regno,
2591 cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2592 cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2593 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2594 }
2595 put_cpu();
2596}
2597
2598static void qib_update_sdma_dca(struct qib_pportdata *ppd)
2599{
2600 struct qib_devdata *dd = ppd->dd;
2601 struct qib_chip_specific *cspec = dd->cspec;
2602 int cpu = get_cpu();
2603 unsigned pidx = ppd->port - 1;
2604
2605 if (cspec->sdma_cpu[pidx] != cpu) {
2606 cspec->sdma_cpu[pidx] = cpu;
2607 cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2608 SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2609 SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2610 cspec->dca_rcvhdr_ctrl[4] |=
2611 (u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2612 (ppd->hw_pidx ?
2613 SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2614 SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2615 qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2616 cspec->dca_rcvhdr_ctrl[4]);
2617 cspec->dca_ctrl |= ppd->hw_pidx ?
2618 SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2619 SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2620 qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2621 }
2622 put_cpu();
2623}
2624
2625static void qib_setup_dca(struct qib_devdata *dd)
2626{
2627 struct qib_chip_specific *cspec = dd->cspec;
2628 int i;
2629
2630 for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2631 cspec->rhdr_cpu[i] = -1;
2632 for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2633 cspec->sdma_cpu[i] = -1;
2634 cspec->dca_rcvhdr_ctrl[0] =
2635 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2636 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2637 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2638 (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2639 cspec->dca_rcvhdr_ctrl[1] =
2640 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2641 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2642 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2643 (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2644 cspec->dca_rcvhdr_ctrl[2] =
2645 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2646 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2647 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2648 (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2649 cspec->dca_rcvhdr_ctrl[3] =
2650 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2651 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2652 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2653 (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2654 cspec->dca_rcvhdr_ctrl[4] =
2655 (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2656 (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2657 for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2658 qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2659 cspec->dca_rcvhdr_ctrl[i]);
2660}
2661
2662#endif
2663
2664/* 2486/*
2665 * Disable MSIx interrupt if enabled, call generic MSIx code 2487 * Disable MSIx interrupt if enabled, call generic MSIx code
2666 * to cleanup, and clear pending MSIx interrupts. 2488 * to cleanup, and clear pending MSIx interrupts.
@@ -2701,15 +2523,6 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2701{ 2523{
2702 int i; 2524 int i;
2703 2525
2704#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2705 if (dd->flags & QIB_DCA_ENABLED) {
2706 dca_remove_requester(&dd->pcidev->dev);
2707 dd->flags &= ~QIB_DCA_ENABLED;
2708 dd->cspec->dca_ctrl = 0;
2709 qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2710 }
2711#endif
2712
2713 qib_7322_free_irq(dd); 2526 qib_7322_free_irq(dd);
2714 kfree(dd->cspec->cntrs); 2527 kfree(dd->cspec->cntrs);
2715 kfree(dd->cspec->sendchkenable); 2528 kfree(dd->cspec->sendchkenable);
@@ -3017,11 +2830,6 @@ static irqreturn_t qib_7322pintr(int irq, void *data)
3017 if (dd->int_counter != (u32) -1) 2830 if (dd->int_counter != (u32) -1)
3018 dd->int_counter++; 2831 dd->int_counter++;
3019 2832
3020#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3021 if (dd->flags & QIB_DCA_ENABLED)
3022 qib_update_rhdrq_dca(rcd);
3023#endif
3024
3025 /* Clear the interrupt bit we expect to be set. */ 2833 /* Clear the interrupt bit we expect to be set. */
3026 qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) | 2834 qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3027 (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt); 2835 (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
@@ -3085,11 +2893,6 @@ static irqreturn_t sdma_intr(int irq, void *data)
3085 if (dd->int_counter != (u32) -1) 2893 if (dd->int_counter != (u32) -1)
3086 dd->int_counter++; 2894 dd->int_counter++;
3087 2895
3088#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3089 if (dd->flags & QIB_DCA_ENABLED)
3090 qib_update_sdma_dca(ppd);
3091#endif
3092
3093 /* Clear the interrupt bit we expect to be set. */ 2896 /* Clear the interrupt bit we expect to be set. */
3094 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? 2897 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3095 INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0)); 2898 INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
@@ -3119,11 +2922,6 @@ static irqreturn_t sdma_idle_intr(int irq, void *data)
3119 if (dd->int_counter != (u32) -1) 2922 if (dd->int_counter != (u32) -1)
3120 dd->int_counter++; 2923 dd->int_counter++;
3121 2924
3122#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3123 if (dd->flags & QIB_DCA_ENABLED)
3124 qib_update_sdma_dca(ppd);
3125#endif
3126
3127 /* Clear the interrupt bit we expect to be set. */ 2925 /* Clear the interrupt bit we expect to be set. */
3128 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? 2926 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3129 INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0)); 2927 INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
@@ -3153,11 +2951,6 @@ static irqreturn_t sdma_progress_intr(int irq, void *data)
3153 if (dd->int_counter != (u32) -1) 2951 if (dd->int_counter != (u32) -1)
3154 dd->int_counter++; 2952 dd->int_counter++;
3155 2953
3156#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3157 if (dd->flags & QIB_DCA_ENABLED)
3158 qib_update_sdma_dca(ppd);
3159#endif
3160
3161 /* Clear the interrupt bit we expect to be set. */ 2954 /* Clear the interrupt bit we expect to be set. */
3162 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? 2955 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3163 INT_MASK_P(SDmaProgress, 1) : 2956 INT_MASK_P(SDmaProgress, 1) :
@@ -3188,11 +2981,6 @@ static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3188 if (dd->int_counter != (u32) -1) 2981 if (dd->int_counter != (u32) -1)
3189 dd->int_counter++; 2982 dd->int_counter++;
3190 2983
3191#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3192 if (dd->flags & QIB_DCA_ENABLED)
3193 qib_update_sdma_dca(ppd);
3194#endif
3195
3196 /* Clear the interrupt bit we expect to be set. */ 2984 /* Clear the interrupt bit we expect to be set. */
3197 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? 2985 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3198 INT_MASK_PM(SDmaCleanupDone, 1) : 2986 INT_MASK_PM(SDmaCleanupDone, 1) :
@@ -4299,10 +4087,6 @@ static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4299 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 4087 qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4300 rcd->rcvhdrq_phys); 4088 rcd->rcvhdrq_phys);
4301 rcd->seq_cnt = 1; 4089 rcd->seq_cnt = 1;
4302#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
4303 if (dd->flags & QIB_DCA_ENABLED)
4304 qib_update_rhdrq_dca(rcd);
4305#endif
4306 } 4090 }
4307 if (op & QIB_RCVCTRL_CTXT_DIS) 4091 if (op & QIB_RCVCTRL_CTXT_DIS)
4308 ppd->p_rcvctrl &= 4092 ppd->p_rcvctrl &=
@@ -5360,7 +5144,13 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5360 QIBL_IB_AUTONEG_INPROG))) 5144 QIBL_IB_AUTONEG_INPROG)))
5361 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); 5145 set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5362 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { 5146 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5147 /* unlock the Tx settings, speed may change */
5148 qib_write_kreg_port(ppd, krp_tx_deemph_override,
5149 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5150 reset_tx_deemphasis_override));
5363 qib_cancel_sends(ppd); 5151 qib_cancel_sends(ppd);
5152 /* on link down, ensure sane pcs state */
5153 qib_7322_mini_pcs_reset(ppd);
5364 spin_lock_irqsave(&ppd->sdma_lock, flags); 5154 spin_lock_irqsave(&ppd->sdma_lock, flags);
5365 if (__qib_sdma_running(ppd)) 5155 if (__qib_sdma_running(ppd))
5366 __qib_sdma_process_event(ppd, 5156 __qib_sdma_process_event(ppd,
@@ -5766,26 +5556,28 @@ static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
5766} 5556}
5767 5557
5768/* 5558/*
5769 * called at device initialization time, and also if the cable_atten 5559 * called at device initialization time, and also if the txselect
5770 * module parameter is changed. This is used for cables that don't 5560 * module parameter is changed. This is used for cables that don't
5771 * have valid QSFP EEPROMs (not present, or attenuation is zero). 5561 * have valid QSFP EEPROMs (not present, or attenuation is zero).
5772 * We initialize to the default, then if there is a specific 5562 * We initialize to the default, then if there is a specific
5773 * unit,port match, we use that. 5563 * unit,port match, we use that (and set it immediately, for the
5564 * current speed, if the link is at INIT or better).
5774 * String format is "default# unit#,port#=# ... u,p=#", separators must 5565 * String format is "default# unit#,port#=# ... u,p=#", separators must
5775 * be a SPACE character. A newline terminates. 5566 * be a SPACE character. A newline terminates. The u,p=# tuples may
5567 * optionally have "u,p=#,#", where the final # is the H1 value
5776 * The last specific match is used (actually, all are used, but last 5568 * The last specific match is used (actually, all are used, but last
5777 * one is the one that winds up set); if none at all, fall back on default. 5569 * one is the one that winds up set); if none at all, fall back on default.
5778 */ 5570 */
5779static void set_no_qsfp_atten(struct qib_devdata *dd, int change) 5571static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5780{ 5572{
5781 char *nxt, *str; 5573 char *nxt, *str;
5782 int pidx, unit, port, deflt; 5574 u32 pidx, unit, port, deflt, h1;
5783 unsigned long val; 5575 unsigned long val;
5784 int any = 0; 5576 int any = 0, seth1;
5785 5577
5786 str = cable_atten_list; 5578 str = txselect_list;
5787 5579
5788 /* default number is validated in setup_cable_atten() */ 5580 /* default number is validated in setup_txselect() */
5789 deflt = simple_strtoul(str, &nxt, 0); 5581 deflt = simple_strtoul(str, &nxt, 0);
5790 for (pidx = 0; pidx < dd->num_pports; ++pidx) 5582 for (pidx = 0; pidx < dd->num_pports; ++pidx)
5791 dd->pport[pidx].cpspec->no_eep = deflt; 5583 dd->pport[pidx].cpspec->no_eep = deflt;
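A hypothetical example of the string format described in the comment above (illustration only; the userspace parameter name is assumed to follow the txselect_list/setup_txselect naming in this hunk):

    txselect="10 1,1=15 1,2=16,8"

This would select table index 10 as the default, index 15 for unit 1 port 1, and index 16 with an H1 value of 8 for unit 1 port 2. Per the parsing in the next hunk, indices at or above TXDDS_TABLE_SZ reach into the txdds_extra_* tables added later in this patch.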
@@ -5812,16 +5604,28 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5812 ; 5604 ;
5813 continue; 5605 continue;
5814 } 5606 }
5815 if (val >= TXDDS_TABLE_SZ) 5607 if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
5816 continue; 5608 continue;
5609 seth1 = 0;
5610 h1 = 0; /* gcc thinks it might be used uninitted */
5611 if (*nxt == ',' && nxt[1]) {
5612 str = ++nxt;
5613 h1 = (u32)simple_strtoul(str, &nxt, 0);
5614 if (nxt == str)
5615 while (*nxt && *nxt++ != ' ') /* skip */
5616 ;
5617 else
5618 seth1 = 1;
5619 }
5817 for (pidx = 0; dd->unit == unit && pidx < dd->num_pports; 5620 for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
5818 ++pidx) { 5621 ++pidx) {
5819 if (dd->pport[pidx].port != port || 5622 struct qib_pportdata *ppd = &dd->pport[pidx];
5820 !dd->pport[pidx].link_speed_supported) 5623
5624 if (ppd->port != port || !ppd->link_speed_supported)
5821 continue; 5625 continue;
5822 dd->pport[pidx].cpspec->no_eep = val; 5626 ppd->cpspec->no_eep = val;
5823 /* now change the IBC and serdes, overriding generic */ 5627 /* now change the IBC and serdes, overriding generic */
5824 init_txdds_table(&dd->pport[pidx], 1); 5628 init_txdds_table(ppd, 1);
5825 any++; 5629 any++;
5826 } 5630 }
5827 if (*nxt == '\n') 5631 if (*nxt == '\n')
@@ -5832,35 +5636,35 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5832 * Change the IBC and serdes, but since it's 5636 * Change the IBC and serdes, but since it's
5833 * general, don't override specific settings. 5637 * general, don't override specific settings.
5834 */ 5638 */
5835 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 5639 for (pidx = 0; pidx < dd->num_pports; ++pidx)
5836 if (!dd->pport[pidx].link_speed_supported) 5640 if (dd->pport[pidx].link_speed_supported)
5837 continue; 5641 init_txdds_table(&dd->pport[pidx], 0);
5838 init_txdds_table(&dd->pport[pidx], 0);
5839 }
5840 } 5642 }
5841} 5643}
5842 5644
5843/* handle the cable_atten parameter changing */ 5645/* handle the txselect parameter changing */
5844static int setup_cable_atten(const char *str, struct kernel_param *kp) 5646static int setup_txselect(const char *str, struct kernel_param *kp)
5845{ 5647{
5846 struct qib_devdata *dd; 5648 struct qib_devdata *dd;
5847 unsigned long val; 5649 unsigned long val;
5848 char *n; 5650 char *n;
5849 if (strlen(str) >= MAX_ATTEN_LEN) { 5651 if (strlen(str) >= MAX_ATTEN_LEN) {
5850 printk(KERN_INFO QIB_DRV_NAME " cable_atten_values string " 5652 printk(KERN_INFO QIB_DRV_NAME " txselect_values string "
5851 "too long\n"); 5653 "too long\n");
5852 return -ENOSPC; 5654 return -ENOSPC;
5853 } 5655 }
5854 val = simple_strtoul(str, &n, 0); 5656 val = simple_strtoul(str, &n, 0);
5855 if (n == str || val >= TXDDS_TABLE_SZ) { 5657 if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
5856 printk(KERN_INFO QIB_DRV_NAME 5658 printk(KERN_INFO QIB_DRV_NAME
5857 "cable_atten_values must start with a number\n"); 5659 "txselect_values must start with a number < %d\n",
5660 TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
5858 return -EINVAL; 5661 return -EINVAL;
5859 } 5662 }
5860 strcpy(cable_atten_list, str); 5663 strcpy(txselect_list, str);
5861 5664
5862 list_for_each_entry(dd, &qib_dev_list, list) 5665 list_for_each_entry(dd, &qib_dev_list, list)
5863 set_no_qsfp_atten(dd, 1); 5666 if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
5667 set_no_qsfp_atten(dd, 1);
5864 return 0; 5668 return 0;
5865} 5669}
5866 5670
@@ -6261,28 +6065,17 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6261 * in adapter-specific routines. 6065 * in adapter-specific routines.
6262 */ 6066 */
6263 if (!(ppd->dd->flags & QIB_HAS_QSFP)) { 6067 if (!(ppd->dd->flags & QIB_HAS_QSFP)) {
6264 int i;
6265 const struct txdds_ent *txdds;
6266
6267 if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd)) 6068 if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd))
6268 qib_devinfo(ppd->dd->pcidev, "IB%u:%u: " 6069 qib_devinfo(ppd->dd->pcidev, "IB%u:%u: "
6269 "Unknown mezzanine card type\n", 6070 "Unknown mezzanine card type\n",
6270 ppd->dd->unit, ppd->port); 6071 dd->unit, ppd->port);
6271 txdds = IS_QMH(ppd->dd) ? &qmh_qdr_txdds : 6072 cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6272 &qme_qdr_txdds;
6273
6274 /* 6073 /*
6275 * set values in case link comes up 6074 * Choose center value as default tx serdes setting
6276 * before table is written to driver. 6075 * until changed through module parameter.
6277 */ 6076 */
6278 cp->h1_val = IS_QMH(ppd->dd) ? H1_FORCE_QMH : 6077 ppd->cpspec->no_eep = IS_QMH(dd) ?
6279 H1_FORCE_QME; 6078 TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6280 for (i = 0; i < SERDES_CHANS; i++) {
6281 cp->amp[i] = txdds->amp;
6282 cp->pre[i] = txdds->pre;
6283 cp->mainv[i] = txdds->main;
6284 cp->post[i] = txdds->post;
6285 }
6286 } else 6079 } else
6287 cp->h1_val = H1_FORCE_VAL; 6080 cp->h1_val = H1_FORCE_VAL;
6288 6081
@@ -6299,8 +6092,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6299 6092
6300 dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; 6093 dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
6301 dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; 6094 dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
6302 dd->rhf_offset = 6095 dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6303 dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6304 6096
6305 /* we always allocate at least 2048 bytes for eager buffers */ 6097 /* we always allocate at least 2048 bytes for eager buffers */
6306 dd->rcvegrbufsize = max(mtu, 2048); 6098 dd->rcvegrbufsize = max(mtu, 2048);
@@ -6919,13 +6711,6 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
6919 /* clear diagctrl register, in case diags were running and crashed */ 6711 /* clear diagctrl register, in case diags were running and crashed */
6920 qib_write_kreg(dd, kr_hwdiagctrl, 0); 6712 qib_write_kreg(dd, kr_hwdiagctrl, 0);
6921 6713
6922#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
6923 ret = dca_add_requester(&pdev->dev);
6924 if (!ret) {
6925 dd->flags |= QIB_DCA_ENABLED;
6926 qib_setup_dca(dd);
6927 }
6928#endif
6929 goto bail; 6714 goto bail;
6930 6715
6931bail_cleanup: 6716bail_cleanup:
@@ -7111,8 +6896,8 @@ static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7111static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = { 6896static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7112 /* amp, pre, main, post */ 6897 /* amp, pre, main, post */
7113 { 2, 2, 15, 6 }, /* Loopback */ 6898 { 2, 2, 15, 6 }, /* Loopback */
7114 { 0, 1, 0, 7 }, /* 2 dB */ 6899 { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */
7115 { 0, 1, 0, 9 }, /* 3 dB */ 6900 { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */
7116 { 0, 1, 0, 11 }, /* 4 dB */ 6901 { 0, 1, 0, 11 }, /* 4 dB */
7117 { 0, 1, 0, 13 }, /* 5 dB */ 6902 { 0, 1, 0, 13 }, /* 5 dB */
7118 { 0, 1, 0, 15 }, /* 6 dB */ 6903 { 0, 1, 0, 15 }, /* 6 dB */
@@ -7128,6 +6913,57 @@ static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7128 { 0, 2, 9, 15 }, /* 16 dB */ 6913 { 0, 2, 9, 15 }, /* 16 dB */
7129}; 6914};
7130 6915
6916/*
6917 * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
6918 * These are mostly used for mez cards going through connectors
6919 * and backplane traces, but can be used to add other "unusual"
6920 * table values as well.
6921 */
6922static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
6923 /* amp, pre, main, post */
6924 { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
6925 { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
6926 { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
6927 { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
6928 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
6929 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
6930 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
6931 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
6932 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
6933 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
6934 { 0, 0, 0, 11 }, /* QME7342 backplane settings */
6935};
6936
6937static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
6938 /* amp, pre, main, post */
6939 { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
6940 { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
6941 { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
6942 { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
6943 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
6944 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
6945 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
6946 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
6947 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
6948 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
6949 { 0, 0, 0, 13 }, /* QME7342 backplane settings */
6950};
6951
6952static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
6953 /* amp, pre, main, post */
6954 { 0, 1, 0, 4 }, /* QMH7342 backplane settings */
6955 { 0, 1, 0, 5 }, /* QMH7342 backplane settings */
6956 { 0, 1, 0, 6 }, /* QMH7342 backplane settings */
6957 { 0, 1, 0, 8 }, /* QMH7342 backplane settings */
6958 { 0, 1, 12, 10 }, /* QME7342 backplane setting */
6959 { 0, 1, 12, 11 }, /* QME7342 backplane setting */
6960 { 0, 1, 12, 12 }, /* QME7342 backplane setting */
6961 { 0, 1, 12, 14 }, /* QME7342 backplane setting */
6962 { 0, 1, 12, 6 }, /* QME7342 backplane setting */
6963 { 0, 1, 12, 7 }, /* QME7342 backplane setting */
6964 { 0, 1, 12, 8 }, /* QME7342 backplane setting */
6965};
6966
7131static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, 6967static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7132 unsigned atten) 6968 unsigned atten)
7133{ 6969{
@@ -7145,7 +6981,7 @@ static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7145} 6981}
7146 6982
7147/* 6983/*
7148 * if override is set, the module parameter cable_atten has a value 6984 * if override is set, the module parameter txselect has a value
7149 * for this specific port, so use it, rather than our normal mechanism. 6985 * for this specific port, so use it, rather than our normal mechanism.
7150 */ 6986 */
7151static void find_best_ent(struct qib_pportdata *ppd, 6987static void find_best_ent(struct qib_pportdata *ppd,
@@ -7184,15 +7020,28 @@ static void find_best_ent(struct qib_pportdata *ppd,
7184 *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]); 7020 *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7185 *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]); 7021 *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7186 return; 7022 return;
7187 } else { 7023 } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7188 /* 7024 /*
7189 * If we have no (or incomplete) data from the cable 7025 * If we have no (or incomplete) data from the cable
7190 * EEPROM, or no QSFP, use the module parameter value 7026 * EEPROM, or no QSFP, or override is set, use the
 7191 * to index into the attenuation 7027 * module parameter value to index into the attenuation
7028 * table.
7192 */ 7029 */
7193 *sdr_dds = &txdds_sdr[ppd->cpspec->no_eep]; 7030 idx = ppd->cpspec->no_eep;
7194 *ddr_dds = &txdds_ddr[ppd->cpspec->no_eep]; 7031 *sdr_dds = &txdds_sdr[idx];
7195 *qdr_dds = &txdds_qdr[ppd->cpspec->no_eep]; 7032 *ddr_dds = &txdds_ddr[idx];
7033 *qdr_dds = &txdds_qdr[idx];
7034 } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7035 /* similar to above, but index into the "extra" table. */
7036 idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7037 *sdr_dds = &txdds_extra_sdr[idx];
7038 *ddr_dds = &txdds_extra_ddr[idx];
7039 *qdr_dds = &txdds_extra_qdr[idx];
7040 } else {
7041 /* this shouldn't happen, it's range checked */
7042 *sdr_dds = txdds_sdr + qib_long_atten;
7043 *ddr_dds = txdds_ddr + qib_long_atten;
7044 *qdr_dds = txdds_qdr + qib_long_atten;
7196 } 7045 }
7197} 7046}
7198 7047
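As a minimal sketch (illustration only; pick_sdr() is a hypothetical helper, not part of the driver), the index ranges handled by the new find_best_ent() branches above amount to:

    static const struct txdds_ent *pick_sdr(const struct qib_pportdata *ppd)
    {
            unsigned idx = ppd->cpspec->no_eep;

            if (idx < TXDDS_TABLE_SZ)                       /* regular attenuation table */
                    return &txdds_sdr[idx];
            if (idx < TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)      /* "extra" mez/backplane table */
                    return &txdds_extra_sdr[idx - TXDDS_TABLE_SZ];
            return &txdds_sdr[qib_long_atten];              /* shouldn't happen: range-checked */
    }

The ddr and qdr pointers follow the same pattern against txdds_ddr/txdds_extra_ddr and txdds_qdr/txdds_extra_qdr.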
@@ -7203,33 +7052,24 @@ static void init_txdds_table(struct qib_pportdata *ppd, int override)
7203 int idx; 7052 int idx;
7204 int single_ent = 0; 7053 int single_ent = 0;
7205 7054
7206 if (IS_QMH(ppd->dd)) { 7055 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7207 /* normally will be overridden, via setup_qmh() */ 7056
7208 sdr_dds = &qmh_sdr_txdds; 7057 /* for mez cards or override, use the selected value for all entries */
7209 ddr_dds = &qmh_ddr_txdds; 7058 if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7210 qdr_dds = &qmh_qdr_txdds;
7211 single_ent = 1;
7212 } else if (IS_QME(ppd->dd)) {
7213 sdr_dds = &qme_sdr_txdds;
7214 ddr_dds = &qme_ddr_txdds;
7215 qdr_dds = &qme_qdr_txdds;
7216 single_ent = 1; 7059 single_ent = 1;
7217 } else
7218 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7219 7060
7220 /* Fill in the first entry with the best entry found. */ 7061 /* Fill in the first entry with the best entry found. */
7221 set_txdds(ppd, 0, sdr_dds); 7062 set_txdds(ppd, 0, sdr_dds);
7222 set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds); 7063 set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7223 set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds); 7064 set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7224 7065 if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7225 /* 7066 QIBL_LINKACTIVE)) {
7226 * for our current speed, also write that value into the 7067 dds = (struct txdds_ent *)(ppd->link_speed_active ==
7227 * tx serdes registers. 7068 QIB_IB_QDR ? qdr_dds :
7228 */ 7069 (ppd->link_speed_active ==
7229 dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ? 7070 QIB_IB_DDR ? ddr_dds : sdr_dds));
7230 qdr_dds : (ppd->link_speed_active == 7071 write_tx_serdes_param(ppd, dds);
7231 QIB_IB_DDR ? ddr_dds : sdr_dds)); 7072 }
7232 write_tx_serdes_param(ppd, dds);
7233 7073
7234 /* Fill in the remaining entries with the default table values. */ 7074 /* Fill in the remaining entries with the default table values. */
7235 for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) { 7075 for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
@@ -7352,6 +7192,11 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
7352 */ 7192 */
7353 init_txdds_table(ppd, 0); 7193 init_txdds_table(ppd, 0);
7354 7194
7195 /* ensure no tx overrides from earlier driver loads */
7196 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7197 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7198 reset_tx_deemphasis_override));
7199
7355 /* Patch some SerDes defaults to "Better for IB" */ 7200 /* Patch some SerDes defaults to "Better for IB" */
7356 /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */ 7201 /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7357 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); 7202 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
@@ -7421,7 +7266,7 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
7421 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); 7266 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7422 ppd->cpspec->qdr_dfe_on = 1; 7267 ppd->cpspec->qdr_dfe_on = 1;
7423 7268
7424 /* (FLoop LOS gate: PPM filter enabled */ 7269 /* FLoop LOS gate: PPM filter enabled */
7425 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); 7270 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7426 7271
7427 /* rx offset center enabled */ 7272 /* rx offset center enabled */
@@ -7486,68 +7331,39 @@ static void write_tx_serdes_param(struct qib_pportdata *ppd,
7486 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) | 7331 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
7487 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) | 7332 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
7488 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena)); 7333 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
7489 deemph |= 1ULL << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, 7334
7490 tx_override_deemphasis_select); 7335 deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7491 deemph |= txdds->amp << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, 7336 tx_override_deemphasis_select);
7492 txampcntl_d2a); 7337 deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7493 deemph |= txdds->main << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, 7338 txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7494 txc0_ena); 7339 txampcntl_d2a);
7495 deemph |= txdds->post << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, 7340 deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7496 txcp1_ena); 7341 txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7497 deemph |= txdds->pre << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, 7342 txc0_ena);
7343 deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7344 txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7345 txcp1_ena);
7346 deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7347 txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7498 txcn1_ena); 7348 txcn1_ena);
7499 qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph); 7349 qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
7500} 7350}
7501 7351
7502/* 7352/*
7503 * set per-bay, per channel parameters. For now, we ignore 7353 * Set the parameters for mez cards on link bounce, so they are
7504 * do_tx, and always set tx parameters, and set them with the same value 7354 * always exactly what was requested. Similar logic to init_txdds
7505 * for all channels, using the channel 0 value. We may switch to 7355 * but does just the serdes.
7506 * per-channel settings in the future, and that method only needs
7507 * to be done once.
7508 * Because this also writes the IBC txdds table with a single set
7509 * of values, it should be called only for cases where we want to completely
7510 * force a specific setting, typically only for mez cards.
7511 */ 7356 */
7512static void adj_tx_serdes(struct qib_pportdata *ppd) 7357static void adj_tx_serdes(struct qib_pportdata *ppd)
7513{ 7358{
7514 struct txdds_ent txdds; 7359 const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7515 int i; 7360 struct txdds_ent *dds;
7516 u8 *amp, *pre, *mainv, *post;
7517
7518 /*
7519 * Because we use TX_DEEMPHASIS_OVERRIDE, we need to
7520 * always do tx side, just like H1, since it is cleared
7521 * by link down
7522 */
7523 amp = ppd->cpspec->amp;
7524 pre = ppd->cpspec->pre;
7525 mainv = ppd->cpspec->mainv;
7526 post = ppd->cpspec->post;
7527
7528 amp[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7529 txampcntl_d2a);
7530 mainv[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7531 txc0_ena);
7532 post[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7533 txcp1_ena);
7534 pre[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7535 txcn1_ena);
7536
7537 /*
7538 * Use the channel zero values, only, for now, for
7539 * all channels
7540 */
7541 txdds.amp = amp[0];
7542 txdds.pre = pre[0];
7543 txdds.main = mainv[0];
7544 txdds.post = post[0];
7545
7546 /* write the QDR table for IBC use, as backup for link down */
7547 for (i = 0; i < ARRAY_SIZE(txdds_qdr); ++i)
7548 set_txdds(ppd, i + 32, &txdds);
7549 7361
7550 write_tx_serdes_param(ppd, &txdds); 7362 find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
7363 dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
7364 qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
7365 ddr_dds : sdr_dds));
7366 write_tx_serdes_param(ppd, dds);
7551} 7367}
7552 7368
7553/* set QDR forced value for H1, if needed */ 7369/* set QDR forced value for H1, if needed */
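The write_tx_serdes_param() change earlier in this hunk replaces plain shifts with a mask-then-shift of each txdds field. A minimal sketch of that pattern (put_field() is a hypothetical helper used only for illustration):

    static inline u64 put_field(u64 val, u64 rmask, unsigned lsb)
    {
            /* clamp the value to the field width, then position it */
            return (val & rmask) << lsb;
    }

so an out-of-range table entry cannot spill into neighbouring bit fields of the de-emphasis override register.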
@@ -7567,235 +7383,6 @@ static void force_h1(struct qib_pportdata *ppd)
7567 } 7383 }
7568} 7384}
7569 7385
7570/*
7571 * Parse the parameters for the QMH7342, to get rx and tx serdes
7572 * settings for that Bay, for both possible mez connectors (PCIe bus)
7573 * and IB link (one link on mez1, two possible on mez2).
7574 *
7575 * Data is comma or white space separated.
7576 *
7577 * A set of data has 7 groups, rx and tx groups have SERDES_CHANS values,
7578 * one per IB lane (serdes channel).
7579 * The groups are Bay, bus# H1 rcv, and amp, pre, post, main Tx values (QDR).
7580 * The Bay # is used only for debugging currently.
7581 * H1 values are set whenever the link goes down, or is at cfg_test or
7582 * cfg_wait_enh. Tx values are programmed once, when this routine is called
7583 * (and with default values at chip initialization). Values are any base, in
7584 * strtoul style, and values are seperated by comma, or any white space
7585 * (space, tab, newline).
7586 *
7587 * An example set might look like this (white space vs
7588 * comma used for human ease of reading)
7589 * The ordering is a set of Bay# Bus# H1, amp, pre, post, and main for mez1 IB1,
7590 * repeat for mez2 IB1, then mez2 IB2.
7591 *
7592 * B B H1:0 amp:0 pre:0 post: 0 main:0
7593 * a u H1: 1 amp: 1 pre: 1 post: 1 main: 1
7594 * y s H1: 2 amp: 2 pre: 2 post: 2 main: 2
7595 * H1: 4 amp: 3 pre: 3 post: 3 main: 3
7596 * 1 3 8,6,5,6 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3
7597 * 1 6 7,6,6,7 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3
7598 * 1 6 9,7,7,8 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3
7599 */
7600#define N_QMH_FIELDS 22
7601static int setup_qmh_params(const char *str, struct kernel_param *kp)
7602{
7603 char *abuf, *v, *nv, *nvp;
7604 struct qib_devdata *dd;
7605 struct qib_pportdata *ppd;
7606 u32 mez, vlen, nf, port, bay;
7607 int ret = 0, found = 0;
7608
7609 vlen = strlen(str) + 1;
7610 abuf = kmalloc(vlen, GFP_KERNEL);
7611 if (!abuf) {
7612 printk(KERN_INFO QIB_DRV_NAME
7613 " Unable to allocate QMH param buffer; ignoring\n");
7614 return 0;
7615 }
7616 memcpy(abuf, str, vlen);
7617 v = abuf;
7618
7619 /* these 3 are because gcc can't know they are set before used */
7620 port = 1;
7621 mez = 1; /* used only for debugging */
7622 bay = 0; /* used only for debugging */
7623 ppd = NULL;
7624 for (nf = 0; (nv = strsep(&v, ", \t\n\r")) &&
7625 nf < (N_QMH_FIELDS * 3);) {
7626 u32 val;
7627
7628 if (!*nv)
7629 /* allow for multiple separators */
7630 continue;
7631
7632 val = simple_strtoul(nv, &nvp, 0);
7633 if (nv == nvp) {
7634 printk(KERN_INFO QIB_DRV_NAME
7635 " Bay%u, mez%u IB%u non-numeric value (%s) "
7636 "field #%u, ignoring rest\n", bay, mez,
7637 port, nv, nf % (N_QMH_FIELDS * 3));
7638 ret = -EINVAL;
7639 goto bail;
7640 }
7641 if (!(nf % N_QMH_FIELDS)) {
7642 ppd = NULL;
7643 bay = val;
7644 if (!bay || bay > 16) {
7645 printk(KERN_INFO QIB_DRV_NAME
7646 " Invalid bay # %u, field %u, "
7647 "ignoring rest\n", bay, nf);
7648 ret = -EINVAL;
7649 goto bail;
7650 }
7651 } else if ((nf % N_QMH_FIELDS) == 1) {
7652 u32 bus = val;
7653 if (nf == 1) {
7654 mez = 1;
7655 port = 1;
7656 } else if (nf == (N_QMH_FIELDS + 1)) {
7657 mez = 2;
7658 port = 1;
7659 } else {
7660 mez = 2;
7661 port = 2;
7662 }
7663 list_for_each_entry(dd, &qib_dev_list, list) {
7664 if (dd->deviceid != PCI_DEVICE_ID_QLOGIC_IB_7322
7665 || !IS_QMH(dd))
7666 continue; /* only for QMH cards */
7667 if (dd->pcidev->bus->number == bus) {
7668 found++;
7669 ppd = &dd->pport[port - 1];
7670 }
7671 }
7672 } else if (ppd) {
7673 u32 parm = (nf % N_QMH_FIELDS) - 2;
7674 if (parm < SERDES_CHANS && !(parm % SERDES_CHANS))
7675 ppd->cpspec->h1_val = val;
7676 else if (parm < (2 * SERDES_CHANS))
7677 ppd->cpspec->amp[parm % SERDES_CHANS] = val;
7678 else if (parm < (3 * SERDES_CHANS))
7679 ppd->cpspec->pre[parm % SERDES_CHANS] = val;
7680 else if (parm < (4 * SERDES_CHANS))
7681 ppd->cpspec->post[parm % SERDES_CHANS] = val;
7682 else {
7683 ppd->cpspec->mainv[parm % SERDES_CHANS] = val;
7684 /* At the end of a port, set params */
7685 if (parm == ((5 * SERDES_CHANS) - 1))
7686 adj_tx_serdes(ppd);
7687 }
7688 }
7689 nf++;
7690 }
7691 if (!found) {
7692 printk(KERN_ERR QIB_DRV_NAME
7693 ": No match found for qmh_serdes_setup parameter\n");
7694 ret = -EINVAL;
7695 }
7696bail:
7697 kfree(abuf);
7698 return ret;
7699}
7700
7701/*
7702 * Similarly for QME7342, but the format is simpler, values are the
7703 * same for all mez card positions in a blade (2 or 4 per blade), but
7704 * are different for some blades vs others, and we don't need to
7705 * specify different parameters for different serdes channels or different
7706 * IB ports.
7707 * Format is: h1 amp,pre,post,main
7708 * Alternate format (so ports can be different): Pport# h1 amp,pre,post,main
7709 */
7710#define N_QME_FIELDS 5
7711static int setup_qme_params(const char *str, struct kernel_param *kp)
7712{
7713 char *abuf, *v, *nv, *nvp;
7714 struct qib_devdata *dd;
7715 u32 vlen, nf, port = 0;
7716 u8 h1, tx[4]; /* amp, pre, post, main */
7717 int ret = -EINVAL;
7718 char *seplist;
7719
7720 vlen = strlen(str) + 1;
7721 abuf = kmalloc(vlen, GFP_KERNEL);
7722 if (!abuf) {
7723 printk(KERN_INFO QIB_DRV_NAME
7724 " Unable to allocate QME param buffer; ignoring\n");
7725 return 0;
7726 }
7727 strncpy(abuf, str, vlen);
7728
7729 v = abuf;
7730 seplist = " \t";
7731 h1 = H1_FORCE_QME; /* gcc can't figure out always set before used */
7732
7733 for (nf = 0; (nv = strsep(&v, seplist)); ) {
7734 u32 val;
7735
7736 if (!*nv)
7737 /* allow for multiple separators */
7738 continue;
7739
7740 if (!nf && *nv == 'P') {
7741 /* alternate format with port */
7742 val = simple_strtoul(++nv, &nvp, 0);
7743 if (nv == nvp || port >= NUM_IB_PORTS) {
7744 printk(KERN_INFO QIB_DRV_NAME
7745 " %s: non-numeric port value (%s) "
7746 "ignoring rest\n", __func__, nv);
7747 goto done;
7748 }
7749 port = val;
7750 continue; /* without incrementing nf */
7751 }
7752 val = simple_strtoul(nv, &nvp, 0);
7753 if (nv == nvp) {
7754 printk(KERN_INFO QIB_DRV_NAME
7755 " %s: non-numeric value (%s) "
7756 "field #%u, ignoring rest\n", __func__,
7757 nv, nf);
7758 goto done;
7759 }
7760 if (!nf) {
7761 h1 = val;
7762 seplist = ",";
7763 } else
7764 tx[nf - 1] = val;
7765 if (++nf == N_QME_FIELDS) {
7766 list_for_each_entry(dd, &qib_dev_list, list) {
7767 int pidx, i;
7768 if (dd->deviceid != PCI_DEVICE_ID_QLOGIC_IB_7322
7769 || !IS_QME(dd))
7770 continue; /* only for QME cards */
7771 for (pidx = 0; pidx < dd->num_pports; ++pidx) {
7772 struct qib_pportdata *ppd;
7773 ppd = &dd->pport[pidx];
7774 if ((port && ppd->port != port) ||
7775 !ppd->link_speed_supported)
7776 continue;
7777 ppd->cpspec->h1_val = h1;
7778 for (i = 0; i < SERDES_CHANS; i++) {
7779 ppd->cpspec->amp[i] = tx[0];
7780 ppd->cpspec->pre[i] = tx[1];
7781 ppd->cpspec->post[i] = tx[2];
7782 ppd->cpspec->mainv[i] = tx[3];
7783 }
7784 adj_tx_serdes(ppd);
7785 }
7786 }
7787 ret = 0;
7788 goto done;
7789 }
7790 }
7791 printk(KERN_INFO QIB_DRV_NAME
7792 " %s: Only %u of %u fields provided, skipping\n",
7793 __func__, nf, N_QME_FIELDS);
7794done:
7795 kfree(abuf);
7796 return ret;
7797}
7798
7799#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN) 7386#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
7800#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en) 7387#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
7801 7388
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index c0139c07e97e..9b40f345ac3f 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1237,7 +1237,13 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
1237 */ 1237 */
1238 switch (ent->device) { 1238 switch (ent->device) {
1239 case PCI_DEVICE_ID_QLOGIC_IB_6120: 1239 case PCI_DEVICE_ID_QLOGIC_IB_6120:
1240#ifdef CONFIG_PCI_MSI
1240 dd = qib_init_iba6120_funcs(pdev, ent); 1241 dd = qib_init_iba6120_funcs(pdev, ent);
1242#else
1243 qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
1244 "work if CONFIG_PCI_MSI is not enabled\n",
1245 ent->device);
1246#endif
1241 break; 1247 break;
1242 1248
1243 case PCI_DEVICE_ID_QLOGIC_IB_7220: 1249 case PCI_DEVICE_ID_QLOGIC_IB_7220:
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 423e0e6031ab..34157bb97ed6 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -47,15 +47,15 @@ struct joydev {
47 struct mutex mutex; 47 struct mutex mutex;
48 struct device dev; 48 struct device dev;
49 49
50 struct js_corr corr[ABS_MAX + 1]; 50 struct js_corr corr[ABS_CNT];
51 struct JS_DATA_SAVE_TYPE glue; 51 struct JS_DATA_SAVE_TYPE glue;
52 int nabs; 52 int nabs;
53 int nkey; 53 int nkey;
54 __u16 keymap[KEY_MAX - BTN_MISC + 1]; 54 __u16 keymap[KEY_MAX - BTN_MISC + 1];
55 __u16 keypam[KEY_MAX - BTN_MISC + 1]; 55 __u16 keypam[KEY_MAX - BTN_MISC + 1];
56 __u8 absmap[ABS_MAX + 1]; 56 __u8 absmap[ABS_CNT];
57 __u8 abspam[ABS_MAX + 1]; 57 __u8 abspam[ABS_CNT];
58 __s16 abs[ABS_MAX + 1]; 58 __s16 abs[ABS_CNT];
59}; 59};
60 60
61struct joydev_client { 61struct joydev_client {
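For reference (kernel input header of this era, not part of the commit), ABS_CNT is simply the count form of the ABS_MAX limit, so the replacements above keep the array sizes identical and only make the intent explicit:

    #define ABS_CNT (ABS_MAX + 1)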
@@ -826,7 +826,7 @@ static int joydev_connect(struct input_handler *handler, struct input_dev *dev,
826 joydev->handle.handler = handler; 826 joydev->handle.handler = handler;
827 joydev->handle.private = joydev; 827 joydev->handle.private = joydev;
828 828
829 for (i = 0; i < ABS_MAX + 1; i++) 829 for (i = 0; i < ABS_CNT; i++)
830 if (test_bit(i, dev->absbit)) { 830 if (test_bit(i, dev->absbit)) {
831 joydev->absmap[i] = joydev->nabs; 831 joydev->absmap[i] = joydev->nabs;
832 joydev->abspam[joydev->nabs] = i; 832 joydev->abspam[joydev->nabs] = i;
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index 35149ec455a9..79172af164f2 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/keyboard.h> 37#include <linux/keyboard.h>
38#include <linux/platform_device.h>
38 39
39#include <asm/amigaints.h> 40#include <asm/amigaints.h>
40#include <asm/amigahw.h> 41#include <asm/amigahw.h>
@@ -154,10 +155,9 @@ static const char *amikbd_messages[8] = {
154 [7] = KERN_WARNING "amikbd: keyboard interrupt\n" 155 [7] = KERN_WARNING "amikbd: keyboard interrupt\n"
155}; 156};
156 157
157static struct input_dev *amikbd_dev; 158static irqreturn_t amikbd_interrupt(int irq, void *data)
158
159static irqreturn_t amikbd_interrupt(int irq, void *dummy)
160{ 159{
160 struct input_dev *dev = data;
161 unsigned char scancode, down; 161 unsigned char scancode, down;
162 162
163 scancode = ~ciaa.sdr; /* get and invert scancode (keyboard is active low) */ 163 scancode = ~ciaa.sdr; /* get and invert scancode (keyboard is active low) */
@@ -170,47 +170,42 @@ static irqreturn_t amikbd_interrupt(int irq, void *dummy)
170 170
171 if (scancode < 0x78) { /* scancodes < 0x78 are keys */ 171 if (scancode < 0x78) { /* scancodes < 0x78 are keys */
172 if (scancode == 98) { /* CapsLock is a toggle switch key on Amiga */ 172 if (scancode == 98) { /* CapsLock is a toggle switch key on Amiga */
173 input_report_key(amikbd_dev, scancode, 1); 173 input_report_key(dev, scancode, 1);
174 input_report_key(amikbd_dev, scancode, 0); 174 input_report_key(dev, scancode, 0);
175 } else { 175 } else {
176 input_report_key(amikbd_dev, scancode, down); 176 input_report_key(dev, scancode, down);
177 } 177 }
178 178
179 input_sync(amikbd_dev); 179 input_sync(dev);
180 } else /* scancodes >= 0x78 are error codes */ 180 } else /* scancodes >= 0x78 are error codes */
181 printk(amikbd_messages[scancode - 0x78]); 181 printk(amikbd_messages[scancode - 0x78]);
182 182
183 return IRQ_HANDLED; 183 return IRQ_HANDLED;
184} 184}
185 185
186static int __init amikbd_init(void) 186static int __init amikbd_probe(struct platform_device *pdev)
187{ 187{
188 struct input_dev *dev;
188 int i, j, err; 189 int i, j, err;
189 190
190 if (!AMIGAHW_PRESENT(AMI_KEYBOARD)) 191 dev = input_allocate_device();
191 return -ENODEV; 192 if (!dev) {
192 193 dev_err(&pdev->dev, "Not enough memory for input device\n");
193 if (!request_mem_region(CIAA_PHYSADDR-1+0xb00, 0x100, "amikeyb")) 194 return -ENOMEM;
194 return -EBUSY;
195
196 amikbd_dev = input_allocate_device();
197 if (!amikbd_dev) {
198 printk(KERN_ERR "amikbd: not enough memory for input device\n");
199 err = -ENOMEM;
200 goto fail1;
201 } 195 }
202 196
203 amikbd_dev->name = "Amiga Keyboard"; 197 dev->name = pdev->name;
204 amikbd_dev->phys = "amikbd/input0"; 198 dev->phys = "amikbd/input0";
205 amikbd_dev->id.bustype = BUS_AMIGA; 199 dev->id.bustype = BUS_AMIGA;
206 amikbd_dev->id.vendor = 0x0001; 200 dev->id.vendor = 0x0001;
207 amikbd_dev->id.product = 0x0001; 201 dev->id.product = 0x0001;
208 amikbd_dev->id.version = 0x0100; 202 dev->id.version = 0x0100;
203 dev->dev.parent = &pdev->dev;
209 204
210 amikbd_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); 205 dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
211 206
212 for (i = 0; i < 0x78; i++) 207 for (i = 0; i < 0x78; i++)
213 set_bit(i, amikbd_dev->keybit); 208 set_bit(i, dev->keybit);
214 209
215 for (i = 0; i < MAX_NR_KEYMAPS; i++) { 210 for (i = 0; i < MAX_NR_KEYMAPS; i++) {
216 static u_short temp_map[NR_KEYS] __initdata; 211 static u_short temp_map[NR_KEYS] __initdata;
@@ -229,30 +224,54 @@ static int __init amikbd_init(void)
229 memcpy(key_maps[i], temp_map, sizeof(temp_map)); 224 memcpy(key_maps[i], temp_map, sizeof(temp_map));
230 } 225 }
231 ciaa.cra &= ~0x41; /* serial data in, turn off TA */ 226 ciaa.cra &= ~0x41; /* serial data in, turn off TA */
232 if (request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd", 227 err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
233 amikbd_interrupt)) { 228 dev);
234 err = -EBUSY; 229 if (err)
235 goto fail2; 230 goto fail2;
236 }
237 231
238 err = input_register_device(amikbd_dev); 232 err = input_register_device(dev);
239 if (err) 233 if (err)
240 goto fail3; 234 goto fail3;
241 235
236 platform_set_drvdata(pdev, dev);
237
242 return 0; 238 return 0;
243 239
244 fail3: free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt); 240 fail3: free_irq(IRQ_AMIGA_CIAA_SP, dev);
245 fail2: input_free_device(amikbd_dev); 241 fail2: input_free_device(dev);
246 fail1: release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100);
247 return err; 242 return err;
248} 243}
249 244
250static void __exit amikbd_exit(void) 245static int __exit amikbd_remove(struct platform_device *pdev)
246{
247 struct input_dev *dev = platform_get_drvdata(pdev);
248
249 platform_set_drvdata(pdev, NULL);
250 free_irq(IRQ_AMIGA_CIAA_SP, dev);
251 input_unregister_device(dev);
252 return 0;
253}
254
255static struct platform_driver amikbd_driver = {
256 .remove = __exit_p(amikbd_remove),
257 .driver = {
258 .name = "amiga-keyboard",
259 .owner = THIS_MODULE,
260 },
261};
262
263static int __init amikbd_init(void)
251{ 264{
252 free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt); 265 return platform_driver_probe(&amikbd_driver, amikbd_probe);
253 input_unregister_device(amikbd_dev);
254 release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100);
255} 266}
256 267
257module_init(amikbd_init); 268module_init(amikbd_init);
269
270static void __exit amikbd_exit(void)
271{
272 platform_driver_unregister(&amikbd_driver);
273}
274
258module_exit(amikbd_exit); 275module_exit(amikbd_exit);
276
277MODULE_ALIAS("platform:amiga-keyboard");
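The amikbd and amimouse conversions both pass the input device as the interrupt cookie instead of the handler itself, which removes the old file-scope device globals. A compressed sketch of the pattern (names as in the code above):

    static irqreturn_t amikbd_interrupt(int irq, void *data)
    {
            struct input_dev *dev = data;   /* cookie given to request_irq() */
            /* report events against dev */
            return IRQ_HANDLED;
    }

    err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd", dev);
    /* ... later, on teardown: */
    free_irq(IRQ_AMIGA_CIAA_SP, dev);       /* must match the request_irq() cookie */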
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 48cdabec372a..c44b9eafc556 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -80,6 +80,16 @@ config INPUT_M68K_BEEP
80 tristate "M68k Beeper support" 80 tristate "M68k Beeper support"
81 depends on M68K 81 depends on M68K
82 82
83config INPUT_MAX8925_ONKEY
84 tristate "MAX8925 ONKEY support"
85 depends on MFD_MAX8925
86 help
87 Support the ONKEY of MAX8925 PMICs as an input device
88 reporting power button status.
89
90 To compile this driver as a module, choose M here: the module
91 will be called max8925_onkey.
92
83config INPUT_APANEL 93config INPUT_APANEL
84 tristate "Fujitsu Lifebook Application Panel buttons" 94 tristate "Fujitsu Lifebook Application Panel buttons"
85 depends on X86 && I2C && LEDS_CLASS 95 depends on X86 && I2C && LEDS_CLASS
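As a usage note (assuming the usual Kconfig semantics, which this commit does not spell out), selecting the new entry as a module adds

    CONFIG_INPUT_MAX8925_ONKEY=m

to the kernel configuration and builds the max8925_onkey object added in the Makefile hunk below.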
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index f9f577031e06..71fe57d8023f 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
20obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o 20obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
21obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o 21obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
22obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o 22obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
23obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
23obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o 24obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
24obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o 25obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o
25obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o 26obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
new file mode 100644
index 000000000000..80af44608018
--- /dev/null
+++ b/drivers/input/misc/max8925_onkey.c
@@ -0,0 +1,148 @@
1/**
2 * max8925_onkey.c - MAX8925 ONKEY driver
3 *
4 * Copyright (C) 2009 Marvell International Ltd.
5 * Haojian Zhuang <haojian.zhuang@marvell.com>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file "COPYING" in the main directory of this
9 * archive for more details.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/platform_device.h>
24#include <linux/i2c.h>
25#include <linux/input.h>
26#include <linux/interrupt.h>
27#include <linux/mfd/max8925.h>
28#include <linux/slab.h>
29
30#define HARDRESET_EN (1 << 7)
31#define PWREN_EN (1 << 7)
32
33struct max8925_onkey_info {
34 struct input_dev *idev;
35 struct i2c_client *i2c;
36 int irq;
37};
38
39/*
40 * MAX8925 gives us an interrupt when ONKEY is held for 3 seconds.
41 * max8925_set_bits() operates I2C bus and may sleep. So implement
42 * it in thread IRQ handler.
43 */
44static irqreturn_t max8925_onkey_handler(int irq, void *data)
45{
46 struct max8925_onkey_info *info = data;
47
48 input_report_key(info->idev, KEY_POWER, 1);
49 input_sync(info->idev);
50
 51 /* Enable hardreset to halt if the system isn't shut down in time */
52 max8925_set_bits(info->i2c, MAX8925_SYSENSEL,
53 HARDRESET_EN, HARDRESET_EN);
54
55 return IRQ_HANDLED;
56}
57
58static int __devinit max8925_onkey_probe(struct platform_device *pdev)
59{
60 struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
61 struct max8925_onkey_info *info;
62 int error;
63
64 info = kzalloc(sizeof(struct max8925_onkey_info), GFP_KERNEL);
65 if (!info)
66 return -ENOMEM;
67
68 info->i2c = chip->i2c;
69 info->irq = chip->irq_base + MAX8925_IRQ_GPM_SW_3SEC;
70
71 info->idev = input_allocate_device();
72 if (!info->idev) {
73 dev_err(chip->dev, "Failed to allocate input dev\n");
74 error = -ENOMEM;
75 goto out_input;
76 }
77
78 info->idev->name = "max8925_on";
79 info->idev->phys = "max8925_on/input0";
80 info->idev->id.bustype = BUS_I2C;
81 info->idev->dev.parent = &pdev->dev;
82 info->idev->evbit[0] = BIT_MASK(EV_KEY);
83 info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER);
84
85 error = request_threaded_irq(info->irq, NULL, max8925_onkey_handler,
86 IRQF_ONESHOT, "onkey", info);
87 if (error < 0) {
88 dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
89 info->irq, error);
90 goto out_irq;
91 }
92
93 error = input_register_device(info->idev);
94 if (error) {
95 dev_err(chip->dev, "Can't register input device: %d\n", error);
96 goto out;
97 }
98
99 platform_set_drvdata(pdev, info);
100
101 return 0;
102
103out:
104 free_irq(info->irq, info);
105out_irq:
106 input_free_device(info->idev);
107out_input:
108 kfree(info);
109 return error;
110}
111
112static int __devexit max8925_onkey_remove(struct platform_device *pdev)
113{
114 struct max8925_onkey_info *info = platform_get_drvdata(pdev);
115
116 free_irq(info->irq, info);
117 input_unregister_device(info->idev);
118 kfree(info);
119
120 platform_set_drvdata(pdev, NULL);
121
122 return 0;
123}
124
125static struct platform_driver max8925_onkey_driver = {
126 .driver = {
127 .name = "max8925-onkey",
128 .owner = THIS_MODULE,
129 },
130 .probe = max8925_onkey_probe,
131 .remove = __devexit_p(max8925_onkey_remove),
132};
133
134static int __init max8925_onkey_init(void)
135{
136 return platform_driver_register(&max8925_onkey_driver);
137}
138module_init(max8925_onkey_init);
139
140static void __exit max8925_onkey_exit(void)
141{
142 platform_driver_unregister(&max8925_onkey_driver);
143}
144module_exit(max8925_onkey_exit);
145
146MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver");
147MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
148MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fee9eac8e04a..4f9b2afc24e8 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -90,8 +90,8 @@ static void vibra_disable(struct vibra_info *info)
90 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, 90 twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
91 (reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL); 91 (reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
92 92
93 twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
94 twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL); 93 twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL);
94 twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
95 95
96 info->enabled = false; 96 info->enabled = false;
97} 97}
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 1477466076ad..b71eb55f2dbc 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -300,7 +300,7 @@ static int uinput_validate_absbits(struct input_dev *dev)
300 unsigned int cnt; 300 unsigned int cnt;
301 int retval = 0; 301 int retval = 0;
302 302
303 for (cnt = 0; cnt < ABS_MAX + 1; cnt++) { 303 for (cnt = 0; cnt < ABS_CNT; cnt++) {
304 if (!test_bit(cnt, dev->absbit)) 304 if (!test_bit(cnt, dev->absbit))
305 continue; 305 continue;
306 306
@@ -387,7 +387,7 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu
387 dev->id.product = user_dev->id.product; 387 dev->id.product = user_dev->id.product;
388 dev->id.version = user_dev->id.version; 388 dev->id.version = user_dev->id.version;
389 389
390 size = sizeof(int) * (ABS_MAX + 1); 390 size = sizeof(int) * ABS_CNT;
391 memcpy(dev->absmax, user_dev->absmax, size); 391 memcpy(dev->absmax, user_dev->absmax, size);
392 memcpy(dev->absmin, user_dev->absmin, size); 392 memcpy(dev->absmin, user_dev->absmin, size);
393 memcpy(dev->absfuzz, user_dev->absfuzz, size); 393 memcpy(dev->absfuzz, user_dev->absfuzz, size);
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index a185ac78a42c..ff5f61a0fd3a 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/input.h> 22#include <linux/input.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/platform_device.h>
24 25
25#include <asm/irq.h> 26#include <asm/irq.h>
26#include <asm/setup.h> 27#include <asm/setup.h>
@@ -34,10 +35,10 @@ MODULE_DESCRIPTION("Amiga mouse driver");
34MODULE_LICENSE("GPL"); 35MODULE_LICENSE("GPL");
35 36
36static int amimouse_lastx, amimouse_lasty; 37static int amimouse_lastx, amimouse_lasty;
37static struct input_dev *amimouse_dev;
38 38
39static irqreturn_t amimouse_interrupt(int irq, void *dummy) 39static irqreturn_t amimouse_interrupt(int irq, void *data)
40{ 40{
41 struct input_dev *dev = data;
41 unsigned short joy0dat, potgor; 42 unsigned short joy0dat, potgor;
42 int nx, ny, dx, dy; 43 int nx, ny, dx, dy;
43 44
@@ -59,14 +60,14 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy)
59 60
60 potgor = amiga_custom.potgor; 61 potgor = amiga_custom.potgor;
61 62
62 input_report_rel(amimouse_dev, REL_X, dx); 63 input_report_rel(dev, REL_X, dx);
63 input_report_rel(amimouse_dev, REL_Y, dy); 64 input_report_rel(dev, REL_Y, dy);
64 65
65 input_report_key(amimouse_dev, BTN_LEFT, ciaa.pra & 0x40); 66 input_report_key(dev, BTN_LEFT, ciaa.pra & 0x40);
66 input_report_key(amimouse_dev, BTN_MIDDLE, potgor & 0x0100); 67 input_report_key(dev, BTN_MIDDLE, potgor & 0x0100);
67 input_report_key(amimouse_dev, BTN_RIGHT, potgor & 0x0400); 68 input_report_key(dev, BTN_RIGHT, potgor & 0x0400);
68 69
69 input_sync(amimouse_dev); 70 input_sync(dev);
70 71
71 return IRQ_HANDLED; 72 return IRQ_HANDLED;
72} 73}
@@ -74,63 +75,90 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy)
74static int amimouse_open(struct input_dev *dev) 75static int amimouse_open(struct input_dev *dev)
75{ 76{
76 unsigned short joy0dat; 77 unsigned short joy0dat;
78 int error;
77 79
78 joy0dat = amiga_custom.joy0dat; 80 joy0dat = amiga_custom.joy0dat;
79 81
80 amimouse_lastx = joy0dat & 0xff; 82 amimouse_lastx = joy0dat & 0xff;
81 amimouse_lasty = joy0dat >> 8; 83 amimouse_lasty = joy0dat >> 8;
82 84
83 if (request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse", amimouse_interrupt)) { 85 error = request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse",
84 printk(KERN_ERR "amimouse.c: Can't allocate irq %d\n", IRQ_AMIGA_VERTB); 86 dev);
85 return -EBUSY; 87 if (error)
86 } 88 dev_err(&dev->dev, "Can't allocate irq %d\n", IRQ_AMIGA_VERTB);
87 89
88 return 0; 90 return error;
89} 91}
90 92
91static void amimouse_close(struct input_dev *dev) 93static void amimouse_close(struct input_dev *dev)
92{ 94{
93 free_irq(IRQ_AMIGA_VERTB, amimouse_interrupt); 95 free_irq(IRQ_AMIGA_VERTB, dev);
94} 96}
95 97
96static int __init amimouse_init(void) 98static int __init amimouse_probe(struct platform_device *pdev)
97{ 99{
98 int err; 100 int err;
101 struct input_dev *dev;
99 102
100 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_MOUSE)) 103 dev = input_allocate_device();
101 return -ENODEV; 104 if (!dev)
102
103 amimouse_dev = input_allocate_device();
104 if (!amimouse_dev)
105 return -ENOMEM; 105 return -ENOMEM;
106 106
107 amimouse_dev->name = "Amiga mouse"; 107 dev->name = pdev->name;
108 amimouse_dev->phys = "amimouse/input0"; 108 dev->phys = "amimouse/input0";
109 amimouse_dev->id.bustype = BUS_AMIGA; 109 dev->id.bustype = BUS_AMIGA;
110 amimouse_dev->id.vendor = 0x0001; 110 dev->id.vendor = 0x0001;
111 amimouse_dev->id.product = 0x0002; 111 dev->id.product = 0x0002;
112 amimouse_dev->id.version = 0x0100; 112 dev->id.version = 0x0100;
113 113
114 amimouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); 114 dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
115 amimouse_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); 115 dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
116 amimouse_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | 116 dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) |
117 BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); 117 BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
118 amimouse_dev->open = amimouse_open; 118 dev->open = amimouse_open;
119 amimouse_dev->close = amimouse_close; 119 dev->close = amimouse_close;
120 dev->dev.parent = &pdev->dev;
120 121
121 err = input_register_device(amimouse_dev); 122 err = input_register_device(dev);
122 if (err) { 123 if (err) {
123 input_free_device(amimouse_dev); 124 input_free_device(dev);
124 return err; 125 return err;
125 } 126 }
126 127
128 platform_set_drvdata(pdev, dev);
129
127 return 0; 130 return 0;
128} 131}
129 132
130static void __exit amimouse_exit(void) 133static int __exit amimouse_remove(struct platform_device *pdev)
131{ 134{
132 input_unregister_device(amimouse_dev); 135 struct input_dev *dev = platform_get_drvdata(pdev);
136
137 platform_set_drvdata(pdev, NULL);
138 input_unregister_device(dev);
139 return 0;
140}
141
142static struct platform_driver amimouse_driver = {
143 .remove = __exit_p(amimouse_remove),
144 .driver = {
145 .name = "amiga-mouse",
146 .owner = THIS_MODULE,
147 },
148};
149
150static int __init amimouse_init(void)
151{
152 return platform_driver_probe(&amimouse_driver, amimouse_probe);
133} 153}
134 154
135module_init(amimouse_init); 155module_init(amimouse_init);
156
157static void __exit amimouse_exit(void)
158{
159 platform_driver_unregister(&amimouse_driver);
160}
161
136module_exit(amimouse_exit); 162module_exit(amimouse_exit);
163
164MODULE_ALIAS("platform:amiga-mouse");
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index b9f58ca82fd1..6703c6b9800a 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -590,4 +590,17 @@ config TOUCHSCREEN_PCAP
590 590
591 To compile this driver as a module, choose M here: the 591 To compile this driver as a module, choose M here: the
592 module will be called pcap_ts. 592 module will be called pcap_ts.
593
594config TOUCHSCREEN_TPS6507X
595 tristate "TPS6507x based touchscreens"
596 depends on I2C
597 help
598 Say Y here if you have a TPS6507x based touchscreen
599 controller.
600
601 If unsure, say N.
602
603 To compile this driver as a module, choose M here: the
604 module will be called tps6507x_ts.
605
593endif 606endif
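Likewise for the new touchscreen entry (assumption: standard Kconfig behaviour):

    CONFIG_TOUCHSCREEN_TPS6507X=m

builds the driver added below as a module (the Makefile hunk adds tps6507x-ts.o).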
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 8ad36eef90a2..497964a7a214 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -46,3 +46,4 @@ obj-$(CONFIG_TOUCHSCREEN_WM97XX_ATMEL) += atmel-wm97xx.o
46obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o 46obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o
47obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o 47obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o
48obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o 48obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o
49obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 532279cda0e4..634f6f6b9b13 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1163,8 +1163,8 @@ static int __devinit ads7846_probe(struct spi_device *spi)
1163 1163
1164 ts->reg = regulator_get(&spi->dev, "vcc"); 1164 ts->reg = regulator_get(&spi->dev, "vcc");
1165 if (IS_ERR(ts->reg)) { 1165 if (IS_ERR(ts->reg)) {
1166 dev_err(&spi->dev, "unable to get regulator: %ld\n", 1166 err = PTR_ERR(ts->reg);
1167 PTR_ERR(ts->reg)); 1167 dev_err(&spi->dev, "unable to get regulator: %ld\n", err);
1168 goto err_free_gpio; 1168 goto err_free_gpio;
1169 } 1169 }
1170 1170
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index e0b7c834111d..ac5d0f9b0cb1 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -413,6 +413,8 @@ static struct dev_pm_ops s3c_ts_pmops = {
413#endif 413#endif
414 414
415static struct platform_device_id s3cts_driver_ids[] = { 415static struct platform_device_id s3cts_driver_ids[] = {
416 { "s3c2410-ts", 0 },
417 { "s3c2440-ts", 0 },
416 { "s3c64xx-ts", FEAT_PEN_IRQ }, 418 { "s3c64xx-ts", FEAT_PEN_IRQ },
417 { } 419 { }
418}; 420};
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
new file mode 100644
index 000000000000..5de80a1a730b
--- /dev/null
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -0,0 +1,400 @@
1/*
2 * drivers/input/touchscreen/tps6507x_ts.c
3 *
4 * Touchscreen driver for the tps6507x chip.
5 *
6 * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
7 *
8 * Credits:
9 *
10 * Using code from tsc2007, MtekVision Co., Ltd.
11 *
12 * For licencing details see kernel-base/COPYING
13 *
14 * TPS65070, TPS65073, TPS650731, and TPS650732 support
15 * 10 bit touch screen interface.
16 */
17
18#include <linux/module.h>
19#include <linux/workqueue.h>
20#include <linux/slab.h>
21#include <linux/input.h>
22#include <linux/platform_device.h>
23#include <linux/mfd/tps6507x.h>
24#include <linux/input/tps6507x-ts.h>
25#include <linux/delay.h>
26
27#define TSC_DEFAULT_POLL_PERIOD 30 /* ms */
28#define TPS_DEFAULT_MIN_PRESSURE 0x30
29#define MAX_10BIT ((1 << 10) - 1)
30
31#define TPS6507X_ADCONFIG_CONVERT_TS (TPS6507X_ADCONFIG_AD_ENABLE | \
32 TPS6507X_ADCONFIG_START_CONVERSION | \
33 TPS6507X_ADCONFIG_INPUT_REAL_TSC)
34#define TPS6507X_ADCONFIG_POWER_DOWN_TS (TPS6507X_ADCONFIG_INPUT_REAL_TSC)
35
36struct ts_event {
37 u16 x;
38 u16 y;
39 u16 pressure;
40};
41
42struct tps6507x_ts {
43 struct input_dev *input_dev;
44 struct device *dev;
45 char phys[32];
46 struct workqueue_struct *wq;
47 struct delayed_work work;
48 unsigned polling; /* polling is active */
49 struct ts_event tc;
50 struct tps6507x_dev *mfd;
51 u16 model;
52 unsigned pendown;
53 int irq;
54 void (*clear_penirq)(void);
55 unsigned long poll_period; /* ms */
56 u16 min_pressure;
57 int vref; /* non-zero to leave vref on */
58};
59
60static int tps6507x_read_u8(struct tps6507x_ts *tsc, u8 reg, u8 *data)
61{
62 int err;
63
64 err = tsc->mfd->read_dev(tsc->mfd, reg, 1, data);
65
66 if (err)
67 return err;
68
69 return 0;
70}
71
72static int tps6507x_write_u8(struct tps6507x_ts *tsc, u8 reg, u8 data)
73{
74 return tsc->mfd->write_dev(tsc->mfd, reg, 1, &data);
75}
76
77static s32 tps6507x_adc_conversion(struct tps6507x_ts *tsc,
78 u8 tsc_mode, u16 *value)
79{
80 s32 ret;
81 u8 adc_status;
82 u8 result;
83
84 /* Route input signal to A/D converter */
85
86 ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE, tsc_mode);
87 if (ret) {
88 dev_err(tsc->dev, "TSC mode read failed\n");
89 goto err;
90 }
91
92 /* Start A/D conversion */
93
94 ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG,
95 TPS6507X_ADCONFIG_CONVERT_TS);
96 if (ret) {
97 dev_err(tsc->dev, "ADC config write failed\n");
98 return ret;
99 }
100
101 do {
102 ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADCONFIG,
103 &adc_status);
104 if (ret) {
105 dev_err(tsc->dev, "ADC config read failed\n");
106 goto err;
107 }
108 } while (adc_status & TPS6507X_ADCONFIG_START_CONVERSION);
109
110 ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_2, &result);
111 if (ret) {
112 dev_err(tsc->dev, "ADC result 2 read failed\n");
113 goto err;
114 }
115
116 *value = (result & TPS6507X_REG_ADRESULT_2_MASK) << 8;
117
118 ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_1, &result);
119 if (ret) {
120 dev_err(tsc->dev, "ADC result 1 read failed\n");
121 goto err;
122 }
123
124 *value |= result;
125
126 dev_dbg(tsc->dev, "TSC channel %d = 0x%X\n", tsc_mode, *value);
127
128err:
129 return ret;
130}
131
132/* Need to call tps6507x_adc_standby() after using A/D converter for the
133 * touch screen interrupt to work properly.
134 */
135
136static s32 tps6507x_adc_standby(struct tps6507x_ts *tsc)
137{
138 s32 ret;
139 s32 loops = 0;
140 u8 val;
141
142 ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG,
143 TPS6507X_ADCONFIG_INPUT_TSC);
144 if (ret)
145 return ret;
146
147 ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE,
148 TPS6507X_TSCMODE_STANDBY);
149 if (ret)
150 return ret;
151
152 ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val);
153 if (ret)
154 return ret;
155
156 while (val & TPS6507X_REG_TSC_INT) {
157 mdelay(10);
158 ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val);
159 if (ret)
160 return ret;
161 loops++;
162 }
163
164 return ret;
165}
166
167static void tps6507x_ts_handler(struct work_struct *work)
168{
169 struct tps6507x_ts *tsc = container_of(work,
170 struct tps6507x_ts, work.work);
171 struct input_dev *input_dev = tsc->input_dev;
172 int pendown;
173 int schd;
174 int poll = 0;
175 s32 ret;
176
177 ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_PRESSURE,
178 &tsc->tc.pressure);
179 if (ret)
180 goto done;
181
182 pendown = tsc->tc.pressure > tsc->min_pressure;
183
184 if (unlikely(!pendown && tsc->pendown)) {
185 dev_dbg(tsc->dev, "UP\n");
186 input_report_key(input_dev, BTN_TOUCH, 0);
187 input_report_abs(input_dev, ABS_PRESSURE, 0);
188 input_sync(input_dev);
189 tsc->pendown = 0;
190 }
191
192 if (pendown) {
193
194 if (!tsc->pendown) {
195 dev_dbg(tsc->dev, "DOWN\n");
196 input_report_key(input_dev, BTN_TOUCH, 1);
197 } else
198 dev_dbg(tsc->dev, "still down\n");
199
200 ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_X_POSITION,
201 &tsc->tc.x);
202 if (ret)
203 goto done;
204
205 ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_Y_POSITION,
206 &tsc->tc.y);
207 if (ret)
208 goto done;
209
210 input_report_abs(input_dev, ABS_X, tsc->tc.x);
211 input_report_abs(input_dev, ABS_Y, tsc->tc.y);
212 input_report_abs(input_dev, ABS_PRESSURE, tsc->tc.pressure);
213 input_sync(input_dev);
214 tsc->pendown = 1;
215 poll = 1;
216 }
217
218done:
219 /* always poll if not using interrupts */
220 poll = 1;
221
222 if (poll) {
223 schd = queue_delayed_work(tsc->wq, &tsc->work,
224 tsc->poll_period * HZ / 1000);
225 if (schd)
226 tsc->polling = 1;
227 else {
228 tsc->polling = 0;
229 dev_err(tsc->dev, "re-schedule failed");
230 }
231 } else
232 tsc->polling = 0;
233
234 ret = tps6507x_adc_standby(tsc);
235}
236
237static int tps6507x_ts_probe(struct platform_device *pdev)
238{
239 int error;
240 struct tps6507x_ts *tsc;
241 struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
242 struct touchscreen_init_data *init_data;
243 struct input_dev *input_dev;
244 struct tps6507x_board *tps_board;
245 int schd;
246
247 /**
248 * tps_board points to pmic related constants
249 * coming from the board-evm file.
250 */
251
252 tps_board = (struct tps6507x_board *)tps6507x_dev->dev->platform_data;
253
254 if (!tps_board) {
255 dev_err(tps6507x_dev->dev,
256 "Could not find tps6507x platform data\n");
257 return -EIO;
258 }
259
 260	/**
 261	 * init_data points to the touchscreen configuration data
 262	 * (struct touchscreen_init_data) coming from the board-evm file.
 263	 */
264
265 init_data = tps_board->tps6507x_ts_init_data;
266
267 tsc = kzalloc(sizeof(struct tps6507x_ts), GFP_KERNEL);
268 if (!tsc) {
269 dev_err(tps6507x_dev->dev, "failed to allocate driver data\n");
270 error = -ENOMEM;
271 goto err0;
272 }
273
274 tps6507x_dev->ts = tsc;
275 tsc->mfd = tps6507x_dev;
276 tsc->dev = tps6507x_dev->dev;
277 input_dev = input_allocate_device();
278 if (!input_dev) {
279 dev_err(tsc->dev, "Failed to allocate input device.\n");
280 error = -ENOMEM;
281 goto err1;
282 }
283
284 input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
285 input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
286
287 input_set_abs_params(input_dev, ABS_X, 0, MAX_10BIT, 0, 0);
288 input_set_abs_params(input_dev, ABS_Y, 0, MAX_10BIT, 0, 0);
289 input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_10BIT, 0, 0);
290
291 input_dev->name = "TPS6507x Touchscreen";
292 input_dev->id.bustype = BUS_I2C;
293 input_dev->dev.parent = tsc->dev;
294
295 snprintf(tsc->phys, sizeof(tsc->phys),
296 "%s/input0", dev_name(tsc->dev));
297 input_dev->phys = tsc->phys;
298
299 dev_dbg(tsc->dev, "device: %s\n", input_dev->phys);
300
301 input_set_drvdata(input_dev, tsc);
302
303 tsc->input_dev = input_dev;
304
305 INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler);
306 tsc->wq = create_workqueue("TPS6507x Touchscreen");
307
308 if (init_data) {
309 tsc->poll_period = init_data->poll_period;
310 tsc->vref = init_data->vref;
311 tsc->min_pressure = init_data->min_pressure;
312 input_dev->id.vendor = init_data->vendor;
313 input_dev->id.product = init_data->product;
314 input_dev->id.version = init_data->version;
315 } else {
316 tsc->poll_period = TSC_DEFAULT_POLL_PERIOD;
317 tsc->min_pressure = TPS_DEFAULT_MIN_PRESSURE;
318 }
319
320 error = tps6507x_adc_standby(tsc);
321 if (error)
322 goto err2;
323
324 error = input_register_device(input_dev);
325 if (error)
326 goto err2;
327
328 schd = queue_delayed_work(tsc->wq, &tsc->work,
329 tsc->poll_period * HZ / 1000);
330
331 if (schd)
332 tsc->polling = 1;
333 else {
334 tsc->polling = 0;
335 dev_err(tsc->dev, "schedule failed");
336 goto err2;
337 }
338
339 return 0;
340
341err2:
342 cancel_delayed_work(&tsc->work);
343 flush_workqueue(tsc->wq);
344 destroy_workqueue(tsc->wq);
345 tsc->wq = 0;
346 input_free_device(input_dev);
347err1:
348 kfree(tsc);
349 tps6507x_dev->ts = NULL;
350err0:
351 return error;
352}
353
354static int __devexit tps6507x_ts_remove(struct platform_device *pdev)
355{
356 struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
357 struct tps6507x_ts *tsc = tps6507x_dev->ts;
358 struct input_dev *input_dev = tsc->input_dev;
359
360 if (!tsc)
361 return 0;
362
363 cancel_delayed_work(&tsc->work);
364 flush_workqueue(tsc->wq);
365 destroy_workqueue(tsc->wq);
366 tsc->wq = 0;
367
368 input_free_device(input_dev);
369
370 tps6507x_dev->ts = NULL;
371 kfree(tsc);
372
373 return 0;
374}
375
376static struct platform_driver tps6507x_ts_driver = {
377 .driver = {
378 .name = "tps6507x-ts",
379 .owner = THIS_MODULE,
380 },
381 .probe = tps6507x_ts_probe,
382 .remove = __devexit_p(tps6507x_ts_remove),
383};
384
385static int __init tps6507x_ts_init(void)
386{
387 return platform_driver_register(&tps6507x_ts_driver);
388}
389module_init(tps6507x_ts_init);
390
391static void __exit tps6507x_ts_exit(void)
392{
393 platform_driver_unregister(&tps6507x_ts_driver);
394}
395module_exit(tps6507x_ts_exit);
396
397MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>");
398MODULE_DESCRIPTION("TPS6507x - TouchScreen driver");
399MODULE_LICENSE("GPL v2");
400MODULE_ALIAS("platform:tps6507x-tsc");
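Note: the new tps6507x-ts driver above takes its tuning values (poll period, minimum pen-down pressure, input IDs) from board platform data reached through the parent tps6507x MFD device. A minimal board-file sketch of that wiring follows; the field names mirror what tps6507x_ts_probe() reads, but the exact struct layouts in <linux/input/tps6507x-ts.h> and <linux/mfd/tps6507x.h> are assumed here rather than quoted from this diff.

/* Hedged sketch: board platform data consumed by tps6507x_ts_probe().
 * Values are illustrative; struct layouts are assumptions.
 */
static struct touchscreen_init_data board_tsc_init_data = {
	.poll_period	= 30,		/* ms between ADC polls */
	.min_pressure	= 0x30,		/* below this the pen is treated as up */
	.vref		= 0,		/* power VREF down between conversions */
};

static struct tps6507x_board board_tps6507x_pdata = {
	.tps6507x_ts_init_data	= &board_tsc_init_data,
	/* PMIC/regulator init data would also be referenced from here */
};

This is the structure the probe dereferences as tps6507x_dev->dev->platform_data, so a board would attach it to the I2C board info used to register the tps6507x PMIC itself.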
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 29a8bbf3f086..567d57215c28 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -857,6 +857,11 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
857 if ((pkt[0] & 0xe0) != 0xe0) 857 if ((pkt[0] & 0xe0) != 0xe0)
858 return 0; 858 return 0;
859 859
860 if (be16_to_cpu(packet->data_len) > 0xff)
861 packet->data_len = cpu_to_be16(be16_to_cpu(packet->data_len) - 0x100);
862 if (be16_to_cpu(packet->x_len) > 0xff)
863 packet->x_len = cpu_to_be16(be16_to_cpu(packet->x_len) - 0x80);
864
860 /* send ACK */ 865 /* send ACK */
861 ret = usb_submit_urb(priv->ack, GFP_ATOMIC); 866 ret = usb_submit_urb(priv->ack, GFP_ATOMIC);
862 867
@@ -1112,7 +1117,7 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
1112 1117
1113#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO 1118#ifdef CONFIG_TOUCHSCREEN_USB_NEXIO
1114 [DEVTYPE_NEXIO] = { 1119 [DEVTYPE_NEXIO] = {
1115 .rept_size = 128, 1120 .rept_size = 1024,
1116 .irq_always = true, 1121 .irq_always = true,
1117 .read_data = nexio_read_data, 1122 .read_data = nexio_read_data,
1118 .init = nexio_init, 1123 .init = nexio_init,
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index c3243c913ec0..81048b8ed8ad 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -98,8 +98,6 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
98 if (*debug & DEBUG_TIMER) 98 if (*debug & DEBUG_TIMER)
99 printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__, 99 printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__,
100 filep, buf, (int)count, off); 100 filep, buf, (int)count, off);
101 if (*off != filep->f_pos)
102 return -ESPIPE;
103 101
104 if (list_empty(&dev->expired) && (dev->work == 0)) { 102 if (list_empty(&dev->expired) && (dev->work == 0)) {
105 if (filep->f_flags & O_NONBLOCK) 103 if (filep->f_flags & O_NONBLOCK)
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 505eb64c329c..81bf25e67ce1 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -21,7 +21,7 @@ comment "LED drivers"
21 21
22config LEDS_88PM860X 22config LEDS_88PM860X
23 tristate "LED Support for Marvell 88PM860x PMIC" 23 tristate "LED Support for Marvell 88PM860x PMIC"
24 depends on LEDS_CLASS && MFD_88PM860X 24 depends on MFD_88PM860X
25 help 25 help
26 This option enables support for on-chip LED drivers found on Marvell 26 This option enables support for on-chip LED drivers found on Marvell
27 Semiconductor 88PM8606 PMIC. 27 Semiconductor 88PM8606 PMIC.
@@ -67,6 +67,16 @@ config LEDS_NET48XX
67 This option enables support for the Soekris net4801 and net4826 error 67 This option enables support for the Soekris net4801 and net4826 error
68 LED. 68 LED.
69 69
70config LEDS_NET5501
71 tristate "LED Support for Soekris net5501 series Error LED"
72 depends on LEDS_TRIGGERS
73 depends on X86 && LEDS_GPIO_PLATFORM && GPIO_CS5535
74 select LEDS_TRIGGER_DEFAULT_ON
75 default n
76 help
77 Add support for the Soekris net5501 board (detection, error led
78 and GPIO).
79
70config LEDS_FSG 80config LEDS_FSG
71 tristate "LED Support for the Freecom FSG-3" 81 tristate "LED Support for the Freecom FSG-3"
72 depends on MACH_FSG 82 depends on MACH_FSG
@@ -285,6 +295,13 @@ config LEDS_DELL_NETBOOKS
285 This adds support for the Latitude 2100 and similar 295 This adds support for the Latitude 2100 and similar
286 notebooks that have an external LED. 296 notebooks that have an external LED.
287 297
298config LEDS_MC13783
299 tristate "LED Support for MC13783 PMIC"
300 depends on MFD_MC13783
301 help
302 This option enable support for on-chip LED drivers found
303 on Freescale Semiconductor MC13783 PMIC.
304
288config LEDS_TRIGGERS 305config LEDS_TRIGGERS
289 bool "LED Trigger support" 306 bool "LED Trigger support"
290 help 307 help
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 0cd8b9957380..2493de499374 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
13obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o 13obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
14obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o 14obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
15obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o 15obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
16obj-$(CONFIG_LEDS_NET5501) += leds-net5501.o
16obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o 17obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
17obj-$(CONFIG_LEDS_ALIX2) += leds-alix2.o 18obj-$(CONFIG_LEDS_ALIX2) += leds-alix2.o
18obj-$(CONFIG_LEDS_H1940) += leds-h1940.o 19obj-$(CONFIG_LEDS_H1940) += leds-h1940.o
@@ -35,6 +36,7 @@ obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o
35obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o 36obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o
36obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o 37obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
37obj-$(CONFIG_LEDS_DELL_NETBOOKS) += dell-led.o 38obj-$(CONFIG_LEDS_DELL_NETBOOKS) += dell-led.o
39obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o
38 40
39# LED SPI Drivers 41# LED SPI Drivers
40obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o 42obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 69e7d86a5143..260660076507 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -74,7 +74,7 @@ static ssize_t led_max_brightness_show(struct device *dev,
74 74
75static struct device_attribute led_class_attrs[] = { 75static struct device_attribute led_class_attrs[] = {
76 __ATTR(brightness, 0644, led_brightness_show, led_brightness_store), 76 __ATTR(brightness, 0644, led_brightness_show, led_brightness_store),
77 __ATTR(max_brightness, 0644, led_max_brightness_show, NULL), 77 __ATTR(max_brightness, 0444, led_max_brightness_show, NULL),
78#ifdef CONFIG_LEDS_TRIGGERS 78#ifdef CONFIG_LEDS_TRIGGERS
79 __ATTR(trigger, 0644, led_trigger_show, led_trigger_store), 79 __ATTR(trigger, 0644, led_trigger_show, led_trigger_store),
80#endif 80#endif
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 16a60c06c96c..b7677106cff8 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -256,8 +256,10 @@ static int pm860x_led_probe(struct platform_device *pdev)
256 if (pdev->dev.parent->platform_data) { 256 if (pdev->dev.parent->platform_data) {
257 pm860x_pdata = pdev->dev.parent->platform_data; 257 pm860x_pdata = pdev->dev.parent->platform_data;
258 pdata = pm860x_pdata->led; 258 pdata = pm860x_pdata->led;
259 } else 259 } else {
260 pdata = NULL; 260 dev_err(&pdev->dev, "missing platform data\n");
261 return -EINVAL;
262 }
261 263
262 data = kzalloc(sizeof(struct pm860x_led), GFP_KERNEL); 264 data = kzalloc(sizeof(struct pm860x_led), GFP_KERNEL);
263 if (data == NULL) 265 if (data == NULL)
@@ -268,8 +270,11 @@ static int pm860x_led_probe(struct platform_device *pdev)
268 data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion; 270 data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
269 data->iset = pdata->iset; 271 data->iset = pdata->iset;
270 data->port = __check_device(pdata, data->name); 272 data->port = __check_device(pdata, data->name);
271 if (data->port < 0) 273 if (data->port < 0) {
274 dev_err(&pdev->dev, "check device failed\n");
275 kfree(data);
272 return -EINVAL; 276 return -EINVAL;
277 }
273 278
274 data->current_brightness = 0; 279 data->current_brightness = 0;
275 data->cdev.name = data->name; 280 data->cdev.name = data->name;
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 6d94b0b9979c..cc22eeefa10b 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -26,7 +26,8 @@ struct gpio_led_data {
26 u8 new_level; 26 u8 new_level;
27 u8 can_sleep; 27 u8 can_sleep;
28 u8 active_low; 28 u8 active_low;
29 int (*platform_gpio_blink_set)(unsigned gpio, 29 u8 blinking;
30 int (*platform_gpio_blink_set)(unsigned gpio, int state,
30 unsigned long *delay_on, unsigned long *delay_off); 31 unsigned long *delay_on, unsigned long *delay_off);
31}; 32};
32 33
@@ -35,7 +36,13 @@ static void gpio_led_work(struct work_struct *work)
35 struct gpio_led_data *led_dat = 36 struct gpio_led_data *led_dat =
36 container_of(work, struct gpio_led_data, work); 37 container_of(work, struct gpio_led_data, work);
37 38
38 gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level); 39 if (led_dat->blinking) {
40 led_dat->platform_gpio_blink_set(led_dat->gpio,
41 led_dat->new_level,
42 NULL, NULL);
43 led_dat->blinking = 0;
44 } else
45 gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level);
39} 46}
40 47
41static void gpio_led_set(struct led_classdev *led_cdev, 48static void gpio_led_set(struct led_classdev *led_cdev,
@@ -60,8 +67,14 @@ static void gpio_led_set(struct led_classdev *led_cdev,
60 if (led_dat->can_sleep) { 67 if (led_dat->can_sleep) {
61 led_dat->new_level = level; 68 led_dat->new_level = level;
62 schedule_work(&led_dat->work); 69 schedule_work(&led_dat->work);
63 } else 70 } else {
64 gpio_set_value(led_dat->gpio, level); 71 if (led_dat->blinking) {
72 led_dat->platform_gpio_blink_set(led_dat->gpio, level,
73 NULL, NULL);
74 led_dat->blinking = 0;
75 } else
76 gpio_set_value(led_dat->gpio, level);
77 }
65} 78}
66 79
67static int gpio_blink_set(struct led_classdev *led_cdev, 80static int gpio_blink_set(struct led_classdev *led_cdev,
@@ -70,12 +83,14 @@ static int gpio_blink_set(struct led_classdev *led_cdev,
70 struct gpio_led_data *led_dat = 83 struct gpio_led_data *led_dat =
71 container_of(led_cdev, struct gpio_led_data, cdev); 84 container_of(led_cdev, struct gpio_led_data, cdev);
72 85
73 return led_dat->platform_gpio_blink_set(led_dat->gpio, delay_on, delay_off); 86 led_dat->blinking = 1;
87 return led_dat->platform_gpio_blink_set(led_dat->gpio, GPIO_LED_BLINK,
88 delay_on, delay_off);
74} 89}
75 90
76static int __devinit create_gpio_led(const struct gpio_led *template, 91static int __devinit create_gpio_led(const struct gpio_led *template,
77 struct gpio_led_data *led_dat, struct device *parent, 92 struct gpio_led_data *led_dat, struct device *parent,
78 int (*blink_set)(unsigned, unsigned long *, unsigned long *)) 93 int (*blink_set)(unsigned, int, unsigned long *, unsigned long *))
79{ 94{
80 int ret, state; 95 int ret, state;
81 96
@@ -97,6 +112,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
97 led_dat->gpio = template->gpio; 112 led_dat->gpio = template->gpio;
98 led_dat->can_sleep = gpio_cansleep(template->gpio); 113 led_dat->can_sleep = gpio_cansleep(template->gpio);
99 led_dat->active_low = template->active_low; 114 led_dat->active_low = template->active_low;
115 led_dat->blinking = 0;
100 if (blink_set) { 116 if (blink_set) {
101 led_dat->platform_gpio_blink_set = blink_set; 117 led_dat->platform_gpio_blink_set = blink_set;
102 led_dat->cdev.blink_set = gpio_blink_set; 118 led_dat->cdev.blink_set = gpio_blink_set;
@@ -113,7 +129,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
113 ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state); 129 ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
114 if (ret < 0) 130 if (ret < 0)
115 goto err; 131 goto err;
116 132
117 INIT_WORK(&led_dat->work, gpio_led_work); 133 INIT_WORK(&led_dat->work, gpio_led_work);
118 134
119 ret = led_classdev_register(parent, &led_dat->cdev); 135 ret = led_classdev_register(parent, &led_dat->cdev);
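Note: the leds-gpio change above widens the board-supplied blink hook to take an int state argument: GPIO_LED_BLINK asks the platform to start hardware blinking, while other values hand an ordinary on/off level back to board code once led_dat->blinking is set. Below is a hedged sketch of a callback matching the new prototype; the default period, the level handling, and the gpio_blink_set field name are assumptions, not quoted from this patch.

#include <linux/leds.h>
#include <linux/gpio.h>

static struct gpio_led board_leds[] = {
	{
		.name	= "board:green:status",
		.gpio	= 10,			/* hypothetical pin */
	},
};

/* Matches the new prototype:
 * (unsigned gpio, int state, unsigned long *delay_on, unsigned long *delay_off)
 */
static int board_gpio_blink_set(unsigned gpio, int state,
				unsigned long *delay_on,
				unsigned long *delay_off)
{
	if (state != GPIO_LED_BLINK) {
		/* blink cancelled: drive the pin as a plain level */
		gpio_set_value(gpio, state);
		return 0;
	}

	/* if no period was requested, pick an arbitrary default */
	if (!*delay_on && !*delay_off)
		*delay_on = *delay_off = 500;	/* ms */

	/* program the SoC/board blink hardware for this pin here */
	return 0;
}

static struct gpio_led_platform_data board_led_pdata = {
	.num_leds	= ARRAY_SIZE(board_leds),
	.leds		= board_leds,
	.gpio_blink_set	= board_gpio_blink_set,
};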
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 8d5ecceba181..932a58da76c4 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -379,6 +379,7 @@ static int __devinit lp3944_probe(struct i2c_client *client,
379{ 379{
380 struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data; 380 struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data;
381 struct lp3944_data *data; 381 struct lp3944_data *data;
382 int err;
382 383
383 if (lp3944_pdata == NULL) { 384 if (lp3944_pdata == NULL) {
384 dev_err(&client->dev, "no platform data\n"); 385 dev_err(&client->dev, "no platform data\n");
@@ -401,9 +402,13 @@ static int __devinit lp3944_probe(struct i2c_client *client,
401 402
402 mutex_init(&data->lock); 403 mutex_init(&data->lock);
403 404
404 dev_info(&client->dev, "lp3944 enabled\n"); 405 err = lp3944_configure(client, data, lp3944_pdata);
406 if (err < 0) {
407 kfree(data);
408 return err;
409 }
405 410
406 lp3944_configure(client, data, lp3944_pdata); 411 dev_info(&client->dev, "lp3944 enabled\n");
407 return 0; 412 return 0;
408} 413}
409 414
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
new file mode 100644
index 000000000000..f05bb08d0f09
--- /dev/null
+++ b/drivers/leds/leds-mc13783.c
@@ -0,0 +1,403 @@
1/*
2 * LEDs driver for Freescale MC13783
3 *
4 * Copyright (C) 2010 Philippe Rétornaz
5 *
6 * Based on leds-da903x:
7 * Copyright (C) 2008 Compulab, Ltd.
8 * Mike Rapoport <mike@compulab.co.il>
9 *
10 * Copyright (C) 2006-2008 Marvell International Ltd.
11 * Eric Miao <eric.miao@marvell.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18#include <linux/module.h>
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/platform_device.h>
22#include <linux/leds.h>
23#include <linux/workqueue.h>
24#include <linux/mfd/mc13783.h>
25#include <linux/slab.h>
26
27struct mc13783_led {
28 struct led_classdev cdev;
29 struct work_struct work;
30 struct mc13783 *master;
31 enum led_brightness new_brightness;
32 int id;
33};
34
35#define MC13783_REG_LED_CONTROL_0 51
36#define MC13783_LED_C0_ENABLE_BIT (1 << 0)
37#define MC13783_LED_C0_TRIODE_MD_BIT (1 << 7)
38#define MC13783_LED_C0_TRIODE_AD_BIT (1 << 8)
39#define MC13783_LED_C0_TRIODE_KP_BIT (1 << 9)
40#define MC13783_LED_C0_BOOST_BIT (1 << 10)
41#define MC13783_LED_C0_ABMODE_MASK 0x7
42#define MC13783_LED_C0_ABMODE 11
43#define MC13783_LED_C0_ABREF_MASK 0x3
44#define MC13783_LED_C0_ABREF 14
45
46#define MC13783_REG_LED_CONTROL_1 52
47#define MC13783_LED_C1_TC1HALF_BIT (1 << 18)
48
49#define MC13783_REG_LED_CONTROL_2 53
50#define MC13783_LED_C2_BL_P_MASK 0xf
51#define MC13783_LED_C2_MD_P 9
52#define MC13783_LED_C2_AD_P 13
53#define MC13783_LED_C2_KP_P 17
54#define MC13783_LED_C2_BL_C_MASK 0x7
55#define MC13783_LED_C2_MD_C 0
56#define MC13783_LED_C2_AD_C 3
57#define MC13783_LED_C2_KP_C 6
58
59#define MC13783_REG_LED_CONTROL_3 54
60#define MC13783_LED_C3_TC_P 6
61#define MC13783_LED_C3_TC_P_MASK 0x1f
62
63#define MC13783_REG_LED_CONTROL_4 55
64#define MC13783_REG_LED_CONTROL_5 56
65
66#define MC13783_LED_Cx_PERIOD 21
67#define MC13783_LED_Cx_PERIOD_MASK 0x3
68#define MC13783_LED_Cx_SLEWLIM_BIT (1 << 23)
69#define MC13783_LED_Cx_TRIODE_TC_BIT (1 << 23)
70#define MC13783_LED_Cx_TC_C_MASK 0x3
71
72static void mc13783_led_work(struct work_struct *work)
73{
74 struct mc13783_led *led = container_of(work, struct mc13783_led, work);
75 int reg = 0;
76 int mask = 0;
77 int value = 0;
78 int bank, off, shift;
79
80 switch (led->id) {
81 case MC13783_LED_MD:
82 reg = MC13783_REG_LED_CONTROL_2;
83 mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_MD_P;
84 value = (led->new_brightness >> 4) << MC13783_LED_C2_MD_P;
85 break;
86 case MC13783_LED_AD:
87 reg = MC13783_REG_LED_CONTROL_2;
88 mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_AD_P;
89 value = (led->new_brightness >> 4) << MC13783_LED_C2_AD_P;
90 break;
91 case MC13783_LED_KP:
92 reg = MC13783_REG_LED_CONTROL_2;
93 mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_KP_P;
94 value = (led->new_brightness >> 4) << MC13783_LED_C2_KP_P;
95 break;
96 case MC13783_LED_R1:
97 case MC13783_LED_G1:
98 case MC13783_LED_B1:
99 case MC13783_LED_R2:
100 case MC13783_LED_G2:
101 case MC13783_LED_B2:
102 case MC13783_LED_R3:
103 case MC13783_LED_G3:
104 case MC13783_LED_B3:
105 off = led->id - MC13783_LED_R1;
106 bank = off/3;
107 reg = MC13783_REG_LED_CONTROL_3 + off/3;
108 shift = (off - bank * 3) * 5 + MC13783_LED_C3_TC_P;
109 value = (led->new_brightness >> 3) << shift;
110 mask = MC13783_LED_C3_TC_P_MASK << shift;
111 break;
112 }
113
114 mc13783_lock(led->master);
115
116 mc13783_reg_rmw(led->master, reg, mask, value);
117
118 mc13783_unlock(led->master);
119}
120
121static void mc13783_led_set(struct led_classdev *led_cdev,
122 enum led_brightness value)
123{
124 struct mc13783_led *led;
125
126 led = container_of(led_cdev, struct mc13783_led, cdev);
127 led->new_brightness = value;
128 schedule_work(&led->work);
129}
130
131static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current)
132{
133 int shift = 0;
134 int mask = 0;
135 int value = 0;
136 int reg = 0;
137 int ret, bank;
138
139 switch (led->id) {
140 case MC13783_LED_MD:
141 shift = MC13783_LED_C2_MD_C;
142 mask = MC13783_LED_C2_BL_C_MASK;
143 value = max_current & MC13783_LED_C2_BL_C_MASK;
144 reg = MC13783_REG_LED_CONTROL_2;
145 break;
146 case MC13783_LED_AD:
147 shift = MC13783_LED_C2_AD_C;
148 mask = MC13783_LED_C2_BL_C_MASK;
149 value = max_current & MC13783_LED_C2_BL_C_MASK;
150 reg = MC13783_REG_LED_CONTROL_2;
151 break;
152 case MC13783_LED_KP:
153 shift = MC13783_LED_C2_KP_C;
154 mask = MC13783_LED_C2_BL_C_MASK;
155 value = max_current & MC13783_LED_C2_BL_C_MASK;
156 reg = MC13783_REG_LED_CONTROL_2;
157 break;
158 case MC13783_LED_R1:
159 case MC13783_LED_G1:
160 case MC13783_LED_B1:
161 case MC13783_LED_R2:
162 case MC13783_LED_G2:
163 case MC13783_LED_B2:
164 case MC13783_LED_R3:
165 case MC13783_LED_G3:
166 case MC13783_LED_B3:
167 bank = (led->id - MC13783_LED_R1)/3;
168 reg = MC13783_REG_LED_CONTROL_3 + bank;
169 shift = ((led->id - MC13783_LED_R1) - bank * 3) * 2;
170 mask = MC13783_LED_Cx_TC_C_MASK;
171 value = max_current & MC13783_LED_Cx_TC_C_MASK;
172 break;
173 }
174
175 mc13783_lock(led->master);
176
177 ret = mc13783_reg_rmw(led->master, reg, mask << shift,
178 value << shift);
179
180 mc13783_unlock(led->master);
181 return ret;
182}
183
184static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
185{
186 struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
187 struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
188 int ret = 0;
189 int reg = 0;
190
191 mc13783_lock(dev);
192
193 if (pdata->flags & MC13783_LED_TC1HALF)
194 reg |= MC13783_LED_C1_TC1HALF_BIT;
195
196 if (pdata->flags & MC13783_LED_SLEWLIMTC)
197 reg |= MC13783_LED_Cx_SLEWLIM_BIT;
198
199 ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_1, reg);
200 if (ret)
201 goto out;
202
203 reg = (pdata->bl_period & MC13783_LED_Cx_PERIOD_MASK) <<
204 MC13783_LED_Cx_PERIOD;
205
206 if (pdata->flags & MC13783_LED_SLEWLIMBL)
207 reg |= MC13783_LED_Cx_SLEWLIM_BIT;
208
209 ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_2, reg);
210 if (ret)
211 goto out;
212
213 reg = (pdata->tc1_period & MC13783_LED_Cx_PERIOD_MASK) <<
214 MC13783_LED_Cx_PERIOD;
215
216 if (pdata->flags & MC13783_LED_TRIODE_TC1)
217 reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
218
219 ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_3, reg);
220 if (ret)
221 goto out;
222
223 reg = (pdata->tc2_period & MC13783_LED_Cx_PERIOD_MASK) <<
224 MC13783_LED_Cx_PERIOD;
225
226 if (pdata->flags & MC13783_LED_TRIODE_TC2)
227 reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
228
229 ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_4, reg);
230 if (ret)
231 goto out;
232
233 reg = (pdata->tc3_period & MC13783_LED_Cx_PERIOD_MASK) <<
234 MC13783_LED_Cx_PERIOD;
235
236 if (pdata->flags & MC13783_LED_TRIODE_TC3)
 237		reg |= MC13783_LED_Cx_TRIODE_TC_BIT;
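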
238
239 ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_5, reg);
240 if (ret)
241 goto out;
242
243 reg = MC13783_LED_C0_ENABLE_BIT;
244 if (pdata->flags & MC13783_LED_TRIODE_MD)
245 reg |= MC13783_LED_C0_TRIODE_MD_BIT;
246 if (pdata->flags & MC13783_LED_TRIODE_AD)
247 reg |= MC13783_LED_C0_TRIODE_AD_BIT;
248 if (pdata->flags & MC13783_LED_TRIODE_KP)
249 reg |= MC13783_LED_C0_TRIODE_KP_BIT;
250 if (pdata->flags & MC13783_LED_BOOST_EN)
251 reg |= MC13783_LED_C0_BOOST_BIT;
252
253 reg |= (pdata->abmode & MC13783_LED_C0_ABMODE_MASK) <<
254 MC13783_LED_C0_ABMODE;
255 reg |= (pdata->abref & MC13783_LED_C0_ABREF_MASK) <<
256 MC13783_LED_C0_ABREF;
257
258 ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_0, reg);
259
260out:
261 mc13783_unlock(dev);
262 return ret;
263}
264
265static int __devinit mc13783_led_probe(struct platform_device *pdev)
266{
267 struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
268 struct mc13783_led_platform_data *led_cur;
269 struct mc13783_led *led, *led_dat;
270 int ret, i;
271 int init_led = 0;
272
273 if (pdata == NULL) {
274 dev_err(&pdev->dev, "missing platform data\n");
275 return -ENODEV;
276 }
277
278 if (pdata->num_leds < 1 || pdata->num_leds > MC13783_LED_MAX) {
279 dev_err(&pdev->dev, "Invalid led count %d\n", pdata->num_leds);
280 return -EINVAL;
281 }
282
283 led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
284 if (led == NULL) {
285 dev_err(&pdev->dev, "failed to alloc memory\n");
286 return -ENOMEM;
287 }
288
289 ret = mc13783_leds_prepare(pdev);
290 if (ret) {
291 dev_err(&pdev->dev, "unable to init led driver\n");
292 goto err_free;
293 }
294
295 for (i = 0; i < pdata->num_leds; i++) {
296 led_dat = &led[i];
297 led_cur = &pdata->led[i];
298
299 if (led_cur->id > MC13783_LED_MAX || led_cur->id < 0) {
300 dev_err(&pdev->dev, "invalid id %d\n", led_cur->id);
301 ret = -EINVAL;
302 goto err_register;
303 }
304
305 if (init_led & (1 << led_cur->id)) {
306 dev_err(&pdev->dev, "led %d already initialized\n",
307 led_cur->id);
308 ret = -EINVAL;
309 goto err_register;
310 }
311
312 init_led |= 1 << led_cur->id;
313 led_dat->cdev.name = led_cur->name;
314 led_dat->cdev.default_trigger = led_cur->default_trigger;
315 led_dat->cdev.brightness_set = mc13783_led_set;
316 led_dat->cdev.brightness = LED_OFF;
317 led_dat->id = led_cur->id;
318 led_dat->master = dev_get_drvdata(pdev->dev.parent);
319
320 INIT_WORK(&led_dat->work, mc13783_led_work);
321
322 ret = led_classdev_register(pdev->dev.parent, &led_dat->cdev);
323 if (ret) {
324 dev_err(&pdev->dev, "failed to register led %d\n",
325 led_dat->id);
326 goto err_register;
327 }
328
329 ret = mc13783_led_setup(led_dat, led_cur->max_current);
330 if (ret) {
331 dev_err(&pdev->dev, "unable to init led %d\n",
332 led_dat->id);
333 i++;
334 goto err_register;
335 }
336 }
337
338 platform_set_drvdata(pdev, led);
339 return 0;
340
341err_register:
342 for (i = i - 1; i >= 0; i--) {
343 led_classdev_unregister(&led[i].cdev);
344 cancel_work_sync(&led[i].work);
345 }
346
347err_free:
348 kfree(led);
349 return ret;
350}
351
352static int __devexit mc13783_led_remove(struct platform_device *pdev)
353{
354 struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
355 struct mc13783_led *led = platform_get_drvdata(pdev);
356 struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
357 int i;
358
359 for (i = 0; i < pdata->num_leds; i++) {
360 led_classdev_unregister(&led[i].cdev);
361 cancel_work_sync(&led[i].work);
362 }
363
364 mc13783_lock(dev);
365
366 mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_0, 0);
367 mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_1, 0);
368 mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_2, 0);
369 mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_3, 0);
370 mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_4, 0);
371 mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_5, 0);
372
373 mc13783_unlock(dev);
374
375 kfree(led);
376 return 0;
377}
378
379static struct platform_driver mc13783_led_driver = {
380 .driver = {
381 .name = "mc13783-led",
382 .owner = THIS_MODULE,
383 },
384 .probe = mc13783_led_probe,
385 .remove = __devexit_p(mc13783_led_remove),
386};
387
388static int __init mc13783_led_init(void)
389{
390 return platform_driver_register(&mc13783_led_driver);
391}
392module_init(mc13783_led_init);
393
394static void __exit mc13783_led_exit(void)
395{
396 platform_driver_unregister(&mc13783_led_driver);
397}
398module_exit(mc13783_led_exit);
399
400MODULE_DESCRIPTION("LEDs driver for Freescale MC13783 PMIC");
401MODULE_AUTHOR("Philippe Retornaz <philippe.retornaz@epfl.ch>");
402MODULE_LICENSE("GPL");
403MODULE_ALIAS("platform:mc13783-led");
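Note: mc13783_led_probe() and mc13783_leds_prepare() above are driven entirely by mc13783_leds_platform_data supplied through the parent MC13783 MFD device. The sketch below wires up one RGB channel using only fields the code above actually reads; the declarations are assumed to live in <linux/mfd/mc13783.h>, as the driver's includes suggest, and may differ in detail.

/* Hedged example: one LED on the first RGB bank, slew limiting enabled. */
static struct mc13783_led_platform_data board_mc13783_leds[] = {
	{
		.id		 = MC13783_LED_R1,
		.name		 = "board:red:indicator",
		.default_trigger = "heartbeat",
		.max_current	 = 2,	/* register code, masked by the driver */
	},
};

static struct mc13783_leds_platform_data board_mc13783_led_pdata = {
	.num_leds	= ARRAY_SIZE(board_mc13783_leds),
	.led		= board_mc13783_leds,
	.flags		= MC13783_LED_SLEWLIMTC,
	.tc1_period	= 1,	/* limited by MC13783_LED_Cx_PERIOD_MASK */
};

A board would hang this off the MC13783 core driver's platform data so that dev_get_platdata(&pdev->dev) in the probe above finds it.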
diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c
new file mode 100644
index 000000000000..3063f591f0dc
--- /dev/null
+++ b/drivers/leds/leds-net5501.c
@@ -0,0 +1,94 @@
1/*
2 * Soekris board support code
3 *
4 * Copyright (C) 2008-2009 Tower Technologies
5 * Written by Alessandro Zummo <a.zummo@towertech.it>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/io.h>
15#include <linux/string.h>
16#include <linux/leds.h>
17#include <linux/platform_device.h>
18#include <linux/gpio.h>
19
20#include <asm/geode.h>
21
22static struct gpio_led net5501_leds[] = {
23 {
24 .name = "error",
25 .gpio = 6,
26 .default_trigger = "default-on",
27 },
28};
29
30static struct gpio_led_platform_data net5501_leds_data = {
31 .num_leds = ARRAY_SIZE(net5501_leds),
32 .leds = net5501_leds,
33};
34
35static struct platform_device net5501_leds_dev = {
36 .name = "leds-gpio",
37 .id = -1,
38 .dev.platform_data = &net5501_leds_data,
39};
40
41static void __init init_net5501(void)
42{
43 platform_device_register(&net5501_leds_dev);
44}
45
46struct soekris_board {
47 u16 offset;
48 char *sig;
49 u8 len;
50 void (*init)(void);
51};
52
53static struct soekris_board __initdata boards[] = {
54 { 0xb7b, "net5501", 7, init_net5501 }, /* net5501 v1.33/1.33c */
55 { 0xb1f, "net5501", 7, init_net5501 }, /* net5501 v1.32i */
56};
57
58static int __init soekris_init(void)
59{
60 int i;
61 unsigned char *rombase, *bios;
62
63 if (!is_geode())
64 return 0;
65
66 rombase = ioremap(0xffff0000, 0xffff);
67 if (!rombase) {
 68		printk(KERN_INFO "Soekris net5501 LED driver failed to get rombase\n");
69 return 0;
70 }
71
72 bios = rombase + 0x20; /* null terminated */
73
74 if (strncmp(bios, "comBIOS", 7))
75 goto unmap;
76
77 for (i = 0; i < ARRAY_SIZE(boards); i++) {
78 unsigned char *model = rombase + boards[i].offset;
79
80 if (strncmp(model, boards[i].sig, boards[i].len) == 0) {
81 printk(KERN_INFO "Soekris %s: %s\n", model, bios);
82
83 if (boards[i].init)
84 boards[i].init();
85 break;
86 }
87 }
88
89unmap:
90 iounmap(rombase);
91 return 0;
92}
93
94arch_initcall(soekris_init);
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 51477ec71391..a688293abd0b 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -534,7 +534,7 @@ static int __init nas_gpio_init(void)
534 set_power_light_amber_noblink(); 534 set_power_light_amber_noblink();
535 return 0; 535 return 0;
536out_err: 536out_err:
537 for (; i >= 0; i--) 537 for (i--; i >= 0; i--)
538 unregister_nasgpio_led(i); 538 unregister_nasgpio_led(i);
539 pci_unregister_driver(&nas_gpio_pci_driver); 539 pci_unregister_driver(&nas_gpio_pci_driver);
540 return ret; 540 return ret;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9ea17d6c799b..d2c0f94fa37d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4645,7 +4645,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4645 kfree(percpu->scribble); 4645 kfree(percpu->scribble);
4646 pr_err("%s: failed memory allocation for cpu%ld\n", 4646 pr_err("%s: failed memory allocation for cpu%ld\n",
4647 __func__, cpu); 4647 __func__, cpu);
4648 return NOTIFY_BAD; 4648 return notifier_from_errno(-ENOMEM);
4649 } 4649 }
4650 break; 4650 break;
4651 case CPU_DEAD: 4651 case CPU_DEAD:
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index d33693c13368..c4b117f5fb70 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -186,14 +186,9 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
186 if (!dev) 186 if (!dev)
187 return -ENXIO; 187 return -ENXIO;
188 188
189 ops = kmalloc(kcmd.oplen, GFP_KERNEL); 189 ops = memdup_user(kcmd.opbuf, kcmd.oplen);
190 if (!ops) 190 if (IS_ERR(ops))
191 return -ENOMEM; 191 return PTR_ERR(ops);
192
193 if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) {
194 kfree(ops);
195 return -EFAULT;
196 }
197 192
198 /* 193 /*
199 * It's possible to have a _very_ large table 194 * It's possible to have a _very_ large table
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 405d2d5183cf..2c65a2c57294 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -566,7 +566,7 @@ out:
566 return ret; 566 return ret;
567} 567}
568 568
569static void __devexit device_irq_exit(struct pm860x_chip *chip) 569static void device_irq_exit(struct pm860x_chip *chip)
570{ 570{
571 if (chip->core_irq) 571 if (chip->core_irq)
572 free_irq(chip->core_irq, chip); 572 free_irq(chip->core_irq, chip);
@@ -703,7 +703,7 @@ out:
703 return; 703 return;
704} 704}
705 705
706int pm860x_device_init(struct pm860x_chip *chip, 706int __devinit pm860x_device_init(struct pm860x_chip *chip,
707 struct pm860x_platform_data *pdata) 707 struct pm860x_platform_data *pdata)
708{ 708{
709 chip->core_irq = 0; 709 chip->core_irq = 0;
@@ -731,7 +731,7 @@ int pm860x_device_init(struct pm860x_chip *chip,
731 return 0; 731 return 0;
732} 732}
733 733
734void pm860x_device_exit(struct pm860x_chip *chip) 734void __devexit pm860x_device_exit(struct pm860x_chip *chip)
735{ 735{
736 device_irq_exit(chip); 736 device_irq_exit(chip);
737 mfd_remove_devices(chip->dev); 737 mfd_remove_devices(chip->dev);
diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
index 4a6e7186334e..c933b64d1283 100644
--- a/drivers/mfd/88pm860x-i2c.c
+++ b/drivers/mfd/88pm860x-i2c.c
@@ -200,8 +200,8 @@ static int __devexit pm860x_remove(struct i2c_client *client)
200 200
201 pm860x_device_exit(chip); 201 pm860x_device_exit(chip);
202 i2c_unregister_device(chip->companion); 202 i2c_unregister_device(chip->companion);
203 i2c_set_clientdata(chip->companion, NULL);
204 i2c_set_clientdata(chip->client, NULL); 203 i2c_set_clientdata(chip->client, NULL);
204 i2c_set_clientdata(client, NULL);
205 kfree(chip); 205 kfree(chip);
206 return 0; 206 return 0;
207} 207}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 3c6a9860dd9c..9da0e504bbe9 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -2,8 +2,14 @@
2# Multifunction miscellaneous devices 2# Multifunction miscellaneous devices
3# 3#
4 4
5menu "Multifunction device drivers" 5menuconfig MFD_SUPPORT
6 bool "Multifunction device drivers"
6 depends on HAS_IOMEM 7 depends on HAS_IOMEM
8 default y
9 help
10 Configure MFD device drivers.
11
12if MFD_SUPPORT
7 13
8config MFD_CORE 14config MFD_CORE
9 tristate 15 tristate
@@ -116,6 +122,18 @@ config TPS65010
116 This driver can also be built as a module. If so, the module 122 This driver can also be built as a module. If so, the module
117 will be called tps65010. 123 will be called tps65010.
118 124
125config TPS6507X
126 tristate "TPS6507x Power Management / Touch Screen chips"
127 select MFD_CORE
128 depends on I2C
129 help
130 If you say yes here you get support for the TPS6507x series of
131 Power Management / Touch Screen chips. These include voltage
132 regulators, lithium ion/polymer battery charging, touch screen
133 and other features that are often used in portable devices.
134 This driver can also be built as a module. If so, the module
135 will be called tps6507x.
136
119config MENELAUS 137config MENELAUS
120 bool "Texas Instruments TWL92330/Menelaus PM chip" 138 bool "Texas Instruments TWL92330/Menelaus PM chip"
121 depends on I2C=y && ARCH_OMAP2 139 depends on I2C=y && ARCH_OMAP2
@@ -159,6 +177,17 @@ config TWL4030_CODEC
159 select MFD_CORE 177 select MFD_CORE
160 default n 178 default n
161 179
180config MFD_TC35892
181 bool "Support Toshiba TC35892"
182 depends on I2C=y && GENERIC_HARDIRQS
183 select MFD_CORE
184 help
185 Support for the Toshiba TC35892 I/O Expander.
186
187 This driver provides common support for accessing the device,
188 additional drivers must be enabled in order to use the
189 functionality of the device.
190
162config MFD_TMIO 191config MFD_TMIO
163 bool 192 bool
164 default n 193 default n
@@ -351,9 +380,19 @@ config PCF50633_GPIO
351 Say yes here if you want to include support GPIO for pins on 380 Say yes here if you want to include support GPIO for pins on
352 the PCF50633 chip. 381 the PCF50633 chip.
353 382
383config ABX500_CORE
384 bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
385 default y if ARCH_U300
386 help
387 Say yes here if you have the ABX500 Mixed Signal IC family
388 chips. This core driver expose register access functions.
389 Functionality specific drivers using these functions can
390 remain unchanged when IC changes. Binding of the functions to
391 actual register access is done by the IC core driver.
392
354config AB3100_CORE 393config AB3100_CORE
355 bool "ST-Ericsson AB3100 Mixed Signal Circuit core functions" 394 bool "ST-Ericsson AB3100 Mixed Signal Circuit core functions"
356 depends on I2C=y 395 depends on I2C=y && ABX500_CORE
357 default y if ARCH_U300 396 default y if ARCH_U300
358 help 397 help
359 Select this to enable the AB3100 Mixed Signal IC core 398 Select this to enable the AB3100 Mixed Signal IC core
@@ -381,15 +420,30 @@ config EZX_PCAP
381 This enables the PCAP ASIC present on EZX Phones. This is 420 This enables the PCAP ASIC present on EZX Phones. This is
382 needed for MMC, TouchScreen, Sound, USB, etc.. 421 needed for MMC, TouchScreen, Sound, USB, etc..
383 422
384config AB4500_CORE 423config AB8500_CORE
385 tristate "ST-Ericsson's AB4500 Mixed Signal Power management chip" 424 bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
386 depends on SPI 425 depends on SPI=y && GENERIC_HARDIRQS
426 select MFD_CORE
387 help 427 help
388 Select this option to enable access to AB4500 power management 428 Select this option to enable access to AB8500 power management
389 chip. This connects to U8500 on the SSP/SPI bus and exports 429 chip. This connects to U8500 on the SSP/SPI bus and exports
390 read/write functions for the devices to get access to this chip. 430 read/write functions for the devices to get access to this chip.
391 This chip embeds various other multimedia funtionalities as well. 431 This chip embeds various other multimedia funtionalities as well.
392 432
433config AB3550_CORE
434 bool "ST-Ericsson AB3550 Mixed Signal Circuit core functions"
435 select MFD_CORE
436 depends on I2C=y && GENERIC_HARDIRQS && ABX500_CORE
437 help
438 Select this to enable the AB3550 Mixed Signal IC core
439 functionality. This connects to a AB3550 on the I2C bus
440 and expose a number of symbols needed for dependent devices
441 to read and write registers and subscribe to events from
442 this multi-functional IC. This is needed to use other features
443 of the AB3550 such as battery-backed RTC, charging control,
444 LEDs, vibrator, system power and temperature, power management
445 and ALSA sound.
446
393config MFD_TIMBERDALE 447config MFD_TIMBERDALE
394 tristate "Support for the Timberdale FPGA" 448 tristate "Support for the Timberdale FPGA"
395 select MFD_CORE 449 select MFD_CORE
@@ -409,7 +463,26 @@ config LPC_SCH
409 LPC bridge function of the Intel SCH provides support for 463 LPC bridge function of the Intel SCH provides support for
410 System Management Bus and General Purpose I/O. 464 System Management Bus and General Purpose I/O.
411 465
412endmenu 466config MFD_RDC321X
467 tristate "Support for RDC-R321x southbridge"
468 select MFD_CORE
469 depends on PCI
470 help
471 Say yes here if you want to have support for the RDC R-321x SoC
472 southbridge which provides access to GPIOs and Watchdog using the
473 southbridge PCI device configuration space.
474
475config MFD_JANZ_CMODIO
476 tristate "Support for Janz CMOD-IO PCI MODULbus Carrier Board"
477 select MFD_CORE
478 depends on PCI
479 help
480 This is the core driver for the Janz CMOD-IO PCI MODULbus
481 carrier board. This device is a PCI to MODULbus bridge which may
482 host many different types of MODULbus daughterboards, including
483 CAN and GPIO controllers.
484
485endif # MFD_SUPPORT
413 486
414menu "Multimedia Capabilities Port drivers" 487menu "Multimedia Capabilities Port drivers"
415 depends on ARCH_SA1100 488 depends on ARCH_SA1100
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 87935f967aa0..fb503e77dc60 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
15obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o 15obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
16obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o 16obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
17 17
18obj-$(CONFIG_MFD_TC35892) += tc35892.o
18obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o 19obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
19obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o 20obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
20obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o 21obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
29obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o 30obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o
30 31
31obj-$(CONFIG_TPS65010) += tps65010.o 32obj-$(CONFIG_TPS65010) += tps65010.o
33obj-$(CONFIG_TPS6507X) += tps6507x.o
32obj-$(CONFIG_MENELAUS) += menelaus.o 34obj-$(CONFIG_MENELAUS) += menelaus.o
33 35
34obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o 36obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
@@ -55,12 +57,17 @@ obj-$(CONFIG_PMIC_DA903X) += da903x.o
55max8925-objs := max8925-core.o max8925-i2c.o 57max8925-objs := max8925-core.o max8925-i2c.o
56obj-$(CONFIG_MFD_MAX8925) += max8925.o 58obj-$(CONFIG_MFD_MAX8925) += max8925.o
57 59
58obj-$(CONFIG_MFD_PCF50633) += pcf50633-core.o 60pcf50633-objs := pcf50633-core.o pcf50633-irq.o
61obj-$(CONFIG_MFD_PCF50633) += pcf50633.o
59obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o 62obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
60obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o 63obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
64obj-$(CONFIG_ABX500_CORE) += abx500-core.o
61obj-$(CONFIG_AB3100_CORE) += ab3100-core.o 65obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
62obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o 66obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
63obj-$(CONFIG_AB4500_CORE) += ab4500-core.o 67obj-$(CONFIG_AB3550_CORE) += ab3550-core.o
68obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-spi.o
64obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o 69obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
65obj-$(CONFIG_PMIC_ADP5520) += adp5520.o 70obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
66obj-$(CONFIG_LPC_SCH) += lpc_sch.o \ No newline at end of file 71obj-$(CONFIG_LPC_SCH) += lpc_sch.o
72obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o
73obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index e4ca5909e424..53ebfee548fa 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -19,7 +19,7 @@
19#include <linux/debugfs.h> 19#include <linux/debugfs.h>
20#include <linux/seq_file.h> 20#include <linux/seq_file.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/mfd/ab3100.h> 22#include <linux/mfd/abx500.h>
23 23
24/* These are the only registers inside AB3100 used in this main file */ 24/* These are the only registers inside AB3100 used in this main file */
25 25
@@ -59,24 +59,15 @@
59 * The AB3100 is usually assigned address 0x48 (7-bit) 59 * The AB3100 is usually assigned address 0x48 (7-bit)
60 * The chip is defined in the platform i2c_board_data section. 60 * The chip is defined in the platform i2c_board_data section.
61 */ 61 */
62 62static int ab3100_get_chip_id(struct device *dev)
63u8 ab3100_get_chip_type(struct ab3100 *ab3100)
64{ 63{
65 u8 chip = ABUNKNOWN; 64 struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
66 65
67 switch (ab3100->chip_id & 0xf0) { 66 return (int)ab3100->chip_id;
68 case 0xa0:
69 chip = AB3000;
70 break;
71 case 0xc0:
72 chip = AB3100;
73 break;
74 }
75 return chip;
76} 67}
77EXPORT_SYMBOL(ab3100_get_chip_type);
78 68
79int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval) 69static int ab3100_set_register_interruptible(struct ab3100 *ab3100,
70 u8 reg, u8 regval)
80{ 71{
81 u8 regandval[2] = {reg, regval}; 72 u8 regandval[2] = {reg, regval};
82 int err; 73 int err;
@@ -108,8 +99,14 @@ int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval)
108 mutex_unlock(&ab3100->access_mutex); 99 mutex_unlock(&ab3100->access_mutex);
109 return err; 100 return err;
110} 101}
111EXPORT_SYMBOL(ab3100_set_register_interruptible);
112 102
103static int set_register_interruptible(struct device *dev,
104 u8 bank, u8 reg, u8 value)
105{
106 struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
107
108 return ab3100_set_register_interruptible(ab3100, reg, value);
109}
113 110
114/* 111/*
115 * The test registers exist at an I2C bus address up one 112 * The test registers exist at an I2C bus address up one
@@ -148,8 +145,8 @@ static int ab3100_set_test_register_interruptible(struct ab3100 *ab3100,
148 return err; 145 return err;
149} 146}
150 147
151 148static int ab3100_get_register_interruptible(struct ab3100 *ab3100,
152int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval) 149 u8 reg, u8 *regval)
153{ 150{
154 int err; 151 int err;
155 152
@@ -203,10 +200,16 @@ int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval)
203 mutex_unlock(&ab3100->access_mutex); 200 mutex_unlock(&ab3100->access_mutex);
204 return err; 201 return err;
205} 202}
206EXPORT_SYMBOL(ab3100_get_register_interruptible);
207 203
204static int get_register_interruptible(struct device *dev, u8 bank, u8 reg,
205 u8 *value)
206{
207 struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
208
209 return ab3100_get_register_interruptible(ab3100, reg, value);
210}
208 211
209int ab3100_get_register_page_interruptible(struct ab3100 *ab3100, 212static int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
210 u8 first_reg, u8 *regvals, u8 numregs) 213 u8 first_reg, u8 *regvals, u8 numregs)
211{ 214{
212 int err; 215 int err;
@@ -260,10 +263,17 @@ int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
260 mutex_unlock(&ab3100->access_mutex); 263 mutex_unlock(&ab3100->access_mutex);
261 return err; 264 return err;
262} 265}
263EXPORT_SYMBOL(ab3100_get_register_page_interruptible);
264 266
267static int get_register_page_interruptible(struct device *dev, u8 bank,
268 u8 first_reg, u8 *regvals, u8 numregs)
269{
270 struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
271
272 return ab3100_get_register_page_interruptible(ab3100,
273 first_reg, regvals, numregs);
274}
265 275
266int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100, 276static int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
267 u8 reg, u8 andmask, u8 ormask) 277 u8 reg, u8 andmask, u8 ormask)
268{ 278{
269 u8 regandval[2] = {reg, 0}; 279 u8 regandval[2] = {reg, 0};
@@ -331,8 +341,15 @@ int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
331 mutex_unlock(&ab3100->access_mutex); 341 mutex_unlock(&ab3100->access_mutex);
332 return err; 342 return err;
333} 343}
334EXPORT_SYMBOL(ab3100_mask_and_set_register_interruptible);
335 344
345static int mask_and_set_register_interruptible(struct device *dev, u8 bank,
346 u8 reg, u8 bitmask, u8 bitvalues)
347{
348 struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
349
350 return ab3100_mask_and_set_register_interruptible(ab3100,
351 reg, bitmask, (bitmask & bitvalues));
352}
336 353
337/* 354/*
338 * Register a simple callback for handling any AB3100 events. 355 * Register a simple callback for handling any AB3100 events.
@@ -357,15 +374,27 @@ int ab3100_event_unregister(struct ab3100 *ab3100,
357EXPORT_SYMBOL(ab3100_event_unregister); 374EXPORT_SYMBOL(ab3100_event_unregister);
358 375
359 376
360int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100, 377static int ab3100_event_registers_startup_state_get(struct device *dev,
361 u32 *fatevent) 378 u8 *event)
362{ 379{
380 struct ab3100 *ab3100 = dev_get_drvdata(dev->parent);
363 if (!ab3100->startup_events_read) 381 if (!ab3100->startup_events_read)
364 return -EAGAIN; /* Try again later */ 382 return -EAGAIN; /* Try again later */
365 *fatevent = ab3100->startup_events; 383 memcpy(event, ab3100->startup_events, 3);
366 return 0; 384 return 0;
367} 385}
368EXPORT_SYMBOL(ab3100_event_registers_startup_state_get); 386
387static struct abx500_ops ab3100_ops = {
388 .get_chip_id = ab3100_get_chip_id,
389 .set_register = set_register_interruptible,
390 .get_register = get_register_interruptible,
391 .get_register_page = get_register_page_interruptible,
392 .set_register_page = NULL,
393 .mask_and_set_register = mask_and_set_register_interruptible,
394 .event_registers_startup_state_get =
395 ab3100_event_registers_startup_state_get,
396 .startup_irq_enabled = NULL,
397};
369 398
370/* 399/*
371 * This is a threaded interrupt handler so we can make some 400 * This is a threaded interrupt handler so we can make some
@@ -390,7 +419,9 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
390 event_regs[2]; 419 event_regs[2];
391 420
392 if (!ab3100->startup_events_read) { 421 if (!ab3100->startup_events_read) {
393 ab3100->startup_events = fatevent; 422 ab3100->startup_events[0] = event_regs[0];
423 ab3100->startup_events[1] = event_regs[1];
424 ab3100->startup_events[2] = event_regs[2];
394 ab3100->startup_events_read = true; 425 ab3100->startup_events_read = true;
395 } 426 }
396 /* 427 /*
@@ -703,7 +734,8 @@ static int __init ab3100_setup(struct ab3100 *ab3100)
703 dev_warn(ab3100->dev, 734 dev_warn(ab3100->dev,
704 "AB3100 P1E variant detected, " 735 "AB3100 P1E variant detected, "
705 "forcing chip to 32KHz\n"); 736 "forcing chip to 32KHz\n");
706 err = ab3100_set_test_register_interruptible(ab3100, 0x02, 0x08); 737 err = ab3100_set_test_register_interruptible(ab3100,
738 0x02, 0x08);
707 } 739 }
708 740
709 exit_no_setup: 741 exit_no_setup:
@@ -898,6 +930,10 @@ static int __init ab3100_probe(struct i2c_client *client,
898 if (err) 930 if (err)
899 goto exit_no_irq; 931 goto exit_no_irq;
900 932
933 err = abx500_register_ops(&client->dev, &ab3100_ops);
934 if (err)
935 goto exit_no_ops;
936
901 /* Set parent and a pointer back to the container in device data */ 937 /* Set parent and a pointer back to the container in device data */
902 for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++) { 938 for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++) {
903 ab3100_platform_devs[i]->dev.parent = 939 ab3100_platform_devs[i]->dev.parent =
@@ -915,11 +951,13 @@ static int __init ab3100_probe(struct i2c_client *client,
915 951
916 return 0; 952 return 0;
917 953
954 exit_no_ops:
918 exit_no_irq: 955 exit_no_irq:
919 exit_no_setup: 956 exit_no_setup:
920 i2c_unregister_device(ab3100->testreg_client); 957 i2c_unregister_device(ab3100->testreg_client);
921 exit_no_testreg_client: 958 exit_no_testreg_client:
922 exit_no_detect: 959 exit_no_detect:
960 i2c_set_clientdata(client, NULL);
923 kfree(ab3100); 961 kfree(ab3100);
924 return err; 962 return err;
925} 963}
@@ -941,6 +979,7 @@ static int __exit ab3100_remove(struct i2c_client *client)
941 * their notifiers so deactivate IRQ 979 * their notifiers so deactivate IRQ
942 */ 980 */
943 free_irq(client->irq, ab3100); 981 free_irq(client->irq, ab3100);
982 i2c_set_clientdata(client, NULL);
944 kfree(ab3100); 983 kfree(ab3100);
945 return 0; 984 return 0;
946} 985}
diff --git a/drivers/mfd/ab3100-otp.c b/drivers/mfd/ab3100-otp.c
index 2d14655fdebd..63d2b727ddbb 100644
--- a/drivers/mfd/ab3100-otp.c
+++ b/drivers/mfd/ab3100-otp.c
@@ -12,7 +12,7 @@
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/mfd/ab3100.h> 15#include <linux/mfd/abx500.h>
16#include <linux/debugfs.h> 16#include <linux/debugfs.h>
17#include <linux/seq_file.h> 17#include <linux/seq_file.h>
18 18
@@ -30,7 +30,6 @@
30/** 30/**
31 * struct ab3100_otp 31 * struct ab3100_otp
32 * @dev containing device 32 * @dev containing device
33 * @ab3100 a pointer to the parent ab3100 device struct
34 * @locked whether the OTP is locked, after locking, no more bits 33 * @locked whether the OTP is locked, after locking, no more bits
35 * can be changed but before locking it is still possible 34 * can be changed but before locking it is still possible
36 * to change bits from 1->0. 35 * to change bits from 1->0.
@@ -49,7 +48,6 @@
49 */ 48 */
50struct ab3100_otp { 49struct ab3100_otp {
51 struct device *dev; 50 struct device *dev;
52 struct ab3100 *ab3100;
53 bool locked; 51 bool locked;
54 u32 freq; 52 u32 freq;
55 bool paf; 53 bool paf;
@@ -63,19 +61,19 @@ struct ab3100_otp {
63 61
64static int __init ab3100_otp_read(struct ab3100_otp *otp) 62static int __init ab3100_otp_read(struct ab3100_otp *otp)
65{ 63{
66 struct ab3100 *ab = otp->ab3100;
67 u8 otpval[8]; 64 u8 otpval[8];
68 u8 otpp; 65 u8 otpp;
69 int err; 66 int err;
70 67
71 err = ab3100_get_register_interruptible(ab, AB3100_OTPP, &otpp); 68 err = abx500_get_register_interruptible(otp->dev, 0,
69 AB3100_OTPP, &otpp);
72 if (err) { 70 if (err) {
73 dev_err(otp->dev, "unable to read OTPP register\n"); 71 dev_err(otp->dev, "unable to read OTPP register\n");
74 return err; 72 return err;
75 } 73 }
76 74
77 err = ab3100_get_register_page_interruptible(ab, AB3100_OTP0, 75 err = abx500_get_register_page_interruptible(otp->dev, 0,
78 otpval, 8); 76 AB3100_OTP0, otpval, 8);
79 if (err) { 77 if (err) {
80 dev_err(otp->dev, "unable to read OTP register page\n"); 78 dev_err(otp->dev, "unable to read OTP register page\n");
81 return err; 79 return err;
@@ -197,7 +195,6 @@ static int __init ab3100_otp_probe(struct platform_device *pdev)
197 otp->dev = &pdev->dev; 195 otp->dev = &pdev->dev;
198 196
199 /* Replace platform data coming in with a local struct */ 197 /* Replace platform data coming in with a local struct */
200 otp->ab3100 = platform_get_drvdata(pdev);
201 platform_set_drvdata(pdev, otp); 198 platform_set_drvdata(pdev, otp);
202 199
203 err = ab3100_otp_read(otp); 200 err = ab3100_otp_read(otp);
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
new file mode 100644
index 000000000000..1060f8e1c40a
--- /dev/null
+++ b/drivers/mfd/ab3550-core.c
@@ -0,0 +1,1401 @@
1/*
2 * Copyright (C) 2007-2010 ST-Ericsson
3 * License terms: GNU General Public License (GPL) version 2
4 * Low-level core for exclusive access to the AB3550 IC on the I2C bus
5 * and some basic chip-configuration.
6 * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
7 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
8 * Author: Mattias Wallin <mattias.wallin@stericsson.com>
9 * Author: Rickard Andersson <rickard.andersson@stericsson.com>
10 */
11
12#include <linux/i2c.h>
13#include <linux/mutex.h>
14#include <linux/err.h>
15#include <linux/platform_device.h>
16#include <linux/slab.h>
17#include <linux/device.h>
18#include <linux/irq.h>
19#include <linux/interrupt.h>
20#include <linux/random.h>
21#include <linux/workqueue.h>
22#include <linux/debugfs.h>
23#include <linux/seq_file.h>
24#include <linux/uaccess.h>
25#include <linux/mfd/abx500.h>
26#include <linux/list.h>
27#include <linux/bitops.h>
28#include <linux/spinlock.h>
29#include <linux/mfd/core.h>
30
31#define AB3550_NAME_STRING "ab3550"
32#define AB3550_ID_FORMAT_STRING "AB3550 %s"
33#define AB3550_NUM_BANKS 2
34#define AB3550_NUM_EVENT_REG 5
35
36/* These are the only registers inside AB3550 used in this main file */
37
38/* Chip ID register */
39#define AB3550_CID_REG 0x20
40
41/* Interrupt event registers */
42#define AB3550_EVENT_BANK 0
43#define AB3550_EVENT_REG 0x22
44
45/* Read/write operation values. */
46#define AB3550_PERM_RD (0x01)
47#define AB3550_PERM_WR (0x02)
48
49/* Read/write permissions. */
50#define AB3550_PERM_RO (AB3550_PERM_RD)
51#define AB3550_PERM_RW (AB3550_PERM_RD | AB3550_PERM_WR)
52
53/**
54 * struct ab3550
55 * @access_mutex: lock out concurrent accesses to the AB registers
56 * @i2c_client: I2C client for this chip
57 * @chip_name: name of this chip variant
58 * @chip_id: 8 bit chip ID for this chip variant
59 * @mask_work: a worker for writing to mask registers
60 * @event_lock: a lock to protect the event_mask
61 * @event_mask: a local copy of the mask event registers
62 * @startup_events: a copy of the first reading of the event registers
63 * @startup_events_read: whether the first events have been read
64 */
65struct ab3550 {
66 struct mutex access_mutex;
67 struct i2c_client *i2c_client[AB3550_NUM_BANKS];
68 char chip_name[32];
69 u8 chip_id;
70 struct work_struct mask_work;
71 spinlock_t event_lock;
72 u8 event_mask[AB3550_NUM_EVENT_REG];
73 u8 startup_events[AB3550_NUM_EVENT_REG];
74 bool startup_events_read;
75#ifdef CONFIG_DEBUG_FS
76 unsigned int debug_bank;
77 unsigned int debug_address;
78#endif
79};
80
81/**
82 * struct ab3550_reg_range
83 * @first: the first address of the range
84 * @last: the last address of the range
85 * @perm: access permissions for the range
86 */
87struct ab3550_reg_range {
88 u8 first;
89 u8 last;
90 u8 perm;
91};
92
93/**
94 * struct ab3550_reg_ranges
95 * @count: the number of ranges in the list
96 * @range: the list of register ranges
97 */
98struct ab3550_reg_ranges {
99 u8 count;
100 const struct ab3550_reg_range *range;
101};
102
103/*
104 * Permissible register ranges for reading and writing per device and bank.
105 *
106 * The ranges must be listed in increasing address order, and no overlaps are
107 * allowed. It is assumed that write permission implies read permission
108 * (i.e. only RO and RW permissions should be used). Ranges with write
109 * permission must not be split up.
110 */
111
112#define NO_RANGE {.count = 0, .range = NULL,}
113
114static struct
115ab3550_reg_ranges ab3550_reg_ranges[AB3550_NUM_DEVICES][AB3550_NUM_BANKS] = {
116 [AB3550_DEVID_DAC] = {
117 NO_RANGE,
118 {
119 .count = 2,
120 .range = (struct ab3550_reg_range[]) {
121 {
122 .first = 0xb0,
123 .last = 0xba,
124 .perm = AB3550_PERM_RW,
125 },
126 {
127 .first = 0xbc,
128 .last = 0xc3,
129 .perm = AB3550_PERM_RW,
130 },
131 },
132 },
133 },
134 [AB3550_DEVID_LEDS] = {
135 NO_RANGE,
136 {
137 .count = 2,
138 .range = (struct ab3550_reg_range[]) {
139 {
140 .first = 0x5a,
141 .last = 0x88,
142 .perm = AB3550_PERM_RW,
143 },
144 {
145 .first = 0x8a,
146 .last = 0xad,
147 .perm = AB3550_PERM_RW,
148 },
149 }
150 },
151 },
152 [AB3550_DEVID_POWER] = {
153 {
154 .count = 1,
155 .range = (struct ab3550_reg_range[]) {
156 {
157 .first = 0x21,
158 .last = 0x21,
159 .perm = AB3550_PERM_RO,
160 },
161 }
162 },
163 NO_RANGE,
164 },
165 [AB3550_DEVID_REGULATORS] = {
166 {
167 .count = 1,
168 .range = (struct ab3550_reg_range[]) {
169 {
170 .first = 0x69,
171 .last = 0xa3,
172 .perm = AB3550_PERM_RW,
173 },
174 }
175 },
176 {
177 .count = 1,
178 .range = (struct ab3550_reg_range[]) {
179 {
180 .first = 0x14,
181 .last = 0x16,
182 .perm = AB3550_PERM_RW,
183 },
184 }
185 },
186 },
187 [AB3550_DEVID_SIM] = {
188 {
189 .count = 1,
190 .range = (struct ab3550_reg_range[]) {
191 {
192 .first = 0x21,
193 .last = 0x21,
194 .perm = AB3550_PERM_RO,
195 },
196 }
197 },
198 {
199 .count = 1,
200 .range = (struct ab3550_reg_range[]) {
201 {
202 .first = 0x14,
203 .last = 0x17,
204 .perm = AB3550_PERM_RW,
205 },
206 }
207
208 },
209 },
210 [AB3550_DEVID_UART] = {
211 NO_RANGE,
212 NO_RANGE,
213 },
214 [AB3550_DEVID_RTC] = {
215 {
216 .count = 1,
217 .range = (struct ab3550_reg_range[]) {
218 {
219 .first = 0x00,
220 .last = 0x0c,
221 .perm = AB3550_PERM_RW,
222 },
223 }
224 },
225 NO_RANGE,
226 },
227 [AB3550_DEVID_CHARGER] = {
228 {
229 .count = 2,
230 .range = (struct ab3550_reg_range[]) {
231 {
232 .first = 0x10,
233 .last = 0x1a,
234 .perm = AB3550_PERM_RW,
235 },
236 {
237 .first = 0x21,
238 .last = 0x21,
239 .perm = AB3550_PERM_RO,
240 },
241 }
242 },
243 NO_RANGE,
244 },
245 [AB3550_DEVID_ADC] = {
246 NO_RANGE,
247 {
248 .count = 1,
249 .range = (struct ab3550_reg_range[]) {
250 {
251 .first = 0x20,
252 .last = 0x56,
253 .perm = AB3550_PERM_RW,
254 },
255
256 }
257 },
258 },
259 [AB3550_DEVID_FUELGAUGE] = {
260 {
261 .count = 1,
262 .range = (struct ab3550_reg_range[]) {
263 {
264 .first = 0x21,
265 .last = 0x21,
266 .perm = AB3550_PERM_RO,
267 },
268 }
269 },
270 {
271 .count = 1,
272 .range = (struct ab3550_reg_range[]) {
273 {
274 .first = 0x00,
275 .last = 0x0e,
276 .perm = AB3550_PERM_RW,
277 },
278 }
279 },
280 },
281 [AB3550_DEVID_VIBRATOR] = {
282 NO_RANGE,
283 {
284 .count = 1,
285 .range = (struct ab3550_reg_range[]) {
286 {
287 .first = 0x10,
288 .last = 0x13,
289 .perm = AB3550_PERM_RW,
290 },
291
292 }
293 },
294 },
295 [AB3550_DEVID_CODEC] = {
296 {
297 .count = 2,
298 .range = (struct ab3550_reg_range[]) {
299 {
300 .first = 0x31,
301 .last = 0x63,
302 .perm = AB3550_PERM_RW,
303 },
304 {
305 .first = 0x65,
306 .last = 0x68,
307 .perm = AB3550_PERM_RW,
308 },
309 }
310 },
311 NO_RANGE,
312 },
313};
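A sketch of what the permission table above implies for the checks implemented further down in this file (the values are taken from the AB3550_DEVID_CHARGER entry, bank 0):

	/*
	 * reg_write_allowed(ranges, 0x15)       -> true:  0x10..0x1a is RW
	 * reg_write_allowed(ranges, 0x21)       -> false: 0x21..0x21 is RO
	 * page_read_allowed(ranges, 0x10, 0x21) -> false: the span crosses the
	 *     unlisted gap 0x1b..0x20, and a page read is only allowed when the
	 *     whole span is covered by contiguous readable ranges.
	 */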
314
315static struct mfd_cell ab3550_devs[AB3550_NUM_DEVICES] = {
316 [AB3550_DEVID_DAC] = {
317 .name = "ab3550-dac",
318 .id = AB3550_DEVID_DAC,
319 .num_resources = 0,
320 },
321 [AB3550_DEVID_LEDS] = {
322 .name = "ab3550-leds",
323 .id = AB3550_DEVID_LEDS,
324 },
325 [AB3550_DEVID_POWER] = {
326 .name = "ab3550-power",
327 .id = AB3550_DEVID_POWER,
328 },
329 [AB3550_DEVID_REGULATORS] = {
330 .name = "ab3550-regulators",
331 .id = AB3550_DEVID_REGULATORS,
332 },
333 [AB3550_DEVID_SIM] = {
334 .name = "ab3550-sim",
335 .id = AB3550_DEVID_SIM,
336 },
337 [AB3550_DEVID_UART] = {
338 .name = "ab3550-uart",
339 .id = AB3550_DEVID_UART,
340 },
341 [AB3550_DEVID_RTC] = {
342 .name = "ab3550-rtc",
343 .id = AB3550_DEVID_RTC,
344 },
345 [AB3550_DEVID_CHARGER] = {
346 .name = "ab3550-charger",
347 .id = AB3550_DEVID_CHARGER,
348 },
349 [AB3550_DEVID_ADC] = {
350 .name = "ab3550-adc",
351 .id = AB3550_DEVID_ADC,
352 .num_resources = 10,
353 .resources = (struct resource[]) {
354 {
355 .name = "TRIGGER-0",
356 .flags = IORESOURCE_IRQ,
357 .start = 16,
358 .end = 16,
359 },
360 {
361 .name = "TRIGGER-1",
362 .flags = IORESOURCE_IRQ,
363 .start = 17,
364 .end = 17,
365 },
366 {
367 .name = "TRIGGER-2",
368 .flags = IORESOURCE_IRQ,
369 .start = 18,
370 .end = 18,
371 },
372 {
373 .name = "TRIGGER-3",
374 .flags = IORESOURCE_IRQ,
375 .start = 19,
376 .end = 19,
377 },
378 {
379 .name = "TRIGGER-4",
380 .flags = IORESOURCE_IRQ,
381 .start = 20,
382 .end = 20,
383 },
384 {
385 .name = "TRIGGER-5",
386 .flags = IORESOURCE_IRQ,
387 .start = 21,
388 .end = 21,
389 },
390 {
391 .name = "TRIGGER-6",
392 .flags = IORESOURCE_IRQ,
393 .start = 22,
394 .end = 22,
395 },
396 {
397 .name = "TRIGGER-7",
398 .flags = IORESOURCE_IRQ,
399 .start = 23,
400 .end = 23,
401 },
402 {
403 .name = "TRIGGER-VBAT-TXON",
404 .flags = IORESOURCE_IRQ,
405 .start = 13,
406 .end = 13,
407 },
408 {
409 .name = "TRIGGER-VBAT",
410 .flags = IORESOURCE_IRQ,
411 .start = 12,
412 .end = 12,
413 },
414 },
415 },
416 [AB3550_DEVID_FUELGAUGE] = {
417 .name = "ab3550-fuelgauge",
418 .id = AB3550_DEVID_FUELGAUGE,
419 },
420 [AB3550_DEVID_VIBRATOR] = {
421 .name = "ab3550-vibrator",
422 .id = AB3550_DEVID_VIBRATOR,
423 },
424 [AB3550_DEVID_CODEC] = {
425 .name = "ab3550-codec",
426 .id = AB3550_DEVID_CODEC,
427 },
428};
429
430/*
431 * I2C transactions with error messages.
432 */
433static int ab3550_i2c_master_send(struct ab3550 *ab, u8 bank, u8 *data,
434 u8 count)
435{
436 int err;
437
438 err = i2c_master_send(ab->i2c_client[bank], data, count);
439 if (err < 0) {
440 dev_err(&ab->i2c_client[0]->dev, "send error: %d\n", err);
441 return err;
442 }
443 return 0;
444}
445
446static int ab3550_i2c_master_recv(struct ab3550 *ab, u8 bank, u8 *data,
447 u8 count)
448{
449 int err;
450
451 err = i2c_master_recv(ab->i2c_client[bank], data, count);
452 if (err < 0) {
453 dev_err(&ab->i2c_client[0]->dev, "receive error: %d\n", err);
454 return err;
455 }
456 return 0;
457}
458
459/*
460 * Functionality for getting/setting register values.
461 */
462static int get_register_interruptible(struct ab3550 *ab, u8 bank, u8 reg,
463 u8 *value)
464{
465 int err;
466
467 err = mutex_lock_interruptible(&ab->access_mutex);
468 if (err)
469 return err;
470
471 err = ab3550_i2c_master_send(ab, bank, &reg, 1);
472 if (!err)
473 err = ab3550_i2c_master_recv(ab, bank, value, 1);
474
475 mutex_unlock(&ab->access_mutex);
476 return err;
477}
478
479static int get_register_page_interruptible(struct ab3550 *ab, u8 bank,
480 u8 first_reg, u8 *regvals, u8 numregs)
481{
482 int err;
483
484 err = mutex_lock_interruptible(&ab->access_mutex);
485 if (err)
486 return err;
487
488 err = ab3550_i2c_master_send(ab, bank, &first_reg, 1);
489 if (!err)
490 err = ab3550_i2c_master_recv(ab, bank, regvals, numregs);
491
492 mutex_unlock(&ab->access_mutex);
493 return err;
494}
495
496static int mask_and_set_register_interruptible(struct ab3550 *ab, u8 bank,
497 u8 reg, u8 bitmask, u8 bitvalues)
498{
499 int err = 0;
500
501 if (likely(bitmask)) {
502 u8 reg_bits[2] = {reg, 0};
503
504 err = mutex_lock_interruptible(&ab->access_mutex);
505 if (err)
506 return err;
507
508 if (bitmask == 0xFF) /* No need to read in this case. */
509 reg_bits[1] = bitvalues;
510 else { /* Read and modify the register value. */
511 u8 bits;
512
513 err = ab3550_i2c_master_send(ab, bank, &reg, 1);
514 if (err)
515 goto unlock_and_return;
516 err = ab3550_i2c_master_recv(ab, bank, &bits, 1);
517 if (err)
518 goto unlock_and_return;
519 reg_bits[1] = ((~bitmask & bits) |
520 (bitmask & bitvalues));
521 }
522 /* Write the new value. */
523 err = ab3550_i2c_master_send(ab, bank, reg_bits, 2);
524unlock_and_return:
525 mutex_unlock(&ab->access_mutex);
526 }
527 return err;
528}
529
530/*
531 * Read/write permission checking functions.
532 */
533static bool page_write_allowed(const struct ab3550_reg_ranges *ranges,
534 u8 first_reg, u8 last_reg)
535{
536 u8 i;
537
538 if (last_reg < first_reg)
539 return false;
540
541 for (i = 0; i < ranges->count; i++) {
542 if (first_reg < ranges->range[i].first)
543 break;
544 if ((last_reg <= ranges->range[i].last) &&
545 (ranges->range[i].perm & AB3550_PERM_WR))
546 return true;
547 }
548 return false;
549}
550
551static bool reg_write_allowed(const struct ab3550_reg_ranges *ranges, u8 reg)
552{
553 return page_write_allowed(ranges, reg, reg);
554}
555
556static bool page_read_allowed(const struct ab3550_reg_ranges *ranges,
557 u8 first_reg, u8 last_reg)
558{
559 u8 i;
560
561 if (last_reg < first_reg)
562 return false;
563 /* Find the range (if it exists in the list) that includes first_reg. */
564 for (i = 0; i < ranges->count; i++) {
565 if (first_reg < ranges->range[i].first)
566 return false;
567 if (first_reg <= ranges->range[i].last)
568 break;
569 }
570 /* Make sure that the entire range up to and including last_reg is
571 * readable. This may span several of the ranges in the list.
572 */
573 while ((i < ranges->count) &&
574 (ranges->range[i].perm & AB3550_PERM_RD)) {
575 if (last_reg <= ranges->range[i].last)
576 return true;
577 if ((++i >= ranges->count) ||
578 (ranges->range[i].first !=
579 (ranges->range[i - 1].last + 1))) {
580 break;
581 }
582 }
583 return false;
584}
585
586static bool reg_read_allowed(const struct ab3550_reg_ranges *ranges, u8 reg)
587{
588 return page_read_allowed(ranges, reg, reg);
589}
590
591/*
592 * The exported register access functionality.
593 */
594int ab3550_get_chip_id(struct device *dev)
595{
596 struct ab3550 *ab = dev_get_drvdata(dev->parent);
597 return (int)ab->chip_id;
598}
599
600int ab3550_mask_and_set_register_interruptible(struct device *dev, u8 bank,
601 u8 reg, u8 bitmask, u8 bitvalues)
602{
603 struct ab3550 *ab;
604 struct platform_device *pdev = to_platform_device(dev);
605
606 if ((AB3550_NUM_BANKS <= bank) ||
607 !reg_write_allowed(&ab3550_reg_ranges[pdev->id][bank], reg))
608 return -EINVAL;
609
610 ab = dev_get_drvdata(dev->parent);
611 return mask_and_set_register_interruptible(ab, bank, reg,
612 bitmask, bitvalues);
613}
614
615int ab3550_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
616 u8 value)
617{
618 return ab3550_mask_and_set_register_interruptible(dev, bank, reg, 0xFF,
619 value);
620}
621
622int ab3550_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
623 u8 *value)
624{
625 struct ab3550 *ab;
626 struct platform_device *pdev = to_platform_device(dev);
627
628 if ((AB3550_NUM_BANKS <= bank) ||
629 !reg_read_allowed(&ab3550_reg_ranges[pdev->id][bank], reg))
630 return -EINVAL;
631
632 ab = dev_get_drvdata(dev->parent);
633 return get_register_interruptible(ab, bank, reg, value);
634}
635
636int ab3550_get_register_page_interruptible(struct device *dev, u8 bank,
637 u8 first_reg, u8 *regvals, u8 numregs)
638{
639 struct ab3550 *ab;
640 struct platform_device *pdev = to_platform_device(dev);
641
642 if ((AB3550_NUM_BANKS <= bank) ||
643 !page_read_allowed(&ab3550_reg_ranges[pdev->id][bank],
644 first_reg, (first_reg + numregs - 1)))
645 return -EINVAL;
646
647 ab = dev_get_drvdata(dev->parent);
648 return get_register_page_interruptible(ab, bank, first_reg, regvals,
649 numregs);
650}
651
652int ab3550_event_registers_startup_state_get(struct device *dev, u8 *event)
653{
654 struct ab3550 *ab;
655
656 ab = dev_get_drvdata(dev->parent);
657 if (!ab->startup_events_read)
658 return -EAGAIN; /* Try again later */
659
660 memcpy(event, ab->startup_events, AB3550_NUM_EVENT_REG);
661 return 0;
662}
663
664int ab3550_startup_irq_enabled(struct device *dev, unsigned int irq)
665{
666 struct ab3550 *ab;
667 struct ab3550_platform_data *plf_data;
668 bool val;
669
670 ab = get_irq_chip_data(irq);
671 plf_data = ab->i2c_client[0]->dev.platform_data;
672 irq -= plf_data->irq.base;
673 val = ((ab->startup_events[irq / 8] & BIT(irq % 8)) != 0);
674
675 return val;
676}
677
678static struct abx500_ops ab3550_ops = {
679 .get_chip_id = ab3550_get_chip_id,
680 .get_register = ab3550_get_register_interruptible,
681 .set_register = ab3550_set_register_interruptible,
682 .get_register_page = ab3550_get_register_page_interruptible,
683 .set_register_page = NULL,
684 .mask_and_set_register = ab3550_mask_and_set_register_interruptible,
685 .event_registers_startup_state_get =
686 ab3550_event_registers_startup_state_get,
687 .startup_irq_enabled = ab3550_startup_irq_enabled,
688};
689
690static irqreturn_t ab3550_irq_handler(int irq, void *data)
691{
692 struct ab3550 *ab = data;
693 int err;
694 unsigned int i;
695 u8 e[AB3550_NUM_EVENT_REG];
696 u8 *events;
697 unsigned long flags;
698
699 events = (ab->startup_events_read ? e : ab->startup_events);
700
701 err = get_register_page_interruptible(ab, AB3550_EVENT_BANK,
702 AB3550_EVENT_REG, events, AB3550_NUM_EVENT_REG);
703 if (err)
704 goto err_event_rd;
705
706 if (!ab->startup_events_read) {
707 dev_info(&ab->i2c_client[0]->dev,
708 "startup events 0x%x,0x%x,0x%x,0x%x,0x%x\n",
709 ab->startup_events[0], ab->startup_events[1],
710 ab->startup_events[2], ab->startup_events[3],
711 ab->startup_events[4]);
712 ab->startup_events_read = true;
713 goto out;
714 }
715
716 /* The two highest bits in event[4] are not used. */
717 events[4] &= 0x3f;
718
719 spin_lock_irqsave(&ab->event_lock, flags);
720 for (i = 0; i < AB3550_NUM_EVENT_REG; i++)
721 events[i] &= ~ab->event_mask[i];
722 spin_unlock_irqrestore(&ab->event_lock, flags);
723
724 for (i = 0; i < AB3550_NUM_EVENT_REG; i++) {
725 u8 bit;
726 u8 event_reg;
727
728		dev_dbg(&ab->i2c_client[0]->dev, "IRQ Event[%d]: 0x%02x\n",
729 i, events[i]);
730
731 event_reg = events[i];
732 for (bit = 0; event_reg; bit++, event_reg /= 2) {
733 if (event_reg % 2) {
734 unsigned int irq;
735 struct ab3550_platform_data *plf_data;
736
737 plf_data = ab->i2c_client[0]->dev.platform_data;
738 irq = plf_data->irq.base + (i * 8) + bit;
739 handle_nested_irq(irq);
740 }
741 }
742 }
743out:
744 return IRQ_HANDLED;
745
746err_event_rd:
747 dev_dbg(&ab->i2c_client[0]->dev, "error reading event registers\n");
748 return IRQ_HANDLED;
749}
750
751#ifdef CONFIG_DEBUG_FS
752static struct ab3550_reg_ranges debug_ranges[AB3550_NUM_BANKS] = {
753 {
754 .count = 6,
755 .range = (struct ab3550_reg_range[]) {
756 {
757 .first = 0x00,
758 .last = 0x0e,
759 },
760 {
761 .first = 0x10,
762 .last = 0x1a,
763 },
764 {
765 .first = 0x1e,
766 .last = 0x4f,
767 },
768 {
769 .first = 0x51,
770 .last = 0x63,
771 },
772 {
773 .first = 0x65,
774 .last = 0xa3,
775 },
776 {
777 .first = 0xa5,
778 .last = 0xa8,
779 },
780 }
781 },
782 {
783 .count = 8,
784 .range = (struct ab3550_reg_range[]) {
785 {
786 .first = 0x00,
787 .last = 0x0e,
788 },
789 {
790 .first = 0x10,
791 .last = 0x17,
792 },
793 {
794 .first = 0x1a,
795 .last = 0x1c,
796 },
797 {
798 .first = 0x20,
799 .last = 0x56,
800 },
801 {
802 .first = 0x5a,
803 .last = 0x88,
804 },
805 {
806 .first = 0x8a,
807 .last = 0xad,
808 },
809 {
810 .first = 0xb0,
811 .last = 0xba,
812 },
813 {
814 .first = 0xbc,
815 .last = 0xc3,
816 },
817 }
818 },
819};
820
821static int ab3550_registers_print(struct seq_file *s, void *p)
822{
823 struct ab3550 *ab = s->private;
824 int bank;
825
826 seq_printf(s, AB3550_NAME_STRING " register values:\n");
827
828 for (bank = 0; bank < AB3550_NUM_BANKS; bank++) {
829 unsigned int i;
830
831 seq_printf(s, " bank %d:\n", bank);
832 for (i = 0; i < debug_ranges[bank].count; i++) {
833 u8 reg;
834
835 for (reg = debug_ranges[bank].range[i].first;
836 reg <= debug_ranges[bank].range[i].last;
837 reg++) {
838 u8 value;
839
840 get_register_interruptible(ab, bank, reg,
841 &value);
842 seq_printf(s, " [%d/0x%02X]: 0x%02X\n", bank,
843 reg, value);
844 }
845 }
846 }
847 return 0;
848}
849
850static int ab3550_registers_open(struct inode *inode, struct file *file)
851{
852 return single_open(file, ab3550_registers_print, inode->i_private);
853}
854
855static const struct file_operations ab3550_registers_fops = {
856 .open = ab3550_registers_open,
857 .read = seq_read,
858 .llseek = seq_lseek,
859 .release = single_release,
860 .owner = THIS_MODULE,
861};
862
863static int ab3550_bank_print(struct seq_file *s, void *p)
864{
865 struct ab3550 *ab = s->private;
866
867 seq_printf(s, "%d\n", ab->debug_bank);
868 return 0;
869}
870
871static int ab3550_bank_open(struct inode *inode, struct file *file)
872{
873 return single_open(file, ab3550_bank_print, inode->i_private);
874}
875
876static ssize_t ab3550_bank_write(struct file *file,
877 const char __user *user_buf,
878 size_t count, loff_t *ppos)
879{
880 struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
881 char buf[32];
882 int buf_size;
883 unsigned long user_bank;
884 int err;
885
886 /* Get userspace string and assure termination */
887 buf_size = min(count, (sizeof(buf) - 1));
888 if (copy_from_user(buf, user_buf, buf_size))
889 return -EFAULT;
890 buf[buf_size] = 0;
891
892 err = strict_strtoul(buf, 0, &user_bank);
893 if (err)
894 return -EINVAL;
895
896 if (user_bank >= AB3550_NUM_BANKS) {
897 dev_err(&ab->i2c_client[0]->dev,
898 "debugfs error input > number of banks\n");
899 return -EINVAL;
900 }
901
902 ab->debug_bank = user_bank;
903
904 return buf_size;
905}
906
907static int ab3550_address_print(struct seq_file *s, void *p)
908{
909 struct ab3550 *ab = s->private;
910
911 seq_printf(s, "0x%02X\n", ab->debug_address);
912 return 0;
913}
914
915static int ab3550_address_open(struct inode *inode, struct file *file)
916{
917 return single_open(file, ab3550_address_print, inode->i_private);
918}
919
920static ssize_t ab3550_address_write(struct file *file,
921 const char __user *user_buf,
922 size_t count, loff_t *ppos)
923{
924 struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
925 char buf[32];
926 int buf_size;
927 unsigned long user_address;
928 int err;
929
930 /* Get userspace string and assure termination */
931 buf_size = min(count, (sizeof(buf) - 1));
932 if (copy_from_user(buf, user_buf, buf_size))
933 return -EFAULT;
934 buf[buf_size] = 0;
935
936 err = strict_strtoul(buf, 0, &user_address);
937 if (err)
938 return -EINVAL;
939 if (user_address > 0xff) {
940 dev_err(&ab->i2c_client[0]->dev,
941 "debugfs error input > 0xff\n");
942 return -EINVAL;
943 }
944 ab->debug_address = user_address;
945 return buf_size;
946}
947
948static int ab3550_val_print(struct seq_file *s, void *p)
949{
950 struct ab3550 *ab = s->private;
951 int err;
952 u8 regvalue;
953
954 err = get_register_interruptible(ab, (u8)ab->debug_bank,
955 (u8)ab->debug_address, &regvalue);
956 if (err)
957 return -EINVAL;
958 seq_printf(s, "0x%02X\n", regvalue);
959
960 return 0;
961}
962
963static int ab3550_val_open(struct inode *inode, struct file *file)
964{
965 return single_open(file, ab3550_val_print, inode->i_private);
966}
967
968static ssize_t ab3550_val_write(struct file *file,
969 const char __user *user_buf,
970 size_t count, loff_t *ppos)
971{
972 struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
973 char buf[32];
974 int buf_size;
975 unsigned long user_val;
976 int err;
977 u8 regvalue;
978
979 /* Get userspace string and assure termination */
980 buf_size = min(count, (sizeof(buf)-1));
981 if (copy_from_user(buf, user_buf, buf_size))
982 return -EFAULT;
983 buf[buf_size] = 0;
984
985 err = strict_strtoul(buf, 0, &user_val);
986 if (err)
987 return -EINVAL;
988 if (user_val > 0xff) {
989 dev_err(&ab->i2c_client[0]->dev,
990 "debugfs error input > 0xff\n");
991 return -EINVAL;
992 }
993 err = mask_and_set_register_interruptible(
994 ab, (u8)ab->debug_bank,
995 (u8)ab->debug_address, 0xFF, (u8)user_val);
996 if (err)
997 return -EINVAL;
998
 999	err = get_register_interruptible(ab, (u8)ab->debug_bank,
1000 (u8)ab->debug_address, &regvalue);
1001 if (err)
1002 return -EINVAL;
1003
1004 return buf_size;
1005}
1006
1007static const struct file_operations ab3550_bank_fops = {
1008 .open = ab3550_bank_open,
1009 .write = ab3550_bank_write,
1010 .read = seq_read,
1011 .llseek = seq_lseek,
1012 .release = single_release,
1013 .owner = THIS_MODULE,
1014};
1015
1016static const struct file_operations ab3550_address_fops = {
1017 .open = ab3550_address_open,
1018 .write = ab3550_address_write,
1019 .read = seq_read,
1020 .llseek = seq_lseek,
1021 .release = single_release,
1022 .owner = THIS_MODULE,
1023};
1024
1025static const struct file_operations ab3550_val_fops = {
1026 .open = ab3550_val_open,
1027 .write = ab3550_val_write,
1028 .read = seq_read,
1029 .llseek = seq_lseek,
1030 .release = single_release,
1031 .owner = THIS_MODULE,
1032};
1033
1034static struct dentry *ab3550_dir;
1035static struct dentry *ab3550_reg_file;
1036static struct dentry *ab3550_bank_file;
1037static struct dentry *ab3550_address_file;
1038static struct dentry *ab3550_val_file;
1039
1040static inline void ab3550_setup_debugfs(struct ab3550 *ab)
1041{
1042 ab->debug_bank = 0;
1043 ab->debug_address = 0x00;
1044
1045 ab3550_dir = debugfs_create_dir(AB3550_NAME_STRING, NULL);
1046 if (!ab3550_dir)
1047 goto exit_no_debugfs;
1048
1049 ab3550_reg_file = debugfs_create_file("all-registers",
1050 S_IRUGO, ab3550_dir, ab, &ab3550_registers_fops);
1051 if (!ab3550_reg_file)
1052 goto exit_destroy_dir;
1053
1054 ab3550_bank_file = debugfs_create_file("register-bank",
1055 (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_bank_fops);
1056 if (!ab3550_bank_file)
1057 goto exit_destroy_reg;
1058
1059 ab3550_address_file = debugfs_create_file("register-address",
1060 (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_address_fops);
1061 if (!ab3550_address_file)
1062 goto exit_destroy_bank;
1063
1064 ab3550_val_file = debugfs_create_file("register-value",
1065 (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_val_fops);
1066 if (!ab3550_val_file)
1067 goto exit_destroy_address;
1068
1069 return;
1070
1071exit_destroy_address:
1072 debugfs_remove(ab3550_address_file);
1073exit_destroy_bank:
1074 debugfs_remove(ab3550_bank_file);
1075exit_destroy_reg:
1076 debugfs_remove(ab3550_reg_file);
1077exit_destroy_dir:
1078 debugfs_remove(ab3550_dir);
1079exit_no_debugfs:
1080 dev_err(&ab->i2c_client[0]->dev, "failed to create debugfs entries.\n");
1081 return;
1082}
1083
1084static inline void ab3550_remove_debugfs(void)
1085{
1086 debugfs_remove(ab3550_val_file);
1087 debugfs_remove(ab3550_address_file);
1088 debugfs_remove(ab3550_bank_file);
1089 debugfs_remove(ab3550_reg_file);
1090 debugfs_remove(ab3550_dir);
1091}
1092
1093#else /* !CONFIG_DEBUG_FS */
1094static inline void ab3550_setup_debugfs(struct ab3550 *ab)
1095{
1096}
1097static inline void ab3550_remove_debugfs(void)
1098{
1099}
1100#endif
1101
1102/*
1103 * Basic set-up, data structure creation/destruction and I2C interface.
1104 * This sets up a default config in the AB3550 chip so that it
1105 * will work as expected.
1106 */
1107static int __init ab3550_setup(struct ab3550 *ab)
1108{
1109 int err = 0;
1110 int i;
1111 struct ab3550_platform_data *plf_data;
1112 struct abx500_init_settings *settings;
1113
1114 plf_data = ab->i2c_client[0]->dev.platform_data;
1115 settings = plf_data->init_settings;
1116
1117 for (i = 0; i < plf_data->init_settings_sz; i++) {
1118 err = mask_and_set_register_interruptible(ab,
1119 settings[i].bank,
1120 settings[i].reg,
1121 0xFF, settings[i].setting);
1122 if (err)
1123 goto exit_no_setup;
1124
1125		/* If this is an event mask register, update the event mask in ab3550 */
1126 if ((settings[i].bank == 0) &&
1127 (AB3550_IMR1 <= settings[i].reg) &&
1128 (settings[i].reg <= AB3550_IMR5)) {
1129 ab->event_mask[settings[i].reg - AB3550_IMR1] =
1130 settings[i].setting;
1131 }
1132 }
1133exit_no_setup:
1134 return err;
1135}
1136
1137static void ab3550_mask_work(struct work_struct *work)
1138{
1139 struct ab3550 *ab = container_of(work, struct ab3550, mask_work);
1140 int i;
1141 unsigned long flags;
1142 u8 mask[AB3550_NUM_EVENT_REG];
1143
1144 spin_lock_irqsave(&ab->event_lock, flags);
1145 for (i = 0; i < AB3550_NUM_EVENT_REG; i++)
1146 mask[i] = ab->event_mask[i];
1147 spin_unlock_irqrestore(&ab->event_lock, flags);
1148
1149 for (i = 0; i < AB3550_NUM_EVENT_REG; i++) {
1150 int err;
1151
1152 err = mask_and_set_register_interruptible(ab, 0,
1153 (AB3550_IMR1 + i), ~0, mask[i]);
1154 if (err)
1155 dev_err(&ab->i2c_client[0]->dev,
1156 "ab3550_mask_work failed 0x%x,0x%x\n",
1157 (AB3550_IMR1 + i), mask[i]);
1158 }
1159}
1160
1161static void ab3550_mask(unsigned int irq)
1162{
1163 unsigned long flags;
1164 struct ab3550 *ab;
1165 struct ab3550_platform_data *plf_data;
1166
1167 ab = get_irq_chip_data(irq);
1168 plf_data = ab->i2c_client[0]->dev.platform_data;
1169 irq -= plf_data->irq.base;
1170
1171 spin_lock_irqsave(&ab->event_lock, flags);
1172 ab->event_mask[irq / 8] |= BIT(irq % 8);
1173 spin_unlock_irqrestore(&ab->event_lock, flags);
1174
1175 schedule_work(&ab->mask_work);
1176}
1177
1178static void ab3550_unmask(unsigned int irq)
1179{
1180 unsigned long flags;
1181 struct ab3550 *ab;
1182 struct ab3550_platform_data *plf_data;
1183
1184 ab = get_irq_chip_data(irq);
1185 plf_data = ab->i2c_client[0]->dev.platform_data;
1186 irq -= plf_data->irq.base;
1187
1188 spin_lock_irqsave(&ab->event_lock, flags);
1189 ab->event_mask[irq / 8] &= ~BIT(irq % 8);
1190 spin_unlock_irqrestore(&ab->event_lock, flags);
1191
1192 schedule_work(&ab->mask_work);
1193}
1194
1195static void noop(unsigned int irq)
1196{
1197}
1198
1199static struct irq_chip ab3550_irq_chip = {
1200 .name = "ab3550-core", /* Keep the same name as the request */
1201 .startup = NULL, /* defaults to enable */
1202 .shutdown = NULL, /* defaults to disable */
1203 .enable = NULL, /* defaults to unmask */
1204 .disable = ab3550_mask, /* No default to mask in chip.c */
1205 .ack = noop,
1206 .mask = ab3550_mask,
1207 .unmask = ab3550_unmask,
1208 .end = NULL,
1209};
1210
1211struct ab_family_id {
1212 u8 id;
1213 char *name;
1214};
1215
1216static const struct ab_family_id ids[] __initdata = {
1217 /* AB3550 */
1218 {
1219 .id = AB3550_P1A,
1220 .name = "P1A"
1221 },
1222 /* Terminator */
1223 {
1224 .id = 0x00,
1225 }
1226};
1227
1228static int __init ab3550_probe(struct i2c_client *client,
1229 const struct i2c_device_id *id)
1230{
1231 struct ab3550 *ab;
1232 struct ab3550_platform_data *ab3550_plf_data =
1233 client->dev.platform_data;
1234 int err;
1235 int i;
1236 int num_i2c_clients = 0;
1237
1238 ab = kzalloc(sizeof(struct ab3550), GFP_KERNEL);
1239 if (!ab) {
1240 dev_err(&client->dev,
1241 "could not allocate " AB3550_NAME_STRING " device\n");
1242 return -ENOMEM;
1243 }
1244
1245 /* Initialize data structure */
1246 mutex_init(&ab->access_mutex);
1247 spin_lock_init(&ab->event_lock);
1248 ab->i2c_client[0] = client;
1249
1250 i2c_set_clientdata(client, ab);
1251
1252 /* Read chip ID register */
1253 err = get_register_interruptible(ab, 0, AB3550_CID_REG, &ab->chip_id);
1254 if (err) {
1255 dev_err(&client->dev, "could not communicate with the analog "
1256 "baseband chip\n");
1257 goto exit_no_detect;
1258 }
1259
1260 for (i = 0; ids[i].id != 0x0; i++) {
1261 if (ids[i].id == ab->chip_id) {
1262 snprintf(&ab->chip_name[0], sizeof(ab->chip_name) - 1,
1263 AB3550_ID_FORMAT_STRING, ids[i].name);
1264 break;
1265 }
1266 }
1267
1268 if (ids[i].id == 0x0) {
1269 dev_err(&client->dev, "unknown analog baseband chip id: 0x%x\n",
1270 ab->chip_id);
1271 dev_err(&client->dev, "driver not started!\n");
1272 goto exit_no_detect;
1273 }
1274
1275 dev_info(&client->dev, "detected AB chip: %s\n", &ab->chip_name[0]);
1276
1277 /* Attach other dummy I2C clients. */
1278 while (++num_i2c_clients < AB3550_NUM_BANKS) {
1279 ab->i2c_client[num_i2c_clients] =
1280 i2c_new_dummy(client->adapter,
1281 (client->addr + num_i2c_clients));
1282 if (!ab->i2c_client[num_i2c_clients]) {
1283 err = -ENOMEM;
1284 goto exit_no_dummy_client;
1285 }
1286 strlcpy(ab->i2c_client[num_i2c_clients]->name, id->name,
1287 sizeof(ab->i2c_client[num_i2c_clients]->name));
1288 }
1289
1290 err = ab3550_setup(ab);
1291 if (err)
1292 goto exit_no_setup;
1293
1294 INIT_WORK(&ab->mask_work, ab3550_mask_work);
1295
1296 for (i = 0; i < ab3550_plf_data->irq.count; i++) {
1297 unsigned int irq;
1298
1299 irq = ab3550_plf_data->irq.base + i;
1300 set_irq_chip_data(irq, ab);
1301 set_irq_chip_and_handler(irq, &ab3550_irq_chip,
1302 handle_simple_irq);
1303 set_irq_nested_thread(irq, 1);
1304#ifdef CONFIG_ARM
1305 set_irq_flags(irq, IRQF_VALID);
1306#else
1307 set_irq_noprobe(irq);
1308#endif
1309 }
1310
1311 err = request_threaded_irq(client->irq, NULL, ab3550_irq_handler,
1312 IRQF_ONESHOT, "ab3550-core", ab);
1313	/* This genuinely unpredictable IRQ is of course sampled for entropy */
1314 rand_initialize_irq(client->irq);
1315
1316 if (err)
1317 goto exit_no_irq;
1318
1319 err = abx500_register_ops(&client->dev, &ab3550_ops);
1320 if (err)
1321 goto exit_no_ops;
1322
1323 /* Set up and register the platform devices. */
1324 for (i = 0; i < AB3550_NUM_DEVICES; i++) {
1325 ab3550_devs[i].platform_data = ab3550_plf_data->dev_data[i];
1326 ab3550_devs[i].data_size = ab3550_plf_data->dev_data_sz[i];
1327 }
1328
1329 err = mfd_add_devices(&client->dev, 0, ab3550_devs,
1330 ARRAY_SIZE(ab3550_devs), NULL,
1331 ab3550_plf_data->irq.base);
1332
1333 ab3550_setup_debugfs(ab);
1334
1335 return 0;
1336
1337exit_no_ops:
1338exit_no_irq:
1339exit_no_setup:
1340exit_no_dummy_client:
1341 /* Unregister the dummy i2c clients. */
1342 while (--num_i2c_clients)
1343 i2c_unregister_device(ab->i2c_client[num_i2c_clients]);
1344exit_no_detect:
1345 kfree(ab);
1346 return err;
1347}
1348
1349static int __exit ab3550_remove(struct i2c_client *client)
1350{
1351 struct ab3550 *ab = i2c_get_clientdata(client);
1352 int num_i2c_clients = AB3550_NUM_BANKS;
1353
1354 mfd_remove_devices(&client->dev);
1355 ab3550_remove_debugfs();
1356
1357 while (--num_i2c_clients)
1358 i2c_unregister_device(ab->i2c_client[num_i2c_clients]);
1359
1360 /*
1361 * At this point, all subscribers should have unregistered
1362 * their notifiers so deactivate IRQ
1363 */
1364 free_irq(client->irq, ab);
1365 i2c_set_clientdata(client, NULL);
1366 kfree(ab);
1367 return 0;
1368}
1369
1370static const struct i2c_device_id ab3550_id[] = {
1371 {AB3550_NAME_STRING, 0},
1372 {}
1373};
1374MODULE_DEVICE_TABLE(i2c, ab3550_id);
1375
1376static struct i2c_driver ab3550_driver = {
1377 .driver = {
1378 .name = AB3550_NAME_STRING,
1379 .owner = THIS_MODULE,
1380 },
1381 .id_table = ab3550_id,
1382 .probe = ab3550_probe,
1383 .remove = __exit_p(ab3550_remove),
1384};
1385
1386static int __init ab3550_i2c_init(void)
1387{
1388 return i2c_add_driver(&ab3550_driver);
1389}
1390
1391static void __exit ab3550_i2c_exit(void)
1392{
1393 i2c_del_driver(&ab3550_driver);
1394}
1395
1396subsys_initcall(ab3550_i2c_init);
1397module_exit(ab3550_i2c_exit);
1398
1399MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
1400MODULE_DESCRIPTION("AB3550 core driver");
1401MODULE_LICENSE("GPL");
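With the ops registered against the I2C client device and the mfd cells parented to it, the AB3550 subdrivers (like the converted AB3100 OTP driver above) reach the chip through the generic abx500 accessors rather than through chip-specific calls. A minimal sketch of such a call from a hypothetical child platform driver; the bank and register address are placeholders, only the abx500_* calls and their signatures come from this patch:

	/* Hypothetical code in one of the ab3550-* subdrivers. */
	static int example_child_read_modify(struct platform_device *pdev)
	{
		u8 val;
		int err;

		/* Bank 1, register 0x10: illustrative values only. */
		err = abx500_get_register_interruptible(&pdev->dev, 1, 0x10, &val);
		if (err)
			return err;

		return abx500_set_register_interruptible(&pdev->dev, 1, 0x10,
							 val | 0x01);
	}

The range tables above still apply: abx500-core looks up the ops via the child's parent device, while the per-cell permission check keys off pdev->id, so each cell can only touch the registers listed for it; for an atomic update the .mask_and_set_register op is the better fit.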
diff --git a/drivers/mfd/ab4500-core.c b/drivers/mfd/ab4500-core.c
deleted file mode 100644
index c275daa3ab1a..000000000000
--- a/drivers/mfd/ab4500-core.c
+++ /dev/null
@@ -1,209 +0,0 @@
1/*
2 * Copyright (C) 2009 ST-Ericsson
3 *
4 * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
5 *
6 * This program is free software; you can redistribute it
7 * and/or modify it under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation.
9 *
10 * AB4500 is a companion power management chip used with U8500.
11 * On this platform, this is interfaced with SSP0 controller
 12 * which is an ARM primecell pl022.
13 *
14 * At the moment the module just exports read/write features.
15 * Interrupt management to be added - TODO.
16 */
17#include <linux/kernel.h>
18#include <linux/slab.h>
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/platform_device.h>
22#include <linux/spi/spi.h>
23#include <linux/mfd/ab4500.h>
24
25/* just required if probe fails, we need to
26 * unregister the device
27 */
28static struct spi_driver ab4500_driver;
29
30/*
 31 * This function writes to any AB4500 register using the
 32 * SPI protocol; before writing, it packs the data into the
 33 * 24 bit frame format shown below
34 *
35 * *|------------------------------------|
36 * *| 23|22...18|17.......10|9|8|7......0|
37 * *| r/w bank adr data |
38 * * ------------------------------------
39 *
40 * This function shouldn't be called from interrupt
41 * context
42 */
43int ab4500_write(struct ab4500 *ab4500, unsigned char block,
44 unsigned long addr, unsigned char data)
45{
46 struct spi_transfer xfer;
47 struct spi_message msg;
48 int err;
49 unsigned long spi_data =
50 block << 18 | addr << 10 | data;
51
52 mutex_lock(&ab4500->lock);
53 ab4500->tx_buf[0] = spi_data;
54 ab4500->rx_buf[0] = 0;
55
56 xfer.tx_buf = ab4500->tx_buf;
57 xfer.rx_buf = NULL;
58 xfer.len = sizeof(unsigned long);
59
60 spi_message_init(&msg);
61 spi_message_add_tail(&xfer, &msg);
62
63 err = spi_sync(ab4500->spi, &msg);
64 mutex_unlock(&ab4500->lock);
65
66 return err;
67}
68EXPORT_SYMBOL(ab4500_write);
69
70int ab4500_read(struct ab4500 *ab4500, unsigned char block,
71 unsigned long addr)
72{
73 struct spi_transfer xfer;
74 struct spi_message msg;
75 unsigned long spi_data =
76 1 << 23 | block << 18 | addr << 10;
77
78 mutex_lock(&ab4500->lock);
79 ab4500->tx_buf[0] = spi_data;
80 ab4500->rx_buf[0] = 0;
81
82 xfer.tx_buf = ab4500->tx_buf;
83 xfer.rx_buf = ab4500->rx_buf;
84 xfer.len = sizeof(unsigned long);
85
86 spi_message_init(&msg);
87 spi_message_add_tail(&xfer, &msg);
88
89 spi_sync(ab4500->spi, &msg);
90 mutex_unlock(&ab4500->lock);
91
92 return ab4500->rx_buf[0];
93}
94EXPORT_SYMBOL(ab4500_read);
95
96/* ref: ab3100 core */
97#define AB4500_DEVICE(devname, devid) \
98static struct platform_device ab4500_##devname##_device = { \
99 .name = devid, \
100 .id = -1, \
101}
102
 103/* list of child devices of ab4500 - not all are
 104 * populated here - TODO
105 */
106AB4500_DEVICE(charger, "ab4500-charger");
107AB4500_DEVICE(audio, "ab4500-audio");
108AB4500_DEVICE(usb, "ab4500-usb");
109AB4500_DEVICE(tvout, "ab4500-tvout");
110AB4500_DEVICE(sim, "ab4500-sim");
111AB4500_DEVICE(gpadc, "ab4500-gpadc");
112AB4500_DEVICE(clkmgt, "ab4500-clkmgt");
113AB4500_DEVICE(misc, "ab4500-misc");
114
115static struct platform_device *ab4500_platform_devs[] = {
116 &ab4500_charger_device,
117 &ab4500_audio_device,
118 &ab4500_usb_device,
119 &ab4500_tvout_device,
120 &ab4500_sim_device,
121 &ab4500_gpadc_device,
122 &ab4500_clkmgt_device,
123 &ab4500_misc_device,
124};
125
126static int __init ab4500_probe(struct spi_device *spi)
127{
128 struct ab4500 *ab4500;
129 unsigned char revision;
130 int err = 0;
131 int i;
132
133 ab4500 = kzalloc(sizeof *ab4500, GFP_KERNEL);
134 if (!ab4500) {
135 dev_err(&spi->dev, "could not allocate AB4500\n");
136 err = -ENOMEM;
137 goto not_detect;
138 }
139
140 ab4500->spi = spi;
141 spi_set_drvdata(spi, ab4500);
142
143 mutex_init(&ab4500->lock);
144
145 /* read the revision register */
146 revision = ab4500_read(ab4500, AB4500_MISC, AB4500_REV_REG);
147
148 /* revision id 0x0 is for early drop, 0x10 is for cut1.0 */
149 if (revision == 0x0 || revision == 0x10)
150 dev_info(&spi->dev, "Detected chip: %s, revision = %x\n",
151 ab4500_driver.driver.name, revision);
152 else {
153 dev_err(&spi->dev, "unknown chip: 0x%x\n", revision);
154 goto not_detect;
155 }
156
157 for (i = 0; i < ARRAY_SIZE(ab4500_platform_devs); i++) {
158 ab4500_platform_devs[i]->dev.parent =
159 &spi->dev;
160 platform_set_drvdata(ab4500_platform_devs[i], ab4500);
161 }
162
163 /* register the ab4500 platform devices */
164 platform_add_devices(ab4500_platform_devs,
165 ARRAY_SIZE(ab4500_platform_devs));
166
167 return err;
168
169 not_detect:
170 spi_unregister_driver(&ab4500_driver);
171 kfree(ab4500);
172 return err;
173}
174
175static int __devexit ab4500_remove(struct spi_device *spi)
176{
177 struct ab4500 *ab4500 =
178 spi_get_drvdata(spi);
179
180 kfree(ab4500);
181
182 return 0;
183}
184
185static struct spi_driver ab4500_driver = {
186 .driver = {
187 .name = "ab4500",
188 .owner = THIS_MODULE,
189 },
190 .probe = ab4500_probe,
191 .remove = __devexit_p(ab4500_remove)
192};
193
194static int __devinit ab4500_init(void)
195{
196 return spi_register_driver(&ab4500_driver);
197}
198
199static void __exit ab4500_exit(void)
200{
201 spi_unregister_driver(&ab4500_driver);
202}
203
204subsys_initcall(ab4500_init);
205module_exit(ab4500_exit);
206
 207MODULE_AUTHOR("Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>");
208MODULE_DESCRIPTION("AB4500 core driver");
209MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
new file mode 100644
index 000000000000..f3d26fa9c34d
--- /dev/null
+++ b/drivers/mfd/ab8500-core.c
@@ -0,0 +1,444 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License v2
5 * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
6 * Author: Rabin Vincent <rabin.vincent@stericsson.com>
7 */
8
9#include <linux/kernel.h>
10#include <linux/slab.h>
11#include <linux/init.h>
12#include <linux/irq.h>
13#include <linux/delay.h>
14#include <linux/interrupt.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/mfd/core.h>
18#include <linux/mfd/ab8500.h>
19
20/*
21 * Interrupt register offsets
22 * Bank : 0x0E
23 */
24#define AB8500_IT_SOURCE1_REG 0x0E00
25#define AB8500_IT_SOURCE2_REG 0x0E01
26#define AB8500_IT_SOURCE3_REG 0x0E02
27#define AB8500_IT_SOURCE4_REG 0x0E03
28#define AB8500_IT_SOURCE5_REG 0x0E04
29#define AB8500_IT_SOURCE6_REG 0x0E05
30#define AB8500_IT_SOURCE7_REG 0x0E06
31#define AB8500_IT_SOURCE8_REG 0x0E07
32#define AB8500_IT_SOURCE19_REG 0x0E12
33#define AB8500_IT_SOURCE20_REG 0x0E13
34#define AB8500_IT_SOURCE21_REG 0x0E14
35#define AB8500_IT_SOURCE22_REG 0x0E15
36#define AB8500_IT_SOURCE23_REG 0x0E16
37#define AB8500_IT_SOURCE24_REG 0x0E17
38
39/*
40 * latch registers
41 */
42#define AB8500_IT_LATCH1_REG 0x0E20
43#define AB8500_IT_LATCH2_REG 0x0E21
44#define AB8500_IT_LATCH3_REG 0x0E22
45#define AB8500_IT_LATCH4_REG 0x0E23
46#define AB8500_IT_LATCH5_REG 0x0E24
47#define AB8500_IT_LATCH6_REG 0x0E25
48#define AB8500_IT_LATCH7_REG 0x0E26
49#define AB8500_IT_LATCH8_REG 0x0E27
50#define AB8500_IT_LATCH9_REG 0x0E28
51#define AB8500_IT_LATCH10_REG 0x0E29
52#define AB8500_IT_LATCH19_REG 0x0E32
53#define AB8500_IT_LATCH20_REG 0x0E33
54#define AB8500_IT_LATCH21_REG 0x0E34
55#define AB8500_IT_LATCH22_REG 0x0E35
56#define AB8500_IT_LATCH23_REG 0x0E36
57#define AB8500_IT_LATCH24_REG 0x0E37
58
59/*
60 * mask registers
61 */
62
63#define AB8500_IT_MASK1_REG 0x0E40
64#define AB8500_IT_MASK2_REG 0x0E41
65#define AB8500_IT_MASK3_REG 0x0E42
66#define AB8500_IT_MASK4_REG 0x0E43
67#define AB8500_IT_MASK5_REG 0x0E44
68#define AB8500_IT_MASK6_REG 0x0E45
69#define AB8500_IT_MASK7_REG 0x0E46
70#define AB8500_IT_MASK8_REG 0x0E47
71#define AB8500_IT_MASK9_REG 0x0E48
72#define AB8500_IT_MASK10_REG 0x0E49
73#define AB8500_IT_MASK11_REG 0x0E4A
74#define AB8500_IT_MASK12_REG 0x0E4B
75#define AB8500_IT_MASK13_REG 0x0E4C
76#define AB8500_IT_MASK14_REG 0x0E4D
77#define AB8500_IT_MASK15_REG 0x0E4E
78#define AB8500_IT_MASK16_REG 0x0E4F
79#define AB8500_IT_MASK17_REG 0x0E50
80#define AB8500_IT_MASK18_REG 0x0E51
81#define AB8500_IT_MASK19_REG 0x0E52
82#define AB8500_IT_MASK20_REG 0x0E53
83#define AB8500_IT_MASK21_REG 0x0E54
84#define AB8500_IT_MASK22_REG 0x0E55
85#define AB8500_IT_MASK23_REG 0x0E56
86#define AB8500_IT_MASK24_REG 0x0E57
87
88#define AB8500_REV_REG 0x1080
89
90/*
91 * Map interrupt numbers to the LATCH and MASK register offsets, Interrupt
92 * numbers are indexed into this array with (num / 8).
93 *
94 * This is one off from the register names, i.e. AB8500_IT_MASK1_REG is at
95 * offset 0.
96 */
97static const int ab8500_irq_regoffset[AB8500_NUM_IRQ_REGS] = {
98 0, 1, 2, 3, 4, 6, 7, 8, 9, 18, 19, 20, 21,
99};
100
101static int __ab8500_write(struct ab8500 *ab8500, u16 addr, u8 data)
102{
103 int ret;
104
105 dev_vdbg(ab8500->dev, "wr: addr %#x <= %#x\n", addr, data);
106
107 ret = ab8500->write(ab8500, addr, data);
108 if (ret < 0)
109 dev_err(ab8500->dev, "failed to write reg %#x: %d\n",
110 addr, ret);
111
112 return ret;
113}
114
115/**
116 * ab8500_write() - write an AB8500 register
117 * @ab8500: device to write to
118 * @addr: address of the register
119 * @data: value to write
120 */
121int ab8500_write(struct ab8500 *ab8500, u16 addr, u8 data)
122{
123 int ret;
124
125 mutex_lock(&ab8500->lock);
126 ret = __ab8500_write(ab8500, addr, data);
127 mutex_unlock(&ab8500->lock);
128
129 return ret;
130}
131EXPORT_SYMBOL_GPL(ab8500_write);
132
133static int __ab8500_read(struct ab8500 *ab8500, u16 addr)
134{
135 int ret;
136
137 ret = ab8500->read(ab8500, addr);
138 if (ret < 0)
139 dev_err(ab8500->dev, "failed to read reg %#x: %d\n",
140 addr, ret);
141
142 dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
143
144 return ret;
145}
146
147/**
148 * ab8500_read() - read an AB8500 register
149 * @ab8500: device to read from
150 * @addr: address of the register
151 */
152int ab8500_read(struct ab8500 *ab8500, u16 addr)
153{
154 int ret;
155
156 mutex_lock(&ab8500->lock);
157 ret = __ab8500_read(ab8500, addr);
158 mutex_unlock(&ab8500->lock);
159
160 return ret;
161}
162EXPORT_SYMBOL_GPL(ab8500_read);
163
164/**
165 * ab8500_set_bits() - set a bitfield in an AB8500 register
166 * @ab8500: device to read from
167 * @addr: address of the register
168 * @mask: mask of the bitfield to modify
169 * @data: value to set to the bitfield
170 */
171int ab8500_set_bits(struct ab8500 *ab8500, u16 addr, u8 mask, u8 data)
172{
173 int ret;
174
175 mutex_lock(&ab8500->lock);
176
177 ret = __ab8500_read(ab8500, addr);
178 if (ret < 0)
179 goto out;
180
181 ret &= ~mask;
182 ret |= data;
183
184 ret = __ab8500_write(ab8500, addr, ret);
185
186out:
187 mutex_unlock(&ab8500->lock);
188 return ret;
189}
190EXPORT_SYMBOL_GPL(ab8500_set_bits);
191
192static void ab8500_irq_lock(unsigned int irq)
193{
194 struct ab8500 *ab8500 = get_irq_chip_data(irq);
195
196 mutex_lock(&ab8500->irq_lock);
197}
198
199static void ab8500_irq_sync_unlock(unsigned int irq)
200{
201 struct ab8500 *ab8500 = get_irq_chip_data(irq);
202 int i;
203
204 for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
205 u8 old = ab8500->oldmask[i];
206 u8 new = ab8500->mask[i];
207 int reg;
208
209 if (new == old)
210 continue;
211
212 ab8500->oldmask[i] = new;
213
214 reg = AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i];
215 ab8500_write(ab8500, reg, new);
216 }
217
218 mutex_unlock(&ab8500->irq_lock);
219}
220
221static void ab8500_irq_mask(unsigned int irq)
222{
223 struct ab8500 *ab8500 = get_irq_chip_data(irq);
224 int offset = irq - ab8500->irq_base;
225 int index = offset / 8;
226 int mask = 1 << (offset % 8);
227
228 ab8500->mask[index] |= mask;
229}
230
231static void ab8500_irq_unmask(unsigned int irq)
232{
233 struct ab8500 *ab8500 = get_irq_chip_data(irq);
234 int offset = irq - ab8500->irq_base;
235 int index = offset / 8;
236 int mask = 1 << (offset % 8);
237
238 ab8500->mask[index] &= ~mask;
239}
240
241static struct irq_chip ab8500_irq_chip = {
242 .name = "ab8500",
243 .bus_lock = ab8500_irq_lock,
244 .bus_sync_unlock = ab8500_irq_sync_unlock,
245 .mask = ab8500_irq_mask,
246 .unmask = ab8500_irq_unmask,
247};
248
249static irqreturn_t ab8500_irq(int irq, void *dev)
250{
251 struct ab8500 *ab8500 = dev;
252 int i;
253
254 dev_vdbg(ab8500->dev, "interrupt\n");
255
256 for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
257 int regoffset = ab8500_irq_regoffset[i];
258 int status;
259
260 status = ab8500_read(ab8500, AB8500_IT_LATCH1_REG + regoffset);
261 if (status <= 0)
262 continue;
263
264 do {
265 int bit = __ffs(status);
266 int line = i * 8 + bit;
267
268 handle_nested_irq(ab8500->irq_base + line);
269 status &= ~(1 << bit);
270 } while (status);
271 }
272
273 return IRQ_HANDLED;
274}
275
276static int ab8500_irq_init(struct ab8500 *ab8500)
277{
278 int base = ab8500->irq_base;
279 int irq;
280
281 for (irq = base; irq < base + AB8500_NR_IRQS; irq++) {
282 set_irq_chip_data(irq, ab8500);
283 set_irq_chip_and_handler(irq, &ab8500_irq_chip,
284 handle_simple_irq);
285 set_irq_nested_thread(irq, 1);
286#ifdef CONFIG_ARM
287 set_irq_flags(irq, IRQF_VALID);
288#else
289 set_irq_noprobe(irq);
290#endif
291 }
292
293 return 0;
294}
295
296static void ab8500_irq_remove(struct ab8500 *ab8500)
297{
298 int base = ab8500->irq_base;
299 int irq;
300
301 for (irq = base; irq < base + AB8500_NR_IRQS; irq++) {
302#ifdef CONFIG_ARM
303 set_irq_flags(irq, 0);
304#endif
305 set_irq_chip_and_handler(irq, NULL, NULL);
306 set_irq_chip_data(irq, NULL);
307 }
308}
309
310static struct resource ab8500_gpadc_resources[] = {
311 {
312 .name = "HW_CONV_END",
313 .start = AB8500_INT_GP_HW_ADC_CONV_END,
314 .end = AB8500_INT_GP_HW_ADC_CONV_END,
315 .flags = IORESOURCE_IRQ,
316 },
317 {
318 .name = "SW_CONV_END",
319 .start = AB8500_INT_GP_SW_ADC_CONV_END,
320 .end = AB8500_INT_GP_SW_ADC_CONV_END,
321 .flags = IORESOURCE_IRQ,
322 },
323};
324
325static struct resource ab8500_rtc_resources[] = {
326 {
327 .name = "60S",
328 .start = AB8500_INT_RTC_60S,
329 .end = AB8500_INT_RTC_60S,
330 .flags = IORESOURCE_IRQ,
331 },
332 {
333 .name = "ALARM",
334 .start = AB8500_INT_RTC_ALARM,
335 .end = AB8500_INT_RTC_ALARM,
336 .flags = IORESOURCE_IRQ,
337 },
338};
339
340static struct mfd_cell ab8500_devs[] = {
341 {
342 .name = "ab8500-gpadc",
343 .num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
344 .resources = ab8500_gpadc_resources,
345 },
346 {
347 .name = "ab8500-rtc",
348 .num_resources = ARRAY_SIZE(ab8500_rtc_resources),
349 .resources = ab8500_rtc_resources,
350 },
351 { .name = "ab8500-charger", },
352 { .name = "ab8500-audio", },
353 { .name = "ab8500-usb", },
354 { .name = "ab8500-pwm", },
355};
356
357int __devinit ab8500_init(struct ab8500 *ab8500)
358{
359 struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev);
360 int ret;
361 int i;
362
363 if (plat)
364 ab8500->irq_base = plat->irq_base;
365
366 mutex_init(&ab8500->lock);
367 mutex_init(&ab8500->irq_lock);
368
369 ret = ab8500_read(ab8500, AB8500_REV_REG);
370 if (ret < 0)
371 return ret;
372
373 /*
374 * 0x0 - Early Drop
375 * 0x10 - Cut 1.0
376 * 0x11 - Cut 1.1
377 */
378 if (ret == 0x0 || ret == 0x10 || ret == 0x11) {
379 ab8500->revision = ret;
380 dev_info(ab8500->dev, "detected chip, revision: %#x\n", ret);
381 } else {
382 dev_err(ab8500->dev, "unknown chip, revision: %#x\n", ret);
383 return -EINVAL;
384 }
385
386 if (plat && plat->init)
387 plat->init(ab8500);
388
389 /* Clear and mask all interrupts */
390 for (i = 0; i < 10; i++) {
391 ab8500_read(ab8500, AB8500_IT_LATCH1_REG + i);
392 ab8500_write(ab8500, AB8500_IT_MASK1_REG + i, 0xff);
393 }
394
395 for (i = 18; i < 24; i++) {
396 ab8500_read(ab8500, AB8500_IT_LATCH1_REG + i);
397 ab8500_write(ab8500, AB8500_IT_MASK1_REG + i, 0xff);
398 }
399
400 for (i = 0; i < AB8500_NUM_IRQ_REGS; i++)
401 ab8500->mask[i] = ab8500->oldmask[i] = 0xff;
402
403 if (ab8500->irq_base) {
404 ret = ab8500_irq_init(ab8500);
405 if (ret)
406 return ret;
407
408 ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq,
409 IRQF_ONESHOT, "ab8500", ab8500);
410 if (ret)
411 goto out_removeirq;
412 }
413
414 ret = mfd_add_devices(ab8500->dev, -1, ab8500_devs,
415 ARRAY_SIZE(ab8500_devs), NULL,
416 ab8500->irq_base);
417 if (ret)
418 goto out_freeirq;
419
420 return ret;
421
422out_freeirq:
423 if (ab8500->irq_base) {
424 free_irq(ab8500->irq, ab8500);
425out_removeirq:
426 ab8500_irq_remove(ab8500);
427 }
428 return ret;
429}
430
431int __devexit ab8500_exit(struct ab8500 *ab8500)
432{
433 mfd_remove_devices(ab8500->dev);
434 if (ab8500->irq_base) {
435 free_irq(ab8500->irq, ab8500);
436 ab8500_irq_remove(ab8500);
437 }
438
439 return 0;
440}
441
442MODULE_AUTHOR("Srinidhi Kasagar, Rabin Vincent");
443MODULE_DESCRIPTION("AB8500 MFD core");
444MODULE_LICENSE("GPL v2");
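Unlike the I2C-based AB3100/AB3550 cores above, the AB8500 core is transport-agnostic: whoever embeds struct ab8500 fills in the ->read and ->write callbacks (the SPI transport follows below) and then calls ab8500_init(). Clients afterwards use ab8500_read()/ab8500_write()/ab8500_set_bits(), which serialize access with ab8500->lock. A small sketch of the read-modify-write pattern from a client's perspective; the register address and bit values are placeholders:

	static int example_ab8500_client(struct ab8500 *ab8500)
	{
		int ret;

		/* Force bits 1:0 of an illustrative register to 0b10. */
		ret = ab8500_set_bits(ab8500, 0x0380, 0x03, 0x02);
		if (ret < 0)
			return ret;

		/* ab8500_read() returns the register value or a negative errno. */
		ret = ab8500_read(ab8500, 0x0380);
		return (ret < 0) ? ret : 0;
	}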
diff --git a/drivers/mfd/ab8500-spi.c b/drivers/mfd/ab8500-spi.c
new file mode 100644
index 000000000000..b81d4f768ef6
--- /dev/null
+++ b/drivers/mfd/ab8500-spi.c
@@ -0,0 +1,133 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License v2
5 * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
6 */
7
8#include <linux/kernel.h>
9#include <linux/slab.h>
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/platform_device.h>
13#include <linux/spi/spi.h>
14#include <linux/mfd/ab8500.h>
15
16/*
 17 * This function writes to any AB8500 register using the
 18 * SPI protocol; before writing, it packs the data into the
 19 * 24 bit frame format shown below
20 *
21 * *|------------------------------------|
22 * *| 23|22...18|17.......10|9|8|7......0|
23 * *| r/w bank adr data |
24 * * ------------------------------------
25 *
26 * This function shouldn't be called from interrupt
27 * context
28 */
29static int ab8500_spi_write(struct ab8500 *ab8500, u16 addr, u8 data)
30{
31 struct spi_device *spi = container_of(ab8500->dev, struct spi_device,
32 dev);
33 unsigned long spi_data = addr << 10 | data;
34 struct spi_transfer xfer;
35 struct spi_message msg;
36
37 ab8500->tx_buf[0] = spi_data;
38 ab8500->rx_buf[0] = 0;
39
40 xfer.tx_buf = ab8500->tx_buf;
41 xfer.rx_buf = NULL;
42 xfer.len = sizeof(unsigned long);
43
44 spi_message_init(&msg);
45 spi_message_add_tail(&xfer, &msg);
46
47 return spi_sync(spi, &msg);
48}
49
50static int ab8500_spi_read(struct ab8500 *ab8500, u16 addr)
51{
52 struct spi_device *spi = container_of(ab8500->dev, struct spi_device,
53 dev);
54 unsigned long spi_data = 1 << 23 | addr << 10;
55 struct spi_transfer xfer;
56 struct spi_message msg;
57 int ret;
58
59 ab8500->tx_buf[0] = spi_data;
60 ab8500->rx_buf[0] = 0;
61
62 xfer.tx_buf = ab8500->tx_buf;
63 xfer.rx_buf = ab8500->rx_buf;
64 xfer.len = sizeof(unsigned long);
65
66 spi_message_init(&msg);
67 spi_message_add_tail(&xfer, &msg);
68
69 ret = spi_sync(spi, &msg);
70 if (!ret)
71 ret = ab8500->rx_buf[0];
72
73 return ret;
74}
75
76static int __devinit ab8500_spi_probe(struct spi_device *spi)
77{
78 struct ab8500 *ab8500;
79 int ret;
80
81 ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
82 if (!ab8500)
83 return -ENOMEM;
84
85 ab8500->dev = &spi->dev;
86 ab8500->irq = spi->irq;
87
88 ab8500->read = ab8500_spi_read;
89 ab8500->write = ab8500_spi_write;
90
91 spi_set_drvdata(spi, ab8500);
92
93 ret = ab8500_init(ab8500);
94 if (ret)
95 kfree(ab8500);
96
97 return ret;
98}
99
100static int __devexit ab8500_spi_remove(struct spi_device *spi)
101{
102 struct ab8500 *ab8500 = spi_get_drvdata(spi);
103
104 ab8500_exit(ab8500);
105 kfree(ab8500);
106
107 return 0;
108}
109
110static struct spi_driver ab8500_spi_driver = {
111 .driver = {
112 .name = "ab8500",
113 .owner = THIS_MODULE,
114 },
115 .probe = ab8500_spi_probe,
116 .remove = __devexit_p(ab8500_spi_remove)
117};
118
119static int __init ab8500_spi_init(void)
120{
121 return spi_register_driver(&ab8500_spi_driver);
122}
123subsys_initcall(ab8500_spi_init);
124
125static void __exit ab8500_spi_exit(void)
126{
127 spi_unregister_driver(&ab8500_spi_driver);
128}
129module_exit(ab8500_spi_exit);
130
131MODULE_AUTHOR("Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>");
132MODULE_DESCRIPTION("AB8500 SPI");
133MODULE_LICENSE("GPL v2");
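For reference, a minimal sketch of how the 24-bit frame described in the comment above could be packed (the helper name and the bool flag are illustrative, not part of the driver):

	#include <linux/types.h>

	/* bit 23: r/w flag, bits 22..10: bank + register address, bits 7..0: data */
	static inline u32 ab8500_spi_pack_frame(u16 addr, u8 data, bool read)
	{
		u32 frame = (u32)addr << 10 | data;

		if (read)
			frame |= 1 << 23;	/* read transfers set the r/w bit */

		return frame;
	}

ab8500_spi_write() above corresponds to the read == false case; ab8500_spi_read() sets bit 23 before the transfer.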
diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
new file mode 100644
index 000000000000..3b3b97ec32a7
--- /dev/null
+++ b/drivers/mfd/abx500-core.c
@@ -0,0 +1,157 @@
1/*
2 * Copyright (C) 2007-2010 ST-Ericsson
3 * License terms: GNU General Public License (GPL) version 2
4 * Register access functions for the ABX500 Mixed Signal IC family.
5 * Author: Mattias Wallin <mattias.wallin@stericsson.com>
6 */
7
8#include <linux/list.h>
9#include <linux/slab.h>
10#include <linux/err.h>
11#include <linux/mfd/abx500.h>
12
13static LIST_HEAD(abx500_list);
14
15struct abx500_device_entry {
16 struct list_head list;
17 struct abx500_ops ops;
18 struct device *dev;
19};
20
21static void lookup_ops(struct device *dev, struct abx500_ops **ops)
22{
23 struct abx500_device_entry *dev_entry;
24
25 *ops = NULL;
26 list_for_each_entry(dev_entry, &abx500_list, list) {
27 if (dev_entry->dev == dev) {
28 *ops = &dev_entry->ops;
29 return;
30 }
31 }
32}
33
34int abx500_register_ops(struct device *dev, struct abx500_ops *ops)
35{
36 struct abx500_device_entry *dev_entry;
37
38 dev_entry = kzalloc(sizeof(struct abx500_device_entry), GFP_KERNEL);
39	if (!dev_entry) {
40		dev_err(dev, "register_ops kzalloc failed\n");
41 return -ENOMEM;
42 }
43 dev_entry->dev = dev;
44 memcpy(&dev_entry->ops, ops, sizeof(struct abx500_ops));
45
46 list_add_tail(&dev_entry->list, &abx500_list);
47 return 0;
48}
49EXPORT_SYMBOL(abx500_register_ops);
50
51void abx500_remove_ops(struct device *dev)
52{
53 struct abx500_device_entry *dev_entry, *tmp;
54
55 list_for_each_entry_safe(dev_entry, tmp, &abx500_list, list)
56 {
57 if (dev_entry->dev == dev) {
58 list_del(&dev_entry->list);
59 kfree(dev_entry);
60 }
61 }
62}
63EXPORT_SYMBOL(abx500_remove_ops);
64
65int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
66 u8 value)
67{
68 struct abx500_ops *ops;
69
70 lookup_ops(dev->parent, &ops);
71 if ((ops != NULL) && (ops->set_register != NULL))
72 return ops->set_register(dev, bank, reg, value);
73 else
74 return -ENOTSUPP;
75}
76EXPORT_SYMBOL(abx500_set_register_interruptible);
77
78int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
79 u8 *value)
80{
81 struct abx500_ops *ops;
82
83 lookup_ops(dev->parent, &ops);
84 if ((ops != NULL) && (ops->get_register != NULL))
85 return ops->get_register(dev, bank, reg, value);
86 else
87 return -ENOTSUPP;
88}
89EXPORT_SYMBOL(abx500_get_register_interruptible);
90
91int abx500_get_register_page_interruptible(struct device *dev, u8 bank,
92 u8 first_reg, u8 *regvals, u8 numregs)
93{
94 struct abx500_ops *ops;
95
96 lookup_ops(dev->parent, &ops);
97 if ((ops != NULL) && (ops->get_register_page != NULL))
98 return ops->get_register_page(dev, bank,
99 first_reg, regvals, numregs);
100 else
101 return -ENOTSUPP;
102}
103EXPORT_SYMBOL(abx500_get_register_page_interruptible);
104
105int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank,
106 u8 reg, u8 bitmask, u8 bitvalues)
107{
108 struct abx500_ops *ops;
109
110 lookup_ops(dev->parent, &ops);
111 if ((ops != NULL) && (ops->mask_and_set_register != NULL))
112 return ops->mask_and_set_register(dev, bank,
113 reg, bitmask, bitvalues);
114 else
115 return -ENOTSUPP;
116}
117EXPORT_SYMBOL(abx500_mask_and_set_register_interruptible);
118
119int abx500_get_chip_id(struct device *dev)
120{
121 struct abx500_ops *ops;
122
123 lookup_ops(dev->parent, &ops);
124 if ((ops != NULL) && (ops->get_chip_id != NULL))
125 return ops->get_chip_id(dev);
126 else
127 return -ENOTSUPP;
128}
129EXPORT_SYMBOL(abx500_get_chip_id);
130
131int abx500_event_registers_startup_state_get(struct device *dev, u8 *event)
132{
133 struct abx500_ops *ops;
134
135 lookup_ops(dev->parent, &ops);
136 if ((ops != NULL) && (ops->event_registers_startup_state_get != NULL))
137 return ops->event_registers_startup_state_get(dev, event);
138 else
139 return -ENOTSUPP;
140}
141EXPORT_SYMBOL(abx500_event_registers_startup_state_get);
142
143int abx500_startup_irq_enabled(struct device *dev, unsigned int irq)
144{
145 struct abx500_ops *ops;
146
147 lookup_ops(dev->parent, &ops);
148 if ((ops != NULL) && (ops->startup_irq_enabled != NULL))
149 return ops->startup_irq_enabled(dev, irq);
150 else
151 return -ENOTSUPP;
152}
153EXPORT_SYMBOL(abx500_startup_irq_enabled);
154
155MODULE_AUTHOR("Mattias Wallin <mattias.wallin@stericsson.com>");
156MODULE_DESCRIPTION("ABX500 core driver");
157MODULE_LICENSE("GPL");
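To illustrate how abx500-core is meant to be used, here is a hedged sketch of a chip driver registering its ops and a child device going through the generic accessors (driver and function names are hypothetical; note that lookup_ops() matches on dev->parent, so callers must be children of the registered device):

	#include <linux/mfd/abx500.h>

	static int mychip_set_register(struct device *dev, u8 bank, u8 reg, u8 value)
	{
		/* bus-specific register write would go here */
		return 0;
	}

	static struct abx500_ops mychip_ops = {
		.set_register = mychip_set_register,
		/* callbacks left NULL make the core return -ENOTSUPP */
	};

	static int mychip_register(struct device *dev)
	{
		return abx500_register_ops(dev, &mychip_ops);
	}

	/* A child device then writes a register without knowing the bus: */
	static int child_example(struct device *child)
	{
		return abx500_set_register_interruptible(child, 0x02, 0x10, 0xff);
	}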
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index 67181b147ab3..3ad915d0589c 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -544,6 +544,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
544 struct da903x_chip *chip = i2c_get_clientdata(client); 544 struct da903x_chip *chip = i2c_get_clientdata(client);
545 545
546 da903x_remove_subdevs(chip); 546 da903x_remove_subdevs(chip);
547 i2c_set_clientdata(client, NULL);
547 kfree(chip); 548 kfree(chip);
548 return 0; 549 return 0;
549} 550}
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
new file mode 100644
index 000000000000..9ed630799acc
--- /dev/null
+++ b/drivers/mfd/janz-cmodio.c
@@ -0,0 +1,304 @@
1/*
2 * Janz CMOD-IO MODULbus Carrier Board PCI Driver
3 *
4 * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
5 *
6 * Lots of inspiration and code were copied from drivers/mfd/sm501.c
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/pci.h>
18#include <linux/interrupt.h>
19#include <linux/delay.h>
20#include <linux/platform_device.h>
21#include <linux/mfd/core.h>
22
23#include <linux/mfd/janz.h>
24
25#define DRV_NAME "janz-cmodio"
26
27/* Size of each MODULbus module in PCI BAR4 */
28#define CMODIO_MODULBUS_SIZE 0x200
29
30/* Maximum number of MODULbus modules on a CMOD-IO carrier board */
31#define CMODIO_MAX_MODULES 4
32
33/* Module Parameters */
34static unsigned int num_modules = CMODIO_MAX_MODULES;
35static unsigned char *modules[CMODIO_MAX_MODULES] = {
36 "empty", "empty", "empty", "empty",
37};
38
39module_param_array(modules, charp, &num_modules, S_IRUGO);
40MODULE_PARM_DESC(modules, "MODULbus modules attached to the carrier board");
41
42/* Unique Device Id */
43static unsigned int cmodio_id;
44
45struct cmodio_device {
46 /* Parent PCI device */
47 struct pci_dev *pdev;
48
49 /* PLX control registers */
50 struct janz_cmodio_onboard_regs __iomem *ctrl;
51
52 /* hex switch position */
53 u8 hex;
54
55 /* mfd-core API */
56 struct mfd_cell cells[CMODIO_MAX_MODULES];
57 struct resource resources[3 * CMODIO_MAX_MODULES];
58 struct janz_platform_data pdata[CMODIO_MAX_MODULES];
59};
60
61/*
62 * Subdevices using the mfd-core API
63 */
64
65static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv,
66 char *name, unsigned int devno,
67 unsigned int modno)
68{
69 struct janz_platform_data *pdata;
70 struct mfd_cell *cell;
71 struct resource *res;
72 struct pci_dev *pci;
73
74 pci = priv->pdev;
75 cell = &priv->cells[devno];
76 res = &priv->resources[devno * 3];
77 pdata = &priv->pdata[devno];
78
79 cell->name = name;
80 cell->resources = res;
81 cell->num_resources = 3;
82
83 /* Setup the subdevice ID -- must be unique */
84 cell->id = cmodio_id++;
85
86 /* Add platform data */
87 pdata->modno = modno;
88 cell->platform_data = pdata;
89 cell->data_size = sizeof(*pdata);
90
91 /* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */
92 res->flags = IORESOURCE_MEM;
93 res->parent = &pci->resource[3];
94 res->start = pci->resource[3].start + (CMODIO_MODULBUS_SIZE * modno);
95 res->end = res->start + CMODIO_MODULBUS_SIZE - 1;
96 res++;
97
98 /* PLX Control Registers -- PCI BAR4 is interrupt and other registers */
99 res->flags = IORESOURCE_MEM;
100 res->parent = &pci->resource[4];
101 res->start = pci->resource[4].start;
102 res->end = pci->resource[4].end;
103 res++;
104
105 /*
106 * IRQ
107 *
108 * The start and end fields are used as an offset to the irq_base
109 * parameter passed into the mfd_add_devices() function call. All
110 * devices share the same IRQ.
111 */
112 res->flags = IORESOURCE_IRQ;
113 res->parent = NULL;
114 res->start = 0;
115 res->end = 0;
116 res++;
117
118 return 0;
119}
120
121/* Probe each submodule using kernel parameters */
122static int __devinit cmodio_probe_submodules(struct cmodio_device *priv)
123{
124 struct pci_dev *pdev = priv->pdev;
125 unsigned int num_probed = 0;
126 char *name;
127 int i;
128
129 for (i = 0; i < num_modules; i++) {
130 name = modules[i];
131 if (!strcmp(name, "") || !strcmp(name, "empty"))
132 continue;
133
134 dev_dbg(&priv->pdev->dev, "MODULbus %d: name %s\n", i, name);
135 cmodio_setup_subdevice(priv, name, num_probed, i);
136 num_probed++;
137 }
138
139 /* print an error message if no modules were probed */
140 if (num_probed == 0) {
141 dev_err(&priv->pdev->dev, "no MODULbus modules specified, "
142 "please set the ``modules'' kernel "
143 "parameter according to your "
144 "hardware configuration\n");
145 return -ENODEV;
146 }
147
148 return mfd_add_devices(&pdev->dev, 0, priv->cells,
149 num_probed, NULL, pdev->irq);
150}
151
152/*
153 * SYSFS Attributes
154 */
155
156static ssize_t mbus_show(struct device *dev, struct device_attribute *attr,
157 char *buf)
158{
159 struct cmodio_device *priv = dev_get_drvdata(dev);
160
161 return snprintf(buf, PAGE_SIZE, "%x\n", priv->hex);
162}
163
164static DEVICE_ATTR(modulbus_number, S_IRUGO, mbus_show, NULL);
165
166static struct attribute *cmodio_sysfs_attrs[] = {
167 &dev_attr_modulbus_number.attr,
168 NULL,
169};
170
171static const struct attribute_group cmodio_sysfs_attr_group = {
172 .attrs = cmodio_sysfs_attrs,
173};
174
175/*
176 * PCI Driver
177 */
178
179static int __devinit cmodio_pci_probe(struct pci_dev *dev,
180 const struct pci_device_id *id)
181{
182 struct cmodio_device *priv;
183 int ret;
184
185 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
186 if (!priv) {
187 dev_err(&dev->dev, "unable to allocate private data\n");
188 ret = -ENOMEM;
189 goto out_return;
190 }
191
192 pci_set_drvdata(dev, priv);
193 priv->pdev = dev;
194
195 /* Hardware Initialization */
196 ret = pci_enable_device(dev);
197 if (ret) {
198 dev_err(&dev->dev, "unable to enable device\n");
199 goto out_free_priv;
200 }
201
202 pci_set_master(dev);
203 ret = pci_request_regions(dev, DRV_NAME);
204 if (ret) {
205 dev_err(&dev->dev, "unable to request regions\n");
206 goto out_pci_disable_device;
207 }
208
209 /* Onboard configuration registers */
210 priv->ctrl = pci_ioremap_bar(dev, 4);
211 if (!priv->ctrl) {
212 dev_err(&dev->dev, "unable to remap onboard regs\n");
213 ret = -ENOMEM;
214 goto out_pci_release_regions;
215 }
216
217 /* Read the hex switch on the carrier board */
218 priv->hex = ioread8(&priv->ctrl->int_enable);
219
220 /* Add the MODULbus number (hex switch value) to the device's sysfs */
221 ret = sysfs_create_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
222 if (ret) {
223 dev_err(&dev->dev, "unable to create sysfs attributes\n");
224 goto out_unmap_ctrl;
225 }
226
227 /*
228	 * Disable all interrupt lines; each submodule will enable its
229	 * own interrupt line if needed
230 */
231 iowrite8(0xf, &priv->ctrl->int_disable);
232
233 /* Register drivers for all submodules */
234 ret = cmodio_probe_submodules(priv);
235 if (ret) {
236 dev_err(&dev->dev, "unable to probe submodules\n");
237 goto out_sysfs_remove_group;
238 }
239
240 return 0;
241
242out_sysfs_remove_group:
243 sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
244out_unmap_ctrl:
245 iounmap(priv->ctrl);
246out_pci_release_regions:
247 pci_release_regions(dev);
248out_pci_disable_device:
249 pci_disable_device(dev);
250out_free_priv:
251 kfree(priv);
252out_return:
253 return ret;
254}
255
256static void __devexit cmodio_pci_remove(struct pci_dev *dev)
257{
258 struct cmodio_device *priv = pci_get_drvdata(dev);
259
260 mfd_remove_devices(&dev->dev);
261 sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group);
262 iounmap(priv->ctrl);
263 pci_release_regions(dev);
264 pci_disable_device(dev);
265 kfree(priv);
266}
267
268#define PCI_VENDOR_ID_JANZ 0x13c3
269
270/* The list of devices that this module will support */
271static DEFINE_PCI_DEVICE_TABLE(cmodio_pci_ids) = {
272 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0101 },
273 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0100 },
274 { 0, }
275};
276MODULE_DEVICE_TABLE(pci, cmodio_pci_ids);
277
278static struct pci_driver cmodio_pci_driver = {
279 .name = DRV_NAME,
280 .id_table = cmodio_pci_ids,
281 .probe = cmodio_pci_probe,
282 .remove = __devexit_p(cmodio_pci_remove),
283};
284
285/*
286 * Module Init / Exit
287 */
288
289static int __init cmodio_init(void)
290{
291 return pci_register_driver(&cmodio_pci_driver);
292}
293
294static void __exit cmodio_exit(void)
295{
296 pci_unregister_driver(&cmodio_pci_driver);
297}
298
299MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
300MODULE_DESCRIPTION("Janz CMOD-IO PCI MODULbus Carrier Board Driver");
301MODULE_LICENSE("GPL");
302
303module_init(cmodio_init);
304module_exit(cmodio_exit);
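A MODULbus cell driver bound to one of the cells registered above would retrieve its resources and per-cell platform data roughly as follows. This is a sketch: the probe function and driver names are assumptions, and it relies on the mfd-core convention of this era where cell->platform_data/data_size is copied into the child's dev.platform_data. The resource indices follow cmodio_setup_subdevice() above.

	#include <linux/platform_device.h>
	#include <linux/ioport.h>
	#include <linux/mfd/janz.h>

	static int __devinit mymod_probe(struct platform_device *pdev)
	{
		struct janz_platform_data *pdata = pdev->dev.platform_data;
		struct resource *mem;
		int irq;

		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);	/* MODULbus window */
		irq = platform_get_irq(pdev, 0);			/* shared carrier-board IRQ */
		if (!mem || irq < 0)
			return -ENODEV;

		dev_info(&pdev->dev, "probing MODULbus module %d\n", pdata->modno);
		return 0;
	}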
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 85d63c04749b..f621bcea3d02 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -508,7 +508,7 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
508 max8925_reg_read(chip->i2c, MAX8925_ON_OFF_IRQ2); 508 max8925_reg_read(chip->i2c, MAX8925_ON_OFF_IRQ2);
509 max8925_reg_read(chip->rtc, MAX8925_RTC_IRQ); 509 max8925_reg_read(chip->rtc, MAX8925_RTC_IRQ);
510 max8925_reg_read(chip->adc, MAX8925_TSC_IRQ); 510 max8925_reg_read(chip->adc, MAX8925_TSC_IRQ);
511 /* mask all interrupts */ 511 /* mask all interrupts except for TSC */
512 max8925_reg_write(chip->rtc, MAX8925_ALARM0_CNTL, 0); 512 max8925_reg_write(chip->rtc, MAX8925_ALARM0_CNTL, 0);
513 max8925_reg_write(chip->rtc, MAX8925_ALARM1_CNTL, 0); 513 max8925_reg_write(chip->rtc, MAX8925_ALARM1_CNTL, 0);
514 max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ1_MASK, 0xff); 514 max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ1_MASK, 0xff);
@@ -516,7 +516,6 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
516 max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ1_MASK, 0xff); 516 max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ1_MASK, 0xff);
517 max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ2_MASK, 0xff); 517 max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ2_MASK, 0xff);
518 max8925_reg_write(chip->rtc, MAX8925_RTC_IRQ_MASK, 0xff); 518 max8925_reg_write(chip->rtc, MAX8925_RTC_IRQ_MASK, 0xff);
519 max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0xff);
520 519
521 mutex_init(&chip->irq_lock); 520 mutex_init(&chip->irq_lock);
522 chip->core_irq = irq; 521 chip->core_irq = irq;
@@ -547,7 +546,11 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq,
547 dev_err(chip->dev, "Failed to request core IRQ: %d\n", ret); 546 dev_err(chip->dev, "Failed to request core IRQ: %d\n", ret);
548 chip->core_irq = 0; 547 chip->core_irq = 0;
549 } 548 }
549
550tsc_irq: 550tsc_irq:
551 /* mask TSC interrupt */
552 max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0x0f);
553
551 if (!pdata->tsc_irq) { 554 if (!pdata->tsc_irq) {
552 dev_warn(chip->dev, "No interrupt support on TSC IRQ\n"); 555 dev_warn(chip->dev, "No interrupt support on TSC IRQ\n");
553 return 0; 556 return 0;
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index d9fd8785da4d..e73f3f5252a8 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -173,8 +173,6 @@ static int __devexit max8925_remove(struct i2c_client *client)
173 max8925_device_exit(chip); 173 max8925_device_exit(chip);
174 i2c_unregister_device(chip->adc); 174 i2c_unregister_device(chip->adc);
175 i2c_unregister_device(chip->rtc); 175 i2c_unregister_device(chip->rtc);
176 i2c_set_clientdata(chip->adc, NULL);
177 i2c_set_clientdata(chip->rtc, NULL);
178 i2c_set_clientdata(chip->i2c, NULL); 176 i2c_set_clientdata(chip->i2c, NULL);
179 kfree(chip); 177 kfree(chip);
180 return 0; 178 return 0;
diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c
index 1f68ecadddc2..fecf38a4f025 100644
--- a/drivers/mfd/mc13783-core.c
+++ b/drivers/mfd/mc13783-core.c
@@ -679,6 +679,10 @@ err_revision:
679 if (pdata->flags & MC13783_USE_TOUCHSCREEN) 679 if (pdata->flags & MC13783_USE_TOUCHSCREEN)
680 mc13783_add_subdevice(mc13783, "mc13783-ts"); 680 mc13783_add_subdevice(mc13783, "mc13783-ts");
681 681
682 if (pdata->flags & MC13783_USE_LED)
683 mc13783_add_subdevice_pdata(mc13783, "mc13783-led",
684 pdata->leds, sizeof(*pdata->leds));
685
682 return 0; 686 return 0;
683} 687}
684 688
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index a94b131a18ef..721948be12c7 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1228,6 +1228,7 @@ fail2:
1228 free_irq(client->irq, menelaus); 1228 free_irq(client->irq, menelaus);
1229 flush_scheduled_work(); 1229 flush_scheduled_work();
1230fail1: 1230fail1:
1231 i2c_set_clientdata(client, NULL);
1231 kfree(menelaus); 1232 kfree(menelaus);
1232 return err; 1233 return err;
1233} 1234}
@@ -1237,8 +1238,8 @@ static int __exit menelaus_remove(struct i2c_client *client)
1237 struct menelaus_chip *menelaus = i2c_get_clientdata(client); 1238 struct menelaus_chip *menelaus = i2c_get_clientdata(client);
1238 1239
1239 free_irq(client->irq, menelaus); 1240 free_irq(client->irq, menelaus);
1240 kfree(menelaus);
1241 i2c_set_clientdata(client, NULL); 1241 i2c_set_clientdata(client, NULL);
1242 kfree(menelaus);
1242 the_menelaus = NULL; 1243 the_menelaus = NULL;
1243 return 0; 1244 return 0;
1244} 1245}
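The da903x and menelaus hunks above both clear the I2C client data before freeing the structure it points to. A generic sketch of that teardown ordering (struct and function names hypothetical):

	#include <linux/i2c.h>
	#include <linux/slab.h>

	struct example_chip {
		struct i2c_client *client;
	};

	static int __devexit example_remove(struct i2c_client *client)
	{
		struct example_chip *chip = i2c_get_clientdata(client);

		free_irq(client->irq, chip);
		i2c_set_clientdata(client, NULL);	/* detach first... */
		kfree(chip);				/* ...then free */
		return 0;
	}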
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 8ffbb7a85a7e..7dd76bceaae8 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -48,7 +48,7 @@ static int mfd_add_device(struct device *parent, int id,
48 res[r].flags = cell->resources[r].flags; 48 res[r].flags = cell->resources[r].flags;
49 49
50 /* Find out base to use */ 50 /* Find out base to use */
51 if (cell->resources[r].flags & IORESOURCE_MEM) { 51 if ((cell->resources[r].flags & IORESOURCE_MEM) && mem_base) {
52 res[r].parent = mem_base; 52 res[r].parent = mem_base;
53 res[r].start = mem_base->start + 53 res[r].start = mem_base->start +
54 cell->resources[r].start; 54 cell->resources[r].start;
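The mfd-core hunk above only rebases IORESOURCE_MEM entries when a mem_base was actually supplied, so callers such as janz-cmodio can pass NULL and fill in absolute addresses themselves without a NULL dereference. A hedged sketch of such a caller (cell contents are illustrative):

	#include <linux/kernel.h>
	#include <linux/mfd/core.h>

	static struct mfd_cell example_cells[] = {
		{ .name = "example-child" },	/* resources already absolute, or none */
	};

	static int example_add_children(struct device *parent, int irq_base)
	{
		/* NULL mem_base: memory resources are passed through, not rebased */
		return mfd_add_devices(parent, -1, example_cells,
				       ARRAY_SIZE(example_cells), NULL, irq_base);
	}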
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
index fe8f922f6654..aed0d2a9b032 100644
--- a/drivers/mfd/pcf50633-adc.c
+++ b/drivers/mfd/pcf50633-adc.c
@@ -30,13 +30,13 @@
30struct pcf50633_adc_request { 30struct pcf50633_adc_request {
31 int mux; 31 int mux;
32 int avg; 32 int avg;
33 int result;
34 void (*callback)(struct pcf50633 *, void *, int); 33 void (*callback)(struct pcf50633 *, void *, int);
35 void *callback_param; 34 void *callback_param;
35};
36 36
37 /* Used in case of sync requests */ 37struct pcf50633_adc_sync_request {
38 int result;
38 struct completion completion; 39 struct completion completion;
39
40}; 40};
41 41
42#define PCF50633_MAX_ADC_FIFO_DEPTH 8 42#define PCF50633_MAX_ADC_FIFO_DEPTH 8
@@ -109,10 +109,10 @@ adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req)
109 return 0; 109 return 0;
110} 110}
111 111
112static void 112static void pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param,
113pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, int result) 113 int result)
114{ 114{
115 struct pcf50633_adc_request *req = param; 115 struct pcf50633_adc_sync_request *req = param;
116 116
117 req->result = result; 117 req->result = result;
118 complete(&req->completion); 118 complete(&req->completion);
@@ -120,28 +120,19 @@ pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, int result)
120 120
121int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg) 121int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg)
122{ 122{
123 struct pcf50633_adc_request *req; 123 struct pcf50633_adc_sync_request req;
124 int err; 124 int ret;
125 125
126 /* req is freed when the result is ready, in interrupt handler */ 126 init_completion(&req.completion);
127 req = kzalloc(sizeof(*req), GFP_KERNEL);
128 if (!req)
129 return -ENOMEM;
130
131 req->mux = mux;
132 req->avg = avg;
133 req->callback = pcf50633_adc_sync_read_callback;
134 req->callback_param = req;
135 127
136 init_completion(&req->completion); 128 ret = pcf50633_adc_async_read(pcf, mux, avg,
137 err = adc_enqueue_request(pcf, req); 129 pcf50633_adc_sync_read_callback, &req);
138 if (err) 130 if (ret)
139 return err; 131 return ret;
140 132
141 wait_for_completion(&req->completion); 133 wait_for_completion(&req.completion);
142 134
143 /* FIXME by this time req might be already freed */ 135 return req.result;
144 return req->result;
145} 136}
146EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read); 137EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read);
147 138
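The rewritten pcf50633_adc_sync_read() keeps the request on the caller's stack and lets the async callback fill in the result, which removes the use-after-free the old FIXME pointed at. The pattern in isolation (skeleton only; the asynchronous kick-off is elided, and names are illustrative):

	#include <linux/completion.h>

	struct my_sync_request {
		int result;
		struct completion done;
	};

	static void my_async_callback(void *param, int result)
	{
		struct my_sync_request *req = param;

		req->result = result;
		complete(&req->done);	/* caller's stack frame is still alive */
	}

	static int my_sync_op(void)
	{
		struct my_sync_request req;

		init_completion(&req.done);
		/* start the asynchronous operation here, passing &req as context */
		wait_for_completion(&req.done);
		return req.result;	/* safe: we only return after completion */
	}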
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 63a614d696c1..704736e6e9b9 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -21,16 +21,16 @@
21#include <linux/workqueue.h> 21#include <linux/workqueue.h>
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/i2c.h> 23#include <linux/i2c.h>
24#include <linux/irq.h>
25#include <linux/slab.h> 24#include <linux/slab.h>
26 25
27#include <linux/mfd/pcf50633/core.h> 26#include <linux/mfd/pcf50633/core.h>
28 27
29/* Two MBCS registers used during cold start */ 28int pcf50633_irq_init(struct pcf50633 *pcf, int irq);
30#define PCF50633_REG_MBCS1 0x4b 29void pcf50633_irq_free(struct pcf50633 *pcf);
31#define PCF50633_REG_MBCS2 0x4c 30#ifdef CONFIG_PM
32#define PCF50633_MBCS1_USBPRES 0x01 31int pcf50633_irq_suspend(struct pcf50633 *pcf);
33#define PCF50633_MBCS1_ADAPTPRES 0x01 32int pcf50633_irq_resume(struct pcf50633 *pcf);
33#endif
34 34
35static int __pcf50633_read(struct pcf50633 *pcf, u8 reg, int num, u8 *data) 35static int __pcf50633_read(struct pcf50633 *pcf, u8 reg, int num, u8 *data)
36{ 36{
@@ -215,244 +215,6 @@ static struct attribute_group pcf_attr_group = {
215 .attrs = pcf_sysfs_entries, 215 .attrs = pcf_sysfs_entries,
216}; 216};
217 217
218int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
219 void (*handler) (int, void *), void *data)
220{
221 if (irq < 0 || irq > PCF50633_NUM_IRQ || !handler)
222 return -EINVAL;
223
224 if (WARN_ON(pcf->irq_handler[irq].handler))
225 return -EBUSY;
226
227 mutex_lock(&pcf->lock);
228 pcf->irq_handler[irq].handler = handler;
229 pcf->irq_handler[irq].data = data;
230 mutex_unlock(&pcf->lock);
231
232 return 0;
233}
234EXPORT_SYMBOL_GPL(pcf50633_register_irq);
235
236int pcf50633_free_irq(struct pcf50633 *pcf, int irq)
237{
238 if (irq < 0 || irq > PCF50633_NUM_IRQ)
239 return -EINVAL;
240
241 mutex_lock(&pcf->lock);
242 pcf->irq_handler[irq].handler = NULL;
243 mutex_unlock(&pcf->lock);
244
245 return 0;
246}
247EXPORT_SYMBOL_GPL(pcf50633_free_irq);
248
249static int __pcf50633_irq_mask_set(struct pcf50633 *pcf, int irq, u8 mask)
250{
251 u8 reg, bits, tmp;
252 int ret = 0, idx;
253
254 idx = irq >> 3;
255 reg = PCF50633_REG_INT1M + idx;
256 bits = 1 << (irq & 0x07);
257
258 mutex_lock(&pcf->lock);
259
260 if (mask) {
261 ret = __pcf50633_read(pcf, reg, 1, &tmp);
262 if (ret < 0)
263 goto out;
264
265 tmp |= bits;
266
267 ret = __pcf50633_write(pcf, reg, 1, &tmp);
268 if (ret < 0)
269 goto out;
270
271 pcf->mask_regs[idx] &= ~bits;
272 pcf->mask_regs[idx] |= bits;
273 } else {
274 ret = __pcf50633_read(pcf, reg, 1, &tmp);
275 if (ret < 0)
276 goto out;
277
278 tmp &= ~bits;
279
280 ret = __pcf50633_write(pcf, reg, 1, &tmp);
281 if (ret < 0)
282 goto out;
283
284 pcf->mask_regs[idx] &= ~bits;
285 }
286out:
287 mutex_unlock(&pcf->lock);
288
289 return ret;
290}
291
292int pcf50633_irq_mask(struct pcf50633 *pcf, int irq)
293{
294 dev_dbg(pcf->dev, "Masking IRQ %d\n", irq);
295
296 return __pcf50633_irq_mask_set(pcf, irq, 1);
297}
298EXPORT_SYMBOL_GPL(pcf50633_irq_mask);
299
300int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq)
301{
302 dev_dbg(pcf->dev, "Unmasking IRQ %d\n", irq);
303
304 return __pcf50633_irq_mask_set(pcf, irq, 0);
305}
306EXPORT_SYMBOL_GPL(pcf50633_irq_unmask);
307
308int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq)
309{
310 u8 reg, bits;
311
312 reg = irq >> 3;
313 bits = 1 << (irq & 0x07);
314
315 return pcf->mask_regs[reg] & bits;
316}
317EXPORT_SYMBOL_GPL(pcf50633_irq_mask_get);
318
319static void pcf50633_irq_call_handler(struct pcf50633 *pcf, int irq)
320{
321 if (pcf->irq_handler[irq].handler)
322 pcf->irq_handler[irq].handler(irq, pcf->irq_handler[irq].data);
323}
324
325/* Maximum amount of time ONKEY is held before emergency action is taken */
326#define PCF50633_ONKEY1S_TIMEOUT 8
327
328static void pcf50633_irq_worker(struct work_struct *work)
329{
330 struct pcf50633 *pcf;
331 int ret, i, j;
332 u8 pcf_int[5], chgstat;
333
334 pcf = container_of(work, struct pcf50633, irq_work);
335
336 /* Read the 5 INT regs in one transaction */
337 ret = pcf50633_read_block(pcf, PCF50633_REG_INT1,
338 ARRAY_SIZE(pcf_int), pcf_int);
339 if (ret != ARRAY_SIZE(pcf_int)) {
340 dev_err(pcf->dev, "Error reading INT registers\n");
341
342 /*
343 * If this doesn't ACK the interrupt to the chip, we'll be
344 * called once again as we're level triggered.
345 */
346 goto out;
347 }
348
349 /* defeat 8s death from lowsys on A5 */
350 pcf50633_reg_write(pcf, PCF50633_REG_OOCSHDWN, 0x04);
351
352 /* We immediately read the usb and adapter status. We thus make sure
353 * only of USBINS/USBREM IRQ handlers are called */
354 if (pcf_int[0] & (PCF50633_INT1_USBINS | PCF50633_INT1_USBREM)) {
355 chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
356 if (chgstat & (0x3 << 4))
357 pcf_int[0] &= ~(1 << PCF50633_INT1_USBREM);
358 else
359 pcf_int[0] &= ~(1 << PCF50633_INT1_USBINS);
360 }
361
362 /* Make sure only one of ADPINS or ADPREM is set */
363 if (pcf_int[0] & (PCF50633_INT1_ADPINS | PCF50633_INT1_ADPREM)) {
364 chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
365 if (chgstat & (0x3 << 4))
366 pcf_int[0] &= ~(1 << PCF50633_INT1_ADPREM);
367 else
368 pcf_int[0] &= ~(1 << PCF50633_INT1_ADPINS);
369 }
370
371 dev_dbg(pcf->dev, "INT1=0x%02x INT2=0x%02x INT3=0x%02x "
372 "INT4=0x%02x INT5=0x%02x\n", pcf_int[0],
373 pcf_int[1], pcf_int[2], pcf_int[3], pcf_int[4]);
374
375 /* Some revisions of the chip don't have a 8s standby mode on
376 * ONKEY1S press. We try to manually do it in such cases. */
377 if ((pcf_int[0] & PCF50633_INT1_SECOND) && pcf->onkey1s_held) {
378 dev_info(pcf->dev, "ONKEY1S held for %d secs\n",
379 pcf->onkey1s_held);
380 if (pcf->onkey1s_held++ == PCF50633_ONKEY1S_TIMEOUT)
381 if (pcf->pdata->force_shutdown)
382 pcf->pdata->force_shutdown(pcf);
383 }
384
385 if (pcf_int[2] & PCF50633_INT3_ONKEY1S) {
386 dev_info(pcf->dev, "ONKEY1S held\n");
387 pcf->onkey1s_held = 1 ;
388
389 /* Unmask IRQ_SECOND */
390 pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT1M,
391 PCF50633_INT1_SECOND);
392
393 /* Unmask IRQ_ONKEYR */
394 pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT2M,
395 PCF50633_INT2_ONKEYR);
396 }
397
398 if ((pcf_int[1] & PCF50633_INT2_ONKEYR) && pcf->onkey1s_held) {
399 pcf->onkey1s_held = 0;
400
401 /* Mask SECOND and ONKEYR interrupts */
402 if (pcf->mask_regs[0] & PCF50633_INT1_SECOND)
403 pcf50633_reg_set_bit_mask(pcf,
404 PCF50633_REG_INT1M,
405 PCF50633_INT1_SECOND,
406 PCF50633_INT1_SECOND);
407
408 if (pcf->mask_regs[1] & PCF50633_INT2_ONKEYR)
409 pcf50633_reg_set_bit_mask(pcf,
410 PCF50633_REG_INT2M,
411 PCF50633_INT2_ONKEYR,
412 PCF50633_INT2_ONKEYR);
413 }
414
415 /* Have we just resumed ? */
416 if (pcf->is_suspended) {
417 pcf->is_suspended = 0;
418
419 /* Set the resume reason filtering out non resumers */
420 for (i = 0; i < ARRAY_SIZE(pcf_int); i++)
421 pcf->resume_reason[i] = pcf_int[i] &
422 pcf->pdata->resumers[i];
423
424 /* Make sure we don't pass on any ONKEY events to
425 * userspace now */
426 pcf_int[1] &= ~(PCF50633_INT2_ONKEYR | PCF50633_INT2_ONKEYF);
427 }
428
429 for (i = 0; i < ARRAY_SIZE(pcf_int); i++) {
430 /* Unset masked interrupts */
431 pcf_int[i] &= ~pcf->mask_regs[i];
432
433 for (j = 0; j < 8 ; j++)
434 if (pcf_int[i] & (1 << j))
435 pcf50633_irq_call_handler(pcf, (i * 8) + j);
436 }
437
438out:
439 put_device(pcf->dev);
440 enable_irq(pcf->irq);
441}
442
443static irqreturn_t pcf50633_irq(int irq, void *data)
444{
445 struct pcf50633 *pcf = data;
446
447 dev_dbg(pcf->dev, "pcf50633_irq\n");
448
449 get_device(pcf->dev);
450 disable_irq_nosync(pcf->irq);
451 queue_work(pcf->work_queue, &pcf->irq_work);
452
453 return IRQ_HANDLED;
454}
455
456static void 218static void
457pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name, 219pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
458 struct platform_device **pdev) 220 struct platform_device **pdev)
@@ -479,70 +241,17 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
479static int pcf50633_suspend(struct i2c_client *client, pm_message_t state) 241static int pcf50633_suspend(struct i2c_client *client, pm_message_t state)
480{ 242{
481 struct pcf50633 *pcf; 243 struct pcf50633 *pcf;
482 int ret = 0, i;
483 u8 res[5];
484
485 pcf = i2c_get_clientdata(client); 244 pcf = i2c_get_clientdata(client);
486 245
487 /* Make sure our interrupt handlers are not called 246 return pcf50633_irq_suspend(pcf);
488 * henceforth */
489 disable_irq(pcf->irq);
490
491 /* Make sure that any running IRQ worker has quit */
492 cancel_work_sync(&pcf->irq_work);
493
494 /* Save the masks */
495 ret = pcf50633_read_block(pcf, PCF50633_REG_INT1M,
496 ARRAY_SIZE(pcf->suspend_irq_masks),
497 pcf->suspend_irq_masks);
498 if (ret < 0) {
499 dev_err(pcf->dev, "error saving irq masks\n");
500 goto out;
501 }
502
503 /* Write wakeup irq masks */
504 for (i = 0; i < ARRAY_SIZE(res); i++)
505 res[i] = ~pcf->pdata->resumers[i];
506
507 ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
508 ARRAY_SIZE(res), &res[0]);
509 if (ret < 0) {
510 dev_err(pcf->dev, "error writing wakeup irq masks\n");
511 goto out;
512 }
513
514 pcf->is_suspended = 1;
515
516out:
517 return ret;
518} 247}
519 248
520static int pcf50633_resume(struct i2c_client *client) 249static int pcf50633_resume(struct i2c_client *client)
521{ 250{
522 struct pcf50633 *pcf; 251 struct pcf50633 *pcf;
523 int ret;
524
525 pcf = i2c_get_clientdata(client); 252 pcf = i2c_get_clientdata(client);
526 253
527 /* Write the saved mask registers */ 254 return pcf50633_irq_resume(pcf);
528 ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
529 ARRAY_SIZE(pcf->suspend_irq_masks),
530 pcf->suspend_irq_masks);
531 if (ret < 0)
532 dev_err(pcf->dev, "Error restoring saved suspend masks\n");
533
534 /* Restore regulators' state */
535
536
537 get_device(pcf->dev);
538
539 /*
540 * Clear any pending interrupts and set resume reason if any.
541 * This will leave with enable_irq()
542 */
543 pcf50633_irq_worker(&pcf->irq_work);
544
545 return 0;
546} 255}
547#else 256#else
548#define pcf50633_suspend NULL 257#define pcf50633_suspend NULL
@@ -573,43 +282,19 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
573 i2c_set_clientdata(client, pcf); 282 i2c_set_clientdata(client, pcf);
574 pcf->dev = &client->dev; 283 pcf->dev = &client->dev;
575 pcf->i2c_client = client; 284 pcf->i2c_client = client;
576 pcf->irq = client->irq;
577 pcf->work_queue = create_singlethread_workqueue("pcf50633");
578
579 if (!pcf->work_queue) {
580 dev_err(&client->dev, "Failed to alloc workqueue\n");
581 ret = -ENOMEM;
582 goto err_free;
583 }
584
585 INIT_WORK(&pcf->irq_work, pcf50633_irq_worker);
586 285
587 version = pcf50633_reg_read(pcf, 0); 286 version = pcf50633_reg_read(pcf, 0);
588 variant = pcf50633_reg_read(pcf, 1); 287 variant = pcf50633_reg_read(pcf, 1);
589 if (version < 0 || variant < 0) { 288 if (version < 0 || variant < 0) {
590 dev_err(pcf->dev, "Unable to probe pcf50633\n"); 289 dev_err(pcf->dev, "Unable to probe pcf50633\n");
591 ret = -ENODEV; 290 ret = -ENODEV;
592 goto err_destroy_workqueue; 291 goto err_free;
593 } 292 }
594 293
595 dev_info(pcf->dev, "Probed device version %d variant %d\n", 294 dev_info(pcf->dev, "Probed device version %d variant %d\n",
596 version, variant); 295 version, variant);
597 296
598 /* Enable all interrupts except RTC SECOND */ 297 pcf50633_irq_init(pcf, client->irq);
599 pcf->mask_regs[0] = 0x80;
600 pcf50633_reg_write(pcf, PCF50633_REG_INT1M, pcf->mask_regs[0]);
601 pcf50633_reg_write(pcf, PCF50633_REG_INT2M, 0x00);
602 pcf50633_reg_write(pcf, PCF50633_REG_INT3M, 0x00);
603 pcf50633_reg_write(pcf, PCF50633_REG_INT4M, 0x00);
604 pcf50633_reg_write(pcf, PCF50633_REG_INT5M, 0x00);
605
606 ret = request_irq(client->irq, pcf50633_irq,
607 IRQF_TRIGGER_LOW, "pcf50633", pcf);
608
609 if (ret) {
610 dev_err(pcf->dev, "Failed to request IRQ %d\n", ret);
611 goto err_destroy_workqueue;
612 }
613 298
614 /* Create sub devices */ 299 /* Create sub devices */
615 pcf50633_client_dev_register(pcf, "pcf50633-input", 300 pcf50633_client_dev_register(pcf, "pcf50633-input",
@@ -620,6 +305,9 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
620 &pcf->mbc_pdev); 305 &pcf->mbc_pdev);
621 pcf50633_client_dev_register(pcf, "pcf50633-adc", 306 pcf50633_client_dev_register(pcf, "pcf50633-adc",
622 &pcf->adc_pdev); 307 &pcf->adc_pdev);
308 pcf50633_client_dev_register(pcf, "pcf50633-backlight",
309 &pcf->bl_pdev);
310
623 311
624 for (i = 0; i < PCF50633_NUM_REGULATORS; i++) { 312 for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
625 struct platform_device *pdev; 313 struct platform_device *pdev;
@@ -638,10 +326,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
638 platform_device_add(pdev); 326 platform_device_add(pdev);
639 } 327 }
640 328
641 if (enable_irq_wake(client->irq) < 0)
642 dev_err(pcf->dev, "IRQ %u cannot be enabled as wake-up source"
643 "in this hardware revision", client->irq);
644
645 ret = sysfs_create_group(&client->dev.kobj, &pcf_attr_group); 329 ret = sysfs_create_group(&client->dev.kobj, &pcf_attr_group);
646 if (ret) 330 if (ret)
647 dev_err(pcf->dev, "error creating sysfs entries\n"); 331 dev_err(pcf->dev, "error creating sysfs entries\n");
@@ -651,8 +335,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
651 335
652 return 0; 336 return 0;
653 337
654err_destroy_workqueue:
655 destroy_workqueue(pcf->work_queue);
656err_free: 338err_free:
657 i2c_set_clientdata(client, NULL); 339 i2c_set_clientdata(client, NULL);
658 kfree(pcf); 340 kfree(pcf);
@@ -665,8 +347,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
665 struct pcf50633 *pcf = i2c_get_clientdata(client); 347 struct pcf50633 *pcf = i2c_get_clientdata(client);
666 int i; 348 int i;
667 349
668 free_irq(pcf->irq, pcf); 350 pcf50633_irq_free(pcf);
669 destroy_workqueue(pcf->work_queue);
670 351
671 platform_device_unregister(pcf->input_pdev); 352 platform_device_unregister(pcf->input_pdev);
672 platform_device_unregister(pcf->rtc_pdev); 353 platform_device_unregister(pcf->rtc_pdev);
@@ -676,6 +357,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
676 for (i = 0; i < PCF50633_NUM_REGULATORS; i++) 357 for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
677 platform_device_unregister(pcf->regulator_pdev[i]); 358 platform_device_unregister(pcf->regulator_pdev[i]);
678 359
360 i2c_set_clientdata(client, NULL);
679 kfree(pcf); 361 kfree(pcf);
680 362
681 return 0; 363 return 0;
diff --git a/drivers/mfd/pcf50633-irq.c b/drivers/mfd/pcf50633-irq.c
new file mode 100644
index 000000000000..1b0192f1efff
--- /dev/null
+++ b/drivers/mfd/pcf50633-irq.c
@@ -0,0 +1,318 @@
1/* NXP PCF50633 Power Management Unit (PMU) driver
2 *
3 * (C) 2006-2008 by Openmoko, Inc.
4 * Author: Harald Welte <laforge@openmoko.org>
5 * Balaji Rao <balajirrao@openmoko.org>
6 * All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14
15#include <linux/interrupt.h>
16#include <linux/kernel.h>
17#include <linux/mutex.h>
18#include <linux/slab.h>
19
20#include <linux/mfd/pcf50633/core.h>
21
22/* Two MBCS registers used during cold start */
23#define PCF50633_REG_MBCS1 0x4b
24#define PCF50633_REG_MBCS2 0x4c
25#define PCF50633_MBCS1_USBPRES 0x01
26#define PCF50633_MBCS1_ADAPTPRES 0x01
27
28int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
29 void (*handler) (int, void *), void *data)
30{
31 if (irq < 0 || irq >= PCF50633_NUM_IRQ || !handler)
32 return -EINVAL;
33
34 if (WARN_ON(pcf->irq_handler[irq].handler))
35 return -EBUSY;
36
37 mutex_lock(&pcf->lock);
38 pcf->irq_handler[irq].handler = handler;
39 pcf->irq_handler[irq].data = data;
40 mutex_unlock(&pcf->lock);
41
42 return 0;
43}
44EXPORT_SYMBOL_GPL(pcf50633_register_irq);
45
46int pcf50633_free_irq(struct pcf50633 *pcf, int irq)
47{
48 if (irq < 0 || irq >= PCF50633_NUM_IRQ)
49 return -EINVAL;
50
51 mutex_lock(&pcf->lock);
52 pcf->irq_handler[irq].handler = NULL;
53 mutex_unlock(&pcf->lock);
54
55 return 0;
56}
57EXPORT_SYMBOL_GPL(pcf50633_free_irq);
58
59static int __pcf50633_irq_mask_set(struct pcf50633 *pcf, int irq, u8 mask)
60{
61 u8 reg, bit;
62 int ret = 0, idx;
63
64 idx = irq >> 3;
65 reg = PCF50633_REG_INT1M + idx;
66 bit = 1 << (irq & 0x07);
67
68 pcf50633_reg_set_bit_mask(pcf, reg, bit, mask ? bit : 0);
69
70 mutex_lock(&pcf->lock);
71
72 if (mask)
73 pcf->mask_regs[idx] |= bit;
74 else
75 pcf->mask_regs[idx] &= ~bit;
76
77 mutex_unlock(&pcf->lock);
78
79 return ret;
80}
81
82int pcf50633_irq_mask(struct pcf50633 *pcf, int irq)
83{
84 dev_dbg(pcf->dev, "Masking IRQ %d\n", irq);
85
86 return __pcf50633_irq_mask_set(pcf, irq, 1);
87}
88EXPORT_SYMBOL_GPL(pcf50633_irq_mask);
89
90int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq)
91{
92 dev_dbg(pcf->dev, "Unmasking IRQ %d\n", irq);
93
94 return __pcf50633_irq_mask_set(pcf, irq, 0);
95}
96EXPORT_SYMBOL_GPL(pcf50633_irq_unmask);
97
98int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq)
99{
100 u8 reg, bits;
101
102 reg = irq >> 3;
103 bits = 1 << (irq & 0x07);
104
105 return pcf->mask_regs[reg] & bits;
106}
107EXPORT_SYMBOL_GPL(pcf50633_irq_mask_get);
108
109static void pcf50633_irq_call_handler(struct pcf50633 *pcf, int irq)
110{
111 if (pcf->irq_handler[irq].handler)
112 pcf->irq_handler[irq].handler(irq, pcf->irq_handler[irq].data);
113}
114
115/* Maximum amount of time ONKEY is held before emergency action is taken */
116#define PCF50633_ONKEY1S_TIMEOUT 8
117
118static irqreturn_t pcf50633_irq(int irq, void *data)
119{
120 struct pcf50633 *pcf = data;
121 int ret, i, j;
122 u8 pcf_int[5], chgstat;
123
124 /* Read the 5 INT regs in one transaction */
125 ret = pcf50633_read_block(pcf, PCF50633_REG_INT1,
126 ARRAY_SIZE(pcf_int), pcf_int);
127 if (ret != ARRAY_SIZE(pcf_int)) {
128 dev_err(pcf->dev, "Error reading INT registers\n");
129
130 /*
131 * If this doesn't ACK the interrupt to the chip, we'll be
132 * called once again as we're level triggered.
133 */
134 goto out;
135 }
136
137 /* defeat 8s death from lowsys on A5 */
138 pcf50633_reg_write(pcf, PCF50633_REG_OOCSHDWN, 0x04);
139
140	/* We immediately read the USB and adapter status, so that only one
141	 * of the USBINS/USBREM IRQ handlers is called */
142 if (pcf_int[0] & (PCF50633_INT1_USBINS | PCF50633_INT1_USBREM)) {
143 chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
144 if (chgstat & (0x3 << 4))
145 pcf_int[0] &= ~PCF50633_INT1_USBREM;
146 else
147 pcf_int[0] &= ~PCF50633_INT1_USBINS;
148 }
149
150 /* Make sure only one of ADPINS or ADPREM is set */
151 if (pcf_int[0] & (PCF50633_INT1_ADPINS | PCF50633_INT1_ADPREM)) {
152 chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2);
153 if (chgstat & (0x3 << 4))
154 pcf_int[0] &= ~PCF50633_INT1_ADPREM;
155 else
156 pcf_int[0] &= ~PCF50633_INT1_ADPINS;
157 }
158
159 dev_dbg(pcf->dev, "INT1=0x%02x INT2=0x%02x INT3=0x%02x "
160 "INT4=0x%02x INT5=0x%02x\n", pcf_int[0],
161 pcf_int[1], pcf_int[2], pcf_int[3], pcf_int[4]);
162
163	/* Some revisions of the chip don't have an 8s standby mode on
164	 * ONKEY1S press. We try to do it manually in such cases. */
165 if ((pcf_int[0] & PCF50633_INT1_SECOND) && pcf->onkey1s_held) {
166 dev_info(pcf->dev, "ONKEY1S held for %d secs\n",
167 pcf->onkey1s_held);
168 if (pcf->onkey1s_held++ == PCF50633_ONKEY1S_TIMEOUT)
169 if (pcf->pdata->force_shutdown)
170 pcf->pdata->force_shutdown(pcf);
171 }
172
173 if (pcf_int[2] & PCF50633_INT3_ONKEY1S) {
174 dev_info(pcf->dev, "ONKEY1S held\n");
175		pcf->onkey1s_held = 1;
176
177 /* Unmask IRQ_SECOND */
178 pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT1M,
179 PCF50633_INT1_SECOND);
180
181 /* Unmask IRQ_ONKEYR */
182 pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT2M,
183 PCF50633_INT2_ONKEYR);
184 }
185
186 if ((pcf_int[1] & PCF50633_INT2_ONKEYR) && pcf->onkey1s_held) {
187 pcf->onkey1s_held = 0;
188
189 /* Mask SECOND and ONKEYR interrupts */
190 if (pcf->mask_regs[0] & PCF50633_INT1_SECOND)
191 pcf50633_reg_set_bit_mask(pcf,
192 PCF50633_REG_INT1M,
193 PCF50633_INT1_SECOND,
194 PCF50633_INT1_SECOND);
195
196 if (pcf->mask_regs[1] & PCF50633_INT2_ONKEYR)
197 pcf50633_reg_set_bit_mask(pcf,
198 PCF50633_REG_INT2M,
199 PCF50633_INT2_ONKEYR,
200 PCF50633_INT2_ONKEYR);
201 }
202
203 /* Have we just resumed ? */
204 if (pcf->is_suspended) {
205 pcf->is_suspended = 0;
206
207 /* Set the resume reason filtering out non resumers */
208 for (i = 0; i < ARRAY_SIZE(pcf_int); i++)
209 pcf->resume_reason[i] = pcf_int[i] &
210 pcf->pdata->resumers[i];
211
212 /* Make sure we don't pass on any ONKEY events to
213 * userspace now */
214 pcf_int[1] &= ~(PCF50633_INT2_ONKEYR | PCF50633_INT2_ONKEYF);
215 }
216
217 for (i = 0; i < ARRAY_SIZE(pcf_int); i++) {
218 /* Unset masked interrupts */
219 pcf_int[i] &= ~pcf->mask_regs[i];
220
221 for (j = 0; j < 8 ; j++)
222 if (pcf_int[i] & (1 << j))
223 pcf50633_irq_call_handler(pcf, (i * 8) + j);
224 }
225
226out:
227 return IRQ_HANDLED;
228}
229
230#ifdef CONFIG_PM
231
232int pcf50633_irq_suspend(struct pcf50633 *pcf)
233{
234 int ret;
235 int i;
236 u8 res[5];
237
238
239 /* Make sure our interrupt handlers are not called
240 * henceforth */
241 disable_irq(pcf->irq);
242
243 /* Save the masks */
244 ret = pcf50633_read_block(pcf, PCF50633_REG_INT1M,
245 ARRAY_SIZE(pcf->suspend_irq_masks),
246 pcf->suspend_irq_masks);
247 if (ret < 0) {
248 dev_err(pcf->dev, "error saving irq masks\n");
249 goto out;
250 }
251
252 /* Write wakeup irq masks */
253 for (i = 0; i < ARRAY_SIZE(res); i++)
254 res[i] = ~pcf->pdata->resumers[i];
255
256 ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
257 ARRAY_SIZE(res), &res[0]);
258 if (ret < 0) {
259 dev_err(pcf->dev, "error writing wakeup irq masks\n");
260 goto out;
261 }
262
263 pcf->is_suspended = 1;
264
265out:
266 return ret;
267}
268
269int pcf50633_irq_resume(struct pcf50633 *pcf)
270{
271 int ret;
272
273 /* Write the saved mask registers */
274 ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M,
275 ARRAY_SIZE(pcf->suspend_irq_masks),
276 pcf->suspend_irq_masks);
277 if (ret < 0)
278 dev_err(pcf->dev, "Error restoring saved suspend masks\n");
279
280 enable_irq(pcf->irq);
281
282 return ret;
283}
284
285#endif
286
287int pcf50633_irq_init(struct pcf50633 *pcf, int irq)
288{
289 int ret;
290
291 pcf->irq = irq;
292
293 /* Enable all interrupts except RTC SECOND */
294 pcf->mask_regs[0] = 0x80;
295 pcf50633_reg_write(pcf, PCF50633_REG_INT1M, pcf->mask_regs[0]);
296 pcf50633_reg_write(pcf, PCF50633_REG_INT2M, 0x00);
297 pcf50633_reg_write(pcf, PCF50633_REG_INT3M, 0x00);
298 pcf50633_reg_write(pcf, PCF50633_REG_INT4M, 0x00);
299 pcf50633_reg_write(pcf, PCF50633_REG_INT5M, 0x00);
300
301 ret = request_threaded_irq(irq, NULL, pcf50633_irq,
302 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
303 "pcf50633", pcf);
304
305 if (ret)
306 dev_err(pcf->dev, "Failed to request IRQ %d\n", ret);
307
308 if (enable_irq_wake(irq) < 0)
309		dev_err(pcf->dev, "IRQ %u cannot be enabled as wake-up source "
310			"in this hardware revision", irq);
311
312 return ret;
313}
314
315void pcf50633_irq_free(struct pcf50633 *pcf)
316{
317 free_irq(pcf->irq, pcf);
318}
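Subdevice drivers keep using the same handler registration API, now served from this file. A hedged usage sketch (the IRQ constant is assumed to be one of the enum values in <linux/mfd/pcf50633/core.h>; the handler and hookup functions are hypothetical):

	#include <linux/platform_device.h>
	#include <linux/mfd/pcf50633/core.h>

	static void example_onkey_handler(int irq, void *data)
	{
		struct platform_device *pdev = data;

		dev_info(&pdev->dev, "pcf50633 irq %d fired\n", irq);
	}

	static int example_hookup(struct pcf50633 *pcf, struct platform_device *pdev)
	{
		/* PCF50633_IRQ_ONKEYF: assumed name for the ONKEY falling-edge IRQ */
		return pcf50633_register_irq(pcf, PCF50633_IRQ_ONKEYF,
					     example_onkey_handler, pdev);
	}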
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
new file mode 100644
index 000000000000..50922975bda3
--- /dev/null
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -0,0 +1,123 @@
1/*
2 * RDC321x MFD southbridge driver
3 *
4 * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org>
5 * Copyright (C) 2010 Bernhard Loos <bernhardloos@googlemail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 *
21 */
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/platform_device.h>
26#include <linux/pci.h>
27#include <linux/mfd/core.h>
28#include <linux/mfd/rdc321x.h>
29
30static struct rdc321x_wdt_pdata rdc321x_wdt_pdata;
31
32static struct resource rdc321x_wdt_resource[] = {
33 {
34 .name = "wdt-reg",
35 .start = RDC321X_WDT_CTRL,
36 .end = RDC321X_WDT_CTRL + 0x3,
37 .flags = IORESOURCE_IO,
38 }
39};
40
41static struct rdc321x_gpio_pdata rdc321x_gpio_pdata = {
42 .max_gpios = RDC321X_MAX_GPIO,
43};
44
45static struct resource rdc321x_gpio_resources[] = {
46 {
47 .name = "gpio-reg1",
48 .start = RDC321X_GPIO_CTRL_REG1,
49 .end = RDC321X_GPIO_CTRL_REG1 + 0x7,
50 .flags = IORESOURCE_IO,
51 }, {
52 .name = "gpio-reg2",
53 .start = RDC321X_GPIO_CTRL_REG2,
54 .end = RDC321X_GPIO_CTRL_REG2 + 0x7,
55 .flags = IORESOURCE_IO,
56 }
57};
58
59static struct mfd_cell rdc321x_sb_cells[] = {
60 {
61 .name = "rdc321x-wdt",
62 .resources = rdc321x_wdt_resource,
63 .num_resources = ARRAY_SIZE(rdc321x_wdt_resource),
64 .driver_data = &rdc321x_wdt_pdata,
65 }, {
66 .name = "rdc321x-gpio",
67 .resources = rdc321x_gpio_resources,
68 .num_resources = ARRAY_SIZE(rdc321x_gpio_resources),
69 .driver_data = &rdc321x_gpio_pdata,
70 },
71};
72
73static int __devinit rdc321x_sb_probe(struct pci_dev *pdev,
74 const struct pci_device_id *ent)
75{
76 int err;
77
78 err = pci_enable_device(pdev);
79 if (err) {
80 dev_err(&pdev->dev, "failed to enable device\n");
81 return err;
82 }
83
84 rdc321x_gpio_pdata.sb_pdev = pdev;
85 rdc321x_wdt_pdata.sb_pdev = pdev;
86
87 return mfd_add_devices(&pdev->dev, -1,
88 rdc321x_sb_cells, ARRAY_SIZE(rdc321x_sb_cells), NULL, 0);
89}
90
91static void __devexit rdc321x_sb_remove(struct pci_dev *pdev)
92{
93 mfd_remove_devices(&pdev->dev);
94}
95
96static DEFINE_PCI_DEVICE_TABLE(rdc321x_sb_table) = {
97 { PCI_DEVICE(PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030) },
98 {}
99};
100
101static struct pci_driver rdc321x_sb_driver = {
102 .name = "RDC321x Southbridge",
103 .id_table = rdc321x_sb_table,
104 .probe = rdc321x_sb_probe,
105 .remove = __devexit_p(rdc321x_sb_remove),
106};
107
108static int __init rdc321x_sb_init(void)
109{
110 return pci_register_driver(&rdc321x_sb_driver);
111}
112
113static void __exit rdc321x_sb_exit(void)
114{
115 pci_unregister_driver(&rdc321x_sb_driver);
116}
117
118module_init(rdc321x_sb_init);
119module_exit(rdc321x_sb_exit);
120
121MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
122MODULE_LICENSE("GPL");
123MODULE_DESCRIPTION("RDC R-321x MFD southbridge driver");
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index da6383a934ac..5041d33adf0b 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -318,6 +318,9 @@ static int t7l66xb_probe(struct platform_device *dev)
318 struct resource *iomem, *rscr; 318 struct resource *iomem, *rscr;
319 int ret; 319 int ret;
320 320
321 if (pdata == NULL)
322 return -EINVAL;
323
321 iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); 324 iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
322 if (!iomem) 325 if (!iomem)
323 return -EINVAL; 326 return -EINVAL;
diff --git a/drivers/mfd/tc35892.c b/drivers/mfd/tc35892.c
new file mode 100644
index 000000000000..715f095dd7a6
--- /dev/null
+++ b/drivers/mfd/tc35892.c
@@ -0,0 +1,347 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License, version 2
5 * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> for ST-Ericsson
6 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
7 */
8
9#include <linux/module.h>
10#include <linux/interrupt.h>
11#include <linux/irq.h>
12#include <linux/slab.h>
13#include <linux/i2c.h>
14#include <linux/mfd/core.h>
15#include <linux/mfd/tc35892.h>
16
17/**
18 * tc35892_reg_read() - read a single TC35892 register
19 * @tc35892: Device to read from
20 * @reg: Register to read
21 */
22int tc35892_reg_read(struct tc35892 *tc35892, u8 reg)
23{
24 int ret;
25
26 ret = i2c_smbus_read_byte_data(tc35892->i2c, reg);
27 if (ret < 0)
28 dev_err(tc35892->dev, "failed to read reg %#x: %d\n",
29 reg, ret);
30
31 return ret;
32}
33EXPORT_SYMBOL_GPL(tc35892_reg_read);
34
35/**
36 * tc35892_reg_write() - write a single TC35892 register
37 * @tc35892: Device to write to
38 * @reg: Register to write
39 * @data: Value to write
40 */
41int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data)
42{
43 int ret;
44
45 ret = i2c_smbus_write_byte_data(tc35892->i2c, reg, data);
46 if (ret < 0)
47 dev_err(tc35892->dev, "failed to write reg %#x: %d\n",
48 reg, ret);
49
50 return ret;
51}
52EXPORT_SYMBOL_GPL(tc35892_reg_write);
53
54/**
55 * tc35892_block_read() - read multiple TC35892 registers
56 * @tc35892: Device to read from
57 * @reg: First register
58 * @length: Number of registers
59 * @values: Buffer to write to
60 */
61int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length, u8 *values)
62{
63 int ret;
64
65 ret = i2c_smbus_read_i2c_block_data(tc35892->i2c, reg, length, values);
66 if (ret < 0)
67 dev_err(tc35892->dev, "failed to read regs %#x: %d\n",
68 reg, ret);
69
70 return ret;
71}
72EXPORT_SYMBOL_GPL(tc35892_block_read);
73
74/**
75 * tc35892_block_write() - write multiple TC35892 registers
76 * @tc35892: Device to write to
77 * @reg: First register
78 * @length: Number of registers
79 * @values: Values to write
80 */
81int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length,
82 const u8 *values)
83{
84 int ret;
85
86 ret = i2c_smbus_write_i2c_block_data(tc35892->i2c, reg, length,
87 values);
88 if (ret < 0)
89 dev_err(tc35892->dev, "failed to write regs %#x: %d\n",
90 reg, ret);
91
92 return ret;
93}
94EXPORT_SYMBOL_GPL(tc35892_block_write);
95
96/**
97 * tc35892_set_bits() - set the value of a bitfield in a TC35892 register
98 * @tc35892: Device to write to
99 * @reg: Register to write
100 * @mask: Mask of bits to set
101 * @val: Value to set
102 */
103int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val)
104{
105 int ret;
106
107 mutex_lock(&tc35892->lock);
108
109 ret = tc35892_reg_read(tc35892, reg);
110 if (ret < 0)
111 goto out;
112
113 ret &= ~mask;
114 ret |= val;
115
116 ret = tc35892_reg_write(tc35892, reg, ret);
117
118out:
119 mutex_unlock(&tc35892->lock);
120 return ret;
121}
122EXPORT_SYMBOL_GPL(tc35892_set_bits);
123
124static struct resource gpio_resources[] = {
125 {
126 .start = TC35892_INT_GPIIRQ,
127 .end = TC35892_INT_GPIIRQ,
128 .flags = IORESOURCE_IRQ,
129 },
130};
131
132static struct mfd_cell tc35892_devs[] = {
133 {
134 .name = "tc35892-gpio",
135 .num_resources = ARRAY_SIZE(gpio_resources),
136 .resources = &gpio_resources[0],
137 },
138};
139
140static irqreturn_t tc35892_irq(int irq, void *data)
141{
142 struct tc35892 *tc35892 = data;
143 int status;
144
145 status = tc35892_reg_read(tc35892, TC35892_IRQST);
146 if (status < 0)
147 return IRQ_NONE;
148
149 while (status) {
150 int bit = __ffs(status);
151
152 handle_nested_irq(tc35892->irq_base + bit);
153 status &= ~(1 << bit);
154 }
155
156 /*
157 * A dummy read or write (to any register) appears to be necessary to
158 * have the last interrupt clear (for example, GPIO IC write) take
159 * effect.
160 */
161 tc35892_reg_read(tc35892, TC35892_IRQST);
162
163 return IRQ_HANDLED;
164}
165
166static void tc35892_irq_dummy(unsigned int irq)
167{
168 /* No mask/unmask at this level */
169}
170
171static struct irq_chip tc35892_irq_chip = {
172 .name = "tc35892",
173 .mask = tc35892_irq_dummy,
174 .unmask = tc35892_irq_dummy,
175};
176
177static int tc35892_irq_init(struct tc35892 *tc35892)
178{
179 int base = tc35892->irq_base;
180 int irq;
181
182 for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) {
183 set_irq_chip_data(irq, tc35892);
184 set_irq_chip_and_handler(irq, &tc35892_irq_chip,
185 handle_edge_irq);
186 set_irq_nested_thread(irq, 1);
187#ifdef CONFIG_ARM
188 set_irq_flags(irq, IRQF_VALID);
189#else
190 set_irq_noprobe(irq);
191#endif
192 }
193
194 return 0;
195}
196
197static void tc35892_irq_remove(struct tc35892 *tc35892)
198{
199 int base = tc35892->irq_base;
200 int irq;
201
202 for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) {
203#ifdef CONFIG_ARM
204 set_irq_flags(irq, 0);
205#endif
206 set_irq_chip_and_handler(irq, NULL, NULL);
207 set_irq_chip_data(irq, NULL);
208 }
209}
210
211static int tc35892_chip_init(struct tc35892 *tc35892)
212{
213 int manf, ver, ret;
214
215 manf = tc35892_reg_read(tc35892, TC35892_MANFCODE);
216 if (manf < 0)
217 return manf;
218
219 ver = tc35892_reg_read(tc35892, TC35892_VERSION);
220 if (ver < 0)
221 return ver;
222
223 if (manf != TC35892_MANFCODE_MAGIC) {
224 dev_err(tc35892->dev, "unknown manufacturer: %#x\n", manf);
225 return -EINVAL;
226 }
227
228 dev_info(tc35892->dev, "manufacturer: %#x, version: %#x\n", manf, ver);
229
230 /* Put everything except the IRQ module into reset */
231 ret = tc35892_reg_write(tc35892, TC35892_RSTCTRL,
232 TC35892_RSTCTRL_TIMRST
233 | TC35892_RSTCTRL_ROTRST
234 | TC35892_RSTCTRL_KBDRST
235 | TC35892_RSTCTRL_GPIRST);
236 if (ret < 0)
237 return ret;
238
239 /* Clear the reset interrupt. */
240 return tc35892_reg_write(tc35892, TC35892_RSTINTCLR, 0x1);
241}
242
243static int __devinit tc35892_probe(struct i2c_client *i2c,
244 const struct i2c_device_id *id)
245{
246 struct tc35892_platform_data *pdata = i2c->dev.platform_data;
247 struct tc35892 *tc35892;
248 int ret;
249
250 if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA
251 | I2C_FUNC_SMBUS_I2C_BLOCK))
252 return -EIO;
253
254 tc35892 = kzalloc(sizeof(struct tc35892), GFP_KERNEL);
255 if (!tc35892)
256 return -ENOMEM;
257
258 mutex_init(&tc35892->lock);
259
260 tc35892->dev = &i2c->dev;
261 tc35892->i2c = i2c;
262 tc35892->pdata = pdata;
263 tc35892->irq_base = pdata->irq_base;
264 tc35892->num_gpio = id->driver_data;
265
266 i2c_set_clientdata(i2c, tc35892);
267
268 ret = tc35892_chip_init(tc35892);
269 if (ret)
270 goto out_free;
271
272 ret = tc35892_irq_init(tc35892);
273 if (ret)
274 goto out_free;
275
276 ret = request_threaded_irq(tc35892->i2c->irq, NULL, tc35892_irq,
277 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
278 "tc35892", tc35892);
279 if (ret) {
280 dev_err(tc35892->dev, "failed to request IRQ: %d\n", ret);
281 goto out_removeirq;
282 }
283
284 ret = mfd_add_devices(tc35892->dev, -1, tc35892_devs,
285 ARRAY_SIZE(tc35892_devs), NULL,
286 tc35892->irq_base);
287 if (ret) {
288 dev_err(tc35892->dev, "failed to add children\n");
289 goto out_freeirq;
290 }
291
292 return 0;
293
294out_freeirq:
295 free_irq(tc35892->i2c->irq, tc35892);
296out_removeirq:
297 tc35892_irq_remove(tc35892);
298out_free:
299 i2c_set_clientdata(i2c, NULL);
300 kfree(tc35892);
301 return ret;
302}
303
304static int __devexit tc35892_remove(struct i2c_client *client)
305{
306 struct tc35892 *tc35892 = i2c_get_clientdata(client);
307
308 mfd_remove_devices(tc35892->dev);
309
310 free_irq(tc35892->i2c->irq, tc35892);
311 tc35892_irq_remove(tc35892);
312
313 i2c_set_clientdata(client, NULL);
314 kfree(tc35892);
315
316 return 0;
317}
318
319static const struct i2c_device_id tc35892_id[] = {
320 { "tc35892", 24 },
321 { }
322};
323MODULE_DEVICE_TABLE(i2c, tc35892_id);
324
325static struct i2c_driver tc35892_driver = {
326 .driver.name = "tc35892",
327 .driver.owner = THIS_MODULE,
328 .probe = tc35892_probe,
329 .remove = __devexit_p(tc35892_remove),
330 .id_table = tc35892_id,
331};
332
333static int __init tc35892_init(void)
334{
335 return i2c_add_driver(&tc35892_driver);
336}
337subsys_initcall(tc35892_init);
338
339static void __exit tc35892_exit(void)
340{
341 i2c_del_driver(&tc35892_driver);
342}
343module_exit(tc35892_exit);
344
345MODULE_LICENSE("GPL v2");
346MODULE_DESCRIPTION("TC35892 MFD core driver");
347MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
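The handler above dispatches each pending status bit through handle_nested_irq(), so the interrupts set up by tc35892_irq_init() are nested threaded IRQs. A minimal sketch (not part of this patch) of how a child cell might claim one of them; the TC35892_INT_GPIO offset and the handler name are hypothetical:

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/tc35892.h>

static irqreturn_t example_child_irq(int irq, void *data)
{
        /* runs in the parent driver's IRQ thread */
        return IRQ_HANDLED;
}

static int example_child_probe(struct platform_device *pdev)
{
        struct tc35892 *tc35892 = dev_get_drvdata(pdev->dev.parent);
        int irq = tc35892->irq_base + TC35892_INT_GPIO;

        /* nested IRQ: no primary handler, only a threaded one */
        return request_threaded_irq(irq, NULL, example_child_irq,
                                    IRQF_ONESHOT, "example-child", pdev);
}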
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 7f478ec4184b..ac5995026c88 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -31,6 +31,7 @@
31 31
32#include <linux/i2c.h> 32#include <linux/i2c.h>
33#include <linux/i2c-ocores.h> 33#include <linux/i2c-ocores.h>
34#include <linux/i2c-xiic.h>
34#include <linux/i2c/tsc2007.h> 35#include <linux/i2c/tsc2007.h>
35 36
36#include <linux/spi/spi.h> 37#include <linux/spi/spi.h>
@@ -40,6 +41,8 @@
40 41
41#include <media/timb_radio.h> 42#include <media/timb_radio.h>
42 43
44#include <linux/timb_dma.h>
45
43#include "timberdale.h" 46#include "timberdale.h"
44 47
45#define DRIVER_NAME "timberdale" 48#define DRIVER_NAME "timberdale"
@@ -69,6 +72,12 @@ static struct i2c_board_info timberdale_i2c_board_info[] = {
69 }, 72 },
70}; 73};
71 74
75static __devinitdata struct xiic_i2c_platform_data
76timberdale_xiic_platform_data = {
77 .devices = timberdale_i2c_board_info,
78 .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
79};
80
72static __devinitdata struct ocores_i2c_platform_data 81static __devinitdata struct ocores_i2c_platform_data
73timberdale_ocores_platform_data = { 82timberdale_ocores_platform_data = {
74 .regstep = 4, 83 .regstep = 4,
@@ -77,7 +86,20 @@ timberdale_ocores_platform_data = {
77 .num_devices = ARRAY_SIZE(timberdale_i2c_board_info) 86 .num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
78}; 87};
79 88
80const static __devinitconst struct resource timberdale_ocores_resources[] = { 89static const __devinitconst struct resource timberdale_xiic_resources[] = {
90 {
91 .start = XIICOFFSET,
92 .end = XIICEND,
93 .flags = IORESOURCE_MEM,
94 },
95 {
96 .start = IRQ_TIMBERDALE_I2C,
97 .end = IRQ_TIMBERDALE_I2C,
98 .flags = IORESOURCE_IRQ,
99 },
100};
101
102static const __devinitconst struct resource timberdale_ocores_resources[] = {
81 { 103 {
82 .start = OCORESOFFSET, 104 .start = OCORESOFFSET,
83 .end = OCORESEND, 105 .end = OCORESEND,
@@ -126,7 +148,7 @@ static __devinitdata struct xspi_platform_data timberdale_xspi_platform_data = {
126 */ 148 */
127}; 149};
128 150
129const static __devinitconst struct resource timberdale_spi_resources[] = { 151static const __devinitconst struct resource timberdale_spi_resources[] = {
130 { 152 {
131 .start = SPIOFFSET, 153 .start = SPIOFFSET,
132 .end = SPIEND, 154 .end = SPIEND,
@@ -139,7 +161,7 @@ const static __devinitconst struct resource timberdale_spi_resources[] = {
139 }, 161 },
140}; 162};
141 163
142const static __devinitconst struct resource timberdale_eth_resources[] = { 164static const __devinitconst struct resource timberdale_eth_resources[] = {
143 { 165 {
144 .start = ETHOFFSET, 166 .start = ETHOFFSET,
145 .end = ETHEND, 167 .end = ETHEND,
@@ -159,7 +181,7 @@ static __devinitdata struct timbgpio_platform_data
159 .irq_base = 200, 181 .irq_base = 200,
160}; 182};
161 183
162const static __devinitconst struct resource timberdale_gpio_resources[] = { 184static const __devinitconst struct resource timberdale_gpio_resources[] = {
163 { 185 {
164 .start = GPIOOFFSET, 186 .start = GPIOOFFSET,
165 .end = GPIOEND, 187 .end = GPIOEND,
@@ -172,7 +194,7 @@ const static __devinitconst struct resource timberdale_gpio_resources[] = {
172 }, 194 },
173}; 195};
174 196
175const static __devinitconst struct resource timberdale_mlogicore_resources[] = { 197static const __devinitconst struct resource timberdale_mlogicore_resources[] = {
176 { 198 {
177 .start = MLCOREOFFSET, 199 .start = MLCOREOFFSET,
178 .end = MLCOREEND, 200 .end = MLCOREEND,
@@ -190,7 +212,7 @@ const static __devinitconst struct resource timberdale_mlogicore_resources[] = {
190 }, 212 },
191}; 213};
192 214
193const static __devinitconst struct resource timberdale_uart_resources[] = { 215static const __devinitconst struct resource timberdale_uart_resources[] = {
194 { 216 {
195 .start = UARTOFFSET, 217 .start = UARTOFFSET,
196 .end = UARTEND, 218 .end = UARTEND,
@@ -203,7 +225,7 @@ const static __devinitconst struct resource timberdale_uart_resources[] = {
203 }, 225 },
204}; 226};
205 227
206const static __devinitconst struct resource timberdale_uartlite_resources[] = { 228static const __devinitconst struct resource timberdale_uartlite_resources[] = {
207 { 229 {
208 .start = UARTLITEOFFSET, 230 .start = UARTLITEOFFSET,
209 .end = UARTLITEEND, 231 .end = UARTLITEEND,
@@ -216,7 +238,7 @@ const static __devinitconst struct resource timberdale_uartlite_resources[] = {
216 }, 238 },
217}; 239};
218 240
219const static __devinitconst struct resource timberdale_radio_resources[] = { 241static const __devinitconst struct resource timberdale_radio_resources[] = {
220 { 242 {
221 .start = RDSOFFSET, 243 .start = RDSOFFSET,
222 .end = RDSEND, 244 .end = RDSEND,
@@ -250,7 +272,66 @@ static __devinitdata struct timb_radio_platform_data
250 } 272 }
251}; 273};
252 274
253const static __devinitconst struct resource timberdale_dma_resources[] = { 275static __devinitdata struct timb_dma_platform_data timb_dma_platform_data = {
276 .nr_channels = 10,
277 .channels = {
278 {
279 /* UART RX */
280 .rx = true,
281 .descriptors = 2,
282 .descriptor_elements = 1
283 },
284 {
285 /* UART TX */
286 .rx = false,
287 .descriptors = 2,
288 .descriptor_elements = 1
289 },
290 {
291 /* MLB RX */
292 .rx = true,
293 .descriptors = 2,
294 .descriptor_elements = 1
295 },
296 {
297 /* MLB TX */
298 .rx = false,
299 .descriptors = 2,
300 .descriptor_elements = 1
301 },
302 {
303 /* Video RX */
304 .rx = true,
305 .bytes_per_line = 1440,
306 .descriptors = 2,
307 .descriptor_elements = 16
308 },
309 {
310 /* Video framedrop */
311 },
312 {
313 /* SDHCI RX */
314 .rx = true,
315 },
316 {
317 /* SDHCI TX */
318 },
319 {
320 /* ETH RX */
321 .rx = true,
322 .descriptors = 2,
323 .descriptor_elements = 1
324 },
325 {
326 /* ETH TX */
327 .rx = false,
328 .descriptors = 2,
329 .descriptor_elements = 1
330 },
331 }
332};
333
334static const __devinitconst struct resource timberdale_dma_resources[] = {
254 { 335 {
255 .start = DMAOFFSET, 336 .start = DMAOFFSET,
256 .end = DMAEND, 337 .end = DMAEND,
@@ -265,11 +346,25 @@ const static __devinitconst struct resource timberdale_dma_resources[] = {
265 346
266static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = { 347static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
267 { 348 {
349 .name = "timb-dma",
350 .num_resources = ARRAY_SIZE(timberdale_dma_resources),
351 .resources = timberdale_dma_resources,
352 .platform_data = &timb_dma_platform_data,
353 .data_size = sizeof(timb_dma_platform_data),
354 },
355 {
268 .name = "timb-uart", 356 .name = "timb-uart",
269 .num_resources = ARRAY_SIZE(timberdale_uart_resources), 357 .num_resources = ARRAY_SIZE(timberdale_uart_resources),
270 .resources = timberdale_uart_resources, 358 .resources = timberdale_uart_resources,
271 }, 359 },
272 { 360 {
361 .name = "xiic-i2c",
362 .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
363 .resources = timberdale_xiic_resources,
364 .platform_data = &timberdale_xiic_platform_data,
365 .data_size = sizeof(timberdale_xiic_platform_data),
366 },
367 {
273 .name = "timb-gpio", 368 .name = "timb-gpio",
274 .num_resources = ARRAY_SIZE(timberdale_gpio_resources), 369 .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
275 .resources = timberdale_gpio_resources, 370 .resources = timberdale_gpio_resources,
@@ -295,14 +390,16 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
295 .num_resources = ARRAY_SIZE(timberdale_eth_resources), 390 .num_resources = ARRAY_SIZE(timberdale_eth_resources),
296 .resources = timberdale_eth_resources, 391 .resources = timberdale_eth_resources,
297 }, 392 },
393};
394
395static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
298 { 396 {
299 .name = "timb-dma", 397 .name = "timb-dma",
300 .num_resources = ARRAY_SIZE(timberdale_dma_resources), 398 .num_resources = ARRAY_SIZE(timberdale_dma_resources),
301 .resources = timberdale_dma_resources, 399 .resources = timberdale_dma_resources,
400 .platform_data = &timb_dma_platform_data,
401 .data_size = sizeof(timb_dma_platform_data),
302 }, 402 },
303};
304
305static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
306 { 403 {
307 .name = "timb-uart", 404 .name = "timb-uart",
308 .num_resources = ARRAY_SIZE(timberdale_uart_resources), 405 .num_resources = ARRAY_SIZE(timberdale_uart_resources),
@@ -314,6 +411,13 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
314 .resources = timberdale_uartlite_resources, 411 .resources = timberdale_uartlite_resources,
315 }, 412 },
316 { 413 {
414 .name = "xiic-i2c",
415 .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
416 .resources = timberdale_xiic_resources,
417 .platform_data = &timberdale_xiic_platform_data,
418 .data_size = sizeof(timberdale_xiic_platform_data),
419 },
420 {
317 .name = "timb-gpio", 421 .name = "timb-gpio",
318 .num_resources = ARRAY_SIZE(timberdale_gpio_resources), 422 .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
319 .resources = timberdale_gpio_resources, 423 .resources = timberdale_gpio_resources,
@@ -344,20 +448,29 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
344 .num_resources = ARRAY_SIZE(timberdale_eth_resources), 448 .num_resources = ARRAY_SIZE(timberdale_eth_resources),
345 .resources = timberdale_eth_resources, 449 .resources = timberdale_eth_resources,
346 }, 450 },
451};
452
453static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
347 { 454 {
348 .name = "timb-dma", 455 .name = "timb-dma",
349 .num_resources = ARRAY_SIZE(timberdale_dma_resources), 456 .num_resources = ARRAY_SIZE(timberdale_dma_resources),
350 .resources = timberdale_dma_resources, 457 .resources = timberdale_dma_resources,
458 .platform_data = &timb_dma_platform_data,
459 .data_size = sizeof(timb_dma_platform_data),
351 }, 460 },
352};
353
354static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
355 { 461 {
356 .name = "timb-uart", 462 .name = "timb-uart",
357 .num_resources = ARRAY_SIZE(timberdale_uart_resources), 463 .num_resources = ARRAY_SIZE(timberdale_uart_resources),
358 .resources = timberdale_uart_resources, 464 .resources = timberdale_uart_resources,
359 }, 465 },
360 { 466 {
467 .name = "xiic-i2c",
468 .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
469 .resources = timberdale_xiic_resources,
470 .platform_data = &timberdale_xiic_platform_data,
471 .data_size = sizeof(timberdale_xiic_platform_data),
472 },
473 {
361 .name = "timb-gpio", 474 .name = "timb-gpio",
362 .num_resources = ARRAY_SIZE(timberdale_gpio_resources), 475 .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
363 .resources = timberdale_gpio_resources, 476 .resources = timberdale_gpio_resources,
@@ -378,14 +491,16 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
378 .platform_data = &timberdale_xspi_platform_data, 491 .platform_data = &timberdale_xspi_platform_data,
379 .data_size = sizeof(timberdale_xspi_platform_data), 492 .data_size = sizeof(timberdale_xspi_platform_data),
380 }, 493 },
494};
495
496static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
381 { 497 {
382 .name = "timb-dma", 498 .name = "timb-dma",
383 .num_resources = ARRAY_SIZE(timberdale_dma_resources), 499 .num_resources = ARRAY_SIZE(timberdale_dma_resources),
384 .resources = timberdale_dma_resources, 500 .resources = timberdale_dma_resources,
501 .platform_data = &timb_dma_platform_data,
502 .data_size = sizeof(timb_dma_platform_data),
385 }, 503 },
386};
387
388static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
389 { 504 {
390 .name = "timb-uart", 505 .name = "timb-uart",
391 .num_resources = ARRAY_SIZE(timberdale_uart_resources), 506 .num_resources = ARRAY_SIZE(timberdale_uart_resources),
@@ -424,11 +539,6 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
424 .num_resources = ARRAY_SIZE(timberdale_eth_resources), 539 .num_resources = ARRAY_SIZE(timberdale_eth_resources),
425 .resources = timberdale_eth_resources, 540 .resources = timberdale_eth_resources,
426 }, 541 },
427 {
428 .name = "timb-dma",
429 .num_resources = ARRAY_SIZE(timberdale_dma_resources),
430 .resources = timberdale_dma_resources,
431 },
432}; 542};
433 543
434static const __devinitconst struct resource timberdale_sdhc_resources[] = { 544static const __devinitconst struct resource timberdale_sdhc_resources[] = {
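Because each cell above sets .platform_data together with .data_size, the MFD core copies the structure into the child platform device, so a cell driver such as xiic-i2c can read it from its own platform data. A minimal sketch (not part of this patch; the probe name is illustrative):

#include <linux/platform_device.h>
#include <linux/i2c-xiic.h>

static int example_xiic_probe(struct platform_device *pdev)
{
        struct xiic_i2c_platform_data *pdata = pdev->dev.platform_data;

        if (!pdata)
                return -EINVAL;

        dev_info(&pdev->dev, "%d pre-declared I2C device(s)\n",
                 pdata->num_devices);
        return 0;
}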
diff --git a/drivers/mfd/timberdale.h b/drivers/mfd/timberdale.h
index 8d27ffabc25d..c11bf6ebfe00 100644
--- a/drivers/mfd/timberdale.h
+++ b/drivers/mfd/timberdale.h
@@ -23,7 +23,7 @@
23#ifndef MFD_TIMBERDALE_H 23#ifndef MFD_TIMBERDALE_H
24#define MFD_TIMBERDALE_H 24#define MFD_TIMBERDALE_H
25 25
26#define DRV_VERSION "0.1" 26#define DRV_VERSION "0.2"
27 27
28/* This driver only supports versions >= 3.8 and < 4.0 */ 28/* This driver only supports versions >= 3.8 and < 4.0 */
29#define TIMB_SUPPORTED_MAJOR 3 29#define TIMB_SUPPORTED_MAJOR 3
@@ -66,7 +66,7 @@
66 66
67#define CHIPCTLOFFSET 0x800 67#define CHIPCTLOFFSET 0x800
68#define CHIPCTLEND 0x8ff 68#define CHIPCTLEND 0x8ff
69#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET) 69#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET + 1)
70 70
71#define INTCOFFSET 0xc00 71#define INTCOFFSET 0xc00
72#define INTCEND 0xfff 72#define INTCEND 0xfff
@@ -127,4 +127,16 @@
127#define GPIO_PIN_BT_RST 15 127#define GPIO_PIN_BT_RST 15
128#define GPIO_NR_PINS 16 128#define GPIO_NR_PINS 16
129 129
130/* DMA Channels */
131#define DMA_UART_RX 0
132#define DMA_UART_TX 1
133#define DMA_MLB_RX 2
134#define DMA_MLB_TX 3
135#define DMA_VIDEO_RX 4
136#define DMA_VIDEO_DROP 5
137#define DMA_SDHCI_RX 6
138#define DMA_SDHCI_TX 7
139#define DMA_ETH_RX 8
140#define DMA_ETH_TX 9
141
130#endif 142#endif
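The defines above follow the order of the channel descriptions in timb_dma_platform_data, so client drivers can refer to a specific engine channel by index. A rough sketch (not part of this patch) using the generic dmaengine API; matching on chan_id is only an assumption made for illustration, the real client drivers may select channels differently:

#include <linux/dmaengine.h>

static bool example_dma_filter(struct dma_chan *chan, void *param)
{
        /* assumes channels are numbered in platform-data order */
        return chan->chan_id == (int)(unsigned long)param;
}

static struct dma_chan *example_request_uart_rx(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        return dma_request_channel(mask, example_dma_filter,
                                   (void *)(unsigned long)DMA_UART_RX);
}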
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index e5955306c2fa..9b22a77f70f5 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -530,8 +530,8 @@ static int __exit tps65010_remove(struct i2c_client *client)
530 cancel_delayed_work(&tps->work); 530 cancel_delayed_work(&tps->work);
531 flush_scheduled_work(); 531 flush_scheduled_work();
532 debugfs_remove(tps->file); 532 debugfs_remove(tps->file);
533 kfree(tps);
534 i2c_set_clientdata(client, NULL); 533 i2c_set_clientdata(client, NULL);
534 kfree(tps);
535 the_tps = NULL; 535 the_tps = NULL;
536 return 0; 536 return 0;
537} 537}
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
new file mode 100644
index 000000000000..d859dffed39f
--- /dev/null
+++ b/drivers/mfd/tps6507x.c
@@ -0,0 +1,159 @@
1/*
2 * tps6507x.c -- TPS6507x chip family multi-function driver
3 *
4 * Copyright (c) 2010 RidgeRun (todd.fischer@ridgerun.com)
5 *
6 * Author: Todd Fischer
7 * todd.fischer@ridgerun.com
8 *
9 * Credits:
10 *
11 * Using code from wm831x-*.c, wm8400-core, Wolfson Microelectronics PLC.
12 *
 13 * For licensing details see kernel-base/COPYING
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19#include <linux/init.h>
20#include <linux/slab.h>
21#include <linux/i2c.h>
22#include <linux/mfd/core.h>
23#include <linux/mfd/tps6507x.h>
24
25static struct mfd_cell tps6507x_devs[] = {
26 {
27 .name = "tps6507x-pmic",
28 },
29 {
30 .name = "tps6507x-ts",
31 },
32};
33
34
35static int tps6507x_i2c_read_device(struct tps6507x_dev *tps6507x, char reg,
36 int bytes, void *dest)
37{
38 struct i2c_client *i2c = tps6507x->i2c_client;
39 struct i2c_msg xfer[2];
40 int ret;
41
42 /* Write register */
43 xfer[0].addr = i2c->addr;
44 xfer[0].flags = 0;
45 xfer[0].len = 1;
46 xfer[0].buf = &reg;
47
48 /* Read data */
49 xfer[1].addr = i2c->addr;
50 xfer[1].flags = I2C_M_RD;
51 xfer[1].len = bytes;
52 xfer[1].buf = dest;
53
54 ret = i2c_transfer(i2c->adapter, xfer, 2);
55 if (ret == 2)
56 ret = 0;
57 else if (ret >= 0)
58 ret = -EIO;
59
60 return ret;
61}
62
63static int tps6507x_i2c_write_device(struct tps6507x_dev *tps6507x, char reg,
64 int bytes, void *src)
65{
66 struct i2c_client *i2c = tps6507x->i2c_client;
67 /* we add 1 byte for device register */
68 u8 msg[TPS6507X_MAX_REGISTER + 1];
69 int ret;
70
71 if (bytes > (TPS6507X_MAX_REGISTER + 1))
72 return -EINVAL;
73
74 msg[0] = reg;
75 memcpy(&msg[1], src, bytes);
76
77 ret = i2c_master_send(i2c, msg, bytes + 1);
78 if (ret < 0)
79 return ret;
80 if (ret != bytes + 1)
81 return -EIO;
82 return 0;
83}
84
85static int tps6507x_i2c_probe(struct i2c_client *i2c,
86 const struct i2c_device_id *id)
87{
88 struct tps6507x_dev *tps6507x;
89 int ret = 0;
90
91 tps6507x = kzalloc(sizeof(struct tps6507x_dev), GFP_KERNEL);
92 if (tps6507x == NULL) {
93 kfree(i2c);
94 return -ENOMEM;
95 }
96
97 i2c_set_clientdata(i2c, tps6507x);
98 tps6507x->dev = &i2c->dev;
99 tps6507x->i2c_client = i2c;
100 tps6507x->read_dev = tps6507x_i2c_read_device;
101 tps6507x->write_dev = tps6507x_i2c_write_device;
102
103 ret = mfd_add_devices(tps6507x->dev, -1,
104 tps6507x_devs, ARRAY_SIZE(tps6507x_devs),
105 NULL, 0);
106
107 if (ret < 0)
108 goto err;
109
110 return ret;
111
112err:
113 mfd_remove_devices(tps6507x->dev);
114 kfree(tps6507x);
115 return ret;
116}
117
118static int tps6507x_i2c_remove(struct i2c_client *i2c)
119{
120 struct tps6507x_dev *tps6507x = i2c_get_clientdata(i2c);
121
122 mfd_remove_devices(tps6507x->dev);
123 kfree(tps6507x);
124
125 return 0;
126}
127
128static const struct i2c_device_id tps6507x_i2c_id[] = {
129 { "tps6507x", 0 },
130 { }
131};
132MODULE_DEVICE_TABLE(i2c, tps6507x_i2c_id);
133
134
135static struct i2c_driver tps6507x_i2c_driver = {
136 .driver = {
137 .name = "tps6507x",
138 .owner = THIS_MODULE,
139 },
140 .probe = tps6507x_i2c_probe,
141 .remove = tps6507x_i2c_remove,
142 .id_table = tps6507x_i2c_id,
143};
144
145static int __init tps6507x_i2c_init(void)
146{
147 return i2c_add_driver(&tps6507x_i2c_driver);
148}
149/* init early so consumer devices can complete system boot */
150subsys_initcall(tps6507x_i2c_init);
151
152static void __exit tps6507x_i2c_exit(void)
153{
154 i2c_del_driver(&tps6507x_i2c_driver);
155}
156module_exit(tps6507x_i2c_exit);
157
158MODULE_DESCRIPTION("TPS6507x chip family multi-function driver");
159MODULE_LICENSE("GPL");
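The two cells registered above reach the chip through the parent's read_dev/write_dev hooks rather than talking I2C directly. A minimal sketch (not part of this patch) of a child probe; EXAMPLE_REG is a placeholder register offset:

#include <linux/platform_device.h>
#include <linux/mfd/tps6507x.h>

#define EXAMPLE_REG 0x00 /* placeholder, not a documented register */

static int example_child_probe(struct platform_device *pdev)
{
        struct tps6507x_dev *tps6507x = dev_get_drvdata(pdev->dev.parent);
        u8 val;
        int ret;

        ret = tps6507x->read_dev(tps6507x, EXAMPLE_REG, 1, &val);
        if (ret)
                return ret;

        dev_dbg(&pdev->dev, "register %#x = %#x\n", EXAMPLE_REG, val);
        return 0;
}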
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 202bdd59632d..097f24d8bceb 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -232,10 +232,11 @@ static const struct sih sih_modules_twl5031[8] = {
232 }, 232 },
233 [6] = { 233 [6] = {
234 /* 234 /*
235 * ACI doesn't use the same SIH organization. 235 * ECI/DBI doesn't use the same SIH organization.
236 * For example, it supports only one interrupt line 236 * For example, it supports only one interrupt output line.
237 * That is, the interrupts are seen on both INT1 and INT2 lines.
237 */ 238 */
238 .name = "aci", 239 .name = "eci_dbi",
239 .module = TWL5031_MODULE_ACCESSORY, 240 .module = TWL5031_MODULE_ACCESSORY,
240 .bits = 9, 241 .bits = 9,
241 .bytes_ixr = 2, 242 .bytes_ixr = 2,
@@ -247,8 +248,8 @@ static const struct sih sih_modules_twl5031[8] = {
247 248
248 }, 249 },
249 [7] = { 250 [7] = {
250 /* Accessory */ 251 /* Audio accessory */
251 .name = "acc", 252 .name = "audio",
252 .module = TWL5031_MODULE_ACCESSORY, 253 .module = TWL5031_MODULE_ACCESSORY,
253 .control_offset = TWL5031_ACCSIHCTRL, 254 .control_offset = TWL5031_ACCSIHCTRL,
254 .bits = 2, 255 .bits = 2,
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index f2ab025ad97a..1a968f34d679 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -322,7 +322,11 @@ EXPORT_SYMBOL_GPL(wm831x_set_bits);
322 */ 322 */
323int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input) 323int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
324{ 324{
325 int ret, src; 325 int ret, src, irq_masked, timeout;
326
327 /* Are we using the interrupt? */
328 irq_masked = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1_MASK);
329 irq_masked &= WM831X_AUXADC_DATA_EINT;
326 330
327 mutex_lock(&wm831x->auxadc_lock); 331 mutex_lock(&wm831x->auxadc_lock);
328 332
@@ -342,6 +346,9 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
342 goto out; 346 goto out;
343 } 347 }
344 348
349 /* Clear any notification from a very late arriving interrupt */
350 try_wait_for_completion(&wm831x->auxadc_done);
351
345 ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, 352 ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
346 WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA); 353 WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA);
347 if (ret < 0) { 354 if (ret < 0) {
@@ -349,22 +356,46 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
349 goto disable; 356 goto disable;
350 } 357 }
351 358
352 /* If an interrupt arrived late clean up after it */ 359 if (irq_masked) {
353 try_wait_for_completion(&wm831x->auxadc_done); 360 /* If we're not using interrupts then poll the
354 361 * interrupt status register */
355 /* Ignore the result to allow us to soldier on without IRQ hookup */ 362 timeout = 5;
356 wait_for_completion_timeout(&wm831x->auxadc_done, msecs_to_jiffies(5)); 363 while (timeout) {
357 364 msleep(1);
358 ret = wm831x_reg_read(wm831x, WM831X_AUXADC_CONTROL); 365
359 if (ret < 0) { 366 ret = wm831x_reg_read(wm831x,
360 dev_err(wm831x->dev, "AUXADC status read failed: %d\n", ret); 367 WM831X_INTERRUPT_STATUS_1);
361 goto disable; 368 if (ret < 0) {
362 } 369 dev_err(wm831x->dev,
363 370 "ISR 1 read failed: %d\n", ret);
364 if (ret & WM831X_AUX_CVT_ENA) { 371 goto disable;
365 dev_err(wm831x->dev, "Timed out reading AUXADC\n"); 372 }
366 ret = -EBUSY; 373
367 goto disable; 374 /* Did it complete? */
375 if (ret & WM831X_AUXADC_DATA_EINT) {
376 wm831x_reg_write(wm831x,
377 WM831X_INTERRUPT_STATUS_1,
378 WM831X_AUXADC_DATA_EINT);
379 break;
380 } else {
381 dev_err(wm831x->dev,
382 "AUXADC conversion timeout\n");
383 ret = -EBUSY;
384 goto disable;
385 }
386 }
387 } else {
388 /* If we are using interrupts then wait for the
389 * interrupt to complete. Use an extremely long
390 * timeout to handle situations with heavy load where
391 * the notification of the interrupt may be delayed by
392 * threaded IRQ handling. */
393 if (!wait_for_completion_timeout(&wm831x->auxadc_done,
394 msecs_to_jiffies(500))) {
395 dev_err(wm831x->dev, "Timed out waiting for AUXADC\n");
396 ret = -EBUSY;
397 goto disable;
398 }
368 } 399 }
369 400
370 ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA); 401 ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
@@ -1463,6 +1494,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
1463 case WM8310: 1494 case WM8310:
1464 parent = WM8310; 1495 parent = WM8310;
1465 wm831x->num_gpio = 16; 1496 wm831x->num_gpio = 16;
1497 wm831x->charger_irq_wake = 1;
1466 if (rev > 0) { 1498 if (rev > 0) {
1467 wm831x->has_gpio_ena = 1; 1499 wm831x->has_gpio_ena = 1;
1468 wm831x->has_cs_sts = 1; 1500 wm831x->has_cs_sts = 1;
@@ -1474,6 +1506,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
1474 case WM8311: 1506 case WM8311:
1475 parent = WM8311; 1507 parent = WM8311;
1476 wm831x->num_gpio = 16; 1508 wm831x->num_gpio = 16;
1509 wm831x->charger_irq_wake = 1;
1477 if (rev > 0) { 1510 if (rev > 0) {
1478 wm831x->has_gpio_ena = 1; 1511 wm831x->has_gpio_ena = 1;
1479 wm831x->has_cs_sts = 1; 1512 wm831x->has_cs_sts = 1;
@@ -1485,6 +1518,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
1485 case WM8312: 1518 case WM8312:
1486 parent = WM8312; 1519 parent = WM8312;
1487 wm831x->num_gpio = 16; 1520 wm831x->num_gpio = 16;
1521 wm831x->charger_irq_wake = 1;
1488 if (rev > 0) { 1522 if (rev > 0) {
1489 wm831x->has_gpio_ena = 1; 1523 wm831x->has_gpio_ena = 1;
1490 wm831x->has_cs_sts = 1; 1524 wm831x->has_cs_sts = 1;
@@ -1623,6 +1657,42 @@ static void wm831x_device_exit(struct wm831x *wm831x)
1623 kfree(wm831x); 1657 kfree(wm831x);
1624} 1658}
1625 1659
1660static int wm831x_device_suspend(struct wm831x *wm831x)
1661{
1662 int reg, mask;
1663
 1664 /* If the charger IRQs are a wake source then acknowledge them
 1665 * even if they're not actively being used (eg, no power driver
 1666 * or no IRQ line wired up), otherwise suspend won't last very
 1667 * long.
1668 */
1669 if (wm831x->charger_irq_wake) {
1670 reg = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_2_MASK);
1671
1672 mask = WM831X_CHG_BATT_HOT_EINT |
1673 WM831X_CHG_BATT_COLD_EINT |
1674 WM831X_CHG_BATT_FAIL_EINT |
1675 WM831X_CHG_OV_EINT | WM831X_CHG_END_EINT |
1676 WM831X_CHG_TO_EINT | WM831X_CHG_MODE_EINT |
1677 WM831X_CHG_START_EINT;
1678
1679 /* If any of the interrupts are masked read the statuses */
1680 if (reg & mask)
1681 reg = wm831x_reg_read(wm831x,
1682 WM831X_INTERRUPT_STATUS_2);
1683
1684 if (reg & mask) {
1685 dev_info(wm831x->dev,
1686 "Acknowledging masked charger IRQs: %x\n",
1687 reg & mask);
1688 wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_2,
1689 reg & mask);
1690 }
1691 }
1692
1693 return 0;
1694}
1695
1626static int wm831x_i2c_read_device(struct wm831x *wm831x, unsigned short reg, 1696static int wm831x_i2c_read_device(struct wm831x *wm831x, unsigned short reg,
1627 int bytes, void *dest) 1697 int bytes, void *dest)
1628{ 1698{
@@ -1697,6 +1767,13 @@ static int wm831x_i2c_remove(struct i2c_client *i2c)
1697 return 0; 1767 return 0;
1698} 1768}
1699 1769
1770static int wm831x_i2c_suspend(struct i2c_client *i2c, pm_message_t mesg)
1771{
1772 struct wm831x *wm831x = i2c_get_clientdata(i2c);
1773
1774 return wm831x_device_suspend(wm831x);
1775}
1776
1700static const struct i2c_device_id wm831x_i2c_id[] = { 1777static const struct i2c_device_id wm831x_i2c_id[] = {
1701 { "wm8310", WM8310 }, 1778 { "wm8310", WM8310 },
1702 { "wm8311", WM8311 }, 1779 { "wm8311", WM8311 },
@@ -1714,6 +1791,7 @@ static struct i2c_driver wm831x_i2c_driver = {
1714 }, 1791 },
1715 .probe = wm831x_i2c_probe, 1792 .probe = wm831x_i2c_probe,
1716 .remove = wm831x_i2c_remove, 1793 .remove = wm831x_i2c_remove,
1794 .suspend = wm831x_i2c_suspend,
1717 .id_table = wm831x_i2c_id, 1795 .id_table = wm831x_i2c_id,
1718}; 1796};
1719 1797
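With this change wm831x_auxadc_read() polls WM831X_INTERRUPT_STATUS_1 when the AUXADC interrupt is masked and waits on the completion otherwise, so existing callers keep the same calling convention. A minimal caller sketch (not part of this patch); the ADC source constant is illustrative:

#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/auxadc.h>

static int example_read_battery(struct wm831x *wm831x)
{
        int val = wm831x_auxadc_read(wm831x, WM831X_AUX_BATT);

        if (val < 0)
                dev_err(wm831x->dev, "AUXADC read failed: %d\n", val);

        return val;
}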
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 4c1122ceb443..7dabe4dbd373 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -39,8 +39,6 @@ struct wm831x_irq_data {
39 int primary; 39 int primary;
40 int reg; 40 int reg;
41 int mask; 41 int mask;
42 irq_handler_t handler;
43 void *handler_data;
44}; 42};
45 43
46static struct wm831x_irq_data wm831x_irqs[] = { 44static struct wm831x_irq_data wm831x_irqs[] = {
@@ -492,6 +490,14 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
492 490
493 mutex_init(&wm831x->irq_lock); 491 mutex_init(&wm831x->irq_lock);
494 492
493 /* Mask the individual interrupt sources */
494 for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
495 wm831x->irq_masks_cur[i] = 0xffff;
496 wm831x->irq_masks_cache[i] = 0xffff;
497 wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i,
498 0xffff);
499 }
500
495 if (!irq) { 501 if (!irq) {
496 dev_warn(wm831x->dev, 502 dev_warn(wm831x->dev,
497 "No interrupt specified - functionality limited\n"); 503 "No interrupt specified - functionality limited\n");
@@ -507,14 +513,6 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
507 wm831x->irq = irq; 513 wm831x->irq = irq;
508 wm831x->irq_base = pdata->irq_base; 514 wm831x->irq_base = pdata->irq_base;
509 515
510 /* Mask the individual interrupt sources */
511 for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
512 wm831x->irq_masks_cur[i] = 0xffff;
513 wm831x->irq_masks_cache[i] = 0xffff;
514 wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i,
515 0xffff);
516 }
517
518 /* Register them with genirq */ 516 /* Register them with genirq */
519 for (cur_irq = wm831x->irq_base; 517 for (cur_irq = wm831x->irq_base;
520 cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base; 518 cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base;
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index 65830f57c093..7795af4b1fe1 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -64,10 +64,8 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
64 int ret = 0; 64 int ret = 0;
65 65
66 wm8350 = kzalloc(sizeof(struct wm8350), GFP_KERNEL); 66 wm8350 = kzalloc(sizeof(struct wm8350), GFP_KERNEL);
67 if (wm8350 == NULL) { 67 if (wm8350 == NULL)
68 kfree(i2c);
69 return -ENOMEM; 68 return -ENOMEM;
70 }
71 69
72 i2c_set_clientdata(i2c, wm8350); 70 i2c_set_clientdata(i2c, wm8350);
73 wm8350->dev = &i2c->dev; 71 wm8350->dev = &i2c->dev;
@@ -82,6 +80,7 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
82 return ret; 80 return ret;
83 81
84err: 82err:
83 i2c_set_clientdata(i2c, NULL);
85 kfree(wm8350); 84 kfree(wm8350);
86 return ret; 85 return ret;
87} 86}
@@ -91,6 +90,7 @@ static int wm8350_i2c_remove(struct i2c_client *i2c)
91 struct wm8350 *wm8350 = i2c_get_clientdata(i2c); 90 struct wm8350 *wm8350 = i2c_get_clientdata(i2c);
92 91
93 wm8350_device_exit(wm8350); 92 wm8350_device_exit(wm8350);
93 i2c_set_clientdata(i2c, NULL);
94 kfree(wm8350); 94 kfree(wm8350);
95 95
96 return 0; 96 return 0;
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 865ce013a821..e08aafa663dc 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -118,7 +118,7 @@ static int wm8400_read(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *dest)
118{ 118{
119 int i, ret = 0; 119 int i, ret = 0;
120 120
121 BUG_ON(reg + num_regs - 1 > ARRAY_SIZE(wm8400->reg_cache)); 121 BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
122 122
123 /* If there are any volatile reads then read back the entire block */ 123 /* If there are any volatile reads then read back the entire block */
124 for (i = reg; i < reg + num_regs; i++) 124 for (i = reg; i < reg + num_regs; i++)
@@ -144,7 +144,7 @@ static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
144{ 144{
145 int ret, i; 145 int ret, i;
146 146
147 BUG_ON(reg + num_regs - 1 > ARRAY_SIZE(wm8400->reg_cache)); 147 BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
148 148
149 for (i = 0; i < num_regs; i++) { 149 for (i = 0; i < num_regs; i++) {
150 BUG_ON(!reg_data[reg + i].writable); 150 BUG_ON(!reg_data[reg + i].writable);
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 31a991161f0a..5bfb2a2041b8 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -75,6 +75,9 @@ enum ctype {
75 UNALIGNED_LOAD_STORE_WRITE, 75 UNALIGNED_LOAD_STORE_WRITE,
76 OVERWRITE_ALLOCATION, 76 OVERWRITE_ALLOCATION,
77 WRITE_AFTER_FREE, 77 WRITE_AFTER_FREE,
78 SOFTLOCKUP,
79 HARDLOCKUP,
80 HUNG_TASK,
78}; 81};
79 82
80static char* cp_name[] = { 83static char* cp_name[] = {
@@ -99,6 +102,9 @@ static char* cp_type[] = {
99 "UNALIGNED_LOAD_STORE_WRITE", 102 "UNALIGNED_LOAD_STORE_WRITE",
100 "OVERWRITE_ALLOCATION", 103 "OVERWRITE_ALLOCATION",
101 "WRITE_AFTER_FREE", 104 "WRITE_AFTER_FREE",
105 "SOFTLOCKUP",
106 "HARDLOCKUP",
107 "HUNG_TASK",
102}; 108};
103 109
104static struct jprobe lkdtm; 110static struct jprobe lkdtm;
@@ -320,6 +326,20 @@ static void lkdtm_do_action(enum ctype which)
320 memset(data, 0x78, len); 326 memset(data, 0x78, len);
321 break; 327 break;
322 } 328 }
329 case SOFTLOCKUP:
330 preempt_disable();
331 for (;;)
332 cpu_relax();
333 break;
334 case HARDLOCKUP:
335 local_irq_disable();
336 for (;;)
337 cpu_relax();
338 break;
339 case HUNG_TASK:
340 set_current_state(TASK_UNINTERRUPTIBLE);
341 schedule();
342 break;
323 case NONE: 343 case NONE:
324 default: 344 default:
325 break; 345 break;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3168ebd616b2..569e94da844c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1252,9 +1252,8 @@ EXPORT_SYMBOL(mmc_card_can_sleep);
1252/** 1252/**
1253 * mmc_suspend_host - suspend a host 1253 * mmc_suspend_host - suspend a host
1254 * @host: mmc host 1254 * @host: mmc host
1255 * @state: suspend mode (PM_SUSPEND_xxx)
1256 */ 1255 */
1257int mmc_suspend_host(struct mmc_host *host, pm_message_t state) 1256int mmc_suspend_host(struct mmc_host *host)
1258{ 1257{
1259 int err = 0; 1258 int err = 0;
1260 1259
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 0d96080d44b0..63772e7e7608 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -79,8 +79,6 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,
79 * we cannot use the retries field in mmc_command. 79 * we cannot use the retries field in mmc_command.
80 */ 80 */
81 for (i = 0;i <= retries;i++) { 81 for (i = 0;i <= retries;i++) {
82 memset(&mrq, 0, sizeof(struct mmc_request));
83
84 err = mmc_app_cmd(host, card); 82 err = mmc_app_cmd(host, card);
85 if (err) { 83 if (err) {
86 /* no point in retrying; no APP commands allowed */ 84 /* no point in retrying; no APP commands allowed */
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index ff27c8c71355..0f687cdeb064 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -406,6 +406,36 @@ void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret)
406EXPORT_SYMBOL_GPL(sdio_writeb); 406EXPORT_SYMBOL_GPL(sdio_writeb);
407 407
408/** 408/**
409 * sdio_writeb_readb - write and read a byte from SDIO function
410 * @func: SDIO function to access
411 * @write_byte: byte to write
412 * @addr: address to write to
413 * @err_ret: optional status value from transfer
414 *
 415 * Performs a RAW (Read after Write) operation as defined by the SDIO spec:
 416 * a single byte is written to the address space of a given SDIO function and
 417 * the response is read back from the same address, both using a single request.
418 * If there is a problem with the operation, 0xff is returned and
419 * @err_ret will contain the error code.
420 */
421u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
422 unsigned int addr, int *err_ret)
423{
424 int ret;
425 u8 val;
426
427 ret = mmc_io_rw_direct(func->card, 1, func->num, addr,
428 write_byte, &val);
429 if (err_ret)
430 *err_ret = ret;
431 if (ret)
432 val = 0xff;
433
434 return val;
435}
436EXPORT_SYMBOL_GPL(sdio_writeb_readb);
437
438/**
409 * sdio_memcpy_fromio - read a chunk of memory from a SDIO function 439 * sdio_memcpy_fromio - read a chunk of memory from a SDIO function
410 * @func: SDIO function to access 440 * @func: SDIO function to access
411 * @dst: buffer to store the data 441 * @dst: buffer to store the data
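A short usage sketch for the new helper (not part of this patch); like the other byte accessors it must be called with the host claimed, and EXAMPLE_CTRL_REG is a placeholder register address:

#include <linux/mmc/sdio_func.h>

#define EXAMPLE_CTRL_REG 0x00 /* placeholder address */

static int example_toggle_ctrl(struct sdio_func *func)
{
        int err;
        u8 readback;

        sdio_claim_host(func);
        readback = sdio_writeb_readb(func, 0x01, EXAMPLE_CTRL_REG, &err);
        sdio_release_host(func);

        if (err)
                return err;

        dev_dbg(&func->dev, "read back %#x after the write\n", readback);
        return 0;
}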
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2e13b94769fd..e171e77f6129 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -136,6 +136,18 @@ config MMC_SDHCI_S3C
136 136
137 If unsure, say N. 137 If unsure, say N.
138 138
139config MMC_SDHCI_SPEAR
140 tristate "SDHCI support on ST SPEAr platform"
141 depends on MMC_SDHCI && PLAT_SPEAR
142 help
143 This selects the Secure Digital Host Controller Interface (SDHCI)
 144 often referred to as the HSMMC block in some of the ST SPEAr range
 145 of SoCs.
146
147 If you have a controller with this interface, say Y or M here.
148
149 If unsure, say N.
150
139config MMC_SDHCI_S3C_DMA 151config MMC_SDHCI_S3C_DMA
140 bool "DMA support on S3C SDHCI" 152 bool "DMA support on S3C SDHCI"
141 depends on MMC_SDHCI_S3C && EXPERIMENTAL 153 depends on MMC_SDHCI_S3C && EXPERIMENTAL
@@ -412,3 +424,11 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
412 depends on SDH_BFIN 424 depends on SDH_BFIN
413 help 425 help
414 If you say yes here SD-Cards may work on the EZkit. 426 If you say yes here SD-Cards may work on the EZkit.
427
428config MMC_SH_MMCIF
429 tristate "SuperH Internal MMCIF support"
430 depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
431 help
 432 This selects the MMC Host Interface controller (MMCIF).
433
434 This driver supports MMCIF in sh7724/sh7757/sh7372.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index f4803977dfce..e30c2ee48894 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o
14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o 14obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
15obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o 15obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
16obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o 16obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
17obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
17obj-$(CONFIG_MMC_WBSD) += wbsd.o 18obj-$(CONFIG_MMC_WBSD) += wbsd.o
18obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 19obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
19obj-$(CONFIG_MMC_OMAP) += omap.o 20obj-$(CONFIG_MMC_OMAP) += omap.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
34obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 35obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
35obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 36obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
36obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 37obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
38obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
37 39
38obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o 40obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
39sdhci-of-y := sdhci-of-core.o 41sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 336d9f553f3e..5f3a599ead07 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -1157,7 +1157,7 @@ static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
1157 enable_irq_wake(host->board->det_pin); 1157 enable_irq_wake(host->board->det_pin);
1158 1158
1159 if (mmc) 1159 if (mmc)
1160 ret = mmc_suspend_host(mmc, state); 1160 ret = mmc_suspend_host(mmc);
1161 1161
1162 return ret; 1162 return ret;
1163} 1163}
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index df0e8a88d85f..95ef864ad8f9 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -173,6 +173,7 @@ struct atmel_mci {
173 * @mmc: The mmc_host representing this slot. 173 * @mmc: The mmc_host representing this slot.
174 * @host: The MMC controller this slot is using. 174 * @host: The MMC controller this slot is using.
175 * @sdc_reg: Value of SDCR to be written before using this slot. 175 * @sdc_reg: Value of SDCR to be written before using this slot.
176 * @sdio_irq: SDIO irq mask for this slot.
176 * @mrq: mmc_request currently being processed or waiting to be 177 * @mrq: mmc_request currently being processed or waiting to be
177 * processed, or NULL when the slot is idle. 178 * processed, or NULL when the slot is idle.
178 * @queue_node: List node for placing this node in the @queue list of 179 * @queue_node: List node for placing this node in the @queue list of
@@ -191,6 +192,7 @@ struct atmel_mci_slot {
191 struct atmel_mci *host; 192 struct atmel_mci *host;
192 193
193 u32 sdc_reg; 194 u32 sdc_reg;
195 u32 sdio_irq;
194 196
195 struct mmc_request *mrq; 197 struct mmc_request *mrq;
196 struct list_head queue_node; 198 struct list_head queue_node;
@@ -792,7 +794,7 @@ static void atmci_start_request(struct atmel_mci *host,
792 mci_writel(host, SDCR, slot->sdc_reg); 794 mci_writel(host, SDCR, slot->sdc_reg);
793 795
794 iflags = mci_readl(host, IMR); 796 iflags = mci_readl(host, IMR);
795 if (iflags) 797 if (iflags & ~(MCI_SDIOIRQA | MCI_SDIOIRQB))
796 dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", 798 dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
797 iflags); 799 iflags);
798 800
@@ -952,10 +954,21 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
952 if (mci_has_rwproof()) 954 if (mci_has_rwproof())
953 host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF); 955 host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF);
954 956
955 if (list_empty(&host->queue)) 957 if (atmci_is_mci2()) {
 958 /* configure High Speed mode according to the requested timing */
959 if (ios->timing == MMC_TIMING_SD_HS)
960 host->cfg_reg |= MCI_CFG_HSMODE;
961 else
962 host->cfg_reg &= ~MCI_CFG_HSMODE;
963 }
964
965 if (list_empty(&host->queue)) {
956 mci_writel(host, MR, host->mode_reg); 966 mci_writel(host, MR, host->mode_reg);
957 else 967 if (atmci_is_mci2())
968 mci_writel(host, CFG, host->cfg_reg);
969 } else {
958 host->need_clock_update = true; 970 host->need_clock_update = true;
971 }
959 972
960 spin_unlock_bh(&host->lock); 973 spin_unlock_bh(&host->lock);
961 } else { 974 } else {
@@ -1030,11 +1043,23 @@ static int atmci_get_cd(struct mmc_host *mmc)
1030 return present; 1043 return present;
1031} 1044}
1032 1045
1046static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1047{
1048 struct atmel_mci_slot *slot = mmc_priv(mmc);
1049 struct atmel_mci *host = slot->host;
1050
1051 if (enable)
1052 mci_writel(host, IER, slot->sdio_irq);
1053 else
1054 mci_writel(host, IDR, slot->sdio_irq);
1055}
1056
1033static const struct mmc_host_ops atmci_ops = { 1057static const struct mmc_host_ops atmci_ops = {
1034 .request = atmci_request, 1058 .request = atmci_request,
1035 .set_ios = atmci_set_ios, 1059 .set_ios = atmci_set_ios,
1036 .get_ro = atmci_get_ro, 1060 .get_ro = atmci_get_ro,
1037 .get_cd = atmci_get_cd, 1061 .get_cd = atmci_get_cd,
1062 .enable_sdio_irq = atmci_enable_sdio_irq,
1038}; 1063};
1039 1064
1040/* Called with host->lock held */ 1065/* Called with host->lock held */
@@ -1052,8 +1077,11 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1052 * necessary if set_ios() is called when a different slot is 1077 * necessary if set_ios() is called when a different slot is
 1053 * busy transferring data. 1078 * busy transferring data.
1054 */ 1079 */
1055 if (host->need_clock_update) 1080 if (host->need_clock_update) {
1056 mci_writel(host, MR, host->mode_reg); 1081 mci_writel(host, MR, host->mode_reg);
1082 if (atmci_is_mci2())
1083 mci_writel(host, CFG, host->cfg_reg);
1084 }
1057 1085
1058 host->cur_slot->mrq = NULL; 1086 host->cur_slot->mrq = NULL;
1059 host->mrq = NULL; 1087 host->mrq = NULL;
@@ -1483,6 +1511,19 @@ static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
1483 tasklet_schedule(&host->tasklet); 1511 tasklet_schedule(&host->tasklet);
1484} 1512}
1485 1513
1514static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1515{
1516 int i;
1517
1518 for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
1519 struct atmel_mci_slot *slot = host->slot[i];
1520 if (slot && (status & slot->sdio_irq)) {
1521 mmc_signal_sdio_irq(slot->mmc);
1522 }
1523 }
1524}
1525
1526
1486static irqreturn_t atmci_interrupt(int irq, void *dev_id) 1527static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1487{ 1528{
1488 struct atmel_mci *host = dev_id; 1529 struct atmel_mci *host = dev_id;
@@ -1522,6 +1563,10 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1522 1563
1523 if (pending & MCI_CMDRDY) 1564 if (pending & MCI_CMDRDY)
1524 atmci_cmd_interrupt(host, status); 1565 atmci_cmd_interrupt(host, status);
1566
1567 if (pending & (MCI_SDIOIRQA | MCI_SDIOIRQB))
1568 atmci_sdio_interrupt(host, status);
1569
1525 } while (pass_count++ < 5); 1570 } while (pass_count++ < 5);
1526 1571
1527 return pass_count ? IRQ_HANDLED : IRQ_NONE; 1572 return pass_count ? IRQ_HANDLED : IRQ_NONE;
@@ -1544,7 +1589,7 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
1544 1589
1545static int __init atmci_init_slot(struct atmel_mci *host, 1590static int __init atmci_init_slot(struct atmel_mci *host,
1546 struct mci_slot_pdata *slot_data, unsigned int id, 1591 struct mci_slot_pdata *slot_data, unsigned int id,
1547 u32 sdc_reg) 1592 u32 sdc_reg, u32 sdio_irq)
1548{ 1593{
1549 struct mmc_host *mmc; 1594 struct mmc_host *mmc;
1550 struct atmel_mci_slot *slot; 1595 struct atmel_mci_slot *slot;
@@ -1560,11 +1605,16 @@ static int __init atmci_init_slot(struct atmel_mci *host,
1560 slot->wp_pin = slot_data->wp_pin; 1605 slot->wp_pin = slot_data->wp_pin;
1561 slot->detect_is_active_high = slot_data->detect_is_active_high; 1606 slot->detect_is_active_high = slot_data->detect_is_active_high;
1562 slot->sdc_reg = sdc_reg; 1607 slot->sdc_reg = sdc_reg;
1608 slot->sdio_irq = sdio_irq;
1563 1609
1564 mmc->ops = &atmci_ops; 1610 mmc->ops = &atmci_ops;
1565 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); 1611 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
1566 mmc->f_max = host->bus_hz / 2; 1612 mmc->f_max = host->bus_hz / 2;
1567 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1613 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1614 if (sdio_irq)
1615 mmc->caps |= MMC_CAP_SDIO_IRQ;
1616 if (atmci_is_mci2())
1617 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1568 if (slot_data->bus_width >= 4) 1618 if (slot_data->bus_width >= 4)
1569 mmc->caps |= MMC_CAP_4_BIT_DATA; 1619 mmc->caps |= MMC_CAP_4_BIT_DATA;
1570 1620
@@ -1753,13 +1803,13 @@ static int __init atmci_probe(struct platform_device *pdev)
1753 ret = -ENODEV; 1803 ret = -ENODEV;
1754 if (pdata->slot[0].bus_width) { 1804 if (pdata->slot[0].bus_width) {
1755 ret = atmci_init_slot(host, &pdata->slot[0], 1805 ret = atmci_init_slot(host, &pdata->slot[0],
1756 0, MCI_SDCSEL_SLOT_A); 1806 0, MCI_SDCSEL_SLOT_A, MCI_SDIOIRQA);
1757 if (!ret) 1807 if (!ret)
1758 nr_slots++; 1808 nr_slots++;
1759 } 1809 }
1760 if (pdata->slot[1].bus_width) { 1810 if (pdata->slot[1].bus_width) {
1761 ret = atmci_init_slot(host, &pdata->slot[1], 1811 ret = atmci_init_slot(host, &pdata->slot[1],
1762 1, MCI_SDCSEL_SLOT_B); 1812 1, MCI_SDCSEL_SLOT_B, MCI_SDIOIRQB);
1763 if (!ret) 1813 if (!ret)
1764 nr_slots++; 1814 nr_slots++;
1765 } 1815 }
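Since the slots can now advertise MMC_CAP_SDIO_IRQ and MCI_SDIOIRQA/B are routed to mmc_signal_sdio_irq(), an SDIO function driver on this controller may switch from polling to card interrupts. A generic sketch (not part of this patch); the handler name is hypothetical:

#include <linux/mmc/sdio_func.h>

static void example_sdio_irq(struct sdio_func *func)
{
        /* called with the host claimed; service the card's IRQ here */
}

static int example_enable_card_irq(struct sdio_func *func)
{
        int ret;

        sdio_claim_host(func);
        ret = sdio_claim_irq(func, example_sdio_irq);
        sdio_release_host(func);

        return ret;
}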
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index f5834449400e..c8da5d30a861 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1142,7 +1142,7 @@ static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
1142 struct au1xmmc_host *host = platform_get_drvdata(pdev); 1142 struct au1xmmc_host *host = platform_get_drvdata(pdev);
1143 int ret; 1143 int ret;
1144 1144
1145 ret = mmc_suspend_host(host->mmc, state); 1145 ret = mmc_suspend_host(host->mmc);
1146 if (ret) 1146 if (ret)
1147 return ret; 1147 return ret;
1148 1148
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 6919e844072c..4b0e677d7295 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -576,7 +576,7 @@ static int sdh_suspend(struct platform_device *dev, pm_message_t state)
576 int ret = 0; 576 int ret = 0;
577 577
578 if (mmc) 578 if (mmc)
579 ret = mmc_suspend_host(mmc, state); 579 ret = mmc_suspend_host(mmc);
580 580
581 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON); 581 bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);
582 peripheral_free_list(drv_data->pin_req); 582 peripheral_free_list(drv_data->pin_req);
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 92a324f7417c..ca3bdc831900 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -675,7 +675,7 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
675 struct mmc_host *mmc = cb710_slot_to_mmc(slot); 675 struct mmc_host *mmc = cb710_slot_to_mmc(slot);
676 int err; 676 int err;
677 677
678 err = mmc_suspend_host(mmc, state); 678 err = mmc_suspend_host(mmc);
679 if (err) 679 if (err)
680 return err; 680 return err;
681 681
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 3bd0ba294e9d..33d9f1b00862 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -137,15 +137,15 @@
137 137
138/* 138/*
139 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, 139 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
140 * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only 140 * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only
141 * for drivers with max_hw_segs == 1, making the segments bigger (64KB) 141 * for drivers with max_hw_segs == 1, making the segments bigger (64KB)
142 * than the page or two that's otherwise typical. NR_SG == 16 gives at 142 * than the page or two that's otherwise typical. nr_sg (passed from
143 * least the same throughput boost, using EDMA transfer linkage instead 143 * platform data) == 16 gives at least the same throughput boost, using
144 * of spending CPU time copying pages. 144 * EDMA transfer linkage instead of spending CPU time copying pages.
145 */ 145 */
146#define MAX_CCNT ((1 << 16) - 1) 146#define MAX_CCNT ((1 << 16) - 1)
147 147
148#define NR_SG 16 148#define MAX_NR_SG 16
149 149
150static unsigned rw_threshold = 32; 150static unsigned rw_threshold = 32;
151module_param(rw_threshold, uint, S_IRUGO); 151module_param(rw_threshold, uint, S_IRUGO);
@@ -171,6 +171,7 @@ struct mmc_davinci_host {
171#define DAVINCI_MMC_DATADIR_READ 1 171#define DAVINCI_MMC_DATADIR_READ 1
172#define DAVINCI_MMC_DATADIR_WRITE 2 172#define DAVINCI_MMC_DATADIR_WRITE 2
173 unsigned char data_dir; 173 unsigned char data_dir;
174 unsigned char suspended;
174 175
175 /* buffer is used during PIO of one scatterlist segment, and 176 /* buffer is used during PIO of one scatterlist segment, and
176 * is updated along with buffer_bytes_left. bytes_left applies 177 * is updated along with buffer_bytes_left. bytes_left applies
@@ -192,7 +193,7 @@ struct mmc_davinci_host {
192 struct edmacc_param tx_template; 193 struct edmacc_param tx_template;
193 struct edmacc_param rx_template; 194 struct edmacc_param rx_template;
194 unsigned n_link; 195 unsigned n_link;
195 u32 links[NR_SG - 1]; 196 u32 links[MAX_NR_SG - 1];
196 197
197 /* For PIO we walk scatterlists one segment at a time. */ 198 /* For PIO we walk scatterlists one segment at a time. */
198 unsigned int sg_len; 199 unsigned int sg_len;
@@ -202,6 +203,8 @@ struct mmc_davinci_host {
202 u8 version; 203 u8 version;
203 /* for ns in one cycle calculation */ 204 /* for ns in one cycle calculation */
204 unsigned ns_in_one_cycle; 205 unsigned ns_in_one_cycle;
206 /* Number of sg segments */
207 u8 nr_sg;
205#ifdef CONFIG_CPU_FREQ 208#ifdef CONFIG_CPU_FREQ
206 struct notifier_block freq_transition; 209 struct notifier_block freq_transition;
207#endif 210#endif
@@ -568,6 +571,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
568 571
569static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) 572static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
570{ 573{
574 u32 link_size;
571 int r, i; 575 int r, i;
572 576
573 /* Acquire master DMA write channel */ 577 /* Acquire master DMA write channel */
@@ -593,7 +597,8 @@ static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
593 /* Allocate parameter RAM slots, which will later be bound to a 597 /* Allocate parameter RAM slots, which will later be bound to a
594 * channel as needed to handle a scatterlist. 598 * channel as needed to handle a scatterlist.
595 */ 599 */
596 for (i = 0; i < ARRAY_SIZE(host->links); i++) { 600 link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links));
601 for (i = 0; i < link_size; i++) {
597 r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); 602 r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY);
598 if (r < 0) { 603 if (r < 0) {
599 dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", 604 dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n",
@@ -905,19 +910,26 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
905 } 910 }
906} 911}
907 912
908static void 913static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
909davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) 914 int val)
910{ 915{
911 u32 temp; 916 u32 temp;
912 917
913 /* reset command and data state machines */
914 temp = readl(host->base + DAVINCI_MMCCTL); 918 temp = readl(host->base + DAVINCI_MMCCTL);
915 writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST, 919 if (val) /* reset */
916 host->base + DAVINCI_MMCCTL); 920 temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
921 else /* enable */
922 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
917 923
918 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);
919 udelay(10);
920 writel(temp, host->base + DAVINCI_MMCCTL); 924 writel(temp, host->base + DAVINCI_MMCCTL);
925 udelay(10);
926}
927
928static void
929davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
930{
931 mmc_davinci_reset_ctrl(host, 1);
932 mmc_davinci_reset_ctrl(host, 0);
921} 933}
922 934
923static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 935static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
@@ -1121,15 +1133,8 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
1121#endif 1133#endif
1122static void __init init_mmcsd_host(struct mmc_davinci_host *host) 1134static void __init init_mmcsd_host(struct mmc_davinci_host *host)
1123{ 1135{
1124 /* DAT line portion is diabled and in reset state */
1125 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST,
1126 host->base + DAVINCI_MMCCTL);
1127
1128 /* CMD line portion is diabled and in reset state */
1129 writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST,
1130 host->base + DAVINCI_MMCCTL);
1131 1136
1132 udelay(10); 1137 mmc_davinci_reset_ctrl(host, 1);
1133 1138
1134 writel(0, host->base + DAVINCI_MMCCLK); 1139 writel(0, host->base + DAVINCI_MMCCLK);
1135 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 1140 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);
@@ -1137,12 +1142,7 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host)
1137 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 1142 writel(0x1FFF, host->base + DAVINCI_MMCTOR);
1138 writel(0xFFFF, host->base + DAVINCI_MMCTOD); 1143 writel(0xFFFF, host->base + DAVINCI_MMCTOD);
1139 1144
1140 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST, 1145 mmc_davinci_reset_ctrl(host, 0);
1141 host->base + DAVINCI_MMCCTL);
1142 writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST,
1143 host->base + DAVINCI_MMCCTL);
1144
1145 udelay(10);
1146} 1146}
1147 1147
1148static int __init davinci_mmcsd_probe(struct platform_device *pdev) 1148static int __init davinci_mmcsd_probe(struct platform_device *pdev)
@@ -1202,6 +1202,12 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1202 1202
1203 init_mmcsd_host(host); 1203 init_mmcsd_host(host);
1204 1204
1205 if (pdata->nr_sg)
1206 host->nr_sg = pdata->nr_sg - 1;
1207
1208 if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
1209 host->nr_sg = MAX_NR_SG;
1210
1205 host->use_dma = use_dma; 1211 host->use_dma = use_dma;
1206 host->irq = irq; 1212 host->irq = irq;
1207 1213
@@ -1327,32 +1333,65 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
1327} 1333}
1328 1334
1329#ifdef CONFIG_PM 1335#ifdef CONFIG_PM
1330static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg) 1336static int davinci_mmcsd_suspend(struct device *dev)
1331{ 1337{
1338 struct platform_device *pdev = to_platform_device(dev);
1332 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1339 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1340 int ret;
1333 1341
1334 return mmc_suspend_host(host->mmc, msg); 1342 mmc_host_enable(host->mmc);
1343 ret = mmc_suspend_host(host->mmc);
1344 if (!ret) {
1345 writel(0, host->base + DAVINCI_MMCIM);
1346 mmc_davinci_reset_ctrl(host, 1);
1347 mmc_host_disable(host->mmc);
1348 clk_disable(host->clk);
1349 host->suspended = 1;
1350 } else {
1351 host->suspended = 0;
1352 mmc_host_disable(host->mmc);
1353 }
1354
1355 return ret;
1335} 1356}
1336 1357
1337static int davinci_mmcsd_resume(struct platform_device *pdev) 1358static int davinci_mmcsd_resume(struct device *dev)
1338{ 1359{
1360 struct platform_device *pdev = to_platform_device(dev);
1339 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1361 struct mmc_davinci_host *host = platform_get_drvdata(pdev);
1362 int ret;
1363
1364 if (!host->suspended)
1365 return 0;
1340 1366
1341 return mmc_resume_host(host->mmc); 1367 clk_enable(host->clk);
1368 mmc_host_enable(host->mmc);
1369
1370 mmc_davinci_reset_ctrl(host, 0);
1371 ret = mmc_resume_host(host->mmc);
1372 if (!ret)
1373 host->suspended = 0;
1374
1375 return ret;
1342} 1376}
1377
1378static const struct dev_pm_ops davinci_mmcsd_pm = {
1379 .suspend = davinci_mmcsd_suspend,
1380 .resume = davinci_mmcsd_resume,
1381};
1382
1383#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
1343#else 1384#else
1344#define davinci_mmcsd_suspend NULL 1385#define davinci_mmcsd_pm_ops NULL
1345#define davinci_mmcsd_resume NULL
1346#endif 1386#endif
1347 1387
1348static struct platform_driver davinci_mmcsd_driver = { 1388static struct platform_driver davinci_mmcsd_driver = {
1349 .driver = { 1389 .driver = {
1350 .name = "davinci_mmc", 1390 .name = "davinci_mmc",
1351 .owner = THIS_MODULE, 1391 .owner = THIS_MODULE,
1392 .pm = davinci_mmcsd_pm_ops,
1352 }, 1393 },
1353 .remove = __exit_p(davinci_mmcsd_remove), 1394 .remove = __exit_p(davinci_mmcsd_remove),
1354 .suspend = davinci_mmcsd_suspend,
1355 .resume = davinci_mmcsd_resume,
1356}; 1395};
1357 1396
1358static int __init davinci_mmcsd_init(void) 1397static int __init davinci_mmcsd_init(void)
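The davinci_mmc hunk above moves suspend/resume handling from the legacy platform_driver callbacks onto a dev_pm_ops table hooked up via .driver.pm, and mmc_suspend_host() is now called without a pm_message_t. A minimal sketch of that conversion pattern follows; the foo_* names are illustrative placeholders, not part of this patch, and error handling is elided.

#include <linux/platform_device.h>
#include <linux/pm.h>

/* Hedged sketch of the dev_pm_ops conversion; foo_* is illustrative. */
static int foo_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	dev_dbg(&pdev->dev, "suspending\n");
	/* quiesce the controller via platform_get_drvdata(pdev) here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* re-enable clocks and restore controller state here */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm   = &foo_pm_ops,	/* replaces the old .suspend/.resume members */
	},
};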
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index bf98d7cc928a..9a68ff4353a2 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -1115,7 +1115,7 @@ static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
1115 int ret = 0; 1115 int ret = 0;
1116 1116
1117 if (mmc) 1117 if (mmc)
1118 ret = mmc_suspend_host(mmc, state); 1118 ret = mmc_suspend_host(mmc);
1119 1119
1120 return ret; 1120 return ret;
1121} 1121}
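The imxmmc change above, like most of the hosts in this series, only adapts to the new mmc_suspend_host() prototype, which no longer takes a pm_message_t. A hedged before/after sketch (bar_suspend is an illustrative name):

/* Hedged sketch; bar_* is illustrative. Assumes <linux/mmc/host.h>
 * and <linux/platform_device.h>. */
static int bar_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc);	/* was mmc_suspend_host(mmc, state) */
	return ret;
}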
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index ff115d920888..4917af96bae1 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -824,7 +824,7 @@ static int mmci_suspend(struct amba_device *dev, pm_message_t state)
824 if (mmc) { 824 if (mmc) {
825 struct mmci_host *host = mmc_priv(mmc); 825 struct mmci_host *host = mmc_priv(mmc);
826 826
827 ret = mmc_suspend_host(mmc, state); 827 ret = mmc_suspend_host(mmc);
828 if (ret == 0) 828 if (ret == 0)
829 writel(0, host->base + MMCIMASK0); 829 writel(0, host->base + MMCIMASK0);
830 } 830 }
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 61f1d27fed3f..24e09454e522 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1327,7 +1327,7 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
1327 disable_irq(host->stat_irq); 1327 disable_irq(host->stat_irq);
1328 1328
1329 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) 1329 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
1330 rc = mmc_suspend_host(mmc, state); 1330 rc = mmc_suspend_host(mmc);
1331 if (!rc) 1331 if (!rc)
1332 msmsdcc_writel(host, 0, MMCIMASK0); 1332 msmsdcc_writel(host, 0, MMCIMASK0);
1333 if (host->clks_on) 1333 if (host->clks_on)
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 34e23489811a..366eefa77c5a 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -865,7 +865,7 @@ static int mvsd_suspend(struct platform_device *dev, pm_message_t state)
865 int ret = 0; 865 int ret = 0;
866 866
867 if (mmc) 867 if (mmc)
868 ret = mmc_suspend_host(mmc, state); 868 ret = mmc_suspend_host(mmc);
869 869
870 return ret; 870 return ret;
871} 871}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 74c87e023866..fdf33e837a73 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -942,7 +942,7 @@ static int mxcmci_suspend(struct platform_device *dev, pm_message_t state)
942 int ret = 0; 942 int ret = 0;
943 943
944 if (mmc) 944 if (mmc)
945 ret = mmc_suspend_host(mmc, state); 945 ret = mmc_suspend_host(mmc);
946 946
947 return ret; 947 return ret;
948} 948}
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 84d280406341..2b281680e320 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -39,30 +39,30 @@
39#include <plat/fpga.h> 39#include <plat/fpga.h>
40 40
41#define OMAP_MMC_REG_CMD 0x00 41#define OMAP_MMC_REG_CMD 0x00
42#define OMAP_MMC_REG_ARGL 0x04 42#define OMAP_MMC_REG_ARGL 0x01
43#define OMAP_MMC_REG_ARGH 0x08 43#define OMAP_MMC_REG_ARGH 0x02
44#define OMAP_MMC_REG_CON 0x0c 44#define OMAP_MMC_REG_CON 0x03
45#define OMAP_MMC_REG_STAT 0x10 45#define OMAP_MMC_REG_STAT 0x04
46#define OMAP_MMC_REG_IE 0x14 46#define OMAP_MMC_REG_IE 0x05
47#define OMAP_MMC_REG_CTO 0x18 47#define OMAP_MMC_REG_CTO 0x06
48#define OMAP_MMC_REG_DTO 0x1c 48#define OMAP_MMC_REG_DTO 0x07
49#define OMAP_MMC_REG_DATA 0x20 49#define OMAP_MMC_REG_DATA 0x08
50#define OMAP_MMC_REG_BLEN 0x24 50#define OMAP_MMC_REG_BLEN 0x09
51#define OMAP_MMC_REG_NBLK 0x28 51#define OMAP_MMC_REG_NBLK 0x0a
52#define OMAP_MMC_REG_BUF 0x2c 52#define OMAP_MMC_REG_BUF 0x0b
53#define OMAP_MMC_REG_SDIO 0x34 53#define OMAP_MMC_REG_SDIO 0x0d
54#define OMAP_MMC_REG_REV 0x3c 54#define OMAP_MMC_REG_REV 0x0f
55#define OMAP_MMC_REG_RSP0 0x40 55#define OMAP_MMC_REG_RSP0 0x10
56#define OMAP_MMC_REG_RSP1 0x44 56#define OMAP_MMC_REG_RSP1 0x11
57#define OMAP_MMC_REG_RSP2 0x48 57#define OMAP_MMC_REG_RSP2 0x12
58#define OMAP_MMC_REG_RSP3 0x4c 58#define OMAP_MMC_REG_RSP3 0x13
59#define OMAP_MMC_REG_RSP4 0x50 59#define OMAP_MMC_REG_RSP4 0x14
60#define OMAP_MMC_REG_RSP5 0x54 60#define OMAP_MMC_REG_RSP5 0x15
61#define OMAP_MMC_REG_RSP6 0x58 61#define OMAP_MMC_REG_RSP6 0x16
62#define OMAP_MMC_REG_RSP7 0x5c 62#define OMAP_MMC_REG_RSP7 0x17
63#define OMAP_MMC_REG_IOSR 0x60 63#define OMAP_MMC_REG_IOSR 0x18
64#define OMAP_MMC_REG_SYSC 0x64 64#define OMAP_MMC_REG_SYSC 0x19
65#define OMAP_MMC_REG_SYSS 0x68 65#define OMAP_MMC_REG_SYSS 0x1a
66 66
67#define OMAP_MMC_STAT_CARD_ERR (1 << 14) 67#define OMAP_MMC_STAT_CARD_ERR (1 << 14)
68#define OMAP_MMC_STAT_CARD_IRQ (1 << 13) 68#define OMAP_MMC_STAT_CARD_IRQ (1 << 13)
@@ -78,8 +78,9 @@
78#define OMAP_MMC_STAT_CARD_BUSY (1 << 2) 78#define OMAP_MMC_STAT_CARD_BUSY (1 << 2)
79#define OMAP_MMC_STAT_END_OF_CMD (1 << 0) 79#define OMAP_MMC_STAT_END_OF_CMD (1 << 0)
80 80
81#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG_##reg) 81#define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift)
82#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg) 82#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
83#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))
83 84
84/* 85/*
85 * Command types 86 * Command types
@@ -133,6 +134,7 @@ struct mmc_omap_host {
133 int irq; 134 int irq;
134 unsigned char bus_mode; 135 unsigned char bus_mode;
135 unsigned char hw_bus_mode; 136 unsigned char hw_bus_mode;
137 unsigned int reg_shift;
136 138
137 struct work_struct cmd_abort_work; 139 struct work_struct cmd_abort_work;
138 unsigned abort:1; 140 unsigned abort:1;
@@ -680,9 +682,9 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
680 host->data->bytes_xfered += n; 682 host->data->bytes_xfered += n;
681 683
682 if (write) { 684 if (write) {
683 __raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n); 685 __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
684 } else { 686 } else {
685 __raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n); 687 __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
686 } 688 }
687} 689}
688 690
@@ -900,7 +902,7 @@ mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
900 int dst_port = 0; 902 int dst_port = 0;
901 int sync_dev = 0; 903 int sync_dev = 0;
902 904
903 data_addr = host->phys_base + OMAP_MMC_REG_DATA; 905 data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
904 frame = data->blksz; 906 frame = data->blksz;
905 count = sg_dma_len(sg); 907 count = sg_dma_len(sg);
906 908
@@ -1493,6 +1495,8 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1493 } 1495 }
1494 } 1496 }
1495 1497
1498 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
1499
1496 return 0; 1500 return 0;
1497 1501
1498err_plat_cleanup: 1502err_plat_cleanup:
@@ -1557,7 +1561,7 @@ static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg)
1557 struct mmc_omap_slot *slot; 1561 struct mmc_omap_slot *slot;
1558 1562
1559 slot = host->slots[i]; 1563 slot = host->slots[i];
1560 ret = mmc_suspend_host(slot->mmc, mesg); 1564 ret = mmc_suspend_host(slot->mmc);
1561 if (ret < 0) { 1565 if (ret < 0) {
1562 while (--i >= 0) { 1566 while (--i >= 0) {
1563 slot = host->slots[i]; 1567 slot = host->slots[i];
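In the omap.c hunk above the register map switches from fixed byte offsets to small indices that OMAP_MMC_REG() shifts by a per-host reg_shift, so OMAP7xx controllers (registers 2 bytes apart) and later SoCs (4 bytes apart) share one table. A hedged illustration of the mapping, using values visible in the hunk:

/* Hedged sketch of the index-to-offset mapping; not part of the patch. */
static inline u16 omap_mmc_raw_readw(struct mmc_omap_host *host, unsigned int idx)
{
	/*
	 * idx is a register index such as OMAP_MMC_REG_DATA (0x08).
	 * reg_shift is 1 on omap7xx -> offset 0x08 << 1 = 0x10,
	 * and 2 elsewhere           -> offset 0x08 << 2 = 0x20,
	 * which matches the old hard-coded OMAP_MMC_REG_DATA of 0x20.
	 */
	return __raw_readw(host->virt_base + (idx << host->reg_shift));
}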
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index e9caf694c59e..b032828c6126 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -157,12 +157,10 @@ struct omap_hsmmc_host {
157 */ 157 */
158 struct regulator *vcc; 158 struct regulator *vcc;
159 struct regulator *vcc_aux; 159 struct regulator *vcc_aux;
160 struct semaphore sem;
161 struct work_struct mmc_carddetect_work; 160 struct work_struct mmc_carddetect_work;
162 void __iomem *base; 161 void __iomem *base;
163 resource_size_t mapbase; 162 resource_size_t mapbase;
164 spinlock_t irq_lock; /* Prevent races with irq handler */ 163 spinlock_t irq_lock; /* Prevent races with irq handler */
165 unsigned long flags;
166 unsigned int id; 164 unsigned int id;
167 unsigned int dma_len; 165 unsigned int dma_len;
168 unsigned int dma_sg_idx; 166 unsigned int dma_sg_idx;
@@ -183,6 +181,7 @@ struct omap_hsmmc_host {
183 int protect_card; 181 int protect_card;
184 int reqs_blocked; 182 int reqs_blocked;
185 int use_reg; 183 int use_reg;
184 int req_in_progress;
186 185
187 struct omap_mmc_platform_data *pdata; 186 struct omap_mmc_platform_data *pdata;
188}; 187};
@@ -524,6 +523,27 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
524 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n"); 523 dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n");
525} 524}
526 525
526static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host)
527{
528 unsigned int irq_mask;
529
530 if (host->use_dma)
531 irq_mask = INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE);
532 else
533 irq_mask = INT_EN_MASK;
534
535 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
536 OMAP_HSMMC_WRITE(host->base, ISE, irq_mask);
537 OMAP_HSMMC_WRITE(host->base, IE, irq_mask);
538}
539
540static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
541{
542 OMAP_HSMMC_WRITE(host->base, ISE, 0);
543 OMAP_HSMMC_WRITE(host->base, IE, 0);
544 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
545}
546
527#ifdef CONFIG_PM 547#ifdef CONFIG_PM
528 548
529/* 549/*
@@ -592,9 +612,7 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
592 && time_before(jiffies, timeout)) 612 && time_before(jiffies, timeout))
593 ; 613 ;
594 614
595 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); 615 omap_hsmmc_disable_irq(host);
596 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
597 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
598 616
599 /* Do not initialize card-specific things if the power is off */ 617 /* Do not initialize card-specific things if the power is off */
600 if (host->power_mode == MMC_POWER_OFF) 618 if (host->power_mode == MMC_POWER_OFF)
@@ -697,6 +715,8 @@ static void send_init_stream(struct omap_hsmmc_host *host)
697 return; 715 return;
698 716
699 disable_irq(host->irq); 717 disable_irq(host->irq);
718
719 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
700 OMAP_HSMMC_WRITE(host->base, CON, 720 OMAP_HSMMC_WRITE(host->base, CON,
701 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM); 721 OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM);
702 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD); 722 OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);
@@ -762,17 +782,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
762 mmc_hostname(host->mmc), cmd->opcode, cmd->arg); 782 mmc_hostname(host->mmc), cmd->opcode, cmd->arg);
763 host->cmd = cmd; 783 host->cmd = cmd;
764 784
765 /* 785 omap_hsmmc_enable_irq(host);
766 * Clear status bits and enable interrupts
767 */
768 OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
769 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
770
771 if (host->use_dma)
772 OMAP_HSMMC_WRITE(host->base, IE,
773 INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE));
774 else
775 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
776 786
777 host->response_busy = 0; 787 host->response_busy = 0;
778 if (cmd->flags & MMC_RSP_PRESENT) { 788 if (cmd->flags & MMC_RSP_PRESENT) {
@@ -806,13 +816,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
806 if (host->use_dma) 816 if (host->use_dma)
807 cmdreg |= DMA_EN; 817 cmdreg |= DMA_EN;
808 818
809 /* 819 host->req_in_progress = 1;
810 * In an interrupt context (i.e. STOP command), the spinlock is unlocked
811 * by the interrupt handler, otherwise (i.e. for a new request) it is
812 * unlocked here.
813 */
814 if (!in_interrupt())
815 spin_unlock_irqrestore(&host->irq_lock, host->flags);
816 820
817 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg); 821 OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
818 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg); 822 OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
@@ -827,6 +831,23 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
827 return DMA_FROM_DEVICE; 831 return DMA_FROM_DEVICE;
828} 832}
829 833
834static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
835{
836 int dma_ch;
837
838 spin_lock(&host->irq_lock);
839 host->req_in_progress = 0;
840 dma_ch = host->dma_ch;
841 spin_unlock(&host->irq_lock);
842
843 omap_hsmmc_disable_irq(host);
844 /* Do not complete the request if DMA is still in progress */
845 if (mrq->data && host->use_dma && dma_ch != -1)
846 return;
847 host->mrq = NULL;
848 mmc_request_done(host->mmc, mrq);
849}
850
830/* 851/*
831 * Notify the transfer complete to MMC core 852 * Notify the transfer complete to MMC core
832 */ 853 */
@@ -843,25 +864,19 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
843 return; 864 return;
844 } 865 }
845 866
846 host->mrq = NULL; 867 omap_hsmmc_request_done(host, mrq);
847 mmc_request_done(host->mmc, mrq);
848 return; 868 return;
849 } 869 }
850 870
851 host->data = NULL; 871 host->data = NULL;
852 872
853 if (host->use_dma && host->dma_ch != -1)
854 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
855 omap_hsmmc_get_dma_dir(host, data));
856
857 if (!data->error) 873 if (!data->error)
858 data->bytes_xfered += data->blocks * (data->blksz); 874 data->bytes_xfered += data->blocks * (data->blksz);
859 else 875 else
860 data->bytes_xfered = 0; 876 data->bytes_xfered = 0;
861 877
862 if (!data->stop) { 878 if (!data->stop) {
863 host->mrq = NULL; 879 omap_hsmmc_request_done(host, data->mrq);
864 mmc_request_done(host->mmc, data->mrq);
865 return; 880 return;
866 } 881 }
867 omap_hsmmc_start_command(host, data->stop, NULL); 882 omap_hsmmc_start_command(host, data->stop, NULL);
@@ -887,10 +902,8 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
887 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10); 902 cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10);
888 } 903 }
889 } 904 }
890 if ((host->data == NULL && !host->response_busy) || cmd->error) { 905 if ((host->data == NULL && !host->response_busy) || cmd->error)
891 host->mrq = NULL; 906 omap_hsmmc_request_done(host, cmd->mrq);
892 mmc_request_done(host->mmc, cmd->mrq);
893 }
894} 907}
895 908
896/* 909/*
@@ -898,14 +911,19 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
898 */ 911 */
899static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) 912static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
900{ 913{
914 int dma_ch;
915
901 host->data->error = errno; 916 host->data->error = errno;
902 917
903 if (host->use_dma && host->dma_ch != -1) { 918 spin_lock(&host->irq_lock);
919 dma_ch = host->dma_ch;
920 host->dma_ch = -1;
921 spin_unlock(&host->irq_lock);
922
923 if (host->use_dma && dma_ch != -1) {
904 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, 924 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
905 omap_hsmmc_get_dma_dir(host, host->data)); 925 omap_hsmmc_get_dma_dir(host, host->data));
906 omap_free_dma(host->dma_ch); 926 omap_free_dma(dma_ch);
907 host->dma_ch = -1;
908 up(&host->sem);
909 } 927 }
910 host->data = NULL; 928 host->data = NULL;
911} 929}
@@ -967,28 +985,21 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
967 __func__); 985 __func__);
968} 986}
969 987
970/* 988static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
971 * MMC controller IRQ handler
972 */
973static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
974{ 989{
975 struct omap_hsmmc_host *host = dev_id;
976 struct mmc_data *data; 990 struct mmc_data *data;
977 int end_cmd = 0, end_trans = 0, status; 991 int end_cmd = 0, end_trans = 0;
978 992
979 spin_lock(&host->irq_lock); 993 if (!host->req_in_progress) {
980 994 do {
981 if (host->mrq == NULL) { 995 OMAP_HSMMC_WRITE(host->base, STAT, status);
982 OMAP_HSMMC_WRITE(host->base, STAT, 996 /* Flush posted write */
983 OMAP_HSMMC_READ(host->base, STAT)); 997 status = OMAP_HSMMC_READ(host->base, STAT);
984 /* Flush posted write */ 998 } while (status & INT_EN_MASK);
985 OMAP_HSMMC_READ(host->base, STAT); 999 return;
986 spin_unlock(&host->irq_lock);
987 return IRQ_HANDLED;
988 } 1000 }
989 1001
990 data = host->data; 1002 data = host->data;
991 status = OMAP_HSMMC_READ(host->base, STAT);
992 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); 1003 dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
993 1004
994 if (status & ERR) { 1005 if (status & ERR) {
@@ -1041,15 +1052,27 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
1041 } 1052 }
1042 1053
1043 OMAP_HSMMC_WRITE(host->base, STAT, status); 1054 OMAP_HSMMC_WRITE(host->base, STAT, status);
1044 /* Flush posted write */
1045 OMAP_HSMMC_READ(host->base, STAT);
1046 1055
1047 if (end_cmd || ((status & CC) && host->cmd)) 1056 if (end_cmd || ((status & CC) && host->cmd))
1048 omap_hsmmc_cmd_done(host, host->cmd); 1057 omap_hsmmc_cmd_done(host, host->cmd);
1049 if ((end_trans || (status & TC)) && host->mrq) 1058 if ((end_trans || (status & TC)) && host->mrq)
1050 omap_hsmmc_xfer_done(host, data); 1059 omap_hsmmc_xfer_done(host, data);
1060}
1051 1061
1052 spin_unlock(&host->irq_lock); 1062/*
1063 * MMC controller IRQ handler
1064 */
1065static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
1066{
1067 struct omap_hsmmc_host *host = dev_id;
1068 int status;
1069
1070 status = OMAP_HSMMC_READ(host->base, STAT);
1071 do {
1072 omap_hsmmc_do_irq(host, status);
1073 /* Flush posted write */
1074 status = OMAP_HSMMC_READ(host->base, STAT);
1075 } while (status & INT_EN_MASK);
1053 1076
1054 return IRQ_HANDLED; 1077 return IRQ_HANDLED;
1055} 1078}
@@ -1244,31 +1267,47 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
1244/* 1267/*
1245 * DMA call back function 1268 * DMA call back function
1246 */ 1269 */
1247static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data) 1270static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1248{ 1271{
1249 struct omap_hsmmc_host *host = data; 1272 struct omap_hsmmc_host *host = cb_data;
1273 struct mmc_data *data = host->mrq->data;
1274 int dma_ch, req_in_progress;
1250 1275
1251 if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ) 1276 if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
1252 dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n"); 1277 dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
1253 1278
1254 if (host->dma_ch < 0) 1279 spin_lock(&host->irq_lock);
1280 if (host->dma_ch < 0) {
1281 spin_unlock(&host->irq_lock);
1255 return; 1282 return;
1283 }
1256 1284
1257 host->dma_sg_idx++; 1285 host->dma_sg_idx++;
1258 if (host->dma_sg_idx < host->dma_len) { 1286 if (host->dma_sg_idx < host->dma_len) {
1259 /* Fire up the next transfer. */ 1287 /* Fire up the next transfer. */
1260 omap_hsmmc_config_dma_params(host, host->data, 1288 omap_hsmmc_config_dma_params(host, data,
1261 host->data->sg + host->dma_sg_idx); 1289 data->sg + host->dma_sg_idx);
1290 spin_unlock(&host->irq_lock);
1262 return; 1291 return;
1263 } 1292 }
1264 1293
1265 omap_free_dma(host->dma_ch); 1294 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
1295 omap_hsmmc_get_dma_dir(host, data));
1296
1297 req_in_progress = host->req_in_progress;
1298 dma_ch = host->dma_ch;
1266 host->dma_ch = -1; 1299 host->dma_ch = -1;
1267 /* 1300 spin_unlock(&host->irq_lock);
1268 * DMA Callback: run in interrupt context. 1301
1269 * mutex_unlock will throw a kernel warning if used. 1302 omap_free_dma(dma_ch);
1270 */ 1303
1271 up(&host->sem); 1304 /* If DMA has finished after TC, complete the request */
1305 if (!req_in_progress) {
1306 struct mmc_request *mrq = host->mrq;
1307
1308 host->mrq = NULL;
1309 mmc_request_done(host->mmc, mrq);
1310 }
1272} 1311}
1273 1312
1274/* 1313/*
@@ -1277,7 +1316,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
1277static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, 1316static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1278 struct mmc_request *req) 1317 struct mmc_request *req)
1279{ 1318{
1280 int dma_ch = 0, ret = 0, err = 1, i; 1319 int dma_ch = 0, ret = 0, i;
1281 struct mmc_data *data = req->data; 1320 struct mmc_data *data = req->data;
1282 1321
1283 /* Sanity check: all the SG entries must be aligned by block size. */ 1322 /* Sanity check: all the SG entries must be aligned by block size. */
@@ -1294,23 +1333,7 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
1294 */ 1333 */
1295 return -EINVAL; 1334 return -EINVAL;
1296 1335
1297 /* 1336 BUG_ON(host->dma_ch != -1);
1298 * If for some reason the DMA transfer is still active,
1299 * we wait for timeout period and free the dma
1300 */
1301 if (host->dma_ch != -1) {
1302 set_current_state(TASK_UNINTERRUPTIBLE);
1303 schedule_timeout(100);
1304 if (down_trylock(&host->sem)) {
1305 omap_free_dma(host->dma_ch);
1306 host->dma_ch = -1;
1307 up(&host->sem);
1308 return err;
1309 }
1310 } else {
1311 if (down_trylock(&host->sem))
1312 return err;
1313 }
1314 1337
1315 ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), 1338 ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
1316 "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); 1339 "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
@@ -1410,37 +1433,27 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1410 struct omap_hsmmc_host *host = mmc_priv(mmc); 1433 struct omap_hsmmc_host *host = mmc_priv(mmc);
1411 int err; 1434 int err;
1412 1435
1413 /* 1436 BUG_ON(host->req_in_progress);
1414 * Prevent races with the interrupt handler because of unexpected 1437 BUG_ON(host->dma_ch != -1);
1415 * interrupts, but not if we are already in interrupt context i.e. 1438 if (host->protect_card) {
1416 * retries. 1439 if (host->reqs_blocked < 3) {
1417 */ 1440 /*
1418 if (!in_interrupt()) { 1441 * Ensure the controller is left in a consistent
1419 spin_lock_irqsave(&host->irq_lock, host->flags); 1442 * state by resetting the command and data state
1420 /* 1443 * machines.
1421 * Protect the card from I/O if there is a possibility 1444 */
1422 * it can be removed. 1445 omap_hsmmc_reset_controller_fsm(host, SRD);
1423 */ 1446 omap_hsmmc_reset_controller_fsm(host, SRC);
1424 if (host->protect_card) { 1447 host->reqs_blocked += 1;
1425 if (host->reqs_blocked < 3) { 1448 }
1426 /* 1449 req->cmd->error = -EBADF;
1427 * Ensure the controller is left in a consistent 1450 if (req->data)
1428 * state by resetting the command and data state 1451 req->data->error = -EBADF;
1429 * machines. 1452 req->cmd->retries = 0;
1430 */ 1453 mmc_request_done(mmc, req);
1431 omap_hsmmc_reset_controller_fsm(host, SRD); 1454 return;
1432 omap_hsmmc_reset_controller_fsm(host, SRC); 1455 } else if (host->reqs_blocked)
1433 host->reqs_blocked += 1; 1456 host->reqs_blocked = 0;
1434 }
1435 req->cmd->error = -EBADF;
1436 if (req->data)
1437 req->data->error = -EBADF;
1438 spin_unlock_irqrestore(&host->irq_lock, host->flags);
1439 mmc_request_done(mmc, req);
1440 return;
1441 } else if (host->reqs_blocked)
1442 host->reqs_blocked = 0;
1443 }
1444 WARN_ON(host->mrq != NULL); 1457 WARN_ON(host->mrq != NULL);
1445 host->mrq = req; 1458 host->mrq = req;
1446 err = omap_hsmmc_prepare_data(host, req); 1459 err = omap_hsmmc_prepare_data(host, req);
@@ -1449,8 +1462,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
1449 if (req->data) 1462 if (req->data)
1450 req->data->error = err; 1463 req->data->error = err;
1451 host->mrq = NULL; 1464 host->mrq = NULL;
1452 if (!in_interrupt())
1453 spin_unlock_irqrestore(&host->irq_lock, host->flags);
1454 mmc_request_done(mmc, req); 1465 mmc_request_done(mmc, req);
1455 return; 1466 return;
1456 } 1467 }
@@ -2019,7 +2030,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2019 mmc->f_min = 400000; 2030 mmc->f_min = 400000;
2020 mmc->f_max = 52000000; 2031 mmc->f_max = 52000000;
2021 2032
2022 sema_init(&host->sem, 1);
2023 spin_lock_init(&host->irq_lock); 2033 spin_lock_init(&host->irq_lock);
2024 2034
2025 host->iclk = clk_get(&pdev->dev, "ick"); 2035 host->iclk = clk_get(&pdev->dev, "ick");
@@ -2162,8 +2172,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
2162 } 2172 }
2163 } 2173 }
2164 2174
2165 OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); 2175 omap_hsmmc_disable_irq(host);
2166 OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);
2167 2176
2168 mmc_host_lazy_disable(host->mmc); 2177 mmc_host_lazy_disable(host->mmc);
2169 2178
@@ -2258,10 +2267,12 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2258} 2267}
2259 2268
2260#ifdef CONFIG_PM 2269#ifdef CONFIG_PM
2261static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state) 2270static int omap_hsmmc_suspend(struct device *dev)
2262{ 2271{
2263 int ret = 0; 2272 int ret = 0;
2273 struct platform_device *pdev = to_platform_device(dev);
2264 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2274 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2275 pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
2265 2276
2266 if (host && host->suspended) 2277 if (host && host->suspended)
2267 return 0; 2278 return 0;
@@ -2281,12 +2292,9 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
2281 } 2292 }
2282 cancel_work_sync(&host->mmc_carddetect_work); 2293 cancel_work_sync(&host->mmc_carddetect_work);
2283 mmc_host_enable(host->mmc); 2294 mmc_host_enable(host->mmc);
2284 ret = mmc_suspend_host(host->mmc, state); 2295 ret = mmc_suspend_host(host->mmc);
2285 if (ret == 0) { 2296 if (ret == 0) {
2286 OMAP_HSMMC_WRITE(host->base, ISE, 0); 2297 omap_hsmmc_disable_irq(host);
2287 OMAP_HSMMC_WRITE(host->base, IE, 0);
2288
2289
2290 OMAP_HSMMC_WRITE(host->base, HCTL, 2298 OMAP_HSMMC_WRITE(host->base, HCTL,
2291 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); 2299 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
2292 mmc_host_disable(host->mmc); 2300 mmc_host_disable(host->mmc);
@@ -2310,9 +2318,10 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
2310} 2318}
2311 2319
2312/* Routine to resume the MMC device */ 2320/* Routine to resume the MMC device */
2313static int omap_hsmmc_resume(struct platform_device *pdev) 2321static int omap_hsmmc_resume(struct device *dev)
2314{ 2322{
2315 int ret = 0; 2323 int ret = 0;
2324 struct platform_device *pdev = to_platform_device(dev);
2316 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2325 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2317 2326
2318 if (host && !host->suspended) 2327 if (host && !host->suspended)
@@ -2363,13 +2372,17 @@ clk_en_err:
2363#define omap_hsmmc_resume NULL 2372#define omap_hsmmc_resume NULL
2364#endif 2373#endif
2365 2374
2366static struct platform_driver omap_hsmmc_driver = { 2375static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
2367 .remove = omap_hsmmc_remove,
2368 .suspend = omap_hsmmc_suspend, 2376 .suspend = omap_hsmmc_suspend,
2369 .resume = omap_hsmmc_resume, 2377 .resume = omap_hsmmc_resume,
2378};
2379
2380static struct platform_driver omap_hsmmc_driver = {
2381 .remove = omap_hsmmc_remove,
2370 .driver = { 2382 .driver = {
2371 .name = DRIVER_NAME, 2383 .name = DRIVER_NAME,
2372 .owner = THIS_MODULE, 2384 .owner = THIS_MODULE,
2385 .pm = &omap_hsmmc_dev_pm_ops,
2373 }, 2386 },
2374}; 2387};
2375 2388
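The omap_hsmmc rework above removes the semaphore and the in_interrupt() special cases: completion is funnelled through omap_hsmmc_request_done(), the irq_lock spinlock protects req_in_progress and dma_ch, and whichever of the transfer-complete interrupt or the DMA callback finishes last calls mmc_request_done(). A hedged, simplified sketch of that hand-off (foo_host and dma_busy are illustrative; the real driver tracks DMA with dma_ch == -1):

/* Hedged sketch of the "last one out completes the request" pattern. */
static void foo_maybe_complete(struct foo_host *host)
{
	struct mmc_request *mrq = NULL;

	spin_lock(&host->lock);
	/* IRQ path clears req_in_progress, DMA callback clears dma_busy */
	if (!host->req_in_progress && !host->dma_busy) {
		mrq = host->mrq;
		host->mrq = NULL;
	}
	spin_unlock(&host->lock);

	if (mrq)
		mmc_request_done(host->mmc, mrq);
}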
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index e4f00e70a749..0a4e43f37140 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -813,7 +813,7 @@ static int pxamci_suspend(struct device *dev)
813 int ret = 0; 813 int ret = 0;
814 814
815 if (mmc) 815 if (mmc)
816 ret = mmc_suspend_host(mmc, PMSG_SUSPEND); 816 ret = mmc_suspend_host(mmc);
817 817
818 return ret; 818 return ret;
819} 819}
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2fdf7689ae6c..2e16e0a90a5e 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1881,9 +1881,8 @@ MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
1881static int s3cmci_suspend(struct device *dev) 1881static int s3cmci_suspend(struct device *dev)
1882{ 1882{
1883 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); 1883 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
1884 struct pm_message event = { PM_EVENT_SUSPEND };
1885 1884
1886 return mmc_suspend_host(mmc, event); 1885 return mmc_suspend_host(mmc);
1887} 1886}
1888 1887
1889static int s3cmci_resume(struct device *dev) 1888static int s3cmci_resume(struct device *dev)
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
index 7802a543d8fc..a2e9820cd42f 100644
--- a/drivers/mmc/host/sdhci-of-core.c
+++ b/drivers/mmc/host/sdhci-of-core.c
@@ -89,7 +89,7 @@ static int sdhci_of_suspend(struct of_device *ofdev, pm_message_t state)
89{ 89{
90 struct sdhci_host *host = dev_get_drvdata(&ofdev->dev); 90 struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
91 91
92 return mmc_suspend_host(host->mmc, state); 92 return mmc_suspend_host(host->mmc);
93} 93}
94 94
95static int sdhci_of_resume(struct of_device *ofdev) 95static int sdhci_of_resume(struct of_device *ofdev)
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d5b11a17e648..c8623de13af3 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -129,12 +129,12 @@ struct sdhci_of_data sdhci_esdhc = {
129 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | 129 SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET |
130 SDHCI_QUIRK_NO_CARD_NO_RESET, 130 SDHCI_QUIRK_NO_CARD_NO_RESET,
131 .ops = { 131 .ops = {
132 .readl = sdhci_be32bs_readl, 132 .read_l = sdhci_be32bs_readl,
133 .readw = esdhc_readw, 133 .read_w = esdhc_readw,
134 .readb = sdhci_be32bs_readb, 134 .read_b = sdhci_be32bs_readb,
135 .writel = sdhci_be32bs_writel, 135 .write_l = sdhci_be32bs_writel,
136 .writew = esdhc_writew, 136 .write_w = esdhc_writew,
137 .writeb = esdhc_writeb, 137 .write_b = esdhc_writeb,
138 .set_clock = esdhc_set_clock, 138 .set_clock = esdhc_set_clock,
139 .enable_dma = esdhc_enable_dma, 139 .enable_dma = esdhc_enable_dma,
140 .get_max_clock = esdhc_get_max_clock, 140 .get_max_clock = esdhc_get_max_clock,
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 35117f3ed757..68ddb7546ae2 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -55,11 +55,11 @@ struct sdhci_of_data sdhci_hlwd = {
55 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | 55 .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
56 SDHCI_QUIRK_32BIT_DMA_SIZE, 56 SDHCI_QUIRK_32BIT_DMA_SIZE,
57 .ops = { 57 .ops = {
58 .readl = sdhci_be32bs_readl, 58 .read_l = sdhci_be32bs_readl,
59 .readw = sdhci_be32bs_readw, 59 .read_w = sdhci_be32bs_readw,
60 .readb = sdhci_be32bs_readb, 60 .read_b = sdhci_be32bs_readb,
61 .writel = sdhci_hlwd_writel, 61 .write_l = sdhci_hlwd_writel,
62 .writew = sdhci_hlwd_writew, 62 .write_w = sdhci_hlwd_writew,
63 .writeb = sdhci_hlwd_writeb, 63 .write_b = sdhci_hlwd_writeb,
64 }, 64 },
65}; 65};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 6701af629c30..65483fdea45b 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -628,7 +628,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
628 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot)); 628 host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot));
629 if (IS_ERR(host)) { 629 if (IS_ERR(host)) {
630 dev_err(&pdev->dev, "cannot allocate host\n"); 630 dev_err(&pdev->dev, "cannot allocate host\n");
631 return ERR_PTR(PTR_ERR(host)); 631 return ERR_CAST(host);
632 } 632 }
633 633
634 slot = sdhci_priv(host); 634 slot = sdhci_priv(host);
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 297f40ae6ad5..b6ee0d719698 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -29,6 +29,7 @@
29#include <linux/mmc/host.h> 29#include <linux/mmc/host.h>
30 30
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/sdhci-pltfm.h>
32 33
33#include "sdhci.h" 34#include "sdhci.h"
34 35
@@ -49,19 +50,18 @@ static struct sdhci_ops sdhci_pltfm_ops = {
49 50
50static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) 51static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
51{ 52{
53 struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
52 struct sdhci_host *host; 54 struct sdhci_host *host;
53 struct resource *iomem; 55 struct resource *iomem;
54 int ret; 56 int ret;
55 57
56 BUG_ON(pdev == NULL);
57
58 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 58 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
59 if (!iomem) { 59 if (!iomem) {
60 ret = -ENOMEM; 60 ret = -ENOMEM;
61 goto err; 61 goto err;
62 } 62 }
63 63
64 if (resource_size(iomem) != 0x100) 64 if (resource_size(iomem) < 0x100)
65 dev_err(&pdev->dev, "Invalid iomem size. You may " 65 dev_err(&pdev->dev, "Invalid iomem size. You may "
66 "experience problems.\n"); 66 "experience problems.\n");
67 67
@@ -76,7 +76,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
76 } 76 }
77 77
78 host->hw_name = "platform"; 78 host->hw_name = "platform";
79 host->ops = &sdhci_pltfm_ops; 79 if (pdata && pdata->ops)
80 host->ops = pdata->ops;
81 else
82 host->ops = &sdhci_pltfm_ops;
83 if (pdata)
84 host->quirks = pdata->quirks;
80 host->irq = platform_get_irq(pdev, 0); 85 host->irq = platform_get_irq(pdev, 0);
81 86
82 if (!request_mem_region(iomem->start, resource_size(iomem), 87 if (!request_mem_region(iomem->start, resource_size(iomem),
@@ -93,6 +98,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
93 goto err_remap; 98 goto err_remap;
94 } 99 }
95 100
101 if (pdata && pdata->init) {
102 ret = pdata->init(host);
103 if (ret)
104 goto err_plat_init;
105 }
106
96 ret = sdhci_add_host(host); 107 ret = sdhci_add_host(host);
97 if (ret) 108 if (ret)
98 goto err_add_host; 109 goto err_add_host;
@@ -102,6 +113,9 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
102 return 0; 113 return 0;
103 114
104err_add_host: 115err_add_host:
116 if (pdata && pdata->exit)
117 pdata->exit(host);
118err_plat_init:
105 iounmap(host->ioaddr); 119 iounmap(host->ioaddr);
106err_remap: 120err_remap:
107 release_mem_region(iomem->start, resource_size(iomem)); 121 release_mem_region(iomem->start, resource_size(iomem));
@@ -114,6 +128,7 @@ err:
114 128
115static int __devexit sdhci_pltfm_remove(struct platform_device *pdev) 129static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
116{ 130{
131 struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
117 struct sdhci_host *host = platform_get_drvdata(pdev); 132 struct sdhci_host *host = platform_get_drvdata(pdev);
118 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 133 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
119 int dead; 134 int dead;
@@ -125,6 +140,8 @@ static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
125 dead = 1; 140 dead = 1;
126 141
127 sdhci_remove_host(host, dead); 142 sdhci_remove_host(host, dead);
143 if (pdata && pdata->exit)
144 pdata->exit(host);
128 iounmap(host->ioaddr); 145 iounmap(host->ioaddr);
129 release_mem_region(iomem->start, resource_size(iomem)); 146 release_mem_region(iomem->start, resource_size(iomem));
130 sdhci_free_host(host); 147 sdhci_free_host(host);
@@ -165,4 +182,3 @@ MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
165MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); 182MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
166MODULE_LICENSE("GPL v2"); 183MODULE_LICENSE("GPL v2");
167MODULE_ALIAS("platform:sdhci"); 184MODULE_ALIAS("platform:sdhci");
168
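sdhci-pltfm now honours an optional struct sdhci_pltfm_data handed in as platform data: board code may override the ops table, set quirks, and register init/exit hooks. A hedged sketch of a board file supplying it (my_* names are illustrative; the field set is taken from the hunk above, assuming the driver still binds to the "sdhci" platform name):

#include <linux/platform_device.h>
#include <linux/sdhci-pltfm.h>

/* Hedged sketch; my_* identifiers are illustrative. */
static int my_sdhci_init(struct sdhci_host *host)
{
	/* e.g. enable a board-level clock or level shifter */
	return 0;
}

static struct sdhci_pltfm_data my_sdhci_pdata = {
	.ops    = NULL,			/* NULL falls back to the default sdhci_pltfm_ops */
	.quirks = 0,			/* or the SDHCI_QUIRK_* flags this controller needs */
	.init   = my_sdhci_init,
	.exit   = NULL,
};

static struct platform_device my_sdhci_device = {
	.name = "sdhci",
	.id   = 0,
	.dev  = {
		.platform_data = &my_sdhci_pdata,
	},
};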
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 2136794c0cfa..af217924a76e 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -317,12 +317,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
317 host->irq = irq; 317 host->irq = irq;
318 318
319 /* Setup quirks for the controller */ 319 /* Setup quirks for the controller */
320 320 host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
321 /* Currently with ADMA enabled we are getting some length
322 * interrupts that are not being dealt with, do disable
323 * ADMA until this is sorted out. */
324 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
325 host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE;
326 321
327#ifndef CONFIG_MMC_SDHCI_S3C_DMA 322#ifndef CONFIG_MMC_SDHCI_S3C_DMA
328 323
@@ -330,9 +325,6 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
330 * support as well. */ 325 * support as well. */
331 host->quirks |= SDHCI_QUIRK_BROKEN_DMA; 326 host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
332 327
333 /* PIO currently has problems with multi-block IO */
334 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
335
336#endif /* CONFIG_MMC_SDHCI_S3C_DMA */ 328#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
337 329
338 /* It seems we do not get an DATA transfer complete on non-busy 330 /* It seems we do not get an DATA transfer complete on non-busy
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
new file mode 100644
index 000000000000..d70c54c7b70a
--- /dev/null
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -0,0 +1,298 @@
1/*
2 * drivers/mmc/host/sdhci-spear.c
3 *
4 * Support of SDHCI platform devices for spear soc family
5 *
6 * Copyright (C) 2010 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com>
8 *
9 * Inspired by sdhci-pltfm.c
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <linux/clk.h>
17#include <linux/delay.h>
18#include <linux/gpio.h>
19#include <linux/highmem.h>
20#include <linux/interrupt.h>
21#include <linux/irq.h>
22#include <linux/platform_device.h>
23#include <linux/slab.h>
24#include <linux/mmc/host.h>
25#include <linux/mmc/sdhci-spear.h>
26#include <linux/io.h>
27#include "sdhci.h"
28
29struct spear_sdhci {
30 struct clk *clk;
31 struct sdhci_plat_data *data;
32};
33
34/* sdhci ops */
35static struct sdhci_ops sdhci_pltfm_ops = {
36 /* Nothing to do for now. */
37};
38
39/* gpio card detection interrupt handler */
40static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
41{
42 struct platform_device *pdev = dev_id;
43 struct sdhci_host *host = platform_get_drvdata(pdev);
44 struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
45 unsigned long gpio_irq_type;
46 int val;
47
48 val = gpio_get_value(sdhci->data->card_int_gpio);
49
50 /* val == 1 -> card removed, val == 0 -> card inserted */
51 /* if card removed - set irq for low level, else vice versa */
52 gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
53 set_irq_type(irq, gpio_irq_type);
54
55 if (sdhci->data->card_power_gpio >= 0) {
56 if (!sdhci->data->power_always_enb) {
57 /* if card inserted, give power, otherwise remove it */
58 val = sdhci->data->power_active_high ? !val : val ;
59 gpio_set_value(sdhci->data->card_power_gpio, val);
60 }
61 }
62
63 /* inform sdhci driver about card insertion/removal */
64 tasklet_schedule(&host->card_tasklet);
65
66 return IRQ_HANDLED;
67}
68
69static int __devinit sdhci_probe(struct platform_device *pdev)
70{
71 struct sdhci_host *host;
72 struct resource *iomem;
73 struct spear_sdhci *sdhci;
74 int ret;
75
76 BUG_ON(pdev == NULL);
77
78 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
79 if (!iomem) {
80 ret = -ENOMEM;
81 dev_dbg(&pdev->dev, "memory resource not defined\n");
82 goto err;
83 }
84
85 if (!request_mem_region(iomem->start, resource_size(iomem),
86 "spear-sdhci")) {
87 ret = -EBUSY;
88 dev_dbg(&pdev->dev, "cannot request region\n");
89 goto err;
90 }
91
92 sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL);
93 if (!sdhci) {
94 ret = -ENOMEM;
95 dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
96 goto err_kzalloc;
97 }
98
99 /* clk enable */
100 sdhci->clk = clk_get(&pdev->dev, NULL);
101 if (IS_ERR(sdhci->clk)) {
102 ret = PTR_ERR(sdhci->clk);
103 dev_dbg(&pdev->dev, "Error getting clock\n");
104 goto err_clk_get;
105 }
106
107 ret = clk_enable(sdhci->clk);
108 if (ret) {
109 dev_dbg(&pdev->dev, "Error enabling clock\n");
110 goto err_clk_enb;
111 }
112
113 /* overwrite platform_data */
114 sdhci->data = dev_get_platdata(&pdev->dev);
115 pdev->dev.platform_data = sdhci;
116
117 if (pdev->dev.parent)
118 host = sdhci_alloc_host(pdev->dev.parent, 0);
119 else
120 host = sdhci_alloc_host(&pdev->dev, 0);
121
122 if (IS_ERR(host)) {
123 ret = PTR_ERR(host);
124 dev_dbg(&pdev->dev, "error allocating host\n");
125 goto err_alloc_host;
126 }
127
128 host->hw_name = "sdhci";
129 host->ops = &sdhci_pltfm_ops;
130 host->irq = platform_get_irq(pdev, 0);
131 host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
132
133 host->ioaddr = ioremap(iomem->start, resource_size(iomem));
134 if (!host->ioaddr) {
135 ret = -ENOMEM;
136 dev_dbg(&pdev->dev, "failed to remap registers\n");
137 goto err_ioremap;
138 }
139
140 ret = sdhci_add_host(host);
141 if (ret) {
142 dev_dbg(&pdev->dev, "error adding host\n");
143 goto err_add_host;
144 }
145
146 platform_set_drvdata(pdev, host);
147
148 /*
149 * It is optional to use GPIOs for sdhci Power control & sdhci card
150 * interrupt detection. If sdhci->data is NULL, then use original sdhci
151 * lines otherwise GPIO lines.
152 * If GPIO is selected for power control, then power should be disabled
153 * after card removal and should be enabled when card insertion
154 * interrupt occurs
155 */
156 if (!sdhci->data)
157 return 0;
158
159 if (sdhci->data->card_power_gpio >= 0) {
160 int val = 0;
161
162 ret = gpio_request(sdhci->data->card_power_gpio, "sdhci");
163 if (ret < 0) {
164 dev_dbg(&pdev->dev, "gpio request fail: %d\n",
165 sdhci->data->card_power_gpio);
166 goto err_pgpio_request;
167 }
168
169 if (sdhci->data->power_always_enb)
170 val = sdhci->data->power_active_high;
171 else
172 val = !sdhci->data->power_active_high;
173
174 ret = gpio_direction_output(sdhci->data->card_power_gpio, val);
175 if (ret) {
176 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
177 sdhci->data->card_power_gpio);
178 goto err_pgpio_direction;
179 }
180
181 gpio_set_value(sdhci->data->card_power_gpio, 1);
182 }
183
184 if (sdhci->data->card_int_gpio >= 0) {
185 ret = gpio_request(sdhci->data->card_int_gpio, "sdhci");
186 if (ret < 0) {
187 dev_dbg(&pdev->dev, "gpio request fail: %d\n",
188 sdhci->data->card_int_gpio);
189 goto err_igpio_request;
190 }
191
192 ret = gpio_direction_input(sdhci->data->card_int_gpio);
193 if (ret) {
194 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
195 sdhci->data->card_int_gpio);
196 goto err_igpio_direction;
197 }
198 ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio),
199 sdhci_gpio_irq, IRQF_TRIGGER_LOW,
200 mmc_hostname(host->mmc), pdev);
201 if (ret) {
202 dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
203 sdhci->data->card_int_gpio);
204 goto err_igpio_request_irq;
205 }
206
207 }
208
209 return 0;
210
211err_igpio_request_irq:
212err_igpio_direction:
213 if (sdhci->data->card_int_gpio >= 0)
214 gpio_free(sdhci->data->card_int_gpio);
215err_igpio_request:
216err_pgpio_direction:
217 if (sdhci->data->card_power_gpio >= 0)
218 gpio_free(sdhci->data->card_power_gpio);
219err_pgpio_request:
220 platform_set_drvdata(pdev, NULL);
221 sdhci_remove_host(host, 1);
222err_add_host:
223 iounmap(host->ioaddr);
224err_ioremap:
225 sdhci_free_host(host);
226err_alloc_host:
227 clk_disable(sdhci->clk);
228err_clk_enb:
229 clk_put(sdhci->clk);
230err_clk_get:
231 kfree(sdhci);
232err_kzalloc:
233 release_mem_region(iomem->start, resource_size(iomem));
234err:
235 dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
236 return ret;
237}
238
239static int __devexit sdhci_remove(struct platform_device *pdev)
240{
241 struct sdhci_host *host = platform_get_drvdata(pdev);
242 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
243 struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
244 int dead;
245 u32 scratch;
246
247 if (sdhci->data) {
248 if (sdhci->data->card_int_gpio >= 0) {
249 free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
250 gpio_free(sdhci->data->card_int_gpio);
251 }
252
253 if (sdhci->data->card_power_gpio >= 0)
254 gpio_free(sdhci->data->card_power_gpio);
255 }
256
257 platform_set_drvdata(pdev, NULL);
258 dead = 0;
259 scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
260 if (scratch == (u32)-1)
261 dead = 1;
262
263 sdhci_remove_host(host, dead);
264 iounmap(host->ioaddr);
265 sdhci_free_host(host);
266 clk_disable(sdhci->clk);
267 clk_put(sdhci->clk);
268 kfree(sdhci);
269 if (iomem)
270 release_mem_region(iomem->start, resource_size(iomem));
271
272 return 0;
273}
274
275static struct platform_driver sdhci_driver = {
276 .driver = {
277 .name = "sdhci",
278 .owner = THIS_MODULE,
279 },
280 .probe = sdhci_probe,
281 .remove = __devexit_p(sdhci_remove),
282};
283
284static int __init sdhci_init(void)
285{
286 return platform_driver_register(&sdhci_driver);
287}
288module_init(sdhci_init);
289
290static void __exit sdhci_exit(void)
291{
292 platform_driver_unregister(&sdhci_driver);
293}
294module_exit(sdhci_exit);
295
296MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
297MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
298MODULE_LICENSE("GPL v2");
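The new sdhci-spear driver optionally consumes a struct sdhci_plat_data describing GPIO-based card detection and slot power. A hedged board-side sketch using only the fields visible above (GPIO numbers are placeholders):

#include <linux/mmc/sdhci-spear.h>

/* Hedged sketch; GPIO numbers are placeholders, not real board wiring. */
static struct sdhci_plat_data spear_sdhci_pdata = {
	.card_int_gpio     = 42,	/* card-detect interrupt line */
	.card_power_gpio   = 43,	/* slot power switch, -1 if unused */
	.power_active_high = 1,		/* drive high to power the slot */
	.power_always_enb  = 0,		/* gate power on insertion/removal */
};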
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9d4fdfa685e5..c6d1bd8d4ac4 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -496,12 +496,22 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
496 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); 496 WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4);
497 } 497 }
498 498
499 /* 499 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
500 * Add a terminating entry. 500 /*
501 */ 501 * Mark the last descriptor as the terminating descriptor
502 */
503 if (desc != host->adma_desc) {
504 desc -= 8;
505 desc[0] |= 0x2; /* end */
506 }
507 } else {
508 /*
509 * Add a terminating entry.
510 */
502 511
503 /* nop, end, valid */ 512 /* nop, end, valid */
504 sdhci_set_adma_desc(desc, 0, 0, 0x3); 513 sdhci_set_adma_desc(desc, 0, 0, 0x3);
514 }
505 515
506 /* 516 /*
507 * Resync align buffer as we might have changed it. 517 * Resync align buffer as we might have changed it.
@@ -1587,7 +1597,7 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
1587 1597
1588 sdhci_disable_card_detection(host); 1598 sdhci_disable_card_detection(host);
1589 1599
1590 ret = mmc_suspend_host(host->mmc, state); 1600 ret = mmc_suspend_host(host->mmc);
1591 if (ret) 1601 if (ret)
1592 return ret; 1602 return ret;
1593 1603
@@ -1744,7 +1754,8 @@ int sdhci_add_host(struct sdhci_host *host)
1744 host->max_clk = 1754 host->max_clk =
1745 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; 1755 (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
1746 host->max_clk *= 1000000; 1756 host->max_clk *= 1000000;
1747 if (host->max_clk == 0) { 1757 if (host->max_clk == 0 || host->quirks &
1758 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
1748 if (!host->ops->get_max_clock) { 1759 if (!host->ops->get_max_clock) {
1749 printk(KERN_ERR 1760 printk(KERN_ERR
1750 "%s: Hardware doesn't specify base clock " 1761 "%s: Hardware doesn't specify base clock "
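The sdhci.c hunk above adds handling for SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC: instead of appending a nop descriptor that carries the End attribute, the End bit (0x2) is ORed into the last real descriptor (each descriptor is 8 bytes, hence desc -= 8). A hedged illustration of the two resulting ADMA table endings:

/* Default termination:                 With the quirk set:
 *   [ TRAN desc | valid        ]         [ TRAN desc | valid       ]
 *   [ TRAN desc | valid        ]         [ TRAN desc | valid | end ]  <- desc[0] |= 0x2
 *   [ NOP  desc | valid | end  ]
 */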
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 842f46f94284..c8468134adc9 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -127,7 +127,7 @@
127#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \ 127#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
128 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \ 128 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
129 SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \ 129 SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
130 SDHCI_INT_DATA_END_BIT | SDHCI_ADMA_ERROR) 130 SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR)
131#define SDHCI_INT_ALL_MASK ((unsigned int)-1) 131#define SDHCI_INT_ALL_MASK ((unsigned int)-1)
132 132
133#define SDHCI_ACMD12_ERR 0x3C 133#define SDHCI_ACMD12_ERR 0x3C
@@ -236,6 +236,10 @@ struct sdhci_host {
236#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23) 236#define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23)
237/* Controller uses SDCLK instead of TMCLK for data timeouts */ 237/* Controller uses SDCLK instead of TMCLK for data timeouts */
238#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24) 238#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24)
239/* Controller reports wrong base clock capability */
240#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25)
241/* Controller cannot support End Attribute in NOP ADMA descriptor */
242#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26)
239 243
240 int irq; /* Device IRQ */ 244 int irq; /* Device IRQ */
241 void __iomem * ioaddr; /* Mapped address */ 245 void __iomem * ioaddr; /* Mapped address */
@@ -294,12 +298,12 @@ struct sdhci_host {
294 298
295struct sdhci_ops { 299struct sdhci_ops {
296#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS 300#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
297 u32 (*readl)(struct sdhci_host *host, int reg); 301 u32 (*read_l)(struct sdhci_host *host, int reg);
298 u16 (*readw)(struct sdhci_host *host, int reg); 302 u16 (*read_w)(struct sdhci_host *host, int reg);
299 u8 (*readb)(struct sdhci_host *host, int reg); 303 u8 (*read_b)(struct sdhci_host *host, int reg);
300 void (*writel)(struct sdhci_host *host, u32 val, int reg); 304 void (*write_l)(struct sdhci_host *host, u32 val, int reg);
301 void (*writew)(struct sdhci_host *host, u16 val, int reg); 305 void (*write_w)(struct sdhci_host *host, u16 val, int reg);
302 void (*writeb)(struct sdhci_host *host, u8 val, int reg); 306 void (*write_b)(struct sdhci_host *host, u8 val, int reg);
303#endif 307#endif
304 308
305 void (*set_clock)(struct sdhci_host *host, unsigned int clock); 309 void (*set_clock)(struct sdhci_host *host, unsigned int clock);
@@ -314,48 +318,48 @@ struct sdhci_ops {
314 318
315static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg) 319static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg)
316{ 320{
317 if (unlikely(host->ops->writel)) 321 if (unlikely(host->ops->write_l))
318 host->ops->writel(host, val, reg); 322 host->ops->write_l(host, val, reg);
319 else 323 else
320 writel(val, host->ioaddr + reg); 324 writel(val, host->ioaddr + reg);
321} 325}
322 326
323static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg) 327static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg)
324{ 328{
325 if (unlikely(host->ops->writew)) 329 if (unlikely(host->ops->write_w))
326 host->ops->writew(host, val, reg); 330 host->ops->write_w(host, val, reg);
327 else 331 else
328 writew(val, host->ioaddr + reg); 332 writew(val, host->ioaddr + reg);
329} 333}
330 334
331static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg) 335static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg)
332{ 336{
333 if (unlikely(host->ops->writeb)) 337 if (unlikely(host->ops->write_b))
334 host->ops->writeb(host, val, reg); 338 host->ops->write_b(host, val, reg);
335 else 339 else
336 writeb(val, host->ioaddr + reg); 340 writeb(val, host->ioaddr + reg);
337} 341}
338 342
339static inline u32 sdhci_readl(struct sdhci_host *host, int reg) 343static inline u32 sdhci_readl(struct sdhci_host *host, int reg)
340{ 344{
341 if (unlikely(host->ops->readl)) 345 if (unlikely(host->ops->read_l))
342 return host->ops->readl(host, reg); 346 return host->ops->read_l(host, reg);
343 else 347 else
344 return readl(host->ioaddr + reg); 348 return readl(host->ioaddr + reg);
345} 349}
346 350
347static inline u16 sdhci_readw(struct sdhci_host *host, int reg) 351static inline u16 sdhci_readw(struct sdhci_host *host, int reg)
348{ 352{
349 if (unlikely(host->ops->readw)) 353 if (unlikely(host->ops->read_w))
350 return host->ops->readw(host, reg); 354 return host->ops->read_w(host, reg);
351 else 355 else
352 return readw(host->ioaddr + reg); 356 return readw(host->ioaddr + reg);
353} 357}
354 358
355static inline u8 sdhci_readb(struct sdhci_host *host, int reg) 359static inline u8 sdhci_readb(struct sdhci_host *host, int reg)
356{ 360{
357 if (unlikely(host->ops->readb)) 361 if (unlikely(host->ops->read_b))
358 return host->ops->readb(host, reg); 362 return host->ops->read_b(host, reg);
359 else 363 else
360 return readb(host->ioaddr + reg); 364 return readb(host->ioaddr + reg);
361} 365}
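sdhci.h renames the optional I/O accessor hooks (readl/writel and friends become read_l/write_l etc.), and the inline sdhci_readl()-style wrappers dispatch to them when CONFIG_MMC_SDHCI_IO_ACCESSORS is enabled. A hedged sketch of a host driver installing big-endian accessors under the new names (baz_* is illustrative; assumes <linux/io.h> and "sdhci.h"):

/* Hedged sketch; baz_* accessors are illustrative. Only meaningful when
 * CONFIG_MMC_SDHCI_IO_ACCESSORS is set, since the ops fields exist then. */
static u32 baz_readl(struct sdhci_host *host, int reg)
{
	return ioread32be(host->ioaddr + reg);
}

static void baz_writel(struct sdhci_host *host, u32 val, int reg)
{
	iowrite32be(val, host->ioaddr + reg);
}

static struct sdhci_ops baz_sdhci_ops = {
	.read_l  = baz_readl,	/* was .readl before this rename */
	.write_l = baz_writel,	/* was .writel */
};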
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index cb41e9c3ac07..e7507af3856e 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -519,7 +519,7 @@ static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)
519{ 519{
520 struct mmc_host *mmc = link->priv; 520 struct mmc_host *mmc = link->priv;
521 dev_dbg(&link->dev, "suspend\n"); 521 dev_dbg(&link->dev, "suspend\n");
522 mmc_suspend_host(mmc, PMSG_SUSPEND); 522 mmc_suspend_host(mmc);
523 return 0; 523 return 0;
524} 524}
525 525
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
new file mode 100644
index 000000000000..eb97830c0344
--- /dev/null
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -0,0 +1,965 @@
1/*
2 * MMCIF eMMC driver.
3 *
4 * Copyright (C) 2010 Renesas Solutions Corp.
5 * Yusuke Goda <yusuke.goda.sx@renesas.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License.
10 *
11 *
12 * TODO
13 * 1. DMA
14 * 2. Power management
15 * 3. Handle MMC errors better
16 *
17 */
18
19#include <linux/dma-mapping.h>
20#include <linux/mmc/host.h>
21#include <linux/mmc/card.h>
22#include <linux/mmc/core.h>
23#include <linux/mmc/mmc.h>
24#include <linux/mmc/sdio.h>
25#include <linux/delay.h>
26#include <linux/platform_device.h>
27#include <linux/clk.h>
28#include <linux/mmc/sh_mmcif.h>
29
30#define DRIVER_NAME "sh_mmcif"
31#define DRIVER_VERSION "2010-04-28"
32
33#define MMCIF_CE_CMD_SET 0x00000000
34#define MMCIF_CE_ARG 0x00000008
35#define MMCIF_CE_ARG_CMD12 0x0000000C
36#define MMCIF_CE_CMD_CTRL 0x00000010
37#define MMCIF_CE_BLOCK_SET 0x00000014
38#define MMCIF_CE_CLK_CTRL 0x00000018
39#define MMCIF_CE_BUF_ACC 0x0000001C
40#define MMCIF_CE_RESP3 0x00000020
41#define MMCIF_CE_RESP2 0x00000024
42#define MMCIF_CE_RESP1 0x00000028
43#define MMCIF_CE_RESP0 0x0000002C
44#define MMCIF_CE_RESP_CMD12 0x00000030
45#define MMCIF_CE_DATA 0x00000034
46#define MMCIF_CE_INT 0x00000040
47#define MMCIF_CE_INT_MASK 0x00000044
48#define MMCIF_CE_HOST_STS1 0x00000048
49#define MMCIF_CE_HOST_STS2 0x0000004C
50#define MMCIF_CE_VERSION 0x0000007C
51
52/* CE_CMD_SET */
53#define CMD_MASK 0x3f000000
54#define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22))
55#define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
56#define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */
57#define CMD_SET_RBSY (1 << 21) /* R1b */
58#define CMD_SET_CCSEN (1 << 20)
59#define CMD_SET_WDAT (1 << 19) /* 1: on data, 0: no data */
60#define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */
61#define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */
62#define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */
63#define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */
64#define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */
65#define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */
66#define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check*/
67#define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check*/
68#define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check*/
69#define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check*/
70#define CMD_SET_CRCSTE (1 << 8) /* 1: do not receive CRC status */
71#define CMD_SET_TBIT (1 << 7) /* 1: transmission bit "Low" */
72#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */
73#define CMD_SET_CCSH (1 << 5)
74#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */
75#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */
76#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */
77
78/* CE_CMD_CTRL */
79#define CMD_CTRL_BREAK (1 << 0)
80
81/* CE_BLOCK_SET */
82#define BLOCK_SIZE_MASK 0x0000ffff
83
84/* CE_CLK_CTRL */
85#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */
86#define CLK_CLEAR ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
87#define CLK_SUP_PCLK ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16))
88#define SRSPTO_256 ((1 << 13) | (0 << 12)) /* resp timeout */
89#define SRBSYTO_29 ((1 << 11) | (1 << 10) | \
90 (1 << 9) | (1 << 8)) /* resp busy timeout */
91#define SRWDTO_29 ((1 << 7) | (1 << 6) | \
92 (1 << 5) | (1 << 4)) /* read/write timeout */
93#define SCCSTO_29 ((1 << 3) | (1 << 2) | \
94 (1 << 1) | (1 << 0)) /* ccs timeout */
95
96/* CE_BUF_ACC */
97#define BUF_ACC_DMAWEN (1 << 25)
98#define BUF_ACC_DMAREN (1 << 24)
99#define BUF_ACC_BUSW_32 (0 << 17)
100#define BUF_ACC_BUSW_16 (1 << 17)
101#define BUF_ACC_ATYP (1 << 16)
102
103/* CE_INT */
104#define INT_CCSDE (1 << 29)
105#define INT_CMD12DRE (1 << 26)
106#define INT_CMD12RBE (1 << 25)
107#define INT_CMD12CRE (1 << 24)
108#define INT_DTRANE (1 << 23)
109#define INT_BUFRE (1 << 22)
110#define INT_BUFWEN (1 << 21)
111#define INT_BUFREN (1 << 20)
112#define INT_CCSRCV (1 << 19)
113#define INT_RBSYE (1 << 17)
114#define INT_CRSPE (1 << 16)
115#define INT_CMDVIO (1 << 15)
116#define INT_BUFVIO (1 << 14)
117#define INT_WDATERR (1 << 11)
118#define INT_RDATERR (1 << 10)
119#define INT_RIDXERR (1 << 9)
120#define INT_RSPERR (1 << 8)
121#define INT_CCSTO (1 << 5)
122#define INT_CRCSTO (1 << 4)
123#define INT_WDATTO (1 << 3)
124#define INT_RDATTO (1 << 2)
125#define INT_RBSYTO (1 << 1)
126#define INT_RSPTO (1 << 0)
127#define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \
128 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
129 INT_CCSTO | INT_CRCSTO | INT_WDATTO | \
130 INT_RDATTO | INT_RBSYTO | INT_RSPTO)
131
132/* CE_INT_MASK */
133#define MASK_ALL 0x00000000
134#define MASK_MCCSDE (1 << 29)
135#define MASK_MCMD12DRE (1 << 26)
136#define MASK_MCMD12RBE (1 << 25)
137#define MASK_MCMD12CRE (1 << 24)
138#define MASK_MDTRANE (1 << 23)
139#define MASK_MBUFRE (1 << 22)
140#define MASK_MBUFWEN (1 << 21)
141#define MASK_MBUFREN (1 << 20)
142#define MASK_MCCSRCV (1 << 19)
143#define MASK_MRBSYE (1 << 17)
144#define MASK_MCRSPE (1 << 16)
145#define MASK_MCMDVIO (1 << 15)
146#define MASK_MBUFVIO (1 << 14)
147#define MASK_MWDATERR (1 << 11)
148#define MASK_MRDATERR (1 << 10)
149#define MASK_MRIDXERR (1 << 9)
150#define MASK_MRSPERR (1 << 8)
151#define MASK_MCCSTO (1 << 5)
152#define MASK_MCRCSTO (1 << 4)
153#define MASK_MWDATTO (1 << 3)
154#define MASK_MRDATTO (1 << 2)
155#define MASK_MRBSYTO (1 << 1)
156#define MASK_MRSPTO (1 << 0)
157
158/* CE_HOST_STS1 */
159#define STS1_CMDSEQ (1 << 31)
160
161/* CE_HOST_STS2 */
162#define STS2_CRCSTE (1 << 31)
163#define STS2_CRC16E (1 << 30)
164#define STS2_AC12CRCE (1 << 29)
165#define STS2_RSPCRC7E (1 << 28)
166#define STS2_CRCSTEBE (1 << 27)
167#define STS2_RDATEBE (1 << 26)
168#define STS2_AC12REBE (1 << 25)
169#define STS2_RSPEBE (1 << 24)
170#define STS2_AC12IDXE (1 << 23)
171#define STS2_RSPIDXE (1 << 22)
172#define STS2_CCSTO (1 << 15)
173#define STS2_RDATTO (1 << 14)
174#define STS2_DATBSYTO (1 << 13)
175#define STS2_CRCSTTO (1 << 12)
176#define STS2_AC12BSYTO (1 << 11)
177#define STS2_RSPBSYTO (1 << 10)
178#define STS2_AC12RSPTO (1 << 9)
179#define STS2_RSPTO (1 << 8)
180#define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \
181 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
182#define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \
183 STS2_DATBSYTO | STS2_CRCSTTO | \
184 STS2_AC12BSYTO | STS2_RSPBSYTO | \
185 STS2_AC12RSPTO | STS2_RSPTO)
186
187/* CE_VERSION */
188#define SOFT_RST_ON (1 << 31)
189#define SOFT_RST_OFF (0 << 31)
190
191#define CLKDEV_EMMC_DATA 52000000 /* 52MHz */
192#define CLKDEV_MMC_DATA 20000000 /* 20MHz */
193#define CLKDEV_INIT 400000 /* 400 KHz */
194
195struct sh_mmcif_host {
196 struct mmc_host *mmc;
197 struct mmc_data *data;
198 struct mmc_command *cmd;
199 struct platform_device *pd;
200 struct clk *hclk;
201 unsigned int clk;
202 int bus_width;
203 u16 wait_int;
204 u16 sd_error;
205 long timeout;
206 void __iomem *addr;
207 wait_queue_head_t intr_wait;
208};
209
210static inline u32 sh_mmcif_readl(struct sh_mmcif_host *host, unsigned int reg)
211{
212 return readl(host->addr + reg);
213}
214
215static inline void sh_mmcif_writel(struct sh_mmcif_host *host,
216 unsigned int reg, u32 val)
217{
218 writel(val, host->addr + reg);
219}
220
221static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
222 unsigned int reg, u32 val)
223{
224 writel(val | sh_mmcif_readl(host, reg), host->addr + reg);
225}
226
227static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
228 unsigned int reg, u32 val)
229{
230 writel(~val & sh_mmcif_readl(host, reg), host->addr + reg);
231}
232
233
234static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
235{
236 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
237
238 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
239 sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
240
241 if (!clk)
242 return;
243 if (p->sup_pclk && clk == host->clk)
244 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
245 else
246 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
247 (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));
248
249 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
250}
251
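The divider field programmed above is ilog2(__rounddown_pow_of_two(host->clk / clk)) shifted into bits 19:16 of CE_CLK_CTRL and masked with CLK_CLEAR. A minimal standalone sketch of the same arithmetic (not part of the driver), assuming an illustrative 104 MHz source clock and a 25 MHz target rate:

#include <stdio.h>
#include <stdint.h>

/* mirrors __rounddown_pow_of_two() for the small values used here */
static unsigned int rounddown_pow_of_two(unsigned int n)
{
        unsigned int p = 1;

        while (p <= n / 2)
                p *= 2;
        return p;
}

/* mirrors ilog2() for a value already known to be a power of two */
static unsigned int ilog2_u32(unsigned int n)
{
        unsigned int l = 0;

        while (n >>= 1)
                l++;
        return l;
}

int main(void)
{
        unsigned int hclk = 104000000;  /* assumed host->clk */
        unsigned int clk = 25000000;    /* assumed target rate */
        uint32_t clk_clear = (1 << 19) | (1 << 18) | (1 << 17) | (1 << 16);
        unsigned int div = ilog2_u32(rounddown_pow_of_two(hclk / clk));
        uint32_t bits = clk_clear & (div << 16);

        /* hclk / clk = 4, rounded down to 4, ilog2 = 2, so bits 19:16 = 2 */
        printf("divider field = %u, CE_CLK_CTRL divider bits = 0x%08x\n",
               div, bits);
        return 0;
}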
252static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
253{
254 u32 tmp;
255
256 tmp = 0x010f0000 & sh_mmcif_readl(host, MMCIF_CE_CLK_CTRL);
257
258 sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_ON);
259 sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_OFF);
260 sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
261 SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
262 /* byte swap on */
263 sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
264}
265
266static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
267{
268 u32 state1, state2;
269 int ret, timeout = 10000000;
270
271 host->sd_error = 0;
272 host->wait_int = 0;
273
274 state1 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS1);
275 state2 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS2);
276 pr_debug("%s: ERR HOST_STS1 = %08x\n", \
277 DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS1));
278 pr_debug("%s: ERR HOST_STS2 = %08x\n", \
279 DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS2));
280
281 if (state1 & STS1_CMDSEQ) {
282 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
283 sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
284 while (1) {
285 timeout--;
286 if (timeout < 0) {
287 pr_err(DRIVER_NAME": Forced end of " \
288 "command sequence timeout err\n");
289 return -EIO;
290 }
291 if (!(sh_mmcif_readl(host, MMCIF_CE_HOST_STS1)
292 & STS1_CMDSEQ))
293 break;
294 mdelay(1);
295 }
296 sh_mmcif_sync_reset(host);
297 pr_debug(DRIVER_NAME": Forced end of command sequence\n");
298 return -EIO;
299 }
300
301 if (state2 & STS2_CRC_ERR) {
302 pr_debug(DRIVER_NAME": CRC error occurred\n");
303 ret = -EIO;
304 } else if (state2 & STS2_TIMEOUT_ERR) {
305 pr_debug(DRIVER_NAME": Timeout error occurred\n");
306 ret = -ETIMEDOUT;
307 } else {
308 pr_debug(DRIVER_NAME": End/Index error occurred\n");
309 ret = -EIO;
310 }
311 return ret;
312}
313
314static int sh_mmcif_single_read(struct sh_mmcif_host *host,
315 struct mmc_request *mrq)
316{
317 struct mmc_data *data = mrq->data;
318 long time;
319 u32 blocksize, i, *p = sg_virt(data->sg);
320
321 host->wait_int = 0;
322
323 /* buf read enable */
324 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
325 time = wait_event_interruptible_timeout(host->intr_wait,
326 host->wait_int == 1 ||
327 host->sd_error == 1, host->timeout);
328 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
329 return sh_mmcif_error_manage(host);
330
331 host->wait_int = 0;
332 blocksize = (BLOCK_SIZE_MASK &
333 sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
334 for (i = 0; i < blocksize / 4; i++)
335 *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
336
337 /* buffer read end */
338 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
339 time = wait_event_interruptible_timeout(host->intr_wait,
340 host->wait_int == 1 ||
341 host->sd_error == 1, host->timeout);
342 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
343 return sh_mmcif_error_manage(host);
344
345 host->wait_int = 0;
346 return 0;
347}
348
349static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
350 struct mmc_request *mrq)
351{
352 struct mmc_data *data = mrq->data;
353 long time;
354 u32 blocksize, i, j, sec, *p;
355
356 blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
357 for (j = 0; j < data->sg_len; j++) {
358 p = sg_virt(data->sg);
359 host->wait_int = 0;
360 for (sec = 0; sec < data->sg->length / blocksize; sec++) {
361 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
362 /* buf read enable */
363 time = wait_event_interruptible_timeout(host->intr_wait,
364 host->wait_int == 1 ||
365 host->sd_error == 1, host->timeout);
366
367 if (host->wait_int != 1 &&
368 (time == 0 || host->sd_error != 0))
369 return sh_mmcif_error_manage(host);
370
371 host->wait_int = 0;
372 for (i = 0; i < blocksize / 4; i++)
373 *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA);
374 }
375 if (j < data->sg_len - 1)
376 data->sg++;
377 }
378 return 0;
379}
380
381static int sh_mmcif_single_write(struct sh_mmcif_host *host,
382 struct mmc_request *mrq)
383{
384 struct mmc_data *data = mrq->data;
385 long time;
386 u32 blocksize, i, *p = sg_virt(data->sg);
387
388 host->wait_int = 0;
389 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
390
391 /* buf write enable */
392 time = wait_event_interruptible_timeout(host->intr_wait,
393 host->wait_int == 1 ||
394 host->sd_error == 1, host->timeout);
395 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
396 return sh_mmcif_error_manage(host);
397
398 host->wait_int = 0;
399 blocksize = (BLOCK_SIZE_MASK &
400 sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3;
401 for (i = 0; i < blocksize / 4; i++)
402 sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
403
404 /* buffer write end */
405 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
406
407 time = wait_event_interruptible_timeout(host->intr_wait,
408 host->wait_int == 1 ||
409 host->sd_error == 1, host->timeout);
410 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0))
411 return sh_mmcif_error_manage(host);
412
413 host->wait_int = 0;
414 return 0;
415}
416
417static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
418 struct mmc_request *mrq)
419{
420 struct mmc_data *data = mrq->data;
421 long time;
422 u32 i, sec, j, blocksize, *p;
423
424 blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET);
425
426 for (j = 0; j < data->sg_len; j++) {
427 p = sg_virt(data->sg);
428 host->wait_int = 0;
429 for (sec = 0; sec < data->sg->length / blocksize; sec++) {
430 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
431 /* buf write enable*/
432 time = wait_event_interruptible_timeout(host->intr_wait,
433 host->wait_int == 1 ||
434 host->sd_error == 1, host->timeout);
435
436 if (host->wait_int != 1 &&
437 (time == 0 || host->sd_error != 0))
438 return sh_mmcif_error_manage(host);
439
440 host->wait_int = 0;
441 for (i = 0; i < blocksize / 4; i++)
442 sh_mmcif_writel(host, MMCIF_CE_DATA, *p++);
443 }
444 if (j < data->sg_len - 1)
445 data->sg++;
446 }
447 return 0;
448}
449
450static void sh_mmcif_get_response(struct sh_mmcif_host *host,
451 struct mmc_command *cmd)
452{
453 if (cmd->flags & MMC_RSP_136) {
454 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP3);
455 cmd->resp[1] = sh_mmcif_readl(host, MMCIF_CE_RESP2);
456 cmd->resp[2] = sh_mmcif_readl(host, MMCIF_CE_RESP1);
457 cmd->resp[3] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
458 } else
459 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP0);
460}
461
462static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
463 struct mmc_command *cmd)
464{
465 cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP_CMD12);
466}
467
468static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
469 struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
470{
471 u32 tmp = 0;
472
473 /* Response Type check */
474 switch (mmc_resp_type(cmd)) {
475 case MMC_RSP_NONE:
476 tmp |= CMD_SET_RTYP_NO;
477 break;
478 case MMC_RSP_R1:
479 case MMC_RSP_R1B:
480 case MMC_RSP_R3:
481 tmp |= CMD_SET_RTYP_6B;
482 break;
483 case MMC_RSP_R2:
484 tmp |= CMD_SET_RTYP_17B;
485 break;
486 default:
487 pr_err(DRIVER_NAME": Unsupported response type.\n");
488 break;
489 }
490 switch (opc) {
491 /* RBSY */
492 case MMC_SWITCH:
493 case MMC_STOP_TRANSMISSION:
494 case MMC_SET_WRITE_PROT:
495 case MMC_CLR_WRITE_PROT:
496 case MMC_ERASE:
497 case MMC_GEN_CMD:
498 tmp |= CMD_SET_RBSY;
499 break;
500 }
501 /* WDAT / DATW */
502 if (host->data) {
503 tmp |= CMD_SET_WDAT;
504 switch (host->bus_width) {
505 case MMC_BUS_WIDTH_1:
506 tmp |= CMD_SET_DATW_1;
507 break;
508 case MMC_BUS_WIDTH_4:
509 tmp |= CMD_SET_DATW_4;
510 break;
511 case MMC_BUS_WIDTH_8:
512 tmp |= CMD_SET_DATW_8;
513 break;
514 default:
515 pr_err(DRIVER_NAME": Unsupported bus width.\n");
516 break;
517 }
518 }
519 /* DWEN */
520 if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
521 tmp |= CMD_SET_DWEN;
522 /* CMLTE/CMD12EN */
523 if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
524 tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
525 sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
526 mrq->data->blocks << 16);
527 }
528 /* RIDXC[1:0] check bits */
529 if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
530 opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
531 tmp |= CMD_SET_RIDXC_BITS;
532 /* RCRC7C[1:0] check bits */
533 if (opc == MMC_SEND_OP_COND)
534 tmp |= CMD_SET_CRC7C_BITS;
535 /* RCRC7C[1:0] internal CRC7 */
536 if (opc == MMC_ALL_SEND_CID ||
537 opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
538 tmp |= CMD_SET_CRC7C_INTERNAL;
539
540 return opc = ((opc << 24) | tmp);
541}
542
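sh_mmcif_set_cmd() returns the opcode shifted into bits 29:24 of CE_CMD_SET, OR'd with the response-type, data-direction and bus-width flags collected in tmp. A short standalone sketch (not part of the driver) of the resulting word, assuming CMD25 (MMC_WRITE_MULTIPLE_BLOCK, opcode 25) with an R1 response on a 4-bit bus; the flag values are copied from the defines at the top of this file:

#include <stdio.h>
#include <stdint.h>

#define CMD_SET_RTYP_6B         ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_WDAT            (1 << 19)               /* command carries data */
#define CMD_SET_DWEN            (1 << 18)               /* data direction: write */
#define CMD_SET_CMLTE           (1 << 17)               /* multi-block transfer */
#define CMD_SET_CMD12EN         (1 << 16)               /* auto CMD12 */
#define CMD_SET_DATW_4          ((0 << 1) | (1 << 0))   /* 4-bit bus */

int main(void)
{
        uint32_t opc = 25;      /* MMC_WRITE_MULTIPLE_BLOCK */
        uint32_t tmp = CMD_SET_RTYP_6B | CMD_SET_WDAT | CMD_SET_DWEN |
                       CMD_SET_CMLTE | CMD_SET_CMD12EN | CMD_SET_DATW_4;

        printf("CE_CMD_SET word = 0x%08x\n", (opc << 24) | tmp); /* 0x194f0001 */
        return 0;
}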
543static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host,
544 struct mmc_request *mrq, u32 opc)
545{
546 u32 ret;
547
548 switch (opc) {
549 case MMC_READ_MULTIPLE_BLOCK:
550 ret = sh_mmcif_multi_read(host, mrq);
551 break;
552 case MMC_WRITE_MULTIPLE_BLOCK:
553 ret = sh_mmcif_multi_write(host, mrq);
554 break;
555 case MMC_WRITE_BLOCK:
556 ret = sh_mmcif_single_write(host, mrq);
557 break;
558 case MMC_READ_SINGLE_BLOCK:
559 case MMC_SEND_EXT_CSD:
560 ret = sh_mmcif_single_read(host, mrq);
561 break;
562 default:
563 pr_err(DRIVER_NAME": Unsupported CMD = d'%08d\n", opc);
564 ret = -EINVAL;
565 break;
566 }
567 return ret;
568}
569
570static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
571 struct mmc_request *mrq, struct mmc_command *cmd)
572{
573 long time;
574 int ret = 0, mask = 0;
575 u32 opc = cmd->opcode;
576
577 host->cmd = cmd;
578
579 switch (opc) {
580 /* response busy check */
581 case MMC_SWITCH:
582 case MMC_STOP_TRANSMISSION:
583 case MMC_SET_WRITE_PROT:
584 case MMC_CLR_WRITE_PROT:
585 case MMC_ERASE:
586 case MMC_GEN_CMD:
587 mask = MASK_MRBSYE;
588 break;
589 default:
590 mask = MASK_MCRSPE;
591 break;
592 }
593 mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
594 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
595 MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
596 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;
597
598 if (host->data) {
599 sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, 0);
600 sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, mrq->data->blksz);
601 }
602 opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
603
604 sh_mmcif_writel(host, MMCIF_CE_INT, 0xD80430C0);
605 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, mask);
606 /* set arg */
607 sh_mmcif_writel(host, MMCIF_CE_ARG, cmd->arg);
608 host->wait_int = 0;
609 /* set cmd */
610 sh_mmcif_writel(host, MMCIF_CE_CMD_SET, opc);
611
612 time = wait_event_interruptible_timeout(host->intr_wait,
613 host->wait_int == 1 || host->sd_error == 1, host->timeout);
614 if (host->wait_int != 1 && time == 0) {
615 cmd->error = sh_mmcif_error_manage(host);
616 return;
617 }
618 if (host->sd_error) {
619 switch (cmd->opcode) {
620 case MMC_ALL_SEND_CID:
621 case MMC_SELECT_CARD:
622 case MMC_APP_CMD:
623 cmd->error = -ETIMEDOUT;
624 break;
625 default:
626 pr_debug("%s: Cmd(d'%d) err\n",
627 DRIVER_NAME, cmd->opcode);
628 cmd->error = sh_mmcif_error_manage(host);
629 break;
630 }
631 host->sd_error = 0;
632 host->wait_int = 0;
633 return;
634 }
635 if (!(cmd->flags & MMC_RSP_PRESENT)) {
636 cmd->error = ret;
637 host->wait_int = 0;
638 return;
639 }
640 if (host->wait_int == 1) {
641 sh_mmcif_get_response(host, cmd);
642 host->wait_int = 0;
643 }
644 if (host->data) {
645 ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
646 if (ret < 0)
647 mrq->data->bytes_xfered = 0;
648 else
649 mrq->data->bytes_xfered =
650 mrq->data->blocks * mrq->data->blksz;
651 }
652 cmd->error = ret;
653}
654
655static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
656 struct mmc_request *mrq, struct mmc_command *cmd)
657{
658 long time;
659
660 if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
661 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
662 else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
663 sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
664 else {
665 pr_err(DRIVER_NAME": unsupported stop cmd\n");
666 cmd->error = sh_mmcif_error_manage(host);
667 return;
668 }
669
670 time = wait_event_interruptible_timeout(host->intr_wait,
671 host->wait_int == 1 ||
672 host->sd_error == 1, host->timeout);
673 if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) {
674 cmd->error = sh_mmcif_error_manage(host);
675 return;
676 }
677 sh_mmcif_get_cmd12response(host, cmd);
678 host->wait_int = 0;
679 cmd->error = 0;
680}
681
682static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
683{
684 struct sh_mmcif_host *host = mmc_priv(mmc);
685
686 switch (mrq->cmd->opcode) {
687 /* MMCIF does not support SD/SDIO commands */
688 case SD_IO_SEND_OP_COND:
689 case MMC_APP_CMD:
690 mrq->cmd->error = -ETIMEDOUT;
691 mmc_request_done(mmc, mrq);
692 return;
693 case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
694 if (!mrq->data) {
695 /* send_if_cond cmd (not supported) */
696 mrq->cmd->error = -ETIMEDOUT;
697 mmc_request_done(mmc, mrq);
698 return;
699 }
700 break;
701 default:
702 break;
703 }
704 host->data = mrq->data;
705 sh_mmcif_start_cmd(host, mrq, mrq->cmd);
706 host->data = NULL;
707
708 if (mrq->cmd->error != 0) {
709 mmc_request_done(mmc, mrq);
710 return;
711 }
712 if (mrq->stop)
713 sh_mmcif_stop_cmd(host, mrq, mrq->stop);
714 mmc_request_done(mmc, mrq);
715}
716
717static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
718{
719 struct sh_mmcif_host *host = mmc_priv(mmc);
720 struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
721
722 if (ios->power_mode == MMC_POWER_OFF) {
723 /* clock stop */
724 sh_mmcif_clock_control(host, 0);
725 if (p->down_pwr)
726 p->down_pwr(host->pd);
727 return;
728 } else if (ios->power_mode == MMC_POWER_UP) {
729 if (p->set_pwr)
730 p->set_pwr(host->pd, ios->power_mode);
731 }
732
733 if (ios->clock)
734 sh_mmcif_clock_control(host, ios->clock);
735
736 host->bus_width = ios->bus_width;
737}
738
739static struct mmc_host_ops sh_mmcif_ops = {
740 .request = sh_mmcif_request,
741 .set_ios = sh_mmcif_set_ios,
742};
743
744static void sh_mmcif_detect(struct mmc_host *mmc)
745{
746 mmc_detect_change(mmc, 0);
747}
748
749static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
750{
751 struct sh_mmcif_host *host = dev_id;
752 u32 state = 0;
753 int err = 0;
754
755 state = sh_mmcif_readl(host, MMCIF_CE_INT);
756
757 if (state & INT_RBSYE) {
758 sh_mmcif_writel(host, MMCIF_CE_INT, ~(INT_RBSYE | INT_CRSPE));
759 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
760 } else if (state & INT_CRSPE) {
761 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_CRSPE);
762 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
763 } else if (state & INT_BUFREN) {
764 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFREN);
765 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
766 } else if (state & INT_BUFWEN) {
767 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFWEN);
768 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
769 } else if (state & INT_CMD12DRE) {
770 sh_mmcif_writel(host, MMCIF_CE_INT,
771 ~(INT_CMD12DRE | INT_CMD12RBE |
772 INT_CMD12CRE | INT_BUFRE));
773 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
774 } else if (state & INT_BUFRE) {
775 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFRE);
776 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
777 } else if (state & INT_DTRANE) {
778 sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_DTRANE);
779 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
780 } else if (state & INT_CMD12RBE) {
781 sh_mmcif_writel(host, MMCIF_CE_INT,
782 ~(INT_CMD12RBE | INT_CMD12CRE));
783 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
784 } else if (state & INT_ERR_STS) {
785 /* err interrupts */
786 sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
787 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
788 err = 1;
789 } else {
790 pr_debug("%s: Unsupported interrupt\n", DRIVER_NAME);
791 sh_mmcif_writel(host, MMCIF_CE_INT, ~state);
792 sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
793 err = 1;
794 }
795 if (err) {
796 host->sd_error = 1;
797 pr_debug("%s: int err state = %08x\n", DRIVER_NAME, state);
798 }
799 host->wait_int = 1;
800 wake_up(&host->intr_wait);
801
802 return IRQ_HANDLED;
803}
804
805static int __devinit sh_mmcif_probe(struct platform_device *pdev)
806{
807 int ret = 0, irq[2];
808 struct mmc_host *mmc;
809 struct sh_mmcif_host *host = NULL;
810 struct sh_mmcif_plat_data *pd = NULL;
811 struct resource *res;
812 void __iomem *reg;
813 char clk_name[8];
814
815 irq[0] = platform_get_irq(pdev, 0);
816 irq[1] = platform_get_irq(pdev, 1);
817 if (irq[0] < 0 || irq[1] < 0) {
818 pr_err(DRIVER_NAME": failed to get IRQ\n");
819 return -ENXIO;
820 }
821 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
822 if (!res) {
823 dev_err(&pdev->dev, "platform_get_resource error.\n");
824 return -ENXIO;
825 }
826 reg = ioremap(res->start, resource_size(res));
827 if (!reg) {
828 dev_err(&pdev->dev, "ioremap error.\n");
829 return -ENOMEM;
830 }
831 pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data);
832 if (!pd) {
833 dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
834 ret = -ENXIO;
835 goto clean_up;
836 }
837 mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
838 if (!mmc) {
839 ret = -ENOMEM;
840 goto clean_up;
841 }
842 host = mmc_priv(mmc);
843 host->mmc = mmc;
844 host->addr = reg;
845 host->timeout = 1000;
846
847 snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
848 host->hclk = clk_get(&pdev->dev, clk_name);
849 if (IS_ERR(host->hclk)) {
850 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
851 ret = PTR_ERR(host->hclk);
852 goto clean_up1;
853 }
854 clk_enable(host->hclk);
855 host->clk = clk_get_rate(host->hclk);
856 host->pd = pdev;
857
858 init_waitqueue_head(&host->intr_wait);
859
860 mmc->ops = &sh_mmcif_ops;
861 mmc->f_max = host->clk;
862 /* close to 400KHz */
863 if (mmc->f_max < 51200000)
864 mmc->f_min = mmc->f_max / 128;
865 else if (mmc->f_max < 102400000)
866 mmc->f_min = mmc->f_max / 256;
867 else
868 mmc->f_min = mmc->f_max / 512;
869 if (pd->ocr)
870 mmc->ocr_avail = pd->ocr;
871 mmc->caps = MMC_CAP_MMC_HIGHSPEED;
872 if (pd->caps)
873 mmc->caps |= pd->caps;
874 mmc->max_phys_segs = 128;
875 mmc->max_hw_segs = 128;
876 mmc->max_blk_size = 512;
877 mmc->max_blk_count = 65535;
878 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
879 mmc->max_seg_size = mmc->max_req_size;
880
881 sh_mmcif_sync_reset(host);
882 platform_set_drvdata(pdev, host);
883 mmc_add_host(mmc);
884
885 ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
886 if (ret) {
887 pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n");
888 goto clean_up2;
889 }
890 ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
891 if (ret) {
892 free_irq(irq[0], host);
893 pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n");
894 goto clean_up2;
895 }
896
897 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
898 sh_mmcif_detect(host->mmc);
899
900 pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION);
901 pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME,
902 sh_mmcif_readl(host, MMCIF_CE_VERSION) & 0x0000ffff);
903 return ret;
904
905clean_up2:
906 clk_disable(host->hclk);
907clean_up1:
908 mmc_free_host(mmc);
909clean_up:
910 if (reg)
911 iounmap(reg);
912 return ret;
913}
914
915static int __devexit sh_mmcif_remove(struct platform_device *pdev)
916{
917 struct sh_mmcif_host *host = platform_get_drvdata(pdev);
918 int irq[2];
919
920 sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL);
921
922 irq[0] = platform_get_irq(pdev, 0);
923 irq[1] = platform_get_irq(pdev, 1);
924
925 if (host->addr)
926 iounmap(host->addr);
927
928 platform_set_drvdata(pdev, NULL);
929 mmc_remove_host(host->mmc);
930
931 free_irq(irq[0], host);
932 free_irq(irq[1], host);
933
934 clk_disable(host->hclk);
935 mmc_free_host(host->mmc);
936
937 return 0;
938}
939
940static struct platform_driver sh_mmcif_driver = {
941 .probe = sh_mmcif_probe,
942 .remove = sh_mmcif_remove,
943 .driver = {
944 .name = DRIVER_NAME,
945 },
946};
947
948static int __init sh_mmcif_init(void)
949{
950 return platform_driver_register(&sh_mmcif_driver);
951}
952
953static void __exit sh_mmcif_exit(void)
954{
955 platform_driver_unregister(&sh_mmcif_driver);
956}
957
958module_init(sh_mmcif_init);
959module_exit(sh_mmcif_exit);
960
961
962MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
963MODULE_LICENSE("GPL");
964MODULE_ALIAS(DRIVER_NAME);
965MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 82554ddec6b3..cec99958b652 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -1032,7 +1032,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
1032 1032
1033static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state) 1033static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
1034{ 1034{
1035 return mmc_suspend_host(tifm_get_drvdata(sock), state); 1035 return mmc_suspend_host(tifm_get_drvdata(sock));
1036} 1036}
1037 1037
1038static int tifm_sd_resume(struct tifm_dev *sock) 1038static int tifm_sd_resume(struct tifm_dev *sock)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 883fcac21004..ee7d0a5a51c4 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -768,7 +768,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
768 struct mmc_host *mmc = platform_get_drvdata(dev); 768 struct mmc_host *mmc = platform_get_drvdata(dev);
769 int ret; 769 int ret;
770 770
771 ret = mmc_suspend_host(mmc, state); 771 ret = mmc_suspend_host(mmc);
772 772
773 /* Tell MFD core it can disable us now.*/ 773 /* Tell MFD core it can disable us now.*/
774 if (!ret && cell->disable) 774 if (!ret && cell->disable)
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 632858a94376..19f2d72dbca5 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1280,7 +1280,7 @@ static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
1280 via_save_pcictrlreg(host); 1280 via_save_pcictrlreg(host);
1281 via_save_sdcreg(host); 1281 via_save_sdcreg(host);
1282 1282
1283 ret = mmc_suspend_host(host->mmc, state); 1283 ret = mmc_suspend_host(host->mmc);
1284 1284
1285 pci_save_state(pcidev); 1285 pci_save_state(pcidev);
1286 pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0); 1286 pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 69efe01eece8..0012f5d13d28 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1819,7 +1819,7 @@ static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
1819{ 1819{
1820 BUG_ON(host == NULL); 1820 BUG_ON(host == NULL);
1821 1821
1822 return mmc_suspend_host(host->mmc, state); 1822 return mmc_suspend_host(host->mmc);
1823} 1823}
1824 1824
1825static int wbsd_resume(struct wbsd_host *host) 1825static int wbsd_resume(struct wbsd_host *host)
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 72ebb3f06b86..4dfa6b90c21c 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -189,8 +189,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
189 return new_offset; 189 return new_offset;
190} 190}
191 191
192static int vol_cdev_fsync(struct file *file, struct dentry *dentry, 192static int vol_cdev_fsync(struct file *file, int datasync)
193 int datasync)
194{ 193{
195 struct ubi_volume_desc *desc = file->private_data; 194 struct ubi_volume_desc *desc = file->private_data;
196 struct ubi_device *ubi = desc->vol->ubi; 195 struct ubi_device *ubi = desc->vol->ubi;
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index 82eaf65d2d85..ea9b7a098c9b 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -551,8 +551,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id)
551 void __iomem *shmem; 551 void __iomem *shmem;
552 552
553 if (dev == NULL) { 553 if (dev == NULL) {
554 pr_err("%s: net_interrupt(): irq %d for unknown device.\n", 554 pr_err("net_interrupt(): irq %d for unknown device.\n", irq);
555 dev->name, irq);
556 return IRQ_NONE; 555 return IRQ_NONE;
557 } 556 }
558 557
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index c911bfb55b19..9d11dbf5e4da 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -294,7 +294,7 @@ int be_cmd_POST(struct be_adapter *adapter)
294 } else { 294 } else {
295 return 0; 295 return 0;
296 } 296 }
297 } while (timeout < 20); 297 } while (timeout < 40);
298 298
299 dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage); 299 dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
300 return -1; 300 return -1;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index aa065c71ddd8..54b14272f333 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1861,7 +1861,7 @@ static int be_setup(struct be_adapter *adapter)
1861 goto if_destroy; 1861 goto if_destroy;
1862 } 1862 }
1863 vf++; 1863 vf++;
1864 } while (vf < num_vfs); 1864 }
1865 } else if (!be_physfn(adapter)) { 1865 } else if (!be_physfn(adapter)) {
1866 status = be_cmd_mac_addr_query(adapter, mac, 1866 status = be_cmd_mac_addr_query(adapter, mac,
1867 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); 1867 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 05b751719bd5..2c5227c02fa0 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -63,6 +63,16 @@ config CAN_BFIN
63 To compile this driver as a module, choose M here: the 63 To compile this driver as a module, choose M here: the
64 module will be called bfin_can. 64 module will be called bfin_can.
65 65
66config CAN_JANZ_ICAN3
67 tristate "Janz VMOD-ICAN3 Intelligent CAN controller"
68 depends on CAN_DEV && MFD_JANZ_CMODIO
69 ---help---
70 Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which
71 connects to a MODULbus carrier board.
72
73 This driver can also be built as a module. If so, the module will be
74 called janz-ican3.ko.
75
66source "drivers/net/can/mscan/Kconfig" 76source "drivers/net/can/mscan/Kconfig"
67 77
68source "drivers/net/can/sja1000/Kconfig" 78source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 7a702f28d01c..9047cd066fea 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -15,5 +15,6 @@ obj-$(CONFIG_CAN_AT91) += at91_can.o
15obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o 15obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
16obj-$(CONFIG_CAN_MCP251X) += mcp251x.o 16obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
17obj-$(CONFIG_CAN_BFIN) += bfin_can.o 17obj-$(CONFIG_CAN_BFIN) += bfin_can.o
18obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
18 19
19ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 20ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
new file mode 100644
index 000000000000..6e533dcc36c0
--- /dev/null
+++ b/drivers/net/can/janz-ican3.c
@@ -0,0 +1,1830 @@
1/*
2 * Janz MODULbus VMOD-ICAN3 CAN Interface Driver
3 *
4 * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/delay.h>
17#include <linux/platform_device.h>
18
19#include <linux/netdevice.h>
20#include <linux/can.h>
21#include <linux/can/dev.h>
22#include <linux/can/error.h>
23
24#include <linux/mfd/janz.h>
25
26/* the DPM has 64k of memory, organized into 256x 256 byte pages */
27#define DPM_NUM_PAGES 256
28#define DPM_PAGE_SIZE 256
29#define DPM_PAGE_ADDR(p) ((p) * DPM_PAGE_SIZE)
30
31/* JANZ ICAN3 "old-style" host interface queue page numbers */
32#define QUEUE_OLD_CONTROL 0
33#define QUEUE_OLD_RB0 1
34#define QUEUE_OLD_RB1 2
35#define QUEUE_OLD_WB0 3
36#define QUEUE_OLD_WB1 4
37
38/* Janz ICAN3 "old-style" host interface control registers */
39#define MSYNC_PEER 0x00 /* ICAN only */
40#define MSYNC_LOCL 0x01 /* host only */
41#define TARGET_RUNNING 0x02
42
43#define MSYNC_RB0 0x01
44#define MSYNC_RB1 0x02
45#define MSYNC_RBLW 0x04
46#define MSYNC_RB_MASK (MSYNC_RB0 | MSYNC_RB1)
47
48#define MSYNC_WB0 0x10
49#define MSYNC_WB1 0x20
50#define MSYNC_WBLW 0x40
51#define MSYNC_WB_MASK (MSYNC_WB0 | MSYNC_WB1)
52
53/* Janz ICAN3 "new-style" host interface queue page numbers */
54#define QUEUE_TOHOST 5
55#define QUEUE_FROMHOST_MID 6
56#define QUEUE_FROMHOST_HIGH 7
57#define QUEUE_FROMHOST_LOW 8
58
59/* The first free page in the DPM is #9 */
60#define DPM_FREE_START 9
61
62/* Janz ICAN3 "new-style" and "fast" host interface descriptor flags */
63#define DESC_VALID 0x80
64#define DESC_WRAP 0x40
65#define DESC_INTERRUPT 0x20
66#define DESC_IVALID 0x10
67#define DESC_LEN(len) (len)
68
69/* Janz ICAN3 Firmware Messages */
70#define MSG_CONNECTI 0x02
71#define MSG_DISCONNECT 0x03
72#define MSG_IDVERS 0x04
73#define MSG_MSGLOST 0x05
74#define MSG_NEWHOSTIF 0x08
75#define MSG_INQUIRY 0x0a
76#define MSG_SETAFILMASK 0x10
77#define MSG_INITFDPMQUEUE 0x11
78#define MSG_HWCONF 0x12
79#define MSG_FMSGLOST 0x15
80#define MSG_CEVTIND 0x37
81#define MSG_CBTRREQ 0x41
82#define MSG_COFFREQ 0x42
83#define MSG_CONREQ 0x43
84#define MSG_CCONFREQ 0x47
85
86/*
87 * Janz ICAN3 CAN Inquiry Message Types
88 *
89 * NOTE: there appears to be a firmware bug here. You must send
90 * NOTE: INQUIRY_STATUS and expect to receive an INQUIRY_EXTENDED
91 * NOTE: response. The controller never responds to a message with
92 * NOTE: the INQUIRY_EXTENDED subspec :(
93 */
94#define INQUIRY_STATUS 0x00
95#define INQUIRY_TERMINATION 0x01
96#define INQUIRY_EXTENDED 0x04
97
98/* Janz ICAN3 CAN Set Acceptance Filter Mask Message Types */
99#define SETAFILMASK_REJECT 0x00
100#define SETAFILMASK_FASTIF 0x02
101
102/* Janz ICAN3 CAN Hardware Configuration Message Types */
103#define HWCONF_TERMINATE_ON 0x01
104#define HWCONF_TERMINATE_OFF 0x00
105
106/* Janz ICAN3 CAN Event Indication Message Types */
107#define CEVTIND_EI 0x01
108#define CEVTIND_DOI 0x02
109#define CEVTIND_LOST 0x04
110#define CEVTIND_FULL 0x08
111#define CEVTIND_BEI 0x10
112
113#define CEVTIND_CHIP_SJA1000 0x02
114
115#define ICAN3_BUSERR_QUOTA_MAX 255
116
117/* Janz ICAN3 CAN Frame Conversion */
118#define ICAN3_ECHO 0x10
119#define ICAN3_EFF_RTR 0x40
120#define ICAN3_SFF_RTR 0x10
121#define ICAN3_EFF 0x80
122
123#define ICAN3_CAN_TYPE_MASK 0x0f
124#define ICAN3_CAN_TYPE_SFF 0x00
125#define ICAN3_CAN_TYPE_EFF 0x01
126
127#define ICAN3_CAN_DLC_MASK 0x0f
128
129/*
130 * SJA1000 Status and Error Register Definitions
131 *
132 * Copied from drivers/net/can/sja1000/sja1000.h
133 */
134
135/* status register content */
136#define SR_BS 0x80
137#define SR_ES 0x40
138#define SR_TS 0x20
139#define SR_RS 0x10
140#define SR_TCS 0x08
141#define SR_TBS 0x04
142#define SR_DOS 0x02
143#define SR_RBS 0x01
144
145#define SR_CRIT (SR_BS|SR_ES)
146
147/* ECC register */
148#define ECC_SEG 0x1F
149#define ECC_DIR 0x20
150#define ECC_ERR 6
151#define ECC_BIT 0x00
152#define ECC_FORM 0x40
153#define ECC_STUFF 0x80
154#define ECC_MASK 0xc0
155
156/* Number of buffers for use in the "new-style" host interface */
157#define ICAN3_NEW_BUFFERS 16
158
159/* Number of buffers for use in the "fast" host interface */
160#define ICAN3_TX_BUFFERS 512
161#define ICAN3_RX_BUFFERS 1024
162
163/* SJA1000 Clock Input */
164#define ICAN3_CAN_CLOCK 8000000
165
166/* Driver Name */
167#define DRV_NAME "janz-ican3"
168
169/* DPM Control Registers -- starts at offset 0x100 in the MODULbus registers */
170struct ican3_dpm_control {
171 /* window address register */
172 u8 window_address;
173 u8 unused1;
174
175 /*
176 * Read access: clear interrupt from microcontroller
177 * Write access: send interrupt to microcontroller
178 */
179 u8 interrupt;
180 u8 unused2;
181
182 /* write-only: reset all hardware on the module */
183 u8 hwreset;
184 u8 unused3;
185
186 /* write-only: generate an interrupt to the TPU */
187 u8 tpuinterrupt;
188};
189
190struct ican3_dev {
191
192 /* must be the first member */
193 struct can_priv can;
194
195 /* CAN network device */
196 struct net_device *ndev;
197 struct napi_struct napi;
198
199 /* Device for printing */
200 struct device *dev;
201
202 /* module number */
203 unsigned int num;
204
205 /* base address of registers and IRQ */
206 struct janz_cmodio_onboard_regs __iomem *ctrl;
207 struct ican3_dpm_control __iomem *dpmctrl;
208 void __iomem *dpm;
209 int irq;
210
211 /* CAN bus termination status */
212 struct completion termination_comp;
213 bool termination_enabled;
214
215 /* CAN bus error status registers */
216 struct completion buserror_comp;
217 struct can_berr_counter bec;
218
219 /* old and new style host interface */
220 unsigned int iftype;
221
222 /*
223 * Any function which changes the current DPM page must hold this
224 * lock while it is performing data accesses. This ensures that the
225 * function will not be preempted and end up reading data from a
226 * different DPM page than it expects.
227 */
228 spinlock_t lock;
229
230 /* new host interface */
231 unsigned int rx_int;
232 unsigned int rx_num;
233 unsigned int tx_num;
234
235 /* fast host interface */
236 unsigned int fastrx_start;
237 unsigned int fastrx_int;
238 unsigned int fastrx_num;
239 unsigned int fasttx_start;
240 unsigned int fasttx_num;
241
242 /* first free DPM page */
243 unsigned int free_page;
244};
245
246struct ican3_msg {
247 u8 control;
248 u8 spec;
249 __le16 len;
250 u8 data[252];
251};
252
253struct ican3_new_desc {
254 u8 control;
255 u8 pointer;
256};
257
258struct ican3_fast_desc {
259 u8 control;
260 u8 command;
261 u8 data[14];
262};
263
264/* write to the window basic address register */
265static inline void ican3_set_page(struct ican3_dev *mod, unsigned int page)
266{
267 BUG_ON(page >= DPM_NUM_PAGES);
268 iowrite8(page, &mod->dpmctrl->window_address);
269}
270
271/*
272 * ICAN3 "old-style" host interface
273 */
274
275/*
276 * Receive a message from the ICAN3 "old-style" firmware interface
277 *
278 * LOCKING: must hold mod->lock
279 *
280 * returns 0 on success, -ENOMEM when no message exists
281 */
282static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
283{
284 unsigned int mbox, mbox_page;
285 u8 locl, peer, xord;
286
287 /* get the MSYNC registers */
288 ican3_set_page(mod, QUEUE_OLD_CONTROL);
289 peer = ioread8(mod->dpm + MSYNC_PEER);
290 locl = ioread8(mod->dpm + MSYNC_LOCL);
291 xord = locl ^ peer;
292
293 if ((xord & MSYNC_RB_MASK) == 0x00) {
294 dev_dbg(mod->dev, "no mbox for reading\n");
295 return -ENOMEM;
296 }
297
298 /* find the first free mbox to read */
299 if ((xord & MSYNC_RB_MASK) == MSYNC_RB_MASK)
300 mbox = (xord & MSYNC_RBLW) ? MSYNC_RB0 : MSYNC_RB1;
301 else
302 mbox = (xord & MSYNC_RB0) ? MSYNC_RB0 : MSYNC_RB1;
303
304 /* copy the message */
305 mbox_page = (mbox == MSYNC_RB0) ? QUEUE_OLD_RB0 : QUEUE_OLD_RB1;
306 ican3_set_page(mod, mbox_page);
307 memcpy_fromio(msg, mod->dpm, sizeof(*msg));
308
309 /*
310 * notify the firmware that the read buffer is available
311 * for it to fill again
312 */
313 locl ^= mbox;
314
315 ican3_set_page(mod, QUEUE_OLD_CONTROL);
316 iowrite8(locl, mod->dpm + MSYNC_LOCL);
317 return 0;
318}
319
320/*
321 * Send a message through the "old-style" firmware interface
322 *
323 * LOCKING: must hold mod->lock
324 *
325 * returns 0 on success, -ENOMEM when no free space exists
326 */
327static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
328{
329 unsigned int mbox, mbox_page;
330 u8 locl, peer, xord;
331
332 /* get the MSYNC registers */
333 ican3_set_page(mod, QUEUE_OLD_CONTROL);
334 peer = ioread8(mod->dpm + MSYNC_PEER);
335 locl = ioread8(mod->dpm + MSYNC_LOCL);
336 xord = locl ^ peer;
337
338 if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) {
339 dev_err(mod->dev, "no mbox for writing\n");
340 return -ENOMEM;
341 }
342
343 /* calculate a free mbox to use */
344 mbox = (xord & MSYNC_WB0) ? MSYNC_WB1 : MSYNC_WB0;
345
346 /* copy the message to the DPM */
347 mbox_page = (mbox == MSYNC_WB0) ? QUEUE_OLD_WB0 : QUEUE_OLD_WB1;
348 ican3_set_page(mod, mbox_page);
349 memcpy_toio(mod->dpm, msg, sizeof(*msg));
350
351 locl ^= mbox;
352 if (mbox == MSYNC_WB1)
353 locl |= MSYNC_WBLW;
354
355 ican3_set_page(mod, QUEUE_OLD_CONTROL);
356 iowrite8(locl, mod->dpm + MSYNC_LOCL);
357 return 0;
358}
359
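The old-style interface hands mailboxes back and forth by toggling bits in MSYNC_LOCL and comparing them against MSYNC_PEER: a bit that differs between the two registers marks a mailbox whose last message has not been consumed yet. A brief standalone sketch (not part of the driver) of the write-mailbox selection used in ican3_old_send_msg(), with assumed register values rather than values read from real hardware:

#include <stdio.h>

#define MSYNC_WB0       0x10
#define MSYNC_WB1       0x20
#define MSYNC_WB_MASK   (MSYNC_WB0 | MSYNC_WB1)

int main(void)
{
        unsigned char locl = 0x10, peer = 0x00; /* assumed register contents */
        unsigned char xord = locl ^ peer;

        if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) {
                printf("both write mailboxes busy\n");
                return 0;
        }

        /* WB0 still differs, so it is in flight; pick WB1 (and vice versa) */
        printf("use mailbox %s\n", (xord & MSYNC_WB0) ? "WB1" : "WB0");
        return 0;
}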
360/*
361 * ICAN3 "new-style" Host Interface Setup
362 */
363
364static void __devinit ican3_init_new_host_interface(struct ican3_dev *mod)
365{
366 struct ican3_new_desc desc;
367 unsigned long flags;
368 void __iomem *dst;
369 int i;
370
371 spin_lock_irqsave(&mod->lock, flags);
372
373 /* setup the internal data structures for RX */
374 mod->rx_num = 0;
375 mod->rx_int = 0;
376
377 /* tohost queue descriptors are in page 5 */
378 ican3_set_page(mod, QUEUE_TOHOST);
379 dst = mod->dpm;
380
381 /* initialize the tohost (rx) queue descriptors: pages 9-24 */
382 for (i = 0; i < ICAN3_NEW_BUFFERS; i++) {
383 desc.control = DESC_INTERRUPT | DESC_LEN(1); /* I L=1 */
384 desc.pointer = mod->free_page;
385
386 /* set wrap flag on last buffer */
387 if (i == ICAN3_NEW_BUFFERS - 1)
388 desc.control |= DESC_WRAP;
389
390 memcpy_toio(dst, &desc, sizeof(desc));
391 dst += sizeof(desc);
392 mod->free_page++;
393 }
394
395 /* fromhost (tx) mid queue descriptors are in page 6 */
396 ican3_set_page(mod, QUEUE_FROMHOST_MID);
397 dst = mod->dpm;
398
399 /* setup the internal data structures for TX */
400 mod->tx_num = 0;
401
402 /* initialize the fromhost mid queue descriptors: pages 25-40 */
403 for (i = 0; i < ICAN3_NEW_BUFFERS; i++) {
404 desc.control = DESC_VALID | DESC_LEN(1); /* V L=1 */
405 desc.pointer = mod->free_page;
406
407 /* set wrap flag on last buffer */
408 if (i == ICAN3_NEW_BUFFERS - 1)
409 desc.control |= DESC_WRAP;
410
411 memcpy_toio(dst, &desc, sizeof(desc));
412 dst += sizeof(desc);
413 mod->free_page++;
414 }
415
416 /* fromhost hi queue descriptors are in page 7 */
417 ican3_set_page(mod, QUEUE_FROMHOST_HIGH);
418 dst = mod->dpm;
419
420 /* initialize only a single buffer in the fromhost hi queue (unused) */
421 desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */
422 desc.pointer = mod->free_page;
423 memcpy_toio(dst, &desc, sizeof(desc));
424 mod->free_page++;
425
426 /* fromhost low queue descriptors are in page 8 */
427 ican3_set_page(mod, QUEUE_FROMHOST_LOW);
428 dst = mod->dpm;
429
430 /* initialize only a single buffer in the fromhost low queue (unused) */
431 desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */
432 desc.pointer = mod->free_page;
433 memcpy_toio(dst, &desc, sizeof(desc));
434 mod->free_page++;
435
436 spin_unlock_irqrestore(&mod->lock, flags);
437}
438
439/*
440 * ICAN3 Fast Host Interface Setup
441 */
442
443static void __devinit ican3_init_fast_host_interface(struct ican3_dev *mod)
444{
445 struct ican3_fast_desc desc;
446 unsigned long flags;
447 unsigned int addr;
448 void __iomem *dst;
449 int i;
450
451 spin_lock_irqsave(&mod->lock, flags);
452
453 /* save the start recv page */
454 mod->fastrx_start = mod->free_page;
455 mod->fastrx_num = 0;
456 mod->fastrx_int = 0;
457
458 /* build a single fast tohost queue descriptor */
459 memset(&desc, 0, sizeof(desc));
460 desc.control = 0x00;
461 desc.command = 1;
462
463 /* build the tohost queue descriptor ring in memory */
464 addr = 0;
465 for (i = 0; i < ICAN3_RX_BUFFERS; i++) {
466
467 /* set the wrap bit on the last buffer */
468 if (i == ICAN3_RX_BUFFERS - 1)
469 desc.control |= DESC_WRAP;
470
471 /* switch to the correct page */
472 ican3_set_page(mod, mod->free_page);
473
474 /* copy the descriptor to the DPM */
475 dst = mod->dpm + addr;
476 memcpy_toio(dst, &desc, sizeof(desc));
477 addr += sizeof(desc);
478
479 /* move to the next page if necessary */
480 if (addr >= DPM_PAGE_SIZE) {
481 addr = 0;
482 mod->free_page++;
483 }
484 }
485
486 /* make sure we page-align the next queue */
487 if (addr != 0)
488 mod->free_page++;
489
490 /* save the start xmit page */
491 mod->fasttx_start = mod->free_page;
492 mod->fasttx_num = 0;
493
494 /* build a single fast fromhost queue descriptor */
495 memset(&desc, 0, sizeof(desc));
496 desc.control = DESC_VALID;
497 desc.command = 1;
498
499 /* build the fromhost queue descriptor ring in memory */
500 addr = 0;
501 for (i = 0; i < ICAN3_TX_BUFFERS; i++) {
502
503 /* set the wrap bit on the last buffer */
504 if (i == ICAN3_TX_BUFFERS - 1)
505 desc.control |= DESC_WRAP;
506
507 /* switch to the correct page */
508 ican3_set_page(mod, mod->free_page);
509
510 /* copy the descriptor to the DPM */
511 dst = mod->dpm + addr;
512 memcpy_toio(dst, &desc, sizeof(desc));
513 addr += sizeof(desc);
514
515 /* move to the next page if necessary */
516 if (addr >= DPM_PAGE_SIZE) {
517 addr = 0;
518 mod->free_page++;
519 }
520 }
521
522 spin_unlock_irqrestore(&mod->lock, flags);
523}
524
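Each ican3_fast_desc is 16 bytes, so a 256-byte DPM page holds 16 descriptors and the loops above walk the rings across consecutive DPM pages. A quick standalone sketch (not part of the driver) of the page accounting, using the buffer counts defined earlier in this file:

#include <stdio.h>

#define DPM_PAGE_SIZE           256
#define ICAN3_TX_BUFFERS        512
#define ICAN3_RX_BUFFERS        1024
#define FAST_DESC_SIZE          16      /* sizeof(struct ican3_fast_desc) */

int main(void)
{
        unsigned int per_page = DPM_PAGE_SIZE / FAST_DESC_SIZE; /* 16 */

        printf("tohost (rx) ring: %u descriptors -> %u pages\n",
               ICAN3_RX_BUFFERS, ICAN3_RX_BUFFERS / per_page);  /* 64 pages */
        printf("fromhost (tx) ring: %u descriptors -> %u pages\n",
               ICAN3_TX_BUFFERS, ICAN3_TX_BUFFERS / per_page);  /* 32 pages */
        return 0;
}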
525/*
526 * ICAN3 "new-style" Host Interface Message Helpers
527 */
528
529/*
530 * LOCKING: must hold mod->lock
531 */
532static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
533{
534 struct ican3_new_desc desc;
535 void __iomem *desc_addr = mod->dpm + (mod->tx_num * sizeof(desc));
536
537 /* switch to the fromhost mid queue, and read the buffer descriptor */
538 ican3_set_page(mod, QUEUE_FROMHOST_MID);
539 memcpy_fromio(&desc, desc_addr, sizeof(desc));
540
541 if (!(desc.control & DESC_VALID)) {
542 dev_dbg(mod->dev, "%s: no free buffers\n", __func__);
543 return -ENOMEM;
544 }
545
546 /* switch to the data page, copy the data */
547 ican3_set_page(mod, desc.pointer);
548 memcpy_toio(mod->dpm, msg, sizeof(*msg));
549
550 /* switch back to the descriptor, set the valid bit, write it back */
551 ican3_set_page(mod, QUEUE_FROMHOST_MID);
552 desc.control ^= DESC_VALID;
553 memcpy_toio(desc_addr, &desc, sizeof(desc));
554
555 /* update the tx number */
556 mod->tx_num = (desc.control & DESC_WRAP) ? 0 : (mod->tx_num + 1);
557 return 0;
558}
559
560/*
561 * LOCKING: must hold mod->lock
562 */
563static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
564{
565 struct ican3_new_desc desc;
566 void __iomem *desc_addr = mod->dpm + (mod->rx_num * sizeof(desc));
567
568 /* switch to the tohost queue, and read the buffer descriptor */
569 ican3_set_page(mod, QUEUE_TOHOST);
570 memcpy_fromio(&desc, desc_addr, sizeof(desc));
571
572 if (!(desc.control & DESC_VALID)) {
573 dev_dbg(mod->dev, "%s: no buffers to recv\n", __func__);
574 return -ENOMEM;
575 }
576
577 /* switch to the data page, copy the data */
578 ican3_set_page(mod, desc.pointer);
579 memcpy_fromio(msg, mod->dpm, sizeof(*msg));
580
581 /* switch back to the descriptor, toggle the valid bit, write it back */
582 ican3_set_page(mod, QUEUE_TOHOST);
583 desc.control ^= DESC_VALID;
584 memcpy_toio(desc_addr, &desc, sizeof(desc));
585
586 /* update the rx number */
587 mod->rx_num = (desc.control & DESC_WRAP) ? 0 : (mod->rx_num + 1);
588 return 0;
589}
590
591/*
592 * Message Send / Recv Helpers
593 */
594
595static int ican3_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
596{
597 unsigned long flags;
598 int ret;
599
600 spin_lock_irqsave(&mod->lock, flags);
601
602 if (mod->iftype == 0)
603 ret = ican3_old_send_msg(mod, msg);
604 else
605 ret = ican3_new_send_msg(mod, msg);
606
607 spin_unlock_irqrestore(&mod->lock, flags);
608 return ret;
609}
610
611static int ican3_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
612{
613 unsigned long flags;
614 int ret;
615
616 spin_lock_irqsave(&mod->lock, flags);
617
618 if (mod->iftype == 0)
619 ret = ican3_old_recv_msg(mod, msg);
620 else
621 ret = ican3_new_recv_msg(mod, msg);
622
623 spin_unlock_irqrestore(&mod->lock, flags);
624 return ret;
625}
626
627/*
628 * Quick Pre-constructed Messages
629 */
630
631static int __devinit ican3_msg_connect(struct ican3_dev *mod)
632{
633 struct ican3_msg msg;
634
635 memset(&msg, 0, sizeof(msg));
636 msg.spec = MSG_CONNECTI;
637 msg.len = cpu_to_le16(0);
638
639 return ican3_send_msg(mod, &msg);
640}
641
642static int __devexit ican3_msg_disconnect(struct ican3_dev *mod)
643{
644 struct ican3_msg msg;
645
646 memset(&msg, 0, sizeof(msg));
647 msg.spec = MSG_DISCONNECT;
648 msg.len = cpu_to_le16(0);
649
650 return ican3_send_msg(mod, &msg);
651}
652
653static int __devinit ican3_msg_newhostif(struct ican3_dev *mod)
654{
655 struct ican3_msg msg;
656 int ret;
657
658 memset(&msg, 0, sizeof(msg));
659 msg.spec = MSG_NEWHOSTIF;
660 msg.len = cpu_to_le16(0);
661
662 /* If we're not using the old interface, switching seems bogus */
663 WARN_ON(mod->iftype != 0);
664
665 ret = ican3_send_msg(mod, &msg);
666 if (ret)
667 return ret;
668
669 /* mark the module as using the new host interface */
670 mod->iftype = 1;
671 return 0;
672}
673
674static int __devinit ican3_msg_fasthostif(struct ican3_dev *mod)
675{
676 struct ican3_msg msg;
677 unsigned int addr;
678
679 memset(&msg, 0, sizeof(msg));
680 msg.spec = MSG_INITFDPMQUEUE;
681 msg.len = cpu_to_le16(8);
682
683 /* write the tohost queue start address */
684 addr = DPM_PAGE_ADDR(mod->fastrx_start);
685 msg.data[0] = addr & 0xff;
686 msg.data[1] = (addr >> 8) & 0xff;
687 msg.data[2] = (addr >> 16) & 0xff;
688 msg.data[3] = (addr >> 24) & 0xff;
689
690 /* write the fromhost queue start address */
691 addr = DPM_PAGE_ADDR(mod->fasttx_start);
692 msg.data[4] = addr & 0xff;
693 msg.data[5] = (addr >> 8) & 0xff;
694 msg.data[6] = (addr >> 16) & 0xff;
695 msg.data[7] = (addr >> 24) & 0xff;
696
697 /* If we're not using the new interface yet, we cannot do this */
698 WARN_ON(mod->iftype != 1);
699
700 return ican3_send_msg(mod, &msg);
701}
702
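The MSG_INITFDPMQUEUE payload above carries the byte offsets of the two fast queue rings inside the 64 KiB DPM, least-significant byte first. A minimal standalone sketch (not part of the driver) of the packing, with an assumed start page of 41; the real value depends on how many pages the earlier queues consumed:

#include <stdio.h>

#define DPM_PAGE_SIZE   256
#define DPM_PAGE_ADDR(p)        ((p) * DPM_PAGE_SIZE)

int main(void)
{
        unsigned int addr = DPM_PAGE_ADDR(41);  /* assumed fastrx_start */
        unsigned char data[4];

        data[0] = addr & 0xff;
        data[1] = (addr >> 8) & 0xff;
        data[2] = (addr >> 16) & 0xff;
        data[3] = (addr >> 24) & 0xff;

        printf("page 41 -> offset 0x%04x -> payload %02x %02x %02x %02x\n",
               addr, data[0], data[1], data[2], data[3]);
        return 0;
}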
703/*
704 * Setup the CAN filter to either accept or reject all
705 * messages from the CAN bus.
706 */
707static int __devinit ican3_set_id_filter(struct ican3_dev *mod, bool accept)
708{
709 struct ican3_msg msg;
710 int ret;
711
712 /* Standard Frame Format */
713 memset(&msg, 0, sizeof(msg));
714 msg.spec = MSG_SETAFILMASK;
715 msg.len = cpu_to_le16(5);
716 msg.data[0] = 0x00; /* IDLo LSB */
717 msg.data[1] = 0x00; /* IDLo MSB */
718 msg.data[2] = 0xff; /* IDHi LSB */
719 msg.data[3] = 0x07; /* IDHi MSB */
720
721 /* accept all frames for fast host if, or reject all frames */
722 msg.data[4] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT;
723
724 ret = ican3_send_msg(mod, &msg);
725 if (ret)
726 return ret;
727
728 /* Extended Frame Format */
729 memset(&msg, 0, sizeof(msg));
730 msg.spec = MSG_SETAFILMASK;
731 msg.len = cpu_to_le16(13);
732 msg.data[0] = 0; /* MUX = 0 */
733 msg.data[1] = 0x00; /* IDLo LSB */
734 msg.data[2] = 0x00;
735 msg.data[3] = 0x00;
736 msg.data[4] = 0x20; /* IDLo MSB */
737 msg.data[5] = 0xff; /* IDHi LSB */
738 msg.data[6] = 0xff;
739 msg.data[7] = 0xff;
740 msg.data[8] = 0x3f; /* IDHi MSB */
741
742 /* accept all frames for fast host if, or reject all frames */
743 msg.data[9] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT;
744
745 return ican3_send_msg(mod, &msg);
746}
747
748/*
749 * Bring the CAN bus online or offline
750 */
751static int ican3_set_bus_state(struct ican3_dev *mod, bool on)
752{
753 struct ican3_msg msg;
754
755 memset(&msg, 0, sizeof(msg));
756 msg.spec = on ? MSG_CONREQ : MSG_COFFREQ;
757 msg.len = cpu_to_le16(0);
758
759 return ican3_send_msg(mod, &msg);
760}
761
762static int ican3_set_termination(struct ican3_dev *mod, bool on)
763{
764 struct ican3_msg msg;
765
766 memset(&msg, 0, sizeof(msg));
767 msg.spec = MSG_HWCONF;
768 msg.len = cpu_to_le16(2);
769 msg.data[0] = 0x00;
770 msg.data[1] = on ? HWCONF_TERMINATE_ON : HWCONF_TERMINATE_OFF;
771
772 return ican3_send_msg(mod, &msg);
773}
774
775static int ican3_send_inquiry(struct ican3_dev *mod, u8 subspec)
776{
777 struct ican3_msg msg;
778
779 memset(&msg, 0, sizeof(msg));
780 msg.spec = MSG_INQUIRY;
781 msg.len = cpu_to_le16(2);
782 msg.data[0] = subspec;
783 msg.data[1] = 0x00;
784
785 return ican3_send_msg(mod, &msg);
786}
787
788static int ican3_set_buserror(struct ican3_dev *mod, u8 quota)
789{
790 struct ican3_msg msg;
791
792 memset(&msg, 0, sizeof(msg));
793 msg.spec = MSG_CCONFREQ;
794 msg.len = cpu_to_le16(2);
795 msg.data[0] = 0x00;
796 msg.data[1] = quota;
797
798 return ican3_send_msg(mod, &msg);
799}
800
801/*
802 * ICAN3 to Linux CAN Frame Conversion
803 */
804
805static void ican3_to_can_frame(struct ican3_dev *mod,
806 struct ican3_fast_desc *desc,
807 struct can_frame *cf)
808{
809 if ((desc->command & ICAN3_CAN_TYPE_MASK) == ICAN3_CAN_TYPE_SFF) {
810 if (desc->data[1] & ICAN3_SFF_RTR)
811 cf->can_id |= CAN_RTR_FLAG;
812
813 cf->can_id |= desc->data[0] << 3;
814 cf->can_id |= (desc->data[1] & 0xe0) >> 5;
815 cf->can_dlc = desc->data[1] & ICAN3_CAN_DLC_MASK;
816 memcpy(cf->data, &desc->data[2], sizeof(cf->data));
817 } else {
818 cf->can_dlc = desc->data[0] & ICAN3_CAN_DLC_MASK;
819 if (desc->data[0] & ICAN3_EFF_RTR)
820 cf->can_id |= CAN_RTR_FLAG;
821
822 if (desc->data[0] & ICAN3_EFF) {
823 cf->can_id |= CAN_EFF_FLAG;
824 cf->can_id |= desc->data[2] << 21; /* 28-21 */
825 cf->can_id |= desc->data[3] << 13; /* 20-13 */
826 cf->can_id |= desc->data[4] << 5; /* 12-5 */
827 cf->can_id |= (desc->data[5] & 0xf8) >> 3;
828 } else {
829 cf->can_id |= desc->data[2] << 3; /* 10-3 */
830 cf->can_id |= desc->data[3] >> 5; /* 2-0 */
831 }
832
833 memcpy(cf->data, &desc->data[6], sizeof(cf->data));
834 }
835}
836
837static void can_frame_to_ican3(struct ican3_dev *mod,
838 struct can_frame *cf,
839 struct ican3_fast_desc *desc)
840{
841 /* clear out any stale data in the descriptor */
842 memset(desc->data, 0, sizeof(desc->data));
843
844 /* we always use the extended format, with the ECHO flag set */
845 desc->command = ICAN3_CAN_TYPE_EFF;
846 desc->data[0] |= cf->can_dlc;
847 desc->data[1] |= ICAN3_ECHO;
848
849 if (cf->can_id & CAN_RTR_FLAG)
850 desc->data[0] |= ICAN3_EFF_RTR;
851
852 /* pack the id into the correct places */
853 if (cf->can_id & CAN_EFF_FLAG) {
854 desc->data[0] |= ICAN3_EFF;
855 desc->data[2] = (cf->can_id & 0x1fe00000) >> 21; /* 28-21 */
856 desc->data[3] = (cf->can_id & 0x001fe000) >> 13; /* 20-13 */
857 desc->data[4] = (cf->can_id & 0x00001fe0) >> 5; /* 12-5 */
858 desc->data[5] = (cf->can_id & 0x0000001f) << 3; /* 4-0 */
859 } else {
860 desc->data[2] = (cf->can_id & 0x7F8) >> 3; /* bits 10-3 */
861 desc->data[3] = (cf->can_id & 0x007) << 5; /* bits 2-0 */
862 }
863
864 /* copy the data bits into the descriptor */
865 memcpy(&desc->data[6], cf->data, sizeof(cf->data));
866}
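
For illustration, here is a standalone sketch (not part of the driver; the example identifier 0x1234567 and its expected byte values are worked out by hand from the shift/mask expressions above) showing how a 29-bit extended identifier ends up in desc->data[2..5]:

#include <assert.h>
#include <stdint.h>

/* standalone sketch: mirror the extended-frame id packing done above */
static void pack_eff_id(uint32_t id, uint8_t data[6])
{
	data[2] = (id & 0x1fe00000) >> 21;	/* id bits 28-21 */
	data[3] = (id & 0x001fe000) >> 13;	/* id bits 20-13 */
	data[4] = (id & 0x00001fe0) >> 5;	/* id bits 12-5  */
	data[5] = (id & 0x0000001f) << 3;	/* id bits 4-0, left-aligned */
}

int main(void)
{
	uint8_t data[6] = { 0 };

	pack_eff_id(0x1234567, data);		/* an arbitrary 29-bit id */
	assert(data[2] == 0x09);		/* 0b00001001 */
	assert(data[3] == 0x1a);		/* 0b00011010 */
	assert(data[4] == 0x2b);		/* 0b00101011 */
	assert(data[5] == 0x38);		/* 0b00111000 */
	return 0;
}

The standard-frame branch above uses the same idea with only data[2] and data[3], carrying id bits 10-3 and 2-0 respectively.
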
867
868/*
869 * Interrupt Handling
870 */
871
872/*
873 * Handle an ID + Version message response from the firmware. We never send
874 * this request in production code, but being able to display the response is
875 * very useful when debugging.
876 */
877static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg)
878{
879 dev_dbg(mod->dev, "IDVERS response: %s\n", msg->data);
880}
881
882static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
883{
884 struct net_device *dev = mod->ndev;
885 struct net_device_stats *stats = &dev->stats;
886 struct can_frame *cf;
887 struct sk_buff *skb;
888
889 /*
890 * Report that communication messages with the microcontroller firmware
891 * are being lost. These are never CAN frames, so we do not generate an
892 * error frame for userspace
893 */
894 if (msg->spec == MSG_MSGLOST) {
895 dev_err(mod->dev, "lost %d control messages\n", msg->data[0]);
896 return;
897 }
898
899 /*
900 * Oops, this indicates that we have lost messages in the fast queue,
901 * which are exclusively CAN messages. Our driver isn't reading CAN
902 * frames fast enough.
903 *
904 * We'll pretend that the SJA1000 told us that it ran out of buffer
905 * space, because there is no better message for this.
906 */
907 skb = alloc_can_err_skb(dev, &cf);
908 if (skb) {
909 cf->can_id |= CAN_ERR_CRTL;
910 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
911 stats->rx_errors++;
912 stats->rx_bytes += cf->can_dlc;
913 netif_rx(skb);
914 }
915}
916
917/*
918 * Handle CAN Event Indication Messages from the firmware
919 *
920 * The ICAN3 firmware provides the values of some SJA1000 registers when it
921 * generates this message. The code below is largely copied from the
922 * drivers/net/can/sja1000/sja1000.c file, and adapted as necessary
923 */
924static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
925{
926 struct net_device *dev = mod->ndev;
927 struct net_device_stats *stats = &dev->stats;
928 enum can_state state = mod->can.state;
929 u8 status, isrc, rxerr, txerr;
930 struct can_frame *cf;
931 struct sk_buff *skb;
932
933 /* we can only handle the SJA1000 part */
934 if (msg->data[1] != CEVTIND_CHIP_SJA1000) {
935 dev_err(mod->dev, "unable to handle errors on non-SJA1000\n");
936 return -ENODEV;
937 }
938
939 /* check the message length for sanity */
940 if (le16_to_cpu(msg->len) < 6) {
941 dev_err(mod->dev, "error message too short\n");
942 return -EINVAL;
943 }
944
945 skb = alloc_can_err_skb(dev, &cf);
946 if (skb == NULL)
947 return -ENOMEM;
948
949 isrc = msg->data[0];
950 status = msg->data[3];
951 rxerr = msg->data[4];
952 txerr = msg->data[5];
953
954 /* data overrun interrupt */
955 if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) {
956 dev_dbg(mod->dev, "data overrun interrupt\n");
957 cf->can_id |= CAN_ERR_CRTL;
958 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
959 stats->rx_over_errors++;
960 stats->rx_errors++;
961 }
962
963 /* error warning + passive interrupt */
964 if (isrc == CEVTIND_EI) {
965 dev_dbg(mod->dev, "error warning + passive interrupt\n");
966 if (status & SR_BS) {
967 state = CAN_STATE_BUS_OFF;
968 cf->can_id |= CAN_ERR_BUSOFF;
969 can_bus_off(dev);
970 } else if (status & SR_ES) {
971 if (rxerr >= 128 || txerr >= 128)
972 state = CAN_STATE_ERROR_PASSIVE;
973 else
974 state = CAN_STATE_ERROR_WARNING;
975 } else {
976 state = CAN_STATE_ERROR_ACTIVE;
977 }
978 }
979
980 /* bus error interrupt */
981 if (isrc == CEVTIND_BEI) {
982 u8 ecc = msg->data[2];
983
984 dev_dbg(mod->dev, "bus error interrupt\n");
985 mod->can.can_stats.bus_error++;
986 stats->rx_errors++;
987 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
988
989 switch (ecc & ECC_MASK) {
990 case ECC_BIT:
991 cf->data[2] |= CAN_ERR_PROT_BIT;
992 break;
993 case ECC_FORM:
994 cf->data[2] |= CAN_ERR_PROT_FORM;
995 break;
996 case ECC_STUFF:
997 cf->data[2] |= CAN_ERR_PROT_STUFF;
998 break;
999 default:
1000 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
1001 cf->data[3] = ecc & ECC_SEG;
1002 break;
1003 }
1004
1005 if ((ecc & ECC_DIR) == 0)
1006 cf->data[2] |= CAN_ERR_PROT_TX;
1007
1008 cf->data[6] = txerr;
1009 cf->data[7] = rxerr;
1010 }
1011
1012 if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING ||
1013 state == CAN_STATE_ERROR_PASSIVE)) {
1014 cf->can_id |= CAN_ERR_CRTL;
1015 if (state == CAN_STATE_ERROR_WARNING) {
1016 mod->can.can_stats.error_warning++;
1017 cf->data[1] = (txerr > rxerr) ?
1018 CAN_ERR_CRTL_TX_WARNING :
1019 CAN_ERR_CRTL_RX_WARNING;
1020 } else {
1021 mod->can.can_stats.error_passive++;
1022 cf->data[1] = (txerr > rxerr) ?
1023 CAN_ERR_CRTL_TX_PASSIVE :
1024 CAN_ERR_CRTL_RX_PASSIVE;
1025 }
1026
1027 cf->data[6] = txerr;
1028 cf->data[7] = rxerr;
1029 }
1030
1031 mod->can.state = state;
1032 stats->rx_errors++;
1033 stats->rx_bytes += cf->can_dlc;
1034 netif_rx(skb);
1035 return 0;
1036}
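
As a compact restatement of the state decision made above, here is a standalone sketch (not used by the driver; the SR_BS/SR_ES bit values are stand-ins assumed from the SJA1000 status register layout):

#include <assert.h>
#include <stdint.h>

/* simplified stand-ins for the kernel definitions used above */
#define SR_BS	0x80	/* SJA1000 bus status bit (assumed value) */
#define SR_ES	0x40	/* SJA1000 error status bit (assumed value) */
enum state { ACTIVE, WARNING, PASSIVE, BUS_OFF };

/* sketch: the same decision the handler above makes from the status byte
 * and the RX/TX error counters */
static enum state state_from_regs(uint8_t status, uint8_t rxerr, uint8_t txerr)
{
	if (status & SR_BS)
		return BUS_OFF;
	if (status & SR_ES)
		return (rxerr >= 128 || txerr >= 128) ? PASSIVE : WARNING;
	return ACTIVE;
}

int main(void)
{
	assert(state_from_regs(SR_ES, 96, 130) == PASSIVE);
	assert(state_from_regs(SR_ES, 100, 20) == WARNING);
	assert(state_from_regs(SR_BS | SR_ES, 255, 255) == BUS_OFF);
	assert(state_from_regs(0, 0, 0) == ACTIVE);
	return 0;
}
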
1037
1038static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
1039{
1040 switch (msg->data[0]) {
1041 case INQUIRY_STATUS:
1042 case INQUIRY_EXTENDED:
1043 mod->bec.rxerr = msg->data[5];
1044 mod->bec.txerr = msg->data[6];
1045 complete(&mod->buserror_comp);
1046 break;
1047 case INQUIRY_TERMINATION:
1048 mod->termination_enabled = msg->data[6] & HWCONF_TERMINATE_ON;
1049 complete(&mod->termination_comp);
1050 break;
1051 default:
1052 dev_err(mod->dev, "received an unknown inquiry response\n");
1053 break;
1054 }
1055}
1056
1057static void ican3_handle_unknown_message(struct ican3_dev *mod,
1058 struct ican3_msg *msg)
1059{
1060 dev_warn(mod->dev, "received unknown message: spec 0x%.2x length %d\n",
1061 msg->spec, le16_to_cpu(msg->len));
1062}
1063
1064/*
1065 * Handle a control message from the firmware
1066 */
1067static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
1068{
1069 dev_dbg(mod->dev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__,
1070 mod->num, msg->spec, le16_to_cpu(msg->len));
1071
1072 switch (msg->spec) {
1073 case MSG_IDVERS:
1074 ican3_handle_idvers(mod, msg);
1075 break;
1076 case MSG_MSGLOST:
1077 case MSG_FMSGLOST:
1078 ican3_handle_msglost(mod, msg);
1079 break;
1080 case MSG_CEVTIND:
1081 ican3_handle_cevtind(mod, msg);
1082 break;
1083 case MSG_INQUIRY:
1084 ican3_handle_inquiry(mod, msg);
1085 break;
1086 default:
1087 ican3_handle_unknown_message(mod, msg);
1088 break;
1089 }
1090}
1091
1092/*
1093 * Check that there is room in the TX ring to transmit another skb
1094 *
1095 * LOCKING: must hold mod->lock
1096 */
1097static bool ican3_txok(struct ican3_dev *mod)
1098{
1099 struct ican3_fast_desc __iomem *desc;
1100 u8 control;
1101
1102 /* copy the control bits of the descriptor */
1103 ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16));
1104 desc = mod->dpm + ((mod->fasttx_num % 16) * sizeof(*desc));
1105 control = ioread8(&desc->control);
1106
1107 /* if the control bits are not valid, then we have no more space */
1108 if (!(control & DESC_VALID))
1109 return false;
1110
1111 return true;
1112}
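
The descriptor addressing used here (and in the receive and transmit paths below) splits the ring index into a DPM page and an offset within that page: the division by 16 selects the page relative to the ring's first page, and the remainder times the descriptor size gives the byte offset. A minimal standalone sketch of that arithmetic, with hypothetical numbers (start page 10, descriptor 37, 16-byte descriptors):

#include <stdio.h>
#include <stddef.h>

#define DESCS_PER_PAGE	16	/* implied by the /16 and %16 used above */

/* sketch: which DPM page and byte offset hold descriptor 'num', given the
 * first page of the ring ('start') and the descriptor size */
static void locate_desc(unsigned int start, unsigned int num,
			size_t desc_size, unsigned int *page, size_t *offset)
{
	*page = start + (num / DESCS_PER_PAGE);
	*offset = (num % DESCS_PER_PAGE) * desc_size;
}

int main(void)
{
	unsigned int page;
	size_t offset;

	/* hypothetical values for illustration only */
	locate_desc(10, 37, 16, &page, &offset);
	printf("page %u, offset %zu\n", page, offset);	/* page 12, offset 80 */
	return 0;
}
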
1113
1114/*
1115 * Receive one CAN frame from the hardware
1116 *
1117 * This works like the core of a NAPI poll function. It is called from
1118 * ican3_napi() to pull one CAN frame at a time off the fast interface,
1119 * until either there are no more frames or the NAPI budget is exhausted.
1120 * Control messages are handled separately via ican3_handle_message().
1121 *
1122 * CONTEXT: called from the NAPI poll routine (softirq context)
1123 */
1124static int ican3_recv_skb(struct ican3_dev *mod)
1125{
1126 struct net_device *ndev = mod->ndev;
1127 struct net_device_stats *stats = &ndev->stats;
1128 struct ican3_fast_desc desc;
1129 void __iomem *desc_addr;
1130 struct can_frame *cf;
1131 struct sk_buff *skb;
1132 unsigned long flags;
1133
1134 spin_lock_irqsave(&mod->lock, flags);
1135
1136 /* copy the whole descriptor */
1137 ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16));
1138 desc_addr = mod->dpm + ((mod->fastrx_num % 16) * sizeof(desc));
1139 memcpy_fromio(&desc, desc_addr, sizeof(desc));
1140
1141 spin_unlock_irqrestore(&mod->lock, flags);
1142
1143 /* check that we actually have a CAN frame */
1144 if (!(desc.control & DESC_VALID))
1145 return -ENOBUFS;
1146
1147 /* allocate an skb */
1148 skb = alloc_can_skb(ndev, &cf);
1149 if (unlikely(skb == NULL)) {
1150 stats->rx_dropped++;
1151 goto err_noalloc;
1152 }
1153
1154 /* convert the ICAN3 frame into Linux CAN format */
1155 ican3_to_can_frame(mod, &desc, cf);
1156
1157 /* receive the skb, update statistics */
1158 netif_receive_skb(skb);
1159 stats->rx_packets++;
1160 stats->rx_bytes += cf->can_dlc;
1161
1162err_noalloc:
1163 /* toggle the valid bit and return the descriptor to the ring */
1164 desc.control ^= DESC_VALID;
1165
1166 spin_lock_irqsave(&mod->lock, flags);
1167
1168 ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16));
1169 memcpy_toio(desc_addr, &desc, 1);
1170
1171 /* update the next buffer pointer */
1172 mod->fastrx_num = (desc.control & DESC_WRAP) ? 0
1173 : (mod->fastrx_num + 1);
1174
1175 /* there are still more buffers to process */
1176 spin_unlock_irqrestore(&mod->lock, flags);
1177 return 0;
1178}
1179
1180static int ican3_napi(struct napi_struct *napi, int budget)
1181{
1182 struct ican3_dev *mod = container_of(napi, struct ican3_dev, napi);
1183 struct ican3_msg msg;
1184 unsigned long flags;
1185 int received = 0;
1186 int ret;
1187
1188 /* process all communication messages */
1189 while (true) {
1190 ret = ican3_recv_msg(mod, &msg);
1191 if (ret)
1192 break;
1193
1194 ican3_handle_message(mod, &msg);
1195 }
1196
1197 /* process all CAN frames from the fast interface */
1198 while (received < budget) {
1199 ret = ican3_recv_skb(mod);
1200 if (ret)
1201 break;
1202
1203 received++;
1204 }
1205
1206 /* We have processed all packets that the adapter had; since that
1207 * was less than our budget, stop polling */
1208 if (received < budget)
1209 napi_complete(napi);
1210
1211 spin_lock_irqsave(&mod->lock, flags);
1212
1213 /* Wake up the transmit queue if necessary */
1214 if (netif_queue_stopped(mod->ndev) && ican3_txok(mod))
1215 netif_wake_queue(mod->ndev);
1216
1217 spin_unlock_irqrestore(&mod->lock, flags);
1218
1219 /* re-enable interrupt generation */
1220 iowrite8(1 << mod->num, &mod->ctrl->int_enable);
1221 return received;
1222}
1223
1224static irqreturn_t ican3_irq(int irq, void *dev_id)
1225{
1226 struct ican3_dev *mod = dev_id;
1227 u8 stat;
1228
1229 /*
1230 * The interrupt status register on this device reports a pending
1231 * interrupt as a zero bit, rather than a one bit like most other devices
1232 */
1233 stat = ioread8(&mod->ctrl->int_disable) & (1 << mod->num);
1234 if (stat == (1 << mod->num))
1235 return IRQ_NONE;
1236
1237 /* clear the MODULbus interrupt from the microcontroller */
1238 ioread8(&mod->dpmctrl->interrupt);
1239
1240 /* disable interrupt generation, schedule the NAPI poller */
1241 iowrite8(1 << mod->num, &mod->ctrl->int_disable);
1242 napi_schedule(&mod->napi);
1243 return IRQ_HANDLED;
1244}
1245
1246/*
1247 * Firmware reset, startup, and shutdown
1248 */
1249
1250/*
1251 * Reset an ICAN module to its power-on state
1252 *
1253 * CONTEXT: no network device registered
1254 * LOCKING: work function disabled
1255 */
1256static int ican3_reset_module(struct ican3_dev *mod)
1257{
1258 u8 val = 1 << mod->num;
1259 unsigned long start;
1260 u8 runold, runnew;
1261
1262 /* disable interrupts so no more work is scheduled */
1263 iowrite8(1 << mod->num, &mod->ctrl->int_disable);
1264
1265 /* flush any pending work */
1266 flush_scheduled_work();
1267
1268 /* the first unallocated page in the DPM is #9 */
1269 mod->free_page = DPM_FREE_START;
1270
1271 ican3_set_page(mod, QUEUE_OLD_CONTROL);
1272 runold = ioread8(mod->dpm + TARGET_RUNNING);
1273
1274 /* reset the module */
1275 iowrite8(val, &mod->ctrl->reset_assert);
1276 iowrite8(val, &mod->ctrl->reset_deassert);
1277
1278 /* wait until the module has finished resetting and is running again (the check below expects TARGET_RUNNING to read back complemented) */
1279 start = jiffies;
1280 do {
1281 ican3_set_page(mod, QUEUE_OLD_CONTROL);
1282 runnew = ioread8(mod->dpm + TARGET_RUNNING);
1283 if (runnew == (runold ^ 0xff))
1284 return 0;
1285
1286 msleep(10);
1287 } while (time_before(jiffies, start + HZ / 4));
1288
1289 dev_err(mod->dev, "failed to reset CAN module\n");
1290 return -ETIMEDOUT;
1291}
1292
1293static void __devexit ican3_shutdown_module(struct ican3_dev *mod)
1294{
1295 ican3_msg_disconnect(mod);
1296 ican3_reset_module(mod);
1297}
1298
1299/*
1300 * Start up an ICAN module, bringing it into fast mode
1301 */
1302static int __devinit ican3_startup_module(struct ican3_dev *mod)
1303{
1304 int ret;
1305
1306 ret = ican3_reset_module(mod);
1307 if (ret) {
1308 dev_err(mod->dev, "unable to reset module\n");
1309 return ret;
1310 }
1311
1312 /* re-enable interrupts so we can send messages */
1313 iowrite8(1 << mod->num, &mod->ctrl->int_enable);
1314
1315 ret = ican3_msg_connect(mod);
1316 if (ret) {
1317 dev_err(mod->dev, "unable to connect to module\n");
1318 return ret;
1319 }
1320
1321 ican3_init_new_host_interface(mod);
1322 ret = ican3_msg_newhostif(mod);
1323 if (ret) {
1324 dev_err(mod->dev, "unable to switch to new-style interface\n");
1325 return ret;
1326 }
1327
1328 /* default to "termination on" */
1329 ret = ican3_set_termination(mod, true);
1330 if (ret) {
1331 dev_err(mod->dev, "unable to enable termination\n");
1332 return ret;
1333 }
1334
1335 /* default to "bus errors enabled" */
1336 ret = ican3_set_buserror(mod, ICAN3_BUSERR_QUOTA_MAX);
1337 if (ret) {
1338 dev_err(mod->dev, "unable to set bus-error\n");
1339 return ret;
1340 }
1341
1342 ican3_init_fast_host_interface(mod);
1343 ret = ican3_msg_fasthostif(mod);
1344 if (ret) {
1345 dev_err(mod->dev, "unable to switch to fast host interface\n");
1346 return ret;
1347 }
1348
1349 ret = ican3_set_id_filter(mod, true);
1350 if (ret) {
1351 dev_err(mod->dev, "unable to set acceptance filter\n");
1352 return ret;
1353 }
1354
1355 return 0;
1356}
1357
1358/*
1359 * CAN Network Device
1360 */
1361
1362static int ican3_open(struct net_device *ndev)
1363{
1364 struct ican3_dev *mod = netdev_priv(ndev);
1365 u8 quota;
1366 int ret;
1367
1368 /* open the CAN layer */
1369 ret = open_candev(ndev);
1370 if (ret) {
1371 dev_err(mod->dev, "unable to start CAN layer\n");
1372 return ret;
1373 }
1374
1375 /* set the bus error generation state appropriately */
1376 if (mod->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
1377 quota = ICAN3_BUSERR_QUOTA_MAX;
1378 else
1379 quota = 0;
1380
1381 ret = ican3_set_buserror(mod, quota);
1382 if (ret) {
1383 dev_err(mod->dev, "unable to set bus-error\n");
1384 close_candev(ndev);
1385 return ret;
1386 }
1387
1388 /* bring the bus online */
1389 ret = ican3_set_bus_state(mod, true);
1390 if (ret) {
1391 dev_err(mod->dev, "unable to set bus-on\n");
1392 close_candev(ndev);
1393 return ret;
1394 }
1395
1396 /* start up the network device */
1397 mod->can.state = CAN_STATE_ERROR_ACTIVE;
1398 netif_start_queue(ndev);
1399
1400 return 0;
1401}
1402
1403static int ican3_stop(struct net_device *ndev)
1404{
1405 struct ican3_dev *mod = netdev_priv(ndev);
1406 int ret;
1407
1408 /* stop the network device xmit routine */
1409 netif_stop_queue(ndev);
1410 mod->can.state = CAN_STATE_STOPPED;
1411
1412 /* bring the bus offline, stop receiving packets */
1413 ret = ican3_set_bus_state(mod, false);
1414 if (ret) {
1415 dev_err(mod->dev, "unable to set bus-off\n");
1416 return ret;
1417 }
1418
1419 /* close the CAN layer */
1420 close_candev(ndev);
1421 return 0;
1422}
1423
1424static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
1425{
1426 struct ican3_dev *mod = netdev_priv(ndev);
1427 struct net_device_stats *stats = &ndev->stats;
1428 struct can_frame *cf = (struct can_frame *)skb->data;
1429 struct ican3_fast_desc desc;
1430 void __iomem *desc_addr;
1431 unsigned long flags;
1432
1433 spin_lock_irqsave(&mod->lock, flags);
1434
1435 /* check that we can actually transmit */
1436 if (!ican3_txok(mod)) {
1437 dev_err(mod->dev, "no free descriptors, stopping queue\n");
1438 netif_stop_queue(ndev);
1439 spin_unlock_irqrestore(&mod->lock, flags);
1440 return NETDEV_TX_BUSY;
1441 }
1442
1443 /* copy the control bits of the descriptor */
1444 ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16));
1445 desc_addr = mod->dpm + ((mod->fasttx_num % 16) * sizeof(desc));
1446 memset(&desc, 0, sizeof(desc));
1447 memcpy_fromio(&desc, desc_addr, 1);
1448
1449 /* convert the Linux CAN frame into ICAN3 format */
1450 can_frame_to_ican3(mod, cf, &desc);
1451
1452 /*
1453 * The programming manual says that you must set the IVALID bit, then
1454 * raise the interrupt, then set the VALID bit. Quite weird, but it seems
1455 * to be required for this to work
1456 */
1457 desc.control |= DESC_IVALID;
1458 memcpy_toio(desc_addr, &desc, sizeof(desc));
1459
1460 /* generate a MODULbus interrupt to the microcontroller */
1461 iowrite8(0x01, &mod->dpmctrl->interrupt);
1462
1463 desc.control ^= DESC_VALID;
1464 memcpy_toio(desc_addr, &desc, sizeof(desc));
1465
1466 /* update the next buffer pointer */
1467 mod->fasttx_num = (desc.control & DESC_WRAP) ? 0
1468 : (mod->fasttx_num + 1);
1469
1470 /* update statistics */
1471 stats->tx_packets++;
1472 stats->tx_bytes += cf->can_dlc;
1473 kfree_skb(skb);
1474
1475 /*
1476 * This hardware doesn't have TX-done notifications, so we try to
1477 * emulate them as best we can using ECHO skbs. Get the next TX
1478 * descriptor, and see if we have room to send. If not, stop the queue.
1479 * It will be woken when the ECHO skb for the current packet is received.
1480 */
1481
1482 /* copy the control bits of the descriptor */
1483 if (!ican3_txok(mod))
1484 netif_stop_queue(ndev);
1485
1486 spin_unlock_irqrestore(&mod->lock, flags);
1487 return NETDEV_TX_OK;
1488}
1489
1490static const struct net_device_ops ican3_netdev_ops = {
1491 .ndo_open = ican3_open,
1492 .ndo_stop = ican3_stop,
1493 .ndo_start_xmit = ican3_xmit,
1494};
1495
1496/*
1497 * Low-level CAN Device
1498 */
1499
1500/* This structure was stolen from drivers/net/can/sja1000/sja1000.c */
1501static struct can_bittiming_const ican3_bittiming_const = {
1502 .name = DRV_NAME,
1503 .tseg1_min = 1,
1504 .tseg1_max = 16,
1505 .tseg2_min = 1,
1506 .tseg2_max = 8,
1507 .sjw_max = 4,
1508 .brp_min = 1,
1509 .brp_max = 64,
1510 .brp_inc = 1,
1511};
1512
1513/*
1514 * This routine was stolen from drivers/net/can/sja1000/sja1000.c
1515 *
1516 * The bittiming register command for the ICAN3 just sets the bit timing
1517 * registers on the SJA1000 chip directly
1518 */
1519static int ican3_set_bittiming(struct net_device *ndev)
1520{
1521 struct ican3_dev *mod = netdev_priv(ndev);
1522 struct can_bittiming *bt = &mod->can.bittiming;
1523 struct ican3_msg msg;
1524 u8 btr0, btr1;
1525
1526 btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
1527 btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
1528 (((bt->phase_seg2 - 1) & 0x7) << 4);
1529 if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
1530 btr1 |= 0x80;
1531
1532 memset(&msg, 0, sizeof(msg));
1533 msg.spec = MSG_CBTRREQ;
1534 msg.len = cpu_to_le16(4);
1535 msg.data[0] = 0x00;
1536 msg.data[1] = 0x00;
1537 msg.data[2] = btr0;
1538 msg.data[3] = btr1;
1539
1540 return ican3_send_msg(mod, &msg);
1541}
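
As a worked example of the packing above, here is a standalone check (not part of the driver; the timing values brp=4, sjw=1, tseg1=13, tseg2=2 are arbitrary illustrations). These are the same BTR0/BTR1 bytes a plain SJA1000 driver would program; the ICAN3 simply forwards them in the MSG_CBTRREQ message:

#include <assert.h>
#include <stdint.h>

/* standalone sketch of the BTR0/BTR1 packing done in ican3_set_bittiming() */
static void sja1000_btr(uint32_t brp, uint32_t sjw, uint32_t tseg1,
			uint32_t tseg2, int triple_sample,
			uint8_t *btr0, uint8_t *btr1)
{
	*btr0 = ((brp - 1) & 0x3f) | (((sjw - 1) & 0x3) << 6);
	*btr1 = ((tseg1 - 1) & 0xf) | (((tseg2 - 1) & 0x7) << 4);
	if (triple_sample)
		*btr1 |= 0x80;
}

int main(void)
{
	uint8_t btr0, btr1;

	/* example timing: brp=4, sjw=1, tseg1 (prop+phase1)=13, tseg2=2 */
	sja1000_btr(4, 1, 13, 2, 0, &btr0, &btr1);
	assert(btr0 == 0x03);
	assert(btr1 == 0x1c);
	return 0;
}
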
1542
1543static int ican3_set_mode(struct net_device *ndev, enum can_mode mode)
1544{
1545 struct ican3_dev *mod = netdev_priv(ndev);
1546 int ret;
1547
1548 if (mode != CAN_MODE_START)
1549 return -ENOTSUPP;
1550
1551 /* bring the bus online */
1552 ret = ican3_set_bus_state(mod, true);
1553 if (ret) {
1554 dev_err(mod->dev, "unable to set bus-on\n");
1555 return ret;
1556 }
1557
1558 /* start up the network device */
1559 mod->can.state = CAN_STATE_ERROR_ACTIVE;
1560
1561 if (netif_queue_stopped(ndev))
1562 netif_wake_queue(ndev);
1563
1564 return 0;
1565}
1566
1567static int ican3_get_berr_counter(const struct net_device *ndev,
1568 struct can_berr_counter *bec)
1569{
1570 struct ican3_dev *mod = netdev_priv(ndev);
1571 int ret;
1572
1573 ret = ican3_send_inquiry(mod, INQUIRY_STATUS);
1574 if (ret)
1575 return ret;
1576
1577 ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
1578 if (ret <= 0) {
1579 dev_info(mod->dev, "%s timed out\n", __func__);
1580 return -ETIMEDOUT;
1581 }
1582
1583 bec->rxerr = mod->bec.rxerr;
1584 bec->txerr = mod->bec.txerr;
1585 return 0;
1586}
1587
1588/*
1589 * Sysfs Attributes
1590 */
1591
1592static ssize_t ican3_sysfs_show_term(struct device *dev,
1593 struct device_attribute *attr,
1594 char *buf)
1595{
1596 struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
1597 int ret;
1598
1599 ret = ican3_send_inquiry(mod, INQUIRY_TERMINATION);
1600 if (ret)
1601 return ret;
1602
1603 ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
1604 if (ret <= 0) {
1605 dev_info(mod->dev, "%s timed out\n", __func__);
1606 return -ETIMEDOUT;
1607 }
1608
1609 return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled);
1610}
1611
1612static ssize_t ican3_sysfs_set_term(struct device *dev,
1613 struct device_attribute *attr,
1614 const char *buf, size_t count)
1615{
1616 struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
1617 unsigned long enable;
1618 int ret;
1619
1620 if (strict_strtoul(buf, 0, &enable))
1621 return -EINVAL;
1622
1623 ret = ican3_set_termination(mod, enable);
1624 if (ret)
1625 return ret;
1626
1627 return count;
1628}
1629
1630static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term,
1631 ican3_sysfs_set_term);
1632
1633static struct attribute *ican3_sysfs_attrs[] = {
1634 &dev_attr_termination.attr,
1635 NULL,
1636};
1637
1638static struct attribute_group ican3_sysfs_attr_group = {
1639 .attrs = ican3_sysfs_attrs,
1640};
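
Since the group is later plugged into ndev->sysfs_groups in ican3_probe(), the termination attribute shows up under the network device's sysfs directory. A minimal userspace sketch for reading it (the interface name "can0" is only an assumption; substitute the actual device name):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[8];
	ssize_t n;
	/* hypothetical path; replace "can0" with the real interface name */
	int fd = open("/sys/class/net/can0/termination", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("termination enabled: %s", buf);	/* "0" or "1" */
	}
	close(fd);
	return 0;
}
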
1641
1642/*
1643 * Platform Driver
1644 */
1645
1646static int __devinit ican3_probe(struct platform_device *pdev)
1647{
1648 struct janz_platform_data *pdata;
1649 struct net_device *ndev;
1650 struct ican3_dev *mod;
1651 struct resource *res;
1652 struct device *dev;
1653 int ret;
1654
1655 pdata = pdev->dev.platform_data;
1656 if (!pdata)
1657 return -ENXIO;
1658
1659 dev_dbg(&pdev->dev, "probe: module number %d\n", pdata->modno);
1660
1661 /* save the struct device for printing */
1662 dev = &pdev->dev;
1663
1664 /* allocate the CAN device and private data */
1665 ndev = alloc_candev(sizeof(*mod), 0);
1666 if (!ndev) {
1667 dev_err(dev, "unable to allocate CANdev\n");
1668 ret = -ENOMEM;
1669 goto out_return;
1670 }
1671
1672 platform_set_drvdata(pdev, ndev);
1673 mod = netdev_priv(ndev);
1674 mod->ndev = ndev;
1675 mod->dev = &pdev->dev;
1676 mod->num = pdata->modno;
1677 netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
1678 spin_lock_init(&mod->lock);
1679 init_completion(&mod->termination_comp);
1680 init_completion(&mod->buserror_comp);
1681
1682 /* setup device-specific sysfs attributes */
1683 ndev->sysfs_groups[0] = &ican3_sysfs_attr_group;
1684
1685 /* the first unallocated page in the DPM is 9 */
1686 mod->free_page = DPM_FREE_START;
1687
1688 ndev->netdev_ops = &ican3_netdev_ops;
1689 ndev->flags |= IFF_ECHO;
1690 SET_NETDEV_DEV(ndev, &pdev->dev);
1691
1692 mod->can.clock.freq = ICAN3_CAN_CLOCK;
1693 mod->can.bittiming_const = &ican3_bittiming_const;
1694 mod->can.do_set_bittiming = ican3_set_bittiming;
1695 mod->can.do_set_mode = ican3_set_mode;
1696 mod->can.do_get_berr_counter = ican3_get_berr_counter;
1697 mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES
1698 | CAN_CTRLMODE_BERR_REPORTING;
1699
1700 /* find our IRQ number */
1701 mod->irq = platform_get_irq(pdev, 0);
1702 if (mod->irq < 0) {
1703 dev_err(dev, "IRQ line not found\n");
1704 ret = -ENODEV;
1705 goto out_free_ndev;
1706 }
1707
1708 ndev->irq = mod->irq;
1709
1710 /* get access to the MODULbus registers for this module */
1711 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1712 if (!res) {
1713 dev_err(dev, "MODULbus registers not found\n");
1714 ret = -ENODEV;
1715 goto out_free_ndev;
1716 }
1717
1718 mod->dpm = ioremap(res->start, resource_size(res));
1719 if (!mod->dpm) {
1720 dev_err(dev, "unable to ioremap MODULbus registers\n");
1721 ret = -ENOMEM;
1722 goto out_free_ndev;
1723 }
1724
1725 mod->dpmctrl = mod->dpm + DPM_PAGE_SIZE;
1726
1727 /* get access to the control registers for this module */
1728 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1729 if (!res) {
1730 dev_err(dev, "CONTROL registers not found\n");
1731 ret = -ENODEV;
1732 goto out_iounmap_dpm;
1733 }
1734
1735 mod->ctrl = ioremap(res->start, resource_size(res));
1736 if (!mod->ctrl) {
1737 dev_err(dev, "unable to ioremap CONTROL registers\n");
1738 ret = -ENOMEM;
1739 goto out_iounmap_dpm;
1740 }
1741
1742 /* disable our IRQ, then hookup the IRQ handler */
1743 iowrite8(1 << mod->num, &mod->ctrl->int_disable);
1744 ret = request_irq(mod->irq, ican3_irq, IRQF_SHARED, DRV_NAME, mod);
1745 if (ret) {
1746 dev_err(dev, "unable to request IRQ\n");
1747 goto out_iounmap_ctrl;
1748 }
1749
1750 /* reset and initialize the CAN controller into fast mode */
1751 napi_enable(&mod->napi);
1752 ret = ican3_startup_module(mod);
1753 if (ret) {
1754 dev_err(dev, "%s: unable to start CANdev\n", __func__);
1755 goto out_free_irq;
1756 }
1757
1758 /* register with the Linux CAN layer */
1759 ret = register_candev(ndev);
1760 if (ret) {
1761 dev_err(dev, "%s: unable to register CANdev\n", __func__);
1762 goto out_free_irq;
1763 }
1764
1765 dev_info(dev, "module %d: registered CAN device\n", pdata->modno);
1766 return 0;
1767
1768out_free_irq:
1769 napi_disable(&mod->napi);
1770 iowrite8(1 << mod->num, &mod->ctrl->int_disable);
1771 free_irq(mod->irq, mod);
1772out_iounmap_ctrl:
1773 iounmap(mod->ctrl);
1774out_iounmap_dpm:
1775 iounmap(mod->dpm);
1776out_free_ndev:
1777 free_candev(ndev);
1778out_return:
1779 return ret;
1780}
1781
1782static int __devexit ican3_remove(struct platform_device *pdev)
1783{
1784 struct net_device *ndev = platform_get_drvdata(pdev);
1785 struct ican3_dev *mod = netdev_priv(ndev);
1786
1787 /* unregister the netdevice, stop interrupts */
1788 unregister_netdev(ndev);
1789 napi_disable(&mod->napi);
1790 iowrite8(1 << mod->num, &mod->ctrl->int_disable);
1791 free_irq(mod->irq, mod);
1792
1793 /* put the module into reset */
1794 ican3_shutdown_module(mod);
1795
1796 /* unmap all registers */
1797 iounmap(mod->ctrl);
1798 iounmap(mod->dpm);
1799
1800 free_candev(ndev);
1801
1802 return 0;
1803}
1804
1805static struct platform_driver ican3_driver = {
1806 .driver = {
1807 .name = DRV_NAME,
1808 .owner = THIS_MODULE,
1809 },
1810 .probe = ican3_probe,
1811 .remove = __devexit_p(ican3_remove),
1812};
1813
1814static int __init ican3_init(void)
1815{
1816 return platform_driver_register(&ican3_driver);
1817}
1818
1819static void __exit ican3_exit(void)
1820{
1821 platform_driver_unregister(&ican3_driver);
1822}
1823
1824MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
1825MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver");
1826MODULE_LICENSE("GPL");
1827MODULE_ALIAS("platform:janz-ican3");
1828
1829module_init(ican3_init);
1830module_exit(ican3_exit);
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index be90d3598bca..fe925663d39a 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -3367,13 +3367,9 @@ static int cnic_cm_shutdown(struct cnic_dev *dev)
3367 3367
3368static void cnic_init_context(struct cnic_dev *dev, u32 cid) 3368static void cnic_init_context(struct cnic_dev *dev, u32 cid)
3369{ 3369{
3370 struct cnic_local *cp = dev->cnic_priv;
3371 u32 cid_addr; 3370 u32 cid_addr;
3372 int i; 3371 int i;
3373 3372
3374 if (CHIP_NUM(cp) == CHIP_NUM_5709)
3375 return;
3376
3377 cid_addr = GET_CID_ADDR(cid); 3373 cid_addr = GET_CID_ADDR(cid);
3378 3374
3379 for (i = 0; i < CTX_SIZE; i += 4) 3375 for (i = 0; i < CTX_SIZE; i += 4)
@@ -3530,14 +3526,11 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
3530 3526
3531 sb_id = cp->status_blk_num; 3527 sb_id = cp->status_blk_num;
3532 tx_cid = 20; 3528 tx_cid = 20;
3533 cnic_init_context(dev, tx_cid);
3534 cnic_init_context(dev, tx_cid + 1);
3535 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; 3529 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
3536 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3530 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3537 struct status_block_msix *sblk = cp->status_blk.bnx2; 3531 struct status_block_msix *sblk = cp->status_blk.bnx2;
3538 3532
3539 tx_cid = TX_TSS_CID + sb_id - 1; 3533 tx_cid = TX_TSS_CID + sb_id - 1;
3540 cnic_init_context(dev, tx_cid);
3541 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | 3534 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
3542 (TX_TSS_CID << 7)); 3535 (TX_TSS_CID << 7));
3543 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; 3536 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
@@ -3556,6 +3549,9 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
3556 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; 3549 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3557 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; 3550 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3558 } else { 3551 } else {
3552 cnic_init_context(dev, tx_cid);
3553 cnic_init_context(dev, tx_cid + 1);
3554
3559 offset0 = BNX2_L2CTX_TYPE; 3555 offset0 = BNX2_L2CTX_TYPE;
3560 offset1 = BNX2_L2CTX_CMD_TYPE; 3556 offset1 = BNX2_L2CTX_CMD_TYPE;
3561 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; 3557 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 110c62072e6f..0c55177db046 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.1.1" 15#define CNIC_MODULE_VERSION "2.1.2"
16#define CNIC_MODULE_RELDATE "Feb 22, 2010" 16#define CNIC_MODULE_RELDATE "May 26, 2010"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 326465ffbb23..ddf7a86cd466 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -681,6 +681,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
681 struct phy_device *phy_dev = NULL; 681 struct phy_device *phy_dev = NULL;
682 int phy_addr; 682 int phy_addr;
683 683
684 fep->phy_dev = NULL;
685
684 /* find the first phy */ 686 /* find the first phy */
685 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { 687 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
686 if (fep->mii_bus->phy_map[phy_addr]) { 688 if (fep->mii_bus->phy_map[phy_addr]) {
@@ -711,6 +713,11 @@ static int fec_enet_mii_probe(struct net_device *dev)
711 fep->link = 0; 713 fep->link = 0;
712 fep->full_duplex = 0; 714 fep->full_duplex = 0;
713 715
716 printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
717 "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
718 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
719 fep->phy_dev->irq);
720
714 return 0; 721 return 0;
715} 722}
716 723
@@ -756,13 +763,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
756 if (mdiobus_register(fep->mii_bus)) 763 if (mdiobus_register(fep->mii_bus))
757 goto err_out_free_mdio_irq; 764 goto err_out_free_mdio_irq;
758 765
759 if (fec_enet_mii_probe(dev) != 0)
760 goto err_out_unregister_bus;
761
762 return 0; 766 return 0;
763 767
764err_out_unregister_bus:
765 mdiobus_unregister(fep->mii_bus);
766err_out_free_mdio_irq: 768err_out_free_mdio_irq:
767 kfree(fep->mii_bus->irq); 769 kfree(fep->mii_bus->irq);
768err_out_free_mdiobus: 770err_out_free_mdiobus:
@@ -915,7 +917,12 @@ fec_enet_open(struct net_device *dev)
915 if (ret) 917 if (ret)
916 return ret; 918 return ret;
917 919
918 /* schedule a link state check */ 920 /* Probe and connect to the PHY when opening the interface */
921 ret = fec_enet_mii_probe(dev);
922 if (ret) {
923 fec_enet_free_buffers(dev);
924 return ret;
925 }
919 phy_start(fep->phy_dev); 926 phy_start(fep->phy_dev);
920 netif_start_queue(dev); 927 netif_start_queue(dev);
921 fep->opened = 1; 928 fep->opened = 1;
@@ -929,10 +936,12 @@ fec_enet_close(struct net_device *dev)
929 936
930 /* Don't know what to do yet. */ 937 /* Don't know what to do yet. */
931 fep->opened = 0; 938 fep->opened = 0;
932 phy_stop(fep->phy_dev);
933 netif_stop_queue(dev); 939 netif_stop_queue(dev);
934 fec_stop(dev); 940 fec_stop(dev);
935 941
942 if (fep->phy_dev)
943 phy_disconnect(fep->phy_dev);
944
936 fec_enet_free_buffers(dev); 945 fec_enet_free_buffers(dev);
937 946
938 return 0; 947 return 0;
@@ -1316,11 +1325,6 @@ fec_probe(struct platform_device *pdev)
1316 if (ret) 1325 if (ret)
1317 goto failed_register; 1326 goto failed_register;
1318 1327
1319 printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
1320 "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
1321 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
1322 fep->phy_dev->irq);
1323
1324 return 0; 1328 return 0;
1325 1329
1326failed_register: 1330failed_register:
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 694132e04af6..4e7d1d0a2340 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1151,8 +1151,7 @@ static int __init yam_init_driver(void)
1151 dev = alloc_netdev(sizeof(struct yam_port), name, 1151 dev = alloc_netdev(sizeof(struct yam_port), name,
1152 yam_setup); 1152 yam_setup);
1153 if (!dev) { 1153 if (!dev) {
1154 printk(KERN_ERR "yam: cannot allocate net device %s\n", 1154 pr_err("yam: cannot allocate net device\n");
1155 dev->name);
1156 err = -ENOMEM; 1155 err = -ENOMEM;
1157 goto error; 1156 goto error;
1158 } 1157 }
diff --git a/drivers/net/ll_temac.h b/drivers/net/ll_temac.h
index c03358434acb..522abe2ff25a 100644
--- a/drivers/net/ll_temac.h
+++ b/drivers/net/ll_temac.h
@@ -295,6 +295,10 @@ This option defaults to enabled (set) */
295 295
296#define MULTICAST_CAM_TABLE_NUM 4 296#define MULTICAST_CAM_TABLE_NUM 4
297 297
298/* TEMAC Synthesis features */
299#define TEMAC_FEATURE_RX_CSUM (1 << 0)
300#define TEMAC_FEATURE_TX_CSUM (1 << 1)
301
298/* TX/RX CURDESC_PTR points to first descriptor */ 302/* TX/RX CURDESC_PTR points to first descriptor */
299/* TX/RX TAILDESC_PTR points to last descriptor in linked list */ 303/* TX/RX TAILDESC_PTR points to last descriptor in linked list */
300 304
@@ -353,6 +357,7 @@ struct temac_local {
353 struct mutex indirect_mutex; 357 struct mutex indirect_mutex;
354 u32 options; /* Current options word */ 358 u32 options; /* Current options word */
355 int last_link; 359 int last_link;
360 unsigned int temac_features;
356 361
357 /* Buffer descriptors */ 362 /* Buffer descriptors */
358 struct cdmac_bd *tx_bd_v; 363 struct cdmac_bd *tx_bd_v;
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index fa7620e28404..52dcc8495647 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -245,7 +245,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
245 CHNL_CTRL_IRQ_COAL_EN); 245 CHNL_CTRL_IRQ_COAL_EN);
246 /* 0x10220483 */ 246 /* 0x10220483 */
247 /* 0x00100483 */ 247 /* 0x00100483 */
248 lp->dma_out(lp, RX_CHNL_CTRL, 0xff010000 | 248 lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
249 CHNL_CTRL_IRQ_EN | 249 CHNL_CTRL_IRQ_EN |
250 CHNL_CTRL_IRQ_DLY_EN | 250 CHNL_CTRL_IRQ_DLY_EN |
251 CHNL_CTRL_IRQ_COAL_EN | 251 CHNL_CTRL_IRQ_COAL_EN |
@@ -574,6 +574,10 @@ static void temac_start_xmit_done(struct net_device *ndev)
574 if (cur_p->app4) 574 if (cur_p->app4)
575 dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); 575 dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
576 cur_p->app0 = 0; 576 cur_p->app0 = 0;
577 cur_p->app1 = 0;
578 cur_p->app2 = 0;
579 cur_p->app3 = 0;
580 cur_p->app4 = 0;
577 581
578 ndev->stats.tx_packets++; 582 ndev->stats.tx_packets++;
579 ndev->stats.tx_bytes += cur_p->len; 583 ndev->stats.tx_bytes += cur_p->len;
@@ -589,6 +593,29 @@ static void temac_start_xmit_done(struct net_device *ndev)
589 netif_wake_queue(ndev); 593 netif_wake_queue(ndev);
590} 594}
591 595
596static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
597{
598 struct cdmac_bd *cur_p;
599 int tail;
600
601 tail = lp->tx_bd_tail;
602 cur_p = &lp->tx_bd_v[tail];
603
604 do {
605 if (cur_p->app0)
606 return NETDEV_TX_BUSY;
607
608 tail++;
609 if (tail >= TX_BD_NUM)
610 tail = 0;
611
612 cur_p = &lp->tx_bd_v[tail];
613 num_frag--;
614 } while (num_frag >= 0);
615
616 return 0;
617}
618
592static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) 619static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
593{ 620{
594 struct temac_local *lp = netdev_priv(ndev); 621 struct temac_local *lp = netdev_priv(ndev);
@@ -603,7 +630,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
603 start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; 630 start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
604 cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; 631 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
605 632
606 if (cur_p->app0 & STS_CTRL_APP0_CMPLT) { 633 if (temac_check_tx_bd_space(lp, num_frag)) {
607 if (!netif_queue_stopped(ndev)) { 634 if (!netif_queue_stopped(ndev)) {
608 netif_stop_queue(ndev); 635 netif_stop_queue(ndev);
609 return NETDEV_TX_BUSY; 636 return NETDEV_TX_BUSY;
@@ -613,29 +640,14 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
613 640
614 cur_p->app0 = 0; 641 cur_p->app0 = 0;
615 if (skb->ip_summed == CHECKSUM_PARTIAL) { 642 if (skb->ip_summed == CHECKSUM_PARTIAL) {
616 const struct iphdr *ip = ip_hdr(skb); 643 unsigned int csum_start_off = skb_transport_offset(skb);
617 int length = 0, start = 0, insert = 0; 644 unsigned int csum_index_off = csum_start_off + skb->csum_offset;
618 645
619 switch (ip->protocol) { 646 cur_p->app0 |= 1; /* TX Checksum Enabled */
620 case IPPROTO_TCP: 647 cur_p->app1 = (csum_start_off << 16) | csum_index_off;
621 start = sizeof(struct iphdr) + ETH_HLEN; 648 cur_p->app2 = 0; /* initial checksum seed */
622 insert = sizeof(struct iphdr) + ETH_HLEN + 16;
623 length = ip->tot_len - sizeof(struct iphdr);
624 break;
625 case IPPROTO_UDP:
626 start = sizeof(struct iphdr) + ETH_HLEN;
627 insert = sizeof(struct iphdr) + ETH_HLEN + 6;
628 length = ip->tot_len - sizeof(struct iphdr);
629 break;
630 default:
631 break;
632 }
633 cur_p->app1 = ((start << 16) | insert);
634 cur_p->app2 = csum_tcpudp_magic(ip->saddr, ip->daddr,
635 length, ip->protocol, 0);
636 skb->data[insert] = 0;
637 skb->data[insert + 1] = 0;
638 } 649 }
650
639 cur_p->app0 |= STS_CTRL_APP0_SOP; 651 cur_p->app0 |= STS_CTRL_APP0_SOP;
640 cur_p->len = skb_headlen(skb); 652 cur_p->len = skb_headlen(skb);
641 cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len, 653 cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
@@ -699,6 +711,15 @@ static void ll_temac_recv(struct net_device *ndev)
699 skb->protocol = eth_type_trans(skb, ndev); 711 skb->protocol = eth_type_trans(skb, ndev);
700 skb->ip_summed = CHECKSUM_NONE; 712 skb->ip_summed = CHECKSUM_NONE;
701 713
714 /* if we're doing rx csum offload, set it up */
715 if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
716 (skb->protocol == __constant_htons(ETH_P_IP)) &&
717 (skb->len > 64)) {
718
719 skb->csum = cur_p->app3 & 0xFFFF;
720 skb->ip_summed = CHECKSUM_COMPLETE;
721 }
722
702 netif_rx(skb); 723 netif_rx(skb);
703 724
704 ndev->stats.rx_packets++; 725 ndev->stats.rx_packets++;
@@ -883,6 +904,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
883 struct temac_local *lp; 904 struct temac_local *lp;
884 struct net_device *ndev; 905 struct net_device *ndev;
885 const void *addr; 906 const void *addr;
907 __be32 *p;
886 int size, rc = 0; 908 int size, rc = 0;
887 909
888 /* Init network device structure */ 910 /* Init network device structure */
@@ -926,6 +948,18 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
926 goto nodev; 948 goto nodev;
927 } 949 }
928 950
951 /* Setup checksum offload, but default to off if not specified */
952 lp->temac_features = 0;
953 p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
954 if (p && be32_to_cpu(*p)) {
955 lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
956 /* Can checksum TCP/UDP over IPv4. */
957 ndev->features |= NETIF_F_IP_CSUM;
958 }
959 p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
960 if (p && be32_to_cpu(*p))
961 lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
962
929 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 963 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
930 np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); 964 np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
931 if (!np) { 965 if (!np) {
@@ -950,7 +984,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
950 984
951 lp->rx_irq = irq_of_parse_and_map(np, 0); 985 lp->rx_irq = irq_of_parse_and_map(np, 0);
952 lp->tx_irq = irq_of_parse_and_map(np, 1); 986 lp->tx_irq = irq_of_parse_and_map(np, 1);
953 if (!lp->rx_irq || !lp->tx_irq) { 987 if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
954 dev_err(&op->dev, "could not determine irqs\n"); 988 dev_err(&op->dev, "could not determine irqs\n");
955 rc = -ENOMEM; 989 rc = -ENOMEM;
956 goto nodev; 990 goto nodev;
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
index 1586e1caa2f5..8bef6d60f88b 100644
--- a/drivers/parport/parport_amiga.c
+++ b/drivers/parport/parport_amiga.c
@@ -18,6 +18,8 @@
18#include <linux/parport.h> 18#include <linux/parport.h>
19#include <linux/ioport.h> 19#include <linux/ioport.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
22
21#include <asm/setup.h> 23#include <asm/setup.h>
22#include <asm/amigahw.h> 24#include <asm/amigahw.h>
23#include <asm/irq.h> 25#include <asm/irq.h>
@@ -31,7 +33,6 @@
31#define DPRINTK(x...) do { } while (0) 33#define DPRINTK(x...) do { } while (0)
32#endif 34#endif
33 35
34static struct parport *this_port = NULL;
35 36
36static void amiga_write_data(struct parport *p, unsigned char data) 37static void amiga_write_data(struct parport *p, unsigned char data)
37{ 38{
@@ -227,18 +228,11 @@ static struct parport_operations pp_amiga_ops = {
227 228
228/* ----------- Initialisation code --------------------------------- */ 229/* ----------- Initialisation code --------------------------------- */
229 230
230static int __init parport_amiga_init(void) 231static int __init amiga_parallel_probe(struct platform_device *pdev)
231{ 232{
232 struct parport *p; 233 struct parport *p;
233 int err; 234 int err;
234 235
235 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_PARALLEL))
236 return -ENODEV;
237
238 err = -EBUSY;
239 if (!request_mem_region(CIAA_PHYSADDR-1+0x100, 0x100, "parallel"))
240 goto out_mem;
241
242 ciaa.ddrb = 0xff; 236 ciaa.ddrb = 0xff;
243 ciab.ddra &= 0xf8; 237 ciab.ddra &= 0xf8;
244 mb(); 238 mb();
@@ -246,41 +240,63 @@ static int __init parport_amiga_init(void)
246 p = parport_register_port((unsigned long)&ciaa.prb, IRQ_AMIGA_CIAA_FLG, 240 p = parport_register_port((unsigned long)&ciaa.prb, IRQ_AMIGA_CIAA_FLG,
247 PARPORT_DMA_NONE, &pp_amiga_ops); 241 PARPORT_DMA_NONE, &pp_amiga_ops);
248 if (!p) 242 if (!p)
249 goto out_port; 243 return -EBUSY;
250 244
251 err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name, p); 245 err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name,
246 p);
252 if (err) 247 if (err)
253 goto out_irq; 248 goto out_irq;
254 249
255 this_port = p;
256 printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name); 250 printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name);
257 /* XXX: set operating mode */ 251 /* XXX: set operating mode */
258 parport_announce_port(p); 252 parport_announce_port(p);
259 253
254 platform_set_drvdata(pdev, p);
255
260 return 0; 256 return 0;
261 257
262out_irq: 258out_irq:
263 parport_put_port(p); 259 parport_put_port(p);
264out_port:
265 release_mem_region(CIAA_PHYSADDR-1+0x100, 0x100);
266out_mem:
267 return err; 260 return err;
268} 261}
269 262
270static void __exit parport_amiga_exit(void) 263static int __exit amiga_parallel_remove(struct platform_device *pdev)
264{
265 struct parport *port = platform_get_drvdata(pdev);
266
267 parport_remove_port(port);
268 if (port->irq != PARPORT_IRQ_NONE)
269 free_irq(IRQ_AMIGA_CIAA_FLG, port);
270 parport_put_port(port);
271 platform_set_drvdata(pdev, NULL);
272 return 0;
273}
274
275static struct platform_driver amiga_parallel_driver = {
276 .remove = __exit_p(amiga_parallel_remove),
277 .driver = {
278 .name = "amiga-parallel",
279 .owner = THIS_MODULE,
280 },
281};
282
283static int __init amiga_parallel_init(void)
284{
285 return platform_driver_probe(&amiga_parallel_driver,
286 amiga_parallel_probe);
287}
288
289module_init(amiga_parallel_init);
290
291static void __exit amiga_parallel_exit(void)
271{ 292{
272 parport_remove_port(this_port); 293 platform_driver_unregister(&amiga_parallel_driver);
273 if (this_port->irq != PARPORT_IRQ_NONE)
274 free_irq(IRQ_AMIGA_CIAA_FLG, this_port);
275 parport_put_port(this_port);
276 release_mem_region(CIAA_PHYSADDR-1+0x100, 0x100);
277} 294}
278 295
296module_exit(amiga_parallel_exit);
279 297
280MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>"); 298MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
281MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port"); 299MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port");
282MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port"); 300MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port");
283MODULE_LICENSE("GPL"); 301MODULE_LICENSE("GPL");
284 302MODULE_ALIAS("platform:amiga-parallel");
285module_init(parport_amiga_init)
286module_exit(parport_amiga_exit)
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 7aaae2d2bd67..80c11d131499 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -130,4 +130,21 @@ static inline int aer_osc_setup(struct pcie_device *pciedev)
130} 130}
131#endif 131#endif
132 132
133#ifdef CONFIG_ACPI_APEI
134extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
135#else
136static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
137{
138 if (pci_dev->__aer_firmware_first_valid)
139 return pci_dev->__aer_firmware_first;
140 return 0;
141}
142#endif
143
144static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
145 int enable)
146{
147 pci_dev->__aer_firmware_first = !!enable;
148 pci_dev->__aer_firmware_first_valid = 1;
149}
133#endif /* _AERDRV_H_ */ 150#endif /* _AERDRV_H_ */
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 04814087658d..f278d7b0d95d 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -16,6 +16,7 @@
16#include <linux/acpi.h> 16#include <linux/acpi.h>
17#include <linux/pci-acpi.h> 17#include <linux/pci-acpi.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <acpi/apei.h>
19#include "aerdrv.h" 20#include "aerdrv.h"
20 21
21/** 22/**
@@ -53,3 +54,79 @@ int aer_osc_setup(struct pcie_device *pciedev)
53 54
54 return 0; 55 return 0;
55} 56}
57
58#ifdef CONFIG_ACPI_APEI
59static inline int hest_match_pci(struct acpi_hest_aer_common *p,
60 struct pci_dev *pci)
61{
62 return (0 == pci_domain_nr(pci->bus) &&
63 p->bus == pci->bus->number &&
64 p->device == PCI_SLOT(pci->devfn) &&
65 p->function == PCI_FUNC(pci->devfn));
66}
67
68struct aer_hest_parse_info {
69 struct pci_dev *pci_dev;
70 int firmware_first;
71};
72
73static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
74{
75 struct aer_hest_parse_info *info = data;
76 struct acpi_hest_aer_common *p;
77 u8 pcie_type = 0;
78 u8 bridge = 0;
79 int ff = 0;
80
81 switch (hest_hdr->type) {
82 case ACPI_HEST_TYPE_AER_ROOT_PORT:
83 pcie_type = PCI_EXP_TYPE_ROOT_PORT;
84 break;
85 case ACPI_HEST_TYPE_AER_ENDPOINT:
86 pcie_type = PCI_EXP_TYPE_ENDPOINT;
87 break;
88 case ACPI_HEST_TYPE_AER_BRIDGE:
89 if ((info->pci_dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
90 bridge = 1;
91 break;
92 default:
93 return 0;
94 }
95
96 p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
97 if (p->flags & ACPI_HEST_GLOBAL) {
98 if ((info->pci_dev->is_pcie &&
99 info->pci_dev->pcie_type == pcie_type) || bridge)
100 ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
101 } else
102 if (hest_match_pci(p, info->pci_dev))
103 ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
104 info->firmware_first = ff;
105
106 return 0;
107}
108
109static void aer_set_firmware_first(struct pci_dev *pci_dev)
110{
111 int rc;
112 struct aer_hest_parse_info info = {
113 .pci_dev = pci_dev,
114 .firmware_first = 0,
115 };
116
117 rc = apei_hest_parse(aer_hest_parse, &info);
118
119 if (rc)
120 pci_dev->__aer_firmware_first = 0;
121 else
122 pci_dev->__aer_firmware_first = info.firmware_first;
123 pci_dev->__aer_firmware_first_valid = 1;
124}
125
126int pcie_aer_get_firmware_first(struct pci_dev *dev)
127{
128 if (!dev->__aer_firmware_first_valid)
129 aer_set_firmware_first(dev);
130 return dev->__aer_firmware_first;
131}
132#endif
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index df2d686fe3dd..8af4f619bba2 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -36,7 +36,7 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
36 u16 reg16 = 0; 36 u16 reg16 = 0;
37 int pos; 37 int pos;
38 38
39 if (dev->aer_firmware_first) 39 if (pcie_aer_get_firmware_first(dev))
40 return -EIO; 40 return -EIO;
41 41
42 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 42 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
@@ -63,7 +63,7 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev)
63 u16 reg16 = 0; 63 u16 reg16 = 0;
64 int pos; 64 int pos;
65 65
66 if (dev->aer_firmware_first) 66 if (pcie_aer_get_firmware_first(dev))
67 return -EIO; 67 return -EIO;
68 68
69 pos = pci_pcie_cap(dev); 69 pos = pci_pcie_cap(dev);
@@ -771,7 +771,7 @@ void aer_isr(struct work_struct *work)
771 */ 771 */
772int aer_init(struct pcie_device *dev) 772int aer_init(struct pcie_device *dev)
773{ 773{
774 if (dev->port->aer_firmware_first) { 774 if (pcie_aer_get_firmware_first(dev->port)) {
775 dev_printk(KERN_DEBUG, &dev->device, 775 dev_printk(KERN_DEBUG, &dev->device,
776 "PCIe errors handled by platform firmware.\n"); 776 "PCIe errors handled by platform firmware.\n");
777 goto out; 777 goto out;
@@ -785,7 +785,7 @@ out:
785 if (forceload) { 785 if (forceload) {
786 dev_printk(KERN_DEBUG, &dev->device, 786 dev_printk(KERN_DEBUG, &dev->device,
787 "aerdrv forceload requested.\n"); 787 "aerdrv forceload requested.\n");
788 dev->port->aer_firmware_first = 0; 788 pcie_aer_force_firmware_first(dev->port, 0);
789 return 0; 789 return 0;
790 } 790 }
791 return -ENXIO; 791 return -ENXIO;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c82548afcd5c..f4adba2d1dd3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -10,7 +10,6 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/cpumask.h> 11#include <linux/cpumask.h>
12#include <linux/pci-aspm.h> 12#include <linux/pci-aspm.h>
13#include <acpi/acpi_hest.h>
14#include "pci.h" 13#include "pci.h"
15 14
16#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ 15#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
@@ -904,12 +903,6 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
904 pdev->is_hotplug_bridge = 1; 903 pdev->is_hotplug_bridge = 1;
905} 904}
906 905
907static void set_pci_aer_firmware_first(struct pci_dev *pdev)
908{
909 if (acpi_hest_firmware_first_pci(pdev))
910 pdev->aer_firmware_first = 1;
911}
912
913#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) 906#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
914 907
915/** 908/**
@@ -939,7 +932,6 @@ int pci_setup_device(struct pci_dev *dev)
939 dev->multifunction = !!(hdr_type & 0x80); 932 dev->multifunction = !!(hdr_type & 0x80);
940 dev->error_state = pci_channel_io_normal; 933 dev->error_state = pci_channel_io_normal;
941 set_pcie_port_type(dev); 934 set_pcie_port_type(dev);
942 set_pci_aer_firmware_first(dev);
943 935
944 list_for_each_entry(slot, &dev->bus->slots, list) 936 list_for_each_entry(slot, &dev->bus->slots, list)
945 if (PCI_SLOT(dev->devfn) == slot->number) 937 if (PCI_SLOT(dev->devfn) == slot->number)
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index c32822ad84a4..070211a5955c 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -8,3 +8,27 @@ config RAPIDIO_DISC_TIMEOUT
8 ---help--- 8 ---help---
9 Amount of time a discovery node waits for a host to complete 9 Amount of time a discovery node waits for a host to complete
10 enumeration before giving up. 10 enumeration before giving up.
11
12config RAPIDIO_ENABLE_RX_TX_PORTS
13 bool "Enable RapidIO Input/Output Ports"
14 depends on RAPIDIO
15 ---help---
16 The RapidIO specification describes an Output port transmit
17 enable and an Input port receive enable. The recommended state
18 for both Input and Output ports is disabled. When this switch
19 is set, the RapidIO subsystem will enable all ports for
20 Input/Output direction to allow traffic other than
21 Maintenance transfers.
22
23source "drivers/rapidio/switches/Kconfig"
24
25config RAPIDIO_DEBUG
26 bool "RapidIO subsystem debug messages"
27 depends on RAPIDIO
28 help
29 Say Y here if you want the RapidIO subsystem to produce a bunch of
30 debug messages to the system log. Select this if you are having a
31 problem with the RapidIO subsystem and want to see more of what is
32 going on.
33
34 If you are unsure about this, say N here.
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile
index 7c0e1818de51..b6139fe187bf 100644
--- a/drivers/rapidio/Makefile
+++ b/drivers/rapidio/Makefile
@@ -4,3 +4,7 @@
4obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o 4obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o
5 5
6obj-$(CONFIG_RAPIDIO) += switches/ 6obj-$(CONFIG_RAPIDIO) += switches/
7
8ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
9EXTRA_CFLAGS += -DDEBUG
10endif
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 45415096c294..8070e074c739 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -4,6 +4,14 @@
4 * Copyright 2005 MontaVista Software, Inc. 4 * Copyright 2005 MontaVista Software, Inc.
5 * Matt Porter <mporter@kernel.crashing.org> 5 * Matt Porter <mporter@kernel.crashing.org>
6 * 6 *
7 * Copyright 2009 Integrated Device Technology, Inc.
8 * Alex Bounine <alexandre.bounine@idt.com>
9 * - Added Port-Write/Error Management initialization and handling
10 *
11 * Copyright 2009 Sysgo AG
12 * Thomas Moll <thomas.moll@sysgo.com>
13 * - Added Input- Output- enable functionality, to allow full communication
14 *
7 * This program is free software; you can redistribute it and/or modify it 15 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 16 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your 17 * Free Software Foundation; either version 2 of the License, or (at your
@@ -31,15 +39,16 @@
31LIST_HEAD(rio_devices); 39LIST_HEAD(rio_devices);
32static LIST_HEAD(rio_switches); 40static LIST_HEAD(rio_switches);
33 41
34#define RIO_ENUM_CMPL_MAGIC 0xdeadbeef
35
36static void rio_enum_timeout(unsigned long); 42static void rio_enum_timeout(unsigned long);
37 43
44static void rio_init_em(struct rio_dev *rdev);
45
38DEFINE_SPINLOCK(rio_global_list_lock); 46DEFINE_SPINLOCK(rio_global_list_lock);
39 47
40static int next_destid = 0; 48static int next_destid = 0;
41static int next_switchid = 0; 49static int next_switchid = 0;
42static int next_net = 0; 50static int next_net = 0;
51static int next_comptag;
43 52
44static struct timer_list rio_enum_timer = 53static struct timer_list rio_enum_timer =
45TIMER_INITIALIZER(rio_enum_timeout, 0, 0); 54TIMER_INITIALIZER(rio_enum_timeout, 0, 0);
@@ -52,12 +61,6 @@ static int rio_mport_phys_table[] = {
52 -1, 61 -1,
53}; 62};
54 63
55static int rio_sport_phys_table[] = {
56 RIO_EFB_PAR_EP_FREE_ID,
57 RIO_EFB_SER_EP_FREE_ID,
58 -1,
59};
60
61/** 64/**
62 * rio_get_device_id - Get the base/extended device id for a device 65 * rio_get_device_id - Get the base/extended device id for a device
63 * @port: RIO master port 66 * @port: RIO master port
@@ -118,12 +121,26 @@ static int rio_clear_locks(struct rio_mport *port)
118 u32 result; 121 u32 result;
119 int ret = 0; 122 int ret = 0;
120 123
121 /* Write component tag CSR magic complete value */ 124 /* Assign component tag to all devices */
122 rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, 125 next_comptag = 1;
123 RIO_ENUM_CMPL_MAGIC); 126 rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, next_comptag++);
124 list_for_each_entry(rdev, &rio_devices, global_list) 127
125 rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, 128 list_for_each_entry(rdev, &rio_devices, global_list) {
126 RIO_ENUM_CMPL_MAGIC); 129 /* Mark device as discovered */
130 rio_read_config_32(rdev,
131 rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
132 &result);
133 rio_write_config_32(rdev,
134 rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
135 result | RIO_PORT_GEN_DISCOVERED);
136
137 rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, next_comptag);
138 rdev->comp_tag = next_comptag++;
139 if (next_comptag >= 0x10000) {
140 pr_err("RIO: Component Tag Counter Overflow\n");
141 break;
142 }
143 }
127 144
128 /* Release host device id locks */ 145 /* Release host device id locks */
129 rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR, 146 rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR,
@@ -229,27 +246,37 @@ static int rio_is_switch(struct rio_dev *rdev)
229} 246}
230 247
231/** 248/**
232 * rio_route_set_ops- Sets routing operations for a particular vendor switch 249 * rio_switch_init - Sets switch operations for a particular vendor switch
233 * @rdev: RIO device 250 * @rdev: RIO device
251 * @do_enum: Enumeration/Discovery mode flag
234 * 252 *
235 * Searches the RIO route ops table for known switch types. If the vid 253 * Searches the RIO switch ops table for known switch types. If the vid
236 * and did match a switch table entry, then set the add_entry() and 254 * and did match a switch table entry, then call switch initialization
237 * get_entry() ops to the table entry values. 255 * routine to setup switch-specific routines.
238 */ 256 */
239static void rio_route_set_ops(struct rio_dev *rdev) 257static void rio_switch_init(struct rio_dev *rdev, int do_enum)
240{ 258{
241 struct rio_route_ops *cur = __start_rio_route_ops; 259 struct rio_switch_ops *cur = __start_rio_switch_ops;
242 struct rio_route_ops *end = __end_rio_route_ops; 260 struct rio_switch_ops *end = __end_rio_switch_ops;
243 261
244 while (cur < end) { 262 while (cur < end) {
245 if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) { 263 if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
246 pr_debug("RIO: adding routing ops for %s\n", rio_name(rdev)); 264 pr_debug("RIO: calling init routine for %s\n",
247 rdev->rswitch->add_entry = cur->add_hook; 265 rio_name(rdev));
248 rdev->rswitch->get_entry = cur->get_hook; 266 cur->init_hook(rdev, do_enum);
267 break;
249 } 268 }
250 cur++; 269 cur++;
251 } 270 }
252 271
272 if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
273 pr_debug("RIO: adding STD routing ops for %s\n",
274 rio_name(rdev));
275 rdev->rswitch->add_entry = rio_std_route_add_entry;
276 rdev->rswitch->get_entry = rio_std_route_get_entry;
277 rdev->rswitch->clr_table = rio_std_route_clr_table;
278 }
279
253 if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry) 280 if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
254 printk(KERN_ERR "RIO: missing routing ops for %s\n", 281 printk(KERN_ERR "RIO: missing routing ops for %s\n",
255 rio_name(rdev)); 282 rio_name(rdev));
@@ -281,6 +308,65 @@ static int __devinit rio_add_device(struct rio_dev *rdev)
281} 308}
282 309
283/** 310/**
 311 * rio_enable_rx_tx_port - enable input receiver and output transmitter of
 312 * a given port
313 * @port: Master port associated with the RIO network
 314 * @local: 1 selects the local port, otherwise a far-end device is addressed
315 * @destid: Destination ID of the device to check host bit
316 * @hopcount: Number of hops to reach the target
 317 * @port_num: Port number (on a switch) to enable on a far-end device
318 *
 319 * Returns %0 on success, or %-EIO if the port control register
 320 * access fails.
321 */
322inline int rio_enable_rx_tx_port(struct rio_mport *port,
323 int local, u16 destid,
324 u8 hopcount, u8 port_num) {
325#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
326 u32 regval;
327 u32 ext_ftr_ptr;
328
329 /*
330 * enable rx input tx output port
331 */
332 pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
333 "%d, port_num = %d)\n", local, destid, hopcount, port_num);
334
335 ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
336
337 if (local) {
338 rio_local_read_config_32(port, ext_ftr_ptr +
339 RIO_PORT_N_CTL_CSR(0),
340 &regval);
341 } else {
342 if (rio_mport_read_config_32(port, destid, hopcount,
343 ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
344 return -EIO;
345 }
346
347 if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
348 /* serial */
349 regval = regval | RIO_PORT_N_CTL_EN_RX_SER
350 | RIO_PORT_N_CTL_EN_TX_SER;
351 } else {
352 /* parallel */
353 regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
354 | RIO_PORT_N_CTL_EN_TX_PAR;
355 }
356
357 if (local) {
358 rio_local_write_config_32(port, ext_ftr_ptr +
359 RIO_PORT_N_CTL_CSR(0), regval);
360 } else {
361 if (rio_mport_write_config_32(port, destid, hopcount,
362 ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
363 return -EIO;
364 }
365#endif
366 return 0;
367}
368
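
A hedged sketch of the calling convention, based on the call sites added later in this patch: the local master port is selected with local=1 (destid, hopcount and port_num are then ignored), while a remote device is addressed by destid and hopcount with local=0. The wrapper function name below is illustrative only.

/* Sketch only: enable RX/TX first on the local mport, then on a
 * remote endpoint reached through the fabric. */
static int example_enable_ports(struct rio_mport *mport, u16 destid,
				u8 hopcount)
{
	int rc;

	rc = rio_enable_rx_tx_port(mport, 1, 0, 0, 0);	/* local port */
	if (rc)
		return rc;

	/* far-end device, port 0 */
	return rio_enable_rx_tx_port(mport, 0, destid, hopcount, 0);
}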
369/**
284 * rio_setup_device- Allocates and sets up a RIO device 370 * rio_setup_device- Allocates and sets up a RIO device
285 * @net: RIO network 371 * @net: RIO network
286 * @port: Master port to send transactions 372 * @port: Master port to send transactions
@@ -325,8 +411,14 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
325 rdev->asm_rev = result >> 16; 411 rdev->asm_rev = result >> 16;
326 rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR, 412 rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR,
327 &rdev->pef); 413 &rdev->pef);
328 if (rdev->pef & RIO_PEF_EXT_FEATURES) 414 if (rdev->pef & RIO_PEF_EXT_FEATURES) {
329 rdev->efptr = result & 0xffff; 415 rdev->efptr = result & 0xffff;
416 rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
417 hopcount);
418
419 rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
420 hopcount, RIO_EFB_ERR_MGMNT);
421 }
330 422
331 rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR, 423 rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
332 &rdev->src_ops); 424 &rdev->src_ops);
@@ -349,12 +441,13 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
349 if (rio_is_switch(rdev)) { 441 if (rio_is_switch(rdev)) {
350 rio_mport_read_config_32(port, destid, hopcount, 442 rio_mport_read_config_32(port, destid, hopcount,
351 RIO_SWP_INFO_CAR, &rdev->swpinfo); 443 RIO_SWP_INFO_CAR, &rdev->swpinfo);
352 rswitch = kmalloc(sizeof(struct rio_switch), GFP_KERNEL); 444 rswitch = kzalloc(sizeof(struct rio_switch), GFP_KERNEL);
353 if (!rswitch) 445 if (!rswitch)
354 goto cleanup; 446 goto cleanup;
355 rswitch->switchid = next_switchid; 447 rswitch->switchid = next_switchid;
356 rswitch->hopcount = hopcount; 448 rswitch->hopcount = hopcount;
357 rswitch->destid = destid; 449 rswitch->destid = destid;
450 rswitch->port_ok = 0;
358 rswitch->route_table = kzalloc(sizeof(u8)* 451 rswitch->route_table = kzalloc(sizeof(u8)*
359 RIO_MAX_ROUTE_ENTRIES(port->sys_size), 452 RIO_MAX_ROUTE_ENTRIES(port->sys_size),
360 GFP_KERNEL); 453 GFP_KERNEL);
@@ -367,13 +460,22 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net,
367 rdev->rswitch = rswitch; 460 rdev->rswitch = rswitch;
368 dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id, 461 dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
369 rdev->rswitch->switchid); 462 rdev->rswitch->switchid);
370 rio_route_set_ops(rdev); 463 rio_switch_init(rdev, do_enum);
464
465 if (do_enum && rdev->rswitch->clr_table)
466 rdev->rswitch->clr_table(port, destid, hopcount,
467 RIO_GLOBAL_TABLE);
371 468
372 list_add_tail(&rswitch->node, &rio_switches); 469 list_add_tail(&rswitch->node, &rio_switches);
373 470
374 } else 471 } else {
472 if (do_enum)
 473		/* Enable Input/Output port (transmitter and receiver) */
474 rio_enable_rx_tx_port(port, 0, destid, hopcount, 0);
475
375 dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id, 476 dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id,
376 rdev->destid); 477 rdev->destid);
478 }
377 479
378 rdev->dev.bus = &rio_bus_type; 480 rdev->dev.bus = &rio_bus_type;
379 481
@@ -414,23 +516,29 @@ cleanup:
414 * 516 *
415 * Reads the port error status CSR for a particular switch port to 517 * Reads the port error status CSR for a particular switch port to
416 * determine if the port has an active link. Returns 518 * determine if the port has an active link. Returns
417 * %PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is 519 * %RIO_PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is
418 * inactive. 520 * inactive.
419 */ 521 */
420static int 522static int
421rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) 523rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
422{ 524{
423 u32 result; 525 u32 result = 0;
424 u32 ext_ftr_ptr; 526 u32 ext_ftr_ptr;
425 527
426 int *entry = rio_sport_phys_table; 528 ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0);
427
428 do {
429 if ((ext_ftr_ptr =
430 rio_mport_get_feature(port, 0, destid, hopcount, *entry)))
431 529
530 while (ext_ftr_ptr) {
531 rio_mport_read_config_32(port, destid, hopcount,
532 ext_ftr_ptr, &result);
533 result = RIO_GET_BLOCK_ID(result);
534 if ((result == RIO_EFB_SER_EP_FREE_ID) ||
535 (result == RIO_EFB_SER_EP_FREE_ID_V13P) ||
536 (result == RIO_EFB_SER_EP_FREC_ID))
432 break; 537 break;
433 } while (*++entry >= 0); 538
539 ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount,
540 ext_ftr_ptr);
541 }
434 542
435 if (ext_ftr_ptr) 543 if (ext_ftr_ptr)
436 rio_mport_read_config_32(port, destid, hopcount, 544 rio_mport_read_config_32(port, destid, hopcount,
@@ -438,7 +546,81 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
438 RIO_PORT_N_ERR_STS_CSR(sport), 546 RIO_PORT_N_ERR_STS_CSR(sport),
439 &result); 547 &result);
440 548
441 return (result & PORT_N_ERR_STS_PORT_OK); 549 return result & RIO_PORT_N_ERR_STS_PORT_OK;
550}
551
552/**
553 * rio_lock_device - Acquires host device lock for specified device
554 * @port: Master port to send transaction
555 * @destid: Destination ID for device/switch
556 * @hopcount: Hopcount to reach switch
557 * @wait_ms: Max wait time in msec (0 = no timeout)
558 *
 559 * Attempts to acquire the host device lock for the specified device.
 560 * Returns 0 if the device lock is acquired or -EINVAL if the timeout expires.
561 */
562static int
563rio_lock_device(struct rio_mport *port, u16 destid, u8 hopcount, int wait_ms)
564{
565 u32 result;
566 int tcnt = 0;
567
568 /* Attempt to acquire device lock */
569 rio_mport_write_config_32(port, destid, hopcount,
570 RIO_HOST_DID_LOCK_CSR, port->host_deviceid);
571 rio_mport_read_config_32(port, destid, hopcount,
572 RIO_HOST_DID_LOCK_CSR, &result);
573
574 while (result != port->host_deviceid) {
575 if (wait_ms != 0 && tcnt == wait_ms) {
576 pr_debug("RIO: timeout when locking device %x:%x\n",
577 destid, hopcount);
578 return -EINVAL;
579 }
580
581 /* Delay a bit */
582 mdelay(1);
583 tcnt++;
584 /* Try to acquire device lock again */
585 rio_mport_write_config_32(port, destid,
586 hopcount,
587 RIO_HOST_DID_LOCK_CSR,
588 port->host_deviceid);
589 rio_mport_read_config_32(port, destid,
590 hopcount,
591 RIO_HOST_DID_LOCK_CSR, &result);
592 }
593
594 return 0;
595}
596
597/**
598 * rio_unlock_device - Releases host device lock for specified device
599 * @port: Master port to send transaction
600 * @destid: Destination ID for device/switch
601 * @hopcount: Hopcount to reach switch
602 *
 603 * Returns 0 if the device lock is released or -EINVAL on failure.
604 */
605static int
606rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount)
607{
608 u32 result;
609
610 /* Release device lock */
611 rio_mport_write_config_32(port, destid,
612 hopcount,
613 RIO_HOST_DID_LOCK_CSR,
614 port->host_deviceid);
615 rio_mport_read_config_32(port, destid, hopcount,
616 RIO_HOST_DID_LOCK_CSR, &result);
617 if ((result & 0xffff) != 0xffff) {
618 pr_debug("RIO: badness when releasing device lock %x:%x\n",
619 destid, hopcount);
620 return -EINVAL;
621 }
622
623 return 0;
442} 624}
443 625
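
A minimal sketch of the acquire/access/release pattern these two helpers enable; the wrapper name and the register offset parameter are illustrative only.

/* Sketch only: maintenance write guarded by the Host Device ID lock. */
static int example_locked_write(struct rio_mport *port, u16 destid,
				u8 hopcount, u32 offset, u32 value)
{
	int rc;

	rc = rio_lock_device(port, destid, hopcount, 1000);	/* ~1s max */
	if (rc)
		return rc;

	rc = rio_mport_write_config_32(port, destid, hopcount, offset, value);

	rio_unlock_device(port, destid, hopcount);
	return rc;
}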
444/** 626/**
@@ -448,6 +630,7 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
448 * @table: Routing table ID 630 * @table: Routing table ID
449 * @route_destid: Destination ID to be routed 631 * @route_destid: Destination ID to be routed
450 * @route_port: Port number to be routed 632 * @route_port: Port number to be routed
633 * @lock: lock switch device flag
451 * 634 *
452 * Calls the switch specific add_entry() method to add a route entry 635 * Calls the switch specific add_entry() method to add a route entry
453 * on a switch. The route table can be specified using the @table 636 * on a switch. The route table can be specified using the @table
@@ -456,12 +639,26 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
456 * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL 639 * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL
457 * on failure. 640 * on failure.
458 */ 641 */
459static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch, 642static int
460 u16 table, u16 route_destid, u8 route_port) 643rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch,
644 u16 table, u16 route_destid, u8 route_port, int lock)
461{ 645{
462 return rswitch->add_entry(mport, rswitch->destid, 646 int rc;
647
648 if (lock) {
649 rc = rio_lock_device(mport, rswitch->destid,
650 rswitch->hopcount, 1000);
651 if (rc)
652 return rc;
653 }
654
655 rc = rswitch->add_entry(mport, rswitch->destid,
463 rswitch->hopcount, table, 656 rswitch->hopcount, table,
464 route_destid, route_port); 657 route_destid, route_port);
658 if (lock)
659 rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
660
661 return rc;
465} 662}
466 663
467/** 664/**
@@ -471,6 +668,7 @@ static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswit
471 * @table: Routing table ID 668 * @table: Routing table ID
472 * @route_destid: Destination ID to be routed 669 * @route_destid: Destination ID to be routed
473 * @route_port: Pointer to read port number into 670 * @route_port: Pointer to read port number into
671 * @lock: lock switch device flag
474 * 672 *
475 * Calls the switch specific get_entry() method to read a route entry 673 * Calls the switch specific get_entry() method to read a route entry
476 * in a switch. The route table can be specified using the @table 674 * in a switch. The route table can be specified using the @table
@@ -481,11 +679,24 @@ static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswit
481 */ 679 */
482static int 680static int
483rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table, 681rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table,
484 u16 route_destid, u8 * route_port) 682 u16 route_destid, u8 *route_port, int lock)
485{ 683{
486 return rswitch->get_entry(mport, rswitch->destid, 684 int rc;
685
686 if (lock) {
687 rc = rio_lock_device(mport, rswitch->destid,
688 rswitch->hopcount, 1000);
689 if (rc)
690 return rc;
691 }
692
693 rc = rswitch->get_entry(mport, rswitch->destid,
487 rswitch->hopcount, table, 694 rswitch->hopcount, table,
488 route_destid, route_port); 695 route_destid, route_port);
696 if (lock)
697 rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
698
699 return rc;
489} 700}
490 701
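
A hedged sketch of the new @lock flag: a caller that does not already hold the switch's host lock can pass 1 and let the helpers take and release the lock around the hardware access; the enumeration and discovery paths in this patch pass 0 because they manage the lock themselves. The wrapper below is illustrative only.

/* Sketch only: program a route entry and read it back for verification. */
static int example_verify_route(struct rio_mport *mport,
				struct rio_switch *rswitch, u16 dest, u8 out)
{
	u8 port_out;
	int rc;

	rc = rio_route_add_entry(mport, rswitch, RIO_GLOBAL_TABLE,
				 dest, out, 1);
	if (rc)
		return rc;

	rc = rio_route_get_entry(mport, rswitch, RIO_GLOBAL_TABLE,
				 dest, &port_out, 1);
	if (rc)
		return rc;

	return (port_out == out) ? 0 : -EIO;
}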
491/** 702/**
@@ -625,14 +836,14 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
625 sw_inport = rio_get_swpinfo_inport(port, 836 sw_inport = rio_get_swpinfo_inport(port,
626 RIO_ANY_DESTID(port->sys_size), hopcount); 837 RIO_ANY_DESTID(port->sys_size), hopcount);
627 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, 838 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
628 port->host_deviceid, sw_inport); 839 port->host_deviceid, sw_inport, 0);
629 rdev->rswitch->route_table[port->host_deviceid] = sw_inport; 840 rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
630 841
631 for (destid = 0; destid < next_destid; destid++) { 842 for (destid = 0; destid < next_destid; destid++) {
632 if (destid == port->host_deviceid) 843 if (destid == port->host_deviceid)
633 continue; 844 continue;
634 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, 845 rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
635 destid, sw_inport); 846 destid, sw_inport, 0);
636 rdev->rswitch->route_table[destid] = sw_inport; 847 rdev->rswitch->route_table[destid] = sw_inport;
637 } 848 }
638 849
@@ -644,8 +855,15 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
644 rio_name(rdev), rdev->vid, rdev->did, num_ports); 855 rio_name(rdev), rdev->vid, rdev->did, num_ports);
645 sw_destid = next_destid; 856 sw_destid = next_destid;
646 for (port_num = 0; port_num < num_ports; port_num++) { 857 for (port_num = 0; port_num < num_ports; port_num++) {
 647		if (sw_inport == port_num)			 858			/* Enable Input/Output port (transmitter and receiver) */
859 rio_enable_rx_tx_port(port, 0,
860 RIO_ANY_DESTID(port->sys_size),
861 hopcount, port_num);
862
863 if (sw_inport == port_num) {
864 rdev->rswitch->port_ok |= (1 << port_num);
648 continue; 865 continue;
866 }
649 867
650 cur_destid = next_destid; 868 cur_destid = next_destid;
651 869
@@ -655,10 +873,11 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
655 pr_debug( 873 pr_debug(
656 "RIO: scanning device on port %d\n", 874 "RIO: scanning device on port %d\n",
657 port_num); 875 port_num);
876 rdev->rswitch->port_ok |= (1 << port_num);
658 rio_route_add_entry(port, rdev->rswitch, 877 rio_route_add_entry(port, rdev->rswitch,
659 RIO_GLOBAL_TABLE, 878 RIO_GLOBAL_TABLE,
660 RIO_ANY_DESTID(port->sys_size), 879 RIO_ANY_DESTID(port->sys_size),
661 port_num); 880 port_num, 0);
662 881
663 if (rio_enum_peer(net, port, hopcount + 1) < 0) 882 if (rio_enum_peer(net, port, hopcount + 1) < 0)
664 return -1; 883 return -1;
@@ -672,15 +891,35 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
672 rio_route_add_entry(port, rdev->rswitch, 891 rio_route_add_entry(port, rdev->rswitch,
673 RIO_GLOBAL_TABLE, 892 RIO_GLOBAL_TABLE,
674 destid, 893 destid,
675 port_num); 894 port_num,
895 0);
676 rdev->rswitch-> 896 rdev->rswitch->
677 route_table[destid] = 897 route_table[destid] =
678 port_num; 898 port_num;
679 } 899 }
680 } 900 }
901 } else {
902 /* If switch supports Error Management,
903 * set PORT_LOCKOUT bit for unused port
904 */
905 if (rdev->em_efptr)
906 rio_set_port_lockout(rdev, port_num, 1);
907
908 rdev->rswitch->port_ok &= ~(1 << port_num);
681 } 909 }
682 } 910 }
683 911
 912		/* Direct Port-write messages to the enumerating host */
913 if ((rdev->src_ops & RIO_SRC_OPS_PORT_WRITE) &&
914 (rdev->em_efptr)) {
915 rio_write_config_32(rdev,
916 rdev->em_efptr + RIO_EM_PW_TGT_DEVID,
917 (port->host_deviceid << 16) |
918 (port->sys_size << 15));
919 }
920
921 rio_init_em(rdev);
922
684 /* Check for empty switch */ 923 /* Check for empty switch */
685 if (next_destid == sw_destid) { 924 if (next_destid == sw_destid) {
686 next_destid++; 925 next_destid++;
@@ -700,21 +939,16 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port,
700 * rio_enum_complete- Tests if enumeration of a network is complete 939 * rio_enum_complete- Tests if enumeration of a network is complete
701 * @port: Master port to send transaction 940 * @port: Master port to send transaction
702 * 941 *
703 * Tests the Component Tag CSR for presence of the magic enumeration 942 * Tests the Component Tag CSR for non-zero value (enumeration
704 * complete flag. Return %1 if enumeration is complete or %0 if 943 * complete flag). Return %1 if enumeration is complete or %0 if
705 * enumeration is incomplete. 944 * enumeration is incomplete.
706 */ 945 */
707static int rio_enum_complete(struct rio_mport *port) 946static int rio_enum_complete(struct rio_mport *port)
708{ 947{
709 u32 tag_csr; 948 u32 tag_csr;
710 int ret = 0;
711 949
712 rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr); 950 rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr);
713 951 return (tag_csr & 0xffff) ? 1 : 0;
714 if (tag_csr == RIO_ENUM_CMPL_MAGIC)
715 ret = 1;
716
717 return ret;
718} 952}
719 953
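
A simplified sketch of how the discovery side is expected to use this test; the real loop in rio_disc_mport() uses a timeout timer rather than this bare busy-wait, so treat it as an assumption-laden illustration only.

/* Sketch only: wait until the enumerating host writes our component tag. */
static void example_wait_enum_complete(struct rio_mport *mport)
{
	while (!rio_enum_complete(mport))
		mdelay(1);
}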
720/** 954/**
@@ -763,17 +997,21 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
763 pr_debug( 997 pr_debug(
764 "RIO: scanning device on port %d\n", 998 "RIO: scanning device on port %d\n",
765 port_num); 999 port_num);
1000
1001 rio_lock_device(port, destid, hopcount, 1000);
1002
766 for (ndestid = 0; 1003 for (ndestid = 0;
767 ndestid < RIO_ANY_DESTID(port->sys_size); 1004 ndestid < RIO_ANY_DESTID(port->sys_size);
768 ndestid++) { 1005 ndestid++) {
769 rio_route_get_entry(port, rdev->rswitch, 1006 rio_route_get_entry(port, rdev->rswitch,
770 RIO_GLOBAL_TABLE, 1007 RIO_GLOBAL_TABLE,
771 ndestid, 1008 ndestid,
772 &route_port); 1009 &route_port, 0);
773 if (route_port == port_num) 1010 if (route_port == port_num)
774 break; 1011 break;
775 } 1012 }
776 1013
1014 rio_unlock_device(port, destid, hopcount);
777 if (rio_disc_peer 1015 if (rio_disc_peer
778 (net, port, ndestid, hopcount + 1) < 0) 1016 (net, port, ndestid, hopcount + 1) < 0)
779 return -1; 1017 return -1;
@@ -792,7 +1030,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
792 * 1030 *
793 * Reads the port error status CSR for the master port to 1031 * Reads the port error status CSR for the master port to
794 * determine if the port has an active link. Returns 1032 * determine if the port has an active link. Returns
795 * %PORT_N_ERR_STS_PORT_OK if the master port is active 1033 * %RIO_PORT_N_ERR_STS_PORT_OK if the master port is active
796 * or %0 if it is inactive. 1034 * or %0 if it is inactive.
797 */ 1035 */
798static int rio_mport_is_active(struct rio_mport *port) 1036static int rio_mport_is_active(struct rio_mport *port)
@@ -813,7 +1051,7 @@ static int rio_mport_is_active(struct rio_mport *port)
813 RIO_PORT_N_ERR_STS_CSR(port->index), 1051 RIO_PORT_N_ERR_STS_CSR(port->index),
814 &result); 1052 &result);
815 1053
816 return (result & PORT_N_ERR_STS_PORT_OK); 1054 return result & RIO_PORT_N_ERR_STS_PORT_OK;
817} 1055}
818 1056
819/** 1057/**
@@ -866,12 +1104,17 @@ static void rio_update_route_tables(struct rio_mport *port)
866 continue; 1104 continue;
867 1105
868 if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) { 1106 if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) {
 1107			/* Skip if destid ends in an empty switch */
1108 if (rswitch->destid == destid)
1109 continue;
869 1110
870 sport = rio_get_swpinfo_inport(port, 1111 sport = rio_get_swpinfo_inport(port,
871 rswitch->destid, rswitch->hopcount); 1112 rswitch->destid, rswitch->hopcount);
872 1113
873 if (rswitch->add_entry) { 1114 if (rswitch->add_entry) {
874 rio_route_add_entry(port, rswitch, RIO_GLOBAL_TABLE, destid, sport); 1115 rio_route_add_entry(port, rswitch,
1116 RIO_GLOBAL_TABLE, destid,
1117 sport, 0);
875 rswitch->route_table[destid] = sport; 1118 rswitch->route_table[destid] = sport;
876 } 1119 }
877 } 1120 }
@@ -880,6 +1123,32 @@ static void rio_update_route_tables(struct rio_mport *port)
880} 1123}
881 1124
882/** 1125/**
1126 * rio_init_em - Initializes RIO Error Management (for switches)
1127 * @rdev: RIO device
1128 *
1129 * For each enumerated switch, call device-specific error management
1130 * initialization routine (if supplied by the switch driver).
1131 */
1132static void rio_init_em(struct rio_dev *rdev)
1133{
1134 if (rio_is_switch(rdev) && (rdev->em_efptr) &&
1135 (rdev->rswitch->em_init)) {
1136 rdev->rswitch->em_init(rdev);
1137 }
1138}
1139
1140/**
1141 * rio_pw_enable - Enables/disables port-write handling by a master port
1142 * @port: Master port associated with port-write handling
1143 * @enable: 1=enable, 0=disable
1144 */
1145static void rio_pw_enable(struct rio_mport *port, int enable)
1146{
1147 if (port->ops->pwenable)
1148 port->ops->pwenable(port, enable);
1149}
1150
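
rio_pw_enable() simply forwards to the mport controller driver. A hedged sketch of the controller-side hook it assumes; the prototype (including the int return type) is inferred from the call above, and the body is a placeholder.

/* Sketch only: controller driver hook reached via port->ops->pwenable. */
static int my_pwenable(struct rio_mport *mport, int enable)
{
	/* program controller-specific registers to start or stop
	 * capturing inbound port-write messages */
	return 0;
}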
1151/**
883 * rio_enum_mport- Start enumeration through a master port 1152 * rio_enum_mport- Start enumeration through a master port
884 * @mport: Master port to send transactions 1153 * @mport: Master port to send transactions
885 * 1154 *
@@ -911,6 +1180,10 @@ int __devinit rio_enum_mport(struct rio_mport *mport)
911 rc = -ENOMEM; 1180 rc = -ENOMEM;
912 goto out; 1181 goto out;
913 } 1182 }
1183
 1184	/* Enable Input/Output port (transmitter and receiver) */
1185 rio_enable_rx_tx_port(mport, 1, 0, 0, 0);
1186
914 if (rio_enum_peer(net, mport, 0) < 0) { 1187 if (rio_enum_peer(net, mport, 0) < 0) {
915 /* A higher priority host won enumeration, bail. */ 1188 /* A higher priority host won enumeration, bail. */
916 printk(KERN_INFO 1189 printk(KERN_INFO
@@ -922,6 +1195,7 @@ int __devinit rio_enum_mport(struct rio_mport *mport)
922 } 1195 }
923 rio_update_route_tables(mport); 1196 rio_update_route_tables(mport);
924 rio_clear_locks(mport); 1197 rio_clear_locks(mport);
1198 rio_pw_enable(mport, 1);
925 } else { 1199 } else {
926 printk(KERN_INFO "RIO: master port %d link inactive\n", 1200 printk(KERN_INFO "RIO: master port %d link inactive\n",
927 mport->id); 1201 mport->id);
@@ -945,15 +1219,22 @@ static void rio_build_route_tables(void)
945 u8 sport; 1219 u8 sport;
946 1220
947 list_for_each_entry(rdev, &rio_devices, global_list) 1221 list_for_each_entry(rdev, &rio_devices, global_list)
948 if (rio_is_switch(rdev)) 1222 if (rio_is_switch(rdev)) {
949 for (i = 0; 1223 rio_lock_device(rdev->net->hport, rdev->rswitch->destid,
950 i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); 1224 rdev->rswitch->hopcount, 1000);
951 i++) { 1225 for (i = 0;
952 if (rio_route_get_entry 1226 i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
953 (rdev->net->hport, rdev->rswitch, RIO_GLOBAL_TABLE, 1227 i++) {
954 i, &sport) < 0) 1228 if (rio_route_get_entry
955 continue; 1229 (rdev->net->hport, rdev->rswitch,
956 rdev->rswitch->route_table[i] = sport; 1230 RIO_GLOBAL_TABLE, i, &sport, 0) < 0)
1231 continue;
1232 rdev->rswitch->route_table[i] = sport;
1233 }
1234
1235 rio_unlock_device(rdev->net->hport,
1236 rdev->rswitch->destid,
1237 rdev->rswitch->hopcount);
957 } 1238 }
958} 1239}
959 1240
@@ -1012,6 +1293,13 @@ int __devinit rio_disc_mport(struct rio_mport *mport)
1012 del_timer_sync(&rio_enum_timer); 1293 del_timer_sync(&rio_enum_timer);
1013 1294
1014 pr_debug("done\n"); 1295 pr_debug("done\n");
1296
1297 /* Read DestID assigned by enumerator */
1298 rio_local_read_config_32(mport, RIO_DID_CSR,
1299 &mport->host_deviceid);
1300 mport->host_deviceid = RIO_GET_DID(mport->sys_size,
1301 mport->host_deviceid);
1302
1015 if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size), 1303 if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size),
1016 0) < 0) { 1304 0) < 0) {
1017 printk(KERN_INFO 1305 printk(KERN_INFO
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 6395c780008b..08fa453af974 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -5,6 +5,10 @@
5 * Copyright 2005 MontaVista Software, Inc. 5 * Copyright 2005 MontaVista Software, Inc.
6 * Matt Porter <mporter@kernel.crashing.org> 6 * Matt Porter <mporter@kernel.crashing.org>
7 * 7 *
8 * Copyright 2009 Integrated Device Technology, Inc.
9 * Alex Bounine <alexandre.bounine@idt.com>
10 * - Added Port-Write/Error Management initialization and handling
11 *
8 * This program is free software; you can redistribute it and/or modify it 12 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 13 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your 14 * Free Software Foundation; either version 2 of the License, or (at your
@@ -333,6 +337,331 @@ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res)
333} 337}
334 338
335/** 339/**
340 * rio_request_inb_pwrite - request inbound port-write message service
341 * @rdev: RIO device to which register inbound port-write callback routine
342 * @pwcback: Callback routine to execute when port-write is received
343 *
344 * Binds a port-write callback function to the RapidIO device.
345 * Returns 0 if the request has been satisfied.
346 */
347int rio_request_inb_pwrite(struct rio_dev *rdev,
348 int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step))
349{
350 int rc = 0;
351
352 spin_lock(&rio_global_list_lock);
353 if (rdev->pwcback != NULL)
354 rc = -ENOMEM;
355 else
356 rdev->pwcback = pwcback;
357
358 spin_unlock(&rio_global_list_lock);
359 return rc;
360}
361EXPORT_SYMBOL_GPL(rio_request_inb_pwrite);
362
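
A minimal sketch of an endpoint driver registering for this service; the handler and probe names are illustrative, and returning 0 from the callback tells the core that no further standard processing is needed.

/* Sketch only: device driver consuming inbound port-write messages. */
static int my_pw_handler(struct rio_dev *rdev, union rio_pw_msg *msg, int step)
{
	pr_debug("RIO: port-write from %s\n", rio_name(rdev));
	return 0;	/* fully handled here */
}

static int my_probe(struct rio_dev *rdev)
{
	return rio_request_inb_pwrite(rdev, my_pw_handler);
}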
363/**
364 * rio_release_inb_pwrite - release inbound port-write message service
365 * @rdev: RIO device which registered for inbound port-write callback
366 *
367 * Removes callback from the rio_dev structure. Returns 0 if the request
368 * has been satisfied.
369 */
370int rio_release_inb_pwrite(struct rio_dev *rdev)
371{
372 int rc = -ENOMEM;
373
374 spin_lock(&rio_global_list_lock);
375 if (rdev->pwcback) {
376 rdev->pwcback = NULL;
377 rc = 0;
378 }
379
380 spin_unlock(&rio_global_list_lock);
381 return rc;
382}
383EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);
384
385/**
386 * rio_mport_get_physefb - Helper function that returns register offset
387 * for Physical Layer Extended Features Block.
388 * @port: Master port to issue transaction
389 * @local: Indicate a local master port or remote device access
390 * @destid: Destination ID of the device
391 * @hopcount: Number of switch hops to the device
392 */
393u32
394rio_mport_get_physefb(struct rio_mport *port, int local,
395 u16 destid, u8 hopcount)
396{
397 u32 ext_ftr_ptr;
398 u32 ftr_header;
399
400 ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0);
401
402 while (ext_ftr_ptr) {
403 if (local)
404 rio_local_read_config_32(port, ext_ftr_ptr,
405 &ftr_header);
406 else
407 rio_mport_read_config_32(port, destid, hopcount,
408 ext_ftr_ptr, &ftr_header);
409
410 ftr_header = RIO_GET_BLOCK_ID(ftr_header);
411 switch (ftr_header) {
412
413 case RIO_EFB_SER_EP_ID_V13P:
414 case RIO_EFB_SER_EP_REC_ID_V13P:
415 case RIO_EFB_SER_EP_FREE_ID_V13P:
416 case RIO_EFB_SER_EP_ID:
417 case RIO_EFB_SER_EP_REC_ID:
418 case RIO_EFB_SER_EP_FREE_ID:
419 case RIO_EFB_SER_EP_FREC_ID:
420
421 return ext_ftr_ptr;
422
423 default:
424 break;
425 }
426
427 ext_ftr_ptr = rio_mport_get_efb(port, local, destid,
428 hopcount, ext_ftr_ptr);
429 }
430
431 return ext_ftr_ptr;
432}
433
434/**
435 * rio_get_comptag - Begin or continue searching for a RIO device by component tag
436 * @comp_tag: RIO component tag to match
437 * @from: Previous RIO device found in search, or %NULL for new search
438 *
439 * Iterates through the list of known RIO devices. If a RIO device is
440 * found with a matching @comp_tag, a pointer to its device
441 * structure is returned. Otherwise, %NULL is returned. A new search
442 * is initiated by passing %NULL to the @from argument. Otherwise, if
443 * @from is not %NULL, searches continue from next device on the global
444 * list.
445 */
446static struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from)
447{
448 struct list_head *n;
449 struct rio_dev *rdev;
450
451 spin_lock(&rio_global_list_lock);
452 n = from ? from->global_list.next : rio_devices.next;
453
454 while (n && (n != &rio_devices)) {
455 rdev = rio_dev_g(n);
456 if (rdev->comp_tag == comp_tag)
457 goto exit;
458 n = n->next;
459 }
460 rdev = NULL;
461exit:
462 spin_unlock(&rio_global_list_lock);
463 return rdev;
464}
465
466/**
467 * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port.
468 * @rdev: Pointer to RIO device control structure
469 * @pnum: Switch port number to set LOCKOUT bit
470 * @lock: Operation : set (=1) or clear (=0)
471 */
472int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
473{
474 u8 hopcount = 0xff;
475 u16 destid = rdev->destid;
476 u32 regval;
477
478 if (rdev->rswitch) {
479 destid = rdev->rswitch->destid;
480 hopcount = rdev->rswitch->hopcount;
481 }
482
483 rio_mport_read_config_32(rdev->net->hport, destid, hopcount,
484 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
485 &regval);
486 if (lock)
487 regval |= RIO_PORT_N_CTL_LOCKOUT;
488 else
489 regval &= ~RIO_PORT_N_CTL_LOCKOUT;
490
491 rio_mport_write_config_32(rdev->net->hport, destid, hopcount,
492 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
493 regval);
494 return 0;
495}
496
497/**
498 * rio_inb_pwrite_handler - process inbound port-write message
499 * @pw_msg: pointer to inbound port-write message
500 *
501 * Processes an inbound port-write message. Returns 0 if the request
502 * has been satisfied.
503 */
504int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
505{
506 struct rio_dev *rdev;
507 struct rio_mport *mport;
508 u8 hopcount;
509 u16 destid;
510 u32 err_status;
511 int rc, portnum;
512
513 rdev = rio_get_comptag(pw_msg->em.comptag, NULL);
514 if (rdev == NULL) {
 515		/* Something bad here (probably an enumeration error) */
516 pr_err("RIO: %s No matching device for CTag 0x%08x\n",
517 __func__, pw_msg->em.comptag);
518 return -EIO;
519 }
520
521 pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev));
522
523#ifdef DEBUG_PW
524 {
525 u32 i;
526 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) {
527 pr_debug("0x%02x: %08x %08x %08x %08x",
528 i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
529 pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
530 i += 4;
531 }
532 pr_debug("\n");
533 }
534#endif
535
 536	/* Call an external service function (if one is registered
 537	 * for this device). This may be the service for endpoints that send
 538	 * device-specific port-write messages. End-point messages are expected
 539	 * to be handled completely by the EP-specific device driver.
 540	 * For switches, rc == 0 signals that no standard processing is required.
541 */
542 if (rdev->pwcback != NULL) {
543 rc = rdev->pwcback(rdev, pw_msg, 0);
544 if (rc == 0)
545 return 0;
546 }
547
548 /* For End-point devices processing stops here */
549 if (!(rdev->pef & RIO_PEF_SWITCH))
550 return 0;
551
552 if (rdev->phys_efptr == 0) {
553 pr_err("RIO_PW: Bad switch initialization for %s\n",
554 rio_name(rdev));
555 return 0;
556 }
557
558 mport = rdev->net->hport;
559 destid = rdev->rswitch->destid;
560 hopcount = rdev->rswitch->hopcount;
561
562 /*
563 * Process the port-write notification from switch
564 */
565
566 portnum = pw_msg->em.is_port & 0xFF;
567
568 if (rdev->rswitch->em_handle)
569 rdev->rswitch->em_handle(rdev, portnum);
570
571 rio_mport_read_config_32(mport, destid, hopcount,
572 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
573 &err_status);
574 pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
575
576 if (pw_msg->em.errdetect) {
577 pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
578 portnum, pw_msg->em.errdetect);
579 /* Clear EM Port N Error Detect CSR */
580 rio_mport_write_config_32(mport, destid, hopcount,
581 rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
582 }
583
584 if (pw_msg->em.ltlerrdet) {
585 pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
586 pw_msg->em.ltlerrdet);
587 /* Clear EM L/T Layer Error Detect CSR */
588 rio_mport_write_config_32(mport, destid, hopcount,
589 rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
590 }
591
592 /* Clear Port Errors */
593 rio_mport_write_config_32(mport, destid, hopcount,
594 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
595 err_status & RIO_PORT_N_ERR_STS_CLR_MASK);
596
597 if (rdev->rswitch->port_ok & (1 << portnum)) {
598 if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) {
599 rdev->rswitch->port_ok &= ~(1 << portnum);
600 rio_set_port_lockout(rdev, portnum, 1);
601
602 rio_mport_write_config_32(mport, destid, hopcount,
603 rdev->phys_efptr +
604 RIO_PORT_N_ACK_STS_CSR(portnum),
605 RIO_PORT_N_ACK_CLEAR);
606
607 /* Schedule Extraction Service */
608 pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
609 rio_name(rdev), portnum);
610 }
611 } else {
612 if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
613 rdev->rswitch->port_ok |= (1 << portnum);
614 rio_set_port_lockout(rdev, portnum, 0);
615
616 /* Schedule Insertion Service */
617 pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n",
618 rio_name(rdev), portnum);
619 }
620 }
621
622 /* Clear Port-Write Pending bit */
623 rio_mport_write_config_32(mport, destid, hopcount,
624 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
625 RIO_PORT_N_ERR_STS_PW_PEND);
626
627 return 0;
628}
629EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler);
630
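
The export above exists for mport controller drivers: when the hardware delivers an inbound port-write, the driver hands the captured message to the core. A hedged sketch, where the dispatch-function name and the message source are assumptions:

/* Sketch only: mport driver passing a captured port-write to the core. */
static void my_pw_dispatch(union rio_pw_msg *msg)
{
	if (rio_inb_pwrite_handler(msg))
		pr_debug("RIO: port-write message not handled\n");
}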
631/**
632 * rio_mport_get_efb - get pointer to next extended features block
633 * @port: Master port to issue transaction
634 * @local: Indicate a local master port or remote device access
635 * @destid: Destination ID of the device
636 * @hopcount: Number of switch hops to the device
637 * @from: Offset of current Extended Feature block header (if 0 starts
638 * from ExtFeaturePtr)
639 */
640u32
641rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
642 u8 hopcount, u32 from)
643{
644 u32 reg_val;
645
646 if (from == 0) {
647 if (local)
648 rio_local_read_config_32(port, RIO_ASM_INFO_CAR,
649 &reg_val);
650 else
651 rio_mport_read_config_32(port, destid, hopcount,
652 RIO_ASM_INFO_CAR, &reg_val);
653 return reg_val & RIO_EXT_FTR_PTR_MASK;
654 } else {
655 if (local)
656 rio_local_read_config_32(port, from, &reg_val);
657 else
658 rio_mport_read_config_32(port, destid, hopcount,
659 from, &reg_val);
660 return RIO_GET_BLOCK_ID(reg_val);
661 }
662}
663
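
A hedged sketch of walking a device's extended features list with rio_mport_get_efb(), mirroring the loop used by rio_mport_get_physefb() above; here the walk simply logs every block ID it finds, and the function name is illustrative.

/* Sketch only: dump all extended features block IDs of a remote device. */
static void example_dump_efbs(struct rio_mport *port, u16 destid, u8 hopcount)
{
	u32 efb_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0);
	u32 header;

	while (efb_ptr) {
		rio_mport_read_config_32(port, destid, hopcount,
					 efb_ptr, &header);
		pr_debug("RIO: EFB 0x%04x at 0x%x\n",
			 RIO_GET_BLOCK_ID(header), efb_ptr);
		efb_ptr = rio_mport_get_efb(port, 0, destid, hopcount,
					    efb_ptr);
	}
}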
664/**
336 * rio_mport_get_feature - query for devices' extended features 665 * rio_mport_get_feature - query for devices' extended features
337 * @port: Master port to issue transaction 666 * @port: Master port to issue transaction
338 * @local: Indicate a local master port or remote device access 667 * @local: Indicate a local master port or remote device access
@@ -451,6 +780,110 @@ struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
451 return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from); 780 return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
452} 781}
453 782
783/**
784 * rio_std_route_add_entry - Add switch route table entry using standard
785 * registers defined in RIO specification rev.1.3
786 * @mport: Master port to issue transaction
787 * @destid: Destination ID of the device
788 * @hopcount: Number of switch hops to the device
789 * @table: routing table ID (global or port-specific)
790 * @route_destid: destID entry in the RT
791 * @route_port: destination port for specified destID
792 */
793int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
794 u16 table, u16 route_destid, u8 route_port)
795{
796 if (table == RIO_GLOBAL_TABLE) {
797 rio_mport_write_config_32(mport, destid, hopcount,
798 RIO_STD_RTE_CONF_DESTID_SEL_CSR,
799 (u32)route_destid);
800 rio_mport_write_config_32(mport, destid, hopcount,
801 RIO_STD_RTE_CONF_PORT_SEL_CSR,
802 (u32)route_port);
803 }
804
805 udelay(10);
806 return 0;
807}
808
809/**
810 * rio_std_route_get_entry - Read switch route table entry (port number)
 811 * associated with the specified destID using standard registers defined in RIO
812 * specification rev.1.3
813 * @mport: Master port to issue transaction
814 * @destid: Destination ID of the device
815 * @hopcount: Number of switch hops to the device
816 * @table: routing table ID (global or port-specific)
817 * @route_destid: destID entry in the RT
818 * @route_port: returned destination port for specified destID
819 */
820int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
821 u16 table, u16 route_destid, u8 *route_port)
822{
823 u32 result;
824
825 if (table == RIO_GLOBAL_TABLE) {
826 rio_mport_write_config_32(mport, destid, hopcount,
827 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
828 rio_mport_read_config_32(mport, destid, hopcount,
829 RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
830
831 *route_port = (u8)result;
832 }
833
834 return 0;
835}
836
837/**
 838 * rio_std_route_clr_table - Clear switch route table using standard registers
839 * defined in RIO specification rev.1.3.
840 * @mport: Master port to issue transaction
841 * @destid: Destination ID of the device
842 * @hopcount: Number of switch hops to the device
843 * @table: routing table ID (global or port-specific)
844 */
845int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
846 u16 table)
847{
848 u32 max_destid = 0xff;
849 u32 i, pef, id_inc = 1, ext_cfg = 0;
850 u32 port_sel = RIO_INVALID_ROUTE;
851
852 if (table == RIO_GLOBAL_TABLE) {
853 rio_mport_read_config_32(mport, destid, hopcount,
854 RIO_PEF_CAR, &pef);
855
856 if (mport->sys_size) {
857 rio_mport_read_config_32(mport, destid, hopcount,
858 RIO_SWITCH_RT_LIMIT,
859 &max_destid);
860 max_destid &= RIO_RT_MAX_DESTID;
861 }
862
863 if (pef & RIO_PEF_EXT_RT) {
864 ext_cfg = 0x80000000;
865 id_inc = 4;
866 port_sel = (RIO_INVALID_ROUTE << 24) |
867 (RIO_INVALID_ROUTE << 16) |
868 (RIO_INVALID_ROUTE << 8) |
869 RIO_INVALID_ROUTE;
870 }
871
872 for (i = 0; i <= max_destid;) {
873 rio_mport_write_config_32(mport, destid, hopcount,
874 RIO_STD_RTE_CONF_DESTID_SEL_CSR,
875 ext_cfg | i);
876 rio_mport_write_config_32(mport, destid, hopcount,
877 RIO_STD_RTE_CONF_PORT_SEL_CSR,
878 port_sel);
879 i += id_inc;
880 }
881 }
882
883 udelay(10);
884 return 0;
885}
886
454static void rio_fixup_device(struct rio_dev *dev) 887static void rio_fixup_device(struct rio_dev *dev)
455{ 888{
456} 889}
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index 7786d02581f2..f27b7a9c47d2 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -18,38 +18,50 @@
18 18
19extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid, 19extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
20 u8 hopcount, int ftr); 20 u8 hopcount, int ftr);
21extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
22 u16 destid, u8 hopcount);
23extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
24 u8 hopcount, u32 from);
21extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); 25extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
22extern int rio_enum_mport(struct rio_mport *mport); 26extern int rio_enum_mport(struct rio_mport *mport);
23extern int rio_disc_mport(struct rio_mport *mport); 27extern int rio_disc_mport(struct rio_mport *mport);
28extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid,
29 u8 hopcount, u16 table, u16 route_destid,
30 u8 route_port);
31extern int rio_std_route_get_entry(struct rio_mport *mport, u16 destid,
32 u8 hopcount, u16 table, u16 route_destid,
33 u8 *route_port);
34extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid,
35 u8 hopcount, u16 table);
36extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
24 37
25/* Structures internal to the RIO core code */ 38/* Structures internal to the RIO core code */
26extern struct device_attribute rio_dev_attrs[]; 39extern struct device_attribute rio_dev_attrs[];
27extern spinlock_t rio_global_list_lock; 40extern spinlock_t rio_global_list_lock;
28 41
29extern struct rio_route_ops __start_rio_route_ops[]; 42extern struct rio_switch_ops __start_rio_switch_ops[];
30extern struct rio_route_ops __end_rio_route_ops[]; 43extern struct rio_switch_ops __end_rio_switch_ops[];
31 44
32/* Helpers internal to the RIO core code */ 45/* Helpers internal to the RIO core code */
33#define DECLARE_RIO_ROUTE_SECTION(section, vid, did, add_hook, get_hook) \ 46#define DECLARE_RIO_SWITCH_SECTION(section, name, vid, did, init_hook) \
34 static struct rio_route_ops __rio_route_ops __used \ 47 static const struct rio_switch_ops __rio_switch_##name __used \
35 __section(section)= { vid, did, add_hook, get_hook }; 48 __section(section) = { vid, did, init_hook };
36 49
37/** 50/**
38 * DECLARE_RIO_ROUTE_OPS - Registers switch routing operations 51 * DECLARE_RIO_SWITCH_INIT - Registers switch initialization routine
39 * @vid: RIO vendor ID 52 * @vid: RIO vendor ID
40 * @did: RIO device ID 53 * @did: RIO device ID
41 * @add_hook: Callback that adds a route entry 54 * @init_hook: Callback that performs switch-specific initialization
42 * @get_hook: Callback that gets a route entry
43 * 55 *
44 * Manipulating switch route tables in RIO is switch specific. This 56 * Manipulating switch route tables and error management in RIO
45 * registers a switch by vendor and device ID with two callbacks for 57 * is switch specific. This registers a switch by vendor and device ID with
46 * modifying and retrieving route entries in a switch. A &struct 58 * initialization callback for setting up switch operations and (if required)
47 * rio_route_ops is initialized with the ops and placed into a 59 * hardware initialization. A &struct rio_switch_ops is initialized with
48 * RIO-specific kernel section. 60 * pointer to the init routine and placed into a RIO-specific kernel section.
49 */ 61 */
50#define DECLARE_RIO_ROUTE_OPS(vid, did, add_hook, get_hook) \ 62#define DECLARE_RIO_SWITCH_INIT(vid, did, init_hook) \
51 DECLARE_RIO_ROUTE_SECTION(.rio_route_ops, \ 63 DECLARE_RIO_SWITCH_SECTION(.rio_switch_ops, vid##did, \
52 vid, did, add_hook, get_hook) 64 vid, did, init_hook)
53 65
54#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16)) 66#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
55#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16)) 67#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
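
A minimal sketch of how a switch driver registers with the new macro; MY_VID and MY_DID are placeholders, and the hooks shown simply fall back to the standard routing helpers declared above.

/* Sketch only: hypothetical switch registering the standard route ops. */
static int my_switch_init(struct rio_dev *rdev, int do_enum)
{
	rdev->rswitch->add_entry = rio_std_route_add_entry;
	rdev->rswitch->get_entry = rio_std_route_get_entry;
	rdev->rswitch->clr_table = rio_std_route_clr_table;
	return 0;
}

DECLARE_RIO_SWITCH_INIT(MY_VID, MY_DID, my_switch_init);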
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig
new file mode 100644
index 000000000000..2b4e9b2b6631
--- /dev/null
+++ b/drivers/rapidio/switches/Kconfig
@@ -0,0 +1,28 @@
1#
2# RapidIO switches configuration
3#
4config RAPIDIO_TSI57X
5 bool "IDT Tsi57x SRIO switches support"
6 depends on RAPIDIO
7 ---help---
8 Includes support for IDT Tsi57x family of serial RapidIO switches.
9
10config RAPIDIO_CPS_XX
11 bool "IDT CPS-xx SRIO switches support"
12 depends on RAPIDIO
13 ---help---
14 Includes support for IDT CPS-16/12/10/8 serial RapidIO switches.
15
16config RAPIDIO_TSI568
17 bool "Tsi568 SRIO switch support"
18 depends on RAPIDIO
19 default n
20 ---help---
21 Includes support for IDT Tsi568 serial RapidIO switch.
22
23config RAPIDIO_TSI500
24 bool "Tsi500 Parallel RapidIO switch support"
25 depends on RAPIDIO
26 default n
27 ---help---
28 Includes support for IDT Tsi500 parallel RapidIO switch.
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index b924f8301761..fe4adc3e8d5f 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -2,4 +2,11 @@
2# Makefile for RIO switches 2# Makefile for RIO switches
3# 3#
4 4
5obj-$(CONFIG_RAPIDIO) += tsi500.o 5obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o
6obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o
7obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o
8obj-$(CONFIG_RAPIDIO_TSI500) += tsi500.o
9
10ifeq ($(CONFIG_RAPIDIO_DEBUG),y)
11EXTRA_CFLAGS += -DDEBUG
12endif
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
new file mode 100644
index 000000000000..2c790c144f89
--- /dev/null
+++ b/drivers/rapidio/switches/idtcps.c
@@ -0,0 +1,137 @@
1/*
2 * IDT CPS RapidIO switches support
3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include <linux/rio.h>
14#include <linux/rio_drv.h>
15#include <linux/rio_ids.h>
16#include "../rio.h"
17
18#define CPS_DEFAULT_ROUTE 0xde
19#define CPS_NO_ROUTE 0xdf
20
21#define IDTCPS_RIO_DOMAIN 0xf20020
22
23static int
24idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
25 u16 table, u16 route_destid, u8 route_port)
26{
27 u32 result;
28
29 if (table == RIO_GLOBAL_TABLE) {
30 rio_mport_write_config_32(mport, destid, hopcount,
31 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
32
33 rio_mport_read_config_32(mport, destid, hopcount,
34 RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
35
36 result = (0xffffff00 & result) | (u32)route_port;
37 rio_mport_write_config_32(mport, destid, hopcount,
38 RIO_STD_RTE_CONF_PORT_SEL_CSR, result);
39 }
40
41 return 0;
42}
43
44static int
45idtcps_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
46 u16 table, u16 route_destid, u8 *route_port)
47{
48 u32 result;
49
50 if (table == RIO_GLOBAL_TABLE) {
51 rio_mport_write_config_32(mport, destid, hopcount,
52 RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
53
54 rio_mport_read_config_32(mport, destid, hopcount,
55 RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
56
57 if (CPS_DEFAULT_ROUTE == (u8)result ||
58 CPS_NO_ROUTE == (u8)result)
59 *route_port = RIO_INVALID_ROUTE;
60 else
61 *route_port = (u8)result;
62 }
63
64 return 0;
65}
66
67static int
68idtcps_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
69 u16 table)
70{
71 u32 i;
72
73 if (table == RIO_GLOBAL_TABLE) {
74 for (i = 0x80000000; i <= 0x800000ff;) {
75 rio_mport_write_config_32(mport, destid, hopcount,
76 RIO_STD_RTE_CONF_DESTID_SEL_CSR, i);
77 rio_mport_write_config_32(mport, destid, hopcount,
78 RIO_STD_RTE_CONF_PORT_SEL_CSR,
79 (CPS_DEFAULT_ROUTE << 24) |
80 (CPS_DEFAULT_ROUTE << 16) |
81 (CPS_DEFAULT_ROUTE << 8) | CPS_DEFAULT_ROUTE);
82 i += 4;
83 }
84 }
85
86 return 0;
87}
88
89static int
90idtcps_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
91 u8 sw_domain)
92{
93 /*
94 * Switch domain configuration operates only at global level
95 */
96 rio_mport_write_config_32(mport, destid, hopcount,
97 IDTCPS_RIO_DOMAIN, (u32)sw_domain);
98 return 0;
99}
100
101static int
102idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
103 u8 *sw_domain)
104{
105 u32 regval;
106
107 /*
108 * Switch domain configuration operates only at global level
109 */
110 rio_mport_read_config_32(mport, destid, hopcount,
111 IDTCPS_RIO_DOMAIN, &regval);
112
113 *sw_domain = (u8)(regval & 0xff);
114
115 return 0;
116}
117
118static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
119{
120 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
121 rdev->rswitch->add_entry = idtcps_route_add_entry;
122 rdev->rswitch->get_entry = idtcps_route_get_entry;
123 rdev->rswitch->clr_table = idtcps_route_clr_table;
124 rdev->rswitch->set_domain = idtcps_set_domain;
125 rdev->rswitch->get_domain = idtcps_get_domain;
126 rdev->rswitch->em_init = NULL;
127 rdev->rswitch->em_handle = NULL;
128
129 return 0;
130}
131
132DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS6Q, idtcps_switch_init);
133DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS8, idtcps_switch_init);
134DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS10Q, idtcps_switch_init);
135DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS12, idtcps_switch_init);
136DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS16, idtcps_switch_init);
137DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDT70K200, idtcps_switch_init);
diff --git a/drivers/rapidio/switches/tsi500.c b/drivers/rapidio/switches/tsi500.c
index c77c23bd9840..914eddd5aa42 100644
--- a/drivers/rapidio/switches/tsi500.c
+++ b/drivers/rapidio/switches/tsi500.c
@@ -1,6 +1,10 @@
1/* 1/*
2 * RapidIO Tsi500 switch support 2 * RapidIO Tsi500 switch support
3 * 3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 * - Modified switch operations initialization.
7 *
4 * Copyright 2005 MontaVista Software, Inc. 8 * Copyright 2005 MontaVista Software, Inc.
5 * Matt Porter <mporter@kernel.crashing.org> 9 * Matt Porter <mporter@kernel.crashing.org>
6 * 10 *
@@ -57,4 +61,18 @@ tsi500_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 tab
57 return ret; 61 return ret;
58} 62}
59 63
60DECLARE_RIO_ROUTE_OPS(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_route_add_entry, tsi500_route_get_entry); 64static int tsi500_switch_init(struct rio_dev *rdev, int do_enum)
65{
66 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
67 rdev->rswitch->add_entry = tsi500_route_add_entry;
68 rdev->rswitch->get_entry = tsi500_route_get_entry;
69 rdev->rswitch->clr_table = NULL;
70 rdev->rswitch->set_domain = NULL;
71 rdev->rswitch->get_domain = NULL;
72 rdev->rswitch->em_init = NULL;
73 rdev->rswitch->em_handle = NULL;
74
75 return 0;
76}
77
78DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_switch_init);
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
new file mode 100644
index 000000000000..f7fd7898606e
--- /dev/null
+++ b/drivers/rapidio/switches/tsi568.c
@@ -0,0 +1,146 @@
1/*
2 * RapidIO Tsi568 switch support
3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 * - Added EM support
7 * - Modified switch operations initialization.
8 *
9 * Copyright 2005 MontaVista Software, Inc.
10 * Matt Porter <mporter@kernel.crashing.org>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/rio.h>
19#include <linux/rio_drv.h>
20#include <linux/rio_ids.h>
21#include <linux/delay.h>
22#include "../rio.h"
23
24/* Global (broadcast) route registers */
25#define SPBC_ROUTE_CFG_DESTID 0x10070
26#define SPBC_ROUTE_CFG_PORT 0x10074
27
28/* Per port route registers */
29#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
30#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
31
32#define TSI568_SP_MODE_BC 0x10004
33#define TSI568_SP_MODE_PW_DIS 0x08000000
34
35static int
36tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
37 u16 table, u16 route_destid, u8 route_port)
38{
39 if (table == RIO_GLOBAL_TABLE) {
40 rio_mport_write_config_32(mport, destid, hopcount,
41 SPBC_ROUTE_CFG_DESTID, route_destid);
42 rio_mport_write_config_32(mport, destid, hopcount,
43 SPBC_ROUTE_CFG_PORT, route_port);
44 } else {
45 rio_mport_write_config_32(mport, destid, hopcount,
46 SPP_ROUTE_CFG_DESTID(table),
47 route_destid);
48 rio_mport_write_config_32(mport, destid, hopcount,
49 SPP_ROUTE_CFG_PORT(table), route_port);
50 }
51
52 udelay(10);
53
54 return 0;
55}
56
57static int
58tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
59 u16 table, u16 route_destid, u8 *route_port)
60{
61 int ret = 0;
62 u32 result;
63
64 if (table == RIO_GLOBAL_TABLE) {
65 rio_mport_write_config_32(mport, destid, hopcount,
66 SPBC_ROUTE_CFG_DESTID, route_destid);
67 rio_mport_read_config_32(mport, destid, hopcount,
68 SPBC_ROUTE_CFG_PORT, &result);
69 } else {
70 rio_mport_write_config_32(mport, destid, hopcount,
71 SPP_ROUTE_CFG_DESTID(table),
72 route_destid);
73 rio_mport_read_config_32(mport, destid, hopcount,
74 SPP_ROUTE_CFG_PORT(table), &result);
75 }
76
77 *route_port = result;
78 if (*route_port > 15)
79 ret = -1;
80
81 return ret;
82}
83
84static int
85tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
86 u16 table)
87{
88 u32 route_idx;
89 u32 lut_size;
90
91 lut_size = (mport->sys_size) ? 0x1ff : 0xff;
92
93 if (table == RIO_GLOBAL_TABLE) {
94 rio_mport_write_config_32(mport, destid, hopcount,
95 SPBC_ROUTE_CFG_DESTID, 0x80000000);
96 for (route_idx = 0; route_idx <= lut_size; route_idx++)
97 rio_mport_write_config_32(mport, destid, hopcount,
98 SPBC_ROUTE_CFG_PORT,
99 RIO_INVALID_ROUTE);
100 } else {
101 rio_mport_write_config_32(mport, destid, hopcount,
102 SPP_ROUTE_CFG_DESTID(table),
103 0x80000000);
104 for (route_idx = 0; route_idx <= lut_size; route_idx++)
105 rio_mport_write_config_32(mport, destid, hopcount,
106 SPP_ROUTE_CFG_PORT(table),
107 RIO_INVALID_ROUTE);
108 }
109
110 return 0;
111}
112
113static int
114tsi568_em_init(struct rio_dev *rdev)
115{
116 struct rio_mport *mport = rdev->net->hport;
117 u16 destid = rdev->rswitch->destid;
118 u8 hopcount = rdev->rswitch->hopcount;
119 u32 regval;
120
121 pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount);
122
123 /* Make sure that Port-Writes are disabled (for all ports) */
124 rio_mport_read_config_32(mport, destid, hopcount,
125 TSI568_SP_MODE_BC, &regval);
126 rio_mport_write_config_32(mport, destid, hopcount,
127 TSI568_SP_MODE_BC, regval | TSI568_SP_MODE_PW_DIS);
128
129 return 0;
130}
131
132static int tsi568_switch_init(struct rio_dev *rdev, int do_enum)
133{
134 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
135 rdev->rswitch->add_entry = tsi568_route_add_entry;
136 rdev->rswitch->get_entry = tsi568_route_get_entry;
137 rdev->rswitch->clr_table = tsi568_route_clr_table;
138 rdev->rswitch->set_domain = NULL;
139 rdev->rswitch->get_domain = NULL;
140 rdev->rswitch->em_init = tsi568_em_init;
141 rdev->rswitch->em_handle = NULL;
142
143 return 0;
144}
145
146DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI568, tsi568_switch_init);
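
The Tsi switch drivers added in this series all end with a DECLARE_RIO_SWITCH_INIT() line. Its definition is not part of this diff; as a rough illustration of how such registration macros are commonly built (the struct, macro and section names below are invented for the example and are not the kernel's actual ones), the idea is one per-driver record in a table that the enumeration code scans when it meets a switch, calling the hook whose vendor/device IDs match:

	#include <linux/rio.h>

	/* Illustration only -- not the RapidIO core's real definitions. */
	struct example_rio_switch_entry {
		u16 vid;	/* vendor ID to match */
		u16 did;	/* device ID to match */
		int (*init_hook)(struct rio_dev *rdev, int do_enum);
	};

	#define EXAMPLE_DECLARE_RIO_SWITCH_INIT(vid, did, init_hook)	\
		static const struct example_rio_switch_entry		\
		__example_rio_switch_##did				\
		__attribute__((used, section("__example_rio_switch_table"))) \
		= { vid, did, init_hook };
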
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
new file mode 100644
index 000000000000..d34df722d95f
--- /dev/null
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -0,0 +1,315 @@
1/*
2 * RapidIO Tsi57x switch family support
3 *
4 * Copyright 2009-2010 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 * - Added EM support
7 * - Modified switch operations initialization.
8 *
9 * Copyright 2005 MontaVista Software, Inc.
10 * Matt Porter <mporter@kernel.crashing.org>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/rio.h>
19#include <linux/rio_drv.h>
20#include <linux/rio_ids.h>
21#include <linux/delay.h>
22#include "../rio.h"
23
24/* Global (broadcast) route registers */
25#define SPBC_ROUTE_CFG_DESTID 0x10070
26#define SPBC_ROUTE_CFG_PORT 0x10074
27
28/* Per port route registers */
29#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
30#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
31
32#define TSI578_SP_MODE(n) (0x11004 + n*0x100)
33#define TSI578_SP_MODE_GLBL 0x10004
34#define TSI578_SP_MODE_PW_DIS 0x08000000
35#define TSI578_SP_MODE_LUT_512 0x01000000
36
37#define TSI578_SP_CTL_INDEP(n) (0x13004 + n*0x100)
38#define TSI578_SP_LUT_PEINF(n) (0x13010 + n*0x100)
39#define TSI578_SP_CS_TX(n) (0x13014 + n*0x100)
40#define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100)
41
42#define TSI578_GLBL_ROUTE_BASE 0x10078
43
44static int
45tsi57x_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
46 u16 table, u16 route_destid, u8 route_port)
47{
48 if (table == RIO_GLOBAL_TABLE) {
49 rio_mport_write_config_32(mport, destid, hopcount,
50 SPBC_ROUTE_CFG_DESTID, route_destid);
51 rio_mport_write_config_32(mport, destid, hopcount,
52 SPBC_ROUTE_CFG_PORT, route_port);
53 } else {
54 rio_mport_write_config_32(mport, destid, hopcount,
55 SPP_ROUTE_CFG_DESTID(table), route_destid);
56 rio_mport_write_config_32(mport, destid, hopcount,
57 SPP_ROUTE_CFG_PORT(table), route_port);
58 }
59
60 udelay(10);
61
62 return 0;
63}
64
65static int
66tsi57x_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
67 u16 table, u16 route_destid, u8 *route_port)
68{
69 int ret = 0;
70 u32 result;
71
72 if (table == RIO_GLOBAL_TABLE) {
73 /* Use local RT of the ingress port to avoid possible
74 race condition */
75 rio_mport_read_config_32(mport, destid, hopcount,
76 RIO_SWP_INFO_CAR, &result);
77 table = (result & RIO_SWP_INFO_PORT_NUM_MASK);
78 }
79
80 rio_mport_write_config_32(mport, destid, hopcount,
81 SPP_ROUTE_CFG_DESTID(table), route_destid);
82 rio_mport_read_config_32(mport, destid, hopcount,
83 SPP_ROUTE_CFG_PORT(table), &result);
84
85 *route_port = (u8)result;
86 if (*route_port > 15)
87 ret = -1;
88
89 return ret;
90}
91
92static int
93tsi57x_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
94 u16 table)
95{
96 u32 route_idx;
97 u32 lut_size;
98
99 lut_size = (mport->sys_size) ? 0x1ff : 0xff;
100
101 if (table == RIO_GLOBAL_TABLE) {
102 rio_mport_write_config_32(mport, destid, hopcount,
103 SPBC_ROUTE_CFG_DESTID, 0x80000000);
104 for (route_idx = 0; route_idx <= lut_size; route_idx++)
105 rio_mport_write_config_32(mport, destid, hopcount,
106 SPBC_ROUTE_CFG_PORT,
107 RIO_INVALID_ROUTE);
108 } else {
109 rio_mport_write_config_32(mport, destid, hopcount,
110 SPP_ROUTE_CFG_DESTID(table), 0x80000000);
111 for (route_idx = 0; route_idx <= lut_size; route_idx++)
112 rio_mport_write_config_32(mport, destid, hopcount,
113 SPP_ROUTE_CFG_PORT(table) , RIO_INVALID_ROUTE);
114 }
115
116 return 0;
117}
118
119static int
120tsi57x_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
121 u8 sw_domain)
122{
123 u32 regval;
124
125 /*
126 * Switch domain configuration operates only at global level
127 */
128
129 /* Turn off flat (LUT_512) mode */
130 rio_mport_read_config_32(mport, destid, hopcount,
131 TSI578_SP_MODE_GLBL, &regval);
132 rio_mport_write_config_32(mport, destid, hopcount, TSI578_SP_MODE_GLBL,
133 regval & ~TSI578_SP_MODE_LUT_512);
134 /* Set switch domain base */
135 rio_mport_write_config_32(mport, destid, hopcount,
136 TSI578_GLBL_ROUTE_BASE,
137 (u32)(sw_domain << 24));
138 return 0;
139}
140
141static int
142tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
143 u8 *sw_domain)
144{
145 u32 regval;
146
147 /*
148 * Switch domain configuration operates only at global level
149 */
150 rio_mport_read_config_32(mport, destid, hopcount,
151 TSI578_GLBL_ROUTE_BASE, &regval);
152
153 *sw_domain = (u8)(regval >> 24);
154
155 return 0;
156}
157
158static int
159tsi57x_em_init(struct rio_dev *rdev)
160{
161 struct rio_mport *mport = rdev->net->hport;
162 u16 destid = rdev->rswitch->destid;
163 u8 hopcount = rdev->rswitch->hopcount;
164 u32 regval;
165 int portnum;
166
167 pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount);
168
169 for (portnum = 0; portnum < 16; portnum++) {
170 /* Make sure that Port-Writes are enabled (for all ports) */
171 rio_mport_read_config_32(mport, destid, hopcount,
172 TSI578_SP_MODE(portnum), &regval);
173 rio_mport_write_config_32(mport, destid, hopcount,
174 TSI578_SP_MODE(portnum),
175 regval & ~TSI578_SP_MODE_PW_DIS);
176
177 /* Clear all pending interrupts */
178 rio_mport_read_config_32(mport, destid, hopcount,
179 rdev->phys_efptr +
180 RIO_PORT_N_ERR_STS_CSR(portnum),
181 &regval);
182 rio_mport_write_config_32(mport, destid, hopcount,
183 rdev->phys_efptr +
184 RIO_PORT_N_ERR_STS_CSR(portnum),
185 regval & 0x07120214);
186
187 rio_mport_read_config_32(mport, destid, hopcount,
188 TSI578_SP_INT_STATUS(portnum), &regval);
189 rio_mport_write_config_32(mport, destid, hopcount,
190 TSI578_SP_INT_STATUS(portnum),
191 regval & 0x000700bd);
192
193 /* Enable all interrupts to allow ports to send a port-write */
194 rio_mport_read_config_32(mport, destid, hopcount,
195 TSI578_SP_CTL_INDEP(portnum), &regval);
196 rio_mport_write_config_32(mport, destid, hopcount,
197 TSI578_SP_CTL_INDEP(portnum),
198 regval | 0x000b0000);
199
200 /* Skip next (odd) port if the current port is in x4 mode */
201 rio_mport_read_config_32(mport, destid, hopcount,
202 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
203 &regval);
204 if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
205 portnum++;
206 }
207
208 return 0;
209}
210
211static int
212tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
213{
214 struct rio_mport *mport = rdev->net->hport;
215 u16 destid = rdev->rswitch->destid;
216 u8 hopcount = rdev->rswitch->hopcount;
217 u32 intstat, err_status;
218 int sendcount, checkcount;
219 u8 route_port;
220 u32 regval;
221
222 rio_mport_read_config_32(mport, destid, hopcount,
223 rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
224 &err_status);
225
226 if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) &&
227 (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
228 RIO_PORT_N_ERR_STS_PW_INP_ES))) {
229 /* Remove any queued packets by locking/unlocking port */
230 rio_mport_read_config_32(mport, destid, hopcount,
231 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
232 &regval);
233 if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
234 rio_mport_write_config_32(mport, destid, hopcount,
235 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
236 regval | RIO_PORT_N_CTL_LOCKOUT);
237 udelay(50);
238 rio_mport_write_config_32(mport, destid, hopcount,
239 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
240 regval);
241 }
242
243 /* Read from link maintenance response register to clear
244 * valid bit
245 */
246 rio_mport_read_config_32(mport, destid, hopcount,
247 rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),
248 &regval);
249
250 /* Send a Packet-Not-Accepted/Link-Request-Input-Status control
251 * symbol to recover from IES/OES
252 */
253 sendcount = 3;
254 while (sendcount) {
255 rio_mport_write_config_32(mport, destid, hopcount,
256 TSI578_SP_CS_TX(portnum), 0x40fc8000);
257 checkcount = 3;
258 while (checkcount--) {
259 udelay(50);
260 rio_mport_read_config_32(
261 mport, destid, hopcount,
262 rdev->phys_efptr +
263 RIO_PORT_N_MNT_RSP_CSR(portnum),
264 &regval);
265 if (regval & RIO_PORT_N_MNT_RSP_RVAL)
266 goto exit_es;
267 }
268
269 sendcount--;
270 }
271 }
272
273exit_es:
274 /* Clear implementation specific error status bits */
275 rio_mport_read_config_32(mport, destid, hopcount,
276 TSI578_SP_INT_STATUS(portnum), &intstat);
277 pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n",
278 destid, hopcount, portnum, intstat);
279
280 if (intstat & 0x10000) {
281 rio_mport_read_config_32(mport, destid, hopcount,
282 TSI578_SP_LUT_PEINF(portnum), &regval);
283 regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24);
284 route_port = rdev->rswitch->route_table[regval];
285 pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n",
286 rio_name(rdev), portnum, regval);
287 tsi57x_route_add_entry(mport, destid, hopcount,
288 RIO_GLOBAL_TABLE, regval, route_port);
289 }
290
291 rio_mport_write_config_32(mport, destid, hopcount,
292 TSI578_SP_INT_STATUS(portnum),
293 intstat & 0x000700bd);
294
295 return 0;
296}
297
298static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum)
299{
300 pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
301 rdev->rswitch->add_entry = tsi57x_route_add_entry;
302 rdev->rswitch->get_entry = tsi57x_route_get_entry;
303 rdev->rswitch->clr_table = tsi57x_route_clr_table;
304 rdev->rswitch->set_domain = tsi57x_set_domain;
305 rdev->rswitch->get_domain = tsi57x_get_domain;
306 rdev->rswitch->em_init = tsi57x_em_init;
307 rdev->rswitch->em_handle = tsi57x_em_handler;
308
309 return 0;
310}
311
312DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI572, tsi57x_switch_init);
313DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI574, tsi57x_switch_init);
314DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI577, tsi57x_switch_init);
315DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI578, tsi57x_switch_init);
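
Once tsi57x_switch_init() has filled in rdev->rswitch, the generic RapidIO code no longer cares which silicon it is talking to: it drives the switch purely through these hooks. A hypothetical caller might look like the sketch below (the function name and the "clear then add one route" policy are invented for illustration; the hook signatures are exactly the ones installed above):

	/* Example only: program a single route through the installed hooks. */
	static int example_setup_route(struct rio_dev *rdev, u16 new_destid,
				       u8 out_port)
	{
		struct rio_mport *mport = rdev->net->hport;
		u16 destid = rdev->rswitch->destid;
		u8 hopcount = rdev->rswitch->hopcount;
		int rc = 0;

		/* Wipe the broadcast LUT once, then add the route we need. */
		if (rdev->rswitch->clr_table)
			rc = rdev->rswitch->clr_table(mport, destid, hopcount,
						      RIO_GLOBAL_TABLE);
		if (!rc && rdev->rswitch->add_entry)
			rc = rdev->rswitch->add_entry(mport, destid, hopcount,
						      RIO_GLOBAL_TABLE,
						      new_destid, out_port);
		return rc;
	}
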
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 1afd008ca957..7b14a67bdca2 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -16,7 +16,7 @@
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/regulator/driver.h> 18#include <linux/regulator/driver.h>
19#include <linux/mfd/ab3100.h> 19#include <linux/mfd/abx500.h>
20 20
21/* LDO registers and some handy masking definitions for AB3100 */ 21/* LDO registers and some handy masking definitions for AB3100 */
22#define AB3100_LDO_A 0x40 22#define AB3100_LDO_A 0x40
@@ -41,7 +41,7 @@
41 * struct ab3100_regulator 41 * struct ab3100_regulator
42 * A struct passed around the individual regulator functions 42 * A struct passed around the individual regulator functions
43 * @platform_device: platform device holding this regulator 43 * @platform_device: platform device holding this regulator
44 * @ab3100: handle to the AB3100 parent chip 44 * @dev: handle to the device
45 * @plfdata: AB3100 platform data passed in at probe time 45 * @plfdata: AB3100 platform data passed in at probe time
46 * @regreg: regulator register number in the AB3100 46 * @regreg: regulator register number in the AB3100
47 * @fixed_voltage: a fixed voltage for this regulator, if this 47 * @fixed_voltage: a fixed voltage for this regulator, if this
@@ -52,7 +52,7 @@
52 */ 52 */
53struct ab3100_regulator { 53struct ab3100_regulator {
54 struct regulator_dev *rdev; 54 struct regulator_dev *rdev;
55 struct ab3100 *ab3100; 55 struct device *dev;
56 struct ab3100_platform_data *plfdata; 56 struct ab3100_platform_data *plfdata;
57 u8 regreg; 57 u8 regreg;
58 int fixed_voltage; 58 int fixed_voltage;
@@ -183,7 +183,7 @@ static int ab3100_enable_regulator(struct regulator_dev *reg)
183 int err; 183 int err;
184 u8 regval; 184 u8 regval;
185 185
186 err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg, 186 err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg,
187 &regval); 187 &regval);
188 if (err) { 188 if (err) {
189 dev_warn(&reg->dev, "failed to get regid %d value\n", 189 dev_warn(&reg->dev, "failed to get regid %d value\n",
@@ -197,7 +197,7 @@ static int ab3100_enable_regulator(struct regulator_dev *reg)
197 197
198 regval |= AB3100_REG_ON_MASK; 198 regval |= AB3100_REG_ON_MASK;
199 199
200 err = ab3100_set_register_interruptible(abreg->ab3100, abreg->regreg, 200 err = abx500_set_register_interruptible(abreg->dev, 0, abreg->regreg,
201 regval); 201 regval);
202 if (err) { 202 if (err) {
203 dev_warn(&reg->dev, "failed to set regid %d value\n", 203 dev_warn(&reg->dev, "failed to set regid %d value\n",
@@ -245,14 +245,14 @@ static int ab3100_disable_regulator(struct regulator_dev *reg)
245 if (abreg->regreg == AB3100_LDO_D) { 245 if (abreg->regreg == AB3100_LDO_D) {
246 dev_info(&reg->dev, "disabling LDO D - shut down system\n"); 246 dev_info(&reg->dev, "disabling LDO D - shut down system\n");
247 /* Setting LDO D to 0x00 cuts the power to the SoC */ 247 /* Setting LDO D to 0x00 cuts the power to the SoC */
248 return ab3100_set_register_interruptible(abreg->ab3100, 248 return abx500_set_register_interruptible(abreg->dev, 0,
249 AB3100_LDO_D, 0x00U); 249 AB3100_LDO_D, 0x00U);
250 } 250 }
251 251
252 /* 252 /*
253 * All other regulators are handled here 253 * All other regulators are handled here
254 */ 254 */
255 err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg, 255 err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg,
256 &regval); 256 &regval);
257 if (err) { 257 if (err) {
258 dev_err(&reg->dev, "unable to get register 0x%x\n", 258 dev_err(&reg->dev, "unable to get register 0x%x\n",
@@ -260,7 +260,7 @@ static int ab3100_disable_regulator(struct regulator_dev *reg)
260 return err; 260 return err;
261 } 261 }
262 regval &= ~AB3100_REG_ON_MASK; 262 regval &= ~AB3100_REG_ON_MASK;
263 return ab3100_set_register_interruptible(abreg->ab3100, abreg->regreg, 263 return abx500_set_register_interruptible(abreg->dev, 0, abreg->regreg,
264 regval); 264 regval);
265} 265}
266 266
@@ -270,7 +270,7 @@ static int ab3100_is_enabled_regulator(struct regulator_dev *reg)
270 u8 regval; 270 u8 regval;
271 int err; 271 int err;
272 272
273 err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg, 273 err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg,
274 &regval); 274 &regval);
275 if (err) { 275 if (err) {
276 dev_err(&reg->dev, "unable to get register 0x%x\n", 276 dev_err(&reg->dev, "unable to get register 0x%x\n",
@@ -305,7 +305,7 @@ static int ab3100_get_voltage_regulator(struct regulator_dev *reg)
305 * For variable types, read out setting and index into 305 * For variable types, read out setting and index into
306 * supplied voltage list. 306 * supplied voltage list.
307 */ 307 */
308 err = ab3100_get_register_interruptible(abreg->ab3100, 308 err = abx500_get_register_interruptible(abreg->dev, 0,
309 abreg->regreg, &regval); 309 abreg->regreg, &regval);
310 if (err) { 310 if (err) {
311 dev_warn(&reg->dev, 311 dev_warn(&reg->dev,
@@ -373,7 +373,7 @@ static int ab3100_set_voltage_regulator(struct regulator_dev *reg,
373 if (bestindex < 0) 373 if (bestindex < 0)
374 return bestindex; 374 return bestindex;
375 375
376 err = ab3100_get_register_interruptible(abreg->ab3100, 376 err = abx500_get_register_interruptible(abreg->dev, 0,
377 abreg->regreg, &regval); 377 abreg->regreg, &regval);
378 if (err) { 378 if (err) {
379 dev_warn(&reg->dev, 379 dev_warn(&reg->dev,
@@ -386,7 +386,7 @@ static int ab3100_set_voltage_regulator(struct regulator_dev *reg,
386 regval &= ~0xE0; 386 regval &= ~0xE0;
387 regval |= (bestindex << 5); 387 regval |= (bestindex << 5);
388 388
389 err = ab3100_set_register_interruptible(abreg->ab3100, 389 err = abx500_set_register_interruptible(abreg->dev, 0,
390 abreg->regreg, regval); 390 abreg->regreg, regval);
391 if (err) 391 if (err)
392 dev_warn(&reg->dev, "failed to set regulator register %02x\n", 392 dev_warn(&reg->dev, "failed to set regulator register %02x\n",
@@ -414,7 +414,7 @@ static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg,
414 /* LDO E and BUCK have special suspend voltages you can set */ 414 /* LDO E and BUCK have special suspend voltages you can set */
415 bestindex = ab3100_get_best_voltage_index(reg, uV, uV); 415 bestindex = ab3100_get_best_voltage_index(reg, uV, uV);
416 416
417 err = ab3100_get_register_interruptible(abreg->ab3100, 417 err = abx500_get_register_interruptible(abreg->dev, 0,
418 targetreg, &regval); 418 targetreg, &regval);
419 if (err) { 419 if (err) {
420 dev_warn(&reg->dev, 420 dev_warn(&reg->dev,
@@ -427,7 +427,7 @@ static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg,
427 regval &= ~0xE0; 427 regval &= ~0xE0;
428 regval |= (bestindex << 5); 428 regval |= (bestindex << 5);
429 429
430 err = ab3100_set_register_interruptible(abreg->ab3100, 430 err = abx500_set_register_interruptible(abreg->dev, 0,
431 targetreg, regval); 431 targetreg, regval);
432 if (err) 432 if (err)
433 dev_warn(&reg->dev, "failed to set regulator register %02x\n", 433 dev_warn(&reg->dev, "failed to set regulator register %02x\n",
@@ -574,13 +574,12 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
574static int __devinit ab3100_regulators_probe(struct platform_device *pdev) 574static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
575{ 575{
576 struct ab3100_platform_data *plfdata = pdev->dev.platform_data; 576 struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
577 struct ab3100 *ab3100 = platform_get_drvdata(pdev);
578 int err = 0; 577 int err = 0;
579 u8 data; 578 u8 data;
580 int i; 579 int i;
581 580
582 /* Check chip state */ 581 /* Check chip state */
583 err = ab3100_get_register_interruptible(ab3100, 582 err = abx500_get_register_interruptible(&pdev->dev, 0,
584 AB3100_LDO_D, &data); 583 AB3100_LDO_D, &data);
585 if (err) { 584 if (err) {
586 dev_err(&pdev->dev, "could not read initial status of LDO_D\n"); 585 dev_err(&pdev->dev, "could not read initial status of LDO_D\n");
@@ -595,7 +594,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
595 594
596 /* Set up regulators */ 595 /* Set up regulators */
597 for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) { 596 for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) {
598 err = ab3100_set_register_interruptible(ab3100, 597 err = abx500_set_register_interruptible(&pdev->dev, 0,
599 ab3100_reg_init_order[i], 598 ab3100_reg_init_order[i],
600 plfdata->reg_initvals[i]); 599 plfdata->reg_initvals[i]);
601 if (err) { 600 if (err) {
@@ -617,7 +616,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
617 * see what it looks like for a certain machine, go 616 * see what it looks like for a certain machine, go
618 * into the machine I2C setup. 617 * into the machine I2C setup.
619 */ 618 */
620 reg->ab3100 = ab3100; 619 reg->dev = &pdev->dev;
621 reg->plfdata = plfdata; 620 reg->plfdata = plfdata;
622 621
623 /* 622 /*
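
The regulator conversion above swaps the AB3100-specific register accessors for the generic abx500 ones, which identify the chip by its struct device plus a bank argument (0 for the AB3100) instead of a struct ab3100 handle. A minimal sketch of the resulting read-modify-write pattern, assuming only the two accessor signatures visible in the hunks (the helper name is made up):

	#include <linux/device.h>
	#include <linux/mfd/abx500.h>

	/* Example only: set one bit in a register via the abx500 accessors. */
	static int example_set_bit(struct device *dev, u8 reg, u8 bit)
	{
		u8 regval;
		int err;

		err = abx500_get_register_interruptible(dev, 0, reg, &regval);
		if (err)
			return err;

		regval |= bit;
		return abx500_set_register_interruptible(dev, 0, reg, regval);
	}
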
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 74841abcc9cc..14b4576281c5 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -22,68 +22,9 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/regulator/driver.h> 23#include <linux/regulator/driver.h>
24#include <linux/regulator/machine.h> 24#include <linux/regulator/machine.h>
25#include <linux/i2c.h>
26#include <linux/delay.h> 25#include <linux/delay.h>
27#include <linux/slab.h> 26#include <linux/slab.h>
28 27#include <linux/mfd/tps6507x.h>
29/* Register definitions */
30#define TPS6507X_REG_PPATH1 0X01
31#define TPS6507X_REG_INT 0X02
32#define TPS6507X_REG_CHGCONFIG0 0X03
33#define TPS6507X_REG_CHGCONFIG1 0X04
34#define TPS6507X_REG_CHGCONFIG2 0X05
35#define TPS6507X_REG_CHGCONFIG3 0X06
36#define TPS6507X_REG_REG_ADCONFIG 0X07
37#define TPS6507X_REG_TSCMODE 0X08
38#define TPS6507X_REG_ADRESULT_1 0X09
39#define TPS6507X_REG_ADRESULT_2 0X0A
40#define TPS6507X_REG_PGOOD 0X0B
41#define TPS6507X_REG_PGOODMASK 0X0C
42#define TPS6507X_REG_CON_CTRL1 0X0D
43#define TPS6507X_REG_CON_CTRL2 0X0E
44#define TPS6507X_REG_CON_CTRL3 0X0F
45#define TPS6507X_REG_DEFDCDC1 0X10
46#define TPS6507X_REG_DEFDCDC2_LOW 0X11
47#define TPS6507X_REG_DEFDCDC2_HIGH 0X12
48#define TPS6507X_REG_DEFDCDC3_LOW 0X13
49#define TPS6507X_REG_DEFDCDC3_HIGH 0X14
50#define TPS6507X_REG_DEFSLEW 0X15
51#define TPS6507X_REG_LDO_CTRL1 0X16
52#define TPS6507X_REG_DEFLDO2 0X17
53#define TPS6507X_REG_WLED_CTRL1 0X18
54#define TPS6507X_REG_WLED_CTRL2 0X19
55
56/* CON_CTRL1 bitfields */
57#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4)
58#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3)
59#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2)
60#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1)
61#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0)
62
63/* DEFDCDC1 bitfields */
64#define TPS6507X_DEFDCDC1_DCDC1_EXT_ADJ_EN BIT(7)
65#define TPS6507X_DEFDCDC1_DCDC1_MASK 0X3F
66
67/* DEFDCDC2_LOW bitfields */
68#define TPS6507X_DEFDCDC2_LOW_DCDC2_MASK 0X3F
69
70/* DEFDCDC2_HIGH bitfields */
71#define TPS6507X_DEFDCDC2_HIGH_DCDC2_MASK 0X3F
72
73/* DEFDCDC3_LOW bitfields */
74#define TPS6507X_DEFDCDC3_LOW_DCDC3_MASK 0X3F
75
76/* DEFDCDC3_HIGH bitfields */
77#define TPS6507X_DEFDCDC3_HIGH_DCDC3_MASK 0X3F
78
79/* TPS6507X_REG_LDO_CTRL1 bitfields */
80#define TPS6507X_REG_LDO_CTRL1_LDO1_MASK 0X0F
81
82/* TPS6507X_REG_DEFLDO2 bitfields */
83#define TPS6507X_REG_DEFLDO2_LDO2_MASK 0X3F
84
85/* VDCDC MASK */
86#define TPS6507X_DEFDCDCX_DCDC_MASK 0X3F
87 28
88/* DCDC's */ 29/* DCDC's */
89#define TPS6507X_DCDC_1 0 30#define TPS6507X_DCDC_1 0
@@ -162,101 +103,146 @@ struct tps_info {
162 const u16 *table; 103 const u16 *table;
163}; 104};
164 105
165struct tps_pmic { 106static const struct tps_info tps6507x_pmic_regs[] = {
107 {
108 .name = "VDCDC1",
109 .min_uV = 725000,
110 .max_uV = 3300000,
111 .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
112 .table = VDCDCx_VSEL_table,
113 },
114 {
115 .name = "VDCDC2",
116 .min_uV = 725000,
117 .max_uV = 3300000,
118 .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
119 .table = VDCDCx_VSEL_table,
120 },
121 {
122 .name = "VDCDC3",
123 .min_uV = 725000,
124 .max_uV = 3300000,
125 .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
126 .table = VDCDCx_VSEL_table,
127 },
128 {
129 .name = "LDO1",
130 .min_uV = 1000000,
131 .max_uV = 3300000,
132 .table_len = ARRAY_SIZE(LDO1_VSEL_table),
133 .table = LDO1_VSEL_table,
134 },
135 {
136 .name = "LDO2",
137 .min_uV = 725000,
138 .max_uV = 3300000,
139 .table_len = ARRAY_SIZE(LDO2_VSEL_table),
140 .table = LDO2_VSEL_table,
141 },
142};
143
144struct tps6507x_pmic {
166 struct regulator_desc desc[TPS6507X_NUM_REGULATOR]; 145 struct regulator_desc desc[TPS6507X_NUM_REGULATOR];
167 struct i2c_client *client; 146 struct tps6507x_dev *mfd;
168 struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR]; 147 struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR];
169 const struct tps_info *info[TPS6507X_NUM_REGULATOR]; 148 const struct tps_info *info[TPS6507X_NUM_REGULATOR];
170 struct mutex io_lock; 149 struct mutex io_lock;
171}; 150};
172 151static inline int tps6507x_pmic_read(struct tps6507x_pmic *tps, u8 reg)
173static inline int tps_6507x_read(struct tps_pmic *tps, u8 reg)
174{ 152{
175 return i2c_smbus_read_byte_data(tps->client, reg); 153 u8 val;
154 int err;
155
156 err = tps->mfd->read_dev(tps->mfd, reg, 1, &val);
157
158 if (err)
159 return err;
160
161 return val;
176} 162}
177 163
178static inline int tps_6507x_write(struct tps_pmic *tps, u8 reg, u8 val) 164static inline int tps6507x_pmic_write(struct tps6507x_pmic *tps, u8 reg, u8 val)
179{ 165{
180 return i2c_smbus_write_byte_data(tps->client, reg, val); 166 return tps->mfd->write_dev(tps->mfd, reg, 1, &val);
181} 167}
182 168
183static int tps_6507x_set_bits(struct tps_pmic *tps, u8 reg, u8 mask) 169static int tps6507x_pmic_set_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask)
184{ 170{
185 int err, data; 171 int err, data;
186 172
187 mutex_lock(&tps->io_lock); 173 mutex_lock(&tps->io_lock);
188 174
189 data = tps_6507x_read(tps, reg); 175 data = tps6507x_pmic_read(tps, reg);
190 if (data < 0) { 176 if (data < 0) {
191 dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg); 177 dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);
192 err = data; 178 err = data;
193 goto out; 179 goto out;
194 } 180 }
195 181
196 data |= mask; 182 data |= mask;
197 err = tps_6507x_write(tps, reg, data); 183 err = tps6507x_pmic_write(tps, reg, data);
198 if (err) 184 if (err)
199 dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg); 185 dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg);
200 186
201out: 187out:
202 mutex_unlock(&tps->io_lock); 188 mutex_unlock(&tps->io_lock);
203 return err; 189 return err;
204} 190}
205 191
206static int tps_6507x_clear_bits(struct tps_pmic *tps, u8 reg, u8 mask) 192static int tps6507x_pmic_clear_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask)
207{ 193{
208 int err, data; 194 int err, data;
209 195
210 mutex_lock(&tps->io_lock); 196 mutex_lock(&tps->io_lock);
211 197
212 data = tps_6507x_read(tps, reg); 198 data = tps6507x_pmic_read(tps, reg);
213 if (data < 0) { 199 if (data < 0) {
214 dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg); 200 dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);
215 err = data; 201 err = data;
216 goto out; 202 goto out;
217 } 203 }
218 204
219 data &= ~mask; 205 data &= ~mask;
220 err = tps_6507x_write(tps, reg, data); 206 err = tps6507x_pmic_write(tps, reg, data);
221 if (err) 207 if (err)
222 dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg); 208 dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg);
223 209
224out: 210out:
225 mutex_unlock(&tps->io_lock); 211 mutex_unlock(&tps->io_lock);
226 return err; 212 return err;
227} 213}
228 214
229static int tps_6507x_reg_read(struct tps_pmic *tps, u8 reg) 215static int tps6507x_pmic_reg_read(struct tps6507x_pmic *tps, u8 reg)
230{ 216{
231 int data; 217 int data;
232 218
233 mutex_lock(&tps->io_lock); 219 mutex_lock(&tps->io_lock);
234 220
235 data = tps_6507x_read(tps, reg); 221 data = tps6507x_pmic_read(tps, reg);
236 if (data < 0) 222 if (data < 0)
237 dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg); 223 dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg);
238 224
239 mutex_unlock(&tps->io_lock); 225 mutex_unlock(&tps->io_lock);
240 return data; 226 return data;
241} 227}
242 228
243static int tps_6507x_reg_write(struct tps_pmic *tps, u8 reg, u8 val) 229static int tps6507x_pmic_reg_write(struct tps6507x_pmic *tps, u8 reg, u8 val)
244{ 230{
245 int err; 231 int err;
246 232
247 mutex_lock(&tps->io_lock); 233 mutex_lock(&tps->io_lock);
248 234
249 err = tps_6507x_write(tps, reg, val); 235 err = tps6507x_pmic_write(tps, reg, val);
250 if (err < 0) 236 if (err < 0)
251 dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg); 237 dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg);
252 238
253 mutex_unlock(&tps->io_lock); 239 mutex_unlock(&tps->io_lock);
254 return err; 240 return err;
255} 241}
256 242
257static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev) 243static int tps6507x_pmic_dcdc_is_enabled(struct regulator_dev *dev)
258{ 244{
259 struct tps_pmic *tps = rdev_get_drvdata(dev); 245 struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
260 int data, dcdc = rdev_get_id(dev); 246 int data, dcdc = rdev_get_id(dev);
261 u8 shift; 247 u8 shift;
262 248
@@ -264,7 +250,7 @@ static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev)
264 return -EINVAL; 250 return -EINVAL;
265 251
266 shift = TPS6507X_MAX_REG_ID - dcdc; 252 shift = TPS6507X_MAX_REG_ID - dcdc;
267 data = tps_6507x_reg_read(tps, TPS6507X_REG_CON_CTRL1); 253 data = tps6507x_pmic_reg_read(tps, TPS6507X_REG_CON_CTRL1);
268 254
269 if (data < 0) 255 if (data < 0)
270 return data; 256 return data;
@@ -272,9 +258,9 @@ static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev)
272 return (data & 1<<shift) ? 1 : 0; 258 return (data & 1<<shift) ? 1 : 0;
273} 259}
274 260
275static int tps6507x_ldo_is_enabled(struct regulator_dev *dev) 261static int tps6507x_pmic_ldo_is_enabled(struct regulator_dev *dev)
276{ 262{
277 struct tps_pmic *tps = rdev_get_drvdata(dev); 263 struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
278 int data, ldo = rdev_get_id(dev); 264 int data, ldo = rdev_get_id(dev);
279 u8 shift; 265 u8 shift;
280 266
@@ -282,7 +268,7 @@ static int tps6507x_ldo_is_enabled(struct regulator_dev *dev)
282 return -EINVAL; 268 return -EINVAL;
283 269
284 shift = TPS6507X_MAX_REG_ID - ldo; 270 shift = TPS6507X_MAX_REG_ID - ldo;
285 data = tps_6507x_reg_read(tps, TPS6507X_REG_CON_CTRL1); 271 data = tps6507x_pmic_reg_read(tps, TPS6507X_REG_CON_CTRL1);
286 272
287 if (data < 0) 273 if (data < 0)
288 return data; 274 return data;
@@ -290,9 +276,9 @@ static int tps6507x_ldo_is_enabled(struct regulator_dev *dev)
290 return (data & 1<<shift) ? 1 : 0; 276 return (data & 1<<shift) ? 1 : 0;
291} 277}
292 278
293static int tps6507x_dcdc_enable(struct regulator_dev *dev) 279static int tps6507x_pmic_dcdc_enable(struct regulator_dev *dev)
294{ 280{
295 struct tps_pmic *tps = rdev_get_drvdata(dev); 281 struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
296 int dcdc = rdev_get_id(dev); 282 int dcdc = rdev_get_id(dev);
297 u8 shift; 283 u8 shift;
298 284
@@ -300,12 +286,12 @@ static int tps6507x_dcdc_enable(struct regulator_dev *dev)
300 return -EINVAL; 286 return -EINVAL;
301 287
302 shift = TPS6507X_MAX_REG_ID - dcdc; 288 shift = TPS6507X_MAX_REG_ID - dcdc;
303 return tps_6507x_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift); 289 return tps6507x_pmic_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
304} 290}
305 291
306static int tps6507x_dcdc_disable(struct regulator_dev *dev) 292static int tps6507x_pmic_dcdc_disable(struct regulator_dev *dev)
307{ 293{
308 struct tps_pmic *tps = rdev_get_drvdata(dev); 294 struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
309 int dcdc = rdev_get_id(dev); 295 int dcdc = rdev_get_id(dev);
310 u8 shift; 296 u8 shift;
311 297
@@ -313,12 +299,13 @@ static int tps6507x_dcdc_disable(struct regulator_dev *dev)
313 return -EINVAL; 299 return -EINVAL;
314 300
315 shift = TPS6507X_MAX_REG_ID - dcdc; 301 shift = TPS6507X_MAX_REG_ID - dcdc;
316 return tps_6507x_clear_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift); 302 return tps6507x_pmic_clear_bits(tps, TPS6507X_REG_CON_CTRL1,
303 1 << shift);
317} 304}
318 305
319static int tps6507x_ldo_enable(struct regulator_dev *dev) 306static int tps6507x_pmic_ldo_enable(struct regulator_dev *dev)
320{ 307{
321 struct tps_pmic *tps = rdev_get_drvdata(dev); 308 struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
322 int ldo = rdev_get_id(dev); 309 int ldo = rdev_get_id(dev);
323 u8 shift; 310 u8 shift;
324 311
@@ -326,12 +313,12 @@ static int tps6507x_ldo_enable(struct regulator_dev *dev)
326 return -EINVAL; 313 return -EINVAL;
327 314
328 shift = TPS6507X_MAX_REG_ID - ldo; 315 shift = TPS6507X_MAX_REG_ID - ldo;
329 return tps_6507x_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift); 316 return tps6507x_pmic_set_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift);
330} 317}
331 318
332static int tps6507x_ldo_disable(struct regulator_dev *dev) 319static int tps6507x_pmic_ldo_disable(struct regulator_dev *dev)
333{ 320{
334 struct tps_pmic *tps = rdev_get_drvdata(dev); 321 struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
335 int ldo = rdev_get_id(dev); 322 int ldo = rdev_get_id(dev);
336 u8 shift; 323 u8 shift;
337 324
@@ -339,12 +326,13 @@ static int tps6507x_ldo_disable(struct regulator_dev *dev)
339 return -EINVAL; 326 return -EINVAL;
340 327
341 shift = TPS6507X_MAX_REG_ID - ldo; 328 shift = TPS6507X_MAX_REG_ID - ldo;
342 return tps_6507x_clear_bits(tps, TPS6507X_REG_CON_CTRL1, 1 << shift); 329 return tps6507x_pmic_clear_bits(tps, TPS6507X_REG_CON_CTRL1,
330 1 << shift);
343} 331}
344 332
345static int tps6507x_dcdc_get_voltage(struct regulator_dev *dev) 333static int tps6507x_pmic_dcdc_get_voltage(struct regulator_dev *dev)
346{ 334{
347 struct tps_pmic *tps = rdev_get_drvdata(dev); 335 struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
348 int data, dcdc = rdev_get_id(dev); 336 int data, dcdc = rdev_get_id(dev);
349 u8 reg; 337 u8 reg;
350 338
@@ -362,7 +350,7 @@ static int tps6507x_dcdc_get_voltage(struct regulator_dev *dev)
362 return -EINVAL; 350 return -EINVAL;
363 } 351 }
364 352
365 data = tps_6507x_reg_read(tps, reg); 353 data = tps6507x_pmic_reg_read(tps, reg);
366 if (data < 0) 354 if (data < 0)
367 return data; 355 return data;
368 356
@@ -370,10 +358,10 @@ static int tps6507x_dcdc_get_voltage(struct regulator_dev *dev)
370 return tps->info[dcdc]->table[data] * 1000; 358 return tps->info[dcdc]->table[data] * 1000;
371} 359}
372 360
373static int tps6507x_dcdc_set_voltage(struct regulator_dev *dev, 361static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev,
374 int min_uV, int max_uV) 362 int min_uV, int max_uV)
375{ 363{
376 struct tps_pmic *tps = rdev_get_drvdata(dev); 364 struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
377 int data, vsel, dcdc = rdev_get_id(dev); 365 int data, vsel, dcdc = rdev_get_id(dev);
378 u8 reg; 366 u8 reg;
379 367
@@ -411,19 +399,19 @@ static int tps6507x_dcdc_set_voltage(struct regulator_dev *dev,
411 if (vsel == tps->info[dcdc]->table_len) 399 if (vsel == tps->info[dcdc]->table_len)
412 return -EINVAL; 400 return -EINVAL;
413 401
414 data = tps_6507x_reg_read(tps, reg); 402 data = tps6507x_pmic_reg_read(tps, reg);
415 if (data < 0) 403 if (data < 0)
416 return data; 404 return data;
417 405
418 data &= ~TPS6507X_DEFDCDCX_DCDC_MASK; 406 data &= ~TPS6507X_DEFDCDCX_DCDC_MASK;
419 data |= vsel; 407 data |= vsel;
420 408
421 return tps_6507x_reg_write(tps, reg, data); 409 return tps6507x_pmic_reg_write(tps, reg, data);
422} 410}
423 411
424static int tps6507x_ldo_get_voltage(struct regulator_dev *dev) 412static int tps6507x_pmic_ldo_get_voltage(struct regulator_dev *dev)
425{ 413{
426 struct tps_pmic *tps = rdev_get_drvdata(dev); 414 struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
427 int data, ldo = rdev_get_id(dev); 415 int data, ldo = rdev_get_id(dev);
428 u8 reg, mask; 416 u8 reg, mask;
429 417
@@ -437,7 +425,7 @@ static int tps6507x_ldo_get_voltage(struct regulator_dev *dev)
437 TPS6507X_REG_DEFLDO2_LDO2_MASK); 425 TPS6507X_REG_DEFLDO2_LDO2_MASK);
438 } 426 }
439 427
440 data = tps_6507x_reg_read(tps, reg); 428 data = tps6507x_pmic_reg_read(tps, reg);
441 if (data < 0) 429 if (data < 0)
442 return data; 430 return data;
443 431
@@ -445,10 +433,10 @@ static int tps6507x_ldo_get_voltage(struct regulator_dev *dev)
445 return tps->info[ldo]->table[data] * 1000; 433 return tps->info[ldo]->table[data] * 1000;
446} 434}
447 435
448static int tps6507x_ldo_set_voltage(struct regulator_dev *dev, 436static int tps6507x_pmic_ldo_set_voltage(struct regulator_dev *dev,
449 int min_uV, int max_uV) 437 int min_uV, int max_uV)
450{ 438{
451 struct tps_pmic *tps = rdev_get_drvdata(dev); 439 struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
452 int data, vsel, ldo = rdev_get_id(dev); 440 int data, vsel, ldo = rdev_get_id(dev);
453 u8 reg, mask; 441 u8 reg, mask;
454 442
@@ -479,20 +467,20 @@ static int tps6507x_ldo_set_voltage(struct regulator_dev *dev,
479 if (vsel == tps->info[ldo]->table_len) 467 if (vsel == tps->info[ldo]->table_len)
480 return -EINVAL; 468 return -EINVAL;
481 469
482 data = tps_6507x_reg_read(tps, reg); 470 data = tps6507x_pmic_reg_read(tps, reg);
483 if (data < 0) 471 if (data < 0)
484 return data; 472 return data;
485 473
486 data &= ~mask; 474 data &= ~mask;
487 data |= vsel; 475 data |= vsel;
488 476
489 return tps_6507x_reg_write(tps, reg, data); 477 return tps6507x_pmic_reg_write(tps, reg, data);
490} 478}
491 479
492static int tps6507x_dcdc_list_voltage(struct regulator_dev *dev, 480static int tps6507x_pmic_dcdc_list_voltage(struct regulator_dev *dev,
493 unsigned selector) 481 unsigned selector)
494{ 482{
495 struct tps_pmic *tps = rdev_get_drvdata(dev); 483 struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
496 int dcdc = rdev_get_id(dev); 484 int dcdc = rdev_get_id(dev);
497 485
498 if (dcdc < TPS6507X_DCDC_1 || dcdc > TPS6507X_DCDC_3) 486 if (dcdc < TPS6507X_DCDC_1 || dcdc > TPS6507X_DCDC_3)
@@ -504,10 +492,10 @@ static int tps6507x_dcdc_list_voltage(struct regulator_dev *dev,
504 return tps->info[dcdc]->table[selector] * 1000; 492 return tps->info[dcdc]->table[selector] * 1000;
505} 493}
506 494
507static int tps6507x_ldo_list_voltage(struct regulator_dev *dev, 495static int tps6507x_pmic_ldo_list_voltage(struct regulator_dev *dev,
508 unsigned selector) 496 unsigned selector)
509{ 497{
510 struct tps_pmic *tps = rdev_get_drvdata(dev); 498 struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
511 int ldo = rdev_get_id(dev); 499 int ldo = rdev_get_id(dev);
512 500
513 if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2) 501 if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2)
@@ -520,47 +508,54 @@ static int tps6507x_ldo_list_voltage(struct regulator_dev *dev,
520} 508}
521 509
522/* Operations permitted on VDCDCx */ 510/* Operations permitted on VDCDCx */
523static struct regulator_ops tps6507x_dcdc_ops = { 511static struct regulator_ops tps6507x_pmic_dcdc_ops = {
524 .is_enabled = tps6507x_dcdc_is_enabled, 512 .is_enabled = tps6507x_pmic_dcdc_is_enabled,
525 .enable = tps6507x_dcdc_enable, 513 .enable = tps6507x_pmic_dcdc_enable,
526 .disable = tps6507x_dcdc_disable, 514 .disable = tps6507x_pmic_dcdc_disable,
527 .get_voltage = tps6507x_dcdc_get_voltage, 515 .get_voltage = tps6507x_pmic_dcdc_get_voltage,
528 .set_voltage = tps6507x_dcdc_set_voltage, 516 .set_voltage = tps6507x_pmic_dcdc_set_voltage,
529 .list_voltage = tps6507x_dcdc_list_voltage, 517 .list_voltage = tps6507x_pmic_dcdc_list_voltage,
530}; 518};
531 519
532/* Operations permitted on LDOx */ 520/* Operations permitted on LDOx */
533static struct regulator_ops tps6507x_ldo_ops = { 521static struct regulator_ops tps6507x_pmic_ldo_ops = {
534 .is_enabled = tps6507x_ldo_is_enabled, 522 .is_enabled = tps6507x_pmic_ldo_is_enabled,
535 .enable = tps6507x_ldo_enable, 523 .enable = tps6507x_pmic_ldo_enable,
536 .disable = tps6507x_ldo_disable, 524 .disable = tps6507x_pmic_ldo_disable,
537 .get_voltage = tps6507x_ldo_get_voltage, 525 .get_voltage = tps6507x_pmic_ldo_get_voltage,
538 .set_voltage = tps6507x_ldo_set_voltage, 526 .set_voltage = tps6507x_pmic_ldo_set_voltage,
539 .list_voltage = tps6507x_ldo_list_voltage, 527 .list_voltage = tps6507x_pmic_ldo_list_voltage,
540}; 528};
541 529
542static int __devinit tps_6507x_probe(struct i2c_client *client, 530static __devinit
543 const struct i2c_device_id *id) 531int tps6507x_pmic_probe(struct platform_device *pdev)
544{ 532{
533 struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
545 static int desc_id; 534 static int desc_id;
546 const struct tps_info *info = (void *)id->driver_data; 535 const struct tps_info *info = &tps6507x_pmic_regs[0];
547 struct regulator_init_data *init_data; 536 struct regulator_init_data *init_data;
548 struct regulator_dev *rdev; 537 struct regulator_dev *rdev;
549 struct tps_pmic *tps; 538 struct tps6507x_pmic *tps;
539 struct tps6507x_board *tps_board;
550 int i; 540 int i;
551 int error; 541 int error;
552 542
553 if (!i2c_check_functionality(client->adapter, 543 /**
554 I2C_FUNC_SMBUS_BYTE_DATA)) 544 * tps_board points to pmic related constants
555 return -EIO; 545 * coming from the board-evm file.
546 */
547
548 tps_board = dev_get_platdata(tps6507x_dev->dev);
549 if (!tps_board)
550 return -EINVAL;
556 551
557 /** 552 /**
558 * init_data points to array of regulator_init structures 553 * init_data points to array of regulator_init structures
559 * coming from the board-evm file. 554 * coming from the board-evm file.
560 */ 555 */
561 init_data = client->dev.platform_data; 556 init_data = tps_board->tps6507x_pmic_init_data;
562 if (!init_data) 557 if (!init_data)
563 return -EIO; 558 return -EINVAL;
564 559
565 tps = kzalloc(sizeof(*tps), GFP_KERNEL); 560 tps = kzalloc(sizeof(*tps), GFP_KERNEL);
566 if (!tps) 561 if (!tps)
@@ -569,7 +564,7 @@ static int __devinit tps_6507x_probe(struct i2c_client *client,
569 mutex_init(&tps->io_lock); 564 mutex_init(&tps->io_lock);
570 565
571 /* common for all regulators */ 566 /* common for all regulators */
572 tps->client = client; 567 tps->mfd = tps6507x_dev;
573 568
574 for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) { 569 for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) {
575 /* Register the regulators */ 570 /* Register the regulators */
@@ -578,15 +573,16 @@ static int __devinit tps_6507x_probe(struct i2c_client *client,
578 tps->desc[i].id = desc_id++; 573 tps->desc[i].id = desc_id++;
579 tps->desc[i].n_voltages = num_voltages[i]; 574 tps->desc[i].n_voltages = num_voltages[i];
580 tps->desc[i].ops = (i > TPS6507X_DCDC_3 ? 575 tps->desc[i].ops = (i > TPS6507X_DCDC_3 ?
581 &tps6507x_ldo_ops : &tps6507x_dcdc_ops); 576 &tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops);
582 tps->desc[i].type = REGULATOR_VOLTAGE; 577 tps->desc[i].type = REGULATOR_VOLTAGE;
583 tps->desc[i].owner = THIS_MODULE; 578 tps->desc[i].owner = THIS_MODULE;
584 579
585 rdev = regulator_register(&tps->desc[i], 580 rdev = regulator_register(&tps->desc[i],
586 &client->dev, init_data, tps); 581 tps6507x_dev->dev, init_data, tps);
587 if (IS_ERR(rdev)) { 582 if (IS_ERR(rdev)) {
588 dev_err(&client->dev, "failed to register %s\n", 583 dev_err(tps6507x_dev->dev,
589 id->name); 584 "failed to register %s regulator\n",
585 pdev->name);
590 error = PTR_ERR(rdev); 586 error = PTR_ERR(rdev);
591 goto fail; 587 goto fail;
592 } 588 }
@@ -595,7 +591,7 @@ static int __devinit tps_6507x_probe(struct i2c_client *client,
595 tps->rdev[i] = rdev; 591 tps->rdev[i] = rdev;
596 } 592 }
597 593
598 i2c_set_clientdata(client, tps); 594 tps6507x_dev->pmic = tps;
599 595
600 return 0; 596 return 0;
601 597
@@ -608,19 +604,17 @@ fail:
608} 604}
609 605
610/** 606/**
611 * tps_6507x_remove - TPS6507x driver i2c remove handler 607 * tps6507x_remove - TPS6507x driver i2c remove handler
612 * @client: i2c driver client device structure 608 * @client: i2c driver client device structure
613 * 609 *
614 * Unregister TPS driver as an i2c client device driver 610 * Unregister TPS driver as an i2c client device driver
615 */ 611 */
616static int __devexit tps_6507x_remove(struct i2c_client *client) 612static int __devexit tps6507x_pmic_remove(struct platform_device *pdev)
617{ 613{
618 struct tps_pmic *tps = i2c_get_clientdata(client); 614 struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
615 struct tps6507x_pmic *tps = tps6507x_dev->pmic;
619 int i; 616 int i;
620 617
621 /* clear the client data in i2c */
622 i2c_set_clientdata(client, NULL);
623
624 for (i = 0; i < TPS6507X_NUM_REGULATOR; i++) 618 for (i = 0; i < TPS6507X_NUM_REGULATOR; i++)
625 regulator_unregister(tps->rdev[i]); 619 regulator_unregister(tps->rdev[i]);
626 620
@@ -629,83 +623,38 @@ static int __devexit tps_6507x_remove(struct i2c_client *client)
629 return 0; 623 return 0;
630} 624}
631 625
632static const struct tps_info tps6507x_regs[] = { 626static struct platform_driver tps6507x_pmic_driver = {
633 {
634 .name = "VDCDC1",
635 .min_uV = 725000,
636 .max_uV = 3300000,
637 .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
638 .table = VDCDCx_VSEL_table,
639 },
640 {
641 .name = "VDCDC2",
642 .min_uV = 725000,
643 .max_uV = 3300000,
644 .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
645 .table = VDCDCx_VSEL_table,
646 },
647 {
648 .name = "VDCDC3",
649 .min_uV = 725000,
650 .max_uV = 3300000,
651 .table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
652 .table = VDCDCx_VSEL_table,
653 },
654 {
655 .name = "LDO1",
656 .min_uV = 1000000,
657 .max_uV = 3300000,
658 .table_len = ARRAY_SIZE(LDO1_VSEL_table),
659 .table = LDO1_VSEL_table,
660 },
661 {
662 .name = "LDO2",
663 .min_uV = 725000,
664 .max_uV = 3300000,
665 .table_len = ARRAY_SIZE(LDO2_VSEL_table),
666 .table = LDO2_VSEL_table,
667 },
668};
669
670static const struct i2c_device_id tps_6507x_id[] = {
671 {.name = "tps6507x",
672 .driver_data = (unsigned long) tps6507x_regs,},
673 { },
674};
675MODULE_DEVICE_TABLE(i2c, tps_6507x_id);
676
677static struct i2c_driver tps_6507x_i2c_driver = {
678 .driver = { 627 .driver = {
679 .name = "tps6507x", 628 .name = "tps6507x-pmic",
680 .owner = THIS_MODULE, 629 .owner = THIS_MODULE,
681 }, 630 },
682 .probe = tps_6507x_probe, 631 .probe = tps6507x_pmic_probe,
683 .remove = __devexit_p(tps_6507x_remove), 632 .remove = __devexit_p(tps6507x_pmic_remove),
684 .id_table = tps_6507x_id,
685}; 633};
686 634
687/** 635/**
688 * tps_6507x_init 636 * tps6507x_pmic_init
689 * 637 *
690 * Module init function 638 * Module init function
691 */ 639 */
692static int __init tps_6507x_init(void) 640static int __init tps6507x_pmic_init(void)
693{ 641{
694 return i2c_add_driver(&tps_6507x_i2c_driver); 642 return platform_driver_register(&tps6507x_pmic_driver);
695} 643}
696subsys_initcall(tps_6507x_init); 644subsys_initcall(tps6507x_pmic_init);
697 645
698/** 646/**
699 * tps_6507x_cleanup 647 * tps6507x_pmic_cleanup
700 * 648 *
701 * Module exit function 649 * Module exit function
702 */ 650 */
703static void __exit tps_6507x_cleanup(void) 651static void __exit tps6507x_pmic_cleanup(void)
704{ 652{
705 i2c_del_driver(&tps_6507x_i2c_driver); 653 platform_driver_unregister(&tps6507x_pmic_driver);
706} 654}
707module_exit(tps_6507x_cleanup); 655module_exit(tps6507x_pmic_cleanup);
708 656
709MODULE_AUTHOR("Texas Instruments"); 657MODULE_AUTHOR("Texas Instruments");
710MODULE_DESCRIPTION("TPS6507x voltage regulator driver"); 658MODULE_DESCRIPTION("TPS6507x voltage regulator driver");
711MODULE_LICENSE("GPL v2"); 659MODULE_LICENSE("GPL v2");
660MODULE_ALIAS("platform:tps6507x-pmic");
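
Because the driver is now a platform child of the tps6507x MFD core rather than an I2C client, the regulator_init_data array has to arrive through the parent device's platform data, which the probe above reads as tps_board->tps6507x_pmic_init_data. A board-file sketch under that assumption (only the field name comes from the diff; the constraints and the VDCDC1..LDO2 ordering are guesses for illustration):

	#include <linux/regulator/machine.h>
	#include <linux/mfd/tps6507x.h>

	/* Example only: per-regulator init data, one entry per output. */
	static struct regulator_init_data example_tps6507x_reg_data[] = {
		{
			.constraints = {
				.min_uV = 3300000,
				.max_uV = 3300000,
				.valid_ops_mask = REGULATOR_CHANGE_STATUS,
			},
		},
		/* ... further entries for VDCDC2, VDCDC3, LDO1, LDO2 ... */
	};

	static struct tps6507x_board example_tps6507x_board = {
		.tps6507x_pmic_init_data = example_tps6507x_reg_data,
	};
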
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f1598324344c..10ba12c8c5e0 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -611,6 +611,13 @@ config RTC_DRV_AB3100
611 Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC 611 Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC
612 support. This chip contains a battery- and capacitor-backed RTC. 612 support. This chip contains a battery- and capacitor-backed RTC.
613 613
614config RTC_DRV_AB8500
615 tristate "ST-Ericsson AB8500 RTC"
616 depends on AB8500_CORE
617 help
618 Select this to enable the ST-Ericsson AB8500 power management IC RTC
619 support. This chip contains a battery- and capacitor-backed RTC.
620
614config RTC_DRV_NUC900 621config RTC_DRV_NUC900
615 tristate "NUC910/NUC920 RTC driver" 622 tristate "NUC910/NUC920 RTC driver"
616 depends on RTC_CLASS && ARCH_W90X900 623 depends on RTC_CLASS && ARCH_W90X900
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 245311a1348f..5adbba7cf89c 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -18,6 +18,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
18# Keep the list ordered. 18# Keep the list ordered.
19 19
20obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o 20obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
21obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
21obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o 22obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
22obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o 23obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
23obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o 24obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
index 4704aac2b5af..d26780ea254b 100644
--- a/drivers/rtc/rtc-ab3100.c
+++ b/drivers/rtc/rtc-ab3100.c
@@ -9,7 +9,7 @@
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/platform_device.h> 10#include <linux/platform_device.h>
11#include <linux/rtc.h> 11#include <linux/rtc.h>
12#include <linux/mfd/ab3100.h> 12#include <linux/mfd/abx500.h>
13 13
14/* Clock rate in Hz */ 14/* Clock rate in Hz */
15#define AB3100_RTC_CLOCK_RATE 32768 15#define AB3100_RTC_CLOCK_RATE 32768
@@ -45,7 +45,6 @@
45 */ 45 */
46static int ab3100_rtc_set_mmss(struct device *dev, unsigned long secs) 46static int ab3100_rtc_set_mmss(struct device *dev, unsigned long secs)
47{ 47{
48 struct ab3100 *ab3100_data = dev_get_drvdata(dev);
49 u8 regs[] = {AB3100_TI0, AB3100_TI1, AB3100_TI2, 48 u8 regs[] = {AB3100_TI0, AB3100_TI1, AB3100_TI2,
50 AB3100_TI3, AB3100_TI4, AB3100_TI5}; 49 AB3100_TI3, AB3100_TI4, AB3100_TI5};
51 unsigned char buf[6]; 50 unsigned char buf[6];
@@ -61,27 +60,26 @@ static int ab3100_rtc_set_mmss(struct device *dev, unsigned long secs)
61 buf[5] = (fat_time >> 40) & 0xFF; 60 buf[5] = (fat_time >> 40) & 0xFF;
62 61
63 for (i = 0; i < 6; i++) { 62 for (i = 0; i < 6; i++) {
64 err = ab3100_set_register_interruptible(ab3100_data, 63 err = abx500_set_register_interruptible(dev, 0,
65 regs[i], buf[i]); 64 regs[i], buf[i]);
66 if (err) 65 if (err)
67 return err; 66 return err;
68 } 67 }
69 68
70 /* Set the flag to mark that the clock is now set */ 69 /* Set the flag to mark that the clock is now set */
71 return ab3100_mask_and_set_register_interruptible(ab3100_data, 70 return abx500_mask_and_set_register_interruptible(dev, 0,
72 AB3100_RTC, 71 AB3100_RTC,
73 0xFE, 0x01); 72 0x01, 0x01);
74 73
75} 74}
76 75
77static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm) 76static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm)
78{ 77{
79 struct ab3100 *ab3100_data = dev_get_drvdata(dev);
80 unsigned long time; 78 unsigned long time;
81 u8 rtcval; 79 u8 rtcval;
82 int err; 80 int err;
83 81
84 err = ab3100_get_register_interruptible(ab3100_data, 82 err = abx500_get_register_interruptible(dev, 0,
85 AB3100_RTC, &rtcval); 83 AB3100_RTC, &rtcval);
86 if (err) 84 if (err)
87 return err; 85 return err;
@@ -94,7 +92,7 @@ static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm)
94 u8 buf[6]; 92 u8 buf[6];
95 93
96 /* Read out time registers */ 94 /* Read out time registers */
97 err = ab3100_get_register_page_interruptible(ab3100_data, 95 err = abx500_get_register_page_interruptible(dev, 0,
98 AB3100_TI0, 96 AB3100_TI0,
99 buf, 6); 97 buf, 6);
100 if (err != 0) 98 if (err != 0)
@@ -114,7 +112,6 @@ static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm)
114 112
115static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) 113static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
116{ 114{
117 struct ab3100 *ab3100_data = dev_get_drvdata(dev);
118 unsigned long time; 115 unsigned long time;
119 u64 fat_time; 116 u64 fat_time;
120 u8 buf[6]; 117 u8 buf[6];
@@ -122,7 +119,7 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
122 int err; 119 int err;
123 120
124 /* Figure out if alarm is enabled or not */ 121 /* Figure out if alarm is enabled or not */
125 err = ab3100_get_register_interruptible(ab3100_data, 122 err = abx500_get_register_interruptible(dev, 0,
126 AB3100_RTC, &rtcval); 123 AB3100_RTC, &rtcval);
127 if (err) 124 if (err)
128 return err; 125 return err;
@@ -133,7 +130,7 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
133 /* No idea how this could be represented */ 130 /* No idea how this could be represented */
134 alarm->pending = 0; 131 alarm->pending = 0;
135 /* Read out alarm registers, only 4 bytes */ 132 /* Read out alarm registers, only 4 bytes */
136 err = ab3100_get_register_page_interruptible(ab3100_data, 133 err = abx500_get_register_page_interruptible(dev, 0,
137 AB3100_AL0, buf, 4); 134 AB3100_AL0, buf, 4);
138 if (err) 135 if (err)
139 return err; 136 return err;
@@ -148,7 +145,6 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
148 145
149static int ab3100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) 146static int ab3100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
150{ 147{
151 struct ab3100 *ab3100_data = dev_get_drvdata(dev);
152 u8 regs[] = {AB3100_AL0, AB3100_AL1, AB3100_AL2, AB3100_AL3}; 148 u8 regs[] = {AB3100_AL0, AB3100_AL1, AB3100_AL2, AB3100_AL3};
153 unsigned char buf[4]; 149 unsigned char buf[4];
154 unsigned long secs; 150 unsigned long secs;
@@ -165,21 +161,19 @@ static int ab3100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
165 161
166 /* Set the alarm */ 162 /* Set the alarm */
167 for (i = 0; i < 4; i++) { 163 for (i = 0; i < 4; i++) {
168 err = ab3100_set_register_interruptible(ab3100_data, 164 err = abx500_set_register_interruptible(dev, 0,
169 regs[i], buf[i]); 165 regs[i], buf[i]);
170 if (err) 166 if (err)
171 return err; 167 return err;
172 } 168 }
173 /* Then enable the alarm */ 169 /* Then enable the alarm */
174 return ab3100_mask_and_set_register_interruptible(ab3100_data, 170 return abx500_mask_and_set_register_interruptible(dev, 0,
175 AB3100_RTC, ~(1 << 2), 171 AB3100_RTC, (1 << 2),
176 alarm->enabled << 2); 172 alarm->enabled << 2);
177} 173}
178 174
179static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled) 175static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled)
180{ 176{
181 struct ab3100 *ab3100_data = dev_get_drvdata(dev);
182
183 /* 177 /*
184 * It's not possible to enable/disable the alarm IRQ for this RTC. 178 * It's not possible to enable/disable the alarm IRQ for this RTC.
185 * It does not actually trigger any IRQ: instead its only function is 179 * It does not actually trigger any IRQ: instead its only function is
@@ -188,12 +182,12 @@ static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled)
188 * and need to be handled there instead. 182 * and need to be handled there instead.
189 */ 183 */
190 if (enabled) 184 if (enabled)
191 return ab3100_mask_and_set_register_interruptible(ab3100_data, 185 return abx500_mask_and_set_register_interruptible(dev, 0,
192 AB3100_RTC, ~(1 << 2), 186 AB3100_RTC, (1 << 2),
193 1 << 2); 187 1 << 2);
194 else 188 else
195 return ab3100_mask_and_set_register_interruptible(ab3100_data, 189 return abx500_mask_and_set_register_interruptible(dev, 0,
196 AB3100_RTC, ~(1 << 2), 190 AB3100_RTC, (1 << 2),
197 0); 191 0);
198} 192}
199 193
@@ -210,10 +204,9 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
210 int err; 204 int err;
211 u8 regval; 205 u8 regval;
212 struct rtc_device *rtc; 206 struct rtc_device *rtc;
213 struct ab3100 *ab3100_data = platform_get_drvdata(pdev);
214 207
215 /* The first RTC register needs special treatment */ 208 /* The first RTC register needs special treatment */
216 err = ab3100_get_register_interruptible(ab3100_data, 209 err = abx500_get_register_interruptible(&pdev->dev, 0,
217 AB3100_RTC, &regval); 210 AB3100_RTC, &regval);
218 if (err) { 211 if (err) {
219 dev_err(&pdev->dev, "unable to read RTC register\n"); 212 dev_err(&pdev->dev, "unable to read RTC register\n");
@@ -231,7 +224,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
231 * This bit remains until RTC power is lost. 224 * This bit remains until RTC power is lost.
232 */ 225 */
233 regval = 1 | RTC_SETTING; 226 regval = 1 | RTC_SETTING;
234 err = ab3100_set_register_interruptible(ab3100_data, 227 err = abx500_set_register_interruptible(&pdev->dev, 0,
235 AB3100_RTC, regval); 228 AB3100_RTC, regval);
236 /* Ignore any error on this write */ 229 /* Ignore any error on this write */
237 } 230 }
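
One detail worth noting in the rtc-ab3100.c hunks: along with the accessor rename, the mask argument changes from ~(1 << 2) to (1 << 2), so the abx500 mask-and-set helper evidently takes the bits to update rather than the bits to preserve. A sketch of the read-modify-write that convention implies (illustration only, not the abx500 core's actual implementation):

	#include <linux/device.h>
	#include <linux/mfd/abx500.h>

	/* Example only: update the bits selected by mask, keep the rest. */
	static int example_mask_and_set(struct device *dev, u8 bank, u8 reg,
					u8 mask, u8 value)
	{
		u8 regval;
		int err;

		err = abx500_get_register_interruptible(dev, bank, reg, &regval);
		if (err)
			return err;

		regval = (regval & ~mask) | (value & mask);
		return abx500_set_register_interruptible(dev, bank, reg, regval);
	}
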
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
new file mode 100644
index 000000000000..2fda03125e55
--- /dev/null
+++ b/drivers/rtc/rtc-ab8500.c
@@ -0,0 +1,363 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License terms: GNU General Public License (GPL) version 2
5 * Author: Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>
6 *
7 * RTC clock driver for the RTC part of the AB8500 Power management chip.
8 * Based on RTC clock driver for the AB3100 Analog Baseband Chip by
9 * Linus Walleij <linus.walleij@stericsson.com>
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/platform_device.h>
16#include <linux/rtc.h>
17#include <linux/mfd/ab8500.h>
18#include <linux/delay.h>
19
20#define AB8500_RTC_SOFF_STAT_REG 0x0F00
21#define AB8500_RTC_CC_CONF_REG 0x0F01
22#define AB8500_RTC_READ_REQ_REG 0x0F02
23#define AB8500_RTC_WATCH_TSECMID_REG 0x0F03
24#define AB8500_RTC_WATCH_TSECHI_REG 0x0F04
25#define AB8500_RTC_WATCH_TMIN_LOW_REG 0x0F05
26#define AB8500_RTC_WATCH_TMIN_MID_REG 0x0F06
27#define AB8500_RTC_WATCH_TMIN_HI_REG 0x0F07
28#define AB8500_RTC_ALRM_MIN_LOW_REG 0x0F08
29#define AB8500_RTC_ALRM_MIN_MID_REG 0x0F09
30#define AB8500_RTC_ALRM_MIN_HI_REG 0x0F0A
31#define AB8500_RTC_STAT_REG 0x0F0B
32#define AB8500_RTC_BKUP_CHG_REG 0x0F0C
33#define AB8500_RTC_FORCE_BKUP_REG 0x0F0D
34#define AB8500_RTC_CALIB_REG 0x0F0E
35#define AB8500_RTC_SWITCH_STAT_REG 0x0F0F
36#define AB8500_REV_REG 0x1080
37
38/* RtcReadRequest bits */
39#define RTC_READ_REQUEST 0x01
40#define RTC_WRITE_REQUEST 0x02
41
42/* RtcCtrl bits */
43#define RTC_ALARM_ENA 0x04
44#define RTC_STATUS_DATA 0x01
45
46#define COUNTS_PER_SEC (0xF000 / 60)
47#define AB8500_RTC_EPOCH 2000
48
49static const unsigned long ab8500_rtc_time_regs[] = {
50 AB8500_RTC_WATCH_TMIN_HI_REG, AB8500_RTC_WATCH_TMIN_MID_REG,
51 AB8500_RTC_WATCH_TMIN_LOW_REG, AB8500_RTC_WATCH_TSECHI_REG,
52 AB8500_RTC_WATCH_TSECMID_REG
53};
54
55static const unsigned long ab8500_rtc_alarm_regs[] = {
56 AB8500_RTC_ALRM_MIN_HI_REG, AB8500_RTC_ALRM_MIN_MID_REG,
57 AB8500_RTC_ALRM_MIN_LOW_REG
58};
59
60/* Calculate the seconds from 1970 to 01-01-2000 00:00:00 */
61static unsigned long get_elapsed_seconds(int year)
62{
63 unsigned long secs;
64 struct rtc_time tm = {
65 .tm_year = year - 1900,
66 .tm_mday = 1,
67 };
68
69 /*
70 * This function calculates secs from 1970 and not from
71 * 1900, even if we supply the offset from year 1900.
72 */
73 rtc_tm_to_time(&tm, &secs);
74 return secs;
75}
76
77static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
78{
79 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
80 unsigned long timeout = jiffies + HZ;
81 int retval, i;
82 unsigned long mins, secs;
83 unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
84
85 /* Request a data read */
86 retval = ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG,
87 RTC_READ_REQUEST);
88 if (retval < 0)
89 return retval;
90
91 /* Early AB8500 chips will not clear the rtc read request bit */
92 if (ab8500->revision == 0) {
93 msleep(1);
94 } else {
95 /* Wait for some cycles after enabling the rtc read in ab8500 */
96 while (time_before(jiffies, timeout)) {
97 retval = ab8500_read(ab8500, AB8500_RTC_READ_REQ_REG);
98 if (retval < 0)
99 return retval;
100
101 if (!(retval & RTC_READ_REQUEST))
102 break;
103
104 msleep(1);
105 }
106 }
107
108 /* Read the Watchtime registers */
109 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) {
110 retval = ab8500_read(ab8500, ab8500_rtc_time_regs[i]);
111 if (retval < 0)
112 return retval;
113 buf[i] = retval;
114 }
115
116 mins = (buf[0] << 16) | (buf[1] << 8) | buf[2];
117
118 secs = (buf[3] << 8) | buf[4];
119 secs = secs / COUNTS_PER_SEC;
120 secs = secs + (mins * 60);
121
122 /* Add back the initially subtracted number of seconds */
123 secs += get_elapsed_seconds(AB8500_RTC_EPOCH);
124
125 rtc_time_to_tm(secs, tm);
126 return rtc_valid_tm(tm);
127}
128
129static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
130{
131 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
132 int retval, i;
133 unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)];
134 unsigned long no_secs, no_mins, secs = 0;
135
136 if (tm->tm_year < (AB8500_RTC_EPOCH - 1900)) {
137 dev_dbg(dev, "year should be equal to or greater than %d\n",
138 AB8500_RTC_EPOCH);
139 return -EINVAL;
140 }
141
142 /* Get the number of seconds since 1970 */
143 rtc_tm_to_time(tm, &secs);
144
145 /*
146 * Convert it to the number of seconds since 01-01-2000 00:00:00, since
147 * we only have a small counter in the RTC.
148 */
149 secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
150
151 no_mins = secs / 60;
152
153 no_secs = secs % 60;
154 /* Make the seconds count as per the RTC resolution */
155 no_secs = no_secs * COUNTS_PER_SEC;
156
157 buf[4] = no_secs & 0xFF;
158 buf[3] = (no_secs >> 8) & 0xFF;
159
160 buf[2] = no_mins & 0xFF;
161 buf[1] = (no_mins >> 8) & 0xFF;
162 buf[0] = (no_mins >> 16) & 0xFF;
163
164 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) {
165 retval = ab8500_write(ab8500, ab8500_rtc_time_regs[i], buf[i]);
166 if (retval < 0)
167 return retval;
168 }
169
170 /* Request a data write */
171 return ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG, RTC_WRITE_REQUEST);
172}
173
174static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
175{
176 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
177 int retval, i;
178 int rtc_ctrl;
179 unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
180 unsigned long secs, mins;
181
182 /* Check if the alarm is enabled or not */
183 rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG);
184 if (rtc_ctrl < 0)
185 return rtc_ctrl;
186
187 if (rtc_ctrl & RTC_ALARM_ENA)
188 alarm->enabled = 1;
189 else
190 alarm->enabled = 0;
191
192 alarm->pending = 0;
193
194 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) {
195 retval = ab8500_read(ab8500, ab8500_rtc_alarm_regs[i]);
196 if (retval < 0)
197 return retval;
198 buf[i] = retval;
199 }
200
201 mins = (buf[0] << 16) | (buf[1] << 8) | (buf[2]);
202 secs = mins * 60;
203
204 /* Add back the initially subtracted number of seconds */
205 secs += get_elapsed_seconds(AB8500_RTC_EPOCH);
206
207 rtc_time_to_tm(secs, &alarm->time);
208
209 return rtc_valid_tm(&alarm->time);
210}
211
212static int ab8500_rtc_irq_enable(struct device *dev, unsigned int enabled)
213{
214 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
215
216 return ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_ALARM_ENA,
217 enabled ? RTC_ALARM_ENA : 0);
218}
219
220static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
221{
222 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
223 int retval, i;
224 unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
225 unsigned long mins, secs = 0;
226
227 if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) {
228 dev_dbg(dev, "year should be equal to or greater than %d\n",
229 AB8500_RTC_EPOCH);
230 return -EINVAL;
231 }
232
233 /* Get the number of seconds since 1970 */
234 rtc_tm_to_time(&alarm->time, &secs);
235
236 /*
237 * Convert it to the number of seconds since 01-01-2000 00:00:00, since
238 * we only have a small counter in the RTC.
239 */
240 secs -= get_elapsed_seconds(AB8500_RTC_EPOCH);
241
242 mins = secs / 60;
243
244 buf[2] = mins & 0xFF;
245 buf[1] = (mins >> 8) & 0xFF;
246 buf[0] = (mins >> 16) & 0xFF;
247
248 /* Set the alarm time */
249 for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) {
250 retval = ab8500_write(ab8500, ab8500_rtc_alarm_regs[i], buf[i]);
251 if (retval < 0)
252 return retval;
253 }
254
255 return ab8500_rtc_irq_enable(dev, alarm->enabled);
256}
257
258static irqreturn_t rtc_alarm_handler(int irq, void *data)
259{
260 struct rtc_device *rtc = data;
261 unsigned long events = RTC_IRQF | RTC_AF;
262
263 dev_dbg(&rtc->dev, "%s\n", __func__);
264 rtc_update_irq(rtc, 1, events);
265
266 return IRQ_HANDLED;
267}
268
269static const struct rtc_class_ops ab8500_rtc_ops = {
270 .read_time = ab8500_rtc_read_time,
271 .set_time = ab8500_rtc_set_time,
272 .read_alarm = ab8500_rtc_read_alarm,
273 .set_alarm = ab8500_rtc_set_alarm,
274 .alarm_irq_enable = ab8500_rtc_irq_enable,
275};
276
277static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
278{
279 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
280 int err;
281 struct rtc_device *rtc;
282 int rtc_ctrl;
283 int irq;
284
285 irq = platform_get_irq_byname(pdev, "ALARM");
286 if (irq < 0)
287 return irq;
288
289 /* For RTC supply test */
290 err = ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_STATUS_DATA,
291 RTC_STATUS_DATA);
292 if (err < 0)
293 return err;
294
295 /* Wait for reset by the PorRtc */
296 msleep(1);
297
298 rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG);
299 if (rtc_ctrl < 0)
300 return rtc_ctrl;
301
302 /* Check if the RTC Supply fails */
303 if (!(rtc_ctrl & RTC_STATUS_DATA)) {
304 dev_err(&pdev->dev, "RTC supply failure\n");
305 return -ENODEV;
306 }
307
308 rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab8500_rtc_ops,
309 THIS_MODULE);
310 if (IS_ERR(rtc)) {
311 dev_err(&pdev->dev, "Registration failed\n");
312 err = PTR_ERR(rtc);
313 return err;
314 }
315
316 err = request_threaded_irq(irq, NULL, rtc_alarm_handler, 0,
317 "ab8500-rtc", rtc);
318 if (err < 0) {
319 rtc_device_unregister(rtc);
320 return err;
321 }
322
323 platform_set_drvdata(pdev, rtc);
324
325 return 0;
326}
327
328static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
329{
330 struct rtc_device *rtc = platform_get_drvdata(pdev);
331 int irq = platform_get_irq_byname(pdev, "ALARM");
332
333 free_irq(irq, rtc);
334 rtc_device_unregister(rtc);
335 platform_set_drvdata(pdev, NULL);
336
337 return 0;
338}
339
340static struct platform_driver ab8500_rtc_driver = {
341 .driver = {
342 .name = "ab8500-rtc",
343 .owner = THIS_MODULE,
344 },
345 .probe = ab8500_rtc_probe,
346 .remove = __devexit_p(ab8500_rtc_remove),
347};
348
349static int __init ab8500_rtc_init(void)
350{
351 return platform_driver_register(&ab8500_rtc_driver);
352}
353
354static void __exit ab8500_rtc_exit(void)
355{
356 platform_driver_unregister(&ab8500_rtc_driver);
357}
358
359module_init(ab8500_rtc_init);
360module_exit(ab8500_rtc_exit);
361MODULE_AUTHOR("Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>");
362MODULE_DESCRIPTION("AB8500 RTC Driver");
363MODULE_LICENSE("GPL v2");
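
The new rtc-ab8500.c driver keeps wall-clock time as a 24-bit minute counter plus a 16-bit sub-minute counter that ticks COUNTS_PER_SEC = 0xF000/60 = 1024 times per second, all relative to the year-2000 epoch handled by get_elapsed_seconds(). The arithmetic in ab8500_rtc_read_time()/ab8500_rtc_set_time() boils down to the following standalone sketch (plain C, register I/O and epoch offset omitted):

#include <stdint.h>
#include <stdio.h>

#define COUNTS_PER_SEC (0xF000 / 60)	/* 1024 ticks per second */

/* Split seconds-since-2000 into three minute bytes and two tick bytes. */
static void ab8500_encode(unsigned long secs_since_2000, uint8_t buf[5])
{
	unsigned long mins  = secs_since_2000 / 60;
	unsigned long ticks = (secs_since_2000 % 60) * COUNTS_PER_SEC;

	buf[0] = (mins >> 16) & 0xFF;	/* WATCH_TMIN_HI  */
	buf[1] = (mins >> 8)  & 0xFF;	/* WATCH_TMIN_MID */
	buf[2] =  mins        & 0xFF;	/* WATCH_TMIN_LOW */
	buf[3] = (ticks >> 8) & 0xFF;	/* WATCH_TSECHI   */
	buf[4] =  ticks       & 0xFF;	/* WATCH_TSECMID  */
}

/* Inverse of the above, as done by ab8500_rtc_read_time(). */
static unsigned long ab8500_decode(const uint8_t buf[5])
{
	unsigned long mins  = (buf[0] << 16) | (buf[1] << 8) | buf[2];
	unsigned long ticks = (buf[3] << 8) | buf[4];

	return mins * 60 + ticks / COUNTS_PER_SEC;
}

int main(void)
{
	uint8_t buf[5];

	ab8500_encode(123456789UL, buf);	/* roughly 3.9 years after 2000 */
	printf("round trip: %lu\n", ab8500_decode(buf));	/* prints 123456789 */
	return 0;
}
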
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 038095d99976..6dc4e6241418 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -595,10 +595,6 @@ static void wdt_disable(void)
595static ssize_t wdt_write(struct file *file, const char __user *buf, 595static ssize_t wdt_write(struct file *file, const char __user *buf,
596 size_t count, loff_t *ppos) 596 size_t count, loff_t *ppos)
597{ 597{
598 /* Can't seek (pwrite) on this device
599 if (ppos != &file->f_pos)
600 return -ESPIPE;
601 */
602 if (count) { 598 if (count) {
603 wdt_ping(); 599 wdt_ping();
604 return 1; 600 return 1;
@@ -707,7 +703,7 @@ static int wdt_open(struct inode *inode, struct file *file)
707 */ 703 */
708 wdt_is_open = 1; 704 wdt_is_open = 1;
709 unlock_kernel(); 705 unlock_kernel();
710 return 0; 706 return nonseekable_open(inode, file);
711 } 707 }
712 return -ENODEV; 708 return -ENODEV;
713} 709}
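
The rtc-m41t80.c watchdog hunk drops the commented-out ESPIPE check in wdt_write() and instead returns nonseekable_open() from wdt_open(); that call clears FMODE_LSEEK/FMODE_PREAD/FMODE_PWRITE, so the VFS refuses lseek() and positioned I/O before the driver ever sees them. The idiom, sketched with hypothetical example_* names:

#include <linux/fs.h>
#include <linux/module.h>

static int example_wdt_open(struct inode *inode, struct file *file)
{
	/* Mark the file non-seekable once; no per-write ppos checks needed. */
	return nonseekable_open(inode, file);
}

static ssize_t example_wdt_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	/* A real driver would ping the watchdog here. */
	if (count)
		return 1;
	return 0;
}

static const struct file_operations example_wdt_fops = {
	.owner	= THIS_MODULE,
	.open	= example_wdt_open,
	.write	= example_wdt_write,
	.llseek	= no_llseek,
};
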
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 0e86247d791e..33975e922d65 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1186,6 +1186,29 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1186 dasd_schedule_device_bh(device); 1186 dasd_schedule_device_bh(device);
1187} 1187}
1188 1188
1189enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
1190{
1191 struct dasd_device *device;
1192
1193 device = dasd_device_from_cdev_locked(cdev);
1194
1195 if (IS_ERR(device))
1196 goto out;
1197 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1198 device->state != device->target ||
1199 !device->discipline->handle_unsolicited_interrupt){
1200 dasd_put_device(device);
1201 goto out;
1202 }
1203
1204 dasd_device_clear_timer(device);
1205 device->discipline->handle_unsolicited_interrupt(device, irb);
1206 dasd_put_device(device);
1207out:
1208 return UC_TODO_RETRY;
1209}
1210EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
1211
1189/* 1212/*
1190 * If we have an error on a dasd_block layer request then we cancel 1213 * If we have an error on a dasd_block layer request then we cancel
1191 * and return all further requests from the same dasd_block as well. 1214 * and return all further requests from the same dasd_block as well.
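
dasd_generic_uc_handler() above is the DASD half of a new common-I/O hook for unit checks: the cio layer (see the ccwreq.c hunk further down) asks the owning ccw driver what to do and maps the answer onto its internal I/O status. The hook sits in struct ccw_driver; its declaration is not part of this excerpt, but reconstructed from the call sites it looks roughly like the sketch below (the real version belongs in arch/s390/include/asm/ccwdev.h and may differ in detail):

struct ccw_device;
struct irb;

enum uc_todo {
	UC_TODO_RETRY,			/* redrive the request as is */
	UC_TODO_RETRY_ON_NEW_PATH,	/* redrive, but on another channel path */
	UC_TODO_STOP,			/* give up on the request */
};

struct ccw_driver {
	/* ... existing members such as probe/remove/set_online ... */
	enum uc_todo (*uc_handler)(struct ccw_device *cdev, struct irb *irb);
};
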
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 5b1cd8d6e971..ab84da5592e8 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3436,6 +3436,7 @@ static struct ccw_driver dasd_eckd_driver = {
3436 .freeze = dasd_generic_pm_freeze, 3436 .freeze = dasd_generic_pm_freeze,
3437 .thaw = dasd_generic_restore_device, 3437 .thaw = dasd_generic_restore_device,
3438 .restore = dasd_generic_restore_device, 3438 .restore = dasd_generic_restore_device,
3439 .uc_handler = dasd_generic_uc_handler,
3439}; 3440};
3440 3441
3441/* 3442/*
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 32fac186ba3f..49b431d135e0 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -617,6 +617,7 @@ int dasd_generic_notify(struct ccw_device *, int);
617void dasd_generic_handle_state_change(struct dasd_device *); 617void dasd_generic_handle_state_change(struct dasd_device *);
618int dasd_generic_pm_freeze(struct ccw_device *); 618int dasd_generic_pm_freeze(struct ccw_device *);
619int dasd_generic_restore_device(struct ccw_device *); 619int dasd_generic_restore_device(struct ccw_device *);
620enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *);
620 621
621int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); 622int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
622char *dasd_get_sense(struct irb *); 623char *dasd_get_sense(struct irb *);
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5f97ea2ee6b1..97b25d68e3e7 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -123,8 +123,10 @@ ccwgroup_release (struct device *dev)
123 123
124 for (i = 0; i < gdev->count; i++) { 124 for (i = 0; i < gdev->count; i++) {
125 if (gdev->cdev[i]) { 125 if (gdev->cdev[i]) {
126 spin_lock_irq(gdev->cdev[i]->ccwlock);
126 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) 127 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
127 dev_set_drvdata(&gdev->cdev[i]->dev, NULL); 128 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
129 spin_unlock_irq(gdev->cdev[i]->ccwlock);
128 put_device(&gdev->cdev[i]->dev); 130 put_device(&gdev->cdev[i]->dev);
129 } 131 }
130 } 132 }
@@ -262,11 +264,14 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
262 goto error; 264 goto error;
263 } 265 }
264 /* Don't allow a device to belong to more than one group. */ 266 /* Don't allow a device to belong to more than one group. */
267 spin_lock_irq(gdev->cdev[i]->ccwlock);
265 if (dev_get_drvdata(&gdev->cdev[i]->dev)) { 268 if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
269 spin_unlock_irq(gdev->cdev[i]->ccwlock);
266 rc = -EINVAL; 270 rc = -EINVAL;
267 goto error; 271 goto error;
268 } 272 }
269 dev_set_drvdata(&gdev->cdev[i]->dev, gdev); 273 dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
274 spin_unlock_irq(gdev->cdev[i]->ccwlock);
270 } 275 }
271 /* Check for sufficient number of bus ids. */ 276 /* Check for sufficient number of bus ids. */
272 if (i < num_devices && !curr_buf) { 277 if (i < num_devices && !curr_buf) {
@@ -303,8 +308,10 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
303error: 308error:
304 for (i = 0; i < num_devices; i++) 309 for (i = 0; i < num_devices; i++)
305 if (gdev->cdev[i]) { 310 if (gdev->cdev[i]) {
311 spin_lock_irq(gdev->cdev[i]->ccwlock);
306 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) 312 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
307 dev_set_drvdata(&gdev->cdev[i]->dev, NULL); 313 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
314 spin_unlock_irq(gdev->cdev[i]->ccwlock);
308 put_device(&gdev->cdev[i]->dev); 315 put_device(&gdev->cdev[i]->dev);
309 gdev->cdev[i] = NULL; 316 gdev->cdev[i] = NULL;
310 } 317 }
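
The ccwgroup.c hunks put every drvdata check-and-set of the member ccw devices under that device's ccwlock, so a racing ccwgroup_create_from_string() cannot claim the same ccw_device for two groups and ccwgroup_release() cannot clear a pointer a new owner just installed. The claim pattern, reduced to a sketch with example_* placeholders and only the two fields the sketch needs:

#include <linux/device.h>
#include <linux/spinlock.h>

struct example_ccw_device {
	spinlock_t *ccwlock;		/* per-device lock, as on ccw_device */
	struct device dev;
};

/* Claim cdev for gdev unless somebody else already owns it. */
static int example_claim(struct example_ccw_device *cdev, void *gdev)
{
	int ret = 0;

	spin_lock_irq(cdev->ccwlock);
	if (dev_get_drvdata(&cdev->dev))
		ret = -EINVAL;		/* already grouped elsewhere */
	else
		dev_set_drvdata(&cdev->dev, gdev);
	spin_unlock_irq(cdev->ccwlock);

	return ret;
}
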
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 37df42af05ec..7f206ed44fdf 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -159,6 +159,7 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
159{ 159{
160 struct irb *irb = &cdev->private->irb; 160 struct irb *irb = &cdev->private->irb;
161 struct cmd_scsw *scsw = &irb->scsw.cmd; 161 struct cmd_scsw *scsw = &irb->scsw.cmd;
162 enum uc_todo todo;
162 163
163 /* Perform BASIC SENSE if needed. */ 164 /* Perform BASIC SENSE if needed. */
164 if (ccw_device_accumulate_and_sense(cdev, lcirb)) 165 if (ccw_device_accumulate_and_sense(cdev, lcirb))
@@ -178,6 +179,20 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
178 /* Check for command reject. */ 179 /* Check for command reject. */
179 if (irb->ecw[0] & SNS0_CMD_REJECT) 180 if (irb->ecw[0] & SNS0_CMD_REJECT)
180 return IO_REJECTED; 181 return IO_REJECTED;
182 /* Ask the driver what to do */
183 if (cdev->drv && cdev->drv->uc_handler) {
184 todo = cdev->drv->uc_handler(cdev, lcirb);
185 switch (todo) {
186 case UC_TODO_RETRY:
187 return IO_STATUS_ERROR;
188 case UC_TODO_RETRY_ON_NEW_PATH:
189 return IO_PATH_ERROR;
190 case UC_TODO_STOP:
191 return IO_REJECTED;
192 default:
193 return IO_STATUS_ERROR;
194 }
195 }
181 /* Assume that unexpected SENSE data implies an error. */ 196 /* Assume that unexpected SENSE data implies an error. */
182 return IO_STATUS_ERROR; 197 return IO_STATUS_ERROR;
183 } 198 }
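
ccwreq_status() now consults the driver before treating unexpected sense data as a plain error; the three uc_todo answers map onto IO_STATUS_ERROR (retry as is), IO_PATH_ERROR (retry on another path) and IO_REJECTED (give up). A hypothetical driver callback might look like the sketch below; the sense-bit tests are made up purely for illustration:

/* Illustrative only: decide how cio should continue after a unit check. */
static enum uc_todo example_uc_handler(struct ccw_device *cdev,
				       struct irb *irb)
{
	/* e.g. a transient/long-busy condition: just drive a retry */
	if (irb->ecw[0] & 0x20)			/* made-up sense bit */
		return UC_TODO_RETRY;

	/* e.g. a path-related condition: retry, but on another path */
	if (irb->ecw[1] & 0x01)			/* made-up sense bit */
		return UC_TODO_RETRY_ON_NEW_PATH;

	/* anything else: stop and report the failure */
	return UC_TODO_STOP;
}
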
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 759262792633..fac06155773f 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -23,21 +23,6 @@ struct tpi_info {
23 * Some S390 specific IO instructions as inline 23 * Some S390 specific IO instructions as inline
24 */ 24 */
25 25
26static inline int stsch(struct subchannel_id schid, struct schib *addr)
27{
28 register struct subchannel_id reg1 asm ("1") = schid;
29 int ccode;
30
31 asm volatile(
32 " stsch 0(%3)\n"
33 " ipm %0\n"
34 " srl %0,28"
35 : "=d" (ccode), "=m" (*addr)
36 : "d" (reg1), "a" (addr)
37 : "cc");
38 return ccode;
39}
40
41static inline int stsch_err(struct subchannel_id schid, struct schib *addr) 26static inline int stsch_err(struct subchannel_id schid, struct schib *addr)
42{ 27{
43 register struct subchannel_id reg1 asm ("1") = schid; 28 register struct subchannel_id reg1 asm ("1") = schid;
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 308541ff85cf..1bb5d3f0e260 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -1,34 +1,31 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/mm.h>
3#include <linux/slab.h>
4#include <linux/blkdev.h>
5#include <linux/init.h> 2#include <linux/init.h>
6#include <linux/interrupt.h> 3#include <linux/interrupt.h>
4#include <linux/mm.h>
5#include <linux/slab.h>
6#include <linux/spinlock.h>
7#include <linux/zorro.h>
7 8
8#include <asm/setup.h>
9#include <asm/page.h> 9#include <asm/page.h>
10#include <asm/pgtable.h> 10#include <asm/pgtable.h>
11#include <asm/amigaints.h> 11#include <asm/amigaints.h>
12#include <asm/amigahw.h> 12#include <asm/amigahw.h>
13#include <linux/zorro.h>
14#include <asm/irq.h>
15#include <linux/spinlock.h>
16 13
17#include "scsi.h" 14#include "scsi.h"
18#include <scsi/scsi_host.h>
19#include "wd33c93.h" 15#include "wd33c93.h"
20#include "a2091.h" 16#include "a2091.h"
21 17
22#include <linux/stat.h>
23
24 18
25static int a2091_release(struct Scsi_Host *instance); 19struct a2091_hostdata {
20 struct WD33C93_hostdata wh;
21 struct a2091_scsiregs *regs;
22};
26 23
27static irqreturn_t a2091_intr(int irq, void *data) 24static irqreturn_t a2091_intr(int irq, void *data)
28{ 25{
29 struct Scsi_Host *instance = data; 26 struct Scsi_Host *instance = data;
30 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); 27 struct a2091_hostdata *hdata = shost_priv(instance);
31 unsigned int status = regs->ISTR; 28 unsigned int status = hdata->regs->ISTR;
32 unsigned long flags; 29 unsigned long flags;
33 30
34 if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS)) 31 if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS))
@@ -43,38 +40,39 @@ static irqreturn_t a2091_intr(int irq, void *data)
43static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 40static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
44{ 41{
45 struct Scsi_Host *instance = cmd->device->host; 42 struct Scsi_Host *instance = cmd->device->host;
46 struct WD33C93_hostdata *hdata = shost_priv(instance); 43 struct a2091_hostdata *hdata = shost_priv(instance);
47 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); 44 struct WD33C93_hostdata *wh = &hdata->wh;
45 struct a2091_scsiregs *regs = hdata->regs;
48 unsigned short cntr = CNTR_PDMD | CNTR_INTEN; 46 unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
49 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 47 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
50 48
51 /* don't allow DMA if the physical address is bad */ 49 /* don't allow DMA if the physical address is bad */
52 if (addr & A2091_XFER_MASK) { 50 if (addr & A2091_XFER_MASK) {
53 hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; 51 wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
54 hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len, 52 wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
55 GFP_KERNEL); 53 GFP_KERNEL);
56 54
57 /* can't allocate memory; use PIO */ 55 /* can't allocate memory; use PIO */
58 if (!hdata->dma_bounce_buffer) { 56 if (!wh->dma_bounce_buffer) {
59 hdata->dma_bounce_len = 0; 57 wh->dma_bounce_len = 0;
60 return 1; 58 return 1;
61 } 59 }
62 60
63 /* get the physical address of the bounce buffer */ 61 /* get the physical address of the bounce buffer */
64 addr = virt_to_bus(hdata->dma_bounce_buffer); 62 addr = virt_to_bus(wh->dma_bounce_buffer);
65 63
66 /* the bounce buffer may not be in the first 16M of physmem */ 64 /* the bounce buffer may not be in the first 16M of physmem */
67 if (addr & A2091_XFER_MASK) { 65 if (addr & A2091_XFER_MASK) {
68 /* we could use chipmem... maybe later */ 66 /* we could use chipmem... maybe later */
69 kfree(hdata->dma_bounce_buffer); 67 kfree(wh->dma_bounce_buffer);
70 hdata->dma_bounce_buffer = NULL; 68 wh->dma_bounce_buffer = NULL;
71 hdata->dma_bounce_len = 0; 69 wh->dma_bounce_len = 0;
72 return 1; 70 return 1;
73 } 71 }
74 72
75 if (!dir_in) { 73 if (!dir_in) {
76 /* copy to bounce buffer for a write */ 74 /* copy to bounce buffer for a write */
77 memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, 75 memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
78 cmd->SCp.this_residual); 76 cmd->SCp.this_residual);
79 } 77 }
80 } 78 }
@@ -84,7 +82,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
84 cntr |= CNTR_DDIR; 82 cntr |= CNTR_DDIR;
85 83
86 /* remember direction */ 84 /* remember direction */
87 hdata->dma_dir = dir_in; 85 wh->dma_dir = dir_in;
88 86
89 regs->CNTR = cntr; 87 regs->CNTR = cntr;
90 88
@@ -108,20 +106,21 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
108static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, 106static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
109 int status) 107 int status)
110{ 108{
111 struct WD33C93_hostdata *hdata = shost_priv(instance); 109 struct a2091_hostdata *hdata = shost_priv(instance);
112 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); 110 struct WD33C93_hostdata *wh = &hdata->wh;
111 struct a2091_scsiregs *regs = hdata->regs;
113 112
114 /* disable SCSI interrupts */ 113 /* disable SCSI interrupts */
115 unsigned short cntr = CNTR_PDMD; 114 unsigned short cntr = CNTR_PDMD;
116 115
117 if (!hdata->dma_dir) 116 if (!wh->dma_dir)
118 cntr |= CNTR_DDIR; 117 cntr |= CNTR_DDIR;
119 118
120 /* disable SCSI interrupts */ 119 /* disable SCSI interrupts */
121 regs->CNTR = cntr; 120 regs->CNTR = cntr;
122 121
123 /* flush if we were reading */ 122 /* flush if we were reading */
124 if (hdata->dma_dir) { 123 if (wh->dma_dir) {
125 regs->FLUSH = 1; 124 regs->FLUSH = 1;
126 while (!(regs->ISTR & ISTR_FE_FLG)) 125 while (!(regs->ISTR & ISTR_FE_FLG))
127 ; 126 ;
@@ -137,95 +136,37 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
137 regs->CNTR = CNTR_PDMD | CNTR_INTEN; 136 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
138 137
139 /* copy from a bounce buffer, if necessary */ 138 /* copy from a bounce buffer, if necessary */
140 if (status && hdata->dma_bounce_buffer) { 139 if (status && wh->dma_bounce_buffer) {
141 if (hdata->dma_dir) 140 if (wh->dma_dir)
142 memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer, 141 memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
143 SCpnt->SCp.this_residual); 142 SCpnt->SCp.this_residual);
144 kfree(hdata->dma_bounce_buffer); 143 kfree(wh->dma_bounce_buffer);
145 hdata->dma_bounce_buffer = NULL; 144 wh->dma_bounce_buffer = NULL;
146 hdata->dma_bounce_len = 0; 145 wh->dma_bounce_len = 0;
147 }
148}
149
150static int __init a2091_detect(struct scsi_host_template *tpnt)
151{
152 static unsigned char called = 0;
153 struct Scsi_Host *instance;
154 unsigned long address;
155 struct zorro_dev *z = NULL;
156 wd33c93_regs wdregs;
157 a2091_scsiregs *regs;
158 struct WD33C93_hostdata *hdata;
159 int num_a2091 = 0;
160
161 if (!MACH_IS_AMIGA || called)
162 return 0;
163 called = 1;
164
165 tpnt->proc_name = "A2091";
166 tpnt->proc_info = &wd33c93_proc_info;
167
168 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
169 if (z->id != ZORRO_PROD_CBM_A590_A2091_1 &&
170 z->id != ZORRO_PROD_CBM_A590_A2091_2)
171 continue;
172 address = z->resource.start;
173 if (!request_mem_region(address, 256, "wd33c93"))
174 continue;
175
176 instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
177 if (instance == NULL)
178 goto release;
179 instance->base = ZTWO_VADDR(address);
180 instance->irq = IRQ_AMIGA_PORTS;
181 instance->unique_id = z->slotaddr;
182 regs = (a2091_scsiregs *)(instance->base);
183 regs->DAWR = DAWR_A2091;
184 wdregs.SASR = &regs->SASR;
185 wdregs.SCMD = &regs->SCMD;
186 hdata = shost_priv(instance);
187 hdata->no_sync = 0xff;
188 hdata->fast = 0;
189 hdata->dma_mode = CTRL_DMA;
190 wd33c93_init(instance, wdregs, dma_setup, dma_stop,
191 WD33C93_FS_8_10);
192 if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED,
193 "A2091 SCSI", instance))
194 goto unregister;
195 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
196 num_a2091++;
197 continue;
198
199unregister:
200 scsi_unregister(instance);
201release:
202 release_mem_region(address, 256);
203 } 146 }
204
205 return num_a2091;
206} 147}
207 148
208static int a2091_bus_reset(struct scsi_cmnd *cmd) 149static int a2091_bus_reset(struct scsi_cmnd *cmd)
209{ 150{
151 struct Scsi_Host *instance = cmd->device->host;
152
210 /* FIXME perform bus-specific reset */ 153 /* FIXME perform bus-specific reset */
211 154
212 /* FIXME 2: kill this function, and let midlayer fall back 155 /* FIXME 2: kill this function, and let midlayer fall back
213 to the same action, calling wd33c93_host_reset() */ 156 to the same action, calling wd33c93_host_reset() */
214 157
215 spin_lock_irq(cmd->device->host->host_lock); 158 spin_lock_irq(instance->host_lock);
216 wd33c93_host_reset(cmd); 159 wd33c93_host_reset(cmd);
217 spin_unlock_irq(cmd->device->host->host_lock); 160 spin_unlock_irq(instance->host_lock);
218 161
219 return SUCCESS; 162 return SUCCESS;
220} 163}
221 164
222#define HOSTS_C 165static struct scsi_host_template a2091_scsi_template = {
223 166 .module = THIS_MODULE,
224static struct scsi_host_template driver_template = {
225 .proc_name = "A2901",
226 .name = "Commodore A2091/A590 SCSI", 167 .name = "Commodore A2091/A590 SCSI",
227 .detect = a2091_detect, 168 .proc_info = wd33c93_proc_info,
228 .release = a2091_release, 169 .proc_name = "A2901",
229 .queuecommand = wd33c93_queuecommand, 170 .queuecommand = wd33c93_queuecommand,
230 .eh_abort_handler = wd33c93_abort, 171 .eh_abort_handler = wd33c93_abort,
231 .eh_bus_reset_handler = a2091_bus_reset, 172 .eh_bus_reset_handler = a2091_bus_reset,
@@ -237,19 +178,103 @@ static struct scsi_host_template driver_template = {
237 .use_clustering = DISABLE_CLUSTERING 178 .use_clustering = DISABLE_CLUSTERING
238}; 179};
239 180
181static int __devinit a2091_probe(struct zorro_dev *z,
182 const struct zorro_device_id *ent)
183{
184 struct Scsi_Host *instance;
185 int error;
186 struct a2091_scsiregs *regs;
187 wd33c93_regs wdregs;
188 struct a2091_hostdata *hdata;
240 189
241#include "scsi_module.c" 190 if (!request_mem_region(z->resource.start, 256, "wd33c93"))
191 return -EBUSY;
242 192
243static int a2091_release(struct Scsi_Host *instance) 193 instance = scsi_host_alloc(&a2091_scsi_template,
194 sizeof(struct a2091_hostdata));
195 if (!instance) {
196 error = -ENOMEM;
197 goto fail_alloc;
198 }
199
200 instance->irq = IRQ_AMIGA_PORTS;
201 instance->unique_id = z->slotaddr;
202
203 regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start);
204 regs->DAWR = DAWR_A2091;
205
206 wdregs.SASR = &regs->SASR;
207 wdregs.SCMD = &regs->SCMD;
208
209 hdata = shost_priv(instance);
210 hdata->wh.no_sync = 0xff;
211 hdata->wh.fast = 0;
212 hdata->wh.dma_mode = CTRL_DMA;
213 hdata->regs = regs;
214
215 wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10);
216 error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED,
217 "A2091 SCSI", instance);
218 if (error)
219 goto fail_irq;
220
221 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
222
223 error = scsi_add_host(instance, NULL);
224 if (error)
225 goto fail_host;
226
227 zorro_set_drvdata(z, instance);
228
229 scsi_scan_host(instance);
230 return 0;
231
232fail_host:
233 free_irq(IRQ_AMIGA_PORTS, instance);
234fail_irq:
235 scsi_host_put(instance);
236fail_alloc:
237 release_mem_region(z->resource.start, 256);
238 return error;
239}
240
241static void __devexit a2091_remove(struct zorro_dev *z)
244{ 242{
245#ifdef MODULE 243 struct Scsi_Host *instance = zorro_get_drvdata(z);
246 a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); 244 struct a2091_hostdata *hdata = shost_priv(instance);
247 245
248 regs->CNTR = 0; 246 hdata->regs->CNTR = 0;
249 release_mem_region(ZTWO_PADDR(instance->base), 256); 247 scsi_remove_host(instance);
250 free_irq(IRQ_AMIGA_PORTS, instance); 248 free_irq(IRQ_AMIGA_PORTS, instance);
251#endif 249 scsi_host_put(instance);
252 return 1; 250 release_mem_region(z->resource.start, 256);
251}
252
253static struct zorro_device_id a2091_zorro_tbl[] __devinitdata = {
254 { ZORRO_PROD_CBM_A590_A2091_1 },
255 { ZORRO_PROD_CBM_A590_A2091_2 },
256 { 0 }
257};
258MODULE_DEVICE_TABLE(zorro, a2091_zorro_tbl);
259
260static struct zorro_driver a2091_driver = {
261 .name = "a2091",
262 .id_table = a2091_zorro_tbl,
263 .probe = a2091_probe,
264 .remove = __devexit_p(a2091_remove),
265};
266
267static int __init a2091_init(void)
268{
269 return zorro_register_driver(&a2091_driver);
270}
271module_init(a2091_init);
272
273static void __exit a2091_exit(void)
274{
275 zorro_unregister_driver(&a2091_driver);
253} 276}
277module_exit(a2091_exit);
254 278
279MODULE_DESCRIPTION("Commodore A2091/A590 SCSI");
255MODULE_LICENSE("GPL"); 280MODULE_LICENSE("GPL");
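
The a2091 rewrite replaces the old detect()/release() probing with a zorro_driver and moves from instance->base casts to real per-host private data: scsi_host_alloc() reserves sizeof(struct a2091_hostdata) behind the Scsi_Host, and shost_priv() hands it back wherever only the scsi_cmnd or Scsi_Host is in reach. The pattern in isolation, with example_* placeholders:

#include <scsi/scsi_host.h>

struct example_hostdata {
	int dma_dir;			/* core-driver state would live here */
	void __iomem *regs;		/* plus a pointer to the board registers */
};

static int example_probe_body(struct scsi_host_template *tmpl,
			      void __iomem *regs)
{
	struct Scsi_Host *instance;
	struct example_hostdata *hdata;

	/* second argument = size of the private area behind the host */
	instance = scsi_host_alloc(tmpl, sizeof(*hdata));
	if (!instance)
		return -ENOMEM;

	hdata = shost_priv(instance);	/* points into that private area */
	hdata->regs = regs;
	return 0;
}
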
diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h
index 1c3daa1fd754..794b8e65c711 100644
--- a/drivers/scsi/a2091.h
+++ b/drivers/scsi/a2091.h
@@ -25,7 +25,7 @@
25 */ 25 */
26#define A2091_XFER_MASK (0xff000001) 26#define A2091_XFER_MASK (0xff000001)
27 27
28typedef struct { 28struct a2091_scsiregs {
29 unsigned char pad1[64]; 29 unsigned char pad1[64];
30 volatile unsigned short ISTR; 30 volatile unsigned short ISTR;
31 volatile unsigned short CNTR; 31 volatile unsigned short CNTR;
@@ -44,7 +44,7 @@ typedef struct {
44 volatile unsigned short CINT; 44 volatile unsigned short CINT;
45 unsigned char pad7[2]; 45 unsigned char pad7[2];
46 volatile unsigned short FLUSH; 46 volatile unsigned short FLUSH;
47} a2091_scsiregs; 47};
48 48
49#define DAWR_A2091 (3) 49#define DAWR_A2091 (3)
50 50
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index bc6eb69f5fd0..d9468027fb61 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -1,53 +1,52 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/mm.h> 2#include <linux/mm.h>
3#include <linux/slab.h>
4#include <linux/blkdev.h>
5#include <linux/ioport.h> 3#include <linux/ioport.h>
6#include <linux/init.h> 4#include <linux/init.h>
5#include <linux/slab.h>
7#include <linux/spinlock.h> 6#include <linux/spinlock.h>
8#include <linux/interrupt.h> 7#include <linux/interrupt.h>
8#include <linux/platform_device.h>
9 9
10#include <asm/setup.h>
11#include <asm/page.h> 10#include <asm/page.h>
12#include <asm/pgtable.h> 11#include <asm/pgtable.h>
13#include <asm/amigaints.h> 12#include <asm/amigaints.h>
14#include <asm/amigahw.h> 13#include <asm/amigahw.h>
15#include <asm/irq.h>
16 14
17#include "scsi.h" 15#include "scsi.h"
18#include <scsi/scsi_host.h>
19#include "wd33c93.h" 16#include "wd33c93.h"
20#include "a3000.h" 17#include "a3000.h"
21 18
22#include <linux/stat.h>
23
24 19
25#define DMA(ptr) ((a3000_scsiregs *)((ptr)->base)) 20struct a3000_hostdata {
26 21 struct WD33C93_hostdata wh;
27static struct Scsi_Host *a3000_host = NULL; 22 struct a3000_scsiregs *regs;
28 23};
29static int a3000_release(struct Scsi_Host *instance);
30 24
31static irqreturn_t a3000_intr(int irq, void *dummy) 25static irqreturn_t a3000_intr(int irq, void *data)
32{ 26{
27 struct Scsi_Host *instance = data;
28 struct a3000_hostdata *hdata = shost_priv(instance);
29 unsigned int status = hdata->regs->ISTR;
33 unsigned long flags; 30 unsigned long flags;
34 unsigned int status = DMA(a3000_host)->ISTR;
35 31
36 if (!(status & ISTR_INT_P)) 32 if (!(status & ISTR_INT_P))
37 return IRQ_NONE; 33 return IRQ_NONE;
38 if (status & ISTR_INTS) { 34 if (status & ISTR_INTS) {
39 spin_lock_irqsave(a3000_host->host_lock, flags); 35 spin_lock_irqsave(instance->host_lock, flags);
40 wd33c93_intr(a3000_host); 36 wd33c93_intr(instance);
41 spin_unlock_irqrestore(a3000_host->host_lock, flags); 37 spin_unlock_irqrestore(instance->host_lock, flags);
42 return IRQ_HANDLED; 38 return IRQ_HANDLED;
43 } 39 }
44 printk("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status); 40 pr_warning("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status);
45 return IRQ_NONE; 41 return IRQ_NONE;
46} 42}
47 43
48static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 44static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
49{ 45{
50 struct WD33C93_hostdata *hdata = shost_priv(a3000_host); 46 struct Scsi_Host *instance = cmd->device->host;
47 struct a3000_hostdata *hdata = shost_priv(instance);
48 struct WD33C93_hostdata *wh = &hdata->wh;
49 struct a3000_scsiregs *regs = hdata->regs;
51 unsigned short cntr = CNTR_PDMD | CNTR_INTEN; 50 unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
52 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 51 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
53 52
@@ -58,23 +57,23 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
58 * buffer 57 * buffer
59 */ 58 */
60 if (addr & A3000_XFER_MASK) { 59 if (addr & A3000_XFER_MASK) {
61 hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; 60 wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
62 hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len, 61 wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
63 GFP_KERNEL); 62 GFP_KERNEL);
64 63
65 /* can't allocate memory; use PIO */ 64 /* can't allocate memory; use PIO */
66 if (!hdata->dma_bounce_buffer) { 65 if (!wh->dma_bounce_buffer) {
67 hdata->dma_bounce_len = 0; 66 wh->dma_bounce_len = 0;
68 return 1; 67 return 1;
69 } 68 }
70 69
71 if (!dir_in) { 70 if (!dir_in) {
72 /* copy to bounce buffer for a write */ 71 /* copy to bounce buffer for a write */
73 memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, 72 memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
74 cmd->SCp.this_residual); 73 cmd->SCp.this_residual);
75 } 74 }
76 75
77 addr = virt_to_bus(hdata->dma_bounce_buffer); 76 addr = virt_to_bus(wh->dma_bounce_buffer);
78 } 77 }
79 78
80 /* setup dma direction */ 79 /* setup dma direction */
@@ -82,12 +81,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
82 cntr |= CNTR_DDIR; 81 cntr |= CNTR_DDIR;
83 82
84 /* remember direction */ 83 /* remember direction */
85 hdata->dma_dir = dir_in; 84 wh->dma_dir = dir_in;
86 85
87 DMA(a3000_host)->CNTR = cntr; 86 regs->CNTR = cntr;
88 87
89 /* setup DMA *physical* address */ 88 /* setup DMA *physical* address */
90 DMA(a3000_host)->ACR = addr; 89 regs->ACR = addr;
91 90
92 if (dir_in) { 91 if (dir_in) {
93 /* invalidate any cache */ 92 /* invalidate any cache */
@@ -99,7 +98,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
99 98
100 /* start DMA */ 99 /* start DMA */
101 mb(); /* make sure setup is completed */ 100 mb(); /* make sure setup is completed */
102 DMA(a3000_host)->ST_DMA = 1; 101 regs->ST_DMA = 1;
103 mb(); /* make sure DMA has started before next IO */ 102 mb(); /* make sure DMA has started before next IO */
104 103
105 /* return success */ 104 /* return success */
@@ -109,22 +108,24 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
109static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, 108static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
110 int status) 109 int status)
111{ 110{
112 struct WD33C93_hostdata *hdata = shost_priv(instance); 111 struct a3000_hostdata *hdata = shost_priv(instance);
112 struct WD33C93_hostdata *wh = &hdata->wh;
113 struct a3000_scsiregs *regs = hdata->regs;
113 114
114 /* disable SCSI interrupts */ 115 /* disable SCSI interrupts */
115 unsigned short cntr = CNTR_PDMD; 116 unsigned short cntr = CNTR_PDMD;
116 117
117 if (!hdata->dma_dir) 118 if (!wh->dma_dir)
118 cntr |= CNTR_DDIR; 119 cntr |= CNTR_DDIR;
119 120
120 DMA(instance)->CNTR = cntr; 121 regs->CNTR = cntr;
121 mb(); /* make sure CNTR is updated before next IO */ 122 mb(); /* make sure CNTR is updated before next IO */
122 123
123 /* flush if we were reading */ 124 /* flush if we were reading */
124 if (hdata->dma_dir) { 125 if (wh->dma_dir) {
125 DMA(instance)->FLUSH = 1; 126 regs->FLUSH = 1;
126 mb(); /* don't allow prefetch */ 127 mb(); /* don't allow prefetch */
127 while (!(DMA(instance)->ISTR & ISTR_FE_FLG)) 128 while (!(regs->ISTR & ISTR_FE_FLG))
128 barrier(); 129 barrier();
129 mb(); /* no IO until FLUSH is done */ 130 mb(); /* no IO until FLUSH is done */
130 } 131 }
@@ -133,96 +134,54 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
133 /* I think that this CINT is only necessary if you are 134 /* I think that this CINT is only necessary if you are
134 * using the terminal count features. HM 7 Mar 1994 135 * using the terminal count features. HM 7 Mar 1994
135 */ 136 */
136 DMA(instance)->CINT = 1; 137 regs->CINT = 1;
137 138
138 /* stop DMA */ 139 /* stop DMA */
139 DMA(instance)->SP_DMA = 1; 140 regs->SP_DMA = 1;
140 mb(); /* make sure DMA is stopped before next IO */ 141 mb(); /* make sure DMA is stopped before next IO */
141 142
142 /* restore the CONTROL bits (minus the direction flag) */ 143 /* restore the CONTROL bits (minus the direction flag) */
143 DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN; 144 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
144 mb(); /* make sure CNTR is updated before next IO */ 145 mb(); /* make sure CNTR is updated before next IO */
145 146
146 /* copy from a bounce buffer, if necessary */ 147 /* copy from a bounce buffer, if necessary */
147 if (status && hdata->dma_bounce_buffer) { 148 if (status && wh->dma_bounce_buffer) {
148 if (SCpnt) { 149 if (SCpnt) {
149 if (hdata->dma_dir && SCpnt) 150 if (wh->dma_dir && SCpnt)
150 memcpy(SCpnt->SCp.ptr, 151 memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
151 hdata->dma_bounce_buffer,
152 SCpnt->SCp.this_residual); 152 SCpnt->SCp.this_residual);
153 kfree(hdata->dma_bounce_buffer); 153 kfree(wh->dma_bounce_buffer);
154 hdata->dma_bounce_buffer = NULL; 154 wh->dma_bounce_buffer = NULL;
155 hdata->dma_bounce_len = 0; 155 wh->dma_bounce_len = 0;
156 } else { 156 } else {
157 kfree(hdata->dma_bounce_buffer); 157 kfree(wh->dma_bounce_buffer);
158 hdata->dma_bounce_buffer = NULL; 158 wh->dma_bounce_buffer = NULL;
159 hdata->dma_bounce_len = 0; 159 wh->dma_bounce_len = 0;
160 } 160 }
161 } 161 }
162} 162}
163 163
164static int __init a3000_detect(struct scsi_host_template *tpnt)
165{
166 wd33c93_regs regs;
167 struct WD33C93_hostdata *hdata;
168
169 if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(A3000_SCSI))
170 return 0;
171 if (!request_mem_region(0xDD0000, 256, "wd33c93"))
172 return 0;
173
174 tpnt->proc_name = "A3000";
175 tpnt->proc_info = &wd33c93_proc_info;
176
177 a3000_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
178 if (a3000_host == NULL)
179 goto fail_register;
180
181 a3000_host->base = ZTWO_VADDR(0xDD0000);
182 a3000_host->irq = IRQ_AMIGA_PORTS;
183 DMA(a3000_host)->DAWR = DAWR_A3000;
184 regs.SASR = &(DMA(a3000_host)->SASR);
185 regs.SCMD = &(DMA(a3000_host)->SCMD);
186 hdata = shost_priv(a3000_host);
187 hdata->no_sync = 0xff;
188 hdata->fast = 0;
189 hdata->dma_mode = CTRL_DMA;
190 wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15);
191 if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI",
192 a3000_intr))
193 goto fail_irq;
194 DMA(a3000_host)->CNTR = CNTR_PDMD | CNTR_INTEN;
195
196 return 1;
197
198fail_irq:
199 scsi_unregister(a3000_host);
200fail_register:
201 release_mem_region(0xDD0000, 256);
202 return 0;
203}
204
205static int a3000_bus_reset(struct scsi_cmnd *cmd) 164static int a3000_bus_reset(struct scsi_cmnd *cmd)
206{ 165{
166 struct Scsi_Host *instance = cmd->device->host;
167
207 /* FIXME perform bus-specific reset */ 168 /* FIXME perform bus-specific reset */
208 169
209 /* FIXME 2: kill this entire function, which should 170 /* FIXME 2: kill this entire function, which should
210 cause mid-layer to call wd33c93_host_reset anyway? */ 171 cause mid-layer to call wd33c93_host_reset anyway? */
211 172
212 spin_lock_irq(cmd->device->host->host_lock); 173 spin_lock_irq(instance->host_lock);
213 wd33c93_host_reset(cmd); 174 wd33c93_host_reset(cmd);
214 spin_unlock_irq(cmd->device->host->host_lock); 175 spin_unlock_irq(instance->host_lock);
215 176
216 return SUCCESS; 177 return SUCCESS;
217} 178}
218 179
219#define HOSTS_C 180static struct scsi_host_template amiga_a3000_scsi_template = {
220 181 .module = THIS_MODULE,
221static struct scsi_host_template driver_template = {
222 .proc_name = "A3000",
223 .name = "Amiga 3000 built-in SCSI", 182 .name = "Amiga 3000 built-in SCSI",
224 .detect = a3000_detect, 183 .proc_info = wd33c93_proc_info,
225 .release = a3000_release, 184 .proc_name = "A3000",
226 .queuecommand = wd33c93_queuecommand, 185 .queuecommand = wd33c93_queuecommand,
227 .eh_abort_handler = wd33c93_abort, 186 .eh_abort_handler = wd33c93_abort,
228 .eh_bus_reset_handler = a3000_bus_reset, 187 .eh_bus_reset_handler = a3000_bus_reset,
@@ -234,15 +193,104 @@ static struct scsi_host_template driver_template = {
234 .use_clustering = ENABLE_CLUSTERING 193 .use_clustering = ENABLE_CLUSTERING
235}; 194};
236 195
196static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
197{
198 struct resource *res;
199 struct Scsi_Host *instance;
200 int error;
201 struct a3000_scsiregs *regs;
202 wd33c93_regs wdregs;
203 struct a3000_hostdata *hdata;
204
205 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
206 if (!res)
207 return -ENODEV;
208
209 if (!request_mem_region(res->start, resource_size(res), "wd33c93"))
210 return -EBUSY;
211
212 instance = scsi_host_alloc(&amiga_a3000_scsi_template,
213 sizeof(struct a3000_hostdata));
214 if (!instance) {
215 error = -ENOMEM;
216 goto fail_alloc;
217 }
218
219 instance->irq = IRQ_AMIGA_PORTS;
237 220
238#include "scsi_module.c" 221 regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start);
222 regs->DAWR = DAWR_A3000;
223
224 wdregs.SASR = &regs->SASR;
225 wdregs.SCMD = &regs->SCMD;
226
227 hdata = shost_priv(instance);
228 hdata->wh.no_sync = 0xff;
229 hdata->wh.fast = 0;
230 hdata->wh.dma_mode = CTRL_DMA;
231 hdata->regs = regs;
232
233 wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_12_15);
234 error = request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED,
235 "A3000 SCSI", instance);
236 if (error)
237 goto fail_irq;
238
239 regs->CNTR = CNTR_PDMD | CNTR_INTEN;
240
241 error = scsi_add_host(instance, NULL);
242 if (error)
243 goto fail_host;
244
245 platform_set_drvdata(pdev, instance);
246
247 scsi_scan_host(instance);
248 return 0;
249
250fail_host:
251 free_irq(IRQ_AMIGA_PORTS, instance);
252fail_irq:
253 scsi_host_put(instance);
254fail_alloc:
255 release_mem_region(res->start, resource_size(res));
256 return error;
257}
258
259static int __exit amiga_a3000_scsi_remove(struct platform_device *pdev)
260{
261 struct Scsi_Host *instance = platform_get_drvdata(pdev);
262 struct a3000_hostdata *hdata = shost_priv(instance);
263 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
264
265 hdata->regs->CNTR = 0;
266 scsi_remove_host(instance);
267 free_irq(IRQ_AMIGA_PORTS, instance);
268 scsi_host_put(instance);
269 release_mem_region(res->start, resource_size(res));
270 return 0;
271}
272
273static struct platform_driver amiga_a3000_scsi_driver = {
274 .remove = __exit_p(amiga_a3000_scsi_remove),
275 .driver = {
276 .name = "amiga-a3000-scsi",
277 .owner = THIS_MODULE,
278 },
279};
280
281static int __init amiga_a3000_scsi_init(void)
282{
283 return platform_driver_probe(&amiga_a3000_scsi_driver,
284 amiga_a3000_scsi_probe);
285}
286module_init(amiga_a3000_scsi_init);
239 287
240static int a3000_release(struct Scsi_Host *instance) 288static void __exit amiga_a3000_scsi_exit(void)
241{ 289{
242 DMA(instance)->CNTR = 0; 290 platform_driver_unregister(&amiga_a3000_scsi_driver);
243 release_mem_region(0xDD0000, 256);
244 free_irq(IRQ_AMIGA_PORTS, a3000_intr);
245 return 1;
246} 291}
292module_exit(amiga_a3000_scsi_exit);
247 293
294MODULE_DESCRIPTION("Amiga 3000 built-in SCSI");
248MODULE_LICENSE("GPL"); 295MODULE_LICENSE("GPL");
296MODULE_ALIAS("platform:amiga-a3000-scsi");
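
Like the A2091, the A3000 adapter now binds to a device instead of poking hardware from detect(). Because the on-board SCSI can only ever be found once at boot, the driver uses platform_driver_probe(), which registers the driver, immediately probes whatever matching platform device the m68k Amiga platform code is expected to have registered by then, and never probes again; that is what lets amiga_a3000_scsi_probe() stay in __init and keeps struct platform_driver without a .probe member. The skeleton of the idiom, with example_* names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init example_probe(struct platform_device *pdev)
{
	/* runs exactly once, from example_init(); may live in __init memory */
	return 0;
}

static int __exit example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	/* no .probe on purpose: it is passed to platform_driver_probe() */
	.remove	= __exit_p(example_remove),
	.driver	= {
		.name	= "example-device",
		.owner	= THIS_MODULE,
	},
};

static int __init example_init(void)
{
	return platform_driver_probe(&example_driver, example_probe);
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_exit);
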
diff --git a/drivers/scsi/a3000.h b/drivers/scsi/a3000.h
index 684813ee378c..49db4a335aab 100644
--- a/drivers/scsi/a3000.h
+++ b/drivers/scsi/a3000.h
@@ -25,7 +25,7 @@
25 */ 25 */
26#define A3000_XFER_MASK (0x00000003) 26#define A3000_XFER_MASK (0x00000003)
27 27
28typedef struct { 28struct a3000_scsiregs {
29 unsigned char pad1[2]; 29 unsigned char pad1[2];
30 volatile unsigned short DAWR; 30 volatile unsigned short DAWR;
31 volatile unsigned int WTC; 31 volatile unsigned int WTC;
@@ -46,7 +46,7 @@ typedef struct {
46 volatile unsigned char SASR; 46 volatile unsigned char SASR;
47 unsigned char pad9; 47 unsigned char pad9;
48 volatile unsigned char SCMD; 48 volatile unsigned char SCMD;
49} a3000_scsiregs; 49};
50 50
51#define DAWR_A3000 (3) 51#define DAWR_A3000 (3)
52 52
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
index 11ae6be8aeaf..23c76f41883c 100644
--- a/drivers/scsi/a4000t.c
+++ b/drivers/scsi/a4000t.c
@@ -20,10 +20,6 @@
20 20
21#include "53c700.h" 21#include "53c700.h"
22 22
23MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / Kars de Jong <jongk@linux-m68k.org>");
24MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver");
25MODULE_LICENSE("GPL");
26
27 23
28static struct scsi_host_template a4000t_scsi_driver_template = { 24static struct scsi_host_template a4000t_scsi_driver_template = {
29 .name = "A4000T builtin SCSI", 25 .name = "A4000T builtin SCSI",
@@ -32,30 +28,35 @@ static struct scsi_host_template a4000t_scsi_driver_template = {
32 .module = THIS_MODULE, 28 .module = THIS_MODULE,
33}; 29};
34 30
35static struct platform_device *a4000t_scsi_device;
36 31
37#define A4000T_SCSI_ADDR 0xdd0040 32#define A4000T_SCSI_OFFSET 0x40
38 33
39static int __devinit a4000t_probe(struct platform_device *dev) 34static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev)
40{ 35{
41 struct Scsi_Host *host; 36 struct resource *res;
37 phys_addr_t scsi_addr;
42 struct NCR_700_Host_Parameters *hostdata; 38 struct NCR_700_Host_Parameters *hostdata;
39 struct Scsi_Host *host;
43 40
44 if (!(MACH_IS_AMIGA && AMIGAHW_PRESENT(A4000_SCSI))) 41 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
45 goto out; 42 if (!res)
43 return -ENODEV;
46 44
47 if (!request_mem_region(A4000T_SCSI_ADDR, 0x1000, 45 if (!request_mem_region(res->start, resource_size(res),
48 "A4000T builtin SCSI")) 46 "A4000T builtin SCSI"))
49 goto out; 47 return -EBUSY;
50 48
51 hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); 49 hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters),
50 GFP_KERNEL);
52 if (!hostdata) { 51 if (!hostdata) {
53 printk(KERN_ERR "a4000t-scsi: Failed to allocate host data\n"); 52 dev_err(&pdev->dev, "Failed to allocate host data\n");
54 goto out_release; 53 goto out_release;
55 } 54 }
56 55
56 scsi_addr = res->start + A4000T_SCSI_OFFSET;
57
57 /* Fill in the required pieces of hostdata */ 58 /* Fill in the required pieces of hostdata */
58 hostdata->base = (void __iomem *)ZTWO_VADDR(A4000T_SCSI_ADDR); 59 hostdata->base = (void __iomem *)ZTWO_VADDR(scsi_addr);
59 hostdata->clock = 50; 60 hostdata->clock = 50;
60 hostdata->chip710 = 1; 61 hostdata->chip710 = 1;
61 hostdata->dmode_extra = DMODE_FC2; 62 hostdata->dmode_extra = DMODE_FC2;
@@ -63,26 +64,25 @@ static int __devinit a4000t_probe(struct platform_device *dev)
63 64
64 /* and register the chip */ 65 /* and register the chip */
65 host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata, 66 host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata,
66 &dev->dev); 67 &pdev->dev);
67 if (!host) { 68 if (!host) {
68 printk(KERN_ERR "a4000t-scsi: No host detected; " 69 dev_err(&pdev->dev,
69 "board configuration problem?\n"); 70 "No host detected; board configuration problem?\n");
70 goto out_free; 71 goto out_free;
71 } 72 }
72 73
73 host->this_id = 7; 74 host->this_id = 7;
74 host->base = A4000T_SCSI_ADDR; 75 host->base = scsi_addr;
75 host->irq = IRQ_AMIGA_PORTS; 76 host->irq = IRQ_AMIGA_PORTS;
76 77
77 if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi", 78 if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi",
78 host)) { 79 host)) {
79 printk(KERN_ERR "a4000t-scsi: request_irq failed\n"); 80 dev_err(&pdev->dev, "request_irq failed\n");
80 goto out_put_host; 81 goto out_put_host;
81 } 82 }
82 83
83 platform_set_drvdata(dev, host); 84 platform_set_drvdata(pdev, host);
84 scsi_scan_host(host); 85 scsi_scan_host(host);
85
86 return 0; 86 return 0;
87 87
88 out_put_host: 88 out_put_host:
@@ -90,58 +90,49 @@ static int __devinit a4000t_probe(struct platform_device *dev)
90 out_free: 90 out_free:
91 kfree(hostdata); 91 kfree(hostdata);
92 out_release: 92 out_release:
93 release_mem_region(A4000T_SCSI_ADDR, 0x1000); 93 release_mem_region(res->start, resource_size(res));
94 out:
95 return -ENODEV; 94 return -ENODEV;
96} 95}
97 96
98static __devexit int a4000t_device_remove(struct platform_device *dev) 97static int __exit amiga_a4000t_scsi_remove(struct platform_device *pdev)
99{ 98{
100 struct Scsi_Host *host = platform_get_drvdata(dev); 99 struct Scsi_Host *host = platform_get_drvdata(pdev);
101 struct NCR_700_Host_Parameters *hostdata = shost_priv(host); 100 struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
101 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
102 102
103 scsi_remove_host(host); 103 scsi_remove_host(host);
104
105 NCR_700_release(host); 104 NCR_700_release(host);
106 kfree(hostdata); 105 kfree(hostdata);
107 free_irq(host->irq, host); 106 free_irq(host->irq, host);
108 release_mem_region(A4000T_SCSI_ADDR, 0x1000); 107 release_mem_region(res->start, resource_size(res));
109
110 return 0; 108 return 0;
111} 109}
112 110
113static struct platform_driver a4000t_scsi_driver = { 111static struct platform_driver amiga_a4000t_scsi_driver = {
114 .driver = { 112 .remove = __exit_p(amiga_a4000t_scsi_remove),
115 .name = "a4000t-scsi", 113 .driver = {
116 .owner = THIS_MODULE, 114 .name = "amiga-a4000t-scsi",
115 .owner = THIS_MODULE,
117 }, 116 },
118 .probe = a4000t_probe,
119 .remove = __devexit_p(a4000t_device_remove),
120}; 117};
121 118
122static int __init a4000t_scsi_init(void) 119static int __init amiga_a4000t_scsi_init(void)
123{ 120{
124 int err; 121 return platform_driver_probe(&amiga_a4000t_scsi_driver,
125 122 amiga_a4000t_scsi_probe);
126 err = platform_driver_register(&a4000t_scsi_driver);
127 if (err)
128 return err;
129
130 a4000t_scsi_device = platform_device_register_simple("a4000t-scsi",
131 -1, NULL, 0);
132 if (IS_ERR(a4000t_scsi_device)) {
133 platform_driver_unregister(&a4000t_scsi_driver);
134 return PTR_ERR(a4000t_scsi_device);
135 }
136
137 return err;
138} 123}
139 124
140static void __exit a4000t_scsi_exit(void) 125module_init(amiga_a4000t_scsi_init);
126
127static void __exit amiga_a4000t_scsi_exit(void)
141{ 128{
142 platform_device_unregister(a4000t_scsi_device); 129 platform_driver_unregister(&amiga_a4000t_scsi_driver);
143 platform_driver_unregister(&a4000t_scsi_driver);
144} 130}
145 131
146module_init(a4000t_scsi_init); 132module_exit(amiga_a4000t_scsi_exit);
147module_exit(a4000t_scsi_exit); 133
134MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / "
135 "Kars de Jong <jongk@linux-m68k.org>");
136MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver");
137MODULE_LICENSE("GPL");
138MODULE_ALIAS("platform:amiga-a4000t-scsi");
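
The a4000t driver no longer registers its own platform device; it expects the Amiga platform code (outside this excerpt) to provide an "amiga-a4000t-scsi" device whose memory window arrives as an IORESOURCE_MEM resource, which the probe above fetches with platform_get_resource() and offsets by A4000T_SCSI_OFFSET. Roughly how such a registration could look; the base address and window size here are illustrative guesses, not taken from the real arch/m68k code:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Illustrative values only; the real window is defined by the platform. */
static const struct resource example_a4000t_res = {
	.start	= 0xdd0000,
	.end	= 0xdd0fff,
	.flags	= IORESOURCE_MEM,
};

static int __init example_register_a4000t(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_simple("amiga-a4000t-scsi", -1,
					       &example_a4000t_res, 1);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);
	return 0;
}
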
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 9c0c91178538..1a5bf5724750 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -655,9 +655,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
655 /* Does this really need to be GFP_DMA? */ 655 /* Does this really need to be GFP_DMA? */
656 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); 656 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
657 if(!p) { 657 if(!p) {
658 kfree (usg); 658 dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
659 dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
660 usg->sg[i].count,i,usg->count)); 659 usg->sg[i].count,i,usg->count));
660 kfree(usg);
661 rcode = -ENOMEM; 661 rcode = -ENOMEM;
662 goto cleanup; 662 goto cleanup;
663 } 663 }
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index ab646e580d64..ce5371b3cdd5 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -48,7 +48,7 @@ struct device_attribute;
48/*The limit of outstanding scsi command that firmware can handle*/ 48/*The limit of outstanding scsi command that firmware can handle*/
49#define ARCMSR_MAX_OUTSTANDING_CMD 256 49#define ARCMSR_MAX_OUTSTANDING_CMD 256
50#define ARCMSR_MAX_FREECCB_NUM 320 50#define ARCMSR_MAX_FREECCB_NUM 320
51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/02/27" 51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/11/03"
52#define ARCMSR_SCSI_INITIATOR_ID 255 52#define ARCMSR_SCSI_INITIATOR_ID 255
53#define ARCMSR_MAX_XFER_SECTORS 512 53#define ARCMSR_MAX_XFER_SECTORS 512
54#define ARCMSR_MAX_XFER_SECTORS_B 4096 54#define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -110,6 +110,8 @@ struct CMD_MESSAGE_FIELD
110#define FUNCTION_SAY_HELLO 0x0807 110#define FUNCTION_SAY_HELLO 0x0807
111#define FUNCTION_SAY_GOODBYE 0x0808 111#define FUNCTION_SAY_GOODBYE 0x0808
112#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809 112#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809
113#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
114#define FUNCTION_HARDWARE_RESET 0x080B
113/* ARECA IO CONTROL CODE*/ 115/* ARECA IO CONTROL CODE*/
114#define ARCMSR_MESSAGE_READ_RQBUFFER \ 116#define ARCMSR_MESSAGE_READ_RQBUFFER \
115 ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER 117 ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER
@@ -133,6 +135,7 @@ struct CMD_MESSAGE_FIELD
133#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001 135#define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001
134#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006 136#define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006
135#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F 137#define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F
138#define ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON 0x00000088
136/* 139/*
137************************************************************* 140*************************************************************
138** structure for holding DMA address data 141** structure for holding DMA address data
@@ -341,13 +344,13 @@ struct MessageUnit_B
341 uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; 344 uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
342 uint32_t postq_index; 345 uint32_t postq_index;
343 uint32_t doneq_index; 346 uint32_t doneq_index;
344 void __iomem *drv2iop_doorbell_reg; 347 uint32_t __iomem *drv2iop_doorbell_reg;
345 void __iomem *drv2iop_doorbell_mask_reg; 348 uint32_t __iomem *drv2iop_doorbell_mask_reg;
346 void __iomem *iop2drv_doorbell_reg; 349 uint32_t __iomem *iop2drv_doorbell_reg;
347 void __iomem *iop2drv_doorbell_mask_reg; 350 uint32_t __iomem *iop2drv_doorbell_mask_reg;
348 void __iomem *msgcode_rwbuffer_reg; 351 uint32_t __iomem *msgcode_rwbuffer_reg;
349 void __iomem *ioctl_wbuffer_reg; 352 uint32_t __iomem *ioctl_wbuffer_reg;
350 void __iomem *ioctl_rbuffer_reg; 353 uint32_t __iomem *ioctl_rbuffer_reg;
351}; 354};
352 355
353/* 356/*
@@ -375,6 +378,7 @@ struct AdapterControlBlock
375 /* message unit ATU inbound base address0 */ 378 /* message unit ATU inbound base address0 */
376 379
377 uint32_t acb_flags; 380 uint32_t acb_flags;
381 uint8_t adapter_index;
378 #define ACB_F_SCSISTOPADAPTER 0x0001 382 #define ACB_F_SCSISTOPADAPTER 0x0001
379 #define ACB_F_MSG_STOP_BGRB 0x0002 383 #define ACB_F_MSG_STOP_BGRB 0x0002
380 /* stop RAID background rebuild */ 384 /* stop RAID background rebuild */
@@ -390,7 +394,7 @@ struct AdapterControlBlock
390 #define ACB_F_BUS_RESET 0x0080 394 #define ACB_F_BUS_RESET 0x0080
391 #define ACB_F_IOP_INITED 0x0100 395 #define ACB_F_IOP_INITED 0x0100
392 /* iop init */ 396 /* iop init */
393 397 #define ACB_F_FIRMWARE_TRAP 0x0400
394 struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM]; 398 struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
395 /* used for memory free */ 399 /* used for memory free */
396 struct list_head ccb_free_list; 400 struct list_head ccb_free_list;
@@ -423,12 +427,19 @@ struct AdapterControlBlock
423#define ARECA_RAID_GOOD 0xaa 427#define ARECA_RAID_GOOD 0xaa
424 uint32_t num_resets; 428 uint32_t num_resets;
425 uint32_t num_aborts; 429 uint32_t num_aborts;
430 uint32_t signature;
426 uint32_t firm_request_len; 431 uint32_t firm_request_len;
427 uint32_t firm_numbers_queue; 432 uint32_t firm_numbers_queue;
428 uint32_t firm_sdram_size; 433 uint32_t firm_sdram_size;
429 uint32_t firm_hd_channels; 434 uint32_t firm_hd_channels;
430 char firm_model[12]; 435 char firm_model[12];
431 char firm_version[20]; 436 char firm_version[20];
437 char device_map[20]; /*21,84-99*/
438 struct work_struct arcmsr_do_message_isr_bh;
439 struct timer_list eternal_timer;
440 unsigned short fw_state;
441 atomic_t rq_map_token;
442 int ante_token_value;
432};/* HW_DEVICE_EXTENSION */ 443};/* HW_DEVICE_EXTENSION */
433/* 444/*
434******************************************************************************* 445*******************************************************************************
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index a4e04c50c436..07fdfe57e38e 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -192,6 +192,7 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = {
192 .attr = { 192 .attr = {
193 .name = "mu_read", 193 .name = "mu_read",
194 .mode = S_IRUSR , 194 .mode = S_IRUSR ,
195 .owner = THIS_MODULE,
195 }, 196 },
196 .size = 1032, 197 .size = 1032,
197 .read = arcmsr_sysfs_iop_message_read, 198 .read = arcmsr_sysfs_iop_message_read,
@@ -201,6 +202,7 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = {
201 .attr = { 202 .attr = {
202 .name = "mu_write", 203 .name = "mu_write",
203 .mode = S_IWUSR, 204 .mode = S_IWUSR,
205 .owner = THIS_MODULE,
204 }, 206 },
205 .size = 1032, 207 .size = 1032,
206 .write = arcmsr_sysfs_iop_message_write, 208 .write = arcmsr_sysfs_iop_message_write,
@@ -210,6 +212,7 @@ static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
210 .attr = { 212 .attr = {
211 .name = "mu_clear", 213 .name = "mu_clear",
212 .mode = S_IWUSR, 214 .mode = S_IWUSR,
215 .owner = THIS_MODULE,
213 }, 216 },
214 .size = 1, 217 .size = 1,
215 .write = arcmsr_sysfs_iop_message_clear, 218 .write = arcmsr_sysfs_iop_message_clear,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index ffbe2192da3c..ffa54792bb33 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -72,8 +72,16 @@
72#include <scsi/scsicam.h> 72#include <scsi/scsicam.h>
73#include "arcmsr.h" 73#include "arcmsr.h"
74 74
75#ifdef CONFIG_SCSI_ARCMSR_RESET
76 static int sleeptime = 20;
77 static int retrycount = 12;
78 module_param(sleeptime, int, S_IRUGO|S_IWUSR);
79 MODULE_PARM_DESC(sleeptime, "The waiting period for FW ready while bus reset");
80 module_param(retrycount, int, S_IRUGO|S_IWUSR);
81 MODULE_PARM_DESC(retrycount, "The retry count for FW ready while bus reset");
82#endif
75MODULE_AUTHOR("Erich Chen <support@areca.com.tw>"); 83MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
76MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter"); 84MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Bus Adapter");
77MODULE_LICENSE("Dual BSD/GPL"); 85MODULE_LICENSE("Dual BSD/GPL");
78MODULE_VERSION(ARCMSR_DRIVER_VERSION); 86MODULE_VERSION(ARCMSR_DRIVER_VERSION);
79 87
@@ -96,6 +104,13 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
96static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); 104static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
97static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb); 105static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
98static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb); 106static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
107static void arcmsr_request_device_map(unsigned long pacb);
108static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
109static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
110static void arcmsr_message_isr_bh_fn(struct work_struct *work);
111static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode);
112static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
113
99static const char *arcmsr_info(struct Scsi_Host *); 114static const char *arcmsr_info(struct Scsi_Host *);
100static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); 115static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
101static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, 116static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
@@ -112,7 +127,7 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
112 127
113static struct scsi_host_template arcmsr_scsi_host_template = { 128static struct scsi_host_template arcmsr_scsi_host_template = {
114 .module = THIS_MODULE, 129 .module = THIS_MODULE,
115 .name = "ARCMSR ARECA SATA/SAS RAID HOST Adapter" 130 .name = "ARCMSR ARECA SATA/SAS RAID Host Bus Adapter"
116 ARCMSR_DRIVER_VERSION, 131 ARCMSR_DRIVER_VERSION,
117 .info = arcmsr_info, 132 .info = arcmsr_info,
118 .queuecommand = arcmsr_queue_command, 133 .queuecommand = arcmsr_queue_command,
@@ -128,16 +143,6 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
128 .use_clustering = ENABLE_CLUSTERING, 143 .use_clustering = ENABLE_CLUSTERING,
129 .shost_attrs = arcmsr_host_attrs, 144 .shost_attrs = arcmsr_host_attrs,
130}; 145};
131#ifdef CONFIG_SCSI_ARCMSR_AER
132static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev);
133static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
134 pci_channel_state_t state);
135
136static struct pci_error_handlers arcmsr_pci_error_handlers = {
137 .error_detected = arcmsr_pci_error_detected,
138 .slot_reset = arcmsr_pci_slot_reset,
139};
140#endif
141static struct pci_device_id arcmsr_device_id_table[] = { 146static struct pci_device_id arcmsr_device_id_table[] = {
142 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, 147 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
143 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)}, 148 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
@@ -166,9 +171,6 @@ static struct pci_driver arcmsr_pci_driver = {
166 .probe = arcmsr_probe, 171 .probe = arcmsr_probe,
167 .remove = arcmsr_remove, 172 .remove = arcmsr_remove,
168 .shutdown = arcmsr_shutdown, 173 .shutdown = arcmsr_shutdown,
169 #ifdef CONFIG_SCSI_ARCMSR_AER
170 .err_handler = &arcmsr_pci_error_handlers,
171 #endif
172}; 174};
173 175
174static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) 176static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
@@ -236,10 +238,9 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
236 void *dma_coherent; 238 void *dma_coherent;
237 dma_addr_t dma_coherent_handle, dma_addr; 239 dma_addr_t dma_coherent_handle, dma_addr;
238 struct CommandControlBlock *ccb_tmp; 240 struct CommandControlBlock *ccb_tmp;
239 uint32_t intmask_org;
240 int i, j; 241 int i, j;
241 242
242 acb->pmuA = pci_ioremap_bar(pdev, 0); 243 acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
243 if (!acb->pmuA) { 244 if (!acb->pmuA) {
244 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", 245 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
245 acb->host->host_no); 246 acb->host->host_no);
@@ -281,12 +282,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
281 for (i = 0; i < ARCMSR_MAX_TARGETID; i++) 282 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
282 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) 283 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
283 acb->devstate[i][j] = ARECA_RAID_GONE; 284 acb->devstate[i][j] = ARECA_RAID_GONE;
284
285 /*
286 ** here we need to tell iop 331 our ccb_tmp.HighPart
287 ** if ccb_tmp.HighPart is not zero
288 */
289 intmask_org = arcmsr_disable_outbound_ints(acb);
290 } 285 }
291 break; 286 break;
292 287
@@ -297,7 +292,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
297 void __iomem *mem_base0, *mem_base1; 292 void __iomem *mem_base0, *mem_base1;
298 void *dma_coherent; 293 void *dma_coherent;
299 dma_addr_t dma_coherent_handle, dma_addr; 294 dma_addr_t dma_coherent_handle, dma_addr;
300 uint32_t intmask_org;
301 struct CommandControlBlock *ccb_tmp; 295 struct CommandControlBlock *ccb_tmp;
302 int i, j; 296 int i, j;
303 297
@@ -333,11 +327,13 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
333 reg = (struct MessageUnit_B *)(dma_coherent + 327 reg = (struct MessageUnit_B *)(dma_coherent +
334 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); 328 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
335 acb->pmuB = reg; 329 acb->pmuB = reg;
336 mem_base0 = pci_ioremap_bar(pdev, 0); 330 mem_base0 = ioremap(pci_resource_start(pdev, 0),
331 pci_resource_len(pdev, 0));
337 if (!mem_base0) 332 if (!mem_base0)
338 goto out; 333 goto out;
339 334
340 mem_base1 = pci_ioremap_bar(pdev, 2); 335 mem_base1 = ioremap(pci_resource_start(pdev, 2),
336 pci_resource_len(pdev, 2));
341 if (!mem_base1) { 337 if (!mem_base1) {
342 iounmap(mem_base0); 338 iounmap(mem_base0);
343 goto out; 339 goto out;
@@ -357,12 +353,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
357 for (i = 0; i < ARCMSR_MAX_TARGETID; i++) 353 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
358 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) 354 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
359 acb->devstate[i][j] = ARECA_RAID_GOOD; 355 acb->devstate[i][j] = ARECA_RAID_GOOD;
360
361 /*
362 ** here we need to tell iop 331 our ccb_tmp.HighPart
363 ** if ccb_tmp.HighPart is not zero
364 */
365 intmask_org = arcmsr_disable_outbound_ints(acb);
366 } 356 }
367 break; 357 break;
368 } 358 }
@@ -374,6 +364,88 @@ out:
374 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); 364 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
375 return -ENOMEM; 365 return -ENOMEM;
376} 366}
367static void arcmsr_message_isr_bh_fn(struct work_struct *work)
368{
369 struct AdapterControlBlock *acb = container_of(work, struct AdapterControlBlock, arcmsr_do_message_isr_bh);
370
371 switch (acb->adapter_type) {
372 case ACB_ADAPTER_TYPE_A: {
373
374 struct MessageUnit_A __iomem *reg = acb->pmuA;
375 char *acb_dev_map = (char *)acb->device_map;
376 uint32_t __iomem *signature = (uint32_t __iomem *) (&reg->message_rwbuffer[0]);
377 char __iomem *devicemap = (char __iomem *) (&reg->message_rwbuffer[21]);
378 int target, lun;
379 struct scsi_device *psdev;
380 char diff;
381
382 atomic_inc(&acb->rq_map_token);
383 if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
384 for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
385 diff = (*acb_dev_map)^readb(devicemap);
386 if (diff != 0) {
387 char temp;
388 *acb_dev_map = readb(devicemap);
389 temp = *acb_dev_map;
390 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
391 if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
392 scsi_add_device(acb->host, 0, target, lun);
393 } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
394 psdev = scsi_device_lookup(acb->host, 0, target, lun);
395 if (psdev != NULL) {
396 scsi_remove_device(psdev);
397 scsi_device_put(psdev);
398 }
399 }
400 temp >>= 1;
401 diff >>= 1;
402 }
403 }
404 devicemap++;
405 acb_dev_map++;
406 }
407 }
408 break;
409 }
410
411 case ACB_ADAPTER_TYPE_B: {
412 struct MessageUnit_B *reg = acb->pmuB;
413 char *acb_dev_map = (char *)acb->device_map;
414 uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer_reg[0]);
415 char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer_reg[21]);
416 int target, lun;
417 struct scsi_device *psdev;
418 char diff;
419
420 atomic_inc(&acb->rq_map_token);
421 if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
422 for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
423 diff = (*acb_dev_map)^readb(devicemap);
424 if (diff != 0) {
425 char temp;
426 *acb_dev_map = readb(devicemap);
427 temp = *acb_dev_map;
428 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
429 if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
430 scsi_add_device(acb->host, 0, target, lun);
431 } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
432 psdev = scsi_device_lookup(acb->host, 0, target, lun);
433 if (psdev != NULL) {
434 scsi_remove_device(psdev);
435 scsi_device_put(psdev);
436 }
437 }
438 temp >>= 1;
439 diff >>= 1;
440 }
441 }
442 devicemap++;
443 acb_dev_map++;
444 }
445 }
446 }
447 }
448}
377 449
378static int arcmsr_probe(struct pci_dev *pdev, 450static int arcmsr_probe(struct pci_dev *pdev,
379 const struct pci_device_id *id) 451 const struct pci_device_id *id)
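
The arcmsr_message_isr_bh_fn() added above compares each cached device-map byte against the byte read back from the adapter's message buffer and hot-adds or removes SCSI devices for the LUN bits that changed. A small stand-alone C sketch of that XOR-and-walk comparison is shown below; the single-target byte and sample values are invented for illustration, and the scsi_add_device()/scsi_remove_device() calls are replaced by prints.

/* Illustrative sketch only, not part of the patch: diff one target's
 * LUN bitmap the way the bottom half does.  A set bit in (old ^ new)
 * marks a LUN whose presence changed; the new map then tells whether
 * it appeared or vanished. */
#include <stdio.h>

#define MAX_LUN 8

static void compare_device_map(unsigned char cached, unsigned char fresh,
			       int target)
{
	unsigned char diff = cached ^ fresh;	/* LUNs that changed state */
	int lun;

	for (lun = 0; lun < MAX_LUN; lun++) {
		if (diff & 0x01) {
			if (fresh & 0x01)
				printf("target %d lun %d: attached, add device\n",
				       target, lun);
			else
				printf("target %d lun %d: gone, remove device\n",
				       target, lun);
		}
		diff >>= 1;
		fresh >>= 1;
	}
}

int main(void)
{
	/* old map had LUNs 0 and 2; the new map has LUNs 0 and 3 */
	compare_device_map(0x05, 0x09, 0);
	return 0;
}
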
@@ -432,17 +504,17 @@ static int arcmsr_probe(struct pci_dev *pdev,
432 ACB_F_MESSAGE_WQBUFFER_READED); 504 ACB_F_MESSAGE_WQBUFFER_READED);
433 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; 505 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
434 INIT_LIST_HEAD(&acb->ccb_free_list); 506 INIT_LIST_HEAD(&acb->ccb_free_list);
435 507 INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
436 error = arcmsr_alloc_ccb_pool(acb); 508 error = arcmsr_alloc_ccb_pool(acb);
437 if (error) 509 if (error)
438 goto out_release_regions; 510 goto out_release_regions;
439 511
512 arcmsr_iop_init(acb);
440 error = request_irq(pdev->irq, arcmsr_do_interrupt, 513 error = request_irq(pdev->irq, arcmsr_do_interrupt,
441 IRQF_SHARED, "arcmsr", acb); 514 IRQF_SHARED, "arcmsr", acb);
442 if (error) 515 if (error)
443 goto out_free_ccb_pool; 516 goto out_free_ccb_pool;
444 517
445 arcmsr_iop_init(acb);
446 pci_set_drvdata(pdev, host); 518 pci_set_drvdata(pdev, host);
447 if (strncmp(acb->firm_version, "V1.42", 5) >= 0) 519 if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
448 host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B; 520 host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
@@ -459,6 +531,14 @@ static int arcmsr_probe(struct pci_dev *pdev,
459 #ifdef CONFIG_SCSI_ARCMSR_AER 531 #ifdef CONFIG_SCSI_ARCMSR_AER
460 pci_enable_pcie_error_reporting(pdev); 532 pci_enable_pcie_error_reporting(pdev);
461 #endif 533 #endif
534 atomic_set(&acb->rq_map_token, 16);
535 acb->fw_state = true;
536 init_timer(&acb->eternal_timer);
537 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(10*HZ);
538 acb->eternal_timer.data = (unsigned long) acb;
539 acb->eternal_timer.function = &arcmsr_request_device_map;
540 add_timer(&acb->eternal_timer);
541
462 return 0; 542 return 0;
463 out_free_sysfs: 543 out_free_sysfs:
464 out_free_irq: 544 out_free_irq:
@@ -518,40 +598,48 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
518 return 0xff; 598 return 0xff;
519} 599}
520 600
521static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) 601static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
522{ 602{
523 struct MessageUnit_A __iomem *reg = acb->pmuA; 603 struct MessageUnit_A __iomem *reg = acb->pmuA;
524 604
525 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); 605 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
526 if (arcmsr_hba_wait_msgint_ready(acb)) 606 if (arcmsr_hba_wait_msgint_ready(acb)) {
527 printk(KERN_NOTICE 607 printk(KERN_NOTICE
528 "arcmsr%d: wait 'abort all outstanding command' timeout \n" 608 "arcmsr%d: wait 'abort all outstanding command' timeout \n"
529 , acb->host->host_no); 609 , acb->host->host_no);
610 return 0xff;
611 }
612 return 0x00;
530} 613}
531 614
532static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) 615static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
533{ 616{
534 struct MessageUnit_B *reg = acb->pmuB; 617 struct MessageUnit_B *reg = acb->pmuB;
535 618
536 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg); 619 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
537 if (arcmsr_hbb_wait_msgint_ready(acb)) 620 if (arcmsr_hbb_wait_msgint_ready(acb)) {
538 printk(KERN_NOTICE 621 printk(KERN_NOTICE
539 "arcmsr%d: wait 'abort all outstanding command' timeout \n" 622 "arcmsr%d: wait 'abort all outstanding command' timeout \n"
540 , acb->host->host_no); 623 , acb->host->host_no);
624 return 0xff;
625 }
626 return 0x00;
541} 627}
542 628
543static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) 629static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
544{ 630{
631 uint8_t rtnval = 0;
545 switch (acb->adapter_type) { 632 switch (acb->adapter_type) {
546 case ACB_ADAPTER_TYPE_A: { 633 case ACB_ADAPTER_TYPE_A: {
547 arcmsr_abort_hba_allcmd(acb); 634 rtnval = arcmsr_abort_hba_allcmd(acb);
548 } 635 }
549 break; 636 break;
550 637
551 case ACB_ADAPTER_TYPE_B: { 638 case ACB_ADAPTER_TYPE_B: {
552 arcmsr_abort_hbb_allcmd(acb); 639 rtnval = arcmsr_abort_hbb_allcmd(acb);
553 } 640 }
554 } 641 }
642 return rtnval;
555} 643}
556 644
557static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) 645static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
@@ -649,8 +737,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
649 737
650 case ACB_ADAPTER_TYPE_A : { 738 case ACB_ADAPTER_TYPE_A : {
651 struct MessageUnit_A __iomem *reg = acb->pmuA; 739 struct MessageUnit_A __iomem *reg = acb->pmuA;
652 orig_mask = readl(&reg->outbound_intmask)|\ 740 orig_mask = readl(&reg->outbound_intmask);
653 ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
654 writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ 741 writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
655 &reg->outbound_intmask); 742 &reg->outbound_intmask);
656 } 743 }
@@ -658,8 +745,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
658 745
659 case ACB_ADAPTER_TYPE_B : { 746 case ACB_ADAPTER_TYPE_B : {
660 struct MessageUnit_B *reg = acb->pmuB; 747 struct MessageUnit_B *reg = acb->pmuB;
661 orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \ 748 orig_mask = readl(reg->iop2drv_doorbell_mask_reg);
662 (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
663 writel(0, reg->iop2drv_doorbell_mask_reg); 749 writel(0, reg->iop2drv_doorbell_mask_reg);
664 } 750 }
665 break; 751 break;
@@ -795,12 +881,13 @@ static void arcmsr_remove(struct pci_dev *pdev)
795 struct AdapterControlBlock *acb = 881 struct AdapterControlBlock *acb =
796 (struct AdapterControlBlock *) host->hostdata; 882 (struct AdapterControlBlock *) host->hostdata;
797 int poll_count = 0; 883 int poll_count = 0;
798
799 arcmsr_free_sysfs_attr(acb); 884 arcmsr_free_sysfs_attr(acb);
800 scsi_remove_host(host); 885 scsi_remove_host(host);
886 flush_scheduled_work();
887 del_timer_sync(&acb->eternal_timer);
888 arcmsr_disable_outbound_ints(acb);
801 arcmsr_stop_adapter_bgrb(acb); 889 arcmsr_stop_adapter_bgrb(acb);
802 arcmsr_flush_adapter_cache(acb); 890 arcmsr_flush_adapter_cache(acb);
803 arcmsr_disable_outbound_ints(acb);
804 acb->acb_flags |= ACB_F_SCSISTOPADAPTER; 891 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
805 acb->acb_flags &= ~ACB_F_IOP_INITED; 892 acb->acb_flags &= ~ACB_F_IOP_INITED;
806 893
@@ -841,7 +928,9 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
841 struct Scsi_Host *host = pci_get_drvdata(pdev); 928 struct Scsi_Host *host = pci_get_drvdata(pdev);
842 struct AdapterControlBlock *acb = 929 struct AdapterControlBlock *acb =
843 (struct AdapterControlBlock *)host->hostdata; 930 (struct AdapterControlBlock *)host->hostdata;
844 931 del_timer_sync(&acb->eternal_timer);
932 arcmsr_disable_outbound_ints(acb);
933 flush_scheduled_work();
845 arcmsr_stop_adapter_bgrb(acb); 934 arcmsr_stop_adapter_bgrb(acb);
846 arcmsr_flush_adapter_cache(acb); 935 arcmsr_flush_adapter_cache(acb);
847} 936}
@@ -861,7 +950,7 @@ static void arcmsr_module_exit(void)
861module_init(arcmsr_module_init); 950module_init(arcmsr_module_init);
862module_exit(arcmsr_module_exit); 951module_exit(arcmsr_module_exit);
863 952
864static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ 953static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
865 u32 intmask_org) 954 u32 intmask_org)
866{ 955{
867 u32 mask; 956 u32 mask;
@@ -871,7 +960,8 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
871 case ACB_ADAPTER_TYPE_A : { 960 case ACB_ADAPTER_TYPE_A : {
872 struct MessageUnit_A __iomem *reg = acb->pmuA; 961 struct MessageUnit_A __iomem *reg = acb->pmuA;
873 mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | 962 mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
874 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); 963 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
964 ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
875 writel(mask, &reg->outbound_intmask); 965 writel(mask, &reg->outbound_intmask);
876 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; 966 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
877 } 967 }
@@ -879,8 +969,10 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
879 969
880 case ACB_ADAPTER_TYPE_B : { 970 case ACB_ADAPTER_TYPE_B : {
881 struct MessageUnit_B *reg = acb->pmuB; 971 struct MessageUnit_B *reg = acb->pmuB;
882 mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \ 972 mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
883 ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE); 973 ARCMSR_IOP2DRV_DATA_READ_OK |
974 ARCMSR_IOP2DRV_CDB_DONE |
975 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
884 writel(mask, reg->iop2drv_doorbell_mask_reg); 976 writel(mask, reg->iop2drv_doorbell_mask_reg);
885 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; 977 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
886 } 978 }
@@ -1048,8 +1140,8 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
1048 } 1140 }
1049 case ACB_ADAPTER_TYPE_B: { 1141 case ACB_ADAPTER_TYPE_B: {
1050 struct MessageUnit_B *reg = acb->pmuB; 1142 struct MessageUnit_B *reg = acb->pmuB;
1051 iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); 1143 iounmap((u8 *)reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
1052 iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); 1144 iounmap((u8 *)reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
1053 dma_free_coherent(&acb->pdev->dev, 1145 dma_free_coherent(&acb->pdev->dev,
1054 (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 + 1146 (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
1055 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); 1147 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
@@ -1249,13 +1341,36 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1249 reg->doneq_index = index; 1341 reg->doneq_index = index;
1250 } 1342 }
1251} 1343}
1344/*
1345**********************************************************************************
1346** Handle a message interrupt
1347**
1348** The only message interrupt we expect is in response to a query for the current adapter config.
1349** We want this in order to compare the drivemap so that we can detect newly-attached drives.
1350**********************************************************************************
1351*/
1352static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
1353{
1354 struct MessageUnit_A *reg = acb->pmuA;
1355
1356 /*clear interrupt and message state*/
1357 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
1358 schedule_work(&acb->arcmsr_do_message_isr_bh);
1359}
1360static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
1361{
1362 struct MessageUnit_B *reg = acb->pmuB;
1252 1363
1364 /*clear interrupt and message state*/
1365 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
1366 schedule_work(&acb->arcmsr_do_message_isr_bh);
1367}
1253static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) 1368static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
1254{ 1369{
1255 uint32_t outbound_intstatus; 1370 uint32_t outbound_intstatus;
1256 struct MessageUnit_A __iomem *reg = acb->pmuA; 1371 struct MessageUnit_A __iomem *reg = acb->pmuA;
1257 1372
1258 outbound_intstatus = readl(&reg->outbound_intstatus) & \ 1373 outbound_intstatus = readl(&reg->outbound_intstatus) &
1259 acb->outbound_int_enable; 1374 acb->outbound_int_enable;
1260 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) { 1375 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
1261 return 1; 1376 return 1;
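
The comment block in the hunk above explains why a message interrupt is expected at all: it is the adapter's reply to a "get config" request, and the driver wants it so it can re-read the device map and notice newly attached or removed drives. The new arcmsr_hba_message_isr()/arcmsr_hbb_message_isr() handlers only acknowledge the interrupt and schedule_work() the comparison, which is the usual top-half/bottom-half split. A kernel-style sketch of that split follows; every name in it (demo_*) is invented for the example.

/* Illustrative sketch only, not part of the patch: acknowledge in the
 * interrupt handler, defer the slow rescan to process context. */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_ctrl {
	struct work_struct rescan_work;
};

static void demo_rescan_fn(struct work_struct *work)
{
	struct demo_ctrl *ctrl =
		container_of(work, struct demo_ctrl, rescan_work);

	/* the expensive part (reading the device map, adding or
	 * removing scsi devices) runs here, outside interrupt context */
	(void)ctrl;
}

static void demo_probe_setup(struct demo_ctrl *ctrl)
{
	/* done once at probe time, before the irq is requested */
	INIT_WORK(&ctrl->rescan_work, demo_rescan_fn);
}

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct demo_ctrl *ctrl = dev_id;

	/* clear/acknowledge the message interrupt here, then defer */
	schedule_work(&ctrl->rescan_work);
	return IRQ_HANDLED;
}
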
@@ -1267,6 +1382,10 @@ static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
1267 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { 1382 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1268 arcmsr_hba_postqueue_isr(acb); 1383 arcmsr_hba_postqueue_isr(acb);
1269 } 1384 }
1385 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1386 /* messenger of "driver to iop commands" */
1387 arcmsr_hba_message_isr(acb);
1388 }
1270 return 0; 1389 return 0;
1271} 1390}
1272 1391
@@ -1275,13 +1394,14 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
1275 uint32_t outbound_doorbell; 1394 uint32_t outbound_doorbell;
1276 struct MessageUnit_B *reg = acb->pmuB; 1395 struct MessageUnit_B *reg = acb->pmuB;
1277 1396
1278 outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \ 1397 outbound_doorbell = readl(reg->iop2drv_doorbell_reg) &
1279 acb->outbound_int_enable; 1398 acb->outbound_int_enable;
1280 if (!outbound_doorbell) 1399 if (!outbound_doorbell)
1281 return 1; 1400 return 1;
1282 1401
1283 writel(~outbound_doorbell, reg->iop2drv_doorbell_reg); 1402 writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
1284 /*in case the last action of doorbell interrupt clearance is cached, this action can push HW to write down the clear bit*/ 1403 /*in case the last action of doorbell interrupt clearance is cached,
1404 this action can push HW to write down the clear bit*/
1285 readl(reg->iop2drv_doorbell_reg); 1405 readl(reg->iop2drv_doorbell_reg);
1286 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg); 1406 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
1287 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { 1407 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
@@ -1293,6 +1413,10 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
1293 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { 1413 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1294 arcmsr_hbb_postqueue_isr(acb); 1414 arcmsr_hbb_postqueue_isr(acb);
1295 } 1415 }
1416 if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1417 /* messenger of "driver to iop commands" */
1418 arcmsr_hbb_message_isr(acb);
1419 }
1296 1420
1297 return 0; 1421 return 0;
1298} 1422}
@@ -1360,7 +1484,7 @@ void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1360 } 1484 }
1361} 1485}
1362 1486
1363static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ 1487static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
1364 struct scsi_cmnd *cmd) 1488 struct scsi_cmnd *cmd)
1365{ 1489{
1366 struct CMD_MESSAGE_FIELD *pcmdmessagefld; 1490 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
@@ -1398,6 +1522,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1398 retvalue = ARCMSR_MESSAGE_FAIL; 1522 retvalue = ARCMSR_MESSAGE_FAIL;
1399 goto message_out; 1523 goto message_out;
1400 } 1524 }
1525
1526 if (!acb->fw_state) {
1527 pcmdmessagefld->cmdmessage.ReturnCode =
1528 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1529 goto message_out;
1530 }
1531
1401 ptmpQbuffer = ver_addr; 1532 ptmpQbuffer = ver_addr;
1402 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) 1533 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
1403 && (allxfer_len < 1031)) { 1534 && (allxfer_len < 1031)) {
@@ -1444,6 +1575,12 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1444 retvalue = ARCMSR_MESSAGE_FAIL; 1575 retvalue = ARCMSR_MESSAGE_FAIL;
1445 goto message_out; 1576 goto message_out;
1446 } 1577 }
1578 if (!acb->fw_state) {
1579 pcmdmessagefld->cmdmessage.ReturnCode =
1580 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1581 goto message_out;
1582 }
1583
1447 ptmpuserbuffer = ver_addr; 1584 ptmpuserbuffer = ver_addr;
1448 user_len = pcmdmessagefld->cmdmessage.Length; 1585 user_len = pcmdmessagefld->cmdmessage.Length;
1449 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); 1586 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
@@ -1496,6 +1633,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1496 1633
1497 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 1634 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1498 uint8_t *pQbuffer = acb->rqbuffer; 1635 uint8_t *pQbuffer = acb->rqbuffer;
1636 if (!acb->fw_state) {
1637 pcmdmessagefld->cmdmessage.ReturnCode =
1638 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1639 goto message_out;
1640 }
1499 1641
1500 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1642 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1501 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1643 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -1511,6 +1653,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1511 1653
1512 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { 1654 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
1513 uint8_t *pQbuffer = acb->wqbuffer; 1655 uint8_t *pQbuffer = acb->wqbuffer;
1656 if (!acb->fw_state) {
1657 pcmdmessagefld->cmdmessage.ReturnCode =
1658 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1659 goto message_out;
1660 }
1514 1661
1515 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1662 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1516 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1663 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -1529,6 +1676,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1529 1676
1530 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 1677 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1531 uint8_t *pQbuffer; 1678 uint8_t *pQbuffer;
1679 if (!acb->fw_state) {
1680 pcmdmessagefld->cmdmessage.ReturnCode =
1681 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1682 goto message_out;
1683 }
1532 1684
1533 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1685 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1534 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 1686 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -1551,13 +1703,22 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1551 break; 1703 break;
1552 1704
1553 case ARCMSR_MESSAGE_RETURN_CODE_3F: { 1705 case ARCMSR_MESSAGE_RETURN_CODE_3F: {
1706 if (!acb->fw_state) {
1707 pcmdmessagefld->cmdmessage.ReturnCode =
1708 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1709 goto message_out;
1710 }
1554 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; 1711 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
1555 } 1712 }
1556 break; 1713 break;
1557 1714
1558 case ARCMSR_MESSAGE_SAY_HELLO: { 1715 case ARCMSR_MESSAGE_SAY_HELLO: {
1559 int8_t *hello_string = "Hello! I am ARCMSR"; 1716 int8_t *hello_string = "Hello! I am ARCMSR";
1560 1717 if (!acb->fw_state) {
1718 pcmdmessagefld->cmdmessage.ReturnCode =
1719 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1720 goto message_out;
1721 }
1561 memcpy(pcmdmessagefld->messagedatabuffer, hello_string 1722 memcpy(pcmdmessagefld->messagedatabuffer, hello_string
1562 , (int16_t)strlen(hello_string)); 1723 , (int16_t)strlen(hello_string));
1563 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1724 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
@@ -1565,10 +1726,20 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1565 break; 1726 break;
1566 1727
1567 case ARCMSR_MESSAGE_SAY_GOODBYE: 1728 case ARCMSR_MESSAGE_SAY_GOODBYE:
1729 if (!acb->fw_state) {
1730 pcmdmessagefld->cmdmessage.ReturnCode =
1731 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1732 goto message_out;
1733 }
1568 arcmsr_iop_parking(acb); 1734 arcmsr_iop_parking(acb);
1569 break; 1735 break;
1570 1736
1571 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: 1737 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
1738 if (!acb->fw_state) {
1739 pcmdmessagefld->cmdmessage.ReturnCode =
1740 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1741 goto message_out;
1742 }
1572 arcmsr_flush_adapter_cache(acb); 1743 arcmsr_flush_adapter_cache(acb);
1573 break; 1744 break;
1574 1745
@@ -1651,16 +1822,57 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1651 struct CommandControlBlock *ccb; 1822 struct CommandControlBlock *ccb;
1652 int target = cmd->device->id; 1823 int target = cmd->device->id;
1653 int lun = cmd->device->lun; 1824 int lun = cmd->device->lun;
1654 1825 uint8_t scsicmd = cmd->cmnd[0];
1655 cmd->scsi_done = done; 1826 cmd->scsi_done = done;
1656 cmd->host_scribble = NULL; 1827 cmd->host_scribble = NULL;
1657 cmd->result = 0; 1828 cmd->result = 0;
1829
1830 if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) {
1831 if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1832 cmd->result = (DID_NO_CONNECT << 16);
1833 }
1834 cmd->scsi_done(cmd);
1835 return 0;
1836 }
1837
1658 if (acb->acb_flags & ACB_F_BUS_RESET) { 1838 if (acb->acb_flags & ACB_F_BUS_RESET) {
1659 printk(KERN_NOTICE "arcmsr%d: bus reset" 1839 switch (acb->adapter_type) {
1660 " and return busy \n" 1840 case ACB_ADAPTER_TYPE_A: {
1661 , acb->host->host_no); 1841 struct MessageUnit_A __iomem *reg = acb->pmuA;
1842 uint32_t intmask_org, outbound_doorbell;
1843
1844 if ((readl(&reg->outbound_msgaddr1) &
1845 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
1846 printk(KERN_NOTICE "arcmsr%d: bus reset and return busy\n",
1847 acb->host->host_no);
1662 return SCSI_MLQUEUE_HOST_BUSY; 1848 return SCSI_MLQUEUE_HOST_BUSY;
1663 } 1849 }
1850
1851 acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP;
1852 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok\n",
1853 acb->host->host_no);
1854 /* disable all outbound interrupt */
1855 intmask_org = arcmsr_disable_outbound_ints(acb);
1856 arcmsr_get_firmware_spec(acb, 1);
1857 /*start background rebuild*/
1858 arcmsr_start_adapter_bgrb(acb);
1859 /* clear Qbuffer if door bell ringed */
1860 outbound_doorbell = readl(&reg->outbound_doorbell);
1861 /*clear interrupt */
1862 writel(outbound_doorbell, &reg->outbound_doorbell);
1863 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
1864 &reg->inbound_doorbell);
1865 /* enable outbound Post Queue,outbound doorbell Interrupt */
1866 arcmsr_enable_outbound_ints(acb, intmask_org);
1867 acb->acb_flags |= ACB_F_IOP_INITED;
1868 acb->acb_flags &= ~ACB_F_BUS_RESET;
1869 }
1870 break;
1871 case ACB_ADAPTER_TYPE_B: {
1872 }
1873 }
1874 }
1875
1664 if (target == 16) { 1876 if (target == 16) {
1665 /* virtual device for iop message transfer */ 1877 /* virtual device for iop message transfer */
1666 arcmsr_handle_virtual_command(acb, cmd); 1878 arcmsr_handle_virtual_command(acb, cmd);
@@ -1699,21 +1911,25 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1699 return 0; 1911 return 0;
1700} 1912}
1701 1913
1702static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) 1914static void *arcmsr_get_hba_config(struct AdapterControlBlock *acb, int mode)
1703{ 1915{
1704 struct MessageUnit_A __iomem *reg = acb->pmuA; 1916 struct MessageUnit_A __iomem *reg = acb->pmuA;
1705 char *acb_firm_model = acb->firm_model; 1917 char *acb_firm_model = acb->firm_model;
1706 char *acb_firm_version = acb->firm_version; 1918 char *acb_firm_version = acb->firm_version;
1919 char *acb_device_map = acb->device_map;
1707 char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); 1920 char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
1708 char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); 1921 char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
1922 char __iomem *iop_device_map = (char __iomem *) (&reg->message_rwbuffer[21]);
1709 int count; 1923 int count;
1710 1924
1711 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); 1925 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
1712 if (arcmsr_hba_wait_msgint_ready(acb)) { 1926 if (arcmsr_hba_wait_msgint_ready(acb)) {
1713 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ 1927 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
1714 miscellaneous data' timeout \n", acb->host->host_no); 1928 miscellaneous data' timeout \n", acb->host->host_no);
1929 return NULL;
1715 } 1930 }
1716 1931
1932 if (mode == 1) {
1717 count = 8; 1933 count = 8;
1718 while (count) { 1934 while (count) {
1719 *acb_firm_model = readb(iop_firm_model); 1935 *acb_firm_model = readb(iop_firm_model);
@@ -1730,34 +1946,48 @@ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
1730 count--; 1946 count--;
1731 } 1947 }
1732 1948
1949 count = 16;
1950 while (count) {
1951 *acb_device_map = readb(iop_device_map);
1952 acb_device_map++;
1953 iop_device_map++;
1954 count--;
1955 }
1956
1733 printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n" 1957 printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
1734 , acb->host->host_no 1958 , acb->host->host_no
1735 , acb->firm_version); 1959 , acb->firm_version);
1736 1960 acb->signature = readl(&reg->message_rwbuffer[0]);
1737 acb->firm_request_len = readl(&reg->message_rwbuffer[1]); 1961 acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
1738 acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]); 1962 acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
1739 acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]); 1963 acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
1740 acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]); 1964 acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
1741} 1965}
1742 1966 return reg->message_rwbuffer;
1743static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) 1967}
1968static void __iomem *arcmsr_get_hbb_config(struct AdapterControlBlock *acb, int mode)
1744{ 1969{
1745 struct MessageUnit_B *reg = acb->pmuB; 1970 struct MessageUnit_B *reg = acb->pmuB;
1746 uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg; 1971 uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg;
1747 char *acb_firm_model = acb->firm_model; 1972 char *acb_firm_model = acb->firm_model;
1748 char *acb_firm_version = acb->firm_version; 1973 char *acb_firm_version = acb->firm_version;
1974 char *acb_device_map = acb->device_map;
1749 char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]); 1975 char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]);
1750 /*firm_model,15,60-67*/ 1976 /*firm_model,15,60-67*/
1751 char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]); 1977 char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]);
1752 /*firm_version,17,68-83*/ 1978 /*firm_version,17,68-83*/
1979 char __iomem *iop_device_map = (char __iomem *) (&lrwbuffer[21]);
1980 /*firm_version,21,84-99*/
1753 int count; 1981 int count;
1754 1982
1755 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); 1983 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
1756 if (arcmsr_hbb_wait_msgint_ready(acb)) { 1984 if (arcmsr_hbb_wait_msgint_ready(acb)) {
1757 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ 1985 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
1758 miscellaneous data' timeout \n", acb->host->host_no); 1986 miscellaneous data' timeout \n", acb->host->host_no);
1987 return NULL;
1759 } 1988 }
1760 1989
1990 if (mode == 1) {
1761 count = 8; 1991 count = 8;
1762 while (count) 1992 while (count)
1763 { 1993 {
@@ -1776,11 +2006,20 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
1776 count--; 2006 count--;
1777 } 2007 }
1778 2008
2009 count = 16;
2010 while (count) {
2011 *acb_device_map = readb(iop_device_map);
2012 acb_device_map++;
2013 iop_device_map++;
2014 count--;
2015 }
2016
1779 printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", 2017 printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n",
1780 acb->host->host_no, 2018 acb->host->host_no,
1781 acb->firm_version); 2019 acb->firm_version);
1782 2020
1783 lrwbuffer++; 2021 acb->signature = readl(lrwbuffer++);
2022 /*firm_signature,1,00-03*/
1784 acb->firm_request_len = readl(lrwbuffer++); 2023 acb->firm_request_len = readl(lrwbuffer++);
1785 /*firm_request_len,1,04-07*/ 2024 /*firm_request_len,1,04-07*/
1786 acb->firm_numbers_queue = readl(lrwbuffer++); 2025 acb->firm_numbers_queue = readl(lrwbuffer++);
@@ -1790,20 +2029,23 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
1790 acb->firm_hd_channels = readl(lrwbuffer); 2029 acb->firm_hd_channels = readl(lrwbuffer);
1791 /*firm_ide_channels,4,16-19*/ 2030 /*firm_ide_channels,4,16-19*/
1792} 2031}
1793 2032 return reg->msgcode_rwbuffer_reg;
1794static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) 2033}
2034static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode)
1795{ 2035{
2036 void *rtnval = 0;
1796 switch (acb->adapter_type) { 2037 switch (acb->adapter_type) {
1797 case ACB_ADAPTER_TYPE_A: { 2038 case ACB_ADAPTER_TYPE_A: {
1798 arcmsr_get_hba_config(acb); 2039 rtnval = arcmsr_get_hba_config(acb, mode);
1799 } 2040 }
1800 break; 2041 break;
1801 2042
1802 case ACB_ADAPTER_TYPE_B: { 2043 case ACB_ADAPTER_TYPE_B: {
1803 arcmsr_get_hbb_config(acb); 2044 rtnval = arcmsr_get_hbb_config(acb, mode);
1804 } 2045 }
1805 break; 2046 break;
1806 } 2047 }
2048 return rtnval;
1807} 2049}
1808 2050
1809static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, 2051static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
@@ -2043,6 +2285,66 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
2043 } 2285 }
2044} 2286}
2045 2287
2288static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
2289{
2290 struct MessageUnit_A __iomem *reg = acb->pmuA;
2291
2292 if (unlikely(atomic_read(&acb->rq_map_token) == 0)) {
2293 acb->fw_state = false;
2294 } else {
2295 /*to prevent rq_map_token from changing by other interrupt, then
2296 avoid the dead-lock*/
2297 acb->fw_state = true;
2298 atomic_dec(&acb->rq_map_token);
2299 if (!(acb->fw_state) ||
2300 (acb->ante_token_value == atomic_read(&acb->rq_map_token))) {
2301 atomic_set(&acb->rq_map_token, 16);
2302 }
2303 acb->ante_token_value = atomic_read(&acb->rq_map_token);
2304 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2305 }
2306 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
2307 return;
2308}
2309
2310static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
2311{
2312 struct MessageUnit_B __iomem *reg = acb->pmuB;
2313
2314 if (unlikely(atomic_read(&acb->rq_map_token) == 0)) {
2315 acb->fw_state = false;
2316 } else {
2317 /*to prevent rq_map_token from changing by other interrupt, then
2318 avoid the dead-lock*/
2319 acb->fw_state = true;
2320 atomic_dec(&acb->rq_map_token);
2321 if (!(acb->fw_state) ||
2322 (acb->ante_token_value == atomic_read(&acb->rq_map_token))) {
2323 atomic_set(&acb->rq_map_token, 16);
2324 }
2325 acb->ante_token_value = atomic_read(&acb->rq_map_token);
2326 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
2327 }
2328 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
2329 return;
2330}
2331
2332static void arcmsr_request_device_map(unsigned long pacb)
2333{
2334 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
2335
2336 switch (acb->adapter_type) {
2337 case ACB_ADAPTER_TYPE_A: {
2338 arcmsr_request_hba_device_map(acb);
2339 }
2340 break;
2341 case ACB_ADAPTER_TYPE_B: {
2342 arcmsr_request_hbb_device_map(acb);
2343 }
2344 break;
2345 }
2346}
2347
2046static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) 2348static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2047{ 2349{
2048 struct MessageUnit_A __iomem *reg = acb->pmuA; 2350 struct MessageUnit_A __iomem *reg = acb->pmuA;
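
The hunk above adds the periodic half of the same mechanism: arcmsr_request_device_map() is driven by the "eternal" timer, re-arms itself every 6 seconds, posts a "get config" message to the adapter and spends one rq_map_token per round, while the reply path (the bottom half shown earlier) increments the token again; a token that reaches zero marks the firmware as hung via fw_state. A minimal self-rearming timer module in the pre-2.6.36 timer style is sketched below; the watchdog_* names are invented, and the 16-token budget and 6-second period simply follow the values used above.

/* Illustrative sketch only, not part of the patch: a timer that polls,
 * spends a token each round, and re-arms itself.  The real driver's
 * reply path resets the token when the adapter answers. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list watchdog_timer;
static atomic_t watchdog_token = ATOMIC_INIT(16);

static void watchdog_poll(unsigned long data)
{
	/* a real driver would post its "get config" request here */
	if (atomic_dec_and_test(&watchdog_token))
		printk(KERN_NOTICE "watchdog: no reply seen, firmware looks hung\n");

	/* re-arm: poll again in 6 seconds */
	mod_timer(&watchdog_timer, jiffies + msecs_to_jiffies(6000));
}

static int __init watchdog_init(void)
{
	init_timer(&watchdog_timer);
	watchdog_timer.function = watchdog_poll;
	watchdog_timer.data = 0;
	watchdog_timer.expires = jiffies + msecs_to_jiffies(6000);
	add_timer(&watchdog_timer);
	return 0;
}

static void __exit watchdog_exit(void)
{
	/* make sure the callback is not running before unload */
	del_timer_sync(&watchdog_timer);
}

module_init(watchdog_init);
module_exit(watchdog_exit);
MODULE_LICENSE("GPL");
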
@@ -2121,6 +2423,60 @@ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
2121 return; 2423 return;
2122} 2424}
2123 2425
2426static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
2427{
2428 uint8_t value[64];
2429 int i;
2430
2431 /* backup pci config data */
2432 for (i = 0; i < 64; i++) {
2433 pci_read_config_byte(acb->pdev, i, &value[i]);
2434 }
2435 /* hardware reset signal */
2436 pci_write_config_byte(acb->pdev, 0x84, 0x20);
2437 msleep(1000);
2438 /* write back pci config data */
2439 for (i = 0; i < 64; i++) {
2440 pci_write_config_byte(acb->pdev, i, value[i]);
2441 }
2442 msleep(1000);
2443 return;
2444}
2445/*
2446****************************************************************************
2447****************************************************************************
2448*/
2449#ifdef CONFIG_SCSI_ARCMSR_RESET
2450 int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
2451 {
2452 struct Scsi_Host *shost = NULL;
2453 spinlock_t *host_lock = NULL;
2454 int i, isleep;
2455
2456 shost = cmd->device->host;
2457 host_lock = shost->host_lock;
2458
2459 printk(KERN_NOTICE "Host %d bus reset over, sleep %d seconds (busy %d, can queue %d) ...........\n",
2460 shost->host_no, sleeptime, shost->host_busy, shost->can_queue);
2461 isleep = sleeptime / 10;
2462 spin_unlock_irq(host_lock);
2463 if (isleep > 0) {
2464 for (i = 0; i < isleep; i++) {
2465 msleep(10000);
2466 printk(KERN_NOTICE "^%d^\n", i);
2467 }
2468 }
2469
2470 isleep = sleeptime % 10;
2471 if (isleep > 0) {
2472 msleep(isleep * 1000);
2473 printk(KERN_NOTICE "^v^\n");
2474 }
2475 spin_lock_irq(host_lock);
2476 printk(KERN_NOTICE "***** wake up *****\n");
2477 return 0;
2478 }
2479#endif
2124static void arcmsr_iop_init(struct AdapterControlBlock *acb) 2480static void arcmsr_iop_init(struct AdapterControlBlock *acb)
2125{ 2481{
2126 uint32_t intmask_org; 2482 uint32_t intmask_org;
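
The arcmsr_sleep_for_bus_reset() helper above has to wait for many seconds while the SCSI midlayer is holding shost->host_lock, so it releases the lock with spin_unlock_irq() before calling msleep() and re-takes it afterwards; sleeping with a spinlock held is not allowed. A tiny sketch of that drop-sleep-retake shape, with invented names:

/* Illustrative sketch only, not part of the patch: never sleep with a
 * spinlock held; release it around the blocking wait. */
#include <linux/spinlock.h>
#include <linux/delay.h>

static void wait_with_lock_dropped(spinlock_t *lock, unsigned int seconds)
{
	spin_unlock_irq(lock);		/* give up the lock before blocking */
	msleep(seconds * 1000);		/* safe to sleep now */
	spin_lock_irq(lock);		/* re-acquire before returning */
}
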
@@ -2129,7 +2485,7 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb)
2129 intmask_org = arcmsr_disable_outbound_ints(acb); 2485 intmask_org = arcmsr_disable_outbound_ints(acb);
2130 arcmsr_wait_firmware_ready(acb); 2486 arcmsr_wait_firmware_ready(acb);
2131 arcmsr_iop_confirm(acb); 2487 arcmsr_iop_confirm(acb);
2132 arcmsr_get_firmware_spec(acb); 2488 arcmsr_get_firmware_spec(acb, 1);
2133 /*start background rebuild*/ 2489 /*start background rebuild*/
2134 arcmsr_start_adapter_bgrb(acb); 2490 arcmsr_start_adapter_bgrb(acb);
2135 /* empty doorbell Qbuffer if door bell ringed */ 2491 /* empty doorbell Qbuffer if door bell ringed */
@@ -2140,51 +2496,110 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb)
2140 acb->acb_flags |= ACB_F_IOP_INITED; 2496 acb->acb_flags |= ACB_F_IOP_INITED;
2141} 2497}
2142 2498
2143static void arcmsr_iop_reset(struct AdapterControlBlock *acb) 2499static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
2144{ 2500{
2145 struct CommandControlBlock *ccb; 2501 struct CommandControlBlock *ccb;
2146 uint32_t intmask_org; 2502 uint32_t intmask_org;
2503 uint8_t rtnval = 0x00;
2147 int i = 0; 2504 int i = 0;
2148 2505
2149 if (atomic_read(&acb->ccboutstandingcount) != 0) { 2506 if (atomic_read(&acb->ccboutstandingcount) != 0) {
2507 /* disable all outbound interrupt */
2508 intmask_org = arcmsr_disable_outbound_ints(acb);
2150 /* talk to iop 331 outstanding command aborted */ 2509 /* talk to iop 331 outstanding command aborted */
2151 arcmsr_abort_allcmd(acb); 2510 rtnval = arcmsr_abort_allcmd(acb);
2152
2153 /* wait for 3 sec for all command aborted*/ 2511 /* wait for 3 sec for all command aborted*/
2154 ssleep(3); 2512 ssleep(3);
2155
2156 /* disable all outbound interrupt */
2157 intmask_org = arcmsr_disable_outbound_ints(acb);
2158 /* clear all outbound posted Q */ 2513 /* clear all outbound posted Q */
2159 arcmsr_done4abort_postqueue(acb); 2514 arcmsr_done4abort_postqueue(acb);
2160 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 2515 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2161 ccb = acb->pccb_pool[i]; 2516 ccb = acb->pccb_pool[i];
2162 if (ccb->startdone == ARCMSR_CCB_START) { 2517 if (ccb->startdone == ARCMSR_CCB_START) {
2163 ccb->startdone = ARCMSR_CCB_ABORTED;
2164 arcmsr_ccb_complete(ccb, 1); 2518 arcmsr_ccb_complete(ccb, 1);
2165 } 2519 }
2166 } 2520 }
2521 atomic_set(&acb->ccboutstandingcount, 0);
2167 /* enable all outbound interrupt */ 2522 /* enable all outbound interrupt */
2168 arcmsr_enable_outbound_ints(acb, intmask_org); 2523 arcmsr_enable_outbound_ints(acb, intmask_org);
2524 return rtnval;
2169 } 2525 }
2526 return rtnval;
2170} 2527}
2171 2528
2172static int arcmsr_bus_reset(struct scsi_cmnd *cmd) 2529static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
2173{ 2530{
2174 struct AdapterControlBlock *acb = 2531 struct AdapterControlBlock *acb =
2175 (struct AdapterControlBlock *)cmd->device->host->hostdata; 2532 (struct AdapterControlBlock *)cmd->device->host->hostdata;
2176 int i; 2533 int retry = 0;
2177 2534
2178 acb->num_resets++; 2535 if (acb->acb_flags & ACB_F_BUS_RESET)
2536 return SUCCESS;
2537
2538 printk(KERN_NOTICE "arcmsr%d: bus reset ..... \n", acb->adapter_index);
2179 acb->acb_flags |= ACB_F_BUS_RESET; 2539 acb->acb_flags |= ACB_F_BUS_RESET;
2180 for (i = 0; i < 400; i++) { 2540 acb->num_resets++;
2181 if (!atomic_read(&acb->ccboutstandingcount)) 2541 while (atomic_read(&acb->ccboutstandingcount) != 0 && retry < 4) {
2542 arcmsr_interrupt(acb);
2543 retry++;
2544 }
2545
2546 if (arcmsr_iop_reset(acb)) {
2547 switch (acb->adapter_type) {
2548 case ACB_ADAPTER_TYPE_A: {
2549 printk(KERN_NOTICE "arcmsr%d: do hardware bus reset, num_resets = %d num_aborts = %d \n",
2550 acb->adapter_index, acb->num_resets, acb->num_aborts);
2551 arcmsr_hardware_reset(acb);
2552 acb->acb_flags |= ACB_F_FIRMWARE_TRAP;
2553 acb->acb_flags &= ~ACB_F_IOP_INITED;
2554 #ifdef CONFIG_SCSI_ARCMSR_RESET
2555 struct MessageUnit_A __iomem *reg = acb->pmuA;
2556 uint32_t intmask_org, outbound_doorbell;
2557 int retry_count = 0;
2558sleep_again:
2559 arcmsr_sleep_for_bus_reset(cmd);
2560 if ((readl(&reg->outbound_msgaddr1) &
2561 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
2562 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry=%d \n",
2563 acb->host->host_no, retry_count);
2564 if (retry_count > retrycount) {
2565 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry aborted \n",
2566 acb->host->host_no);
2567 return SUCCESS;
2568 }
2569 retry_count++;
2570 goto sleep_again;
2571 }
2572 acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP;
2573 acb->acb_flags |= ACB_F_IOP_INITED;
2574 acb->acb_flags &= ~ACB_F_BUS_RESET;
2575 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok \n",
2576 acb->host->host_no);
2577 /* disable all outbound interrupt */
2578 intmask_org = arcmsr_disable_outbound_ints(acb);
2579 arcmsr_get_firmware_spec(acb, 1);
2580 /*start background rebuild*/
2581 arcmsr_start_adapter_bgrb(acb);
2582 /* clear Qbuffer if door bell ringed */
2583 outbound_doorbell = readl(&reg->outbound_doorbell);
2584 writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
2585 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
2586 /* enable outbound Post Queue,outbound doorbell Interrupt */
2587 arcmsr_enable_outbound_ints(acb, intmask_org);
2588 atomic_set(&acb->rq_map_token, 16);
2589 init_timer(&acb->eternal_timer);
2590 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(20*HZ);
2591 acb->eternal_timer.data = (unsigned long) acb;
2592 acb->eternal_timer.function = &arcmsr_request_device_map;
2593 add_timer(&acb->eternal_timer);
2594 #endif
2595 }
2182 break; 2596 break;
2183 arcmsr_interrupt(acb);/* FIXME: need spinlock */ 2597 case ACB_ADAPTER_TYPE_B: {
2184 msleep(25);
2185 } 2598 }
2186 arcmsr_iop_reset(acb); 2599 }
2600 } else {
2187 acb->acb_flags &= ~ACB_F_BUS_RESET; 2601 acb->acb_flags &= ~ACB_F_BUS_RESET;
2602 }
2188 return SUCCESS; 2603 return SUCCESS;
2189} 2604}
2190 2605
@@ -2277,98 +2692,3 @@ static const char *arcmsr_info(struct Scsi_Host *host)
2277 ARCMSR_DRIVER_VERSION); 2692 ARCMSR_DRIVER_VERSION);
2278 return buf; 2693 return buf;
2279} 2694}
2280#ifdef CONFIG_SCSI_ARCMSR_AER
2281static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
2282{
2283 struct Scsi_Host *host = pci_get_drvdata(pdev);
2284 struct AdapterControlBlock *acb =
2285 (struct AdapterControlBlock *) host->hostdata;
2286 uint32_t intmask_org;
2287 int i, j;
2288
2289 if (pci_enable_device(pdev)) {
2290 return PCI_ERS_RESULT_DISCONNECT;
2291 }
2292 pci_set_master(pdev);
2293 intmask_org = arcmsr_disable_outbound_ints(acb);
2294 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2295 ACB_F_MESSAGE_RQBUFFER_CLEARED |
2296 ACB_F_MESSAGE_WQBUFFER_READED);
2297 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
2298 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
2299 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
2300 acb->devstate[i][j] = ARECA_RAID_GONE;
2301
2302 arcmsr_wait_firmware_ready(acb);
2303 arcmsr_iop_confirm(acb);
2304 /* disable all outbound interrupt */
2305 arcmsr_get_firmware_spec(acb);
2306 /*start background rebuild*/
2307 arcmsr_start_adapter_bgrb(acb);
2308 /* empty doorbell Qbuffer if door bell ringed */
2309 arcmsr_clear_doorbell_queue_buffer(acb);
2310 arcmsr_enable_eoi_mode(acb);
2311 /* enable outbound Post Queue,outbound doorbell Interrupt */
2312 arcmsr_enable_outbound_ints(acb, intmask_org);
2313 acb->acb_flags |= ACB_F_IOP_INITED;
2314
2315 pci_enable_pcie_error_reporting(pdev);
2316 return PCI_ERS_RESULT_RECOVERED;
2317}
2318
2319static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev)
2320{
2321 struct Scsi_Host *host = pci_get_drvdata(pdev);
2322 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata;
2323 struct CommandControlBlock *ccb;
2324 uint32_t intmask_org;
2325 int i = 0;
2326
2327 if (atomic_read(&acb->ccboutstandingcount) != 0) {
2328 /* talk to iop 331 outstanding command aborted */
2329 arcmsr_abort_allcmd(acb);
2330 /* wait for 3 sec for all command aborted*/
2331 ssleep(3);
2332 /* disable all outbound interrupt */
2333 intmask_org = arcmsr_disable_outbound_ints(acb);
2334 /* clear all outbound posted Q */
2335 arcmsr_done4abort_postqueue(acb);
2336 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2337 ccb = acb->pccb_pool[i];
2338 if (ccb->startdone == ARCMSR_CCB_START) {
2339 ccb->startdone = ARCMSR_CCB_ABORTED;
2340 arcmsr_ccb_complete(ccb, 1);
2341 }
2342 }
2343 /* enable all outbound interrupt */
2344 arcmsr_enable_outbound_ints(acb, intmask_org);
2345 }
2346 pci_disable_device(pdev);
2347}
2348
2349static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev)
2350{
2351 struct Scsi_Host *host = pci_get_drvdata(pdev);
2352 struct AdapterControlBlock *acb = \
2353 (struct AdapterControlBlock *)host->hostdata;
2354
2355 arcmsr_stop_adapter_bgrb(acb);
2356 arcmsr_flush_adapter_cache(acb);
2357}
2358
2359static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
2360 pci_channel_state_t state)
2361{
2362 switch (state) {
2363 case pci_channel_io_frozen:
2364 arcmsr_pci_ers_need_reset_forepart(pdev);
2365 return PCI_ERS_RESULT_NEED_RESET;
2366 case pci_channel_io_perm_failure:
2367 arcmsr_pci_ers_disconnect_forepart(pdev);
2368 return PCI_ERS_RESULT_DISCONNECT;
2369 break;
2370 default:
2371 return PCI_ERS_RESULT_NEED_RESET;
2372 }
2373}
2374#endif
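
The added arcmsr initialisation above arms a periodic device-map timer with the legacy timer_list API (init_timer, explicit .data/.function, add_timer). Below is a minimal sketch of that pattern under assumed names — my_ctx, my_timeout_fn and my_start_timer are placeholders for this illustration, not anything taken from the driver.

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_ctx {
	struct timer_list timer;
	/* driver state would live here */
};

static void my_timeout_fn(unsigned long data)
{
	struct my_ctx *ctx = (struct my_ctx *)data;	/* recover the context */

	/* do the periodic work, then re-arm the timer for the next interval */
	mod_timer(&ctx->timer, jiffies + 6 * HZ);
}

static void my_start_timer(struct my_ctx *ctx)
{
	init_timer(&ctx->timer);			/* initialise the timer_list */
	ctx->timer.expires  = jiffies + 6 * HZ;		/* first expiry ~6s from now */
	ctx->timer.data     = (unsigned long)ctx;	/* handed back to the handler */
	ctx->timer.function = my_timeout_fn;
	add_timer(&ctx->timer);				/* arm it */
}
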
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index e641922f20bc..350cbeaae160 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -167,10 +167,9 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
167 &nonemb_cmd.dma); 167 &nonemb_cmd.dma);
168 if (nonemb_cmd.va == NULL) { 168 if (nonemb_cmd.va == NULL) {
169 SE_DEBUG(DBG_LVL_1, 169 SE_DEBUG(DBG_LVL_1,
170 "Failed to allocate memory for" 170 "Failed to allocate memory for mgmt_invalidate_icds\n");
171 "mgmt_invalidate_icds \n");
172 spin_unlock(&ctrl->mbox_lock); 171 spin_unlock(&ctrl->mbox_lock);
173 return -1; 172 return 0;
174 } 173 }
175 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 174 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
176 req = nonemb_cmd.va; 175 req = nonemb_cmd.va;
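
The be2iscsi hunk is more than a line-wrap cleanup: adjacent C string literals concatenate with no separator, so the original two-line message actually printed "...memory formgmt_invalidate_icds". A one-line illustration of that pitfall (the variable name msg exists only for this example):

	/* adjacent literals concatenate verbatim, so the separating space
	 * must live inside one of the strings */
	const char *msg = "Failed to allocate memory for"
			  "mgmt_invalidate_icds \n";	/* == "...memory formgmt_invalidate_icds \n" */
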
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 0c08e185a766..3a7b3f88932f 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -84,11 +84,32 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
84 for (i = 0; hal_mods[i]; i++) 84 for (i = 0; hal_mods[i]; i++)
85 hal_mods[i]->meminfo(cfg, &km_len, &dm_len); 85 hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
86 86
87 dm_len += bfa_port_meminfo();
87 88
88 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len; 89 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
89 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; 90 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
90} 91}
91 92
93static void
94bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
95{
96 struct bfa_port_s *port = &bfa->modules.port;
97 uint32_t dm_len;
98 uint8_t *dm_kva;
99 uint64_t dm_pa;
100
101 dm_len = bfa_port_meminfo();
102 dm_kva = bfa_meminfo_dma_virt(mi);
103 dm_pa = bfa_meminfo_dma_phys(mi);
104
105 memset(port, 0, sizeof(struct bfa_port_s));
106 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod, bfa->logm);
107 bfa_port_mem_claim(port, dm_kva, dm_pa);
108
109 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
110 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
111}
112
92/** 113/**
93 * Use this function to do attach the driver instance with the BFA 114 * Use this function to do attach the driver instance with the BFA
94 * library. This function will not trigger any HW initialization 115 * library. This function will not trigger any HW initialization
@@ -140,6 +161,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
140 for (i = 0; hal_mods[i]; i++) 161 for (i = 0; hal_mods[i]; i++)
141 hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev); 162 hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);
142 163
164 bfa_com_port_attach(bfa, meminfo);
143} 165}
144 166
145/** 167/**
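
The new bfa_com_port_attach() follows the usual carve-and-advance scheme: the port module takes its DMA slice from the preallocated meminfo area and bumps both the virtual and physical cursors past it. A generic sketch of that idea under assumed names (dma_cursor, my_claim_dma) rather than the real bfa_meminfo_* accessors:

#include <linux/types.h>

struct dma_cursor {
	u8  *kva;	/* next free kernel-virtual address in the DMA area */
	u64  pa;	/* matching DMA/physical address */
};

/* hand out 'len' bytes to a module and advance both cursors past the slice */
static void my_claim_dma(struct dma_cursor *cur, size_t len,
			 u8 **kva_out, u64 *pa_out)
{
	*kva_out = cur->kva;
	*pa_out  = cur->pa;
	cur->kva += len;
	cur->pa  += len;
}
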
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 18b7102bb80e..2ce26eb7a1ec 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -1,36 +1,35 @@
1#include <linux/types.h> 1#include <linux/types.h>
2#include <linux/mm.h>
3#include <linux/slab.h>
4#include <linux/blkdev.h>
5#include <linux/init.h> 2#include <linux/init.h>
6#include <linux/interrupt.h> 3#include <linux/interrupt.h>
4#include <linux/mm.h>
5#include <linux/slab.h>
6#include <linux/spinlock.h>
7#include <linux/zorro.h>
7 8
8#include <asm/setup.h>
9#include <asm/page.h> 9#include <asm/page.h>
10#include <asm/pgtable.h> 10#include <asm/pgtable.h>
11#include <asm/amigaints.h> 11#include <asm/amigaints.h>
12#include <asm/amigahw.h> 12#include <asm/amigahw.h>
13#include <linux/zorro.h>
14#include <asm/irq.h>
15#include <linux/spinlock.h>
16 13
17#include "scsi.h" 14#include "scsi.h"
18#include <scsi/scsi_host.h>
19#include "wd33c93.h" 15#include "wd33c93.h"
20#include "gvp11.h" 16#include "gvp11.h"
21 17
22#include <linux/stat.h>
23 18
19#define CHECK_WD33C93
24 20
25#define DMA(ptr) ((gvp11_scsiregs *)((ptr)->base)) 21struct gvp11_hostdata {
22 struct WD33C93_hostdata wh;
23 struct gvp11_scsiregs *regs;
24};
26 25
27static irqreturn_t gvp11_intr(int irq, void *_instance) 26static irqreturn_t gvp11_intr(int irq, void *data)
28{ 27{
28 struct Scsi_Host *instance = data;
29 struct gvp11_hostdata *hdata = shost_priv(instance);
30 unsigned int status = hdata->regs->CNTR;
29 unsigned long flags; 31 unsigned long flags;
30 unsigned int status;
31 struct Scsi_Host *instance = (struct Scsi_Host *)_instance;
32 32
33 status = DMA(instance)->CNTR;
34 if (!(status & GVP11_DMAC_INT_PENDING)) 33 if (!(status & GVP11_DMAC_INT_PENDING))
35 return IRQ_NONE; 34 return IRQ_NONE;
36 35
@@ -50,64 +49,66 @@ void gvp11_setup(char *str, int *ints)
50static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 49static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
51{ 50{
52 struct Scsi_Host *instance = cmd->device->host; 51 struct Scsi_Host *instance = cmd->device->host;
53 struct WD33C93_hostdata *hdata = shost_priv(instance); 52 struct gvp11_hostdata *hdata = shost_priv(instance);
53 struct WD33C93_hostdata *wh = &hdata->wh;
54 struct gvp11_scsiregs *regs = hdata->regs;
54 unsigned short cntr = GVP11_DMAC_INT_ENABLE; 55 unsigned short cntr = GVP11_DMAC_INT_ENABLE;
55 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 56 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
56 int bank_mask; 57 int bank_mask;
57 static int scsi_alloc_out_of_range = 0; 58 static int scsi_alloc_out_of_range = 0;
58 59
59 /* use bounce buffer if the physical address is bad */ 60 /* use bounce buffer if the physical address is bad */
60 if (addr & hdata->dma_xfer_mask) { 61 if (addr & wh->dma_xfer_mask) {
61 hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; 62 wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
62 63
63 if (!scsi_alloc_out_of_range) { 64 if (!scsi_alloc_out_of_range) {
64 hdata->dma_bounce_buffer = 65 wh->dma_bounce_buffer =
65 kmalloc(hdata->dma_bounce_len, GFP_KERNEL); 66 kmalloc(wh->dma_bounce_len, GFP_KERNEL);
66 hdata->dma_buffer_pool = BUF_SCSI_ALLOCED; 67 wh->dma_buffer_pool = BUF_SCSI_ALLOCED;
67 } 68 }
68 69
69 if (scsi_alloc_out_of_range || 70 if (scsi_alloc_out_of_range ||
70 !hdata->dma_bounce_buffer) { 71 !wh->dma_bounce_buffer) {
71 hdata->dma_bounce_buffer = 72 wh->dma_bounce_buffer =
72 amiga_chip_alloc(hdata->dma_bounce_len, 73 amiga_chip_alloc(wh->dma_bounce_len,
73 "GVP II SCSI Bounce Buffer"); 74 "GVP II SCSI Bounce Buffer");
74 75
75 if (!hdata->dma_bounce_buffer) { 76 if (!wh->dma_bounce_buffer) {
76 hdata->dma_bounce_len = 0; 77 wh->dma_bounce_len = 0;
77 return 1; 78 return 1;
78 } 79 }
79 80
80 hdata->dma_buffer_pool = BUF_CHIP_ALLOCED; 81 wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
81 } 82 }
82 83
83 /* check if the address of the bounce buffer is OK */ 84 /* check if the address of the bounce buffer is OK */
84 addr = virt_to_bus(hdata->dma_bounce_buffer); 85 addr = virt_to_bus(wh->dma_bounce_buffer);
85 86
86 if (addr & hdata->dma_xfer_mask) { 87 if (addr & wh->dma_xfer_mask) {
87 /* fall back to Chip RAM if address out of range */ 88 /* fall back to Chip RAM if address out of range */
88 if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) { 89 if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
89 kfree(hdata->dma_bounce_buffer); 90 kfree(wh->dma_bounce_buffer);
90 scsi_alloc_out_of_range = 1; 91 scsi_alloc_out_of_range = 1;
91 } else { 92 } else {
92 amiga_chip_free(hdata->dma_bounce_buffer); 93 amiga_chip_free(wh->dma_bounce_buffer);
93 } 94 }
94 95
95 hdata->dma_bounce_buffer = 96 wh->dma_bounce_buffer =
96 amiga_chip_alloc(hdata->dma_bounce_len, 97 amiga_chip_alloc(wh->dma_bounce_len,
97 "GVP II SCSI Bounce Buffer"); 98 "GVP II SCSI Bounce Buffer");
98 99
99 if (!hdata->dma_bounce_buffer) { 100 if (!wh->dma_bounce_buffer) {
100 hdata->dma_bounce_len = 0; 101 wh->dma_bounce_len = 0;
101 return 1; 102 return 1;
102 } 103 }
103 104
104 addr = virt_to_bus(hdata->dma_bounce_buffer); 105 addr = virt_to_bus(wh->dma_bounce_buffer);
105 hdata->dma_buffer_pool = BUF_CHIP_ALLOCED; 106 wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
106 } 107 }
107 108
108 if (!dir_in) { 109 if (!dir_in) {
109 /* copy to bounce buffer for a write */ 110 /* copy to bounce buffer for a write */
110 memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, 111 memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
111 cmd->SCp.this_residual); 112 cmd->SCp.this_residual);
112 } 113 }
113 } 114 }
@@ -116,11 +117,11 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
116 if (!dir_in) 117 if (!dir_in)
117 cntr |= GVP11_DMAC_DIR_WRITE; 118 cntr |= GVP11_DMAC_DIR_WRITE;
118 119
119 hdata->dma_dir = dir_in; 120 wh->dma_dir = dir_in;
120 DMA(cmd->device->host)->CNTR = cntr; 121 regs->CNTR = cntr;
121 122
122 /* setup DMA *physical* address */ 123 /* setup DMA *physical* address */
123 DMA(cmd->device->host)->ACR = addr; 124 regs->ACR = addr;
124 125
125 if (dir_in) { 126 if (dir_in) {
126 /* invalidate any cache */ 127 /* invalidate any cache */
@@ -130,12 +131,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
130 cache_push(addr, cmd->SCp.this_residual); 131 cache_push(addr, cmd->SCp.this_residual);
131 } 132 }
132 133
133 bank_mask = (~hdata->dma_xfer_mask >> 18) & 0x01c0; 134 bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0;
134 if (bank_mask) 135 if (bank_mask)
135 DMA(cmd->device->host)->BANK = bank_mask & (addr >> 18); 136 regs->BANK = bank_mask & (addr >> 18);
136 137
137 /* start DMA */ 138 /* start DMA */
138 DMA(cmd->device->host)->ST_DMA = 1; 139 regs->ST_DMA = 1;
139 140
140 /* return success */ 141 /* return success */
141 return 0; 142 return 0;
@@ -144,236 +145,53 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
144static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, 145static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
145 int status) 146 int status)
146{ 147{
147 struct WD33C93_hostdata *hdata = shost_priv(instance); 148 struct gvp11_hostdata *hdata = shost_priv(instance);
149 struct WD33C93_hostdata *wh = &hdata->wh;
150 struct gvp11_scsiregs *regs = hdata->regs;
148 151
149 /* stop DMA */ 152 /* stop DMA */
150 DMA(instance)->SP_DMA = 1; 153 regs->SP_DMA = 1;
151 /* remove write bit from CONTROL bits */ 154 /* remove write bit from CONTROL bits */
152 DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE; 155 regs->CNTR = GVP11_DMAC_INT_ENABLE;
153 156
154 /* copy from a bounce buffer, if necessary */ 157 /* copy from a bounce buffer, if necessary */
155 if (status && hdata->dma_bounce_buffer) { 158 if (status && wh->dma_bounce_buffer) {
156 if (hdata->dma_dir && SCpnt) 159 if (wh->dma_dir && SCpnt)
157 memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer, 160 memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
158 SCpnt->SCp.this_residual); 161 SCpnt->SCp.this_residual);
159 162
160 if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) 163 if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
161 kfree(hdata->dma_bounce_buffer); 164 kfree(wh->dma_bounce_buffer);
162 else
163 amiga_chip_free(hdata->dma_bounce_buffer);
164
165 hdata->dma_bounce_buffer = NULL;
166 hdata->dma_bounce_len = 0;
167 }
168}
169
170#define CHECK_WD33C93
171
172int __init gvp11_detect(struct scsi_host_template *tpnt)
173{
174 static unsigned char called = 0;
175 struct Scsi_Host *instance;
176 unsigned long address;
177 unsigned int epc;
178 struct zorro_dev *z = NULL;
179 unsigned int default_dma_xfer_mask;
180 struct WD33C93_hostdata *hdata;
181 wd33c93_regs regs;
182 int num_gvp11 = 0;
183#ifdef CHECK_WD33C93
184 volatile unsigned char *sasr_3393, *scmd_3393;
185 unsigned char save_sasr;
186 unsigned char q, qq;
187#endif
188
189 if (!MACH_IS_AMIGA || called)
190 return 0;
191 called = 1;
192
193 tpnt->proc_name = "GVP11";
194 tpnt->proc_info = &wd33c93_proc_info;
195
196 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
197 /*
198 * This should (hopefully) be the correct way to identify
199 * all the different GVP SCSI controllers (except for the
200 * SERIES I though).
201 */
202
203 if (z->id == ZORRO_PROD_GVP_COMBO_030_R3_SCSI ||
204 z->id == ZORRO_PROD_GVP_SERIES_II)
205 default_dma_xfer_mask = ~0x00ffffff;
206 else if (z->id == ZORRO_PROD_GVP_GFORCE_030_SCSI ||
207 z->id == ZORRO_PROD_GVP_A530_SCSI ||
208 z->id == ZORRO_PROD_GVP_COMBO_030_R4_SCSI)
209 default_dma_xfer_mask = ~0x01ffffff;
210 else if (z->id == ZORRO_PROD_GVP_A1291 ||
211 z->id == ZORRO_PROD_GVP_GFORCE_040_SCSI_1)
212 default_dma_xfer_mask = ~0x07ffffff;
213 else 165 else
214 continue; 166 amiga_chip_free(wh->dma_bounce_buffer);
215
216 /*
217 * Rumors state that some GVP ram boards use the same product
218 * code as the SCSI controllers. Therefore if the board-size
219 * is not 64KB we asume it is a ram board and bail out.
220 */
221 if (z->resource.end - z->resource.start != 0xffff)
222 continue;
223 167
224 address = z->resource.start; 168 wh->dma_bounce_buffer = NULL;
225 if (!request_mem_region(address, 256, "wd33c93")) 169 wh->dma_bounce_len = 0;
226 continue;
227
228#ifdef CHECK_WD33C93
229
230 /*
231 * These darn GVP boards are a problem - it can be tough to tell
232 * whether or not they include a SCSI controller. This is the
233 * ultimate Yet-Another-GVP-Detection-Hack in that it actually
234 * probes for a WD33c93 chip: If we find one, it's extremely
235 * likely that this card supports SCSI, regardless of Product_
236 * Code, Board_Size, etc.
237 */
238
239 /* Get pointers to the presumed register locations and save contents */
240
241 sasr_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SASR);
242 scmd_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SCMD);
243 save_sasr = *sasr_3393;
244
245 /* First test the AuxStatus Reg */
246
247 q = *sasr_3393; /* read it */
248 if (q & 0x08) /* bit 3 should always be clear */
249 goto release;
250 *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
251 if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
252 *sasr_3393 = save_sasr; /* Oops - restore this byte */
253 goto release;
254 }
255 if (*sasr_3393 != q) { /* should still read the same */
256 *sasr_3393 = save_sasr; /* Oops - restore this byte */
257 goto release;
258 }
259 if (*scmd_3393 != q) /* and so should the image at 0x1f */
260 goto release;
261
262 /*
263 * Ok, we probably have a wd33c93, but let's check a few other places
264 * for good measure. Make sure that this works for both 'A and 'B
265 * chip versions.
266 */
267
268 *sasr_3393 = WD_SCSI_STATUS;
269 q = *scmd_3393;
270 *sasr_3393 = WD_SCSI_STATUS;
271 *scmd_3393 = ~q;
272 *sasr_3393 = WD_SCSI_STATUS;
273 qq = *scmd_3393;
274 *sasr_3393 = WD_SCSI_STATUS;
275 *scmd_3393 = q;
276 if (qq != q) /* should be read only */
277 goto release;
278 *sasr_3393 = 0x1e; /* this register is unimplemented */
279 q = *scmd_3393;
280 *sasr_3393 = 0x1e;
281 *scmd_3393 = ~q;
282 *sasr_3393 = 0x1e;
283 qq = *scmd_3393;
284 *sasr_3393 = 0x1e;
285 *scmd_3393 = q;
286 if (qq != q || qq != 0xff) /* should be read only, all 1's */
287 goto release;
288 *sasr_3393 = WD_TIMEOUT_PERIOD;
289 q = *scmd_3393;
290 *sasr_3393 = WD_TIMEOUT_PERIOD;
291 *scmd_3393 = ~q;
292 *sasr_3393 = WD_TIMEOUT_PERIOD;
293 qq = *scmd_3393;
294 *sasr_3393 = WD_TIMEOUT_PERIOD;
295 *scmd_3393 = q;
296 if (qq != (~q & 0xff)) /* should be read/write */
297 goto release;
298#endif
299
300 instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
301 if (instance == NULL)
302 goto release;
303 instance->base = ZTWO_VADDR(address);
304 instance->irq = IRQ_AMIGA_PORTS;
305 instance->unique_id = z->slotaddr;
306
307 hdata = shost_priv(instance);
308 if (gvp11_xfer_mask)
309 hdata->dma_xfer_mask = gvp11_xfer_mask;
310 else
311 hdata->dma_xfer_mask = default_dma_xfer_mask;
312
313 DMA(instance)->secret2 = 1;
314 DMA(instance)->secret1 = 0;
315 DMA(instance)->secret3 = 15;
316 while (DMA(instance)->CNTR & GVP11_DMAC_BUSY)
317 ;
318 DMA(instance)->CNTR = 0;
319
320 DMA(instance)->BANK = 0;
321
322 epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
323
324 /*
325 * Check for 14MHz SCSI clock
326 */
327 regs.SASR = &(DMA(instance)->SASR);
328 regs.SCMD = &(DMA(instance)->SCMD);
329 hdata->no_sync = 0xff;
330 hdata->fast = 0;
331 hdata->dma_mode = CTRL_DMA;
332 wd33c93_init(instance, regs, dma_setup, dma_stop,
333 (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
334 : WD33C93_FS_12_15);
335
336 if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED,
337 "GVP11 SCSI", instance))
338 goto unregister;
339 DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE;
340 num_gvp11++;
341 continue;
342
343unregister:
344 scsi_unregister(instance);
345release:
346 release_mem_region(address, 256);
347 } 170 }
348
349 return num_gvp11;
350} 171}
351 172
352static int gvp11_bus_reset(struct scsi_cmnd *cmd) 173static int gvp11_bus_reset(struct scsi_cmnd *cmd)
353{ 174{
175 struct Scsi_Host *instance = cmd->device->host;
176
354 /* FIXME perform bus-specific reset */ 177 /* FIXME perform bus-specific reset */
355 178
356 /* FIXME 2: shouldn't we no-op this function (return 179 /* FIXME 2: shouldn't we no-op this function (return
357 FAILED), and fall back to host reset function, 180 FAILED), and fall back to host reset function,
358 wd33c93_host_reset ? */ 181 wd33c93_host_reset ? */
359 182
360 spin_lock_irq(cmd->device->host->host_lock); 183 spin_lock_irq(instance->host_lock);
361 wd33c93_host_reset(cmd); 184 wd33c93_host_reset(cmd);
362 spin_unlock_irq(cmd->device->host->host_lock); 185 spin_unlock_irq(instance->host_lock);
363 186
364 return SUCCESS; 187 return SUCCESS;
365} 188}
366 189
367 190static struct scsi_host_template gvp11_scsi_template = {
368#define HOSTS_C 191 .module = THIS_MODULE,
369
370#include "gvp11.h"
371
372static struct scsi_host_template driver_template = {
373 .proc_name = "GVP11",
374 .name = "GVP Series II SCSI", 192 .name = "GVP Series II SCSI",
375 .detect = gvp11_detect, 193 .proc_info = wd33c93_proc_info,
376 .release = gvp11_release, 194 .proc_name = "GVP11",
377 .queuecommand = wd33c93_queuecommand, 195 .queuecommand = wd33c93_queuecommand,
378 .eh_abort_handler = wd33c93_abort, 196 .eh_abort_handler = wd33c93_abort,
379 .eh_bus_reset_handler = gvp11_bus_reset, 197 .eh_bus_reset_handler = gvp11_bus_reset,
@@ -385,17 +203,230 @@ static struct scsi_host_template driver_template = {
385 .use_clustering = DISABLE_CLUSTERING 203 .use_clustering = DISABLE_CLUSTERING
386}; 204};
387 205
206static int __devinit check_wd33c93(struct gvp11_scsiregs *regs)
207{
208#ifdef CHECK_WD33C93
209 volatile unsigned char *sasr_3393, *scmd_3393;
210 unsigned char save_sasr;
211 unsigned char q, qq;
388 212
389#include "scsi_module.c" 213 /*
214 * These darn GVP boards are a problem - it can be tough to tell
215 * whether or not they include a SCSI controller. This is the
216 * ultimate Yet-Another-GVP-Detection-Hack in that it actually
217 * probes for a WD33c93 chip: If we find one, it's extremely
218 * likely that this card supports SCSI, regardless of Product_
219 * Code, Board_Size, etc.
220 */
221
222 /* Get pointers to the presumed register locations and save contents */
223
224 sasr_3393 = &regs->SASR;
225 scmd_3393 = &regs->SCMD;
226 save_sasr = *sasr_3393;
227
228 /* First test the AuxStatus Reg */
229
230 q = *sasr_3393; /* read it */
231 if (q & 0x08) /* bit 3 should always be clear */
232 return -ENODEV;
233 *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
234 if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
235 *sasr_3393 = save_sasr; /* Oops - restore this byte */
236 return -ENODEV;
237 }
238 if (*sasr_3393 != q) { /* should still read the same */
239 *sasr_3393 = save_sasr; /* Oops - restore this byte */
240 return -ENODEV;
241 }
242 if (*scmd_3393 != q) /* and so should the image at 0x1f */
243 return -ENODEV;
244
245 /*
246 * Ok, we probably have a wd33c93, but let's check a few other places
247 * for good measure. Make sure that this works for both 'A and 'B
248 * chip versions.
249 */
250
251 *sasr_3393 = WD_SCSI_STATUS;
252 q = *scmd_3393;
253 *sasr_3393 = WD_SCSI_STATUS;
254 *scmd_3393 = ~q;
255 *sasr_3393 = WD_SCSI_STATUS;
256 qq = *scmd_3393;
257 *sasr_3393 = WD_SCSI_STATUS;
258 *scmd_3393 = q;
259 if (qq != q) /* should be read only */
260 return -ENODEV;
261 *sasr_3393 = 0x1e; /* this register is unimplemented */
262 q = *scmd_3393;
263 *sasr_3393 = 0x1e;
264 *scmd_3393 = ~q;
265 *sasr_3393 = 0x1e;
266 qq = *scmd_3393;
267 *sasr_3393 = 0x1e;
268 *scmd_3393 = q;
269 if (qq != q || qq != 0xff) /* should be read only, all 1's */
270 return -ENODEV;
271 *sasr_3393 = WD_TIMEOUT_PERIOD;
272 q = *scmd_3393;
273 *sasr_3393 = WD_TIMEOUT_PERIOD;
274 *scmd_3393 = ~q;
275 *sasr_3393 = WD_TIMEOUT_PERIOD;
276 qq = *scmd_3393;
277 *sasr_3393 = WD_TIMEOUT_PERIOD;
278 *scmd_3393 = q;
279 if (qq != (~q & 0xff)) /* should be read/write */
280 return -ENODEV;
281#endif /* CHECK_WD33C93 */
390 282
391int gvp11_release(struct Scsi_Host *instance) 283 return 0;
284}
285
286static int __devinit gvp11_probe(struct zorro_dev *z,
287 const struct zorro_device_id *ent)
392{ 288{
393#ifdef MODULE 289 struct Scsi_Host *instance;
394 DMA(instance)->CNTR = 0; 290 unsigned long address;
395 release_mem_region(ZTWO_PADDR(instance->base), 256); 291 int error;
292 unsigned int epc;
293 unsigned int default_dma_xfer_mask;
294 struct gvp11_hostdata *hdata;
295 struct gvp11_scsiregs *regs;
296 wd33c93_regs wdregs;
297
298 default_dma_xfer_mask = ent->driver_data;
299
300 /*
301 * Rumors state that some GVP ram boards use the same product
302 * code as the SCSI controllers. Therefore if the board-size
 303 * is not 64KB we assume it is a ram board and bail out.
304 */
305 if (zorro_resource_len(z) != 0x10000)
306 return -ENODEV;
307
308 address = z->resource.start;
309 if (!request_mem_region(address, 256, "wd33c93"))
310 return -EBUSY;
311
312 regs = (struct gvp11_scsiregs *)(ZTWO_VADDR(address));
313
314 error = check_wd33c93(regs);
315 if (error)
316 goto fail_check_or_alloc;
317
318 instance = scsi_host_alloc(&gvp11_scsi_template,
319 sizeof(struct gvp11_hostdata));
320 if (!instance) {
321 error = -ENOMEM;
322 goto fail_check_or_alloc;
323 }
324
325 instance->irq = IRQ_AMIGA_PORTS;
326 instance->unique_id = z->slotaddr;
327
328 regs->secret2 = 1;
329 regs->secret1 = 0;
330 regs->secret3 = 15;
331 while (regs->CNTR & GVP11_DMAC_BUSY)
332 ;
333 regs->CNTR = 0;
334 regs->BANK = 0;
335
336 wdregs.SASR = &regs->SASR;
337 wdregs.SCMD = &regs->SCMD;
338
339 hdata = shost_priv(instance);
340 if (gvp11_xfer_mask)
341 hdata->wh.dma_xfer_mask = gvp11_xfer_mask;
342 else
343 hdata->wh.dma_xfer_mask = default_dma_xfer_mask;
344
345 hdata->wh.no_sync = 0xff;
346 hdata->wh.fast = 0;
347 hdata->wh.dma_mode = CTRL_DMA;
348 hdata->regs = regs;
349
350 /*
351 * Check for 14MHz SCSI clock
352 */
353 epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
354 wd33c93_init(instance, wdregs, dma_setup, dma_stop,
355 (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
356 : WD33C93_FS_12_15);
357
358 error = request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED,
359 "GVP11 SCSI", instance);
360 if (error)
361 goto fail_irq;
362
363 regs->CNTR = GVP11_DMAC_INT_ENABLE;
364
365 error = scsi_add_host(instance, NULL);
366 if (error)
367 goto fail_host;
368
369 zorro_set_drvdata(z, instance);
370 scsi_scan_host(instance);
371 return 0;
372
373fail_host:
396 free_irq(IRQ_AMIGA_PORTS, instance); 374 free_irq(IRQ_AMIGA_PORTS, instance);
397#endif 375fail_irq:
398 return 1; 376 scsi_host_put(instance);
377fail_check_or_alloc:
378 release_mem_region(address, 256);
379 return error;
380}
381
382static void __devexit gvp11_remove(struct zorro_dev *z)
383{
384 struct Scsi_Host *instance = zorro_get_drvdata(z);
385 struct gvp11_hostdata *hdata = shost_priv(instance);
386
387 hdata->regs->CNTR = 0;
388 scsi_remove_host(instance);
389 free_irq(IRQ_AMIGA_PORTS, instance);
390 scsi_host_put(instance);
391 release_mem_region(z->resource.start, 256);
392}
393
394 /*
395 * This should (hopefully) be the correct way to identify
396 * all the different GVP SCSI controllers (except for the
397 * SERIES I though).
398 */
399
400static struct zorro_device_id gvp11_zorro_tbl[] __devinitdata = {
401 { ZORRO_PROD_GVP_COMBO_030_R3_SCSI, ~0x00ffffff },
402 { ZORRO_PROD_GVP_SERIES_II, ~0x00ffffff },
403 { ZORRO_PROD_GVP_GFORCE_030_SCSI, ~0x01ffffff },
404 { ZORRO_PROD_GVP_A530_SCSI, ~0x01ffffff },
405 { ZORRO_PROD_GVP_COMBO_030_R4_SCSI, ~0x01ffffff },
406 { ZORRO_PROD_GVP_A1291, ~0x07ffffff },
407 { ZORRO_PROD_GVP_GFORCE_040_SCSI_1, ~0x07ffffff },
408 { 0 }
409};
410MODULE_DEVICE_TABLE(zorro, gvp11_zorro_tbl);
411
412static struct zorro_driver gvp11_driver = {
413 .name = "gvp11",
414 .id_table = gvp11_zorro_tbl,
415 .probe = gvp11_probe,
416 .remove = __devexit_p(gvp11_remove),
417};
418
419static int __init gvp11_init(void)
420{
421 return zorro_register_driver(&gvp11_driver);
422}
423module_init(gvp11_init);
424
425static void __exit gvp11_exit(void)
426{
427 zorro_unregister_driver(&gvp11_driver);
399} 428}
429module_exit(gvp11_exit);
400 430
431MODULE_DESCRIPTION("GVP Series II SCSI");
401MODULE_LICENSE("GPL"); 432MODULE_LICENSE("GPL");
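
The gvp11.c rewrite replaces the old detect()/release() bus scan with a zorro_driver whose id_table carries the per-board DMA transfer mask in driver_data. Condensed to its registration skeleton — the my_* names are placeholders, and the real probe additionally does the WD33c93 check, host allocation, IRQ setup and scsi_add_host() shown in the hunks above:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/zorro.h>

static int __devinit my_probe(struct zorro_dev *z,
			      const struct zorro_device_id *ent)
{
	/* map registers, verify the chip, allocate and register the SCSI host */
	return 0;
}

static void __devexit my_remove(struct zorro_dev *z)
{
	/* undo my_probe() in reverse order */
}

static struct zorro_device_id my_ids[] __devinitdata = {
	{ ZORRO_PROD_GVP_SERIES_II, 0 },	/* real table stores the DMA mask in driver_data */
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, my_ids);

static struct zorro_driver my_driver = {
	.name		= "my_driver",
	.id_table	= my_ids,
	.probe		= my_probe,
	.remove		= __devexit_p(my_remove),
};

static int __init my_init(void)
{
	return zorro_register_driver(&my_driver);
}
module_init(my_init);

static void __exit my_exit(void)
{
	zorro_unregister_driver(&my_driver);
}
module_exit(my_exit);

MODULE_LICENSE("GPL");
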
diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h
index e2efdf9601ef..852913cde5dd 100644
--- a/drivers/scsi/gvp11.h
+++ b/drivers/scsi/gvp11.h
@@ -11,9 +11,6 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13 13
14int gvp11_detect(struct scsi_host_template *);
15int gvp11_release(struct Scsi_Host *);
16
17#ifndef CMD_PER_LUN 14#ifndef CMD_PER_LUN
18#define CMD_PER_LUN 2 15#define CMD_PER_LUN 2
19#endif 16#endif
@@ -22,15 +19,13 @@ int gvp11_release(struct Scsi_Host *);
22#define CAN_QUEUE 16 19#define CAN_QUEUE 16
23#endif 20#endif
24 21
25#ifndef HOSTS_C
26
27/* 22/*
28 * if the transfer address ANDed with this results in a non-zero 23 * if the transfer address ANDed with this results in a non-zero
29 * result, then we can't use DMA. 24 * result, then we can't use DMA.
30 */ 25 */
31#define GVP11_XFER_MASK (0xff000001) 26#define GVP11_XFER_MASK (0xff000001)
32 27
33typedef struct { 28struct gvp11_scsiregs {
34 unsigned char pad1[64]; 29 unsigned char pad1[64];
35 volatile unsigned short CNTR; 30 volatile unsigned short CNTR;
36 unsigned char pad2[31]; 31 unsigned char pad2[31];
@@ -46,7 +41,7 @@ typedef struct {
46 volatile unsigned short SP_DMA; 41 volatile unsigned short SP_DMA;
47 volatile unsigned short secret2; /* store 1 here */ 42 volatile unsigned short secret2; /* store 1 here */
48 volatile unsigned short secret3; /* store 15 here */ 43 volatile unsigned short secret3; /* store 15 here */
49} gvp11_scsiregs; 44};
50 45
51/* bits in CNTR */ 46/* bits in CNTR */
52#define GVP11_DMAC_BUSY (1<<0) 47#define GVP11_DMAC_BUSY (1<<0)
@@ -54,6 +49,4 @@ typedef struct {
54#define GVP11_DMAC_INT_ENABLE (1<<3) 49#define GVP11_DMAC_INT_ENABLE (1<<3)
55#define GVP11_DMAC_DIR_WRITE (1<<4) 50#define GVP11_DMAC_DIR_WRITE (1<<4)
56 51
57#endif /* else def HOSTS_C */
58
59#endif /* GVP11_H */ 52#endif /* GVP11_H */
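
With the header change, each host's private area becomes a gvp11_hostdata that embeds the generic WD33C93 state and adds a pointer to the board registers, reached through shost_priv(). A small sketch of that access pattern; gvp11_regs() is a hypothetical convenience helper added for illustration, not something the patch introduces:

#include <scsi/scsi_host.h>

#include "wd33c93.h"
#include "gvp11.h"

struct gvp11_hostdata {
	struct WD33C93_hostdata wh;	/* generic chip-driver state, embedded first */
	struct gvp11_scsiregs *regs;	/* board DMA/interrupt registers */
};

/* hypothetical helper: fetch the board registers for a given host */
static inline struct gvp11_scsiregs *gvp11_regs(struct Scsi_Host *instance)
{
	struct gvp11_hostdata *hdata = shost_priv(instance);

	return hdata->regs;
}
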
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 6a6661c35b2f..82ea4a8226b0 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -567,7 +567,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
567static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) 567static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
568{ 568{
569 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 569 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
570 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 570 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
571 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
571 dma_addr_t dma_addr = ipr_cmd->dma_addr; 572 dma_addr_t dma_addr = ipr_cmd->dma_addr;
572 573
573 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 574 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
@@ -576,19 +577,19 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
576 ioarcb->ioadl_len = 0; 577 ioarcb->ioadl_len = 0;
577 ioarcb->read_ioadl_len = 0; 578 ioarcb->read_ioadl_len = 0;
578 579
579 if (ipr_cmd->ioa_cfg->sis64) 580 if (ipr_cmd->ioa_cfg->sis64) {
580 ioarcb->u.sis64_addr_data.data_ioadl_addr = 581 ioarcb->u.sis64_addr_data.data_ioadl_addr =
581 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 582 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
582 else { 583 ioasa64->u.gata.status = 0;
584 } else {
583 ioarcb->write_ioadl_addr = 585 ioarcb->write_ioadl_addr =
584 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 586 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
585 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 587 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
588 ioasa->u.gata.status = 0;
586 } 589 }
587 590
588 ioasa->ioasc = 0; 591 ioasa->hdr.ioasc = 0;
589 ioasa->residual_data_len = 0; 592 ioasa->hdr.residual_data_len = 0;
590 ioasa->u.gata.status = 0;
591
592 ipr_cmd->scsi_cmd = NULL; 593 ipr_cmd->scsi_cmd = NULL;
593 ipr_cmd->qc = NULL; 594 ipr_cmd->qc = NULL;
594 ipr_cmd->sense_buffer[0] = 0; 595 ipr_cmd->sense_buffer[0] = 0;
@@ -768,8 +769,8 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
768 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) { 769 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
769 list_del(&ipr_cmd->queue); 770 list_del(&ipr_cmd->queue);
770 771
771 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); 772 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
772 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID); 773 ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
773 774
774 if (ipr_cmd->scsi_cmd) 775 if (ipr_cmd->scsi_cmd)
775 ipr_cmd->done = ipr_scsi_eh_done; 776 ipr_cmd->done = ipr_scsi_eh_done;
@@ -1040,7 +1041,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,
1040 proto = cfgtew->u.cfgte64->proto; 1041 proto = cfgtew->u.cfgte64->proto;
1041 res->res_flags = cfgtew->u.cfgte64->res_flags; 1042 res->res_flags = cfgtew->u.cfgte64->res_flags;
1042 res->qmodel = IPR_QUEUEING_MODEL64(res); 1043 res->qmodel = IPR_QUEUEING_MODEL64(res);
1043 res->type = cfgtew->u.cfgte64->res_type & 0x0f; 1044 res->type = cfgtew->u.cfgte64->res_type;
1044 1045
1045 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, 1046 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1046 sizeof(res->res_path)); 1047 sizeof(res->res_path));
@@ -1319,7 +1320,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1319{ 1320{
1320 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 1321 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1321 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 1322 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1322 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 1323 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1323 1324
1324 list_del(&hostrcb->queue); 1325 list_del(&hostrcb->queue);
1325 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 1326 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
@@ -2354,7 +2355,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2354{ 2355{
2355 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 2356 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2356 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 2357 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2357 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 2358 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2358 u32 fd_ioasc; 2359 u32 fd_ioasc;
2359 2360
2360 if (ioa_cfg->sis64) 2361 if (ioa_cfg->sis64)
@@ -4509,11 +4510,16 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4509 } 4510 }
4510 4511
4511 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); 4512 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4512 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 4513 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4513 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 4514 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4514 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) 4515 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4515 memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata, 4516 if (ipr_cmd->ioa_cfg->sis64)
4516 sizeof(struct ipr_ioasa_gata)); 4517 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4518 sizeof(struct ipr_ioasa_gata));
4519 else
4520 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4521 sizeof(struct ipr_ioasa_gata));
4522 }
4517 4523
4518 LEAVE; 4524 LEAVE;
4519 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0); 4525 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
@@ -4768,7 +4774,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4768 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n", 4774 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4769 scsi_cmd->cmnd[0]); 4775 scsi_cmd->cmnd[0]);
4770 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); 4776 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4771 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 4777 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4772 4778
4773 /* 4779 /*
4774 * If the abort task timed out and we sent a bus reset, we will get 4780 * If the abort task timed out and we sent a bus reset, we will get
@@ -4812,15 +4818,39 @@ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4812/** 4818/**
4813 * ipr_handle_other_interrupt - Handle "other" interrupts 4819 * ipr_handle_other_interrupt - Handle "other" interrupts
4814 * @ioa_cfg: ioa config struct 4820 * @ioa_cfg: ioa config struct
4815 * @int_reg: interrupt register
4816 * 4821 *
4817 * Return value: 4822 * Return value:
4818 * IRQ_NONE / IRQ_HANDLED 4823 * IRQ_NONE / IRQ_HANDLED
4819 **/ 4824 **/
4820static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, 4825static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg)
4821 volatile u32 int_reg)
4822{ 4826{
4823 irqreturn_t rc = IRQ_HANDLED; 4827 irqreturn_t rc = IRQ_HANDLED;
4828 volatile u32 int_reg, int_mask_reg;
4829
4830 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4831 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4832
4833 /* If an interrupt on the adapter did not occur, ignore it.
4834 * Or in the case of SIS 64, check for a stage change interrupt.
4835 */
4836 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
4837 if (ioa_cfg->sis64) {
4838 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4839 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4840 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4841
4842 /* clear stage change */
4843 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4844 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4845 list_del(&ioa_cfg->reset_cmd->queue);
4846 del_timer(&ioa_cfg->reset_cmd->timer);
4847 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4848 return IRQ_HANDLED;
4849 }
4850 }
4851
4852 return IRQ_NONE;
4853 }
4824 4854
4825 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 4855 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4826 /* Mask the interrupt */ 4856 /* Mask the interrupt */
@@ -4881,7 +4911,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4881{ 4911{
4882 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; 4912 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4883 unsigned long lock_flags = 0; 4913 unsigned long lock_flags = 0;
4884 volatile u32 int_reg, int_mask_reg; 4914 volatile u32 int_reg;
4885 u32 ioasc; 4915 u32 ioasc;
4886 u16 cmd_index; 4916 u16 cmd_index;
4887 int num_hrrq = 0; 4917 int num_hrrq = 0;
@@ -4896,33 +4926,6 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4896 return IRQ_NONE; 4926 return IRQ_NONE;
4897 } 4927 }
4898 4928
4899 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4900 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4901
4902 /* If an interrupt on the adapter did not occur, ignore it.
4903 * Or in the case of SIS 64, check for a stage change interrupt.
4904 */
4905 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4906 if (ioa_cfg->sis64) {
4907 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4908 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4909 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4910
4911 /* clear stage change */
4912 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4913 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4914 list_del(&ioa_cfg->reset_cmd->queue);
4915 del_timer(&ioa_cfg->reset_cmd->timer);
4916 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4917 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4918 return IRQ_HANDLED;
4919 }
4920 }
4921
4922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4923 return IRQ_NONE;
4924 }
4925
4926 while (1) { 4929 while (1) {
4927 ipr_cmd = NULL; 4930 ipr_cmd = NULL;
4928 4931
@@ -4940,7 +4943,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4940 4943
4941 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; 4944 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4942 4945
4943 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 4946 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4944 4947
4945 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); 4948 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4946 4949
@@ -4962,7 +4965,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4962 /* Clear the PCI interrupt */ 4965 /* Clear the PCI interrupt */
4963 do { 4966 do {
4964 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); 4967 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
4965 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; 4968 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
4966 } while (int_reg & IPR_PCII_HRRQ_UPDATED && 4969 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4967 num_hrrq++ < IPR_MAX_HRRQ_RETRIES); 4970 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4968 4971
@@ -4977,7 +4980,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4977 } 4980 }
4978 4981
4979 if (unlikely(rc == IRQ_NONE)) 4982 if (unlikely(rc == IRQ_NONE))
4980 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); 4983 rc = ipr_handle_other_interrupt(ioa_cfg);
4981 4984
4982 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4985 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4983 return rc; 4986 return rc;
@@ -5014,6 +5017,10 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5014 5017
5015 ipr_cmd->dma_use_sg = nseg; 5018 ipr_cmd->dma_use_sg = nseg;
5016 5019
5020 ioarcb->data_transfer_length = cpu_to_be32(length);
5021 ioarcb->ioadl_len =
5022 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5023
5017 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { 5024 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5018 ioadl_flags = IPR_IOADL_FLAGS_WRITE; 5025 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5019 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; 5026 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
@@ -5135,7 +5142,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5135 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5142 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5136 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 5143 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5137 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 5144 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5138 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 5145 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5139 5146
5140 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { 5147 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5141 scsi_cmd->result |= (DID_ERROR << 16); 5148 scsi_cmd->result |= (DID_ERROR << 16);
@@ -5166,7 +5173,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5166static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) 5173static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5167{ 5174{
5168 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5175 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5169 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 5176 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5170 dma_addr_t dma_addr = ipr_cmd->dma_addr; 5177 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5171 5178
5172 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); 5179 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
@@ -5174,8 +5181,8 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5174 ioarcb->read_data_transfer_length = 0; 5181 ioarcb->read_data_transfer_length = 0;
5175 ioarcb->ioadl_len = 0; 5182 ioarcb->ioadl_len = 0;
5176 ioarcb->read_ioadl_len = 0; 5183 ioarcb->read_ioadl_len = 0;
5177 ioasa->ioasc = 0; 5184 ioasa->hdr.ioasc = 0;
5178 ioasa->residual_data_len = 0; 5185 ioasa->hdr.residual_data_len = 0;
5179 5186
5180 if (ipr_cmd->ioa_cfg->sis64) 5187 if (ipr_cmd->ioa_cfg->sis64)
5181 ioarcb->u.sis64_addr_data.data_ioadl_addr = 5188 ioarcb->u.sis64_addr_data.data_ioadl_addr =
@@ -5200,7 +5207,7 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5200static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) 5207static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5201{ 5208{
5202 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; 5209 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5203 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 5210 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5204 5211
5205 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { 5212 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5206 ipr_erp_done(ipr_cmd); 5213 ipr_erp_done(ipr_cmd);
@@ -5277,12 +5284,12 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5277 int i; 5284 int i;
5278 u16 data_len; 5285 u16 data_len;
5279 u32 ioasc, fd_ioasc; 5286 u32 ioasc, fd_ioasc;
5280 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 5287 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5281 __be32 *ioasa_data = (__be32 *)ioasa; 5288 __be32 *ioasa_data = (__be32 *)ioasa;
5282 int error_index; 5289 int error_index;
5283 5290
5284 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK; 5291 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5285 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK; 5292 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5286 5293
5287 if (0 == ioasc) 5294 if (0 == ioasc)
5288 return; 5295 return;
@@ -5297,7 +5304,7 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5297 5304
5298 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { 5305 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5299 /* Don't log an error if the IOA already logged one */ 5306 /* Don't log an error if the IOA already logged one */
5300 if (ioasa->ilid != 0) 5307 if (ioasa->hdr.ilid != 0)
5301 return; 5308 return;
5302 5309
5303 if (!ipr_is_gscsi(res)) 5310 if (!ipr_is_gscsi(res))
@@ -5309,10 +5316,11 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5309 5316
5310 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); 5317 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5311 5318
5312 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len)) 5319 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5320 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5321 data_len = sizeof(struct ipr_ioasa64);
5322 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5313 data_len = sizeof(struct ipr_ioasa); 5323 data_len = sizeof(struct ipr_ioasa);
5314 else
5315 data_len = be16_to_cpu(ioasa->ret_stat_len);
5316 5324
5317 ipr_err("IOASA Dump:\n"); 5325 ipr_err("IOASA Dump:\n");
5318 5326
@@ -5338,8 +5346,8 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5338 u32 failing_lba; 5346 u32 failing_lba;
5339 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; 5347 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5340 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; 5348 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5341 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 5349 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5342 u32 ioasc = be32_to_cpu(ioasa->ioasc); 5350 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5343 5351
5344 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 5352 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5345 5353
@@ -5382,7 +5390,7 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5382 5390
5383 /* Illegal request */ 5391 /* Illegal request */
5384 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && 5392 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5385 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) { 5393 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5386 sense_buf[7] = 10; /* additional length */ 5394 sense_buf[7] = 10; /* additional length */
5387 5395
5388 /* IOARCB was in error */ 5396 /* IOARCB was in error */
@@ -5393,10 +5401,10 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5393 5401
5394 sense_buf[16] = 5402 sense_buf[16] =
5395 ((IPR_FIELD_POINTER_MASK & 5403 ((IPR_FIELD_POINTER_MASK &
5396 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff; 5404 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5397 sense_buf[17] = 5405 sense_buf[17] =
5398 (IPR_FIELD_POINTER_MASK & 5406 (IPR_FIELD_POINTER_MASK &
5399 be32_to_cpu(ioasa->ioasc_specific)) & 0xff; 5407 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5400 } else { 5408 } else {
5401 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { 5409 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5402 if (ipr_is_vset_device(res)) 5410 if (ipr_is_vset_device(res))
@@ -5428,14 +5436,20 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5428 **/ 5436 **/
5429static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) 5437static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5430{ 5438{
5431 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; 5439 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5440 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5432 5441
5433 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) 5442 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5434 return 0; 5443 return 0;
5435 5444
5436 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, 5445 if (ipr_cmd->ioa_cfg->sis64)
5437 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), 5446 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5438 SCSI_SENSE_BUFFERSIZE)); 5447 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5448 SCSI_SENSE_BUFFERSIZE));
5449 else
5450 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5451 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5452 SCSI_SENSE_BUFFERSIZE));
5439 return 1; 5453 return 1;
5440} 5454}
5441 5455
@@ -5455,7 +5469,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5455{ 5469{
5456 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5470 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5457 struct ipr_resource_entry *res = scsi_cmd->device->hostdata; 5471 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5458 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 5472 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5459 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; 5473 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5460 5474
5461 if (!res) { 5475 if (!res) {
@@ -5547,9 +5561,9 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5547{ 5561{
5548 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 5562 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5549 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 5563 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5550 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 5564 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5551 5565
5552 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len)); 5566 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5553 5567
5554 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 5568 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5555 scsi_dma_unmap(ipr_cmd->scsi_cmd); 5569 scsi_dma_unmap(ipr_cmd->scsi_cmd);
@@ -5839,19 +5853,23 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5839 struct ata_queued_cmd *qc = ipr_cmd->qc; 5853 struct ata_queued_cmd *qc = ipr_cmd->qc;
5840 struct ipr_sata_port *sata_port = qc->ap->private_data; 5854 struct ipr_sata_port *sata_port = qc->ap->private_data;
5841 struct ipr_resource_entry *res = sata_port->res; 5855 struct ipr_resource_entry *res = sata_port->res;
5842 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 5856 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5843 5857
5844 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata, 5858 if (ipr_cmd->ioa_cfg->sis64)
5845 sizeof(struct ipr_ioasa_gata)); 5859 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5860 sizeof(struct ipr_ioasa_gata));
5861 else
5862 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5863 sizeof(struct ipr_ioasa_gata));
5846 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); 5864 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5847 5865
5848 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) 5866 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5849 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); 5867 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
5850 5868
5851 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) 5869 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5852 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status); 5870 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
5853 else 5871 else
5854 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status); 5872 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
5855 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 5873 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5856 ata_qc_complete(qc); 5874 ata_qc_complete(qc);
5857} 5875}
@@ -6520,7 +6538,7 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6520static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) 6538static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6521{ 6539{
6522 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6540 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6523 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 6541 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6524 6542
6525 dev_err(&ioa_cfg->pdev->dev, 6543 dev_err(&ioa_cfg->pdev->dev,
6526 "0x%02X failed with IOASC: 0x%08X\n", 6544 "0x%02X failed with IOASC: 0x%08X\n",
@@ -6544,7 +6562,7 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6544static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) 6562static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6545{ 6563{
6546 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6564 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6547 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 6565 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6548 6566
6549 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 6567 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6550 ipr_cmd->job_step = ipr_set_supported_devs; 6568 ipr_cmd->job_step = ipr_set_supported_devs;
@@ -6634,7 +6652,7 @@ static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6634 **/ 6652 **/
6635static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) 6653static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6636{ 6654{
6637 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 6655 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6638 6656
6639 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { 6657 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6640 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; 6658 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
@@ -6706,7 +6724,7 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6706 list_move_tail(&res->queue, &old_res); 6724 list_move_tail(&res->queue, &old_res);
6707 6725
6708 if (ioa_cfg->sis64) 6726 if (ioa_cfg->sis64)
6709 entries = ioa_cfg->u.cfg_table64->hdr64.num_entries; 6727 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
6710 else 6728 else
6711 entries = ioa_cfg->u.cfg_table->hdr.num_entries; 6729 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6712 6730
@@ -6792,6 +6810,7 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6792 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); 6810 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6793 6811
6794 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; 6812 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6813 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
6795 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; 6814 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6796 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; 6815 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6797 6816
@@ -7122,7 +7141,9 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7122 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); 7141 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7123 7142
7124 /* sanity check the stage_time value */ 7143 /* sanity check the stage_time value */
7125 if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) 7144 if (stage_time == 0)
7145 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7146 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7126 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; 7147 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7127 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) 7148 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7128 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; 7149 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
@@ -7165,13 +7186,14 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7165{ 7186{
7166 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7187 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7167 volatile u32 int_reg; 7188 volatile u32 int_reg;
7189 volatile u64 maskval;
7168 7190
7169 ENTER; 7191 ENTER;
7170 ipr_cmd->job_step = ipr_ioafp_identify_hrrq; 7192 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7171 ipr_init_ioa_mem(ioa_cfg); 7193 ipr_init_ioa_mem(ioa_cfg);
7172 7194
7173 ioa_cfg->allow_interrupts = 1; 7195 ioa_cfg->allow_interrupts = 1;
7174 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); 7196 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7175 7197
7176 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { 7198 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7177 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), 7199 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
@@ -7183,9 +7205,12 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7183 /* Enable destructive diagnostics on IOA */ 7205 /* Enable destructive diagnostics on IOA */
7184 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); 7206 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7185 7207
7186 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); 7208 if (ioa_cfg->sis64) {
7187 if (ioa_cfg->sis64) 7209 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7188 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg); 7210 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7211 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7212 } else
7213 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7189 7214
7190 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); 7215 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7191 7216
@@ -7332,12 +7357,12 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7332 rc = pci_restore_state(ioa_cfg->pdev); 7357 rc = pci_restore_state(ioa_cfg->pdev);
7333 7358
7334 if (rc != PCIBIOS_SUCCESSFUL) { 7359 if (rc != PCIBIOS_SUCCESSFUL) {
7335 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 7360 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7336 return IPR_RC_JOB_CONTINUE; 7361 return IPR_RC_JOB_CONTINUE;
7337 } 7362 }
7338 7363
7339 if (ipr_set_pcix_cmd_reg(ioa_cfg)) { 7364 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7340 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 7365 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7341 return IPR_RC_JOB_CONTINUE; 7366 return IPR_RC_JOB_CONTINUE;
7342 } 7367 }
7343 7368
@@ -7364,7 +7389,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7364 } 7389 }
7365 } 7390 }
7366 7391
7367 ENTER; 7392 LEAVE;
7368 return IPR_RC_JOB_CONTINUE; 7393 return IPR_RC_JOB_CONTINUE;
7369} 7394}
7370 7395
@@ -7406,7 +7431,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7406 7431
7407 if (rc != PCIBIOS_SUCCESSFUL) { 7432 if (rc != PCIBIOS_SUCCESSFUL) {
7408 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev); 7433 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7409 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); 7434 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7410 rc = IPR_RC_JOB_CONTINUE; 7435 rc = IPR_RC_JOB_CONTINUE;
7411 } else { 7436 } else {
7412 ipr_cmd->job_step = ipr_reset_bist_done; 7437 ipr_cmd->job_step = ipr_reset_bist_done;
@@ -7665,7 +7690,7 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7665 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7690 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7666 7691
7667 do { 7692 do {
7668 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 7693 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7669 7694
7670 if (ioa_cfg->reset_cmd != ipr_cmd) { 7695 if (ioa_cfg->reset_cmd != ipr_cmd) {
7671 /* 7696 /*
@@ -8048,13 +8073,13 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8048 ioarcb->u.sis64_addr_data.data_ioadl_addr = 8073 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8049 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); 8074 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8050 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = 8075 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8051 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa)); 8076 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8052 } else { 8077 } else {
8053 ioarcb->write_ioadl_addr = 8078 ioarcb->write_ioadl_addr =
8054 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); 8079 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8055 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; 8080 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8056 ioarcb->ioasa_host_pci_addr = 8081 ioarcb->ioasa_host_pci_addr =
8057 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); 8082 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8058 } 8083 }
8059 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); 8084 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8060 ipr_cmd->cmd_index = i; 8085 ipr_cmd->cmd_index = i;
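Note on the ipr.c hunks above: for sis64 adapters the driver now builds one 64-bit mask value from two 32-bit halves and clears the interrupt mask with a single writeq() instead of two writel() calls. A minimal sketch of that pattern follows; the helper, register and argument names are invented, and it assumes a platform that provides writeq():

#include <linux/io.h>
#include <linux/types.h>

/* "reg", "hi" and "lo" stand in for the driver's clr_interrupt_mask_reg
 * and the two halves of the interrupt mask. */
static void clear_mask64(void __iomem *reg, u32 hi, u32 lo)
{
	u64 maskval = ((u64)hi << 32) | lo;

	writeq(maskval, reg);	/* one 64-bit MMIO store instead of two writel()s */
}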
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 4c267b5e0b96..9ecd2259eb39 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -244,6 +244,7 @@
244#define IPR_RUNTIME_RESET 0x40000000 244#define IPR_RUNTIME_RESET 0x40000000
245 245
246#define IPR_IPL_INIT_MIN_STAGE_TIME 5 246#define IPR_IPL_INIT_MIN_STAGE_TIME 5
247#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 15
247#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0 248#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0
248#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000 249#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000
249#define IPR_IPL_INIT_STAGE_MASK 0xff000000 250#define IPR_IPL_INIT_STAGE_MASK 0xff000000
@@ -613,7 +614,7 @@ struct ipr_auto_sense {
613 __be32 data[SCSI_SENSE_BUFFERSIZE/sizeof(__be32)]; 614 __be32 data[SCSI_SENSE_BUFFERSIZE/sizeof(__be32)];
614}; 615};
615 616
616struct ipr_ioasa { 617struct ipr_ioasa_hdr {
617 __be32 ioasc; 618 __be32 ioasc;
618#define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24) 619#define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24)
619#define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16) 620#define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16)
@@ -645,6 +646,25 @@ struct ipr_ioasa {
645#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) 646#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8)
646#define IPR_FIELD_POINTER_MASK 0x0000ffff 647#define IPR_FIELD_POINTER_MASK 0x0000ffff
647 648
649}__attribute__((packed, aligned (4)));
650
651struct ipr_ioasa {
652 struct ipr_ioasa_hdr hdr;
653
654 union {
655 struct ipr_ioasa_vset vset;
656 struct ipr_ioasa_af_dasd dasd;
657 struct ipr_ioasa_gpdd gpdd;
658 struct ipr_ioasa_gata gata;
659 } u;
660
661 struct ipr_auto_sense auto_sense;
662}__attribute__((packed, aligned (4)));
663
664struct ipr_ioasa64 {
665 struct ipr_ioasa_hdr hdr;
666 u8 fd_res_path[8];
667
648 union { 668 union {
649 struct ipr_ioasa_vset vset; 669 struct ipr_ioasa_vset vset;
650 struct ipr_ioasa_af_dasd dasd; 670 struct ipr_ioasa_af_dasd dasd;
@@ -804,7 +824,7 @@ struct ipr_hostrcb_array_data_entry_enhanced {
804}__attribute__((packed, aligned (4))); 824}__attribute__((packed, aligned (4)));
805 825
806struct ipr_hostrcb_type_ff_error { 826struct ipr_hostrcb_type_ff_error {
807 __be32 ioa_data[502]; 827 __be32 ioa_data[758];
808}__attribute__((packed, aligned (4))); 828}__attribute__((packed, aligned (4)));
809 829
810struct ipr_hostrcb_type_01_error { 830struct ipr_hostrcb_type_01_error {
@@ -1181,7 +1201,7 @@ struct ipr_resource_entry {
1181 u8 flags; 1201 u8 flags;
1182 __be16 res_flags; 1202 __be16 res_flags;
1183 1203
1184 __be32 type; 1204 u8 type;
1185 1205
1186 u8 qmodel; 1206 u8 qmodel;
1187 struct ipr_std_inq_data std_inq_data; 1207 struct ipr_std_inq_data std_inq_data;
@@ -1464,7 +1484,10 @@ struct ipr_cmnd {
1464 struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; 1484 struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
1465 struct ipr_ata64_ioadl ata_ioadl; 1485 struct ipr_ata64_ioadl ata_ioadl;
1466 } i; 1486 } i;
1467 struct ipr_ioasa ioasa; 1487 union {
1488 struct ipr_ioasa ioasa;
1489 struct ipr_ioasa64 ioasa64;
1490 } s;
1468 struct list_head queue; 1491 struct list_head queue;
1469 struct scsi_cmnd *scsi_cmd; 1492 struct scsi_cmnd *scsi_cmd;
1470 struct ata_queued_cmd *qc; 1493 struct ata_queued_cmd *qc;
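Note on the ipr.h hunks above: splitting the status area into a shared ipr_ioasa_hdr plus 32-bit and 64-bit variants, overlaid in a union inside ipr_cmnd, lets generic completion code read the header the same way for both formats. A hedged sketch of the layout idea; every type and field name below is illustrative, not the driver's:

#include <linux/types.h>

struct demo_hdr     { __be32 ioasc; };				/* common fields */
struct demo_ioasa   { struct demo_hdr hdr; __be32 data[4]; };
struct demo_ioasa64 { struct demo_hdr hdr; u8 res_path[8]; __be32 data[4]; };

struct demo_cmd {
	union {				/* overlaid so either layout fits */
		struct demo_ioasa   ioasa;
		struct demo_ioasa64 ioasa64;
	} s;
};

/* Both layouts begin with the same header, so shared code can always read
 * cmd->s.ioasa.hdr.ioasc, while offsetof(struct demo_cmd, s.ioasa64) still
 * yields the right DMA offset when programming a new-format adapter. */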
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index bf55d3057413..fec47de72535 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -601,10 +601,8 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
601 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 601 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
602 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); 602 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
603 603
604 if (sk_sleep(sock->sk)) { 604 sock->sk->sk_err = EIO;
605 sock->sk->sk_err = EIO; 605 wake_up_interruptible(sk_sleep(sock->sk));
606 wake_up_interruptible(sk_sleep(sock->sk));
607 }
608 606
609 iscsi_conn_stop(cls_conn, flag); 607 iscsi_conn_stop(cls_conn, flag);
610 iscsi_sw_tcp_release_conn(conn); 608 iscsi_sw_tcp_release_conn(conn);
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index 716d1785cda7..c29d0dbb9660 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -16,12 +16,12 @@
16#include <linux/stat.h> 16#include <linux/stat.h>
17 17
18 18
19static struct Scsi_Host *mvme147_host = NULL; 19static irqreturn_t mvme147_intr(int irq, void *data)
20
21static irqreturn_t mvme147_intr(int irq, void *dummy)
22{ 20{
21 struct Scsi_Host *instance = data;
22
23 if (irq == MVME147_IRQ_SCSI_PORT) 23 if (irq == MVME147_IRQ_SCSI_PORT)
24 wd33c93_intr(mvme147_host); 24 wd33c93_intr(instance);
25 else 25 else
26 m147_pcc->dma_intr = 0x89; /* Ack and enable ints */ 26 m147_pcc->dma_intr = 0x89; /* Ack and enable ints */
27 return IRQ_HANDLED; 27 return IRQ_HANDLED;
@@ -29,7 +29,8 @@ static irqreturn_t mvme147_intr(int irq, void *dummy)
29 29
30static int dma_setup(struct scsi_cmnd *cmd, int dir_in) 30static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
31{ 31{
32 struct WD33C93_hostdata *hdata = shost_priv(mvme147_host); 32 struct Scsi_Host *instance = cmd->device->host;
33 struct WD33C93_hostdata *hdata = shost_priv(instance);
33 unsigned char flags = 0x01; 34 unsigned char flags = 0x01;
34 unsigned long addr = virt_to_bus(cmd->SCp.ptr); 35 unsigned long addr = virt_to_bus(cmd->SCp.ptr);
35 36
@@ -66,6 +67,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
66int mvme147_detect(struct scsi_host_template *tpnt) 67int mvme147_detect(struct scsi_host_template *tpnt)
67{ 68{
68 static unsigned char called = 0; 69 static unsigned char called = 0;
70 struct Scsi_Host *instance;
69 wd33c93_regs regs; 71 wd33c93_regs regs;
70 struct WD33C93_hostdata *hdata; 72 struct WD33C93_hostdata *hdata;
71 73
@@ -76,25 +78,25 @@ int mvme147_detect(struct scsi_host_template *tpnt)
76 tpnt->proc_name = "MVME147"; 78 tpnt->proc_name = "MVME147";
77 tpnt->proc_info = &wd33c93_proc_info; 79 tpnt->proc_info = &wd33c93_proc_info;
78 80
79 mvme147_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); 81 instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata));
80 if (!mvme147_host) 82 if (!instance)
81 goto err_out; 83 goto err_out;
82 84
83 mvme147_host->base = 0xfffe4000; 85 instance->base = 0xfffe4000;
84 mvme147_host->irq = MVME147_IRQ_SCSI_PORT; 86 instance->irq = MVME147_IRQ_SCSI_PORT;
85 regs.SASR = (volatile unsigned char *)0xfffe4000; 87 regs.SASR = (volatile unsigned char *)0xfffe4000;
86 regs.SCMD = (volatile unsigned char *)0xfffe4001; 88 regs.SCMD = (volatile unsigned char *)0xfffe4001;
87 hdata = shost_priv(mvme147_host); 89 hdata = shost_priv(instance);
88 hdata->no_sync = 0xff; 90 hdata->no_sync = 0xff;
89 hdata->fast = 0; 91 hdata->fast = 0;
90 hdata->dma_mode = CTRL_DMA; 92 hdata->dma_mode = CTRL_DMA;
91 wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10); 93 wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10);
92 94
93 if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, 95 if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0,
94 "MVME147 SCSI PORT", mvme147_intr)) 96 "MVME147 SCSI PORT", instance))
95 goto err_unregister; 97 goto err_unregister;
96 if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0, 98 if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0,
97 "MVME147 SCSI DMA", mvme147_intr)) 99 "MVME147 SCSI DMA", instance))
98 goto err_free_irq; 100 goto err_free_irq;
99#if 0 /* Disabled; causes problems booting */ 101#if 0 /* Disabled; causes problems booting */
100 m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */ 102 m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */
@@ -113,7 +115,7 @@ int mvme147_detect(struct scsi_host_template *tpnt)
113err_free_irq: 115err_free_irq:
114 free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr); 116 free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr);
115err_unregister: 117err_unregister:
116 scsi_unregister(mvme147_host); 118 scsi_unregister(instance);
117err_out: 119err_out:
118 return 0; 120 return 0;
119} 121}
@@ -132,9 +134,6 @@ static int mvme147_bus_reset(struct scsi_cmnd *cmd)
132 return SUCCESS; 134 return SUCCESS;
133} 135}
134 136
135#define HOSTS_C
136
137#include "mvme147.h"
138 137
139static struct scsi_host_template driver_template = { 138static struct scsi_host_template driver_template = {
140 .proc_name = "MVME147", 139 .proc_name = "MVME147",
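Note on the mvme147.c hunks above: the file-scope mvme147_host pointer is gone; the Scsi_Host is now passed to request_irq() as the dev_id cookie and recovered in the interrupt handler. A sketch of that pattern, with invented handler and label names:

#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

static irqreturn_t demo_intr(int irq, void *data)
{
	struct Scsi_Host *instance = data;	/* the cookie handed to request_irq() */

	/* ... service the controller owned by this host ... */
	return IRQ_HANDLED;
}

/* at attach time, roughly:
 *	if (request_irq(irq, demo_intr, 0, "demo scsi", instance))
 *		goto err_unregister;
 * and free_irq(irq, instance) with the same cookie on teardown. */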
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 8dbf1c3afb7b..d64b7178fa08 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -3587,7 +3587,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
3587 if (i == (-ENOSPC)) { 3587 if (i == (-ENOSPC)) {
3588 transfer = STp->buffer->writing; /* FIXME -- check this logic */ 3588 transfer = STp->buffer->writing; /* FIXME -- check this logic */
3589 if (transfer <= do_count) { 3589 if (transfer <= do_count) {
3590 filp->f_pos += do_count - transfer; 3590 *ppos += do_count - transfer;
3591 count -= do_count - transfer; 3591 count -= do_count - transfer;
3592 if (STps->drv_block >= 0) { 3592 if (STps->drv_block >= 0) {
3593 STps->drv_block += (do_count - transfer) / STp->block_size; 3593 STps->drv_block += (do_count - transfer) / STp->block_size;
@@ -3625,7 +3625,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
3625 goto out; 3625 goto out;
3626 } 3626 }
3627 3627
3628 filp->f_pos += do_count; 3628 *ppos += do_count;
3629 b_point += do_count; 3629 b_point += do_count;
3630 count -= do_count; 3630 count -= do_count;
3631 if (STps->drv_block >= 0) { 3631 if (STps->drv_block >= 0) {
@@ -3647,7 +3647,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name
3647 if (STps->drv_block >= 0) { 3647 if (STps->drv_block >= 0) {
3648 STps->drv_block += blks; 3648 STps->drv_block += blks;
3649 } 3649 }
3650 filp->f_pos += count; 3650 *ppos += count;
3651 count = 0; 3651 count = 0;
3652 } 3652 }
3653 3653
@@ -3823,7 +3823,7 @@ static ssize_t osst_read(struct file * filp, char __user * buf, size_t count, lo
3823 } 3823 }
3824 STp->logical_blk_num += transfer / STp->block_size; 3824 STp->logical_blk_num += transfer / STp->block_size;
3825 STps->drv_block += transfer / STp->block_size; 3825 STps->drv_block += transfer / STp->block_size;
3826 filp->f_pos += transfer; 3826 *ppos += transfer;
3827 buf += transfer; 3827 buf += transfer;
3828 total += transfer; 3828 total += transfer;
3829 } 3829 }
@@ -5626,6 +5626,7 @@ static const struct file_operations osst_fops = {
5626 .open = os_scsi_tape_open, 5626 .open = os_scsi_tape_open,
5627 .flush = os_scsi_tape_flush, 5627 .flush = os_scsi_tape_flush,
5628 .release = os_scsi_tape_close, 5628 .release = os_scsi_tape_close,
5629 .llseek = noop_llseek,
5629}; 5630};
5630 5631
5631static int osst_supports(struct scsi_device * SDp) 5632static int osst_supports(struct scsi_device * SDp)
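Note on the osst.c hunks above: the read/write paths now advance the *ppos argument the VFS passes in rather than poking filp->f_pos directly, and the file_operations gain an explicit .llseek = noop_llseek. A minimal sketch of the convention with a hypothetical driver:

#include <linux/fs.h>
#include <linux/module.h>

static ssize_t demo_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	/* ... transfer "count" bytes to the device ... */
	*ppos += count;			/* update the position the VFS gave us, */
	return count;			/* never filp->f_pos directly            */
}

static const struct file_operations demo_fops = {
	.owner  = THIS_MODULE,
	.write  = demo_write,
	.llseek = noop_llseek,		/* seeking on this device is a no-op */
};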
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 9798c2c06b93..1c027a97d8b9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -492,19 +492,20 @@ void scsi_target_reap(struct scsi_target *starget)
492 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 492 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
493 unsigned long flags; 493 unsigned long flags;
494 enum scsi_target_state state; 494 enum scsi_target_state state;
495 int empty; 495 int empty = 0;
496 496
497 spin_lock_irqsave(shost->host_lock, flags); 497 spin_lock_irqsave(shost->host_lock, flags);
498 state = starget->state; 498 state = starget->state;
499 empty = --starget->reap_ref == 0 && 499 if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
500 list_empty(&starget->devices) ? 1 : 0; 500 empty = 1;
501 starget->state = STARGET_DEL;
502 }
501 spin_unlock_irqrestore(shost->host_lock, flags); 503 spin_unlock_irqrestore(shost->host_lock, flags);
502 504
503 if (!empty) 505 if (!empty)
504 return; 506 return;
505 507
506 BUG_ON(state == STARGET_DEL); 508 BUG_ON(state == STARGET_DEL);
507 starget->state = STARGET_DEL;
508 if (state == STARGET_CREATED) 509 if (state == STARGET_CREATED)
509 scsi_target_destroy(starget); 510 scsi_target_destroy(starget);
510 else 511 else
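Note on the scsi_scan.c hunk above: the STARGET_DEL assignment moves inside the host_lock region, so deciding that this caller is the last user and claiming the target happen atomically. A generic sketch of that check-and-mark-under-lock idiom; the structure, state value and destroy helper are hypothetical:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
	spinlock_t	 lock;
	int		 refcount;
	int		 state;
	struct list_head children;
};

static void demo_obj_destroy(struct demo_obj *o) { kfree(o); }	/* stub */

static void demo_obj_put(struct demo_obj *o)
{
	unsigned long flags;
	int last = 0;

	spin_lock_irqsave(&o->lock, flags);
	if (--o->refcount == 0 && list_empty(&o->children)) {
		last = 1;
		o->state = 2 /* DEMO_DEL */;	/* claim it while still locked */
	}
	spin_unlock_irqrestore(&o->lock, flags);

	if (last)
		demo_obj_destroy(o);		/* only one caller can get here */
}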
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 3ea1a713ef25..24211d0efa6d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3962,6 +3962,7 @@ static const struct file_operations st_fops =
3962 .open = st_open, 3962 .open = st_open,
3963 .flush = st_flush, 3963 .flush = st_flush,
3964 .release = st_release, 3964 .release = st_release,
3965 .llseek = noop_llseek,
3965}; 3966};
3966 3967
3967static int st_probe(struct device *dev) 3968static int st_probe(struct device *dev)
diff --git a/drivers/serial/s5pv210.c b/drivers/serial/s5pv210.c
index 8dc03837617b..4a789e5361a4 100644
--- a/drivers/serial/s5pv210.c
+++ b/drivers/serial/s5pv210.c
@@ -119,7 +119,7 @@ static int s5p_serial_probe(struct platform_device *pdev)
119 return s3c24xx_serial_probe(pdev, s5p_uart_inf[pdev->id]); 119 return s3c24xx_serial_probe(pdev, s5p_uart_inf[pdev->id]);
120} 120}
121 121
122static struct platform_driver s5p_serial_drv = { 122static struct platform_driver s5p_serial_driver = {
123 .probe = s5p_serial_probe, 123 .probe = s5p_serial_probe,
124 .remove = __devexit_p(s3c24xx_serial_remove), 124 .remove = __devexit_p(s3c24xx_serial_remove),
125 .driver = { 125 .driver = {
@@ -130,19 +130,19 @@ static struct platform_driver s5p_serial_drv = {
130 130
131static int __init s5pv210_serial_console_init(void) 131static int __init s5pv210_serial_console_init(void)
132{ 132{
133 return s3c24xx_serial_initconsole(&s5p_serial_drv, s5p_uart_inf); 133 return s3c24xx_serial_initconsole(&s5p_serial_driver, s5p_uart_inf);
134} 134}
135 135
136console_initcall(s5pv210_serial_console_init); 136console_initcall(s5pv210_serial_console_init);
137 137
138static int __init s5p_serial_init(void) 138static int __init s5p_serial_init(void)
139{ 139{
140 return s3c24xx_serial_init(&s5p_serial_drv, *s5p_uart_inf); 140 return s3c24xx_serial_init(&s5p_serial_driver, *s5p_uart_inf);
141} 141}
142 142
143static void __exit s5p_serial_exit(void) 143static void __exit s5p_serial_exit(void)
144{ 144{
145 platform_driver_unregister(&s5p_serial_drv); 145 platform_driver_unregister(&s5p_serial_driver);
146} 146}
147 147
148module_init(s5p_serial_init); 148module_init(s5p_serial_init);
diff --git a/drivers/sfi/sfi_acpi.c b/drivers/sfi/sfi_acpi.c
index 34aba30eb84b..f5b4ca581541 100644
--- a/drivers/sfi/sfi_acpi.c
+++ b/drivers/sfi/sfi_acpi.c
@@ -173,3 +173,44 @@ int sfi_acpi_table_parse(char *signature, char *oem_id, char *oem_table_id,
173 sfi_acpi_put_table(table); 173 sfi_acpi_put_table(table);
174 return ret; 174 return ret;
175} 175}
176
177static ssize_t sfi_acpi_table_show(struct file *filp, struct kobject *kobj,
178 struct bin_attribute *bin_attr, char *buf,
179 loff_t offset, size_t count)
180{
181 struct sfi_table_attr *tbl_attr =
182 container_of(bin_attr, struct sfi_table_attr, attr);
183 struct acpi_table_header *th = NULL;
184 struct sfi_table_key key;
185 ssize_t cnt;
186
187 key.sig = tbl_attr->name;
188 key.oem_id = NULL;
189 key.oem_table_id = NULL;
190
191 th = sfi_acpi_get_table(&key);
192 if (!th)
193 return 0;
194
195 cnt = memory_read_from_buffer(buf, count, &offset,
196 th, th->length);
197 sfi_acpi_put_table(th);
198
199 return cnt;
200}
201
202
203void __init sfi_acpi_sysfs_init(void)
204{
205 u32 tbl_cnt, i;
206 struct sfi_table_attr *tbl_attr;
207
208 tbl_cnt = XSDT_GET_NUM_ENTRIES(xsdt_va, u64);
209 for (i = 0; i < tbl_cnt; i++) {
210 tbl_attr =
211 sfi_sysfs_install_table(xsdt_va->table_offset_entry[i]);
212 tbl_attr->attr.read = sfi_acpi_table_show;
213 }
214
215 return;
216}
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index b204a0929139..005195958647 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -67,6 +67,7 @@
67#include <linux/acpi.h> 67#include <linux/acpi.h>
68#include <linux/init.h> 68#include <linux/init.h>
69#include <linux/sfi.h> 69#include <linux/sfi.h>
70#include <linux/slab.h>
70 71
71#include "sfi_core.h" 72#include "sfi_core.h"
72 73
@@ -382,6 +383,102 @@ static __init int sfi_find_syst(void)
382 return -1; 383 return -1;
383} 384}
384 385
386static struct kobject *sfi_kobj;
387static struct kobject *tables_kobj;
388
389static ssize_t sfi_table_show(struct file *filp, struct kobject *kobj,
390 struct bin_attribute *bin_attr, char *buf,
391 loff_t offset, size_t count)
392{
393 struct sfi_table_attr *tbl_attr =
394 container_of(bin_attr, struct sfi_table_attr, attr);
395 struct sfi_table_header *th = NULL;
396 struct sfi_table_key key;
397 ssize_t cnt;
398
399 key.sig = tbl_attr->name;
400 key.oem_id = NULL;
401 key.oem_table_id = NULL;
402
403 if (strncmp(SFI_SIG_SYST, tbl_attr->name, SFI_SIGNATURE_SIZE)) {
404 th = sfi_get_table(&key);
405 if (!th)
406 return 0;
407
408 cnt = memory_read_from_buffer(buf, count, &offset,
409 th, th->len);
410 sfi_put_table(th);
411 } else
412 cnt = memory_read_from_buffer(buf, count, &offset,
413 syst_va, syst_va->header.len);
414
415 return cnt;
416}
417
418struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa)
419{
420 struct sfi_table_attr *tbl_attr;
421 struct sfi_table_header *th;
422 int ret;
423
424 tbl_attr = kzalloc(sizeof(struct sfi_table_attr), GFP_KERNEL);
425 if (!tbl_attr)
426 return NULL;
427
428 th = sfi_map_table(pa);
429 if (!th || !th->sig[0]) {
430 kfree(tbl_attr);
431 return NULL;
432 }
433
434 sysfs_attr_init(&tbl_attr->attr.attr);
435 memcpy(tbl_attr->name, th->sig, SFI_SIGNATURE_SIZE);
436
437 tbl_attr->attr.size = 0;
438 tbl_attr->attr.read = sfi_table_show;
439 tbl_attr->attr.attr.name = tbl_attr->name;
440 tbl_attr->attr.attr.mode = 0400;
441
442 ret = sysfs_create_bin_file(tables_kobj,
443 &tbl_attr->attr);
444 if (ret)
445 kfree(tbl_attr);
446
447 sfi_unmap_table(th);
448 return tbl_attr;
449}
450
451static int __init sfi_sysfs_init(void)
452{
453 int tbl_cnt, i;
454
455 if (sfi_disabled)
456 return 0;
457
458 sfi_kobj = kobject_create_and_add("sfi", firmware_kobj);
459 if (!sfi_kobj)
460 return 0;
461
462 tables_kobj = kobject_create_and_add("tables", sfi_kobj);
463 if (!tables_kobj) {
464 kobject_put(sfi_kobj);
465 return 0;
466 }
467
468 sfi_sysfs_install_table(syst_pa);
469
470 tbl_cnt = SFI_GET_NUM_ENTRIES(syst_va, u64);
471
472 for (i = 0; i < tbl_cnt; i++)
473 sfi_sysfs_install_table(syst_va->pentry[i]);
474
475 sfi_acpi_sysfs_init();
476 kobject_uevent(sfi_kobj, KOBJ_ADD);
477 kobject_uevent(tables_kobj, KOBJ_ADD);
478 pr_info("SFI sysfs interfaces init success\n");
479 return 0;
480}
481
385void __init sfi_init(void) 482void __init sfi_init(void)
386{ 483{
387 if (!acpi_disabled) 484 if (!acpi_disabled)
@@ -390,7 +487,7 @@ void __init sfi_init(void)
390 if (sfi_disabled) 487 if (sfi_disabled)
391 return; 488 return;
392 489
393 pr_info("Simple Firmware Interface v0.7 http://simplefirmware.org\n"); 490 pr_info("Simple Firmware Interface v0.81 http://simplefirmware.org\n");
394 491
395 if (sfi_find_syst() || sfi_parse_syst() || sfi_platform_init()) 492 if (sfi_find_syst() || sfi_parse_syst() || sfi_platform_init())
396 disable_sfi(); 493 disable_sfi();
@@ -414,3 +511,9 @@ void __init sfi_init_late(void)
414 511
415 sfi_acpi_init(); 512 sfi_acpi_init();
416} 513}
514
515/*
516 * The reason we put this here is that we need to wait until /sys/firmware
517 * is set up, so that our interface can be registered in /sys/firmware/sfi
518 */
519core_initcall(sfi_sysfs_init);
diff --git a/drivers/sfi/sfi_core.h b/drivers/sfi/sfi_core.h
index da82d39e104d..b7cf220d44ec 100644
--- a/drivers/sfi/sfi_core.h
+++ b/drivers/sfi/sfi_core.h
@@ -61,6 +61,12 @@ struct sfi_table_key{
61 char *oem_table_id; 61 char *oem_table_id;
62}; 62};
63 63
64/* sysfs interface */
65struct sfi_table_attr {
66 struct bin_attribute attr;
67 char name[8];
68};
69
64#define SFI_ANY_KEY { .sig = NULL, .oem_id = NULL, .oem_table_id = NULL } 70#define SFI_ANY_KEY { .sig = NULL, .oem_id = NULL, .oem_table_id = NULL }
65 71
66extern int __init sfi_acpi_init(void); 72extern int __init sfi_acpi_init(void);
@@ -68,3 +74,5 @@ extern struct sfi_table_header *sfi_check_table(u64 paddr,
68 struct sfi_table_key *key); 74 struct sfi_table_key *key);
69struct sfi_table_header *sfi_get_table(struct sfi_table_key *key); 75struct sfi_table_header *sfi_get_table(struct sfi_table_key *key);
70extern void sfi_put_table(struct sfi_table_header *table); 76extern void sfi_put_table(struct sfi_table_header *table);
77extern struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa);
78extern void __init sfi_acpi_sysfs_init(void);
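Note on the SFI hunks above: each firmware table is exported as a read-only binary file under /sys/firmware/sfi/tables by filling in a struct bin_attribute whose read hook copies out of the mapped table with memory_read_from_buffer(). A condensed sketch of that wiring; the blob and names below are placeholders, not SFI code:

#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static const char demo_table[] = "DEMO";	/* stands in for a mapped table */

static ssize_t demo_table_show(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t offset, size_t count)
{
	return memory_read_from_buffer(buf, count, &offset,
				       demo_table, sizeof(demo_table));
}

static struct bin_attribute demo_table_attr = {
	.attr = { .name = "DEMO", .mode = 0400 },
	.read = demo_table_show,
};

/* at init time, roughly:
 *	demo_kobj = kobject_create_and_add("demo", firmware_kobj);
 *	sysfs_create_bin_file(demo_kobj, &demo_table_attr);
 */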
diff --git a/drivers/staging/go7007/saa7134-go7007.c b/drivers/staging/go7007/saa7134-go7007.c
index 49f0d31c118a..cf7c34a99459 100644
--- a/drivers/staging/go7007/saa7134-go7007.c
+++ b/drivers/staging/go7007/saa7134-go7007.c
@@ -242,13 +242,13 @@ static void saa7134_go7007_irq_ts_done(struct saa7134_dev *dev,
242 printk(KERN_DEBUG "saa7134-go7007: irq: lost %ld\n", 242 printk(KERN_DEBUG "saa7134-go7007: irq: lost %ld\n",
243 (status >> 16) & 0x0f); 243 (status >> 16) & 0x0f);
244 if (status & 0x100000) { 244 if (status & 0x100000) {
245 dma_sync_single(&dev->pci->dev, 245 dma_sync_single_for_cpu(&dev->pci->dev,
246 saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE); 246 saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE);
247 go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE); 247 go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE);
248 saa_writel(SAA7134_RS_BA2(5), cpu_to_le32(saa->bottom_dma)); 248 saa_writel(SAA7134_RS_BA2(5), cpu_to_le32(saa->bottom_dma));
249 } else { 249 } else {
250 dma_sync_single(&dev->pci->dev, 250 dma_sync_single_for_cpu(&dev->pci->dev,
251 saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE); 251 saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE);
252 go7007_parse_video_stream(go, saa->top, PAGE_SIZE); 252 go7007_parse_video_stream(go, saa->top, PAGE_SIZE);
253 saa_writel(SAA7134_RS_BA1(5), cpu_to_le32(saa->top_dma)); 253 saa_writel(SAA7134_RS_BA1(5), cpu_to_le32(saa->top_dma));
254 } 254 }
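Note on the saa7134-go7007 hunk above: the old dma_sync_single() helper is replaced by dma_sync_single_for_cpu(), the directional form of the streaming-DMA sync API that hands a mapped buffer back to the CPU before it is read. A small sketch of the usual pairing, with placeholder names:

#include <linux/dma-mapping.h>

/* dev, handle and buf are placeholders for the device, the dma_addr_t
 * returned by dma_map_single(), and the kernel virtual buffer. */
static void demo_consume(struct device *dev, dma_addr_t handle, void *buf)
{
	dma_sync_single_for_cpu(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... the CPU may now safely read buf ... */
	dma_sync_single_for_device(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... buffer handed back to the device for the next transfer ... */
}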
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index 9286e863b0e7..643b413d9f0f 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -29,7 +29,6 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/statfs.h> 30#include <linux/statfs.h>
31#include <linux/writeback.h> 31#include <linux/writeback.h>
32#include <linux/quotaops.h>
33 32
34#include "netfs.h" 33#include "netfs.h"
35 34
@@ -880,7 +879,7 @@ static struct inode *pohmelfs_alloc_inode(struct super_block *sb)
880/* 879/*
881 * We want fsync() to work on POHMELFS. 880 * We want fsync() to work on POHMELFS.
882 */ 881 */
883static int pohmelfs_fsync(struct file *file, struct dentry *dentry, int datasync) 882static int pohmelfs_fsync(struct file *file, int datasync)
884{ 883{
885 struct inode *inode = file->f_mapping->host; 884 struct inode *inode = file->f_mapping->host;
886 struct writeback_control wbc = { 885 struct writeback_control wbc = {
@@ -969,13 +968,6 @@ int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr)
969 goto err_out_exit; 968 goto err_out_exit;
970 } 969 }
971 970
972 if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
973 (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
974 err = dquot_transfer(inode, attr);
975 if (err)
976 goto err_out_exit;
977 }
978
979 err = inode_setattr(inode, attr); 971 err = inode_setattr(inode, attr);
980 if (err) { 972 if (err) {
981 dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino); 973 dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino);
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index e89304c72568..b53deee25d74 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -5879,20 +5879,13 @@ out:
5879static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp) 5879static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp)
5880{ 5880{
5881 IXJ_FILTER_CADENCE *lcp; 5881 IXJ_FILTER_CADENCE *lcp;
5882 lcp = kmalloc(sizeof(IXJ_FILTER_CADENCE), GFP_KERNEL); 5882 lcp = memdup_user(cp, sizeof(IXJ_FILTER_CADENCE));
5883 if (lcp == NULL) { 5883 if (IS_ERR(lcp)) {
5884 if(ixjdebug & 0x0001) { 5884 if(ixjdebug & 0x0001) {
5885 printk(KERN_INFO "Could not allocate memory for cadence\n"); 5885 printk(KERN_INFO "Could not allocate memory for cadence or could not copy cadence to kernel\n");
5886 } 5886 }
5887 return -ENOMEM; 5887 return PTR_ERR(lcp);
5888 } 5888 }
5889 if (copy_from_user(lcp, cp, sizeof(IXJ_FILTER_CADENCE))) {
5890 if(ixjdebug & 0x0001) {
5891 printk(KERN_INFO "Could not copy cadence to kernel\n");
5892 }
5893 kfree(lcp);
5894 return -EFAULT;
5895 }
5896 if (lcp->filter > 5) { 5889 if (lcp->filter > 5) {
5897 if(ixjdebug & 0x0001) { 5890 if(ixjdebug & 0x0001) {
5898 printk(KERN_INFO "Cadence out of range\n"); 5891 printk(KERN_INFO "Cadence out of range\n");
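Note on the ixj.c hunk above: the kmalloc()+copy_from_user() pair collapses into memdup_user(), which allocates and copies in one step and reports failure through ERR_PTR (-ENOMEM or -EFAULT). A hedged sketch with a made-up ioctl helper:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int demo_take_args(const void __user *uptr, size_t len)
{
	void *kbuf = memdup_user(uptr, len);	/* kmalloc + copy_from_user */

	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);		/* -ENOMEM or -EFAULT */

	/* ... validate and use kbuf ... */
	kfree(kbuf);
	return 0;
}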
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 6b8bf8c781c4..43abf55d8c60 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -794,7 +794,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
794} 794}
795 795
796static int 796static int
797printer_fsync(struct file *fd, struct dentry *dentry, int datasync) 797printer_fsync(struct file *fd, int datasync)
798{ 798{
799 struct printer_dev *dev = fd->private_data; 799 struct printer_dev *dev = fd->private_data;
800 unsigned long flags; 800 unsigned long flags;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index aa88911c9504..0f41c9195e9b 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -593,17 +593,17 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
593 int r; 593 int r;
594 switch (ioctl) { 594 switch (ioctl) {
595 case VHOST_NET_SET_BACKEND: 595 case VHOST_NET_SET_BACKEND:
596 r = copy_from_user(&backend, argp, sizeof backend); 596 if (copy_from_user(&backend, argp, sizeof backend))
597 if (r < 0) 597 return -EFAULT;
598 return r;
599 return vhost_net_set_backend(n, backend.index, backend.fd); 598 return vhost_net_set_backend(n, backend.index, backend.fd);
600 case VHOST_GET_FEATURES: 599 case VHOST_GET_FEATURES:
601 features = VHOST_FEATURES; 600 features = VHOST_FEATURES;
602 return copy_to_user(featurep, &features, sizeof features); 601 if (copy_to_user(featurep, &features, sizeof features))
602 return -EFAULT;
603 return 0;
603 case VHOST_SET_FEATURES: 604 case VHOST_SET_FEATURES:
604 r = copy_from_user(&features, featurep, sizeof features); 605 if (copy_from_user(&features, featurep, sizeof features))
605 if (r < 0) 606 return -EFAULT;
606 return r;
607 if (features & ~VHOST_FEATURES) 607 if (features & ~VHOST_FEATURES)
608 return -EOPNOTSUPP; 608 return -EOPNOTSUPP;
609 return vhost_net_set_features(n, features); 609 return vhost_net_set_features(n, features);
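Note on the vhost hunks above and below: copy_from_user() and copy_to_user() return the number of bytes left uncopied (an unsigned count), never a negative errno, so the old "if (r < 0)" tests could never fire; any non-zero return is now mapped to -EFAULT. A minimal sketch of the corrected handling; the struct and function names are invented:

#include <linux/uaccess.h>

struct demo_args { int index; int fd; };

static long demo_set_backend(struct demo_args __user *argp)
{
	struct demo_args a;

	if (copy_from_user(&a, argp, sizeof(a)))
		return -EFAULT;		/* any uncopied bytes mean a bad pointer */

	/* ... act on a.index / a.fd ... */
	return 0;
}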
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c6fb8e968f21..3b83382e06eb 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -320,10 +320,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
320{ 320{
321 struct vhost_memory mem, *newmem, *oldmem; 321 struct vhost_memory mem, *newmem, *oldmem;
322 unsigned long size = offsetof(struct vhost_memory, regions); 322 unsigned long size = offsetof(struct vhost_memory, regions);
323 long r; 323 if (copy_from_user(&mem, m, size))
324 r = copy_from_user(&mem, m, size); 324 return -EFAULT;
325 if (r)
326 return r;
327 if (mem.padding) 325 if (mem.padding)
328 return -EOPNOTSUPP; 326 return -EOPNOTSUPP;
329 if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) 327 if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
@@ -333,15 +331,16 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
333 return -ENOMEM; 331 return -ENOMEM;
334 332
335 memcpy(newmem, &mem, size); 333 memcpy(newmem, &mem, size);
336 r = copy_from_user(newmem->regions, m->regions, 334 if (copy_from_user(newmem->regions, m->regions,
337 mem.nregions * sizeof *m->regions); 335 mem.nregions * sizeof *m->regions)) {
338 if (r) {
339 kfree(newmem); 336 kfree(newmem);
340 return r; 337 return -EFAULT;
341 } 338 }
342 339
343 if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) 340 if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) {
341 kfree(newmem);
344 return -EFAULT; 342 return -EFAULT;
343 }
345 oldmem = d->memory; 344 oldmem = d->memory;
346 rcu_assign_pointer(d->memory, newmem); 345 rcu_assign_pointer(d->memory, newmem);
347 synchronize_rcu(); 346 synchronize_rcu();
@@ -374,7 +373,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
374 r = get_user(idx, idxp); 373 r = get_user(idx, idxp);
375 if (r < 0) 374 if (r < 0)
376 return r; 375 return r;
377 if (idx > d->nvqs) 376 if (idx >= d->nvqs)
378 return -ENOBUFS; 377 return -ENOBUFS;
379 378
380 vq = d->vqs + idx; 379 vq = d->vqs + idx;
@@ -389,9 +388,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
389 r = -EBUSY; 388 r = -EBUSY;
390 break; 389 break;
391 } 390 }
392 r = copy_from_user(&s, argp, sizeof s); 391 if (copy_from_user(&s, argp, sizeof s)) {
393 if (r < 0) 392 r = -EFAULT;
394 break; 393 break;
394 }
395 if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) { 395 if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
396 r = -EINVAL; 396 r = -EINVAL;
397 break; 397 break;
@@ -405,9 +405,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
405 r = -EBUSY; 405 r = -EBUSY;
406 break; 406 break;
407 } 407 }
408 r = copy_from_user(&s, argp, sizeof s); 408 if (copy_from_user(&s, argp, sizeof s)) {
409 if (r < 0) 409 r = -EFAULT;
410 break; 410 break;
411 }
411 if (s.num > 0xffff) { 412 if (s.num > 0xffff) {
412 r = -EINVAL; 413 r = -EINVAL;
413 break; 414 break;
@@ -419,12 +420,14 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
419 case VHOST_GET_VRING_BASE: 420 case VHOST_GET_VRING_BASE:
420 s.index = idx; 421 s.index = idx;
421 s.num = vq->last_avail_idx; 422 s.num = vq->last_avail_idx;
422 r = copy_to_user(argp, &s, sizeof s); 423 if (copy_to_user(argp, &s, sizeof s))
424 r = -EFAULT;
423 break; 425 break;
424 case VHOST_SET_VRING_ADDR: 426 case VHOST_SET_VRING_ADDR:
425 r = copy_from_user(&a, argp, sizeof a); 427 if (copy_from_user(&a, argp, sizeof a)) {
426 if (r < 0) 428 r = -EFAULT;
427 break; 429 break;
430 }
428 if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) { 431 if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
429 r = -EOPNOTSUPP; 432 r = -EOPNOTSUPP;
430 break; 433 break;
@@ -477,9 +480,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
477 vq->used = (void __user *)(unsigned long)a.used_user_addr; 480 vq->used = (void __user *)(unsigned long)a.used_user_addr;
478 break; 481 break;
479 case VHOST_SET_VRING_KICK: 482 case VHOST_SET_VRING_KICK:
480 r = copy_from_user(&f, argp, sizeof f); 483 if (copy_from_user(&f, argp, sizeof f)) {
481 if (r < 0) 484 r = -EFAULT;
482 break; 485 break;
486 }
483 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); 487 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
484 if (IS_ERR(eventfp)) { 488 if (IS_ERR(eventfp)) {
485 r = PTR_ERR(eventfp); 489 r = PTR_ERR(eventfp);
@@ -492,9 +496,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
492 filep = eventfp; 496 filep = eventfp;
493 break; 497 break;
494 case VHOST_SET_VRING_CALL: 498 case VHOST_SET_VRING_CALL:
495 r = copy_from_user(&f, argp, sizeof f); 499 if (copy_from_user(&f, argp, sizeof f)) {
496 if (r < 0) 500 r = -EFAULT;
497 break; 501 break;
502 }
498 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); 503 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
499 if (IS_ERR(eventfp)) { 504 if (IS_ERR(eventfp)) {
500 r = PTR_ERR(eventfp); 505 r = PTR_ERR(eventfp);
@@ -510,9 +515,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
510 filep = eventfp; 515 filep = eventfp;
511 break; 516 break;
512 case VHOST_SET_VRING_ERR: 517 case VHOST_SET_VRING_ERR:
513 r = copy_from_user(&f, argp, sizeof f); 518 if (copy_from_user(&f, argp, sizeof f)) {
514 if (r < 0) 519 r = -EFAULT;
515 break; 520 break;
521 }
516 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); 522 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
517 if (IS_ERR(eventfp)) { 523 if (IS_ERR(eventfp)) {
518 r = PTR_ERR(eventfp); 524 r = PTR_ERR(eventfp);
@@ -575,9 +581,10 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
575 r = vhost_set_memory(d, argp); 581 r = vhost_set_memory(d, argp);
576 break; 582 break;
577 case VHOST_SET_LOG_BASE: 583 case VHOST_SET_LOG_BASE:
578 r = copy_from_user(&p, argp, sizeof p); 584 if (copy_from_user(&p, argp, sizeof p)) {
579 if (r < 0) 585 r = -EFAULT;
580 break; 586 break;
587 }
581 if ((u64)(unsigned long)p != p) { 588 if ((u64)(unsigned long)p != p) {
582 r = -EFAULT; 589 r = -EFAULT;
583 break; 590 break;
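Note on the vhost_set_memory() hunk above: besides the -EFAULT conversion it plugs a leak, since every failure path after newmem is allocated must free it, including the memory_access_ok() rejection that previously returned without a kfree(). A generic sketch of the rule; demo_validate() and the sizes are stand-ins:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static bool demo_validate(const void *mem) { return mem != NULL; }	/* stub */

static long demo_set_table(const void __user *uptr, size_t size)
{
	void *newmem = kmalloc(size, GFP_KERNEL);

	if (!newmem)
		return -ENOMEM;

	if (copy_from_user(newmem, uptr, size)) {
		kfree(newmem);			/* undo the allocation */
		return -EFAULT;
	}
	if (!demo_validate(newmem)) {
		kfree(newmem);			/* the path that used to leak */
		return -EFAULT;
	}

	/* ... publish newmem ... */
	return 0;
}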
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 68d2518fadaa..38ffc3fbcbe4 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -222,6 +222,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
222 data->port = __check_device(pdata, name); 222 data->port = __check_device(pdata, name);
223 if (data->port < 0) { 223 if (data->port < 0) {
224 dev_err(&pdev->dev, "wrong platform data is assigned"); 224 dev_err(&pdev->dev, "wrong platform data is assigned");
225 kfree(data);
225 return -EINVAL; 226 return -EINVAL;
226 } 227 }
227 228
@@ -266,6 +267,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
266 backlight_update_status(bl); 267 backlight_update_status(bl);
267 return 0; 268 return 0;
268out: 269out:
270 backlight_device_unregister(bl);
269 kfree(data); 271 kfree(data);
270 return ret; 272 return ret;
271} 273}
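Note on the 88pm860x_bl.c hunk above: it completes the probe error unwinding, so bailing out after __check_device() frees the private data and the shared out: label also unregisters the backlight device that was just created. A compact sketch of the usual goto-ladder shape; all demo_* names are invented:

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_data { int port; };

static const struct backlight_ops demo_backlight_ops = { };		/* stub */
static int demo_hw_init(struct backlight_device *bl) { return 0; }	/* stub */

static int demo_probe(struct platform_device *pdev)
{
	struct demo_data *data;
	struct backlight_device *bl;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	bl = backlight_device_register("demo-bl", &pdev->dev, data,
				       &demo_backlight_ops, NULL);
	if (IS_ERR(bl)) {
		ret = PTR_ERR(bl);
		goto out_free;			/* undo the allocation */
	}

	ret = demo_hw_init(bl);
	if (ret)
		goto out_unregister;		/* undo the registration too */

	platform_set_drvdata(pdev, bl);
	return 0;

out_unregister:
	backlight_device_unregister(bl);
out_free:
	kfree(data);
	return ret;
}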
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index c025c84601b0..e54a337227ea 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -8,12 +8,13 @@ menuconfig BACKLIGHT_LCD_SUPPORT
8 Enable this to be able to choose the drivers for controlling the 8 Enable this to be able to choose the drivers for controlling the
9 backlight and the LCD panel on some platforms, for example on PDAs. 9 backlight and the LCD panel on some platforms, for example on PDAs.
10 10
11if BACKLIGHT_LCD_SUPPORT
12
11# 13#
12# LCD 14# LCD
13# 15#
14config LCD_CLASS_DEVICE 16config LCD_CLASS_DEVICE
15 tristate "Lowlevel LCD controls" 17 tristate "Lowlevel LCD controls"
16 depends on BACKLIGHT_LCD_SUPPORT
17 default m 18 default m
18 help 19 help
19 This framework adds support for low-level control of LCD. 20 This framework adds support for low-level control of LCD.
@@ -24,31 +25,32 @@ config LCD_CLASS_DEVICE
24 To have support for your specific LCD panel you will have to 25 To have support for your specific LCD panel you will have to
25 select the proper drivers which depend on this option. 26 select the proper drivers which depend on this option.
26 27
28if LCD_CLASS_DEVICE
29
27config LCD_CORGI 30config LCD_CORGI
28 tristate "LCD Panel support for SHARP corgi/spitz model" 31 tristate "LCD Panel support for SHARP corgi/spitz model"
29 depends on LCD_CLASS_DEVICE && SPI_MASTER && PXA_SHARPSL 32 depends on SPI_MASTER && PXA_SHARPSL
30 help 33 help
31 Say y here to support the LCD panels usually found on SHARP 34 Say y here to support the LCD panels usually found on SHARP
32 corgi (C7x0) and spitz (Cxx00) models. 35 corgi (C7x0) and spitz (Cxx00) models.
33 36
34config LCD_L4F00242T03 37config LCD_L4F00242T03
35 tristate "Epson L4F00242T03 LCD" 38 tristate "Epson L4F00242T03 LCD"
36 depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO 39 depends on SPI_MASTER && GENERIC_GPIO
37 help 40 help
38 SPI driver for Epson L4F00242T03. This provides basic support 41 SPI driver for Epson L4F00242T03. This provides basic support
39 for init and powering the LCD up/down through a sysfs interface. 42 for init and powering the LCD up/down through a sysfs interface.
40 43
41config LCD_LMS283GF05 44config LCD_LMS283GF05
42 tristate "Samsung LMS283GF05 LCD" 45 tristate "Samsung LMS283GF05 LCD"
43 depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO 46 depends on SPI_MASTER && GENERIC_GPIO
44 help 47 help
45 SPI driver for Samsung LMS283GF05. This provides basic support 48 SPI driver for Samsung LMS283GF05. This provides basic support
46 for powering the LCD up/down through a sysfs interface. 49 for powering the LCD up/down through a sysfs interface.
47 50
48config LCD_LTV350QV 51config LCD_LTV350QV
49 tristate "Samsung LTV350QV LCD Panel" 52 tristate "Samsung LTV350QV LCD Panel"
50 depends on LCD_CLASS_DEVICE && SPI_MASTER 53 depends on SPI_MASTER
51 default n
52 help 54 help
53 If you have a Samsung LTV350QV LCD panel, say y to include a 55 If you have a Samsung LTV350QV LCD panel, say y to include a
54 power control driver for it. The panel starts up in power 56 power control driver for it. The panel starts up in power
@@ -59,60 +61,61 @@ config LCD_LTV350QV
59 61
60config LCD_ILI9320 62config LCD_ILI9320
61 tristate 63 tristate
62 depends on LCD_CLASS_DEVICE && BACKLIGHT_LCD_SUPPORT
63 default n
64 help 64 help
65 If you have a panel based on the ILI9320 controller chip 65 If you have a panel based on the ILI9320 controller chip
66 then say y to include a power driver for it. 66 then say y to include a power driver for it.
67 67
68config LCD_TDO24M 68config LCD_TDO24M
69 tristate "Toppoly TDO24M and TDO35S LCD Panels support" 69 tristate "Toppoly TDO24M and TDO35S LCD Panels support"
70 depends on LCD_CLASS_DEVICE && SPI_MASTER 70 depends on SPI_MASTER
71 default n
72 help 71 help
73 If you have a Toppoly TDO24M/TDO35S series LCD panel, say y here to 72 If you have a Toppoly TDO24M/TDO35S series LCD panel, say y here to
74 include the support for it. 73 include the support for it.
75 74
76config LCD_VGG2432A4 75config LCD_VGG2432A4
77 tristate "VGG2432A4 LCM device support" 76 tristate "VGG2432A4 LCM device support"
78 depends on BACKLIGHT_LCD_SUPPORT && LCD_CLASS_DEVICE && SPI_MASTER 77 depends on SPI_MASTER
79 select LCD_ILI9320 78 select LCD_ILI9320
80 default n
81 help 79 help
82 If you have a VGG2432A4 panel based on the ILI9320 controller chip 80 If you have a VGG2432A4 panel based on the ILI9320 controller chip
83 then say y to include a power driver for it. 81 then say y to include a power driver for it.
84 82
85config LCD_PLATFORM 83config LCD_PLATFORM
86 tristate "Platform LCD controls" 84 tristate "Platform LCD controls"
87 depends on LCD_CLASS_DEVICE
88 help 85 help
89 This driver provides a platform-device registered LCD power 86 This driver provides a platform-device registered LCD power
90 control interface. 87 control interface.
91 88
92config LCD_TOSA 89config LCD_TOSA
93 tristate "Sharp SL-6000 LCD Driver" 90 tristate "Sharp SL-6000 LCD Driver"
94 depends on LCD_CLASS_DEVICE && SPI 91 depends on SPI && MACH_TOSA
95 depends on MACH_TOSA
96 default n
97 help 92 help
 98 If you have a Sharp SL-6000 Zaurus say Y to enable a driver 93 If you have a Sharp SL-6000 Zaurus say Y to enable a driver
99 for its LCD. 94 for its LCD.
100 95
101config LCD_HP700 96config LCD_HP700
102 tristate "HP Jornada 700 series LCD Driver" 97 tristate "HP Jornada 700 series LCD Driver"
103 depends on LCD_CLASS_DEVICE
104 depends on SA1100_JORNADA720_SSP && !PREEMPT 98 depends on SA1100_JORNADA720_SSP && !PREEMPT
105 default y 99 default y
106 help 100 help
107 If you have an HP Jornada 700 series handheld (710/720/728) 101 If you have an HP Jornada 700 series handheld (710/720/728)
108 say Y to enable LCD control driver. 102 say Y to enable LCD control driver.
109 103
104config LCD_S6E63M0
105 tristate "S6E63M0 AMOLED LCD Driver"
106 depends on SPI && BACKLIGHT_CLASS_DEVICE
107 default n
108 help
109 If you have an S6E63M0 LCD Panel, say Y to enable its
110 LCD control driver.
111
112endif # LCD_CLASS_DEVICE
113
110# 114#
111# Backlight 115# Backlight
112# 116#
113config BACKLIGHT_CLASS_DEVICE 117config BACKLIGHT_CLASS_DEVICE
114 tristate "Lowlevel Backlight controls" 118 tristate "Lowlevel Backlight controls"
115 depends on BACKLIGHT_LCD_SUPPORT
116 default m 119 default m
117 help 120 help
118 This framework adds support for low-level control of the LCD 121 This framework adds support for low-level control of the LCD
@@ -121,9 +124,11 @@ config BACKLIGHT_CLASS_DEVICE
121 To have support for your specific LCD panel you will have to 124 To have support for your specific LCD panel you will have to
122 select the proper drivers which depend on this option. 125 select the proper drivers which depend on this option.
123 126
127if BACKLIGHT_CLASS_DEVICE
128
124config BACKLIGHT_ATMEL_LCDC 129config BACKLIGHT_ATMEL_LCDC
125 bool "Atmel LCDC Contrast-as-Backlight control" 130 bool "Atmel LCDC Contrast-as-Backlight control"
126 depends on BACKLIGHT_CLASS_DEVICE && FB_ATMEL 131 depends on FB_ATMEL
127 default y if MACH_SAM9261EK || MACH_SAM9G10EK || MACH_SAM9263EK 132 default y if MACH_SAM9261EK || MACH_SAM9G10EK || MACH_SAM9263EK
128 help 133 help
129 This provides a backlight control internal to the Atmel LCDC 134 This provides a backlight control internal to the Atmel LCDC
@@ -136,8 +141,7 @@ config BACKLIGHT_ATMEL_LCDC
136 141
137config BACKLIGHT_ATMEL_PWM 142config BACKLIGHT_ATMEL_PWM
138 tristate "Atmel PWM backlight control" 143 tristate "Atmel PWM backlight control"
139 depends on BACKLIGHT_CLASS_DEVICE && ATMEL_PWM 144 depends on ATMEL_PWM
140 default n
141 help 145 help
142 Say Y here if you want to use the PWM peripheral in Atmel AT91 and 146 Say Y here if you want to use the PWM peripheral in Atmel AT91 and
143 AVR32 devices. This driver will need additional platform data to know 147 AVR32 devices. This driver will need additional platform data to know
@@ -146,9 +150,18 @@ config BACKLIGHT_ATMEL_PWM
146 To compile this driver as a module, choose M here: the module will be 150 To compile this driver as a module, choose M here: the module will be
147 called atmel-pwm-bl. 151 called atmel-pwm-bl.
148 152
153config BACKLIGHT_EP93XX
154 tristate "Cirrus EP93xx Backlight Driver"
155 depends on FB_EP93XX
156 help
157 If you have a LCD backlight connected to the BRIGHT output of
158 the EP93xx, say Y here to enable this driver.
159
160 To compile this driver as a module, choose M here: the module will
161 be called ep93xx_bl.
162
149config BACKLIGHT_GENERIC 163config BACKLIGHT_GENERIC
150 tristate "Generic (aka Sharp Corgi) Backlight Driver" 164 tristate "Generic (aka Sharp Corgi) Backlight Driver"
151 depends on BACKLIGHT_CLASS_DEVICE
152 default y 165 default y
153 help 166 help
154 Say y to enable the generic platform backlight driver previously 167 Say y to enable the generic platform backlight driver previously
@@ -157,7 +170,7 @@ config BACKLIGHT_GENERIC
157 170
158config BACKLIGHT_LOCOMO 171config BACKLIGHT_LOCOMO
159 tristate "Sharp LOCOMO LCD/Backlight Driver" 172 tristate "Sharp LOCOMO LCD/Backlight Driver"
160 depends on BACKLIGHT_CLASS_DEVICE && SHARP_LOCOMO 173 depends on SHARP_LOCOMO
161 default y 174 default y
162 help 175 help
163 If you have a Sharp Zaurus SL-5500 (Collie) or SL-5600 (Poodle) say y to 176 If you have a Sharp Zaurus SL-5500 (Collie) or SL-5600 (Poodle) say y to
@@ -165,7 +178,7 @@ config BACKLIGHT_LOCOMO
165 178
166config BACKLIGHT_OMAP1 179config BACKLIGHT_OMAP1
167 tristate "OMAP1 PWL-based LCD Backlight" 180 tristate "OMAP1 PWL-based LCD Backlight"
168 depends on BACKLIGHT_CLASS_DEVICE && ARCH_OMAP1 181 depends on ARCH_OMAP1
169 default y 182 default y
170 help 183 help
171 This driver controls the LCD backlight level and power for 184 This driver controls the LCD backlight level and power for
@@ -174,7 +187,7 @@ config BACKLIGHT_OMAP1
174 187
175config BACKLIGHT_HP680 188config BACKLIGHT_HP680
176 tristate "HP Jornada 680 Backlight Driver" 189 tristate "HP Jornada 680 Backlight Driver"
177 depends on BACKLIGHT_CLASS_DEVICE && SH_HP6XX 190 depends on SH_HP6XX
178 default y 191 default y
179 help 192 help
180 If you have a HP Jornada 680, say y to enable the 193 If you have a HP Jornada 680, say y to enable the
@@ -182,7 +195,6 @@ config BACKLIGHT_HP680
182 195
183config BACKLIGHT_HP700 196config BACKLIGHT_HP700
184 tristate "HP Jornada 700 series Backlight Driver" 197 tristate "HP Jornada 700 series Backlight Driver"
185 depends on BACKLIGHT_CLASS_DEVICE
186 depends on SA1100_JORNADA720_SSP && !PREEMPT 198 depends on SA1100_JORNADA720_SSP && !PREEMPT
187 default y 199 default y
188 help 200 help
@@ -191,76 +203,70 @@ config BACKLIGHT_HP700
191 203
192config BACKLIGHT_PROGEAR 204config BACKLIGHT_PROGEAR
193 tristate "Frontpath ProGear Backlight Driver" 205 tristate "Frontpath ProGear Backlight Driver"
194 depends on BACKLIGHT_CLASS_DEVICE && PCI && X86 206 depends on PCI && X86
195 default n
196 help 207 help
197 If you have a Frontpath ProGear say Y to enable the 208 If you have a Frontpath ProGear say Y to enable the
198 backlight driver. 209 backlight driver.
199 210
200config BACKLIGHT_CARILLO_RANCH 211config BACKLIGHT_CARILLO_RANCH
201 tristate "Intel Carillo Ranch Backlight Driver" 212 tristate "Intel Carillo Ranch Backlight Driver"
202 depends on BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE && PCI && X86 && FB_LE80578 213 depends on LCD_CLASS_DEVICE && PCI && X86 && FB_LE80578
203 default n
204 help 214 help
205 If you have a Intel LE80578 (Carillo Ranch) say Y to enable the 215 If you have a Intel LE80578 (Carillo Ranch) say Y to enable the
206 backlight driver. 216 backlight driver.
207 217
208config BACKLIGHT_PWM 218config BACKLIGHT_PWM
209 tristate "Generic PWM based Backlight Driver" 219 tristate "Generic PWM based Backlight Driver"
210 depends on BACKLIGHT_CLASS_DEVICE && HAVE_PWM 220 depends on HAVE_PWM
211 help 221 help
212 If you have a LCD backlight adjustable by PWM, say Y to enable 222 If you have a LCD backlight adjustable by PWM, say Y to enable
213 this driver. 223 this driver.
214 224
215config BACKLIGHT_DA903X 225config BACKLIGHT_DA903X
216 tristate "Backlight Driver for DA9030/DA9034 using WLED" 226 tristate "Backlight Driver for DA9030/DA9034 using WLED"
217 depends on BACKLIGHT_CLASS_DEVICE && PMIC_DA903X 227 depends on PMIC_DA903X
218 help 228 help
219 If you have a LCD backlight connected to the WLED output of DA9030 229 If you have a LCD backlight connected to the WLED output of DA9030
220 or DA9034 WLED output, say Y here to enable this driver. 230 or DA9034 WLED output, say Y here to enable this driver.
221 231
222config BACKLIGHT_MAX8925 232config BACKLIGHT_MAX8925
223 tristate "Backlight driver for MAX8925" 233 tristate "Backlight driver for MAX8925"
224 depends on BACKLIGHT_CLASS_DEVICE && MFD_MAX8925 234 depends on MFD_MAX8925
225 help 235 help
226 If you have a LCD backlight connected to the WLED output of MAX8925 236 If you have a LCD backlight connected to the WLED output of MAX8925
227 WLED output, say Y here to enable this driver. 237 WLED output, say Y here to enable this driver.
228 238
229config BACKLIGHT_MBP_NVIDIA 239config BACKLIGHT_MBP_NVIDIA
230 tristate "MacBook Pro Nvidia Backlight Driver" 240 tristate "MacBook Pro Nvidia Backlight Driver"
231 depends on BACKLIGHT_CLASS_DEVICE && X86 241 depends on X86
232 default n
233 help 242 help
234 If you have an Apple Macbook Pro with Nvidia graphics hardware say Y 243 If you have an Apple Macbook Pro with Nvidia graphics hardware say Y
235 to enable a driver for its backlight 244 to enable a driver for its backlight
236 245
237config BACKLIGHT_TOSA 246config BACKLIGHT_TOSA
238 tristate "Sharp SL-6000 Backlight Driver" 247 tristate "Sharp SL-6000 Backlight Driver"
239 depends on BACKLIGHT_CLASS_DEVICE && I2C 248 depends on I2C && MACH_TOSA && LCD_TOSA
240 depends on MACH_TOSA && LCD_TOSA
241 default n
242 help 249 help
243 If you have a Sharp SL-6000 Zaurus say Y to enable a driver 250 If you have a Sharp SL-6000 Zaurus say Y to enable a driver
244 for its backlight 251 for its backlight
245 252
246config BACKLIGHT_SAHARA 253config BACKLIGHT_SAHARA
247 tristate "Tabletkiosk Sahara Touch-iT Backlight Driver" 254 tristate "Tabletkiosk Sahara Touch-iT Backlight Driver"
248 depends on BACKLIGHT_CLASS_DEVICE && X86 255 depends on X86
249 default n
250 help 256 help
251 If you have a Tabletkiosk Sahara Touch-iT, say y to enable the 257 If you have a Tabletkiosk Sahara Touch-iT, say y to enable the
252 backlight driver. 258 backlight driver.
253 259
254config BACKLIGHT_WM831X 260config BACKLIGHT_WM831X
255 tristate "WM831x PMIC Backlight Driver" 261 tristate "WM831x PMIC Backlight Driver"
256 depends on BACKLIGHT_CLASS_DEVICE && MFD_WM831X 262 depends on MFD_WM831X
257 help 263 help
258 If you have a backlight driven by the ISINK and DCDC of a 264 If you have a backlight driven by the ISINK and DCDC of a
259 WM831x PMIC say y to enable the backlight driver for it. 265 WM831x PMIC say y to enable the backlight driver for it.
260 266
261config BACKLIGHT_ADX 267config BACKLIGHT_ADX
262 tristate "Avionic Design Xanthos Backlight Driver" 268 tristate "Avionic Design Xanthos Backlight Driver"
263 depends on BACKLIGHT_CLASS_DEVICE && ARCH_PXA_ADX 269 depends on ARCH_PXA_ADX
264 default y 270 default y
265 help 271 help
266 Say Y to enable the backlight driver on Avionic Design Xanthos-based 272 Say Y to enable the backlight driver on Avionic Design Xanthos-based
@@ -268,7 +274,7 @@ config BACKLIGHT_ADX
268 274
269config BACKLIGHT_ADP5520 275config BACKLIGHT_ADP5520
270 tristate "Backlight Driver for ADP5520/ADP5501 using WLED" 276 tristate "Backlight Driver for ADP5520/ADP5501 using WLED"
271 depends on BACKLIGHT_CLASS_DEVICE && PMIC_ADP5520 277 depends on PMIC_ADP5520
272 help 278 help
273 If you have a LCD backlight connected to the BST/BL_SNK output of 279 If you have a LCD backlight connected to the BST/BL_SNK output of
274 ADP5520 or ADP5501, say Y here to enable this driver. 280 ADP5520 or ADP5501, say Y here to enable this driver.
@@ -276,9 +282,31 @@ config BACKLIGHT_ADP5520
276 To compile this driver as a module, choose M here: the module will 282 To compile this driver as a module, choose M here: the module will
277 be called adp5520_bl. 283 be called adp5520_bl.
278 284
285config BACKLIGHT_ADP8860
286 tristate "Backlight Driver for ADP8860/ADP8861/ADP8863 using WLED"
287 depends on BACKLIGHT_CLASS_DEVICE && I2C
288 select NEW_LEDS
289 select LEDS_CLASS
290 help
291 If you have a LCD backlight connected to the ADP8860, ADP8861 or
292 ADP8863 say Y here to enable this driver.
293
294 To compile this driver as a module, choose M here: the module will
295 be called adp8860_bl.
296
279config BACKLIGHT_88PM860X 297config BACKLIGHT_88PM860X
280 tristate "Backlight Driver for 88PM8606 using WLED" 298 tristate "Backlight Driver for 88PM8606 using WLED"
281 depends on BACKLIGHT_CLASS_DEVICE && MFD_88PM860X 299 depends on MFD_88PM860X
282 help 300 help
283 Say Y to enable the backlight driver for Marvell 88PM8606. 301 Say Y to enable the backlight driver for Marvell 88PM8606.
284 302
303config BACKLIGHT_PCF50633
304 tristate "Backlight driver for NXP PCF50633 MFD"
305 depends on BACKLIGHT_CLASS_DEVICE && MFD_PCF50633
306 help
307 If you have a backlight driven by a NXP PCF50633 MFD, say Y here to
308 enable its driver.
309
310endif # BACKLIGHT_CLASS_DEVICE
311
312endif # BACKLIGHT_LCD_SUPPORT
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 09d1f14d6257..44c0f81ad85d 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -11,9 +11,11 @@ obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
11obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o 11obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
12obj-$(CONFIG_LCD_TDO24M) += tdo24m.o 12obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
13obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o 13obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
14obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o
14 15
15obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o 16obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
16obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o 17obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
18obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o
17obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o 19obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
18obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o 20obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
19obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o 21obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
@@ -30,5 +32,7 @@ obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
30obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o 32obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
31obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o 33obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o
32obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o 34obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
35obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
33obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o 36obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
37obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
34 38
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
new file mode 100644
index 000000000000..921ca37398f3
--- /dev/null
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -0,0 +1,838 @@
1/*
2 * Backlight driver for Analog Devices ADP8860 Backlight Devices
3 *
4 * Copyright 2009-2010 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/module.h>
10#include <linux/version.h>
11#include <linux/init.h>
12#include <linux/errno.h>
13#include <linux/pm.h>
14#include <linux/platform_device.h>
15#include <linux/i2c.h>
16#include <linux/fb.h>
17#include <linux/backlight.h>
18#include <linux/leds.h>
19#include <linux/slab.h>
20#include <linux/workqueue.h>
21
22#include <linux/i2c/adp8860.h>
23#define ADP8860_EXT_FEATURES
24#define ADP8860_USE_LEDS
25
26#define ADP8860_MFDVID 0x00 /* Manufacturer and device ID */
27#define ADP8860_MDCR 0x01 /* Device mode and status */
28#define ADP8860_MDCR2 0x02 /* Device mode and Status Register 2 */
29#define ADP8860_INTR_EN 0x03 /* Interrupts enable */
30#define ADP8860_CFGR 0x04 /* Configuration register */
31#define ADP8860_BLSEN 0x05 /* Sink enable backlight or independent */
32#define ADP8860_BLOFF 0x06 /* Backlight off timeout */
33#define ADP8860_BLDIM 0x07 /* Backlight dim timeout */
34#define ADP8860_BLFR 0x08 /* Backlight fade in and out rates */
35#define ADP8860_BLMX1 0x09 /* Backlight (Brightness Level 1-daylight) maximum current */
36#define ADP8860_BLDM1 0x0A /* Backlight (Brightness Level 1-daylight) dim current */
37#define ADP8860_BLMX2 0x0B /* Backlight (Brightness Level 2-office) maximum current */
38#define ADP8860_BLDM2 0x0C /* Backlight (Brightness Level 2-office) dim current */
39#define ADP8860_BLMX3 0x0D /* Backlight (Brightness Level 3-dark) maximum current */
40#define ADP8860_BLDM3 0x0E /* Backlight (Brightness Level 3-dark) dim current */
41#define ADP8860_ISCFR 0x0F /* Independent sink current fade control register */
42#define ADP8860_ISCC 0x10 /* Independent sink current control register */
43#define ADP8860_ISCT1 0x11 /* Independent Sink Current Timer Register LED[7:5] */
44#define ADP8860_ISCT2 0x12 /* Independent Sink Current Timer Register LED[4:1] */
45#define ADP8860_ISCF 0x13 /* Independent sink current fade register */
46#define ADP8860_ISC7 0x14 /* Independent Sink Current LED7 */
47#define ADP8860_ISC6 0x15 /* Independent Sink Current LED6 */
48#define ADP8860_ISC5 0x16 /* Independent Sink Current LED5 */
49#define ADP8860_ISC4 0x17 /* Independent Sink Current LED4 */
50#define ADP8860_ISC3 0x18 /* Independent Sink Current LED3 */
51#define ADP8860_ISC2 0x19 /* Independent Sink Current LED2 */
52#define ADP8860_ISC1 0x1A /* Independent Sink Current LED1 */
53#define ADP8860_CCFG 0x1B /* Comparator configuration */
54#define ADP8860_CCFG2 0x1C /* Second comparator configuration */
55#define ADP8860_L2_TRP 0x1D /* L2 comparator reference */
56#define ADP8860_L2_HYS 0x1E /* L2 hysteresis */
57#define ADP8860_L3_TRP 0x1F /* L3 comparator reference */
58#define ADP8860_L3_HYS 0x20 /* L3 hysteresis */
59#define ADP8860_PH1LEVL 0x21 /* First phototransistor ambient light level-low byte register */
60#define ADP8860_PH1LEVH 0x22 /* First phototransistor ambient light level-high byte register */
61#define ADP8860_PH2LEVL 0x23 /* Second phototransistor ambient light level-low byte register */
62#define ADP8860_PH2LEVH 0x24 /* Second phototransistor ambient light level-high byte register */
63
64#define ADP8860_MANUFID 0x0 /* Analog Devices ADP8860 Manufacturer ID */
65#define ADP8861_MANUFID 0x4 /* Analog Devices ADP8861 Manufacturer ID */
66#define ADP8863_MANUFID 0x2 /* Analog Devices ADP8863 Manufacturer ID */
67
68#define ADP8860_DEVID(x) ((x) & 0xF)
69#define ADP8860_MANID(x) ((x) >> 4)
70
71/* MDCR Device mode and status */
72#define INT_CFG (1 << 6)
73#define NSTBY (1 << 5)
74#define DIM_EN (1 << 4)
75#define GDWN_DIS (1 << 3)
76#define SIS_EN (1 << 2)
77#define CMP_AUTOEN (1 << 1)
78#define BLEN (1 << 0)
79
80/* ADP8860_CCFG Main ALS comparator level enable */
81#define L3_EN (1 << 1)
82#define L2_EN (1 << 0)
83
84#define CFGR_BLV_SHIFT 3
85#define CFGR_BLV_MASK 0x3
86#define ADP8860_FLAG_LED_MASK 0xFF
87
88#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4))
89#define BL_CFGR_VAL(law, blv) ((((blv) & CFGR_BLV_MASK) << CFGR_BLV_SHIFT) | ((0x3 & (law)) << 1))
90#define ALS_CCFG_VAL(filt) ((0x7 & filt) << 5)
91
92enum {
93 adp8860,
94 adp8861,
95 adp8863
96};
97
98struct adp8860_led {
99 struct led_classdev cdev;
100 struct work_struct work;
101 struct i2c_client *client;
102 enum led_brightness new_brightness;
103 int id;
104 int flags;
105};
106
107struct adp8860_bl {
108 struct i2c_client *client;
109 struct backlight_device *bl;
110 struct adp8860_led *led;
111 struct adp8860_backlight_platform_data *pdata;
112 struct mutex lock;
113 unsigned long cached_daylight_max;
114 int id;
115 int revid;
116 int current_brightness;
117 unsigned en_ambl_sens:1;
118 unsigned gdwn_dis:1;
119};
120
121static int adp8860_read(struct i2c_client *client, int reg, uint8_t *val)
122{
123 int ret;
124
125 ret = i2c_smbus_read_byte_data(client, reg);
126 if (ret < 0) {
127 dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
128 return ret;
129 }
130
131 *val = (uint8_t)ret;
132 return 0;
133}
134
135static int adp8860_write(struct i2c_client *client, u8 reg, u8 val)
136{
137 return i2c_smbus_write_byte_data(client, reg, val);
138}
139
140static int adp8860_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
141{
142 struct adp8860_bl *data = i2c_get_clientdata(client);
143 uint8_t reg_val;
144 int ret;
145
146 mutex_lock(&data->lock);
147
148 ret = adp8860_read(client, reg, &reg_val);
149
150 if (!ret && ((reg_val & bit_mask) == 0)) {
151 reg_val |= bit_mask;
152 ret = adp8860_write(client, reg, reg_val);
153 }
154
155 mutex_unlock(&data->lock);
156 return ret;
157}
158
159static int adp8860_clr_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
160{
161 struct adp8860_bl *data = i2c_get_clientdata(client);
162 uint8_t reg_val;
163 int ret;
164
165 mutex_lock(&data->lock);
166
167 ret = adp8860_read(client, reg, &reg_val);
168
169 if (!ret && (reg_val & bit_mask)) {
170 reg_val &= ~bit_mask;
171 ret = adp8860_write(client, reg, reg_val);
172 }
173
174 mutex_unlock(&data->lock);
175 return ret;
176}
177
178/*
179 * Independent sink / LED
180 */
181#if defined(ADP8860_USE_LEDS)
182static void adp8860_led_work(struct work_struct *work)
183{
184 struct adp8860_led *led = container_of(work, struct adp8860_led, work);
185 adp8860_write(led->client, ADP8860_ISC1 - led->id + 1,
186 led->new_brightness >> 1);
187}
188
189static void adp8860_led_set(struct led_classdev *led_cdev,
190 enum led_brightness value)
191{
192 struct adp8860_led *led;
193
194 led = container_of(led_cdev, struct adp8860_led, cdev);
195 led->new_brightness = value;
196 schedule_work(&led->work);
197}
198
199static int adp8860_led_setup(struct adp8860_led *led)
200{
201 struct i2c_client *client = led->client;
202 int ret = 0;
203
204 ret = adp8860_write(client, ADP8860_ISC1 - led->id + 1, 0);
205 ret |= adp8860_set_bits(client, ADP8860_ISCC, 1 << (led->id - 1));
206
207 if (led->id > 4)
208 ret |= adp8860_set_bits(client, ADP8860_ISCT1,
209 (led->flags & 0x3) << ((led->id - 5) * 2));
210 else
211 ret |= adp8860_set_bits(client, ADP8860_ISCT2,
212 (led->flags & 0x3) << ((led->id - 1) * 2));
213
214 return ret;
215}
216
217static int __devinit adp8860_led_probe(struct i2c_client *client)
218{
219 struct adp8860_backlight_platform_data *pdata =
220 client->dev.platform_data;
221 struct adp8860_bl *data = i2c_get_clientdata(client);
222 struct adp8860_led *led, *led_dat;
223 struct led_info *cur_led;
224 int ret, i;
225
226 led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
227 if (led == NULL) {
228 dev_err(&client->dev, "failed to alloc memory\n");
229 return -ENOMEM;
230 }
231
232 ret = adp8860_write(client, ADP8860_ISCFR, pdata->led_fade_law);
233	ret |= adp8860_write(client, ADP8860_ISCT1,
234 (pdata->led_on_time & 0x3) << 6);
235 ret |= adp8860_write(client, ADP8860_ISCF,
236 FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
237
238 if (ret) {
239 dev_err(&client->dev, "failed to write\n");
240 goto err_free;
241 }
242
243 for (i = 0; i < pdata->num_leds; ++i) {
244 cur_led = &pdata->leds[i];
245 led_dat = &led[i];
246
247 led_dat->id = cur_led->flags & ADP8860_FLAG_LED_MASK;
248
249 if (led_dat->id > 7 || led_dat->id < 1) {
250 dev_err(&client->dev, "Invalid LED ID %d\n",
251 led_dat->id);
252 goto err;
253 }
254
255 if (pdata->bl_led_assign & (1 << (led_dat->id - 1))) {
256 dev_err(&client->dev, "LED %d used by Backlight\n",
257 led_dat->id);
258 goto err;
259 }
260
261 led_dat->cdev.name = cur_led->name;
262 led_dat->cdev.default_trigger = cur_led->default_trigger;
263 led_dat->cdev.brightness_set = adp8860_led_set;
264 led_dat->cdev.brightness = LED_OFF;
265 led_dat->flags = cur_led->flags >> FLAG_OFFT_SHIFT;
266 led_dat->client = client;
267 led_dat->new_brightness = LED_OFF;
268 INIT_WORK(&led_dat->work, adp8860_led_work);
269
270 ret = led_classdev_register(&client->dev, &led_dat->cdev);
271 if (ret) {
272 dev_err(&client->dev, "failed to register LED %d\n",
273 led_dat->id);
274 goto err;
275 }
276
277 ret = adp8860_led_setup(led_dat);
278 if (ret) {
279 dev_err(&client->dev, "failed to write\n");
280 i++;
281 goto err;
282 }
283 }
284
285 data->led = led;
286
287 return 0;
288
289 err:
290 for (i = i - 1; i >= 0; --i) {
291 led_classdev_unregister(&led[i].cdev);
292 cancel_work_sync(&led[i].work);
293 }
294
295 err_free:
296 kfree(led);
297
298 return ret;
299}
300
301static int __devexit adp8860_led_remove(struct i2c_client *client)
302{
303 struct adp8860_backlight_platform_data *pdata =
304 client->dev.platform_data;
305 struct adp8860_bl *data = i2c_get_clientdata(client);
306 int i;
307
308 for (i = 0; i < pdata->num_leds; i++) {
309 led_classdev_unregister(&data->led[i].cdev);
310 cancel_work_sync(&data->led[i].work);
311 }
312
313 kfree(data->led);
314 return 0;
315}
316#else
317static int __devinit adp8860_led_probe(struct i2c_client *client)
318{
319 return 0;
320}
321
322static int __devexit adp8860_led_remove(struct i2c_client *client)
323{
324 return 0;
325}
326#endif
327
328static int adp8860_bl_set(struct backlight_device *bl, int brightness)
329{
330 struct adp8860_bl *data = bl_get_data(bl);
331 struct i2c_client *client = data->client;
332 int ret = 0;
333
334 if (data->en_ambl_sens) {
335 if ((brightness > 0) && (brightness < ADP8860_MAX_BRIGHTNESS)) {
336 /* Disable Ambient Light auto adjust */
337 ret |= adp8860_clr_bits(client, ADP8860_MDCR,
338 CMP_AUTOEN);
339 ret |= adp8860_write(client, ADP8860_BLMX1, brightness);
340 } else {
341 /*
342			 * MAX_BRIGHTNESS -> enable ambient light auto adjust
343			 * and restore the daylight (l1) brightness set via sysfs
344 */
345 ret |= adp8860_write(client, ADP8860_BLMX1,
346 data->cached_daylight_max);
347 ret |= adp8860_set_bits(client, ADP8860_MDCR,
348 CMP_AUTOEN);
349 }
350 } else
351 ret |= adp8860_write(client, ADP8860_BLMX1, brightness);
352
353 if (data->current_brightness && brightness == 0)
354 ret |= adp8860_set_bits(client,
355 ADP8860_MDCR, DIM_EN);
356 else if (data->current_brightness == 0 && brightness)
357 ret |= adp8860_clr_bits(client,
358 ADP8860_MDCR, DIM_EN);
359
360 if (!ret)
361 data->current_brightness = brightness;
362
363 return ret;
364}
365
366static int adp8860_bl_update_status(struct backlight_device *bl)
367{
368 int brightness = bl->props.brightness;
369 if (bl->props.power != FB_BLANK_UNBLANK)
370 brightness = 0;
371
372 if (bl->props.fb_blank != FB_BLANK_UNBLANK)
373 brightness = 0;
374
375 return adp8860_bl_set(bl, brightness);
376}
377
378static int adp8860_bl_get_brightness(struct backlight_device *bl)
379{
380 struct adp8860_bl *data = bl_get_data(bl);
381
382 return data->current_brightness;
383}
384
385static const struct backlight_ops adp8860_bl_ops = {
386 .update_status = adp8860_bl_update_status,
387 .get_brightness = adp8860_bl_get_brightness,
388};
389
390static int adp8860_bl_setup(struct backlight_device *bl)
391{
392 struct adp8860_bl *data = bl_get_data(bl);
393 struct i2c_client *client = data->client;
394 struct adp8860_backlight_platform_data *pdata = data->pdata;
395 int ret = 0;
396
397 ret |= adp8860_write(client, ADP8860_BLSEN, ~pdata->bl_led_assign);
398 ret |= adp8860_write(client, ADP8860_BLMX1, pdata->l1_daylight_max);
399 ret |= adp8860_write(client, ADP8860_BLDM1, pdata->l1_daylight_dim);
400
401 if (data->en_ambl_sens) {
402 data->cached_daylight_max = pdata->l1_daylight_max;
403 ret |= adp8860_write(client, ADP8860_BLMX2,
404 pdata->l2_office_max);
405 ret |= adp8860_write(client, ADP8860_BLDM2,
406 pdata->l2_office_dim);
407 ret |= adp8860_write(client, ADP8860_BLMX3,
408 pdata->l3_dark_max);
409 ret |= adp8860_write(client, ADP8860_BLDM3,
410 pdata->l3_dark_dim);
411
412 ret |= adp8860_write(client, ADP8860_L2_TRP, pdata->l2_trip);
413 ret |= adp8860_write(client, ADP8860_L2_HYS, pdata->l2_hyst);
414 ret |= adp8860_write(client, ADP8860_L3_TRP, pdata->l3_trip);
415 ret |= adp8860_write(client, ADP8860_L3_HYS, pdata->l3_hyst);
416 ret |= adp8860_write(client, ADP8860_CCFG, L2_EN | L3_EN |
417 ALS_CCFG_VAL(pdata->abml_filt));
418 }
419
420 ret |= adp8860_write(client, ADP8860_CFGR,
421 BL_CFGR_VAL(pdata->bl_fade_law, 0));
422
423 ret |= adp8860_write(client, ADP8860_BLFR, FADE_VAL(pdata->bl_fade_in,
424 pdata->bl_fade_out));
425
426 ret |= adp8860_set_bits(client, ADP8860_MDCR, BLEN | DIM_EN | NSTBY |
427 (data->gdwn_dis ? GDWN_DIS : 0));
428
429 return ret;
430}
431
432static ssize_t adp8860_show(struct device *dev, char *buf, int reg)
433{
434 struct adp8860_bl *data = dev_get_drvdata(dev);
435 int error;
436 uint8_t reg_val;
437
438 mutex_lock(&data->lock);
439 error = adp8860_read(data->client, reg, &reg_val);
440 mutex_unlock(&data->lock);
441
442 if (error < 0)
443 return error;
444
445 return sprintf(buf, "%u\n", reg_val);
446}
447
448static ssize_t adp8860_store(struct device *dev, const char *buf,
449 size_t count, int reg)
450{
451 struct adp8860_bl *data = dev_get_drvdata(dev);
452 unsigned long val;
453 int ret;
454
455 ret = strict_strtoul(buf, 10, &val);
456 if (ret)
457 return ret;
458
459 mutex_lock(&data->lock);
460 adp8860_write(data->client, reg, val);
461 mutex_unlock(&data->lock);
462
463 return count;
464}
465
466static ssize_t adp8860_bl_l3_dark_max_show(struct device *dev,
467 struct device_attribute *attr, char *buf)
468{
469 return adp8860_show(dev, buf, ADP8860_BLMX3);
470}
471
472static ssize_t adp8860_bl_l3_dark_max_store(struct device *dev,
473 struct device_attribute *attr, const char *buf, size_t count)
474{
475 return adp8860_store(dev, buf, count, ADP8860_BLMX3);
476}
477
478static DEVICE_ATTR(l3_dark_max, 0664, adp8860_bl_l3_dark_max_show,
479 adp8860_bl_l3_dark_max_store);
480
481static ssize_t adp8860_bl_l2_office_max_show(struct device *dev,
482 struct device_attribute *attr, char *buf)
483{
484 return adp8860_show(dev, buf, ADP8860_BLMX2);
485}
486
487static ssize_t adp8860_bl_l2_office_max_store(struct device *dev,
488 struct device_attribute *attr, const char *buf, size_t count)
489{
490 return adp8860_store(dev, buf, count, ADP8860_BLMX2);
491}
492static DEVICE_ATTR(l2_office_max, 0664, adp8860_bl_l2_office_max_show,
493 adp8860_bl_l2_office_max_store);
494
495static ssize_t adp8860_bl_l1_daylight_max_show(struct device *dev,
496 struct device_attribute *attr, char *buf)
497{
498 return adp8860_show(dev, buf, ADP8860_BLMX1);
499}
500
501static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev,
502 struct device_attribute *attr, const char *buf, size_t count)
503{
504 struct adp8860_bl *data = dev_get_drvdata(dev);
505
506 strict_strtoul(buf, 10, &data->cached_daylight_max);
507 return adp8860_store(dev, buf, count, ADP8860_BLMX1);
508}
509static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show,
510 adp8860_bl_l1_daylight_max_store);
511
512static ssize_t adp8860_bl_l3_dark_dim_show(struct device *dev,
513 struct device_attribute *attr, char *buf)
514{
515 return adp8860_show(dev, buf, ADP8860_BLDM3);
516}
517
518static ssize_t adp8860_bl_l3_dark_dim_store(struct device *dev,
519 struct device_attribute *attr,
520 const char *buf, size_t count)
521{
522 return adp8860_store(dev, buf, count, ADP8860_BLDM3);
523}
524static DEVICE_ATTR(l3_dark_dim, 0664, adp8860_bl_l3_dark_dim_show,
525 adp8860_bl_l3_dark_dim_store);
526
527static ssize_t adp8860_bl_l2_office_dim_show(struct device *dev,
528 struct device_attribute *attr, char *buf)
529{
530 return adp8860_show(dev, buf, ADP8860_BLDM2);
531}
532
533static ssize_t adp8860_bl_l2_office_dim_store(struct device *dev,
534 struct device_attribute *attr,
535 const char *buf, size_t count)
536{
537 return adp8860_store(dev, buf, count, ADP8860_BLDM2);
538}
539static DEVICE_ATTR(l2_office_dim, 0664, adp8860_bl_l2_office_dim_show,
540 adp8860_bl_l2_office_dim_store);
541
542static ssize_t adp8860_bl_l1_daylight_dim_show(struct device *dev,
543 struct device_attribute *attr, char *buf)
544{
545 return adp8860_show(dev, buf, ADP8860_BLDM1);
546}
547
548static ssize_t adp8860_bl_l1_daylight_dim_store(struct device *dev,
549 struct device_attribute *attr,
550 const char *buf, size_t count)
551{
552 return adp8860_store(dev, buf, count, ADP8860_BLDM1);
553}
554static DEVICE_ATTR(l1_daylight_dim, 0664, adp8860_bl_l1_daylight_dim_show,
555 adp8860_bl_l1_daylight_dim_store);
556
557#ifdef ADP8860_EXT_FEATURES
558static ssize_t adp8860_bl_ambient_light_level_show(struct device *dev,
559 struct device_attribute *attr, char *buf)
560{
561 struct adp8860_bl *data = dev_get_drvdata(dev);
562 int error;
563 uint8_t reg_val;
564 uint16_t ret_val;
565
566 mutex_lock(&data->lock);
567 error = adp8860_read(data->client, ADP8860_PH1LEVL, &reg_val);
568 ret_val = reg_val;
569 error |= adp8860_read(data->client, ADP8860_PH1LEVH, &reg_val);
570 mutex_unlock(&data->lock);
571
572 if (error < 0)
573 return error;
574
575 /* Return 13-bit conversion value for the first light sensor */
576 ret_val += (reg_val & 0x1F) << 8;
577
578 return sprintf(buf, "%u\n", ret_val);
579}
580static DEVICE_ATTR(ambient_light_level, 0444,
581 adp8860_bl_ambient_light_level_show, NULL);
582
583static ssize_t adp8860_bl_ambient_light_zone_show(struct device *dev,
584 struct device_attribute *attr, char *buf)
585{
586 struct adp8860_bl *data = dev_get_drvdata(dev);
587 int error;
588 uint8_t reg_val;
589
590 mutex_lock(&data->lock);
591 error = adp8860_read(data->client, ADP8860_CFGR, &reg_val);
592 mutex_unlock(&data->lock);
593
594 if (error < 0)
595 return error;
596
597 return sprintf(buf, "%u\n",
598 ((reg_val >> CFGR_BLV_SHIFT) & CFGR_BLV_MASK) + 1);
599}
600
601static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
602 struct device_attribute *attr,
603 const char *buf, size_t count)
604{
605 struct adp8860_bl *data = dev_get_drvdata(dev);
606 unsigned long val;
607 uint8_t reg_val;
608 int ret;
609
610 ret = strict_strtoul(buf, 10, &val);
611 if (ret)
612 return ret;
613
614 if (val == 0) {
615 /* Enable automatic ambient light sensing */
616 adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
617 } else if ((val > 0) && (val < 6)) {
618 /* Disable automatic ambient light sensing */
619 adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
620
621 /* Set user supplied ambient light zone */
622 mutex_lock(&data->lock);
623 adp8860_read(data->client, ADP8860_CFGR, &reg_val);
624 reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT);
625 reg_val |= val << CFGR_BLV_SHIFT;
626 adp8860_write(data->client, ADP8860_CFGR, reg_val);
627 mutex_unlock(&data->lock);
628 }
629
630 return count;
631}
632static DEVICE_ATTR(ambient_light_zone, 0664,
633 adp8860_bl_ambient_light_zone_show,
634 adp8860_bl_ambient_light_zone_store);
635#endif
636
637static struct attribute *adp8860_bl_attributes[] = {
638 &dev_attr_l3_dark_max.attr,
639 &dev_attr_l3_dark_dim.attr,
640 &dev_attr_l2_office_max.attr,
641 &dev_attr_l2_office_dim.attr,
642 &dev_attr_l1_daylight_max.attr,
643 &dev_attr_l1_daylight_dim.attr,
644#ifdef ADP8860_EXT_FEATURES
645 &dev_attr_ambient_light_level.attr,
646 &dev_attr_ambient_light_zone.attr,
647#endif
648 NULL
649};
650
651static const struct attribute_group adp8860_bl_attr_group = {
652 .attrs = adp8860_bl_attributes,
653};
654
655static int __devinit adp8860_probe(struct i2c_client *client,
656 const struct i2c_device_id *id)
657{
658 struct backlight_device *bl;
659 struct adp8860_bl *data;
660 struct adp8860_backlight_platform_data *pdata =
661 client->dev.platform_data;
662 struct backlight_properties props;
663 uint8_t reg_val;
664 int ret;
665
666 if (!i2c_check_functionality(client->adapter,
667 I2C_FUNC_SMBUS_BYTE_DATA)) {
668 dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
669 return -EIO;
670 }
671
672 if (!pdata) {
673 dev_err(&client->dev, "no platform data?\n");
674 return -EINVAL;
675 }
676
677 data = kzalloc(sizeof(*data), GFP_KERNEL);
678 if (data == NULL)
679 return -ENOMEM;
680
681 ret = adp8860_read(client, ADP8860_MFDVID, &reg_val);
682 if (ret < 0)
683 goto out2;
684
685 switch (ADP8860_MANID(reg_val)) {
686 case ADP8863_MANUFID:
687 data->gdwn_dis = !!pdata->gdwn_dis;
688 case ADP8860_MANUFID:
689 data->en_ambl_sens = !!pdata->en_ambl_sens;
690 break;
691 case ADP8861_MANUFID:
692 data->gdwn_dis = !!pdata->gdwn_dis;
693 break;
694 default:
695 dev_err(&client->dev, "failed to probe\n");
696 ret = -ENODEV;
697 goto out2;
698 }
699
700 /* It's confirmed that the DEVID field is actually a REVID */
701
702 data->revid = ADP8860_DEVID(reg_val);
703 data->client = client;
704 data->pdata = pdata;
705 data->id = id->driver_data;
706 data->current_brightness = 0;
707 i2c_set_clientdata(client, data);
708
709 memset(&props, 0, sizeof(props));
710 props.max_brightness = ADP8860_MAX_BRIGHTNESS;
711
712 mutex_init(&data->lock);
713
714 bl = backlight_device_register(dev_driver_string(&client->dev),
715 &client->dev, data, &adp8860_bl_ops, &props);
716 if (IS_ERR(bl)) {
717 dev_err(&client->dev, "failed to register backlight\n");
718 ret = PTR_ERR(bl);
719 goto out2;
720 }
721
722 bl->props.max_brightness =
723 bl->props.brightness = ADP8860_MAX_BRIGHTNESS;
724
725 data->bl = bl;
726
727 if (data->en_ambl_sens)
728 ret = sysfs_create_group(&bl->dev.kobj,
729 &adp8860_bl_attr_group);
730
731 if (ret) {
732 dev_err(&client->dev, "failed to register sysfs\n");
733 goto out1;
734 }
735
736 ret = adp8860_bl_setup(bl);
737 if (ret) {
738 ret = -EIO;
739 goto out;
740 }
741
742 backlight_update_status(bl);
743
744 dev_info(&client->dev, "%s Rev.%d Backlight\n",
745 client->name, data->revid);
746
747 if (pdata->num_leds)
748 adp8860_led_probe(client);
749
750 return 0;
751
752out:
753 if (data->en_ambl_sens)
754 sysfs_remove_group(&data->bl->dev.kobj,
755 &adp8860_bl_attr_group);
756out1:
757 backlight_device_unregister(bl);
758out2:
759 i2c_set_clientdata(client, NULL);
760 kfree(data);
761
762 return ret;
763}
764
765static int __devexit adp8860_remove(struct i2c_client *client)
766{
767 struct adp8860_bl *data = i2c_get_clientdata(client);
768
769 adp8860_clr_bits(client, ADP8860_MDCR, NSTBY);
770
771 if (data->led)
772 adp8860_led_remove(client);
773
774 if (data->en_ambl_sens)
775 sysfs_remove_group(&data->bl->dev.kobj,
776 &adp8860_bl_attr_group);
777
778 backlight_device_unregister(data->bl);
779 i2c_set_clientdata(client, NULL);
780 kfree(data);
781
782 return 0;
783}
784
785#ifdef CONFIG_PM
786static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message)
787{
788 adp8860_clr_bits(client, ADP8860_MDCR, NSTBY);
789
790 return 0;
791}
792
793static int adp8860_i2c_resume(struct i2c_client *client)
794{
795 adp8860_set_bits(client, ADP8860_MDCR, NSTBY);
796
797 return 0;
798}
799#else
800#define adp8860_i2c_suspend NULL
801#define adp8860_i2c_resume NULL
802#endif
803
804static const struct i2c_device_id adp8860_id[] = {
805 { "adp8860", adp8860 },
806 { "adp8861", adp8861 },
807 { "adp8863", adp8863 },
808 { }
809};
810MODULE_DEVICE_TABLE(i2c, adp8860_id);
811
812static struct i2c_driver adp8860_driver = {
813 .driver = {
814 .name = KBUILD_MODNAME,
815 },
816 .probe = adp8860_probe,
817 .remove = __devexit_p(adp8860_remove),
818 .suspend = adp8860_i2c_suspend,
819 .resume = adp8860_i2c_resume,
820 .id_table = adp8860_id,
821};
822
823static int __init adp8860_init(void)
824{
825 return i2c_add_driver(&adp8860_driver);
826}
827module_init(adp8860_init);
828
829static void __exit adp8860_exit(void)
830{
831 i2c_del_driver(&adp8860_driver);
832}
833module_exit(adp8860_exit);
834
835MODULE_LICENSE("GPL v2");
836MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
837MODULE_DESCRIPTION("ADP8860 Backlight driver");
838MODULE_ALIAS("i2c:adp8860-backlight");
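The adp8860 driver above takes all of its configuration from platform data supplied with the I2C board info: adp8860_bl_setup() and adp8860_led_probe() program the chip registers from the struct adp8860_backlight_platform_data fields. A minimal board-code sketch follows; the bus number, slave address and register values are placeholders for illustration only, not taken from any real board.

#include <linux/i2c.h>
#include <linux/i2c/adp8860.h>

/* Placeholder values -- tune for the actual panel and LED wiring. */
static struct adp8860_backlight_platform_data board_adp8860_pdata = {
	.bl_led_assign	 = 0x7f,	/* drive LED1..LED7 from the backlight */
	.l1_daylight_max = 100,
	.l1_daylight_dim = 10,
	.bl_fade_in	 = 2,
	.bl_fade_out	 = 2,
	.en_ambl_sens	 = 0,		/* no ambient light sensor fitted */
};

static struct i2c_board_info board_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("adp8860", 0x2a),	/* 0x2a: example address */
		.platform_data = &board_adp8860_pdata,
	},
};

static void __init board_register_i2c(void)
{
	i2c_register_board_info(0, board_i2c_devices,
				ARRAY_SIZE(board_i2c_devices));
}

Once probed, the device appears under /sys/class/backlight, and if en_ambl_sens is set the l*_max, l*_dim and ambient_light_* attributes defined above are created as well.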
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
index 7f4a7c30a98b..fe9af129c5dd 100644
--- a/drivers/video/backlight/adx_bl.c
+++ b/drivers/video/backlight/adx_bl.c
@@ -107,8 +107,8 @@ static int __devinit adx_backlight_probe(struct platform_device *pdev)
107 props.max_brightness = 0xff; 107 props.max_brightness = 0xff;
108 bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, 108 bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev,
109 bl, &adx_backlight_ops, &props); 109 bl, &adx_backlight_ops, &props);
110 if (!bldev) { 110 if (IS_ERR(bldev)) {
111 ret = -ENOMEM; 111 ret = PTR_ERR(bldev);
112 goto out; 112 goto out;
113 } 113 }
114 114
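The adx_bl fix above exists because backlight_device_register() reports failure as an ERR_PTR-encoded error code rather than a NULL pointer, so the result must be checked with IS_ERR() and converted with PTR_ERR(). A small sketch of the idiom as any caller would use it (the name "example-bl" and the ops/props arguments are placeholders):

#include <linux/err.h>
#include <linux/backlight.h>

static int example_register_backlight(struct device *dev, void *priv,
				      const struct backlight_ops *ops,
				      struct backlight_properties *props,
				      struct backlight_device **out)
{
	struct backlight_device *bd;

	bd = backlight_device_register("example-bl", dev, priv, ops, props);
	if (IS_ERR(bd))			/* never NULL on failure */
		return PTR_ERR(bd);	/* propagate the encoded errno */

	*out = bd;
	return 0;
}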
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
new file mode 100644
index 000000000000..b0cc49184803
--- /dev/null
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -0,0 +1,160 @@
1/*
2 * Driver for the Cirrus EP93xx lcd backlight
3 *
4 * Copyright (c) 2010 H Hartley Sweeten <hsweeten@visionengravers.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This driver controls the pulse width modulated brightness control output,
11 * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors.
12 */
13
14
15#include <linux/platform_device.h>
16#include <linux/io.h>
17#include <linux/fb.h>
18#include <linux/backlight.h>
19
20#include <mach/hardware.h>
21
22#define EP93XX_RASTER_REG(x) (EP93XX_RASTER_BASE + (x))
23#define EP93XX_RASTER_BRIGHTNESS EP93XX_RASTER_REG(0x20)
24
25#define EP93XX_MAX_COUNT 255
26#define EP93XX_MAX_BRIGHT 255
27#define EP93XX_DEF_BRIGHT 128
28
29struct ep93xxbl {
30 void __iomem *mmio;
31 int brightness;
32};
33
34static int ep93xxbl_set(struct backlight_device *bl, int brightness)
35{
36 struct ep93xxbl *ep93xxbl = bl_get_data(bl);
37
38 __raw_writel((brightness << 8) | EP93XX_MAX_COUNT, ep93xxbl->mmio);
39
40 ep93xxbl->brightness = brightness;
41
42 return 0;
43}
44
45static int ep93xxbl_update_status(struct backlight_device *bl)
46{
47 int brightness = bl->props.brightness;
48
49 if (bl->props.power != FB_BLANK_UNBLANK ||
50 bl->props.fb_blank != FB_BLANK_UNBLANK)
51 brightness = 0;
52
53 return ep93xxbl_set(bl, brightness);
54}
55
56static int ep93xxbl_get_brightness(struct backlight_device *bl)
57{
58 struct ep93xxbl *ep93xxbl = bl_get_data(bl);
59
60 return ep93xxbl->brightness;
61}
62
63static const struct backlight_ops ep93xxbl_ops = {
64 .update_status = ep93xxbl_update_status,
65 .get_brightness = ep93xxbl_get_brightness,
66};
67
68static int __init ep93xxbl_probe(struct platform_device *dev)
69{
70 struct ep93xxbl *ep93xxbl;
71 struct backlight_device *bl;
72 struct backlight_properties props;
73
74 ep93xxbl = devm_kzalloc(&dev->dev, sizeof(*ep93xxbl), GFP_KERNEL);
75 if (!ep93xxbl)
76 return -ENOMEM;
77
78 /*
79 * This register is located in the range already ioremap'ed by
80	 * the framebuffer driver. An MFD driver seems a bit of overkill
81	 * to handle this, so use the static I/O mapping; this address
82 * is already virtual.
83 *
84 * NOTE: No locking is required; the framebuffer does not touch
85 * this register.
86 */
87 ep93xxbl->mmio = EP93XX_RASTER_BRIGHTNESS;
88
89 memset(&props, 0, sizeof(struct backlight_properties));
90 props.max_brightness = EP93XX_MAX_BRIGHT;
91 bl = backlight_device_register(dev->name, &dev->dev, ep93xxbl,
92 &ep93xxbl_ops, &props);
93 if (IS_ERR(bl))
94 return PTR_ERR(bl);
95
96 bl->props.brightness = EP93XX_DEF_BRIGHT;
97
98 platform_set_drvdata(dev, bl);
99
100 ep93xxbl_update_status(bl);
101
102 return 0;
103}
104
105static int ep93xxbl_remove(struct platform_device *dev)
106{
107 struct backlight_device *bl = platform_get_drvdata(dev);
108
109 backlight_device_unregister(bl);
110 platform_set_drvdata(dev, NULL);
111 return 0;
112}
113
114#ifdef CONFIG_PM
115static int ep93xxbl_suspend(struct platform_device *dev, pm_message_t state)
116{
117 struct backlight_device *bl = platform_get_drvdata(dev);
118
119 return ep93xxbl_set(bl, 0);
120}
121
122static int ep93xxbl_resume(struct platform_device *dev)
123{
124 struct backlight_device *bl = platform_get_drvdata(dev);
125
126 backlight_update_status(bl);
127 return 0;
128}
129#else
130#define ep93xxbl_suspend NULL
131#define ep93xxbl_resume NULL
132#endif
133
134static struct platform_driver ep93xxbl_driver = {
135 .driver = {
136 .name = "ep93xx-bl",
137 .owner = THIS_MODULE,
138 },
139 .probe = ep93xxbl_probe,
140 .remove = __devexit_p(ep93xxbl_remove),
141 .suspend = ep93xxbl_suspend,
142 .resume = ep93xxbl_resume,
143};
144
145static int __init ep93xxbl_init(void)
146{
147 return platform_driver_register(&ep93xxbl_driver);
148}
149module_init(ep93xxbl_init);
150
151static void __exit ep93xxbl_exit(void)
152{
153 platform_driver_unregister(&ep93xxbl_driver);
154}
155module_exit(ep93xxbl_exit);
156
157MODULE_DESCRIPTION("EP93xx Backlight Driver");
158MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
159MODULE_LICENSE("GPL");
160MODULE_ALIAS("platform:ep93xx-bl");
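Because the brightness register is reached through the static I/O mapping described in the probe comment, the ep93xx driver needs no resources or platform data; it binds purely by the name "ep93xx-bl". Board support code could therefore instantiate it with a sketch as small as the following (where exactly this is called during machine init is left to the board):

#include <linux/platform_device.h>

static struct platform_device ep93xx_bl_device = {
	.name	= "ep93xx-bl",
	.id	= -1,
};

static void __init example_ep93xx_board_init(void)
{
	platform_device_register(&ep93xx_bl_device);
}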
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index bcdb12c93efd..9093ef0fa869 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -125,8 +125,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
125 125
126 if (priv == NULL) { 126 if (priv == NULL) {
127 dev_err(&spi->dev, "No memory for this device.\n"); 127 dev_err(&spi->dev, "No memory for this device.\n");
128 ret = -ENOMEM; 128 return -ENOMEM;
129 goto err;
130 } 129 }
131 130
132 dev_set_drvdata(&spi->dev, priv); 131 dev_set_drvdata(&spi->dev, priv);
@@ -139,7 +138,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
139 if (ret) { 138 if (ret) {
140 dev_err(&spi->dev, 139 dev_err(&spi->dev,
141 "Unable to get the lcd l4f00242t03 reset gpio.\n"); 140 "Unable to get the lcd l4f00242t03 reset gpio.\n");
142 return ret; 141 goto err;
143 } 142 }
144 143
145 ret = gpio_direction_output(pdata->reset_gpio, 1); 144 ret = gpio_direction_output(pdata->reset_gpio, 1);
@@ -151,7 +150,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
151 if (ret) { 150 if (ret) {
152 dev_err(&spi->dev, 151 dev_err(&spi->dev,
153 "Unable to get the lcd l4f00242t03 data en gpio.\n"); 152 "Unable to get the lcd l4f00242t03 data en gpio.\n");
154 return ret; 153 goto err2;
155 } 154 }
156 155
157 ret = gpio_direction_output(pdata->data_enable_gpio, 0); 156 ret = gpio_direction_output(pdata->data_enable_gpio, 0);
@@ -222,9 +221,9 @@ static int __devexit l4f00242t03_remove(struct spi_device *spi)
222 gpio_free(pdata->reset_gpio); 221 gpio_free(pdata->reset_gpio);
223 222
224 if (priv->io_reg) 223 if (priv->io_reg)
225 regulator_put(priv->core_reg);
226 if (priv->core_reg)
227 regulator_put(priv->io_reg); 224 regulator_put(priv->io_reg);
225 if (priv->core_reg)
226 regulator_put(priv->core_reg);
228 227
229 kfree(priv); 228 kfree(priv);
230 229
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index b5accc957ad3..b2b2c7ba1f63 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -162,6 +162,7 @@ static int __devinit max8925_backlight_probe(struct platform_device *pdev)
162 backlight_update_status(bl); 162 backlight_update_status(bl);
163 return 0; 163 return 0;
164out: 164out:
165 backlight_device_unregister(bl);
165 kfree(data); 166 kfree(data);
166 return ret; 167 return ret;
167} 168}
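The one-line max8925_bl fix above restores the usual rule for probe error paths: everything acquired before the failure is released in reverse order, here backlight_device_unregister() before kfree(). A generic sketch of that goto-based unwinding, with entirely made-up example_* names, looks like this:

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>

struct example_data {
	struct backlight_device *bl;
};

static int example_update_status(struct backlight_device *bl)
{
	return 0;				/* stub for the sketch */
}

static const struct backlight_ops example_ops = {
	.update_status = example_update_status,
};

static int example_hw_init(struct platform_device *pdev)
{
	return 0;				/* stub: step that may fail */
}

static int __devinit example_probe(struct platform_device *pdev)
{
	struct backlight_properties props = { .max_brightness = 255 };
	struct example_data *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);		/* step 1 */
	if (!data)
		return -ENOMEM;

	data->bl = backlight_device_register("example-bl", &pdev->dev,
					     data, &example_ops, &props);
	if (IS_ERR(data->bl)) {					/* step 2 failed */
		ret = PTR_ERR(data->bl);
		goto err_free;
	}

	ret = example_hw_init(pdev);				/* step 3 */
	if (ret)
		goto err_unregister;

	platform_set_drvdata(pdev, data);
	return 0;

err_unregister:
	backlight_device_unregister(data->bl);	/* undo step 2 */
err_free:
	kfree(data);				/* undo step 1 */
	return ret;
}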
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 1b5d3fe6bbbc..9fb533f6373e 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -141,7 +141,7 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
141 .callback = mbp_dmi_match, 141 .callback = mbp_dmi_match,
142 .ident = "MacBook 1,1", 142 .ident = "MacBook 1,1",
143 .matches = { 143 .matches = {
144 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 144 DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
145 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"), 145 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
146 }, 146 },
147 .driver_data = (void *)&intel_chipset_data, 147 .driver_data = (void *)&intel_chipset_data,
@@ -184,6 +184,42 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
184 }, 184 },
185 { 185 {
186 .callback = mbp_dmi_match, 186 .callback = mbp_dmi_match,
187 .ident = "MacBookPro 1,1",
188 .matches = {
189 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
190 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,1"),
191 },
192 .driver_data = (void *)&intel_chipset_data,
193 },
194 {
195 .callback = mbp_dmi_match,
196 .ident = "MacBookPro 1,2",
197 .matches = {
198 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
199 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,2"),
200 },
201 .driver_data = (void *)&intel_chipset_data,
202 },
203 {
204 .callback = mbp_dmi_match,
205 .ident = "MacBookPro 2,1",
206 .matches = {
207 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
208 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,1"),
209 },
210 .driver_data = (void *)&intel_chipset_data,
211 },
212 {
213 .callback = mbp_dmi_match,
214 .ident = "MacBookPro 2,2",
215 .matches = {
216 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
217 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2"),
218 },
219 .driver_data = (void *)&intel_chipset_data,
220 },
221 {
222 .callback = mbp_dmi_match,
187 .ident = "MacBookPro 3,1", 223 .ident = "MacBookPro 3,1",
188 .matches = { 224 .matches = {
189 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 225 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -238,6 +274,15 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
238 }, 274 },
239 { 275 {
240 .callback = mbp_dmi_match, 276 .callback = mbp_dmi_match,
277 .ident = "MacBook 6,1",
278 .matches = {
279 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
280 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
281 },
282 .driver_data = (void *)&nvidia_chipset_data,
283 },
284 {
285 .callback = mbp_dmi_match,
241 .ident = "MacBookAir 2,1", 286 .ident = "MacBookAir 2,1",
242 .matches = { 287 .matches = {
243 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 288 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
new file mode 100644
index 000000000000..3c424f7efdcc
--- /dev/null
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -0,0 +1,190 @@
1/*
2 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
3 * PCF50633 backlight device driver
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * You should have received a copy of the GNU General Public License along
11 * with this program; if not, write to the Free Software Foundation, Inc.,
12 * 675 Mass Ave, Cambridge, MA 02139, USA.
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/platform_device.h>
20
21#include <linux/backlight.h>
22#include <linux/fb.h>
23
24#include <linux/mfd/pcf50633/core.h>
25#include <linux/mfd/pcf50633/backlight.h>
26
27struct pcf50633_bl {
28 struct pcf50633 *pcf;
29 struct backlight_device *bl;
30
31 unsigned int brightness;
32 unsigned int brightness_limit;
33};
34
35/*
36 * pcf50633_bl_set_brightness_limit
37 *
38 * Update the brightness limit for the pcf50633 backlight. The actual brightness
39 * will not go above the limit. This is useful to limit power drain for example
40 * on low battery.
41 *
42 * @pcf: Pointer to a pcf50633 device
43 * @limit: The brightness limit. Valid values are 0-63
44 */
45int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit)
46{
47 struct pcf50633_bl *pcf_bl = platform_get_drvdata(pcf->bl_pdev);
48
49 if (!pcf_bl)
50 return -ENODEV;
51
52 pcf_bl->brightness_limit = limit & 0x3f;
53 backlight_update_status(pcf_bl->bl);
54
55 return 0;
56}
57
58static int pcf50633_bl_update_status(struct backlight_device *bl)
59{
60 struct pcf50633_bl *pcf_bl = bl_get_data(bl);
61 unsigned int new_brightness;
62
63
64 if (bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK) ||
65 bl->props.power != FB_BLANK_UNBLANK)
66 new_brightness = 0;
67 else if (bl->props.brightness < pcf_bl->brightness_limit)
68 new_brightness = bl->props.brightness;
69 else
70 new_brightness = pcf_bl->brightness_limit;
71
72
73 if (pcf_bl->brightness == new_brightness)
74 return 0;
75
76 if (new_brightness) {
77 pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDOUT,
78 new_brightness);
79 if (!pcf_bl->brightness)
80 pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDENA, 1);
81 } else {
82 pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDENA, 0);
83 }
84
85 pcf_bl->brightness = new_brightness;
86
87 return 0;
88}
89
90static int pcf50633_bl_get_brightness(struct backlight_device *bl)
91{
92 struct pcf50633_bl *pcf_bl = bl_get_data(bl);
93 return pcf_bl->brightness;
94}
95
96static const struct backlight_ops pcf50633_bl_ops = {
97 .get_brightness = pcf50633_bl_get_brightness,
98 .update_status = pcf50633_bl_update_status,
99 .options = BL_CORE_SUSPENDRESUME,
100};
101
102static int __devinit pcf50633_bl_probe(struct platform_device *pdev)
103{
104 int ret;
105 struct pcf50633_bl *pcf_bl;
106 struct device *parent = pdev->dev.parent;
107 struct pcf50633_platform_data *pcf50633_data = parent->platform_data;
108 struct pcf50633_bl_platform_data *pdata = pcf50633_data->backlight_data;
109 struct backlight_properties bl_props;
110
111 pcf_bl = kzalloc(sizeof(*pcf_bl), GFP_KERNEL);
112 if (!pcf_bl)
113 return -ENOMEM;
114
115 bl_props.max_brightness = 0x3f;
116 bl_props.power = FB_BLANK_UNBLANK;
117
118 if (pdata) {
119 bl_props.brightness = pdata->default_brightness;
120 pcf_bl->brightness_limit = pdata->default_brightness_limit;
121 } else {
122 bl_props.brightness = 0x3f;
123 pcf_bl->brightness_limit = 0x3f;
124 }
125
126 pcf_bl->pcf = dev_to_pcf50633(pdev->dev.parent);
127
128 pcf_bl->bl = backlight_device_register(pdev->name, &pdev->dev, pcf_bl,
129 &pcf50633_bl_ops, &bl_props);
130
131 if (IS_ERR(pcf_bl->bl)) {
132 ret = PTR_ERR(pcf_bl->bl);
133 goto err_free;
134 }
135
136 platform_set_drvdata(pdev, pcf_bl);
137
138 pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDDIM, pdata->ramp_time);
139
140 /* Should be different from bl_props.brightness, so we do not exit
141 * update_status early the first time it's called */
142 pcf_bl->brightness = pcf_bl->bl->props.brightness + 1;
143
144 backlight_update_status(pcf_bl->bl);
145
146 return 0;
147
148err_free:
149 kfree(pcf_bl);
150
151 return ret;
152}
153
154static int __devexit pcf50633_bl_remove(struct platform_device *pdev)
155{
156 struct pcf50633_bl *pcf_bl = platform_get_drvdata(pdev);
157
158 backlight_device_unregister(pcf_bl->bl);
159
160 platform_set_drvdata(pdev, NULL);
161
162 kfree(pcf_bl);
163
164 return 0;
165}
166
167static struct platform_driver pcf50633_bl_driver = {
168 .probe = pcf50633_bl_probe,
169 .remove = __devexit_p(pcf50633_bl_remove),
170 .driver = {
171 .name = "pcf50633-backlight",
172 },
173};
174
175static int __init pcf50633_bl_init(void)
176{
177 return platform_driver_register(&pcf50633_bl_driver);
178}
179module_init(pcf50633_bl_init);
180
181static void __exit pcf50633_bl_exit(void)
182{
183 platform_driver_unregister(&pcf50633_bl_driver);
184}
185module_exit(pcf50633_bl_exit);
186
187MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
188MODULE_DESCRIPTION("PCF50633 backlight driver");
189MODULE_LICENSE("GPL");
190MODULE_ALIAS("platform:pcf50633-backlight");
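The pcf50633 backlight driver takes its defaults from struct pcf50633_bl_platform_data (default_brightness, default_brightness_limit and ramp_time are the fields read in the probe above, handed in through the parent MFD's platform data), and other pcf50633 code can clamp the output at run time with pcf50633_bl_set_brightness_limit(). A rough sketch of both pieces, with made-up values:

#include <linux/mfd/pcf50633/core.h>
#include <linux/mfd/pcf50633/backlight.h>

/* Example defaults passed to the pcf50633 MFD core by board code. */
static struct pcf50633_bl_platform_data example_bl_pdata = {
	.default_brightness		= 0x3f,
	.default_brightness_limit	= 0x3f,
	.ramp_time			= 5,
};

/* Example policy hook: halve the allowed brightness on a low battery. */
static void example_battery_low(struct pcf50633 *pcf)
{
	pcf50633_bl_set_brightness_limit(pcf, 0x1f);
}

The structure is reached from struct pcf50633_platform_data through its backlight_data pointer, which is how the probe above finds it.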
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
new file mode 100644
index 000000000000..a3128c9cb7ad
--- /dev/null
+++ b/drivers/video/backlight/s6e63m0.c
@@ -0,0 +1,920 @@
1/*
2 * S6E63M0 AMOLED LCD panel driver.
3 *
4 * Author: InKi Dae <inki.dae@samsung.com>
5 *
6 * Derived from drivers/video/omap/lcd-apollon.c
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23#include <linux/wait.h>
24#include <linux/fb.h>
25#include <linux/delay.h>
26#include <linux/gpio.h>
27#include <linux/spi/spi.h>
28#include <linux/irq.h>
29#include <linux/interrupt.h>
30#include <linux/kernel.h>
31#include <linux/lcd.h>
32#include <linux/backlight.h>
33
34#include "s6e63m0_gamma.h"
35
36#define SLEEPMSEC 0x1000
37#define ENDDEF 0x2000
38#define DEFMASK 0xFF00
39#define COMMAND_ONLY 0xFE
40#define DATA_ONLY 0xFF
41
42#define MIN_BRIGHTNESS 0
43#define MAX_BRIGHTNESS 10
44
45#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
46
47struct s6e63m0 {
48 struct device *dev;
49 struct spi_device *spi;
50 unsigned int power;
51 unsigned int current_brightness;
52 unsigned int gamma_mode;
53 unsigned int gamma_table_count;
54 struct lcd_device *ld;
55 struct backlight_device *bd;
56 struct lcd_platform_data *lcd_pd;
57};
58
59static const unsigned short SEQ_PANEL_CONDITION_SET[] = {
60 0xF8, 0x01,
61 DATA_ONLY, 0x27,
62 DATA_ONLY, 0x27,
63 DATA_ONLY, 0x07,
64 DATA_ONLY, 0x07,
65 DATA_ONLY, 0x54,
66 DATA_ONLY, 0x9f,
67 DATA_ONLY, 0x63,
68 DATA_ONLY, 0x86,
69 DATA_ONLY, 0x1a,
70 DATA_ONLY, 0x33,
71 DATA_ONLY, 0x0d,
72 DATA_ONLY, 0x00,
73 DATA_ONLY, 0x00,
74
75 ENDDEF, 0x0000
76};
77
78static const unsigned short SEQ_DISPLAY_CONDITION_SET[] = {
79 0xf2, 0x02,
80 DATA_ONLY, 0x03,
81 DATA_ONLY, 0x1c,
82 DATA_ONLY, 0x10,
83 DATA_ONLY, 0x10,
84
85 0xf7, 0x03,
86 DATA_ONLY, 0x00,
87 DATA_ONLY, 0x00,
88
89 ENDDEF, 0x0000
90};
91
92static const unsigned short SEQ_GAMMA_SETTING[] = {
93 0xfa, 0x00,
94 DATA_ONLY, 0x18,
95 DATA_ONLY, 0x08,
96 DATA_ONLY, 0x24,
97 DATA_ONLY, 0x64,
98 DATA_ONLY, 0x56,
99 DATA_ONLY, 0x33,
100 DATA_ONLY, 0xb6,
101 DATA_ONLY, 0xba,
102 DATA_ONLY, 0xa8,
103 DATA_ONLY, 0xac,
104 DATA_ONLY, 0xb1,
105 DATA_ONLY, 0x9d,
106 DATA_ONLY, 0xc1,
107 DATA_ONLY, 0xc1,
108 DATA_ONLY, 0xb7,
109 DATA_ONLY, 0x00,
110 DATA_ONLY, 0x9c,
111 DATA_ONLY, 0x00,
112 DATA_ONLY, 0x9f,
113 DATA_ONLY, 0x00,
114 DATA_ONLY, 0xd6,
115
116 0xfa, 0x01,
117
118 ENDDEF, 0x0000
119};
120
121static const unsigned short SEQ_ETC_CONDITION_SET[] = {
122 0xf6, 0x00,
123 DATA_ONLY, 0x8c,
124 DATA_ONLY, 0x07,
125
126 0xb3, 0xc,
127
128 0xb5, 0x2c,
129 DATA_ONLY, 0x12,
130 DATA_ONLY, 0x0c,
131 DATA_ONLY, 0x0a,
132 DATA_ONLY, 0x10,
133 DATA_ONLY, 0x0e,
134 DATA_ONLY, 0x17,
135 DATA_ONLY, 0x13,
136 DATA_ONLY, 0x1f,
137 DATA_ONLY, 0x1a,
138 DATA_ONLY, 0x2a,
139 DATA_ONLY, 0x24,
140 DATA_ONLY, 0x1f,
141 DATA_ONLY, 0x1b,
142 DATA_ONLY, 0x1a,
143 DATA_ONLY, 0x17,
144
145 DATA_ONLY, 0x2b,
146 DATA_ONLY, 0x26,
147 DATA_ONLY, 0x22,
148 DATA_ONLY, 0x20,
149 DATA_ONLY, 0x3a,
150 DATA_ONLY, 0x34,
151 DATA_ONLY, 0x30,
152 DATA_ONLY, 0x2c,
153 DATA_ONLY, 0x29,
154 DATA_ONLY, 0x26,
155 DATA_ONLY, 0x25,
156 DATA_ONLY, 0x23,
157 DATA_ONLY, 0x21,
158 DATA_ONLY, 0x20,
159 DATA_ONLY, 0x1e,
160 DATA_ONLY, 0x1e,
161
162 0xb6, 0x00,
163 DATA_ONLY, 0x00,
164 DATA_ONLY, 0x11,
165 DATA_ONLY, 0x22,
166 DATA_ONLY, 0x33,
167 DATA_ONLY, 0x44,
168 DATA_ONLY, 0x44,
169 DATA_ONLY, 0x44,
170
171 DATA_ONLY, 0x55,
172 DATA_ONLY, 0x55,
173 DATA_ONLY, 0x66,
174 DATA_ONLY, 0x66,
175 DATA_ONLY, 0x66,
176 DATA_ONLY, 0x66,
177 DATA_ONLY, 0x66,
178 DATA_ONLY, 0x66,
179
180 0xb7, 0x2c,
181 DATA_ONLY, 0x12,
182 DATA_ONLY, 0x0c,
183 DATA_ONLY, 0x0a,
184 DATA_ONLY, 0x10,
185 DATA_ONLY, 0x0e,
186 DATA_ONLY, 0x17,
187 DATA_ONLY, 0x13,
188 DATA_ONLY, 0x1f,
189 DATA_ONLY, 0x1a,
190 DATA_ONLY, 0x2a,
191 DATA_ONLY, 0x24,
192 DATA_ONLY, 0x1f,
193 DATA_ONLY, 0x1b,
194 DATA_ONLY, 0x1a,
195 DATA_ONLY, 0x17,
196
197 DATA_ONLY, 0x2b,
198 DATA_ONLY, 0x26,
199 DATA_ONLY, 0x22,
200 DATA_ONLY, 0x20,
201 DATA_ONLY, 0x3a,
202 DATA_ONLY, 0x34,
203 DATA_ONLY, 0x30,
204 DATA_ONLY, 0x2c,
205 DATA_ONLY, 0x29,
206 DATA_ONLY, 0x26,
207 DATA_ONLY, 0x25,
208 DATA_ONLY, 0x23,
209 DATA_ONLY, 0x21,
210 DATA_ONLY, 0x20,
211 DATA_ONLY, 0x1e,
212 DATA_ONLY, 0x1e,
213
214 0xb8, 0x00,
215 DATA_ONLY, 0x00,
216 DATA_ONLY, 0x11,
217 DATA_ONLY, 0x22,
218 DATA_ONLY, 0x33,
219 DATA_ONLY, 0x44,
220 DATA_ONLY, 0x44,
221 DATA_ONLY, 0x44,
222
223 DATA_ONLY, 0x55,
224 DATA_ONLY, 0x55,
225 DATA_ONLY, 0x66,
226 DATA_ONLY, 0x66,
227 DATA_ONLY, 0x66,
228 DATA_ONLY, 0x66,
229 DATA_ONLY, 0x66,
230 DATA_ONLY, 0x66,
231
232 0xb9, 0x2c,
233 DATA_ONLY, 0x12,
234 DATA_ONLY, 0x0c,
235 DATA_ONLY, 0x0a,
236 DATA_ONLY, 0x10,
237 DATA_ONLY, 0x0e,
238 DATA_ONLY, 0x17,
239 DATA_ONLY, 0x13,
240 DATA_ONLY, 0x1f,
241 DATA_ONLY, 0x1a,
242 DATA_ONLY, 0x2a,
243 DATA_ONLY, 0x24,
244 DATA_ONLY, 0x1f,
245 DATA_ONLY, 0x1b,
246 DATA_ONLY, 0x1a,
247 DATA_ONLY, 0x17,
248
249 DATA_ONLY, 0x2b,
250 DATA_ONLY, 0x26,
251 DATA_ONLY, 0x22,
252 DATA_ONLY, 0x20,
253 DATA_ONLY, 0x3a,
254 DATA_ONLY, 0x34,
255 DATA_ONLY, 0x30,
256 DATA_ONLY, 0x2c,
257 DATA_ONLY, 0x29,
258 DATA_ONLY, 0x26,
259 DATA_ONLY, 0x25,
260 DATA_ONLY, 0x23,
261 DATA_ONLY, 0x21,
262 DATA_ONLY, 0x20,
263 DATA_ONLY, 0x1e,
264 DATA_ONLY, 0x1e,
265
266 0xba, 0x00,
267 DATA_ONLY, 0x00,
268 DATA_ONLY, 0x11,
269 DATA_ONLY, 0x22,
270 DATA_ONLY, 0x33,
271 DATA_ONLY, 0x44,
272 DATA_ONLY, 0x44,
273 DATA_ONLY, 0x44,
274
275 DATA_ONLY, 0x55,
276 DATA_ONLY, 0x55,
277 DATA_ONLY, 0x66,
278 DATA_ONLY, 0x66,
279 DATA_ONLY, 0x66,
280 DATA_ONLY, 0x66,
281 DATA_ONLY, 0x66,
282 DATA_ONLY, 0x66,
283
284 0xc1, 0x4d,
285 DATA_ONLY, 0x96,
286 DATA_ONLY, 0x1d,
287 DATA_ONLY, 0x00,
288 DATA_ONLY, 0x00,
289 DATA_ONLY, 0x01,
290 DATA_ONLY, 0xdf,
291 DATA_ONLY, 0x00,
292 DATA_ONLY, 0x00,
293 DATA_ONLY, 0x03,
294 DATA_ONLY, 0x1f,
295 DATA_ONLY, 0x00,
296 DATA_ONLY, 0x00,
297 DATA_ONLY, 0x00,
298 DATA_ONLY, 0x00,
299 DATA_ONLY, 0x00,
300 DATA_ONLY, 0x00,
301 DATA_ONLY, 0x00,
302 DATA_ONLY, 0x00,
303 DATA_ONLY, 0x03,
304 DATA_ONLY, 0x06,
305 DATA_ONLY, 0x09,
306 DATA_ONLY, 0x0d,
307 DATA_ONLY, 0x0f,
308 DATA_ONLY, 0x12,
309 DATA_ONLY, 0x15,
310 DATA_ONLY, 0x18,
311
312 0xb2, 0x10,
313 DATA_ONLY, 0x10,
314 DATA_ONLY, 0x0b,
315 DATA_ONLY, 0x05,
316
317 ENDDEF, 0x0000
318};
319
320static const unsigned short SEQ_ACL_ON[] = {
321 /* ACL on */
322 0xc0, 0x01,
323
324 ENDDEF, 0x0000
325};
326
327static const unsigned short SEQ_ACL_OFF[] = {
328 /* ACL off */
329 0xc0, 0x00,
330
331 ENDDEF, 0x0000
332};
333
334static const unsigned short SEQ_ELVSS_ON[] = {
335 /* ELVSS on */
336 0xb1, 0x0b,
337
338 ENDDEF, 0x0000
339};
340
341static const unsigned short SEQ_ELVSS_OFF[] = {
342 /* ELVSS off */
343 0xb1, 0x0a,
344
345 ENDDEF, 0x0000
346};
347
348static const unsigned short SEQ_STAND_BY_OFF[] = {
349 0x11, COMMAND_ONLY,
350
351 ENDDEF, 0x0000
352};
353
354static const unsigned short SEQ_STAND_BY_ON[] = {
355 0x10, COMMAND_ONLY,
356
357 ENDDEF, 0x0000
358};
359
360static const unsigned short SEQ_DISPLAY_ON[] = {
361 0x29, COMMAND_ONLY,
362
363 ENDDEF, 0x0000
364};
365
366
367static int s6e63m0_spi_write_byte(struct s6e63m0 *lcd, int addr, int data)
368{
369 u16 buf[1];
370 struct spi_message msg;
371
372 struct spi_transfer xfer = {
373 .len = 2,
374 .tx_buf = buf,
375 };
376
377 buf[0] = (addr << 8) | data;
378
379 spi_message_init(&msg);
380 spi_message_add_tail(&xfer, &msg);
381
382 return spi_sync(lcd->spi, &msg);
383}
384
385static int s6e63m0_spi_write(struct s6e63m0 *lcd, unsigned char address,
386 unsigned char command)
387{
388 int ret = 0;
389
390 if (address != DATA_ONLY)
391 ret = s6e63m0_spi_write_byte(lcd, 0x0, address);
392 if (command != COMMAND_ONLY)
393 ret = s6e63m0_spi_write_byte(lcd, 0x1, command);
394
395 return ret;
396}
397
398static int s6e63m0_panel_send_sequence(struct s6e63m0 *lcd,
399 const unsigned short *wbuf)
400{
401 int ret = 0, i = 0;
402
403 while ((wbuf[i] & DEFMASK) != ENDDEF) {
404 if ((wbuf[i] & DEFMASK) != SLEEPMSEC) {
405 ret = s6e63m0_spi_write(lcd, wbuf[i], wbuf[i+1]);
406 if (ret)
407 break;
408 } else
409 udelay(wbuf[i+1]*1000);
410 i += 2;
411 }
412
413 return ret;
414}
415
416static int _s6e63m0_gamma_ctl(struct s6e63m0 *lcd, const unsigned int *gamma)
417{
418 unsigned int i = 0;
419 int ret = 0;
420
421 /* disable gamma table updating. */
422 ret = s6e63m0_spi_write(lcd, 0xfa, 0x00);
423 if (ret) {
424 dev_err(lcd->dev, "failed to disable gamma table updating.\n");
425 goto gamma_err;
426 }
427
428 for (i = 0 ; i < GAMMA_TABLE_COUNT; i++) {
429 ret = s6e63m0_spi_write(lcd, DATA_ONLY, gamma[i]);
430 if (ret) {
431 dev_err(lcd->dev, "failed to set gamma table.\n");
432 goto gamma_err;
433 }
434 }
435
436 /* update gamma table. */
437 ret = s6e63m0_spi_write(lcd, 0xfa, 0x01);
438 if (ret)
439 dev_err(lcd->dev, "failed to update gamma table.\n");
440
441gamma_err:
442 return ret;
443}
444
445static int s6e63m0_gamma_ctl(struct s6e63m0 *lcd, int gamma)
446{
447 int ret = 0;
448
449 ret = _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
450
451 return ret;
452}
453
454
455static int s6e63m0_ldi_init(struct s6e63m0 *lcd)
456{
457 int ret, i;
458 const unsigned short *init_seq[] = {
459 SEQ_PANEL_CONDITION_SET,
460 SEQ_DISPLAY_CONDITION_SET,
461 SEQ_GAMMA_SETTING,
462 SEQ_ETC_CONDITION_SET,
463 SEQ_ACL_ON,
464 SEQ_ELVSS_ON,
465 };
466
467 for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
468 ret = s6e63m0_panel_send_sequence(lcd, init_seq[i]);
469 if (ret)
470 break;
471 }
472
473 return ret;
474}
475
476static int s6e63m0_ldi_enable(struct s6e63m0 *lcd)
477{
478 int ret = 0, i;
479 const unsigned short *enable_seq[] = {
480 SEQ_STAND_BY_OFF,
481 SEQ_DISPLAY_ON,
482 };
483
484 for (i = 0; i < ARRAY_SIZE(enable_seq); i++) {
485 ret = s6e63m0_panel_send_sequence(lcd, enable_seq[i]);
486 if (ret)
487 break;
488 }
489
490 return ret;
491}
492
493static int s6e63m0_ldi_disable(struct s6e63m0 *lcd)
494{
495 int ret;
496
497 ret = s6e63m0_panel_send_sequence(lcd, SEQ_STAND_BY_ON);
498
499 return ret;
500}
501
502static int s6e63m0_power_on(struct s6e63m0 *lcd)
503{
504 int ret = 0;
505 struct lcd_platform_data *pd = NULL;
506 struct backlight_device *bd = NULL;
507
508 pd = lcd->lcd_pd;
509 if (!pd) {
510 dev_err(lcd->dev, "platform data is NULL.\n");
511 return -EFAULT;
512 }
513
514 bd = lcd->bd;
515 if (!bd) {
516 dev_err(lcd->dev, "backlight device is NULL.\n");
517 return -EFAULT;
518 }
519
520 if (!pd->power_on) {
521 dev_err(lcd->dev, "power_on is NULL.\n");
522 return -EFAULT;
523 } else {
524 pd->power_on(lcd->ld, 1);
525 mdelay(pd->power_on_delay);
526 }
527
528 if (!pd->reset) {
529 dev_err(lcd->dev, "reset is NULL.\n");
530 return -EFAULT;
531 } else {
532 pd->reset(lcd->ld);
533 mdelay(pd->reset_delay);
534 }
535
536 ret = s6e63m0_ldi_init(lcd);
537 if (ret) {
538 dev_err(lcd->dev, "failed to initialize ldi.\n");
539 return ret;
540 }
541
542 ret = s6e63m0_ldi_enable(lcd);
543 if (ret) {
544 dev_err(lcd->dev, "failed to enable ldi.\n");
545 return ret;
546 }
547
548 /* set brightness to current value after power on or resume. */
549 ret = s6e63m0_gamma_ctl(lcd, bd->props.brightness);
550 if (ret) {
551 dev_err(lcd->dev, "lcd gamma setting failed.\n");
552 return ret;
553 }
554
555 return 0;
556}
557
558static int s6e63m0_power_off(struct s6e63m0 *lcd)
559{
560 int ret = 0;
561 struct lcd_platform_data *pd = NULL;
562
563 pd = lcd->lcd_pd;
564 if (!pd) {
565 dev_err(lcd->dev, "platform data is NULL.\n");
566 return -EFAULT;
567 }
568
569 ret = s6e63m0_ldi_disable(lcd);
570 if (ret) {
571 dev_err(lcd->dev, "lcd setting failed.\n");
572 return -EIO;
573 }
574
575 mdelay(pd->power_off_delay);
576
577 if (!pd->power_on) {
578 dev_err(lcd->dev, "power_on is NULL.\n");
579 return -EFAULT;
580 } else
581 pd->power_on(lcd->ld, 0);
582
583 return 0;
584}
585
586static int s6e63m0_power(struct s6e63m0 *lcd, int power)
587{
588 int ret = 0;
589
590 if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power))
591 ret = s6e63m0_power_on(lcd);
592 else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power))
593 ret = s6e63m0_power_off(lcd);
594
595 if (!ret)
596 lcd->power = power;
597
598 return ret;
599}
600
601static int s6e63m0_set_power(struct lcd_device *ld, int power)
602{
603 struct s6e63m0 *lcd = lcd_get_data(ld);
604
605 if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
606 power != FB_BLANK_NORMAL) {
607 dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
608 return -EINVAL;
609 }
610
611 return s6e63m0_power(lcd, power);
612}
613
614static int s6e63m0_get_power(struct lcd_device *ld)
615{
616 struct s6e63m0 *lcd = lcd_get_data(ld);
617
618 return lcd->power;
619}
620
621static int s6e63m0_get_brightness(struct backlight_device *bd)
622{
623 return bd->props.brightness;
624}
625
626static int s6e63m0_set_brightness(struct backlight_device *bd)
627{
628 int ret = 0, brightness = bd->props.brightness;
629 struct s6e63m0 *lcd = bl_get_data(bd);
630
631 if (brightness < MIN_BRIGHTNESS ||
632 brightness > bd->props.max_brightness) {
633 dev_err(&bd->dev, "lcd brightness should be %d to %d.\n",
634 MIN_BRIGHTNESS, MAX_BRIGHTNESS);
635 return -EINVAL;
636 }
637
638 ret = s6e63m0_gamma_ctl(lcd, bd->props.brightness);
639 if (ret) {
640 dev_err(&bd->dev, "lcd brightness setting failed.\n");
641 return -EIO;
642 }
643
644 return ret;
645}
646
647static struct lcd_ops s6e63m0_lcd_ops = {
648 .set_power = s6e63m0_set_power,
649 .get_power = s6e63m0_get_power,
650};
651
652static const struct backlight_ops s6e63m0_backlight_ops = {
653 .get_brightness = s6e63m0_get_brightness,
654 .update_status = s6e63m0_set_brightness,
655};
656
657static ssize_t s6e63m0_sysfs_show_gamma_mode(struct device *dev,
658 struct device_attribute *attr, char *buf)
659{
660 struct s6e63m0 *lcd = dev_get_drvdata(dev);
661 char temp[10];
662
663 switch (lcd->gamma_mode) {
664 case 0:
665 sprintf(temp, "2.2 mode\n");
666 strcat(buf, temp);
667 break;
668 case 1:
669 sprintf(temp, "1.9 mode\n");
670 strcat(buf, temp);
671 break;
672 case 2:
673 sprintf(temp, "1.7 mode\n");
674 strcat(buf, temp);
675 break;
676 default:
677		dev_info(dev, "gamma mode could be 0:2.2, 1:1.9 or 2:1.7\n");
678 break;
679 }
680
681 return strlen(buf);
682}
683
684static ssize_t s6e63m0_sysfs_store_gamma_mode(struct device *dev,
685 struct device_attribute *attr,
686 const char *buf, size_t len)
687{
688 struct s6e63m0 *lcd = dev_get_drvdata(dev);
689 struct backlight_device *bd = NULL;
690 int brightness, rc;
691
692 rc = strict_strtoul(buf, 0, (unsigned long *)&lcd->gamma_mode);
693 if (rc < 0)
694 return rc;
695
696 bd = lcd->bd;
697
698 brightness = bd->props.brightness;
699
700 switch (lcd->gamma_mode) {
701 case 0:
702 _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[brightness]);
703 break;
704 case 1:
705 _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_19_table[brightness]);
706 break;
707 case 2:
708 _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_17_table[brightness]);
709 break;
710 default:
711 dev_info(dev, "gamma mode could be 0:2.2, 1:1.9 or 2:1.7\n");
712 _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[brightness]);
713 break;
714 }
715 return len;
716}
717
718static DEVICE_ATTR(gamma_mode, 0644,
719 s6e63m0_sysfs_show_gamma_mode, s6e63m0_sysfs_store_gamma_mode);
720
721static ssize_t s6e63m0_sysfs_show_gamma_table(struct device *dev,
722 struct device_attribute *attr, char *buf)
723{
724 struct s6e63m0 *lcd = dev_get_drvdata(dev);
725 char temp[3];
726
727 sprintf(temp, "%d\n", lcd->gamma_table_count);
728 strcpy(buf, temp);
729
730 return strlen(buf);
731}
732static DEVICE_ATTR(gamma_table, 0644,
733 s6e63m0_sysfs_show_gamma_table, NULL);
734
735static int __init s6e63m0_probe(struct spi_device *spi)
736{
737 int ret = 0;
738 struct s6e63m0 *lcd = NULL;
739 struct lcd_device *ld = NULL;
740 struct backlight_device *bd = NULL;
741
742 lcd = kzalloc(sizeof(struct s6e63m0), GFP_KERNEL);
743 if (!lcd)
744 return -ENOMEM;
745
746	/* s6e63m0 lcd panel uses 3-wire 9-bit SPI mode. */
747 spi->bits_per_word = 9;
748
749 ret = spi_setup(spi);
750 if (ret < 0) {
751 dev_err(&spi->dev, "spi setup failed.\n");
752 goto out_free_lcd;
753 }
754
755 lcd->spi = spi;
756 lcd->dev = &spi->dev;
757
758 lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data;
759 if (!lcd->lcd_pd) {
760 dev_err(&spi->dev, "platform data is NULL.\n");
761 goto out_free_lcd;
762 }
763
764 ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops);
765 if (IS_ERR(ld)) {
766 ret = PTR_ERR(ld);
767 goto out_free_lcd;
768 }
769
770 lcd->ld = ld;
771
772 bd = backlight_device_register("s6e63m0bl-bl", &spi->dev, lcd,
773 &s6e63m0_backlight_ops, NULL);
774 if (IS_ERR(bd)) {
775 ret = PTR_ERR(bd);
776 goto out_lcd_unregister;
777 }
778
779 bd->props.max_brightness = MAX_BRIGHTNESS;
780 bd->props.brightness = MAX_BRIGHTNESS;
781 lcd->bd = bd;
782
783 /*
784 * Record how many gamma tables are available so the count
785 * can be reported to user space via the gamma_table attribute.
786 */
787 lcd->gamma_table_count =
788 sizeof(gamma_table) / (MAX_GAMMA_LEVEL * sizeof(int));
789
790 ret = device_create_file(&(spi->dev), &dev_attr_gamma_mode);
791 if (ret < 0)
792 dev_err(&(spi->dev), "failed to add sysfs entries\n");
793
794 ret = device_create_file(&(spi->dev), &dev_attr_gamma_table);
795 if (ret < 0)
796 dev_err(&(spi->dev), "failed to add sysfs entries\n");
797
798 /*
799 * If the LCD panel was already enabled by the bootloader
800 * (e.g. u-boot), do not power it on again here.
801 */
802 if (!lcd->lcd_pd->lcd_enabled) {
803 /*
804 * The bootloader left the panel off, so record the
805 * current state as powered down and then power the
806 * panel on.
807 */
808 lcd->power = FB_BLANK_POWERDOWN;
809
810 s6e63m0_power(lcd, FB_BLANK_UNBLANK);
811 } else
812 lcd->power = FB_BLANK_UNBLANK;
813
814 dev_set_drvdata(&spi->dev, lcd);
815
816 dev_info(&spi->dev, "s6e63m0 panel driver has been probed.\n");
817
818 return 0;
819
820out_lcd_unregister:
821 lcd_device_unregister(ld);
822out_free_lcd:
823 kfree(lcd);
824 return ret;
825}
826
827static int __devexit s6e63m0_remove(struct spi_device *spi)
828{
829 struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
830
831 s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
832 lcd_device_unregister(lcd->ld);
833 kfree(lcd);
834
835 return 0;
836}
837
838#if defined(CONFIG_PM)
839unsigned int before_power;
840
841static int s6e63m0_suspend(struct spi_device *spi, pm_message_t mesg)
842{
843 int ret = 0;
844 struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
845
846 dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
847
848 before_power = lcd->power;
849
850 /*
851 * On suspend the panel is always powered down,
852 * regardless of its current state.
853 */
854 ret = s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
855
856 return ret;
857}
858
859static int s6e63m0_resume(struct spi_device *spi)
860{
861 int ret = 0;
862 struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
863
864 /*
865 * If the panel was unblanked before suspend (before_power is
866 * FB_BLANK_UNBLANK), set the current state to FB_BLANK_POWERDOWN
867 * so that the power call below turns the panel back on.
868 */
869 if (before_power == FB_BLANK_UNBLANK)
870 lcd->power = FB_BLANK_POWERDOWN;
871
872 dev_dbg(&spi->dev, "before_power = %d\n", before_power);
873
874 ret = s6e63m0_power(lcd, before_power);
875
876 return ret;
877}
878#else
879#define s6e63m0_suspend NULL
880#define s6e63m0_resume NULL
881#endif
882
883/* Power down all displays on reboot, poweroff or halt. */
884static void s6e63m0_shutdown(struct spi_device *spi)
885{
886 struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
887
888 s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
889}
890
891static struct spi_driver s6e63m0_driver = {
892 .driver = {
893 .name = "s6e63m0",
894 .bus = &spi_bus_type,
895 .owner = THIS_MODULE,
896 },
897 .probe = s6e63m0_probe,
898 .remove = __devexit_p(s6e63m0_remove),
899 .shutdown = s6e63m0_shutdown,
900 .suspend = s6e63m0_suspend,
901 .resume = s6e63m0_resume,
902};
903
904static int __init s6e63m0_init(void)
905{
906 return spi_register_driver(&s6e63m0_driver);
907}
908
909static void __exit s6e63m0_exit(void)
910{
911 spi_unregister_driver(&s6e63m0_driver);
912}
913
914module_init(s6e63m0_init);
915module_exit(s6e63m0_exit);
916
917MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>");
918MODULE_DESCRIPTION("S6E63M0 LCD Driver");
919MODULE_LICENSE("GPL");
920
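
The sysfs files registered in s6e63m0_probe() above (gamma_mode and gamma_table on the SPI device) are the only runtime knobs the driver adds. As a rough illustration only, the userspace sketch below shows how the gamma_mode attribute could be driven; the device path is an assumption and depends on the board's SPI bus number and chip select.

/*
 * Illustrative userspace sketch (not part of this patch): select the
 * 1.9 gamma curve via the driver's gamma_mode sysfs attribute and read
 * the setting back.  The path below is hypothetical -- the real one
 * depends on which SPI bus and chip select the panel is wired to.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

#define GAMMA_MODE_ATTR "/sys/bus/spi/devices/spi0.0/gamma_mode" /* hypothetical */

int main(void)
{
	FILE *f = fopen(GAMMA_MODE_ATTR, "w");
	char line[32];

	if (!f) {
		fprintf(stderr, "open for write: %s\n", strerror(errno));
		return 1;
	}
	fputs("1\n", f);	/* 0: 2.2, 1: 1.9, 2: 1.7 */
	fclose(f);

	f = fopen(GAMMA_MODE_ATTR, "r");
	if (!f) {
		fprintf(stderr, "open for read: %s\n", strerror(errno));
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("current gamma mode: %s", line);
	fclose(f);
	return 0;
}
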
diff --git a/drivers/video/backlight/s6e63m0_gamma.h b/drivers/video/backlight/s6e63m0_gamma.h
new file mode 100644
index 000000000000..2c44bdb0696b
--- /dev/null
+++ b/drivers/video/backlight/s6e63m0_gamma.h
@@ -0,0 +1,266 @@
1/* linux/drivers/video/backlight/s6e63m0_gamma.h
2 *
3 * Gamma level definitions.
4 *
5 * Copyright (c) 2009 Samsung Electronics
6 * InKi Dae <inki.dae@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef _S6E63M0_BRIGHTNESS_H
14#define _S6E63M0_BRIGHTNESS_H
15
16#define MAX_GAMMA_LEVEL 11
17#define GAMMA_TABLE_COUNT 21
18
19/* gamma value: 2.2 */
20static const unsigned int s6e63m0_22_300[] = {
21 0x18, 0x08, 0x24, 0x5f, 0x50, 0x2d, 0xB6,
22 0xB9, 0xA7, 0xAd, 0xB1, 0x9f, 0xbe, 0xC0,
23 0xB5, 0x00, 0xa0, 0x00, 0xa4, 0x00, 0xdb
24};
25
26static const unsigned int s6e63m0_22_280[] = {
27 0x18, 0x08, 0x24, 0x64, 0x56, 0x33, 0xB6,
28 0xBA, 0xA8, 0xAC, 0xB1, 0x9D, 0xC1, 0xC1,
29 0xB7, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6
30};
31
32static const unsigned int s6e63m0_22_260[] = {
33 0x18, 0x08, 0x24, 0x66, 0x58, 0x34, 0xB6,
34 0xBA, 0xA7, 0xAF, 0xB3, 0xA0, 0xC1, 0xC2,
35 0xB7, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1
36
37};
38
39static const unsigned int s6e63m0_22_240[] = {
40 0x18, 0x08, 0x24, 0x62, 0x54, 0x30, 0xB9,
41 0xBB, 0xA9, 0xB0, 0xB3, 0xA1, 0xC1, 0xC3,
42 0xB7, 0x00, 0x91, 0x00, 0x95, 0x00, 0xDA
43
44};
45static const unsigned int s6e63m0_22_220[] = {
46 0x18, 0x08, 0x24, 0x63, 0x53, 0x31, 0xB8,
47 0xBC, 0xA9, 0xB0, 0xB5, 0xA2, 0xC4, 0xC4,
48 0xB8, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2
49};
50
51static const unsigned int s6e63m0_22_200[] = {
52 0x18, 0x08, 0x24, 0x66, 0x55, 0x34, 0xBA,
53 0xBD, 0xAB, 0xB1, 0xB5, 0xA3, 0xC5, 0xC6,
54 0xB9, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA
55};
56
57static const unsigned int s6e63m0_22_170[] = {
58 0x18, 0x08, 0x24, 0x69, 0x54, 0x37, 0xBB,
59 0xBE, 0xAC, 0xB4, 0xB7, 0xA6, 0xC7, 0xC8,
60 0xBC, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB
61};
62
63static const unsigned int s6e63m0_22_140[] = {
64 0x18, 0x08, 0x24, 0x6C, 0x54, 0x3A, 0xBC,
65 0xBF, 0xAC, 0xB7, 0xBB, 0xA9, 0xC9, 0xC9,
66 0xBE, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E
67};
68
69static const unsigned int s6e63m0_22_110[] = {
70 0x18, 0x08, 0x24, 0x70, 0x51, 0x3E, 0xBF,
71 0xC1, 0xAF, 0xB9, 0xBC, 0xAB, 0xCC, 0xCC,
72 0xC2, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D
73};
74
75static const unsigned int s6e63m0_22_90[] = {
76 0x18, 0x08, 0x24, 0x73, 0x4A, 0x3D, 0xC0,
77 0xC2, 0xB1, 0xBB, 0xBE, 0xAC, 0xCE, 0xCF,
78 0xC5, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82
79};
80
81static const unsigned int s6e63m0_22_30[] = {
82 0x18, 0x08, 0x24, 0x78, 0xEC, 0x3D, 0xC8,
83 0xC2, 0xB6, 0xC4, 0xC7, 0xB6, 0xD5, 0xD7,
84 0xCC, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51
85};
86
87/* gamma value: 1.9 */
88static const unsigned int s6e63m0_19_300[] = {
89 0x18, 0x08, 0x24, 0x61, 0x5F, 0x39, 0xBA,
90 0xBD, 0xAD, 0xB1, 0xB6, 0xA5, 0xC4, 0xC5,
91 0xBC, 0x00, 0xA0, 0x00, 0xA4, 0x00, 0xDB
92};
93
94static const unsigned int s6e63m0_19_280[] = {
95 0x18, 0x08, 0x24, 0x61, 0x60, 0x39, 0xBB,
96 0xBE, 0xAD, 0xB2, 0xB6, 0xA6, 0xC5, 0xC7,
97 0xBD, 0x00, 0x9B, 0x00, 0x9E, 0x00, 0xD5
98};
99
100static const unsigned int s6e63m0_19_260[] = {
101 0x18, 0x08, 0x24, 0x63, 0x61, 0x3B, 0xBA,
102 0xBE, 0xAC, 0xB3, 0xB8, 0xA7, 0xC6, 0xC8,
103 0xBD, 0x00, 0x96, 0x00, 0x98, 0x00, 0xCF
104};
105
106static const unsigned int s6e63m0_19_240[] = {
107 0x18, 0x08, 0x24, 0x67, 0x64, 0x3F, 0xBB,
108 0xBE, 0xAD, 0xB3, 0xB9, 0xA7, 0xC8, 0xC9,
109 0xBE, 0x00, 0x90, 0x00, 0x92, 0x00, 0xC8
110};
111
112static const unsigned int s6e63m0_19_220[] = {
113 0x18, 0x08, 0x24, 0x68, 0x64, 0x40, 0xBC,
114 0xBF, 0xAF, 0xB4, 0xBA, 0xA9, 0xC8, 0xCA,
115 0xBE, 0x00, 0x8B, 0x00, 0x8C, 0x00, 0xC0
116};
117
118static const unsigned int s6e63m0_19_200[] = {
119 0x18, 0x08, 0x24, 0x68, 0x64, 0x3F, 0xBE,
120 0xC0, 0xB0, 0xB6, 0xBB, 0xAB, 0xC8, 0xCB,
121 0xBF, 0x00, 0x85, 0x00, 0x86, 0x00, 0xB8
122};
123
124static const unsigned int s6e63m0_19_170[] = {
125 0x18, 0x08, 0x24, 0x69, 0x64, 0x40, 0xBF,
126 0xC1, 0xB0, 0xB9, 0xBE, 0xAD, 0xCB, 0xCD,
127 0xC2, 0x00, 0x7A, 0x00, 0x7B, 0x00, 0xAA
128};
129
130static const unsigned int s6e63m0_19_140[] = {
131 0x18, 0x08, 0x24, 0x6E, 0x65, 0x45, 0xC0,
132 0xC3, 0xB2, 0xBA, 0xBE, 0xAE, 0xCD, 0xD0,
133 0xC4, 0x00, 0x70, 0x00, 0x70, 0x00, 0x9C
134};
135
136static const unsigned int s6e63m0_19_110[] = {
137 0x18, 0x08, 0x24, 0x6F, 0x65, 0x46, 0xC2,
138 0xC4, 0xB3, 0xBF, 0xC2, 0xB2, 0xCF, 0xD1,
139 0xC6, 0x00, 0x64, 0x00, 0x64, 0x00, 0x8D
140};
141
142static const unsigned int s6e63m0_19_90[] = {
143 0x18, 0x08, 0x24, 0x74, 0x60, 0x4A, 0xC3,
144 0xC6, 0xB5, 0xBF, 0xC3, 0xB2, 0xD2, 0xD3,
145 0xC8, 0x00, 0x5B, 0x00, 0x5B, 0x00, 0x81
146};
147
148static const unsigned int s6e63m0_19_30[] = {
149 0x18, 0x08, 0x24, 0x84, 0x45, 0x4F, 0xCA,
150 0xCB, 0xBC, 0xC9, 0xCB, 0xBC, 0xDA, 0xDA,
151 0xD0, 0x00, 0x35, 0x00, 0x34, 0x00, 0x4E
152};
153
154/* gamma value: 1.7 */
155static const unsigned int s6e63m0_17_300[] = {
156 0x18, 0x08, 0x24, 0x70, 0x70, 0x4F, 0xBF,
157 0xC2, 0xB2, 0xB8, 0xBC, 0xAC, 0xCB, 0xCD,
158 0xC3, 0x00, 0xA0, 0x00, 0xA4, 0x00, 0xDB
159};
160
161static const unsigned int s6e63m0_17_280[] = {
162 0x18, 0x08, 0x24, 0x71, 0x71, 0x50, 0xBF,
163 0xC2, 0xB2, 0xBA, 0xBE, 0xAE, 0xCB, 0xCD,
164 0xC3, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6
165};
166
167static const unsigned int s6e63m0_17_260[] = {
168 0x18, 0x08, 0x24, 0x72, 0x72, 0x50, 0xC0,
169 0xC3, 0xB4, 0xB9, 0xBE, 0xAE, 0xCC, 0xCF,
170 0xC4, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1
171};
172
173static const unsigned int s6e63m0_17_240[] = {
174 0x18, 0x08, 0x24, 0x71, 0x72, 0x4F, 0xC2,
175 0xC4, 0xB5, 0xBB, 0xBF, 0xB0, 0xCC, 0xCF,
176 0xC3, 0x00, 0x91, 0x00, 0x95, 0x00, 0xCA
177};
178
179static const unsigned int s6e63m0_17_220[] = {
180 0x18, 0x08, 0x24, 0x71, 0x73, 0x4F, 0xC2,
181 0xC5, 0xB5, 0xBD, 0xC0, 0xB2, 0xCD, 0xD1,
182 0xC5, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2
183};
184
185static const unsigned int s6e63m0_17_200[] = {
186 0x18, 0x08, 0x24, 0x72, 0x75, 0x51, 0xC2,
187 0xC6, 0xB5, 0xBF, 0xC1, 0xB3, 0xCE, 0xD1,
188 0xC6, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA
189};
190
191static const unsigned int s6e63m0_17_170[] = {
192 0x18, 0x08, 0x24, 0x75, 0x77, 0x54, 0xC3,
193 0xC7, 0xB7, 0xC0, 0xC3, 0xB4, 0xD1, 0xD3,
194 0xC9, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB
195};
196
197static const unsigned int s6e63m0_17_140[] = {
198 0x18, 0x08, 0x24, 0x7B, 0x77, 0x58, 0xC3,
199 0xC8, 0xB8, 0xC2, 0xC6, 0xB6, 0xD3, 0xD4,
200 0xCA, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E
201};
202
203static const unsigned int s6e63m0_17_110[] = {
204 0x18, 0x08, 0x24, 0x81, 0x7B, 0x5D, 0xC6,
205 0xCA, 0xBB, 0xC3, 0xC7, 0xB8, 0xD6, 0xD8,
206 0xCD, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D
207};
208
209static const unsigned int s6e63m0_17_90[] = {
210 0x18, 0x08, 0x24, 0x82, 0x7A, 0x5B, 0xC8,
211 0xCB, 0xBD, 0xC5, 0xCA, 0xBA, 0xD6, 0xD8,
212 0xCE, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82
213};
214
215static const unsigned int s6e63m0_17_30[] = {
216 0x18, 0x08, 0x24, 0x8F, 0x73, 0x63, 0xD1,
217 0xD0, 0xC5, 0xCC, 0xD1, 0xC2, 0xDE, 0xE0,
218 0xD6, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51
219};
220
221struct s6e63m0_gamma {
222 unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
223 unsigned int *gamma_19_table[MAX_GAMMA_LEVEL];
224 unsigned int *gamma_17_table[MAX_GAMMA_LEVEL];
225};
226
227static struct s6e63m0_gamma gamma_table = {
228 .gamma_22_table[0] = (unsigned int *)&s6e63m0_22_30,
229 .gamma_22_table[1] = (unsigned int *)&s6e63m0_22_90,
230 .gamma_22_table[2] = (unsigned int *)&s6e63m0_22_110,
231 .gamma_22_table[3] = (unsigned int *)&s6e63m0_22_140,
232 .gamma_22_table[4] = (unsigned int *)&s6e63m0_22_170,
233 .gamma_22_table[5] = (unsigned int *)&s6e63m0_22_200,
234 .gamma_22_table[6] = (unsigned int *)&s6e63m0_22_220,
235 .gamma_22_table[7] = (unsigned int *)&s6e63m0_22_240,
236 .gamma_22_table[8] = (unsigned int *)&s6e63m0_22_260,
237 .gamma_22_table[9] = (unsigned int *)&s6e63m0_22_280,
238 .gamma_22_table[10] = (unsigned int *)&s6e63m0_22_300,
239
240 .gamma_19_table[0] = (unsigned int *)&s6e63m0_19_30,
241 .gamma_19_table[1] = (unsigned int *)&s6e63m0_19_90,
242 .gamma_19_table[2] = (unsigned int *)&s6e63m0_19_110,
243 .gamma_19_table[3] = (unsigned int *)&s6e63m0_19_140,
244 .gamma_19_table[4] = (unsigned int *)&s6e63m0_19_170,
245 .gamma_19_table[5] = (unsigned int *)&s6e63m0_19_200,
246 .gamma_19_table[6] = (unsigned int *)&s6e63m0_19_220,
247 .gamma_19_table[7] = (unsigned int *)&s6e63m0_19_240,
248 .gamma_19_table[8] = (unsigned int *)&s6e63m0_19_260,
249 .gamma_19_table[9] = (unsigned int *)&s6e63m0_19_280,
250 .gamma_19_table[10] = (unsigned int *)&s6e63m0_19_300,
251
252 .gamma_17_table[0] = (unsigned int *)&s6e63m0_17_30,
253 .gamma_17_table[1] = (unsigned int *)&s6e63m0_17_90,
254 .gamma_17_table[2] = (unsigned int *)&s6e63m0_17_110,
255 .gamma_17_table[3] = (unsigned int *)&s6e63m0_17_140,
256 .gamma_17_table[4] = (unsigned int *)&s6e63m0_17_170,
257 .gamma_17_table[5] = (unsigned int *)&s6e63m0_17_200,
258 .gamma_17_table[6] = (unsigned int *)&s6e63m0_17_220,
259 .gamma_17_table[7] = (unsigned int *)&s6e63m0_17_240,
260 .gamma_17_table[8] = (unsigned int *)&s6e63m0_17_260,
261 .gamma_17_table[9] = (unsigned int *)&s6e63m0_17_280,
262 .gamma_17_table[10] = (unsigned int *)&s6e63m0_17_300,
263};
264
265#endif
266
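
The header above only lays out data: three gamma curves, each with MAX_GAMMA_LEVEL pointer slots filled by GAMMA_TABLE_COUNT-entry register sets, which the driver indexes by the current backlight brightness. The host-side sketch below (not kernel code, with placeholder table contents) illustrates that layout and the kind of bounds check a caller needs before indexing it.

/*
 * Host-side sketch (not kernel code) of the gamma table layout:
 * three curves (2.2, 1.9, 1.7), each with MAX_GAMMA_LEVEL register
 * sets of GAMMA_TABLE_COUNT entries, indexed by brightness level.
 * The table contents here are placeholders.
 */
#include <stdio.h>

#define MAX_GAMMA_LEVEL		11	/* brightness levels per curve */
#define GAMMA_TABLE_COUNT	21	/* register values per level */

static unsigned int dummy_level[GAMMA_TABLE_COUNT];	/* placeholder registers */

struct gamma_tables {
	const unsigned int *curve[3][MAX_GAMMA_LEVEL];	/* [mode][brightness] */
};

static const unsigned int *lookup(const struct gamma_tables *t,
				  unsigned int mode, unsigned int brightness)
{
	/* range check a caller would need before dereferencing */
	if (mode > 2 || brightness >= MAX_GAMMA_LEVEL)
		return NULL;
	return t->curve[mode][brightness];
}

int main(void)
{
	struct gamma_tables t;
	unsigned int mode, level;

	for (mode = 0; mode < 3; mode++)
		for (level = 0; level < MAX_GAMMA_LEVEL; level++)
			t.curve[mode][level] = dummy_level;

	printf("registers per gamma level: %d\n", GAMMA_TABLE_COUNT);
	printf("lookup(1, 5) %s\n", lookup(&t, 1, 5) ? "ok" : "missing");
	return 0;
}
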
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 23b2a8c0dbfc..b020ba7f1cf2 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -501,7 +501,9 @@ static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id)
501 501
502static int __devinit bfin_bf54x_probe(struct platform_device *pdev) 502static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
503{ 503{
504#ifndef NO_BL_SUPPORT
504 struct backlight_properties props; 505 struct backlight_properties props;
506#endif
505 struct bfin_bf54xfb_info *info; 507 struct bfin_bf54xfb_info *info;
506 struct fb_info *fbinfo; 508 struct fb_info *fbinfo;
507 int ret; 509 int ret;
@@ -654,7 +656,8 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
654 printk(KERN_ERR DRIVER_NAME 656 printk(KERN_ERR DRIVER_NAME
655 ": unable to register backlight.\n"); 657 ": unable to register backlight.\n");
656 ret = -EINVAL; 658 ret = -EINVAL;
657 goto out9; 659 unregister_framebuffer(fbinfo);
660 goto out8;
658 } 661 }
659 662
660 lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops); 663 lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops);
@@ -663,8 +666,6 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
663 666
664 return 0; 667 return 0;
665 668
666out9:
667 unregister_framebuffer(fbinfo);
668out8: 669out8:
669 free_irq(info->irq, info); 670 free_irq(info->irq, info);
670out7: 671out7:
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index c2ec3dcd4e91..7a50272eaab9 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -420,7 +420,9 @@ static irqreturn_t bfin_t350mcqb_irq_error(int irq, void *dev_id)
420 420
421static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) 421static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
422{ 422{
423#ifndef NO_BL_SUPPORT
423 struct backlight_properties props; 424 struct backlight_properties props;
425#endif
424 struct bfin_t350mcqbfb_info *info; 426 struct bfin_t350mcqbfb_info *info;
425 struct fb_info *fbinfo; 427 struct fb_info *fbinfo;
426 int ret; 428 int ret;
@@ -550,7 +552,8 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
550 printk(KERN_ERR DRIVER_NAME 552 printk(KERN_ERR DRIVER_NAME
551 ": unable to register backlight.\n"); 553 ": unable to register backlight.\n");
552 ret = -EINVAL; 554 ret = -EINVAL;
553 goto out9; 555 unregister_framebuffer(fbinfo);
556 goto out8;
554 } 557 }
555 558
556 lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops); 559 lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops);
@@ -559,8 +562,6 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
559 562
560 return 0; 563 return 0;
561 564
562out9:
563 unregister_framebuffer(fbinfo);
564out8: 565out8:
565 free_irq(info->irq, info); 566 free_irq(info->irq, info);
566out7: 567out7:
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 1105a591dcc1..073c9b408cf7 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -66,7 +66,7 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma,
66 return 0; 66 return 0;
67} 67}
68 68
69int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync) 69int fb_deferred_io_fsync(struct file *file, int datasync)
70{ 70{
71 struct fb_info *info = file->private_data; 71 struct fb_info *info = file->private_data;
72 72
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index d4471b4c0374..dce8c97b4333 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -71,7 +71,8 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
71 "S3 Trio64UV+", "S3 Trio64V2/DX", "S3 Trio64V2/GX", 71 "S3 Trio64UV+", "S3 Trio64V2/DX", "S3 Trio64V2/GX",
72 "S3 Plato/PX", "S3 Aurora64VP", "S3 Virge", 72 "S3 Plato/PX", "S3 Aurora64VP", "S3 Virge",
73 "S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX", 73 "S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX",
74 "S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P"}; 74 "S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P",
75 "S3 Trio3D/1X", "S3 Trio3D/2X", "S3 Trio3D/2X"};
75 76
76#define CHIP_UNKNOWN 0x00 77#define CHIP_UNKNOWN 0x00
77#define CHIP_732_TRIO32 0x01 78#define CHIP_732_TRIO32 0x01
@@ -89,10 +90,14 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
89#define CHIP_356_VIRGE_GX2 0x0D 90#define CHIP_356_VIRGE_GX2 0x0D
90#define CHIP_357_VIRGE_GX2P 0x0E 91#define CHIP_357_VIRGE_GX2P 0x0E
91#define CHIP_359_VIRGE_GX2P 0x0F 92#define CHIP_359_VIRGE_GX2P 0x0F
93#define CHIP_360_TRIO3D_1X 0x10
94#define CHIP_362_TRIO3D_2X 0x11
95#define CHIP_368_TRIO3D_2X 0x12
92 96
93#define CHIP_XXX_TRIO 0x80 97#define CHIP_XXX_TRIO 0x80
94#define CHIP_XXX_TRIO64V2_DXGX 0x81 98#define CHIP_XXX_TRIO64V2_DXGX 0x81
95#define CHIP_XXX_VIRGE_DXGX 0x82 99#define CHIP_XXX_VIRGE_DXGX 0x82
100#define CHIP_36X_TRIO3D_1X_2X 0x83
96 101
97#define CHIP_UNDECIDED_FLAG 0x80 102#define CHIP_UNDECIDED_FLAG 0x80
98#define CHIP_MASK 0xFF 103#define CHIP_MASK 0xFF
@@ -324,6 +329,7 @@ static void s3fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
324 329
325static void s3_set_pixclock(struct fb_info *info, u32 pixclock) 330static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
326{ 331{
332 struct s3fb_info *par = info->par;
327 u16 m, n, r; 333 u16 m, n, r;
328 u8 regval; 334 u8 regval;
329 int rv; 335 int rv;
@@ -339,7 +345,13 @@ static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
339 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD); 345 vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD);
340 346
341 /* Set S3 clock registers */ 347 /* Set S3 clock registers */
342 vga_wseq(NULL, 0x12, ((n - 2) | (r << 5))); 348 if (par->chip == CHIP_360_TRIO3D_1X ||
349 par->chip == CHIP_362_TRIO3D_2X ||
350 par->chip == CHIP_368_TRIO3D_2X) {
351 vga_wseq(NULL, 0x12, (n - 2) | ((r & 3) << 6)); /* n and two bits of r */
352 vga_wseq(NULL, 0x29, r >> 2); /* remaining highest bit of r */
353 } else
354 vga_wseq(NULL, 0x12, (n - 2) | (r << 5));
343 vga_wseq(NULL, 0x13, m - 2); 355 vga_wseq(NULL, 0x13, m - 2);
344 356
345 udelay(1000); 357 udelay(1000);
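
For the Trio3D variants the hunk above can no longer fit the whole r divider next to n in sequencer register 0x12, so it keeps only the low two bits of r there (shifted to bits 6-7) and moves the remaining high bit into register 0x29. The host-side sketch below mirrors that packing with arbitrary example divider values.

/*
 * Host-side sketch (not kernel code) of the Trio3D pixel-clock packing
 * introduced above: n-2 shares SR12 with the low two bits of r, and the
 * remaining high bit of r moves to SR29.  Other chips keep the old
 * layout with r in bits 5-6 of SR12.
 */
#include <stdio.h>

static void pack_trio3d(unsigned int n, unsigned int r,
			unsigned char *sr12, unsigned char *sr29)
{
	*sr12 = (n - 2) | ((r & 3) << 6);	/* n and two bits of r */
	*sr29 = r >> 2;				/* remaining highest bit of r */
}

static void pack_legacy(unsigned int n, unsigned int r, unsigned char *sr12)
{
	*sr12 = (n - 2) | (r << 5);		/* original layout */
}

int main(void)
{
	unsigned char sr12, sr29;

	pack_trio3d(14, 5, &sr12, &sr29);	/* arbitrary example n/r values */
	printf("Trio3D: SR12=0x%02x SR29=0x%02x\n", sr12, sr29);

	pack_legacy(14, 3, &sr12);
	printf("legacy: SR12=0x%02x\n", sr12);
	return 0;
}
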
@@ -456,7 +468,7 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
456static int s3fb_set_par(struct fb_info *info) 468static int s3fb_set_par(struct fb_info *info)
457{ 469{
458 struct s3fb_info *par = info->par; 470 struct s3fb_info *par = info->par;
459 u32 value, mode, hmul, offset_value, screen_size, multiplex; 471 u32 value, mode, hmul, offset_value, screen_size, multiplex, dbytes;
460 u32 bpp = info->var.bits_per_pixel; 472 u32 bpp = info->var.bits_per_pixel;
461 473
462 if (bpp != 0) { 474 if (bpp != 0) {
@@ -518,7 +530,7 @@ static int s3fb_set_par(struct fb_info *info)
518 svga_wcrt_mask(0x33, 0x00, 0x08); /* no DDR ? */ 530 svga_wcrt_mask(0x33, 0x00, 0x08); /* no DDR ? */
519 svga_wcrt_mask(0x43, 0x00, 0x01); /* no DDR ? */ 531 svga_wcrt_mask(0x43, 0x00, 0x01); /* no DDR ? */
520 532
521 svga_wcrt_mask(0x5D, 0x00, 0x28); // Clear strange HSlen bits 533 svga_wcrt_mask(0x5D, 0x00, 0x28); /* Clear strange HSlen bits */
522 534
523/* svga_wcrt_mask(0x58, 0x03, 0x03); */ 535/* svga_wcrt_mask(0x58, 0x03, 0x03); */
524 536
@@ -530,10 +542,14 @@ static int s3fb_set_par(struct fb_info *info)
530 pr_debug("fb%d: offset register : %d\n", info->node, offset_value); 542 pr_debug("fb%d: offset register : %d\n", info->node, offset_value);
531 svga_wcrt_multi(s3_offset_regs, offset_value); 543 svga_wcrt_multi(s3_offset_regs, offset_value);
532 544
533 vga_wcrt(NULL, 0x54, 0x18); /* M parameter */ 545 if (par->chip != CHIP_360_TRIO3D_1X &&
534 vga_wcrt(NULL, 0x60, 0xff); /* N parameter */ 546 par->chip != CHIP_362_TRIO3D_2X &&
535 vga_wcrt(NULL, 0x61, 0xff); /* L parameter */ 547 par->chip != CHIP_368_TRIO3D_2X) {
536 vga_wcrt(NULL, 0x62, 0xff); /* L parameter */ 548 vga_wcrt(NULL, 0x54, 0x18); /* M parameter */
549 vga_wcrt(NULL, 0x60, 0xff); /* N parameter */
550 vga_wcrt(NULL, 0x61, 0xff); /* L parameter */
551 vga_wcrt(NULL, 0x62, 0xff); /* L parameter */
552 }
537 553
538 vga_wcrt(NULL, 0x3A, 0x35); 554 vga_wcrt(NULL, 0x3A, 0x35);
539 svga_wattr(0x33, 0x00); 555 svga_wattr(0x33, 0x00);
@@ -570,6 +586,16 @@ static int s3fb_set_par(struct fb_info *info)
570 vga_wcrt(NULL, 0x66, 0x90); 586 vga_wcrt(NULL, 0x66, 0x90);
571 } 587 }
572 588
589 if (par->chip == CHIP_360_TRIO3D_1X ||
590 par->chip == CHIP_362_TRIO3D_2X ||
591 par->chip == CHIP_368_TRIO3D_2X) {
592 dbytes = info->var.xres * ((bpp+7)/8);
593 vga_wcrt(NULL, 0x91, (dbytes + 7) / 8);
594 vga_wcrt(NULL, 0x90, (((dbytes + 7) / 8) >> 8) | 0x80);
595
596 vga_wcrt(NULL, 0x66, 0x81);
597 }
598
573 svga_wcrt_mask(0x31, 0x00, 0x40); 599 svga_wcrt_mask(0x31, 0x00, 0x40);
574 multiplex = 0; 600 multiplex = 0;
575 hmul = 1; 601 hmul = 1;
@@ -615,11 +641,13 @@ static int s3fb_set_par(struct fb_info *info)
615 break; 641 break;
616 case 3: 642 case 3:
617 pr_debug("fb%d: 8 bit pseudocolor\n", info->node); 643 pr_debug("fb%d: 8 bit pseudocolor\n", info->node);
618 if (info->var.pixclock > 20000) { 644 svga_wcrt_mask(0x50, 0x00, 0x30);
619 svga_wcrt_mask(0x50, 0x00, 0x30); 645 if (info->var.pixclock > 20000 ||
646 par->chip == CHIP_360_TRIO3D_1X ||
647 par->chip == CHIP_362_TRIO3D_2X ||
648 par->chip == CHIP_368_TRIO3D_2X)
620 svga_wcrt_mask(0x67, 0x00, 0xF0); 649 svga_wcrt_mask(0x67, 0x00, 0xF0);
621 } else { 650 else {
622 svga_wcrt_mask(0x50, 0x00, 0x30);
623 svga_wcrt_mask(0x67, 0x10, 0xF0); 651 svga_wcrt_mask(0x67, 0x10, 0xF0);
624 multiplex = 1; 652 multiplex = 1;
625 } 653 }
@@ -634,7 +662,10 @@ static int s3fb_set_par(struct fb_info *info)
634 } else { 662 } else {
635 svga_wcrt_mask(0x50, 0x10, 0x30); 663 svga_wcrt_mask(0x50, 0x10, 0x30);
636 svga_wcrt_mask(0x67, 0x30, 0xF0); 664 svga_wcrt_mask(0x67, 0x30, 0xF0);
637 hmul = 2; 665 if (par->chip != CHIP_360_TRIO3D_1X &&
666 par->chip != CHIP_362_TRIO3D_2X &&
667 par->chip != CHIP_368_TRIO3D_2X)
668 hmul = 2;
638 } 669 }
639 break; 670 break;
640 case 5: 671 case 5:
@@ -647,7 +678,10 @@ static int s3fb_set_par(struct fb_info *info)
647 } else { 678 } else {
648 svga_wcrt_mask(0x50, 0x10, 0x30); 679 svga_wcrt_mask(0x50, 0x10, 0x30);
649 svga_wcrt_mask(0x67, 0x50, 0xF0); 680 svga_wcrt_mask(0x67, 0x50, 0xF0);
650 hmul = 2; 681 if (par->chip != CHIP_360_TRIO3D_1X &&
682 par->chip != CHIP_362_TRIO3D_2X &&
683 par->chip != CHIP_368_TRIO3D_2X)
684 hmul = 2;
651 } 685 }
652 break; 686 break;
653 case 6: 687 case 6:
@@ -866,6 +900,17 @@ static int __devinit s3_identification(int chip)
866 return CHIP_385_VIRGE_GX; 900 return CHIP_385_VIRGE_GX;
867 } 901 }
868 902
903 if (chip == CHIP_36X_TRIO3D_1X_2X) {
904 switch (vga_rcrt(NULL, 0x2f)) {
905 case 0x00:
906 return CHIP_360_TRIO3D_1X;
907 case 0x01:
908 return CHIP_362_TRIO3D_2X;
909 case 0x02:
910 return CHIP_368_TRIO3D_2X;
911 }
912 }
913
869 return CHIP_UNKNOWN; 914 return CHIP_UNKNOWN;
870} 915}
871 916
@@ -930,17 +975,32 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
930 vga_wcrt(NULL, 0x38, 0x48); 975 vga_wcrt(NULL, 0x38, 0x48);
931 vga_wcrt(NULL, 0x39, 0xA5); 976 vga_wcrt(NULL, 0x39, 0xA5);
932 977
933 /* Find how many physical memory there is on card */ 978 /* Identify chip type */
934 /* 0x36 register is accessible even if other registers are locked */
935 regval = vga_rcrt(NULL, 0x36);
936 info->screen_size = s3_memsizes[regval >> 5] << 10;
937 info->fix.smem_len = info->screen_size;
938
939 par->chip = id->driver_data & CHIP_MASK; 979 par->chip = id->driver_data & CHIP_MASK;
940 par->rev = vga_rcrt(NULL, 0x2f); 980 par->rev = vga_rcrt(NULL, 0x2f);
941 if (par->chip & CHIP_UNDECIDED_FLAG) 981 if (par->chip & CHIP_UNDECIDED_FLAG)
942 par->chip = s3_identification(par->chip); 982 par->chip = s3_identification(par->chip);
943 983
984 /* Find out how much physical memory there is on the card */
985 /* 0x36 register is accessible even if other registers are locked */
986 regval = vga_rcrt(NULL, 0x36);
987 if (par->chip == CHIP_360_TRIO3D_1X ||
988 par->chip == CHIP_362_TRIO3D_2X ||
989 par->chip == CHIP_368_TRIO3D_2X) {
990 switch ((regval & 0xE0) >> 5) {
991 case 0: /* 8MB -- only 4MB usable for display */
992 case 1: /* 4MB with 32-bit bus */
993 case 2: /* 4MB */
994 info->screen_size = 4 << 20;
995 break;
996 case 6: /* 2MB */
997 info->screen_size = 2 << 20;
998 break;
999 }
1000 } else
1001 info->screen_size = s3_memsizes[regval >> 5] << 10;
1002 info->fix.smem_len = info->screen_size;
1003
944 /* Find MCLK frequency */ 1004 /* Find MCLK frequency */
945 regval = vga_rseq(NULL, 0x10); 1005 regval = vga_rseq(NULL, 0x10);
946 par->mclk_freq = ((vga_rseq(NULL, 0x11) + 2) * 14318) / ((regval & 0x1F) + 2); 1006 par->mclk_freq = ((vga_rseq(NULL, 0x11) + 2) * 14318) / ((regval & 0x1F) + 2);
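
On the same Trio3D parts the memory size can no longer be taken from the s3_memsizes table; the hunk above decodes bits 7-5 of CR36 directly, handling only the 2 MB and 4 MB encodings. The host-side sketch below mirrors that decode with a few sample register values; encodings the patch does not handle fall through to zero here.

/*
 * Host-side sketch (not kernel code) of the Trio3D memory-size decode
 * added above: bits 7-5 of CR36 select the installed memory.
 */
#include <stdio.h>

static unsigned long trio3d_screen_size(unsigned char cr36)
{
	switch ((cr36 & 0xE0) >> 5) {
	case 0:	/* 8 MB -- only 4 MB usable for display */
	case 1:	/* 4 MB with 32-bit bus */
	case 2:	/* 4 MB */
		return 4UL << 20;
	case 6:	/* 2 MB */
		return 2UL << 20;
	default:
		return 0;	/* encoding not handled by the patch */
	}
}

int main(void)
{
	unsigned char regs[] = { 0x00, 0x20, 0x40, 0xC0 };	/* sample CR36 values */
	unsigned int i;

	for (i = 0; i < sizeof(regs); i++)
		printf("CR36=0x%02x -> %lu MB\n", regs[i],
		       trio3d_screen_size(regs[i]) >> 20);
	return 0;
}
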
@@ -1131,6 +1191,7 @@ static struct pci_device_id s3_devices[] __devinitdata = {
1131 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A10), .driver_data = CHIP_356_VIRGE_GX2}, 1191 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A10), .driver_data = CHIP_356_VIRGE_GX2},
1132 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A11), .driver_data = CHIP_357_VIRGE_GX2P}, 1192 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A11), .driver_data = CHIP_357_VIRGE_GX2P},
1133 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P}, 1193 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P},
1194 {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A13), .driver_data = CHIP_36X_TRIO3D_1X_2X},
1134 1195
1135 {0, 0, 0, 0, 0, 0, 0} 1196 {0, 0, 0, 0, 0, 0, 0}
1136}; 1197};
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 2bc40e682f95..1082541358f0 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -578,14 +578,9 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
578 break; 578 break;
579 579
580 case VIAFB_SET_GAMMA_LUT: 580 case VIAFB_SET_GAMMA_LUT:
581 viafb_gamma_table = kmalloc(256 * sizeof(u32), GFP_KERNEL); 581 viafb_gamma_table = memdup_user(argp, 256 * sizeof(u32));
582 if (!viafb_gamma_table) 582 if (IS_ERR(viafb_gamma_table))
583 return -ENOMEM; 583 return PTR_ERR(viafb_gamma_table);
584 if (copy_from_user(viafb_gamma_table, argp,
585 256 * sizeof(u32))) {
586 kfree(viafb_gamma_table);
587 return -EFAULT;
588 }
589 viafb_set_gamma_table(viafb_bpp, viafb_gamma_table); 584 viafb_set_gamma_table(viafb_bpp, viafb_gamma_table);
590 kfree(viafb_gamma_table); 585 kfree(viafb_gamma_table);
591 break; 586 break;
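
The viafb ioctl above is one of several places in this series that replace an open-coded kmalloc() plus copy_from_user() with memdup_user(), which returns either the kernel copy or an ERR_PTR the caller must check. The userspace analogue below (plain malloc/memcpy, clearly not the kernel helper) only sketches the allocate-and-copy-in-one-step shape of that pattern.

/*
 * Userspace analogue (not kernel code) of the memdup_user() pattern:
 * allocate and copy in one step, returning either the new buffer or an
 * error the caller must check before use.
 */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>

static void *dup_buffer(const void *src, size_t len, int *err)
{
	void *p = malloc(len);

	if (!p) {
		*err = -ENOMEM;
		return NULL;
	}
	memcpy(p, src, len);	/* the kernel helper copies from user space */
	*err = 0;
	return p;
}

int main(void)
{
	unsigned int table[256] = { 0 };
	int err;
	void *copy = dup_buffer(table, sizeof(table), &err);

	if (!copy) {
		fprintf(stderr, "dup failed: %d\n", err);
		return 1;
	}
	printf("copied %zu bytes\n", sizeof(table));
	free(copy);
	return 0;
}
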
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 69c6adbd8205..428f8a1583e8 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * RDC321x watchdog driver 2 * RDC321x watchdog driver
3 * 3 *
4 * Copyright (C) 2007 Florian Fainelli <florian@openwrt.org> 4 * Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org>
5 * 5 *
6 * This driver is highly inspired from the cpu5_wdt driver 6 * This driver is highly inspired from the cpu5_wdt driver
7 * 7 *
@@ -36,8 +36,7 @@
36#include <linux/watchdog.h> 36#include <linux/watchdog.h>
37#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/uaccess.h> 38#include <linux/uaccess.h>
39 39#include <linux/mfd/rdc321x.h>
40#include <asm/rdc321x_defs.h>
41 40
42#define RDC_WDT_MASK 0x80000000 /* Mask */ 41#define RDC_WDT_MASK 0x80000000 /* Mask */
43#define RDC_WDT_EN 0x00800000 /* Enable bit */ 42#define RDC_WDT_EN 0x00800000 /* Enable bit */
@@ -63,6 +62,8 @@ static struct {
63 int default_ticks; 62 int default_ticks;
64 unsigned long inuse; 63 unsigned long inuse;
65 spinlock_t lock; 64 spinlock_t lock;
65 struct pci_dev *sb_pdev;
66 int base_reg;
66} rdc321x_wdt_device; 67} rdc321x_wdt_device;
67 68
68/* generic helper functions */ 69/* generic helper functions */
@@ -70,14 +71,18 @@ static struct {
70static void rdc321x_wdt_trigger(unsigned long unused) 71static void rdc321x_wdt_trigger(unsigned long unused)
71{ 72{
72 unsigned long flags; 73 unsigned long flags;
74 u32 val;
73 75
74 if (rdc321x_wdt_device.running) 76 if (rdc321x_wdt_device.running)
75 ticks--; 77 ticks--;
76 78
77 /* keep watchdog alive */ 79 /* keep watchdog alive */
78 spin_lock_irqsave(&rdc321x_wdt_device.lock, flags); 80 spin_lock_irqsave(&rdc321x_wdt_device.lock, flags);
79 outl(RDC_WDT_EN | inl(RDC3210_CFGREG_DATA), 81 pci_read_config_dword(rdc321x_wdt_device.sb_pdev,
80 RDC3210_CFGREG_DATA); 82 rdc321x_wdt_device.base_reg, &val);
83 val |= RDC_WDT_EN;
84 pci_write_config_dword(rdc321x_wdt_device.sb_pdev,
85 rdc321x_wdt_device.base_reg, val);
81 spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags); 86 spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags);
82 87
83 /* requeue?? */ 88 /* requeue?? */
@@ -105,10 +110,13 @@ static void rdc321x_wdt_start(void)
105 110
106 /* Clear the timer */ 111 /* Clear the timer */
107 spin_lock_irqsave(&rdc321x_wdt_device.lock, flags); 112 spin_lock_irqsave(&rdc321x_wdt_device.lock, flags);
108 outl(RDC_CLS_TMR, RDC3210_CFGREG_ADDR); 113 pci_write_config_dword(rdc321x_wdt_device.sb_pdev,
114 rdc321x_wdt_device.base_reg, RDC_CLS_TMR);
109 115
110 /* Enable watchdog and set the timeout to 81.92 us */ 116 /* Enable watchdog and set the timeout to 81.92 us */
111 outl(RDC_WDT_EN | RDC_WDT_CNT, RDC3210_CFGREG_DATA); 117 pci_write_config_dword(rdc321x_wdt_device.sb_pdev,
118 rdc321x_wdt_device.base_reg,
119 RDC_WDT_EN | RDC_WDT_CNT);
112 spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags); 120 spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags);
113 121
114 mod_timer(&rdc321x_wdt_device.timer, 122 mod_timer(&rdc321x_wdt_device.timer,
@@ -148,7 +156,7 @@ static long rdc321x_wdt_ioctl(struct file *file, unsigned int cmd,
148 unsigned long arg) 156 unsigned long arg)
149{ 157{
150 void __user *argp = (void __user *)arg; 158 void __user *argp = (void __user *)arg;
151 unsigned int value; 159 u32 value;
152 static const struct watchdog_info ident = { 160 static const struct watchdog_info ident = {
153 .options = WDIOF_CARDRESET, 161 .options = WDIOF_CARDRESET,
154 .identity = "RDC321x WDT", 162 .identity = "RDC321x WDT",
@@ -162,9 +170,10 @@ static long rdc321x_wdt_ioctl(struct file *file, unsigned int cmd,
162 case WDIOC_GETSTATUS: 170 case WDIOC_GETSTATUS:
163 /* Read the value from the DATA register */ 171 /* Read the value from the DATA register */
164 spin_lock_irqsave(&rdc321x_wdt_device.lock, flags); 172 spin_lock_irqsave(&rdc321x_wdt_device.lock, flags);
165 value = inl(RDC3210_CFGREG_DATA); 173 pci_read_config_dword(rdc321x_wdt_device.sb_pdev,
174 rdc321x_wdt_device.base_reg, &value);
166 spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags); 175 spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags);
167 if (copy_to_user(argp, &value, sizeof(int))) 176 if (copy_to_user(argp, &value, sizeof(u32)))
168 return -EFAULT; 177 return -EFAULT;
169 break; 178 break;
170 case WDIOC_GETSUPPORT: 179 case WDIOC_GETSUPPORT:
@@ -219,17 +228,35 @@ static struct miscdevice rdc321x_wdt_misc = {
219static int __devinit rdc321x_wdt_probe(struct platform_device *pdev) 228static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
220{ 229{
221 int err; 230 int err;
231 struct resource *r;
232 struct rdc321x_wdt_pdata *pdata;
233
234 pdata = pdev->dev.platform_data;
235 if (!pdata) {
236 dev_err(&pdev->dev, "no platform data supplied\n");
237 return -ENODEV;
238 }
239
240 r = platform_get_resource_byname(pdev, IORESOURCE_IO, "wdt-reg");
241 if (!r) {
242 dev_err(&pdev->dev, "failed to get wdt-reg resource\n");
243 return -ENODEV;
244 }
245
246 rdc321x_wdt_device.sb_pdev = pdata->sb_pdev;
247 rdc321x_wdt_device.base_reg = r->start;
222 248
223 err = misc_register(&rdc321x_wdt_misc); 249 err = misc_register(&rdc321x_wdt_misc);
224 if (err < 0) { 250 if (err < 0) {
225 printk(KERN_ERR PFX "watchdog misc_register failed\n"); 251 dev_err(&pdev->dev, "misc_register failed\n");
226 return err; 252 return err;
227 } 253 }
228 254
229 spin_lock_init(&rdc321x_wdt_device.lock); 255 spin_lock_init(&rdc321x_wdt_device.lock);
230 256
231 /* Reset the watchdog */ 257 /* Reset the watchdog */
232 outl(RDC_WDT_RST, RDC3210_CFGREG_DATA); 258 pci_write_config_dword(rdc321x_wdt_device.sb_pdev,
259 rdc321x_wdt_device.base_reg, RDC_WDT_RST);
233 260
234 init_completion(&rdc321x_wdt_device.stop); 261 init_completion(&rdc321x_wdt_device.stop);
235 rdc321x_wdt_device.queue = 0; 262 rdc321x_wdt_device.queue = 0;
@@ -240,7 +267,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
240 267
241 rdc321x_wdt_device.default_ticks = ticks; 268 rdc321x_wdt_device.default_ticks = ticks;
242 269
243 printk(KERN_INFO PFX "watchdog init success\n"); 270 dev_info(&pdev->dev, "watchdog init success\n");
244 271
245 return 0; 272 return 0;
246} 273}
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 25b300e1c9d7..2bedc6c94fc2 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -257,15 +257,13 @@ v9fs_file_write(struct file *filp, const char __user * data,
257 return total; 257 return total;
258} 258}
259 259
260static int v9fs_file_fsync(struct file *filp, struct dentry *dentry, 260static int v9fs_file_fsync(struct file *filp, int datasync)
261 int datasync)
262{ 261{
263 struct p9_fid *fid; 262 struct p9_fid *fid;
264 struct p9_wstat wstat; 263 struct p9_wstat wstat;
265 int retval; 264 int retval;
266 265
267 P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp, 266 P9_DPRINTK(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
268 dentry, datasync);
269 267
270 fid = filp->private_data; 268 fid = filp->private_data;
271 v9fs_blank_wstat(&wstat); 269 v9fs_blank_wstat(&wstat);
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index 23aa52f548a0..f4287e4de744 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -197,7 +197,7 @@ const struct file_operations adfs_dir_operations = {
197 .read = generic_read_dir, 197 .read = generic_read_dir,
198 .llseek = generic_file_llseek, 198 .llseek = generic_file_llseek,
199 .readdir = adfs_readdir, 199 .readdir = adfs_readdir,
200 .fsync = simple_fsync, 200 .fsync = generic_file_fsync,
201}; 201};
202 202
203static int 203static int
diff --git a/fs/adfs/file.c b/fs/adfs/file.c
index 005ea34d1758..a36da5382b40 100644
--- a/fs/adfs/file.c
+++ b/fs/adfs/file.c
@@ -26,7 +26,7 @@ const struct file_operations adfs_file_operations = {
26 .read = do_sync_read, 26 .read = do_sync_read,
27 .aio_read = generic_file_aio_read, 27 .aio_read = generic_file_aio_read,
28 .mmap = generic_file_mmap, 28 .mmap = generic_file_mmap,
29 .fsync = simple_fsync, 29 .fsync = generic_file_fsync,
30 .write = do_sync_write, 30 .write = do_sync_write,
31 .aio_write = generic_file_aio_write, 31 .aio_write = generic_file_aio_write,
32 .splice_read = generic_file_splice_read, 32 .splice_read = generic_file_splice_read,
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 0f5e30978135..6f850b06ab62 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -322,8 +322,9 @@ adfs_notify_change(struct dentry *dentry, struct iattr *attr)
322 if (error) 322 if (error)
323 goto out; 323 goto out;
324 324
325 /* XXX: this is missing some actual on-disk truncation.. */
325 if (ia_valid & ATTR_SIZE) 326 if (ia_valid & ATTR_SIZE)
326 error = vmtruncate(inode, attr->ia_size); 327 error = simple_setsize(inode, attr->ia_size);
327 328
328 if (error) 329 if (error)
329 goto out; 330 goto out;
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 861dae68ac12..f05b6155ccc8 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -183,7 +183,7 @@ extern int affs_add_entry(struct inode *dir, struct inode *inode, struct dent
183 183
184void affs_free_prealloc(struct inode *inode); 184void affs_free_prealloc(struct inode *inode);
185extern void affs_truncate(struct inode *); 185extern void affs_truncate(struct inode *);
186int affs_file_fsync(struct file *, struct dentry *, int); 186int affs_file_fsync(struct file *, int);
187 187
188/* dir.c */ 188/* dir.c */
189 189
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 184e55c1c9ba..322710c3eedf 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -916,9 +916,9 @@ affs_truncate(struct inode *inode)
916 affs_free_prealloc(inode); 916 affs_free_prealloc(inode);
917} 917}
918 918
919int affs_file_fsync(struct file *filp, struct dentry *dentry, int datasync) 919int affs_file_fsync(struct file *filp, int datasync)
920{ 920{
921 struct inode * inode = dentry->d_inode; 921 struct inode *inode = filp->f_mapping->host;
922 int ret, err; 922 int ret, err;
923 923
924 ret = write_inode_now(inode, 0); 924 ret = write_inode_now(inode, 0);
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index d70bbbac6b7b..914d1c0bc07a 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -224,7 +224,7 @@ affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
224 affs_brelse(bh); 224 affs_brelse(bh);
225 inode = affs_iget(sb, ino); 225 inode = affs_iget(sb, ino);
226 if (IS_ERR(inode)) 226 if (IS_ERR(inode))
227 return ERR_PTR(PTR_ERR(inode)); 227 return ERR_CAST(inode);
228 } 228 }
229 dentry->d_op = AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations; 229 dentry->d_op = AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations;
230 d_add(dentry, inode); 230 d_add(dentry, inode);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 807f284cc75e..5f679b77ce24 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -740,7 +740,7 @@ extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
740extern ssize_t afs_file_write(struct kiocb *, const struct iovec *, 740extern ssize_t afs_file_write(struct kiocb *, const struct iovec *,
741 unsigned long, loff_t); 741 unsigned long, loff_t);
742extern int afs_writeback_all(struct afs_vnode *); 742extern int afs_writeback_all(struct afs_vnode *);
743extern int afs_fsync(struct file *, struct dentry *, int); 743extern int afs_fsync(struct file *, int);
744 744
745 745
746/*****************************************************************************/ 746/*****************************************************************************/
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 3bed54a294d4..3dab9e9948d0 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -701,8 +701,9 @@ int afs_writeback_all(struct afs_vnode *vnode)
701 * - the return status from this call provides a reliable indication of 701 * - the return status from this call provides a reliable indication of
702 * whether any write errors occurred for this process. 702 * whether any write errors occurred for this process.
703 */ 703 */
704int afs_fsync(struct file *file, struct dentry *dentry, int datasync) 704int afs_fsync(struct file *file, int datasync)
705{ 705{
706 struct dentry *dentry = file->f_path.dentry;
706 struct afs_writeback *wb, *xwb; 707 struct afs_writeback *wb, *xwb;
707 struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode); 708 struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
708 int ret; 709 int ret;
diff --git a/fs/aio.c b/fs/aio.c
index 1cf12b3dd83a..1ccf25cef1f0 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -36,6 +36,7 @@
36#include <linux/blkdev.h> 36#include <linux/blkdev.h>
37#include <linux/mempool.h> 37#include <linux/mempool.h>
38#include <linux/hash.h> 38#include <linux/hash.h>
39#include <linux/compat.h>
39 40
40#include <asm/kmap_types.h> 41#include <asm/kmap_types.h>
41#include <asm/uaccess.h> 42#include <asm/uaccess.h>
@@ -526,7 +527,7 @@ static void aio_fput_routine(struct work_struct *data)
526 527
527 /* Complete the fput(s) */ 528 /* Complete the fput(s) */
528 if (req->ki_filp != NULL) 529 if (req->ki_filp != NULL)
529 __fput(req->ki_filp); 530 fput(req->ki_filp);
530 531
531 /* Link the iocb into the context's free list */ 532 /* Link the iocb into the context's free list */
532 spin_lock_irq(&ctx->ctx_lock); 533 spin_lock_irq(&ctx->ctx_lock);
@@ -559,11 +560,11 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
559 560
560 /* 561 /*
561 * Try to optimize the aio and eventfd file* puts, by avoiding to 562 * Try to optimize the aio and eventfd file* puts, by avoiding to
562 * schedule work in case it is not __fput() time. In normal cases, 563 * schedule work in case it is not final fput() time. In normal cases,
563 * we would not be holding the last reference to the file*, so 564 * we would not be holding the last reference to the file*, so
564 * this function will be executed w/out any aio kthread wakeup. 565 * this function will be executed w/out any aio kthread wakeup.
565 */ 566 */
566 if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) { 567 if (unlikely(!fput_atomic(req->ki_filp))) {
567 get_ioctx(ctx); 568 get_ioctx(ctx);
568 spin_lock(&fput_lock); 569 spin_lock(&fput_lock);
569 list_add(&req->ki_list, &fput_head); 570 list_add(&req->ki_list, &fput_head);
@@ -1384,13 +1385,22 @@ static ssize_t aio_fsync(struct kiocb *iocb)
1384 return ret; 1385 return ret;
1385} 1386}
1386 1387
1387static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb) 1388static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
1388{ 1389{
1389 ssize_t ret; 1390 ssize_t ret;
1390 1391
1391 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf, 1392#ifdef CONFIG_COMPAT
1392 kiocb->ki_nbytes, 1, 1393 if (compat)
1393 &kiocb->ki_inline_vec, &kiocb->ki_iovec); 1394 ret = compat_rw_copy_check_uvector(type,
1395 (struct compat_iovec __user *)kiocb->ki_buf,
1396 kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
1397 &kiocb->ki_iovec);
1398 else
1399#endif
1400 ret = rw_copy_check_uvector(type,
1401 (struct iovec __user *)kiocb->ki_buf,
1402 kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
1403 &kiocb->ki_iovec);
1394 if (ret < 0) 1404 if (ret < 0)
1395 goto out; 1405 goto out;
1396 1406
@@ -1420,7 +1430,7 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
1420 * Performs the initial checks and aio retry method 1430 * Performs the initial checks and aio retry method
1421 * setup for the kiocb at the time of io submission. 1431 * setup for the kiocb at the time of io submission.
1422 */ 1432 */
1423static ssize_t aio_setup_iocb(struct kiocb *kiocb) 1433static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
1424{ 1434{
1425 struct file *file = kiocb->ki_filp; 1435 struct file *file = kiocb->ki_filp;
1426 ssize_t ret = 0; 1436 ssize_t ret = 0;
@@ -1469,7 +1479,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
1469 ret = security_file_permission(file, MAY_READ); 1479 ret = security_file_permission(file, MAY_READ);
1470 if (unlikely(ret)) 1480 if (unlikely(ret))
1471 break; 1481 break;
1472 ret = aio_setup_vectored_rw(READ, kiocb); 1482 ret = aio_setup_vectored_rw(READ, kiocb, compat);
1473 if (ret) 1483 if (ret)
1474 break; 1484 break;
1475 ret = -EINVAL; 1485 ret = -EINVAL;
@@ -1483,7 +1493,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
1483 ret = security_file_permission(file, MAY_WRITE); 1493 ret = security_file_permission(file, MAY_WRITE);
1484 if (unlikely(ret)) 1494 if (unlikely(ret))
1485 break; 1495 break;
1486 ret = aio_setup_vectored_rw(WRITE, kiocb); 1496 ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
1487 if (ret) 1497 if (ret)
1488 break; 1498 break;
1489 ret = -EINVAL; 1499 ret = -EINVAL;
@@ -1548,7 +1558,8 @@ static void aio_batch_free(struct hlist_head *batch_hash)
1548} 1558}
1549 1559
1550static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 1560static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1551 struct iocb *iocb, struct hlist_head *batch_hash) 1561 struct iocb *iocb, struct hlist_head *batch_hash,
1562 bool compat)
1552{ 1563{
1553 struct kiocb *req; 1564 struct kiocb *req;
1554 struct file *file; 1565 struct file *file;
@@ -1609,7 +1620,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1609 req->ki_left = req->ki_nbytes = iocb->aio_nbytes; 1620 req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
1610 req->ki_opcode = iocb->aio_lio_opcode; 1621 req->ki_opcode = iocb->aio_lio_opcode;
1611 1622
1612 ret = aio_setup_iocb(req); 1623 ret = aio_setup_iocb(req, compat);
1613 1624
1614 if (ret) 1625 if (ret)
1615 goto out_put_req; 1626 goto out_put_req;
@@ -1637,20 +1648,8 @@ out_put_req:
1637 return ret; 1648 return ret;
1638} 1649}
1639 1650
1640/* sys_io_submit: 1651long do_io_submit(aio_context_t ctx_id, long nr,
1641 * Queue the nr iocbs pointed to by iocbpp for processing. Returns 1652 struct iocb __user *__user *iocbpp, bool compat)
1642 * the number of iocbs queued. May return -EINVAL if the aio_context
1643 * specified by ctx_id is invalid, if nr is < 0, if the iocb at
1644 * *iocbpp[0] is not properly initialized, if the operation specified
1645 * is invalid for the file descriptor in the iocb. May fail with
1646 * -EFAULT if any of the data structures point to invalid data. May
1647 * fail with -EBADF if the file descriptor specified in the first
1648 * iocb is invalid. May fail with -EAGAIN if insufficient resources
1649 * are available to queue any iocbs. Will return 0 if nr is 0. Will
1650 * fail with -ENOSYS if not implemented.
1651 */
1652SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1653 struct iocb __user * __user *, iocbpp)
1654{ 1653{
1655 struct kioctx *ctx; 1654 struct kioctx *ctx;
1656 long ret = 0; 1655 long ret = 0;
@@ -1687,7 +1686,7 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1687 break; 1686 break;
1688 } 1687 }
1689 1688
1690 ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash); 1689 ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
1691 if (ret) 1690 if (ret)
1692 break; 1691 break;
1693 } 1692 }
@@ -1697,6 +1696,24 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1697 return i ? i : ret; 1696 return i ? i : ret;
1698} 1697}
1699 1698
1699/* sys_io_submit:
1700 * Queue the nr iocbs pointed to by iocbpp for processing. Returns
1701 * the number of iocbs queued. May return -EINVAL if the aio_context
1702 * specified by ctx_id is invalid, if nr is < 0, if the iocb at
1703 * *iocbpp[0] is not properly initialized, if the operation specified
1704 * is invalid for the file descriptor in the iocb. May fail with
1705 * -EFAULT if any of the data structures point to invalid data. May
1706 * fail with -EBADF if the file descriptor specified in the first
1707 * iocb is invalid. May fail with -EAGAIN if insufficient resources
1708 * are available to queue any iocbs. Will return 0 if nr is 0. Will
1709 * fail with -ENOSYS if not implemented.
1710 */
1711SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1712 struct iocb __user * __user *, iocbpp)
1713{
1714 return do_io_submit(ctx_id, nr, iocbpp, 0);
1715}
1716
1700/* lookup_kiocb 1717/* lookup_kiocb
1701 * Finds a given iocb for cancellation. 1718 * Finds a given iocb for cancellation.
1702 */ 1719 */
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 9bd4b3876c99..e4b75d6eda83 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -205,7 +205,7 @@ static struct inode *anon_inode_mkinode(void)
205 * that it already _is_ on the dirty list. 205 * that it already _is_ on the dirty list.
206 */ 206 */
207 inode->i_state = I_DIRTY; 207 inode->i_state = I_DIRTY;
208 inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR; 208 inode->i_mode = S_IRUSR | S_IWUSR;
209 inode->i_uid = current_fsuid(); 209 inode->i_uid = current_fsuid();
210 inode->i_gid = current_fsgid(); 210 inode->i_gid = current_fsgid();
211 inode->i_flags |= S_PRIVATE; 211 inode->i_flags |= S_PRIVATE;
diff --git a/fs/attr.c b/fs/attr.c
index 0815e93bb487..b4fa3b0aa596 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -67,14 +67,14 @@ EXPORT_SYMBOL(inode_change_ok);
67 * @offset: the new size to assign to the inode 67 * @offset: the new size to assign to the inode
68 * @Returns: 0 on success, -ve errno on failure 68 * @Returns: 0 on success, -ve errno on failure
69 * 69 *
70 * inode_newsize_ok must be called with i_mutex held.
71 *
70 * inode_newsize_ok will check filesystem limits and ulimits to check that the 72 * inode_newsize_ok will check filesystem limits and ulimits to check that the
71 * new inode size is within limits. inode_newsize_ok will also send SIGXFSZ 73 * new inode size is within limits. inode_newsize_ok will also send SIGXFSZ
72 * when necessary. Caller must not proceed with inode size change if failure is 74 * when necessary. Caller must not proceed with inode size change if failure is
73 * returned. @inode must be a file (not directory), with appropriate 75 * returned. @inode must be a file (not directory), with appropriate
74 * permissions to allow truncate (inode_newsize_ok does NOT check these 76 * permissions to allow truncate (inode_newsize_ok does NOT check these
75 * conditions). 77 * conditions).
76 *
77 * inode_newsize_ok must be called with i_mutex held.
78 */ 78 */
79int inode_newsize_ok(const struct inode *inode, loff_t offset) 79int inode_newsize_ok(const struct inode *inode, loff_t offset)
80{ 80{
@@ -104,17 +104,25 @@ out_big:
104} 104}
105EXPORT_SYMBOL(inode_newsize_ok); 105EXPORT_SYMBOL(inode_newsize_ok);
106 106
107int inode_setattr(struct inode * inode, struct iattr * attr) 107/**
108 * generic_setattr - copy simple metadata updates into the generic inode
109 * @inode: the inode to be updated
110 * @attr: the new attributes
111 *
112 * generic_setattr must be called with i_mutex held.
113 *
114 * generic_setattr updates the inode's metadata with that specified
115 * in attr. Noticeably missing is the inode size update, which is more complex
116 * as it requires pagecache updates. See simple_setsize.
117 *
118 * The inode is not marked as dirty after this operation. The rationale is
119 * that for "simple" filesystems, the struct inode is the inode storage.
120 * The caller is free to mark the inode dirty afterwards if needed.
121 */
122void generic_setattr(struct inode *inode, const struct iattr *attr)
108{ 123{
109 unsigned int ia_valid = attr->ia_valid; 124 unsigned int ia_valid = attr->ia_valid;
110 125
111 if (ia_valid & ATTR_SIZE &&
112 attr->ia_size != i_size_read(inode)) {
113 int error = vmtruncate(inode, attr->ia_size);
114 if (error)
115 return error;
116 }
117
118 if (ia_valid & ATTR_UID) 126 if (ia_valid & ATTR_UID)
119 inode->i_uid = attr->ia_uid; 127 inode->i_uid = attr->ia_uid;
120 if (ia_valid & ATTR_GID) 128 if (ia_valid & ATTR_GID)
@@ -135,6 +143,28 @@ int inode_setattr(struct inode * inode, struct iattr * attr)
135 mode &= ~S_ISGID; 143 mode &= ~S_ISGID;
136 inode->i_mode = mode; 144 inode->i_mode = mode;
137 } 145 }
146}
147EXPORT_SYMBOL(generic_setattr);
148
149/*
150 * note this function is deprecated, the new truncate sequence should be
151 * used instead -- see eg. simple_setsize, generic_setattr.
152 */
153int inode_setattr(struct inode *inode, const struct iattr *attr)
154{
155 unsigned int ia_valid = attr->ia_valid;
156
157 if (ia_valid & ATTR_SIZE &&
158 attr->ia_size != i_size_read(inode)) {
159 int error;
160
161 error = vmtruncate(inode, attr->ia_size);
162 if (error)
163 return error;
164 }
165
166 generic_setattr(inode, attr);
167
138 mark_inode_dirty(inode); 168 mark_inode_dirty(inode);
139 169
140 return 0; 170 return 0;
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 8713c7cfbc79..9a0520b50663 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -28,6 +28,7 @@ static int autofs_root_mkdir(struct inode *,struct dentry *,int);
28static int autofs_root_ioctl(struct inode *, struct file *,unsigned int,unsigned long); 28static int autofs_root_ioctl(struct inode *, struct file *,unsigned int,unsigned long);
29 29
30const struct file_operations autofs_root_operations = { 30const struct file_operations autofs_root_operations = {
31 .llseek = generic_file_llseek,
31 .read = generic_read_dir, 32 .read = generic_read_dir,
32 .readdir = autofs_root_readdir, 33 .readdir = autofs_root_readdir,
33 .ioctl = autofs_root_ioctl, 34 .ioctl = autofs_root_ioctl,
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index d832062869f6..ba4a38b9c22f 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
95 */ 95 */
96static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in) 96static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
97{ 97{
98 struct autofs_dev_ioctl tmp, *ads; 98 struct autofs_dev_ioctl tmp;
99 99
100 if (copy_from_user(&tmp, in, sizeof(tmp))) 100 if (copy_from_user(&tmp, in, sizeof(tmp)))
101 return ERR_PTR(-EFAULT); 101 return ERR_PTR(-EFAULT);
@@ -103,16 +103,7 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i
103 if (tmp.size < sizeof(tmp)) 103 if (tmp.size < sizeof(tmp))
104 return ERR_PTR(-EINVAL); 104 return ERR_PTR(-EINVAL);
105 105
106 ads = kmalloc(tmp.size, GFP_KERNEL); 106 return memdup_user(in, tmp.size);
107 if (!ads)
108 return ERR_PTR(-ENOMEM);
109
110 if (copy_from_user(ads, in, tmp.size)) {
111 kfree(ads);
112 return ERR_PTR(-EFAULT);
113 }
114
115 return ads;
116} 107}
117 108
118static inline void free_dev_ioctl(struct autofs_dev_ioctl *param) 109static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index a05287a23f62..52e59bf4aa5f 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -93,8 +93,7 @@ static int bad_file_release(struct inode *inode, struct file *filp)
93 return -EIO; 93 return -EIO;
94} 94}
95 95
96static int bad_file_fsync(struct file *file, struct dentry *dentry, 96static int bad_file_fsync(struct file *file, int datasync)
97 int datasync)
98{ 97{
99 return -EIO; 98 return -EIO;
100} 99}
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 8f73841fc974..d967e052b779 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -78,7 +78,7 @@ static int bfs_readdir(struct file *f, void *dirent, filldir_t filldir)
78const struct file_operations bfs_dir_operations = { 78const struct file_operations bfs_dir_operations = {
79 .read = generic_read_dir, 79 .read = generic_read_dir,
80 .readdir = bfs_readdir, 80 .readdir = bfs_readdir,
81 .fsync = simple_fsync, 81 .fsync = generic_file_fsync,
82 .llseek = generic_file_llseek, 82 .llseek = generic_file_llseek,
83}; 83};
84 84
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 26e5f5026620..7346c96308a5 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -172,8 +172,9 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
172 struct file *file = iocb->ki_filp; 172 struct file *file = iocb->ki_filp;
173 struct inode *inode = file->f_mapping->host; 173 struct inode *inode = file->f_mapping->host;
174 174
175 return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode), 175 return blockdev_direct_IO_no_locking_newtrunc(rw, iocb, inode,
176 iov, offset, nr_segs, blkdev_get_blocks, NULL); 176 I_BDEV(inode), iov, offset, nr_segs,
177 blkdev_get_blocks, NULL);
177} 178}
178 179
179int __sync_blockdev(struct block_device *bdev, int wait) 180int __sync_blockdev(struct block_device *bdev, int wait)
@@ -309,8 +310,8 @@ static int blkdev_write_begin(struct file *file, struct address_space *mapping,
309 struct page **pagep, void **fsdata) 310 struct page **pagep, void **fsdata)
310{ 311{
311 *pagep = NULL; 312 *pagep = NULL;
312 return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 313 return block_write_begin_newtrunc(file, mapping, pos, len, flags,
313 blkdev_get_block); 314 pagep, fsdata, blkdev_get_block);
314} 315}
315 316
316static int blkdev_write_end(struct file *file, struct address_space *mapping, 317static int blkdev_write_end(struct file *file, struct address_space *mapping,
@@ -358,12 +359,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
358 return retval; 359 return retval;
359} 360}
360 361
361/* 362int blkdev_fsync(struct file *filp, int datasync)
362 * Filp is never NULL; the only case when ->fsync() is called with
363 * NULL first argument is nfsd_sync_dir() and that's not a directory.
364 */
365
366int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync)
367{ 363{
368 struct inode *bd_inode = filp->f_mapping->host; 364 struct inode *bd_inode = filp->f_mapping->host;
369 struct block_device *bdev = I_BDEV(bd_inode); 365 struct block_device *bdev = I_BDEV(bd_inode);
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 462859a30141..7ec14097fef1 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -377,6 +377,7 @@ again:
377 if (!list_empty(&worker->pending) || 377 if (!list_empty(&worker->pending) ||
378 !list_empty(&worker->prio_pending)) { 378 !list_empty(&worker->prio_pending)) {
379 spin_unlock_irq(&worker->lock); 379 spin_unlock_irq(&worker->lock);
380 set_current_state(TASK_RUNNING);
380 goto again; 381 goto again;
381 } 382 }
382 383
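The added set_current_state(TASK_RUNNING) matters because the worker has already marked itself TASK_INTERRUPTIBLE before re-checking its pending lists; if it decides to loop and process more work instead of scheduling, it has to leave the sleeping state first. A standalone sketch of the pattern, with example_* names that are illustrative only:

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static bool example_more_work;		/* stands in for the pending-list checks */

	static int example_worker(void *unused)
	{
		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (example_more_work) {
				/* looping without sleeping: restore TASK_RUNNING first */
				set_current_state(TASK_RUNNING);
				continue;
			}
			schedule();
			__set_current_state(TASK_RUNNING);
		}
		return 0;
	}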
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 7a4dee199832..6ad63f17eca0 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -137,8 +137,8 @@ struct btrfs_inode {
137 * of extent items we've reserved metadata for. 137 * of extent items we've reserved metadata for.
138 */ 138 */
139 spinlock_t accounting_lock; 139 spinlock_t accounting_lock;
140 atomic_t outstanding_extents;
140 int reserved_extents; 141 int reserved_extents;
141 int outstanding_extents;
142 142
143 /* 143 /*
144 * ordered_data_close is set by truncate when a file that used 144 * ordered_data_close is set by truncate when a file that used
@@ -151,6 +151,7 @@ struct btrfs_inode {
151 * of these. 151 * of these.
152 */ 152 */
153 unsigned ordered_data_close:1; 153 unsigned ordered_data_close:1;
154 unsigned orphan_meta_reserved:1;
154 unsigned dummy_inode:1; 155 unsigned dummy_inode:1;
155 156
156 /* 157 /*
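outstanding_extents changes from a plain int sitting next to accounting_lock into an atomic_t, presumably so the count can be adjusted without holding that spinlock. A generic sketch of the atomic counter pattern; example_counter is illustrative, not btrfs code:

	#include <linux/types.h>
	#include <asm/atomic.h>

	struct example_counter {
		atomic_t outstanding;		/* plays the role of outstanding_extents */
	};

	static void example_get(struct example_counter *c)
	{
		atomic_inc(&c->outstanding);	/* no lock needed */
	}

	static bool example_put_last(struct example_counter *c)
	{
		/* atomic_dec_return() yields the post-decrement value */
		return atomic_dec_return(&c->outstanding) == 0;
	}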
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 6795a713b205..0d1d966b0fe4 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -280,7 +280,8 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
280static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, 280static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
281 struct btrfs_root *root, 281 struct btrfs_root *root,
282 struct extent_buffer *buf, 282 struct extent_buffer *buf,
283 struct extent_buffer *cow) 283 struct extent_buffer *cow,
284 int *last_ref)
284{ 285{
285 u64 refs; 286 u64 refs;
286 u64 owner; 287 u64 owner;
@@ -366,6 +367,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
366 BUG_ON(ret); 367 BUG_ON(ret);
367 } 368 }
368 clean_tree_block(trans, root, buf); 369 clean_tree_block(trans, root, buf);
370 *last_ref = 1;
369 } 371 }
370 return 0; 372 return 0;
371} 373}
@@ -392,6 +394,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
392 struct btrfs_disk_key disk_key; 394 struct btrfs_disk_key disk_key;
393 struct extent_buffer *cow; 395 struct extent_buffer *cow;
394 int level; 396 int level;
397 int last_ref = 0;
395 int unlock_orig = 0; 398 int unlock_orig = 0;
396 u64 parent_start; 399 u64 parent_start;
397 400
@@ -442,7 +445,10 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
442 (unsigned long)btrfs_header_fsid(cow), 445 (unsigned long)btrfs_header_fsid(cow),
443 BTRFS_FSID_SIZE); 446 BTRFS_FSID_SIZE);
444 447
445 update_ref_for_cow(trans, root, buf, cow); 448 update_ref_for_cow(trans, root, buf, cow, &last_ref);
449
450 if (root->ref_cows)
451 btrfs_reloc_cow_block(trans, root, buf, cow);
446 452
447 if (buf == root->node) { 453 if (buf == root->node) {
448 WARN_ON(parent && parent != buf); 454 WARN_ON(parent && parent != buf);
@@ -457,8 +463,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
457 extent_buffer_get(cow); 463 extent_buffer_get(cow);
458 spin_unlock(&root->node_lock); 464 spin_unlock(&root->node_lock);
459 465
460 btrfs_free_tree_block(trans, root, buf->start, buf->len, 466 btrfs_free_tree_block(trans, root, buf, parent_start,
461 parent_start, root->root_key.objectid, level); 467 last_ref);
462 free_extent_buffer(buf); 468 free_extent_buffer(buf);
463 add_root_to_dirty_list(root); 469 add_root_to_dirty_list(root);
464 } else { 470 } else {
@@ -473,8 +479,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
473 btrfs_set_node_ptr_generation(parent, parent_slot, 479 btrfs_set_node_ptr_generation(parent, parent_slot,
474 trans->transid); 480 trans->transid);
475 btrfs_mark_buffer_dirty(parent); 481 btrfs_mark_buffer_dirty(parent);
476 btrfs_free_tree_block(trans, root, buf->start, buf->len, 482 btrfs_free_tree_block(trans, root, buf, parent_start,
477 parent_start, root->root_key.objectid, level); 483 last_ref);
478 } 484 }
479 if (unlock_orig) 485 if (unlock_orig)
480 btrfs_tree_unlock(buf); 486 btrfs_tree_unlock(buf);
@@ -949,6 +955,22 @@ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
949 return bin_search(eb, key, level, slot); 955 return bin_search(eb, key, level, slot);
950} 956}
951 957
958static void root_add_used(struct btrfs_root *root, u32 size)
959{
960 spin_lock(&root->accounting_lock);
961 btrfs_set_root_used(&root->root_item,
962 btrfs_root_used(&root->root_item) + size);
963 spin_unlock(&root->accounting_lock);
964}
965
966static void root_sub_used(struct btrfs_root *root, u32 size)
967{
968 spin_lock(&root->accounting_lock);
969 btrfs_set_root_used(&root->root_item,
970 btrfs_root_used(&root->root_item) - size);
971 spin_unlock(&root->accounting_lock);
972}
973
952/* given a node and slot number, this reads the blocks it points to. The 974/* given a node and slot number, this reads the blocks it points to. The
953 * extent buffer is returned with a reference taken (but unlocked). 975 * extent buffer is returned with a reference taken (but unlocked).
954 * NULL is returned on error. 976 * NULL is returned on error.
@@ -1019,7 +1041,11 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1019 btrfs_tree_lock(child); 1041 btrfs_tree_lock(child);
1020 btrfs_set_lock_blocking(child); 1042 btrfs_set_lock_blocking(child);
1021 ret = btrfs_cow_block(trans, root, child, mid, 0, &child); 1043 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1022 BUG_ON(ret); 1044 if (ret) {
1045 btrfs_tree_unlock(child);
1046 free_extent_buffer(child);
1047 goto enospc;
1048 }
1023 1049
1024 spin_lock(&root->node_lock); 1050 spin_lock(&root->node_lock);
1025 root->node = child; 1051 root->node = child;
@@ -1034,11 +1060,12 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1034 btrfs_tree_unlock(mid); 1060 btrfs_tree_unlock(mid);
1035 /* once for the path */ 1061 /* once for the path */
1036 free_extent_buffer(mid); 1062 free_extent_buffer(mid);
1037 ret = btrfs_free_tree_block(trans, root, mid->start, mid->len, 1063
1038 0, root->root_key.objectid, level); 1064 root_sub_used(root, mid->len);
1065 btrfs_free_tree_block(trans, root, mid, 0, 1);
1039 /* once for the root ptr */ 1066 /* once for the root ptr */
1040 free_extent_buffer(mid); 1067 free_extent_buffer(mid);
1041 return ret; 1068 return 0;
1042 } 1069 }
1043 if (btrfs_header_nritems(mid) > 1070 if (btrfs_header_nritems(mid) >
1044 BTRFS_NODEPTRS_PER_BLOCK(root) / 4) 1071 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
@@ -1088,23 +1115,16 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1088 if (wret < 0 && wret != -ENOSPC) 1115 if (wret < 0 && wret != -ENOSPC)
1089 ret = wret; 1116 ret = wret;
1090 if (btrfs_header_nritems(right) == 0) { 1117 if (btrfs_header_nritems(right) == 0) {
1091 u64 bytenr = right->start;
1092 u32 blocksize = right->len;
1093
1094 clean_tree_block(trans, root, right); 1118 clean_tree_block(trans, root, right);
1095 btrfs_tree_unlock(right); 1119 btrfs_tree_unlock(right);
1096 free_extent_buffer(right);
1097 right = NULL;
1098 wret = del_ptr(trans, root, path, level + 1, pslot + 1120 wret = del_ptr(trans, root, path, level + 1, pslot +
1099 1); 1121 1);
1100 if (wret) 1122 if (wret)
1101 ret = wret; 1123 ret = wret;
1102 wret = btrfs_free_tree_block(trans, root, 1124 root_sub_used(root, right->len);
1103 bytenr, blocksize, 0, 1125 btrfs_free_tree_block(trans, root, right, 0, 1);
1104 root->root_key.objectid, 1126 free_extent_buffer(right);
1105 level); 1127 right = NULL;
1106 if (wret)
1107 ret = wret;
1108 } else { 1128 } else {
1109 struct btrfs_disk_key right_key; 1129 struct btrfs_disk_key right_key;
1110 btrfs_node_key(right, &right_key, 0); 1130 btrfs_node_key(right, &right_key, 0);
@@ -1136,21 +1156,15 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1136 BUG_ON(wret == 1); 1156 BUG_ON(wret == 1);
1137 } 1157 }
1138 if (btrfs_header_nritems(mid) == 0) { 1158 if (btrfs_header_nritems(mid) == 0) {
1139 /* we've managed to empty the middle node, drop it */
1140 u64 bytenr = mid->start;
1141 u32 blocksize = mid->len;
1142
1143 clean_tree_block(trans, root, mid); 1159 clean_tree_block(trans, root, mid);
1144 btrfs_tree_unlock(mid); 1160 btrfs_tree_unlock(mid);
1145 free_extent_buffer(mid);
1146 mid = NULL;
1147 wret = del_ptr(trans, root, path, level + 1, pslot); 1161 wret = del_ptr(trans, root, path, level + 1, pslot);
1148 if (wret) 1162 if (wret)
1149 ret = wret; 1163 ret = wret;
1150 wret = btrfs_free_tree_block(trans, root, bytenr, blocksize, 1164 root_sub_used(root, mid->len);
1151 0, root->root_key.objectid, level); 1165 btrfs_free_tree_block(trans, root, mid, 0, 1);
1152 if (wret) 1166 free_extent_buffer(mid);
1153 ret = wret; 1167 mid = NULL;
1154 } else { 1168 } else {
1155 /* update the parent key to reflect our changes */ 1169 /* update the parent key to reflect our changes */
1156 struct btrfs_disk_key mid_key; 1170 struct btrfs_disk_key mid_key;
@@ -1590,7 +1604,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
1590 btrfs_release_path(NULL, p); 1604 btrfs_release_path(NULL, p);
1591 1605
1592 ret = -EAGAIN; 1606 ret = -EAGAIN;
1593 tmp = read_tree_block(root, blocknr, blocksize, gen); 1607 tmp = read_tree_block(root, blocknr, blocksize, 0);
1594 if (tmp) { 1608 if (tmp) {
1595 /* 1609 /*
1596 * If the read above didn't mark this buffer up to date, 1610 * If the read above didn't mark this buffer up to date,
@@ -1740,7 +1754,6 @@ again:
1740 p->nodes[level + 1], 1754 p->nodes[level + 1],
1741 p->slots[level + 1], &b); 1755 p->slots[level + 1], &b);
1742 if (err) { 1756 if (err) {
1743 free_extent_buffer(b);
1744 ret = err; 1757 ret = err;
1745 goto done; 1758 goto done;
1746 } 1759 }
@@ -2076,6 +2089,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2076 if (IS_ERR(c)) 2089 if (IS_ERR(c))
2077 return PTR_ERR(c); 2090 return PTR_ERR(c);
2078 2091
2092 root_add_used(root, root->nodesize);
2093
2079 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header)); 2094 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
2080 btrfs_set_header_nritems(c, 1); 2095 btrfs_set_header_nritems(c, 1);
2081 btrfs_set_header_level(c, level); 2096 btrfs_set_header_level(c, level);
@@ -2134,6 +2149,7 @@ static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
2134 int nritems; 2149 int nritems;
2135 2150
2136 BUG_ON(!path->nodes[level]); 2151 BUG_ON(!path->nodes[level]);
2152 btrfs_assert_tree_locked(path->nodes[level]);
2137 lower = path->nodes[level]; 2153 lower = path->nodes[level];
2138 nritems = btrfs_header_nritems(lower); 2154 nritems = btrfs_header_nritems(lower);
2139 BUG_ON(slot > nritems); 2155 BUG_ON(slot > nritems);
@@ -2202,6 +2218,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
2202 if (IS_ERR(split)) 2218 if (IS_ERR(split))
2203 return PTR_ERR(split); 2219 return PTR_ERR(split);
2204 2220
2221 root_add_used(root, root->nodesize);
2222
2205 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header)); 2223 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
2206 btrfs_set_header_level(split, btrfs_header_level(c)); 2224 btrfs_set_header_level(split, btrfs_header_level(c));
2207 btrfs_set_header_bytenr(split, split->start); 2225 btrfs_set_header_bytenr(split, split->start);
@@ -2415,6 +2433,9 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
2415 2433
2416 if (left_nritems) 2434 if (left_nritems)
2417 btrfs_mark_buffer_dirty(left); 2435 btrfs_mark_buffer_dirty(left);
2436 else
2437 clean_tree_block(trans, root, left);
2438
2418 btrfs_mark_buffer_dirty(right); 2439 btrfs_mark_buffer_dirty(right);
2419 2440
2420 btrfs_item_key(right, &disk_key, 0); 2441 btrfs_item_key(right, &disk_key, 0);
@@ -2660,6 +2681,8 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2660 btrfs_mark_buffer_dirty(left); 2681 btrfs_mark_buffer_dirty(left);
2661 if (right_nritems) 2682 if (right_nritems)
2662 btrfs_mark_buffer_dirty(right); 2683 btrfs_mark_buffer_dirty(right);
2684 else
2685 clean_tree_block(trans, root, right);
2663 2686
2664 btrfs_item_key(right, &disk_key, 0); 2687 btrfs_item_key(right, &disk_key, 0);
2665 wret = fixup_low_keys(trans, root, path, &disk_key, 1); 2688 wret = fixup_low_keys(trans, root, path, &disk_key, 1);
@@ -2669,8 +2692,6 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2669 /* then fixup the leaf pointer in the path */ 2692 /* then fixup the leaf pointer in the path */
2670 if (path->slots[0] < push_items) { 2693 if (path->slots[0] < push_items) {
2671 path->slots[0] += old_left_nritems; 2694 path->slots[0] += old_left_nritems;
2672 if (btrfs_header_nritems(path->nodes[0]) == 0)
2673 clean_tree_block(trans, root, path->nodes[0]);
2674 btrfs_tree_unlock(path->nodes[0]); 2695 btrfs_tree_unlock(path->nodes[0]);
2675 free_extent_buffer(path->nodes[0]); 2696 free_extent_buffer(path->nodes[0]);
2676 path->nodes[0] = left; 2697 path->nodes[0] = left;
@@ -2932,10 +2953,10 @@ again:
2932 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0, 2953 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
2933 root->root_key.objectid, 2954 root->root_key.objectid,
2934 &disk_key, 0, l->start, 0); 2955 &disk_key, 0, l->start, 0);
2935 if (IS_ERR(right)) { 2956 if (IS_ERR(right))
2936 BUG_ON(1);
2937 return PTR_ERR(right); 2957 return PTR_ERR(right);
2938 } 2958
2959 root_add_used(root, root->leafsize);
2939 2960
2940 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header)); 2961 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
2941 btrfs_set_header_bytenr(right, right->start); 2962 btrfs_set_header_bytenr(right, right->start);
@@ -3054,7 +3075,8 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3054 3075
3055 btrfs_set_path_blocking(path); 3076 btrfs_set_path_blocking(path);
3056 ret = split_leaf(trans, root, &key, path, ins_len, 1); 3077 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3057 BUG_ON(ret); 3078 if (ret)
3079 goto err;
3058 3080
3059 path->keep_locks = 0; 3081 path->keep_locks = 0;
3060 btrfs_unlock_up_safe(path, 1); 3082 btrfs_unlock_up_safe(path, 1);
@@ -3796,9 +3818,10 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3796 */ 3818 */
3797 btrfs_unlock_up_safe(path, 0); 3819 btrfs_unlock_up_safe(path, 0);
3798 3820
3799 ret = btrfs_free_tree_block(trans, root, leaf->start, leaf->len, 3821 root_sub_used(root, leaf->len);
3800 0, root->root_key.objectid, 0); 3822
3801 return ret; 3823 btrfs_free_tree_block(trans, root, leaf, 0, 1);
3824 return 0;
3802} 3825}
3803/* 3826/*
3804 * delete the item at the leaf level in path. If that empties 3827 * delete the item at the leaf level in path. If that empties
@@ -3865,6 +3888,8 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3865 if (leaf == root->node) { 3888 if (leaf == root->node) {
3866 btrfs_set_header_level(leaf, 0); 3889 btrfs_set_header_level(leaf, 0);
3867 } else { 3890 } else {
3891 btrfs_set_path_blocking(path);
3892 clean_tree_block(trans, root, leaf);
3868 ret = btrfs_del_leaf(trans, root, path, leaf); 3893 ret = btrfs_del_leaf(trans, root, path, leaf);
3869 BUG_ON(ret); 3894 BUG_ON(ret);
3870 } 3895 }
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 746a7248678e..29c20092847e 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -34,6 +34,7 @@
34 34
35struct btrfs_trans_handle; 35struct btrfs_trans_handle;
36struct btrfs_transaction; 36struct btrfs_transaction;
37struct btrfs_pending_snapshot;
37extern struct kmem_cache *btrfs_trans_handle_cachep; 38extern struct kmem_cache *btrfs_trans_handle_cachep;
38extern struct kmem_cache *btrfs_transaction_cachep; 39extern struct kmem_cache *btrfs_transaction_cachep;
39extern struct kmem_cache *btrfs_bit_radix_cachep; 40extern struct kmem_cache *btrfs_bit_radix_cachep;
@@ -663,6 +664,7 @@ struct btrfs_csum_item {
663#define BTRFS_BLOCK_GROUP_RAID1 (1 << 4) 664#define BTRFS_BLOCK_GROUP_RAID1 (1 << 4)
664#define BTRFS_BLOCK_GROUP_DUP (1 << 5) 665#define BTRFS_BLOCK_GROUP_DUP (1 << 5)
665#define BTRFS_BLOCK_GROUP_RAID10 (1 << 6) 666#define BTRFS_BLOCK_GROUP_RAID10 (1 << 6)
667#define BTRFS_NR_RAID_TYPES 5
666 668
667struct btrfs_block_group_item { 669struct btrfs_block_group_item {
668 __le64 used; 670 __le64 used;
@@ -674,42 +676,46 @@ struct btrfs_space_info {
674 u64 flags; 676 u64 flags;
675 677
676 u64 total_bytes; /* total bytes in the space */ 678 u64 total_bytes; /* total bytes in the space */
677 u64 bytes_used; /* total bytes used on disk */ 679 u64 bytes_used; /* total bytes used,
    680 this doesn't take mirrors into account */
678 u64 bytes_pinned; /* total bytes pinned, will be freed when the 681 u64 bytes_pinned; /* total bytes pinned, will be freed when the
679 transaction finishes */ 682 transaction finishes */
680 u64 bytes_reserved; /* total bytes the allocator has reserved for 683 u64 bytes_reserved; /* total bytes the allocator has reserved for
681 current allocations */ 684 current allocations */
682 u64 bytes_readonly; /* total bytes that are read only */ 685 u64 bytes_readonly; /* total bytes that are read only */
683 u64 bytes_super; /* total bytes reserved for the super blocks */ 686
684 u64 bytes_root; /* the number of bytes needed to commit a
685 transaction */
686 u64 bytes_may_use; /* number of bytes that may be used for 687 u64 bytes_may_use; /* number of bytes that may be used for
687 delalloc/allocations */ 688 delalloc/allocations */
688 u64 bytes_delalloc; /* number of bytes currently reserved for 689 u64 disk_used; /* total bytes used on disk */
689 delayed allocation */
690 690
691 int full; /* indicates that we cannot allocate any more 691 int full; /* indicates that we cannot allocate any more
692 chunks for this space */ 692 chunks for this space */
693 int force_alloc; /* set if we need to force a chunk alloc for 693 int force_alloc; /* set if we need to force a chunk alloc for
694 this space */ 694 this space */
695 int force_delalloc; /* make people start doing filemap_flush until
696 we're under a threshold */
697 695
698 struct list_head list; 696 struct list_head list;
699 697
700 /* for controlling how we free up space for allocations */
701 wait_queue_head_t allocate_wait;
702 wait_queue_head_t flush_wait;
703 int allocating_chunk;
704 int flushing;
705
706 /* for block groups in our same type */ 698 /* for block groups in our same type */
707 struct list_head block_groups; 699 struct list_head block_groups[BTRFS_NR_RAID_TYPES];
708 spinlock_t lock; 700 spinlock_t lock;
709 struct rw_semaphore groups_sem; 701 struct rw_semaphore groups_sem;
710 atomic_t caching_threads; 702 atomic_t caching_threads;
711}; 703};
712 704
705struct btrfs_block_rsv {
706 u64 size;
707 u64 reserved;
708 u64 freed[2];
709 struct btrfs_space_info *space_info;
710 struct list_head list;
711 spinlock_t lock;
712 atomic_t usage;
713 unsigned int priority:8;
714 unsigned int durable:1;
715 unsigned int refill_used:1;
716 unsigned int full:1;
717};
718
713/* 719/*
714 * free clusters are used to claim free space in relatively large chunks, 720 * free clusters are used to claim free space in relatively large chunks,
715 * allowing us to do less seeky writes. They are used for all metadata 721 * allowing us to do less seeky writes. They are used for all metadata
@@ -760,6 +766,7 @@ struct btrfs_block_group_cache {
760 spinlock_t lock; 766 spinlock_t lock;
761 u64 pinned; 767 u64 pinned;
762 u64 reserved; 768 u64 reserved;
769 u64 reserved_pinned;
763 u64 bytes_super; 770 u64 bytes_super;
764 u64 flags; 771 u64 flags;
765 u64 sectorsize; 772 u64 sectorsize;
@@ -825,6 +832,22 @@ struct btrfs_fs_info {
825 /* logical->physical extent mapping */ 832 /* logical->physical extent mapping */
826 struct btrfs_mapping_tree mapping_tree; 833 struct btrfs_mapping_tree mapping_tree;
827 834
835 /* block reservation for extent, checksum and root tree */
836 struct btrfs_block_rsv global_block_rsv;
837 /* block reservation for delay allocation */
838 struct btrfs_block_rsv delalloc_block_rsv;
839 /* block reservation for metadata operations */
840 struct btrfs_block_rsv trans_block_rsv;
841 /* block reservation for chunk tree */
842 struct btrfs_block_rsv chunk_block_rsv;
843
844 struct btrfs_block_rsv empty_block_rsv;
845
846 /* list of block reservations that cross multiple transactions */
847 struct list_head durable_block_rsv_list;
848
849 struct mutex durable_block_rsv_mutex;
850
828 u64 generation; 851 u64 generation;
829 u64 last_trans_committed; 852 u64 last_trans_committed;
830 853
@@ -927,7 +950,6 @@ struct btrfs_fs_info {
927 struct btrfs_workers endio_meta_write_workers; 950 struct btrfs_workers endio_meta_write_workers;
928 struct btrfs_workers endio_write_workers; 951 struct btrfs_workers endio_write_workers;
929 struct btrfs_workers submit_workers; 952 struct btrfs_workers submit_workers;
930 struct btrfs_workers enospc_workers;
931 /* 953 /*
932 * fixup workers take dirty pages that didn't properly go through 954 * fixup workers take dirty pages that didn't properly go through
933 * the cow mechanism and make them safe to write. It happens 955 * the cow mechanism and make them safe to write. It happens
@@ -943,6 +965,7 @@ struct btrfs_fs_info {
943 int do_barriers; 965 int do_barriers;
944 int closing; 966 int closing;
945 int log_root_recovering; 967 int log_root_recovering;
968 int enospc_unlink;
946 969
947 u64 total_pinned; 970 u64 total_pinned;
948 971
@@ -1012,6 +1035,9 @@ struct btrfs_root {
1012 struct completion kobj_unregister; 1035 struct completion kobj_unregister;
1013 struct mutex objectid_mutex; 1036 struct mutex objectid_mutex;
1014 1037
1038 spinlock_t accounting_lock;
1039 struct btrfs_block_rsv *block_rsv;
1040
1015 struct mutex log_mutex; 1041 struct mutex log_mutex;
1016 wait_queue_head_t log_writer_wait; 1042 wait_queue_head_t log_writer_wait;
1017 wait_queue_head_t log_commit_wait[2]; 1043 wait_queue_head_t log_commit_wait[2];
@@ -1043,7 +1069,6 @@ struct btrfs_root {
1043 int ref_cows; 1069 int ref_cows;
1044 int track_dirty; 1070 int track_dirty;
1045 int in_radix; 1071 int in_radix;
1046 int clean_orphans;
1047 1072
1048 u64 defrag_trans_start; 1073 u64 defrag_trans_start;
1049 struct btrfs_key defrag_progress; 1074 struct btrfs_key defrag_progress;
@@ -1057,8 +1082,11 @@ struct btrfs_root {
1057 1082
1058 struct list_head root_list; 1083 struct list_head root_list;
1059 1084
1060 spinlock_t list_lock; 1085 spinlock_t orphan_lock;
1061 struct list_head orphan_list; 1086 struct list_head orphan_list;
1087 struct btrfs_block_rsv *orphan_block_rsv;
1088 int orphan_item_inserted;
1089 int orphan_cleanup_state;
1062 1090
1063 spinlock_t inode_lock; 1091 spinlock_t inode_lock;
1064 /* red-black tree that keeps track of in-memory inodes */ 1092 /* red-black tree that keeps track of in-memory inodes */
@@ -1965,6 +1993,9 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
1965int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 1993int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
1966 struct btrfs_root *root, unsigned long count); 1994 struct btrfs_root *root, unsigned long count);
1967int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len); 1995int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len);
1996int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
1997 struct btrfs_root *root, u64 bytenr,
1998 u64 num_bytes, u64 *refs, u64 *flags);
1968int btrfs_pin_extent(struct btrfs_root *root, 1999int btrfs_pin_extent(struct btrfs_root *root,
1969 u64 bytenr, u64 num, int reserved); 2000 u64 bytenr, u64 num, int reserved);
1970int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, 2001int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
@@ -1984,10 +2015,10 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
1984 u64 parent, u64 root_objectid, 2015 u64 parent, u64 root_objectid,
1985 struct btrfs_disk_key *key, int level, 2016 struct btrfs_disk_key *key, int level,
1986 u64 hint, u64 empty_size); 2017 u64 hint, u64 empty_size);
1987int btrfs_free_tree_block(struct btrfs_trans_handle *trans, 2018void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
1988 struct btrfs_root *root, 2019 struct btrfs_root *root,
1989 u64 bytenr, u32 blocksize, 2020 struct extent_buffer *buf,
1990 u64 parent, u64 root_objectid, int level); 2021 u64 parent, int last_ref);
1991struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, 2022struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
1992 struct btrfs_root *root, 2023 struct btrfs_root *root,
1993 u64 bytenr, u32 blocksize, 2024 u64 bytenr, u32 blocksize,
@@ -2041,27 +2072,49 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
2041 u64 size); 2072 u64 size);
2042int btrfs_remove_block_group(struct btrfs_trans_handle *trans, 2073int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
2043 struct btrfs_root *root, u64 group_start); 2074 struct btrfs_root *root, u64 group_start);
2044int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
2045 struct btrfs_block_group_cache *group);
2046
2047u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags); 2075u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
2048void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde); 2076void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde);
2049void btrfs_clear_space_info_full(struct btrfs_fs_info *info); 2077void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
2050 2078int btrfs_check_data_free_space(struct inode *inode, u64 bytes);
2051int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items); 2079void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
2052int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items); 2080int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
2053int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root, 2081 struct btrfs_root *root,
2054 struct inode *inode, int num_items); 2082 int num_items, int *retries);
2055int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root, 2083void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
2056 struct inode *inode, int num_items); 2084 struct btrfs_root *root);
2057int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode, 2085int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
2058 u64 bytes); 2086 struct inode *inode);
2059void btrfs_free_reserved_data_space(struct btrfs_root *root, 2087void btrfs_orphan_release_metadata(struct inode *inode);
2060 struct inode *inode, u64 bytes); 2088int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
2061void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode, 2089 struct btrfs_pending_snapshot *pending);
2062 u64 bytes); 2090int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
2063void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode, 2091void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
2064 u64 bytes); 2092int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes);
2093void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes);
2094void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv);
2095struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root);
2096void btrfs_free_block_rsv(struct btrfs_root *root,
2097 struct btrfs_block_rsv *rsv);
2098void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
2099 struct btrfs_block_rsv *rsv);
2100int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
2101 struct btrfs_root *root,
2102 struct btrfs_block_rsv *block_rsv,
2103 u64 num_bytes, int *retries);
2104int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
2105 struct btrfs_root *root,
2106 struct btrfs_block_rsv *block_rsv,
2107 u64 min_reserved, int min_factor);
2108int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
2109 struct btrfs_block_rsv *dst_rsv,
2110 u64 num_bytes);
2111void btrfs_block_rsv_release(struct btrfs_root *root,
2112 struct btrfs_block_rsv *block_rsv,
2113 u64 num_bytes);
2114int btrfs_set_block_group_ro(struct btrfs_root *root,
2115 struct btrfs_block_group_cache *cache);
2116int btrfs_set_block_group_rw(struct btrfs_root *root,
2117 struct btrfs_block_group_cache *cache);
2065/* ctree.c */ 2118/* ctree.c */
2066int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, 2119int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
2067 int level, int *slot); 2120 int level, int *slot);
@@ -2152,7 +2205,8 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
2152int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); 2205int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
2153int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); 2206int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
2154int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); 2207int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
2155int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref); 2208int btrfs_drop_snapshot(struct btrfs_root *root,
2209 struct btrfs_block_rsv *block_rsv, int update_ref);
2156int btrfs_drop_subtree(struct btrfs_trans_handle *trans, 2210int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
2157 struct btrfs_root *root, 2211 struct btrfs_root *root,
2158 struct extent_buffer *node, 2212 struct extent_buffer *node,
@@ -2245,6 +2299,12 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
2245 struct btrfs_root *root, 2299 struct btrfs_root *root,
2246 const char *name, int name_len, 2300 const char *name, int name_len,
2247 u64 inode_objectid, u64 ref_objectid, u64 *index); 2301 u64 inode_objectid, u64 ref_objectid, u64 *index);
2302struct btrfs_inode_ref *
2303btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans,
2304 struct btrfs_root *root,
2305 struct btrfs_path *path,
2306 const char *name, int name_len,
2307 u64 inode_objectid, u64 ref_objectid, int mod);
2248int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans, 2308int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
2249 struct btrfs_root *root, 2309 struct btrfs_root *root,
2250 struct btrfs_path *path, u64 objectid); 2310 struct btrfs_path *path, u64 objectid);
@@ -2257,6 +2317,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
2257 struct btrfs_root *root, u64 bytenr, u64 len); 2317 struct btrfs_root *root, u64 bytenr, u64 len);
2258int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, 2318int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
2259 struct bio *bio, u32 *dst); 2319 struct bio *bio, u32 *dst);
2320int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
2321 struct bio *bio, u64 logical_offset, u32 *dst);
2260int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, 2322int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
2261 struct btrfs_root *root, 2323 struct btrfs_root *root,
2262 u64 objectid, u64 pos, 2324 u64 objectid, u64 pos,
@@ -2311,6 +2373,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2311 u32 min_type); 2373 u32 min_type);
2312 2374
2313int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); 2375int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
2376int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput);
2314int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, 2377int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
2315 struct extent_state **cached_state); 2378 struct extent_state **cached_state);
2316int btrfs_writepages(struct address_space *mapping, 2379int btrfs_writepages(struct address_space *mapping,
@@ -2349,10 +2412,20 @@ int btrfs_update_inode(struct btrfs_trans_handle *trans,
2349int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); 2412int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
2350int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode); 2413int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
2351void btrfs_orphan_cleanup(struct btrfs_root *root); 2414void btrfs_orphan_cleanup(struct btrfs_root *root);
2415void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
2416 struct btrfs_pending_snapshot *pending,
2417 u64 *bytes_to_reserve);
2418void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
2419 struct btrfs_pending_snapshot *pending);
2420void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2421 struct btrfs_root *root);
2352int btrfs_cont_expand(struct inode *inode, loff_t size); 2422int btrfs_cont_expand(struct inode *inode, loff_t size);
2353int btrfs_invalidate_inodes(struct btrfs_root *root); 2423int btrfs_invalidate_inodes(struct btrfs_root *root);
2354void btrfs_add_delayed_iput(struct inode *inode); 2424void btrfs_add_delayed_iput(struct inode *inode);
2355void btrfs_run_delayed_iputs(struct btrfs_root *root); 2425void btrfs_run_delayed_iputs(struct btrfs_root *root);
2426int btrfs_prealloc_file_range(struct inode *inode, int mode,
2427 u64 start, u64 num_bytes, u64 min_size,
2428 loff_t actual_len, u64 *alloc_hint);
2356extern const struct dentry_operations btrfs_dentry_operations; 2429extern const struct dentry_operations btrfs_dentry_operations;
2357 2430
2358/* ioctl.c */ 2431/* ioctl.c */
@@ -2361,7 +2434,7 @@ void btrfs_update_iflags(struct inode *inode);
2361void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); 2434void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
2362 2435
2363/* file.c */ 2436/* file.c */
2364int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync); 2437int btrfs_sync_file(struct file *file, int datasync);
2365int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, 2438int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
2366 int skip_pinned); 2439 int skip_pinned);
2367int btrfs_check_file(struct btrfs_root *root, struct inode *inode); 2440int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
@@ -2409,4 +2482,12 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
2409 struct btrfs_root *root); 2482 struct btrfs_root *root);
2410int btrfs_recover_relocation(struct btrfs_root *root); 2483int btrfs_recover_relocation(struct btrfs_root *root);
2411int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len); 2484int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
2485void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
2486 struct btrfs_root *root, struct extent_buffer *buf,
2487 struct extent_buffer *cow);
2488void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
2489 struct btrfs_pending_snapshot *pending,
2490 u64 *bytes_to_reserve);
2491void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
2492 struct btrfs_pending_snapshot *pending);
2412#endif 2493#endif
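The new struct btrfs_block_rsv and the reservation helpers declared above carry the reworked space reservation code in this merge (the older btrfs_reserve_metadata_space()/enospc machinery is removed elsewhere in the diff). A sketch of the basic reserve/use/release lifecycle using only the prototypes added in this header; the example_reserve_and_use() wrapper and its error handling are assumptions for illustration, not taken from the btrfs sources:

	#include "ctree.h"	/* as if placed inside fs/btrfs/ */

	static int example_reserve_and_use(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root, u64 bytes)
	{
		struct btrfs_block_rsv *rsv;
		int retries = 0;
		int ret;

		rsv = btrfs_alloc_block_rsv(root);
		if (!rsv)
			return -ENOMEM;

		ret = btrfs_block_rsv_add(trans, root, rsv, bytes, &retries);
		if (ret)
			goto out;

		/* ... allocate metadata blocks against 'rsv' here ... */

		btrfs_block_rsv_release(root, rsv, bytes);
	out:
		btrfs_free_block_rsv(root, rsv);
		return ret;
	}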
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 902ce507c4e3..e807b143b857 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -319,107 +319,6 @@ out:
319} 319}
320 320
321/* 321/*
322 * helper function to lookup reference count and flags of extent.
323 *
324 * the head node for delayed ref is used to store the sum of all the
325 * reference count modifications queued up in the rbtree. the head
326 * node may also store the extent flags to set. This way you can check
327 * to see what the reference count and extent flags would be if all of
328 * the delayed refs are not processed.
329 */
330int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
331 struct btrfs_root *root, u64 bytenr,
332 u64 num_bytes, u64 *refs, u64 *flags)
333{
334 struct btrfs_delayed_ref_node *ref;
335 struct btrfs_delayed_ref_head *head;
336 struct btrfs_delayed_ref_root *delayed_refs;
337 struct btrfs_path *path;
338 struct btrfs_extent_item *ei;
339 struct extent_buffer *leaf;
340 struct btrfs_key key;
341 u32 item_size;
342 u64 num_refs;
343 u64 extent_flags;
344 int ret;
345
346 path = btrfs_alloc_path();
347 if (!path)
348 return -ENOMEM;
349
350 key.objectid = bytenr;
351 key.type = BTRFS_EXTENT_ITEM_KEY;
352 key.offset = num_bytes;
353 delayed_refs = &trans->transaction->delayed_refs;
354again:
355 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
356 &key, path, 0, 0);
357 if (ret < 0)
358 goto out;
359
360 if (ret == 0) {
361 leaf = path->nodes[0];
362 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
363 if (item_size >= sizeof(*ei)) {
364 ei = btrfs_item_ptr(leaf, path->slots[0],
365 struct btrfs_extent_item);
366 num_refs = btrfs_extent_refs(leaf, ei);
367 extent_flags = btrfs_extent_flags(leaf, ei);
368 } else {
369#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
370 struct btrfs_extent_item_v0 *ei0;
371 BUG_ON(item_size != sizeof(*ei0));
372 ei0 = btrfs_item_ptr(leaf, path->slots[0],
373 struct btrfs_extent_item_v0);
374 num_refs = btrfs_extent_refs_v0(leaf, ei0);
375 /* FIXME: this isn't correct for data */
376 extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
377#else
378 BUG();
379#endif
380 }
381 BUG_ON(num_refs == 0);
382 } else {
383 num_refs = 0;
384 extent_flags = 0;
385 ret = 0;
386 }
387
388 spin_lock(&delayed_refs->lock);
389 ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
390 if (ref) {
391 head = btrfs_delayed_node_to_head(ref);
392 if (!mutex_trylock(&head->mutex)) {
393 atomic_inc(&ref->refs);
394 spin_unlock(&delayed_refs->lock);
395
396 btrfs_release_path(root->fs_info->extent_root, path);
397
398 mutex_lock(&head->mutex);
399 mutex_unlock(&head->mutex);
400 btrfs_put_delayed_ref(ref);
401 goto again;
402 }
403 if (head->extent_op && head->extent_op->update_flags)
404 extent_flags |= head->extent_op->flags_to_set;
405 else
406 BUG_ON(num_refs == 0);
407
408 num_refs += ref->ref_mod;
409 mutex_unlock(&head->mutex);
410 }
411 WARN_ON(num_refs == 0);
412 if (refs)
413 *refs = num_refs;
414 if (flags)
415 *flags = extent_flags;
416out:
417 spin_unlock(&delayed_refs->lock);
418 btrfs_free_path(path);
419 return ret;
420}
421
422/*
423 * helper function to update an extent delayed ref in the 322 * helper function to update an extent delayed ref in the
424 * rbtree. existing and update must both have the same 323 * rbtree. existing and update must both have the same
425 * bytenr and parent 324 * bytenr and parent
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index f6fc67ddad36..50e3cf92fbda 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -167,9 +167,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
167struct btrfs_delayed_ref_head * 167struct btrfs_delayed_ref_head *
168btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); 168btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
169int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr); 169int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr);
170int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
171 struct btrfs_root *root, u64 bytenr,
172 u64 num_bytes, u64 *refs, u64 *flags);
173int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans, 170int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
174 u64 bytenr, u64 num_bytes, u64 orig_parent, 171 u64 bytenr, u64 num_bytes, u64 orig_parent,
175 u64 parent, u64 orig_ref_root, u64 ref_root, 172 u64 parent, u64 orig_ref_root, u64 ref_root,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index feca04197d02..f3b287c22caf 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -74,6 +74,11 @@ struct async_submit_bio {
74 int rw; 74 int rw;
75 int mirror_num; 75 int mirror_num;
76 unsigned long bio_flags; 76 unsigned long bio_flags;
77 /*
78 * bio_offset is optional, can be used if the pages in the bio
79 * can't tell us where in the file the bio should go
80 */
81 u64 bio_offset;
77 struct btrfs_work work; 82 struct btrfs_work work;
78}; 83};
79 84
@@ -534,7 +539,8 @@ static void run_one_async_start(struct btrfs_work *work)
534 async = container_of(work, struct async_submit_bio, work); 539 async = container_of(work, struct async_submit_bio, work);
535 fs_info = BTRFS_I(async->inode)->root->fs_info; 540 fs_info = BTRFS_I(async->inode)->root->fs_info;
536 async->submit_bio_start(async->inode, async->rw, async->bio, 541 async->submit_bio_start(async->inode, async->rw, async->bio,
537 async->mirror_num, async->bio_flags); 542 async->mirror_num, async->bio_flags,
543 async->bio_offset);
538} 544}
539 545
540static void run_one_async_done(struct btrfs_work *work) 546static void run_one_async_done(struct btrfs_work *work)
@@ -556,7 +562,8 @@ static void run_one_async_done(struct btrfs_work *work)
556 wake_up(&fs_info->async_submit_wait); 562 wake_up(&fs_info->async_submit_wait);
557 563
558 async->submit_bio_done(async->inode, async->rw, async->bio, 564 async->submit_bio_done(async->inode, async->rw, async->bio,
559 async->mirror_num, async->bio_flags); 565 async->mirror_num, async->bio_flags,
566 async->bio_offset);
560} 567}
561 568
562static void run_one_async_free(struct btrfs_work *work) 569static void run_one_async_free(struct btrfs_work *work)
@@ -570,6 +577,7 @@ static void run_one_async_free(struct btrfs_work *work)
570int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, 577int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
571 int rw, struct bio *bio, int mirror_num, 578 int rw, struct bio *bio, int mirror_num,
572 unsigned long bio_flags, 579 unsigned long bio_flags,
580 u64 bio_offset,
573 extent_submit_bio_hook_t *submit_bio_start, 581 extent_submit_bio_hook_t *submit_bio_start,
574 extent_submit_bio_hook_t *submit_bio_done) 582 extent_submit_bio_hook_t *submit_bio_done)
575{ 583{
@@ -592,6 +600,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
592 600
593 async->work.flags = 0; 601 async->work.flags = 0;
594 async->bio_flags = bio_flags; 602 async->bio_flags = bio_flags;
603 async->bio_offset = bio_offset;
595 604
596 atomic_inc(&fs_info->nr_async_submits); 605 atomic_inc(&fs_info->nr_async_submits);
597 606
@@ -627,7 +636,8 @@ static int btree_csum_one_bio(struct bio *bio)
627 636
628static int __btree_submit_bio_start(struct inode *inode, int rw, 637static int __btree_submit_bio_start(struct inode *inode, int rw,
629 struct bio *bio, int mirror_num, 638 struct bio *bio, int mirror_num,
630 unsigned long bio_flags) 639 unsigned long bio_flags,
640 u64 bio_offset)
631{ 641{
632 /* 642 /*
633 * when we're called for a write, we're already in the async 643 * when we're called for a write, we're already in the async
@@ -638,7 +648,8 @@ static int __btree_submit_bio_start(struct inode *inode, int rw,
638} 648}
639 649
640static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, 650static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
641 int mirror_num, unsigned long bio_flags) 651 int mirror_num, unsigned long bio_flags,
652 u64 bio_offset)
642{ 653{
643 /* 654 /*
644 * when we're called for a write, we're already in the async 655 * when we're called for a write, we're already in the async
@@ -648,7 +659,8 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
648} 659}
649 660
650static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, 661static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
651 int mirror_num, unsigned long bio_flags) 662 int mirror_num, unsigned long bio_flags,
663 u64 bio_offset)
652{ 664{
653 int ret; 665 int ret;
654 666
@@ -671,6 +683,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
671 */ 683 */
672 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, 684 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
673 inode, rw, bio, mirror_num, 0, 685 inode, rw, bio, mirror_num, 0,
686 bio_offset,
674 __btree_submit_bio_start, 687 __btree_submit_bio_start,
675 __btree_submit_bio_done); 688 __btree_submit_bio_done);
676} 689}
@@ -894,7 +907,8 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
894 root->ref_cows = 0; 907 root->ref_cows = 0;
895 root->track_dirty = 0; 908 root->track_dirty = 0;
896 root->in_radix = 0; 909 root->in_radix = 0;
897 root->clean_orphans = 0; 910 root->orphan_item_inserted = 0;
911 root->orphan_cleanup_state = 0;
898 912
899 root->fs_info = fs_info; 913 root->fs_info = fs_info;
900 root->objectid = objectid; 914 root->objectid = objectid;
@@ -903,13 +917,16 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
903 root->name = NULL; 917 root->name = NULL;
904 root->in_sysfs = 0; 918 root->in_sysfs = 0;
905 root->inode_tree = RB_ROOT; 919 root->inode_tree = RB_ROOT;
920 root->block_rsv = NULL;
921 root->orphan_block_rsv = NULL;
906 922
907 INIT_LIST_HEAD(&root->dirty_list); 923 INIT_LIST_HEAD(&root->dirty_list);
908 INIT_LIST_HEAD(&root->orphan_list); 924 INIT_LIST_HEAD(&root->orphan_list);
909 INIT_LIST_HEAD(&root->root_list); 925 INIT_LIST_HEAD(&root->root_list);
910 spin_lock_init(&root->node_lock); 926 spin_lock_init(&root->node_lock);
911 spin_lock_init(&root->list_lock); 927 spin_lock_init(&root->orphan_lock);
912 spin_lock_init(&root->inode_lock); 928 spin_lock_init(&root->inode_lock);
929 spin_lock_init(&root->accounting_lock);
913 mutex_init(&root->objectid_mutex); 930 mutex_init(&root->objectid_mutex);
914 mutex_init(&root->log_mutex); 931 mutex_init(&root->log_mutex);
915 init_waitqueue_head(&root->log_writer_wait); 932 init_waitqueue_head(&root->log_writer_wait);
@@ -968,42 +985,6 @@ static int find_and_setup_root(struct btrfs_root *tree_root,
968 return 0; 985 return 0;
969} 986}
970 987
971int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
972 struct btrfs_fs_info *fs_info)
973{
974 struct extent_buffer *eb;
975 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
976 u64 start = 0;
977 u64 end = 0;
978 int ret;
979
980 if (!log_root_tree)
981 return 0;
982
983 while (1) {
984 ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
985 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
986 if (ret)
987 break;
988
989 clear_extent_bits(&log_root_tree->dirty_log_pages, start, end,
990 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
991 }
992 eb = fs_info->log_root_tree->node;
993
994 WARN_ON(btrfs_header_level(eb) != 0);
995 WARN_ON(btrfs_header_nritems(eb) != 0);
996
997 ret = btrfs_free_reserved_extent(fs_info->tree_root,
998 eb->start, eb->len);
999 BUG_ON(ret);
1000
1001 free_extent_buffer(eb);
1002 kfree(fs_info->log_root_tree);
1003 fs_info->log_root_tree = NULL;
1004 return 0;
1005}
1006
1007static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans, 988static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1008 struct btrfs_fs_info *fs_info) 989 struct btrfs_fs_info *fs_info)
1009{ 990{
@@ -1191,19 +1172,23 @@ again:
1191 if (root) 1172 if (root)
1192 return root; 1173 return root;
1193 1174
1194 ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1195 if (ret == 0)
1196 ret = -ENOENT;
1197 if (ret < 0)
1198 return ERR_PTR(ret);
1199
1200 root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location); 1175 root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1201 if (IS_ERR(root)) 1176 if (IS_ERR(root))
1202 return root; 1177 return root;
1203 1178
1204 WARN_ON(btrfs_root_refs(&root->root_item) == 0);
1205 set_anon_super(&root->anon_super, NULL); 1179 set_anon_super(&root->anon_super, NULL);
1206 1180
1181 if (btrfs_root_refs(&root->root_item) == 0) {
1182 ret = -ENOENT;
1183 goto fail;
1184 }
1185
1186 ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1187 if (ret < 0)
1188 goto fail;
1189 if (ret == 0)
1190 root->orphan_item_inserted = 1;
1191
1207 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); 1192 ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1208 if (ret) 1193 if (ret)
1209 goto fail; 1194 goto fail;
@@ -1212,10 +1197,9 @@ again:
1212 ret = radix_tree_insert(&fs_info->fs_roots_radix, 1197 ret = radix_tree_insert(&fs_info->fs_roots_radix,
1213 (unsigned long)root->root_key.objectid, 1198 (unsigned long)root->root_key.objectid,
1214 root); 1199 root);
1215 if (ret == 0) { 1200 if (ret == 0)
1216 root->in_radix = 1; 1201 root->in_radix = 1;
1217 root->clean_orphans = 1; 1202
1218 }
1219 spin_unlock(&fs_info->fs_roots_radix_lock); 1203 spin_unlock(&fs_info->fs_roots_radix_lock);
1220 radix_tree_preload_end(); 1204 radix_tree_preload_end();
1221 if (ret) { 1205 if (ret) {
@@ -1461,10 +1445,6 @@ static int cleaner_kthread(void *arg)
1461 struct btrfs_root *root = arg; 1445 struct btrfs_root *root = arg;
1462 1446
1463 do { 1447 do {
1464 smp_mb();
1465 if (root->fs_info->closing)
1466 break;
1467
1468 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); 1448 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1469 1449
1470 if (!(root->fs_info->sb->s_flags & MS_RDONLY) && 1450 if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
@@ -1477,11 +1457,9 @@ static int cleaner_kthread(void *arg)
1477 if (freezing(current)) { 1457 if (freezing(current)) {
1478 refrigerator(); 1458 refrigerator();
1479 } else { 1459 } else {
1480 smp_mb();
1481 if (root->fs_info->closing)
1482 break;
1483 set_current_state(TASK_INTERRUPTIBLE); 1460 set_current_state(TASK_INTERRUPTIBLE);
1484 schedule(); 1461 if (!kthread_should_stop())
1462 schedule();
1485 __set_current_state(TASK_RUNNING); 1463 __set_current_state(TASK_RUNNING);
1486 } 1464 }
1487 } while (!kthread_should_stop()); 1465 } while (!kthread_should_stop());
@@ -1493,36 +1471,40 @@ static int transaction_kthread(void *arg)
1493 struct btrfs_root *root = arg; 1471 struct btrfs_root *root = arg;
1494 struct btrfs_trans_handle *trans; 1472 struct btrfs_trans_handle *trans;
1495 struct btrfs_transaction *cur; 1473 struct btrfs_transaction *cur;
1474 u64 transid;
1496 unsigned long now; 1475 unsigned long now;
1497 unsigned long delay; 1476 unsigned long delay;
1498 int ret; 1477 int ret;
1499 1478
1500 do { 1479 do {
1501 smp_mb();
1502 if (root->fs_info->closing)
1503 break;
1504
1505 delay = HZ * 30; 1480 delay = HZ * 30;
1506 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); 1481 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1507 mutex_lock(&root->fs_info->transaction_kthread_mutex); 1482 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1508 1483
1509 mutex_lock(&root->fs_info->trans_mutex); 1484 spin_lock(&root->fs_info->new_trans_lock);
1510 cur = root->fs_info->running_transaction; 1485 cur = root->fs_info->running_transaction;
1511 if (!cur) { 1486 if (!cur) {
1512 mutex_unlock(&root->fs_info->trans_mutex); 1487 spin_unlock(&root->fs_info->new_trans_lock);
1513 goto sleep; 1488 goto sleep;
1514 } 1489 }
1515 1490
1516 now = get_seconds(); 1491 now = get_seconds();
1517 if (now < cur->start_time || now - cur->start_time < 30) { 1492 if (!cur->blocked &&
1518 mutex_unlock(&root->fs_info->trans_mutex); 1493 (now < cur->start_time || now - cur->start_time < 30)) {
1494 spin_unlock(&root->fs_info->new_trans_lock);
1519 delay = HZ * 5; 1495 delay = HZ * 5;
1520 goto sleep; 1496 goto sleep;
1521 } 1497 }
1522 mutex_unlock(&root->fs_info->trans_mutex); 1498 transid = cur->transid;
1523 trans = btrfs_start_transaction(root, 1); 1499 spin_unlock(&root->fs_info->new_trans_lock);
1524 ret = btrfs_commit_transaction(trans, root);
1525 1500
1501 trans = btrfs_join_transaction(root, 1);
1502 if (transid == trans->transid) {
1503 ret = btrfs_commit_transaction(trans, root);
1504 BUG_ON(ret);
1505 } else {
1506 btrfs_end_transaction(trans, root);
1507 }
1526sleep: 1508sleep:
1527 wake_up_process(root->fs_info->cleaner_kthread); 1509 wake_up_process(root->fs_info->cleaner_kthread);
1528 mutex_unlock(&root->fs_info->transaction_kthread_mutex); 1510 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
@@ -1530,10 +1512,10 @@ sleep:
1530 if (freezing(current)) { 1512 if (freezing(current)) {
1531 refrigerator(); 1513 refrigerator();
1532 } else { 1514 } else {
1533 if (root->fs_info->closing)
1534 break;
1535 set_current_state(TASK_INTERRUPTIBLE); 1515 set_current_state(TASK_INTERRUPTIBLE);
1536 schedule_timeout(delay); 1516 if (!kthread_should_stop() &&
1517 !btrfs_transaction_blocked(root->fs_info))
1518 schedule_timeout(delay);
1537 __set_current_state(TASK_RUNNING); 1519 __set_current_state(TASK_RUNNING);
1538 } 1520 }
1539 } while (!kthread_should_stop()); 1521 } while (!kthread_should_stop());
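Both kthread loops above drop the explicit fs_info->closing checks and instead re-test kthread_should_stop() after setting TASK_INTERRUPTIBLE, so a kthread_stop() arriving between the test and schedule() is not slept through. The canonical shape of that loop, as a standalone sketch rather than the btrfs-specific bodies:

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int example_kthread(void *unused)
	{
		do {
			/* ... periodic work ... */

			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();	/* kthread_stop() still wakes us here */
			__set_current_state(TASK_RUNNING);
		} while (!kthread_should_stop());
		return 0;
	}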
@@ -1620,6 +1602,13 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1620 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); 1602 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1621 INIT_LIST_HEAD(&fs_info->space_info); 1603 INIT_LIST_HEAD(&fs_info->space_info);
1622 btrfs_mapping_init(&fs_info->mapping_tree); 1604 btrfs_mapping_init(&fs_info->mapping_tree);
1605 btrfs_init_block_rsv(&fs_info->global_block_rsv);
1606 btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
1607 btrfs_init_block_rsv(&fs_info->trans_block_rsv);
1608 btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
1609 btrfs_init_block_rsv(&fs_info->empty_block_rsv);
1610 INIT_LIST_HEAD(&fs_info->durable_block_rsv_list);
1611 mutex_init(&fs_info->durable_block_rsv_mutex);
1623 atomic_set(&fs_info->nr_async_submits, 0); 1612 atomic_set(&fs_info->nr_async_submits, 0);
1624 atomic_set(&fs_info->async_delalloc_pages, 0); 1613 atomic_set(&fs_info->async_delalloc_pages, 0);
1625 atomic_set(&fs_info->async_submit_draining, 0); 1614 atomic_set(&fs_info->async_submit_draining, 0);
@@ -1759,9 +1748,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1759 min_t(u64, fs_devices->num_devices, 1748 min_t(u64, fs_devices->num_devices,
1760 fs_info->thread_pool_size), 1749 fs_info->thread_pool_size),
1761 &fs_info->generic_worker); 1750 &fs_info->generic_worker);
1762 btrfs_init_workers(&fs_info->enospc_workers, "enospc",
1763 fs_info->thread_pool_size,
1764 &fs_info->generic_worker);
1765 1751
1766 /* a higher idle thresh on the submit workers makes it much more 1752 /* a higher idle thresh on the submit workers makes it much more
1767 * likely that bios will be send down in a sane order to the 1753 * likely that bios will be send down in a sane order to the
@@ -1809,7 +1795,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1809 btrfs_start_workers(&fs_info->endio_meta_workers, 1); 1795 btrfs_start_workers(&fs_info->endio_meta_workers, 1);
1810 btrfs_start_workers(&fs_info->endio_meta_write_workers, 1); 1796 btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
1811 btrfs_start_workers(&fs_info->endio_write_workers, 1); 1797 btrfs_start_workers(&fs_info->endio_write_workers, 1);
1812 btrfs_start_workers(&fs_info->enospc_workers, 1);
1813 1798
1814 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); 1799 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
1815 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, 1800 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -1912,17 +1897,18 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1912 1897
1913 csum_root->track_dirty = 1; 1898 csum_root->track_dirty = 1;
1914 1899
1900 fs_info->generation = generation;
1901 fs_info->last_trans_committed = generation;
1902 fs_info->data_alloc_profile = (u64)-1;
1903 fs_info->metadata_alloc_profile = (u64)-1;
1904 fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1905
1915 ret = btrfs_read_block_groups(extent_root); 1906 ret = btrfs_read_block_groups(extent_root);
1916 if (ret) { 1907 if (ret) {
1917 printk(KERN_ERR "Failed to read block groups: %d\n", ret); 1908 printk(KERN_ERR "Failed to read block groups: %d\n", ret);
1918 goto fail_block_groups; 1909 goto fail_block_groups;
1919 } 1910 }
1920 1911
1921 fs_info->generation = generation;
1922 fs_info->last_trans_committed = generation;
1923 fs_info->data_alloc_profile = (u64)-1;
1924 fs_info->metadata_alloc_profile = (u64)-1;
1925 fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1926 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, 1912 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
1927 "btrfs-cleaner"); 1913 "btrfs-cleaner");
1928 if (IS_ERR(fs_info->cleaner_kthread)) 1914 if (IS_ERR(fs_info->cleaner_kthread))
@@ -1977,6 +1963,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1977 BUG_ON(ret); 1963 BUG_ON(ret);
1978 1964
1979 if (!(sb->s_flags & MS_RDONLY)) { 1965 if (!(sb->s_flags & MS_RDONLY)) {
1966 ret = btrfs_cleanup_fs_roots(fs_info);
1967 BUG_ON(ret);
1968
1980 ret = btrfs_recover_relocation(tree_root); 1969 ret = btrfs_recover_relocation(tree_root);
1981 if (ret < 0) { 1970 if (ret < 0) {
1982 printk(KERN_WARNING 1971 printk(KERN_WARNING
@@ -2040,7 +2029,6 @@ fail_sb_buffer:
2040 btrfs_stop_workers(&fs_info->endio_meta_write_workers); 2029 btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2041 btrfs_stop_workers(&fs_info->endio_write_workers); 2030 btrfs_stop_workers(&fs_info->endio_write_workers);
2042 btrfs_stop_workers(&fs_info->submit_workers); 2031 btrfs_stop_workers(&fs_info->submit_workers);
2043 btrfs_stop_workers(&fs_info->enospc_workers);
2044fail_iput: 2032fail_iput:
2045 invalidate_inode_pages2(fs_info->btree_inode->i_mapping); 2033 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2046 iput(fs_info->btree_inode); 2034 iput(fs_info->btree_inode);
@@ -2405,11 +2393,11 @@ int btrfs_commit_super(struct btrfs_root *root)
2405 down_write(&root->fs_info->cleanup_work_sem); 2393 down_write(&root->fs_info->cleanup_work_sem);
2406 up_write(&root->fs_info->cleanup_work_sem); 2394 up_write(&root->fs_info->cleanup_work_sem);
2407 2395
2408 trans = btrfs_start_transaction(root, 1); 2396 trans = btrfs_join_transaction(root, 1);
2409 ret = btrfs_commit_transaction(trans, root); 2397 ret = btrfs_commit_transaction(trans, root);
2410 BUG_ON(ret); 2398 BUG_ON(ret);
2411 /* run commit again to drop the original snapshot */ 2399 /* run commit again to drop the original snapshot */
2412 trans = btrfs_start_transaction(root, 1); 2400 trans = btrfs_join_transaction(root, 1);
2413 btrfs_commit_transaction(trans, root); 2401 btrfs_commit_transaction(trans, root);
2414 ret = btrfs_write_and_wait_transaction(NULL, root); 2402 ret = btrfs_write_and_wait_transaction(NULL, root);
2415 BUG_ON(ret); 2403 BUG_ON(ret);
@@ -2426,15 +2414,15 @@ int close_ctree(struct btrfs_root *root)
2426 fs_info->closing = 1; 2414 fs_info->closing = 1;
2427 smp_mb(); 2415 smp_mb();
2428 2416
2429 kthread_stop(root->fs_info->transaction_kthread);
2430 kthread_stop(root->fs_info->cleaner_kthread);
2431
2432 if (!(fs_info->sb->s_flags & MS_RDONLY)) { 2417 if (!(fs_info->sb->s_flags & MS_RDONLY)) {
2433 ret = btrfs_commit_super(root); 2418 ret = btrfs_commit_super(root);
2434 if (ret) 2419 if (ret)
2435 printk(KERN_ERR "btrfs: commit super ret %d\n", ret); 2420 printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2436 } 2421 }
2437 2422
2423 kthread_stop(root->fs_info->transaction_kthread);
2424 kthread_stop(root->fs_info->cleaner_kthread);
2425
2438 fs_info->closing = 2; 2426 fs_info->closing = 2;
2439 smp_mb(); 2427 smp_mb();
2440 2428
@@ -2473,7 +2461,6 @@ int close_ctree(struct btrfs_root *root)
2473 btrfs_stop_workers(&fs_info->endio_meta_write_workers); 2461 btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2474 btrfs_stop_workers(&fs_info->endio_write_workers); 2462 btrfs_stop_workers(&fs_info->endio_write_workers);
2475 btrfs_stop_workers(&fs_info->submit_workers); 2463 btrfs_stop_workers(&fs_info->submit_workers);
2476 btrfs_stop_workers(&fs_info->enospc_workers);
2477 2464
2478 btrfs_close_devices(fs_info->fs_devices); 2465 btrfs_close_devices(fs_info->fs_devices);
2479 btrfs_mapping_tree_free(&fs_info->mapping_tree); 2466 btrfs_mapping_tree_free(&fs_info->mapping_tree);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index c958ecbc1916..88e825a0bf21 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -87,7 +87,7 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
87 int metadata); 87 int metadata);
88int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, 88int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
89 int rw, struct bio *bio, int mirror_num, 89 int rw, struct bio *bio, int mirror_num,
90 unsigned long bio_flags, 90 unsigned long bio_flags, u64 bio_offset,
91 extent_submit_bio_hook_t *submit_bio_start, 91 extent_submit_bio_hook_t *submit_bio_start,
92 extent_submit_bio_hook_t *submit_bio_done); 92 extent_submit_bio_hook_t *submit_bio_done);
93 93
@@ -95,8 +95,6 @@ int btrfs_congested_async(struct btrfs_fs_info *info, int iodone);
95unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info); 95unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
96int btrfs_write_tree_block(struct extent_buffer *buf); 96int btrfs_write_tree_block(struct extent_buffer *buf);
97int btrfs_wait_tree_block_writeback(struct extent_buffer *buf); 97int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
98int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
99 struct btrfs_fs_info *fs_info);
100int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, 98int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
101 struct btrfs_fs_info *fs_info); 99 struct btrfs_fs_info *fs_info);
102int btrfs_add_log_tree(struct btrfs_trans_handle *trans, 100int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index c6a4f459ad76..b9080d71991a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -35,10 +35,9 @@
35 35
36static int update_block_group(struct btrfs_trans_handle *trans, 36static int update_block_group(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root, 37 struct btrfs_root *root,
38 u64 bytenr, u64 num_bytes, int alloc, 38 u64 bytenr, u64 num_bytes, int alloc);
39 int mark_free); 39static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
40static int update_reserved_extents(struct btrfs_block_group_cache *cache, 40 u64 num_bytes, int reserve, int sinfo);
41 u64 num_bytes, int reserve);
42static int __btrfs_free_extent(struct btrfs_trans_handle *trans, 41static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
43 struct btrfs_root *root, 42 struct btrfs_root *root,
44 u64 bytenr, u64 num_bytes, u64 parent, 43 u64 bytenr, u64 num_bytes, u64 parent,
@@ -61,12 +60,6 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
61static int do_chunk_alloc(struct btrfs_trans_handle *trans, 60static int do_chunk_alloc(struct btrfs_trans_handle *trans,
62 struct btrfs_root *extent_root, u64 alloc_bytes, 61 struct btrfs_root *extent_root, u64 alloc_bytes,
63 u64 flags, int force); 62 u64 flags, int force);
64static int pin_down_bytes(struct btrfs_trans_handle *trans,
65 struct btrfs_root *root,
66 struct btrfs_path *path,
67 u64 bytenr, u64 num_bytes,
68 int is_data, int reserved,
69 struct extent_buffer **must_clean);
70static int find_next_key(struct btrfs_path *path, int level, 63static int find_next_key(struct btrfs_path *path, int level,
71 struct btrfs_key *key); 64 struct btrfs_key *key);
72static void dump_space_info(struct btrfs_space_info *info, u64 bytes, 65static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
@@ -91,8 +84,12 @@ void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
91 84
92void btrfs_put_block_group(struct btrfs_block_group_cache *cache) 85void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
93{ 86{
94 if (atomic_dec_and_test(&cache->count)) 87 if (atomic_dec_and_test(&cache->count)) {
88 WARN_ON(cache->pinned > 0);
89 WARN_ON(cache->reserved > 0);
90 WARN_ON(cache->reserved_pinned > 0);
95 kfree(cache); 91 kfree(cache);
92 }
96} 93}
97 94
98/* 95/*
@@ -319,7 +316,7 @@ static int caching_kthread(void *data)
319 316
320 exclude_super_stripes(extent_root, block_group); 317 exclude_super_stripes(extent_root, block_group);
321 spin_lock(&block_group->space_info->lock); 318 spin_lock(&block_group->space_info->lock);
322 block_group->space_info->bytes_super += block_group->bytes_super; 319 block_group->space_info->bytes_readonly += block_group->bytes_super;
323 spin_unlock(&block_group->space_info->lock); 320 spin_unlock(&block_group->space_info->lock);
324 321
325 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); 322 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
@@ -507,6 +504,9 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
507 struct list_head *head = &info->space_info; 504 struct list_head *head = &info->space_info;
508 struct btrfs_space_info *found; 505 struct btrfs_space_info *found;
509 506
507 flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
508 BTRFS_BLOCK_GROUP_METADATA;
509
510 rcu_read_lock(); 510 rcu_read_lock();
511 list_for_each_entry_rcu(found, head, list) { 511 list_for_each_entry_rcu(found, head, list) {
512 if (found->flags == flags) { 512 if (found->flags == flags) {
@@ -610,6 +610,113 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
610} 610}
611 611
612/* 612/*
613 * helper function to look up the reference count and flags of an extent.
614 *
615 * the head node for delayed ref is used to store the sum of all the
616 * reference count modifications queued up in the rbtree. the head
617 * node may also store the extent flags to set. This way you can check
618 * to see what the reference count and extent flags will be once all of
619 * the delayed refs have been processed.
620 */
621int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
622 struct btrfs_root *root, u64 bytenr,
623 u64 num_bytes, u64 *refs, u64 *flags)
624{
625 struct btrfs_delayed_ref_head *head;
626 struct btrfs_delayed_ref_root *delayed_refs;
627 struct btrfs_path *path;
628 struct btrfs_extent_item *ei;
629 struct extent_buffer *leaf;
630 struct btrfs_key key;
631 u32 item_size;
632 u64 num_refs;
633 u64 extent_flags;
634 int ret;
635
636 path = btrfs_alloc_path();
637 if (!path)
638 return -ENOMEM;
639
640 key.objectid = bytenr;
641 key.type = BTRFS_EXTENT_ITEM_KEY;
642 key.offset = num_bytes;
643 if (!trans) {
644 path->skip_locking = 1;
645 path->search_commit_root = 1;
646 }
647again:
648 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
649 &key, path, 0, 0);
650 if (ret < 0)
651 goto out_free;
652
653 if (ret == 0) {
654 leaf = path->nodes[0];
655 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
656 if (item_size >= sizeof(*ei)) {
657 ei = btrfs_item_ptr(leaf, path->slots[0],
658 struct btrfs_extent_item);
659 num_refs = btrfs_extent_refs(leaf, ei);
660 extent_flags = btrfs_extent_flags(leaf, ei);
661 } else {
662#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
663 struct btrfs_extent_item_v0 *ei0;
664 BUG_ON(item_size != sizeof(*ei0));
665 ei0 = btrfs_item_ptr(leaf, path->slots[0],
666 struct btrfs_extent_item_v0);
667 num_refs = btrfs_extent_refs_v0(leaf, ei0);
668 /* FIXME: this isn't correct for data */
669 extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
670#else
671 BUG();
672#endif
673 }
674 BUG_ON(num_refs == 0);
675 } else {
676 num_refs = 0;
677 extent_flags = 0;
678 ret = 0;
679 }
680
681 if (!trans)
682 goto out;
683
684 delayed_refs = &trans->transaction->delayed_refs;
685 spin_lock(&delayed_refs->lock);
686 head = btrfs_find_delayed_ref_head(trans, bytenr);
687 if (head) {
688 if (!mutex_trylock(&head->mutex)) {
689 atomic_inc(&head->node.refs);
690 spin_unlock(&delayed_refs->lock);
691
692 btrfs_release_path(root->fs_info->extent_root, path);
693
694 mutex_lock(&head->mutex);
695 mutex_unlock(&head->mutex);
696 btrfs_put_delayed_ref(&head->node);
697 goto again;
698 }
699 if (head->extent_op && head->extent_op->update_flags)
700 extent_flags |= head->extent_op->flags_to_set;
701 else
702 BUG_ON(num_refs == 0);
703
704 num_refs += head->node.ref_mod;
705 mutex_unlock(&head->mutex);
706 }
707 spin_unlock(&delayed_refs->lock);
708out:
709 WARN_ON(num_refs == 0);
710 if (refs)
711 *refs = num_refs;
712 if (flags)
713 *flags = extent_flags;
714out_free:
715 btrfs_free_path(path);
716 return ret;
717}
718
719/*
613 * Back reference rules. Back refs have three main goals: 720 * Back reference rules. Back refs have three main goals:
614 * 721 *
615 * 1) differentiate between all holders of references to an extent so that 722 * 1) differentiate between all holders of references to an extent so that
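The function added above folds the pending delayed-ref modifications into the on-disk numbers, so callers see the reference count and flags the extent will end up with. A small userspace model of that arithmetic (the structure and names here are illustrative, not the kernel types):

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the delayed-ref head consulted by the new helper. */
struct delayed_head {
	long ref_mod;		/* net refs queued but not yet written to disk */
	int update_flags;	/* does a pending extent_op change the flags?   */
	uint64_t flags_to_set;
};

/* Model: on-disk values plus pending modifications give the effective values. */
static void lookup_extent_info(uint64_t disk_refs, uint64_t disk_flags,
			       const struct delayed_head *head,
			       uint64_t *refs, uint64_t *flags)
{
	*refs = disk_refs;
	*flags = disk_flags;
	if (head) {
		if (head->update_flags)
			*flags |= head->flags_to_set;
		*refs += head->ref_mod;
	}
}

int main(void)
{
	struct delayed_head head = { .ref_mod = -1 };	/* one queued drop */
	uint64_t refs, flags;

	lookup_extent_info(2, 0, &head, &refs, &flags);
	printf("effective refs: %llu\n", (unsigned long long)refs);	/* 1 */
	return 0;
}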
@@ -1871,7 +1978,6 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1871 return ret; 1978 return ret;
1872} 1979}
1873 1980
1874
1875/* helper function to actually process a single delayed ref entry */ 1981/* helper function to actually process a single delayed ref entry */
1876static int run_one_delayed_ref(struct btrfs_trans_handle *trans, 1982static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1877 struct btrfs_root *root, 1983 struct btrfs_root *root,
@@ -1891,32 +1997,14 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1891 BUG_ON(extent_op); 1997 BUG_ON(extent_op);
1892 head = btrfs_delayed_node_to_head(node); 1998 head = btrfs_delayed_node_to_head(node);
1893 if (insert_reserved) { 1999 if (insert_reserved) {
1894 int mark_free = 0; 2000 btrfs_pin_extent(root, node->bytenr,
1895 struct extent_buffer *must_clean = NULL; 2001 node->num_bytes, 1);
1896
1897 ret = pin_down_bytes(trans, root, NULL,
1898 node->bytenr, node->num_bytes,
1899 head->is_data, 1, &must_clean);
1900 if (ret > 0)
1901 mark_free = 1;
1902
1903 if (must_clean) {
1904 clean_tree_block(NULL, root, must_clean);
1905 btrfs_tree_unlock(must_clean);
1906 free_extent_buffer(must_clean);
1907 }
1908 if (head->is_data) { 2002 if (head->is_data) {
1909 ret = btrfs_del_csums(trans, root, 2003 ret = btrfs_del_csums(trans, root,
1910 node->bytenr, 2004 node->bytenr,
1911 node->num_bytes); 2005 node->num_bytes);
1912 BUG_ON(ret); 2006 BUG_ON(ret);
1913 } 2007 }
1914 if (mark_free) {
1915 ret = btrfs_free_reserved_extent(root,
1916 node->bytenr,
1917 node->num_bytes);
1918 BUG_ON(ret);
1919 }
1920 } 2008 }
1921 mutex_unlock(&head->mutex); 2009 mutex_unlock(&head->mutex);
1922 return 0; 2010 return 0;
@@ -2347,6 +2435,8 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2347 ret = 0; 2435 ret = 0;
2348out: 2436out:
2349 btrfs_free_path(path); 2437 btrfs_free_path(path);
2438 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2439 WARN_ON(ret > 0);
2350 return ret; 2440 return ret;
2351} 2441}
2352 2442
@@ -2660,12 +2750,21 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2660 struct btrfs_space_info **space_info) 2750 struct btrfs_space_info **space_info)
2661{ 2751{
2662 struct btrfs_space_info *found; 2752 struct btrfs_space_info *found;
2753 int i;
2754 int factor;
2755
2756 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2757 BTRFS_BLOCK_GROUP_RAID10))
2758 factor = 2;
2759 else
2760 factor = 1;
2663 2761
2664 found = __find_space_info(info, flags); 2762 found = __find_space_info(info, flags);
2665 if (found) { 2763 if (found) {
2666 spin_lock(&found->lock); 2764 spin_lock(&found->lock);
2667 found->total_bytes += total_bytes; 2765 found->total_bytes += total_bytes;
2668 found->bytes_used += bytes_used; 2766 found->bytes_used += bytes_used;
2767 found->disk_used += bytes_used * factor;
2669 found->full = 0; 2768 found->full = 0;
2670 spin_unlock(&found->lock); 2769 spin_unlock(&found->lock);
2671 *space_info = found; 2770 *space_info = found;
@@ -2675,18 +2774,20 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2675 if (!found) 2774 if (!found)
2676 return -ENOMEM; 2775 return -ENOMEM;
2677 2776
2678 INIT_LIST_HEAD(&found->block_groups); 2777 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
2778 INIT_LIST_HEAD(&found->block_groups[i]);
2679 init_rwsem(&found->groups_sem); 2779 init_rwsem(&found->groups_sem);
2680 init_waitqueue_head(&found->flush_wait);
2681 init_waitqueue_head(&found->allocate_wait);
2682 spin_lock_init(&found->lock); 2780 spin_lock_init(&found->lock);
2683 found->flags = flags; 2781 found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
2782 BTRFS_BLOCK_GROUP_SYSTEM |
2783 BTRFS_BLOCK_GROUP_METADATA);
2684 found->total_bytes = total_bytes; 2784 found->total_bytes = total_bytes;
2685 found->bytes_used = bytes_used; 2785 found->bytes_used = bytes_used;
2786 found->disk_used = bytes_used * factor;
2686 found->bytes_pinned = 0; 2787 found->bytes_pinned = 0;
2687 found->bytes_reserved = 0; 2788 found->bytes_reserved = 0;
2688 found->bytes_readonly = 0; 2789 found->bytes_readonly = 0;
2689 found->bytes_delalloc = 0; 2790 found->bytes_may_use = 0;
2690 found->full = 0; 2791 found->full = 0;
2691 found->force_alloc = 0; 2792 found->force_alloc = 0;
2692 *space_info = found; 2793 *space_info = found;
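The new disk_used counter above tracks raw disk consumption: profiles that duplicate every block (DUP, RAID1 and RAID10 in this hunk) are charged twice per logical byte. A minimal userspace sketch of that bookkeeping, using an is_mirrored flag in place of the real BTRFS_BLOCK_GROUP_* bits:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the space_info counters touched in this hunk. */
struct space_info {
	uint64_t total_bytes;
	uint64_t bytes_used;	/* logical bytes allocated to extents */
	uint64_t disk_used;	/* raw bytes consumed on disk */
};

/* Mirrored profiles (DUP/RAID1/RAID10) store every block twice. */
static void account_bytes_used(struct space_info *info, uint64_t bytes,
			       int is_mirrored)
{
	int factor = is_mirrored ? 2 : 1;

	info->bytes_used += bytes;
	info->disk_used += bytes * factor;
}

int main(void)
{
	struct space_info meta = { .total_bytes = 8ULL << 30 };

	account_bytes_used(&meta, 1ULL << 20, 1);	/* 1 MiB in a DUP profile */
	printf("used %llu, disk_used %llu\n",
	       (unsigned long long)meta.bytes_used,
	       (unsigned long long)meta.disk_used);	/* 1 MiB vs 2 MiB */
	return 0;
}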
@@ -2711,19 +2812,6 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2711 } 2812 }
2712} 2813}
2713 2814
2714static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
2715{
2716 spin_lock(&cache->space_info->lock);
2717 spin_lock(&cache->lock);
2718 if (!cache->ro) {
2719 cache->space_info->bytes_readonly += cache->key.offset -
2720 btrfs_block_group_used(&cache->item);
2721 cache->ro = 1;
2722 }
2723 spin_unlock(&cache->lock);
2724 spin_unlock(&cache->space_info->lock);
2725}
2726
2727u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) 2815u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2728{ 2816{
2729 u64 num_devices = root->fs_info->fs_devices->rw_devices; 2817 u64 num_devices = root->fs_info->fs_devices->rw_devices;
@@ -2752,491 +2840,50 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2752 return flags; 2840 return flags;
2753} 2841}
2754 2842
2755static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data) 2843static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
2756{
2757 struct btrfs_fs_info *info = root->fs_info;
2758 u64 alloc_profile;
2759
2760 if (data) {
2761 alloc_profile = info->avail_data_alloc_bits &
2762 info->data_alloc_profile;
2763 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2764 } else if (root == root->fs_info->chunk_root) {
2765 alloc_profile = info->avail_system_alloc_bits &
2766 info->system_alloc_profile;
2767 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2768 } else {
2769 alloc_profile = info->avail_metadata_alloc_bits &
2770 info->metadata_alloc_profile;
2771 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2772 }
2773
2774 return btrfs_reduce_alloc_profile(root, data);
2775}
2776
2777void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2778{
2779 u64 alloc_target;
2780
2781 alloc_target = btrfs_get_alloc_profile(root, 1);
2782 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
2783 alloc_target);
2784}
2785
2786static u64 calculate_bytes_needed(struct btrfs_root *root, int num_items)
2787{
2788 u64 num_bytes;
2789 int level;
2790
2791 level = BTRFS_MAX_LEVEL - 2;
2792 /*
2793 * NOTE: these calculations are absolutely the worst possible case.
2794 * This assumes that _every_ item we insert will require a new leaf, and
2795 * that the tree has grown to its maximum level size.
2796 */
2797
2798 /*
2799	 * for every item we insert we could insert both an extent item and an
2800	 * extent ref item. Then for every item we insert, we will need to cow
2801	 * the original leaf, plus the leaves to the left and right of it.
2802 *
2803	 * Unless we are talking about the extent root, in which case we just want
2804	 * the number of items * 2, since we just need the extent item plus its ref.
2805 */
2806 if (root == root->fs_info->extent_root)
2807 num_bytes = num_items * 2;
2808 else
2809 num_bytes = (num_items + (2 * num_items)) * 3;
2810
2811 /*
2812 * num_bytes is total number of leaves we could need times the leaf
2813 * size, and then for every leaf we could end up cow'ing 2 nodes per
2814 * level, down to the leaf level.
2815 */
2816 num_bytes = (num_bytes * root->leafsize) +
2817 (num_bytes * (level * 2)) * root->nodesize;
2818
2819 return num_bytes;
2820}
2821
2822/*
2823 * Unreserve metadata space for delalloc. If we have less reserved credits than
2824 * we have extents, this function does nothing.
2825 */
2826int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
2827 struct inode *inode, int num_items)
2828{
2829 struct btrfs_fs_info *info = root->fs_info;
2830 struct btrfs_space_info *meta_sinfo;
2831 u64 num_bytes;
2832 u64 alloc_target;
2833 bool bug = false;
2834
2835 /* get the space info for where the metadata will live */
2836 alloc_target = btrfs_get_alloc_profile(root, 0);
2837 meta_sinfo = __find_space_info(info, alloc_target);
2838
2839 num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
2840 num_items);
2841
2842 spin_lock(&meta_sinfo->lock);
2843 spin_lock(&BTRFS_I(inode)->accounting_lock);
2844 if (BTRFS_I(inode)->reserved_extents <=
2845 BTRFS_I(inode)->outstanding_extents) {
2846 spin_unlock(&BTRFS_I(inode)->accounting_lock);
2847 spin_unlock(&meta_sinfo->lock);
2848 return 0;
2849 }
2850 spin_unlock(&BTRFS_I(inode)->accounting_lock);
2851
2852 BTRFS_I(inode)->reserved_extents -= num_items;
2853 BUG_ON(BTRFS_I(inode)->reserved_extents < 0);
2854
2855 if (meta_sinfo->bytes_delalloc < num_bytes) {
2856 bug = true;
2857 meta_sinfo->bytes_delalloc = 0;
2858 } else {
2859 meta_sinfo->bytes_delalloc -= num_bytes;
2860 }
2861 spin_unlock(&meta_sinfo->lock);
2862
2863 BUG_ON(bug);
2864
2865 return 0;
2866}
2867
2868static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
2869{ 2844{
2870 u64 thresh; 2845 if (flags & BTRFS_BLOCK_GROUP_DATA)
2871 2846 flags |= root->fs_info->avail_data_alloc_bits &
2872 thresh = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved + 2847 root->fs_info->data_alloc_profile;
2873 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly + 2848 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2874 meta_sinfo->bytes_super + meta_sinfo->bytes_root + 2849 flags |= root->fs_info->avail_system_alloc_bits &
2875 meta_sinfo->bytes_may_use; 2850 root->fs_info->system_alloc_profile;
2876 2851 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
2877 thresh = meta_sinfo->total_bytes - thresh; 2852 flags |= root->fs_info->avail_metadata_alloc_bits &
2878 thresh *= 80; 2853 root->fs_info->metadata_alloc_profile;
2879 do_div(thresh, 100); 2854 return btrfs_reduce_alloc_profile(root, flags);
2880 if (thresh <= meta_sinfo->bytes_delalloc)
2881 meta_sinfo->force_delalloc = 1;
2882 else
2883 meta_sinfo->force_delalloc = 0;
2884} 2855}
2885 2856
2886struct async_flush { 2857static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
2887 struct btrfs_root *root;
2888 struct btrfs_space_info *info;
2889 struct btrfs_work work;
2890};
2891
2892static noinline void flush_delalloc_async(struct btrfs_work *work)
2893{ 2858{
2894 struct async_flush *async; 2859 u64 flags;
2895 struct btrfs_root *root;
2896 struct btrfs_space_info *info;
2897
2898 async = container_of(work, struct async_flush, work);
2899 root = async->root;
2900 info = async->info;
2901
2902 btrfs_start_delalloc_inodes(root, 0);
2903 wake_up(&info->flush_wait);
2904 btrfs_wait_ordered_extents(root, 0, 0);
2905
2906 spin_lock(&info->lock);
2907 info->flushing = 0;
2908 spin_unlock(&info->lock);
2909 wake_up(&info->flush_wait);
2910
2911 kfree(async);
2912}
2913
2914static void wait_on_flush(struct btrfs_space_info *info)
2915{
2916 DEFINE_WAIT(wait);
2917 u64 used;
2918
2919 while (1) {
2920 prepare_to_wait(&info->flush_wait, &wait,
2921 TASK_UNINTERRUPTIBLE);
2922 spin_lock(&info->lock);
2923 if (!info->flushing) {
2924 spin_unlock(&info->lock);
2925 break;
2926 }
2927
2928 used = info->bytes_used + info->bytes_reserved +
2929 info->bytes_pinned + info->bytes_readonly +
2930 info->bytes_super + info->bytes_root +
2931 info->bytes_may_use + info->bytes_delalloc;
2932 if (used < info->total_bytes) {
2933 spin_unlock(&info->lock);
2934 break;
2935 }
2936 spin_unlock(&info->lock);
2937 schedule();
2938 }
2939 finish_wait(&info->flush_wait, &wait);
2940}
2941
2942static void flush_delalloc(struct btrfs_root *root,
2943 struct btrfs_space_info *info)
2944{
2945 struct async_flush *async;
2946 bool wait = false;
2947
2948 spin_lock(&info->lock);
2949 2860
2950 if (!info->flushing) 2861 if (data)
2951 info->flushing = 1; 2862 flags = BTRFS_BLOCK_GROUP_DATA;
2863 else if (root == root->fs_info->chunk_root)
2864 flags = BTRFS_BLOCK_GROUP_SYSTEM;
2952 else 2865 else
2953 wait = true; 2866 flags = BTRFS_BLOCK_GROUP_METADATA;
2954
2955 spin_unlock(&info->lock);
2956
2957 if (wait) {
2958 wait_on_flush(info);
2959 return;
2960 }
2961
2962 async = kzalloc(sizeof(*async), GFP_NOFS);
2963 if (!async)
2964 goto flush;
2965
2966 async->root = root;
2967 async->info = info;
2968 async->work.func = flush_delalloc_async;
2969 2867
2970 btrfs_queue_worker(&root->fs_info->enospc_workers, 2868 return get_alloc_profile(root, flags);
2971 &async->work);
2972 wait_on_flush(info);
2973 return;
2974
2975flush:
2976 btrfs_start_delalloc_inodes(root, 0);
2977 btrfs_wait_ordered_extents(root, 0, 0);
2978
2979 spin_lock(&info->lock);
2980 info->flushing = 0;
2981 spin_unlock(&info->lock);
2982 wake_up(&info->flush_wait);
2983} 2869}
2984 2870
2985static int maybe_allocate_chunk(struct btrfs_root *root, 2871void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2986 struct btrfs_space_info *info)
2987{
2988 struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
2989 struct btrfs_trans_handle *trans;
2990 bool wait = false;
2991 int ret = 0;
2992 u64 min_metadata;
2993 u64 free_space;
2994
2995 free_space = btrfs_super_total_bytes(disk_super);
2996 /*
2997 * we allow the metadata to grow to a max of either 10gb or 5% of the
2998 * space in the volume.
2999 */
3000 min_metadata = min((u64)10 * 1024 * 1024 * 1024,
3001 div64_u64(free_space * 5, 100));
3002 if (info->total_bytes >= min_metadata) {
3003 spin_unlock(&info->lock);
3004 return 0;
3005 }
3006
3007 if (info->full) {
3008 spin_unlock(&info->lock);
3009 return 0;
3010 }
3011
3012 if (!info->allocating_chunk) {
3013 info->force_alloc = 1;
3014 info->allocating_chunk = 1;
3015 } else {
3016 wait = true;
3017 }
3018
3019 spin_unlock(&info->lock);
3020
3021 if (wait) {
3022 wait_event(info->allocate_wait,
3023 !info->allocating_chunk);
3024 return 1;
3025 }
3026
3027 trans = btrfs_start_transaction(root, 1);
3028 if (!trans) {
3029 ret = -ENOMEM;
3030 goto out;
3031 }
3032
3033 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3034 4096 + 2 * 1024 * 1024,
3035 info->flags, 0);
3036 btrfs_end_transaction(trans, root);
3037 if (ret)
3038 goto out;
3039out:
3040 spin_lock(&info->lock);
3041 info->allocating_chunk = 0;
3042 spin_unlock(&info->lock);
3043 wake_up(&info->allocate_wait);
3044
3045 if (ret)
3046 return 0;
3047 return 1;
3048}
3049
3050/*
3051 * Reserve metadata space for delalloc.
3052 */
3053int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
3054 struct inode *inode, int num_items)
3055{
3056 struct btrfs_fs_info *info = root->fs_info;
3057 struct btrfs_space_info *meta_sinfo;
3058 u64 num_bytes;
3059 u64 used;
3060 u64 alloc_target;
3061 int flushed = 0;
3062 int force_delalloc;
3063
3064 /* get the space info for where the metadata will live */
3065 alloc_target = btrfs_get_alloc_profile(root, 0);
3066 meta_sinfo = __find_space_info(info, alloc_target);
3067
3068 num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
3069 num_items);
3070again:
3071 spin_lock(&meta_sinfo->lock);
3072
3073 force_delalloc = meta_sinfo->force_delalloc;
3074
3075 if (unlikely(!meta_sinfo->bytes_root))
3076 meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
3077
3078 if (!flushed)
3079 meta_sinfo->bytes_delalloc += num_bytes;
3080
3081 used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
3082 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
3083 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
3084 meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
3085
3086 if (used > meta_sinfo->total_bytes) {
3087 flushed++;
3088
3089 if (flushed == 1) {
3090 if (maybe_allocate_chunk(root, meta_sinfo))
3091 goto again;
3092 flushed++;
3093 } else {
3094 spin_unlock(&meta_sinfo->lock);
3095 }
3096
3097 if (flushed == 2) {
3098 filemap_flush(inode->i_mapping);
3099 goto again;
3100 } else if (flushed == 3) {
3101 flush_delalloc(root, meta_sinfo);
3102 goto again;
3103 }
3104 spin_lock(&meta_sinfo->lock);
3105 meta_sinfo->bytes_delalloc -= num_bytes;
3106 spin_unlock(&meta_sinfo->lock);
3107 printk(KERN_ERR "enospc, has %d, reserved %d\n",
3108 BTRFS_I(inode)->outstanding_extents,
3109 BTRFS_I(inode)->reserved_extents);
3110 dump_space_info(meta_sinfo, 0, 0);
3111 return -ENOSPC;
3112 }
3113
3114 BTRFS_I(inode)->reserved_extents += num_items;
3115 check_force_delalloc(meta_sinfo);
3116 spin_unlock(&meta_sinfo->lock);
3117
3118 if (!flushed && force_delalloc)
3119 filemap_flush(inode->i_mapping);
3120
3121 return 0;
3122}
3123
3124/*
3125 * unreserve num_items number of items worth of metadata space. This needs to
3126 * be paired with btrfs_reserve_metadata_space.
3127 *
3128 * NOTE: if you have the option, run this _AFTER_ you do a
3129 * btrfs_end_transaction, since btrfs_end_transaction will run delayed ref
3130 * operations which will result in more used metadata, so we want to make sure we
3131 * can do that without issue.
3132 */
3133int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items)
3134{
3135 struct btrfs_fs_info *info = root->fs_info;
3136 struct btrfs_space_info *meta_sinfo;
3137 u64 num_bytes;
3138 u64 alloc_target;
3139 bool bug = false;
3140
3141 /* get the space info for where the metadata will live */
3142 alloc_target = btrfs_get_alloc_profile(root, 0);
3143 meta_sinfo = __find_space_info(info, alloc_target);
3144
3145 num_bytes = calculate_bytes_needed(root, num_items);
3146
3147 spin_lock(&meta_sinfo->lock);
3148 if (meta_sinfo->bytes_may_use < num_bytes) {
3149 bug = true;
3150 meta_sinfo->bytes_may_use = 0;
3151 } else {
3152 meta_sinfo->bytes_may_use -= num_bytes;
3153 }
3154 spin_unlock(&meta_sinfo->lock);
3155
3156 BUG_ON(bug);
3157
3158 return 0;
3159}
3160
3161/*
3162 * Reserve some metadata space for use. We'll calculate the worst case number
3163 * of bytes that would be needed to modify num_items number of items. If we
3164 * have space, fantastic, if not, you get -ENOSPC. Please call
3165 * btrfs_unreserve_metadata_space when you are done for the _SAME_ number of
3166 * items you reserved, since whatever metadata you needed should have already
3167 * been allocated.
3168 *
3169 * This will commit the transaction to make more space if we don't have enough
3170 * metadata space. The only time we don't do this is if we're reserving space
3171 * inside of a transaction, in which case we will just return -ENOSPC and it is the
3172 * caller's responsibility to handle it properly.
3173 */
3174int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items)
3175{ 2872{
3176 struct btrfs_fs_info *info = root->fs_info; 2873 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3177 struct btrfs_space_info *meta_sinfo; 2874 BTRFS_BLOCK_GROUP_DATA);
3178 u64 num_bytes;
3179 u64 used;
3180 u64 alloc_target;
3181 int retries = 0;
3182
3183 /* get the space info for where the metadata will live */
3184 alloc_target = btrfs_get_alloc_profile(root, 0);
3185 meta_sinfo = __find_space_info(info, alloc_target);
3186
3187 num_bytes = calculate_bytes_needed(root, num_items);
3188again:
3189 spin_lock(&meta_sinfo->lock);
3190
3191 if (unlikely(!meta_sinfo->bytes_root))
3192 meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
3193
3194 if (!retries)
3195 meta_sinfo->bytes_may_use += num_bytes;
3196
3197 used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
3198 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
3199 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
3200 meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
3201
3202 if (used > meta_sinfo->total_bytes) {
3203 retries++;
3204 if (retries == 1) {
3205 if (maybe_allocate_chunk(root, meta_sinfo))
3206 goto again;
3207 retries++;
3208 } else {
3209 spin_unlock(&meta_sinfo->lock);
3210 }
3211
3212 if (retries == 2) {
3213 flush_delalloc(root, meta_sinfo);
3214 goto again;
3215 }
3216 spin_lock(&meta_sinfo->lock);
3217 meta_sinfo->bytes_may_use -= num_bytes;
3218 spin_unlock(&meta_sinfo->lock);
3219
3220 dump_space_info(meta_sinfo, 0, 0);
3221 return -ENOSPC;
3222 }
3223
3224 check_force_delalloc(meta_sinfo);
3225 spin_unlock(&meta_sinfo->lock);
3226
3227 return 0;
3228} 2875}
3229 2876
3230/* 2877/*
3231 * This will check the space that the inode allocates from to make sure we have 2878 * This will check the space that the inode allocates from to make sure we have
3232 * enough space for bytes. 2879 * enough space for bytes.
3233 */ 2880 */
3234int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode, 2881int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3235 u64 bytes)
3236{ 2882{
3237 struct btrfs_space_info *data_sinfo; 2883 struct btrfs_space_info *data_sinfo;
2884 struct btrfs_root *root = BTRFS_I(inode)->root;
3238 u64 used; 2885 u64 used;
3239 int ret = 0, committed = 0, flushed = 0; 2886 int ret = 0, committed = 0;
3240 2887
3241 /* make sure bytes are sectorsize aligned */ 2888 /* make sure bytes are sectorsize aligned */
3242 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); 2889 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
@@ -3248,21 +2895,13 @@ int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
3248again: 2895again:
3249 /* make sure we have enough space to handle the data first */ 2896 /* make sure we have enough space to handle the data first */
3250 spin_lock(&data_sinfo->lock); 2897 spin_lock(&data_sinfo->lock);
3251 used = data_sinfo->bytes_used + data_sinfo->bytes_delalloc + 2898 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3252 data_sinfo->bytes_reserved + data_sinfo->bytes_pinned + 2899 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3253 data_sinfo->bytes_readonly + data_sinfo->bytes_may_use + 2900 data_sinfo->bytes_may_use;
3254 data_sinfo->bytes_super;
3255 2901
3256 if (used + bytes > data_sinfo->total_bytes) { 2902 if (used + bytes > data_sinfo->total_bytes) {
3257 struct btrfs_trans_handle *trans; 2903 struct btrfs_trans_handle *trans;
3258 2904
3259 if (!flushed) {
3260 spin_unlock(&data_sinfo->lock);
3261 flush_delalloc(root, data_sinfo);
3262 flushed = 1;
3263 goto again;
3264 }
3265
3266 /* 2905 /*
3267 * if we don't have enough free bytes in this space then we need 2906 * if we don't have enough free bytes in this space then we need
3268 * to alloc a new chunk. 2907 * to alloc a new chunk.
@@ -3274,15 +2913,15 @@ again:
3274 spin_unlock(&data_sinfo->lock); 2913 spin_unlock(&data_sinfo->lock);
3275alloc: 2914alloc:
3276 alloc_target = btrfs_get_alloc_profile(root, 1); 2915 alloc_target = btrfs_get_alloc_profile(root, 1);
3277 trans = btrfs_start_transaction(root, 1); 2916 trans = btrfs_join_transaction(root, 1);
3278 if (!trans) 2917 if (IS_ERR(trans))
3279 return -ENOMEM; 2918 return PTR_ERR(trans);
3280 2919
3281 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 2920 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3282 bytes + 2 * 1024 * 1024, 2921 bytes + 2 * 1024 * 1024,
3283 alloc_target, 0); 2922 alloc_target, 0);
3284 btrfs_end_transaction(trans, root); 2923 btrfs_end_transaction(trans, root);
3285 if (ret) 2924 if (ret < 0)
3286 return ret; 2925 return ret;
3287 2926
3288 if (!data_sinfo) { 2927 if (!data_sinfo) {
@@ -3297,25 +2936,26 @@ alloc:
3297 if (!committed && !root->fs_info->open_ioctl_trans) { 2936 if (!committed && !root->fs_info->open_ioctl_trans) {
3298 committed = 1; 2937 committed = 1;
3299 trans = btrfs_join_transaction(root, 1); 2938 trans = btrfs_join_transaction(root, 1);
3300 if (!trans) 2939 if (IS_ERR(trans))
3301 return -ENOMEM; 2940 return PTR_ERR(trans);
3302 ret = btrfs_commit_transaction(trans, root); 2941 ret = btrfs_commit_transaction(trans, root);
3303 if (ret) 2942 if (ret)
3304 return ret; 2943 return ret;
3305 goto again; 2944 goto again;
3306 } 2945 }
3307 2946
3308 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes" 2947#if 0 /* I hope we never need this code again, just in case */
3309 ", %llu bytes_used, %llu bytes_reserved, " 2948 printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
3310 "%llu bytes_pinned, %llu bytes_readonly, %llu may use " 2949 "%llu bytes_reserved, " "%llu bytes_pinned, "
3311 "%llu total\n", (unsigned long long)bytes, 2950 "%llu bytes_readonly, %llu may use %llu total\n",
3312 (unsigned long long)data_sinfo->bytes_delalloc, 2951 (unsigned long long)bytes,
3313 (unsigned long long)data_sinfo->bytes_used, 2952 (unsigned long long)data_sinfo->bytes_used,
3314 (unsigned long long)data_sinfo->bytes_reserved, 2953 (unsigned long long)data_sinfo->bytes_reserved,
3315 (unsigned long long)data_sinfo->bytes_pinned, 2954 (unsigned long long)data_sinfo->bytes_pinned,
3316 (unsigned long long)data_sinfo->bytes_readonly, 2955 (unsigned long long)data_sinfo->bytes_readonly,
3317 (unsigned long long)data_sinfo->bytes_may_use, 2956 (unsigned long long)data_sinfo->bytes_may_use,
3318 (unsigned long long)data_sinfo->total_bytes); 2957 (unsigned long long)data_sinfo->total_bytes);
2958#endif
3319 return -ENOSPC; 2959 return -ENOSPC;
3320 } 2960 }
3321 data_sinfo->bytes_may_use += bytes; 2961 data_sinfo->bytes_may_use += bytes;
@@ -3326,12 +2966,13 @@ alloc:
3326} 2966}
3327 2967
3328/* 2968/*
3329 * if there was an error for whatever reason after calling 2969 * called when we are clearing a delalloc extent from the
3330 * btrfs_check_data_free_space, call this so we can cleanup the counters. 2970 * inode's io_tree or there was an error for whatever reason
2971 * after calling btrfs_check_data_free_space
3331 */ 2972 */
3332void btrfs_free_reserved_data_space(struct btrfs_root *root, 2973void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3333 struct inode *inode, u64 bytes)
3334{ 2974{
2975 struct btrfs_root *root = BTRFS_I(inode)->root;
3335 struct btrfs_space_info *data_sinfo; 2976 struct btrfs_space_info *data_sinfo;
3336 2977
3337 /* make sure bytes are sectorsize aligned */ 2978 /* make sure bytes are sectorsize aligned */
@@ -3344,48 +2985,6 @@ void btrfs_free_reserved_data_space(struct btrfs_root *root,
3344 spin_unlock(&data_sinfo->lock); 2985 spin_unlock(&data_sinfo->lock);
3345} 2986}
3346 2987
3347/* called when we are adding a delalloc extent to the inode's io_tree */
3348void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
3349 u64 bytes)
3350{
3351 struct btrfs_space_info *data_sinfo;
3352
3353 /* get the space info for where this inode will be storing its data */
3354 data_sinfo = BTRFS_I(inode)->space_info;
3355
3356 /* make sure we have enough space to handle the data first */
3357 spin_lock(&data_sinfo->lock);
3358 data_sinfo->bytes_delalloc += bytes;
3359
3360 /*
3361 * we are adding a delalloc extent without calling
3362 * btrfs_check_data_free_space first. This happens on a weird
3363 * writepage condition, but shouldn't hurt our accounting
3364 */
3365 if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
3366 data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
3367 BTRFS_I(inode)->reserved_bytes = 0;
3368 } else {
3369 data_sinfo->bytes_may_use -= bytes;
3370 BTRFS_I(inode)->reserved_bytes -= bytes;
3371 }
3372
3373 spin_unlock(&data_sinfo->lock);
3374}
3375
3376/* called when we are clearing a delalloc extent from the inode's io_tree */
3377void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
3378 u64 bytes)
3379{
3380 struct btrfs_space_info *info;
3381
3382 info = BTRFS_I(inode)->space_info;
3383
3384 spin_lock(&info->lock);
3385 info->bytes_delalloc -= bytes;
3386 spin_unlock(&info->lock);
3387}
3388
3389static void force_metadata_allocation(struct btrfs_fs_info *info) 2988static void force_metadata_allocation(struct btrfs_fs_info *info)
3390{ 2989{
3391 struct list_head *head = &info->space_info; 2990 struct list_head *head = &info->space_info;
@@ -3399,13 +2998,28 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
3399 rcu_read_unlock(); 2998 rcu_read_unlock();
3400} 2999}
3401 3000
3001static int should_alloc_chunk(struct btrfs_space_info *sinfo,
3002 u64 alloc_bytes)
3003{
3004 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3005
3006 if (sinfo->bytes_used + sinfo->bytes_reserved +
3007 alloc_bytes + 256 * 1024 * 1024 < num_bytes)
3008 return 0;
3009
3010 if (sinfo->bytes_used + sinfo->bytes_reserved +
3011 alloc_bytes < div_factor(num_bytes, 8))
3012 return 0;
3013
3014 return 1;
3015}
3016
3402static int do_chunk_alloc(struct btrfs_trans_handle *trans, 3017static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3403 struct btrfs_root *extent_root, u64 alloc_bytes, 3018 struct btrfs_root *extent_root, u64 alloc_bytes,
3404 u64 flags, int force) 3019 u64 flags, int force)
3405{ 3020{
3406 struct btrfs_space_info *space_info; 3021 struct btrfs_space_info *space_info;
3407 struct btrfs_fs_info *fs_info = extent_root->fs_info; 3022 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3408 u64 thresh;
3409 int ret = 0; 3023 int ret = 0;
3410 3024
3411 mutex_lock(&fs_info->chunk_mutex); 3025 mutex_lock(&fs_info->chunk_mutex);
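A userspace model of the should_alloc_chunk() test introduced above: a new chunk is only worth allocating once the pending reservation would leave less than 256 MiB of slack and the existing space is at least 80% committed. This assumes div_factor(n, f) evaluates to n * f / 10, as the btrfs helper of that name does; the numbers in main() are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* assumed equivalent of the btrfs helper: n * f / 10 */
static uint64_t div_factor(uint64_t num, int factor)
{
	return num * factor / 10;
}

/* Model of the two thresholds in should_alloc_chunk(). */
static int should_alloc_chunk(uint64_t total, uint64_t readonly,
			      uint64_t used, uint64_t reserved,
			      uint64_t alloc_bytes)
{
	uint64_t num_bytes = total - readonly;

	/* more than 256 MiB of slack would remain: no new chunk needed */
	if (used + reserved + alloc_bytes + (256ULL << 20) < num_bytes)
		return 0;

	/* still below 80% committed: no new chunk needed either */
	if (used + reserved + alloc_bytes < div_factor(num_bytes, 8))
		return 0;

	return 1;
}

int main(void)
{
	uint64_t total = 1024ULL << 20;		/* 1 GiB of metadata chunks */

	/* nearly full: 900 MiB used, asking for 64 MiB more -> allocate (1) */
	printf("%d\n", should_alloc_chunk(total, 0, 900ULL << 20, 0, 64ULL << 20));
	/* mostly empty: 100 MiB used -> keep using the existing chunks (0) */
	printf("%d\n", should_alloc_chunk(total, 0, 100ULL << 20, 0, 64ULL << 20));
	return 0;
}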
@@ -3428,11 +3042,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3428 goto out; 3042 goto out;
3429 } 3043 }
3430 3044
3431 thresh = space_info->total_bytes - space_info->bytes_readonly; 3045 if (!force && !should_alloc_chunk(space_info, alloc_bytes)) {
3432 thresh = div_factor(thresh, 8);
3433 if (!force &&
3434 (space_info->bytes_used + space_info->bytes_pinned +
3435 space_info->bytes_reserved + alloc_bytes) < thresh) {
3436 spin_unlock(&space_info->lock); 3046 spin_unlock(&space_info->lock);
3437 goto out; 3047 goto out;
3438 } 3048 }
@@ -3454,6 +3064,8 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3454 spin_lock(&space_info->lock); 3064 spin_lock(&space_info->lock);
3455 if (ret) 3065 if (ret)
3456 space_info->full = 1; 3066 space_info->full = 1;
3067 else
3068 ret = 1;
3457 space_info->force_alloc = 0; 3069 space_info->force_alloc = 0;
3458 spin_unlock(&space_info->lock); 3070 spin_unlock(&space_info->lock);
3459out: 3071out:
@@ -3461,13 +3073,713 @@ out:
3461 return ret; 3073 return ret;
3462} 3074}
3463 3075
3076static int maybe_allocate_chunk(struct btrfs_trans_handle *trans,
3077 struct btrfs_root *root,
3078 struct btrfs_space_info *sinfo, u64 num_bytes)
3079{
3080 int ret;
3081 int end_trans = 0;
3082
3083 if (sinfo->full)
3084 return 0;
3085
3086 spin_lock(&sinfo->lock);
3087 ret = should_alloc_chunk(sinfo, num_bytes + 2 * 1024 * 1024);
3088 spin_unlock(&sinfo->lock);
3089 if (!ret)
3090 return 0;
3091
3092 if (!trans) {
3093 trans = btrfs_join_transaction(root, 1);
3094 BUG_ON(IS_ERR(trans));
3095 end_trans = 1;
3096 }
3097
3098 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3099 num_bytes + 2 * 1024 * 1024,
3100 get_alloc_profile(root, sinfo->flags), 0);
3101
3102 if (end_trans)
3103 btrfs_end_transaction(trans, root);
3104
3105 return ret == 1 ? 1 : 0;
3106}
3107
3108/*
3109 * shrink metadata reservation for delalloc
3110 */
3111static int shrink_delalloc(struct btrfs_trans_handle *trans,
3112 struct btrfs_root *root, u64 to_reclaim)
3113{
3114 struct btrfs_block_rsv *block_rsv;
3115 u64 reserved;
3116 u64 max_reclaim;
3117 u64 reclaimed = 0;
3118 int pause = 1;
3119 int ret;
3120
3121 block_rsv = &root->fs_info->delalloc_block_rsv;
3122 spin_lock(&block_rsv->lock);
3123 reserved = block_rsv->reserved;
3124 spin_unlock(&block_rsv->lock);
3125
3126 if (reserved == 0)
3127 return 0;
3128
3129 max_reclaim = min(reserved, to_reclaim);
3130
3131 while (1) {
3132 ret = btrfs_start_one_delalloc_inode(root, trans ? 1 : 0);
3133 if (!ret) {
3134 __set_current_state(TASK_INTERRUPTIBLE);
3135 schedule_timeout(pause);
3136 pause <<= 1;
3137 if (pause > HZ / 10)
3138 pause = HZ / 10;
3139 } else {
3140 pause = 1;
3141 }
3142
3143 spin_lock(&block_rsv->lock);
3144 if (reserved > block_rsv->reserved)
3145 reclaimed = reserved - block_rsv->reserved;
3146 reserved = block_rsv->reserved;
3147 spin_unlock(&block_rsv->lock);
3148
3149 if (reserved == 0 || reclaimed >= max_reclaim)
3150 break;
3151
3152 if (trans && trans->transaction->blocked)
3153 return -EAGAIN;
3154 }
3155 return reclaimed >= to_reclaim;
3156}
3157
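When no delalloc inode could be started, shrink_delalloc() above sleeps with an exponential back-off capped at HZ/10 jiffies, i.e. roughly 100 ms regardless of the tick rate. A standalone sketch of just that pacing schedule (HZ is picked arbitrarily here; the kernel code also resets the pause to 1 whenever a flush makes progress):

#include <stdio.h>

#define HZ 250	/* illustrative tick rate; the cap below is 100 ms either way */

int main(void)
{
	int pause = 1;	/* jiffies to sleep when nothing could be flushed */
	int i;

	for (i = 0; i < 8; i++) {
		printf("idle pass %d: sleep %d jiffies (~%d ms)\n",
		       i, pause, pause * 1000 / HZ);
		pause <<= 1;			/* double the back-off ...  */
		if (pause > HZ / 10)
			pause = HZ / 10;	/* ... but cap it at HZ/10  */
	}
	return 0;
}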
3158static int should_retry_reserve(struct btrfs_trans_handle *trans,
3159 struct btrfs_root *root,
3160 struct btrfs_block_rsv *block_rsv,
3161 u64 num_bytes, int *retries)
3162{
3163 struct btrfs_space_info *space_info = block_rsv->space_info;
3164 int ret;
3165
3166 if ((*retries) > 2)
3167 return -ENOSPC;
3168
3169 ret = maybe_allocate_chunk(trans, root, space_info, num_bytes);
3170 if (ret)
3171 return 1;
3172
3173 if (trans && trans->transaction->in_commit)
3174 return -ENOSPC;
3175
3176 ret = shrink_delalloc(trans, root, num_bytes);
3177 if (ret)
3178 return ret;
3179
3180 spin_lock(&space_info->lock);
3181 if (space_info->bytes_pinned < num_bytes)
3182 ret = 1;
3183 spin_unlock(&space_info->lock);
3184 if (ret)
3185 return -ENOSPC;
3186
3187 (*retries)++;
3188
3189 if (trans)
3190 return -EAGAIN;
3191
3192 trans = btrfs_join_transaction(root, 1);
3193 BUG_ON(IS_ERR(trans));
3194 ret = btrfs_commit_transaction(trans, root);
3195 BUG_ON(ret);
3196
3197 return 1;
3198}
3199
3200static int reserve_metadata_bytes(struct btrfs_block_rsv *block_rsv,
3201 u64 num_bytes)
3202{
3203 struct btrfs_space_info *space_info = block_rsv->space_info;
3204 u64 unused;
3205 int ret = -ENOSPC;
3206
3207 spin_lock(&space_info->lock);
3208 unused = space_info->bytes_used + space_info->bytes_reserved +
3209 space_info->bytes_pinned + space_info->bytes_readonly;
3210
3211 if (unused < space_info->total_bytes)
3212 unused = space_info->total_bytes - unused;
3213 else
3214 unused = 0;
3215
3216 if (unused >= num_bytes) {
3217 if (block_rsv->priority >= 10) {
3218 space_info->bytes_reserved += num_bytes;
3219 ret = 0;
3220 } else {
3221 if ((unused + block_rsv->reserved) *
3222 block_rsv->priority >=
3223 (num_bytes + block_rsv->reserved) * 10) {
3224 space_info->bytes_reserved += num_bytes;
3225 ret = 0;
3226 }
3227 }
3228 }
3229 spin_unlock(&space_info->lock);
3230
3231 return ret;
3232}
3233
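The grant test in reserve_metadata_bytes() above can be read as: a priority-10 reservation may take any unused space, while a lower-priority one only succeeds if its share stays proportional to its priority after the grant. A standalone model of that check, with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

/*
 * Model of the grant test: 'unused' is free space in the metadata
 * space_info, 'reserved' what this block_rsv already holds, 'num_bytes'
 * the new request, 'priority' 1..10.
 */
static int may_reserve(uint64_t unused, uint64_t reserved,
		       uint64_t num_bytes, int priority)
{
	if (unused < num_bytes)
		return 0;			/* not enough free space at all */

	if (priority >= 10)
		return 1;			/* highest priority always wins */

	/* lower priority: only take the bytes if plenty of room remains */
	return (unused + reserved) * priority >=
	       (num_bytes + reserved) * 10;
}

int main(void)
{
	/* 1 GiB unused: a 512 MiB request needs priority 5 or more to pass */
	printf("prio 6: %d\n", may_reserve(1ULL << 30, 0, 512ULL << 20, 6)); /* 1 */
	printf("prio 4: %d\n", may_reserve(1ULL << 30, 0, 512ULL << 20, 4)); /* 0 */
	printf("prio 10, tight space: %d\n",
	       may_reserve(64ULL << 20, 0, 64ULL << 20, 10));		      /* 1 */
	return 0;
}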
3234static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3235 struct btrfs_root *root)
3236{
3237 struct btrfs_block_rsv *block_rsv;
3238 if (root->ref_cows)
3239 block_rsv = trans->block_rsv;
3240 else
3241 block_rsv = root->block_rsv;
3242
3243 if (!block_rsv)
3244 block_rsv = &root->fs_info->empty_block_rsv;
3245
3246 return block_rsv;
3247}
3248
3249static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3250 u64 num_bytes)
3251{
3252 int ret = -ENOSPC;
3253 spin_lock(&block_rsv->lock);
3254 if (block_rsv->reserved >= num_bytes) {
3255 block_rsv->reserved -= num_bytes;
3256 if (block_rsv->reserved < block_rsv->size)
3257 block_rsv->full = 0;
3258 ret = 0;
3259 }
3260 spin_unlock(&block_rsv->lock);
3261 return ret;
3262}
3263
3264static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3265 u64 num_bytes, int update_size)
3266{
3267 spin_lock(&block_rsv->lock);
3268 block_rsv->reserved += num_bytes;
3269 if (update_size)
3270 block_rsv->size += num_bytes;
3271 else if (block_rsv->reserved >= block_rsv->size)
3272 block_rsv->full = 1;
3273 spin_unlock(&block_rsv->lock);
3274}
3275
3276void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3277 struct btrfs_block_rsv *dest, u64 num_bytes)
3278{
3279 struct btrfs_space_info *space_info = block_rsv->space_info;
3280
3281 spin_lock(&block_rsv->lock);
3282 if (num_bytes == (u64)-1)
3283 num_bytes = block_rsv->size;
3284 block_rsv->size -= num_bytes;
3285 if (block_rsv->reserved >= block_rsv->size) {
3286 num_bytes = block_rsv->reserved - block_rsv->size;
3287 block_rsv->reserved = block_rsv->size;
3288 block_rsv->full = 1;
3289 } else {
3290 num_bytes = 0;
3291 }
3292 spin_unlock(&block_rsv->lock);
3293
3294 if (num_bytes > 0) {
3295 if (dest) {
3296 block_rsv_add_bytes(dest, num_bytes, 0);
3297 } else {
3298 spin_lock(&space_info->lock);
3299 space_info->bytes_reserved -= num_bytes;
3300 spin_unlock(&space_info->lock);
3301 }
3302 }
3303}
3304
3305static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3306 struct btrfs_block_rsv *dst, u64 num_bytes)
3307{
3308 int ret;
3309
3310 ret = block_rsv_use_bytes(src, num_bytes);
3311 if (ret)
3312 return ret;
3313
3314 block_rsv_add_bytes(dst, num_bytes, 1);
3315 return 0;
3316}
3317
3318void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3319{
3320 memset(rsv, 0, sizeof(*rsv));
3321 spin_lock_init(&rsv->lock);
3322 atomic_set(&rsv->usage, 1);
3323 rsv->priority = 6;
3324 INIT_LIST_HEAD(&rsv->list);
3325}
3326
3327struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3328{
3329 struct btrfs_block_rsv *block_rsv;
3330 struct btrfs_fs_info *fs_info = root->fs_info;
3331 u64 alloc_target;
3332
3333 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3334 if (!block_rsv)
3335 return NULL;
3336
3337 btrfs_init_block_rsv(block_rsv);
3338
3339 alloc_target = btrfs_get_alloc_profile(root, 0);
3340 block_rsv->space_info = __find_space_info(fs_info,
3341 BTRFS_BLOCK_GROUP_METADATA);
3342
3343 return block_rsv;
3344}
3345
3346void btrfs_free_block_rsv(struct btrfs_root *root,
3347 struct btrfs_block_rsv *rsv)
3348{
3349 if (rsv && atomic_dec_and_test(&rsv->usage)) {
3350 btrfs_block_rsv_release(root, rsv, (u64)-1);
3351 if (!rsv->durable)
3352 kfree(rsv);
3353 }
3354}
3355
3356/*
3357 * make the block_rsv struct able to capture freed space.
3358 * the captured space will be re-added to the block_rsv struct
3359 * after transaction commit
3360 */
3361void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
3362 struct btrfs_block_rsv *block_rsv)
3363{
3364 block_rsv->durable = 1;
3365 mutex_lock(&fs_info->durable_block_rsv_mutex);
3366 list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
3367 mutex_unlock(&fs_info->durable_block_rsv_mutex);
3368}
3369
3370int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
3371 struct btrfs_root *root,
3372 struct btrfs_block_rsv *block_rsv,
3373 u64 num_bytes, int *retries)
3374{
3375 int ret;
3376
3377 if (num_bytes == 0)
3378 return 0;
3379again:
3380 ret = reserve_metadata_bytes(block_rsv, num_bytes);
3381 if (!ret) {
3382 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3383 return 0;
3384 }
3385
3386 ret = should_retry_reserve(trans, root, block_rsv, num_bytes, retries);
3387 if (ret > 0)
3388 goto again;
3389
3390 return ret;
3391}
3392
3393int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3394 struct btrfs_root *root,
3395 struct btrfs_block_rsv *block_rsv,
3396 u64 min_reserved, int min_factor)
3397{
3398 u64 num_bytes = 0;
3399 int commit_trans = 0;
3400 int ret = -ENOSPC;
3401
3402 if (!block_rsv)
3403 return 0;
3404
3405 spin_lock(&block_rsv->lock);
3406 if (min_factor > 0)
3407 num_bytes = div_factor(block_rsv->size, min_factor);
3408 if (min_reserved > num_bytes)
3409 num_bytes = min_reserved;
3410
3411 if (block_rsv->reserved >= num_bytes) {
3412 ret = 0;
3413 } else {
3414 num_bytes -= block_rsv->reserved;
3415 if (block_rsv->durable &&
3416 block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
3417 commit_trans = 1;
3418 }
3419 spin_unlock(&block_rsv->lock);
3420 if (!ret)
3421 return 0;
3422
3423 if (block_rsv->refill_used) {
3424 ret = reserve_metadata_bytes(block_rsv, num_bytes);
3425 if (!ret) {
3426 block_rsv_add_bytes(block_rsv, num_bytes, 0);
3427 return 0;
3428 }
3429 }
3430
3431 if (commit_trans) {
3432 if (trans)
3433 return -EAGAIN;
3434
3435 trans = btrfs_join_transaction(root, 1);
3436 BUG_ON(IS_ERR(trans));
3437 ret = btrfs_commit_transaction(trans, root);
3438 return 0;
3439 }
3440
3441 WARN_ON(1);
3442 printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n",
3443 block_rsv->size, block_rsv->reserved,
3444 block_rsv->freed[0], block_rsv->freed[1]);
3445
3446 return -ENOSPC;
3447}
3448
3449int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3450 struct btrfs_block_rsv *dst_rsv,
3451 u64 num_bytes)
3452{
3453 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3454}
3455
3456void btrfs_block_rsv_release(struct btrfs_root *root,
3457 struct btrfs_block_rsv *block_rsv,
3458 u64 num_bytes)
3459{
3460 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3461 if (global_rsv->full || global_rsv == block_rsv ||
3462 block_rsv->space_info != global_rsv->space_info)
3463 global_rsv = NULL;
3464 block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
3465}
3466
3467/*
3468 * helper to calculate the size of the global block reservation.
3469 * the desired value is the sum of space used by the extent tree,
3470 * checksum tree and root tree
3471 */
3472static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
3473{
3474 struct btrfs_space_info *sinfo;
3475 u64 num_bytes;
3476 u64 meta_used;
3477 u64 data_used;
3478 int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
3479#if 0
3480 /*
3481	 * per tree used space accounting can be inaccurate, so we
3482 * can't rely on it.
3483 */
3484 spin_lock(&fs_info->extent_root->accounting_lock);
3485 num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
3486 spin_unlock(&fs_info->extent_root->accounting_lock);
3487
3488 spin_lock(&fs_info->csum_root->accounting_lock);
3489 num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
3490 spin_unlock(&fs_info->csum_root->accounting_lock);
3491
3492 spin_lock(&fs_info->tree_root->accounting_lock);
3493 num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
3494 spin_unlock(&fs_info->tree_root->accounting_lock);
3495#endif
3496 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
3497 spin_lock(&sinfo->lock);
3498 data_used = sinfo->bytes_used;
3499 spin_unlock(&sinfo->lock);
3500
3501 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3502 spin_lock(&sinfo->lock);
3503 meta_used = sinfo->bytes_used;
3504 spin_unlock(&sinfo->lock);
3505
3506 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
3507 csum_size * 2;
3508 num_bytes += div64_u64(data_used + meta_used, 50);
3509
3510 if (num_bytes * 3 > meta_used)
3511 num_bytes = div64_u64(meta_used, 3);
3512
3513 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
3514}
3515
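The heuristic in calc_global_metadata_size() above works out to: room to rewrite every data checksum twice, plus 2% of all used space, capped at a third of the metadata currently in use and rounded up to leafsize * 1024 bytes. The sketch below reproduces that arithmetic in userspace; the checksum size, block size and leaf size are illustrative defaults (crc32c, 4 KiB), not values read from a superblock:

#include <stdio.h>
#include <stdint.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

/* Model of calc_global_metadata_size() with assumed on-disk parameters. */
static uint64_t global_rsv_size(uint64_t data_used, uint64_t meta_used)
{
	uint64_t csum_size = 4;		/* bytes per crc32c checksum   */
	uint64_t blocksize_bits = 12;	/* 4 KiB data blocks           */
	uint64_t leafsize = 4096;	/* 4 KiB metadata leaves       */
	uint64_t num_bytes;

	/* space to rewrite every data checksum, twice */
	num_bytes = (data_used >> blocksize_bits) * csum_size * 2;
	/* plus 2% of everything in use */
	num_bytes += (data_used + meta_used) / 50;

	/* but never more than a third of the metadata actually used */
	if (num_bytes * 3 > meta_used)
		num_bytes = meta_used / 3;

	return ALIGN_UP(num_bytes, leafsize << 10);
}

int main(void)
{
	/* 100 GiB of data, 3 GiB of metadata in use -> a 1 GiB reservation */
	uint64_t size = global_rsv_size(100ULL << 30, 3ULL << 30);

	printf("global reservation: %llu MiB\n",
	       (unsigned long long)(size >> 20));
	return 0;
}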
3516static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3517{
3518 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3519 struct btrfs_space_info *sinfo = block_rsv->space_info;
3520 u64 num_bytes;
3521
3522 num_bytes = calc_global_metadata_size(fs_info);
3523
3524 spin_lock(&block_rsv->lock);
3525 spin_lock(&sinfo->lock);
3526
3527 block_rsv->size = num_bytes;
3528
3529 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
3530 sinfo->bytes_reserved + sinfo->bytes_readonly;
3531
3532 if (sinfo->total_bytes > num_bytes) {
3533 num_bytes = sinfo->total_bytes - num_bytes;
3534 block_rsv->reserved += num_bytes;
3535 sinfo->bytes_reserved += num_bytes;
3536 }
3537
3538 if (block_rsv->reserved >= block_rsv->size) {
3539 num_bytes = block_rsv->reserved - block_rsv->size;
3540 sinfo->bytes_reserved -= num_bytes;
3541 block_rsv->reserved = block_rsv->size;
3542 block_rsv->full = 1;
3543 }
3544#if 0
3545 printk(KERN_INFO"global block rsv size %llu reserved %llu\n",
3546 block_rsv->size, block_rsv->reserved);
3547#endif
3548 spin_unlock(&sinfo->lock);
3549 spin_unlock(&block_rsv->lock);
3550}
3551
3552static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
3553{
3554 struct btrfs_space_info *space_info;
3555
3556 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3557 fs_info->chunk_block_rsv.space_info = space_info;
3558 fs_info->chunk_block_rsv.priority = 10;
3559
3560 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3561 fs_info->global_block_rsv.space_info = space_info;
3562 fs_info->global_block_rsv.priority = 10;
3563 fs_info->global_block_rsv.refill_used = 1;
3564 fs_info->delalloc_block_rsv.space_info = space_info;
3565 fs_info->trans_block_rsv.space_info = space_info;
3566 fs_info->empty_block_rsv.space_info = space_info;
3567 fs_info->empty_block_rsv.priority = 10;
3568
3569 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
3570 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
3571 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
3572 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
3573 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
3574
3575 btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);
3576
3577 btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);
3578
3579 update_global_block_rsv(fs_info);
3580}
3581
3582static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3583{
3584 block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
3585 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
3586 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
3587 WARN_ON(fs_info->trans_block_rsv.size > 0);
3588 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
3589 WARN_ON(fs_info->chunk_block_rsv.size > 0);
3590 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3591}
3592
3593static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
3594{
3595 return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
3596 3 * num_items;
3597}
3598
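/*
 * Rough sketch, not part of the patch: assuming 4KiB leafsize and
 * nodesize and a BTRFS_MAX_LEVEL of 8, calc_trans_metadata_size()
 * above reserves (4096 + 4096 * 7) * 3 = 96KiB per item - enough to
 * COW a full-height tree path, tripled for slack.
 */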
3599int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3600 struct btrfs_root *root,
3601 int num_items, int *retries)
3602{
3603 u64 num_bytes;
3604 int ret;
3605
3606 if (num_items == 0 || root->fs_info->chunk_root == root)
3607 return 0;
3608
3609 num_bytes = calc_trans_metadata_size(root, num_items);
3610 ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
3611 num_bytes, retries);
3612 if (!ret) {
3613 trans->bytes_reserved += num_bytes;
3614 trans->block_rsv = &root->fs_info->trans_block_rsv;
3615 }
3616 return ret;
3617}
3618
3619void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
3620 struct btrfs_root *root)
3621{
3622 if (!trans->bytes_reserved)
3623 return;
3624
3625 BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
3626 btrfs_block_rsv_release(root, trans->block_rsv,
3627 trans->bytes_reserved);
3628 trans->bytes_reserved = 0;
3629}
3630
3631int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3632 struct inode *inode)
3633{
3634 struct btrfs_root *root = BTRFS_I(inode)->root;
3635 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3636 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3637
3638 /*
3639 * one for deleting the orphan item, one for updating the inode
3640 * and two for calling btrfs_truncate_inode_items.
3641 *
3642 * btrfs_truncate_inode_items is a delete operation; in most cases
3643 * it frees more space than it uses. So two units of metadata
3644 * space should be enough for calling it many times. If all of
3645 * the metadata space is used, we can commit the transaction and
3646 * use the space it freed.
3647 */
3648 u64 num_bytes = calc_trans_metadata_size(root, 4);
3649 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3650}
3651
3652void btrfs_orphan_release_metadata(struct inode *inode)
3653{
3654 struct btrfs_root *root = BTRFS_I(inode)->root;
3655 u64 num_bytes = calc_trans_metadata_size(root, 4);
3656 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3657}
3658
3659int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
3660 struct btrfs_pending_snapshot *pending)
3661{
3662 struct btrfs_root *root = pending->root;
3663 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3664 struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
3665 /*
3666 * two for root back/forward refs, two for directory entries
3667 * and one for root of the snapshot.
3668 */
3669 u64 num_bytes = calc_trans_metadata_size(root, 5);
3670 dst_rsv->space_info = src_rsv->space_info;
3671 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3672}
3673
3674static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
3675{
3676 return num_bytes >>= 3;
3677}
3678
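/*
 * Illustrative note, not part of the patch: calc_csum_metadata_size()
 * is a simple heuristic that sets aside one eighth of the data bytes
 * for checksum items. A 1MiB delalloc reservation, for example, adds
 * 128KiB of checksum headroom to the metadata reserved in
 * btrfs_delalloc_reserve_metadata() below.
 */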
3679int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
3680{
3681 struct btrfs_root *root = BTRFS_I(inode)->root;
3682 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
3683 u64 to_reserve;
3684 int nr_extents;
3685 int retries = 0;
3686 int ret;
3687
3688 if (btrfs_transaction_in_commit(root->fs_info))
3689 schedule_timeout(1);
3690
3691 num_bytes = ALIGN(num_bytes, root->sectorsize);
3692again:
3693 spin_lock(&BTRFS_I(inode)->accounting_lock);
3694 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
3695 if (nr_extents > BTRFS_I(inode)->reserved_extents) {
3696 nr_extents -= BTRFS_I(inode)->reserved_extents;
3697 to_reserve = calc_trans_metadata_size(root, nr_extents);
3698 } else {
3699 nr_extents = 0;
3700 to_reserve = 0;
3701 }
3702
3703 to_reserve += calc_csum_metadata_size(inode, num_bytes);
3704 ret = reserve_metadata_bytes(block_rsv, to_reserve);
3705 if (ret) {
3706 spin_unlock(&BTRFS_I(inode)->accounting_lock);
3707 ret = should_retry_reserve(NULL, root, block_rsv, to_reserve,
3708 &retries);
3709 if (ret > 0)
3710 goto again;
3711 return ret;
3712 }
3713
3714 BTRFS_I(inode)->reserved_extents += nr_extents;
3715 atomic_inc(&BTRFS_I(inode)->outstanding_extents);
3716 spin_unlock(&BTRFS_I(inode)->accounting_lock);
3717
3718 block_rsv_add_bytes(block_rsv, to_reserve, 1);
3719
3720 if (block_rsv->size > 512 * 1024 * 1024)
3721 shrink_delalloc(NULL, root, to_reserve);
3722
3723 return 0;
3724}
3725
3726void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
3727{
3728 struct btrfs_root *root = BTRFS_I(inode)->root;
3729 u64 to_free;
3730 int nr_extents;
3731
3732 num_bytes = ALIGN(num_bytes, root->sectorsize);
3733 atomic_dec(&BTRFS_I(inode)->outstanding_extents);
3734
3735 spin_lock(&BTRFS_I(inode)->accounting_lock);
3736 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
3737 if (nr_extents < BTRFS_I(inode)->reserved_extents) {
3738 nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
3739 BTRFS_I(inode)->reserved_extents -= nr_extents;
3740 } else {
3741 nr_extents = 0;
3742 }
3743 spin_unlock(&BTRFS_I(inode)->accounting_lock);
3744
3745 to_free = calc_csum_metadata_size(inode, num_bytes);
3746 if (nr_extents > 0)
3747 to_free += calc_trans_metadata_size(root, nr_extents);
3748
3749 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
3750 to_free);
3751}
3752
3753int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
3754{
3755 int ret;
3756
3757 ret = btrfs_check_data_free_space(inode, num_bytes);
3758 if (ret)
3759 return ret;
3760
3761 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
3762 if (ret) {
3763 btrfs_free_reserved_data_space(inode, num_bytes);
3764 return ret;
3765 }
3766
3767 return 0;
3768}
3769
3770void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
3771{
3772 btrfs_delalloc_release_metadata(inode, num_bytes);
3773 btrfs_free_reserved_data_space(inode, num_bytes);
3774}
3775
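/*
 * Illustrative sketch, not part of the patch: how a caller is expected
 * to pair the delalloc helpers above. example_buffered_write() and
 * do_the_write() are hypothetical names used only for this example.
 */
static int example_buffered_write(struct inode *inode, u64 len)
{
	int ret;

	/* reserve data space and the matching metadata up front */
	ret = btrfs_delalloc_reserve_space(inode, len);
	if (ret)
		return ret;

	ret = do_the_write(inode, len);		/* hypothetical helper */
	if (ret) {
		/* nothing was dirtied, hand both reservations back */
		btrfs_delalloc_release_space(inode, len);
		return ret;
	}

	/* on success the reservation is released later, when the
	 * dirtied range is written out and delalloc is cleared */
	return 0;
}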
3464static int update_block_group(struct btrfs_trans_handle *trans, 3776static int update_block_group(struct btrfs_trans_handle *trans,
3465 struct btrfs_root *root, 3777 struct btrfs_root *root,
3466 u64 bytenr, u64 num_bytes, int alloc, 3778 u64 bytenr, u64 num_bytes, int alloc)
3467 int mark_free)
3468{ 3779{
3469 struct btrfs_block_group_cache *cache; 3780 struct btrfs_block_group_cache *cache;
3470 struct btrfs_fs_info *info = root->fs_info; 3781 struct btrfs_fs_info *info = root->fs_info;
3782 int factor;
3471 u64 total = num_bytes; 3783 u64 total = num_bytes;
3472 u64 old_val; 3784 u64 old_val;
3473 u64 byte_in_group; 3785 u64 byte_in_group;
@@ -3486,6 +3798,12 @@ static int update_block_group(struct btrfs_trans_handle *trans,
3486 cache = btrfs_lookup_block_group(info, bytenr); 3798 cache = btrfs_lookup_block_group(info, bytenr);
3487 if (!cache) 3799 if (!cache)
3488 return -1; 3800 return -1;
3801 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
3802 BTRFS_BLOCK_GROUP_RAID1 |
3803 BTRFS_BLOCK_GROUP_RAID10))
3804 factor = 2;
3805 else
3806 factor = 1;
3489 byte_in_group = bytenr - cache->key.objectid; 3807 byte_in_group = bytenr - cache->key.objectid;
3490 WARN_ON(byte_in_group > cache->key.offset); 3808 WARN_ON(byte_in_group > cache->key.offset);
3491 3809
@@ -3498,31 +3816,24 @@ static int update_block_group(struct btrfs_trans_handle *trans,
3498 old_val += num_bytes; 3816 old_val += num_bytes;
3499 btrfs_set_block_group_used(&cache->item, old_val); 3817 btrfs_set_block_group_used(&cache->item, old_val);
3500 cache->reserved -= num_bytes; 3818 cache->reserved -= num_bytes;
3501 cache->space_info->bytes_used += num_bytes;
3502 cache->space_info->bytes_reserved -= num_bytes; 3819 cache->space_info->bytes_reserved -= num_bytes;
3503 if (cache->ro) 3820 cache->space_info->bytes_used += num_bytes;
3504 cache->space_info->bytes_readonly -= num_bytes; 3821 cache->space_info->disk_used += num_bytes * factor;
3505 spin_unlock(&cache->lock); 3822 spin_unlock(&cache->lock);
3506 spin_unlock(&cache->space_info->lock); 3823 spin_unlock(&cache->space_info->lock);
3507 } else { 3824 } else {
3508 old_val -= num_bytes; 3825 old_val -= num_bytes;
3509 cache->space_info->bytes_used -= num_bytes;
3510 if (cache->ro)
3511 cache->space_info->bytes_readonly += num_bytes;
3512 btrfs_set_block_group_used(&cache->item, old_val); 3826 btrfs_set_block_group_used(&cache->item, old_val);
3827 cache->pinned += num_bytes;
3828 cache->space_info->bytes_pinned += num_bytes;
3829 cache->space_info->bytes_used -= num_bytes;
3830 cache->space_info->disk_used -= num_bytes * factor;
3513 spin_unlock(&cache->lock); 3831 spin_unlock(&cache->lock);
3514 spin_unlock(&cache->space_info->lock); 3832 spin_unlock(&cache->space_info->lock);
3515 if (mark_free) {
3516 int ret;
3517 3833
3518 ret = btrfs_discard_extent(root, bytenr, 3834 set_extent_dirty(info->pinned_extents,
3519 num_bytes); 3835 bytenr, bytenr + num_bytes - 1,
3520 WARN_ON(ret); 3836 GFP_NOFS | __GFP_NOFAIL);
3521
3522 ret = btrfs_add_free_space(cache, bytenr,
3523 num_bytes);
3524 WARN_ON(ret);
3525 }
3526 } 3837 }
3527 btrfs_put_block_group(cache); 3838 btrfs_put_block_group(cache);
3528 total -= num_bytes; 3839 total -= num_bytes;
@@ -3546,18 +3857,10 @@ static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
3546 return bytenr; 3857 return bytenr;
3547} 3858}
3548 3859
3549/* 3860static int pin_down_extent(struct btrfs_root *root,
3550 * this function must be called within transaction 3861 struct btrfs_block_group_cache *cache,
3551 */ 3862 u64 bytenr, u64 num_bytes, int reserved)
3552int btrfs_pin_extent(struct btrfs_root *root,
3553 u64 bytenr, u64 num_bytes, int reserved)
3554{ 3863{
3555 struct btrfs_fs_info *fs_info = root->fs_info;
3556 struct btrfs_block_group_cache *cache;
3557
3558 cache = btrfs_lookup_block_group(fs_info, bytenr);
3559 BUG_ON(!cache);
3560
3561 spin_lock(&cache->space_info->lock); 3864 spin_lock(&cache->space_info->lock);
3562 spin_lock(&cache->lock); 3865 spin_lock(&cache->lock);
3563 cache->pinned += num_bytes; 3866 cache->pinned += num_bytes;
@@ -3569,28 +3872,68 @@ int btrfs_pin_extent(struct btrfs_root *root,
3569 spin_unlock(&cache->lock); 3872 spin_unlock(&cache->lock);
3570 spin_unlock(&cache->space_info->lock); 3873 spin_unlock(&cache->space_info->lock);
3571 3874
3572 btrfs_put_block_group(cache); 3875 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
3876 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
3877 return 0;
3878}
3879
3880/*
3881 * this function must be called within transaction
3882 */
3883int btrfs_pin_extent(struct btrfs_root *root,
3884 u64 bytenr, u64 num_bytes, int reserved)
3885{
3886 struct btrfs_block_group_cache *cache;
3887
3888 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
3889 BUG_ON(!cache);
3890
3891 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
3573 3892
3574 set_extent_dirty(fs_info->pinned_extents, 3893 btrfs_put_block_group(cache);
3575 bytenr, bytenr + num_bytes - 1, GFP_NOFS);
3576 return 0; 3894 return 0;
3577} 3895}
3578 3896
3579static int update_reserved_extents(struct btrfs_block_group_cache *cache, 3897/*
3580 u64 num_bytes, int reserve) 3898 * update size of reserved extents. this function may return -EAGAIN
3899 * when the block group is read-only and 'reserve' is true or 'sinfo' is false.
3900 */
3901static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
3902 u64 num_bytes, int reserve, int sinfo)
3581{ 3903{
3582 spin_lock(&cache->space_info->lock); 3904 int ret = 0;
3583 spin_lock(&cache->lock); 3905 if (sinfo) {
3584 if (reserve) { 3906 struct btrfs_space_info *space_info = cache->space_info;
3585 cache->reserved += num_bytes; 3907 spin_lock(&space_info->lock);
3586 cache->space_info->bytes_reserved += num_bytes; 3908 spin_lock(&cache->lock);
3909 if (reserve) {
3910 if (cache->ro) {
3911 ret = -EAGAIN;
3912 } else {
3913 cache->reserved += num_bytes;
3914 space_info->bytes_reserved += num_bytes;
3915 }
3916 } else {
3917 if (cache->ro)
3918 space_info->bytes_readonly += num_bytes;
3919 cache->reserved -= num_bytes;
3920 space_info->bytes_reserved -= num_bytes;
3921 }
3922 spin_unlock(&cache->lock);
3923 spin_unlock(&space_info->lock);
3587 } else { 3924 } else {
3588 cache->reserved -= num_bytes; 3925 spin_lock(&cache->lock);
3589 cache->space_info->bytes_reserved -= num_bytes; 3926 if (cache->ro) {
3927 ret = -EAGAIN;
3928 } else {
3929 if (reserve)
3930 cache->reserved += num_bytes;
3931 else
3932 cache->reserved -= num_bytes;
3933 }
3934 spin_unlock(&cache->lock);
3590 } 3935 }
3591 spin_unlock(&cache->lock); 3936 return ret;
3592 spin_unlock(&cache->space_info->lock);
3593 return 0;
3594} 3937}
3595 3938
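/*
 * Note, not part of the patch: update_reserved_bytes() only returns
 * -EAGAIN when the block group has gone read-only under the caller.
 * find_free_extent() reacts by giving the range back to the free
 * space cache and moving on to another group; btrfs_free_tree_block()
 * retries with 'sinfo' set so the read-only accounting is updated.
 */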
3596int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 3939int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
@@ -3621,6 +3964,8 @@ int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
3621 fs_info->pinned_extents = &fs_info->freed_extents[0]; 3964 fs_info->pinned_extents = &fs_info->freed_extents[0];
3622 3965
3623 up_write(&fs_info->extent_commit_sem); 3966 up_write(&fs_info->extent_commit_sem);
3967
3968 update_global_block_rsv(fs_info);
3624 return 0; 3969 return 0;
3625} 3970}
3626 3971
@@ -3647,14 +3992,21 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
3647 btrfs_add_free_space(cache, start, len); 3992 btrfs_add_free_space(cache, start, len);
3648 } 3993 }
3649 3994
3995 start += len;
3996
3650 spin_lock(&cache->space_info->lock); 3997 spin_lock(&cache->space_info->lock);
3651 spin_lock(&cache->lock); 3998 spin_lock(&cache->lock);
3652 cache->pinned -= len; 3999 cache->pinned -= len;
3653 cache->space_info->bytes_pinned -= len; 4000 cache->space_info->bytes_pinned -= len;
4001 if (cache->ro) {
4002 cache->space_info->bytes_readonly += len;
4003 } else if (cache->reserved_pinned > 0) {
4004 len = min(len, cache->reserved_pinned);
4005 cache->reserved_pinned -= len;
4006 cache->space_info->bytes_reserved += len;
4007 }
3654 spin_unlock(&cache->lock); 4008 spin_unlock(&cache->lock);
3655 spin_unlock(&cache->space_info->lock); 4009 spin_unlock(&cache->space_info->lock);
3656
3657 start += len;
3658 } 4010 }
3659 4011
3660 if (cache) 4012 if (cache)
@@ -3667,8 +4019,11 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
3667{ 4019{
3668 struct btrfs_fs_info *fs_info = root->fs_info; 4020 struct btrfs_fs_info *fs_info = root->fs_info;
3669 struct extent_io_tree *unpin; 4021 struct extent_io_tree *unpin;
4022 struct btrfs_block_rsv *block_rsv;
4023 struct btrfs_block_rsv *next_rsv;
3670 u64 start; 4024 u64 start;
3671 u64 end; 4025 u64 end;
4026 int idx;
3672 int ret; 4027 int ret;
3673 4028
3674 if (fs_info->pinned_extents == &fs_info->freed_extents[0]) 4029 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
@@ -3689,59 +4044,30 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
3689 cond_resched(); 4044 cond_resched();
3690 } 4045 }
3691 4046
3692 return ret; 4047 mutex_lock(&fs_info->durable_block_rsv_mutex);
3693} 4048 list_for_each_entry_safe(block_rsv, next_rsv,
4049 &fs_info->durable_block_rsv_list, list) {
3694 4050
3695static int pin_down_bytes(struct btrfs_trans_handle *trans, 4051 idx = trans->transid & 0x1;
3696 struct btrfs_root *root, 4052 if (block_rsv->freed[idx] > 0) {
3697 struct btrfs_path *path, 4053 block_rsv_add_bytes(block_rsv,
3698 u64 bytenr, u64 num_bytes, 4054 block_rsv->freed[idx], 0);
3699 int is_data, int reserved, 4055 block_rsv->freed[idx] = 0;
3700 struct extent_buffer **must_clean) 4056 }
3701{ 4057 if (atomic_read(&block_rsv->usage) == 0) {
3702 int err = 0; 4058 btrfs_block_rsv_release(root, block_rsv, (u64)-1);
3703 struct extent_buffer *buf;
3704
3705 if (is_data)
3706 goto pinit;
3707
3708 /*
3709 * discard is sloooow, and so triggering discards on
3710 * individual btree blocks isn't a good plan. Just
3711 * pin everything in discard mode.
3712 */
3713 if (btrfs_test_opt(root, DISCARD))
3714 goto pinit;
3715
3716 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
3717 if (!buf)
3718 goto pinit;
3719 4059
3720 /* we can reuse a block if it hasn't been written 4060 if (block_rsv->freed[0] == 0 &&
3721 * and it is from this transaction. We can't 4061 block_rsv->freed[1] == 0) {
3722 * reuse anything from the tree log root because 4062 list_del_init(&block_rsv->list);
3723 * it has tiny sub-transactions. 4063 kfree(block_rsv);
3724 */ 4064 }
3725 if (btrfs_buffer_uptodate(buf, 0) && 4065 } else {
3726 btrfs_try_tree_lock(buf)) { 4066 btrfs_block_rsv_release(root, block_rsv, 0);
3727 u64 header_owner = btrfs_header_owner(buf);
3728 u64 header_transid = btrfs_header_generation(buf);
3729 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
3730 header_transid == trans->transid &&
3731 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
3732 *must_clean = buf;
3733 return 1;
3734 } 4067 }
3735 btrfs_tree_unlock(buf);
3736 } 4068 }
3737 free_extent_buffer(buf); 4069 mutex_unlock(&fs_info->durable_block_rsv_mutex);
3738pinit:
3739 if (path)
3740 btrfs_set_path_blocking(path);
3741 /* unlocks the pinned mutex */
3742 btrfs_pin_extent(root, bytenr, num_bytes, reserved);
3743 4070
3744 BUG_ON(err < 0);
3745 return 0; 4071 return 0;
3746} 4072}
3747 4073
@@ -3902,9 +4228,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3902 BUG_ON(ret); 4228 BUG_ON(ret);
3903 } 4229 }
3904 } else { 4230 } else {
3905 int mark_free = 0;
3906 struct extent_buffer *must_clean = NULL;
3907
3908 if (found_extent) { 4231 if (found_extent) {
3909 BUG_ON(is_data && refs_to_drop != 4232 BUG_ON(is_data && refs_to_drop !=
3910 extent_data_ref_count(root, path, iref)); 4233 extent_data_ref_count(root, path, iref));
@@ -3917,31 +4240,11 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3917 } 4240 }
3918 } 4241 }
3919 4242
3920 ret = pin_down_bytes(trans, root, path, bytenr,
3921 num_bytes, is_data, 0, &must_clean);
3922 if (ret > 0)
3923 mark_free = 1;
3924 BUG_ON(ret < 0);
3925 /*
3926 * it is going to be very rare for someone to be waiting
3927 * on the block we're freeing. del_items might need to
3928 * schedule, so rather than get fancy, just force it
3929 * to blocking here
3930 */
3931 if (must_clean)
3932 btrfs_set_lock_blocking(must_clean);
3933
3934 ret = btrfs_del_items(trans, extent_root, path, path->slots[0], 4243 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3935 num_to_del); 4244 num_to_del);
3936 BUG_ON(ret); 4245 BUG_ON(ret);
3937 btrfs_release_path(extent_root, path); 4246 btrfs_release_path(extent_root, path);
3938 4247
3939 if (must_clean) {
3940 clean_tree_block(NULL, root, must_clean);
3941 btrfs_tree_unlock(must_clean);
3942 free_extent_buffer(must_clean);
3943 }
3944
3945 if (is_data) { 4248 if (is_data) {
3946 ret = btrfs_del_csums(trans, root, bytenr, num_bytes); 4249 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
3947 BUG_ON(ret); 4250 BUG_ON(ret);
@@ -3951,8 +4254,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3951 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT); 4254 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
3952 } 4255 }
3953 4256
3954 ret = update_block_group(trans, root, bytenr, num_bytes, 0, 4257 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
3955 mark_free);
3956 BUG_ON(ret); 4258 BUG_ON(ret);
3957 } 4259 }
3958 btrfs_free_path(path); 4260 btrfs_free_path(path);
@@ -3960,7 +4262,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3960} 4262}
3961 4263
3962/* 4264/*
3963 * when we free an extent, it is possible (and likely) that we free the last 4265 * when we free a block, it is possible (and likely) that we free the last
3964 * delayed ref for that extent as well. This searches the delayed ref tree for 4266 * delayed ref for that extent as well. This searches the delayed ref tree for
3965 * a given extent, and if there are no other delayed refs to be processed, it 4267 * a given extent, and if there are no other delayed refs to be processed, it
3966 * removes it from the tree. 4268 * removes it from the tree.
@@ -3972,7 +4274,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3972 struct btrfs_delayed_ref_root *delayed_refs; 4274 struct btrfs_delayed_ref_root *delayed_refs;
3973 struct btrfs_delayed_ref_node *ref; 4275 struct btrfs_delayed_ref_node *ref;
3974 struct rb_node *node; 4276 struct rb_node *node;
3975 int ret; 4277 int ret = 0;
3976 4278
3977 delayed_refs = &trans->transaction->delayed_refs; 4279 delayed_refs = &trans->transaction->delayed_refs;
3978 spin_lock(&delayed_refs->lock); 4280 spin_lock(&delayed_refs->lock);
@@ -4024,17 +4326,99 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4024 list_del_init(&head->cluster); 4326 list_del_init(&head->cluster);
4025 spin_unlock(&delayed_refs->lock); 4327 spin_unlock(&delayed_refs->lock);
4026 4328
4027 ret = run_one_delayed_ref(trans, root->fs_info->tree_root, 4329 BUG_ON(head->extent_op);
4028 &head->node, head->extent_op, 4330 if (head->must_insert_reserved)
4029 head->must_insert_reserved); 4331 ret = 1;
4030 BUG_ON(ret); 4332
4333 mutex_unlock(&head->mutex);
4031 btrfs_put_delayed_ref(&head->node); 4334 btrfs_put_delayed_ref(&head->node);
4032 return 0; 4335 return ret;
4033out: 4336out:
4034 spin_unlock(&delayed_refs->lock); 4337 spin_unlock(&delayed_refs->lock);
4035 return 0; 4338 return 0;
4036} 4339}
4037 4340
4341void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4342 struct btrfs_root *root,
4343 struct extent_buffer *buf,
4344 u64 parent, int last_ref)
4345{
4346 struct btrfs_block_rsv *block_rsv;
4347 struct btrfs_block_group_cache *cache = NULL;
4348 int ret;
4349
4350 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4351 ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
4352 parent, root->root_key.objectid,
4353 btrfs_header_level(buf),
4354 BTRFS_DROP_DELAYED_REF, NULL);
4355 BUG_ON(ret);
4356 }
4357
4358 if (!last_ref)
4359 return;
4360
4361 block_rsv = get_block_rsv(trans, root);
4362 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
4363 BUG_ON(block_rsv->space_info != cache->space_info);
4364
4365 if (btrfs_header_generation(buf) == trans->transid) {
4366 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4367 ret = check_ref_cleanup(trans, root, buf->start);
4368 if (!ret)
4369 goto pin;
4370 }
4371
4372 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
4373 pin_down_extent(root, cache, buf->start, buf->len, 1);
4374 goto pin;
4375 }
4376
4377 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
4378
4379 btrfs_add_free_space(cache, buf->start, buf->len);
4380 ret = update_reserved_bytes(cache, buf->len, 0, 0);
4381 if (ret == -EAGAIN) {
4382 /* block group became read-only */
4383 update_reserved_bytes(cache, buf->len, 0, 1);
4384 goto out;
4385 }
4386
4387 ret = 1;
4388 spin_lock(&block_rsv->lock);
4389 if (block_rsv->reserved < block_rsv->size) {
4390 block_rsv->reserved += buf->len;
4391 ret = 0;
4392 }
4393 spin_unlock(&block_rsv->lock);
4394
4395 if (ret) {
4396 spin_lock(&cache->space_info->lock);
4397 cache->space_info->bytes_reserved -= buf->len;
4398 spin_unlock(&cache->space_info->lock);
4399 }
4400 goto out;
4401 }
4402pin:
4403 if (block_rsv->durable && !cache->ro) {
4404 ret = 0;
4405 spin_lock(&cache->lock);
4406 if (!cache->ro) {
4407 cache->reserved_pinned += buf->len;
4408 ret = 1;
4409 }
4410 spin_unlock(&cache->lock);
4411
4412 if (ret) {
4413 spin_lock(&block_rsv->lock);
4414 block_rsv->freed[trans->transid & 0x1] += buf->len;
4415 spin_unlock(&block_rsv->lock);
4416 }
4417 }
4418out:
4419 btrfs_put_block_group(cache);
4420}
4421
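/*
 * Note, not part of the patch: rough decision flow of
 * btrfs_free_tree_block() above when the last reference goes away:
 *  - block allocated in this transaction, never written to disk and
 *    with no other delayed refs pending: the space goes straight back
 *    to the free space cache and the reservation it was drawn from;
 *  - otherwise the block stays pinned until the transaction commits;
 *    durable reservations also record the bytes in freed[transid & 1]
 *    so btrfs_finish_extent_commit() can refill them afterwards.
 */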
4038int btrfs_free_extent(struct btrfs_trans_handle *trans, 4422int btrfs_free_extent(struct btrfs_trans_handle *trans,
4039 struct btrfs_root *root, 4423 struct btrfs_root *root,
4040 u64 bytenr, u64 num_bytes, u64 parent, 4424 u64 bytenr, u64 num_bytes, u64 parent,
@@ -4056,8 +4440,6 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
4056 parent, root_objectid, (int)owner, 4440 parent, root_objectid, (int)owner,
4057 BTRFS_DROP_DELAYED_REF, NULL); 4441 BTRFS_DROP_DELAYED_REF, NULL);
4058 BUG_ON(ret); 4442 BUG_ON(ret);
4059 ret = check_ref_cleanup(trans, root, bytenr);
4060 BUG_ON(ret);
4061 } else { 4443 } else {
4062 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes, 4444 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4063 parent, root_objectid, owner, 4445 parent, root_objectid, owner,
@@ -4067,21 +4449,6 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
4067 return ret; 4449 return ret;
4068} 4450}
4069 4451
4070int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4071 struct btrfs_root *root,
4072 u64 bytenr, u32 blocksize,
4073 u64 parent, u64 root_objectid, int level)
4074{
4075 u64 used;
4076 spin_lock(&root->node_lock);
4077 used = btrfs_root_used(&root->root_item) - blocksize;
4078 btrfs_set_root_used(&root->root_item, used);
4079 spin_unlock(&root->node_lock);
4080
4081 return btrfs_free_extent(trans, root, bytenr, blocksize,
4082 parent, root_objectid, level, 0);
4083}
4084
4085static u64 stripe_align(struct btrfs_root *root, u64 val) 4452static u64 stripe_align(struct btrfs_root *root, u64 val)
4086{ 4453{
4087 u64 mask = ((u64)root->stripesize - 1); 4454 u64 mask = ((u64)root->stripesize - 1);
@@ -4134,6 +4501,22 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4134 return 0; 4501 return 0;
4135} 4502}
4136 4503
4504static int get_block_group_index(struct btrfs_block_group_cache *cache)
4505{
4506 int index;
4507 if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
4508 index = 0;
4509 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
4510 index = 1;
4511 else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
4512 index = 2;
4513 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
4514 index = 3;
4515 else
4516 index = 4;
4517 return index;
4518}
4519
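/*
 * Note, not part of the patch: get_block_group_index() above selects
 * one of the BTRFS_NR_RAID_TYPES lists in space_info->block_groups[]:
 * 0 = RAID10, 1 = RAID1, 2 = DUP, 3 = RAID0, 4 = single.
 * find_free_extent() below walks the lists in that order, so mirrored
 * groups are tried before unmirrored ones.
 */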
4137enum btrfs_loop_type { 4520enum btrfs_loop_type {
4138 LOOP_FIND_IDEAL = 0, 4521 LOOP_FIND_IDEAL = 0,
4139 LOOP_CACHING_NOWAIT = 1, 4522 LOOP_CACHING_NOWAIT = 1,
@@ -4155,7 +4538,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4155 u64 num_bytes, u64 empty_size, 4538 u64 num_bytes, u64 empty_size,
4156 u64 search_start, u64 search_end, 4539 u64 search_start, u64 search_end,
4157 u64 hint_byte, struct btrfs_key *ins, 4540 u64 hint_byte, struct btrfs_key *ins,
4158 u64 exclude_start, u64 exclude_nr,
4159 int data) 4541 int data)
4160{ 4542{
4161 int ret = 0; 4543 int ret = 0;
@@ -4168,6 +4550,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4168 struct btrfs_space_info *space_info; 4550 struct btrfs_space_info *space_info;
4169 int last_ptr_loop = 0; 4551 int last_ptr_loop = 0;
4170 int loop = 0; 4552 int loop = 0;
4553 int index = 0;
4171 bool found_uncached_bg = false; 4554 bool found_uncached_bg = false;
4172 bool failed_cluster_refill = false; 4555 bool failed_cluster_refill = false;
4173 bool failed_alloc = false; 4556 bool failed_alloc = false;
@@ -4237,6 +4620,7 @@ ideal_cache:
4237 btrfs_put_block_group(block_group); 4620 btrfs_put_block_group(block_group);
4238 up_read(&space_info->groups_sem); 4621 up_read(&space_info->groups_sem);
4239 } else { 4622 } else {
4623 index = get_block_group_index(block_group);
4240 goto have_block_group; 4624 goto have_block_group;
4241 } 4625 }
4242 } else if (block_group) { 4626 } else if (block_group) {
@@ -4245,7 +4629,8 @@ ideal_cache:
4245 } 4629 }
4246search: 4630search:
4247 down_read(&space_info->groups_sem); 4631 down_read(&space_info->groups_sem);
4248 list_for_each_entry(block_group, &space_info->block_groups, list) { 4632 list_for_each_entry(block_group, &space_info->block_groups[index],
4633 list) {
4249 u64 offset; 4634 u64 offset;
4250 int cached; 4635 int cached;
4251 4636
@@ -4436,23 +4821,22 @@ checks:
4436 goto loop; 4821 goto loop;
4437 } 4822 }
4438 4823
4439 if (exclude_nr > 0 && 4824 ins->objectid = search_start;
4440 (search_start + num_bytes > exclude_start && 4825 ins->offset = num_bytes;
4441 search_start < exclude_start + exclude_nr)) { 4826
4442 search_start = exclude_start + exclude_nr; 4827 if (offset < search_start)
4828 btrfs_add_free_space(block_group, offset,
4829 search_start - offset);
4830 BUG_ON(offset > search_start);
4443 4831
4832 ret = update_reserved_bytes(block_group, num_bytes, 1,
4833 (data & BTRFS_BLOCK_GROUP_DATA));
4834 if (ret == -EAGAIN) {
4444 btrfs_add_free_space(block_group, offset, num_bytes); 4835 btrfs_add_free_space(block_group, offset, num_bytes);
4445 /*
4446 * if search_start is still in this block group
4447 * then we just re-search this block group
4448 */
4449 if (search_start >= block_group->key.objectid &&
4450 search_start < (block_group->key.objectid +
4451 block_group->key.offset))
4452 goto have_block_group;
4453 goto loop; 4836 goto loop;
4454 } 4837 }
4455 4838
4839 /* we are all good, lets return */
4456 ins->objectid = search_start; 4840 ins->objectid = search_start;
4457 ins->offset = num_bytes; 4841 ins->offset = num_bytes;
4458 4842
@@ -4460,18 +4844,18 @@ checks:
4460 btrfs_add_free_space(block_group, offset, 4844 btrfs_add_free_space(block_group, offset,
4461 search_start - offset); 4845 search_start - offset);
4462 BUG_ON(offset > search_start); 4846 BUG_ON(offset > search_start);
4463
4464 update_reserved_extents(block_group, num_bytes, 1);
4465
4466 /* we are all good, lets return */
4467 break; 4847 break;
4468loop: 4848loop:
4469 failed_cluster_refill = false; 4849 failed_cluster_refill = false;
4470 failed_alloc = false; 4850 failed_alloc = false;
4851 BUG_ON(index != get_block_group_index(block_group));
4471 btrfs_put_block_group(block_group); 4852 btrfs_put_block_group(block_group);
4472 } 4853 }
4473 up_read(&space_info->groups_sem); 4854 up_read(&space_info->groups_sem);
4474 4855
4856 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
4857 goto search;
4858
4475 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for 4859 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for
4476 * for them to make caching progress. Also 4860 * for them to make caching progress. Also
4477 * determine the best possible bg to cache 4861 * determine the best possible bg to cache
@@ -4485,6 +4869,7 @@ loop:
4485 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && 4869 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
4486 (found_uncached_bg || empty_size || empty_cluster || 4870 (found_uncached_bg || empty_size || empty_cluster ||
4487 allowed_chunk_alloc)) { 4871 allowed_chunk_alloc)) {
4872 index = 0;
4488 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) { 4873 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
4489 found_uncached_bg = false; 4874 found_uncached_bg = false;
4490 loop++; 4875 loop++;
@@ -4567,31 +4952,30 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
4567 int dump_block_groups) 4952 int dump_block_groups)
4568{ 4953{
4569 struct btrfs_block_group_cache *cache; 4954 struct btrfs_block_group_cache *cache;
4955 int index = 0;
4570 4956
4571 spin_lock(&info->lock); 4957 spin_lock(&info->lock);
4572 printk(KERN_INFO "space_info has %llu free, is %sfull\n", 4958 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
4573 (unsigned long long)(info->total_bytes - info->bytes_used - 4959 (unsigned long long)(info->total_bytes - info->bytes_used -
4574 info->bytes_pinned - info->bytes_reserved - 4960 info->bytes_pinned - info->bytes_reserved -
4575 info->bytes_super), 4961 info->bytes_readonly),
4576 (info->full) ? "" : "not "); 4962 (info->full) ? "" : "not ");
4577 printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu," 4963 printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
4578 " may_use=%llu, used=%llu, root=%llu, super=%llu, reserved=%llu" 4964 "reserved=%llu, may_use=%llu, readonly=%llu\n",
4579 "\n",
4580 (unsigned long long)info->total_bytes, 4965 (unsigned long long)info->total_bytes,
4966 (unsigned long long)info->bytes_used,
4581 (unsigned long long)info->bytes_pinned, 4967 (unsigned long long)info->bytes_pinned,
4582 (unsigned long long)info->bytes_delalloc, 4968 (unsigned long long)info->bytes_reserved,
4583 (unsigned long long)info->bytes_may_use, 4969 (unsigned long long)info->bytes_may_use,
4584 (unsigned long long)info->bytes_used, 4970 (unsigned long long)info->bytes_readonly);
4585 (unsigned long long)info->bytes_root,
4586 (unsigned long long)info->bytes_super,
4587 (unsigned long long)info->bytes_reserved);
4588 spin_unlock(&info->lock); 4971 spin_unlock(&info->lock);
4589 4972
4590 if (!dump_block_groups) 4973 if (!dump_block_groups)
4591 return; 4974 return;
4592 4975
4593 down_read(&info->groups_sem); 4976 down_read(&info->groups_sem);
4594 list_for_each_entry(cache, &info->block_groups, list) { 4977again:
4978 list_for_each_entry(cache, &info->block_groups[index], list) {
4595 spin_lock(&cache->lock); 4979 spin_lock(&cache->lock);
4596 printk(KERN_INFO "block group %llu has %llu bytes, %llu used " 4980 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
4597 "%llu pinned %llu reserved\n", 4981 "%llu pinned %llu reserved\n",
@@ -4603,6 +4987,8 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
4603 btrfs_dump_free_space(cache, bytes); 4987 btrfs_dump_free_space(cache, bytes);
4604 spin_unlock(&cache->lock); 4988 spin_unlock(&cache->lock);
4605 } 4989 }
4990 if (++index < BTRFS_NR_RAID_TYPES)
4991 goto again;
4606 up_read(&info->groups_sem); 4992 up_read(&info->groups_sem);
4607} 4993}
4608 4994
@@ -4628,9 +5014,8 @@ again:
4628 5014
4629 WARN_ON(num_bytes < root->sectorsize); 5015 WARN_ON(num_bytes < root->sectorsize);
4630 ret = find_free_extent(trans, root, num_bytes, empty_size, 5016 ret = find_free_extent(trans, root, num_bytes, empty_size,
4631 search_start, search_end, hint_byte, ins, 5017 search_start, search_end, hint_byte,
4632 trans->alloc_exclude_start, 5018 ins, data);
4633 trans->alloc_exclude_nr, data);
4634 5019
4635 if (ret == -ENOSPC && num_bytes > min_alloc_size) { 5020 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
4636 num_bytes = num_bytes >> 1; 5021 num_bytes = num_bytes >> 1;
@@ -4668,7 +5053,7 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
4668 ret = btrfs_discard_extent(root, start, len); 5053 ret = btrfs_discard_extent(root, start, len);
4669 5054
4670 btrfs_add_free_space(cache, start, len); 5055 btrfs_add_free_space(cache, start, len);
4671 update_reserved_extents(cache, len, 0); 5056 update_reserved_bytes(cache, len, 0, 1);
4672 btrfs_put_block_group(cache); 5057 btrfs_put_block_group(cache);
4673 5058
4674 return ret; 5059 return ret;
@@ -4731,8 +5116,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4731 btrfs_mark_buffer_dirty(path->nodes[0]); 5116 btrfs_mark_buffer_dirty(path->nodes[0]);
4732 btrfs_free_path(path); 5117 btrfs_free_path(path);
4733 5118
4734 ret = update_block_group(trans, root, ins->objectid, ins->offset, 5119 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
4735 1, 0);
4736 if (ret) { 5120 if (ret) {
4737 printk(KERN_ERR "btrfs update block group failed for %llu " 5121 printk(KERN_ERR "btrfs update block group failed for %llu "
4738 "%llu\n", (unsigned long long)ins->objectid, 5122 "%llu\n", (unsigned long long)ins->objectid,
@@ -4792,8 +5176,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4792 btrfs_mark_buffer_dirty(leaf); 5176 btrfs_mark_buffer_dirty(leaf);
4793 btrfs_free_path(path); 5177 btrfs_free_path(path);
4794 5178
4795 ret = update_block_group(trans, root, ins->objectid, ins->offset, 5179 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
4796 1, 0);
4797 if (ret) { 5180 if (ret) {
4798 printk(KERN_ERR "btrfs update block group failed for %llu " 5181 printk(KERN_ERR "btrfs update block group failed for %llu "
4799 "%llu\n", (unsigned long long)ins->objectid, 5182 "%llu\n", (unsigned long long)ins->objectid,
@@ -4869,73 +5252,14 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4869 put_caching_control(caching_ctl); 5252 put_caching_control(caching_ctl);
4870 } 5253 }
4871 5254
4872 update_reserved_extents(block_group, ins->offset, 1); 5255 ret = update_reserved_bytes(block_group, ins->offset, 1, 1);
5256 BUG_ON(ret);
4873 btrfs_put_block_group(block_group); 5257 btrfs_put_block_group(block_group);
4874 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, 5258 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
4875 0, owner, offset, ins, 1); 5259 0, owner, offset, ins, 1);
4876 return ret; 5260 return ret;
4877} 5261}
4878 5262
4879/*
4880 * finds a free extent and does all the dirty work required for allocation
4881 * returns the key for the extent through ins, and a tree buffer for
4882 * the first block of the extent through buf.
4883 *
4884 * returns 0 if everything worked, non-zero otherwise.
4885 */
4886static int alloc_tree_block(struct btrfs_trans_handle *trans,
4887 struct btrfs_root *root,
4888 u64 num_bytes, u64 parent, u64 root_objectid,
4889 struct btrfs_disk_key *key, int level,
4890 u64 empty_size, u64 hint_byte, u64 search_end,
4891 struct btrfs_key *ins)
4892{
4893 int ret;
4894 u64 flags = 0;
4895
4896 ret = btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
4897 empty_size, hint_byte, search_end,
4898 ins, 0);
4899 if (ret)
4900 return ret;
4901
4902 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4903 if (parent == 0)
4904 parent = ins->objectid;
4905 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
4906 } else
4907 BUG_ON(parent > 0);
4908
4909 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
4910 struct btrfs_delayed_extent_op *extent_op;
4911 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
4912 BUG_ON(!extent_op);
4913 if (key)
4914 memcpy(&extent_op->key, key, sizeof(extent_op->key));
4915 else
4916 memset(&extent_op->key, 0, sizeof(extent_op->key));
4917 extent_op->flags_to_set = flags;
4918 extent_op->update_key = 1;
4919 extent_op->update_flags = 1;
4920 extent_op->is_data = 0;
4921
4922 ret = btrfs_add_delayed_tree_ref(trans, ins->objectid,
4923 ins->offset, parent, root_objectid,
4924 level, BTRFS_ADD_DELAYED_EXTENT,
4925 extent_op);
4926 BUG_ON(ret);
4927 }
4928
4929 if (root_objectid == root->root_key.objectid) {
4930 u64 used;
4931 spin_lock(&root->node_lock);
4932 used = btrfs_root_used(&root->root_item) + num_bytes;
4933 btrfs_set_root_used(&root->root_item, used);
4934 spin_unlock(&root->node_lock);
4935 }
4936 return ret;
4937}
4938
4939struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, 5263struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
4940 struct btrfs_root *root, 5264 struct btrfs_root *root,
4941 u64 bytenr, u32 blocksize, 5265 u64 bytenr, u32 blocksize,
@@ -4974,8 +5298,45 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
4974 return buf; 5298 return buf;
4975} 5299}
4976 5300
5301static struct btrfs_block_rsv *
5302use_block_rsv(struct btrfs_trans_handle *trans,
5303 struct btrfs_root *root, u32 blocksize)
5304{
5305 struct btrfs_block_rsv *block_rsv;
5306 int ret;
5307
5308 block_rsv = get_block_rsv(trans, root);
5309
5310 if (block_rsv->size == 0) {
5311 ret = reserve_metadata_bytes(block_rsv, blocksize);
5312 if (ret)
5313 return ERR_PTR(ret);
5314 return block_rsv;
5315 }
5316
5317 ret = block_rsv_use_bytes(block_rsv, blocksize);
5318 if (!ret)
5319 return block_rsv;
5320
5321 WARN_ON(1);
5322 printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n",
5323 block_rsv->size, block_rsv->reserved,
5324 block_rsv->freed[0], block_rsv->freed[1]);
5325
5326 return ERR_PTR(-ENOSPC);
5327}
5328
5329static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
5330{
5331 block_rsv_add_bytes(block_rsv, blocksize, 0);
5332 block_rsv_release_bytes(block_rsv, NULL, 0);
5333}
5334
4977/* 5335/*
4978 * helper function to allocate a block for a given tree 5336 * finds a free extent and does all the dirty work required for allocation
5337 * returns the key for the extent through ins, and a tree buffer for
5338 * the first block of the extent through buf.
5339 *
4979 * returns the tree buffer or NULL. 5340 * returns the tree buffer or NULL.
4980 */ 5341 */
4981struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, 5342struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
@@ -4985,18 +5346,53 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
4985 u64 hint, u64 empty_size) 5346 u64 hint, u64 empty_size)
4986{ 5347{
4987 struct btrfs_key ins; 5348 struct btrfs_key ins;
4988 int ret; 5349 struct btrfs_block_rsv *block_rsv;
4989 struct extent_buffer *buf; 5350 struct extent_buffer *buf;
5351 u64 flags = 0;
5352 int ret;
5353
4990 5354
4991 ret = alloc_tree_block(trans, root, blocksize, parent, root_objectid, 5355 block_rsv = use_block_rsv(trans, root, blocksize);
4992 key, level, empty_size, hint, (u64)-1, &ins); 5356 if (IS_ERR(block_rsv))
5357 return ERR_CAST(block_rsv);
5358
5359 ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
5360 empty_size, hint, (u64)-1, &ins, 0);
4993 if (ret) { 5361 if (ret) {
4994 BUG_ON(ret > 0); 5362 unuse_block_rsv(block_rsv, blocksize);
4995 return ERR_PTR(ret); 5363 return ERR_PTR(ret);
4996 } 5364 }
4997 5365
4998 buf = btrfs_init_new_buffer(trans, root, ins.objectid, 5366 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
4999 blocksize, level); 5367 blocksize, level);
5368 BUG_ON(IS_ERR(buf));
5369
5370 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5371 if (parent == 0)
5372 parent = ins.objectid;
5373 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5374 } else
5375 BUG_ON(parent > 0);
5376
5377 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5378 struct btrfs_delayed_extent_op *extent_op;
5379 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
5380 BUG_ON(!extent_op);
5381 if (key)
5382 memcpy(&extent_op->key, key, sizeof(extent_op->key));
5383 else
5384 memset(&extent_op->key, 0, sizeof(extent_op->key));
5385 extent_op->flags_to_set = flags;
5386 extent_op->update_key = 1;
5387 extent_op->update_flags = 1;
5388 extent_op->is_data = 0;
5389
5390 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
5391 ins.offset, parent, root_objectid,
5392 level, BTRFS_ADD_DELAYED_EXTENT,
5393 extent_op);
5394 BUG_ON(ret);
5395 }
5000 return buf; 5396 return buf;
5001} 5397}
5002 5398
@@ -5321,7 +5717,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
5321 struct btrfs_path *path, 5717 struct btrfs_path *path,
5322 struct walk_control *wc) 5718 struct walk_control *wc)
5323{ 5719{
5324 int ret = 0; 5720 int ret;
5325 int level = wc->level; 5721 int level = wc->level;
5326 struct extent_buffer *eb = path->nodes[level]; 5722 struct extent_buffer *eb = path->nodes[level];
5327 u64 parent = 0; 5723 u64 parent = 0;
@@ -5399,13 +5795,11 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
5399 btrfs_header_owner(path->nodes[level + 1])); 5795 btrfs_header_owner(path->nodes[level + 1]));
5400 } 5796 }
5401 5797
5402 ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent, 5798 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
5403 root->root_key.objectid, level, 0);
5404 BUG_ON(ret);
5405out: 5799out:
5406 wc->refs[level] = 0; 5800 wc->refs[level] = 0;
5407 wc->flags[level] = 0; 5801 wc->flags[level] = 0;
5408 return ret; 5802 return 0;
5409} 5803}
5410 5804
5411static noinline int walk_down_tree(struct btrfs_trans_handle *trans, 5805static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
@@ -5483,7 +5877,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
5483 * also make sure backrefs for the shared block and all lower level 5877 * also make sure backrefs for the shared block and all lower level
5484 * blocks are properly updated. 5878 * blocks are properly updated.
5485 */ 5879 */
5486int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref) 5880int btrfs_drop_snapshot(struct btrfs_root *root,
5881 struct btrfs_block_rsv *block_rsv, int update_ref)
5487{ 5882{
5488 struct btrfs_path *path; 5883 struct btrfs_path *path;
5489 struct btrfs_trans_handle *trans; 5884 struct btrfs_trans_handle *trans;
@@ -5501,7 +5896,9 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
5501 wc = kzalloc(sizeof(*wc), GFP_NOFS); 5896 wc = kzalloc(sizeof(*wc), GFP_NOFS);
5502 BUG_ON(!wc); 5897 BUG_ON(!wc);
5503 5898
5504 trans = btrfs_start_transaction(tree_root, 1); 5899 trans = btrfs_start_transaction(tree_root, 0);
5900 if (block_rsv)
5901 trans->block_rsv = block_rsv;
5505 5902
5506 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { 5903 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
5507 level = btrfs_header_level(root->node); 5904 level = btrfs_header_level(root->node);
@@ -5589,22 +5986,16 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
5589 } 5986 }
5590 5987
5591 BUG_ON(wc->level == 0); 5988 BUG_ON(wc->level == 0);
5592 if (trans->transaction->in_commit || 5989 if (btrfs_should_end_transaction(trans, tree_root)) {
5593 trans->transaction->delayed_refs.flushing) {
5594 ret = btrfs_update_root(trans, tree_root, 5990 ret = btrfs_update_root(trans, tree_root,
5595 &root->root_key, 5991 &root->root_key,
5596 root_item); 5992 root_item);
5597 BUG_ON(ret); 5993 BUG_ON(ret);
5598 5994
5599 btrfs_end_transaction(trans, tree_root); 5995 btrfs_end_transaction_throttle(trans, tree_root);
5600 trans = btrfs_start_transaction(tree_root, 1); 5996 trans = btrfs_start_transaction(tree_root, 0);
5601 } else { 5997 if (block_rsv)
5602 unsigned long update; 5998 trans->block_rsv = block_rsv;
5603 update = trans->delayed_ref_updates;
5604 trans->delayed_ref_updates = 0;
5605 if (update)
5606 btrfs_run_delayed_refs(trans, tree_root,
5607 update);
5608 } 5999 }
5609 } 6000 }
5610 btrfs_release_path(root, path); 6001 btrfs_release_path(root, path);
@@ -5632,7 +6023,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
5632 kfree(root); 6023 kfree(root);
5633 } 6024 }
5634out: 6025out:
5635 btrfs_end_transaction(trans, tree_root); 6026 btrfs_end_transaction_throttle(trans, tree_root);
5636 kfree(wc); 6027 kfree(wc);
5637 btrfs_free_path(path); 6028 btrfs_free_path(path);
5638 return err; 6029 return err;
@@ -7228,48 +7619,80 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7228 return flags; 7619 return flags;
7229} 7620}
7230 7621
7231static int __alloc_chunk_for_shrink(struct btrfs_root *root, 7622static int set_block_group_ro(struct btrfs_block_group_cache *cache)
7232 struct btrfs_block_group_cache *shrink_block_group,
7233 int force)
7234{ 7623{
7235 struct btrfs_trans_handle *trans; 7624 struct btrfs_space_info *sinfo = cache->space_info;
7236 u64 new_alloc_flags; 7625 u64 num_bytes;
7237 u64 calc; 7626 int ret = -ENOSPC;
7238 7627
7239 spin_lock(&shrink_block_group->lock); 7628 if (cache->ro)
7240 if (btrfs_block_group_used(&shrink_block_group->item) + 7629 return 0;
7241 shrink_block_group->reserved > 0) {
7242 spin_unlock(&shrink_block_group->lock);
7243 7630
7244 trans = btrfs_start_transaction(root, 1); 7631 spin_lock(&sinfo->lock);
7245 spin_lock(&shrink_block_group->lock); 7632 spin_lock(&cache->lock);
7633 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7634 cache->bytes_super - btrfs_block_group_used(&cache->item);
7635
7636 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7637 sinfo->bytes_may_use + sinfo->bytes_readonly +
7638 cache->reserved_pinned + num_bytes < sinfo->total_bytes) {
7639 sinfo->bytes_readonly += num_bytes;
7640 sinfo->bytes_reserved += cache->reserved_pinned;
7641 cache->reserved_pinned = 0;
7642 cache->ro = 1;
7643 ret = 0;
7644 }
7645 spin_unlock(&cache->lock);
7646 spin_unlock(&sinfo->lock);
7647 return ret;
7648}
7246 7649
7247 new_alloc_flags = update_block_group_flags(root, 7650int btrfs_set_block_group_ro(struct btrfs_root *root,
7248 shrink_block_group->flags); 7651 struct btrfs_block_group_cache *cache)
7249 if (new_alloc_flags != shrink_block_group->flags) {
7250 calc =
7251 btrfs_block_group_used(&shrink_block_group->item);
7252 } else {
7253 calc = shrink_block_group->key.offset;
7254 }
7255 spin_unlock(&shrink_block_group->lock);
7256 7652
7257 do_chunk_alloc(trans, root->fs_info->extent_root, 7653{
7258 calc + 2 * 1024 * 1024, new_alloc_flags, force); 7654 struct btrfs_trans_handle *trans;
7655 u64 alloc_flags;
7656 int ret;
7259 7657
7260 btrfs_end_transaction(trans, root); 7658 BUG_ON(cache->ro);
7261 } else 7659
7262 spin_unlock(&shrink_block_group->lock); 7660 trans = btrfs_join_transaction(root, 1);
7263 return 0; 7661 BUG_ON(IS_ERR(trans));
7264}
7265 7662
7663 alloc_flags = update_block_group_flags(root, cache->flags);
7664 if (alloc_flags != cache->flags)
7665 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
7266 7666
7267int btrfs_prepare_block_group_relocation(struct btrfs_root *root, 7667 ret = set_block_group_ro(cache);
7268 struct btrfs_block_group_cache *group) 7668 if (!ret)
7669 goto out;
7670 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7671 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
7672 if (ret < 0)
7673 goto out;
7674 ret = set_block_group_ro(cache);
7675out:
7676 btrfs_end_transaction(trans, root);
7677 return ret;
7678}
7269 7679
7680int btrfs_set_block_group_rw(struct btrfs_root *root,
7681 struct btrfs_block_group_cache *cache)
7270{ 7682{
7271 __alloc_chunk_for_shrink(root, group, 1); 7683 struct btrfs_space_info *sinfo = cache->space_info;
7272 set_block_group_readonly(group); 7684 u64 num_bytes;
7685
7686 BUG_ON(!cache->ro);
7687
7688 spin_lock(&sinfo->lock);
7689 spin_lock(&cache->lock);
7690 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7691 cache->bytes_super - btrfs_block_group_used(&cache->item);
7692 sinfo->bytes_readonly -= num_bytes;
7693 cache->ro = 0;
7694 spin_unlock(&cache->lock);
7695 spin_unlock(&sinfo->lock);
7273 return 0; 7696 return 0;
7274} 7697}
7275 7698
@@ -7436,17 +7859,33 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
7436 */ 7859 */
7437 synchronize_rcu(); 7860 synchronize_rcu();
7438 7861
7862 release_global_block_rsv(info);
7863
7439 while(!list_empty(&info->space_info)) { 7864 while(!list_empty(&info->space_info)) {
7440 space_info = list_entry(info->space_info.next, 7865 space_info = list_entry(info->space_info.next,
7441 struct btrfs_space_info, 7866 struct btrfs_space_info,
7442 list); 7867 list);
7443 7868 if (space_info->bytes_pinned > 0 ||
7869 space_info->bytes_reserved > 0) {
7870 WARN_ON(1);
7871 dump_space_info(space_info, 0, 0);
7872 }
7444 list_del(&space_info->list); 7873 list_del(&space_info->list);
7445 kfree(space_info); 7874 kfree(space_info);
7446 } 7875 }
7447 return 0; 7876 return 0;
7448} 7877}
7449 7878
7879static void __link_block_group(struct btrfs_space_info *space_info,
7880 struct btrfs_block_group_cache *cache)
7881{
7882 int index = get_block_group_index(cache);
7883
7884 down_write(&space_info->groups_sem);
7885 list_add_tail(&cache->list, &space_info->block_groups[index]);
7886 up_write(&space_info->groups_sem);
7887}
7888
7450int btrfs_read_block_groups(struct btrfs_root *root) 7889int btrfs_read_block_groups(struct btrfs_root *root)
7451{ 7890{
7452 struct btrfs_path *path; 7891 struct btrfs_path *path;
@@ -7468,10 +7907,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
7468 7907
7469 while (1) { 7908 while (1) {
7470 ret = find_first_block_group(root, path, &key); 7909 ret = find_first_block_group(root, path, &key);
7471 if (ret > 0) { 7910 if (ret > 0)
7472 ret = 0; 7911 break;
7473 goto error;
7474 }
7475 if (ret != 0) 7912 if (ret != 0)
7476 goto error; 7913 goto error;
7477 7914
@@ -7480,7 +7917,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
7480 cache = kzalloc(sizeof(*cache), GFP_NOFS); 7917 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7481 if (!cache) { 7918 if (!cache) {
7482 ret = -ENOMEM; 7919 ret = -ENOMEM;
7483 break; 7920 goto error;
7484 } 7921 }
7485 7922
7486 atomic_set(&cache->count, 1); 7923 atomic_set(&cache->count, 1);
@@ -7537,20 +7974,36 @@ int btrfs_read_block_groups(struct btrfs_root *root)
7537 BUG_ON(ret); 7974 BUG_ON(ret);
7538 cache->space_info = space_info; 7975 cache->space_info = space_info;
7539 spin_lock(&cache->space_info->lock); 7976 spin_lock(&cache->space_info->lock);
7540 cache->space_info->bytes_super += cache->bytes_super; 7977 cache->space_info->bytes_readonly += cache->bytes_super;
7541 spin_unlock(&cache->space_info->lock); 7978 spin_unlock(&cache->space_info->lock);
7542 7979
7543 down_write(&space_info->groups_sem); 7980 __link_block_group(space_info, cache);
7544 list_add_tail(&cache->list, &space_info->block_groups);
7545 up_write(&space_info->groups_sem);
7546 7981
7547 ret = btrfs_add_block_group_cache(root->fs_info, cache); 7982 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7548 BUG_ON(ret); 7983 BUG_ON(ret);
7549 7984
7550 set_avail_alloc_bits(root->fs_info, cache->flags); 7985 set_avail_alloc_bits(root->fs_info, cache->flags);
7551 if (btrfs_chunk_readonly(root, cache->key.objectid)) 7986 if (btrfs_chunk_readonly(root, cache->key.objectid))
7552 set_block_group_readonly(cache); 7987 set_block_group_ro(cache);
7553 } 7988 }
7989
7990 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7991 if (!(get_alloc_profile(root, space_info->flags) &
7992 (BTRFS_BLOCK_GROUP_RAID10 |
7993 BTRFS_BLOCK_GROUP_RAID1 |
7994 BTRFS_BLOCK_GROUP_DUP)))
7995 continue;
7996 /*
7997 * avoid allocating from un-mirrored block group if there are
7998 * mirrored block groups.
7999 */
8000 list_for_each_entry(cache, &space_info->block_groups[3], list)
8001 set_block_group_ro(cache);
8002 list_for_each_entry(cache, &space_info->block_groups[4], list)
8003 set_block_group_ro(cache);
8004 }
8005
8006 init_global_block_rsv(info);
7554 ret = 0; 8007 ret = 0;
7555error: 8008error:
7556 btrfs_free_path(path); 8009 btrfs_free_path(path);
@@ -7611,12 +8064,10 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7611 BUG_ON(ret); 8064 BUG_ON(ret);
7612 8065
7613 spin_lock(&cache->space_info->lock); 8066 spin_lock(&cache->space_info->lock);
7614 cache->space_info->bytes_super += cache->bytes_super; 8067 cache->space_info->bytes_readonly += cache->bytes_super;
7615 spin_unlock(&cache->space_info->lock); 8068 spin_unlock(&cache->space_info->lock);
7616 8069
7617 down_write(&cache->space_info->groups_sem); 8070 __link_block_group(cache->space_info, cache);
7618 list_add_tail(&cache->list, &cache->space_info->block_groups);
7619 up_write(&cache->space_info->groups_sem);
7620 8071
7621 ret = btrfs_add_block_group_cache(root->fs_info, cache); 8072 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7622 BUG_ON(ret); 8073 BUG_ON(ret);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d2d03684fab2..a4080c21ec55 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -135,7 +135,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
135 return state; 135 return state;
136} 136}
137 137
138static void free_extent_state(struct extent_state *state) 138void free_extent_state(struct extent_state *state)
139{ 139{
140 if (!state) 140 if (!state)
141 return; 141 return;
@@ -335,21 +335,18 @@ static int merge_state(struct extent_io_tree *tree,
335} 335}
336 336
337static int set_state_cb(struct extent_io_tree *tree, 337static int set_state_cb(struct extent_io_tree *tree,
338 struct extent_state *state, 338 struct extent_state *state, int *bits)
339 unsigned long bits)
340{ 339{
341 if (tree->ops && tree->ops->set_bit_hook) { 340 if (tree->ops && tree->ops->set_bit_hook) {
342 return tree->ops->set_bit_hook(tree->mapping->host, 341 return tree->ops->set_bit_hook(tree->mapping->host,
343 state->start, state->end, 342 state, bits);
344 state->state, bits);
345 } 343 }
346 344
347 return 0; 345 return 0;
348} 346}
349 347
350static void clear_state_cb(struct extent_io_tree *tree, 348static void clear_state_cb(struct extent_io_tree *tree,
351 struct extent_state *state, 349 struct extent_state *state, int *bits)
352 unsigned long bits)
353{ 350{
354 if (tree->ops && tree->ops->clear_bit_hook) 351 if (tree->ops && tree->ops->clear_bit_hook)
355 tree->ops->clear_bit_hook(tree->mapping->host, state, bits); 352 tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
@@ -367,9 +364,10 @@ static void clear_state_cb(struct extent_io_tree *tree,
367 */ 364 */
368static int insert_state(struct extent_io_tree *tree, 365static int insert_state(struct extent_io_tree *tree,
369 struct extent_state *state, u64 start, u64 end, 366 struct extent_state *state, u64 start, u64 end,
370 int bits) 367 int *bits)
371{ 368{
372 struct rb_node *node; 369 struct rb_node *node;
370 int bits_to_set = *bits & ~EXTENT_CTLBITS;
373 int ret; 371 int ret;
374 372
375 if (end < start) { 373 if (end < start) {
@@ -384,9 +382,9 @@ static int insert_state(struct extent_io_tree *tree,
384 if (ret) 382 if (ret)
385 return ret; 383 return ret;
386 384
387 if (bits & EXTENT_DIRTY) 385 if (bits_to_set & EXTENT_DIRTY)
388 tree->dirty_bytes += end - start + 1; 386 tree->dirty_bytes += end - start + 1;
389 state->state |= bits; 387 state->state |= bits_to_set;
390 node = tree_insert(&tree->state, end, &state->rb_node); 388 node = tree_insert(&tree->state, end, &state->rb_node);
391 if (node) { 389 if (node) {
392 struct extent_state *found; 390 struct extent_state *found;
@@ -456,13 +454,13 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
456 * struct is freed and removed from the tree 454 * struct is freed and removed from the tree
457 */ 455 */
458static int clear_state_bit(struct extent_io_tree *tree, 456static int clear_state_bit(struct extent_io_tree *tree,
459 struct extent_state *state, int bits, int wake, 457 struct extent_state *state,
460 int delete) 458 int *bits, int wake)
461{ 459{
462 int bits_to_clear = bits & ~EXTENT_DO_ACCOUNTING; 460 int bits_to_clear = *bits & ~EXTENT_CTLBITS;
463 int ret = state->state & bits_to_clear; 461 int ret = state->state & bits_to_clear;
464 462
465 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { 463 if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
466 u64 range = state->end - state->start + 1; 464 u64 range = state->end - state->start + 1;
467 WARN_ON(range > tree->dirty_bytes); 465 WARN_ON(range > tree->dirty_bytes);
468 tree->dirty_bytes -= range; 466 tree->dirty_bytes -= range;
@@ -471,9 +469,8 @@ static int clear_state_bit(struct extent_io_tree *tree,
471 state->state &= ~bits_to_clear; 469 state->state &= ~bits_to_clear;
472 if (wake) 470 if (wake)
473 wake_up(&state->wq); 471 wake_up(&state->wq);
474 if (delete || state->state == 0) { 472 if (state->state == 0) {
475 if (state->tree) { 473 if (state->tree) {
476 clear_state_cb(tree, state, state->state);
477 rb_erase(&state->rb_node, &tree->state); 474 rb_erase(&state->rb_node, &tree->state);
478 state->tree = NULL; 475 state->tree = NULL;
479 free_extent_state(state); 476 free_extent_state(state);
@@ -514,6 +511,10 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
514 int set = 0; 511 int set = 0;
515 int clear = 0; 512 int clear = 0;
516 513
514 if (delete)
515 bits |= ~EXTENT_CTLBITS;
516 bits |= EXTENT_FIRST_DELALLOC;
517
517 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY)) 518 if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
518 clear = 1; 519 clear = 1;
519again: 520again:
@@ -580,8 +581,7 @@ hit_next:
580 if (err) 581 if (err)
581 goto out; 582 goto out;
582 if (state->end <= end) { 583 if (state->end <= end) {
583 set |= clear_state_bit(tree, state, bits, wake, 584 set |= clear_state_bit(tree, state, &bits, wake);
584 delete);
585 if (last_end == (u64)-1) 585 if (last_end == (u64)-1)
586 goto out; 586 goto out;
587 start = last_end + 1; 587 start = last_end + 1;
@@ -602,7 +602,7 @@ hit_next:
602 if (wake) 602 if (wake)
603 wake_up(&state->wq); 603 wake_up(&state->wq);
604 604
605 set |= clear_state_bit(tree, prealloc, bits, wake, delete); 605 set |= clear_state_bit(tree, prealloc, &bits, wake);
606 606
607 prealloc = NULL; 607 prealloc = NULL;
608 goto out; 608 goto out;
@@ -613,7 +613,7 @@ hit_next:
613 else 613 else
614 next_node = NULL; 614 next_node = NULL;
615 615
616 set |= clear_state_bit(tree, state, bits, wake, delete); 616 set |= clear_state_bit(tree, state, &bits, wake);
617 if (last_end == (u64)-1) 617 if (last_end == (u64)-1)
618 goto out; 618 goto out;
619 start = last_end + 1; 619 start = last_end + 1;
@@ -706,19 +706,19 @@ out:
706 706
707static int set_state_bits(struct extent_io_tree *tree, 707static int set_state_bits(struct extent_io_tree *tree,
708 struct extent_state *state, 708 struct extent_state *state,
709 int bits) 709 int *bits)
710{ 710{
711 int ret; 711 int ret;
712 int bits_to_set = *bits & ~EXTENT_CTLBITS;
712 713
713 ret = set_state_cb(tree, state, bits); 714 ret = set_state_cb(tree, state, bits);
714 if (ret) 715 if (ret)
715 return ret; 716 return ret;
716 717 if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
717 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
718 u64 range = state->end - state->start + 1; 718 u64 range = state->end - state->start + 1;
719 tree->dirty_bytes += range; 719 tree->dirty_bytes += range;
720 } 720 }
721 state->state |= bits; 721 state->state |= bits_to_set;
722 722
723 return 0; 723 return 0;
724} 724}
@@ -745,10 +745,9 @@ static void cache_state(struct extent_state *state,
745 * [start, end] is inclusive This takes the tree lock. 745 * [start, end] is inclusive This takes the tree lock.
746 */ 746 */
747 747
748static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, 748int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
749 int bits, int exclusive_bits, u64 *failed_start, 749 int bits, int exclusive_bits, u64 *failed_start,
750 struct extent_state **cached_state, 750 struct extent_state **cached_state, gfp_t mask)
751 gfp_t mask)
752{ 751{
753 struct extent_state *state; 752 struct extent_state *state;
754 struct extent_state *prealloc = NULL; 753 struct extent_state *prealloc = NULL;
@@ -757,6 +756,7 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
757 u64 last_start; 756 u64 last_start;
758 u64 last_end; 757 u64 last_end;
759 758
759 bits |= EXTENT_FIRST_DELALLOC;
760again: 760again:
761 if (!prealloc && (mask & __GFP_WAIT)) { 761 if (!prealloc && (mask & __GFP_WAIT)) {
762 prealloc = alloc_extent_state(mask); 762 prealloc = alloc_extent_state(mask);
@@ -778,7 +778,7 @@ again:
778 */ 778 */
779 node = tree_search(tree, start); 779 node = tree_search(tree, start);
780 if (!node) { 780 if (!node) {
781 err = insert_state(tree, prealloc, start, end, bits); 781 err = insert_state(tree, prealloc, start, end, &bits);
782 prealloc = NULL; 782 prealloc = NULL;
783 BUG_ON(err == -EEXIST); 783 BUG_ON(err == -EEXIST);
784 goto out; 784 goto out;
@@ -802,7 +802,7 @@ hit_next:
802 goto out; 802 goto out;
803 } 803 }
804 804
805 err = set_state_bits(tree, state, bits); 805 err = set_state_bits(tree, state, &bits);
806 if (err) 806 if (err)
807 goto out; 807 goto out;
808 808
@@ -852,7 +852,7 @@ hit_next:
852 if (err) 852 if (err)
853 goto out; 853 goto out;
854 if (state->end <= end) { 854 if (state->end <= end) {
855 err = set_state_bits(tree, state, bits); 855 err = set_state_bits(tree, state, &bits);
856 if (err) 856 if (err)
857 goto out; 857 goto out;
858 cache_state(state, cached_state); 858 cache_state(state, cached_state);
@@ -877,7 +877,7 @@ hit_next:
877 else 877 else
878 this_end = last_start - 1; 878 this_end = last_start - 1;
879 err = insert_state(tree, prealloc, start, this_end, 879 err = insert_state(tree, prealloc, start, this_end,
880 bits); 880 &bits);
881 BUG_ON(err == -EEXIST); 881 BUG_ON(err == -EEXIST);
882 if (err) { 882 if (err) {
883 prealloc = NULL; 883 prealloc = NULL;
@@ -903,7 +903,7 @@ hit_next:
903 err = split_state(tree, state, prealloc, end + 1); 903 err = split_state(tree, state, prealloc, end + 1);
904 BUG_ON(err == -EEXIST); 904 BUG_ON(err == -EEXIST);
905 905
906 err = set_state_bits(tree, prealloc, bits); 906 err = set_state_bits(tree, prealloc, &bits);
907 if (err) { 907 if (err) {
908 prealloc = NULL; 908 prealloc = NULL;
909 goto out; 909 goto out;
@@ -966,8 +966,7 @@ int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
966{ 966{
967 return clear_extent_bit(tree, start, end, 967 return clear_extent_bit(tree, start, end,
968 EXTENT_DIRTY | EXTENT_DELALLOC | 968 EXTENT_DIRTY | EXTENT_DELALLOC |
969 EXTENT_DO_ACCOUNTING, 0, 0, 969 EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
970 NULL, mask);
971} 970}
972 971
973int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, 972int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
@@ -1435,9 +1434,6 @@ int extent_clear_unlock_delalloc(struct inode *inode,
1435 if (op & EXTENT_CLEAR_DELALLOC) 1434 if (op & EXTENT_CLEAR_DELALLOC)
1436 clear_bits |= EXTENT_DELALLOC; 1435 clear_bits |= EXTENT_DELALLOC;
1437 1436
1438 if (op & EXTENT_CLEAR_ACCOUNTING)
1439 clear_bits |= EXTENT_DO_ACCOUNTING;
1440
1441 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS); 1437 clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
1442 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY | 1438 if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
1443 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK | 1439 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
@@ -1916,7 +1912,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1916 1912
1917 if (tree->ops && tree->ops->submit_bio_hook) 1913 if (tree->ops && tree->ops->submit_bio_hook)
1918 tree->ops->submit_bio_hook(page->mapping->host, rw, bio, 1914 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1919 mirror_num, bio_flags); 1915 mirror_num, bio_flags, start);
1920 else 1916 else
1921 submit_bio(rw, bio); 1917 submit_bio(rw, bio);
1922 if (bio_flagged(bio, BIO_EOPNOTSUPP)) 1918 if (bio_flagged(bio, BIO_EOPNOTSUPP))
@@ -2020,6 +2016,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2020 sector_t sector; 2016 sector_t sector;
2021 struct extent_map *em; 2017 struct extent_map *em;
2022 struct block_device *bdev; 2018 struct block_device *bdev;
2019 struct btrfs_ordered_extent *ordered;
2023 int ret; 2020 int ret;
2024 int nr = 0; 2021 int nr = 0;
2025 size_t page_offset = 0; 2022 size_t page_offset = 0;
@@ -2031,7 +2028,15 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2031 set_page_extent_mapped(page); 2028 set_page_extent_mapped(page);
2032 2029
2033 end = page_end; 2030 end = page_end;
2034 lock_extent(tree, start, end, GFP_NOFS); 2031 while (1) {
2032 lock_extent(tree, start, end, GFP_NOFS);
2033 ordered = btrfs_lookup_ordered_extent(inode, start);
2034 if (!ordered)
2035 break;
2036 unlock_extent(tree, start, end, GFP_NOFS);
2037 btrfs_start_ordered_extent(inode, ordered, 1);
2038 btrfs_put_ordered_extent(ordered);
2039 }
2035 2040
2036 if (page->index == last_byte >> PAGE_CACHE_SHIFT) { 2041 if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
2037 char *userpage; 2042 char *userpage;
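
A condensed sketch of the wait loop added to __extent_read_full_page() above: the range lock is only kept once no ordered extent still covers the start of the range. All calls are the ones visible in the hunk.

while (1) {
	lock_extent(tree, start, end, GFP_NOFS);

	/* is an ordered extent (pending writeback) still covering 'start'? */
	ordered = btrfs_lookup_ordered_extent(inode, start);
	if (!ordered)
		break;			/* no: keep the lock and read */

	/* yes: drop the lock, wait for it to finish, then retry */
	unlock_extent(tree, start, end, GFP_NOFS);
	btrfs_start_ordered_extent(inode, ordered, 1);
	btrfs_put_ordered_extent(ordered);
}
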
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index bbab4813646f..5691c7b590da 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -16,7 +16,9 @@
16#define EXTENT_BOUNDARY (1 << 9) 16#define EXTENT_BOUNDARY (1 << 9)
17#define EXTENT_NODATASUM (1 << 10) 17#define EXTENT_NODATASUM (1 << 10)
18#define EXTENT_DO_ACCOUNTING (1 << 11) 18#define EXTENT_DO_ACCOUNTING (1 << 11)
19#define EXTENT_FIRST_DELALLOC (1 << 12)
19#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) 20#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
21#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
20 22
21/* flags for bio submission */ 23/* flags for bio submission */
22#define EXTENT_BIO_COMPRESSED 1 24#define EXTENT_BIO_COMPRESSED 1
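
The two new defines split the mask into real state bits and control-only bits. A minimal sketch of how the extent_io.c helpers consume them (apply_state_bits() is an illustrative name, not a function from the patch):

/* Control bits such as EXTENT_DO_ACCOUNTING and EXTENT_FIRST_DELALLOC are
 * now passed by pointer so the set/clear hooks can see and modify them,
 * but they are masked out before anything lands in state->state. */
static void apply_state_bits(struct extent_state *state, int *bits)
{
	int bits_to_set = *bits & ~EXTENT_CTLBITS;

	state->state |= bits_to_set;
}
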
@@ -47,7 +49,7 @@ struct extent_state;
47 49
48typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw, 50typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
49 struct bio *bio, int mirror_num, 51 struct bio *bio, int mirror_num,
50 unsigned long bio_flags); 52 unsigned long bio_flags, u64 bio_offset);
51struct extent_io_ops { 53struct extent_io_ops {
52 int (*fill_delalloc)(struct inode *inode, struct page *locked_page, 54 int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
53 u64 start, u64 end, int *page_started, 55 u64 start, u64 end, int *page_started,
@@ -69,10 +71,10 @@ struct extent_io_ops {
69 struct extent_state *state); 71 struct extent_state *state);
70 int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, 72 int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
71 struct extent_state *state, int uptodate); 73 struct extent_state *state, int uptodate);
72 int (*set_bit_hook)(struct inode *inode, u64 start, u64 end, 74 int (*set_bit_hook)(struct inode *inode, struct extent_state *state,
73 unsigned long old, unsigned long bits); 75 int *bits);
74 int (*clear_bit_hook)(struct inode *inode, struct extent_state *state, 76 int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
75 unsigned long bits); 77 int *bits);
76 int (*merge_extent_hook)(struct inode *inode, 78 int (*merge_extent_hook)(struct inode *inode,
77 struct extent_state *new, 79 struct extent_state *new,
78 struct extent_state *other); 80 struct extent_state *other);
@@ -176,6 +178,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
176 u64 *start, u64 search_end, 178 u64 *start, u64 search_end,
177 u64 max_bytes, unsigned long bits); 179 u64 max_bytes, unsigned long bits);
178 180
181void free_extent_state(struct extent_state *state);
179int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, 182int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
180 int bits, int filled, struct extent_state *cached_state); 183 int bits, int filled, struct extent_state *cached_state);
181int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 184int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
@@ -185,6 +188,9 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
185 gfp_t mask); 188 gfp_t mask);
186int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, 189int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
187 int bits, gfp_t mask); 190 int bits, gfp_t mask);
191int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
192 int bits, int exclusive_bits, u64 *failed_start,
193 struct extent_state **cached_state, gfp_t mask);
188int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, 194int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
189 gfp_t mask); 195 gfp_t mask);
190int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, 196int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 54a255065aa3..a562a250ae77 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -149,13 +149,14 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
149} 149}
150 150
151 151
152int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, 152static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
153 struct bio *bio, u32 *dst) 153 struct inode *inode, struct bio *bio,
154 u64 logical_offset, u32 *dst, int dio)
154{ 155{
155 u32 sum; 156 u32 sum;
156 struct bio_vec *bvec = bio->bi_io_vec; 157 struct bio_vec *bvec = bio->bi_io_vec;
157 int bio_index = 0; 158 int bio_index = 0;
158 u64 offset; 159 u64 offset = 0;
159 u64 item_start_offset = 0; 160 u64 item_start_offset = 0;
160 u64 item_last_offset = 0; 161 u64 item_last_offset = 0;
161 u64 disk_bytenr; 162 u64 disk_bytenr;
@@ -174,8 +175,11 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
174 WARN_ON(bio->bi_vcnt <= 0); 175 WARN_ON(bio->bi_vcnt <= 0);
175 176
176 disk_bytenr = (u64)bio->bi_sector << 9; 177 disk_bytenr = (u64)bio->bi_sector << 9;
178 if (dio)
179 offset = logical_offset;
177 while (bio_index < bio->bi_vcnt) { 180 while (bio_index < bio->bi_vcnt) {
178 offset = page_offset(bvec->bv_page) + bvec->bv_offset; 181 if (!dio)
182 offset = page_offset(bvec->bv_page) + bvec->bv_offset;
179 ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum); 183 ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum);
180 if (ret == 0) 184 if (ret == 0)
181 goto found; 185 goto found;
@@ -238,6 +242,7 @@ found:
238 else 242 else
239 set_state_private(io_tree, offset, sum); 243 set_state_private(io_tree, offset, sum);
240 disk_bytenr += bvec->bv_len; 244 disk_bytenr += bvec->bv_len;
245 offset += bvec->bv_len;
241 bio_index++; 246 bio_index++;
242 bvec++; 247 bvec++;
243 } 248 }
@@ -245,6 +250,18 @@ found:
245 return 0; 250 return 0;
246} 251}
247 252
253int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
254 struct bio *bio, u32 *dst)
255{
256 return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
257}
258
259int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
260 struct bio *bio, u64 offset, u32 *dst)
261{
262 return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1);
263}
264
248int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, 265int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
249 struct list_head *list) 266 struct list_head *list)
250{ 267{
@@ -657,6 +674,9 @@ again:
657 goto found; 674 goto found;
658 } 675 }
659 ret = PTR_ERR(item); 676 ret = PTR_ERR(item);
677 if (ret != -EFBIG && ret != -ENOENT)
678 goto fail_unlock;
679
660 if (ret == -EFBIG) { 680 if (ret == -EFBIG) {
661 u32 item_size; 681 u32 item_size;
662 /* we found one, but it isn't big enough yet */ 682 /* we found one, but it isn't big enough yet */
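
The hunks above turn btrfs_lookup_bio_sums() into a thin wrapper and add a direct-I/O variant; the only real difference is where the file offset for each bio_vec comes from. A condensed fragment of that per-bvec handling, using the calls shown above:

/* Direct I/O pages carry no file offset, so the caller-supplied
 * logical_offset is advanced by hand; buffered bios recompute it
 * from the page cache for every bio_vec. */
if (dio)
	offset = logical_offset;
while (bio_index < bio->bi_vcnt) {
	if (!dio)
		offset = page_offset(bvec->bv_page) + bvec->bv_offset;
	/* ... csum lookup for this bvec ... */
	disk_bytenr += bvec->bv_len;
	offset += bvec->bv_len;		/* only meaningful in the dio case */
	bio_index++;
	bvec++;
}
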
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 29ff749ff4ca..787b50a16a14 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -46,32 +46,42 @@
46static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, 46static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
47 int write_bytes, 47 int write_bytes,
48 struct page **prepared_pages, 48 struct page **prepared_pages,
49 const char __user *buf) 49 struct iov_iter *i)
50{ 50{
51 long page_fault = 0; 51 size_t copied;
52 int i; 52 int pg = 0;
53 int offset = pos & (PAGE_CACHE_SIZE - 1); 53 int offset = pos & (PAGE_CACHE_SIZE - 1);
54 54
55 for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) { 55 while (write_bytes > 0) {
56 size_t count = min_t(size_t, 56 size_t count = min_t(size_t,
57 PAGE_CACHE_SIZE - offset, write_bytes); 57 PAGE_CACHE_SIZE - offset, write_bytes);
58 struct page *page = prepared_pages[i]; 58 struct page *page = prepared_pages[pg];
59 fault_in_pages_readable(buf, count); 59again:
60 if (unlikely(iov_iter_fault_in_readable(i, count)))
61 return -EFAULT;
60 62
61 /* Copy data from userspace to the current page */ 63 /* Copy data from userspace to the current page */
62 kmap(page); 64 copied = iov_iter_copy_from_user(page, i, offset, count);
63 page_fault = __copy_from_user(page_address(page) + offset, 65
64 buf, count);
65 /* Flush processor's dcache for this page */ 66 /* Flush processor's dcache for this page */
66 flush_dcache_page(page); 67 flush_dcache_page(page);
67 kunmap(page); 68 iov_iter_advance(i, copied);
68 buf += count; 69 write_bytes -= copied;
69 write_bytes -= count;
70 70
71 if (page_fault) 71 if (unlikely(copied == 0)) {
72 break; 72 count = min_t(size_t, PAGE_CACHE_SIZE - offset,
73 iov_iter_single_seg_count(i));
74 goto again;
75 }
76
77 if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
78 offset += copied;
79 } else {
80 pg++;
81 offset = 0;
82 }
73 } 83 }
74 return page_fault ? -EFAULT : 0; 84 return 0;
75} 85}
76 86
77/* 87/*
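
The rewritten btrfs_copy_from_user() above follows the usual iov_iter pattern: fault the user pages in, copy, advance the iterator, and retry with a smaller count when the copy comes up empty. A condensed sketch with the same calls:

while (write_bytes > 0) {
	struct page *page = prepared_pages[pg];
	size_t count = min_t(size_t, PAGE_CACHE_SIZE - offset, write_bytes);
	size_t copied;
again:
	/* make sure the source is resident so the copy can make progress */
	if (unlikely(iov_iter_fault_in_readable(i, count)))
		return -EFAULT;

	copied = iov_iter_copy_from_user(page, i, offset, count);
	flush_dcache_page(page);
	iov_iter_advance(i, copied);
	write_bytes -= copied;

	if (unlikely(copied == 0)) {
		/* nothing copied: retry with at most one iovec segment */
		count = min_t(size_t, PAGE_CACHE_SIZE - offset,
			      iov_iter_single_seg_count(i));
		goto again;
	}

	if (copied < PAGE_CACHE_SIZE - offset) {
		offset += copied;	/* stay on this page */
	} else {
		pg++;			/* page filled, move on */
		offset = 0;
	}
}
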
@@ -126,8 +136,7 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
126 end_of_last_block = start_pos + num_bytes - 1; 136 end_of_last_block = start_pos + num_bytes - 1;
127 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, 137 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
128 NULL); 138 NULL);
129 if (err) 139 BUG_ON(err);
130 return err;
131 140
132 for (i = 0; i < num_pages; i++) { 141 for (i = 0; i < num_pages; i++) {
133 struct page *p = pages[i]; 142 struct page *p = pages[i];
@@ -142,7 +151,7 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
142 * at this time. 151 * at this time.
143 */ 152 */
144 } 153 }
145 return err; 154 return 0;
146} 155}
147 156
148/* 157/*
@@ -823,45 +832,46 @@ again:
823 return 0; 832 return 0;
824} 833}
825 834
826static ssize_t btrfs_file_write(struct file *file, const char __user *buf, 835static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
827 size_t count, loff_t *ppos) 836 const struct iovec *iov,
837 unsigned long nr_segs, loff_t pos)
828{ 838{
829 loff_t pos; 839 struct file *file = iocb->ki_filp;
840 struct inode *inode = fdentry(file)->d_inode;
841 struct btrfs_root *root = BTRFS_I(inode)->root;
842 struct page *pinned[2];
843 struct page **pages = NULL;
844 struct iov_iter i;
845 loff_t *ppos = &iocb->ki_pos;
830 loff_t start_pos; 846 loff_t start_pos;
831 ssize_t num_written = 0; 847 ssize_t num_written = 0;
832 ssize_t err = 0; 848 ssize_t err = 0;
849 size_t count;
850 size_t ocount;
833 int ret = 0; 851 int ret = 0;
834 struct inode *inode = fdentry(file)->d_inode;
835 struct btrfs_root *root = BTRFS_I(inode)->root;
836 struct page **pages = NULL;
837 int nrptrs; 852 int nrptrs;
838 struct page *pinned[2];
839 unsigned long first_index; 853 unsigned long first_index;
840 unsigned long last_index; 854 unsigned long last_index;
841 int will_write; 855 int will_write;
856 int buffered = 0;
842 857
843 will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || 858 will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
844 (file->f_flags & O_DIRECT)); 859 (file->f_flags & O_DIRECT));
845 860
846 nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
847 PAGE_CACHE_SIZE / (sizeof(struct page *)));
848 pinned[0] = NULL; 861 pinned[0] = NULL;
849 pinned[1] = NULL; 862 pinned[1] = NULL;
850 863
851 pos = *ppos;
852 start_pos = pos; 864 start_pos = pos;
853 865
854 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); 866 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
855 867
856 /* do the reserve before the mutex lock in case we have to do some
857 * flushing. We wouldn't deadlock, but this is more polite.
858 */
859 err = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
860 if (err)
861 goto out_nolock;
862
863 mutex_lock(&inode->i_mutex); 868 mutex_lock(&inode->i_mutex);
864 869
870 err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
871 if (err)
872 goto out;
873 count = ocount;
874
865 current->backing_dev_info = inode->i_mapping->backing_dev_info; 875 current->backing_dev_info = inode->i_mapping->backing_dev_info;
866 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); 876 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
867 if (err) 877 if (err)
@@ -875,15 +885,53 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
875 goto out; 885 goto out;
876 886
877 file_update_time(file); 887 file_update_time(file);
888 BTRFS_I(inode)->sequence++;
889
890 if (unlikely(file->f_flags & O_DIRECT)) {
891 num_written = generic_file_direct_write(iocb, iov, &nr_segs,
892 pos, ppos, count,
893 ocount);
894 /*
895 * the generic O_DIRECT will update in-memory i_size after the
896 * DIOs are done. But our endio handlers that update the on
897 * disk i_size never update past the in memory i_size. So we
898 * need one more update here to catch any additions to the
899 * file
900 */
901 if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
902 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
903 mark_inode_dirty(inode);
904 }
878 905
906 if (num_written < 0) {
907 ret = num_written;
908 num_written = 0;
909 goto out;
910 } else if (num_written == count) {
911 /* pick up pos changes done by the generic code */
912 pos = *ppos;
913 goto out;
914 }
915 /*
916 * We are going to do buffered for the rest of the range, so we
917 * need to make sure to invalidate the buffered pages when we're
918 * done.
919 */
920 buffered = 1;
921 pos += num_written;
922 }
923
924 iov_iter_init(&i, iov, nr_segs, count, num_written);
925 nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) /
926 PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
927 (sizeof(struct page *)));
879 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); 928 pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
880 929
881 /* generic_write_checks can change our pos */ 930 /* generic_write_checks can change our pos */
882 start_pos = pos; 931 start_pos = pos;
883 932
884 BTRFS_I(inode)->sequence++;
885 first_index = pos >> PAGE_CACHE_SHIFT; 933 first_index = pos >> PAGE_CACHE_SHIFT;
886 last_index = (pos + count) >> PAGE_CACHE_SHIFT; 934 last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;
887 935
888 /* 936 /*
889 * there are lots of better ways to do this, but this code 937 * there are lots of better ways to do this, but this code
@@ -900,7 +948,7 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
900 unlock_page(pinned[0]); 948 unlock_page(pinned[0]);
901 } 949 }
902 } 950 }
903 if ((pos + count) & (PAGE_CACHE_SIZE - 1)) { 951 if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
904 pinned[1] = grab_cache_page(inode->i_mapping, last_index); 952 pinned[1] = grab_cache_page(inode->i_mapping, last_index);
905 if (!PageUptodate(pinned[1])) { 953 if (!PageUptodate(pinned[1])) {
906 ret = btrfs_readpage(NULL, pinned[1]); 954 ret = btrfs_readpage(NULL, pinned[1]);
@@ -911,10 +959,10 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
911 } 959 }
912 } 960 }
913 961
914 while (count > 0) { 962 while (iov_iter_count(&i) > 0) {
915 size_t offset = pos & (PAGE_CACHE_SIZE - 1); 963 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
916 size_t write_bytes = min(count, nrptrs * 964 size_t write_bytes = min(iov_iter_count(&i),
917 (size_t)PAGE_CACHE_SIZE - 965 nrptrs * (size_t)PAGE_CACHE_SIZE -
918 offset); 966 offset);
919 size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >> 967 size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
920 PAGE_CACHE_SHIFT; 968 PAGE_CACHE_SHIFT;
@@ -922,7 +970,7 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
922 WARN_ON(num_pages > nrptrs); 970 WARN_ON(num_pages > nrptrs);
923 memset(pages, 0, sizeof(struct page *) * nrptrs); 971 memset(pages, 0, sizeof(struct page *) * nrptrs);
924 972
925 ret = btrfs_check_data_free_space(root, inode, write_bytes); 973 ret = btrfs_delalloc_reserve_space(inode, write_bytes);
926 if (ret) 974 if (ret)
927 goto out; 975 goto out;
928 976
@@ -930,26 +978,20 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
930 pos, first_index, last_index, 978 pos, first_index, last_index,
931 write_bytes); 979 write_bytes);
932 if (ret) { 980 if (ret) {
933 btrfs_free_reserved_data_space(root, inode, 981 btrfs_delalloc_release_space(inode, write_bytes);
934 write_bytes);
935 goto out; 982 goto out;
936 } 983 }
937 984
938 ret = btrfs_copy_from_user(pos, num_pages, 985 ret = btrfs_copy_from_user(pos, num_pages,
939 write_bytes, pages, buf); 986 write_bytes, pages, &i);
940 if (ret) { 987 if (ret == 0) {
941 btrfs_free_reserved_data_space(root, inode, 988 dirty_and_release_pages(NULL, root, file, pages,
942 write_bytes); 989 num_pages, pos, write_bytes);
943 btrfs_drop_pages(pages, num_pages);
944 goto out;
945 } 990 }
946 991
947 ret = dirty_and_release_pages(NULL, root, file, pages,
948 num_pages, pos, write_bytes);
949 btrfs_drop_pages(pages, num_pages); 992 btrfs_drop_pages(pages, num_pages);
950 if (ret) { 993 if (ret) {
951 btrfs_free_reserved_data_space(root, inode, 994 btrfs_delalloc_release_space(inode, write_bytes);
952 write_bytes);
953 goto out; 995 goto out;
954 } 996 }
955 997
@@ -965,8 +1007,6 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
965 btrfs_throttle(root); 1007 btrfs_throttle(root);
966 } 1008 }
967 1009
968 buf += write_bytes;
969 count -= write_bytes;
970 pos += write_bytes; 1010 pos += write_bytes;
971 num_written += write_bytes; 1011 num_written += write_bytes;
972 1012
@@ -976,9 +1016,7 @@ out:
976 mutex_unlock(&inode->i_mutex); 1016 mutex_unlock(&inode->i_mutex);
977 if (ret) 1017 if (ret)
978 err = ret; 1018 err = ret;
979 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
980 1019
981out_nolock:
982 kfree(pages); 1020 kfree(pages);
983 if (pinned[0]) 1021 if (pinned[0])
984 page_cache_release(pinned[0]); 1022 page_cache_release(pinned[0]);
@@ -1008,7 +1046,7 @@ out_nolock:
1008 num_written = err; 1046 num_written = err;
1009 1047
1010 if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { 1048 if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
1011 trans = btrfs_start_transaction(root, 1); 1049 trans = btrfs_start_transaction(root, 0);
1012 ret = btrfs_log_dentry_safe(trans, root, 1050 ret = btrfs_log_dentry_safe(trans, root,
1013 file->f_dentry); 1051 file->f_dentry);
1014 if (ret == 0) { 1052 if (ret == 0) {
@@ -1023,7 +1061,7 @@ out_nolock:
1023 btrfs_end_transaction(trans, root); 1061 btrfs_end_transaction(trans, root);
1024 } 1062 }
1025 } 1063 }
1026 if (file->f_flags & O_DIRECT) { 1064 if (file->f_flags & O_DIRECT && buffered) {
1027 invalidate_mapping_pages(inode->i_mapping, 1065 invalidate_mapping_pages(inode->i_mapping,
1028 start_pos >> PAGE_CACHE_SHIFT, 1066 start_pos >> PAGE_CACHE_SHIFT,
1029 (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT); 1067 (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
@@ -1063,8 +1101,9 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
1063 * important optimization for directories because holding the mutex prevents 1101 * important optimization for directories because holding the mutex prevents
1064 * new operations on the dir while we write to disk. 1102 * new operations on the dir while we write to disk.
1065 */ 1103 */
1066int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) 1104int btrfs_sync_file(struct file *file, int datasync)
1067{ 1105{
1106 struct dentry *dentry = file->f_path.dentry;
1068 struct inode *inode = dentry->d_inode; 1107 struct inode *inode = dentry->d_inode;
1069 struct btrfs_root *root = BTRFS_I(inode)->root; 1108 struct btrfs_root *root = BTRFS_I(inode)->root;
1070 int ret = 0; 1109 int ret = 0;
@@ -1104,9 +1143,9 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1104 if (file && file->private_data) 1143 if (file && file->private_data)
1105 btrfs_ioctl_trans_end(file); 1144 btrfs_ioctl_trans_end(file);
1106 1145
1107 trans = btrfs_start_transaction(root, 1); 1146 trans = btrfs_start_transaction(root, 0);
1108 if (!trans) { 1147 if (IS_ERR(trans)) {
1109 ret = -ENOMEM; 1148 ret = PTR_ERR(trans);
1110 goto out; 1149 goto out;
1111 } 1150 }
1112 1151
@@ -1161,7 +1200,7 @@ const struct file_operations btrfs_file_operations = {
1161 .read = do_sync_read, 1200 .read = do_sync_read,
1162 .aio_read = generic_file_aio_read, 1201 .aio_read = generic_file_aio_read,
1163 .splice_read = generic_file_splice_read, 1202 .splice_read = generic_file_splice_read,
1164 .write = btrfs_file_write, 1203 .aio_write = btrfs_file_aio_write,
1165 .mmap = btrfs_file_mmap, 1204 .mmap = btrfs_file_mmap,
1166 .open = generic_file_open, 1205 .open = generic_file_open,
1167 .release = btrfs_release_file, 1206 .release = btrfs_release_file,
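
The new btrfs_file_aio_write() handles a short O_DIRECT write by finishing the tail through the page cache. A condensed sketch of that control flow, pieced together from the hunks above (not a drop-in function):

if (unlikely(file->f_flags & O_DIRECT)) {
	num_written = generic_file_direct_write(iocb, iov, &nr_segs,
						pos, ppos, count, ocount);
	if (num_written < 0) {			/* hard error: report it */
		ret = num_written;
		num_written = 0;
		goto out;
	} else if (num_written == count) {	/* everything went direct */
		pos = *ppos;
		goto out;
	}
	/* short direct write: fall back to buffered for the remainder and
	 * remember to invalidate those cached pages afterwards */
	buffered = 1;
	pos += num_written;
}

This is also why the later hunk only calls invalidate_mapping_pages() when both O_DIRECT and 'buffered' are set.
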
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 72ce3c173d6a..64f1150bb48d 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -49,6 +49,33 @@ static int find_name_in_backref(struct btrfs_path *path, const char *name,
49 return 0; 49 return 0;
50} 50}
51 51
52struct btrfs_inode_ref *
53btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans,
54 struct btrfs_root *root,
55 struct btrfs_path *path,
56 const char *name, int name_len,
57 u64 inode_objectid, u64 ref_objectid, int mod)
58{
59 struct btrfs_key key;
60 struct btrfs_inode_ref *ref;
61 int ins_len = mod < 0 ? -1 : 0;
62 int cow = mod != 0;
63 int ret;
64
65 key.objectid = inode_objectid;
66 key.type = BTRFS_INODE_REF_KEY;
67 key.offset = ref_objectid;
68
69 ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
70 if (ret < 0)
71 return ERR_PTR(ret);
72 if (ret > 0)
73 return NULL;
74 if (!find_name_in_backref(path, name, name_len, &ref))
75 return NULL;
76 return ref;
77}
78
52int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, 79int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
53 struct btrfs_root *root, 80 struct btrfs_root *root,
54 const char *name, int name_len, 81 const char *name, int name_len,
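
A hypothetical caller of the new btrfs_lookup_inode_ref() helper, shown only to illustrate the 'mod' convention it inherits from btrfs_search_slot(): mod < 0 sets ins_len = -1 (the caller intends to delete the item) and any non-zero mod requests COW of the path.

struct btrfs_inode_ref *ref;

ref = btrfs_lookup_inode_ref(trans, root, path, name, name_len,
			     inode_objectid, ref_objectid, -1 /* mod */);
if (IS_ERR(ref))
	return PTR_ERR(ref);	/* search error */
if (!ref)
	return -ENOENT;		/* no such backref */
/* ... modify or delete the ref through 'path' ... */
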
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d601629b85d1..fa6ccc1bfe2a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -252,6 +252,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
252 inline_len, compressed_size, 252 inline_len, compressed_size,
253 compressed_pages); 253 compressed_pages);
254 BUG_ON(ret); 254 BUG_ON(ret);
255 btrfs_delalloc_release_metadata(inode, end + 1 - start);
255 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); 256 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
256 return 0; 257 return 0;
257} 258}
@@ -414,6 +415,7 @@ again:
414 trans = btrfs_join_transaction(root, 1); 415 trans = btrfs_join_transaction(root, 1);
415 BUG_ON(!trans); 416 BUG_ON(!trans);
416 btrfs_set_trans_block_group(trans, inode); 417 btrfs_set_trans_block_group(trans, inode);
418 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
417 419
418 /* lets try to make an inline extent */ 420 /* lets try to make an inline extent */
419 if (ret || total_in < (actual_end - start)) { 421 if (ret || total_in < (actual_end - start)) {
@@ -439,7 +441,6 @@ again:
439 start, end, NULL, 441 start, end, NULL,
440 EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY | 442 EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
441 EXTENT_CLEAR_DELALLOC | 443 EXTENT_CLEAR_DELALLOC |
442 EXTENT_CLEAR_ACCOUNTING |
443 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK); 444 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
444 445
445 btrfs_end_transaction(trans, root); 446 btrfs_end_transaction(trans, root);
@@ -697,6 +698,38 @@ retry:
697 return 0; 698 return 0;
698} 699}
699 700
701static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
702 u64 num_bytes)
703{
704 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
705 struct extent_map *em;
706 u64 alloc_hint = 0;
707
708 read_lock(&em_tree->lock);
709 em = search_extent_mapping(em_tree, start, num_bytes);
710 if (em) {
711 /*
712 * if block start isn't an actual block number then find the
713 * first block in this inode and use that as a hint. If that
714 * block is also bogus then just don't worry about it.
715 */
716 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
717 free_extent_map(em);
718 em = search_extent_mapping(em_tree, 0, 0);
719 if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
720 alloc_hint = em->block_start;
721 if (em)
722 free_extent_map(em);
723 } else {
724 alloc_hint = em->block_start;
725 free_extent_map(em);
726 }
727 }
728 read_unlock(&em_tree->lock);
729
730 return alloc_hint;
731}
732
700/* 733/*
701 * when extent_io.c finds a delayed allocation range in the file, 734 * when extent_io.c finds a delayed allocation range in the file,
702 * the call backs end up in this code. The basic idea is to 735 * the call backs end up in this code. The basic idea is to
@@ -734,6 +767,7 @@ static noinline int cow_file_range(struct inode *inode,
734 trans = btrfs_join_transaction(root, 1); 767 trans = btrfs_join_transaction(root, 1);
735 BUG_ON(!trans); 768 BUG_ON(!trans);
736 btrfs_set_trans_block_group(trans, inode); 769 btrfs_set_trans_block_group(trans, inode);
770 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
737 771
738 actual_end = min_t(u64, isize, end + 1); 772 actual_end = min_t(u64, isize, end + 1);
739 773
@@ -753,7 +787,6 @@ static noinline int cow_file_range(struct inode *inode,
753 EXTENT_CLEAR_UNLOCK_PAGE | 787 EXTENT_CLEAR_UNLOCK_PAGE |
754 EXTENT_CLEAR_UNLOCK | 788 EXTENT_CLEAR_UNLOCK |
755 EXTENT_CLEAR_DELALLOC | 789 EXTENT_CLEAR_DELALLOC |
756 EXTENT_CLEAR_ACCOUNTING |
757 EXTENT_CLEAR_DIRTY | 790 EXTENT_CLEAR_DIRTY |
758 EXTENT_SET_WRITEBACK | 791 EXTENT_SET_WRITEBACK |
759 EXTENT_END_WRITEBACK); 792 EXTENT_END_WRITEBACK);
@@ -769,29 +802,7 @@ static noinline int cow_file_range(struct inode *inode,
769 BUG_ON(disk_num_bytes > 802 BUG_ON(disk_num_bytes >
770 btrfs_super_total_bytes(&root->fs_info->super_copy)); 803 btrfs_super_total_bytes(&root->fs_info->super_copy));
771 804
772 805 alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
773 read_lock(&BTRFS_I(inode)->extent_tree.lock);
774 em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
775 start, num_bytes);
776 if (em) {
777 /*
778 * if block start isn't an actual block number then find the
779 * first block in this inode and use that as a hint. If that
780 * block is also bogus then just don't worry about it.
781 */
782 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
783 free_extent_map(em);
784 em = search_extent_mapping(em_tree, 0, 0);
785 if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
786 alloc_hint = em->block_start;
787 if (em)
788 free_extent_map(em);
789 } else {
790 alloc_hint = em->block_start;
791 free_extent_map(em);
792 }
793 }
794 read_unlock(&BTRFS_I(inode)->extent_tree.lock);
795 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); 806 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
796 807
797 while (disk_num_bytes > 0) { 808 while (disk_num_bytes > 0) {
@@ -1174,6 +1185,13 @@ out_check:
1174 num_bytes, num_bytes, type); 1185 num_bytes, num_bytes, type);
1175 BUG_ON(ret); 1186 BUG_ON(ret);
1176 1187
1188 if (root->root_key.objectid ==
1189 BTRFS_DATA_RELOC_TREE_OBJECTID) {
1190 ret = btrfs_reloc_clone_csums(inode, cur_offset,
1191 num_bytes);
1192 BUG_ON(ret);
1193 }
1194
1177 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, 1195 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1178 cur_offset, cur_offset + num_bytes - 1, 1196 cur_offset, cur_offset + num_bytes - 1,
1179 locked_page, EXTENT_CLEAR_UNLOCK_PAGE | 1197 locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
@@ -1226,15 +1244,13 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1226} 1244}
1227 1245
1228static int btrfs_split_extent_hook(struct inode *inode, 1246static int btrfs_split_extent_hook(struct inode *inode,
1229 struct extent_state *orig, u64 split) 1247 struct extent_state *orig, u64 split)
1230{ 1248{
1249 /* not delalloc, ignore it */
1231 if (!(orig->state & EXTENT_DELALLOC)) 1250 if (!(orig->state & EXTENT_DELALLOC))
1232 return 0; 1251 return 0;
1233 1252
1234 spin_lock(&BTRFS_I(inode)->accounting_lock); 1253 atomic_inc(&BTRFS_I(inode)->outstanding_extents);
1235 BTRFS_I(inode)->outstanding_extents++;
1236 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1237
1238 return 0; 1254 return 0;
1239} 1255}
1240 1256
@@ -1252,10 +1268,7 @@ static int btrfs_merge_extent_hook(struct inode *inode,
1252 if (!(other->state & EXTENT_DELALLOC)) 1268 if (!(other->state & EXTENT_DELALLOC))
1253 return 0; 1269 return 0;
1254 1270
1255 spin_lock(&BTRFS_I(inode)->accounting_lock); 1271 atomic_dec(&BTRFS_I(inode)->outstanding_extents);
1256 BTRFS_I(inode)->outstanding_extents--;
1257 spin_unlock(&BTRFS_I(inode)->accounting_lock);
1258
1259 return 0; 1272 return 0;
1260} 1273}
1261 1274
@@ -1264,8 +1277,8 @@ static int btrfs_merge_extent_hook(struct inode *inode,
1264 * bytes in this file, and to maintain the list of inodes that 1277 * bytes in this file, and to maintain the list of inodes that
1265 * have pending delalloc work to be done. 1278 * have pending delalloc work to be done.
1266 */ 1279 */
1267static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, 1280static int btrfs_set_bit_hook(struct inode *inode,
1268 unsigned long old, unsigned long bits) 1281 struct extent_state *state, int *bits)
1269{ 1282{
1270 1283
1271 /* 1284 /*
@@ -1273,17 +1286,18 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1273 * but in this case, we are only testing for the DELALLOC 1286 * but in this case, we are only testing for the DELALLOC
1274 * bit, which is only set or cleared with irqs on 1287 * bit, which is only set or cleared with irqs on
1275 */ 1288 */
1276 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 1289 if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1277 struct btrfs_root *root = BTRFS_I(inode)->root; 1290 struct btrfs_root *root = BTRFS_I(inode)->root;
1291 u64 len = state->end + 1 - state->start;
1278 1292
1279 spin_lock(&BTRFS_I(inode)->accounting_lock); 1293 if (*bits & EXTENT_FIRST_DELALLOC)
1280 BTRFS_I(inode)->outstanding_extents++; 1294 *bits &= ~EXTENT_FIRST_DELALLOC;
1281 spin_unlock(&BTRFS_I(inode)->accounting_lock); 1295 else
1282 btrfs_delalloc_reserve_space(root, inode, end - start + 1); 1296 atomic_inc(&BTRFS_I(inode)->outstanding_extents);
1283 1297
1284 spin_lock(&root->fs_info->delalloc_lock); 1298 spin_lock(&root->fs_info->delalloc_lock);
1285 BTRFS_I(inode)->delalloc_bytes += end - start + 1; 1299 BTRFS_I(inode)->delalloc_bytes += len;
1286 root->fs_info->delalloc_bytes += end - start + 1; 1300 root->fs_info->delalloc_bytes += len;
1287 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 1301 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1288 list_add_tail(&BTRFS_I(inode)->delalloc_inodes, 1302 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1289 &root->fs_info->delalloc_inodes); 1303 &root->fs_info->delalloc_inodes);
@@ -1297,45 +1311,32 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1297 * extent_io.c clear_bit_hook, see set_bit_hook for why 1311 * extent_io.c clear_bit_hook, see set_bit_hook for why
1298 */ 1312 */
1299static int btrfs_clear_bit_hook(struct inode *inode, 1313static int btrfs_clear_bit_hook(struct inode *inode,
1300 struct extent_state *state, unsigned long bits) 1314 struct extent_state *state, int *bits)
1301{ 1315{
1302 /* 1316 /*
1303 * set_bit and clear bit hooks normally require _irqsave/restore 1317 * set_bit and clear bit hooks normally require _irqsave/restore
1304 * but in this case, we are only testing for the DELALLOC 1318 * but in this case, we are only testing for the DELALLOC
1305 * bit, which is only set or cleared with irqs on 1319 * bit, which is only set or cleared with irqs on
1306 */ 1320 */
1307 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { 1321 if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1308 struct btrfs_root *root = BTRFS_I(inode)->root; 1322 struct btrfs_root *root = BTRFS_I(inode)->root;
1323 u64 len = state->end + 1 - state->start;
1309 1324
1310 if (bits & EXTENT_DO_ACCOUNTING) { 1325 if (*bits & EXTENT_FIRST_DELALLOC)
1311 spin_lock(&BTRFS_I(inode)->accounting_lock); 1326 *bits &= ~EXTENT_FIRST_DELALLOC;
1312 WARN_ON(!BTRFS_I(inode)->outstanding_extents); 1327 else if (!(*bits & EXTENT_DO_ACCOUNTING))
1313 BTRFS_I(inode)->outstanding_extents--; 1328 atomic_dec(&BTRFS_I(inode)->outstanding_extents);
1314 spin_unlock(&BTRFS_I(inode)->accounting_lock); 1329
1315 btrfs_unreserve_metadata_for_delalloc(root, inode, 1); 1330 if (*bits & EXTENT_DO_ACCOUNTING)
1316 } 1331 btrfs_delalloc_release_metadata(inode, len);
1332
1333 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
1334 btrfs_free_reserved_data_space(inode, len);
1317 1335
1318 spin_lock(&root->fs_info->delalloc_lock); 1336 spin_lock(&root->fs_info->delalloc_lock);
1319 if (state->end - state->start + 1 > 1337 root->fs_info->delalloc_bytes -= len;
1320 root->fs_info->delalloc_bytes) { 1338 BTRFS_I(inode)->delalloc_bytes -= len;
1321 printk(KERN_INFO "btrfs warning: delalloc account " 1339
1322 "%llu %llu\n",
1323 (unsigned long long)
1324 state->end - state->start + 1,
1325 (unsigned long long)
1326 root->fs_info->delalloc_bytes);
1327 btrfs_delalloc_free_space(root, inode, (u64)-1);
1328 root->fs_info->delalloc_bytes = 0;
1329 BTRFS_I(inode)->delalloc_bytes = 0;
1330 } else {
1331 btrfs_delalloc_free_space(root, inode,
1332 state->end -
1333 state->start + 1);
1334 root->fs_info->delalloc_bytes -= state->end -
1335 state->start + 1;
1336 BTRFS_I(inode)->delalloc_bytes -= state->end -
1337 state->start + 1;
1338 }
1339 if (BTRFS_I(inode)->delalloc_bytes == 0 && 1340 if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1340 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 1341 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1341 list_del_init(&BTRFS_I(inode)->delalloc_inodes); 1342 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
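
Condensed from the clear_bit_hook() hunk above, the new accounting on the clear side; 'len' replaces the repeated end - start + 1 computations and the old overflow-warning path:

u64 len = state->end + 1 - state->start;

if (*bits & EXTENT_FIRST_DELALLOC)
	*bits &= ~EXTENT_FIRST_DELALLOC;	/* only the first state of this call carries the flag; skip the counter for it */
else if (!(*bits & EXTENT_DO_ACCOUNTING))
	atomic_dec(&BTRFS_I(inode)->outstanding_extents);

if (*bits & EXTENT_DO_ACCOUNTING)
	btrfs_delalloc_release_metadata(inode, len);	/* drop the metadata reservation */

if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
	btrfs_free_reserved_data_space(inode, len);

spin_lock(&root->fs_info->delalloc_lock);
root->fs_info->delalloc_bytes -= len;
BTRFS_I(inode)->delalloc_bytes -= len;
/* ... the inode is dropped from the delalloc list under the same lock ... */
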
@@ -1384,7 +1385,8 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1384 */ 1385 */
1385static int __btrfs_submit_bio_start(struct inode *inode, int rw, 1386static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1386 struct bio *bio, int mirror_num, 1387 struct bio *bio, int mirror_num,
1387 unsigned long bio_flags) 1388 unsigned long bio_flags,
1389 u64 bio_offset)
1388{ 1390{
1389 struct btrfs_root *root = BTRFS_I(inode)->root; 1391 struct btrfs_root *root = BTRFS_I(inode)->root;
1390 int ret = 0; 1392 int ret = 0;
@@ -1403,7 +1405,8 @@ static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1403 * are inserted into the btree 1405 * are inserted into the btree
1404 */ 1406 */
1405static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, 1407static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1406 int mirror_num, unsigned long bio_flags) 1408 int mirror_num, unsigned long bio_flags,
1409 u64 bio_offset)
1407{ 1410{
1408 struct btrfs_root *root = BTRFS_I(inode)->root; 1411 struct btrfs_root *root = BTRFS_I(inode)->root;
1409 return btrfs_map_bio(root, rw, bio, mirror_num, 1); 1412 return btrfs_map_bio(root, rw, bio, mirror_num, 1);
@@ -1414,7 +1417,8 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1414 * on write, or reading the csums from the tree before a read 1417 * on write, or reading the csums from the tree before a read
1415 */ 1418 */
1416static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, 1419static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1417 int mirror_num, unsigned long bio_flags) 1420 int mirror_num, unsigned long bio_flags,
1421 u64 bio_offset)
1418{ 1422{
1419 struct btrfs_root *root = BTRFS_I(inode)->root; 1423 struct btrfs_root *root = BTRFS_I(inode)->root;
1420 int ret = 0; 1424 int ret = 0;
@@ -1439,7 +1443,8 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1439 /* we're doing a write, do the async checksumming */ 1443 /* we're doing a write, do the async checksumming */
1440 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, 1444 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1441 inode, rw, bio, mirror_num, 1445 inode, rw, bio, mirror_num,
1442 bio_flags, __btrfs_submit_bio_start, 1446 bio_flags, bio_offset,
1447 __btrfs_submit_bio_start,
1443 __btrfs_submit_bio_done); 1448 __btrfs_submit_bio_done);
1444 } 1449 }
1445 1450
@@ -1520,6 +1525,7 @@ again:
1520 goto again; 1525 goto again;
1521 } 1526 }
1522 1527
1528 BUG();
1523 btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); 1529 btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1524 ClearPageChecked(page); 1530 ClearPageChecked(page);
1525out: 1531out:
@@ -1650,7 +1656,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1650static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) 1656static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1651{ 1657{
1652 struct btrfs_root *root = BTRFS_I(inode)->root; 1658 struct btrfs_root *root = BTRFS_I(inode)->root;
1653 struct btrfs_trans_handle *trans; 1659 struct btrfs_trans_handle *trans = NULL;
1654 struct btrfs_ordered_extent *ordered_extent = NULL; 1660 struct btrfs_ordered_extent *ordered_extent = NULL;
1655 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 1661 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1656 struct extent_state *cached_state = NULL; 1662 struct extent_state *cached_state = NULL;
@@ -1668,9 +1674,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1668 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1674 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1669 if (!ret) { 1675 if (!ret) {
1670 trans = btrfs_join_transaction(root, 1); 1676 trans = btrfs_join_transaction(root, 1);
1677 btrfs_set_trans_block_group(trans, inode);
1678 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1671 ret = btrfs_update_inode(trans, root, inode); 1679 ret = btrfs_update_inode(trans, root, inode);
1672 BUG_ON(ret); 1680 BUG_ON(ret);
1673 btrfs_end_transaction(trans, root);
1674 } 1681 }
1675 goto out; 1682 goto out;
1676 } 1683 }
@@ -1680,6 +1687,8 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1680 0, &cached_state, GFP_NOFS); 1687 0, &cached_state, GFP_NOFS);
1681 1688
1682 trans = btrfs_join_transaction(root, 1); 1689 trans = btrfs_join_transaction(root, 1);
1690 btrfs_set_trans_block_group(trans, inode);
1691 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1683 1692
1684 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 1693 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1685 compressed = 1; 1694 compressed = 1;
@@ -1711,12 +1720,13 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1711 add_pending_csums(trans, inode, ordered_extent->file_offset, 1720 add_pending_csums(trans, inode, ordered_extent->file_offset,
1712 &ordered_extent->list); 1721 &ordered_extent->list);
1713 1722
1714 /* this also removes the ordered extent from the tree */
1715 btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1723 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1716 ret = btrfs_update_inode(trans, root, inode); 1724 ret = btrfs_update_inode(trans, root, inode);
1717 BUG_ON(ret); 1725 BUG_ON(ret);
1718 btrfs_end_transaction(trans, root);
1719out: 1726out:
1727 btrfs_delalloc_release_metadata(inode, ordered_extent->len);
1728 if (trans)
1729 btrfs_end_transaction(trans, root);
1720 /* once for us */ 1730 /* once for us */
1721 btrfs_put_ordered_extent(ordered_extent); 1731 btrfs_put_ordered_extent(ordered_extent);
1722 /* once for the tree */ 1732 /* once for the tree */
@@ -1838,7 +1848,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
1838 1848
1839 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, 1849 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1840 failrec->last_mirror, 1850 failrec->last_mirror,
1841 failrec->bio_flags); 1851 failrec->bio_flags, 0);
1842 return 0; 1852 return 0;
1843} 1853}
1844 1854
@@ -1993,32 +2003,196 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
1993} 2003}
1994 2004
1995/* 2005/*
2006 * calculate extra metadata reservation when snapshotting a subvolume
2007 * contains orphan files.
2008 */
2009void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
2010 struct btrfs_pending_snapshot *pending,
2011 u64 *bytes_to_reserve)
2012{
2013 struct btrfs_root *root;
2014 struct btrfs_block_rsv *block_rsv;
2015 u64 num_bytes;
2016 int index;
2017
2018 root = pending->root;
2019 if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
2020 return;
2021
2022 block_rsv = root->orphan_block_rsv;
2023
2024 /* orphan block reservation for the snapshot */
2025 num_bytes = block_rsv->size;
2026
2027 /*
2028 * after the snapshot is created, COWing tree blocks may use more
2029 * space than it frees. So we should make sure there is enough
2030 * reserved space.
2031 */
2032 index = trans->transid & 0x1;
2033 if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
2034 num_bytes += block_rsv->size -
2035 (block_rsv->reserved + block_rsv->freed[index]);
2036 }
2037
2038 *bytes_to_reserve += num_bytes;
2039}
2040
2041void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
2042 struct btrfs_pending_snapshot *pending)
2043{
2044 struct btrfs_root *root = pending->root;
2045 struct btrfs_root *snap = pending->snap;
2046 struct btrfs_block_rsv *block_rsv;
2047 u64 num_bytes;
2048 int index;
2049 int ret;
2050
2051 if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
2052 return;
2053
2054 /* refill source subvolume's orphan block reservation */
2055 block_rsv = root->orphan_block_rsv;
2056 index = trans->transid & 0x1;
2057 if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
2058 num_bytes = block_rsv->size -
2059 (block_rsv->reserved + block_rsv->freed[index]);
2060 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
2061 root->orphan_block_rsv,
2062 num_bytes);
2063 BUG_ON(ret);
2064 }
2065
2066 /* setup orphan block reservation for the snapshot */
2067 block_rsv = btrfs_alloc_block_rsv(snap);
2068 BUG_ON(!block_rsv);
2069
2070 btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
2071 snap->orphan_block_rsv = block_rsv;
2072
2073 num_bytes = root->orphan_block_rsv->size;
2074 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
2075 block_rsv, num_bytes);
2076 BUG_ON(ret);
2077
2078#if 0
2079 /* insert orphan item for the snapshot */
2080 WARN_ON(!root->orphan_item_inserted);
2081 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2082 snap->root_key.objectid);
2083 BUG_ON(ret);
2084 snap->orphan_item_inserted = 1;
2085#endif
2086}
2087
2088enum btrfs_orphan_cleanup_state {
2089 ORPHAN_CLEANUP_STARTED = 1,
2090 ORPHAN_CLEANUP_DONE = 2,
2091};
2092
2093/*
2094 * This is called at transaction commit time. If there are no orphan
2095 * files in the subvolume, it removes orphan item and frees block_rsv
2096 * structure.
2097 */
2098void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2099 struct btrfs_root *root)
2100{
2101 int ret;
2102
2103 if (!list_empty(&root->orphan_list) ||
2104 root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2105 return;
2106
2107 if (root->orphan_item_inserted &&
2108 btrfs_root_refs(&root->root_item) > 0) {
2109 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
2110 root->root_key.objectid);
2111 BUG_ON(ret);
2112 root->orphan_item_inserted = 0;
2113 }
2114
2115 if (root->orphan_block_rsv) {
2116 WARN_ON(root->orphan_block_rsv->size > 0);
2117 btrfs_free_block_rsv(root, root->orphan_block_rsv);
2118 root->orphan_block_rsv = NULL;
2119 }
2120}
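
Both orphan snapshot helpers above use the same top-up computation on the orphan block_rsv; a minimal sketch of it ('shortfall' is a local name of this sketch, and the freed[] index alternates with the transaction id):

int index = trans->transid & 0x1;
u64 shortfall = 0;

if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size)
	shortfall = block_rsv->size -
		    (block_rsv->reserved + block_rsv->freed[index]);

/* pre-snapshot: add the shortfall to *bytes_to_reserve;
 * post-snapshot: migrate it out of pending->block_rsv with
 * btrfs_block_rsv_migrate(). */
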
2121
2122/*
1996 * This creates an orphan entry for the given inode in case something goes 2123 * This creates an orphan entry for the given inode in case something goes
1997 * wrong in the middle of an unlink/truncate. 2124 * wrong in the middle of an unlink/truncate.
2125 *
2126 * NOTE: caller of this function should reserve 5 units of metadata for
2127 * this function.
1998 */ 2128 */
1999int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) 2129int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2000{ 2130{
2001 struct btrfs_root *root = BTRFS_I(inode)->root; 2131 struct btrfs_root *root = BTRFS_I(inode)->root;
2002 int ret = 0; 2132 struct btrfs_block_rsv *block_rsv = NULL;
2133 int reserve = 0;
2134 int insert = 0;
2135 int ret;
2003 2136
2004 spin_lock(&root->list_lock); 2137 if (!root->orphan_block_rsv) {
2138 block_rsv = btrfs_alloc_block_rsv(root);
2139 BUG_ON(!block_rsv);
2140 }
2005 2141
2006 /* already on the orphan list, we're good */ 2142 spin_lock(&root->orphan_lock);
2007 if (!list_empty(&BTRFS_I(inode)->i_orphan)) { 2143 if (!root->orphan_block_rsv) {
2008 spin_unlock(&root->list_lock); 2144 root->orphan_block_rsv = block_rsv;
2009 return 0; 2145 } else if (block_rsv) {
2146 btrfs_free_block_rsv(root, block_rsv);
2147 block_rsv = NULL;
2148 }
2149
2150 if (list_empty(&BTRFS_I(inode)->i_orphan)) {
2151 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2152#if 0
2153 /*
2154 * For proper ENOSPC handling, we should do orphan
2155 * cleanup when mounting. But this introduces a backward
2156 * compatibility issue.
2157 */
2158 if (!xchg(&root->orphan_item_inserted, 1))
2159 insert = 2;
2160 else
2161 insert = 1;
2162#endif
2163 insert = 1;
2164 } else {
2165 WARN_ON(!BTRFS_I(inode)->orphan_meta_reserved);
2010 } 2166 }
2011 2167
2012 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); 2168 if (!BTRFS_I(inode)->orphan_meta_reserved) {
2169 BTRFS_I(inode)->orphan_meta_reserved = 1;
2170 reserve = 1;
2171 }
2172 spin_unlock(&root->orphan_lock);
2013 2173
2014 spin_unlock(&root->list_lock); 2174 if (block_rsv)
2175 btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
2015 2176
2016 /* 2177 /* grab metadata reservation from transaction handle */
2017 * insert an orphan item to track this unlinked/truncated file 2178 if (reserve) {
2018 */ 2179 ret = btrfs_orphan_reserve_metadata(trans, inode);
2019 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino); 2180 BUG_ON(ret);
2181 }
2020 2182
2021 return ret; 2183 /* insert an orphan item to track this unlinked/truncated file */
2184 if (insert >= 1) {
2185 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
2186 BUG_ON(ret);
2187 }
2188
2189 /* insert an orphan item to track that the subvolume contains orphan files */
2190 if (insert >= 2) {
2191 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
2192 root->root_key.objectid);
2193 BUG_ON(ret);
2194 }
2195 return 0;
2022} 2196}
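
The pattern in btrfs_orphan_add() above is to take the decisions under the spinlock (does an orphan item need to be inserted? does metadata still need reserving?) and then do the heavier work only after dropping it. A compact sketch of that decision split, with plain flags standing in for the btrfs structures:

#include <stdio.h>

struct orphan_inode {
	int on_orphan_list;      /* stands in for !list_empty(&i_orphan) */
	int meta_reserved;       /* stands in for orphan_meta_reserved */
};

/* decide, "under the lock", what work is left to do outside it */
static void orphan_add_decide(struct orphan_inode *oi, int *insert, int *reserve)
{
	*insert = 0;
	*reserve = 0;

	if (!oi->on_orphan_list) {
		oi->on_orphan_list = 1;
		*insert = 1;             /* orphan item must be inserted later */
	}
	if (!oi->meta_reserved) {
		oi->meta_reserved = 1;
		*reserve = 1;            /* metadata reservation must be grabbed later */
	}
}

int main(void)
{
	struct orphan_inode oi = { 0, 0 };
	int insert, reserve;

	orphan_add_decide(&oi, &insert, &reserve);
	printf("first add:  insert=%d reserve=%d\n", insert, reserve);

	orphan_add_decide(&oi, &insert, &reserve);
	printf("second add: insert=%d reserve=%d\n", insert, reserve);
	return 0;
}
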
2023 2197
2024/* 2198/*
@@ -2028,26 +2202,31 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2028int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) 2202int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2029{ 2203{
2030 struct btrfs_root *root = BTRFS_I(inode)->root; 2204 struct btrfs_root *root = BTRFS_I(inode)->root;
2205 int delete_item = 0;
2206 int release_rsv = 0;
2031 int ret = 0; 2207 int ret = 0;
2032 2208
2033 spin_lock(&root->list_lock); 2209 spin_lock(&root->orphan_lock);
2034 2210 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
2035 if (list_empty(&BTRFS_I(inode)->i_orphan)) { 2211 list_del_init(&BTRFS_I(inode)->i_orphan);
2036 spin_unlock(&root->list_lock); 2212 delete_item = 1;
2037 return 0;
2038 } 2213 }
2039 2214
2040 list_del_init(&BTRFS_I(inode)->i_orphan); 2215 if (BTRFS_I(inode)->orphan_meta_reserved) {
2041 if (!trans) { 2216 BTRFS_I(inode)->orphan_meta_reserved = 0;
2042 spin_unlock(&root->list_lock); 2217 release_rsv = 1;
2043 return 0;
2044 } 2218 }
2219 spin_unlock(&root->orphan_lock);
2045 2220
2046 spin_unlock(&root->list_lock); 2221 if (trans && delete_item) {
2222 ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
2223 BUG_ON(ret);
2224 }
2047 2225
2048 ret = btrfs_del_orphan_item(trans, root, inode->i_ino); 2226 if (release_rsv)
2227 btrfs_orphan_release_metadata(inode);
2049 2228
2050 return ret; 2229 return 0;
2051} 2230}
2052 2231
2053/* 2232/*
@@ -2064,7 +2243,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
2064 struct inode *inode; 2243 struct inode *inode;
2065 int ret = 0, nr_unlink = 0, nr_truncate = 0; 2244 int ret = 0, nr_unlink = 0, nr_truncate = 0;
2066 2245
2067 if (!xchg(&root->clean_orphans, 0)) 2246 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
2068 return; 2247 return;
2069 2248
2070 path = btrfs_alloc_path(); 2249 path = btrfs_alloc_path();
@@ -2117,16 +2296,15 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
2117 found_key.type = BTRFS_INODE_ITEM_KEY; 2296 found_key.type = BTRFS_INODE_ITEM_KEY;
2118 found_key.offset = 0; 2297 found_key.offset = 0;
2119 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); 2298 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
2120 if (IS_ERR(inode)) 2299 BUG_ON(IS_ERR(inode));
2121 break;
2122 2300
2123 /* 2301 /*
2124 * add this inode to the orphan list so btrfs_orphan_del does 2302 * add this inode to the orphan list so btrfs_orphan_del does
2125 * the proper thing when we hit it 2303 * the proper thing when we hit it
2126 */ 2304 */
2127 spin_lock(&root->list_lock); 2305 spin_lock(&root->orphan_lock);
2128 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); 2306 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2129 spin_unlock(&root->list_lock); 2307 spin_unlock(&root->orphan_lock);
2130 2308
2131 /* 2309 /*
2132 * if this is a bad inode, means we actually succeeded in 2310 * if this is a bad inode, means we actually succeeded in
@@ -2135,7 +2313,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
2135 * do a destroy_inode 2313 * do a destroy_inode
2136 */ 2314 */
2137 if (is_bad_inode(inode)) { 2315 if (is_bad_inode(inode)) {
2138 trans = btrfs_start_transaction(root, 1); 2316 trans = btrfs_start_transaction(root, 0);
2139 btrfs_orphan_del(trans, inode); 2317 btrfs_orphan_del(trans, inode);
2140 btrfs_end_transaction(trans, root); 2318 btrfs_end_transaction(trans, root);
2141 iput(inode); 2319 iput(inode);
@@ -2153,13 +2331,23 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
2153 /* this will do delete_inode and everything for us */ 2331 /* this will do delete_inode and everything for us */
2154 iput(inode); 2332 iput(inode);
2155 } 2333 }
2334 btrfs_free_path(path);
2335
2336 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
2337
2338 if (root->orphan_block_rsv)
2339 btrfs_block_rsv_release(root, root->orphan_block_rsv,
2340 (u64)-1);
2341
2342 if (root->orphan_block_rsv || root->orphan_item_inserted) {
2343 trans = btrfs_join_transaction(root, 1);
2344 btrfs_end_transaction(trans, root);
2345 }
2156 2346
2157 if (nr_unlink) 2347 if (nr_unlink)
2158 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink); 2348 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2159 if (nr_truncate) 2349 if (nr_truncate)
2160 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate); 2350 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2161
2162 btrfs_free_path(path);
2163} 2351}
2164 2352
2165/* 2353/*
@@ -2478,29 +2666,201 @@ out:
2478 return ret; 2666 return ret;
2479} 2667}
2480 2668
2481static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 2669/* helper to check if there is any shared block in the path */
2670static int check_path_shared(struct btrfs_root *root,
2671 struct btrfs_path *path)
2672{
2673 struct extent_buffer *eb;
2674 int level;
2675 int ret;
2676 u64 refs;
2677
2678 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2679 if (!path->nodes[level])
2680 break;
2681 eb = path->nodes[level];
2682 if (!btrfs_block_can_be_shared(root, eb))
2683 continue;
2684 ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len,
2685 &refs, NULL);
2686 if (refs > 1)
2687 return 1;
2688 }
2689 return 0;
2690}
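
check_path_shared() above walks every node recorded in the search path and declares the path shared as soon as one of them carries an extent refcount above one (nodes that cannot be shared are skipped). A user-space sketch of that walk over a plain array of refcounts, with the actual extent lookup stubbed out:

#include <stdio.h>

#define MAX_LEVEL 8

/* refs[level] stands in for the extent refcount of path->nodes[level];
 * a zero entry means "no node at this level", like a NULL path node. */
static int path_is_shared(const unsigned long long refs[MAX_LEVEL])
{
	for (int level = 0; level < MAX_LEVEL; level++) {
		if (refs[level] == 0)
			break;            /* end of the recorded path */
		if (refs[level] > 1)
			return 1;         /* somebody else references this block */
	}
	return 0;
}

int main(void)
{
	unsigned long long private_path[MAX_LEVEL] = { 1, 1, 1 };
	unsigned long long shared_path[MAX_LEVEL]  = { 1, 2, 1 };

	printf("private path shared? %d\n", path_is_shared(private_path));
	printf("shared path shared?  %d\n", path_is_shared(shared_path));
	return 0;
}
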
2691
2692/*
2693 * helper to start transaction for unlink and rmdir.
2694 *
2695 * unlink and rmdir are special in btrfs: they do not always free space,
2696 * so in the ENOSPC case we should make sure they will free space before
2697 * allowing them to use the global metadata reservation.
2698 */
2699static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
2700 struct dentry *dentry)
2482{ 2701{
2483 struct btrfs_root *root;
2484 struct btrfs_trans_handle *trans; 2702 struct btrfs_trans_handle *trans;
2703 struct btrfs_root *root = BTRFS_I(dir)->root;
2704 struct btrfs_path *path;
2705 struct btrfs_inode_ref *ref;
2706 struct btrfs_dir_item *di;
2485 struct inode *inode = dentry->d_inode; 2707 struct inode *inode = dentry->d_inode;
2708 u64 index;
2709 int check_link = 1;
2710 int err = -ENOSPC;
2486 int ret; 2711 int ret;
2487 unsigned long nr = 0;
2488 2712
2489 root = BTRFS_I(dir)->root; 2713 trans = btrfs_start_transaction(root, 10);
2714 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
2715 return trans;
2490 2716
2491 /* 2717 if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
2492 * 5 items for unlink inode 2718 return ERR_PTR(-ENOSPC);
2493 * 1 for orphan 2719
2494 */ 2720 /* check if someone else holds a reference */
2495 ret = btrfs_reserve_metadata_space(root, 6); 2721 if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
2496 if (ret) 2722 return ERR_PTR(-ENOSPC);
2497 return ret; 2723
2724 if (atomic_read(&inode->i_count) > 2)
2725 return ERR_PTR(-ENOSPC);
2726
2727 if (xchg(&root->fs_info->enospc_unlink, 1))
2728 return ERR_PTR(-ENOSPC);
2498 2729
2499 trans = btrfs_start_transaction(root, 1); 2730 path = btrfs_alloc_path();
2731 if (!path) {
2732 root->fs_info->enospc_unlink = 0;
2733 return ERR_PTR(-ENOMEM);
2734 }
2735
2736 trans = btrfs_start_transaction(root, 0);
2500 if (IS_ERR(trans)) { 2737 if (IS_ERR(trans)) {
2501 btrfs_unreserve_metadata_space(root, 6); 2738 btrfs_free_path(path);
2502 return PTR_ERR(trans); 2739 root->fs_info->enospc_unlink = 0;
2740 return trans;
2741 }
2742
2743 path->skip_locking = 1;
2744 path->search_commit_root = 1;
2745
2746 ret = btrfs_lookup_inode(trans, root, path,
2747 &BTRFS_I(dir)->location, 0);
2748 if (ret < 0) {
2749 err = ret;
2750 goto out;
2751 }
2752 if (ret == 0) {
2753 if (check_path_shared(root, path))
2754 goto out;
2755 } else {
2756 check_link = 0;
2757 }
2758 btrfs_release_path(root, path);
2759
2760 ret = btrfs_lookup_inode(trans, root, path,
2761 &BTRFS_I(inode)->location, 0);
2762 if (ret < 0) {
2763 err = ret;
2764 goto out;
2765 }
2766 if (ret == 0) {
2767 if (check_path_shared(root, path))
2768 goto out;
2769 } else {
2770 check_link = 0;
2771 }
2772 btrfs_release_path(root, path);
2773
2774 if (ret == 0 && S_ISREG(inode->i_mode)) {
2775 ret = btrfs_lookup_file_extent(trans, root, path,
2776 inode->i_ino, (u64)-1, 0);
2777 if (ret < 0) {
2778 err = ret;
2779 goto out;
2780 }
2781 BUG_ON(ret == 0);
2782 if (check_path_shared(root, path))
2783 goto out;
2784 btrfs_release_path(root, path);
2785 }
2786
2787 if (!check_link) {
2788 err = 0;
2789 goto out;
2790 }
2791
2792 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2793 dentry->d_name.name, dentry->d_name.len, 0);
2794 if (IS_ERR(di)) {
2795 err = PTR_ERR(di);
2796 goto out;
2797 }
2798 if (di) {
2799 if (check_path_shared(root, path))
2800 goto out;
2801 } else {
2802 err = 0;
2803 goto out;
2503 } 2804 }
2805 btrfs_release_path(root, path);
2806
2807 ref = btrfs_lookup_inode_ref(trans, root, path,
2808 dentry->d_name.name, dentry->d_name.len,
2809 inode->i_ino, dir->i_ino, 0);
2810 if (IS_ERR(ref)) {
2811 err = PTR_ERR(ref);
2812 goto out;
2813 }
2814 BUG_ON(!ref);
2815 if (check_path_shared(root, path))
2816 goto out;
2817 index = btrfs_inode_ref_index(path->nodes[0], ref);
2818 btrfs_release_path(root, path);
2819
2820 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index,
2821 dentry->d_name.name, dentry->d_name.len, 0);
2822 if (IS_ERR(di)) {
2823 err = PTR_ERR(di);
2824 goto out;
2825 }
2826 BUG_ON(ret == -ENOENT);
2827 if (check_path_shared(root, path))
2828 goto out;
2829
2830 err = 0;
2831out:
2832 btrfs_free_path(path);
2833 if (err) {
2834 btrfs_end_transaction(trans, root);
2835 root->fs_info->enospc_unlink = 0;
2836 return ERR_PTR(err);
2837 }
2838
2839 trans->block_rsv = &root->fs_info->global_block_rsv;
2840 return trans;
2841}
2842
2843static void __unlink_end_trans(struct btrfs_trans_handle *trans,
2844 struct btrfs_root *root)
2845{
2846 if (trans->block_rsv == &root->fs_info->global_block_rsv) {
2847 BUG_ON(!root->fs_info->enospc_unlink);
2848 root->fs_info->enospc_unlink = 0;
2849 }
2850 btrfs_end_transaction_throttle(trans, root);
2851}
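
__unlink_start_trans() and __unlink_end_trans() bracket the ENOSPC fallback: the flag is claimed with xchg(&fs_info->enospc_unlink, 1), so only one unlink/rmdir at a time may dip into the global block reservation, and the end helper clears it again once the transaction finishes. A sketch of that claim/release pairing, assuming a GCC/Clang atomic exchange in place of the kernel's xchg():

#include <stdio.h>

static int enospc_unlink;   /* stands in for fs_info->enospc_unlink */

/* returns 0 if we claimed the fallback path, -1 if someone else already holds it */
static int claim_enospc_fallback(void)
{
	if (__atomic_exchange_n(&enospc_unlink, 1, __ATOMIC_SEQ_CST))
		return -1;          /* flag was already set: behave as plain ENOSPC */
	return 0;
}

static void release_enospc_fallback(void)
{
	__atomic_store_n(&enospc_unlink, 0, __ATOMIC_SEQ_CST);
}

int main(void)
{
	printf("first claim:   %d\n", claim_enospc_fallback());
	printf("second claim:  %d\n", claim_enospc_fallback());
	release_enospc_fallback();
	printf("after release: %d\n", claim_enospc_fallback());
	return 0;
}
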
2852
2853static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2854{
2855 struct btrfs_root *root = BTRFS_I(dir)->root;
2856 struct btrfs_trans_handle *trans;
2857 struct inode *inode = dentry->d_inode;
2858 int ret;
2859 unsigned long nr = 0;
2860
2861 trans = __unlink_start_trans(dir, dentry);
2862 if (IS_ERR(trans))
2863 return PTR_ERR(trans);
2504 2864
2505 btrfs_set_trans_block_group(trans, dir); 2865 btrfs_set_trans_block_group(trans, dir);
2506 2866
@@ -2508,14 +2868,15 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2508 2868
2509 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, 2869 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2510 dentry->d_name.name, dentry->d_name.len); 2870 dentry->d_name.name, dentry->d_name.len);
2871 BUG_ON(ret);
2511 2872
2512 if (inode->i_nlink == 0) 2873 if (inode->i_nlink == 0) {
2513 ret = btrfs_orphan_add(trans, inode); 2874 ret = btrfs_orphan_add(trans, inode);
2875 BUG_ON(ret);
2876 }
2514 2877
2515 nr = trans->blocks_used; 2878 nr = trans->blocks_used;
2516 2879 __unlink_end_trans(trans, root);
2517 btrfs_end_transaction_throttle(trans, root);
2518 btrfs_unreserve_metadata_space(root, 6);
2519 btrfs_btree_balance_dirty(root, nr); 2880 btrfs_btree_balance_dirty(root, nr);
2520 return ret; 2881 return ret;
2521} 2882}
@@ -2587,7 +2948,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2587{ 2948{
2588 struct inode *inode = dentry->d_inode; 2949 struct inode *inode = dentry->d_inode;
2589 int err = 0; 2950 int err = 0;
2590 int ret;
2591 struct btrfs_root *root = BTRFS_I(dir)->root; 2951 struct btrfs_root *root = BTRFS_I(dir)->root;
2592 struct btrfs_trans_handle *trans; 2952 struct btrfs_trans_handle *trans;
2593 unsigned long nr = 0; 2953 unsigned long nr = 0;
@@ -2596,15 +2956,9 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2596 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 2956 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
2597 return -ENOTEMPTY; 2957 return -ENOTEMPTY;
2598 2958
2599 ret = btrfs_reserve_metadata_space(root, 5); 2959 trans = __unlink_start_trans(dir, dentry);
2600 if (ret) 2960 if (IS_ERR(trans))
2601 return ret;
2602
2603 trans = btrfs_start_transaction(root, 1);
2604 if (IS_ERR(trans)) {
2605 btrfs_unreserve_metadata_space(root, 5);
2606 return PTR_ERR(trans); 2961 return PTR_ERR(trans);
2607 }
2608 2962
2609 btrfs_set_trans_block_group(trans, dir); 2963 btrfs_set_trans_block_group(trans, dir);
2610 2964
@@ -2627,12 +2981,9 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2627 btrfs_i_size_write(inode, 0); 2981 btrfs_i_size_write(inode, 0);
2628out: 2982out:
2629 nr = trans->blocks_used; 2983 nr = trans->blocks_used;
2630 ret = btrfs_end_transaction_throttle(trans, root); 2984 __unlink_end_trans(trans, root);
2631 btrfs_unreserve_metadata_space(root, 5);
2632 btrfs_btree_balance_dirty(root, nr); 2985 btrfs_btree_balance_dirty(root, nr);
2633 2986
2634 if (ret && !err)
2635 err = ret;
2636 return err; 2987 return err;
2637} 2988}
2638 2989
@@ -3029,6 +3380,7 @@ out:
3029 if (pending_del_nr) { 3380 if (pending_del_nr) {
3030 ret = btrfs_del_items(trans, root, path, pending_del_slot, 3381 ret = btrfs_del_items(trans, root, path, pending_del_slot,
3031 pending_del_nr); 3382 pending_del_nr);
3383 BUG_ON(ret);
3032 } 3384 }
3033 btrfs_free_path(path); 3385 btrfs_free_path(path);
3034 return err; 3386 return err;
@@ -3056,11 +3408,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3056 3408
3057 if ((offset & (blocksize - 1)) == 0) 3409 if ((offset & (blocksize - 1)) == 0)
3058 goto out; 3410 goto out;
3059 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE); 3411 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
3060 if (ret)
3061 goto out;
3062
3063 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
3064 if (ret) 3412 if (ret)
3065 goto out; 3413 goto out;
3066 3414
@@ -3068,8 +3416,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3068again: 3416again:
3069 page = grab_cache_page(mapping, index); 3417 page = grab_cache_page(mapping, index);
3070 if (!page) { 3418 if (!page) {
3071 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); 3419 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
3072 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3073 goto out; 3420 goto out;
3074 } 3421 }
3075 3422
@@ -3132,8 +3479,7 @@ again:
3132 3479
3133out_unlock: 3480out_unlock:
3134 if (ret) 3481 if (ret)
3135 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); 3482 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
3136 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
3137 unlock_page(page); 3483 unlock_page(page);
3138 page_cache_release(page); 3484 page_cache_release(page);
3139out: 3485out:
@@ -3145,7 +3491,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3145 struct btrfs_trans_handle *trans; 3491 struct btrfs_trans_handle *trans;
3146 struct btrfs_root *root = BTRFS_I(inode)->root; 3492 struct btrfs_root *root = BTRFS_I(inode)->root;
3147 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3493 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3148 struct extent_map *em; 3494 struct extent_map *em = NULL;
3149 struct extent_state *cached_state = NULL; 3495 struct extent_state *cached_state = NULL;
3150 u64 mask = root->sectorsize - 1; 3496 u64 mask = root->sectorsize - 1;
3151 u64 hole_start = (inode->i_size + mask) & ~mask; 3497 u64 hole_start = (inode->i_size + mask) & ~mask;
@@ -3183,11 +3529,11 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3183 u64 hint_byte = 0; 3529 u64 hint_byte = 0;
3184 hole_size = last_byte - cur_offset; 3530 hole_size = last_byte - cur_offset;
3185 3531
3186 err = btrfs_reserve_metadata_space(root, 2); 3532 trans = btrfs_start_transaction(root, 2);
3187 if (err) 3533 if (IS_ERR(trans)) {
3534 err = PTR_ERR(trans);
3188 break; 3535 break;
3189 3536 }
3190 trans = btrfs_start_transaction(root, 1);
3191 btrfs_set_trans_block_group(trans, inode); 3537 btrfs_set_trans_block_group(trans, inode);
3192 3538
3193 err = btrfs_drop_extents(trans, inode, cur_offset, 3539 err = btrfs_drop_extents(trans, inode, cur_offset,
@@ -3205,14 +3551,15 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3205 last_byte - 1, 0); 3551 last_byte - 1, 0);
3206 3552
3207 btrfs_end_transaction(trans, root); 3553 btrfs_end_transaction(trans, root);
3208 btrfs_unreserve_metadata_space(root, 2);
3209 } 3554 }
3210 free_extent_map(em); 3555 free_extent_map(em);
3556 em = NULL;
3211 cur_offset = last_byte; 3557 cur_offset = last_byte;
3212 if (cur_offset >= block_end) 3558 if (cur_offset >= block_end)
3213 break; 3559 break;
3214 } 3560 }
3215 3561
3562 free_extent_map(em);
3216 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, 3563 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
3217 GFP_NOFS); 3564 GFP_NOFS);
3218 return err; 3565 return err;
@@ -3239,11 +3586,10 @@ static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
3239 } 3586 }
3240 } 3587 }
3241 3588
3242 ret = btrfs_reserve_metadata_space(root, 1); 3589 trans = btrfs_start_transaction(root, 5);
3243 if (ret) 3590 if (IS_ERR(trans))
3244 return ret; 3591 return PTR_ERR(trans);
3245 3592
3246 trans = btrfs_start_transaction(root, 1);
3247 btrfs_set_trans_block_group(trans, inode); 3593 btrfs_set_trans_block_group(trans, inode);
3248 3594
3249 ret = btrfs_orphan_add(trans, inode); 3595 ret = btrfs_orphan_add(trans, inode);
@@ -3251,7 +3597,6 @@ static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
3251 3597
3252 nr = trans->blocks_used; 3598 nr = trans->blocks_used;
3253 btrfs_end_transaction(trans, root); 3599 btrfs_end_transaction(trans, root);
3254 btrfs_unreserve_metadata_space(root, 1);
3255 btrfs_btree_balance_dirty(root, nr); 3600 btrfs_btree_balance_dirty(root, nr);
3256 3601
3257 if (attr->ia_size > inode->i_size) { 3602 if (attr->ia_size > inode->i_size) {
@@ -3264,8 +3609,11 @@ static int btrfs_setattr_size(struct inode *inode, struct iattr *attr)
3264 i_size_write(inode, attr->ia_size); 3609 i_size_write(inode, attr->ia_size);
3265 btrfs_ordered_update_i_size(inode, inode->i_size, NULL); 3610 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
3266 3611
3267 trans = btrfs_start_transaction(root, 1); 3612 trans = btrfs_start_transaction(root, 0);
3613 BUG_ON(IS_ERR(trans));
3268 btrfs_set_trans_block_group(trans, inode); 3614 btrfs_set_trans_block_group(trans, inode);
3615 trans->block_rsv = root->orphan_block_rsv;
3616 BUG_ON(!trans->block_rsv);
3269 3617
3270 ret = btrfs_update_inode(trans, root, inode); 3618 ret = btrfs_update_inode(trans, root, inode);
3271 BUG_ON(ret); 3619 BUG_ON(ret);
@@ -3345,10 +3693,21 @@ void btrfs_delete_inode(struct inode *inode)
3345 btrfs_i_size_write(inode, 0); 3693 btrfs_i_size_write(inode, 0);
3346 3694
3347 while (1) { 3695 while (1) {
3348 trans = btrfs_start_transaction(root, 1); 3696 trans = btrfs_start_transaction(root, 0);
3697 BUG_ON(IS_ERR(trans));
3349 btrfs_set_trans_block_group(trans, inode); 3698 btrfs_set_trans_block_group(trans, inode);
3350 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); 3699 trans->block_rsv = root->orphan_block_rsv;
3700
3701 ret = btrfs_block_rsv_check(trans, root,
3702 root->orphan_block_rsv, 0, 5);
3703 if (ret) {
3704 BUG_ON(ret != -EAGAIN);
3705 ret = btrfs_commit_transaction(trans, root);
3706 BUG_ON(ret);
3707 continue;
3708 }
3351 3709
3710 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
3352 if (ret != -EAGAIN) 3711 if (ret != -EAGAIN)
3353 break; 3712 break;
3354 3713
@@ -3356,6 +3715,7 @@ void btrfs_delete_inode(struct inode *inode)
3356 btrfs_end_transaction(trans, root); 3715 btrfs_end_transaction(trans, root);
3357 trans = NULL; 3716 trans = NULL;
3358 btrfs_btree_balance_dirty(root, nr); 3717 btrfs_btree_balance_dirty(root, nr);
3718
3359 } 3719 }
3360 3720
3361 if (ret == 0) { 3721 if (ret == 0) {
@@ -3596,40 +3956,10 @@ again:
3596 return 0; 3956 return 0;
3597} 3957}
3598 3958
3599static noinline void init_btrfs_i(struct inode *inode)
3600{
3601 struct btrfs_inode *bi = BTRFS_I(inode);
3602
3603 bi->generation = 0;
3604 bi->sequence = 0;
3605 bi->last_trans = 0;
3606 bi->last_sub_trans = 0;
3607 bi->logged_trans = 0;
3608 bi->delalloc_bytes = 0;
3609 bi->reserved_bytes = 0;
3610 bi->disk_i_size = 0;
3611 bi->flags = 0;
3612 bi->index_cnt = (u64)-1;
3613 bi->last_unlink_trans = 0;
3614 bi->ordered_data_close = 0;
3615 bi->force_compress = 0;
3616 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3617 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3618 inode->i_mapping, GFP_NOFS);
3619 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3620 inode->i_mapping, GFP_NOFS);
3621 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3622 INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3623 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3624 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3625 mutex_init(&BTRFS_I(inode)->log_mutex);
3626}
3627
3628static int btrfs_init_locked_inode(struct inode *inode, void *p) 3959static int btrfs_init_locked_inode(struct inode *inode, void *p)
3629{ 3960{
3630 struct btrfs_iget_args *args = p; 3961 struct btrfs_iget_args *args = p;
3631 inode->i_ino = args->ino; 3962 inode->i_ino = args->ino;
3632 init_btrfs_i(inode);
3633 BTRFS_I(inode)->root = args->root; 3963 BTRFS_I(inode)->root = args->root;
3634 btrfs_set_inode_space_info(args->root, inode); 3964 btrfs_set_inode_space_info(args->root, inode);
3635 return 0; 3965 return 0;
@@ -3692,8 +4022,6 @@ static struct inode *new_simple_dir(struct super_block *s,
3692 if (!inode) 4022 if (!inode)
3693 return ERR_PTR(-ENOMEM); 4023 return ERR_PTR(-ENOMEM);
3694 4024
3695 init_btrfs_i(inode);
3696
3697 BTRFS_I(inode)->root = root; 4025 BTRFS_I(inode)->root = root;
3698 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 4026 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
3699 BTRFS_I(inode)->dummy_inode = 1; 4027 BTRFS_I(inode)->dummy_inode = 1;
@@ -3950,7 +4278,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
3950 struct btrfs_trans_handle *trans; 4278 struct btrfs_trans_handle *trans;
3951 int ret = 0; 4279 int ret = 0;
3952 4280
3953 if (root->fs_info->btree_inode == inode) 4281 if (BTRFS_I(inode)->dummy_inode)
3954 return 0; 4282 return 0;
3955 4283
3956 if (wbc->sync_mode == WB_SYNC_ALL) { 4284 if (wbc->sync_mode == WB_SYNC_ALL) {
@@ -3971,10 +4299,38 @@ void btrfs_dirty_inode(struct inode *inode)
3971{ 4299{
3972 struct btrfs_root *root = BTRFS_I(inode)->root; 4300 struct btrfs_root *root = BTRFS_I(inode)->root;
3973 struct btrfs_trans_handle *trans; 4301 struct btrfs_trans_handle *trans;
4302 int ret;
4303
4304 if (BTRFS_I(inode)->dummy_inode)
4305 return;
3974 4306
3975 trans = btrfs_join_transaction(root, 1); 4307 trans = btrfs_join_transaction(root, 1);
3976 btrfs_set_trans_block_group(trans, inode); 4308 btrfs_set_trans_block_group(trans, inode);
3977 btrfs_update_inode(trans, root, inode); 4309
4310 ret = btrfs_update_inode(trans, root, inode);
4311 if (ret && ret == -ENOSPC) {
4312 /* whoops, lets try again with the full transaction */
4313 btrfs_end_transaction(trans, root);
4314 trans = btrfs_start_transaction(root, 1);
4315 if (IS_ERR(trans)) {
4316 if (printk_ratelimit()) {
4317 printk(KERN_ERR "btrfs: fail to "
4318 "dirty inode %lu error %ld\n",
4319 inode->i_ino, PTR_ERR(trans));
4320 }
4321 return;
4322 }
4323 btrfs_set_trans_block_group(trans, inode);
4324
4325 ret = btrfs_update_inode(trans, root, inode);
4326 if (ret) {
4327 if (printk_ratelimit()) {
4328 printk(KERN_ERR "btrfs: fail to "
4329 "dirty inode %lu error %d\n",
4330 inode->i_ino, ret);
4331 }
4332 }
4333 }
3978 btrfs_end_transaction(trans, root); 4334 btrfs_end_transaction(trans, root);
3979} 4335}
3980 4336
@@ -4092,7 +4448,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4092 * btrfs_get_inode_index_count has an explanation for the magic 4448 * btrfs_get_inode_index_count has an explanation for the magic
4093 * number 4449 * number
4094 */ 4450 */
4095 init_btrfs_i(inode);
4096 BTRFS_I(inode)->index_cnt = 2; 4451 BTRFS_I(inode)->index_cnt = 2;
4097 BTRFS_I(inode)->root = root; 4452 BTRFS_I(inode)->root = root;
4098 BTRFS_I(inode)->generation = trans->transid; 4453 BTRFS_I(inode)->generation = trans->transid;
@@ -4247,26 +4602,21 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4247 if (!new_valid_dev(rdev)) 4602 if (!new_valid_dev(rdev))
4248 return -EINVAL; 4603 return -EINVAL;
4249 4604
4605 err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
4606 if (err)
4607 return err;
4608
4250 /* 4609 /*
4251 * 2 for inode item and ref 4610 * 2 for inode item and ref
4252 * 2 for dir items 4611 * 2 for dir items
4253 * 1 for xattr if selinux is on 4612 * 1 for xattr if selinux is on
4254 */ 4613 */
4255 err = btrfs_reserve_metadata_space(root, 5); 4614 trans = btrfs_start_transaction(root, 5);
4256 if (err) 4615 if (IS_ERR(trans))
4257 return err; 4616 return PTR_ERR(trans);
4258 4617
4259 trans = btrfs_start_transaction(root, 1);
4260 if (!trans)
4261 goto fail;
4262 btrfs_set_trans_block_group(trans, dir); 4618 btrfs_set_trans_block_group(trans, dir);
4263 4619
4264 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4265 if (err) {
4266 err = -ENOSPC;
4267 goto out_unlock;
4268 }
4269
4270 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4620 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4271 dentry->d_name.len, 4621 dentry->d_name.len,
4272 dentry->d_parent->d_inode->i_ino, objectid, 4622 dentry->d_parent->d_inode->i_ino, objectid,
@@ -4295,13 +4645,11 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4295out_unlock: 4645out_unlock:
4296 nr = trans->blocks_used; 4646 nr = trans->blocks_used;
4297 btrfs_end_transaction_throttle(trans, root); 4647 btrfs_end_transaction_throttle(trans, root);
4298fail: 4648 btrfs_btree_balance_dirty(root, nr);
4299 btrfs_unreserve_metadata_space(root, 5);
4300 if (drop_inode) { 4649 if (drop_inode) {
4301 inode_dec_link_count(inode); 4650 inode_dec_link_count(inode);
4302 iput(inode); 4651 iput(inode);
4303 } 4652 }
4304 btrfs_btree_balance_dirty(root, nr);
4305 return err; 4653 return err;
4306} 4654}
4307 4655
@@ -4311,32 +4659,26 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4311 struct btrfs_trans_handle *trans; 4659 struct btrfs_trans_handle *trans;
4312 struct btrfs_root *root = BTRFS_I(dir)->root; 4660 struct btrfs_root *root = BTRFS_I(dir)->root;
4313 struct inode *inode = NULL; 4661 struct inode *inode = NULL;
4314 int err;
4315 int drop_inode = 0; 4662 int drop_inode = 0;
4663 int err;
4316 unsigned long nr = 0; 4664 unsigned long nr = 0;
4317 u64 objectid; 4665 u64 objectid;
4318 u64 index = 0; 4666 u64 index = 0;
4319 4667
4668 err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
4669 if (err)
4670 return err;
4320 /* 4671 /*
4321 * 2 for inode item and ref 4672 * 2 for inode item and ref
4322 * 2 for dir items 4673 * 2 for dir items
4323 * 1 for xattr if selinux is on 4674 * 1 for xattr if selinux is on
4324 */ 4675 */
4325 err = btrfs_reserve_metadata_space(root, 5); 4676 trans = btrfs_start_transaction(root, 5);
4326 if (err) 4677 if (IS_ERR(trans))
4327 return err; 4678 return PTR_ERR(trans);
4328 4679
4329 trans = btrfs_start_transaction(root, 1);
4330 if (!trans)
4331 goto fail;
4332 btrfs_set_trans_block_group(trans, dir); 4680 btrfs_set_trans_block_group(trans, dir);
4333 4681
4334 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4335 if (err) {
4336 err = -ENOSPC;
4337 goto out_unlock;
4338 }
4339
4340 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4682 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4341 dentry->d_name.len, 4683 dentry->d_name.len,
4342 dentry->d_parent->d_inode->i_ino, 4684 dentry->d_parent->d_inode->i_ino,
@@ -4368,8 +4710,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4368out_unlock: 4710out_unlock:
4369 nr = trans->blocks_used; 4711 nr = trans->blocks_used;
4370 btrfs_end_transaction_throttle(trans, root); 4712 btrfs_end_transaction_throttle(trans, root);
4371fail:
4372 btrfs_unreserve_metadata_space(root, 5);
4373 if (drop_inode) { 4713 if (drop_inode) {
4374 inode_dec_link_count(inode); 4714 inode_dec_link_count(inode);
4375 iput(inode); 4715 iput(inode);
@@ -4396,21 +4736,21 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4396 if (root->objectid != BTRFS_I(inode)->root->objectid) 4736 if (root->objectid != BTRFS_I(inode)->root->objectid)
4397 return -EPERM; 4737 return -EPERM;
4398 4738
4399 /*
4400 * 1 item for inode ref
4401 * 2 items for dir items
4402 */
4403 err = btrfs_reserve_metadata_space(root, 3);
4404 if (err)
4405 return err;
4406
4407 btrfs_inc_nlink(inode); 4739 btrfs_inc_nlink(inode);
4408 4740
4409 err = btrfs_set_inode_index(dir, &index); 4741 err = btrfs_set_inode_index(dir, &index);
4410 if (err) 4742 if (err)
4411 goto fail; 4743 goto fail;
4412 4744
4413 trans = btrfs_start_transaction(root, 1); 4745 /*
4746 * 1 item for inode ref
4747 * 2 items for dir items
4748 */
4749 trans = btrfs_start_transaction(root, 3);
4750 if (IS_ERR(trans)) {
4751 err = PTR_ERR(trans);
4752 goto fail;
4753 }
4414 4754
4415 btrfs_set_trans_block_group(trans, dir); 4755 btrfs_set_trans_block_group(trans, dir);
4416 atomic_inc(&inode->i_count); 4756 atomic_inc(&inode->i_count);
@@ -4429,7 +4769,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4429 nr = trans->blocks_used; 4769 nr = trans->blocks_used;
4430 btrfs_end_transaction_throttle(trans, root); 4770 btrfs_end_transaction_throttle(trans, root);
4431fail: 4771fail:
4432 btrfs_unreserve_metadata_space(root, 3);
4433 if (drop_inode) { 4772 if (drop_inode) {
4434 inode_dec_link_count(inode); 4773 inode_dec_link_count(inode);
4435 iput(inode); 4774 iput(inode);
@@ -4449,28 +4788,20 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4449 u64 index = 0; 4788 u64 index = 0;
4450 unsigned long nr = 1; 4789 unsigned long nr = 1;
4451 4790
4791 err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
4792 if (err)
4793 return err;
4794
4452 /* 4795 /*
4453 * 2 items for inode and ref 4796 * 2 items for inode and ref
4454 * 2 items for dir items 4797 * 2 items for dir items
4455 * 1 for xattr if selinux is on 4798 * 1 for xattr if selinux is on
4456 */ 4799 */
4457 err = btrfs_reserve_metadata_space(root, 5); 4800 trans = btrfs_start_transaction(root, 5);
4458 if (err) 4801 if (IS_ERR(trans))
4459 return err; 4802 return PTR_ERR(trans);
4460
4461 trans = btrfs_start_transaction(root, 1);
4462 if (!trans) {
4463 err = -ENOMEM;
4464 goto out_unlock;
4465 }
4466 btrfs_set_trans_block_group(trans, dir); 4803 btrfs_set_trans_block_group(trans, dir);
4467 4804
4468 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4469 if (err) {
4470 err = -ENOSPC;
4471 goto out_fail;
4472 }
4473
4474 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4805 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4475 dentry->d_name.len, 4806 dentry->d_name.len,
4476 dentry->d_parent->d_inode->i_ino, objectid, 4807 dentry->d_parent->d_inode->i_ino, objectid,
@@ -4510,9 +4841,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4510out_fail: 4841out_fail:
4511 nr = trans->blocks_used; 4842 nr = trans->blocks_used;
4512 btrfs_end_transaction_throttle(trans, root); 4843 btrfs_end_transaction_throttle(trans, root);
4513
4514out_unlock:
4515 btrfs_unreserve_metadata_space(root, 5);
4516 if (drop_on_err) 4844 if (drop_on_err)
4517 iput(inode); 4845 iput(inode);
4518 btrfs_btree_balance_dirty(root, nr); 4846 btrfs_btree_balance_dirty(root, nr);
@@ -4770,6 +5098,7 @@ again:
4770 } 5098 }
4771 flush_dcache_page(page); 5099 flush_dcache_page(page);
4772 } else if (create && PageUptodate(page)) { 5100 } else if (create && PageUptodate(page)) {
5101 WARN_ON(1);
4773 if (!trans) { 5102 if (!trans) {
4774 kunmap(page); 5103 kunmap(page);
4775 free_extent_map(em); 5104 free_extent_map(em);
@@ -4866,11 +5195,651 @@ out:
4866 return em; 5195 return em;
4867} 5196}
4868 5197
5198static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5199 u64 start, u64 len)
5200{
5201 struct btrfs_root *root = BTRFS_I(inode)->root;
5202 struct btrfs_trans_handle *trans;
5203 struct extent_map *em;
5204 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5205 struct btrfs_key ins;
5206 u64 alloc_hint;
5207 int ret;
5208
5209 btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
5210
5211 trans = btrfs_join_transaction(root, 0);
5212 if (!trans)
5213 return ERR_PTR(-ENOMEM);
5214
5215 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
5216
5217 alloc_hint = get_extent_allocation_hint(inode, start, len);
5218 ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0,
5219 alloc_hint, (u64)-1, &ins, 1);
5220 if (ret) {
5221 em = ERR_PTR(ret);
5222 goto out;
5223 }
5224
5225 em = alloc_extent_map(GFP_NOFS);
5226 if (!em) {
5227 em = ERR_PTR(-ENOMEM);
5228 goto out;
5229 }
5230
5231 em->start = start;
5232 em->orig_start = em->start;
5233 em->len = ins.offset;
5234
5235 em->block_start = ins.objectid;
5236 em->block_len = ins.offset;
5237 em->bdev = root->fs_info->fs_devices->latest_bdev;
5238 set_bit(EXTENT_FLAG_PINNED, &em->flags);
5239
5240 while (1) {
5241 write_lock(&em_tree->lock);
5242 ret = add_extent_mapping(em_tree, em);
5243 write_unlock(&em_tree->lock);
5244 if (ret != -EEXIST)
5245 break;
5246 btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0);
5247 }
5248
5249 ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
5250 ins.offset, ins.offset, 0);
5251 if (ret) {
5252 btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
5253 em = ERR_PTR(ret);
5254 }
5255out:
5256 btrfs_end_transaction(trans, root);
5257 return em;
5258}
5259
5260/*
5261 * returns 1 when the nocow is safe, < 0 on error, 0 if the
5262 * block must be cow'd
5263 */
5264static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
5265 struct inode *inode, u64 offset, u64 len)
5266{
5267 struct btrfs_path *path;
5268 int ret;
5269 struct extent_buffer *leaf;
5270 struct btrfs_root *root = BTRFS_I(inode)->root;
5271 struct btrfs_file_extent_item *fi;
5272 struct btrfs_key key;
5273 u64 disk_bytenr;
5274 u64 backref_offset;
5275 u64 extent_end;
5276 u64 num_bytes;
5277 int slot;
5278 int found_type;
5279
5280 path = btrfs_alloc_path();
5281 if (!path)
5282 return -ENOMEM;
5283
5284 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
5285 offset, 0);
5286 if (ret < 0)
5287 goto out;
5288
5289 slot = path->slots[0];
5290 if (ret == 1) {
5291 if (slot == 0) {
5292 /* can't find the item, must cow */
5293 ret = 0;
5294 goto out;
5295 }
5296 slot--;
5297 }
5298 ret = 0;
5299 leaf = path->nodes[0];
5300 btrfs_item_key_to_cpu(leaf, &key, slot);
5301 if (key.objectid != inode->i_ino ||
5302 key.type != BTRFS_EXTENT_DATA_KEY) {
5303 /* not our file or wrong item type, must cow */
5304 goto out;
5305 }
5306
5307 if (key.offset > offset) {
5308 /* Wrong offset, must cow */
5309 goto out;
5310 }
5311
5312 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5313 found_type = btrfs_file_extent_type(leaf, fi);
5314 if (found_type != BTRFS_FILE_EXTENT_REG &&
5315 found_type != BTRFS_FILE_EXTENT_PREALLOC) {
5316 /* not a regular extent, must cow */
5317 goto out;
5318 }
5319 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
5320 backref_offset = btrfs_file_extent_offset(leaf, fi);
5321
5322 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
5323 if (extent_end < offset + len) {
5324 /* extent doesn't include our full range, must cow */
5325 goto out;
5326 }
5327
5328 if (btrfs_extent_readonly(root, disk_bytenr))
5329 goto out;
5330
5331 /*
5332 * look for other files referencing this extent, if we
5333 * find any we must cow
5334 */
5335 if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
5336 key.offset - backref_offset, disk_bytenr))
5337 goto out;
5338
5339 /*
5340 * adjust disk_bytenr and num_bytes to cover just the bytes
5341 * in this extent we are about to write. If there
5342 * are any csums in that range we have to cow in order
5343 * to keep the csums correct
5344 */
5345 disk_bytenr += backref_offset;
5346 disk_bytenr += offset - key.offset;
5347 num_bytes = min(offset + len, extent_end) - offset;
5348 if (csum_exist_in_range(root, disk_bytenr, num_bytes))
5349 goto out;
5350 /*
5351 * all of the above have passed, it is safe to overwrite this extent
5352 * without cow
5353 */
5354 ret = 1;
5355out:
5356 btrfs_free_path(path);
5357 return ret;
5358}
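
The last part of can_nocow_odirect() narrows the found extent down to the bytes being overwritten: the on-disk start is shifted by the backref offset plus how far into the extent item the write begins, and the length is clipped to the end of the extent. A stand-alone sketch of just that arithmetic, with illustrative struct and function names:

#include <stdio.h>

struct found_extent {
	unsigned long long key_offset;      /* file offset the extent item starts at */
	unsigned long long disk_bytenr;     /* where the extent lives on disk */
	unsigned long long backref_offset;  /* btrfs_file_extent_offset() */
	unsigned long long num_bytes;       /* btrfs_file_extent_num_bytes() */
};

static unsigned long long min_u64(unsigned long long a, unsigned long long b)
{
	return a < b ? a : b;
}

/* compute the disk range a write of [offset, offset+len) would overwrite in place */
static void nocow_range(const struct found_extent *fe,
			unsigned long long offset, unsigned long long len,
			unsigned long long *disk_start, unsigned long long *bytes)
{
	unsigned long long extent_end = fe->key_offset + fe->num_bytes;

	*disk_start = fe->disk_bytenr + fe->backref_offset + (offset - fe->key_offset);
	*bytes = min_u64(offset + len, extent_end) - offset;
}

int main(void)
{
	struct found_extent fe = { 4096, 1 << 20, 0, 65536 };
	unsigned long long disk_start, bytes;

	nocow_range(&fe, 8192, 16384, &disk_start, &bytes);
	printf("disk_start=%llu bytes=%llu\n", disk_start, bytes);
	return 0;
}
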
5359
5360static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5361 struct buffer_head *bh_result, int create)
5362{
5363 struct extent_map *em;
5364 struct btrfs_root *root = BTRFS_I(inode)->root;
5365 u64 start = iblock << inode->i_blkbits;
5366 u64 len = bh_result->b_size;
5367 struct btrfs_trans_handle *trans;
5368
5369 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
5370 if (IS_ERR(em))
5371 return PTR_ERR(em);
5372
5373 /*
 5374 * Ok, for INLINE and COMPRESSED extents we need to fall back to buffered
 5375 * IO. INLINE is special, and we could probably kludge it in here, but
 5376 * it's still buffered so for safety let's just fall back to the generic
 5377 * buffered path.
 5378 *
 5379 * For COMPRESSED we _have_ to read the entire extent in so we can
 5380 * decompress it, so there will be buffering required no matter what we
 5381 * do, so go ahead and fall back to buffered.
 5382 *
 5383 * We return -ENOTBLK because that's what makes DIO go ahead and go back
 5384 * to buffered IO. Don't blame me, this is the price we pay for using
 5385 * the generic code.
5386 */
5387 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
5388 em->block_start == EXTENT_MAP_INLINE) {
5389 free_extent_map(em);
5390 return -ENOTBLK;
5391 }
5392
5393 /* Just a good old fashioned hole, return */
5394 if (!create && (em->block_start == EXTENT_MAP_HOLE ||
5395 test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
5396 free_extent_map(em);
5397 /* DIO will do one hole at a time, so just unlock a sector */
5398 unlock_extent(&BTRFS_I(inode)->io_tree, start,
5399 start + root->sectorsize - 1, GFP_NOFS);
5400 return 0;
5401 }
5402
5403 /*
5404 * We don't allocate a new extent in the following cases
5405 *
5406 * 1) The inode is marked as NODATACOW. In this case we'll just use the
5407 * existing extent.
5408 * 2) The extent is marked as PREALLOC. We're good to go here and can
5409 * just use the extent.
5410 *
5411 */
5412 if (!create) {
5413 len = em->len - (start - em->start);
5414 goto map;
5415 }
5416
5417 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
5418 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
5419 em->block_start != EXTENT_MAP_HOLE)) {
5420 int type;
5421 int ret;
5422 u64 block_start;
5423
5424 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5425 type = BTRFS_ORDERED_PREALLOC;
5426 else
5427 type = BTRFS_ORDERED_NOCOW;
5428 len = min(len, em->len - (start - em->start));
5429 block_start = em->block_start + (start - em->start);
5430
5431 /*
5432 * we're not going to log anything, but we do need
5433 * to make sure the current transaction stays open
5434 * while we look for nocow cross refs
5435 */
5436 trans = btrfs_join_transaction(root, 0);
5437 if (!trans)
5438 goto must_cow;
5439
5440 if (can_nocow_odirect(trans, inode, start, len) == 1) {
5441 ret = btrfs_add_ordered_extent_dio(inode, start,
5442 block_start, len, len, type);
5443 btrfs_end_transaction(trans, root);
5444 if (ret) {
5445 free_extent_map(em);
5446 return ret;
5447 }
5448 goto unlock;
5449 }
5450 btrfs_end_transaction(trans, root);
5451 }
5452must_cow:
5453 /*
5454 * this will cow the extent, reset the len in case we changed
5455 * it above
5456 */
5457 len = bh_result->b_size;
5458 free_extent_map(em);
5459 em = btrfs_new_extent_direct(inode, start, len);
5460 if (IS_ERR(em))
5461 return PTR_ERR(em);
5462 len = min(len, em->len - (start - em->start));
5463unlock:
5464 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
5465 EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
5466 0, NULL, GFP_NOFS);
5467map:
5468 bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
5469 inode->i_blkbits;
5470 bh_result->b_size = len;
5471 bh_result->b_bdev = em->bdev;
5472 set_buffer_mapped(bh_result);
5473 if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
5474 set_buffer_new(bh_result);
5475
5476 free_extent_map(em);
5477
5478 return 0;
5479}
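
The map: label above converts an extent-map hit into buffer_head terms: the disk block number is the extent's block start plus how far the request sits into the extent, shifted down by the inode's block-size bits, and the mapped length is clipped to what the extent actually covers. A minimal sketch of that mapping, assuming 4 KiB blocks (blkbits = 12):

#include <stdio.h>

struct extent_map_lite {
	unsigned long long start;        /* file offset the extent covers from */
	unsigned long long len;          /* length of the extent in bytes */
	unsigned long long block_start;  /* byte address of the extent on disk */
};

/* translate a file byte range into a (disk block, length) pair, 2^blkbits bytes per block */
static void map_range(const struct extent_map_lite *em,
		      unsigned long long start, unsigned long long len,
		      unsigned blkbits,
		      unsigned long long *blocknr, unsigned long long *mapped_len)
{
	*blocknr = (em->block_start + (start - em->start)) >> blkbits;
	*mapped_len = len < em->len - (start - em->start)
			? len : em->len - (start - em->start);
}

int main(void)
{
	struct extent_map_lite em = { .start = 1 << 20, .len = 1 << 20, .block_start = 8 << 20 };
	unsigned long long blocknr, mapped;

	map_range(&em, (1 << 20) + 8192, 65536, 12, &blocknr, &mapped);
	printf("blocknr=%llu mapped=%llu\n", blocknr, mapped);
	return 0;
}
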
5480
5481struct btrfs_dio_private {
5482 struct inode *inode;
5483 u64 logical_offset;
5484 u64 disk_bytenr;
5485 u64 bytes;
5486 u32 *csums;
5487 void *private;
5488};
5489
5490static void btrfs_endio_direct_read(struct bio *bio, int err)
5491{
5492 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
5493 struct bio_vec *bvec = bio->bi_io_vec;
5494 struct btrfs_dio_private *dip = bio->bi_private;
5495 struct inode *inode = dip->inode;
5496 struct btrfs_root *root = BTRFS_I(inode)->root;
5497 u64 start;
5498 u32 *private = dip->csums;
5499
5500 start = dip->logical_offset;
5501 do {
5502 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
5503 struct page *page = bvec->bv_page;
5504 char *kaddr;
5505 u32 csum = ~(u32)0;
5506 unsigned long flags;
5507
5508 local_irq_save(flags);
5509 kaddr = kmap_atomic(page, KM_IRQ0);
5510 csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
5511 csum, bvec->bv_len);
5512 btrfs_csum_final(csum, (char *)&csum);
5513 kunmap_atomic(kaddr, KM_IRQ0);
5514 local_irq_restore(flags);
5515
5516 flush_dcache_page(bvec->bv_page);
5517 if (csum != *private) {
5518 printk(KERN_ERR "btrfs csum failed ino %lu off"
5519 " %llu csum %u private %u\n",
5520 inode->i_ino, (unsigned long long)start,
5521 csum, *private);
5522 err = -EIO;
5523 }
5524 }
5525
5526 start += bvec->bv_len;
5527 private++;
5528 bvec++;
5529 } while (bvec <= bvec_end);
5530
5531 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
5532 dip->logical_offset + dip->bytes - 1, GFP_NOFS);
5533 bio->bi_private = dip->private;
5534
5535 kfree(dip->csums);
5536 kfree(dip);
5537 dio_end_io(bio, err);
5538}
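
btrfs_endio_direct_read() walks the completed bio one segment at a time, recomputes the data checksum for each segment and compares it against the csums array filled in at submit time; any mismatch turns the whole bio into -EIO. A sketch of that verify loop, with a trivial multiplicative hash standing in where the real code uses btrfs_csum_data()/btrfs_csum_final():

#include <stdio.h>
#include <stddef.h>

/* stand-in checksum; btrfs actually uses its crc32c-based csum here */
static unsigned int toy_csum(const unsigned char *buf, size_t len)
{
	unsigned int csum = ~0u;
	for (size_t i = 0; i < len; i++)
		csum = csum * 131 + buf[i];
	return csum;
}

/* verify each segment against the checksum recorded for it; 0 on success, -5 (EIO) on mismatch */
static int verify_segments(const unsigned char *data, size_t seg_len, size_t nr_segs,
			   const unsigned int *expected)
{
	int err = 0;

	for (size_t seg = 0; seg < nr_segs; seg++) {
		unsigned int csum = toy_csum(data + seg * seg_len, seg_len);
		if (csum != expected[seg]) {
			fprintf(stderr, "csum failed seg %zu: %u vs %u\n",
				seg, csum, expected[seg]);
			err = -5;
		}
	}
	return err;
}

int main(void)
{
	unsigned char data[2][512] = { { 1 }, { 2 } };
	unsigned int expected[2] = { toy_csum(data[0], 512), toy_csum(data[1], 512) };

	printf("clean read:     %d\n", verify_segments(&data[0][0], 512, 2, expected));
	data[1][0] ^= 0xff;   /* corrupt one segment */
	printf("corrupted read: %d\n", verify_segments(&data[0][0], 512, 2, expected));
	return 0;
}
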
5539
5540static void btrfs_endio_direct_write(struct bio *bio, int err)
5541{
5542 struct btrfs_dio_private *dip = bio->bi_private;
5543 struct inode *inode = dip->inode;
5544 struct btrfs_root *root = BTRFS_I(inode)->root;
5545 struct btrfs_trans_handle *trans;
5546 struct btrfs_ordered_extent *ordered = NULL;
5547 struct extent_state *cached_state = NULL;
5548 int ret;
5549
5550 if (err)
5551 goto out_done;
5552
5553 ret = btrfs_dec_test_ordered_pending(inode, &ordered,
5554 dip->logical_offset, dip->bytes);
5555 if (!ret)
5556 goto out_done;
5557
5558 BUG_ON(!ordered);
5559
5560 trans = btrfs_join_transaction(root, 1);
5561 if (!trans) {
5562 err = -ENOMEM;
5563 goto out;
5564 }
5565 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
5566
5567 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
5568 ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5569 if (!ret)
5570 ret = btrfs_update_inode(trans, root, inode);
5571 err = ret;
5572 goto out;
5573 }
5574
5575 lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
5576 ordered->file_offset + ordered->len - 1, 0,
5577 &cached_state, GFP_NOFS);
5578
5579 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
5580 ret = btrfs_mark_extent_written(trans, inode,
5581 ordered->file_offset,
5582 ordered->file_offset +
5583 ordered->len);
5584 if (ret) {
5585 err = ret;
5586 goto out_unlock;
5587 }
5588 } else {
5589 ret = insert_reserved_file_extent(trans, inode,
5590 ordered->file_offset,
5591 ordered->start,
5592 ordered->disk_len,
5593 ordered->len,
5594 ordered->len,
5595 0, 0, 0,
5596 BTRFS_FILE_EXTENT_REG);
5597 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
5598 ordered->file_offset, ordered->len);
5599 if (ret) {
5600 err = ret;
5601 WARN_ON(1);
5602 goto out_unlock;
5603 }
5604 }
5605
5606 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
5607 btrfs_ordered_update_i_size(inode, 0, ordered);
5608 btrfs_update_inode(trans, root, inode);
5609out_unlock:
5610 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
5611 ordered->file_offset + ordered->len - 1,
5612 &cached_state, GFP_NOFS);
5613out:
5614 btrfs_delalloc_release_metadata(inode, ordered->len);
5615 btrfs_end_transaction(trans, root);
5616 btrfs_put_ordered_extent(ordered);
5617 btrfs_put_ordered_extent(ordered);
5618out_done:
5619 bio->bi_private = dip->private;
5620
5621 kfree(dip->csums);
5622 kfree(dip);
5623 dio_end_io(bio, err);
5624}
5625
5626static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
5627 struct bio *bio, int mirror_num,
5628 unsigned long bio_flags, u64 offset)
5629{
5630 int ret;
5631 struct btrfs_root *root = BTRFS_I(inode)->root;
5632 ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
5633 BUG_ON(ret);
5634 return 0;
5635}
5636
5637static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
5638 loff_t file_offset)
5639{
5640 struct btrfs_root *root = BTRFS_I(inode)->root;
5641 struct btrfs_dio_private *dip;
5642 struct bio_vec *bvec = bio->bi_io_vec;
5643 u64 start;
5644 int skip_sum;
5645 int write = rw & (1 << BIO_RW);
5646 int ret = 0;
5647
5648 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
5649
5650 dip = kmalloc(sizeof(*dip), GFP_NOFS);
5651 if (!dip) {
5652 ret = -ENOMEM;
5653 goto free_ordered;
5654 }
5655 dip->csums = NULL;
5656
5657 if (!skip_sum) {
5658 dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
5659 if (!dip->csums) {
5660 ret = -ENOMEM;
5661 goto free_ordered;
5662 }
5663 }
5664
5665 dip->private = bio->bi_private;
5666 dip->inode = inode;
5667 dip->logical_offset = file_offset;
5668
5669 start = dip->logical_offset;
5670 dip->bytes = 0;
5671 do {
5672 dip->bytes += bvec->bv_len;
5673 bvec++;
5674 } while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1));
5675
5676 dip->disk_bytenr = (u64)bio->bi_sector << 9;
5677 bio->bi_private = dip;
5678
5679 if (write)
5680 bio->bi_end_io = btrfs_endio_direct_write;
5681 else
5682 bio->bi_end_io = btrfs_endio_direct_read;
5683
5684 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
5685 if (ret)
5686 goto out_err;
5687
5688 if (write && !skip_sum) {
5689 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
5690 inode, rw, bio, 0, 0,
5691 dip->logical_offset,
5692 __btrfs_submit_bio_start_direct_io,
5693 __btrfs_submit_bio_done);
5694 if (ret)
5695 goto out_err;
5696 return;
5697 } else if (!skip_sum)
5698 btrfs_lookup_bio_sums_dio(root, inode, bio,
5699 dip->logical_offset, dip->csums);
5700
5701 ret = btrfs_map_bio(root, rw, bio, 0, 1);
5702 if (ret)
5703 goto out_err;
5704 return;
5705out_err:
5706 kfree(dip->csums);
5707 kfree(dip);
5708free_ordered:
5709 /*
5710 * If this is a write, we need to clean up the reserved space and kill
5711 * the ordered extent.
5712 */
5713 if (write) {
5714 struct btrfs_ordered_extent *ordered;
5715 ordered = btrfs_lookup_ordered_extent(inode,
5716 dip->logical_offset);
5717 if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
5718 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
5719 btrfs_free_reserved_extent(root, ordered->start,
5720 ordered->disk_len);
5721 btrfs_put_ordered_extent(ordered);
5722 btrfs_put_ordered_extent(ordered);
5723 }
5724 bio_endio(bio, ret);
5725}
5726
5727static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
5728 const struct iovec *iov, loff_t offset,
5729 unsigned long nr_segs)
5730{
5731 int seg;
5732 size_t size;
5733 unsigned long addr;
5734 unsigned blocksize_mask = root->sectorsize - 1;
5735 ssize_t retval = -EINVAL;
5736 loff_t end = offset;
5737
5738 if (offset & blocksize_mask)
5739 goto out;
5740
5741 /* Check the memory alignment. Blocks cannot straddle pages */
5742 for (seg = 0; seg < nr_segs; seg++) {
5743 addr = (unsigned long)iov[seg].iov_base;
5744 size = iov[seg].iov_len;
5745 end += size;
5746 if ((addr & blocksize_mask) || (size & blocksize_mask))
5747 goto out;
5748 }
5749 retval = 0;
5750out:
5751 return retval;
5752}
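
check_direct_IO() only lets a request take the direct path when the file offset and every iovec's base address and length are sector-aligned; anything else is handed back to buffered IO (the caller below treats a non-zero return as "do 0 bytes of DIO"). A user-space sketch of the same mask test:

#include <stdio.h>
#include <sys/uio.h>

/* returns 0 if the request may use DIO, -1 if any piece is misaligned */
static int check_dio_alignment(unsigned sectorsize, unsigned long long offset,
			       const struct iovec *iov, unsigned long nr_segs)
{
	unsigned long long mask = sectorsize - 1;   /* sectorsize assumed to be a power of two */

	if (offset & mask)
		return -1;

	for (unsigned long seg = 0; seg < nr_segs; seg++) {
		if (((unsigned long)iov[seg].iov_base & mask) || (iov[seg].iov_len & mask))
			return -1;
	}
	return 0;
}

int main(void)
{
	char aligned[8192] __attribute__((aligned(4096)));
	struct iovec ok  = { aligned, 4096 };
	struct iovec bad = { aligned + 1, 4096 };

	printf("aligned iovec:    %d\n", check_dio_alignment(4096, 0, &ok, 1));
	printf("misaligned iovec: %d\n", check_dio_alignment(4096, 0, &bad, 1));
	return 0;
}
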
4869static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, 5753static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4870 const struct iovec *iov, loff_t offset, 5754 const struct iovec *iov, loff_t offset,
4871 unsigned long nr_segs) 5755 unsigned long nr_segs)
4872{ 5756{
4873 return -EINVAL; 5757 struct file *file = iocb->ki_filp;
5758 struct inode *inode = file->f_mapping->host;
5759 struct btrfs_ordered_extent *ordered;
5760 struct extent_state *cached_state = NULL;
5761 u64 lockstart, lockend;
5762 ssize_t ret;
5763 int writing = rw & WRITE;
5764 int write_bits = 0;
5765 size_t count = iov_length(iov, nr_segs);
5766
5767 if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
5768 offset, nr_segs)) {
5769 return 0;
5770 }
5771
5772 lockstart = offset;
5773 lockend = offset + count - 1;
5774
5775 if (writing) {
5776 ret = btrfs_delalloc_reserve_space(inode, count);
5777 if (ret)
5778 goto out;
5779 }
5780
5781 while (1) {
5782 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
5783 0, &cached_state, GFP_NOFS);
5784 /*
5785 * We're concerned with the entire range that we're going to be
 5786 * doing DIO to, so we need to make sure there are no ordered
 5787 * extents in this range.
5788 */
5789 ordered = btrfs_lookup_ordered_range(inode, lockstart,
5790 lockend - lockstart + 1);
5791 if (!ordered)
5792 break;
5793 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
5794 &cached_state, GFP_NOFS);
5795 btrfs_start_ordered_extent(inode, ordered, 1);
5796 btrfs_put_ordered_extent(ordered);
5797 cond_resched();
5798 }
5799
5800 /*
5801 * we don't use btrfs_set_extent_delalloc because we don't want
5802 * the dirty or uptodate bits
5803 */
5804 if (writing) {
5805 write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
5806 ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
5807 EXTENT_DELALLOC, 0, NULL, &cached_state,
5808 GFP_NOFS);
5809 if (ret) {
5810 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
5811 lockend, EXTENT_LOCKED | write_bits,
5812 1, 0, &cached_state, GFP_NOFS);
5813 goto out;
5814 }
5815 }
5816
5817 free_extent_state(cached_state);
5818 cached_state = NULL;
5819
5820 ret = __blockdev_direct_IO(rw, iocb, inode,
5821 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
5822 iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
5823 btrfs_submit_direct, 0);
5824
5825 if (ret < 0 && ret != -EIOCBQUEUED) {
5826 clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
5827 offset + iov_length(iov, nr_segs) - 1,
5828 EXTENT_LOCKED | write_bits, 1, 0,
5829 &cached_state, GFP_NOFS);
5830 } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
5831 /*
5832 * We're falling back to buffered, unlock the section we didn't
5833 * do IO on.
5834 */
5835 clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
5836 offset + iov_length(iov, nr_segs) - 1,
5837 EXTENT_LOCKED | write_bits, 1, 0,
5838 &cached_state, GFP_NOFS);
5839 }
5840out:
5841 free_extent_state(cached_state);
5842 return ret;
4874} 5843}
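
The loop at the top of btrfs_direct_IO() keeps locking the byte range and retrying until no ordered extent overlaps it: if one is found, the range is unlocked again, the ordered extent is waited on via btrfs_start_ordered_extent(), and the lock attempt is redone. A sketch of that lock/check/retry shape over a dummy "pending ordered ranges" table, with the locking and waiting stubbed out:

#include <stdio.h>

struct range { unsigned long long start, end; int pending; };

/* pretend ordered-extent table; pending entries still have IO in flight */
static struct range ordered[] = {
	{ 0, 4095, 1 },
	{ 8192, 12287, 0 },
};

static struct range *lookup_pending(unsigned long long start, unsigned long long end)
{
	for (unsigned i = 0; i < sizeof(ordered) / sizeof(ordered[0]); i++) {
		if (ordered[i].pending && ordered[i].start <= end && ordered[i].end >= start)
			return &ordered[i];
	}
	return NULL;
}

/* lock_range()/unlock_range() stand in for lock_extent_bits()/unlock_extent_cached() */
static void lock_range(unsigned long long s, unsigned long long e)   { (void)s; (void)e; }
static void unlock_range(unsigned long long s, unsigned long long e) { (void)s; (void)e; }

static void wait_for_dio_range(unsigned long long start, unsigned long long end)
{
	for (;;) {
		lock_range(start, end);
		struct range *r = lookup_pending(start, end);
		if (!r)
			break;              /* range is quiet, keep it locked */
		unlock_range(start, end);
		printf("waiting on ordered range [%llu, %llu]\n", r->start, r->end);
		r->pending = 0;             /* stands in for btrfs_start_ordered_extent(..., 1) */
	}
	printf("range [%llu, %llu] locked with no ordered extents\n", start, end);
}

int main(void)
{
	wait_for_dio_range(0, 16383);
	return 0;
}
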
4875 5844
4876static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 5845static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
@@ -5034,7 +6003,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5034 u64 page_start; 6003 u64 page_start;
5035 u64 page_end; 6004 u64 page_end;
5036 6005
5037 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE); 6006 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
5038 if (ret) { 6007 if (ret) {
5039 if (ret == -ENOMEM) 6008 if (ret == -ENOMEM)
5040 ret = VM_FAULT_OOM; 6009 ret = VM_FAULT_OOM;
@@ -5043,13 +6012,6 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5043 goto out; 6012 goto out;
5044 } 6013 }
5045 6014
5046 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
5047 if (ret) {
5048 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5049 ret = VM_FAULT_SIGBUS;
5050 goto out;
5051 }
5052
5053 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ 6015 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
5054again: 6016again:
5055 lock_page(page); 6017 lock_page(page);
@@ -5059,7 +6021,6 @@ again:
5059 6021
5060 if ((page->mapping != inode->i_mapping) || 6022 if ((page->mapping != inode->i_mapping) ||
5061 (page_start >= size)) { 6023 (page_start >= size)) {
5062 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5063 /* page got truncated out from underneath us */ 6024 /* page got truncated out from underneath us */
5064 goto out_unlock; 6025 goto out_unlock;
5065 } 6026 }
@@ -5100,7 +6061,6 @@ again:
5100 unlock_extent_cached(io_tree, page_start, page_end, 6061 unlock_extent_cached(io_tree, page_start, page_end,
5101 &cached_state, GFP_NOFS); 6062 &cached_state, GFP_NOFS);
5102 ret = VM_FAULT_SIGBUS; 6063 ret = VM_FAULT_SIGBUS;
5103 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5104 goto out_unlock; 6064 goto out_unlock;
5105 } 6065 }
5106 ret = 0; 6066 ret = 0;
@@ -5127,10 +6087,10 @@ again:
5127 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); 6087 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
5128 6088
5129out_unlock: 6089out_unlock:
5130 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
5131 if (!ret) 6090 if (!ret)
5132 return VM_FAULT_LOCKED; 6091 return VM_FAULT_LOCKED;
5133 unlock_page(page); 6092 unlock_page(page);
6093 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
5134out: 6094out:
5135 return ret; 6095 return ret;
5136} 6096}
@@ -5155,8 +6115,10 @@ static void btrfs_truncate(struct inode *inode)
5155 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); 6115 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
5156 btrfs_ordered_update_i_size(inode, inode->i_size, NULL); 6116 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
5157 6117
5158 trans = btrfs_start_transaction(root, 1); 6118 trans = btrfs_start_transaction(root, 0);
6119 BUG_ON(IS_ERR(trans));
5159 btrfs_set_trans_block_group(trans, inode); 6120 btrfs_set_trans_block_group(trans, inode);
6121 trans->block_rsv = root->orphan_block_rsv;
5160 6122
5161 /* 6123 /*
5162 * setattr is responsible for setting the ordered_data_close flag, 6124 * setattr is responsible for setting the ordered_data_close flag,
@@ -5179,6 +6141,23 @@ static void btrfs_truncate(struct inode *inode)
5179 btrfs_add_ordered_operation(trans, root, inode); 6141 btrfs_add_ordered_operation(trans, root, inode);
5180 6142
5181 while (1) { 6143 while (1) {
6144 if (!trans) {
6145 trans = btrfs_start_transaction(root, 0);
6146 BUG_ON(IS_ERR(trans));
6147 btrfs_set_trans_block_group(trans, inode);
6148 trans->block_rsv = root->orphan_block_rsv;
6149 }
6150
6151 ret = btrfs_block_rsv_check(trans, root,
6152 root->orphan_block_rsv, 0, 5);
6153 if (ret) {
6154 BUG_ON(ret != -EAGAIN);
6155 ret = btrfs_commit_transaction(trans, root);
6156 BUG_ON(ret);
6157 trans = NULL;
6158 continue;
6159 }
6160
5182 ret = btrfs_truncate_inode_items(trans, root, inode, 6161 ret = btrfs_truncate_inode_items(trans, root, inode,
5183 inode->i_size, 6162 inode->i_size,
5184 BTRFS_EXTENT_DATA_KEY); 6163 BTRFS_EXTENT_DATA_KEY);
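
Reviewer note: the loop above now (re)starts a transaction on demand, asks btrfs_block_rsv_check() whether the orphan reservation still has room, and on -EAGAIN commits everything and retries with a fresh transaction instead of failing the truncate. A self-contained user-space sketch of that commit-and-retry control flow; reserve_check() and commit_and_refill() are made-up stand-ins, not the btrfs calls:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-ins for btrfs_block_rsv_check() / btrfs_commit_transaction(). */
    static int reserve_check(int budget, int needed)
    {
        return budget >= needed ? 0 : -EAGAIN;
    }

    static void commit_and_refill(int *budget)
    {
        *budget = 100;              /* committing frees the reserved space */
    }

    int main(void)
    {
        int budget = 3, work_left = 25;

        while (work_left > 0) {
            if (reserve_check(budget, 5) == -EAGAIN) {
                commit_and_refill(&budget);
                continue;           /* mirrors "trans = NULL; continue;" */
            }
            budget -= 5;            /* one btrfs_truncate_inode_items() pass */
            work_left -= 5;
        }
        printf("done, %d units of reservation left\n", budget);
        return 0;
    }

The point of the shape is that reservation pressure is relieved by committing, not by aborting the operation partway through.
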
@@ -5190,10 +6169,8 @@ static void btrfs_truncate(struct inode *inode)
5190 6169
5191 nr = trans->blocks_used; 6170 nr = trans->blocks_used;
5192 btrfs_end_transaction(trans, root); 6171 btrfs_end_transaction(trans, root);
6172 trans = NULL;
5193 btrfs_btree_balance_dirty(root, nr); 6173 btrfs_btree_balance_dirty(root, nr);
5194
5195 trans = btrfs_start_transaction(root, 1);
5196 btrfs_set_trans_block_group(trans, inode);
5197 } 6174 }
5198 6175
5199 if (ret == 0 && inode->i_nlink > 0) { 6176 if (ret == 0 && inode->i_nlink > 0) {
@@ -5254,21 +6231,47 @@ unsigned long btrfs_force_ra(struct address_space *mapping,
5254struct inode *btrfs_alloc_inode(struct super_block *sb) 6231struct inode *btrfs_alloc_inode(struct super_block *sb)
5255{ 6232{
5256 struct btrfs_inode *ei; 6233 struct btrfs_inode *ei;
6234 struct inode *inode;
5257 6235
5258 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS); 6236 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
5259 if (!ei) 6237 if (!ei)
5260 return NULL; 6238 return NULL;
6239
6240 ei->root = NULL;
6241 ei->space_info = NULL;
6242 ei->generation = 0;
6243 ei->sequence = 0;
5261 ei->last_trans = 0; 6244 ei->last_trans = 0;
5262 ei->last_sub_trans = 0; 6245 ei->last_sub_trans = 0;
5263 ei->logged_trans = 0; 6246 ei->logged_trans = 0;
5264 ei->outstanding_extents = 0; 6247 ei->delalloc_bytes = 0;
5265 ei->reserved_extents = 0; 6248 ei->reserved_bytes = 0;
5266 ei->root = NULL; 6249 ei->disk_i_size = 0;
6250 ei->flags = 0;
6251 ei->index_cnt = (u64)-1;
6252 ei->last_unlink_trans = 0;
6253
5267 spin_lock_init(&ei->accounting_lock); 6254 spin_lock_init(&ei->accounting_lock);
6255 atomic_set(&ei->outstanding_extents, 0);
6256 ei->reserved_extents = 0;
6257
6258 ei->ordered_data_close = 0;
6259 ei->orphan_meta_reserved = 0;
6260 ei->dummy_inode = 0;
6261 ei->force_compress = 0;
6262
6263 inode = &ei->vfs_inode;
6264 extent_map_tree_init(&ei->extent_tree, GFP_NOFS);
6265 extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS);
6266 extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS);
6267 mutex_init(&ei->log_mutex);
5268 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 6268 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
5269 INIT_LIST_HEAD(&ei->i_orphan); 6269 INIT_LIST_HEAD(&ei->i_orphan);
6270 INIT_LIST_HEAD(&ei->delalloc_inodes);
5270 INIT_LIST_HEAD(&ei->ordered_operations); 6271 INIT_LIST_HEAD(&ei->ordered_operations);
5271 return &ei->vfs_inode; 6272 RB_CLEAR_NODE(&ei->rb_node);
6273
6274 return inode;
5272} 6275}
5273 6276
5274void btrfs_destroy_inode(struct inode *inode) 6277void btrfs_destroy_inode(struct inode *inode)
@@ -5278,6 +6281,8 @@ void btrfs_destroy_inode(struct inode *inode)
5278 6281
5279 WARN_ON(!list_empty(&inode->i_dentry)); 6282 WARN_ON(!list_empty(&inode->i_dentry));
5280 WARN_ON(inode->i_data.nrpages); 6283 WARN_ON(inode->i_data.nrpages);
6284 WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents));
6285 WARN_ON(BTRFS_I(inode)->reserved_extents);
5281 6286
5282 /* 6287 /*
5283 * This can happen where we create an inode, but somebody else also 6288 * This can happen where we create an inode, but somebody else also
@@ -5298,13 +6303,13 @@ void btrfs_destroy_inode(struct inode *inode)
5298 spin_unlock(&root->fs_info->ordered_extent_lock); 6303 spin_unlock(&root->fs_info->ordered_extent_lock);
5299 } 6304 }
5300 6305
5301 spin_lock(&root->list_lock); 6306 spin_lock(&root->orphan_lock);
5302 if (!list_empty(&BTRFS_I(inode)->i_orphan)) { 6307 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
5303 printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n", 6308 printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
5304 inode->i_ino); 6309 inode->i_ino);
5305 list_del_init(&BTRFS_I(inode)->i_orphan); 6310 list_del_init(&BTRFS_I(inode)->i_orphan);
5306 } 6311 }
5307 spin_unlock(&root->list_lock); 6312 spin_unlock(&root->orphan_lock);
5308 6313
5309 while (1) { 6314 while (1) {
5310 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 6315 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
@@ -5425,19 +6430,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5425 if (S_ISDIR(old_inode->i_mode) && new_inode && 6430 if (S_ISDIR(old_inode->i_mode) && new_inode &&
5426 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) 6431 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
5427 return -ENOTEMPTY; 6432 return -ENOTEMPTY;
5428
5429 /*
5430 * We want to reserve the absolute worst case amount of items. So if
5431 * both inodes are subvols and we need to unlink them then that would
5432 * require 4 item modifications, but if they are both normal inodes it
5433 * would require 5 item modifications, so we'll assume their normal
5434 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
5435 * should cover the worst case number of items we'll modify.
5436 */
5437 ret = btrfs_reserve_metadata_space(root, 11);
5438 if (ret)
5439 return ret;
5440
5441 /* 6433 /*
5442 * we're using rename to replace one file with another. 6434 * we're using rename to replace one file with another.
5443 * and the replacement file is large. Start IO on it now so 6435 * and the replacement file is large. Start IO on it now so
@@ -5450,8 +6442,18 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5450 /* close the racy window with snapshot create/destroy ioctl */ 6442 /* close the racy window with snapshot create/destroy ioctl */
5451 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 6443 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5452 down_read(&root->fs_info->subvol_sem); 6444 down_read(&root->fs_info->subvol_sem);
6445 /*
6446 * We want to reserve the absolute worst case amount of items. So if
6447 * both inodes are subvols and we need to unlink them then that would
6448 * require 4 item modifications, but if they are both normal inodes it
6449 * would require 5 item modifications, so we'll assume their normal
6450 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
6451 * should cover the worst case number of items we'll modify.
6452 */
6453 trans = btrfs_start_transaction(root, 20);
6454 if (IS_ERR(trans))
6455 return PTR_ERR(trans);
5453 6456
5454 trans = btrfs_start_transaction(root, 1);
5455 btrfs_set_trans_block_group(trans, new_dir); 6457 btrfs_set_trans_block_group(trans, new_dir);
5456 6458
5457 if (dest != root) 6459 if (dest != root)
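
Reviewer note: with the item count folded into btrfs_start_transaction(root, 20), callers such as btrfs_rename() must treat the return value as an error pointer rather than checking for NULL. The sketch below re-implements the usual err.h helpers locally so it compiles on its own; start_transaction() and struct trans are illustrative stand-ins for the kernel API:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local copies of the ERR_PTR()/IS_ERR()/PTR_ERR() convention. */
    #define MAX_ERRNO   4095
    #define ERR_PTR(e)  ((void *)(intptr_t)(e))
    #define PTR_ERR(p)  ((long)(intptr_t)(p))
    #define IS_ERR(p)   ((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)

    struct trans { int items; };

    /* Stand-in for btrfs_start_transaction(root, num_items). */
    static struct trans *start_transaction(int num_items)
    {
        static struct trans t;

        if (num_items > 64)
            return ERR_PTR(-ENOSPC);    /* up-front reservation failed */
        t.items = num_items;
        return &t;
    }

    int main(void)
    {
        struct trans *trans = start_transaction(20);

        if (IS_ERR(trans))
            return (int)-PTR_ERR(trans);
        printf("reserved %d items before touching the trees\n", trans->items);
        return 0;
    }

Note that the old explicit btrfs_unreserve_metadata_space(root, 11) call disappears from the error path below; the reservation now travels with the transaction handle.
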
@@ -5550,7 +6552,6 @@ out_fail:
5550 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) 6552 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5551 up_read(&root->fs_info->subvol_sem); 6553 up_read(&root->fs_info->subvol_sem);
5552 6554
5553 btrfs_unreserve_metadata_space(root, 11);
5554 return ret; 6555 return ret;
5555} 6556}
5556 6557
@@ -5602,6 +6603,38 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
5602 return 0; 6603 return 0;
5603} 6604}
5604 6605
6606int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput)
6607{
6608 struct btrfs_inode *binode;
6609 struct inode *inode = NULL;
6610
6611 spin_lock(&root->fs_info->delalloc_lock);
6612 while (!list_empty(&root->fs_info->delalloc_inodes)) {
6613 binode = list_entry(root->fs_info->delalloc_inodes.next,
6614 struct btrfs_inode, delalloc_inodes);
6615 inode = igrab(&binode->vfs_inode);
6616 if (inode) {
6617 list_move_tail(&binode->delalloc_inodes,
6618 &root->fs_info->delalloc_inodes);
6619 break;
6620 }
6621
6622 list_del_init(&binode->delalloc_inodes);
6623 cond_resched_lock(&root->fs_info->delalloc_lock);
6624 }
6625 spin_unlock(&root->fs_info->delalloc_lock);
6626
6627 if (inode) {
6628 write_inode_now(inode, 0);
6629 if (delay_iput)
6630 btrfs_add_delayed_iput(inode);
6631 else
6632 iput(inode);
6633 return 1;
6634 }
6635 return 0;
6636}
6637
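
Reviewer note: btrfs_start_one_delalloc_inode() above holds delalloc_lock only long enough to pin one inode (igrab()) and rotate it to the list tail, then drops the lock before the slow write_inode_now() call. A toy, compilable illustration of that pop-under-lock, work-outside-lock pattern; struct item, grab_one() and the list are inventions for the sketch, not btrfs types:

    #include <pthread.h>
    #include <stdio.h>

    struct item { int id; struct item *next; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct item *head;

    static struct item *grab_one(void)
    {
        struct item *it;

        pthread_mutex_lock(&list_lock);
        it = head;
        if (it)
            head = it->next;        /* analogue of igrab() + list_move_tail() */
        pthread_mutex_unlock(&list_lock);
        return it;
    }

    int main(void)
    {
        struct item a = { 1, NULL }, b = { 2, &a };
        struct item *it;

        head = &b;
        while ((it = grab_one()) != NULL)
            printf("flushing item %d outside the lock\n", it->id);
        return 0;
    }

(Build with -pthread.) The important property is that the blocking writeback never happens while the list lock is held.
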
5605static int btrfs_symlink(struct inode *dir, struct dentry *dentry, 6638static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
5606 const char *symname) 6639 const char *symname)
5607{ 6640{
@@ -5625,26 +6658,20 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
5625 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) 6658 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
5626 return -ENAMETOOLONG; 6659 return -ENAMETOOLONG;
5627 6660
6661 err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
6662 if (err)
6663 return err;
5628 /* 6664 /*
5629 * 2 items for inode item and ref 6665 * 2 items for inode item and ref
5630 * 2 items for dir items 6666 * 2 items for dir items
5631 * 1 item for xattr if selinux is on 6667 * 1 item for xattr if selinux is on
5632 */ 6668 */
5633 err = btrfs_reserve_metadata_space(root, 5); 6669 trans = btrfs_start_transaction(root, 5);
5634 if (err) 6670 if (IS_ERR(trans))
5635 return err; 6671 return PTR_ERR(trans);
5636 6672
5637 trans = btrfs_start_transaction(root, 1);
5638 if (!trans)
5639 goto out_fail;
5640 btrfs_set_trans_block_group(trans, dir); 6673 btrfs_set_trans_block_group(trans, dir);
5641 6674
5642 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
5643 if (err) {
5644 err = -ENOSPC;
5645 goto out_unlock;
5646 }
5647
5648 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6675 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5649 dentry->d_name.len, 6676 dentry->d_name.len,
5650 dentry->d_parent->d_inode->i_ino, objectid, 6677 dentry->d_parent->d_inode->i_ino, objectid,
@@ -5716,8 +6743,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
5716out_unlock: 6743out_unlock:
5717 nr = trans->blocks_used; 6744 nr = trans->blocks_used;
5718 btrfs_end_transaction_throttle(trans, root); 6745 btrfs_end_transaction_throttle(trans, root);
5719out_fail:
5720 btrfs_unreserve_metadata_space(root, 5);
5721 if (drop_inode) { 6746 if (drop_inode) {
5722 inode_dec_link_count(inode); 6747 inode_dec_link_count(inode);
5723 iput(inode); 6748 iput(inode);
@@ -5726,33 +6751,28 @@ out_fail:
5726 return err; 6751 return err;
5727} 6752}
5728 6753
5729static int prealloc_file_range(struct inode *inode, u64 start, u64 end, 6754int btrfs_prealloc_file_range(struct inode *inode, int mode,
5730 u64 alloc_hint, int mode, loff_t actual_len) 6755 u64 start, u64 num_bytes, u64 min_size,
6756 loff_t actual_len, u64 *alloc_hint)
5731{ 6757{
5732 struct btrfs_trans_handle *trans; 6758 struct btrfs_trans_handle *trans;
5733 struct btrfs_root *root = BTRFS_I(inode)->root; 6759 struct btrfs_root *root = BTRFS_I(inode)->root;
5734 struct btrfs_key ins; 6760 struct btrfs_key ins;
5735 u64 cur_offset = start; 6761 u64 cur_offset = start;
5736 u64 num_bytes = end - start;
5737 int ret = 0; 6762 int ret = 0;
5738 u64 i_size;
5739 6763
5740 while (num_bytes > 0) { 6764 while (num_bytes > 0) {
5741 trans = btrfs_start_transaction(root, 1); 6765 trans = btrfs_start_transaction(root, 3);
5742 6766 if (IS_ERR(trans)) {
5743 ret = btrfs_reserve_extent(trans, root, num_bytes, 6767 ret = PTR_ERR(trans);
5744 root->sectorsize, 0, alloc_hint, 6768 break;
5745 (u64)-1, &ins, 1);
5746 if (ret) {
5747 WARN_ON(1);
5748 goto stop_trans;
5749 } 6769 }
5750 6770
5751 ret = btrfs_reserve_metadata_space(root, 3); 6771 ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
6772 0, *alloc_hint, (u64)-1, &ins, 1);
5752 if (ret) { 6773 if (ret) {
5753 btrfs_free_reserved_extent(root, ins.objectid, 6774 btrfs_end_transaction(trans, root);
5754 ins.offset); 6775 break;
5755 goto stop_trans;
5756 } 6776 }
5757 6777
5758 ret = insert_reserved_file_extent(trans, inode, 6778 ret = insert_reserved_file_extent(trans, inode,
@@ -5766,34 +6786,27 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
5766 6786
5767 num_bytes -= ins.offset; 6787 num_bytes -= ins.offset;
5768 cur_offset += ins.offset; 6788 cur_offset += ins.offset;
5769 alloc_hint = ins.objectid + ins.offset; 6789 *alloc_hint = ins.objectid + ins.offset;
5770 6790
5771 inode->i_ctime = CURRENT_TIME; 6791 inode->i_ctime = CURRENT_TIME;
5772 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 6792 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
5773 if (!(mode & FALLOC_FL_KEEP_SIZE) && 6793 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5774 (actual_len > inode->i_size) && 6794 (actual_len > inode->i_size) &&
5775 (cur_offset > inode->i_size)) { 6795 (cur_offset > inode->i_size)) {
5776
5777 if (cur_offset > actual_len) 6796 if (cur_offset > actual_len)
5778 i_size = actual_len; 6797 i_size_write(inode, actual_len);
5779 else 6798 else
5780 i_size = cur_offset; 6799 i_size_write(inode, cur_offset);
5781 i_size_write(inode, i_size); 6800 i_size_write(inode, cur_offset);
5782 btrfs_ordered_update_i_size(inode, i_size, NULL); 6801 btrfs_ordered_update_i_size(inode, cur_offset, NULL);
5783 } 6802 }
5784 6803
5785 ret = btrfs_update_inode(trans, root, inode); 6804 ret = btrfs_update_inode(trans, root, inode);
5786 BUG_ON(ret); 6805 BUG_ON(ret);
5787 6806
5788 btrfs_end_transaction(trans, root); 6807 btrfs_end_transaction(trans, root);
5789 btrfs_unreserve_metadata_space(root, 3);
5790 } 6808 }
5791 return ret; 6809 return ret;
5792
5793stop_trans:
5794 btrfs_end_transaction(trans, root);
5795 return ret;
5796
5797} 6810}
5798 6811
5799static long btrfs_fallocate(struct inode *inode, int mode, 6812static long btrfs_fallocate(struct inode *inode, int mode,
@@ -5826,8 +6839,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
5826 goto out; 6839 goto out;
5827 } 6840 }
5828 6841
5829 ret = btrfs_check_data_free_space(BTRFS_I(inode)->root, inode, 6842 ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
5830 alloc_end - alloc_start);
5831 if (ret) 6843 if (ret)
5832 goto out; 6844 goto out;
5833 6845
@@ -5872,16 +6884,16 @@ static long btrfs_fallocate(struct inode *inode, int mode,
5872 if (em->block_start == EXTENT_MAP_HOLE || 6884 if (em->block_start == EXTENT_MAP_HOLE ||
5873 (cur_offset >= inode->i_size && 6885 (cur_offset >= inode->i_size &&
5874 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { 6886 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
5875 ret = prealloc_file_range(inode, 6887 ret = btrfs_prealloc_file_range(inode, 0, cur_offset,
5876 cur_offset, last_byte, 6888 last_byte - cur_offset,
5877 alloc_hint, mode, offset+len); 6889 1 << inode->i_blkbits,
6890 offset + len,
6891 &alloc_hint);
5878 if (ret < 0) { 6892 if (ret < 0) {
5879 free_extent_map(em); 6893 free_extent_map(em);
5880 break; 6894 break;
5881 } 6895 }
5882 } 6896 }
5883 if (em->block_start <= EXTENT_MAP_LAST_BYTE)
5884 alloc_hint = em->block_start;
5885 free_extent_map(em); 6897 free_extent_map(em);
5886 6898
5887 cur_offset = last_byte; 6899 cur_offset = last_byte;
@@ -5893,8 +6905,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
5893 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, 6905 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5894 &cached_state, GFP_NOFS); 6906 &cached_state, GFP_NOFS);
5895 6907
5896 btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode, 6908 btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
5897 alloc_end - alloc_start);
5898out: 6909out:
5899 mutex_unlock(&inode->i_mutex); 6910 mutex_unlock(&inode->i_mutex);
5900 return ret; 6911 return ret;
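
Reviewer note: taken together, the inode.c hunks above converge on one rule for data space: btrfs_delalloc_reserve_space() before dirtying pages, and btrfs_delalloc_release_space() on every path that backs out (page truncated, SIGBUS, page already dirty, and so on). A stand-alone toy version of that pairing; reserve(), release() and fake_page_mkwrite() are invented names:

    #include <errno.h>
    #include <stdio.h>

    static long reserved;

    static int reserve(long bytes, long limit)
    {
        if (reserved + bytes > limit)
            return -ENOSPC;
        reserved += bytes;
        return 0;
    }

    static void release(long bytes)
    {
        reserved -= bytes;
    }

    static int fake_page_mkwrite(long page_size, long limit, int lost_the_page)
    {
        int ret = reserve(page_size, limit);

        if (ret)
            return ret;
        if (lost_the_page) {        /* e.g. page truncated underneath us */
            release(page_size);     /* mirrors the error paths above */
            return -EFAULT;
        }
        return 0;                   /* page stays dirty, space stays reserved */
    }

    int main(void)
    {
        printf("%d %d reserved=%ld\n",
               fake_page_mkwrite(4096, 8192, 0),
               fake_page_mkwrite(4096, 8192, 1), reserved);
        return 0;
    }

Success intentionally keeps the reservation; only the bail-out paths give it back.
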
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 97a97839a867..4cdb98cf26de 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -239,23 +239,19 @@ static noinline int create_subvol(struct btrfs_root *root,
239 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; 239 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
240 u64 index = 0; 240 u64 index = 0;
241 241
242 ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root,
243 0, &objectid);
244 if (ret)
245 return ret;
242 /* 246 /*
243 * 1 - inode item 247 * 1 - inode item
244 * 2 - refs 248 * 2 - refs
245 * 1 - root item 249 * 1 - root item
246 * 2 - dir items 250 * 2 - dir items
247 */ 251 */
248 ret = btrfs_reserve_metadata_space(root, 6); 252 trans = btrfs_start_transaction(root, 6);
249 if (ret) 253 if (IS_ERR(trans))
250 return ret; 254 return PTR_ERR(trans);
251
252 trans = btrfs_start_transaction(root, 1);
253 BUG_ON(!trans);
254
255 ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
256 0, &objectid);
257 if (ret)
258 goto fail;
259 255
260 leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 256 leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
261 0, objectid, NULL, 0, 0, 0); 257 0, objectid, NULL, 0, 0, 0);
@@ -345,13 +341,10 @@ fail:
345 err = btrfs_commit_transaction(trans, root); 341 err = btrfs_commit_transaction(trans, root);
346 if (err && !ret) 342 if (err && !ret)
347 ret = err; 343 ret = err;
348
349 btrfs_unreserve_metadata_space(root, 6);
350 return ret; 344 return ret;
351} 345}
352 346
353static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, 347static int create_snapshot(struct btrfs_root *root, struct dentry *dentry)
354 char *name, int namelen)
355{ 348{
356 struct inode *inode; 349 struct inode *inode;
357 struct btrfs_pending_snapshot *pending_snapshot; 350 struct btrfs_pending_snapshot *pending_snapshot;
@@ -361,40 +354,33 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
361 if (!root->ref_cows) 354 if (!root->ref_cows)
362 return -EINVAL; 355 return -EINVAL;
363 356
364 /*
365 * 1 - inode item
366 * 2 - refs
367 * 1 - root item
368 * 2 - dir items
369 */
370 ret = btrfs_reserve_metadata_space(root, 6);
371 if (ret)
372 goto fail;
373
374 pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS); 357 pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
375 if (!pending_snapshot) { 358 if (!pending_snapshot)
376 ret = -ENOMEM; 359 return -ENOMEM;
377 btrfs_unreserve_metadata_space(root, 6); 360
378 goto fail; 361 btrfs_init_block_rsv(&pending_snapshot->block_rsv);
379 }
380 pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
381 if (!pending_snapshot->name) {
382 ret = -ENOMEM;
383 kfree(pending_snapshot);
384 btrfs_unreserve_metadata_space(root, 6);
385 goto fail;
386 }
387 memcpy(pending_snapshot->name, name, namelen);
388 pending_snapshot->name[namelen] = '\0';
389 pending_snapshot->dentry = dentry; 362 pending_snapshot->dentry = dentry;
390 trans = btrfs_start_transaction(root, 1);
391 BUG_ON(!trans);
392 pending_snapshot->root = root; 363 pending_snapshot->root = root;
364
365 trans = btrfs_start_transaction(root->fs_info->extent_root, 5);
366 if (IS_ERR(trans)) {
367 ret = PTR_ERR(trans);
368 goto fail;
369 }
370
371 ret = btrfs_snap_reserve_metadata(trans, pending_snapshot);
372 BUG_ON(ret);
373
393 list_add(&pending_snapshot->list, 374 list_add(&pending_snapshot->list,
394 &trans->transaction->pending_snapshots); 375 &trans->transaction->pending_snapshots);
395 ret = btrfs_commit_transaction(trans, root); 376 ret = btrfs_commit_transaction(trans, root->fs_info->extent_root);
396 BUG_ON(ret); 377 BUG_ON(ret);
397 btrfs_unreserve_metadata_space(root, 6); 378
379 ret = pending_snapshot->error;
380 if (ret)
381 goto fail;
382
383 btrfs_orphan_cleanup(pending_snapshot->snap);
398 384
399 inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry); 385 inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry);
400 if (IS_ERR(inode)) { 386 if (IS_ERR(inode)) {
@@ -405,6 +391,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
405 d_instantiate(dentry, inode); 391 d_instantiate(dentry, inode);
406 ret = 0; 392 ret = 0;
407fail: 393fail:
394 kfree(pending_snapshot);
408 return ret; 395 return ret;
409} 396}
410 397
@@ -456,8 +443,7 @@ static noinline int btrfs_mksubvol(struct path *parent,
456 goto out_up_read; 443 goto out_up_read;
457 444
458 if (snap_src) { 445 if (snap_src) {
459 error = create_snapshot(snap_src, dentry, 446 error = create_snapshot(snap_src, dentry);
460 name, namelen);
461 } else { 447 } else {
462 error = create_subvol(BTRFS_I(dir)->root, dentry, 448 error = create_subvol(BTRFS_I(dir)->root, dentry,
463 name, namelen); 449 name, namelen);
@@ -601,19 +587,9 @@ static int btrfs_defrag_file(struct file *file,
601 if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) 587 if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
602 BTRFS_I(inode)->force_compress = 1; 588 BTRFS_I(inode)->force_compress = 1;
603 589
604 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE); 590 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
605 if (ret) { 591 if (ret)
606 ret = -ENOSPC; 592 goto err_unlock;
607 break;
608 }
609
610 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
611 if (ret) {
612 btrfs_free_reserved_data_space(root, inode,
613 PAGE_CACHE_SIZE);
614 ret = -ENOSPC;
615 break;
616 }
617again: 593again:
618 if (inode->i_size == 0 || 594 if (inode->i_size == 0 ||
619 i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) { 595 i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) {
@@ -622,8 +598,10 @@ again:
622 } 598 }
623 599
624 page = grab_cache_page(inode->i_mapping, i); 600 page = grab_cache_page(inode->i_mapping, i);
625 if (!page) 601 if (!page) {
602 ret = -ENOMEM;
626 goto err_reservations; 603 goto err_reservations;
604 }
627 605
628 if (!PageUptodate(page)) { 606 if (!PageUptodate(page)) {
629 btrfs_readpage(NULL, page); 607 btrfs_readpage(NULL, page);
@@ -631,6 +609,7 @@ again:
631 if (!PageUptodate(page)) { 609 if (!PageUptodate(page)) {
632 unlock_page(page); 610 unlock_page(page);
633 page_cache_release(page); 611 page_cache_release(page);
612 ret = -EIO;
634 goto err_reservations; 613 goto err_reservations;
635 } 614 }
636 } 615 }
@@ -644,8 +623,7 @@ again:
644 wait_on_page_writeback(page); 623 wait_on_page_writeback(page);
645 624
646 if (PageDirty(page)) { 625 if (PageDirty(page)) {
647 btrfs_free_reserved_data_space(root, inode, 626 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
648 PAGE_CACHE_SIZE);
649 goto loop_unlock; 627 goto loop_unlock;
650 } 628 }
651 629
@@ -683,7 +661,6 @@ loop_unlock:
683 page_cache_release(page); 661 page_cache_release(page);
684 mutex_unlock(&inode->i_mutex); 662 mutex_unlock(&inode->i_mutex);
685 663
686 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
687 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1); 664 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
688 i++; 665 i++;
689 } 666 }
@@ -713,9 +690,9 @@ loop_unlock:
713 return 0; 690 return 0;
714 691
715err_reservations: 692err_reservations:
693 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
694err_unlock:
716 mutex_unlock(&inode->i_mutex); 695 mutex_unlock(&inode->i_mutex);
717 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
718 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
719 return ret; 696 return ret;
720} 697}
721 698
@@ -811,7 +788,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
811 device->name, (unsigned long long)new_size); 788 device->name, (unsigned long long)new_size);
812 789
813 if (new_size > old_size) { 790 if (new_size > old_size) {
814 trans = btrfs_start_transaction(root, 1); 791 trans = btrfs_start_transaction(root, 0);
815 ret = btrfs_grow_device(trans, device, new_size); 792 ret = btrfs_grow_device(trans, device, new_size);
816 btrfs_commit_transaction(trans, root); 793 btrfs_commit_transaction(trans, root);
817 } else { 794 } else {
@@ -1300,7 +1277,13 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
1300 if (err) 1277 if (err)
1301 goto out_up_write; 1278 goto out_up_write;
1302 1279
1303 trans = btrfs_start_transaction(root, 1); 1280 trans = btrfs_start_transaction(root, 0);
1281 if (IS_ERR(trans)) {
1282 err = PTR_ERR(trans);
1283 goto out;
1284 }
1285 trans->block_rsv = &root->fs_info->global_block_rsv;
1286
1304 ret = btrfs_unlink_subvol(trans, root, dir, 1287 ret = btrfs_unlink_subvol(trans, root, dir,
1305 dest->root_key.objectid, 1288 dest->root_key.objectid,
1306 dentry->d_name.name, 1289 dentry->d_name.name,
@@ -1314,10 +1297,12 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
1314 dest->root_item.drop_level = 0; 1297 dest->root_item.drop_level = 0;
1315 btrfs_set_root_refs(&dest->root_item, 0); 1298 btrfs_set_root_refs(&dest->root_item, 0);
1316 1299
1317 ret = btrfs_insert_orphan_item(trans, 1300 if (!xchg(&dest->orphan_item_inserted, 1)) {
1318 root->fs_info->tree_root, 1301 ret = btrfs_insert_orphan_item(trans,
1319 dest->root_key.objectid); 1302 root->fs_info->tree_root,
1320 BUG_ON(ret); 1303 dest->root_key.objectid);
1304 BUG_ON(ret);
1305 }
1321 1306
1322 ret = btrfs_commit_transaction(trans, root); 1307 ret = btrfs_commit_transaction(trans, root);
1323 BUG_ON(ret); 1308 BUG_ON(ret);
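
Reviewer note: the xchg() guard above makes the orphan-item insertion for a doomed subvolume a strictly once-only event, even if the destroy path is raced. The same idea in portable C11 with atomic_exchange(); orphan_item_inserted here is just a file-scope flag standing in for the btrfs_root field:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int orphan_item_inserted;

    static void maybe_insert_orphan_item(const char *who)
    {
        /* Whoever flips 0 -> 1 first does the insertion; everyone else skips. */
        if (!atomic_exchange(&orphan_item_inserted, 1))
            printf("%s inserted the orphan item\n", who);
        else
            printf("%s skipped, already inserted\n", who);
    }

    int main(void)
    {
        maybe_insert_orphan_item("first caller");
        maybe_insert_orphan_item("second caller");
        return 0;
    }
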
@@ -1358,8 +1343,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
1358 ret = -EPERM; 1343 ret = -EPERM;
1359 goto out; 1344 goto out;
1360 } 1345 }
1361 btrfs_defrag_root(root, 0); 1346 ret = btrfs_defrag_root(root, 0);
1362 btrfs_defrag_root(root->fs_info->extent_root, 0); 1347 if (ret)
1348 goto out;
1349 ret = btrfs_defrag_root(root->fs_info->extent_root, 0);
1363 break; 1350 break;
1364 case S_IFREG: 1351 case S_IFREG:
1365 if (!(file->f_mode & FMODE_WRITE)) { 1352 if (!(file->f_mode & FMODE_WRITE)) {
@@ -1389,9 +1376,11 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
1389 /* the rest are all set to zero by kzalloc */ 1376 /* the rest are all set to zero by kzalloc */
1390 range->len = (u64)-1; 1377 range->len = (u64)-1;
1391 } 1378 }
1392 btrfs_defrag_file(file, range); 1379 ret = btrfs_defrag_file(file, range);
1393 kfree(range); 1380 kfree(range);
1394 break; 1381 break;
1382 default:
1383 ret = -EINVAL;
1395 } 1384 }
1396out: 1385out:
1397 mnt_drop_write(file->f_path.mnt); 1386 mnt_drop_write(file->f_path.mnt);
@@ -1550,12 +1539,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1550 btrfs_wait_ordered_range(src, off, off+len); 1539 btrfs_wait_ordered_range(src, off, off+len);
1551 } 1540 }
1552 1541
1553 trans = btrfs_start_transaction(root, 1);
1554 BUG_ON(!trans);
1555
1556 /* punch hole in destination first */
1557 btrfs_drop_extents(trans, inode, off, off + len, &hint_byte, 1);
1558
1559 /* clone data */ 1542 /* clone data */
1560 key.objectid = src->i_ino; 1543 key.objectid = src->i_ino;
1561 key.type = BTRFS_EXTENT_DATA_KEY; 1544 key.type = BTRFS_EXTENT_DATA_KEY;
@@ -1566,7 +1549,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1566 * note the key will change type as we walk through the 1549 * note the key will change type as we walk through the
1567 * tree. 1550 * tree.
1568 */ 1551 */
1569 ret = btrfs_search_slot(trans, root, &key, path, 0, 0); 1552 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1570 if (ret < 0) 1553 if (ret < 0)
1571 goto out; 1554 goto out;
1572 1555
@@ -1629,12 +1612,31 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1629 new_key.objectid = inode->i_ino; 1612 new_key.objectid = inode->i_ino;
1630 new_key.offset = key.offset + destoff - off; 1613 new_key.offset = key.offset + destoff - off;
1631 1614
1615 trans = btrfs_start_transaction(root, 1);
1616 if (IS_ERR(trans)) {
1617 ret = PTR_ERR(trans);
1618 goto out;
1619 }
1620
1632 if (type == BTRFS_FILE_EXTENT_REG || 1621 if (type == BTRFS_FILE_EXTENT_REG ||
1633 type == BTRFS_FILE_EXTENT_PREALLOC) { 1622 type == BTRFS_FILE_EXTENT_PREALLOC) {
1623 if (off > key.offset) {
1624 datao += off - key.offset;
1625 datal -= off - key.offset;
1626 }
1627
1628 if (key.offset + datal > off + len)
1629 datal = off + len - key.offset;
1630
1631 ret = btrfs_drop_extents(trans, inode,
1632 new_key.offset,
1633 new_key.offset + datal,
1634 &hint_byte, 1);
1635 BUG_ON(ret);
1636
1634 ret = btrfs_insert_empty_item(trans, root, path, 1637 ret = btrfs_insert_empty_item(trans, root, path,
1635 &new_key, size); 1638 &new_key, size);
1636 if (ret) 1639 BUG_ON(ret);
1637 goto out;
1638 1640
1639 leaf = path->nodes[0]; 1641 leaf = path->nodes[0];
1640 slot = path->slots[0]; 1642 slot = path->slots[0];
@@ -1645,14 +1647,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1645 extent = btrfs_item_ptr(leaf, slot, 1647 extent = btrfs_item_ptr(leaf, slot,
1646 struct btrfs_file_extent_item); 1648 struct btrfs_file_extent_item);
1647 1649
1648 if (off > key.offset) {
1649 datao += off - key.offset;
1650 datal -= off - key.offset;
1651 }
1652
1653 if (key.offset + datal > off + len)
1654 datal = off + len - key.offset;
1655
1656 /* disko == 0 means it's a hole */ 1650 /* disko == 0 means it's a hole */
1657 if (!disko) 1651 if (!disko)
1658 datao = 0; 1652 datao = 0;
@@ -1683,14 +1677,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1683 1677
1684 if (comp && (skip || trim)) { 1678 if (comp && (skip || trim)) {
1685 ret = -EINVAL; 1679 ret = -EINVAL;
1680 btrfs_end_transaction(trans, root);
1686 goto out; 1681 goto out;
1687 } 1682 }
1688 size -= skip + trim; 1683 size -= skip + trim;
1689 datal -= skip + trim; 1684 datal -= skip + trim;
1685
1686 ret = btrfs_drop_extents(trans, inode,
1687 new_key.offset,
1688 new_key.offset + datal,
1689 &hint_byte, 1);
1690 BUG_ON(ret);
1691
1690 ret = btrfs_insert_empty_item(trans, root, path, 1692 ret = btrfs_insert_empty_item(trans, root, path,
1691 &new_key, size); 1693 &new_key, size);
1692 if (ret) 1694 BUG_ON(ret);
1693 goto out;
1694 1695
1695 if (skip) { 1696 if (skip) {
1696 u32 start = 1697 u32 start =
@@ -1708,8 +1709,17 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1708 } 1709 }
1709 1710
1710 btrfs_mark_buffer_dirty(leaf); 1711 btrfs_mark_buffer_dirty(leaf);
1711 } 1712 btrfs_release_path(root, path);
1712 1713
1714 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1715 if (new_key.offset + datal > inode->i_size)
1716 btrfs_i_size_write(inode,
1717 new_key.offset + datal);
1718 BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
1719 ret = btrfs_update_inode(trans, root, inode);
1720 BUG_ON(ret);
1721 btrfs_end_transaction(trans, root);
1722 }
1713next: 1723next:
1714 btrfs_release_path(root, path); 1724 btrfs_release_path(root, path);
1715 key.offset++; 1725 key.offset++;
@@ -1717,17 +1727,7 @@ next:
1717 ret = 0; 1727 ret = 0;
1718out: 1728out:
1719 btrfs_release_path(root, path); 1729 btrfs_release_path(root, path);
1720 if (ret == 0) {
1721 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1722 if (destoff + olen > inode->i_size)
1723 btrfs_i_size_write(inode, destoff + olen);
1724 BTRFS_I(inode)->flags = BTRFS_I(src)->flags;
1725 ret = btrfs_update_inode(trans, root, inode);
1726 }
1727 btrfs_end_transaction(trans, root);
1728 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); 1730 unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
1729 if (ret)
1730 vmtruncate(inode, 0);
1731out_unlock: 1731out_unlock:
1732 mutex_unlock(&src->i_mutex); 1732 mutex_unlock(&src->i_mutex);
1733 mutex_unlock(&inode->i_mutex); 1733 mutex_unlock(&inode->i_mutex);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index a127c0ebb2dc..e56c72bc5add 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -124,6 +124,15 @@ static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
124 return 1; 124 return 1;
125} 125}
126 126
127static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
128 u64 len)
129{
130 if (file_offset + len <= entry->file_offset ||
131 entry->file_offset + entry->len <= file_offset)
132 return 0;
133 return 1;
134}
135
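
Reviewer note: range_overlaps() above treats every extent as a half-open interval [file_offset, file_offset + len), so two ranges that merely touch end-to-start do not count as overlapping. A compilable restatement of the same test with the boundary cases spelled out; ranges_overlap() is just a local copy, not a btrfs symbol:

    #include <stdio.h>

    static int ranges_overlap(unsigned long long a_off, unsigned long long a_len,
                              unsigned long long b_off, unsigned long long b_len)
    {
        if (a_off + a_len <= b_off || b_off + b_len <= a_off)
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("%d\n", ranges_overlap(0, 4096, 4096, 4096));   /* 0: touching only */
        printf("%d\n", ranges_overlap(0, 4097, 4096, 4096));   /* 1: one byte shared */
        return 0;
    }
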
127/* 136/*
128 * look find the first ordered struct that has this offset, otherwise 137 * look find the first ordered struct that has this offset, otherwise
129 * the first one less than this offset 138 * the first one less than this offset
@@ -161,8 +170,9 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
161 * The tree is given a single reference on the ordered extent that was 170 * The tree is given a single reference on the ordered extent that was
162 * inserted. 171 * inserted.
163 */ 172 */
164int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, 173static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
165 u64 start, u64 len, u64 disk_len, int type) 174 u64 start, u64 len, u64 disk_len,
175 int type, int dio)
166{ 176{
167 struct btrfs_ordered_inode_tree *tree; 177 struct btrfs_ordered_inode_tree *tree;
168 struct rb_node *node; 178 struct rb_node *node;
@@ -182,6 +192,9 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
182 if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE) 192 if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
183 set_bit(type, &entry->flags); 193 set_bit(type, &entry->flags);
184 194
195 if (dio)
196 set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
197
185 /* one ref for the tree */ 198 /* one ref for the tree */
186 atomic_set(&entry->refs, 1); 199 atomic_set(&entry->refs, 1);
187 init_waitqueue_head(&entry->wait); 200 init_waitqueue_head(&entry->wait);
@@ -203,6 +216,20 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
203 return 0; 216 return 0;
204} 217}
205 218
219int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
220 u64 start, u64 len, u64 disk_len, int type)
221{
222 return __btrfs_add_ordered_extent(inode, file_offset, start, len,
223 disk_len, type, 0);
224}
225
226int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
227 u64 start, u64 len, u64 disk_len, int type)
228{
229 return __btrfs_add_ordered_extent(inode, file_offset, start, len,
230 disk_len, type, 1);
231}
232
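
Reviewer note: rather than threading a DIO argument through every existing caller, the patch keeps btrfs_add_ordered_extent() source-compatible and adds a *_dio() twin over one shared __btrfs_add_ordered_extent(). The wrapper shape, reduced to a compilable toy in which all three names are stand-ins:

    #include <stdio.h>

    static int add_ordered_extent_common(unsigned long long off,
                                         unsigned long long len, int dio)
    {
        printf("ordered extent %llu+%llu%s\n", off, len,
               dio ? " (direct IO)" : "");
        return 0;
    }

    static int add_ordered_extent(unsigned long long off, unsigned long long len)
    {
        return add_ordered_extent_common(off, len, 0);
    }

    static int add_ordered_extent_dio(unsigned long long off, unsigned long long len)
    {
        return add_ordered_extent_common(off, len, 1);
    }

    int main(void)
    {
        add_ordered_extent(0, 4096);
        add_ordered_extent_dio(4096, 4096);
        return 0;
    }
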
206/* 233/*
207 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted 234 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
208 * when an ordered extent is finished. If the list covers more than one 235 * when an ordered extent is finished. If the list covers more than one
@@ -311,13 +338,6 @@ static int __btrfs_remove_ordered_extent(struct inode *inode,
311 tree->last = NULL; 338 tree->last = NULL;
312 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); 339 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
313 340
314 spin_lock(&BTRFS_I(inode)->accounting_lock);
315 WARN_ON(!BTRFS_I(inode)->outstanding_extents);
316 BTRFS_I(inode)->outstanding_extents--;
317 spin_unlock(&BTRFS_I(inode)->accounting_lock);
318 btrfs_unreserve_metadata_for_delalloc(BTRFS_I(inode)->root,
319 inode, 1);
320
321 spin_lock(&root->fs_info->ordered_extent_lock); 341 spin_lock(&root->fs_info->ordered_extent_lock);
322 list_del_init(&entry->root_extent_list); 342 list_del_init(&entry->root_extent_list);
323 343
@@ -491,7 +511,8 @@ void btrfs_start_ordered_extent(struct inode *inode,
491 * start IO on any dirty ones so the wait doesn't stall waiting 511 * start IO on any dirty ones so the wait doesn't stall waiting
492 * for pdflush to find them 512 * for pdflush to find them
493 */ 513 */
494 filemap_fdatawrite_range(inode->i_mapping, start, end); 514 if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
515 filemap_fdatawrite_range(inode->i_mapping, start, end);
495 if (wait) { 516 if (wait) {
496 wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, 517 wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
497 &entry->flags)); 518 &entry->flags));
@@ -588,6 +609,47 @@ out:
588 return entry; 609 return entry;
589} 610}
590 611
612/* Since the DIO code tries to lock a wide area we need to look for any ordered
613 * extents that exist in the range, rather than just the start of the range.
614 */
615struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
616 u64 file_offset,
617 u64 len)
618{
619 struct btrfs_ordered_inode_tree *tree;
620 struct rb_node *node;
621 struct btrfs_ordered_extent *entry = NULL;
622
623 tree = &BTRFS_I(inode)->ordered_tree;
624 spin_lock(&tree->lock);
625 node = tree_search(tree, file_offset);
626 if (!node) {
627 node = tree_search(tree, file_offset + len);
628 if (!node)
629 goto out;
630 }
631
632 while (1) {
633 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
634 if (range_overlaps(entry, file_offset, len))
635 break;
636
637 if (entry->file_offset >= file_offset + len) {
638 entry = NULL;
639 break;
640 }
641 entry = NULL;
642 node = rb_next(node);
643 if (!node)
644 break;
645 }
646out:
647 if (entry)
648 atomic_inc(&entry->refs);
649 spin_unlock(&tree->lock);
650 return entry;
651}
652
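
Reviewer note: as the comment above says, direct IO locks a wide file range up front, so the lookup has to return any ordered extent that overlaps [file_offset, file_offset + len), not only one that starts inside it. A simplified, array-based analogue of btrfs_lookup_ordered_range(); lookup_range() and struct ordered are sketch-only, and the real code walks the per-inode rb-tree under tree->lock and takes a reference on the hit:

    #include <stddef.h>
    #include <stdio.h>

    struct ordered { unsigned long long file_offset, len; };

    /* Entries are kept sorted by file_offset, like the rb-tree walk. */
    static const struct ordered *lookup_range(const struct ordered *e, size_t n,
                                              unsigned long long off,
                                              unsigned long long len)
    {
        for (size_t i = 0; i < n; i++) {
            if (e[i].file_offset >= off + len)
                break;                          /* already past the range */
            if (e[i].file_offset + e[i].len > off)
                return &e[i];                   /* overlaps the range */
        }
        return NULL;
    }

    int main(void)
    {
        const struct ordered extents[] = { { 0, 4096 }, { 8192, 4096 } };
        const struct ordered *hit = lookup_range(extents, 2, 4096, 8192);

        printf("%s\n", hit ? "overlapping ordered extent found" : "range is clean");
        return 0;
    }
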
591/* 653/*
592 * lookup and return any extent before 'file_offset'. NULL is returned 654 * lookup and return any extent before 'file_offset'. NULL is returned
593 * if none is found 655 * if none is found
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index c82f76a9f040..8ac365492a3f 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -72,6 +72,8 @@ struct btrfs_ordered_sum {
72 72
73#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */ 73#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */
74 74
75#define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */
76
75struct btrfs_ordered_extent { 77struct btrfs_ordered_extent {
76 /* logical offset in the file */ 78 /* logical offset in the file */
77 u64 file_offset; 79 u64 file_offset;
@@ -140,7 +142,9 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
140 struct btrfs_ordered_extent **cached, 142 struct btrfs_ordered_extent **cached,
141 u64 file_offset, u64 io_size); 143 u64 file_offset, u64 io_size);
142int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, 144int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
143 u64 start, u64 len, u64 disk_len, int tyep); 145 u64 start, u64 len, u64 disk_len, int type);
146int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
147 u64 start, u64 len, u64 disk_len, int type);
144int btrfs_add_ordered_sum(struct inode *inode, 148int btrfs_add_ordered_sum(struct inode *inode,
145 struct btrfs_ordered_extent *entry, 149 struct btrfs_ordered_extent *entry,
146 struct btrfs_ordered_sum *sum); 150 struct btrfs_ordered_sum *sum);
@@ -151,6 +155,9 @@ void btrfs_start_ordered_extent(struct inode *inode,
151int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); 155int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
152struct btrfs_ordered_extent * 156struct btrfs_ordered_extent *
153btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); 157btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
158struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
159 u64 file_offset,
160 u64 len);
154int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, 161int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
155 struct btrfs_ordered_extent *ordered); 162 struct btrfs_ordered_extent *ordered);
156int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum); 163int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index e558dd941ded..05d41e569236 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -44,8 +44,12 @@ struct tree_entry {
44struct backref_node { 44struct backref_node {
45 struct rb_node rb_node; 45 struct rb_node rb_node;
46 u64 bytenr; 46 u64 bytenr;
47 /* objectid tree block owner */ 47
48 u64 new_bytenr;
49 /* objectid of tree block owner, can be not uptodate */
48 u64 owner; 50 u64 owner;
51 /* link to pending, changed or detached list */
52 struct list_head list;
49 /* list of upper level blocks reference this block */ 53 /* list of upper level blocks reference this block */
50 struct list_head upper; 54 struct list_head upper;
51 /* list of child blocks in the cache */ 55 /* list of child blocks in the cache */
@@ -56,9 +60,9 @@ struct backref_node {
56 struct extent_buffer *eb; 60 struct extent_buffer *eb;
57 /* level of tree block */ 61 /* level of tree block */
58 unsigned int level:8; 62 unsigned int level:8;
59 /* 1 if the block is root of old snapshot */ 63 /* is the block in non-reference counted tree */
60 unsigned int old_root:1; 64 unsigned int cowonly:1;
61 /* 1 if no child blocks in the cache */ 65 /* 1 if no child node in the cache */
62 unsigned int lowest:1; 66 unsigned int lowest:1;
63 /* is the extent buffer locked */ 67 /* is the extent buffer locked */
64 unsigned int locked:1; 68 unsigned int locked:1;
@@ -66,6 +70,16 @@ struct backref_node {
66 unsigned int processed:1; 70 unsigned int processed:1;
67 /* have backrefs of this block been checked */ 71 /* have backrefs of this block been checked */
68 unsigned int checked:1; 72 unsigned int checked:1;
73 /*
74 * 1 if corresponding block has been cowed but some upper
75 * level block pointers may not point to the new location
76 */
77 unsigned int pending:1;
78 /*
79 * 1 if the backref node isn't connected to any other
80 * backref node.
81 */
82 unsigned int detached:1;
69}; 83};
70 84
71/* 85/*
@@ -74,7 +88,6 @@ struct backref_node {
74struct backref_edge { 88struct backref_edge {
75 struct list_head list[2]; 89 struct list_head list[2];
76 struct backref_node *node[2]; 90 struct backref_node *node[2];
77 u64 blockptr;
78}; 91};
79 92
80#define LOWER 0 93#define LOWER 0
@@ -83,9 +96,25 @@ struct backref_edge {
83struct backref_cache { 96struct backref_cache {
84 /* red black tree of all backref nodes in the cache */ 97 /* red black tree of all backref nodes in the cache */
85 struct rb_root rb_root; 98 struct rb_root rb_root;
86 /* list of backref nodes with no child block in the cache */ 99 /* for passing backref nodes to btrfs_reloc_cow_block */
100 struct backref_node *path[BTRFS_MAX_LEVEL];
101 /*
102 * list of blocks that have been cowed but some block
103 * pointers in upper level blocks may not reflect the
104 * new location
105 */
87 struct list_head pending[BTRFS_MAX_LEVEL]; 106 struct list_head pending[BTRFS_MAX_LEVEL];
88 spinlock_t lock; 107 /* list of backref nodes with no child node */
108 struct list_head leaves;
109 /* list of blocks that have been cowed in current transaction */
110 struct list_head changed;
111 /* list of detached backref node. */
112 struct list_head detached;
113
114 u64 last_trans;
115
116 int nr_nodes;
117 int nr_edges;
89}; 118};
90 119
91/* 120/*
@@ -113,15 +142,6 @@ struct tree_block {
113 unsigned int key_ready:1; 142 unsigned int key_ready:1;
114}; 143};
115 144
116/* inode vector */
117#define INODEVEC_SIZE 16
118
119struct inodevec {
120 struct list_head list;
121 struct inode *inode[INODEVEC_SIZE];
122 int nr;
123};
124
125#define MAX_EXTENTS 128 145#define MAX_EXTENTS 128
126 146
127struct file_extent_cluster { 147struct file_extent_cluster {
@@ -138,36 +158,43 @@ struct reloc_control {
138 struct btrfs_root *extent_root; 158 struct btrfs_root *extent_root;
139 /* inode for moving data */ 159 /* inode for moving data */
140 struct inode *data_inode; 160 struct inode *data_inode;
141 struct btrfs_workers workers; 161
162 struct btrfs_block_rsv *block_rsv;
163
164 struct backref_cache backref_cache;
165
166 struct file_extent_cluster cluster;
142 /* tree blocks have been processed */ 167 /* tree blocks have been processed */
143 struct extent_io_tree processed_blocks; 168 struct extent_io_tree processed_blocks;
144 /* map start of tree root to corresponding reloc tree */ 169 /* map start of tree root to corresponding reloc tree */
145 struct mapping_tree reloc_root_tree; 170 struct mapping_tree reloc_root_tree;
146 /* list of reloc trees */ 171 /* list of reloc trees */
147 struct list_head reloc_roots; 172 struct list_head reloc_roots;
173 /* size of metadata reservation for merging reloc trees */
174 u64 merging_rsv_size;
175 /* size of relocated tree nodes */
176 u64 nodes_relocated;
177
148 u64 search_start; 178 u64 search_start;
149 u64 extents_found; 179 u64 extents_found;
150 u64 extents_skipped; 180
151 int stage; 181 int block_rsv_retries;
152 int create_reloc_root; 182
183 unsigned int stage:8;
184 unsigned int create_reloc_tree:1;
185 unsigned int merge_reloc_tree:1;
153 unsigned int found_file_extent:1; 186 unsigned int found_file_extent:1;
154 unsigned int found_old_snapshot:1; 187 unsigned int commit_transaction:1;
155}; 188};
156 189
157/* stages of data relocation */ 190/* stages of data relocation */
158#define MOVE_DATA_EXTENTS 0 191#define MOVE_DATA_EXTENTS 0
159#define UPDATE_DATA_PTRS 1 192#define UPDATE_DATA_PTRS 1
160 193
161/* 194static void remove_backref_node(struct backref_cache *cache,
162 * merge reloc tree to corresponding fs tree in worker threads 195 struct backref_node *node);
163 */ 196static void __mark_block_processed(struct reloc_control *rc,
164struct async_merge { 197 struct backref_node *node);
165 struct btrfs_work work;
166 struct reloc_control *rc;
167 struct btrfs_root *root;
168 struct completion *done;
169 atomic_t *num_pending;
170};
171 198
172static void mapping_tree_init(struct mapping_tree *tree) 199static void mapping_tree_init(struct mapping_tree *tree)
173{ 200{
@@ -181,15 +208,80 @@ static void backref_cache_init(struct backref_cache *cache)
181 cache->rb_root = RB_ROOT; 208 cache->rb_root = RB_ROOT;
182 for (i = 0; i < BTRFS_MAX_LEVEL; i++) 209 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
183 INIT_LIST_HEAD(&cache->pending[i]); 210 INIT_LIST_HEAD(&cache->pending[i]);
184 spin_lock_init(&cache->lock); 211 INIT_LIST_HEAD(&cache->changed);
212 INIT_LIST_HEAD(&cache->detached);
213 INIT_LIST_HEAD(&cache->leaves);
214}
215
216static void backref_cache_cleanup(struct backref_cache *cache)
217{
218 struct backref_node *node;
219 int i;
220
221 while (!list_empty(&cache->detached)) {
222 node = list_entry(cache->detached.next,
223 struct backref_node, list);
224 remove_backref_node(cache, node);
225 }
226
227 while (!list_empty(&cache->leaves)) {
228 node = list_entry(cache->leaves.next,
229 struct backref_node, lower);
230 remove_backref_node(cache, node);
231 }
232
233 cache->last_trans = 0;
234
235 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
236 BUG_ON(!list_empty(&cache->pending[i]));
237 BUG_ON(!list_empty(&cache->changed));
238 BUG_ON(!list_empty(&cache->detached));
239 BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
240 BUG_ON(cache->nr_nodes);
241 BUG_ON(cache->nr_edges);
242}
243
244static struct backref_node *alloc_backref_node(struct backref_cache *cache)
245{
246 struct backref_node *node;
247
248 node = kzalloc(sizeof(*node), GFP_NOFS);
249 if (node) {
250 INIT_LIST_HEAD(&node->list);
251 INIT_LIST_HEAD(&node->upper);
252 INIT_LIST_HEAD(&node->lower);
253 RB_CLEAR_NODE(&node->rb_node);
254 cache->nr_nodes++;
255 }
256 return node;
257}
258
259static void free_backref_node(struct backref_cache *cache,
260 struct backref_node *node)
261{
262 if (node) {
263 cache->nr_nodes--;
264 kfree(node);
265 }
266}
267
268static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
269{
270 struct backref_edge *edge;
271
272 edge = kzalloc(sizeof(*edge), GFP_NOFS);
273 if (edge)
274 cache->nr_edges++;
275 return edge;
185} 276}
186 277
187static void backref_node_init(struct backref_node *node) 278static void free_backref_edge(struct backref_cache *cache,
279 struct backref_edge *edge)
188{ 280{
189 memset(node, 0, sizeof(*node)); 281 if (edge) {
190 INIT_LIST_HEAD(&node->upper); 282 cache->nr_edges--;
191 INIT_LIST_HEAD(&node->lower); 283 kfree(edge);
192 RB_CLEAR_NODE(&node->rb_node); 284 }
193} 285}
194 286
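
Reviewer note: alloc_backref_node()/free_backref_node() and the edge variants above route every allocation through the cache so nr_nodes/nr_edges stay exact, which is what lets backref_cache_cleanup() BUG_ON() a leak. A minimal stand-alone version of that counted-allocator idea; struct cache, alloc_node() and free_node() are invented for the sketch:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cache { int nr_nodes; };
    struct node { int dummy; };

    static struct node *alloc_node(struct cache *c)
    {
        struct node *n = calloc(1, sizeof(*n));

        if (n)
            c->nr_nodes++;
        return n;
    }

    static void free_node(struct cache *c, struct node *n)
    {
        if (n) {
            c->nr_nodes--;
            free(n);
        }
    }

    int main(void)
    {
        struct cache c = { 0 };
        struct node *n = alloc_node(&c);

        free_node(&c, n);
        assert(c.nr_nodes == 0);    /* the cleanup-time leak check */
        printf("no leaked nodes\n");
        return 0;
    }
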
195static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, 287static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
@@ -250,6 +342,7 @@ static struct backref_node *walk_up_backref(struct backref_node *node,
250 edges[idx++] = edge; 342 edges[idx++] = edge;
251 node = edge->node[UPPER]; 343 node = edge->node[UPPER];
252 } 344 }
345 BUG_ON(node->detached);
253 *index = idx; 346 *index = idx;
254 return node; 347 return node;
255} 348}
@@ -281,13 +374,18 @@ static struct backref_node *walk_down_backref(struct backref_edge *edges[],
281 return NULL; 374 return NULL;
282} 375}
283 376
377static void unlock_node_buffer(struct backref_node *node)
378{
379 if (node->locked) {
380 btrfs_tree_unlock(node->eb);
381 node->locked = 0;
382 }
383}
384
284static void drop_node_buffer(struct backref_node *node) 385static void drop_node_buffer(struct backref_node *node)
285{ 386{
286 if (node->eb) { 387 if (node->eb) {
287 if (node->locked) { 388 unlock_node_buffer(node);
288 btrfs_tree_unlock(node->eb);
289 node->locked = 0;
290 }
291 free_extent_buffer(node->eb); 389 free_extent_buffer(node->eb);
292 node->eb = NULL; 390 node->eb = NULL;
293 } 391 }
@@ -296,14 +394,14 @@ static void drop_node_buffer(struct backref_node *node)
296static void drop_backref_node(struct backref_cache *tree, 394static void drop_backref_node(struct backref_cache *tree,
297 struct backref_node *node) 395 struct backref_node *node)
298{ 396{
299 BUG_ON(!node->lowest);
300 BUG_ON(!list_empty(&node->upper)); 397 BUG_ON(!list_empty(&node->upper));
301 398
302 drop_node_buffer(node); 399 drop_node_buffer(node);
400 list_del(&node->list);
303 list_del(&node->lower); 401 list_del(&node->lower);
304 402 if (!RB_EMPTY_NODE(&node->rb_node))
305 rb_erase(&node->rb_node, &tree->rb_root); 403 rb_erase(&node->rb_node, &tree->rb_root);
306 kfree(node); 404 free_backref_node(tree, node);
307} 405}
308 406
309/* 407/*
@@ -318,27 +416,121 @@ static void remove_backref_node(struct backref_cache *cache,
318 if (!node) 416 if (!node)
319 return; 417 return;
320 418
321 BUG_ON(!node->lowest); 419 BUG_ON(!node->lowest && !node->detached);
322 while (!list_empty(&node->upper)) { 420 while (!list_empty(&node->upper)) {
323 edge = list_entry(node->upper.next, struct backref_edge, 421 edge = list_entry(node->upper.next, struct backref_edge,
324 list[LOWER]); 422 list[LOWER]);
325 upper = edge->node[UPPER]; 423 upper = edge->node[UPPER];
326 list_del(&edge->list[LOWER]); 424 list_del(&edge->list[LOWER]);
327 list_del(&edge->list[UPPER]); 425 list_del(&edge->list[UPPER]);
328 kfree(edge); 426 free_backref_edge(cache, edge);
427
428 if (RB_EMPTY_NODE(&upper->rb_node)) {
429 BUG_ON(!list_empty(&node->upper));
430 drop_backref_node(cache, node);
431 node = upper;
432 node->lowest = 1;
433 continue;
434 }
329 /* 435 /*
330 * add the node to pending list if no other 436 * add the node to leaf node list if no other
331 * child block cached. 437 * child block cached.
332 */ 438 */
333 if (list_empty(&upper->lower)) { 439 if (list_empty(&upper->lower)) {
334 list_add_tail(&upper->lower, 440 list_add_tail(&upper->lower, &cache->leaves);
335 &cache->pending[upper->level]);
336 upper->lowest = 1; 441 upper->lowest = 1;
337 } 442 }
338 } 443 }
444
339 drop_backref_node(cache, node); 445 drop_backref_node(cache, node);
340} 446}
341 447
448static void update_backref_node(struct backref_cache *cache,
449 struct backref_node *node, u64 bytenr)
450{
451 struct rb_node *rb_node;
452 rb_erase(&node->rb_node, &cache->rb_root);
453 node->bytenr = bytenr;
454 rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
455 BUG_ON(rb_node);
456}
457
458/*
459 * update backref cache after a transaction commit
460 */
461static int update_backref_cache(struct btrfs_trans_handle *trans,
462 struct backref_cache *cache)
463{
464 struct backref_node *node;
465 int level = 0;
466
467 if (cache->last_trans == 0) {
468 cache->last_trans = trans->transid;
469 return 0;
470 }
471
472 if (cache->last_trans == trans->transid)
473 return 0;
474
475 /*
476 * detached nodes are used to avoid unnecessary backref
477 * lookup. transaction commit changes the extent tree.
478 * so the detached nodes are no longer useful.
479 */
480 while (!list_empty(&cache->detached)) {
481 node = list_entry(cache->detached.next,
482 struct backref_node, list);
483 remove_backref_node(cache, node);
484 }
485
486 while (!list_empty(&cache->changed)) {
487 node = list_entry(cache->changed.next,
488 struct backref_node, list);
489 list_del_init(&node->list);
490 BUG_ON(node->pending);
491 update_backref_node(cache, node, node->new_bytenr);
492 }
493
494 /*
495 * some nodes can be left in the pending list if there were
496 * errors during processing the pending nodes.
497 */
498 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
499 list_for_each_entry(node, &cache->pending[level], list) {
500 BUG_ON(!node->pending);
501 if (node->bytenr == node->new_bytenr)
502 continue;
503 update_backref_node(cache, node, node->new_bytenr);
504 }
505 }
506
507 cache->last_trans = 0;
508 return 1;
509}
510
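
Reviewer note: update_backref_cache() above keys the cache's validity off the transaction id; once a commit has moved the extent tree on, the detached nodes are dropped outright and the changed nodes are re-inserted under their new_bytenr. A much-reduced, compilable sketch of the transid check alone; struct cache and update_cache() are stand-ins, and the re-keying step is omitted:

    #include <stdio.h>

    struct cache { unsigned long long last_trans; int detached_entries; };

    static int update_cache(struct cache *c, unsigned long long transid)
    {
        if (c->last_trans == 0) {
            c->last_trans = transid;    /* first use: just record the id */
            return 0;
        }
        if (c->last_trans == transid)
            return 0;                   /* same transaction, still valid */

        c->detached_entries = 0;        /* stale after the commit, drop them */
        c->last_trans = 0;
        return 1;                       /* caller should redo its lookups */
    }

    int main(void)
    {
        struct cache c = { 0, 3 };

        update_cache(&c, 100);
        printf("%d\n", update_cache(&c, 101));  /* 1: a commit intervened */
        return 0;
    }
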
511static int should_ignore_root(struct btrfs_root *root)
512{
513 struct btrfs_root *reloc_root;
514
515 if (!root->ref_cows)
516 return 0;
517
518 reloc_root = root->reloc_root;
519 if (!reloc_root)
520 return 0;
521
522 if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
523 root->fs_info->running_transaction->transid - 1)
524 return 0;
525 /*
526 * if there is reloc tree and it was created in previous
527 * transaction backref lookup can find the reloc tree,
528 * so backref node for the fs tree root is useless for
529 * relocation.
530 */
531 return 1;
532}
533
342/* 534/*
343 * find reloc tree by address of tree root 535 * find reloc tree by address of tree root
344 */ 536 */
@@ -453,11 +645,12 @@ int find_inline_backref(struct extent_buffer *leaf, int slot,
453 * for all upper level blocks that directly/indirectly reference the 645 * for all upper level blocks that directly/indirectly reference the
454 * block are also cached. 646 * block are also cached.
455 */ 647 */
456static struct backref_node *build_backref_tree(struct reloc_control *rc, 648static noinline_for_stack
457 struct backref_cache *cache, 649struct backref_node *build_backref_tree(struct reloc_control *rc,
458 struct btrfs_key *node_key, 650 struct btrfs_key *node_key,
459 int level, u64 bytenr) 651 int level, u64 bytenr)
460{ 652{
653 struct backref_cache *cache = &rc->backref_cache;
461 struct btrfs_path *path1; 654 struct btrfs_path *path1;
462 struct btrfs_path *path2; 655 struct btrfs_path *path2;
463 struct extent_buffer *eb; 656 struct extent_buffer *eb;
@@ -473,6 +666,8 @@ static struct backref_node *build_backref_tree(struct reloc_control *rc,
473 unsigned long end; 666 unsigned long end;
474 unsigned long ptr; 667 unsigned long ptr;
475 LIST_HEAD(list); 668 LIST_HEAD(list);
669 LIST_HEAD(useless);
670 int cowonly;
476 int ret; 671 int ret;
477 int err = 0; 672 int err = 0;
478 673
@@ -483,15 +678,13 @@ static struct backref_node *build_backref_tree(struct reloc_control *rc,
483 goto out; 678 goto out;
484 } 679 }
485 680
486 node = kmalloc(sizeof(*node), GFP_NOFS); 681 node = alloc_backref_node(cache);
487 if (!node) { 682 if (!node) {
488 err = -ENOMEM; 683 err = -ENOMEM;
489 goto out; 684 goto out;
490 } 685 }
491 686
492 backref_node_init(node);
493 node->bytenr = bytenr; 687 node->bytenr = bytenr;
494 node->owner = 0;
495 node->level = level; 688 node->level = level;
496 node->lowest = 1; 689 node->lowest = 1;
497 cur = node; 690 cur = node;
@@ -587,17 +780,20 @@ again:
587#ifdef BTRFS_COMPAT_EXTENT_TREE_V0 780#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
588 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY || 781 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
589 key.type == BTRFS_EXTENT_REF_V0_KEY) { 782 key.type == BTRFS_EXTENT_REF_V0_KEY) {
590 if (key.objectid == key.offset && 783 if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
591 key.type == BTRFS_EXTENT_REF_V0_KEY) {
592 struct btrfs_extent_ref_v0 *ref0; 784 struct btrfs_extent_ref_v0 *ref0;
593 ref0 = btrfs_item_ptr(eb, path1->slots[0], 785 ref0 = btrfs_item_ptr(eb, path1->slots[0],
594 struct btrfs_extent_ref_v0); 786 struct btrfs_extent_ref_v0);
595 root = find_tree_root(rc, eb, ref0); 787 root = find_tree_root(rc, eb, ref0);
596 if (root) 788 if (!root->ref_cows)
597 cur->root = root; 789 cur->cowonly = 1;
598 else 790 if (key.objectid == key.offset) {
599 cur->old_root = 1; 791 if (root && !should_ignore_root(root))
600 break; 792 cur->root = root;
793 else
794 list_add(&cur->list, &useless);
795 break;
796 }
601 } 797 }
602#else 798#else
603 BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY); 799 BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
@@ -614,22 +810,20 @@ again:
614 break; 810 break;
615 } 811 }
616 812
617 edge = kzalloc(sizeof(*edge), GFP_NOFS); 813 edge = alloc_backref_edge(cache);
618 if (!edge) { 814 if (!edge) {
619 err = -ENOMEM; 815 err = -ENOMEM;
620 goto out; 816 goto out;
621 } 817 }
622 rb_node = tree_search(&cache->rb_root, key.offset); 818 rb_node = tree_search(&cache->rb_root, key.offset);
623 if (!rb_node) { 819 if (!rb_node) {
624 upper = kmalloc(sizeof(*upper), GFP_NOFS); 820 upper = alloc_backref_node(cache);
625 if (!upper) { 821 if (!upper) {
626 kfree(edge); 822 free_backref_edge(cache, edge);
627 err = -ENOMEM; 823 err = -ENOMEM;
628 goto out; 824 goto out;
629 } 825 }
630 backref_node_init(upper);
631 upper->bytenr = key.offset; 826 upper->bytenr = key.offset;
632 upper->owner = 0;
633 upper->level = cur->level + 1; 827 upper->level = cur->level + 1;
634 /* 828 /*
635 * backrefs for the upper level block isn't 829 * backrefs for the upper level block isn't
@@ -639,11 +833,12 @@ again:
639 } else { 833 } else {
640 upper = rb_entry(rb_node, struct backref_node, 834 upper = rb_entry(rb_node, struct backref_node,
641 rb_node); 835 rb_node);
836 BUG_ON(!upper->checked);
642 INIT_LIST_HEAD(&edge->list[UPPER]); 837 INIT_LIST_HEAD(&edge->list[UPPER]);
643 } 838 }
644 list_add(&edge->list[LOWER], &cur->upper); 839 list_add_tail(&edge->list[LOWER], &cur->upper);
645 edge->node[UPPER] = upper;
646 edge->node[LOWER] = cur; 840 edge->node[LOWER] = cur;
841 edge->node[UPPER] = upper;
647 842
648 goto next; 843 goto next;
649 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) { 844 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
@@ -657,11 +852,17 @@ again:
657 goto out; 852 goto out;
658 } 853 }
659 854
855 if (!root->ref_cows)
856 cur->cowonly = 1;
857
660 if (btrfs_root_level(&root->root_item) == cur->level) { 858 if (btrfs_root_level(&root->root_item) == cur->level) {
661 /* tree root */ 859 /* tree root */
662 BUG_ON(btrfs_root_bytenr(&root->root_item) != 860 BUG_ON(btrfs_root_bytenr(&root->root_item) !=
663 cur->bytenr); 861 cur->bytenr);
664 cur->root = root; 862 if (should_ignore_root(root))
863 list_add(&cur->list, &useless);
864 else
865 cur->root = root;
665 break; 866 break;
666 } 867 }
667 868
@@ -692,11 +893,14 @@ again:
692 if (!path2->nodes[level]) { 893 if (!path2->nodes[level]) {
693 BUG_ON(btrfs_root_bytenr(&root->root_item) != 894 BUG_ON(btrfs_root_bytenr(&root->root_item) !=
694 lower->bytenr); 895 lower->bytenr);
695 lower->root = root; 896 if (should_ignore_root(root))
897 list_add(&lower->list, &useless);
898 else
899 lower->root = root;
696 break; 900 break;
697 } 901 }
698 902
699 edge = kzalloc(sizeof(*edge), GFP_NOFS); 903 edge = alloc_backref_edge(cache);
700 if (!edge) { 904 if (!edge) {
701 err = -ENOMEM; 905 err = -ENOMEM;
702 goto out; 906 goto out;
@@ -705,16 +909,17 @@ again:
705 eb = path2->nodes[level]; 909 eb = path2->nodes[level];
706 rb_node = tree_search(&cache->rb_root, eb->start); 910 rb_node = tree_search(&cache->rb_root, eb->start);
707 if (!rb_node) { 911 if (!rb_node) {
708 upper = kmalloc(sizeof(*upper), GFP_NOFS); 912 upper = alloc_backref_node(cache);
709 if (!upper) { 913 if (!upper) {
710 kfree(edge); 914 free_backref_edge(cache, edge);
711 err = -ENOMEM; 915 err = -ENOMEM;
712 goto out; 916 goto out;
713 } 917 }
714 backref_node_init(upper);
715 upper->bytenr = eb->start; 918 upper->bytenr = eb->start;
716 upper->owner = btrfs_header_owner(eb); 919 upper->owner = btrfs_header_owner(eb);
717 upper->level = lower->level + 1; 920 upper->level = lower->level + 1;
921 if (!root->ref_cows)
922 upper->cowonly = 1;
718 923
719 /* 924 /*
720 * if we know the block isn't shared 925 * if we know the block isn't shared
@@ -744,10 +949,12 @@ again:
744 rb_node); 949 rb_node);
745 BUG_ON(!upper->checked); 950 BUG_ON(!upper->checked);
746 INIT_LIST_HEAD(&edge->list[UPPER]); 951 INIT_LIST_HEAD(&edge->list[UPPER]);
952 if (!upper->owner)
953 upper->owner = btrfs_header_owner(eb);
747 } 954 }
748 list_add_tail(&edge->list[LOWER], &lower->upper); 955 list_add_tail(&edge->list[LOWER], &lower->upper);
749 edge->node[UPPER] = upper;
750 edge->node[LOWER] = lower; 956 edge->node[LOWER] = lower;
957 edge->node[UPPER] = upper;
751 958
752 if (rb_node) 959 if (rb_node)
753 break; 960 break;
@@ -785,8 +992,13 @@ next:
785 * into the cache. 992 * into the cache.
786 */ 993 */
787 BUG_ON(!node->checked); 994 BUG_ON(!node->checked);
788 rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node); 995 cowonly = node->cowonly;
789 BUG_ON(rb_node); 996 if (!cowonly) {
997 rb_node = tree_insert(&cache->rb_root, node->bytenr,
998 &node->rb_node);
999 BUG_ON(rb_node);
1000 list_add_tail(&node->lower, &cache->leaves);
1001 }
790 1002
791 list_for_each_entry(edge, &node->upper, list[LOWER]) 1003 list_for_each_entry(edge, &node->upper, list[LOWER])
792 list_add_tail(&edge->list[UPPER], &list); 1004 list_add_tail(&edge->list[UPPER], &list);
@@ -795,6 +1007,14 @@ next:
795 edge = list_entry(list.next, struct backref_edge, list[UPPER]); 1007 edge = list_entry(list.next, struct backref_edge, list[UPPER]);
796 list_del_init(&edge->list[UPPER]); 1008 list_del_init(&edge->list[UPPER]);
797 upper = edge->node[UPPER]; 1009 upper = edge->node[UPPER];
1010 if (upper->detached) {
1011 list_del(&edge->list[LOWER]);
1012 lower = edge->node[LOWER];
1013 free_backref_edge(cache, edge);
1014 if (list_empty(&lower->upper))
1015 list_add(&lower->list, &useless);
1016 continue;
1017 }
798 1018
799 if (!RB_EMPTY_NODE(&upper->rb_node)) { 1019 if (!RB_EMPTY_NODE(&upper->rb_node)) {
800 if (upper->lowest) { 1020 if (upper->lowest) {
@@ -807,25 +1027,69 @@ next:
807 } 1027 }
808 1028
809 BUG_ON(!upper->checked); 1029 BUG_ON(!upper->checked);
810 rb_node = tree_insert(&cache->rb_root, upper->bytenr, 1030 BUG_ON(cowonly != upper->cowonly);
811 &upper->rb_node); 1031 if (!cowonly) {
812 BUG_ON(rb_node); 1032 rb_node = tree_insert(&cache->rb_root, upper->bytenr,
1033 &upper->rb_node);
1034 BUG_ON(rb_node);
1035 }
813 1036
814 list_add_tail(&edge->list[UPPER], &upper->lower); 1037 list_add_tail(&edge->list[UPPER], &upper->lower);
815 1038
816 list_for_each_entry(edge, &upper->upper, list[LOWER]) 1039 list_for_each_entry(edge, &upper->upper, list[LOWER])
817 list_add_tail(&edge->list[UPPER], &list); 1040 list_add_tail(&edge->list[UPPER], &list);
818 } 1041 }
1042 /*
1043 * process useless backref nodes. backref nodes for tree leaves
1044 * are deleted from the cache. backref nodes for upper level
1045 * tree blocks are left in the cache to avoid unnecessary backref
1046 * lookup.
1047 */
1048 while (!list_empty(&useless)) {
1049 upper = list_entry(useless.next, struct backref_node, list);
1050 list_del_init(&upper->list);
1051 BUG_ON(!list_empty(&upper->upper));
1052 if (upper == node)
1053 node = NULL;
1054 if (upper->lowest) {
1055 list_del_init(&upper->lower);
1056 upper->lowest = 0;
1057 }
1058 while (!list_empty(&upper->lower)) {
1059 edge = list_entry(upper->lower.next,
1060 struct backref_edge, list[UPPER]);
1061 list_del(&edge->list[UPPER]);
1062 list_del(&edge->list[LOWER]);
1063 lower = edge->node[LOWER];
1064 free_backref_edge(cache, edge);
1065
1066 if (list_empty(&lower->upper))
1067 list_add(&lower->list, &useless);
1068 }
1069 __mark_block_processed(rc, upper);
1070 if (upper->level > 0) {
1071 list_add(&upper->list, &cache->detached);
1072 upper->detached = 1;
1073 } else {
1074 rb_erase(&upper->rb_node, &cache->rb_root);
1075 free_backref_node(cache, upper);
1076 }
1077 }
819out: 1078out:
820 btrfs_free_path(path1); 1079 btrfs_free_path(path1);
821 btrfs_free_path(path2); 1080 btrfs_free_path(path2);
822 if (err) { 1081 if (err) {
823 INIT_LIST_HEAD(&list); 1082 while (!list_empty(&useless)) {
1083 lower = list_entry(useless.next,
1084 struct backref_node, upper);
1085 list_del_init(&lower->upper);
1086 }
824 upper = node; 1087 upper = node;
1088 INIT_LIST_HEAD(&list);
825 while (upper) { 1089 while (upper) {
826 if (RB_EMPTY_NODE(&upper->rb_node)) { 1090 if (RB_EMPTY_NODE(&upper->rb_node)) {
827 list_splice_tail(&upper->upper, &list); 1091 list_splice_tail(&upper->upper, &list);
828 kfree(upper); 1092 free_backref_node(cache, upper);
829 } 1093 }
830 1094
831 if (list_empty(&list)) 1095 if (list_empty(&list))
@@ -833,15 +1097,104 @@ out:
833 1097
834 edge = list_entry(list.next, struct backref_edge, 1098 edge = list_entry(list.next, struct backref_edge,
835 list[LOWER]); 1099 list[LOWER]);
1100 list_del(&edge->list[LOWER]);
836 upper = edge->node[UPPER]; 1101 upper = edge->node[UPPER];
837 kfree(edge); 1102 free_backref_edge(cache, edge);
838 } 1103 }
839 return ERR_PTR(err); 1104 return ERR_PTR(err);
840 } 1105 }
1106 BUG_ON(node && node->detached);
841 return node; 1107 return node;
842} 1108}
843 1109
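The useless-list handling at the end of the new build_backref_tree() is a worklist algorithm: when a node turns out to be useless its downward edges are freed, and any lower node that loses its last upper edge joins the worklist in turn. A compact standalone illustration of that cascade, using a plain adjacency matrix instead of the kernel's list_head/rb_root machinery (node numbering is arbitrary):

    #include <stdio.h>

    #define NNODES 4

    /* up[i][j] != 0 means node i has an edge to upper node j */
    static int up[NNODES][NNODES] = {
        [0][2] = 1, [1][2] = 1, [2][3] = 1,     /* 0 -> 2, 1 -> 2, 2 -> 3 */
    };

    static int upper_count(int n)
    {
        int c = 0;
        for (int j = 0; j < NNODES; j++)
            c += up[n][j];
        return c;
    }

    /*
     * Drop 'start' and cascade: whenever removing an upper node leaves a
     * lower node with no uppers, that lower node becomes useless as well.
     */
    static int drop_useless(int start, int useless[])
    {
        int stack[NNODES], top = 0, n = 0;

        stack[top++] = start;
        while (top) {
            int u = stack[--top];
            useless[n++] = u;
            for (int low = 0; low < NNODES; low++) {
                if (!up[low][u])
                    continue;
                up[low][u] = 0;                 /* free the edge low -> u */
                if (upper_count(low) == 0)      /* no uppers left */
                    stack[top++] = low;
            }
        }
        return n;
    }

    int main(void)
    {
        int useless[NNODES];
        int n = drop_useless(3, useless);       /* node 3 was found useless */

        for (int i = 0; i < n; i++)
            printf("useless: %d\n", useless[i]);
        return 0;
    }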
844/* 1110/*
 1111 * helper to add a backref node for the newly created snapshot.
 1112 * the backref node is created by cloning the backref node that
 1113 * corresponds to the root of the source tree
1114 */
1115static int clone_backref_node(struct btrfs_trans_handle *trans,
1116 struct reloc_control *rc,
1117 struct btrfs_root *src,
1118 struct btrfs_root *dest)
1119{
1120 struct btrfs_root *reloc_root = src->reloc_root;
1121 struct backref_cache *cache = &rc->backref_cache;
1122 struct backref_node *node = NULL;
1123 struct backref_node *new_node;
1124 struct backref_edge *edge;
1125 struct backref_edge *new_edge;
1126 struct rb_node *rb_node;
1127
1128 if (cache->last_trans > 0)
1129 update_backref_cache(trans, cache);
1130
1131 rb_node = tree_search(&cache->rb_root, src->commit_root->start);
1132 if (rb_node) {
1133 node = rb_entry(rb_node, struct backref_node, rb_node);
1134 if (node->detached)
1135 node = NULL;
1136 else
1137 BUG_ON(node->new_bytenr != reloc_root->node->start);
1138 }
1139
1140 if (!node) {
1141 rb_node = tree_search(&cache->rb_root,
1142 reloc_root->commit_root->start);
1143 if (rb_node) {
1144 node = rb_entry(rb_node, struct backref_node,
1145 rb_node);
1146 BUG_ON(node->detached);
1147 }
1148 }
1149
1150 if (!node)
1151 return 0;
1152
1153 new_node = alloc_backref_node(cache);
1154 if (!new_node)
1155 return -ENOMEM;
1156
1157 new_node->bytenr = dest->node->start;
1158 new_node->level = node->level;
1159 new_node->lowest = node->lowest;
1160 new_node->root = dest;
1161
1162 if (!node->lowest) {
1163 list_for_each_entry(edge, &node->lower, list[UPPER]) {
1164 new_edge = alloc_backref_edge(cache);
1165 if (!new_edge)
1166 goto fail;
1167
1168 new_edge->node[UPPER] = new_node;
1169 new_edge->node[LOWER] = edge->node[LOWER];
1170 list_add_tail(&new_edge->list[UPPER],
1171 &new_node->lower);
1172 }
1173 }
1174
1175 rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
1176 &new_node->rb_node);
1177 BUG_ON(rb_node);
1178
1179 if (!new_node->lowest) {
1180 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
1181 list_add_tail(&new_edge->list[LOWER],
1182 &new_edge->node[LOWER]->upper);
1183 }
1184 }
1185 return 0;
1186fail:
1187 while (!list_empty(&new_node->lower)) {
1188 new_edge = list_entry(new_node->lower.next,
1189 struct backref_edge, list[UPPER]);
1190 list_del(&new_edge->list[UPPER]);
1191 free_backref_edge(cache, new_edge);
1192 }
1193 free_backref_node(cache, new_node);
1194 return -ENOMEM;
1195}
1196
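clone_backref_node() duplicates a node together with its downward edges and, on allocation failure, unwinds whatever was already built. A small standalone sketch of the same clone-with-rollback shape; struct gnode, struct edge and their fields are illustrative, not the btrfs types:

    #include <stdlib.h>

    struct edge {
        struct edge *next;
        int lower_id;          /* identifies the lower node this edge points at */
    };

    struct gnode {
        unsigned long long bytenr;
        struct edge *lower;    /* list of edges to lower nodes */
    };

    /*
     * Clone 'src' so the copy references the same lower nodes. If any
     * edge allocation fails, the already-cloned edges are freed and NULL
     * is returned, mirroring the fail: path above.
     */
    static struct gnode *clone_node(const struct gnode *src,
                                    unsigned long long new_bytenr)
    {
        struct gnode *clone = calloc(1, sizeof(*clone));
        struct edge *e, *ne;

        if (!clone)
            return NULL;
        clone->bytenr = new_bytenr;

        for (e = src->lower; e; e = e->next) {
            ne = malloc(sizeof(*ne));
            if (!ne)
                goto fail;
            ne->lower_id = e->lower_id;
            ne->next = clone->lower;    /* prepend to the clone's edge list */
            clone->lower = ne;
        }
        return clone;
    fail:
        while ((e = clone->lower)) {
            clone->lower = e->next;
            free(e);
        }
        free(clone);
        return NULL;
    }

    int main(void)
    {
        struct edge e = { NULL, 7 };
        struct gnode src = { 100, &e };
        struct gnode *c = clone_node(&src, 200);

        while (c && c->lower) {
            struct edge *tmp = c->lower;
            c->lower = tmp->next;
            free(tmp);
        }
        free(c);
        return 0;
    }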
1197/*
845 * helper to add 'address of tree root -> reloc tree' mapping 1198 * helper to add 'address of tree root -> reloc tree' mapping
846 */ 1199 */
847static int __add_reloc_root(struct btrfs_root *root) 1200static int __add_reloc_root(struct btrfs_root *root)
@@ -901,12 +1254,8 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
901 return 0; 1254 return 0;
902} 1255}
903 1256
904/* 1257static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
905 * create reloc tree for a given fs tree. reloc tree is just a 1258 struct btrfs_root *root, u64 objectid)
906 * snapshot of the fs tree with special root objectid.
907 */
908int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
909 struct btrfs_root *root)
910{ 1259{
911 struct btrfs_root *reloc_root; 1260 struct btrfs_root *reloc_root;
912 struct extent_buffer *eb; 1261 struct extent_buffer *eb;
@@ -914,36 +1263,45 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
914 struct btrfs_key root_key; 1263 struct btrfs_key root_key;
915 int ret; 1264 int ret;
916 1265
917 if (root->reloc_root) {
918 reloc_root = root->reloc_root;
919 reloc_root->last_trans = trans->transid;
920 return 0;
921 }
922
923 if (!root->fs_info->reloc_ctl ||
924 !root->fs_info->reloc_ctl->create_reloc_root ||
925 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
926 return 0;
927
928 root_item = kmalloc(sizeof(*root_item), GFP_NOFS); 1266 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
929 BUG_ON(!root_item); 1267 BUG_ON(!root_item);
930 1268
931 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID; 1269 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
932 root_key.type = BTRFS_ROOT_ITEM_KEY; 1270 root_key.type = BTRFS_ROOT_ITEM_KEY;
933 root_key.offset = root->root_key.objectid; 1271 root_key.offset = objectid;
934 1272
935 ret = btrfs_copy_root(trans, root, root->commit_root, &eb, 1273 if (root->root_key.objectid == objectid) {
936 BTRFS_TREE_RELOC_OBJECTID); 1274 /* called by btrfs_init_reloc_root */
937 BUG_ON(ret); 1275 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
1276 BTRFS_TREE_RELOC_OBJECTID);
1277 BUG_ON(ret);
1278
1279 btrfs_set_root_last_snapshot(&root->root_item,
1280 trans->transid - 1);
1281 } else {
1282 /*
1283 * called by btrfs_reloc_post_snapshot_hook.
                        1284		 * the source tree is a reloc tree; all tree blocks
                        1285		 * modified after it was created have the RELOC flag
                        1286		 * set in their headers, so it's OK not to update
                        1287		 * 'last_snapshot'.
1288 */
1289 ret = btrfs_copy_root(trans, root, root->node, &eb,
1290 BTRFS_TREE_RELOC_OBJECTID);
1291 BUG_ON(ret);
1292 }
938 1293
939 btrfs_set_root_last_snapshot(&root->root_item, trans->transid - 1);
940 memcpy(root_item, &root->root_item, sizeof(*root_item)); 1294 memcpy(root_item, &root->root_item, sizeof(*root_item));
941 btrfs_set_root_refs(root_item, 1);
942 btrfs_set_root_bytenr(root_item, eb->start); 1295 btrfs_set_root_bytenr(root_item, eb->start);
943 btrfs_set_root_level(root_item, btrfs_header_level(eb)); 1296 btrfs_set_root_level(root_item, btrfs_header_level(eb));
944 btrfs_set_root_generation(root_item, trans->transid); 1297 btrfs_set_root_generation(root_item, trans->transid);
945 memset(&root_item->drop_progress, 0, sizeof(struct btrfs_disk_key)); 1298
946 root_item->drop_level = 0; 1299 if (root->root_key.objectid == objectid) {
1300 btrfs_set_root_refs(root_item, 0);
1301 memset(&root_item->drop_progress, 0,
1302 sizeof(struct btrfs_disk_key));
1303 root_item->drop_level = 0;
1304 }
947 1305
948 btrfs_tree_unlock(eb); 1306 btrfs_tree_unlock(eb);
949 free_extent_buffer(eb); 1307 free_extent_buffer(eb);
@@ -957,6 +1315,37 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
957 &root_key); 1315 &root_key);
958 BUG_ON(IS_ERR(reloc_root)); 1316 BUG_ON(IS_ERR(reloc_root));
959 reloc_root->last_trans = trans->transid; 1317 reloc_root->last_trans = trans->transid;
1318 return reloc_root;
1319}
1320
1321/*
1322 * create reloc tree for a given fs tree. reloc tree is just a
1323 * snapshot of the fs tree with special root objectid.
1324 */
1325int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
1326 struct btrfs_root *root)
1327{
1328 struct btrfs_root *reloc_root;
1329 struct reloc_control *rc = root->fs_info->reloc_ctl;
1330 int clear_rsv = 0;
1331
1332 if (root->reloc_root) {
1333 reloc_root = root->reloc_root;
1334 reloc_root->last_trans = trans->transid;
1335 return 0;
1336 }
1337
1338 if (!rc || !rc->create_reloc_tree ||
1339 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1340 return 0;
1341
1342 if (!trans->block_rsv) {
1343 trans->block_rsv = rc->block_rsv;
1344 clear_rsv = 1;
1345 }
1346 reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
1347 if (clear_rsv)
1348 trans->block_rsv = NULL;
960 1349
961 __add_reloc_root(reloc_root); 1350 __add_reloc_root(reloc_root);
962 root->reloc_root = reloc_root; 1351 root->reloc_root = reloc_root;
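The clear_rsv logic in the reworked btrfs_init_reloc_root() is a borrow-and-restore pattern: if the transaction has no block reserve of its own, it temporarily uses the relocation reserve and drops it again once the reloc root has been created. A stripped-down sketch of that pattern with stand-in types (struct rsv, struct txn), not the btrfs ones:

    #include <stdio.h>

    struct rsv { int id; };
    struct txn { struct rsv *block_rsv; };

    /*
     * Run 'body' with the relocation reserve temporarily installed on the
     * transaction, but only if the transaction did not already have one;
     * afterwards put things back the way they were.
     */
    static void with_reloc_rsv(struct txn *trans, struct rsv *reloc_rsv,
                               void (*body)(struct txn *))
    {
        int borrowed = 0;

        if (!trans->block_rsv) {
            trans->block_rsv = reloc_rsv;   /* borrow the reloc reserve */
            borrowed = 1;
        }
        body(trans);
        if (borrowed)
            trans->block_rsv = NULL;        /* give it back */
    }

    static void create_something(struct txn *trans)
    {
        printf("using reserve %d\n", trans->block_rsv ? trans->block_rsv->id : -1);
    }

    int main(void)
    {
        struct rsv reloc_rsv = { 1 };
        struct txn trans = { NULL };

        with_reloc_rsv(&trans, &reloc_rsv, create_something);  /* prints: using reserve 1 */
        return 0;
    }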
@@ -980,7 +1369,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
980 reloc_root = root->reloc_root; 1369 reloc_root = root->reloc_root;
981 root_item = &reloc_root->root_item; 1370 root_item = &reloc_root->root_item;
982 1371
983 if (btrfs_root_refs(root_item) == 0) { 1372 if (root->fs_info->reloc_ctl->merge_reloc_tree &&
1373 btrfs_root_refs(root_item) == 0) {
984 root->reloc_root = NULL; 1374 root->reloc_root = NULL;
985 del = 1; 1375 del = 1;
986 } 1376 }
@@ -1102,8 +1492,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1102 goto out; 1492 goto out;
1103 } 1493 }
1104 1494
1105 if (new_bytenr) 1495 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1106 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1107 ret = 0; 1496 ret = 0;
1108out: 1497out:
1109 btrfs_free_path(path); 1498 btrfs_free_path(path);
@@ -1114,19 +1503,18 @@ out:
1114 * update file extent items in the tree leaf to point to 1503 * update file extent items in the tree leaf to point to
1115 * the new locations. 1504 * the new locations.
1116 */ 1505 */
1117static int replace_file_extents(struct btrfs_trans_handle *trans, 1506static noinline_for_stack
1118 struct reloc_control *rc, 1507int replace_file_extents(struct btrfs_trans_handle *trans,
1119 struct btrfs_root *root, 1508 struct reloc_control *rc,
1120 struct extent_buffer *leaf, 1509 struct btrfs_root *root,
1121 struct list_head *inode_list) 1510 struct extent_buffer *leaf)
1122{ 1511{
1123 struct btrfs_key key; 1512 struct btrfs_key key;
1124 struct btrfs_file_extent_item *fi; 1513 struct btrfs_file_extent_item *fi;
1125 struct inode *inode = NULL; 1514 struct inode *inode = NULL;
1126 struct inodevec *ivec = NULL;
1127 u64 parent; 1515 u64 parent;
1128 u64 bytenr; 1516 u64 bytenr;
1129 u64 new_bytenr; 1517 u64 new_bytenr = 0;
1130 u64 num_bytes; 1518 u64 num_bytes;
1131 u64 end; 1519 u64 end;
1132 u32 nritems; 1520 u32 nritems;
@@ -1166,21 +1554,12 @@ static int replace_file_extents(struct btrfs_trans_handle *trans,
1166 * to complete and drop the extent cache 1554 * to complete and drop the extent cache
1167 */ 1555 */
1168 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { 1556 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1169 if (!ivec || ivec->nr == INODEVEC_SIZE) {
1170 ivec = kmalloc(sizeof(*ivec), GFP_NOFS);
1171 BUG_ON(!ivec);
1172 ivec->nr = 0;
1173 list_add_tail(&ivec->list, inode_list);
1174 }
1175 if (first) { 1557 if (first) {
1176 inode = find_next_inode(root, key.objectid); 1558 inode = find_next_inode(root, key.objectid);
1177 if (inode)
1178 ivec->inode[ivec->nr++] = inode;
1179 first = 0; 1559 first = 0;
1180 } else if (inode && inode->i_ino < key.objectid) { 1560 } else if (inode && inode->i_ino < key.objectid) {
1561 btrfs_add_delayed_iput(inode);
1181 inode = find_next_inode(root, key.objectid); 1562 inode = find_next_inode(root, key.objectid);
1182 if (inode)
1183 ivec->inode[ivec->nr++] = inode;
1184 } 1563 }
1185 if (inode && inode->i_ino == key.objectid) { 1564 if (inode && inode->i_ino == key.objectid) {
1186 end = key.offset + 1565 end = key.offset +
@@ -1204,8 +1583,10 @@ static int replace_file_extents(struct btrfs_trans_handle *trans,
1204 1583
1205 ret = get_new_location(rc->data_inode, &new_bytenr, 1584 ret = get_new_location(rc->data_inode, &new_bytenr,
1206 bytenr, num_bytes); 1585 bytenr, num_bytes);
1207 if (ret > 0) 1586 if (ret > 0) {
1587 WARN_ON(1);
1208 continue; 1588 continue;
1589 }
1209 BUG_ON(ret < 0); 1590 BUG_ON(ret < 0);
1210 1591
1211 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr); 1592 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
@@ -1225,6 +1606,8 @@ static int replace_file_extents(struct btrfs_trans_handle *trans,
1225 } 1606 }
1226 if (dirty) 1607 if (dirty)
1227 btrfs_mark_buffer_dirty(leaf); 1608 btrfs_mark_buffer_dirty(leaf);
1609 if (inode)
1610 btrfs_add_delayed_iput(inode);
1228 return 0; 1611 return 0;
1229} 1612}
1230 1613
@@ -1248,11 +1631,11 @@ int memcmp_node_keys(struct extent_buffer *eb, int slot,
1248 * if no block got replaced, 0 is returned. if there are other 1631 * if no block got replaced, 0 is returned. if there are other
1249 * errors, a negative error number is returned. 1632 * errors, a negative error number is returned.
1250 */ 1633 */
1251static int replace_path(struct btrfs_trans_handle *trans, 1634static noinline_for_stack
1252 struct btrfs_root *dest, struct btrfs_root *src, 1635int replace_path(struct btrfs_trans_handle *trans,
1253 struct btrfs_path *path, struct btrfs_key *next_key, 1636 struct btrfs_root *dest, struct btrfs_root *src,
1254 struct extent_buffer **leaf, 1637 struct btrfs_path *path, struct btrfs_key *next_key,
1255 int lowest_level, int max_level) 1638 int lowest_level, int max_level)
1256{ 1639{
1257 struct extent_buffer *eb; 1640 struct extent_buffer *eb;
1258 struct extent_buffer *parent; 1641 struct extent_buffer *parent;
@@ -1263,16 +1646,16 @@ static int replace_path(struct btrfs_trans_handle *trans,
1263 u64 new_ptr_gen; 1646 u64 new_ptr_gen;
1264 u64 last_snapshot; 1647 u64 last_snapshot;
1265 u32 blocksize; 1648 u32 blocksize;
1649 int cow = 0;
1266 int level; 1650 int level;
1267 int ret; 1651 int ret;
1268 int slot; 1652 int slot;
1269 1653
1270 BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); 1654 BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1271 BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); 1655 BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
1272 BUG_ON(lowest_level > 1 && leaf);
1273 1656
1274 last_snapshot = btrfs_root_last_snapshot(&src->root_item); 1657 last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1275 1658again:
1276 slot = path->slots[lowest_level]; 1659 slot = path->slots[lowest_level];
1277 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot); 1660 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1278 1661
@@ -1286,8 +1669,10 @@ static int replace_path(struct btrfs_trans_handle *trans,
1286 return 0; 1669 return 0;
1287 } 1670 }
1288 1671
1289 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb); 1672 if (cow) {
1290 BUG_ON(ret); 1673 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
1674 BUG_ON(ret);
1675 }
1291 btrfs_set_lock_blocking(eb); 1676 btrfs_set_lock_blocking(eb);
1292 1677
1293 if (next_key) { 1678 if (next_key) {
@@ -1331,7 +1716,7 @@ static int replace_path(struct btrfs_trans_handle *trans,
1331 1716
1332 if (new_bytenr == 0 || old_ptr_gen > last_snapshot || 1717 if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1333 memcmp_node_keys(parent, slot, path, level)) { 1718 memcmp_node_keys(parent, slot, path, level)) {
1334 if (level <= lowest_level && !leaf) { 1719 if (level <= lowest_level) {
1335 ret = 0; 1720 ret = 0;
1336 break; 1721 break;
1337 } 1722 }
@@ -1339,16 +1724,12 @@ static int replace_path(struct btrfs_trans_handle *trans,
1339 eb = read_tree_block(dest, old_bytenr, blocksize, 1724 eb = read_tree_block(dest, old_bytenr, blocksize,
1340 old_ptr_gen); 1725 old_ptr_gen);
1341 btrfs_tree_lock(eb); 1726 btrfs_tree_lock(eb);
1342 ret = btrfs_cow_block(trans, dest, eb, parent, 1727 if (cow) {
1343 slot, &eb); 1728 ret = btrfs_cow_block(trans, dest, eb, parent,
1344 BUG_ON(ret); 1729 slot, &eb);
1345 btrfs_set_lock_blocking(eb); 1730 BUG_ON(ret);
1346
1347 if (level <= lowest_level) {
1348 *leaf = eb;
1349 ret = 0;
1350 break;
1351 } 1731 }
1732 btrfs_set_lock_blocking(eb);
1352 1733
1353 btrfs_tree_unlock(parent); 1734 btrfs_tree_unlock(parent);
1354 free_extent_buffer(parent); 1735 free_extent_buffer(parent);
@@ -1357,6 +1738,13 @@ static int replace_path(struct btrfs_trans_handle *trans,
1357 continue; 1738 continue;
1358 } 1739 }
1359 1740
1741 if (!cow) {
1742 btrfs_tree_unlock(parent);
1743 free_extent_buffer(parent);
1744 cow = 1;
1745 goto again;
1746 }
1747
1360 btrfs_node_key_to_cpu(path->nodes[level], &key, 1748 btrfs_node_key_to_cpu(path->nodes[level], &key,
1361 path->slots[level]); 1749 path->slots[level]);
1362 btrfs_release_path(src, path); 1750 btrfs_release_path(src, path);
@@ -1562,20 +1950,6 @@ static int invalidate_extent_cache(struct btrfs_root *root,
1562 return 0; 1950 return 0;
1563} 1951}
1564 1952
1565static void put_inodes(struct list_head *list)
1566{
1567 struct inodevec *ivec;
1568 while (!list_empty(list)) {
1569 ivec = list_entry(list->next, struct inodevec, list);
1570 list_del(&ivec->list);
1571 while (ivec->nr > 0) {
1572 ivec->nr--;
1573 iput(ivec->inode[ivec->nr]);
1574 }
1575 kfree(ivec);
1576 }
1577}
1578
1579static int find_next_key(struct btrfs_path *path, int level, 1953static int find_next_key(struct btrfs_path *path, int level,
1580 struct btrfs_key *key) 1954 struct btrfs_key *key)
1581 1955
@@ -1608,13 +1982,14 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1608 struct btrfs_root *reloc_root; 1982 struct btrfs_root *reloc_root;
1609 struct btrfs_root_item *root_item; 1983 struct btrfs_root_item *root_item;
1610 struct btrfs_path *path; 1984 struct btrfs_path *path;
1611 struct extent_buffer *leaf = NULL; 1985 struct extent_buffer *leaf;
1612 unsigned long nr; 1986 unsigned long nr;
1613 int level; 1987 int level;
1614 int max_level; 1988 int max_level;
1615 int replaced = 0; 1989 int replaced = 0;
1616 int ret; 1990 int ret;
1617 int err = 0; 1991 int err = 0;
1992 u32 min_reserved;
1618 1993
1619 path = btrfs_alloc_path(); 1994 path = btrfs_alloc_path();
1620 if (!path) 1995 if (!path)
@@ -1648,34 +2023,23 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1648 btrfs_unlock_up_safe(path, 0); 2023 btrfs_unlock_up_safe(path, 0);
1649 } 2024 }
1650 2025
1651 if (level == 0 && rc->stage == UPDATE_DATA_PTRS) { 2026 min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
1652 trans = btrfs_start_transaction(root, 1); 2027 memset(&next_key, 0, sizeof(next_key));
1653 2028
1654 leaf = path->nodes[0]; 2029 while (1) {
1655 btrfs_item_key_to_cpu(leaf, &key, 0); 2030 trans = btrfs_start_transaction(root, 0);
1656 btrfs_release_path(reloc_root, path); 2031 trans->block_rsv = rc->block_rsv;
1657 2032
1658 ret = btrfs_search_slot(trans, root, &key, path, 0, 1); 2033 ret = btrfs_block_rsv_check(trans, root, rc->block_rsv,
1659 if (ret < 0) { 2034 min_reserved, 0);
1660 err = ret; 2035 if (ret) {
1661 goto out; 2036 BUG_ON(ret != -EAGAIN);
2037 ret = btrfs_commit_transaction(trans, root);
2038 BUG_ON(ret);
2039 continue;
1662 } 2040 }
1663 2041
1664 leaf = path->nodes[0];
1665 btrfs_unlock_up_safe(path, 1);
1666 ret = replace_file_extents(trans, rc, root, leaf,
1667 &inode_list);
1668 if (ret < 0)
1669 err = ret;
1670 goto out;
1671 }
1672
1673 memset(&next_key, 0, sizeof(next_key));
1674
1675 while (1) {
1676 leaf = NULL;
1677 replaced = 0; 2042 replaced = 0;
1678 trans = btrfs_start_transaction(root, 1);
1679 max_level = level; 2043 max_level = level;
1680 2044
1681 ret = walk_down_reloc_tree(reloc_root, path, &level); 2045 ret = walk_down_reloc_tree(reloc_root, path, &level);
@@ -1689,14 +2053,9 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1689 if (!find_next_key(path, level, &key) && 2053 if (!find_next_key(path, level, &key) &&
1690 btrfs_comp_cpu_keys(&next_key, &key) >= 0) { 2054 btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
1691 ret = 0; 2055 ret = 0;
1692 } else if (level == 1 && rc->stage == UPDATE_DATA_PTRS) {
1693 ret = replace_path(trans, root, reloc_root,
1694 path, &next_key, &leaf,
1695 level, max_level);
1696 } else { 2056 } else {
1697 ret = replace_path(trans, root, reloc_root, 2057 ret = replace_path(trans, root, reloc_root, path,
1698 path, &next_key, NULL, 2058 &next_key, level, max_level);
1699 level, max_level);
1700 } 2059 }
1701 if (ret < 0) { 2060 if (ret < 0) {
1702 err = ret; 2061 err = ret;
@@ -1708,16 +2067,6 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1708 btrfs_node_key_to_cpu(path->nodes[level], &key, 2067 btrfs_node_key_to_cpu(path->nodes[level], &key,
1709 path->slots[level]); 2068 path->slots[level]);
1710 replaced = 1; 2069 replaced = 1;
1711 } else if (leaf) {
1712 /*
1713 * no block got replaced, try replacing file extents
1714 */
1715 btrfs_item_key_to_cpu(leaf, &key, 0);
1716 ret = replace_file_extents(trans, rc, root, leaf,
1717 &inode_list);
1718 btrfs_tree_unlock(leaf);
1719 free_extent_buffer(leaf);
1720 BUG_ON(ret < 0);
1721 } 2070 }
1722 2071
1723 ret = walk_up_reloc_tree(reloc_root, path, &level); 2072 ret = walk_up_reloc_tree(reloc_root, path, &level);
@@ -1734,15 +2083,10 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1734 root_item->drop_level = level; 2083 root_item->drop_level = level;
1735 2084
1736 nr = trans->blocks_used; 2085 nr = trans->blocks_used;
1737 btrfs_end_transaction(trans, root); 2086 btrfs_end_transaction_throttle(trans, root);
1738 2087
1739 btrfs_btree_balance_dirty(root, nr); 2088 btrfs_btree_balance_dirty(root, nr);
1740 2089
1741 /*
1742 * put inodes outside transaction, otherwise we may deadlock.
1743 */
1744 put_inodes(&inode_list);
1745
1746 if (replaced && rc->stage == UPDATE_DATA_PTRS) 2090 if (replaced && rc->stage == UPDATE_DATA_PTRS)
1747 invalidate_extent_cache(root, &key, &next_key); 2091 invalidate_extent_cache(root, &key, &next_key);
1748 } 2092 }
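The min_reserved figure used by this loop is sized for a worst-case walk, apparently one COW per level for both the reloc tree and the fs tree. With BTRFS_MAX_LEVEL equal to 8 and, for example, a 4KiB nodesize, that is 4096 * (8 - 1) * 2 = 57344 bytes, i.e. 56KiB per merge step; larger nodesizes scale the reserve linearly.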
@@ -1765,87 +2109,125 @@ out:
1765 sizeof(root_item->drop_progress)); 2109 sizeof(root_item->drop_progress));
1766 root_item->drop_level = 0; 2110 root_item->drop_level = 0;
1767 btrfs_set_root_refs(root_item, 0); 2111 btrfs_set_root_refs(root_item, 0);
2112 btrfs_update_reloc_root(trans, root);
1768 } 2113 }
1769 2114
1770 nr = trans->blocks_used; 2115 nr = trans->blocks_used;
1771 btrfs_end_transaction(trans, root); 2116 btrfs_end_transaction_throttle(trans, root);
1772 2117
1773 btrfs_btree_balance_dirty(root, nr); 2118 btrfs_btree_balance_dirty(root, nr);
1774 2119
1775 put_inodes(&inode_list);
1776
1777 if (replaced && rc->stage == UPDATE_DATA_PTRS) 2120 if (replaced && rc->stage == UPDATE_DATA_PTRS)
1778 invalidate_extent_cache(root, &key, &next_key); 2121 invalidate_extent_cache(root, &key, &next_key);
1779 2122
1780 return err; 2123 return err;
1781} 2124}
1782 2125
1783/* 2126static noinline_for_stack
1784 * callback for the work threads. 2127int prepare_to_merge(struct reloc_control *rc, int err)
1785 * this function merges reloc tree with corresponding fs tree,
1786 * and then drops the reloc tree.
1787 */
1788static void merge_func(struct btrfs_work *work)
1789{ 2128{
1790 struct btrfs_trans_handle *trans; 2129 struct btrfs_root *root = rc->extent_root;
1791 struct btrfs_root *root;
1792 struct btrfs_root *reloc_root; 2130 struct btrfs_root *reloc_root;
1793 struct async_merge *async; 2131 struct btrfs_trans_handle *trans;
2132 LIST_HEAD(reloc_roots);
2133 u64 num_bytes = 0;
2134 int ret;
2135 int retries = 0;
2136
2137 mutex_lock(&root->fs_info->trans_mutex);
2138 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2139 rc->merging_rsv_size += rc->nodes_relocated * 2;
2140 mutex_unlock(&root->fs_info->trans_mutex);
2141again:
2142 if (!err) {
2143 num_bytes = rc->merging_rsv_size;
2144 ret = btrfs_block_rsv_add(NULL, root, rc->block_rsv,
2145 num_bytes, &retries);
2146 if (ret)
2147 err = ret;
2148 }
2149
2150 trans = btrfs_join_transaction(rc->extent_root, 1);
2151
2152 if (!err) {
2153 if (num_bytes != rc->merging_rsv_size) {
2154 btrfs_end_transaction(trans, rc->extent_root);
2155 btrfs_block_rsv_release(rc->extent_root,
2156 rc->block_rsv, num_bytes);
2157 retries = 0;
2158 goto again;
2159 }
2160 }
1794 2161
1795 async = container_of(work, struct async_merge, work); 2162 rc->merge_reloc_tree = 1;
1796 reloc_root = async->root; 2163
2164 while (!list_empty(&rc->reloc_roots)) {
2165 reloc_root = list_entry(rc->reloc_roots.next,
2166 struct btrfs_root, root_list);
2167 list_del_init(&reloc_root->root_list);
1797 2168
1798 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
1799 root = read_fs_root(reloc_root->fs_info, 2169 root = read_fs_root(reloc_root->fs_info,
1800 reloc_root->root_key.offset); 2170 reloc_root->root_key.offset);
1801 BUG_ON(IS_ERR(root)); 2171 BUG_ON(IS_ERR(root));
1802 BUG_ON(root->reloc_root != reloc_root); 2172 BUG_ON(root->reloc_root != reloc_root);
1803 2173
1804 merge_reloc_root(async->rc, root); 2174 /*
1805 2175 * set reference count to 1, so btrfs_recover_relocation
1806			trans = btrfs_start_transaction(root, 1);			 2176		 * knows it should resume merging
2177 */
2178 if (!err)
2179 btrfs_set_root_refs(&reloc_root->root_item, 1);
1807 btrfs_update_reloc_root(trans, root); 2180 btrfs_update_reloc_root(trans, root);
1808 btrfs_end_transaction(trans, root);
1809 }
1810 2181
1811 btrfs_drop_snapshot(reloc_root, 0); 2182 list_add(&reloc_root->root_list, &reloc_roots);
2183 }
1812 2184
1813 if (atomic_dec_and_test(async->num_pending)) 2185 list_splice(&reloc_roots, &rc->reloc_roots);
1814 complete(async->done);
1815 2186
1816 kfree(async); 2187 if (!err)
2188 btrfs_commit_transaction(trans, rc->extent_root);
2189 else
2190 btrfs_end_transaction(trans, rc->extent_root);
2191 return err;
1817} 2192}
1818 2193
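prepare_to_merge() reserves metadata with an optimistic retry: it samples merging_rsv_size, reserves that much, and if the requirement grew while the reservation was in flight it releases everything and starts over. The same shape in a standalone sketch, with reserve()/release() standing in for the block_rsv helpers and an atomic counter standing in for the trans_mutex-protected size:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic unsigned long long required_bytes = 4096;
    static unsigned long long reserved;

    static bool reserve(unsigned long long bytes) { reserved += bytes; return true; }
    static void release(unsigned long long bytes) { reserved -= bytes; }

    /*
     * Optimistic reservation: read the required size, reserve it, then
     * re-check; if the requirement changed while reserving, release and
     * try again.
     */
    static bool reserve_for_merge(void)
    {
        for (;;) {
            unsigned long long want = atomic_load(&required_bytes);

            if (!reserve(want))
                return false;
            if (atomic_load(&required_bytes) == want)
                return true;     /* requirement unchanged, reservation holds */
            release(want);       /* requirement grew meanwhile, start over */
        }
    }

    int main(void)
    {
        printf("%d %llu\n", reserve_for_merge(), reserved);   /* prints: 1 4096 */
        return 0;
    }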
1819static int merge_reloc_roots(struct reloc_control *rc) 2194static noinline_for_stack
2195int merge_reloc_roots(struct reloc_control *rc)
1820{ 2196{
1821 struct async_merge *async;
1822 struct btrfs_root *root; 2197 struct btrfs_root *root;
1823 struct completion done; 2198 struct btrfs_root *reloc_root;
1824 atomic_t num_pending; 2199 LIST_HEAD(reloc_roots);
2200 int found = 0;
2201 int ret;
2202again:
2203 root = rc->extent_root;
2204 mutex_lock(&root->fs_info->trans_mutex);
2205 list_splice_init(&rc->reloc_roots, &reloc_roots);
2206 mutex_unlock(&root->fs_info->trans_mutex);
1825 2207
1826 init_completion(&done); 2208 while (!list_empty(&reloc_roots)) {
1827 atomic_set(&num_pending, 1); 2209 found = 1;
2210 reloc_root = list_entry(reloc_roots.next,
2211 struct btrfs_root, root_list);
1828 2212
1829 while (!list_empty(&rc->reloc_roots)) { 2213 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
1830 root = list_entry(rc->reloc_roots.next, 2214 root = read_fs_root(reloc_root->fs_info,
1831 struct btrfs_root, root_list); 2215 reloc_root->root_key.offset);
1832 list_del_init(&root->root_list); 2216 BUG_ON(IS_ERR(root));
2217 BUG_ON(root->reloc_root != reloc_root);
1833 2218
1834 async = kmalloc(sizeof(*async), GFP_NOFS); 2219 ret = merge_reloc_root(rc, root);
1835 BUG_ON(!async); 2220 BUG_ON(ret);
1836 async->work.func = merge_func; 2221 } else {
1837 async->work.flags = 0; 2222 list_del_init(&reloc_root->root_list);
1838 async->rc = rc; 2223 }
1839 async->root = root; 2224 btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0);
1840 async->done = &done;
1841 async->num_pending = &num_pending;
1842 atomic_inc(&num_pending);
1843 btrfs_queue_worker(&rc->workers, &async->work);
1844 } 2225 }
1845 2226
1846 if (!atomic_dec_and_test(&num_pending)) 2227 if (found) {
1847 wait_for_completion(&done); 2228 found = 0;
1848 2229 goto again;
2230 }
1849 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); 2231 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
1850 return 0; 2232 return 0;
1851} 2233}
@@ -1876,119 +2258,169 @@ static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
1876 return btrfs_record_root_in_trans(trans, root); 2258 return btrfs_record_root_in_trans(trans, root);
1877} 2259}
1878 2260
1879/* 2261static noinline_for_stack
1880 * select one tree from trees that reference the block.	2262struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
1881 * for blocks in reference counted trees, we prefer the reloc tree.	2263				     struct reloc_control *rc,
1882 * if no reloc tree is found and reloc_only is true, NULL is returned.	2264				     struct backref_node *node,
1883 */ 2265 struct backref_edge *edges[], int *nr)
1884static struct btrfs_root *__select_one_root(struct btrfs_trans_handle *trans,
1885 struct backref_node *node,
1886 struct backref_edge *edges[],
1887 int *nr, int reloc_only)
1888{ 2266{
1889 struct backref_node *next; 2267 struct backref_node *next;
1890 struct btrfs_root *root; 2268 struct btrfs_root *root;
1891 int index; 2269 int index = 0;
1892 int loop = 0; 2270
1893again:
1894 index = 0;
1895 next = node; 2271 next = node;
1896 while (1) { 2272 while (1) {
1897 cond_resched(); 2273 cond_resched();
1898 next = walk_up_backref(next, edges, &index); 2274 next = walk_up_backref(next, edges, &index);
1899 root = next->root; 2275 root = next->root;
1900 if (!root) { 2276 BUG_ON(!root);
1901 BUG_ON(!node->old_root); 2277 BUG_ON(!root->ref_cows);
1902 goto skip;
1903 }
1904
1905		/* no other choice for non-reference counted tree */
1906 if (!root->ref_cows) {
1907 BUG_ON(reloc_only);
1908 break;
1909 }
1910 2278
1911 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { 2279 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
1912 record_reloc_root_in_trans(trans, root); 2280 record_reloc_root_in_trans(trans, root);
1913 break; 2281 break;
1914 } 2282 }
1915 2283
1916 if (loop) { 2284 btrfs_record_root_in_trans(trans, root);
1917 btrfs_record_root_in_trans(trans, root); 2285 root = root->reloc_root;
2286
2287 if (next->new_bytenr != root->node->start) {
2288 BUG_ON(next->new_bytenr);
2289 BUG_ON(!list_empty(&next->list));
2290 next->new_bytenr = root->node->start;
2291 next->root = root;
2292 list_add_tail(&next->list,
2293 &rc->backref_cache.changed);
2294 __mark_block_processed(rc, next);
1918 break; 2295 break;
1919 } 2296 }
1920 2297
1921 if (reloc_only || next != node) { 2298 WARN_ON(1);
1922 if (!root->reloc_root)
1923 btrfs_record_root_in_trans(trans, root);
1924 root = root->reloc_root;
1925 /*
1926			 * if the reloc tree was created in the current
1927			 * transaction, there is no node in the backref tree
1928			 * that corresponds to the root of the reloc tree.
1929 */
1930 if (btrfs_root_last_snapshot(&root->root_item) ==
1931 trans->transid - 1)
1932 break;
1933 }
1934skip:
1935 root = NULL; 2299 root = NULL;
1936 next = walk_down_backref(edges, &index); 2300 next = walk_down_backref(edges, &index);
1937 if (!next || next->level <= node->level) 2301 if (!next || next->level <= node->level)
1938 break; 2302 break;
1939 } 2303 }
2304 if (!root)
2305 return NULL;
1940 2306
1941 if (!root && !loop && !reloc_only) { 2307 *nr = index;
1942 loop = 1; 2308 next = node;
1943 goto again; 2309 /* setup backref node path for btrfs_reloc_cow_block */
2310 while (1) {
2311 rc->backref_cache.path[next->level] = next;
2312 if (--index < 0)
2313 break;
2314 next = edges[index]->node[UPPER];
1944 } 2315 }
1945
1946 if (root)
1947 *nr = index;
1948 else
1949 *nr = 0;
1950
1951 return root; 2316 return root;
1952} 2317}
1953 2318
2319/*
2320 * select a tree root for relocation. return NULL if the block
2321 * is reference counted. we should use do_relocation() in this
2322 * case. return a tree root pointer if the block isn't reference
 2323 * counted. return -ENOENT if the block is the root of a reloc tree.
2324 */
1954static noinline_for_stack 2325static noinline_for_stack
1955struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans, 2326struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans,
1956 struct backref_node *node) 2327 struct backref_node *node)
1957{ 2328{
2329 struct backref_node *next;
2330 struct btrfs_root *root;
2331 struct btrfs_root *fs_root = NULL;
1958 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; 2332 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
1959 int nr; 2333 int index = 0;
1960 return __select_one_root(trans, node, edges, &nr, 0); 2334
2335 next = node;
2336 while (1) {
2337 cond_resched();
2338 next = walk_up_backref(next, edges, &index);
2339 root = next->root;
2340 BUG_ON(!root);
2341
2342 /* no other choice for non-refernce counted tree */
2343 if (!root->ref_cows)
2344 return root;
2345
2346 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2347 fs_root = root;
2348
2349 if (next != node)
2350 return NULL;
2351
2352 next = walk_down_backref(edges, &index);
2353 if (!next || next->level <= node->level)
2354 break;
2355 }
2356
2357 if (!fs_root)
2358 return ERR_PTR(-ENOENT);
2359 return fs_root;
1961} 2360}
1962 2361
1963static noinline_for_stack 2362static noinline_for_stack
1964struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, 2363u64 calcu_metadata_size(struct reloc_control *rc,
1965 struct backref_node *node, 2364 struct backref_node *node, int reserve)
1966 struct backref_edge *edges[], int *nr)
1967{ 2365{
1968 return __select_one_root(trans, node, edges, nr, 1); 2366 struct backref_node *next = node;
2367 struct backref_edge *edge;
2368 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2369 u64 num_bytes = 0;
2370 int index = 0;
2371
2372 BUG_ON(reserve && node->processed);
2373
2374 while (next) {
2375 cond_resched();
2376 while (1) {
2377 if (next->processed && (reserve || next != node))
2378 break;
2379
2380 num_bytes += btrfs_level_size(rc->extent_root,
2381 next->level);
2382
2383 if (list_empty(&next->upper))
2384 break;
2385
2386 edge = list_entry(next->upper.next,
2387 struct backref_edge, list[LOWER]);
2388 edges[index++] = edge;
2389 next = edge->node[UPPER];
2390 }
2391 next = walk_down_backref(edges, &index);
2392 }
2393 return num_bytes;
1969} 2394}
1970 2395
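calcu_metadata_size() above walks up the backref edges and adds one tree block for every unprocessed level; the reserve_metadata_space() that follows doubles that figure before charging the block reserve. A simplified, single-chain sketch of that accounting, where a parent[] array replaces the backref edges and every level is assumed to use a 4KiB block:

    #include <stdio.h>

    #define LEVELS 8

    static unsigned long long level_size(int level)
    {
        (void)level;
        return 4096;               /* assume a 4KiB nodesize for every level */
    }

    /*
     * Sum the metadata that relocating one block may dirty: one tree
     * block per level on the way up, stopping at nodes that were already
     * processed. 'n' doubles as the level here.
     */
    static unsigned long long metadata_size(const int parent[],
                                            const int processed[], int start)
    {
        unsigned long long bytes = 0;
        int n = start;

        while (n >= 0 && !processed[n]) {
            bytes += level_size(n);
            n = parent[n];         /* walk to the upper node */
        }
        return bytes;
    }

    int main(void)
    {
        int parent[LEVELS]    = { 1, 2, 3, -1, -1, -1, -1, -1 };
        int processed[LEVELS] = { 0, 0, 0, 1, 0, 0, 0, 0 };

        /* levels 0..2 unprocessed -> 3 blocks of 4KiB = 12288 bytes */
        printf("%llu\n", metadata_size(parent, processed, 0));
        return 0;
    }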
1971static void grab_path_buffers(struct btrfs_path *path, 2396static int reserve_metadata_space(struct btrfs_trans_handle *trans,
1972 struct backref_node *node, 2397 struct reloc_control *rc,
1973 struct backref_edge *edges[], int nr) 2398 struct backref_node *node)
1974{ 2399{
1975 int i = 0; 2400 struct btrfs_root *root = rc->extent_root;
1976 while (1) { 2401 u64 num_bytes;
1977 drop_node_buffer(node); 2402 int ret;
1978 node->eb = path->nodes[node->level]; 2403
1979 BUG_ON(!node->eb); 2404 num_bytes = calcu_metadata_size(rc, node, 1) * 2;
1980 if (path->locks[node->level])
1981 node->locked = 1;
1982 path->nodes[node->level] = NULL;
1983 path->locks[node->level] = 0;
1984
1985 if (i >= nr)
1986 break;
1987 2405
1988 edges[i]->blockptr = node->eb->start; 2406 trans->block_rsv = rc->block_rsv;
1989 node = edges[i]->node[UPPER]; 2407 ret = btrfs_block_rsv_add(trans, root, rc->block_rsv, num_bytes,
1990 i++; 2408 &rc->block_rsv_retries);
2409 if (ret) {
2410 if (ret == -EAGAIN)
2411 rc->commit_transaction = 1;
2412 return ret;
1991 } 2413 }
2414
2415 rc->block_rsv_retries = 0;
2416 return 0;
2417}
2418
2419static void release_metadata_space(struct reloc_control *rc,
2420 struct backref_node *node)
2421{
2422 u64 num_bytes = calcu_metadata_size(rc, node, 0) * 2;
2423 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, num_bytes);
1992} 2424}
1993 2425
1994/* 2426/*
@@ -1999,6 +2431,7 @@ static void grab_path_buffers(struct btrfs_path *path,
1999 * in that case this function just updates pointers. 2431 * in that case this function just updates pointers.
2000 */ 2432 */
2001static int do_relocation(struct btrfs_trans_handle *trans, 2433static int do_relocation(struct btrfs_trans_handle *trans,
2434 struct reloc_control *rc,
2002 struct backref_node *node, 2435 struct backref_node *node,
2003 struct btrfs_key *key, 2436 struct btrfs_key *key,
2004 struct btrfs_path *path, int lowest) 2437 struct btrfs_path *path, int lowest)
@@ -2019,18 +2452,25 @@ static int do_relocation(struct btrfs_trans_handle *trans,
2019 BUG_ON(lowest && node->eb); 2452 BUG_ON(lowest && node->eb);
2020 2453
2021 path->lowest_level = node->level + 1; 2454 path->lowest_level = node->level + 1;
2455 rc->backref_cache.path[node->level] = node;
2022 list_for_each_entry(edge, &node->upper, list[LOWER]) { 2456 list_for_each_entry(edge, &node->upper, list[LOWER]) {
2023 cond_resched(); 2457 cond_resched();
2024 if (node->eb && node->eb->start == edge->blockptr)
2025 continue;
2026 2458
2027 upper = edge->node[UPPER]; 2459 upper = edge->node[UPPER];
2028 root = select_reloc_root(trans, upper, edges, &nr); 2460 root = select_reloc_root(trans, rc, upper, edges, &nr);
2029 if (!root) 2461 BUG_ON(!root);
2030 continue; 2462
2031 2463 if (upper->eb && !upper->locked) {
2032 if (upper->eb && !upper->locked) 2464 if (!lowest) {
2465 ret = btrfs_bin_search(upper->eb, key,
2466 upper->level, &slot);
2467 BUG_ON(ret);
2468 bytenr = btrfs_node_blockptr(upper->eb, slot);
2469 if (node->eb->start == bytenr)
2470 goto next;
2471 }
2033 drop_node_buffer(upper); 2472 drop_node_buffer(upper);
2473 }
2034 2474
2035 if (!upper->eb) { 2475 if (!upper->eb) {
2036 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 2476 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
@@ -2040,11 +2480,17 @@ static int do_relocation(struct btrfs_trans_handle *trans,
2040 } 2480 }
2041 BUG_ON(ret > 0); 2481 BUG_ON(ret > 0);
2042 2482
2043 slot = path->slots[upper->level]; 2483 if (!upper->eb) {
2484 upper->eb = path->nodes[upper->level];
2485 path->nodes[upper->level] = NULL;
2486 } else {
2487 BUG_ON(upper->eb != path->nodes[upper->level]);
2488 }
2044 2489
2045 btrfs_unlock_up_safe(path, upper->level + 1); 2490 upper->locked = 1;
2046 grab_path_buffers(path, upper, edges, nr); 2491 path->locks[upper->level] = 0;
2047 2492
2493 slot = path->slots[upper->level];
2048 btrfs_release_path(NULL, path); 2494 btrfs_release_path(NULL, path);
2049 } else { 2495 } else {
2050 ret = btrfs_bin_search(upper->eb, key, upper->level, 2496 ret = btrfs_bin_search(upper->eb, key, upper->level,
@@ -2053,14 +2499,11 @@ static int do_relocation(struct btrfs_trans_handle *trans,
2053 } 2499 }
2054 2500
2055 bytenr = btrfs_node_blockptr(upper->eb, slot); 2501 bytenr = btrfs_node_blockptr(upper->eb, slot);
2056 if (!lowest) { 2502 if (lowest) {
2057 if (node->eb->start == bytenr) { 2503 BUG_ON(bytenr != node->bytenr);
2058 btrfs_tree_unlock(upper->eb);
2059 upper->locked = 0;
2060 continue;
2061 }
2062 } else { 2504 } else {
2063 BUG_ON(node->bytenr != bytenr); 2505 if (node->eb->start == bytenr)
2506 goto next;
2064 } 2507 }
2065 2508
2066 blocksize = btrfs_level_size(root, node->level); 2509 blocksize = btrfs_level_size(root, node->level);
@@ -2072,13 +2515,13 @@ static int do_relocation(struct btrfs_trans_handle *trans,
2072 if (!node->eb) { 2515 if (!node->eb) {
2073 ret = btrfs_cow_block(trans, root, eb, upper->eb, 2516 ret = btrfs_cow_block(trans, root, eb, upper->eb,
2074 slot, &eb); 2517 slot, &eb);
2518 btrfs_tree_unlock(eb);
2519 free_extent_buffer(eb);
2075 if (ret < 0) { 2520 if (ret < 0) {
2076 err = ret; 2521 err = ret;
2077 break; 2522 goto next;
2078 } 2523 }
2079 btrfs_set_lock_blocking(eb); 2524 BUG_ON(node->eb != eb);
2080 node->eb = eb;
2081 node->locked = 1;
2082 } else { 2525 } else {
2083 btrfs_set_node_blockptr(upper->eb, slot, 2526 btrfs_set_node_blockptr(upper->eb, slot,
2084 node->eb->start); 2527 node->eb->start);
@@ -2096,67 +2539,80 @@ static int do_relocation(struct btrfs_trans_handle *trans,
2096 ret = btrfs_drop_subtree(trans, root, eb, upper->eb); 2539 ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
2097 BUG_ON(ret); 2540 BUG_ON(ret);
2098 } 2541 }
2099 if (!lowest) { 2542next:
2100 btrfs_tree_unlock(upper->eb); 2543 if (!upper->pending)
2101 upper->locked = 0; 2544 drop_node_buffer(upper);
2102 } 2545 else
2546 unlock_node_buffer(upper);
2547 if (err)
2548 break;
2103 } 2549 }
2550
2551 if (!err && node->pending) {
2552 drop_node_buffer(node);
2553 list_move_tail(&node->list, &rc->backref_cache.changed);
2554 node->pending = 0;
2555 }
2556
2104 path->lowest_level = 0; 2557 path->lowest_level = 0;
2558 BUG_ON(err == -ENOSPC);
2105 return err; 2559 return err;
2106} 2560}
2107 2561
2108static int link_to_upper(struct btrfs_trans_handle *trans, 2562static int link_to_upper(struct btrfs_trans_handle *trans,
2563 struct reloc_control *rc,
2109 struct backref_node *node, 2564 struct backref_node *node,
2110 struct btrfs_path *path) 2565 struct btrfs_path *path)
2111{ 2566{
2112 struct btrfs_key key; 2567 struct btrfs_key key;
2113 if (!node->eb || list_empty(&node->upper))
2114 return 0;
2115 2568
2116 btrfs_node_key_to_cpu(node->eb, &key, 0); 2569 btrfs_node_key_to_cpu(node->eb, &key, 0);
2117 return do_relocation(trans, node, &key, path, 0); 2570 return do_relocation(trans, rc, node, &key, path, 0);
2118} 2571}
2119 2572
2120static int finish_pending_nodes(struct btrfs_trans_handle *trans, 2573static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2121 struct backref_cache *cache, 2574 struct reloc_control *rc,
2122 struct btrfs_path *path) 2575 struct btrfs_path *path, int err)
2123{ 2576{
2577 LIST_HEAD(list);
2578 struct backref_cache *cache = &rc->backref_cache;
2124 struct backref_node *node; 2579 struct backref_node *node;
2125 int level; 2580 int level;
2126 int ret; 2581 int ret;
2127 int err = 0;
2128 2582
2129 for (level = 0; level < BTRFS_MAX_LEVEL; level++) { 2583 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2130 while (!list_empty(&cache->pending[level])) { 2584 while (!list_empty(&cache->pending[level])) {
2131 node = list_entry(cache->pending[level].next, 2585 node = list_entry(cache->pending[level].next,
2132 struct backref_node, lower); 2586 struct backref_node, list);
2133 BUG_ON(node->level != level); 2587 list_move_tail(&node->list, &list);
2588 BUG_ON(!node->pending);
2134 2589
2135 ret = link_to_upper(trans, node, path); 2590 if (!err) {
2136 if (ret < 0) 2591 ret = link_to_upper(trans, rc, node, path);
2137 err = ret; 2592 if (ret < 0)
2138 /* 2593 err = ret;
2139 * this remove the node from the pending list and 2594 }
2140 * may add some other nodes to the level + 1
2141 * pending list
2142 */
2143 remove_backref_node(cache, node);
2144 } 2595 }
2596 list_splice_init(&list, &cache->pending[level]);
2145 } 2597 }
2146 BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
2147 return err; 2598 return err;
2148} 2599}
2149 2600
2150static void mark_block_processed(struct reloc_control *rc, 2601static void mark_block_processed(struct reloc_control *rc,
2151 struct backref_node *node) 2602 u64 bytenr, u32 blocksize)
2603{
2604 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
2605 EXTENT_DIRTY, GFP_NOFS);
2606}
2607
2608static void __mark_block_processed(struct reloc_control *rc,
2609 struct backref_node *node)
2152{ 2610{
2153 u32 blocksize; 2611 u32 blocksize;
2154 if (node->level == 0 || 2612 if (node->level == 0 ||
2155 in_block_group(node->bytenr, rc->block_group)) { 2613 in_block_group(node->bytenr, rc->block_group)) {
2156 blocksize = btrfs_level_size(rc->extent_root, node->level); 2614 blocksize = btrfs_level_size(rc->extent_root, node->level);
2157 set_extent_bits(&rc->processed_blocks, node->bytenr, 2615 mark_block_processed(rc, node->bytenr, blocksize);
2158 node->bytenr + blocksize - 1, EXTENT_DIRTY,
2159 GFP_NOFS);
2160 } 2616 }
2161 node->processed = 1; 2617 node->processed = 1;
2162} 2618}
@@ -2179,7 +2635,7 @@ static void update_processed_blocks(struct reloc_control *rc,
2179 if (next->processed) 2635 if (next->processed)
2180 break; 2636 break;
2181 2637
2182 mark_block_processed(rc, next); 2638 __mark_block_processed(rc, next);
2183 2639
2184 if (list_empty(&next->upper)) 2640 if (list_empty(&next->upper))
2185 break; 2641 break;
@@ -2202,138 +2658,6 @@ static int tree_block_processed(u64 bytenr, u32 blocksize,
2202 return 0; 2658 return 0;
2203} 2659}
2204 2660
2205/*
2206 * check if there are any file extent pointers in the leaf that point to
2207 * data requiring processing
2208 */
2209static int check_file_extents(struct reloc_control *rc,
2210 u64 bytenr, u32 blocksize, u64 ptr_gen)
2211{
2212 struct btrfs_key found_key;
2213 struct btrfs_file_extent_item *fi;
2214 struct extent_buffer *leaf;
2215 u32 nritems;
2216 int i;
2217 int ret = 0;
2218
2219 leaf = read_tree_block(rc->extent_root, bytenr, blocksize, ptr_gen);
2220
2221 nritems = btrfs_header_nritems(leaf);
2222 for (i = 0; i < nritems; i++) {
2223 cond_resched();
2224 btrfs_item_key_to_cpu(leaf, &found_key, i);
2225 if (found_key.type != BTRFS_EXTENT_DATA_KEY)
2226 continue;
2227 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
2228 if (btrfs_file_extent_type(leaf, fi) ==
2229 BTRFS_FILE_EXTENT_INLINE)
2230 continue;
2231 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
2232 if (bytenr == 0)
2233 continue;
2234 if (in_block_group(bytenr, rc->block_group)) {
2235 ret = 1;
2236 break;
2237 }
2238 }
2239 free_extent_buffer(leaf);
2240 return ret;
2241}
2242
2243/*
2244 * scan child blocks of a given block to find blocks require processing
2245 */
2246static int add_child_blocks(struct btrfs_trans_handle *trans,
2247 struct reloc_control *rc,
2248 struct backref_node *node,
2249 struct rb_root *blocks)
2250{
2251 struct tree_block *block;
2252 struct rb_node *rb_node;
2253 u64 bytenr;
2254 u64 ptr_gen;
2255 u32 blocksize;
2256 u32 nritems;
2257 int i;
2258 int err = 0;
2259
2260 nritems = btrfs_header_nritems(node->eb);
2261 blocksize = btrfs_level_size(rc->extent_root, node->level - 1);
2262 for (i = 0; i < nritems; i++) {
2263 cond_resched();
2264 bytenr = btrfs_node_blockptr(node->eb, i);
2265 ptr_gen = btrfs_node_ptr_generation(node->eb, i);
2266 if (ptr_gen == trans->transid)
2267 continue;
2268 if (!in_block_group(bytenr, rc->block_group) &&
2269 (node->level > 1 || rc->stage == MOVE_DATA_EXTENTS))
2270 continue;
2271 if (tree_block_processed(bytenr, blocksize, rc))
2272 continue;
2273
2274 readahead_tree_block(rc->extent_root,
2275 bytenr, blocksize, ptr_gen);
2276 }
2277
2278 for (i = 0; i < nritems; i++) {
2279 cond_resched();
2280 bytenr = btrfs_node_blockptr(node->eb, i);
2281 ptr_gen = btrfs_node_ptr_generation(node->eb, i);
2282 if (ptr_gen == trans->transid)
2283 continue;
2284 if (!in_block_group(bytenr, rc->block_group) &&
2285 (node->level > 1 || rc->stage == MOVE_DATA_EXTENTS))
2286 continue;
2287 if (tree_block_processed(bytenr, blocksize, rc))
2288 continue;
2289 if (!in_block_group(bytenr, rc->block_group) &&
2290 !check_file_extents(rc, bytenr, blocksize, ptr_gen))
2291 continue;
2292
2293 block = kmalloc(sizeof(*block), GFP_NOFS);
2294 if (!block) {
2295 err = -ENOMEM;
2296 break;
2297 }
2298 block->bytenr = bytenr;
2299 btrfs_node_key_to_cpu(node->eb, &block->key, i);
2300 block->level = node->level - 1;
2301 block->key_ready = 1;
2302 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
2303 BUG_ON(rb_node);
2304 }
2305 if (err)
2306 free_block_list(blocks);
2307 return err;
2308}
2309
2310/*
2311 * find adjacent blocks that require processing
2312 */
2313static noinline_for_stack
2314int add_adjacent_blocks(struct btrfs_trans_handle *trans,
2315 struct reloc_control *rc,
2316 struct backref_cache *cache,
2317 struct rb_root *blocks, int level,
2318 struct backref_node **upper)
2319{
2320 struct backref_node *node;
2321 int ret = 0;
2322
2323 WARN_ON(!list_empty(&cache->pending[level]));
2324
2325 if (list_empty(&cache->pending[level + 1]))
2326 return 1;
2327
2328 node = list_entry(cache->pending[level + 1].next,
2329 struct backref_node, lower);
2330 if (node->eb)
2331 ret = add_child_blocks(trans, rc, node, blocks);
2332
2333 *upper = node;
2334 return ret;
2335}
2336
2337static int get_tree_block_key(struct reloc_control *rc, 2661static int get_tree_block_key(struct reloc_control *rc,
2338 struct tree_block *block) 2662 struct tree_block *block)
2339{ 2663{
@@ -2371,40 +2695,53 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
2371 struct btrfs_path *path) 2695 struct btrfs_path *path)
2372{ 2696{
2373 struct btrfs_root *root; 2697 struct btrfs_root *root;
2374 int ret; 2698 int release = 0;
2699 int ret = 0;
2375 2700
2701 if (!node)
2702 return 0;
2703
2704 BUG_ON(node->processed);
2376 root = select_one_root(trans, node); 2705 root = select_one_root(trans, node);
2377 if (unlikely(!root)) { 2706 if (root == ERR_PTR(-ENOENT)) {
2378 rc->found_old_snapshot = 1;
2379 update_processed_blocks(rc, node); 2707 update_processed_blocks(rc, node);
2380 return 0; 2708 goto out;
2381 } 2709 }
2382 2710
2383 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { 2711 if (!root || root->ref_cows) {
2384 ret = do_relocation(trans, node, key, path, 1); 2712 ret = reserve_metadata_space(trans, rc, node);
2385 if (ret < 0) 2713 if (ret)
2386 goto out;
2387 if (node->level == 0 && rc->stage == UPDATE_DATA_PTRS) {
2388 ret = replace_file_extents(trans, rc, root,
2389 node->eb, NULL);
2390 if (ret < 0)
2391 goto out;
2392 }
2393 drop_node_buffer(node);
2394 } else if (!root->ref_cows) {
2395 path->lowest_level = node->level;
2396 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2397 btrfs_release_path(root, path);
2398 if (ret < 0)
2399 goto out; 2714 goto out;
2400 } else if (root != node->root) { 2715 release = 1;
2401 WARN_ON(node->level > 0 || rc->stage != UPDATE_DATA_PTRS);
2402 } 2716 }
2403 2717
2404 update_processed_blocks(rc, node); 2718 if (root) {
2405 ret = 0; 2719 if (root->ref_cows) {
2720 BUG_ON(node->new_bytenr);
2721 BUG_ON(!list_empty(&node->list));
2722 btrfs_record_root_in_trans(trans, root);
2723 root = root->reloc_root;
2724 node->new_bytenr = root->node->start;
2725 node->root = root;
2726 list_add_tail(&node->list, &rc->backref_cache.changed);
2727 } else {
2728 path->lowest_level = node->level;
2729 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2730 btrfs_release_path(root, path);
2731 if (ret > 0)
2732 ret = 0;
2733 }
2734 if (!ret)
2735 update_processed_blocks(rc, node);
2736 } else {
2737 ret = do_relocation(trans, rc, node, key, path, 1);
2738 }
2406out: 2739out:
2407 drop_node_buffer(node); 2740 if (ret || node->level == 0 || node->cowonly) {
2741 if (release)
2742 release_metadata_space(rc, node);
2743 remove_backref_node(&rc->backref_cache, node);
2744 }
2408 return ret; 2745 return ret;
2409} 2746}
2410 2747
@@ -2415,12 +2752,10 @@ static noinline_for_stack
2415int relocate_tree_blocks(struct btrfs_trans_handle *trans, 2752int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2416 struct reloc_control *rc, struct rb_root *blocks) 2753 struct reloc_control *rc, struct rb_root *blocks)
2417{ 2754{
2418 struct backref_cache *cache;
2419 struct backref_node *node; 2755 struct backref_node *node;
2420 struct btrfs_path *path; 2756 struct btrfs_path *path;
2421 struct tree_block *block; 2757 struct tree_block *block;
2422 struct rb_node *rb_node; 2758 struct rb_node *rb_node;
2423 int level = -1;
2424 int ret; 2759 int ret;
2425 int err = 0; 2760 int err = 0;
2426 2761
@@ -2428,21 +2763,9 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2428 if (!path) 2763 if (!path)
2429 return -ENOMEM; 2764 return -ENOMEM;
2430 2765
2431 cache = kmalloc(sizeof(*cache), GFP_NOFS);
2432 if (!cache) {
2433 btrfs_free_path(path);
2434 return -ENOMEM;
2435 }
2436
2437 backref_cache_init(cache);
2438
2439 rb_node = rb_first(blocks); 2766 rb_node = rb_first(blocks);
2440 while (rb_node) { 2767 while (rb_node) {
2441 block = rb_entry(rb_node, struct tree_block, rb_node); 2768 block = rb_entry(rb_node, struct tree_block, rb_node);
2442 if (level == -1)
2443 level = block->level;
2444 else
2445 BUG_ON(level != block->level);
2446 if (!block->key_ready) 2769 if (!block->key_ready)
2447 reada_tree_block(rc, block); 2770 reada_tree_block(rc, block);
2448 rb_node = rb_next(rb_node); 2771 rb_node = rb_next(rb_node);
@@ -2460,7 +2783,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2460 while (rb_node) { 2783 while (rb_node) {
2461 block = rb_entry(rb_node, struct tree_block, rb_node); 2784 block = rb_entry(rb_node, struct tree_block, rb_node);
2462 2785
2463 node = build_backref_tree(rc, cache, &block->key, 2786 node = build_backref_tree(rc, &block->key,
2464 block->level, block->bytenr); 2787 block->level, block->bytenr);
2465 if (IS_ERR(node)) { 2788 if (IS_ERR(node)) {
2466 err = PTR_ERR(node); 2789 err = PTR_ERR(node);
@@ -2470,79 +2793,62 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2470 ret = relocate_tree_block(trans, rc, node, &block->key, 2793 ret = relocate_tree_block(trans, rc, node, &block->key,
2471 path); 2794 path);
2472 if (ret < 0) { 2795 if (ret < 0) {
2473 err = ret; 2796 if (ret != -EAGAIN || rb_node == rb_first(blocks))
2797 err = ret;
2474 goto out; 2798 goto out;
2475 } 2799 }
2476 remove_backref_node(cache, node);
2477 rb_node = rb_next(rb_node); 2800 rb_node = rb_next(rb_node);
2478 } 2801 }
2479 2802out:
2480 if (level > 0)
2481 goto out;
2482
2483 free_block_list(blocks); 2803 free_block_list(blocks);
2804 err = finish_pending_nodes(trans, rc, path, err);
2484 2805
2485 /* 2806 btrfs_free_path(path);
2486 * now backrefs of some upper level tree blocks have been cached, 2807 return err;
2487 * try relocating blocks referenced by these upper level blocks. 2808}
2488 */
2489 while (1) {
2490 struct backref_node *upper = NULL;
2491 if (trans->transaction->in_commit ||
2492 trans->transaction->delayed_refs.flushing)
2493 break;
2494 2809
2495 ret = add_adjacent_blocks(trans, rc, cache, blocks, level, 2810static noinline_for_stack
2496 &upper); 2811int prealloc_file_extent_cluster(struct inode *inode,
2497 if (ret < 0) 2812 struct file_extent_cluster *cluster)
2498 err = ret; 2813{
2499 if (ret != 0) 2814 u64 alloc_hint = 0;
2500 break; 2815 u64 start;
2816 u64 end;
2817 u64 offset = BTRFS_I(inode)->index_cnt;
2818 u64 num_bytes;
2819 int nr = 0;
2820 int ret = 0;
2501 2821
2502 rb_node = rb_first(blocks); 2822 BUG_ON(cluster->start != cluster->boundary[0]);
2503 while (rb_node) { 2823 mutex_lock(&inode->i_mutex);
2504 block = rb_entry(rb_node, struct tree_block, rb_node);
2505 if (trans->transaction->in_commit ||
2506 trans->transaction->delayed_refs.flushing)
2507 goto out;
2508 BUG_ON(!block->key_ready);
2509 node = build_backref_tree(rc, cache, &block->key,
2510 level, block->bytenr);
2511 if (IS_ERR(node)) {
2512 err = PTR_ERR(node);
2513 goto out;
2514 }
2515 2824
2516 ret = relocate_tree_block(trans, rc, node, 2825 ret = btrfs_check_data_free_space(inode, cluster->end +
2517 &block->key, path); 2826 1 - cluster->start);
2518 if (ret < 0) { 2827 if (ret)
2519 err = ret; 2828 goto out;
2520 goto out;
2521 }
2522 remove_backref_node(cache, node);
2523 rb_node = rb_next(rb_node);
2524 }
2525 free_block_list(blocks);
2526 2829
2527 if (upper) { 2830 while (nr < cluster->nr) {
2528 ret = link_to_upper(trans, upper, path); 2831 start = cluster->boundary[nr] - offset;
2529 if (ret < 0) { 2832 if (nr + 1 < cluster->nr)
2530 err = ret; 2833 end = cluster->boundary[nr + 1] - 1 - offset;
2531 break; 2834 else
2532 } 2835 end = cluster->end - offset;
2533 remove_backref_node(cache, upper); 2836
2534 } 2837 lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
2838 num_bytes = end + 1 - start;
2839 ret = btrfs_prealloc_file_range(inode, 0, start,
2840 num_bytes, num_bytes,
2841 end + 1, &alloc_hint);
2842 unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
2843 if (ret)
2844 break;
2845 nr++;
2535 } 2846 }
2847 btrfs_free_reserved_data_space(inode, cluster->end +
2848 1 - cluster->start);
2536out: 2849out:
2537 free_block_list(blocks); 2850 mutex_unlock(&inode->i_mutex);
2538 2851 return ret;
2539 ret = finish_pending_nodes(trans, cache, path);
2540 if (ret < 0)
2541 err = ret;
2542
2543 kfree(cache);
2544 btrfs_free_path(path);
2545 return err;
2546} 2852}
2547 2853
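
prealloc_file_extent_cluster() above derives one file range per cluster boundary: each boundary starts an extent, the next boundary (or cluster->end) bounds it, and everything is shifted down by the inode's index_cnt offset before being preallocated. A small sketch of just that range arithmetic, with a hypothetical stand-in for the cluster fields it reads:

#include <linux/types.h>

struct cluster_stub {
	u64 start;
	u64 end;
	u64 boundary[16];	/* boundary[0] == start */
	int nr;
};

/* compute the nr-th inclusive [*startp, *endp] file range, shifted by offset */
static void cluster_range(const struct cluster_stub *c, u64 offset, int nr,
			  u64 *startp, u64 *endp)
{
	*startp = c->boundary[nr] - offset;
	if (nr + 1 < c->nr)
		*endp = c->boundary[nr + 1] - 1 - offset;
	else
		*endp = c->end - offset;
}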
2548static noinline_for_stack 2854static noinline_for_stack
@@ -2588,7 +2894,6 @@ static int relocate_file_extent_cluster(struct inode *inode,
2588 u64 offset = BTRFS_I(inode)->index_cnt; 2894 u64 offset = BTRFS_I(inode)->index_cnt;
2589 unsigned long index; 2895 unsigned long index;
2590 unsigned long last_index; 2896 unsigned long last_index;
2591 unsigned int dirty_page = 0;
2592 struct page *page; 2897 struct page *page;
2593 struct file_ra_state *ra; 2898 struct file_ra_state *ra;
2594 int nr = 0; 2899 int nr = 0;
@@ -2601,21 +2906,24 @@ static int relocate_file_extent_cluster(struct inode *inode,
2601 if (!ra) 2906 if (!ra)
2602 return -ENOMEM; 2907 return -ENOMEM;
2603 2908
2604 index = (cluster->start - offset) >> PAGE_CACHE_SHIFT; 2909 ret = prealloc_file_extent_cluster(inode, cluster);
2605 last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT; 2910 if (ret)
2911 goto out;
2606 2912
2607 mutex_lock(&inode->i_mutex); 2913 file_ra_state_init(ra, inode->i_mapping);
2608 2914
2609 i_size_write(inode, cluster->end + 1 - offset);
2610 ret = setup_extent_mapping(inode, cluster->start - offset, 2915 ret = setup_extent_mapping(inode, cluster->start - offset,
2611 cluster->end - offset, cluster->start); 2916 cluster->end - offset, cluster->start);
2612 if (ret) 2917 if (ret)
2613 goto out_unlock; 2918 goto out;
2614
2615 file_ra_state_init(ra, inode->i_mapping);
2616 2919
2617 WARN_ON(cluster->start != cluster->boundary[0]); 2920 index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
2921 last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
2618 while (index <= last_index) { 2922 while (index <= last_index) {
2923 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
2924 if (ret)
2925 goto out;
2926
2619 page = find_lock_page(inode->i_mapping, index); 2927 page = find_lock_page(inode->i_mapping, index);
2620 if (!page) { 2928 if (!page) {
2621 page_cache_sync_readahead(inode->i_mapping, 2929 page_cache_sync_readahead(inode->i_mapping,
@@ -2623,8 +2931,10 @@ static int relocate_file_extent_cluster(struct inode *inode,
2623 last_index + 1 - index); 2931 last_index + 1 - index);
2624 page = grab_cache_page(inode->i_mapping, index); 2932 page = grab_cache_page(inode->i_mapping, index);
2625 if (!page) { 2933 if (!page) {
2934 btrfs_delalloc_release_metadata(inode,
2935 PAGE_CACHE_SIZE);
2626 ret = -ENOMEM; 2936 ret = -ENOMEM;
2627 goto out_unlock; 2937 goto out;
2628 } 2938 }
2629 } 2939 }
2630 2940
@@ -2640,8 +2950,10 @@ static int relocate_file_extent_cluster(struct inode *inode,
2640 if (!PageUptodate(page)) { 2950 if (!PageUptodate(page)) {
2641 unlock_page(page); 2951 unlock_page(page);
2642 page_cache_release(page); 2952 page_cache_release(page);
2953 btrfs_delalloc_release_metadata(inode,
2954 PAGE_CACHE_SIZE);
2643 ret = -EIO; 2955 ret = -EIO;
2644 goto out_unlock; 2956 goto out;
2645 } 2957 }
2646 } 2958 }
2647 2959
@@ -2660,10 +2972,9 @@ static int relocate_file_extent_cluster(struct inode *inode,
2660 EXTENT_BOUNDARY, GFP_NOFS); 2972 EXTENT_BOUNDARY, GFP_NOFS);
2661 nr++; 2973 nr++;
2662 } 2974 }
2663 btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
2664 2975
2976 btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
2665 set_page_dirty(page); 2977 set_page_dirty(page);
2666 dirty_page++;
2667 2978
2668 unlock_extent(&BTRFS_I(inode)->io_tree, 2979 unlock_extent(&BTRFS_I(inode)->io_tree,
2669 page_start, page_end, GFP_NOFS); 2980 page_start, page_end, GFP_NOFS);
@@ -2671,20 +2982,11 @@ static int relocate_file_extent_cluster(struct inode *inode,
2671 page_cache_release(page); 2982 page_cache_release(page);
2672 2983
2673 index++; 2984 index++;
2674 if (nr < cluster->nr && 2985 balance_dirty_pages_ratelimited(inode->i_mapping);
2675 page_end + 1 + offset == cluster->boundary[nr]) { 2986 btrfs_throttle(BTRFS_I(inode)->root);
2676 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
2677 dirty_page);
2678 dirty_page = 0;
2679 }
2680 }
2681 if (dirty_page) {
2682 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
2683 dirty_page);
2684 } 2987 }
2685 WARN_ON(nr != cluster->nr); 2988 WARN_ON(nr != cluster->nr);
2686out_unlock: 2989out:
2687 mutex_unlock(&inode->i_mutex);
2688 kfree(ra); 2990 kfree(ra);
2689 return ret; 2991 return ret;
2690} 2992}
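
Each page handled in the loop above is now charged one PAGE_CACHE_SIZE of delalloc metadata before it is grabbed, and every early-exit path gives that charge back. Condensed into a standalone helper (the wrapper name is made up; the calls are the ones visible in this hunk), the pattern looks roughly like this:

/* sketch: charge delalloc metadata for one page, undo the charge on failure */
static int charge_and_grab_page(struct inode *inode, unsigned long index,
				struct page **pagep)
{
	struct page *page;
	int ret;

	ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
	if (ret)
		return ret;

	page = grab_cache_page(inode->i_mapping, index);
	if (!page) {
		btrfs_delalloc_release_metadata(inode, PAGE_CACHE_SIZE);
		return -ENOMEM;
	}

	/* success: the charge is consumed when the page is marked delalloc */
	*pagep = page;
	return 0;
}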
@@ -2870,9 +3172,6 @@ out:
2870static int block_use_full_backref(struct reloc_control *rc, 3172static int block_use_full_backref(struct reloc_control *rc,
2871 struct extent_buffer *eb) 3173 struct extent_buffer *eb)
2872{ 3174{
2873 struct btrfs_path *path;
2874 struct btrfs_extent_item *ei;
2875 struct btrfs_key key;
2876 u64 flags; 3175 u64 flags;
2877 int ret; 3176 int ret;
2878 3177
@@ -2880,28 +3179,14 @@ static int block_use_full_backref(struct reloc_control *rc,
2880 btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV) 3179 btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
2881 return 1; 3180 return 1;
2882 3181
2883 path = btrfs_alloc_path(); 3182 ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
2884 BUG_ON(!path); 3183 eb->start, eb->len, NULL, &flags);
2885
2886 key.objectid = eb->start;
2887 key.type = BTRFS_EXTENT_ITEM_KEY;
2888 key.offset = eb->len;
2889
2890 path->search_commit_root = 1;
2891 path->skip_locking = 1;
2892 ret = btrfs_search_slot(NULL, rc->extent_root,
2893 &key, path, 0, 0);
2894 BUG_ON(ret); 3184 BUG_ON(ret);
2895 3185
2896 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2897 struct btrfs_extent_item);
2898 flags = btrfs_extent_flags(path->nodes[0], ei);
2899 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2900 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) 3186 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
2901 ret = 1; 3187 ret = 1;
2902 else 3188 else
2903 ret = 0; 3189 ret = 0;
2904 btrfs_free_path(path);
2905 return ret; 3190 return ret;
2906} 3191}
2907 3192
@@ -3074,22 +3359,10 @@ int add_data_references(struct reloc_control *rc,
3074 struct btrfs_extent_inline_ref *iref; 3359 struct btrfs_extent_inline_ref *iref;
3075 unsigned long ptr; 3360 unsigned long ptr;
3076 unsigned long end; 3361 unsigned long end;
3077 u32 blocksize; 3362 u32 blocksize = btrfs_level_size(rc->extent_root, 0);
3078 int ret; 3363 int ret;
3079 int err = 0; 3364 int err = 0;
3080 3365
3081 ret = get_new_location(rc->data_inode, NULL, extent_key->objectid,
3082 extent_key->offset);
3083 BUG_ON(ret < 0);
3084 if (ret > 0) {
3085 /* the relocated data is fragmented */
3086 rc->extents_skipped++;
3087 btrfs_release_path(rc->extent_root, path);
3088 return 0;
3089 }
3090
3091 blocksize = btrfs_level_size(rc->extent_root, 0);
3092
3093 eb = path->nodes[0]; 3366 eb = path->nodes[0];
3094 ptr = btrfs_item_ptr_offset(eb, path->slots[0]); 3367 ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
3095 end = ptr + btrfs_item_size_nr(eb, path->slots[0]); 3368 end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
@@ -3170,7 +3443,8 @@ int add_data_references(struct reloc_control *rc,
3170 */ 3443 */
3171static noinline_for_stack 3444static noinline_for_stack
3172int find_next_extent(struct btrfs_trans_handle *trans, 3445int find_next_extent(struct btrfs_trans_handle *trans,
3173 struct reloc_control *rc, struct btrfs_path *path) 3446 struct reloc_control *rc, struct btrfs_path *path,
3447 struct btrfs_key *extent_key)
3174{ 3448{
3175 struct btrfs_key key; 3449 struct btrfs_key key;
3176 struct extent_buffer *leaf; 3450 struct extent_buffer *leaf;
@@ -3225,6 +3499,7 @@ next:
3225 rc->search_start = end + 1; 3499 rc->search_start = end + 1;
3226 } else { 3500 } else {
3227 rc->search_start = key.objectid + key.offset; 3501 rc->search_start = key.objectid + key.offset;
3502 memcpy(extent_key, &key, sizeof(key));
3228 return 0; 3503 return 0;
3229 } 3504 }
3230 } 3505 }
@@ -3262,12 +3537,49 @@ static int check_extent_flags(u64 flags)
3262 return 0; 3537 return 0;
3263} 3538}
3264 3539
3540static noinline_for_stack
3541int prepare_to_relocate(struct reloc_control *rc)
3542{
3543 struct btrfs_trans_handle *trans;
3544 int ret;
3545
3546 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root);
3547 if (!rc->block_rsv)
3548 return -ENOMEM;
3549
3550 /*
3551 * reserve some space for creating reloc trees.
3552 * btrfs_init_reloc_root will use them when there
3553 * is no reservation in transaction handle.
3554 */
3555 ret = btrfs_block_rsv_add(NULL, rc->extent_root, rc->block_rsv,
3556 rc->extent_root->nodesize * 256,
3557 &rc->block_rsv_retries);
3558 if (ret)
3559 return ret;
3560
3561 rc->block_rsv->refill_used = 1;
3562 btrfs_add_durable_block_rsv(rc->extent_root->fs_info, rc->block_rsv);
3563
3564 memset(&rc->cluster, 0, sizeof(rc->cluster));
3565 rc->search_start = rc->block_group->key.objectid;
3566 rc->extents_found = 0;
3567 rc->nodes_relocated = 0;
3568 rc->merging_rsv_size = 0;
3569 rc->block_rsv_retries = 0;
3570
3571 rc->create_reloc_tree = 1;
3572 set_reloc_control(rc);
3573
3574 trans = btrfs_join_transaction(rc->extent_root, 1);
3575 btrfs_commit_transaction(trans, rc->extent_root);
3576 return 0;
3577}
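
prepare_to_relocate() front-loads a block reservation that the rest of the pass draws on, and relocate_block_group() below releases and finally frees it on the way out. Read together, the lifecycle is roughly the following sketch, built only from the calls this patch introduces (the wrapper name is invented, and error handling is trimmed):

/* sketch: block_rsv lifecycle across one relocation pass */
static int reloc_rsv_lifecycle(struct reloc_control *rc)
{
	int ret;

	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root);
	if (!rc->block_rsv)
		return -ENOMEM;

	/* up-front space that btrfs_init_reloc_root() can fall back on */
	ret = btrfs_block_rsv_add(NULL, rc->extent_root, rc->block_rsv,
				  rc->extent_root->nodesize * 256,
				  &rc->block_rsv_retries);
	if (ret)
		return ret;

	/* ... relocate extents; btrfs_block_rsv_check() refills the rsv or
	 *     schedules a commit when it runs low ... */

	btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
	btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
	return 0;
}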
3265 3578
3266static noinline_for_stack int relocate_block_group(struct reloc_control *rc) 3579static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3267{ 3580{
3268 struct rb_root blocks = RB_ROOT; 3581 struct rb_root blocks = RB_ROOT;
3269 struct btrfs_key key; 3582 struct btrfs_key key;
3270 struct file_extent_cluster *cluster;
3271 struct btrfs_trans_handle *trans = NULL; 3583 struct btrfs_trans_handle *trans = NULL;
3272 struct btrfs_path *path; 3584 struct btrfs_path *path;
3273 struct btrfs_extent_item *ei; 3585 struct btrfs_extent_item *ei;
@@ -3277,33 +3589,25 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3277 int ret; 3589 int ret;
3278 int err = 0; 3590 int err = 0;
3279 3591
3280 cluster = kzalloc(sizeof(*cluster), GFP_NOFS);
3281 if (!cluster)
3282 return -ENOMEM;
3283
3284 path = btrfs_alloc_path(); 3592 path = btrfs_alloc_path();
3285 if (!path) { 3593 if (!path)
3286 kfree(cluster);
3287 return -ENOMEM; 3594 return -ENOMEM;
3288 }
3289
3290 rc->extents_found = 0;
3291 rc->extents_skipped = 0;
3292
3293 rc->search_start = rc->block_group->key.objectid;
3294 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
3295 GFP_NOFS);
3296
3297 rc->create_reloc_root = 1;
3298 set_reloc_control(rc);
3299 3595
3300 trans = btrfs_start_transaction(rc->extent_root, 1); 3596 ret = prepare_to_relocate(rc);
3301 btrfs_commit_transaction(trans, rc->extent_root); 3597 if (ret) {
3598 err = ret;
3599 goto out_free;
3600 }
3302 3601
3303 while (1) { 3602 while (1) {
3304 trans = btrfs_start_transaction(rc->extent_root, 1); 3603 trans = btrfs_start_transaction(rc->extent_root, 0);
3604
3605 if (update_backref_cache(trans, &rc->backref_cache)) {
3606 btrfs_end_transaction(trans, rc->extent_root);
3607 continue;
3608 }
3305 3609
3306 ret = find_next_extent(trans, rc, path); 3610 ret = find_next_extent(trans, rc, path, &key);
3307 if (ret < 0) 3611 if (ret < 0)
3308 err = ret; 3612 err = ret;
3309 if (ret != 0) 3613 if (ret != 0)
@@ -3313,9 +3617,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3313 3617
3314 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 3618 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3315 struct btrfs_extent_item); 3619 struct btrfs_extent_item);
3316 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 3620 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
3317 item_size = btrfs_item_size_nr(path->nodes[0],
3318 path->slots[0]);
3319 if (item_size >= sizeof(*ei)) { 3621 if (item_size >= sizeof(*ei)) {
3320 flags = btrfs_extent_flags(path->nodes[0], ei); 3622 flags = btrfs_extent_flags(path->nodes[0], ei);
3321 ret = check_extent_flags(flags); 3623 ret = check_extent_flags(flags);
@@ -3356,73 +3658,100 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3356 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 3658 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3357 ret = add_tree_block(rc, &key, path, &blocks); 3659 ret = add_tree_block(rc, &key, path, &blocks);
3358 } else if (rc->stage == UPDATE_DATA_PTRS && 3660 } else if (rc->stage == UPDATE_DATA_PTRS &&
3359 (flags & BTRFS_EXTENT_FLAG_DATA)) { 3661 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3360 ret = add_data_references(rc, &key, path, &blocks); 3662 ret = add_data_references(rc, &key, path, &blocks);
3361 } else { 3663 } else {
3362 btrfs_release_path(rc->extent_root, path); 3664 btrfs_release_path(rc->extent_root, path);
3363 ret = 0; 3665 ret = 0;
3364 } 3666 }
3365 if (ret < 0) { 3667 if (ret < 0) {
3366 err = 0; 3668 err = ret;
3367 break; 3669 break;
3368 } 3670 }
3369 3671
3370 if (!RB_EMPTY_ROOT(&blocks)) { 3672 if (!RB_EMPTY_ROOT(&blocks)) {
3371 ret = relocate_tree_blocks(trans, rc, &blocks); 3673 ret = relocate_tree_blocks(trans, rc, &blocks);
3372 if (ret < 0) { 3674 if (ret < 0) {
3675 if (ret != -EAGAIN) {
3676 err = ret;
3677 break;
3678 }
3679 rc->extents_found--;
3680 rc->search_start = key.objectid;
3681 }
3682 }
3683
3684 ret = btrfs_block_rsv_check(trans, rc->extent_root,
3685 rc->block_rsv, 0, 5);
3686 if (ret < 0) {
3687 if (ret != -EAGAIN) {
3373 err = ret; 3688 err = ret;
3689 WARN_ON(1);
3374 break; 3690 break;
3375 } 3691 }
3692 rc->commit_transaction = 1;
3376 } 3693 }
3377 3694
3378 nr = trans->blocks_used; 3695 if (rc->commit_transaction) {
3379 btrfs_end_transaction(trans, rc->extent_root); 3696 rc->commit_transaction = 0;
3697 ret = btrfs_commit_transaction(trans, rc->extent_root);
3698 BUG_ON(ret);
3699 } else {
3700 nr = trans->blocks_used;
3701 btrfs_end_transaction_throttle(trans, rc->extent_root);
3702 btrfs_btree_balance_dirty(rc->extent_root, nr);
3703 }
3380 trans = NULL; 3704 trans = NULL;
3381 btrfs_btree_balance_dirty(rc->extent_root, nr);
3382 3705
3383 if (rc->stage == MOVE_DATA_EXTENTS && 3706 if (rc->stage == MOVE_DATA_EXTENTS &&
3384 (flags & BTRFS_EXTENT_FLAG_DATA)) { 3707 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3385 rc->found_file_extent = 1; 3708 rc->found_file_extent = 1;
3386 ret = relocate_data_extent(rc->data_inode, 3709 ret = relocate_data_extent(rc->data_inode,
3387 &key, cluster); 3710 &key, &rc->cluster);
3388 if (ret < 0) { 3711 if (ret < 0) {
3389 err = ret; 3712 err = ret;
3390 break; 3713 break;
3391 } 3714 }
3392 } 3715 }
3393 } 3716 }
3394 btrfs_free_path(path); 3717
3718 btrfs_release_path(rc->extent_root, path);
3719 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
3720 GFP_NOFS);
3395 3721
3396 if (trans) { 3722 if (trans) {
3397 nr = trans->blocks_used; 3723 nr = trans->blocks_used;
3398 btrfs_end_transaction(trans, rc->extent_root); 3724 btrfs_end_transaction_throttle(trans, rc->extent_root);
3399 btrfs_btree_balance_dirty(rc->extent_root, nr); 3725 btrfs_btree_balance_dirty(rc->extent_root, nr);
3400 } 3726 }
3401 3727
3402 if (!err) { 3728 if (!err) {
3403 ret = relocate_file_extent_cluster(rc->data_inode, cluster); 3729 ret = relocate_file_extent_cluster(rc->data_inode,
3730 &rc->cluster);
3404 if (ret < 0) 3731 if (ret < 0)
3405 err = ret; 3732 err = ret;
3406 } 3733 }
3407 3734
3408 kfree(cluster); 3735 rc->create_reloc_tree = 0;
3736 set_reloc_control(rc);
3409 3737
3410 rc->create_reloc_root = 0; 3738 backref_cache_cleanup(&rc->backref_cache);
3411 smp_mb(); 3739 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
3412 3740
3413 if (rc->extents_found > 0) { 3741 err = prepare_to_merge(rc, err);
3414 trans = btrfs_start_transaction(rc->extent_root, 1);
3415 btrfs_commit_transaction(trans, rc->extent_root);
3416 }
3417 3742
3418 merge_reloc_roots(rc); 3743 merge_reloc_roots(rc);
3419 3744
3745 rc->merge_reloc_tree = 0;
3420 unset_reloc_control(rc); 3746 unset_reloc_control(rc);
3747 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
3421 3748
3422 /* get rid of pinned extents */ 3749 /* get rid of pinned extents */
3423 trans = btrfs_start_transaction(rc->extent_root, 1); 3750 trans = btrfs_join_transaction(rc->extent_root, 1);
3424 btrfs_commit_transaction(trans, rc->extent_root); 3751 btrfs_commit_transaction(trans, rc->extent_root);
3425 3752out_free:
3753 btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
3754 btrfs_free_path(path);
3426 return err; 3755 return err;
3427} 3756}
3428 3757
@@ -3448,7 +3777,8 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3448 btrfs_set_inode_generation(leaf, item, 1); 3777 btrfs_set_inode_generation(leaf, item, 1);
3449 btrfs_set_inode_size(leaf, item, 0); 3778 btrfs_set_inode_size(leaf, item, 0);
3450 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); 3779 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3451 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS); 3780 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
3781 BTRFS_INODE_PREALLOC);
3452 btrfs_mark_buffer_dirty(leaf); 3782 btrfs_mark_buffer_dirty(leaf);
3453 btrfs_release_path(root, path); 3783 btrfs_release_path(root, path);
3454out: 3784out:
@@ -3460,8 +3790,9 @@ out:
3460 * helper to create inode for data relocation. 3790 * helper to create inode for data relocation.
3461 * the inode is in the data relocation tree and its link count is 0 3791 * the inode is in the data relocation tree and its link count is 0
3462 */ 3792 */
3463static struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, 3793static noinline_for_stack
3464 struct btrfs_block_group_cache *group) 3794struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
3795 struct btrfs_block_group_cache *group)
3465{ 3796{
3466 struct inode *inode = NULL; 3797 struct inode *inode = NULL;
3467 struct btrfs_trans_handle *trans; 3798 struct btrfs_trans_handle *trans;
@@ -3475,8 +3806,9 @@ static struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
3475 if (IS_ERR(root)) 3806 if (IS_ERR(root))
3476 return ERR_CAST(root); 3807 return ERR_CAST(root);
3477 3808
3478 trans = btrfs_start_transaction(root, 1); 3809 trans = btrfs_start_transaction(root, 6);
3479 BUG_ON(!trans); 3810 if (IS_ERR(trans))
3811 return ERR_CAST(trans);
3480 3812
3481 err = btrfs_find_free_objectid(trans, root, objectid, &objectid); 3813 err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
3482 if (err) 3814 if (err)
@@ -3496,7 +3828,6 @@ static struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
3496out: 3828out:
3497 nr = trans->blocks_used; 3829 nr = trans->blocks_used;
3498 btrfs_end_transaction(trans, root); 3830 btrfs_end_transaction(trans, root);
3499
3500 btrfs_btree_balance_dirty(root, nr); 3831 btrfs_btree_balance_dirty(root, nr);
3501 if (err) { 3832 if (err) {
3502 if (inode) 3833 if (inode)
@@ -3506,6 +3837,21 @@ out:
3506 return inode; 3837 return inode;
3507} 3838}
3508 3839
3840static struct reloc_control *alloc_reloc_control(void)
3841{
3842 struct reloc_control *rc;
3843
3844 rc = kzalloc(sizeof(*rc), GFP_NOFS);
3845 if (!rc)
3846 return NULL;
3847
3848 INIT_LIST_HEAD(&rc->reloc_roots);
3849 backref_cache_init(&rc->backref_cache);
3850 mapping_tree_init(&rc->reloc_root_tree);
3851 extent_io_tree_init(&rc->processed_blocks, NULL, GFP_NOFS);
3852 return rc;
3853}
3854
3509/* 3855/*
3510 * function to relocate all extents in a block group. 3856 * function to relocate all extents in a block group.
3511 */ 3857 */
@@ -3514,24 +3860,26 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
3514 struct btrfs_fs_info *fs_info = extent_root->fs_info; 3860 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3515 struct reloc_control *rc; 3861 struct reloc_control *rc;
3516 int ret; 3862 int ret;
3863 int rw = 0;
3517 int err = 0; 3864 int err = 0;
3518 3865
3519 rc = kzalloc(sizeof(*rc), GFP_NOFS); 3866 rc = alloc_reloc_control();
3520 if (!rc) 3867 if (!rc)
3521 return -ENOMEM; 3868 return -ENOMEM;
3522 3869
3523 mapping_tree_init(&rc->reloc_root_tree); 3870 rc->extent_root = extent_root;
3524 extent_io_tree_init(&rc->processed_blocks, NULL, GFP_NOFS);
3525 INIT_LIST_HEAD(&rc->reloc_roots);
3526 3871
3527 rc->block_group = btrfs_lookup_block_group(fs_info, group_start); 3872 rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
3528 BUG_ON(!rc->block_group); 3873 BUG_ON(!rc->block_group);
3529 3874
3530 btrfs_init_workers(&rc->workers, "relocate", 3875 if (!rc->block_group->ro) {
3531 fs_info->thread_pool_size, NULL); 3876 ret = btrfs_set_block_group_ro(extent_root, rc->block_group);
3532 3877 if (ret) {
3533 rc->extent_root = extent_root; 3878 err = ret;
3534 btrfs_prepare_block_group_relocation(extent_root, rc->block_group); 3879 goto out;
3880 }
3881 rw = 1;
3882 }
3535 3883
3536 rc->data_inode = create_reloc_inode(fs_info, rc->block_group); 3884 rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
3537 if (IS_ERR(rc->data_inode)) { 3885 if (IS_ERR(rc->data_inode)) {
@@ -3548,9 +3896,6 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
3548 btrfs_wait_ordered_extents(fs_info->tree_root, 0, 0); 3896 btrfs_wait_ordered_extents(fs_info->tree_root, 0, 0);
3549 3897
3550 while (1) { 3898 while (1) {
3551 rc->extents_found = 0;
3552 rc->extents_skipped = 0;
3553
3554 mutex_lock(&fs_info->cleaner_mutex); 3899 mutex_lock(&fs_info->cleaner_mutex);
3555 3900
3556 btrfs_clean_old_snapshots(fs_info->tree_root); 3901 btrfs_clean_old_snapshots(fs_info->tree_root);
@@ -3559,7 +3904,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
3559 mutex_unlock(&fs_info->cleaner_mutex); 3904 mutex_unlock(&fs_info->cleaner_mutex);
3560 if (ret < 0) { 3905 if (ret < 0) {
3561 err = ret; 3906 err = ret;
3562 break; 3907 goto out;
3563 } 3908 }
3564 3909
3565 if (rc->extents_found == 0) 3910 if (rc->extents_found == 0)
@@ -3573,18 +3918,6 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
3573 invalidate_mapping_pages(rc->data_inode->i_mapping, 3918 invalidate_mapping_pages(rc->data_inode->i_mapping,
3574 0, -1); 3919 0, -1);
3575 rc->stage = UPDATE_DATA_PTRS; 3920 rc->stage = UPDATE_DATA_PTRS;
3576 } else if (rc->stage == UPDATE_DATA_PTRS &&
3577 rc->extents_skipped >= rc->extents_found) {
3578 iput(rc->data_inode);
3579 rc->data_inode = create_reloc_inode(fs_info,
3580 rc->block_group);
3581 if (IS_ERR(rc->data_inode)) {
3582 err = PTR_ERR(rc->data_inode);
3583 rc->data_inode = NULL;
3584 break;
3585 }
3586 rc->stage = MOVE_DATA_EXTENTS;
3587 rc->found_file_extent = 0;
3588 } 3921 }
3589 } 3922 }
3590 3923
@@ -3597,8 +3930,9 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
3597 WARN_ON(rc->block_group->reserved > 0); 3930 WARN_ON(rc->block_group->reserved > 0);
3598 WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0); 3931 WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
3599out: 3932out:
3933 if (err && rw)
3934 btrfs_set_block_group_rw(extent_root, rc->block_group);
3600 iput(rc->data_inode); 3935 iput(rc->data_inode);
3601 btrfs_stop_workers(&rc->workers);
3602 btrfs_put_block_group(rc->block_group); 3936 btrfs_put_block_group(rc->block_group);
3603 kfree(rc); 3937 kfree(rc);
3604 return err; 3938 return err;
@@ -3609,7 +3943,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
3609 struct btrfs_trans_handle *trans; 3943 struct btrfs_trans_handle *trans;
3610 int ret; 3944 int ret;
3611 3945
3612 trans = btrfs_start_transaction(root->fs_info->tree_root, 1); 3946 trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
3613 3947
3614 memset(&root->root_item.drop_progress, 0, 3948 memset(&root->root_item.drop_progress, 0,
3615 sizeof(root->root_item.drop_progress)); 3949 sizeof(root->root_item.drop_progress));
@@ -3702,20 +4036,20 @@ int btrfs_recover_relocation(struct btrfs_root *root)
3702 if (list_empty(&reloc_roots)) 4036 if (list_empty(&reloc_roots))
3703 goto out; 4037 goto out;
3704 4038
3705 rc = kzalloc(sizeof(*rc), GFP_NOFS); 4039 rc = alloc_reloc_control();
3706 if (!rc) { 4040 if (!rc) {
3707 err = -ENOMEM; 4041 err = -ENOMEM;
3708 goto out; 4042 goto out;
3709 } 4043 }
3710 4044
3711 mapping_tree_init(&rc->reloc_root_tree);
3712 INIT_LIST_HEAD(&rc->reloc_roots);
3713 btrfs_init_workers(&rc->workers, "relocate",
3714 root->fs_info->thread_pool_size, NULL);
3715 rc->extent_root = root->fs_info->extent_root; 4045 rc->extent_root = root->fs_info->extent_root;
3716 4046
3717 set_reloc_control(rc); 4047 set_reloc_control(rc);
3718 4048
4049 trans = btrfs_join_transaction(rc->extent_root, 1);
4050
4051 rc->merge_reloc_tree = 1;
4052
3719 while (!list_empty(&reloc_roots)) { 4053 while (!list_empty(&reloc_roots)) {
3720 reloc_root = list_entry(reloc_roots.next, 4054 reloc_root = list_entry(reloc_roots.next,
3721 struct btrfs_root, root_list); 4055 struct btrfs_root, root_list);
@@ -3735,20 +4069,16 @@ int btrfs_recover_relocation(struct btrfs_root *root)
3735 fs_root->reloc_root = reloc_root; 4069 fs_root->reloc_root = reloc_root;
3736 } 4070 }
3737 4071
3738 trans = btrfs_start_transaction(rc->extent_root, 1);
3739 btrfs_commit_transaction(trans, rc->extent_root); 4072 btrfs_commit_transaction(trans, rc->extent_root);
3740 4073
3741 merge_reloc_roots(rc); 4074 merge_reloc_roots(rc);
3742 4075
3743 unset_reloc_control(rc); 4076 unset_reloc_control(rc);
3744 4077
3745 trans = btrfs_start_transaction(rc->extent_root, 1); 4078 trans = btrfs_join_transaction(rc->extent_root, 1);
3746 btrfs_commit_transaction(trans, rc->extent_root); 4079 btrfs_commit_transaction(trans, rc->extent_root);
3747out: 4080out:
3748 if (rc) { 4081 kfree(rc);
3749 btrfs_stop_workers(&rc->workers);
3750 kfree(rc);
3751 }
3752 while (!list_empty(&reloc_roots)) { 4082 while (!list_empty(&reloc_roots)) {
3753 reloc_root = list_entry(reloc_roots.next, 4083 reloc_root = list_entry(reloc_roots.next,
3754 struct btrfs_root, root_list); 4084 struct btrfs_root, root_list);
@@ -3814,3 +4144,130 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
3814 btrfs_put_ordered_extent(ordered); 4144 btrfs_put_ordered_extent(ordered);
3815 return 0; 4145 return 0;
3816} 4146}
4147
4148void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4149 struct btrfs_root *root, struct extent_buffer *buf,
4150 struct extent_buffer *cow)
4151{
4152 struct reloc_control *rc;
4153 struct backref_node *node;
4154 int first_cow = 0;
4155 int level;
4156 int ret;
4157
4158 rc = root->fs_info->reloc_ctl;
4159 if (!rc)
4160 return;
4161
4162 BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
4163 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
4164
4165 level = btrfs_header_level(buf);
4166 if (btrfs_header_generation(buf) <=
4167 btrfs_root_last_snapshot(&root->root_item))
4168 first_cow = 1;
4169
4170 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4171 rc->create_reloc_tree) {
4172 WARN_ON(!first_cow && level == 0);
4173
4174 node = rc->backref_cache.path[level];
4175 BUG_ON(node->bytenr != buf->start &&
4176 node->new_bytenr != buf->start);
4177
4178 drop_node_buffer(node);
4179 extent_buffer_get(cow);
4180 node->eb = cow;
4181 node->new_bytenr = cow->start;
4182
4183 if (!node->pending) {
4184 list_move_tail(&node->list,
4185 &rc->backref_cache.pending[level]);
4186 node->pending = 1;
4187 }
4188
4189 if (first_cow)
4190 __mark_block_processed(rc, node);
4191
4192 if (first_cow && level > 0)
4193 rc->nodes_relocated += buf->len;
4194 }
4195
4196 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) {
4197 ret = replace_file_extents(trans, rc, root, cow);
4198 BUG_ON(ret);
4199 }
4200}
4201
4202/*
4203 * called before creating a snapshot. it calculates the metadata reservation
4204 * required for relocating tree blocks in the snapshot
4205 */
4206void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
4207 struct btrfs_pending_snapshot *pending,
4208 u64 *bytes_to_reserve)
4209{
4210 struct btrfs_root *root;
4211 struct reloc_control *rc;
4212
4213 root = pending->root;
4214 if (!root->reloc_root)
4215 return;
4216
4217 rc = root->fs_info->reloc_ctl;
4218 if (!rc->merge_reloc_tree)
4219 return;
4220
4221 root = root->reloc_root;
4222 BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4223 /*
4224 * relocation is in the stage of merging trees. the space
4225 * used by merging a reloc tree is twice the size of
4226 * relocated tree nodes in the worst case. half for cowing
4227 * the reloc tree, half for cowing the fs tree. the space
4228 * used by cowing the reloc tree will be freed after the
4229 * tree is dropped. if we create a snapshot, cowing the fs
4230 * tree may use more space than it frees. so we need to
4231 * reserve extra space.
4232 */
4233 *bytes_to_reserve += rc->nodes_relocated;
4234}
4235
4236/*
4237 * called after the snapshot is created. migrate the block reservation
4238 * and create reloc root for the newly created snapshot
4239 */
4240void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4241 struct btrfs_pending_snapshot *pending)
4242{
4243 struct btrfs_root *root = pending->root;
4244 struct btrfs_root *reloc_root;
4245 struct btrfs_root *new_root;
4246 struct reloc_control *rc;
4247 int ret;
4248
4249 if (!root->reloc_root)
4250 return;
4251
4252 rc = root->fs_info->reloc_ctl;
4253 rc->merging_rsv_size += rc->nodes_relocated;
4254
4255 if (rc->merge_reloc_tree) {
4256 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4257 rc->block_rsv,
4258 rc->nodes_relocated);
4259 BUG_ON(ret);
4260 }
4261
4262 new_root = pending->snap;
4263 reloc_root = create_reloc_root(trans, root->reloc_root,
4264 new_root->root_key.objectid);
4265
4266 __add_reloc_root(reloc_root);
4267 new_root->reloc_root = reloc_root;
4268
4269 if (rc->create_reloc_tree) {
4270 ret = clone_backref_node(trans, rc, root, reloc_root);
4271 BUG_ON(ret);
4272 }
4273}
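
Both hooks are wired into snapshot creation: the pre hook sizes the extra reservation before any blocks are reserved, and the post hook migrates that reservation and attaches a reloc root once the new snapshot root exists. A condensed sketch of the calling shape (mirroring the create_pending_snapshot() changes further down in this patch; the wrapper name is invented and the middle step is elided):

/* sketch: how snapshot creation brackets the relocation hooks */
static int snapshot_with_reloc_hooks(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_pending_snapshot *pending)
{
	u64 to_reserve = 0;
	int retries = 0;
	int ret = 0;

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
	if (to_reserve > 0)
		ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv,
					  to_reserve, &retries);
	if (ret)
		return ret;

	/* ... create the snapshot root and read it back into pending->snap ... */

	btrfs_reloc_post_snapshot(trans, pending);
	return 0;
}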
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 67fa2d29d663..b91ccd972644 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -259,6 +259,8 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
259 struct extent_buffer *leaf; 259 struct extent_buffer *leaf;
260 struct btrfs_path *path; 260 struct btrfs_path *path;
261 struct btrfs_key key; 261 struct btrfs_key key;
262 struct btrfs_key root_key;
263 struct btrfs_root *root;
262 int err = 0; 264 int err = 0;
263 int ret; 265 int ret;
264 266
@@ -270,6 +272,9 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
270 key.type = BTRFS_ORPHAN_ITEM_KEY; 272 key.type = BTRFS_ORPHAN_ITEM_KEY;
271 key.offset = 0; 273 key.offset = 0;
272 274
275 root_key.type = BTRFS_ROOT_ITEM_KEY;
276 root_key.offset = (u64)-1;
277
273 while (1) { 278 while (1) {
274 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0); 279 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
275 if (ret < 0) { 280 if (ret < 0) {
@@ -294,13 +299,25 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
294 key.type != BTRFS_ORPHAN_ITEM_KEY) 299 key.type != BTRFS_ORPHAN_ITEM_KEY)
295 break; 300 break;
296 301
297 ret = btrfs_find_dead_roots(tree_root, key.offset); 302 root_key.objectid = key.offset;
298 if (ret) { 303 key.offset++;
304
305 root = btrfs_read_fs_root_no_name(tree_root->fs_info,
306 &root_key);
307 if (!IS_ERR(root))
308 continue;
309
310 ret = PTR_ERR(root);
311 if (ret != -ENOENT) {
299 err = ret; 312 err = ret;
300 break; 313 break;
301 } 314 }
302 315
303 key.offset++; 316 ret = btrfs_find_dead_roots(tree_root, root_key.objectid);
317 if (ret) {
318 err = ret;
319 break;
320 }
304 } 321 }
305 322
306 btrfs_free_path(path); 323 btrfs_free_path(path);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 2909a03e5230..d34b2dfc9628 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -498,7 +498,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
498 btrfs_start_delalloc_inodes(root, 0); 498 btrfs_start_delalloc_inodes(root, 0);
499 btrfs_wait_ordered_extents(root, 0, 0); 499 btrfs_wait_ordered_extents(root, 0, 0);
500 500
501 trans = btrfs_start_transaction(root, 1); 501 trans = btrfs_start_transaction(root, 0);
502 ret = btrfs_commit_transaction(trans, root); 502 ret = btrfs_commit_transaction(trans, root);
503 return ret; 503 return ret;
504} 504}
@@ -694,11 +694,11 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
694 if (btrfs_super_log_root(&root->fs_info->super_copy) != 0) 694 if (btrfs_super_log_root(&root->fs_info->super_copy) != 0)
695 return -EINVAL; 695 return -EINVAL;
696 696
697 /* recover relocation */ 697 ret = btrfs_cleanup_fs_roots(root->fs_info);
698 ret = btrfs_recover_relocation(root);
699 WARN_ON(ret); 698 WARN_ON(ret);
700 699
701 ret = btrfs_cleanup_fs_roots(root->fs_info); 700 /* recover relocation */
701 ret = btrfs_recover_relocation(root);
702 WARN_ON(ret); 702 WARN_ON(ret);
703 703
704 sb->s_flags &= ~MS_RDONLY; 704 sb->s_flags &= ~MS_RDONLY;
@@ -714,34 +714,18 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
714 struct list_head *head = &root->fs_info->space_info; 714 struct list_head *head = &root->fs_info->space_info;
715 struct btrfs_space_info *found; 715 struct btrfs_space_info *found;
716 u64 total_used = 0; 716 u64 total_used = 0;
717 u64 data_used = 0;
718 int bits = dentry->d_sb->s_blocksize_bits; 717 int bits = dentry->d_sb->s_blocksize_bits;
719 __be32 *fsid = (__be32 *)root->fs_info->fsid; 718 __be32 *fsid = (__be32 *)root->fs_info->fsid;
720 719
721 rcu_read_lock(); 720 rcu_read_lock();
722 list_for_each_entry_rcu(found, head, list) { 721 list_for_each_entry_rcu(found, head, list)
723 if (found->flags & (BTRFS_BLOCK_GROUP_DUP| 722 total_used += found->disk_used;
724 BTRFS_BLOCK_GROUP_RAID10|
725 BTRFS_BLOCK_GROUP_RAID1)) {
726 total_used += found->bytes_used;
727 if (found->flags & BTRFS_BLOCK_GROUP_DATA)
728 data_used += found->bytes_used;
729 else
730 data_used += found->total_bytes;
731 }
732
733 total_used += found->bytes_used;
734 if (found->flags & BTRFS_BLOCK_GROUP_DATA)
735 data_used += found->bytes_used;
736 else
737 data_used += found->total_bytes;
738 }
739 rcu_read_unlock(); 723 rcu_read_unlock();
740 724
741 buf->f_namelen = BTRFS_NAME_LEN; 725 buf->f_namelen = BTRFS_NAME_LEN;
742 buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits; 726 buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits;
743 buf->f_bfree = buf->f_blocks - (total_used >> bits); 727 buf->f_bfree = buf->f_blocks - (total_used >> bits);
744 buf->f_bavail = buf->f_blocks - (data_used >> bits); 728 buf->f_bavail = buf->f_bfree;
745 buf->f_bsize = dentry->d_sb->s_blocksize; 729 buf->f_bsize = dentry->d_sb->s_blocksize;
746 buf->f_type = BTRFS_SUPER_MAGIC; 730 buf->f_type = BTRFS_SUPER_MAGIC;
747 731
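
With the per-RAID-profile special cases gone, statfs just sums disk_used across the space_info list and reports free space symmetrically (f_bavail == f_bfree). The remaining block arithmetic reduces to the following tiny helper (names invented for illustration):

#include <linux/types.h>

/* sketch of the statfs math: shift byte counts into filesystem blocks */
static void fill_statfs_blocks(u64 total_bytes, u64 total_used,
			       unsigned int blocksize_bits,
			       u64 *f_blocks, u64 *f_bfree, u64 *f_bavail)
{
	*f_blocks = total_bytes >> blocksize_bits;
	*f_bfree  = *f_blocks - (total_used >> blocksize_bits);
	*f_bavail = *f_bfree;	/* no separate data-only estimate any more */
}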
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 2cb116099b90..66e4c66cc63b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -165,54 +165,89 @@ enum btrfs_trans_type {
165 TRANS_USERSPACE, 165 TRANS_USERSPACE,
166}; 166};
167 167
168static int may_wait_transaction(struct btrfs_root *root, int type)
169{
170 if (!root->fs_info->log_root_recovering &&
171 ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
172 type == TRANS_USERSPACE))
173 return 1;
174 return 0;
175}
176
168static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, 177static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
169 int num_blocks, int type) 178 u64 num_items, int type)
170{ 179{
171 struct btrfs_trans_handle *h = 180 struct btrfs_trans_handle *h;
172 kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); 181 struct btrfs_transaction *cur_trans;
182 int retries = 0;
173 int ret; 183 int ret;
184again:
185 h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
186 if (!h)
187 return ERR_PTR(-ENOMEM);
174 188
175 mutex_lock(&root->fs_info->trans_mutex); 189 mutex_lock(&root->fs_info->trans_mutex);
176 if (!root->fs_info->log_root_recovering && 190 if (may_wait_transaction(root, type))
177 ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
178 type == TRANS_USERSPACE))
179 wait_current_trans(root); 191 wait_current_trans(root);
192
180 ret = join_transaction(root); 193 ret = join_transaction(root);
181 BUG_ON(ret); 194 BUG_ON(ret);
182 195
183 h->transid = root->fs_info->running_transaction->transid; 196 cur_trans = root->fs_info->running_transaction;
184 h->transaction = root->fs_info->running_transaction; 197 cur_trans->use_count++;
185 h->blocks_reserved = num_blocks; 198 mutex_unlock(&root->fs_info->trans_mutex);
199
200 h->transid = cur_trans->transid;
201 h->transaction = cur_trans;
186 h->blocks_used = 0; 202 h->blocks_used = 0;
187 h->block_group = 0; 203 h->block_group = 0;
188 h->alloc_exclude_nr = 0; 204 h->bytes_reserved = 0;
189 h->alloc_exclude_start = 0;
190 h->delayed_ref_updates = 0; 205 h->delayed_ref_updates = 0;
206 h->block_rsv = NULL;
191 207
192 if (!current->journal_info && type != TRANS_USERSPACE) 208 smp_mb();
193 current->journal_info = h; 209 if (cur_trans->blocked && may_wait_transaction(root, type)) {
210 btrfs_commit_transaction(h, root);
211 goto again;
212 }
213
214 if (num_items > 0) {
215 ret = btrfs_trans_reserve_metadata(h, root, num_items,
216 &retries);
217 if (ret == -EAGAIN) {
218 btrfs_commit_transaction(h, root);
219 goto again;
220 }
221 if (ret < 0) {
222 btrfs_end_transaction(h, root);
223 return ERR_PTR(ret);
224 }
225 }
194 226
195 root->fs_info->running_transaction->use_count++; 227 mutex_lock(&root->fs_info->trans_mutex);
196 record_root_in_trans(h, root); 228 record_root_in_trans(h, root);
197 mutex_unlock(&root->fs_info->trans_mutex); 229 mutex_unlock(&root->fs_info->trans_mutex);
230
231 if (!current->journal_info && type != TRANS_USERSPACE)
232 current->journal_info = h;
198 return h; 233 return h;
199} 234}
200 235
201struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, 236struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
202 int num_blocks) 237 int num_items)
203{ 238{
204 return start_transaction(root, num_blocks, TRANS_START); 239 return start_transaction(root, num_items, TRANS_START);
205} 240}
206struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, 241struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
207 int num_blocks) 242 int num_blocks)
208{ 243{
209 return start_transaction(root, num_blocks, TRANS_JOIN); 244 return start_transaction(root, 0, TRANS_JOIN);
210} 245}
211 246
212struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, 247struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
213 int num_blocks) 248 int num_blocks)
214{ 249{
215 return start_transaction(r, num_blocks, TRANS_USERSPACE); 250 return start_transaction(r, 0, TRANS_USERSPACE);
216} 251}
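
With this change the first argument of btrfs_start_transaction() counts the metadata items the caller expects to touch rather than raw blocks, and the call can now fail with an ERR_PTR. A minimal caller sketch under those assumptions (the function name and the body comment are placeholders):

/* sketch: start a transaction sized for two item updates */
static int update_two_items(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = btrfs_start_transaction(root, 2);	/* reserve for 2 items */
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* ... modify up to two tree items under this handle ... */

	btrfs_end_transaction(trans, root);
	return 0;
}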
217 252
218/* wait for a transaction commit to be fully complete */ 253/* wait for a transaction commit to be fully complete */
@@ -286,10 +321,36 @@ void btrfs_throttle(struct btrfs_root *root)
286 mutex_unlock(&root->fs_info->trans_mutex); 321 mutex_unlock(&root->fs_info->trans_mutex);
287} 322}
288 323
324static int should_end_transaction(struct btrfs_trans_handle *trans,
325 struct btrfs_root *root)
326{
327 int ret;
328 ret = btrfs_block_rsv_check(trans, root,
329 &root->fs_info->global_block_rsv, 0, 5);
330 return ret ? 1 : 0;
331}
332
333int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
334 struct btrfs_root *root)
335{
336 struct btrfs_transaction *cur_trans = trans->transaction;
337 int updates;
338
339 if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
340 return 1;
341
342 updates = trans->delayed_ref_updates;
343 trans->delayed_ref_updates = 0;
344 if (updates)
345 btrfs_run_delayed_refs(trans, root, updates);
346
347 return should_end_transaction(trans, root);
348}
349
289static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, 350static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
290 struct btrfs_root *root, int throttle) 351 struct btrfs_root *root, int throttle)
291{ 352{
292 struct btrfs_transaction *cur_trans; 353 struct btrfs_transaction *cur_trans = trans->transaction;
293 struct btrfs_fs_info *info = root->fs_info; 354 struct btrfs_fs_info *info = root->fs_info;
294 int count = 0; 355 int count = 0;
295 356
@@ -313,9 +374,21 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
313 count++; 374 count++;
314 } 375 }
315 376
377 btrfs_trans_release_metadata(trans, root);
378
379 if (!root->fs_info->open_ioctl_trans &&
380 should_end_transaction(trans, root))
381 trans->transaction->blocked = 1;
382
383 if (cur_trans->blocked && !cur_trans->in_commit) {
384 if (throttle)
385 return btrfs_commit_transaction(trans, root);
386 else
387 wake_up_process(info->transaction_kthread);
388 }
389
316 mutex_lock(&info->trans_mutex); 390 mutex_lock(&info->trans_mutex);
317 cur_trans = info->running_transaction; 391 WARN_ON(cur_trans != info->running_transaction);
318 WARN_ON(cur_trans != trans->transaction);
319 WARN_ON(cur_trans->num_writers < 1); 392 WARN_ON(cur_trans->num_writers < 1);
320 cur_trans->num_writers--; 393 cur_trans->num_writers--;
321 394
@@ -603,6 +676,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
603 676
604 btrfs_free_log(trans, root); 677 btrfs_free_log(trans, root);
605 btrfs_update_reloc_root(trans, root); 678 btrfs_update_reloc_root(trans, root);
679 btrfs_orphan_commit_root(trans, root);
606 680
607 if (root->commit_root != root->node) { 681 if (root->commit_root != root->node) {
608 switch_commit_root(root); 682 switch_commit_root(root);
@@ -627,30 +701,30 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
627int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) 701int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
628{ 702{
629 struct btrfs_fs_info *info = root->fs_info; 703 struct btrfs_fs_info *info = root->fs_info;
630 int ret;
631 struct btrfs_trans_handle *trans; 704 struct btrfs_trans_handle *trans;
705 int ret;
632 unsigned long nr; 706 unsigned long nr;
633 707
634 smp_mb(); 708 if (xchg(&root->defrag_running, 1))
635 if (root->defrag_running)
636 return 0; 709 return 0;
637 trans = btrfs_start_transaction(root, 1); 710
638 while (1) { 711 while (1) {
639 root->defrag_running = 1; 712 trans = btrfs_start_transaction(root, 0);
713 if (IS_ERR(trans))
714 return PTR_ERR(trans);
715
640 ret = btrfs_defrag_leaves(trans, root, cacheonly); 716 ret = btrfs_defrag_leaves(trans, root, cacheonly);
717
641 nr = trans->blocks_used; 718 nr = trans->blocks_used;
642 btrfs_end_transaction(trans, root); 719 btrfs_end_transaction(trans, root);
643 btrfs_btree_balance_dirty(info->tree_root, nr); 720 btrfs_btree_balance_dirty(info->tree_root, nr);
644 cond_resched(); 721 cond_resched();
645 722
646 trans = btrfs_start_transaction(root, 1);
647 if (root->fs_info->closing || ret != -EAGAIN) 723 if (root->fs_info->closing || ret != -EAGAIN)
648 break; 724 break;
649 } 725 }
650 root->defrag_running = 0; 726 root->defrag_running = 0;
651 smp_mb(); 727 return ret;
652 btrfs_end_transaction(trans, root);
653 return 0;
654} 728}
655 729
656#if 0 730#if 0
@@ -758,47 +832,63 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
758 struct btrfs_root *root = pending->root; 832 struct btrfs_root *root = pending->root;
759 struct btrfs_root *parent_root; 833 struct btrfs_root *parent_root;
760 struct inode *parent_inode; 834 struct inode *parent_inode;
835 struct dentry *dentry;
761 struct extent_buffer *tmp; 836 struct extent_buffer *tmp;
762 struct extent_buffer *old; 837 struct extent_buffer *old;
763 int ret; 838 int ret;
764 u64 objectid; 839 int retries = 0;
765 int namelen; 840 u64 to_reserve = 0;
766 u64 index = 0; 841 u64 index = 0;
767 842 u64 objectid;
768 parent_inode = pending->dentry->d_parent->d_inode;
769 parent_root = BTRFS_I(parent_inode)->root;
770 843
771 new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS); 844 new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
772 if (!new_root_item) { 845 if (!new_root_item) {
773 ret = -ENOMEM; 846 pending->error = -ENOMEM;
774 goto fail; 847 goto fail;
775 } 848 }
849
776 ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid); 850 ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
777 if (ret) 851 if (ret) {
852 pending->error = ret;
778 goto fail; 853 goto fail;
854 }
855
856 btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
857 btrfs_orphan_pre_snapshot(trans, pending, &to_reserve);
858
859 if (to_reserve > 0) {
860 ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv,
861 to_reserve, &retries);
862 if (ret) {
863 pending->error = ret;
864 goto fail;
865 }
866 }
779 867
780 key.objectid = objectid; 868 key.objectid = objectid;
781 /* record when the snapshot was created in key.offset */ 869 key.offset = (u64)-1;
782 key.offset = trans->transid; 870 key.type = BTRFS_ROOT_ITEM_KEY;
783 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
784 871
785 memcpy(&pending->root_key, &key, sizeof(key)); 872 trans->block_rsv = &pending->block_rsv;
786 pending->root_key.offset = (u64)-1;
787 873
874 dentry = pending->dentry;
875 parent_inode = dentry->d_parent->d_inode;
876 parent_root = BTRFS_I(parent_inode)->root;
788 record_root_in_trans(trans, parent_root); 877 record_root_in_trans(trans, parent_root);
878
789 /* 879 /*
790 * insert the directory item 880 * insert the directory item
791 */ 881 */
792 namelen = strlen(pending->name);
793 ret = btrfs_set_inode_index(parent_inode, &index); 882 ret = btrfs_set_inode_index(parent_inode, &index);
794 BUG_ON(ret); 883 BUG_ON(ret);
795 ret = btrfs_insert_dir_item(trans, parent_root, 884 ret = btrfs_insert_dir_item(trans, parent_root,
796 pending->name, namelen, 885 dentry->d_name.name, dentry->d_name.len,
797 parent_inode->i_ino, 886 parent_inode->i_ino, &key,
798 &pending->root_key, BTRFS_FT_DIR, index); 887 BTRFS_FT_DIR, index);
799 BUG_ON(ret); 888 BUG_ON(ret);
800 889
801 btrfs_i_size_write(parent_inode, parent_inode->i_size + namelen * 2); 890 btrfs_i_size_write(parent_inode, parent_inode->i_size +
891 dentry->d_name.len * 2);
802 ret = btrfs_update_inode(trans, parent_root, parent_inode); 892 ret = btrfs_update_inode(trans, parent_root, parent_inode);
803 BUG_ON(ret); 893 BUG_ON(ret);
804 894
@@ -815,22 +905,32 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
815 free_extent_buffer(old); 905 free_extent_buffer(old);
816 906
817 btrfs_set_root_node(new_root_item, tmp); 907 btrfs_set_root_node(new_root_item, tmp);
818 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key, 908 /* record when the snapshot was created in key.offset */
819 new_root_item); 909 key.offset = trans->transid;
820 BUG_ON(ret); 910 ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
821 btrfs_tree_unlock(tmp); 911 btrfs_tree_unlock(tmp);
822 free_extent_buffer(tmp); 912 free_extent_buffer(tmp);
913 BUG_ON(ret);
823 914
824 ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root, 915 /*
825 pending->root_key.objectid, 916 * insert root back/forward references
917 */
918 ret = btrfs_add_root_ref(trans, tree_root, objectid,
826 parent_root->root_key.objectid, 919 parent_root->root_key.objectid,
827 parent_inode->i_ino, index, pending->name, 920 parent_inode->i_ino, index,
828 namelen); 921 dentry->d_name.name, dentry->d_name.len);
829 BUG_ON(ret); 922 BUG_ON(ret);
830 923
924 key.offset = (u64)-1;
925 pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
926 BUG_ON(IS_ERR(pending->snap));
927
928 btrfs_reloc_post_snapshot(trans, pending);
929 btrfs_orphan_post_snapshot(trans, pending);
831fail: 930fail:
832 kfree(new_root_item); 931 kfree(new_root_item);
833 return ret; 932 btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
933 return 0;
834} 934}
835 935
836/* 936/*
@@ -878,6 +978,16 @@ int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
878 return ret; 978 return ret;
879} 979}
880 980
981int btrfs_transaction_blocked(struct btrfs_fs_info *info)
982{
983 int ret = 0;
984 spin_lock(&info->new_trans_lock);
985 if (info->running_transaction)
986 ret = info->running_transaction->blocked;
987 spin_unlock(&info->new_trans_lock);
988 return ret;
989}
990
881int btrfs_commit_transaction(struct btrfs_trans_handle *trans, 991int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
882 struct btrfs_root *root) 992 struct btrfs_root *root)
883{ 993{
@@ -899,6 +1009,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
899 ret = btrfs_run_delayed_refs(trans, root, 0); 1009 ret = btrfs_run_delayed_refs(trans, root, 0);
900 BUG_ON(ret); 1010 BUG_ON(ret);
901 1011
1012 btrfs_trans_release_metadata(trans, root);
1013
902 cur_trans = trans->transaction; 1014 cur_trans = trans->transaction;
903 /* 1015 /*
904 * set the flushing flag so procs in this transaction have to 1016 * set the flushing flag so procs in this transaction have to
@@ -951,9 +1063,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
951 snap_pending = 1; 1063 snap_pending = 1;
952 1064
953 WARN_ON(cur_trans != trans->transaction); 1065 WARN_ON(cur_trans != trans->transaction);
954 prepare_to_wait(&cur_trans->writer_wait, &wait,
955 TASK_UNINTERRUPTIBLE);
956
957 if (cur_trans->num_writers > 1) 1066 if (cur_trans->num_writers > 1)
958 timeout = MAX_SCHEDULE_TIMEOUT; 1067 timeout = MAX_SCHEDULE_TIMEOUT;
959 else if (should_grow) 1068 else if (should_grow)
@@ -976,6 +1085,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
976 */ 1085 */
977 btrfs_run_ordered_operations(root, 1); 1086 btrfs_run_ordered_operations(root, 1);
978 1087
1088 prepare_to_wait(&cur_trans->writer_wait, &wait,
1089 TASK_UNINTERRUPTIBLE);
1090
979 smp_mb(); 1091 smp_mb();
980 if (cur_trans->num_writers > 1 || should_grow) 1092 if (cur_trans->num_writers > 1 || should_grow)
981 schedule_timeout(timeout); 1093 schedule_timeout(timeout);
@@ -1103,9 +1215,9 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
1103 1215
1104 if (btrfs_header_backref_rev(root->node) < 1216 if (btrfs_header_backref_rev(root->node) <
1105 BTRFS_MIXED_BACKREF_REV) 1217 BTRFS_MIXED_BACKREF_REV)
1106 btrfs_drop_snapshot(root, 0); 1218 btrfs_drop_snapshot(root, NULL, 0);
1107 else 1219 else
1108 btrfs_drop_snapshot(root, 1); 1220 btrfs_drop_snapshot(root, NULL, 1);
1109 } 1221 }
1110 return 0; 1222 return 0;
1111} 1223}
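
A minimal sketch of how the new btrfs_transaction_blocked() helper above might be polled by a caller that wants to back off while a commit is in its blocked phase (the caller below is hypothetical and only illustrates the spinlock-protected read shown in the hunk):

	/* hypothetical caller: poll the blocked flag without joining the transaction */
	static void wait_while_commit_blocked(struct btrfs_fs_info *info)
	{
		while (btrfs_transaction_blocked(info))
			schedule_timeout_interruptible(HZ / 50);	/* ~20ms between polls */
	}
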
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 93c7ccb33118..e104986d0bfd 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -45,20 +45,23 @@ struct btrfs_transaction {
45 45
46struct btrfs_trans_handle { 46struct btrfs_trans_handle {
47 u64 transid; 47 u64 transid;
48 u64 block_group;
49 u64 bytes_reserved;
48 unsigned long blocks_reserved; 50 unsigned long blocks_reserved;
49 unsigned long blocks_used; 51 unsigned long blocks_used;
50 struct btrfs_transaction *transaction;
51 u64 block_group;
52 u64 alloc_exclude_start;
53 u64 alloc_exclude_nr;
54 unsigned long delayed_ref_updates; 52 unsigned long delayed_ref_updates;
53 struct btrfs_transaction *transaction;
54 struct btrfs_block_rsv *block_rsv;
55}; 55};
56 56
57struct btrfs_pending_snapshot { 57struct btrfs_pending_snapshot {
58 struct dentry *dentry; 58 struct dentry *dentry;
59 struct btrfs_root *root; 59 struct btrfs_root *root;
60 char *name; 60 struct btrfs_root *snap;
61 struct btrfs_key root_key; 61 /* block reservation for the operation */
62 struct btrfs_block_rsv block_rsv;
 63 /* extra metadata reservation for relocation */
64 int error;
62 struct list_head list; 65 struct list_head list;
63}; 66};
64 67
@@ -85,11 +88,11 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
85int btrfs_end_transaction(struct btrfs_trans_handle *trans, 88int btrfs_end_transaction(struct btrfs_trans_handle *trans,
86 struct btrfs_root *root); 89 struct btrfs_root *root);
87struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, 90struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
88 int num_blocks); 91 int num_items);
89struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, 92struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
90 int num_blocks); 93 int num_blocks);
91struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, 94struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
92 int num_blocks); 95 int num_blocks);
93int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, 96int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
94 struct btrfs_root *root); 97 struct btrfs_root *root);
95int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, 98int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
@@ -103,6 +106,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
103 struct btrfs_root *root); 106 struct btrfs_root *root);
104int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, 107int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
105 struct btrfs_root *root); 108 struct btrfs_root *root);
109int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
110 struct btrfs_root *root);
106void btrfs_throttle(struct btrfs_root *root); 111void btrfs_throttle(struct btrfs_root *root);
107int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, 112int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
108 struct btrfs_root *root); 113 struct btrfs_root *root);
@@ -112,5 +117,6 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
112 struct extent_io_tree *dirty_pages, int mark); 117 struct extent_io_tree *dirty_pages, int mark);
113int btrfs_wait_marked_extents(struct btrfs_root *root, 118int btrfs_wait_marked_extents(struct btrfs_root *root,
114 struct extent_io_tree *dirty_pages, int mark); 119 struct extent_io_tree *dirty_pages, int mark);
120int btrfs_transaction_blocked(struct btrfs_fs_info *info);
115int btrfs_transaction_in_commit(struct btrfs_fs_info *info); 121int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
116#endif 122#endif
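
With btrfs_start_transaction() now taking an item count rather than a raw block count, callers reserve metadata space up front and detect failure through IS_ERR() instead of a NULL return. A minimal sketch of the assumed caller pattern, mirroring the xattr.c conversion later in this diff:

	struct btrfs_trans_handle *trans;

	/* reserve room for two tree items, e.g. an inode item plus a dir item */
	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* ... modify the trees ... */

	btrfs_end_transaction(trans, root);
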
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index b10eacdb1620..f7ac8e013ed7 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -117,13 +117,14 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
117 path->nodes[1], 0, 117 path->nodes[1], 0,
118 cache_only, &last_ret, 118 cache_only, &last_ret,
119 &root->defrag_progress); 119 &root->defrag_progress);
120 WARN_ON(ret && ret != -EAGAIN); 120 if (ret) {
121 WARN_ON(ret == -EAGAIN);
122 goto out;
123 }
121 if (next_key_ret == 0) { 124 if (next_key_ret == 0) {
122 memcpy(&root->defrag_progress, &key, sizeof(key)); 125 memcpy(&root->defrag_progress, &key, sizeof(key));
123 ret = -EAGAIN; 126 ret = -EAGAIN;
124 } 127 }
125
126 btrfs_release_path(root, path);
127out: 128out:
128 if (path) 129 if (path)
129 btrfs_free_path(path); 130 btrfs_free_path(path);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index af57dd2b43d4..fb102a9aee9c 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -135,6 +135,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
135 struct btrfs_root *root) 135 struct btrfs_root *root)
136{ 136{
137 int ret; 137 int ret;
138 int err = 0;
138 139
139 mutex_lock(&root->log_mutex); 140 mutex_lock(&root->log_mutex);
140 if (root->log_root) { 141 if (root->log_root) {
@@ -155,17 +156,19 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
155 mutex_lock(&root->fs_info->tree_log_mutex); 156 mutex_lock(&root->fs_info->tree_log_mutex);
156 if (!root->fs_info->log_root_tree) { 157 if (!root->fs_info->log_root_tree) {
157 ret = btrfs_init_log_root_tree(trans, root->fs_info); 158 ret = btrfs_init_log_root_tree(trans, root->fs_info);
158 BUG_ON(ret); 159 if (ret)
160 err = ret;
159 } 161 }
160 if (!root->log_root) { 162 if (err == 0 && !root->log_root) {
161 ret = btrfs_add_log_tree(trans, root); 163 ret = btrfs_add_log_tree(trans, root);
162 BUG_ON(ret); 164 if (ret)
165 err = ret;
163 } 166 }
164 mutex_unlock(&root->fs_info->tree_log_mutex); 167 mutex_unlock(&root->fs_info->tree_log_mutex);
165 root->log_batch++; 168 root->log_batch++;
166 atomic_inc(&root->log_writers); 169 atomic_inc(&root->log_writers);
167 mutex_unlock(&root->log_mutex); 170 mutex_unlock(&root->log_mutex);
168 return 0; 171 return err;
169} 172}
170 173
171/* 174/*
@@ -376,7 +379,7 @@ insert:
376 BUG_ON(ret); 379 BUG_ON(ret);
377 } 380 }
378 } else if (ret) { 381 } else if (ret) {
379 BUG(); 382 return ret;
380 } 383 }
381 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], 384 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
382 path->slots[0]); 385 path->slots[0]);
@@ -1699,9 +1702,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1699 1702
1700 next = btrfs_find_create_tree_block(root, bytenr, blocksize); 1703 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
1701 1704
1702 wc->process_func(root, next, wc, ptr_gen);
1703
1704 if (*level == 1) { 1705 if (*level == 1) {
1706 wc->process_func(root, next, wc, ptr_gen);
1707
1705 path->slots[*level]++; 1708 path->slots[*level]++;
1706 if (wc->free) { 1709 if (wc->free) {
1707 btrfs_read_buffer(next, ptr_gen); 1710 btrfs_read_buffer(next, ptr_gen);
@@ -1734,35 +1737,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1734 WARN_ON(*level < 0); 1737 WARN_ON(*level < 0);
1735 WARN_ON(*level >= BTRFS_MAX_LEVEL); 1738 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1736 1739
1737 if (path->nodes[*level] == root->node) 1740 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
1738 parent = path->nodes[*level];
1739 else
1740 parent = path->nodes[*level + 1];
1741
1742 bytenr = path->nodes[*level]->start;
1743
1744 blocksize = btrfs_level_size(root, *level);
1745 root_owner = btrfs_header_owner(parent);
1746 root_gen = btrfs_header_generation(parent);
1747
1748 wc->process_func(root, path->nodes[*level], wc,
1749 btrfs_header_generation(path->nodes[*level]));
1750
1751 if (wc->free) {
1752 next = path->nodes[*level];
1753 btrfs_tree_lock(next);
1754 clean_tree_block(trans, root, next);
1755 btrfs_set_lock_blocking(next);
1756 btrfs_wait_tree_block_writeback(next);
1757 btrfs_tree_unlock(next);
1758
1759 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
1760 ret = btrfs_free_reserved_extent(root, bytenr, blocksize);
1761 BUG_ON(ret);
1762 }
1763 free_extent_buffer(path->nodes[*level]);
1764 path->nodes[*level] = NULL;
1765 *level += 1;
1766 1741
1767 cond_resched(); 1742 cond_resched();
1768 return 0; 1743 return 0;
@@ -1781,7 +1756,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
1781 1756
1782 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { 1757 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
1783 slot = path->slots[i]; 1758 slot = path->slots[i];
1784 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) { 1759 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
1785 struct extent_buffer *node; 1760 struct extent_buffer *node;
1786 node = path->nodes[i]; 1761 node = path->nodes[i];
1787 path->slots[i]++; 1762 path->slots[i]++;
@@ -2047,7 +2022,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2047 mutex_unlock(&log_root_tree->log_mutex); 2022 mutex_unlock(&log_root_tree->log_mutex);
2048 2023
2049 ret = update_log_root(trans, log); 2024 ret = update_log_root(trans, log);
2050 BUG_ON(ret);
2051 2025
2052 mutex_lock(&log_root_tree->log_mutex); 2026 mutex_lock(&log_root_tree->log_mutex);
2053 if (atomic_dec_and_test(&log_root_tree->log_writers)) { 2027 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
@@ -2056,6 +2030,15 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2056 wake_up(&log_root_tree->log_writer_wait); 2030 wake_up(&log_root_tree->log_writer_wait);
2057 } 2031 }
2058 2032
2033 if (ret) {
2034 BUG_ON(ret != -ENOSPC);
2035 root->fs_info->last_trans_log_full_commit = trans->transid;
2036 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2037 mutex_unlock(&log_root_tree->log_mutex);
2038 ret = -EAGAIN;
2039 goto out;
2040 }
2041
2059 index2 = log_root_tree->log_transid % 2; 2042 index2 = log_root_tree->log_transid % 2;
2060 if (atomic_read(&log_root_tree->log_commit[index2])) { 2043 if (atomic_read(&log_root_tree->log_commit[index2])) {
2061 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2044 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
@@ -2129,15 +2112,10 @@ out:
2129 return 0; 2112 return 0;
2130} 2113}
2131 2114
2132/* 2115static void free_log_tree(struct btrfs_trans_handle *trans,
2133 * free all the extents used by the tree log. This should be called 2116 struct btrfs_root *log)
2134 * at commit time of the full transaction
2135 */
2136int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2137{ 2117{
2138 int ret; 2118 int ret;
2139 struct btrfs_root *log;
2140 struct key;
2141 u64 start; 2119 u64 start;
2142 u64 end; 2120 u64 end;
2143 struct walk_control wc = { 2121 struct walk_control wc = {
@@ -2145,10 +2123,6 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2145 .process_func = process_one_buffer 2123 .process_func = process_one_buffer
2146 }; 2124 };
2147 2125
2148 if (!root->log_root || root->fs_info->log_root_recovering)
2149 return 0;
2150
2151 log = root->log_root;
2152 ret = walk_log_tree(trans, log, &wc); 2126 ret = walk_log_tree(trans, log, &wc);
2153 BUG_ON(ret); 2127 BUG_ON(ret);
2154 2128
@@ -2162,14 +2136,30 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2162 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS); 2136 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
2163 } 2137 }
2164 2138
2165 if (log->log_transid > 0) {
2166 ret = btrfs_del_root(trans, root->fs_info->log_root_tree,
2167 &log->root_key);
2168 BUG_ON(ret);
2169 }
2170 root->log_root = NULL;
2171 free_extent_buffer(log->node); 2139 free_extent_buffer(log->node);
2172 kfree(log); 2140 kfree(log);
2141}
2142
2143/*
2144 * free all the extents used by the tree log. This should be called
2145 * at commit time of the full transaction
2146 */
2147int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2148{
2149 if (root->log_root) {
2150 free_log_tree(trans, root->log_root);
2151 root->log_root = NULL;
2152 }
2153 return 0;
2154}
2155
2156int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
2157 struct btrfs_fs_info *fs_info)
2158{
2159 if (fs_info->log_root_tree) {
2160 free_log_tree(trans, fs_info->log_root_tree);
2161 fs_info->log_root_tree = NULL;
2162 }
2173 return 0; 2163 return 0;
2174} 2164}
2175 2165
@@ -2203,6 +2193,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2203 struct btrfs_dir_item *di; 2193 struct btrfs_dir_item *di;
2204 struct btrfs_path *path; 2194 struct btrfs_path *path;
2205 int ret; 2195 int ret;
2196 int err = 0;
2206 int bytes_del = 0; 2197 int bytes_del = 0;
2207 2198
2208 if (BTRFS_I(dir)->logged_trans < trans->transid) 2199 if (BTRFS_I(dir)->logged_trans < trans->transid)
@@ -2218,7 +2209,11 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2218 path = btrfs_alloc_path(); 2209 path = btrfs_alloc_path();
2219 di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, 2210 di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
2220 name, name_len, -1); 2211 name, name_len, -1);
2221 if (di && !IS_ERR(di)) { 2212 if (IS_ERR(di)) {
2213 err = PTR_ERR(di);
2214 goto fail;
2215 }
2216 if (di) {
2222 ret = btrfs_delete_one_dir_name(trans, log, path, di); 2217 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2223 bytes_del += name_len; 2218 bytes_del += name_len;
2224 BUG_ON(ret); 2219 BUG_ON(ret);
@@ -2226,7 +2221,11 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2226 btrfs_release_path(log, path); 2221 btrfs_release_path(log, path);
2227 di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino, 2222 di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino,
2228 index, name, name_len, -1); 2223 index, name, name_len, -1);
2229 if (di && !IS_ERR(di)) { 2224 if (IS_ERR(di)) {
2225 err = PTR_ERR(di);
2226 goto fail;
2227 }
2228 if (di) {
2230 ret = btrfs_delete_one_dir_name(trans, log, path, di); 2229 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2231 bytes_del += name_len; 2230 bytes_del += name_len;
2232 BUG_ON(ret); 2231 BUG_ON(ret);
@@ -2244,6 +2243,10 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2244 btrfs_release_path(log, path); 2243 btrfs_release_path(log, path);
2245 2244
2246 ret = btrfs_search_slot(trans, log, &key, path, 0, 1); 2245 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
2246 if (ret < 0) {
2247 err = ret;
2248 goto fail;
2249 }
2247 if (ret == 0) { 2250 if (ret == 0) {
2248 struct btrfs_inode_item *item; 2251 struct btrfs_inode_item *item;
2249 u64 i_size; 2252 u64 i_size;
@@ -2261,9 +2264,13 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2261 ret = 0; 2264 ret = 0;
2262 btrfs_release_path(log, path); 2265 btrfs_release_path(log, path);
2263 } 2266 }
2264 2267fail:
2265 btrfs_free_path(path); 2268 btrfs_free_path(path);
2266 mutex_unlock(&BTRFS_I(dir)->log_mutex); 2269 mutex_unlock(&BTRFS_I(dir)->log_mutex);
2270 if (ret == -ENOSPC) {
2271 root->fs_info->last_trans_log_full_commit = trans->transid;
2272 ret = 0;
2273 }
2267 btrfs_end_log_trans(root); 2274 btrfs_end_log_trans(root);
2268 2275
2269 return 0; 2276 return 0;
@@ -2291,6 +2298,10 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2291 ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino, 2298 ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino,
2292 dirid, &index); 2299 dirid, &index);
2293 mutex_unlock(&BTRFS_I(inode)->log_mutex); 2300 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2301 if (ret == -ENOSPC) {
2302 root->fs_info->last_trans_log_full_commit = trans->transid;
2303 ret = 0;
2304 }
2294 btrfs_end_log_trans(root); 2305 btrfs_end_log_trans(root);
2295 2306
2296 return ret; 2307 return ret;
@@ -2318,7 +2329,8 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
2318 else 2329 else
2319 key.type = BTRFS_DIR_LOG_INDEX_KEY; 2330 key.type = BTRFS_DIR_LOG_INDEX_KEY;
2320 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); 2331 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
2321 BUG_ON(ret); 2332 if (ret)
2333 return ret;
2322 2334
2323 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2335 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2324 struct btrfs_dir_log_item); 2336 struct btrfs_dir_log_item);
@@ -2343,6 +2355,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2343 struct btrfs_key max_key; 2355 struct btrfs_key max_key;
2344 struct btrfs_root *log = root->log_root; 2356 struct btrfs_root *log = root->log_root;
2345 struct extent_buffer *src; 2357 struct extent_buffer *src;
2358 int err = 0;
2346 int ret; 2359 int ret;
2347 int i; 2360 int i;
2348 int nritems; 2361 int nritems;
@@ -2405,6 +2418,10 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2405 ret = overwrite_item(trans, log, dst_path, 2418 ret = overwrite_item(trans, log, dst_path,
2406 path->nodes[0], path->slots[0], 2419 path->nodes[0], path->slots[0],
2407 &tmp); 2420 &tmp);
2421 if (ret) {
2422 err = ret;
2423 goto done;
2424 }
2408 } 2425 }
2409 } 2426 }
2410 btrfs_release_path(root, path); 2427 btrfs_release_path(root, path);
@@ -2432,7 +2449,10 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2432 goto done; 2449 goto done;
2433 ret = overwrite_item(trans, log, dst_path, src, i, 2450 ret = overwrite_item(trans, log, dst_path, src, i,
2434 &min_key); 2451 &min_key);
2435 BUG_ON(ret); 2452 if (ret) {
2453 err = ret;
2454 goto done;
2455 }
2436 } 2456 }
2437 path->slots[0] = nritems; 2457 path->slots[0] = nritems;
2438 2458
@@ -2454,22 +2474,30 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2454 ret = overwrite_item(trans, log, dst_path, 2474 ret = overwrite_item(trans, log, dst_path,
2455 path->nodes[0], path->slots[0], 2475 path->nodes[0], path->slots[0],
2456 &tmp); 2476 &tmp);
2457 2477 if (ret)
2458 BUG_ON(ret); 2478 err = ret;
2459 last_offset = tmp.offset; 2479 else
2480 last_offset = tmp.offset;
2460 goto done; 2481 goto done;
2461 } 2482 }
2462 } 2483 }
2463done: 2484done:
2464 *last_offset_ret = last_offset;
2465 btrfs_release_path(root, path); 2485 btrfs_release_path(root, path);
2466 btrfs_release_path(log, dst_path); 2486 btrfs_release_path(log, dst_path);
2467 2487
2468 /* insert the log range keys to indicate where the log is valid */ 2488 if (err == 0) {
2469 ret = insert_dir_log_key(trans, log, path, key_type, inode->i_ino, 2489 *last_offset_ret = last_offset;
2470 first_offset, last_offset); 2490 /*
2471 BUG_ON(ret); 2491 * insert the log range keys to indicate where the log
2472 return 0; 2492 * is valid
2493 */
2494 ret = insert_dir_log_key(trans, log, path, key_type,
2495 inode->i_ino, first_offset,
2496 last_offset);
2497 if (ret)
2498 err = ret;
2499 }
2500 return err;
2473} 2501}
2474 2502
2475/* 2503/*
@@ -2501,7 +2529,8 @@ again:
2501 ret = log_dir_items(trans, root, inode, path, 2529 ret = log_dir_items(trans, root, inode, path,
2502 dst_path, key_type, min_key, 2530 dst_path, key_type, min_key,
2503 &max_key); 2531 &max_key);
2504 BUG_ON(ret); 2532 if (ret)
2533 return ret;
2505 if (max_key == (u64)-1) 2534 if (max_key == (u64)-1)
2506 break; 2535 break;
2507 min_key = max_key + 1; 2536 min_key = max_key + 1;
@@ -2535,8 +2564,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
2535 2564
2536 while (1) { 2565 while (1) {
2537 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); 2566 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
2538 2567 BUG_ON(ret == 0);
2539 if (ret != 1) 2568 if (ret < 0)
2540 break; 2569 break;
2541 2570
2542 if (path->slots[0] == 0) 2571 if (path->slots[0] == 0)
@@ -2554,7 +2583,7 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
2554 btrfs_release_path(log, path); 2583 btrfs_release_path(log, path);
2555 } 2584 }
2556 btrfs_release_path(log, path); 2585 btrfs_release_path(log, path);
2557 return 0; 2586 return ret;
2558} 2587}
2559 2588
2560static noinline int copy_items(struct btrfs_trans_handle *trans, 2589static noinline int copy_items(struct btrfs_trans_handle *trans,
@@ -2587,7 +2616,10 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
2587 } 2616 }
2588 ret = btrfs_insert_empty_items(trans, log, dst_path, 2617 ret = btrfs_insert_empty_items(trans, log, dst_path,
2589 ins_keys, ins_sizes, nr); 2618 ins_keys, ins_sizes, nr);
2590 BUG_ON(ret); 2619 if (ret) {
2620 kfree(ins_data);
2621 return ret;
2622 }
2591 2623
2592 for (i = 0; i < nr; i++, dst_path->slots[0]++) { 2624 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
2593 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], 2625 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
@@ -2660,16 +2692,17 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
2660 * we have to do this after the loop above to avoid changing the 2692 * we have to do this after the loop above to avoid changing the
2661 * log tree while trying to change the log tree. 2693 * log tree while trying to change the log tree.
2662 */ 2694 */
2695 ret = 0;
2663 while (!list_empty(&ordered_sums)) { 2696 while (!list_empty(&ordered_sums)) {
2664 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 2697 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
2665 struct btrfs_ordered_sum, 2698 struct btrfs_ordered_sum,
2666 list); 2699 list);
2667 ret = btrfs_csum_file_blocks(trans, log, sums); 2700 if (!ret)
2668 BUG_ON(ret); 2701 ret = btrfs_csum_file_blocks(trans, log, sums);
2669 list_del(&sums->list); 2702 list_del(&sums->list);
2670 kfree(sums); 2703 kfree(sums);
2671 } 2704 }
2672 return 0; 2705 return ret;
2673} 2706}
2674 2707
2675/* log a single inode in the tree log. 2708/* log a single inode in the tree log.
@@ -2697,6 +2730,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2697 struct btrfs_root *log = root->log_root; 2730 struct btrfs_root *log = root->log_root;
2698 struct extent_buffer *src = NULL; 2731 struct extent_buffer *src = NULL;
2699 u32 size; 2732 u32 size;
2733 int err = 0;
2700 int ret; 2734 int ret;
2701 int nritems; 2735 int nritems;
2702 int ins_start_slot = 0; 2736 int ins_start_slot = 0;
@@ -2739,7 +2773,10 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2739 } else { 2773 } else {
2740 ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0); 2774 ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
2741 } 2775 }
2742 BUG_ON(ret); 2776 if (ret) {
2777 err = ret;
2778 goto out_unlock;
2779 }
2743 path->keep_locks = 1; 2780 path->keep_locks = 1;
2744 2781
2745 while (1) { 2782 while (1) {
@@ -2768,7 +2805,10 @@ again:
2768 2805
2769 ret = copy_items(trans, log, dst_path, src, ins_start_slot, 2806 ret = copy_items(trans, log, dst_path, src, ins_start_slot,
2770 ins_nr, inode_only); 2807 ins_nr, inode_only);
2771 BUG_ON(ret); 2808 if (ret) {
2809 err = ret;
2810 goto out_unlock;
2811 }
2772 ins_nr = 1; 2812 ins_nr = 1;
2773 ins_start_slot = path->slots[0]; 2813 ins_start_slot = path->slots[0];
2774next_slot: 2814next_slot:
@@ -2784,7 +2824,10 @@ next_slot:
2784 ret = copy_items(trans, log, dst_path, src, 2824 ret = copy_items(trans, log, dst_path, src,
2785 ins_start_slot, 2825 ins_start_slot,
2786 ins_nr, inode_only); 2826 ins_nr, inode_only);
2787 BUG_ON(ret); 2827 if (ret) {
2828 err = ret;
2829 goto out_unlock;
2830 }
2788 ins_nr = 0; 2831 ins_nr = 0;
2789 } 2832 }
2790 btrfs_release_path(root, path); 2833 btrfs_release_path(root, path);
@@ -2802,7 +2845,10 @@ next_slot:
2802 ret = copy_items(trans, log, dst_path, src, 2845 ret = copy_items(trans, log, dst_path, src,
2803 ins_start_slot, 2846 ins_start_slot,
2804 ins_nr, inode_only); 2847 ins_nr, inode_only);
2805 BUG_ON(ret); 2848 if (ret) {
2849 err = ret;
2850 goto out_unlock;
2851 }
2806 ins_nr = 0; 2852 ins_nr = 0;
2807 } 2853 }
2808 WARN_ON(ins_nr); 2854 WARN_ON(ins_nr);
@@ -2810,14 +2856,18 @@ next_slot:
2810 btrfs_release_path(root, path); 2856 btrfs_release_path(root, path);
2811 btrfs_release_path(log, dst_path); 2857 btrfs_release_path(log, dst_path);
2812 ret = log_directory_changes(trans, root, inode, path, dst_path); 2858 ret = log_directory_changes(trans, root, inode, path, dst_path);
2813 BUG_ON(ret); 2859 if (ret) {
2860 err = ret;
2861 goto out_unlock;
2862 }
2814 } 2863 }
2815 BTRFS_I(inode)->logged_trans = trans->transid; 2864 BTRFS_I(inode)->logged_trans = trans->transid;
2865out_unlock:
2816 mutex_unlock(&BTRFS_I(inode)->log_mutex); 2866 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2817 2867
2818 btrfs_free_path(path); 2868 btrfs_free_path(path);
2819 btrfs_free_path(dst_path); 2869 btrfs_free_path(dst_path);
2820 return 0; 2870 return err;
2821} 2871}
2822 2872
2823/* 2873/*
@@ -2942,10 +2992,13 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
2942 goto end_no_trans; 2992 goto end_no_trans;
2943 } 2993 }
2944 2994
2945 start_log_trans(trans, root); 2995 ret = start_log_trans(trans, root);
2996 if (ret)
2997 goto end_trans;
2946 2998
2947 ret = btrfs_log_inode(trans, root, inode, inode_only); 2999 ret = btrfs_log_inode(trans, root, inode, inode_only);
2948 BUG_ON(ret); 3000 if (ret)
3001 goto end_trans;
2949 3002
2950 /* 3003 /*
2951 * for regular files, if its inode is already on disk, we don't 3004 * for regular files, if its inode is already on disk, we don't
@@ -2955,8 +3008,10 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
2955 */ 3008 */
2956 if (S_ISREG(inode->i_mode) && 3009 if (S_ISREG(inode->i_mode) &&
2957 BTRFS_I(inode)->generation <= last_committed && 3010 BTRFS_I(inode)->generation <= last_committed &&
2958 BTRFS_I(inode)->last_unlink_trans <= last_committed) 3011 BTRFS_I(inode)->last_unlink_trans <= last_committed) {
2959 goto no_parent; 3012 ret = 0;
3013 goto end_trans;
3014 }
2960 3015
2961 inode_only = LOG_INODE_EXISTS; 3016 inode_only = LOG_INODE_EXISTS;
2962 while (1) { 3017 while (1) {
@@ -2970,15 +3025,21 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
2970 if (BTRFS_I(inode)->generation > 3025 if (BTRFS_I(inode)->generation >
2971 root->fs_info->last_trans_committed) { 3026 root->fs_info->last_trans_committed) {
2972 ret = btrfs_log_inode(trans, root, inode, inode_only); 3027 ret = btrfs_log_inode(trans, root, inode, inode_only);
2973 BUG_ON(ret); 3028 if (ret)
3029 goto end_trans;
2974 } 3030 }
2975 if (IS_ROOT(parent)) 3031 if (IS_ROOT(parent))
2976 break; 3032 break;
2977 3033
2978 parent = parent->d_parent; 3034 parent = parent->d_parent;
2979 } 3035 }
2980no_parent:
2981 ret = 0; 3036 ret = 0;
3037end_trans:
3038 if (ret < 0) {
3039 BUG_ON(ret != -ENOSPC);
3040 root->fs_info->last_trans_log_full_commit = trans->transid;
3041 ret = 1;
3042 }
2982 btrfs_end_log_trans(root); 3043 btrfs_end_log_trans(root);
2983end_no_trans: 3044end_no_trans:
2984 return ret; 3045 return ret;
@@ -3020,7 +3081,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
3020 path = btrfs_alloc_path(); 3081 path = btrfs_alloc_path();
3021 BUG_ON(!path); 3082 BUG_ON(!path);
3022 3083
3023 trans = btrfs_start_transaction(fs_info->tree_root, 1); 3084 trans = btrfs_start_transaction(fs_info->tree_root, 0);
3024 3085
3025 wc.trans = trans; 3086 wc.trans = trans;
3026 wc.pin = 1; 3087 wc.pin = 1;
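
The recurring change in tree-log.c replaces BUG_ON(ret) with explicit error propagation: on -ENOSPC the log is abandoned for the current transaction and a full commit is forced instead of crashing. A condensed sketch of that fallback pattern (the helper name is illustrative, not part of the patch):

	/* illustrative helper: turn a log-tree ENOSPC into "fall back to a full
	 * transaction commit" rather than a BUG_ON() crash */
	static int log_ret_or_force_commit(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root, int ret)
	{
		if (ret == -ENOSPC) {
			root->fs_info->last_trans_log_full_commit = trans->transid;
			return 0;
		}
		return ret;
	}
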
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 0776eacb5083..3dfae84c8cc8 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -25,6 +25,8 @@
25int btrfs_sync_log(struct btrfs_trans_handle *trans, 25int btrfs_sync_log(struct btrfs_trans_handle *trans,
26 struct btrfs_root *root); 26 struct btrfs_root *root);
27int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root); 27int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root);
28int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
29 struct btrfs_fs_info *fs_info);
28int btrfs_recover_log_trees(struct btrfs_root *tree_root); 30int btrfs_recover_log_trees(struct btrfs_root *tree_root);
29int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, 31int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
30 struct btrfs_root *root, struct dentry *dentry); 32 struct btrfs_root *root, struct dentry *dentry);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 8db7b14bbae8..d6e3af8be95b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1097,7 +1097,7 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
1097 if (!path) 1097 if (!path)
1098 return -ENOMEM; 1098 return -ENOMEM;
1099 1099
1100 trans = btrfs_start_transaction(root, 1); 1100 trans = btrfs_start_transaction(root, 0);
1101 key.objectid = BTRFS_DEV_ITEMS_OBJECTID; 1101 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1102 key.type = BTRFS_DEV_ITEM_KEY; 1102 key.type = BTRFS_DEV_ITEM_KEY;
1103 key.offset = device->devid; 1103 key.offset = device->devid;
@@ -1486,7 +1486,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1486 goto error; 1486 goto error;
1487 } 1487 }
1488 1488
1489 trans = btrfs_start_transaction(root, 1); 1489 trans = btrfs_start_transaction(root, 0);
1490 lock_chunks(root); 1490 lock_chunks(root);
1491 1491
1492 device->barriers = 1; 1492 device->barriers = 1;
@@ -1751,9 +1751,10 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
1751 1751
1752 /* step one, relocate all the extents inside this chunk */ 1752 /* step one, relocate all the extents inside this chunk */
1753 ret = btrfs_relocate_block_group(extent_root, chunk_offset); 1753 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
1754 BUG_ON(ret); 1754 if (ret)
1755 return ret;
1755 1756
1756 trans = btrfs_start_transaction(root, 1); 1757 trans = btrfs_start_transaction(root, 0);
1757 BUG_ON(!trans); 1758 BUG_ON(!trans);
1758 1759
1759 lock_chunks(root); 1760 lock_chunks(root);
@@ -1925,7 +1926,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
1925 break; 1926 break;
1926 BUG_ON(ret); 1927 BUG_ON(ret);
1927 1928
1928 trans = btrfs_start_transaction(dev_root, 1); 1929 trans = btrfs_start_transaction(dev_root, 0);
1929 BUG_ON(!trans); 1930 BUG_ON(!trans);
1930 1931
1931 ret = btrfs_grow_device(trans, device, old_size); 1932 ret = btrfs_grow_device(trans, device, old_size);
@@ -2094,11 +2095,7 @@ again:
2094 } 2095 }
2095 2096
2096 /* Shrinking succeeded, else we would be at "done". */ 2097 /* Shrinking succeeded, else we would be at "done". */
2097 trans = btrfs_start_transaction(root, 1); 2098 trans = btrfs_start_transaction(root, 0);
2098 if (!trans) {
2099 ret = -ENOMEM;
2100 goto done;
2101 }
2102 lock_chunks(root); 2099 lock_chunks(root);
2103 2100
2104 device->disk_total_bytes = new_size; 2101 device->disk_total_bytes = new_size;
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 59acd3eb288a..88ecbb215878 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -154,15 +154,10 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
154 if (trans) 154 if (trans)
155 return do_setxattr(trans, inode, name, value, size, flags); 155 return do_setxattr(trans, inode, name, value, size, flags);
156 156
157 ret = btrfs_reserve_metadata_space(root, 2); 157 trans = btrfs_start_transaction(root, 2);
158 if (ret) 158 if (IS_ERR(trans))
159 return ret; 159 return PTR_ERR(trans);
160 160
161 trans = btrfs_start_transaction(root, 1);
162 if (!trans) {
163 ret = -ENOMEM;
164 goto out;
165 }
166 btrfs_set_trans_block_group(trans, inode); 161 btrfs_set_trans_block_group(trans, inode);
167 162
168 ret = do_setxattr(trans, inode, name, value, size, flags); 163 ret = do_setxattr(trans, inode, name, value, size, flags);
@@ -174,7 +169,6 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
174 BUG_ON(ret); 169 BUG_ON(ret);
175out: 170out:
176 btrfs_end_transaction_throttle(trans, root); 171 btrfs_end_transaction_throttle(trans, root);
177 btrfs_unreserve_metadata_space(root, 2);
178 return ret; 172 return ret;
179} 173}
180 174
diff --git a/fs/buffer.c b/fs/buffer.c
index e8aa7081d25c..d54812b198e9 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1949,14 +1949,11 @@ static int __block_commit_write(struct inode *inode, struct page *page,
1949} 1949}
1950 1950
1951/* 1951/*
1952 * block_write_begin takes care of the basic task of block allocation and 1952 * Filesystems implementing the new truncate sequence should use the
1953 * bringing partial write blocks uptodate first. 1953 * _newtrunc postfix variant which won't incorrectly call vmtruncate.
1954 * 1954 * The filesystem needs to handle block truncation upon failure.
1955 * If *pagep is not NULL, then block_write_begin uses the locked page
1956 * at *pagep rather than allocating its own. In this case, the page will
1957 * not be unlocked or deallocated on failure.
1958 */ 1955 */
1959int block_write_begin(struct file *file, struct address_space *mapping, 1956int block_write_begin_newtrunc(struct file *file, struct address_space *mapping,
1960 loff_t pos, unsigned len, unsigned flags, 1957 loff_t pos, unsigned len, unsigned flags,
1961 struct page **pagep, void **fsdata, 1958 struct page **pagep, void **fsdata,
1962 get_block_t *get_block) 1959 get_block_t *get_block)
@@ -1992,20 +1989,50 @@ int block_write_begin(struct file *file, struct address_space *mapping,
1992 unlock_page(page); 1989 unlock_page(page);
1993 page_cache_release(page); 1990 page_cache_release(page);
1994 *pagep = NULL; 1991 *pagep = NULL;
1995
1996 /*
1997 * prepare_write() may have instantiated a few blocks
1998 * outside i_size. Trim these off again. Don't need
1999 * i_size_read because we hold i_mutex.
2000 */
2001 if (pos + len > inode->i_size)
2002 vmtruncate(inode, inode->i_size);
2003 } 1992 }
2004 } 1993 }
2005 1994
2006out: 1995out:
2007 return status; 1996 return status;
2008} 1997}
1998EXPORT_SYMBOL(block_write_begin_newtrunc);
1999
2000/*
2001 * block_write_begin takes care of the basic task of block allocation and
2002 * bringing partial write blocks uptodate first.
2003 *
2004 * If *pagep is not NULL, then block_write_begin uses the locked page
2005 * at *pagep rather than allocating its own. In this case, the page will
2006 * not be unlocked or deallocated on failure.
2007 */
2008int block_write_begin(struct file *file, struct address_space *mapping,
2009 loff_t pos, unsigned len, unsigned flags,
2010 struct page **pagep, void **fsdata,
2011 get_block_t *get_block)
2012{
2013 int ret;
2014
2015 ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
2016 pagep, fsdata, get_block);
2017
2018 /*
2019 * prepare_write() may have instantiated a few blocks
2020 * outside i_size. Trim these off again. Don't need
2021 * i_size_read because we hold i_mutex.
2022 *
2023 * Filesystems which pass down their own page also cannot
2024 * call into vmtruncate here because it would lead to lock
2025 * inversion problems (*pagep is locked). This is a further
2026 * example of where the old truncate sequence is inadequate.
2027 */
2028 if (unlikely(ret) && *pagep == NULL) {
2029 loff_t isize = mapping->host->i_size;
2030 if (pos + len > isize)
2031 vmtruncate(mapping->host, isize);
2032 }
2033
2034 return ret;
2035}
2009EXPORT_SYMBOL(block_write_begin); 2036EXPORT_SYMBOL(block_write_begin);
2010 2037
2011int block_write_end(struct file *file, struct address_space *mapping, 2038int block_write_end(struct file *file, struct address_space *mapping,
@@ -2324,7 +2351,7 @@ out:
2324 * For moronic filesystems that do not allow holes in file. 2351 * For moronic filesystems that do not allow holes in file.
2325 * We may have to extend the file. 2352 * We may have to extend the file.
2326 */ 2353 */
2327int cont_write_begin(struct file *file, struct address_space *mapping, 2354int cont_write_begin_newtrunc(struct file *file, struct address_space *mapping,
2328 loff_t pos, unsigned len, unsigned flags, 2355 loff_t pos, unsigned len, unsigned flags,
2329 struct page **pagep, void **fsdata, 2356 struct page **pagep, void **fsdata,
2330 get_block_t *get_block, loff_t *bytes) 2357 get_block_t *get_block, loff_t *bytes)
@@ -2345,11 +2372,30 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
2345 } 2372 }
2346 2373
2347 *pagep = NULL; 2374 *pagep = NULL;
2348 err = block_write_begin(file, mapping, pos, len, 2375 err = block_write_begin_newtrunc(file, mapping, pos, len,
2349 flags, pagep, fsdata, get_block); 2376 flags, pagep, fsdata, get_block);
2350out: 2377out:
2351 return err; 2378 return err;
2352} 2379}
2380EXPORT_SYMBOL(cont_write_begin_newtrunc);
2381
2382int cont_write_begin(struct file *file, struct address_space *mapping,
2383 loff_t pos, unsigned len, unsigned flags,
2384 struct page **pagep, void **fsdata,
2385 get_block_t *get_block, loff_t *bytes)
2386{
2387 int ret;
2388
2389 ret = cont_write_begin_newtrunc(file, mapping, pos, len, flags,
2390 pagep, fsdata, get_block, bytes);
2391 if (unlikely(ret)) {
2392 loff_t isize = mapping->host->i_size;
2393 if (pos + len > isize)
2394 vmtruncate(mapping->host, isize);
2395 }
2396
2397 return ret;
2398}
2353EXPORT_SYMBOL(cont_write_begin); 2399EXPORT_SYMBOL(cont_write_begin);
2354 2400
2355int block_prepare_write(struct page *page, unsigned from, unsigned to, 2401int block_prepare_write(struct page *page, unsigned from, unsigned to,
@@ -2381,7 +2427,7 @@ EXPORT_SYMBOL(block_commit_write);
2381 * 2427 *
2382 * We are not allowed to take the i_mutex here so we have to play games to 2428 * We are not allowed to take the i_mutex here so we have to play games to
2383 * protect against truncate races as the page could now be beyond EOF. Because 2429 * protect against truncate races as the page could now be beyond EOF. Because
2384 * vmtruncate() writes the inode size before removing pages, once we have the 2430 * truncate writes the inode size before removing pages, once we have the
2385 * page lock we can determine safely if the page is beyond EOF. If it is not 2431 * page lock we can determine safely if the page is beyond EOF. If it is not
2386 * beyond EOF, then the page is guaranteed safe against truncation until we 2432 * beyond EOF, then the page is guaranteed safe against truncation until we
2387 * unlock the page. 2433 * unlock the page.
@@ -2464,10 +2510,11 @@ static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2464} 2510}
2465 2511
2466/* 2512/*
2467 * On entry, the page is fully not uptodate. 2513 * Filesystems implementing the new truncate sequence should use the
2468 * On exit the page is fully uptodate in the areas outside (from,to) 2514 * _newtrunc postfix variant which won't incorrectly call vmtruncate.
2515 * The filesystem needs to handle block truncation upon failure.
2469 */ 2516 */
2470int nobh_write_begin(struct file *file, struct address_space *mapping, 2517int nobh_write_begin_newtrunc(struct file *file, struct address_space *mapping,
2471 loff_t pos, unsigned len, unsigned flags, 2518 loff_t pos, unsigned len, unsigned flags,
2472 struct page **pagep, void **fsdata, 2519 struct page **pagep, void **fsdata,
2473 get_block_t *get_block) 2520 get_block_t *get_block)
@@ -2500,8 +2547,8 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
2500 unlock_page(page); 2547 unlock_page(page);
2501 page_cache_release(page); 2548 page_cache_release(page);
2502 *pagep = NULL; 2549 *pagep = NULL;
2503 return block_write_begin(file, mapping, pos, len, flags, pagep, 2550 return block_write_begin_newtrunc(file, mapping, pos, len,
2504 fsdata, get_block); 2551 flags, pagep, fsdata, get_block);
2505 } 2552 }
2506 2553
2507 if (PageMappedToDisk(page)) 2554 if (PageMappedToDisk(page))
@@ -2605,8 +2652,34 @@ out_release:
2605 page_cache_release(page); 2652 page_cache_release(page);
2606 *pagep = NULL; 2653 *pagep = NULL;
2607 2654
2608 if (pos + len > inode->i_size) 2655 return ret;
2609 vmtruncate(inode, inode->i_size); 2656}
2657EXPORT_SYMBOL(nobh_write_begin_newtrunc);
2658
2659/*
2660 * On entry, the page is fully not uptodate.
2661 * On exit the page is fully uptodate in the areas outside (from,to)
2662 */
2663int nobh_write_begin(struct file *file, struct address_space *mapping,
2664 loff_t pos, unsigned len, unsigned flags,
2665 struct page **pagep, void **fsdata,
2666 get_block_t *get_block)
2667{
2668 int ret;
2669
2670 ret = nobh_write_begin_newtrunc(file, mapping, pos, len, flags,
2671 pagep, fsdata, get_block);
2672
2673 /*
2674 * prepare_write() may have instantiated a few blocks
2675 * outside i_size. Trim these off again. Don't need
2676 * i_size_read because we hold i_mutex.
2677 */
2678 if (unlikely(ret)) {
2679 loff_t isize = mapping->host->i_size;
2680 if (pos + len > isize)
2681 vmtruncate(mapping->host, isize);
2682 }
2610 2683
2611 return ret; 2684 return ret;
2612} 2685}
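
The *_newtrunc variants added here leave failure cleanup to the filesystem, while the original entry points keep the old behaviour by trimming blocks instantiated past i_size via vmtruncate(). A sketch of how a converted filesystem might call the new variant directly (myfs_get_block and myfs_truncate_blocks are hypothetical placeholders, not part of this patch):

	static int myfs_write_begin(struct file *file, struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned flags,
				    struct page **pagep, void **fsdata)
	{
		int ret;

		*pagep = NULL;
		ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
						 pagep, fsdata, myfs_get_block);
		if (ret) {
			/* new truncate sequence: the filesystem trims its own
			 * over-allocation instead of relying on vmtruncate() */
			myfs_truncate_blocks(mapping->host, mapping->host->i_size);
		}
		return ret;
	}
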
diff --git a/fs/ceph/auth.c b/fs/ceph/auth.c
index 9f46de2ba7a7..89490beaf537 100644
--- a/fs/ceph/auth.c
+++ b/fs/ceph/auth.c
@@ -1,7 +1,6 @@
1#include "ceph_debug.h" 1#include "ceph_debug.h"
2 2
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/slab.h>
5#include <linux/err.h> 4#include <linux/err.h>
6#include <linux/slab.h> 5#include <linux/slab.h>
7 6
@@ -217,8 +216,8 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac,
217 if (ac->protocol != protocol) { 216 if (ac->protocol != protocol) {
218 ret = ceph_auth_init_protocol(ac, protocol); 217 ret = ceph_auth_init_protocol(ac, protocol);
219 if (ret) { 218 if (ret) {
220 pr_err("error %d on auth method %s init\n", 219 pr_err("error %d on auth protocol %d init\n",
221 ret, ac->ops->name); 220 ret, protocol);
222 goto out; 221 goto out;
223 } 222 }
224 } 223 }
@@ -247,7 +246,7 @@ int ceph_build_auth(struct ceph_auth_client *ac,
247 if (!ac->protocol) 246 if (!ac->protocol)
248 return ceph_auth_build_hello(ac, msg_buf, msg_len); 247 return ceph_auth_build_hello(ac, msg_buf, msg_len);
249 BUG_ON(!ac->ops); 248 BUG_ON(!ac->ops);
250 if (!ac->ops->is_authenticated(ac)) 249 if (ac->ops->should_authenticate(ac))
251 return ceph_build_auth_request(ac, msg_buf, msg_len); 250 return ceph_build_auth_request(ac, msg_buf, msg_len);
252 return 0; 251 return 0;
253} 252}
diff --git a/fs/ceph/auth.h b/fs/ceph/auth.h
index 4429a707c021..d38a2fb4a137 100644
--- a/fs/ceph/auth.h
+++ b/fs/ceph/auth.h
@@ -24,6 +24,12 @@ struct ceph_auth_client_ops {
24 int (*is_authenticated)(struct ceph_auth_client *ac); 24 int (*is_authenticated)(struct ceph_auth_client *ac);
25 25
26 /* 26 /*
27 * true if we should (re)authenticate, e.g., when our tickets
28 * are getting old and crusty.
29 */
30 int (*should_authenticate)(struct ceph_auth_client *ac);
31
32 /*
27 * build requests and process replies during monitor 33 * build requests and process replies during monitor
28 * handshake. if handle_reply returns -EAGAIN, we build 34 * handshake. if handle_reply returns -EAGAIN, we build
29 * another request. 35 * another request.
diff --git a/fs/ceph/auth_none.c b/fs/ceph/auth_none.c
index 24407c119291..ad1dc21286c7 100644
--- a/fs/ceph/auth_none.c
+++ b/fs/ceph/auth_none.c
@@ -31,6 +31,13 @@ static int is_authenticated(struct ceph_auth_client *ac)
31 return !xi->starting; 31 return !xi->starting;
32} 32}
33 33
34static int should_authenticate(struct ceph_auth_client *ac)
35{
36 struct ceph_auth_none_info *xi = ac->private;
37
38 return xi->starting;
39}
40
34/* 41/*
35 * the generic auth code decode the global_id, and we carry no actual 42 * the generic auth code decode the global_id, and we carry no actual
36 * authenticate state, so nothing happens here. 43 * authenticate state, so nothing happens here.
@@ -98,6 +105,7 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = {
98 .reset = reset, 105 .reset = reset,
99 .destroy = destroy, 106 .destroy = destroy,
100 .is_authenticated = is_authenticated, 107 .is_authenticated = is_authenticated,
108 .should_authenticate = should_authenticate,
101 .handle_reply = handle_reply, 109 .handle_reply = handle_reply,
102 .create_authorizer = ceph_auth_none_create_authorizer, 110 .create_authorizer = ceph_auth_none_create_authorizer,
103 .destroy_authorizer = ceph_auth_none_destroy_authorizer, 111 .destroy_authorizer = ceph_auth_none_destroy_authorizer,
diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c
index 7b206231566d..83d4d2785ffe 100644
--- a/fs/ceph/auth_x.c
+++ b/fs/ceph/auth_x.c
@@ -27,6 +27,17 @@ static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
27 return (ac->want_keys & xi->have_keys) == ac->want_keys; 27 return (ac->want_keys & xi->have_keys) == ac->want_keys;
28} 28}
29 29
30static int ceph_x_should_authenticate(struct ceph_auth_client *ac)
31{
32 struct ceph_x_info *xi = ac->private;
33 int need;
34
35 ceph_x_validate_tickets(ac, &need);
36 dout("ceph_x_should_authenticate want=%d need=%d have=%d\n",
37 ac->want_keys, need, xi->have_keys);
38 return need != 0;
39}
40
30static int ceph_x_encrypt_buflen(int ilen) 41static int ceph_x_encrypt_buflen(int ilen)
31{ 42{
32 return sizeof(struct ceph_x_encrypt_header) + ilen + 16 + 43 return sizeof(struct ceph_x_encrypt_header) + ilen + 16 +
@@ -620,6 +631,7 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
620static const struct ceph_auth_client_ops ceph_x_ops = { 631static const struct ceph_auth_client_ops ceph_x_ops = {
621 .name = "x", 632 .name = "x",
622 .is_authenticated = ceph_x_is_authenticated, 633 .is_authenticated = ceph_x_is_authenticated,
634 .should_authenticate = ceph_x_should_authenticate,
623 .build_request = ceph_x_build_request, 635 .build_request = ceph_x_build_request,
624 .handle_reply = ceph_x_handle_reply, 636 .handle_reply = ceph_x_handle_reply,
625 .create_authorizer = ceph_x_create_authorizer, 637 .create_authorizer = ceph_x_create_authorizer,
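
ceph_build_auth() now asks each protocol via the new should_authenticate op whether a fresh request is needed: the "none" protocol answers yes only while starting, and the "x" protocol whenever its tickets need renewal. A minimal sketch of what a hypothetical additional protocol would supply (names and state are illustrative, not part of this patch):

	/* hypothetical protocol state, kept in ac->private */
	struct demo_auth_info {
		int ticket_valid;
	};

	/* re-authenticate whenever the single ticket is no longer valid */
	static int demo_should_authenticate(struct ceph_auth_client *ac)
	{
		struct demo_auth_info *info = ac->private;

		return !info->ticket_valid;
	}
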
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 0dd0b81e64f7..ae3e3a306445 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1776,9 +1776,9 @@ out:
1776 spin_unlock(&ci->i_unsafe_lock); 1776 spin_unlock(&ci->i_unsafe_lock);
1777} 1777}
1778 1778
1779int ceph_fsync(struct file *file, struct dentry *dentry, int datasync) 1779int ceph_fsync(struct file *file, int datasync)
1780{ 1780{
1781 struct inode *inode = dentry->d_inode; 1781 struct inode *inode = file->f_mapping->host;
1782 struct ceph_inode_info *ci = ceph_inode(inode); 1782 struct ceph_inode_info *ci = ceph_inode(inode);
1783 unsigned flush_tid; 1783 unsigned flush_tid;
1784 int ret; 1784 int ret;
diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h
index 3b9eeed097b3..2fa992eaf7da 100644
--- a/fs/ceph/ceph_fs.h
+++ b/fs/ceph/ceph_fs.h
@@ -265,16 +265,17 @@ extern const char *ceph_mds_state_name(int s);
265 * - they also define the lock ordering by the MDS 265 * - they also define the lock ordering by the MDS
266 * - a few of these are internal to the mds 266 * - a few of these are internal to the mds
267 */ 267 */
268#define CEPH_LOCK_DN 1 268#define CEPH_LOCK_DVERSION 1
269#define CEPH_LOCK_ISNAP 2 269#define CEPH_LOCK_DN 2
270#define CEPH_LOCK_IVERSION 4 /* mds internal */ 270#define CEPH_LOCK_ISNAP 16
271#define CEPH_LOCK_IFILE 8 /* mds internal */ 271#define CEPH_LOCK_IVERSION 32 /* mds internal */
272#define CEPH_LOCK_IAUTH 32 272#define CEPH_LOCK_IFILE 64
273#define CEPH_LOCK_ILINK 64 273#define CEPH_LOCK_IAUTH 128
274#define CEPH_LOCK_IDFT 128 /* dir frag tree */ 274#define CEPH_LOCK_ILINK 256
275#define CEPH_LOCK_INEST 256 /* mds internal */ 275#define CEPH_LOCK_IDFT 512 /* dir frag tree */
276#define CEPH_LOCK_IXATTR 512 276#define CEPH_LOCK_INEST 1024 /* mds internal */
277#define CEPH_LOCK_INO 2048 /* immutable inode bits; not a lock */ 277#define CEPH_LOCK_IXATTR 2048
278#define CEPH_LOCK_INO 8192 /* immutable inode bits; not a lock */
278 279
279/* client_session ops */ 280/* client_session ops */
280enum { 281enum {
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 4fd30900eff7..f85719310db2 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -587,7 +587,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
587 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP; 587 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
588 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS); 588 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
589 if (IS_ERR(req)) 589 if (IS_ERR(req))
590 return ERR_PTR(PTR_ERR(req)); 590 return ERR_CAST(req);
591 req->r_dentry = dget(dentry); 591 req->r_dentry = dget(dentry);
592 req->r_num_caps = 2; 592 req->r_num_caps = 2;
593 /* we only need inode linkage */ 593 /* we only need inode linkage */
@@ -1107,10 +1107,9 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
1107 * an fsync() on a dir will wait for any uncommitted directory 1107 * an fsync() on a dir will wait for any uncommitted directory
1108 * operations to commit. 1108 * operations to commit.
1109 */ 1109 */
1110static int ceph_dir_fsync(struct file *file, struct dentry *dentry, 1110static int ceph_dir_fsync(struct file *file, int datasync)
1111 int datasync)
1112{ 1111{
1113 struct inode *inode = dentry->d_inode; 1112 struct inode *inode = file->f_path.dentry->d_inode;
1114 struct ceph_inode_info *ci = ceph_inode(inode); 1113 struct ceph_inode_info *ci = ceph_inode(inode);
1115 struct list_head *head = &ci->i_unsafe_dirops; 1114 struct list_head *head = &ci->i_unsafe_dirops;
1116 struct ceph_mds_request *req; 1115 struct ceph_mds_request *req;
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 17447644d675..4480cb1c63e7 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -133,7 +133,7 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
133 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPHASH, 133 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPHASH,
134 USE_ANY_MDS); 134 USE_ANY_MDS);
135 if (IS_ERR(req)) 135 if (IS_ERR(req))
136 return ERR_PTR(PTR_ERR(req)); 136 return ERR_CAST(req);
137 137
138 req->r_ino1 = vino; 138 req->r_ino1 = vino;
139 req->r_ino2.ino = cfh->parent_ino; 139 req->r_ino2.ino = cfh->parent_ino;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 6512b6701b9e..6251a1574b94 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -230,7 +230,7 @@ struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
230 /* do the open */ 230 /* do the open */
231 req = prepare_open_request(dir->i_sb, flags, mode); 231 req = prepare_open_request(dir->i_sb, flags, mode);
232 if (IS_ERR(req)) 232 if (IS_ERR(req))
233 return ERR_PTR(PTR_ERR(req)); 233 return ERR_CAST(req);
234 req->r_dentry = dget(dentry); 234 req->r_dentry = dget(dentry);
235 req->r_num_caps = 2; 235 req->r_num_caps = 2;
236 if (flags & O_CREAT) { 236 if (flags & O_CREAT) {
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index a81b8b662c7b..226f5a50d362 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -69,7 +69,7 @@ struct inode *ceph_get_snapdir(struct inode *parent)
69 69
70 BUG_ON(!S_ISDIR(parent->i_mode)); 70 BUG_ON(!S_ISDIR(parent->i_mode));
71 if (IS_ERR(inode)) 71 if (IS_ERR(inode))
72 return ERR_PTR(PTR_ERR(inode)); 72 return inode;
73 inode->i_mode = parent->i_mode; 73 inode->i_mode = parent->i_mode;
74 inode->i_uid = parent->i_uid; 74 inode->i_uid = parent->i_uid;
75 inode->i_gid = parent->i_gid; 75 inode->i_gid = parent->i_gid;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 885aa5710cfd..b49f12822cbc 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1768,12 +1768,12 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
1768 mutex_unlock(&mdsc->mutex); 1768 mutex_unlock(&mdsc->mutex);
1769 dout("do_request waiting\n"); 1769 dout("do_request waiting\n");
1770 if (req->r_timeout) { 1770 if (req->r_timeout) {
1771 err = (long)wait_for_completion_interruptible_timeout( 1771 err = (long)wait_for_completion_killable_timeout(
1772 &req->r_completion, req->r_timeout); 1772 &req->r_completion, req->r_timeout);
1773 if (err == 0) 1773 if (err == 0)
1774 err = -EIO; 1774 err = -EIO;
1775 } else { 1775 } else {
1776 err = wait_for_completion_interruptible(&req->r_completion); 1776 err = wait_for_completion_killable(&req->r_completion);
1777 } 1777 }
1778 dout("do_request waited, got %d\n", err); 1778 dout("do_request waited, got %d\n", err);
1779 mutex_lock(&mdsc->mutex); 1779 mutex_lock(&mdsc->mutex);
@@ -2014,16 +2014,21 @@ static void handle_forward(struct ceph_mds_client *mdsc,
2014 mutex_lock(&mdsc->mutex); 2014 mutex_lock(&mdsc->mutex);
2015 req = __lookup_request(mdsc, tid); 2015 req = __lookup_request(mdsc, tid);
2016 if (!req) { 2016 if (!req) {
2017 dout("forward %llu to mds%d - req dne\n", tid, next_mds); 2017 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
2018 goto out; /* dup reply? */ 2018 goto out; /* dup reply? */
2019 } 2019 }
2020 2020
2021 if (fwd_seq <= req->r_num_fwd) { 2021 if (req->r_aborted) {
2022 dout("forward %llu to mds%d - old seq %d <= %d\n", 2022 dout("forward tid %llu aborted, unregistering\n", tid);
2023 __unregister_request(mdsc, req);
2024 } else if (fwd_seq <= req->r_num_fwd) {
2025 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
2023 tid, next_mds, req->r_num_fwd, fwd_seq); 2026 tid, next_mds, req->r_num_fwd, fwd_seq);
2024 } else { 2027 } else {
2025 /* resend. forward race not possible; mds would drop */ 2028 /* resend. forward race not possible; mds would drop */
2026 dout("forward %llu to mds%d (we resend)\n", tid, next_mds); 2029 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
2030 BUG_ON(req->r_err);
2031 BUG_ON(req->r_got_result);
2027 req->r_num_fwd = fwd_seq; 2032 req->r_num_fwd = fwd_seq;
2028 req->r_resend_mds = next_mds; 2033 req->r_resend_mds = next_mds;
2029 put_request_session(req); 2034 put_request_session(req);
@@ -2541,7 +2546,7 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
2541 return; 2546 return;
2542 lease = msg->front.iov_base; 2547 lease = msg->front.iov_base;
2543 lease->action = action; 2548 lease->action = action;
2544 lease->mask = cpu_to_le16(CEPH_LOCK_DN); 2549 lease->mask = cpu_to_le16(1);
2545 lease->ino = cpu_to_le64(ceph_vino(inode).ino); 2550 lease->ino = cpu_to_le64(ceph_vino(inode).ino);
2546 lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap); 2551 lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
2547 lease->seq = cpu_to_le32(seq); 2552 lease->seq = cpu_to_le32(seq);
@@ -2571,7 +2576,7 @@ void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
2571 2576
2572 BUG_ON(inode == NULL); 2577 BUG_ON(inode == NULL);
2573 BUG_ON(dentry == NULL); 2578 BUG_ON(dentry == NULL);
2574 BUG_ON(mask != CEPH_LOCK_DN); 2579 BUG_ON(mask == 0);
2575 2580
2576 /* is dentry lease valid? */ 2581 /* is dentry lease valid? */
2577 spin_lock(&dentry->d_lock); 2582 spin_lock(&dentry->d_lock);
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
index 60b74839ebec..64b8b1f7863d 100644
--- a/fs/ceph/messenger.c
+++ b/fs/ceph/messenger.c
@@ -120,6 +120,12 @@ void ceph_msgr_exit(void)
120 destroy_workqueue(ceph_msgr_wq); 120 destroy_workqueue(ceph_msgr_wq);
121} 121}
122 122
123void ceph_msgr_flush()
124{
125 flush_workqueue(ceph_msgr_wq);
126}
127
128
123/* 129/*
124 * socket callback functions 130 * socket callback functions
125 */ 131 */
diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h
index 00a9430b1ffc..76fbc957bc13 100644
--- a/fs/ceph/messenger.h
+++ b/fs/ceph/messenger.h
@@ -213,6 +213,7 @@ extern int ceph_parse_ips(const char *c, const char *end,
213 213
214extern int ceph_msgr_init(void); 214extern int ceph_msgr_init(void);
215extern void ceph_msgr_exit(void); 215extern void ceph_msgr_exit(void);
216extern void ceph_msgr_flush(void);
216 217
217extern struct ceph_messenger *ceph_messenger_create( 218extern struct ceph_messenger *ceph_messenger_create(
218 struct ceph_entity_addr *myaddr); 219 struct ceph_entity_addr *myaddr);
diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c
index f6510a476e7e..21c62e9b7d1d 100644
--- a/fs/ceph/mon_client.c
+++ b/fs/ceph/mon_client.c
@@ -704,8 +704,11 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
704 struct ceph_msg *msg) 704 struct ceph_msg *msg)
705{ 705{
706 int ret; 706 int ret;
707 int was_auth = 0;
707 708
708 mutex_lock(&monc->mutex); 709 mutex_lock(&monc->mutex);
710 if (monc->auth->ops)
711 was_auth = monc->auth->ops->is_authenticated(monc->auth);
709 monc->pending_auth = 0; 712 monc->pending_auth = 0;
710 ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base, 713 ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
711 msg->front.iov_len, 714 msg->front.iov_len,
@@ -716,7 +719,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
716 wake_up(&monc->client->auth_wq); 719 wake_up(&monc->client->auth_wq);
717 } else if (ret > 0) { 720 } else if (ret > 0) {
718 __send_prepared_auth_request(monc, ret); 721 __send_prepared_auth_request(monc, ret);
719 } else if (monc->auth->ops->is_authenticated(monc->auth)) { 722 } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) {
720 dout("authenticated, starting session\n"); 723 dout("authenticated, starting session\n");
721 724
722 monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT; 725 monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
index afa7bb3895c4..d25b4add85b4 100644
--- a/fs/ceph/osd_client.c
+++ b/fs/ceph/osd_client.c
@@ -361,8 +361,13 @@ static void put_osd(struct ceph_osd *osd)
361{ 361{
362 dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), 362 dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
363 atomic_read(&osd->o_ref) - 1); 363 atomic_read(&osd->o_ref) - 1);
364 if (atomic_dec_and_test(&osd->o_ref)) 364 if (atomic_dec_and_test(&osd->o_ref)) {
365 struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
366
367 if (osd->o_authorizer)
368 ac->ops->destroy_authorizer(ac, osd->o_authorizer);
365 kfree(osd); 369 kfree(osd);
370 }
366} 371}
367 372
368/* 373/*
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
index cfdd8f4388b7..ddc656fb5c05 100644
--- a/fs/ceph/osdmap.c
+++ b/fs/ceph/osdmap.c
@@ -706,7 +706,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
706 len, *p, end); 706 len, *p, end);
707 newcrush = crush_decode(*p, min(*p+len, end)); 707 newcrush = crush_decode(*p, min(*p+len, end));
708 if (IS_ERR(newcrush)) 708 if (IS_ERR(newcrush))
709 return ERR_PTR(PTR_ERR(newcrush)); 709 return ERR_CAST(newcrush);
710 } 710 }
711 711
712 /* new flags? */ 712 /* new flags? */
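
The osdmap.c hunk above (and the super.c hunk below) replace the ERR_PTR(PTR_ERR(p)) pattern with ERR_CAST(p) when an error pointer of one type has to be propagated as another type. A minimal sketch of the idiom, using made-up foo/bar types purely for illustration:

    #include <linux/err.h>

    struct foo;
    struct bar;

    struct foo *make_foo(void);     /* hypothetical constructor that may return an ERR_PTR */

    static struct bar *make_bar(void)
    {
            struct foo *f = make_foo();

            if (IS_ERR(f))
                    return ERR_CAST(f);     /* same as ERR_PTR(PTR_ERR(f)), but keeps the intent obvious */
            /* ... build and return a bar from f ... */
            return NULL;
    }
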
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 7c663d9b9f81..4e0bee240b9d 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -669,9 +669,17 @@ static void ceph_destroy_client(struct ceph_client *client)
669 669
670 /* unmount */ 670 /* unmount */
671 ceph_mdsc_stop(&client->mdsc); 671 ceph_mdsc_stop(&client->mdsc);
672 ceph_monc_stop(&client->monc);
673 ceph_osdc_stop(&client->osdc); 672 ceph_osdc_stop(&client->osdc);
674 673
674 /*
675 * make sure mds and osd connections close out before destroying
676 * the auth module, which is needed to free those connections'
677 * ceph_authorizers.
678 */
679 ceph_msgr_flush();
680
681 ceph_monc_stop(&client->monc);
682
675 ceph_adjust_min_caps(-client->min_caps); 683 ceph_adjust_min_caps(-client->min_caps);
676 684
677 ceph_debugfs_client_cleanup(client); 685 ceph_debugfs_client_cleanup(client);
@@ -738,7 +746,7 @@ static struct dentry *open_root_dentry(struct ceph_client *client,
738 dout("open_root_inode opening '%s'\n", path); 746 dout("open_root_inode opening '%s'\n", path);
739 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 747 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
740 if (IS_ERR(req)) 748 if (IS_ERR(req))
741 return ERR_PTR(PTR_ERR(req)); 749 return ERR_CAST(req);
742 req->r_path1 = kstrdup(path, GFP_NOFS); 750 req->r_path1 = kstrdup(path, GFP_NOFS);
743 req->r_ino1.ino = CEPH_INO_ROOT; 751 req->r_ino1.ino = CEPH_INO_ROOT;
744 req->r_ino1.snap = CEPH_NOSNAP; 752 req->r_ino1.snap = CEPH_NOSNAP;
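
The comment added to ceph_destroy_client() above describes a general flush-before-free ordering: the messenger workqueue can still run connection work that touches the auth module, so it has to be drained before ceph_monc_stop() tears that module down. A tiny generic sketch of the same ordering, with example_* names invented for illustration:

    #include <linux/workqueue.h>

    struct example_auth;                                    /* hypothetical auth module */
    void example_auth_destroy(struct example_auth *auth);   /* hypothetical */

    static void example_client_teardown(struct workqueue_struct *msgr_wq,
                                        struct example_auth *auth)
    {
            /* queued connection work may still call into 'auth', so let it drain first */
            flush_workqueue(msgr_wq);
            /* only now is it safe to destroy the auth module and its authorizers */
            example_auth_destroy(auth);
    }
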
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 3725c9ee9d08..10a4a406e887 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -10,7 +10,6 @@
10#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/mempool.h> 11#include <linux/mempool.h>
12#include <linux/pagemap.h> 12#include <linux/pagemap.h>
13#include <linux/slab.h>
14#include <linux/wait.h> 13#include <linux/wait.h>
15#include <linux/writeback.h> 14#include <linux/writeback.h>
16#include <linux/slab.h> 15#include <linux/slab.h>
@@ -811,7 +810,7 @@ extern void ceph_put_cap(struct ceph_cap *cap);
811 810
812extern void ceph_queue_caps_release(struct inode *inode); 811extern void ceph_queue_caps_release(struct inode *inode);
813extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc); 812extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc);
814extern int ceph_fsync(struct file *file, struct dentry *dentry, int datasync); 813extern int ceph_fsync(struct file *file, int datasync);
815extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, 814extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
816 struct ceph_mds_session *session); 815 struct ceph_mds_session *session);
817extern int ceph_get_cap_mds(struct inode *inode); 816extern int ceph_get_cap_mds(struct inode *inode);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 0242ff9cbf41..a7eb65c84b1c 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -84,7 +84,7 @@ extern ssize_t cifs_user_read(struct file *file, char __user *read_data,
84extern ssize_t cifs_user_write(struct file *file, const char __user *write_data, 84extern ssize_t cifs_user_write(struct file *file, const char __user *write_data,
85 size_t write_size, loff_t *poffset); 85 size_t write_size, loff_t *poffset);
86extern int cifs_lock(struct file *, int, struct file_lock *); 86extern int cifs_lock(struct file *, int, struct file_lock *);
87extern int cifs_fsync(struct file *, struct dentry *, int); 87extern int cifs_fsync(struct file *, int);
88extern int cifs_flush(struct file *, fl_owner_t id); 88extern int cifs_flush(struct file *, fl_owner_t id);
89extern int cifs_file_mmap(struct file * , struct vm_area_struct *); 89extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
90extern const struct file_operations cifs_dir_ops; 90extern const struct file_operations cifs_dir_ops;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index a83541ec9713..f1ff785b2292 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1676,7 +1676,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
1676 return rc; 1676 return rc;
1677} 1677}
1678 1678
1679int cifs_fsync(struct file *file, struct dentry *dentry, int datasync) 1679int cifs_fsync(struct file *file, int datasync)
1680{ 1680{
1681 int xid; 1681 int xid;
1682 int rc = 0; 1682 int rc = 0;
@@ -1688,7 +1688,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1688 xid = GetXid(); 1688 xid = GetXid();
1689 1689
1690 cFYI(1, "Sync file - name: %s datasync: 0x%x", 1690 cFYI(1, "Sync file - name: %s datasync: 0x%x",
1691 dentry->d_name.name, datasync); 1691 file->f_path.dentry->d_name.name, datasync);
1692 1692
1693 rc = filemap_write_and_wait(inode->i_mapping); 1693 rc = filemap_write_and_wait(inode->i_mapping);
1694 if (rc == 0) { 1694 if (rc == 0) {
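
A change that repeats through the rest of this diff (ceph, cifs, coda, ecryptfs, exofs, ext2, ext3, ...) is the removal of the struct dentry argument from the ->fsync file operation. A minimal sketch of what an implementation looks like after the change, for a made-up myfs; the inode is reached through the file itself (f_mapping->host here, f_path.dentry->d_inode in the cifs and coda hunks):

    #include <linux/fs.h>

    static int myfs_fsync(struct file *file, int datasync)
    {
            struct inode *inode = file->f_mapping->host;

            /* write back dirty pages; a real filesystem would also flush
             * whatever per-inode metadata or journal state it needs */
            return filemap_write_and_wait(inode->i_mapping);
    }

    static const struct file_operations myfs_file_ops = {
            .fsync  = myfs_fsync,
            /* .read/.write/.open/.release elided */
    };
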
diff --git a/fs/coda/coda_int.h b/fs/coda/coda_int.h
index d99860a33890..6b443ff43a19 100644
--- a/fs/coda/coda_int.h
+++ b/fs/coda/coda_int.h
@@ -11,8 +11,7 @@ extern int coda_fake_statfs;
11 11
12void coda_destroy_inodecache(void); 12void coda_destroy_inodecache(void);
13int coda_init_inodecache(void); 13int coda_init_inodecache(void);
14int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, 14int coda_fsync(struct file *coda_file, int datasync);
15 int datasync);
16void coda_sysctl_init(void); 15void coda_sysctl_init(void);
17void coda_sysctl_clean(void); 16void coda_sysctl_clean(void);
18 17
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 7196077b1688..ad3cd2abeeb4 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -202,10 +202,10 @@ int coda_release(struct inode *coda_inode, struct file *coda_file)
202 return 0; 202 return 0;
203} 203}
204 204
205int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync) 205int coda_fsync(struct file *coda_file, int datasync)
206{ 206{
207 struct file *host_file; 207 struct file *host_file;
208 struct inode *coda_inode = coda_dentry->d_inode; 208 struct inode *coda_inode = coda_file->f_path.dentry->d_inode;
209 struct coda_file_info *cfi; 209 struct coda_file_info *cfi;
210 int err = 0; 210 int err = 0;
211 211
diff --git a/fs/compat.c b/fs/compat.c
index 05448730f840..f0b391c50552 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -568,6 +568,79 @@ out:
568 return ret; 568 return ret;
569} 569}
570 570
571/* A write operation does a read from user space and vice versa */
572#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
573
574ssize_t compat_rw_copy_check_uvector(int type,
575 const struct compat_iovec __user *uvector, unsigned long nr_segs,
576 unsigned long fast_segs, struct iovec *fast_pointer,
577 struct iovec **ret_pointer)
578{
579 compat_ssize_t tot_len;
580 struct iovec *iov = *ret_pointer = fast_pointer;
581 ssize_t ret = 0;
582 int seg;
583
584 /*
585 * SuS says "The readv() function *may* fail if the iovcnt argument
586 * was less than or equal to 0, or greater than {IOV_MAX}. Linux has
587 * traditionally returned zero for zero segments, so...
588 */
589 if (nr_segs == 0)
590 goto out;
591
592 ret = -EINVAL;
593 if (nr_segs > UIO_MAXIOV || nr_segs < 0)
594 goto out;
595 if (nr_segs > fast_segs) {
596 ret = -ENOMEM;
597 iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
598 if (iov == NULL) {
599 *ret_pointer = fast_pointer;
600 goto out;
601 }
602 }
603 *ret_pointer = iov;
604
605 /*
606 * Single unix specification:
607 * We should -EINVAL if an element length is not >= 0 and fitting an
608 * ssize_t. The total length is fitting an ssize_t
609 *
610 * Be careful here because iov_len is a size_t not an ssize_t
611 */
612 tot_len = 0;
613 ret = -EINVAL;
614 for (seg = 0; seg < nr_segs; seg++) {
615 compat_ssize_t tmp = tot_len;
616 compat_uptr_t buf;
617 compat_ssize_t len;
618
619 if (__get_user(len, &uvector->iov_len) ||
620 __get_user(buf, &uvector->iov_base)) {
621 ret = -EFAULT;
622 goto out;
623 }
624 if (len < 0) /* size_t not fitting in compat_ssize_t .. */
625 goto out;
626 tot_len += len;
627 if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
628 goto out;
629 if (!access_ok(vrfy_dir(type), buf, len)) {
630 ret = -EFAULT;
631 goto out;
632 }
633 iov->iov_base = compat_ptr(buf);
634 iov->iov_len = (compat_size_t) len;
635 uvector++;
636 iov++;
637 }
638 ret = tot_len;
639
640out:
641 return ret;
642}
643
571static inline long 644static inline long
572copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64) 645copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
573{ 646{
@@ -600,7 +673,7 @@ compat_sys_io_submit(aio_context_t ctx_id, int nr, u32 __user *iocb)
600 iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64)); 673 iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64));
601 ret = copy_iocb(nr, iocb, iocb64); 674 ret = copy_iocb(nr, iocb, iocb64);
602 if (!ret) 675 if (!ret)
603 ret = sys_io_submit(ctx_id, nr, iocb64); 676 ret = do_io_submit(ctx_id, nr, iocb64, 1);
604 return ret; 677 return ret;
605} 678}
606 679
@@ -1077,70 +1150,21 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
1077{ 1150{
1078 compat_ssize_t tot_len; 1151 compat_ssize_t tot_len;
1079 struct iovec iovstack[UIO_FASTIOV]; 1152 struct iovec iovstack[UIO_FASTIOV];
1080 struct iovec *iov=iovstack, *vector; 1153 struct iovec *iov;
1081 ssize_t ret; 1154 ssize_t ret;
1082 int seg;
1083 io_fn_t fn; 1155 io_fn_t fn;
1084 iov_fn_t fnv; 1156 iov_fn_t fnv;
1085 1157
1086 /*
1087 * SuS says "The readv() function *may* fail if the iovcnt argument
1088 * was less than or equal to 0, or greater than {IOV_MAX}. Linux has
1089 * traditionally returned zero for zero segments, so...
1090 */
1091 ret = 0;
1092 if (nr_segs == 0)
1093 goto out;
1094
1095 /*
1096 * First get the "struct iovec" from user memory and
1097 * verify all the pointers
1098 */
1099 ret = -EINVAL; 1158 ret = -EINVAL;
1100 if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
1101 goto out;
1102 if (!file->f_op) 1159 if (!file->f_op)
1103 goto out; 1160 goto out;
1104 if (nr_segs > UIO_FASTIOV) { 1161
1105 ret = -ENOMEM;
1106 iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
1107 if (!iov)
1108 goto out;
1109 }
1110 ret = -EFAULT; 1162 ret = -EFAULT;
1111 if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector))) 1163 if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector)))
1112 goto out; 1164 goto out;
1113 1165
1114 /* 1166 tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs,
1115 * Single unix specification: 1167 UIO_FASTIOV, iovstack, &iov);
1116 * We should -EINVAL if an element length is not >= 0 and fitting an
1117 * ssize_t. The total length is fitting an ssize_t
1118 *
1119 * Be careful here because iov_len is a size_t not an ssize_t
1120 */
1121 tot_len = 0;
1122 vector = iov;
1123 ret = -EINVAL;
1124 for (seg = 0 ; seg < nr_segs; seg++) {
1125 compat_ssize_t tmp = tot_len;
1126 compat_ssize_t len;
1127 compat_uptr_t buf;
1128
1129 if (__get_user(len, &uvector->iov_len) ||
1130 __get_user(buf, &uvector->iov_base)) {
1131 ret = -EFAULT;
1132 goto out;
1133 }
1134 if (len < 0) /* size_t not fitting an compat_ssize_t .. */
1135 goto out;
1136 tot_len += len;
1137 if (tot_len < tmp) /* maths overflow on the compat_ssize_t */
1138 goto out;
1139 vector->iov_base = compat_ptr(buf);
1140 vector->iov_len = (compat_size_t) len;
1141 uvector++;
1142 vector++;
1143 }
1144 if (tot_len == 0) { 1168 if (tot_len == 0) {
1145 ret = 0; 1169 ret = 0;
1146 goto out; 1170 goto out;
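
compat_rw_copy_check_uvector() is factored out above so that callers other than compat_do_readv_writev() (the hunk that follows uses it; the compat AIO path is the other intended user) can validate and copy a compat iovec array in one step. A rough sketch of a caller, with everything around the call made up; it assumes the declaration this series adds to linux/compat.h:

    #include <linux/compat.h>
    #include <linux/fs.h>
    #include <linux/slab.h>
    #include <linux/uio.h>

    static ssize_t example_compat_read(struct file *file,
                                       const struct compat_iovec __user *uvec,
                                       unsigned long nr_segs)
    {
            struct iovec iovstack[UIO_FASTIOV];
            struct iovec *iov = iovstack;
            ssize_t tot_len;

            tot_len = compat_rw_copy_check_uvector(READ, uvec, nr_segs,
                                                   UIO_FASTIOV, iovstack, &iov);
            if (tot_len <= 0)
                    goto out;       /* zero segments, -EINVAL, -EFAULT or -ENOMEM */

            /* ... perform the read of tot_len bytes described by iov ... */

    out:
            if (iov != iovstack)
                    kfree(iov);     /* the helper may have allocated a bigger array */
            return tot_len;
    }
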
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index c8af2d91174b..41645142b88b 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -72,16 +72,11 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
72 if (!sd) 72 if (!sd)
73 return -EINVAL; 73 return -EINVAL;
74 74
75 sd_iattr = sd->s_iattr; 75 error = simple_setattr(dentry, iattr);
76
77 error = inode_change_ok(inode, iattr);
78 if (error)
79 return error;
80
81 error = inode_setattr(inode, iattr);
82 if (error) 76 if (error)
83 return error; 77 return error;
84 78
79 sd_iattr = sd->s_iattr;
85 if (!sd_iattr) { 80 if (!sd_iattr) {
86 /* setting attributes for the first time, allocate now */ 81 /* setting attributes for the first time, allocate now */
87 sd_iattr = kzalloc(sizeof(struct iattr), GFP_KERNEL); 82 sd_iattr = kzalloc(sizeof(struct iattr), GFP_KERNEL);
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 4d74fc72c195..0210898458b2 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -277,8 +277,10 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_x32, debugfs_u32_get, debugfs_u32_set, "0x%08llx\n"
277DEFINE_SIMPLE_ATTRIBUTE(fops_x32_ro, debugfs_u32_get, NULL, "0x%08llx\n"); 277DEFINE_SIMPLE_ATTRIBUTE(fops_x32_ro, debugfs_u32_get, NULL, "0x%08llx\n");
278DEFINE_SIMPLE_ATTRIBUTE(fops_x32_wo, NULL, debugfs_u32_set, "0x%08llx\n"); 278DEFINE_SIMPLE_ATTRIBUTE(fops_x32_wo, NULL, debugfs_u32_set, "0x%08llx\n");
279 279
280DEFINE_SIMPLE_ATTRIBUTE(fops_x64, debugfs_u64_get, debugfs_u64_set, "0x%016llx\n");
281
280/* 282/*
281 * debugfs_create_x{8,16,32} - create a debugfs file that is used to read and write an unsigned {8,16,32}-bit value 283 * debugfs_create_x{8,16,32,64} - create a debugfs file that is used to read and write an unsigned {8,16,32,64}-bit value
282 * 284 *
283 * These functions are exactly the same as the above functions (but use a hex 285 * These functions are exactly the same as the above functions (but use a hex
284 * output for the decimal challenged). For details look at the above unsigned 286 * output for the decimal challenged). For details look at the above unsigned
@@ -357,6 +359,23 @@ struct dentry *debugfs_create_x32(const char *name, mode_t mode,
357} 359}
358EXPORT_SYMBOL_GPL(debugfs_create_x32); 360EXPORT_SYMBOL_GPL(debugfs_create_x32);
359 361
362/**
363 * debugfs_create_x64 - create a debugfs file that is used to read and write an unsigned 64-bit value
364 * @name: a pointer to a string containing the name of the file to create.
365 * @mode: the permission that the file should have
366 * @parent: a pointer to the parent dentry for this file. This should be a
367 * directory dentry if set. If this parameter is %NULL, then the
368 * file will be created in the root of the debugfs filesystem.
369 * @value: a pointer to the variable that the file should read to and write
370 * from.
371 */
372struct dentry *debugfs_create_x64(const char *name, mode_t mode,
373 struct dentry *parent, u64 *value)
374{
375 return debugfs_create_file(name, mode, parent, value, &fops_x64);
376}
377EXPORT_SYMBOL_GPL(debugfs_create_x64);
378
360 379
361static int debugfs_size_t_set(void *data, u64 val) 380static int debugfs_size_t_set(void *data, u64 val)
362{ 381{
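
debugfs_create_x64() fills the gap left by the existing x8/x16/x32 helpers. A minimal usage sketch for a module that wants a 64-bit value displayed in hex; all names here are invented for illustration:

    #include <linux/debugfs.h>
    #include <linux/module.h>

    static u64 example_flags;               /* shown and accepted as 0x%016llx */
    static struct dentry *example_dir;

    static int __init example_init(void)
    {
            example_dir = debugfs_create_dir("example", NULL);
            if (!example_dir)
                    return -ENOMEM;
            debugfs_create_x64("flags", 0644, example_dir, &example_flags);
            return 0;
    }

    static void __exit example_exit(void)
    {
            debugfs_remove_recursive(example_dir);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
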
diff --git a/fs/direct-io.c b/fs/direct-io.c
index e82adc2debb7..7600aacf531d 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -82,6 +82,8 @@ struct dio {
82 int reap_counter; /* rate limit reaping */ 82 int reap_counter; /* rate limit reaping */
83 get_block_t *get_block; /* block mapping function */ 83 get_block_t *get_block; /* block mapping function */
84 dio_iodone_t *end_io; /* IO completion function */ 84 dio_iodone_t *end_io; /* IO completion function */
85 dio_submit_t *submit_io; /* IO submission function */
86 loff_t logical_offset_in_bio; /* current first logical block in bio */
85 sector_t final_block_in_bio; /* current final block in bio + 1 */ 87 sector_t final_block_in_bio; /* current final block in bio + 1 */
86 sector_t next_block_for_io; /* next block to be put under IO, 88 sector_t next_block_for_io; /* next block to be put under IO,
87 in dio_blocks units */ 89 in dio_blocks units */
@@ -96,6 +98,7 @@ struct dio {
96 unsigned cur_page_offset; /* Offset into it, in bytes */ 98 unsigned cur_page_offset; /* Offset into it, in bytes */
97 unsigned cur_page_len; /* Nr of bytes at cur_page_offset */ 99 unsigned cur_page_len; /* Nr of bytes at cur_page_offset */
98 sector_t cur_page_block; /* Where it starts */ 100 sector_t cur_page_block; /* Where it starts */
101 loff_t cur_page_fs_offset; /* Offset in file */
99 102
100 /* BIO completion state */ 103 /* BIO completion state */
101 spinlock_t bio_lock; /* protects BIO fields below */ 104 spinlock_t bio_lock; /* protects BIO fields below */
@@ -300,6 +303,26 @@ static void dio_bio_end_io(struct bio *bio, int error)
300 spin_unlock_irqrestore(&dio->bio_lock, flags); 303 spin_unlock_irqrestore(&dio->bio_lock, flags);
301} 304}
302 305
306/**
307 * dio_end_io - handle the end io action for the given bio
308 * @bio: The direct io bio that's being completed
309 * @error: Error if there was one
310 *
311 * This is meant to be called by any filesystem that uses its own dio_submit_t
312 * so that the DIO specific endio actions are dealt with after the filesystem
313 * has done its completion work.
314 */
315void dio_end_io(struct bio *bio, int error)
316{
317 struct dio *dio = bio->bi_private;
318
319 if (dio->is_async)
320 dio_bio_end_aio(bio, error);
321 else
322 dio_bio_end_io(bio, error);
323}
324EXPORT_SYMBOL_GPL(dio_end_io);
325
303static int 326static int
304dio_bio_alloc(struct dio *dio, struct block_device *bdev, 327dio_bio_alloc(struct dio *dio, struct block_device *bdev,
305 sector_t first_sector, int nr_vecs) 328 sector_t first_sector, int nr_vecs)
@@ -316,6 +339,7 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev,
316 bio->bi_end_io = dio_bio_end_io; 339 bio->bi_end_io = dio_bio_end_io;
317 340
318 dio->bio = bio; 341 dio->bio = bio;
342 dio->logical_offset_in_bio = dio->cur_page_fs_offset;
319 return 0; 343 return 0;
320} 344}
321 345
@@ -340,10 +364,15 @@ static void dio_bio_submit(struct dio *dio)
340 if (dio->is_async && dio->rw == READ) 364 if (dio->is_async && dio->rw == READ)
341 bio_set_pages_dirty(bio); 365 bio_set_pages_dirty(bio);
342 366
343 submit_bio(dio->rw, bio); 367 if (dio->submit_io)
368 dio->submit_io(dio->rw, bio, dio->inode,
369 dio->logical_offset_in_bio);
370 else
371 submit_bio(dio->rw, bio);
344 372
345 dio->bio = NULL; 373 dio->bio = NULL;
346 dio->boundary = 0; 374 dio->boundary = 0;
375 dio->logical_offset_in_bio = 0;
347} 376}
348 377
349/* 378/*
@@ -603,10 +632,26 @@ static int dio_send_cur_page(struct dio *dio)
603 int ret = 0; 632 int ret = 0;
604 633
605 if (dio->bio) { 634 if (dio->bio) {
635 loff_t cur_offset = dio->block_in_file << dio->blkbits;
636 loff_t bio_next_offset = dio->logical_offset_in_bio +
637 dio->bio->bi_size;
638
606 /* 639 /*
607 * See whether this new request is contiguous with the old 640 * See whether this new request is contiguous with the old.
641 *
642 * Btrfs cannot handle having logically non-contiguous requests
643 * submitted. For example if you have
644 *
645 * Logical: [0-4095][HOLE][8192-12287]
646 * Physical: [0-4095] [4096-8191]
647 *
648 * We cannot submit those pages together as one BIO. So if our
649 * current logical offset in the file does not equal what would
650 * be the next logical offset in the bio, submit the bio we
651 * have.
608 */ 652 */
609 if (dio->final_block_in_bio != dio->cur_page_block) 653 if (dio->final_block_in_bio != dio->cur_page_block ||
654 cur_offset != bio_next_offset)
610 dio_bio_submit(dio); 655 dio_bio_submit(dio);
611 /* 656 /*
612 * Submit now if the underlying fs is about to perform a 657 * Submit now if the underlying fs is about to perform a
@@ -701,6 +746,7 @@ submit_page_section(struct dio *dio, struct page *page,
701 dio->cur_page_offset = offset; 746 dio->cur_page_offset = offset;
702 dio->cur_page_len = len; 747 dio->cur_page_len = len;
703 dio->cur_page_block = blocknr; 748 dio->cur_page_block = blocknr;
749 dio->cur_page_fs_offset = dio->block_in_file << dio->blkbits;
704out: 750out:
705 return ret; 751 return ret;
706} 752}
@@ -935,7 +981,7 @@ static ssize_t
935direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, 981direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
936 const struct iovec *iov, loff_t offset, unsigned long nr_segs, 982 const struct iovec *iov, loff_t offset, unsigned long nr_segs,
937 unsigned blkbits, get_block_t get_block, dio_iodone_t end_io, 983 unsigned blkbits, get_block_t get_block, dio_iodone_t end_io,
938 struct dio *dio) 984 dio_submit_t submit_io, struct dio *dio)
939{ 985{
940 unsigned long user_addr; 986 unsigned long user_addr;
941 unsigned long flags; 987 unsigned long flags;
@@ -952,6 +998,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
952 998
953 dio->get_block = get_block; 999 dio->get_block = get_block;
954 dio->end_io = end_io; 1000 dio->end_io = end_io;
1001 dio->submit_io = submit_io;
955 dio->final_block_in_bio = -1; 1002 dio->final_block_in_bio = -1;
956 dio->next_block_for_io = -1; 1003 dio->next_block_for_io = -1;
957 1004
@@ -1008,7 +1055,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1008 } 1055 }
1009 } /* end iovec loop */ 1056 } /* end iovec loop */
1010 1057
1011 if (ret == -ENOTBLK && (rw & WRITE)) { 1058 if (ret == -ENOTBLK) {
1012 /* 1059 /*
1013 * The remaining part of the request will be 1060 * The remaining part of the request will be
1014 * be handled by buffered I/O when we return 1061 * be handled by buffered I/O when we return
@@ -1087,30 +1134,11 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1087 return ret; 1134 return ret;
1088} 1135}
1089 1136
1090/*
1091 * This is a library function for use by filesystem drivers.
1092 *
1093 * The locking rules are governed by the flags parameter:
1094 * - if the flags value contains DIO_LOCKING we use a fancy locking
1095 * scheme for dumb filesystems.
1096 * For writes this function is called under i_mutex and returns with
1097 * i_mutex held, for reads, i_mutex is not held on entry, but it is
1098 * taken and dropped again before returning.
1099 * For reads and writes i_alloc_sem is taken in shared mode and released
1100 * on I/O completion (which may happen asynchronously after returning to
1101 * the caller).
1102 *
1103 * - if the flags value does NOT contain DIO_LOCKING we don't use any
1104 * internal locking but rather rely on the filesystem to synchronize
1105 * direct I/O reads/writes versus each other and truncate.
1106 * For reads and writes both i_mutex and i_alloc_sem are not held on
1107 * entry and are never taken.
1108 */
1109ssize_t 1137ssize_t
1110__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, 1138__blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, struct inode *inode,
1111 struct block_device *bdev, const struct iovec *iov, loff_t offset, 1139 struct block_device *bdev, const struct iovec *iov, loff_t offset,
1112 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, 1140 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
1113 int flags) 1141 dio_submit_t submit_io, int flags)
1114{ 1142{
1115 int seg; 1143 int seg;
1116 size_t size; 1144 size_t size;
@@ -1197,11 +1225,49 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1197 (end > i_size_read(inode))); 1225 (end > i_size_read(inode)));
1198 1226
1199 retval = direct_io_worker(rw, iocb, inode, iov, offset, 1227 retval = direct_io_worker(rw, iocb, inode, iov, offset,
1200 nr_segs, blkbits, get_block, end_io, dio); 1228 nr_segs, blkbits, get_block, end_io,
1229 submit_io, dio);
1230
1231out:
1232 return retval;
1233}
1234EXPORT_SYMBOL(__blockdev_direct_IO_newtrunc);
1235
1236/*
1237 * This is a library function for use by filesystem drivers.
1238 *
1239 * The locking rules are governed by the flags parameter:
1240 * - if the flags value contains DIO_LOCKING we use a fancy locking
1241 * scheme for dumb filesystems.
1242 * For writes this function is called under i_mutex and returns with
1243 * i_mutex held, for reads, i_mutex is not held on entry, but it is
1244 * taken and dropped again before returning.
1245 * For reads and writes i_alloc_sem is taken in shared mode and released
1246 * on I/O completion (which may happen asynchronously after returning to
1247 * the caller).
1248 *
1249 * - if the flags value does NOT contain DIO_LOCKING we don't use any
1250 * internal locking but rather rely on the filesystem to synchronize
1251 * direct I/O reads/writes versus each other and truncate.
1252 * For reads and writes both i_mutex and i_alloc_sem are not held on
1253 * entry and are never taken.
1254 */
1255ssize_t
1256__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1257 struct block_device *bdev, const struct iovec *iov, loff_t offset,
1258 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
1259 dio_submit_t submit_io, int flags)
1260{
1261 ssize_t retval;
1201 1262
1263 retval = __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov,
1264 offset, nr_segs, get_block, end_io, submit_io, flags);
1202 /* 1265 /*
1203 * In case of error extending write may have instantiated a few 1266 * In case of error extending write may have instantiated a few
1204 * blocks outside i_size. Trim these off again for DIO_LOCKING. 1267 * blocks outside i_size. Trim these off again for DIO_LOCKING.
1268 * NOTE: DIO_NO_LOCK/DIO_OWN_LOCK callers have to handle this in
1269 * their own manner. This is a further example of where the old
1270 * truncate sequence is inadequate.
1205 * 1271 *
1206 * NOTE: filesystems with their own locking have to handle this 1272 * NOTE: filesystems with their own locking have to handle this
1207 * on their own. 1273 * on their own.
@@ -1209,12 +1275,13 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1209 if (flags & DIO_LOCKING) { 1275 if (flags & DIO_LOCKING) {
1210 if (unlikely((rw & WRITE) && retval < 0)) { 1276 if (unlikely((rw & WRITE) && retval < 0)) {
1211 loff_t isize = i_size_read(inode); 1277 loff_t isize = i_size_read(inode);
1278 loff_t end = offset + iov_length(iov, nr_segs);
1279
1212 if (end > isize) 1280 if (end > isize)
1213 vmtruncate(inode, isize); 1281 vmtruncate(inode, isize);
1214 } 1282 }
1215 } 1283 }
1216 1284
1217out:
1218 return retval; 1285 return retval;
1219} 1286}
1220EXPORT_SYMBOL(__blockdev_direct_IO); 1287EXPORT_SYMBOL(__blockdev_direct_IO);
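
The new submit_io argument lets a filesystem control how each direct-I/O bio is submitted (btrfs is the motivating user, per the comment in dio_send_cur_page()); per the dio_end_io() comment it must eventually hand completion back to the generic code. A rough sketch of such a hook, with myfs_* names made up and all real btrfs plumbing omitted; passing NULL for submit_io keeps the old plain submit_bio() behaviour:

    #include <linux/bio.h>
    #include <linux/fs.h>

    /* completion handler installed by the hook below: do filesystem-side work
     * first, then let the generic direct-io code finish the request */
    static void myfs_dio_bi_end_io(struct bio *bio, int error)
    {
            /* ... checksum verification, ordered-extent bookkeeping, etc ... */
            dio_end_io(bio, error);
    }

    /* matches the dio_submit_t shape: called instead of submit_bio() per bio */
    static void myfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
                                   loff_t file_offset)
    {
            bio->bi_end_io = myfs_dio_bi_end_io;    /* intercept completion */
            submit_bio(rw, bio);
    }
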
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 3bdddbcc785f..e8fcf4e2ed7d 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -274,7 +274,7 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
274} 274}
275 275
276static int 276static int
277ecryptfs_fsync(struct file *file, struct dentry *dentry, int datasync) 277ecryptfs_fsync(struct file *file, int datasync)
278{ 278{
279 return vfs_fsync(ecryptfs_file_to_lower(file), datasync); 279 return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
280} 280}
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 65dee2f336ae..31ef5252f0fe 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -805,7 +805,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
805 - (ia->ia_size & ~PAGE_CACHE_MASK)); 805 - (ia->ia_size & ~PAGE_CACHE_MASK));
806 806
807 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { 807 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
808 rc = vmtruncate(inode, ia->ia_size); 808 rc = simple_setsize(inode, ia->ia_size);
809 if (rc) 809 if (rc)
810 goto out; 810 goto out;
811 lower_ia->ia_size = ia->ia_size; 811 lower_ia->ia_size = ia->ia_size;
@@ -830,7 +830,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
830 goto out; 830 goto out;
831 } 831 }
832 } 832 }
833 vmtruncate(inode, ia->ia_size); 833 simple_setsize(inode, ia->ia_size);
834 rc = ecryptfs_write_inode_size_to_metadata(inode); 834 rc = ecryptfs_write_inode_size_to_metadata(inode);
835 if (rc) { 835 if (rc) {
836 printk(KERN_ERR "Problem with " 836 printk(KERN_ERR "Problem with "
diff --git a/fs/exec.c b/fs/exec.c
index 9badbc0bfb1d..e19de6a80339 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -768,7 +768,6 @@ static int de_thread(struct task_struct *tsk)
768 struct signal_struct *sig = tsk->signal; 768 struct signal_struct *sig = tsk->signal;
769 struct sighand_struct *oldsighand = tsk->sighand; 769 struct sighand_struct *oldsighand = tsk->sighand;
770 spinlock_t *lock = &oldsighand->siglock; 770 spinlock_t *lock = &oldsighand->siglock;
771 int count;
772 771
773 if (thread_group_empty(tsk)) 772 if (thread_group_empty(tsk))
774 goto no_thread_group; 773 goto no_thread_group;
@@ -785,13 +784,13 @@ static int de_thread(struct task_struct *tsk)
785 spin_unlock_irq(lock); 784 spin_unlock_irq(lock);
786 return -EAGAIN; 785 return -EAGAIN;
787 } 786 }
787
788 sig->group_exit_task = tsk; 788 sig->group_exit_task = tsk;
789 zap_other_threads(tsk); 789 sig->notify_count = zap_other_threads(tsk);
790 if (!thread_group_leader(tsk))
791 sig->notify_count--;
790 792
791 /* Account for the thread group leader hanging around: */ 793 while (sig->notify_count) {
792 count = thread_group_leader(tsk) ? 1 : 2;
793 sig->notify_count = count;
794 while (atomic_read(&sig->count) > count) {
795 __set_current_state(TASK_UNINTERRUPTIBLE); 794 __set_current_state(TASK_UNINTERRUPTIBLE);
796 spin_unlock_irq(lock); 795 spin_unlock_irq(lock);
797 schedule(); 796 schedule();
@@ -1662,12 +1661,15 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
1662 struct task_struct *tsk = current; 1661 struct task_struct *tsk = current;
1663 struct mm_struct *mm = tsk->mm; 1662 struct mm_struct *mm = tsk->mm;
1664 struct completion *vfork_done; 1663 struct completion *vfork_done;
1665 int core_waiters; 1664 int core_waiters = -EBUSY;
1666 1665
1667 init_completion(&core_state->startup); 1666 init_completion(&core_state->startup);
1668 core_state->dumper.task = tsk; 1667 core_state->dumper.task = tsk;
1669 core_state->dumper.next = NULL; 1668 core_state->dumper.next = NULL;
1670 core_waiters = zap_threads(tsk, mm, core_state, exit_code); 1669
1670 down_write(&mm->mmap_sem);
1671 if (!mm->core_state)
1672 core_waiters = zap_threads(tsk, mm, core_state, exit_code);
1671 up_write(&mm->mmap_sem); 1673 up_write(&mm->mmap_sem);
1672 1674
1673 if (unlikely(core_waiters < 0)) 1675 if (unlikely(core_waiters < 0))
@@ -1787,21 +1789,61 @@ static void wait_for_dump_helpers(struct file *file)
1787} 1789}
1788 1790
1789 1791
1792/*
1793 * umh_pipe_setup
1794 * helper function to customize the process used
1795 * to collect the core in userspace. Specifically
1796 * it sets up a pipe and installs it as fd 0 (stdin)
1797 * for the process. Returns 0 on success, or
1798 * PTR_ERR on failure.
1799 * Note that it also sets the core limit to 1. This
1800 * is a special value that we use to trap recursive
1801 * core dumps
1802 */
1803static int umh_pipe_setup(struct subprocess_info *info)
1804{
1805 struct file *rp, *wp;
1806 struct fdtable *fdt;
1807 struct coredump_params *cp = (struct coredump_params *)info->data;
1808 struct files_struct *cf = current->files;
1809
1810 wp = create_write_pipe(0);
1811 if (IS_ERR(wp))
1812 return PTR_ERR(wp);
1813
1814 rp = create_read_pipe(wp, 0);
1815 if (IS_ERR(rp)) {
1816 free_write_pipe(wp);
1817 return PTR_ERR(rp);
1818 }
1819
1820 cp->file = wp;
1821
1822 sys_close(0);
1823 fd_install(0, rp);
1824 spin_lock(&cf->file_lock);
1825 fdt = files_fdtable(cf);
1826 FD_SET(0, fdt->open_fds);
1827 FD_CLR(0, fdt->close_on_exec);
1828 spin_unlock(&cf->file_lock);
1829
1830 /* and disallow core files too */
1831 current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
1832
1833 return 0;
1834}
1835
1790void do_coredump(long signr, int exit_code, struct pt_regs *regs) 1836void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1791{ 1837{
1792 struct core_state core_state; 1838 struct core_state core_state;
1793 char corename[CORENAME_MAX_SIZE + 1]; 1839 char corename[CORENAME_MAX_SIZE + 1];
1794 struct mm_struct *mm = current->mm; 1840 struct mm_struct *mm = current->mm;
1795 struct linux_binfmt * binfmt; 1841 struct linux_binfmt * binfmt;
1796 struct inode * inode;
1797 const struct cred *old_cred; 1842 const struct cred *old_cred;
1798 struct cred *cred; 1843 struct cred *cred;
1799 int retval = 0; 1844 int retval = 0;
1800 int flag = 0; 1845 int flag = 0;
1801 int ispipe = 0; 1846 int ispipe;
1802 char **helper_argv = NULL;
1803 int helper_argc = 0;
1804 int dump_count = 0;
1805 static atomic_t core_dump_count = ATOMIC_INIT(0); 1847 static atomic_t core_dump_count = ATOMIC_INIT(0);
1806 struct coredump_params cprm = { 1848 struct coredump_params cprm = {
1807 .signr = signr, 1849 .signr = signr,
@@ -1820,23 +1862,12 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1820 binfmt = mm->binfmt; 1862 binfmt = mm->binfmt;
1821 if (!binfmt || !binfmt->core_dump) 1863 if (!binfmt || !binfmt->core_dump)
1822 goto fail; 1864 goto fail;
1823 1865 if (!__get_dumpable(cprm.mm_flags))
1824 cred = prepare_creds();
1825 if (!cred) {
1826 retval = -ENOMEM;
1827 goto fail; 1866 goto fail;
1828 }
1829 1867
1830 down_write(&mm->mmap_sem); 1868 cred = prepare_creds();
1831 /* 1869 if (!cred)
1832 * If another thread got here first, or we are not dumpable, bail out.
1833 */
1834 if (mm->core_state || !__get_dumpable(cprm.mm_flags)) {
1835 up_write(&mm->mmap_sem);
1836 put_cred(cred);
1837 goto fail; 1870 goto fail;
1838 }
1839
1840 /* 1871 /*
1841 * We cannot trust fsuid as being the "true" uid of the 1872 * We cannot trust fsuid as being the "true" uid of the
1842 * process nor do we know its entire history. We only know it 1873 * process nor do we know its entire history. We only know it
@@ -1849,10 +1880,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1849 } 1880 }
1850 1881
1851 retval = coredump_wait(exit_code, &core_state); 1882 retval = coredump_wait(exit_code, &core_state);
1852 if (retval < 0) { 1883 if (retval < 0)
1853 put_cred(cred); 1884 goto fail_creds;
1854 goto fail;
1855 }
1856 1885
1857 old_cred = override_creds(cred); 1886 old_cred = override_creds(cred);
1858 1887
@@ -1870,19 +1899,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1870 ispipe = format_corename(corename, signr); 1899 ispipe = format_corename(corename, signr);
1871 unlock_kernel(); 1900 unlock_kernel();
1872 1901
1873 if ((!ispipe) && (cprm.limit < binfmt->min_coredump))
1874 goto fail_unlock;
1875
1876 if (ispipe) { 1902 if (ispipe) {
1877 if (cprm.limit == 0) { 1903 int dump_count;
1904 char **helper_argv;
1905
1906 if (cprm.limit == 1) {
1878 /* 1907 /*
1879 * Normally core limits are irrelevant to pipes, since 1908 * Normally core limits are irrelevant to pipes, since
1880 * we're not writing to the file system, but we use 1909 * we're not writing to the file system, but we use
1881 * cprm.limit of 0 here as a special value. Any 1910 * cprm.limit of 1 here as a special value. Any
1882 * non-zero limit gets set to RLIM_INFINITY below, but 1911 * non-1 limit gets set to RLIM_INFINITY below, but
1883 * a limit of 0 skips the dump. This is a consistent 1912 * a limit of 0 skips the dump. This is a consistent
1884 * way to catch recursive crashes. We can still crash 1913 * way to catch recursive crashes. We can still crash
1885 * if the core_pattern binary sets RLIM_CORE = !0 1914 * if the core_pattern binary sets RLIM_CORE = !1
1886 * but it runs as root, and can do lots of stupid things 1915 * but it runs as root, and can do lots of stupid things
1887 * Note that we use task_tgid_vnr here to grab the pid 1916 * Note that we use task_tgid_vnr here to grab the pid
1888 * of the process group leader. That way we get the 1917 * of the process group leader. That way we get the
@@ -1890,11 +1919,12 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1890 * core_pattern process dies. 1919 * core_pattern process dies.
1891 */ 1920 */
1892 printk(KERN_WARNING 1921 printk(KERN_WARNING
1893 "Process %d(%s) has RLIMIT_CORE set to 0\n", 1922 "Process %d(%s) has RLIMIT_CORE set to 1\n",
1894 task_tgid_vnr(current), current->comm); 1923 task_tgid_vnr(current), current->comm);
1895 printk(KERN_WARNING "Aborting core\n"); 1924 printk(KERN_WARNING "Aborting core\n");
1896 goto fail_unlock; 1925 goto fail_unlock;
1897 } 1926 }
1927 cprm.limit = RLIM_INFINITY;
1898 1928
1899 dump_count = atomic_inc_return(&core_dump_count); 1929 dump_count = atomic_inc_return(&core_dump_count);
1900 if (core_pipe_limit && (core_pipe_limit < dump_count)) { 1930 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
@@ -1904,71 +1934,74 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1904 goto fail_dropcount; 1934 goto fail_dropcount;
1905 } 1935 }
1906 1936
1907 helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc); 1937 helper_argv = argv_split(GFP_KERNEL, corename+1, NULL);
1908 if (!helper_argv) { 1938 if (!helper_argv) {
1909 printk(KERN_WARNING "%s failed to allocate memory\n", 1939 printk(KERN_WARNING "%s failed to allocate memory\n",
1910 __func__); 1940 __func__);
1911 goto fail_dropcount; 1941 goto fail_dropcount;
1912 } 1942 }
1913 1943
1914 cprm.limit = RLIM_INFINITY; 1944 retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
1915 1945 NULL, UMH_WAIT_EXEC, umh_pipe_setup,
1916 /* SIGPIPE can happen, but it's just never processed */ 1946 NULL, &cprm);
1917 if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL, 1947 argv_free(helper_argv);
1918 &cprm.file)) { 1948 if (retval) {
1919 printk(KERN_INFO "Core dump to %s pipe failed\n", 1949 printk(KERN_INFO "Core dump to %s pipe failed\n",
1920 corename); 1950 corename);
1921 goto fail_dropcount; 1951 goto close_fail;
1922 } 1952 }
1923 } else 1953 } else {
1954 struct inode *inode;
1955
1956 if (cprm.limit < binfmt->min_coredump)
1957 goto fail_unlock;
1958
1924 cprm.file = filp_open(corename, 1959 cprm.file = filp_open(corename,
1925 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, 1960 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
1926 0600); 1961 0600);
1927 if (IS_ERR(cprm.file)) 1962 if (IS_ERR(cprm.file))
1928 goto fail_dropcount; 1963 goto fail_unlock;
1929 inode = cprm.file->f_path.dentry->d_inode;
1930 if (inode->i_nlink > 1)
1931 goto close_fail; /* multiple links - don't dump */
1932 if (!ispipe && d_unhashed(cprm.file->f_path.dentry))
1933 goto close_fail;
1934
1935 /* AK: actually i see no reason to not allow this for named pipes etc.,
1936 but keep the previous behaviour for now. */
1937 if (!ispipe && !S_ISREG(inode->i_mode))
1938 goto close_fail;
1939 /*
1940 * Dont allow local users get cute and trick others to coredump
1941 * into their pre-created files:
1942 * Note, this is not relevant for pipes
1943 */
1944 if (!ispipe && (inode->i_uid != current_fsuid()))
1945 goto close_fail;
1946 if (!cprm.file->f_op)
1947 goto close_fail;
1948 if (!cprm.file->f_op->write)
1949 goto close_fail;
1950 if (!ispipe &&
1951 do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file) != 0)
1952 goto close_fail;
1953 1964
1954 retval = binfmt->core_dump(&cprm); 1965 inode = cprm.file->f_path.dentry->d_inode;
1966 if (inode->i_nlink > 1)
1967 goto close_fail;
1968 if (d_unhashed(cprm.file->f_path.dentry))
1969 goto close_fail;
1970 /*
1971 * AK: actually i see no reason to not allow this for named
1972 * pipes etc, but keep the previous behaviour for now.
1973 */
1974 if (!S_ISREG(inode->i_mode))
1975 goto close_fail;
1976 /*
1977 * Dont allow local users get cute and trick others to coredump
1978 * into their pre-created files.
1979 */
1980 if (inode->i_uid != current_fsuid())
1981 goto close_fail;
1982 if (!cprm.file->f_op || !cprm.file->f_op->write)
1983 goto close_fail;
1984 if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
1985 goto close_fail;
1986 }
1955 1987
1988 retval = binfmt->core_dump(&cprm);
1956 if (retval) 1989 if (retval)
1957 current->signal->group_exit_code |= 0x80; 1990 current->signal->group_exit_code |= 0x80;
1958close_fail: 1991
1959 if (ispipe && core_pipe_limit) 1992 if (ispipe && core_pipe_limit)
1960 wait_for_dump_helpers(cprm.file); 1993 wait_for_dump_helpers(cprm.file);
1961 filp_close(cprm.file, NULL); 1994close_fail:
1995 if (cprm.file)
1996 filp_close(cprm.file, NULL);
1962fail_dropcount: 1997fail_dropcount:
1963 if (dump_count) 1998 if (ispipe)
1964 atomic_dec(&core_dump_count); 1999 atomic_dec(&core_dump_count);
1965fail_unlock: 2000fail_unlock:
1966 if (helper_argv) 2001 coredump_finish(mm);
1967 argv_free(helper_argv);
1968
1969 revert_creds(old_cred); 2002 revert_creds(old_cred);
2003fail_creds:
1970 put_cred(cred); 2004 put_cred(cred);
1971 coredump_finish(mm);
1972fail: 2005fail:
1973 return; 2006 return;
1974} 2007}
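
Because umh_pipe_setup() installs the read end of the pipe as fd 0 of the helper, a piped core_pattern handler simply reads the dump from stdin. A minimal hypothetical user-space collector; the path and core_pattern line below are examples only, not something this patch installs:

    /* built as /usr/local/bin/core-collect and registered with e.g.
     *   echo '|/usr/local/bin/core-collect %p' > /proc/sys/kernel/core_pattern */
    #include <stdio.h>

    int main(int argc, char **argv)
    {
            char path[256];
            char buf[8192];
            size_t n;
            FILE *out;

            snprintf(path, sizeof(path), "/var/tmp/core.%s",
                     argc > 1 ? argv[1] : "unknown");
            out = fopen(path, "w");
            if (!out)
                    return 1;
            while ((n = fread(buf, 1, sizeof(buf), stdin)) > 0)
                    fwrite(buf, 1, n, out);
            fclose(out);
            return 0;
    }
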
diff --git a/fs/exofs/file.c b/fs/exofs/file.c
index 839b9dc1e70f..fef6899be397 100644
--- a/fs/exofs/file.c
+++ b/fs/exofs/file.c
@@ -40,12 +40,11 @@ static int exofs_release_file(struct inode *inode, struct file *filp)
40 return 0; 40 return 0;
41} 41}
42 42
43static int exofs_file_fsync(struct file *filp, struct dentry *dentry, 43static int exofs_file_fsync(struct file *filp, int datasync)
44 int datasync)
45{ 44{
46 int ret; 45 int ret;
47 struct address_space *mapping = filp->f_mapping; 46 struct address_space *mapping = filp->f_mapping;
48 struct inode *inode = dentry->d_inode; 47 struct inode *inode = mapping->host;
49 struct super_block *sb; 48 struct super_block *sb;
50 49
51 ret = filemap_write_and_wait(mapping); 50 ret = filemap_write_and_wait(mapping);
@@ -66,7 +65,7 @@ static int exofs_file_fsync(struct file *filp, struct dentry *dentry,
66 65
67static int exofs_flush(struct file *file, fl_owner_t id) 66static int exofs_flush(struct file *file, fl_owner_t id)
68{ 67{
69 exofs_file_fsync(file, file->f_path.dentry, 1); 68 exofs_file_fsync(file, 1);
70 /* TODO: Flush the OSD target */ 69 /* TODO: Flush the OSD target */
71 return 0; 70 return 0;
72} 71}
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 0b038e47ad2f..52b34f1d2738 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -122,7 +122,6 @@ extern int ext2_write_inode (struct inode *, struct writeback_control *);
122extern void ext2_delete_inode (struct inode *); 122extern void ext2_delete_inode (struct inode *);
123extern int ext2_sync_inode (struct inode *); 123extern int ext2_sync_inode (struct inode *);
124extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int); 124extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int);
125extern void ext2_truncate (struct inode *);
126extern int ext2_setattr (struct dentry *, struct iattr *); 125extern int ext2_setattr (struct dentry *, struct iattr *);
127extern void ext2_set_inode_flags(struct inode *inode); 126extern void ext2_set_inode_flags(struct inode *inode);
128extern void ext2_get_inode_flags(struct ext2_inode_info *); 127extern void ext2_get_inode_flags(struct ext2_inode_info *);
@@ -155,7 +154,7 @@ extern void ext2_write_super (struct super_block *);
155extern const struct file_operations ext2_dir_operations; 154extern const struct file_operations ext2_dir_operations;
156 155
157/* file.c */ 156/* file.c */
158extern int ext2_fsync(struct file *file, struct dentry *dentry, int datasync); 157extern int ext2_fsync(struct file *file, int datasync);
159extern const struct inode_operations ext2_file_inode_operations; 158extern const struct inode_operations ext2_file_inode_operations;
160extern const struct file_operations ext2_file_operations; 159extern const struct file_operations ext2_file_operations;
161extern const struct file_operations ext2_xip_file_operations; 160extern const struct file_operations ext2_xip_file_operations;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 5d198d0697fb..49eec9456c5b 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -40,13 +40,13 @@ static int ext2_release_file (struct inode * inode, struct file * filp)
40 return 0; 40 return 0;
41} 41}
42 42
43int ext2_fsync(struct file *file, struct dentry *dentry, int datasync) 43int ext2_fsync(struct file *file, int datasync)
44{ 44{
45 int ret; 45 int ret;
46 struct super_block *sb = dentry->d_inode->i_sb; 46 struct super_block *sb = file->f_mapping->host->i_sb;
47 struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; 47 struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
48 48
49 ret = simple_fsync(file, dentry, datasync); 49 ret = generic_file_fsync(file, datasync);
50 if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) { 50 if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) {
51 /* We don't really know where the IO error happened... */ 51 /* We don't really know where the IO error happened... */
52 ext2_error(sb, __func__, 52 ext2_error(sb, __func__,
@@ -95,7 +95,6 @@ const struct file_operations ext2_xip_file_operations = {
95#endif 95#endif
96 96
97const struct inode_operations ext2_file_inode_operations = { 97const struct inode_operations ext2_file_inode_operations = {
98 .truncate = ext2_truncate,
99#ifdef CONFIG_EXT2_FS_XATTR 98#ifdef CONFIG_EXT2_FS_XATTR
100 .setxattr = generic_setxattr, 99 .setxattr = generic_setxattr,
101 .getxattr = generic_getxattr, 100 .getxattr = generic_getxattr,
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 527c46d9bc1f..19214435b752 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -54,6 +54,18 @@ static inline int ext2_inode_is_fast_symlink(struct inode *inode)
54 inode->i_blocks - ea_blocks == 0); 54 inode->i_blocks - ea_blocks == 0);
55} 55}
56 56
57static void ext2_truncate_blocks(struct inode *inode, loff_t offset);
58
59static void ext2_write_failed(struct address_space *mapping, loff_t to)
60{
61 struct inode *inode = mapping->host;
62
63 if (to > inode->i_size) {
64 truncate_pagecache(inode, to, inode->i_size);
65 ext2_truncate_blocks(inode, inode->i_size);
66 }
67}
68
57/* 69/*
58 * Called at the last iput() if i_nlink is zero. 70 * Called at the last iput() if i_nlink is zero.
59 */ 71 */
@@ -71,7 +83,7 @@ void ext2_delete_inode (struct inode * inode)
71 83
72 inode->i_size = 0; 84 inode->i_size = 0;
73 if (inode->i_blocks) 85 if (inode->i_blocks)
74 ext2_truncate (inode); 86 ext2_truncate_blocks(inode, 0);
75 ext2_free_inode (inode); 87 ext2_free_inode (inode);
76 88
77 return; 89 return;
@@ -757,8 +769,8 @@ int __ext2_write_begin(struct file *file, struct address_space *mapping,
757 loff_t pos, unsigned len, unsigned flags, 769 loff_t pos, unsigned len, unsigned flags,
758 struct page **pagep, void **fsdata) 770 struct page **pagep, void **fsdata)
759{ 771{
760 return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 772 return block_write_begin_newtrunc(file, mapping, pos, len, flags,
761 ext2_get_block); 773 pagep, fsdata, ext2_get_block);
762} 774}
763 775
764static int 776static int
@@ -766,8 +778,25 @@ ext2_write_begin(struct file *file, struct address_space *mapping,
766 loff_t pos, unsigned len, unsigned flags, 778 loff_t pos, unsigned len, unsigned flags,
767 struct page **pagep, void **fsdata) 779 struct page **pagep, void **fsdata)
768{ 780{
781 int ret;
782
769 *pagep = NULL; 783 *pagep = NULL;
770 return __ext2_write_begin(file, mapping, pos, len, flags, pagep,fsdata); 784 ret = __ext2_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
785 if (ret < 0)
786 ext2_write_failed(mapping, pos + len);
787 return ret;
788}
789
790static int ext2_write_end(struct file *file, struct address_space *mapping,
791 loff_t pos, unsigned len, unsigned copied,
792 struct page *page, void *fsdata)
793{
794 int ret;
795
796 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
797 if (ret < len)
798 ext2_write_failed(mapping, pos + len);
799 return ret;
771} 800}
772 801
773static int 802static int
@@ -775,13 +804,18 @@ ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
775 loff_t pos, unsigned len, unsigned flags, 804 loff_t pos, unsigned len, unsigned flags,
776 struct page **pagep, void **fsdata) 805 struct page **pagep, void **fsdata)
777{ 806{
807 int ret;
808
778 /* 809 /*
779 * Dir-in-pagecache still uses ext2_write_begin. Would have to rework 810 * Dir-in-pagecache still uses ext2_write_begin. Would have to rework
780 * directory handling code to pass around offsets rather than struct 811 * directory handling code to pass around offsets rather than struct
781 * pages in order to make this work easily. 812 * pages in order to make this work easily.
782 */ 813 */
783 return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 814 ret = nobh_write_begin_newtrunc(file, mapping, pos, len, flags, pagep,
784 ext2_get_block); 815 fsdata, ext2_get_block);
816 if (ret < 0)
817 ext2_write_failed(mapping, pos + len);
818 return ret;
785} 819}
786 820
787static int ext2_nobh_writepage(struct page *page, 821static int ext2_nobh_writepage(struct page *page,
@@ -800,10 +834,15 @@ ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
800 loff_t offset, unsigned long nr_segs) 834 loff_t offset, unsigned long nr_segs)
801{ 835{
802 struct file *file = iocb->ki_filp; 836 struct file *file = iocb->ki_filp;
803 struct inode *inode = file->f_mapping->host; 837 struct address_space *mapping = file->f_mapping;
804 838 struct inode *inode = mapping->host;
805 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 839 ssize_t ret;
806 offset, nr_segs, ext2_get_block, NULL); 840
841 ret = blockdev_direct_IO_newtrunc(rw, iocb, inode, inode->i_sb->s_bdev,
842 iov, offset, nr_segs, ext2_get_block, NULL);
843 if (ret < 0 && (rw & WRITE))
844 ext2_write_failed(mapping, offset + iov_length(iov, nr_segs));
845 return ret;
807} 846}
808 847
809static int 848static int
@@ -818,7 +857,7 @@ const struct address_space_operations ext2_aops = {
818 .writepage = ext2_writepage, 857 .writepage = ext2_writepage,
819 .sync_page = block_sync_page, 858 .sync_page = block_sync_page,
820 .write_begin = ext2_write_begin, 859 .write_begin = ext2_write_begin,
821 .write_end = generic_write_end, 860 .write_end = ext2_write_end,
822 .bmap = ext2_bmap, 861 .bmap = ext2_bmap,
823 .direct_IO = ext2_direct_IO, 862 .direct_IO = ext2_direct_IO,
824 .writepages = ext2_writepages, 863 .writepages = ext2_writepages,
@@ -1027,7 +1066,7 @@ static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int de
1027 ext2_free_data(inode, p, q); 1066 ext2_free_data(inode, p, q);
1028} 1067}
1029 1068
1030void ext2_truncate(struct inode *inode) 1069static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
1031{ 1070{
1032 __le32 *i_data = EXT2_I(inode)->i_data; 1071 __le32 *i_data = EXT2_I(inode)->i_data;
1033 struct ext2_inode_info *ei = EXT2_I(inode); 1072 struct ext2_inode_info *ei = EXT2_I(inode);
@@ -1039,27 +1078,8 @@ void ext2_truncate(struct inode *inode)
1039 int n; 1078 int n;
1040 long iblock; 1079 long iblock;
1041 unsigned blocksize; 1080 unsigned blocksize;
1042
1043 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1044 S_ISLNK(inode->i_mode)))
1045 return;
1046 if (ext2_inode_is_fast_symlink(inode))
1047 return;
1048 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1049 return;
1050
1051 blocksize = inode->i_sb->s_blocksize; 1081 blocksize = inode->i_sb->s_blocksize;
1052 iblock = (inode->i_size + blocksize-1) 1082 iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);
1053 >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);
1054
1055 if (mapping_is_xip(inode->i_mapping))
1056 xip_truncate_page(inode->i_mapping, inode->i_size);
1057 else if (test_opt(inode->i_sb, NOBH))
1058 nobh_truncate_page(inode->i_mapping,
1059 inode->i_size, ext2_get_block);
1060 else
1061 block_truncate_page(inode->i_mapping,
1062 inode->i_size, ext2_get_block);
1063 1083
1064 n = ext2_block_to_path(inode, iblock, offsets, NULL); 1084 n = ext2_block_to_path(inode, iblock, offsets, NULL);
1065 if (n == 0) 1085 if (n == 0)
@@ -1127,6 +1147,62 @@ do_indirects:
1127 ext2_discard_reservation(inode); 1147 ext2_discard_reservation(inode);
1128 1148
1129 mutex_unlock(&ei->truncate_mutex); 1149 mutex_unlock(&ei->truncate_mutex);
1150}
1151
1152static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
1153{
1154 /*
1155 * XXX: it seems like a bug here that we don't allow
1156 * IS_APPEND inode to have blocks-past-i_size trimmed off.
1157 * review and fix this.
1158 *
1159 * Also would be nice to be able to handle IO errors and such,
1160 * but that's probably too much to ask.
1161 */
1162 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1163 S_ISLNK(inode->i_mode)))
1164 return;
1165 if (ext2_inode_is_fast_symlink(inode))
1166 return;
1167 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1168 return;
1169 __ext2_truncate_blocks(inode, offset);
1170}
1171
1172int ext2_setsize(struct inode *inode, loff_t newsize)
1173{
1174 loff_t oldsize;
1175 int error;
1176
1177 error = inode_newsize_ok(inode, newsize);
1178 if (error)
1179 return error;
1180
1181 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1182 S_ISLNK(inode->i_mode)))
1183 return -EINVAL;
1184 if (ext2_inode_is_fast_symlink(inode))
1185 return -EINVAL;
1186 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1187 return -EPERM;
1188
1189 if (mapping_is_xip(inode->i_mapping))
1190 error = xip_truncate_page(inode->i_mapping, newsize);
1191 else if (test_opt(inode->i_sb, NOBH))
1192 error = nobh_truncate_page(inode->i_mapping,
1193 newsize, ext2_get_block);
1194 else
1195 error = block_truncate_page(inode->i_mapping,
1196 newsize, ext2_get_block);
1197 if (error)
1198 return error;
1199
1200 oldsize = inode->i_size;
1201 i_size_write(inode, newsize);
1202 truncate_pagecache(inode, oldsize, newsize);
1203
1204 __ext2_truncate_blocks(inode, newsize);
1205
1130 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 1206 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
1131 if (inode_needs_sync(inode)) { 1207 if (inode_needs_sync(inode)) {
1132 sync_mapping_buffers(inode->i_mapping); 1208 sync_mapping_buffers(inode->i_mapping);
@@ -1134,6 +1210,8 @@ do_indirects:
1134 } else { 1210 } else {
1135 mark_inode_dirty(inode); 1211 mark_inode_dirty(inode);
1136 } 1212 }
1213
1214 return 0;
1137} 1215}
1138 1216
1139static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino, 1217static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
@@ -1474,8 +1552,15 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
1474 if (error) 1552 if (error)
1475 return error; 1553 return error;
1476 } 1554 }
1477 error = inode_setattr(inode, iattr); 1555 if (iattr->ia_valid & ATTR_SIZE) {
1478 if (!error && (iattr->ia_valid & ATTR_MODE)) 1556 error = ext2_setsize(inode, iattr->ia_size);
1557 if (error)
1558 return error;
1559 }
1560 generic_setattr(inode, iattr);
1561 if (iattr->ia_valid & ATTR_MODE)
1479 error = ext2_acl_chmod(inode); 1562 error = ext2_acl_chmod(inode);
1563 mark_inode_dirty(inode);
1564
1480 return error; 1565 return error;
1481} 1566}
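
The ext2 conversion above drops the ->truncate inode operation; instead the filesystem trims blocks instantiated past i_size itself whenever a write path fails. A condensed sketch of that pattern with the *_newtrunc helpers, using made-up myfs_* names (compare ext2_write_failed() and ext2_write_begin() above):

    #include <linux/buffer_head.h>
    #include <linux/fs.h>
    #include <linux/mm.h>

    int myfs_get_block(struct inode *inode, sector_t iblock,
                       struct buffer_head *bh_result, int create);  /* hypothetical */
    void myfs_truncate_blocks(struct inode *inode, loff_t offset);   /* hypothetical */

    static void myfs_write_failed(struct address_space *mapping, loff_t to)
    {
            struct inode *inode = mapping->host;

            if (to > inode->i_size) {
                    /* drop pagecache and on-disk blocks instantiated past i_size */
                    truncate_pagecache(inode, to, inode->i_size);
                    myfs_truncate_blocks(inode, inode->i_size);
            }
    }

    static int myfs_write_begin(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
    {
            int ret;

            *pagep = NULL;
            ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
                                             pagep, fsdata, myfs_get_block);
            if (ret < 0)
                    myfs_write_failed(mapping, pos + len);
            return ret;
    }
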
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 71e9eb1fa696..7ff43f4a59cd 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -119,6 +119,8 @@ static void ext2_put_super (struct super_block * sb)
119 int i; 119 int i;
120 struct ext2_sb_info *sbi = EXT2_SB(sb); 120 struct ext2_sb_info *sbi = EXT2_SB(sb);
121 121
122 dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
123
122 if (sb->s_dirt) 124 if (sb->s_dirt)
123 ext2_write_super(sb); 125 ext2_write_super(sb);
124 126
@@ -1063,6 +1065,12 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
1063 sb->s_op = &ext2_sops; 1065 sb->s_op = &ext2_sops;
1064 sb->s_export_op = &ext2_export_ops; 1066 sb->s_export_op = &ext2_export_ops;
1065 sb->s_xattr = ext2_xattr_handlers; 1067 sb->s_xattr = ext2_xattr_handlers;
1068
1069#ifdef CONFIG_QUOTA
1070 sb->dq_op = &dquot_operations;
1071 sb->s_qcop = &dquot_quotactl_ops;
1072#endif
1073
1066 root = ext2_iget(sb, EXT2_ROOT_INO); 1074 root = ext2_iget(sb, EXT2_ROOT_INO);
1067 if (IS_ERR(root)) { 1075 if (IS_ERR(root)) {
1068 ret = PTR_ERR(root); 1076 ret = PTR_ERR(root);
@@ -1241,6 +1249,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
1241 spin_unlock(&sbi->s_lock); 1249 spin_unlock(&sbi->s_lock);
1242 return 0; 1250 return 0;
1243 } 1251 }
1252
1244 /* 1253 /*
1245 * OK, we are remounting a valid rw partition rdonly, so set 1254 * OK, we are remounting a valid rw partition rdonly, so set
1246 * the rdonly flag and then mark the partition as valid again. 1255 * the rdonly flag and then mark the partition as valid again.
@@ -1248,6 +1257,13 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
1248 es->s_state = cpu_to_le16(sbi->s_mount_state); 1257 es->s_state = cpu_to_le16(sbi->s_mount_state);
1249 es->s_mtime = cpu_to_le32(get_seconds()); 1258 es->s_mtime = cpu_to_le32(get_seconds());
1250 spin_unlock(&sbi->s_lock); 1259 spin_unlock(&sbi->s_lock);
1260
1261 err = dquot_suspend(sb, -1);
1262 if (err < 0) {
1263 spin_lock(&sbi->s_lock);
1264 goto restore_opts;
1265 }
1266
1251 ext2_sync_super(sb, es, 1); 1267 ext2_sync_super(sb, es, 1);
1252 } else { 1268 } else {
1253 __le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb, 1269 __le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
@@ -1269,8 +1285,12 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
1269 if (!ext2_setup_super (sb, es, 0)) 1285 if (!ext2_setup_super (sb, es, 0))
1270 sb->s_flags &= ~MS_RDONLY; 1286 sb->s_flags &= ~MS_RDONLY;
1271 spin_unlock(&sbi->s_lock); 1287 spin_unlock(&sbi->s_lock);
1288
1272 ext2_write_super(sb); 1289 ext2_write_super(sb);
1290
1291 dquot_resume(sb, -1);
1273 } 1292 }
1293
1274 return 0; 1294 return 0;
1275restore_opts: 1295restore_opts:
1276 sbi->s_mount_opt = old_opts.s_mount_opt; 1296 sbi->s_mount_opt = old_opts.s_mount_opt;
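
With the hunks above, ext2 wires up the generic dquot_operations/dquot_quotactl_ops and suspends and resumes quotas around a read-only remount with dquot_suspend()/dquot_resume(). From user space the effect is only visible through quotactl(2); a small self-contained check of the current user's usage might look like the following, where "/dev/sda1" is just a placeholder for the device backing the ext2 mount and the call typically fails when quotas are disabled or suspended.

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/quota.h>

int main(void)
{
	struct dqblk dq;

	/* Q_GETQUOTA for the calling user on a placeholder device. */
	if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1",
		     getuid(), (caddr_t)&dq) != 0) {
		perror("quotactl");     /* e.g. quotas off, or suspended by a ro remount */
		return 1;
	}
	printf("space used: %llu KiB, soft limit: %llu blocks\n",
	       (unsigned long long)(dq.dqb_curspace / 1024),
	       (unsigned long long)dq.dqb_bsoftlimit);
	return 0;
}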
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 373fa90c796a..e2e72c367cf6 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -297,7 +297,7 @@ static void free_rb_tree_fname(struct rb_root *root)
297 kfree (old); 297 kfree (old);
298 } 298 }
299 if (!parent) 299 if (!parent)
300 root->rb_node = NULL; 300 *root = RB_ROOT;
301 else if (parent->rb_left == n) 301 else if (parent->rb_left == n)
302 parent->rb_left = NULL; 302 parent->rb_left = NULL;
303 else if (parent->rb_right == n) 303 else if (parent->rb_right == n)
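
The one-line ext3 change above resets an emptied tree with *root = RB_ROOT instead of writing rb_node directly; the surrounding free_rb_tree_fname() walks the tree bottom-up through parent pointers so it can free every node without recursion. A plain-C analogue of that traversal, using an invented node type rather than the kernel rbtree API, is:

#include <stdlib.h>

struct node {
	struct node *left, *right, *parent;
};

/* Free every node bottom-up without recursion, then leave *root empty. */
static void free_tree(struct node **root)
{
	struct node *n = *root;

	while (n) {
		if (n->left)  { n = n->left;  continue; }   /* descend first   */
		if (n->right) { n = n->right; continue; }

		struct node *parent = n->parent;            /* leaf: detach it */
		if (parent) {
			if (parent->left == n)
				parent->left = NULL;
			else
				parent->right = NULL;
		}
		free(n);
		n = parent;                                 /* climb back up   */
	}
	*root = NULL;   /* the kernel code now writes *root = RB_ROOT here */
}

int main(void)
{
	struct node *root = NULL;
	free_tree(&root);       /* freeing an empty tree is a no-op */
	return 0;
}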
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index fcf7487734b6..d7e9f74dc3a6 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -43,9 +43,9 @@
43 * inode to disk. 43 * inode to disk.
44 */ 44 */
45 45
46int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync) 46int ext3_sync_file(struct file *file, int datasync)
47{ 47{
48 struct inode *inode = dentry->d_inode; 48 struct inode *inode = file->f_mapping->host;
49 struct ext3_inode_info *ei = EXT3_I(inode); 49 struct ext3_inode_info *ei = EXT3_I(inode);
50 journal_t *journal = EXT3_SB(inode->i_sb)->s_journal; 50 journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
51 int ret, needs_barrier = 0; 51 int ret, needs_barrier = 0;
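
ext3_sync_file() drops its dentry argument and takes the (file, datasync) pair, finding the inode via file->f_mapping->host; the datasync flag is what fdatasync(2) sets. From user space the distinction is simply which syscall is issued. A minimal, self-contained illustration:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_CREAT | O_WRONLY | O_TRUNC, 0644);
	if (fd < 0) { perror("open"); return 1; }

	if (write(fd, "hello\n", 6) != 6) { perror("write"); return 1; }

	/* fsync() flushes the data and all metadata (size, timestamps, ...). */
	if (fsync(fd) != 0)
		perror("fsync");

	/* fdatasync() may skip metadata not needed to read the data back;
	 * in the kernel this is the datasync != 0 case of the ->fsync method. */
	if (fdatasync(fd) != 0)
		perror("fdatasync");

	close(fd);
	return 0;
}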
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 0fc1293d0e96..6c953bb255e7 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -410,6 +410,8 @@ static void ext3_put_super (struct super_block * sb)
410 struct ext3_super_block *es = sbi->s_es; 410 struct ext3_super_block *es = sbi->s_es;
411 int i, err; 411 int i, err;
412 412
413 dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
414
413 lock_kernel(); 415 lock_kernel();
414 416
415 ext3_xattr_put_super(sb); 417 ext3_xattr_put_super(sb);
@@ -748,7 +750,7 @@ static int ext3_release_dquot(struct dquot *dquot);
748static int ext3_mark_dquot_dirty(struct dquot *dquot); 750static int ext3_mark_dquot_dirty(struct dquot *dquot);
749static int ext3_write_info(struct super_block *sb, int type); 751static int ext3_write_info(struct super_block *sb, int type);
750static int ext3_quota_on(struct super_block *sb, int type, int format_id, 752static int ext3_quota_on(struct super_block *sb, int type, int format_id,
751 char *path, int remount); 753 char *path);
752static int ext3_quota_on_mount(struct super_block *sb, int type); 754static int ext3_quota_on_mount(struct super_block *sb, int type);
753static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data, 755static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
754 size_t len, loff_t off); 756 size_t len, loff_t off);
@@ -767,12 +769,12 @@ static const struct dquot_operations ext3_quota_operations = {
767 769
768static const struct quotactl_ops ext3_qctl_operations = { 770static const struct quotactl_ops ext3_qctl_operations = {
769 .quota_on = ext3_quota_on, 771 .quota_on = ext3_quota_on,
770 .quota_off = vfs_quota_off, 772 .quota_off = dquot_quota_off,
771 .quota_sync = vfs_quota_sync, 773 .quota_sync = dquot_quota_sync,
772 .get_info = vfs_get_dqinfo, 774 .get_info = dquot_get_dqinfo,
773 .set_info = vfs_set_dqinfo, 775 .set_info = dquot_set_dqinfo,
774 .get_dqblk = vfs_get_dqblk, 776 .get_dqblk = dquot_get_dqblk,
775 .set_dqblk = vfs_set_dqblk 777 .set_dqblk = dquot_set_dqblk
776}; 778};
777#endif 779#endif
778 780
@@ -1527,7 +1529,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
1527 /* Turn quotas off */ 1529 /* Turn quotas off */
1528 for (i = 0; i < MAXQUOTAS; i++) { 1530 for (i = 0; i < MAXQUOTAS; i++) {
1529 if (sb_dqopt(sb)->files[i]) 1531 if (sb_dqopt(sb)->files[i])
1530 vfs_quota_off(sb, i, 0); 1532 dquot_quota_off(sb, i);
1531 } 1533 }
1532#endif 1534#endif
1533 sb->s_flags = s_flags; /* Restore MS_RDONLY status */ 1535 sb->s_flags = s_flags; /* Restore MS_RDONLY status */
@@ -2551,6 +2553,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
2551 ext3_fsblk_t n_blocks_count = 0; 2553 ext3_fsblk_t n_blocks_count = 0;
2552 unsigned long old_sb_flags; 2554 unsigned long old_sb_flags;
2553 struct ext3_mount_options old_opts; 2555 struct ext3_mount_options old_opts;
2556 int enable_quota = 0;
2554 int err; 2557 int err;
2555#ifdef CONFIG_QUOTA 2558#ifdef CONFIG_QUOTA
2556 int i; 2559 int i;
@@ -2597,6 +2600,10 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
2597 } 2600 }
2598 2601
2599 if (*flags & MS_RDONLY) { 2602 if (*flags & MS_RDONLY) {
2603 err = dquot_suspend(sb, -1);
2604 if (err < 0)
2605 goto restore_opts;
2606
2600 /* 2607 /*
2601 * First of all, the unconditional stuff we have to do 2608 * First of all, the unconditional stuff we have to do
2602 * to disable replay of the journal when we next remount 2609 * to disable replay of the journal when we next remount
@@ -2651,6 +2658,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
2651 goto restore_opts; 2658 goto restore_opts;
2652 if (!ext3_setup_super (sb, es, 0)) 2659 if (!ext3_setup_super (sb, es, 0))
2653 sb->s_flags &= ~MS_RDONLY; 2660 sb->s_flags &= ~MS_RDONLY;
2661 enable_quota = 1;
2654 } 2662 }
2655 } 2663 }
2656#ifdef CONFIG_QUOTA 2664#ifdef CONFIG_QUOTA
@@ -2662,6 +2670,9 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
2662#endif 2670#endif
2663 unlock_super(sb); 2671 unlock_super(sb);
2664 unlock_kernel(); 2672 unlock_kernel();
2673
2674 if (enable_quota)
2675 dquot_resume(sb, -1);
2665 return 0; 2676 return 0;
2666restore_opts: 2677restore_opts:
2667 sb->s_flags = old_sb_flags; 2678 sb->s_flags = old_sb_flags;
@@ -2851,24 +2862,21 @@ static int ext3_write_info(struct super_block *sb, int type)
2851 */ 2862 */
2852static int ext3_quota_on_mount(struct super_block *sb, int type) 2863static int ext3_quota_on_mount(struct super_block *sb, int type)
2853{ 2864{
2854 return vfs_quota_on_mount(sb, EXT3_SB(sb)->s_qf_names[type], 2865 return dquot_quota_on_mount(sb, EXT3_SB(sb)->s_qf_names[type],
2855 EXT3_SB(sb)->s_jquota_fmt, type); 2866 EXT3_SB(sb)->s_jquota_fmt, type);
2856} 2867}
2857 2868
2858/* 2869/*
2859 * Standard function to be called on quota_on 2870 * Standard function to be called on quota_on
2860 */ 2871 */
2861static int ext3_quota_on(struct super_block *sb, int type, int format_id, 2872static int ext3_quota_on(struct super_block *sb, int type, int format_id,
2862 char *name, int remount) 2873 char *name)
2863{ 2874{
2864 int err; 2875 int err;
2865 struct path path; 2876 struct path path;
2866 2877
2867 if (!test_opt(sb, QUOTA)) 2878 if (!test_opt(sb, QUOTA))
2868 return -EINVAL; 2879 return -EINVAL;
2869 /* When remounting, no checks are needed and in fact, name is NULL */
2870 if (remount)
2871 return vfs_quota_on(sb, type, format_id, name, remount);
2872 2880
2873 err = kern_path(name, LOOKUP_FOLLOW, &path); 2881 err = kern_path(name, LOOKUP_FOLLOW, &path);
2874 if (err) 2882 if (err)
@@ -2906,7 +2914,7 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
2906 } 2914 }
2907 } 2915 }
2908 2916
2909 err = vfs_quota_on_path(sb, type, format_id, &path); 2917 err = dquot_quota_on_path(sb, type, format_id, &path);
2910 path_put(&path); 2918 path_put(&path);
2911 return err; 2919 return err;
2912} 2920}
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index d2f37a5516c7..95b7594c76f9 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -591,14 +591,15 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
591 ret = ext4_mb_new_blocks(handle, &ar, errp); 591 ret = ext4_mb_new_blocks(handle, &ar, errp);
592 if (count) 592 if (count)
593 *count = ar.len; 593 *count = ar.len;
594
595 /* 594 /*
596 * Account for the allocated meta blocks 595 * Account for the allocated meta blocks. We will never
596 * fail EDQUOT for metadata, but we do account for it.
597 */ 597 */
598 if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) { 598 if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) {
599 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 599 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
600 EXT4_I(inode)->i_allocated_meta_blocks += ar.len; 600 EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
601 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 601 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
602 dquot_alloc_block_nofail(inode, ar.len);
602 } 603 }
603 return ret; 604 return ret;
604} 605}
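
The balloc.c hunk above adds dquot_alloc_block_nofail() so metadata blocks are charged against the quota without ever failing the allocation with EDQUOT. The general pattern — one checked charge path for data and one unconditional accounting path for blocks the filesystem must allocate anyway — sketched in plain C with a hypothetical quota structure:

#include <stdio.h>

struct quota {
	unsigned long used;
	unsigned long limit;
};

/* Checked path: refuse the charge if it would exceed the limit (EDQUOT analogue). */
static int charge(struct quota *q, unsigned long nblocks)
{
	if (q->limit && q->used + nblocks > q->limit)
		return -1;
	q->used += nblocks;
	return 0;
}

/* Nofail path: always account the usage, even past the limit, so the
 * bookkeeping stays correct for mandatory (metadata) allocations. */
static void charge_nofail(struct quota *q, unsigned long nblocks)
{
	q->used += nblocks;
}

int main(void)
{
	struct quota q = { .used = 95, .limit = 100 };

	if (charge(&q, 10) != 0)
		printf("data allocation refused, usage stays at %lu\n", q.used);

	charge_nofail(&q, 10);  /* metadata: charged regardless */
	printf("after metadata charge: %lu/%lu\n", q.used, q.limit);
	return 0;
}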
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 538c48655084..5b6973fbf1bd 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -72,9 +72,9 @@ static int add_system_zone(struct ext4_sb_info *sbi,
72 else if (start_blk >= (entry->start_blk + entry->count)) 72 else if (start_blk >= (entry->start_blk + entry->count))
73 n = &(*n)->rb_right; 73 n = &(*n)->rb_right;
74 else { 74 else {
75 if (start_blk + count > (entry->start_blk + 75 if (start_blk + count > (entry->start_blk +
76 entry->count)) 76 entry->count))
77 entry->count = (start_blk + count - 77 entry->count = (start_blk + count -
78 entry->start_blk); 78 entry->start_blk);
79 new_node = *n; 79 new_node = *n;
80 new_entry = rb_entry(new_node, struct ext4_system_zone, 80 new_entry = rb_entry(new_node, struct ext4_system_zone,
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 86cb6d86a048..ea5e6cb7e2a5 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -83,11 +83,10 @@ int ext4_check_dir_entry(const char *function, struct inode *dir,
83 error_msg = "inode out of bounds"; 83 error_msg = "inode out of bounds";
84 84
85 if (error_msg != NULL) 85 if (error_msg != NULL)
86 __ext4_error(dir->i_sb, function, 86 ext4_error_inode(function, dir,
87 "bad entry in directory #%lu: %s - block=%llu" 87 "bad entry in directory: %s - block=%llu"
88 "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d", 88 "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
89 dir->i_ino, error_msg, 89 error_msg, (unsigned long long) bh->b_blocknr,
90 (unsigned long long) bh->b_blocknr,
91 (unsigned) (offset%bh->b_size), offset, 90 (unsigned) (offset%bh->b_size), offset,
92 le32_to_cpu(de->inode), 91 le32_to_cpu(de->inode),
93 rlen, de->name_len); 92 rlen, de->name_len);
@@ -111,7 +110,7 @@ static int ext4_readdir(struct file *filp,
111 110
112 if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb, 111 if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
113 EXT4_FEATURE_COMPAT_DIR_INDEX) && 112 EXT4_FEATURE_COMPAT_DIR_INDEX) &&
114 ((EXT4_I(inode)->i_flags & EXT4_INDEX_FL) || 113 ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
115 ((inode->i_size >> sb->s_blocksize_bits) == 1))) { 114 ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
116 err = ext4_dx_readdir(filp, dirent, filldir); 115 err = ext4_dx_readdir(filp, dirent, filldir);
117 if (err != ERR_BAD_DX_DIR) { 116 if (err != ERR_BAD_DX_DIR) {
@@ -122,20 +121,20 @@ static int ext4_readdir(struct file *filp,
122 * We don't set the inode dirty flag since it's not 121 * We don't set the inode dirty flag since it's not
123 * critical that it get flushed back to the disk. 122 * critical that it get flushed back to the disk.
124 */ 123 */
125 EXT4_I(filp->f_path.dentry->d_inode)->i_flags &= ~EXT4_INDEX_FL; 124 ext4_clear_inode_flag(filp->f_path.dentry->d_inode, EXT4_INODE_INDEX);
126 } 125 }
127 stored = 0; 126 stored = 0;
128 offset = filp->f_pos & (sb->s_blocksize - 1); 127 offset = filp->f_pos & (sb->s_blocksize - 1);
129 128
130 while (!error && !stored && filp->f_pos < inode->i_size) { 129 while (!error && !stored && filp->f_pos < inode->i_size) {
131 ext4_lblk_t blk = filp->f_pos >> EXT4_BLOCK_SIZE_BITS(sb); 130 struct ext4_map_blocks map;
132 struct buffer_head map_bh;
133 struct buffer_head *bh = NULL; 131 struct buffer_head *bh = NULL;
134 132
135 map_bh.b_state = 0; 133 map.m_lblk = filp->f_pos >> EXT4_BLOCK_SIZE_BITS(sb);
136 err = ext4_get_blocks(NULL, inode, blk, 1, &map_bh, 0); 134 map.m_len = 1;
135 err = ext4_map_blocks(NULL, inode, &map, 0);
137 if (err > 0) { 136 if (err > 0) {
138 pgoff_t index = map_bh.b_blocknr >> 137 pgoff_t index = map.m_pblk >>
139 (PAGE_CACHE_SHIFT - inode->i_blkbits); 138 (PAGE_CACHE_SHIFT - inode->i_blkbits);
140 if (!ra_has_index(&filp->f_ra, index)) 139 if (!ra_has_index(&filp->f_ra, index))
141 page_cache_sync_readahead( 140 page_cache_sync_readahead(
@@ -143,7 +142,7 @@ static int ext4_readdir(struct file *filp,
143 &filp->f_ra, filp, 142 &filp->f_ra, filp,
144 index, 1); 143 index, 1);
145 filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; 144 filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
146 bh = ext4_bread(NULL, inode, blk, 0, &err); 145 bh = ext4_bread(NULL, inode, map.m_lblk, 0, &err);
147 } 146 }
148 147
149 /* 148 /*
@@ -152,9 +151,8 @@ static int ext4_readdir(struct file *filp,
152 */ 151 */
153 if (!bh) { 152 if (!bh) {
154 if (!dir_has_error) { 153 if (!dir_has_error) {
155 ext4_error(sb, "directory #%lu " 154 EXT4_ERROR_INODE(inode, "directory "
156 "contains a hole at offset %Lu", 155 "contains a hole at offset %Lu",
157 inode->i_ino,
158 (unsigned long long) filp->f_pos); 156 (unsigned long long) filp->f_pos);
159 dir_has_error = 1; 157 dir_has_error = 1;
160 } 158 }
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index bf938cf7c5f0..19a4de57128a 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -29,6 +29,9 @@
29#include <linux/wait.h> 29#include <linux/wait.h>
30#include <linux/blockgroup_lock.h> 30#include <linux/blockgroup_lock.h>
31#include <linux/percpu_counter.h> 31#include <linux/percpu_counter.h>
32#ifdef __KERNEL__
33#include <linux/compat.h>
34#endif
32 35
33/* 36/*
34 * The fourth extended filesystem constants/structures 37 * The fourth extended filesystem constants/structures
@@ -54,10 +57,10 @@
54#endif 57#endif
55 58
56#define EXT4_ERROR_INODE(inode, fmt, a...) \ 59#define EXT4_ERROR_INODE(inode, fmt, a...) \
57 ext4_error_inode(__func__, (inode), (fmt), ## a); 60 ext4_error_inode(__func__, (inode), (fmt), ## a)
58 61
59#define EXT4_ERROR_FILE(file, fmt, a...) \ 62#define EXT4_ERROR_FILE(file, fmt, a...) \
60 ext4_error_file(__func__, (file), (fmt), ## a); 63 ext4_error_file(__func__, (file), (fmt), ## a)
61 64
62/* data type for block offset of block group */ 65/* data type for block offset of block group */
63typedef int ext4_grpblk_t; 66typedef int ext4_grpblk_t;
@@ -72,7 +75,7 @@ typedef __u32 ext4_lblk_t;
72typedef unsigned int ext4_group_t; 75typedef unsigned int ext4_group_t;
73 76
74/* 77/*
75 * Flags used in mballoc's allocation_context flags field. 78 * Flags used in mballoc's allocation_context flags field.
76 * 79 *
77 * Also used to show what's going on for debugging purposes when the 80 * Also used to show what's going on for debugging purposes when the
78 * flag field is exported via the traceport interface 81 * flag field is exported via the traceport interface
@@ -126,6 +129,29 @@ struct ext4_allocation_request {
126}; 129};
127 130
128/* 131/*
132 * Logical to physical block mapping, used by ext4_map_blocks()
133 *
134 * This structure is used to pass requests into ext4_map_blocks() as
135 * well as to store the information returned by ext4_map_blocks(). It
136 * takes less room on the stack than a struct buffer_head.
137 */
138#define EXT4_MAP_NEW (1 << BH_New)
139#define EXT4_MAP_MAPPED (1 << BH_Mapped)
140#define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten)
141#define EXT4_MAP_BOUNDARY (1 << BH_Boundary)
142#define EXT4_MAP_UNINIT (1 << BH_Uninit)
143#define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
144 EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
145 EXT4_MAP_UNINIT)
146
147struct ext4_map_blocks {
148 ext4_fsblk_t m_pblk;
149 ext4_lblk_t m_lblk;
150 unsigned int m_len;
151 unsigned int m_flags;
152};
153
154/*
129 * For delayed allocation tracking 155 * For delayed allocation tracking
130 */ 156 */
131struct mpage_da_data { 157struct mpage_da_data {
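
As the new comment above says, struct ext4_map_blocks carries both the request (m_lblk, m_len) and the result (m_pblk, m_len, m_flags) of a mapping call, replacing the dummy buffer_head callers used to fill in. The calling convention can be illustrated with a self-contained analogue; the structure and mapper below are invented for the example and only mimic the request-in/result-out shape, with a toy extent of eight contiguous blocks.

#include <stdio.h>

#define MAP_MAPPED 0x1

/* Analogue of struct ext4_map_blocks: caller fills the request half,
 * the mapper fills the result half. */
struct blk_map {
	unsigned long lblk;     /* in:  first logical block                */
	unsigned int  len;      /* in:  blocks wanted, out: blocks mapped  */
	unsigned long pblk;     /* out: first physical block               */
	unsigned int  flags;    /* out: MAP_* result bits                  */
};

/* Toy mapper: logical blocks 0..7 sit contiguously at physical 1000. */
static int map_blocks(struct blk_map *map)
{
	if (map->lblk >= 8)
		return 0;                       /* hole: nothing mapped  */
	if (map->lblk + map->len > 8)
		map->len = 8 - map->lblk;       /* clamp to the extent   */
	map->pblk = 1000 + map->lblk;
	map->flags = MAP_MAPPED;
	return map->len;                        /* >0: blocks mapped     */
}

int main(void)
{
	struct blk_map map = { .lblk = 5, .len = 6 };
	int n = map_blocks(&map);

	if (n > 0)
		printf("mapped %d blocks at physical %lu (flags 0x%x)\n",
		       n, map.pblk, map.flags);
	return 0;
}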
@@ -321,6 +347,83 @@ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
321 return flags & EXT4_OTHER_FLMASK; 347 return flags & EXT4_OTHER_FLMASK;
322} 348}
323 349
350/*
351 * Inode flags used for atomic set/get
352 */
353enum {
354 EXT4_INODE_SECRM = 0, /* Secure deletion */
355 EXT4_INODE_UNRM = 1, /* Undelete */
356 EXT4_INODE_COMPR = 2, /* Compress file */
357 EXT4_INODE_SYNC = 3, /* Synchronous updates */
358 EXT4_INODE_IMMUTABLE = 4, /* Immutable file */
359 EXT4_INODE_APPEND = 5, /* writes to file may only append */
360 EXT4_INODE_NODUMP = 6, /* do not dump file */
361 EXT4_INODE_NOATIME = 7, /* do not update atime */
362/* Reserved for compression usage... */
363 EXT4_INODE_DIRTY = 8,
364 EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */
365 EXT4_INODE_NOCOMPR = 10, /* Don't compress */
366 EXT4_INODE_ECOMPR = 11, /* Compression error */
367/* End compression flags --- maybe not all used */
368 EXT4_INODE_INDEX = 12, /* hash-indexed directory */
369 EXT4_INODE_IMAGIC = 13, /* AFS directory */
370 EXT4_INODE_JOURNAL_DATA = 14, /* file data should be journaled */
371 EXT4_INODE_NOTAIL = 15, /* file tail should not be merged */
372 EXT4_INODE_DIRSYNC = 16, /* dirsync behaviour (directories only) */
373 EXT4_INODE_TOPDIR = 17, /* Top of directory hierarchies*/
374 EXT4_INODE_HUGE_FILE = 18, /* Set to each huge file */
375 EXT4_INODE_EXTENTS = 19, /* Inode uses extents */
376 EXT4_INODE_EA_INODE = 21, /* Inode used for large EA */
377 EXT4_INODE_EOFBLOCKS = 22, /* Blocks allocated beyond EOF */
378 EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */
379};
380
381#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG))
382#define CHECK_FLAG_VALUE(FLAG) if (!TEST_FLAG_VALUE(FLAG)) { \
383 printk(KERN_EMERG "EXT4 flag fail: " #FLAG ": %d %d\n", \
384 EXT4_##FLAG##_FL, EXT4_INODE_##FLAG); BUG_ON(1); }
385
386/*
387 * Since it's pretty easy to mix up bit numbers and hex values, and we
388 * can't do a compile-time test for ENUM values, we use a run-time
389 * test to make sure that EXT4_XXX_FL is consistent with respect to
390 * EXT4_INODE_XXX. If all is well the printk and BUG_ON will all drop
391 * out so it won't cost any extra space in the compiled kernel image.
392 * But it's important that these values are the same, since we are
393 * using EXT4_INODE_XXX to test for the flag values, but EXT4_XX_FL
394 * must be consistent with the values of FS_XXX_FL defined in
395 * include/linux/fs.h and the on-disk values found in ext2, ext3, and
396 * ext4 filesystems, and of course the values defined in e2fsprogs.
397 *
398 * It's not paranoia if the Murphy's Law really *is* out to get you. :-)
399 */
400static inline void ext4_check_flag_values(void)
401{
402 CHECK_FLAG_VALUE(SECRM);
403 CHECK_FLAG_VALUE(UNRM);
404 CHECK_FLAG_VALUE(COMPR);
405 CHECK_FLAG_VALUE(SYNC);
406 CHECK_FLAG_VALUE(IMMUTABLE);
407 CHECK_FLAG_VALUE(APPEND);
408 CHECK_FLAG_VALUE(NODUMP);
409 CHECK_FLAG_VALUE(NOATIME);
410 CHECK_FLAG_VALUE(DIRTY);
411 CHECK_FLAG_VALUE(COMPRBLK);
412 CHECK_FLAG_VALUE(NOCOMPR);
413 CHECK_FLAG_VALUE(ECOMPR);
414 CHECK_FLAG_VALUE(INDEX);
415 CHECK_FLAG_VALUE(IMAGIC);
416 CHECK_FLAG_VALUE(JOURNAL_DATA);
417 CHECK_FLAG_VALUE(NOTAIL);
418 CHECK_FLAG_VALUE(DIRSYNC);
419 CHECK_FLAG_VALUE(TOPDIR);
420 CHECK_FLAG_VALUE(HUGE_FILE);
421 CHECK_FLAG_VALUE(EXTENTS);
422 CHECK_FLAG_VALUE(EA_INODE);
423 CHECK_FLAG_VALUE(EOFBLOCKS);
424 CHECK_FLAG_VALUE(RESERVED);
425}
426
324/* Used to pass group descriptor data when online resize is done */ 427/* Used to pass group descriptor data when online resize is done */
325struct ext4_new_group_input { 428struct ext4_new_group_input {
326 __u32 group; /* Group number for this data */ 429 __u32 group; /* Group number for this data */
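
The ext4_check_flag_values() routine added above verifies, once at run time, that every EXT4_XXX_FL on-disk constant equals 1 << EXT4_INODE_XXX, since the hex constants and the enum bit numbers are maintained separately. The same invariant can be expressed as a build-time check in C11; the flag values in this stand-alone illustration are invented and are not the real ext4 constants.

#include <assert.h>

/* Invented on-disk flag values (hex) and the bit numbers used in code. */
#define DEMO_SECRM_FL   0x00000001
#define DEMO_UNRM_FL    0x00000002
enum { DEMO_INODE_SECRM = 0, DEMO_INODE_UNRM = 1 };

/* If the two definitions ever drift apart, the build fails instead of
 * the mismatch surfacing at run time. */
static_assert(DEMO_SECRM_FL == (1 << DEMO_INODE_SECRM), "SECRM flag mismatch");
static_assert(DEMO_UNRM_FL  == (1 << DEMO_INODE_UNRM),  "UNRM flag mismatch");

int main(void) { return 0; }

The patch keeps the check at run time for the reason its own comment gives — a compile-time test of the enum values was not available to it — but because the printk/BUG_ON pair folds away when the values match, the run-time version costs nothing in the common case.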
@@ -332,6 +435,18 @@ struct ext4_new_group_input {
332 __u16 unused; 435 __u16 unused;
333}; 436};
334 437
438#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
439struct compat_ext4_new_group_input {
440 u32 group;
441 compat_u64 block_bitmap;
442 compat_u64 inode_bitmap;
443 compat_u64 inode_table;
444 u32 blocks_count;
445 u16 reserved_blocks;
446 u16 unused;
447};
448#endif
449
335/* The struct ext4_new_group_input in kernel space, with free_blocks_count */ 450/* The struct ext4_new_group_input in kernel space, with free_blocks_count */
336struct ext4_new_group_data { 451struct ext4_new_group_data {
337 __u32 group; 452 __u32 group;
@@ -355,7 +470,7 @@ struct ext4_new_group_data {
355#define EXT4_GET_BLOCKS_CREATE_UNINIT_EXT (EXT4_GET_BLOCKS_UNINIT_EXT|\ 470#define EXT4_GET_BLOCKS_CREATE_UNINIT_EXT (EXT4_GET_BLOCKS_UNINIT_EXT|\
356 EXT4_GET_BLOCKS_CREATE) 471 EXT4_GET_BLOCKS_CREATE)
357 /* Caller is from the delayed allocation writeout path, 472 /* Caller is from the delayed allocation writeout path,
358 so set the magic i_delalloc_reserve_flag after taking the 473 so set the magic i_delalloc_reserve_flag after taking the
359 inode allocation semaphore for */ 474 inode allocation semaphore for */
360#define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004 475#define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004
361 /* caller is from the direct IO path, request to creation of an 476 /* caller is from the direct IO path, request to creation of an
@@ -398,6 +513,7 @@ struct ext4_new_group_data {
398#define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12) 513#define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12)
399#define EXT4_IOC_MOVE_EXT _IOWR('f', 15, struct move_extent) 514#define EXT4_IOC_MOVE_EXT _IOWR('f', 15, struct move_extent)
400 515
516#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
401/* 517/*
402 * ioctl commands in 32 bit emulation 518 * ioctl commands in 32 bit emulation
403 */ 519 */
@@ -408,11 +524,13 @@ struct ext4_new_group_data {
408#define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int) 524#define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int)
409#define EXT4_IOC32_SETRSVSZ _IOW('f', 6, int) 525#define EXT4_IOC32_SETRSVSZ _IOW('f', 6, int)
410#define EXT4_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int) 526#define EXT4_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int)
527#define EXT4_IOC32_GROUP_ADD _IOW('f', 8, struct compat_ext4_new_group_input)
411#ifdef CONFIG_JBD2_DEBUG 528#ifdef CONFIG_JBD2_DEBUG
412#define EXT4_IOC32_WAIT_FOR_READONLY _IOR('f', 99, int) 529#define EXT4_IOC32_WAIT_FOR_READONLY _IOR('f', 99, int)
413#endif 530#endif
414#define EXT4_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION 531#define EXT4_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION
415#define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION 532#define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION
533#endif
416 534
417 535
418/* 536/*
@@ -616,9 +734,8 @@ struct ext4_ext_cache {
616 */ 734 */
617struct ext4_inode_info { 735struct ext4_inode_info {
618 __le32 i_data[15]; /* unconverted */ 736 __le32 i_data[15]; /* unconverted */
619 __u32 i_flags;
620 ext4_fsblk_t i_file_acl;
621 __u32 i_dtime; 737 __u32 i_dtime;
738 ext4_fsblk_t i_file_acl;
622 739
623 /* 740 /*
624 * i_block_group is the number of the block group which contains 741 * i_block_group is the number of the block group which contains
@@ -629,6 +746,7 @@ struct ext4_inode_info {
629 */ 746 */
630 ext4_group_t i_block_group; 747 ext4_group_t i_block_group;
631 unsigned long i_state_flags; /* Dynamic state flags */ 748 unsigned long i_state_flags; /* Dynamic state flags */
749 unsigned long i_flags;
632 750
633 ext4_lblk_t i_dir_start_lookup; 751 ext4_lblk_t i_dir_start_lookup;
634#ifdef CONFIG_EXT4_FS_XATTR 752#ifdef CONFIG_EXT4_FS_XATTR
@@ -1062,22 +1180,25 @@ enum {
1062 EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */ 1180 EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */
1063 EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */ 1181 EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */
1064 EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/ 1182 EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/
1183 EXT4_STATE_NEWENTRY, /* File just added to dir */
1065}; 1184};
1066 1185
1067static inline int ext4_test_inode_state(struct inode *inode, int bit) 1186#define EXT4_INODE_BIT_FNS(name, field) \
1068{ 1187static inline int ext4_test_inode_##name(struct inode *inode, int bit) \
1069 return test_bit(bit, &EXT4_I(inode)->i_state_flags); 1188{ \
1070} 1189 return test_bit(bit, &EXT4_I(inode)->i_##field); \
1071 1190} \
1072static inline void ext4_set_inode_state(struct inode *inode, int bit) 1191static inline void ext4_set_inode_##name(struct inode *inode, int bit) \
1073{ 1192{ \
1074 set_bit(bit, &EXT4_I(inode)->i_state_flags); 1193 set_bit(bit, &EXT4_I(inode)->i_##field); \
1194} \
1195static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \
1196{ \
1197 clear_bit(bit, &EXT4_I(inode)->i_##field); \
1075} 1198}
1076 1199
1077static inline void ext4_clear_inode_state(struct inode *inode, int bit) 1200EXT4_INODE_BIT_FNS(flag, flags)
1078{ 1201EXT4_INODE_BIT_FNS(state, state_flags)
1079 clear_bit(bit, &EXT4_I(inode)->i_state_flags);
1080}
1081#else 1202#else
1082/* Assume that user mode programs are passing in an ext4fs superblock, not 1203/* Assume that user mode programs are passing in an ext4fs superblock, not
1083 * a kernel struct super_block. This will allow us to call the feature-test 1204 * a kernel struct super_block. This will allow us to call the feature-test
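
The EXT4_INODE_BIT_FNS macro above stamps out test/set/clear helpers for both i_flags and i_state_flags, so every caller goes through one bit API instead of open-coding & and |= on i_flags. The same token-pasting technique in portable C — using plain, non-atomic bit operations in place of the kernel's test_bit/set_bit/clear_bit — looks like this; the structure and names are invented for the example.

#include <stdio.h>

struct object {
	unsigned long flags;
	unsigned long state;
};

/* Generate obj_{test,set,clear}_<name>() for a given field. */
#define BIT_FNS(name, field)                                          \
static int obj_test_##name(struct object *o, int bit)                 \
{ return (o->field >> bit) & 1; }                                     \
static void obj_set_##name(struct object *o, int bit)                 \
{ o->field |= 1UL << bit; }                                           \
static void obj_clear_##name(struct object *o, int bit)               \
{ o->field &= ~(1UL << bit); }

BIT_FNS(flag, flags)
BIT_FNS(state, state)

int main(void)
{
	struct object o = { 0, 0 };

	obj_set_flag(&o, 3);
	obj_set_state(&o, 1);
	obj_clear_state(&o, 1);
	printf("flag3=%d state1=%d\n", obj_test_flag(&o, 3), obj_test_state(&o, 1));
	return 0;
}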
@@ -1264,7 +1385,7 @@ struct ext4_dir_entry_2 {
1264 1385
1265#define is_dx(dir) (EXT4_HAS_COMPAT_FEATURE(dir->i_sb, \ 1386#define is_dx(dir) (EXT4_HAS_COMPAT_FEATURE(dir->i_sb, \
1266 EXT4_FEATURE_COMPAT_DIR_INDEX) && \ 1387 EXT4_FEATURE_COMPAT_DIR_INDEX) && \
1267 (EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) 1388 ext4_test_inode_flag((dir), EXT4_INODE_INDEX))
1268#define EXT4_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT4_LINK_MAX) 1389#define EXT4_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT4_LINK_MAX)
1269#define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1) 1390#define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
1270 1391
@@ -1398,7 +1519,7 @@ extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
1398extern void ext4_htree_free_dir_info(struct dir_private_info *p); 1519extern void ext4_htree_free_dir_info(struct dir_private_info *p);
1399 1520
1400/* fsync.c */ 1521/* fsync.c */
1401extern int ext4_sync_file(struct file *, struct dentry *, int); 1522extern int ext4_sync_file(struct file *, int);
1402 1523
1403/* hash.c */ 1524/* hash.c */
1404extern int ext4fs_dirhash(const char *name, int len, struct 1525extern int ext4fs_dirhash(const char *name, int len, struct
@@ -1678,6 +1799,7 @@ struct ext4_group_info {
1678 ext4_grpblk_t bb_first_free; /* first free block */ 1799 ext4_grpblk_t bb_first_free; /* first free block */
1679 ext4_grpblk_t bb_free; /* total free blocks */ 1800 ext4_grpblk_t bb_free; /* total free blocks */
1680 ext4_grpblk_t bb_fragments; /* nr of freespace fragments */ 1801 ext4_grpblk_t bb_fragments; /* nr of freespace fragments */
1802 ext4_grpblk_t bb_largest_free_order;/* order of largest frag in BG */
1681 struct list_head bb_prealloc_list; 1803 struct list_head bb_prealloc_list;
1682#ifdef DOUBLE_CHECK 1804#ifdef DOUBLE_CHECK
1683 void *bb_bitmap; 1805 void *bb_bitmap;
@@ -1772,9 +1894,8 @@ extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
1772extern int ext4_ext_writepage_trans_blocks(struct inode *, int); 1894extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
1773extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, 1895extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
1774 int chunk); 1896 int chunk);
1775extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, 1897extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
1776 ext4_lblk_t iblock, unsigned int max_blocks, 1898 struct ext4_map_blocks *map, int flags);
1777 struct buffer_head *bh_result, int flags);
1778extern void ext4_ext_truncate(struct inode *); 1899extern void ext4_ext_truncate(struct inode *);
1779extern void ext4_ext_init(struct super_block *); 1900extern void ext4_ext_init(struct super_block *);
1780extern void ext4_ext_release(struct super_block *); 1901extern void ext4_ext_release(struct super_block *);
@@ -1782,6 +1903,8 @@ extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
1782 loff_t len); 1903 loff_t len);
1783extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, 1904extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
1784 ssize_t len); 1905 ssize_t len);
1906extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
1907 struct ext4_map_blocks *map, int flags);
1785extern int ext4_get_blocks(handle_t *handle, struct inode *inode, 1908extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
1786 sector_t block, unsigned int max_blocks, 1909 sector_t block, unsigned int max_blocks,
1787 struct buffer_head *bh, int flags); 1910 struct buffer_head *bh, int flags);
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index b79ad5126468..dade0c024797 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -273,7 +273,7 @@ static inline int ext4_should_journal_data(struct inode *inode)
273 return 1; 273 return 1;
274 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) 274 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
275 return 1; 275 return 1;
276 if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) 276 if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
277 return 1; 277 return 1;
278 return 0; 278 return 0;
279} 279}
@@ -284,7 +284,7 @@ static inline int ext4_should_order_data(struct inode *inode)
284 return 0; 284 return 0;
285 if (!S_ISREG(inode->i_mode)) 285 if (!S_ISREG(inode->i_mode))
286 return 0; 286 return 0;
287 if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) 287 if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
288 return 0; 288 return 0;
289 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) 289 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
290 return 1; 290 return 1;
@@ -297,7 +297,7 @@ static inline int ext4_should_writeback_data(struct inode *inode)
297 return 0; 297 return 0;
298 if (EXT4_JOURNAL(inode) == NULL) 298 if (EXT4_JOURNAL(inode) == NULL)
299 return 1; 299 return 1;
300 if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) 300 if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
301 return 0; 301 return 0;
302 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) 302 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
303 return 1; 303 return 1;
@@ -321,7 +321,7 @@ static inline int ext4_should_dioread_nolock(struct inode *inode)
321 return 0; 321 return 0;
322 if (!S_ISREG(inode->i_mode)) 322 if (!S_ISREG(inode->i_mode))
323 return 0; 323 return 0;
324 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 324 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
325 return 0; 325 return 0;
326 if (ext4_should_journal_data(inode)) 326 if (ext4_should_journal_data(inode))
327 return 0; 327 return 0;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 236b834b4ca8..377309c1af65 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -107,11 +107,8 @@ static int ext4_ext_truncate_extend_restart(handle_t *handle,
107 if (err <= 0) 107 if (err <= 0)
108 return err; 108 return err;
109 err = ext4_truncate_restart_trans(handle, inode, needed); 109 err = ext4_truncate_restart_trans(handle, inode, needed);
110 /* 110 if (err == 0)
111 * We have dropped i_data_sem so someone might have cached again 111 err = -EAGAIN;
112 * an extent we are going to truncate.
113 */
114 ext4_ext_invalidate_cache(inode);
115 112
116 return err; 113 return err;
117} 114}
@@ -185,10 +182,10 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
185 if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { 182 if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
186 /* 183 /*
187 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME 184 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
188 * block groups per flexgroup, reserve the first block 185 * block groups per flexgroup, reserve the first block
189 * group for directories and special files. Regular 186 * group for directories and special files. Regular
190 * files will start at the second block group. This 187 * files will start at the second block group. This
191 * tends to speed up directory access and improves 188 * tends to speed up directory access and improves
192 * fsck times. 189 * fsck times.
193 */ 190 */
194 block_group &= ~(flex_size-1); 191 block_group &= ~(flex_size-1);
@@ -439,10 +436,10 @@ static int __ext4_ext_check(const char *function, struct inode *inode,
439 return 0; 436 return 0;
440 437
441corrupted: 438corrupted:
442 __ext4_error(inode->i_sb, function, 439 ext4_error_inode(function, inode,
443 "bad header/extent in inode #%lu: %s - magic %x, " 440 "bad header/extent: %s - magic %x, "
444 "entries %u, max %u(%u), depth %u(%u)", 441 "entries %u, max %u(%u), depth %u(%u)",
445 inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic), 442 error_msg, le16_to_cpu(eh->eh_magic),
446 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), 443 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
447 max, le16_to_cpu(eh->eh_depth), depth); 444 max, le16_to_cpu(eh->eh_depth), depth);
448 445
@@ -1622,9 +1619,7 @@ int ext4_ext_try_to_merge(struct inode *inode,
1622 merge_done = 1; 1619 merge_done = 1;
1623 WARN_ON(eh->eh_entries == 0); 1620 WARN_ON(eh->eh_entries == 0);
1624 if (!eh->eh_entries) 1621 if (!eh->eh_entries)
1625 ext4_error(inode->i_sb, 1622 EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1626 "inode#%lu, eh->eh_entries = 0!",
1627 inode->i_ino);
1628 } 1623 }
1629 1624
1630 return merge_done; 1625 return merge_done;
@@ -2039,7 +2034,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
2039 struct ext4_ext_cache *cex; 2034 struct ext4_ext_cache *cex;
2040 int ret = EXT4_EXT_CACHE_NO; 2035 int ret = EXT4_EXT_CACHE_NO;
2041 2036
2042 /* 2037 /*
2043 * We borrow i_block_reservation_lock to protect i_cached_extent 2038 * We borrow i_block_reservation_lock to protect i_cached_extent
2044 */ 2039 */
2045 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 2040 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
@@ -2361,7 +2356,7 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
2361 int depth = ext_depth(inode); 2356 int depth = ext_depth(inode);
2362 struct ext4_ext_path *path; 2357 struct ext4_ext_path *path;
2363 handle_t *handle; 2358 handle_t *handle;
2364 int i = 0, err = 0; 2359 int i, err;
2365 2360
2366 ext_debug("truncate since %u\n", start); 2361 ext_debug("truncate since %u\n", start);
2367 2362
@@ -2370,23 +2365,26 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
2370 if (IS_ERR(handle)) 2365 if (IS_ERR(handle))
2371 return PTR_ERR(handle); 2366 return PTR_ERR(handle);
2372 2367
2368again:
2373 ext4_ext_invalidate_cache(inode); 2369 ext4_ext_invalidate_cache(inode);
2374 2370
2375 /* 2371 /*
2376 * We start scanning from right side, freeing all the blocks 2372 * We start scanning from right side, freeing all the blocks
2377 * after i_size and walking into the tree depth-wise. 2373 * after i_size and walking into the tree depth-wise.
2378 */ 2374 */
2375 depth = ext_depth(inode);
2379 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS); 2376 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
2380 if (path == NULL) { 2377 if (path == NULL) {
2381 ext4_journal_stop(handle); 2378 ext4_journal_stop(handle);
2382 return -ENOMEM; 2379 return -ENOMEM;
2383 } 2380 }
2381 path[0].p_depth = depth;
2384 path[0].p_hdr = ext_inode_hdr(inode); 2382 path[0].p_hdr = ext_inode_hdr(inode);
2385 if (ext4_ext_check(inode, path[0].p_hdr, depth)) { 2383 if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2386 err = -EIO; 2384 err = -EIO;
2387 goto out; 2385 goto out;
2388 } 2386 }
2389 path[0].p_depth = depth; 2387 i = err = 0;
2390 2388
2391 while (i >= 0 && err == 0) { 2389 while (i >= 0 && err == 0) {
2392 if (i == depth) { 2390 if (i == depth) {
@@ -2480,6 +2478,8 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
2480out: 2478out:
2481 ext4_ext_drop_refs(path); 2479 ext4_ext_drop_refs(path);
2482 kfree(path); 2480 kfree(path);
2481 if (err == -EAGAIN)
2482 goto again;
2483 ext4_journal_stop(handle); 2483 ext4_journal_stop(handle);
2484 2484
2485 return err; 2485 return err;
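
With the change above, ext4_ext_truncate_extend_restart() reports a successful transaction restart as -EAGAIN, and ext4_ext_remove_space() reacts with goto again, re-reading the tree depth and re-invalidating the extent cache before scanning once more. Stripped of the ext4 specifics, this is a plain restart loop; a compact, self-contained sketch with invented helpers:

#include <stdio.h>

#define EAGAIN_RETRY 1          /* stand-in for -EAGAIN */

static int attempts;

/* Pretend worker: needs two restarts before it can finish. */
static int do_work(void)
{
	if (++attempts < 3)
		return EAGAIN_RETRY;    /* state changed under us, start over */
	return 0;
}

static int remove_space(void)
{
	int err;
again:
	/* re-derive any cached state here (tree depth, caches, ...) */
	err = do_work();
	if (err == EAGAIN_RETRY)
		goto again;
	return err;
}

int main(void)
{
	printf("finished with %d after %d attempts\n", remove_space(), attempts);
	return 0;
}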
@@ -2544,7 +2544,7 @@ static void bi_complete(struct bio *bio, int error)
2544/* FIXME!! we need to try to merge to left or right after zero-out */ 2544/* FIXME!! we need to try to merge to left or right after zero-out */
2545static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 2545static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2546{ 2546{
2547 int ret = -EIO; 2547 int ret;
2548 struct bio *bio; 2548 struct bio *bio;
2549 int blkbits, blocksize; 2549 int blkbits, blocksize;
2550 sector_t ee_pblock; 2550 sector_t ee_pblock;
@@ -2568,6 +2568,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2568 len = ee_len; 2568 len = ee_len;
2569 2569
2570 bio = bio_alloc(GFP_NOIO, len); 2570 bio = bio_alloc(GFP_NOIO, len);
2571 if (!bio)
2572 return -ENOMEM;
2573
2571 bio->bi_sector = ee_pblock; 2574 bio->bi_sector = ee_pblock;
2572 bio->bi_bdev = inode->i_sb->s_bdev; 2575 bio->bi_bdev = inode->i_sb->s_bdev;
2573 2576
@@ -2595,22 +2598,20 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2595 submit_bio(WRITE, bio); 2598 submit_bio(WRITE, bio);
2596 wait_for_completion(&event); 2599 wait_for_completion(&event);
2597 2600
2598 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 2601 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2599 ret = 0; 2602 bio_put(bio);
2600 else { 2603 return -EIO;
2601 ret = -EIO;
2602 break;
2603 } 2604 }
2604 bio_put(bio); 2605 bio_put(bio);
2605 ee_len -= done; 2606 ee_len -= done;
2606 ee_pblock += done << (blkbits - 9); 2607 ee_pblock += done << (blkbits - 9);
2607 } 2608 }
2608 return ret; 2609 return 0;
2609} 2610}
2610 2611
2611#define EXT4_EXT_ZERO_LEN 7 2612#define EXT4_EXT_ZERO_LEN 7
2612/* 2613/*
2613 * This function is called by ext4_ext_get_blocks() if someone tries to write 2614 * This function is called by ext4_ext_map_blocks() if someone tries to write
2614 * to an uninitialized extent. It may result in splitting the uninitialized 2615 * to an uninitialized extent. It may result in splitting the uninitialized
2615 * extent into multiple extents (upto three - one initialized and two 2616 * extent into multiple extents (upto three - one initialized and two
2616 * uninitialized). 2617 * uninitialized).
@@ -2620,39 +2621,55 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2620 * c> Splits in three extents: Somone is writing in middle of the extent 2621 * c> Splits in three extents: Somone is writing in middle of the extent
2621 */ 2622 */
2622static int ext4_ext_convert_to_initialized(handle_t *handle, 2623static int ext4_ext_convert_to_initialized(handle_t *handle,
2623 struct inode *inode, 2624 struct inode *inode,
2624 struct ext4_ext_path *path, 2625 struct ext4_map_blocks *map,
2625 ext4_lblk_t iblock, 2626 struct ext4_ext_path *path)
2626 unsigned int max_blocks)
2627{ 2627{
2628 struct ext4_extent *ex, newex, orig_ex; 2628 struct ext4_extent *ex, newex, orig_ex;
2629 struct ext4_extent *ex1 = NULL; 2629 struct ext4_extent *ex1 = NULL;
2630 struct ext4_extent *ex2 = NULL; 2630 struct ext4_extent *ex2 = NULL;
2631 struct ext4_extent *ex3 = NULL; 2631 struct ext4_extent *ex3 = NULL;
2632 struct ext4_extent_header *eh; 2632 struct ext4_extent_header *eh;
2633 ext4_lblk_t ee_block; 2633 ext4_lblk_t ee_block, eof_block;
2634 unsigned int allocated, ee_len, depth; 2634 unsigned int allocated, ee_len, depth;
2635 ext4_fsblk_t newblock; 2635 ext4_fsblk_t newblock;
2636 int err = 0; 2636 int err = 0;
2637 int ret = 0; 2637 int ret = 0;
2638 int may_zeroout;
2639
2640 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
2641 "block %llu, max_blocks %u\n", inode->i_ino,
2642 (unsigned long long)map->m_lblk, map->m_len);
2643
2644 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
2645 inode->i_sb->s_blocksize_bits;
2646 if (eof_block < map->m_lblk + map->m_len)
2647 eof_block = map->m_lblk + map->m_len;
2638 2648
2639 depth = ext_depth(inode); 2649 depth = ext_depth(inode);
2640 eh = path[depth].p_hdr; 2650 eh = path[depth].p_hdr;
2641 ex = path[depth].p_ext; 2651 ex = path[depth].p_ext;
2642 ee_block = le32_to_cpu(ex->ee_block); 2652 ee_block = le32_to_cpu(ex->ee_block);
2643 ee_len = ext4_ext_get_actual_len(ex); 2653 ee_len = ext4_ext_get_actual_len(ex);
2644 allocated = ee_len - (iblock - ee_block); 2654 allocated = ee_len - (map->m_lblk - ee_block);
2645 newblock = iblock - ee_block + ext_pblock(ex); 2655 newblock = map->m_lblk - ee_block + ext_pblock(ex);
2656
2646 ex2 = ex; 2657 ex2 = ex;
2647 orig_ex.ee_block = ex->ee_block; 2658 orig_ex.ee_block = ex->ee_block;
2648 orig_ex.ee_len = cpu_to_le16(ee_len); 2659 orig_ex.ee_len = cpu_to_le16(ee_len);
2649 ext4_ext_store_pblock(&orig_ex, ext_pblock(ex)); 2660 ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2650 2661
2662 /*
2663 * It is safe to convert extent to initialized via explicit
2664 * zeroout only if extent is fully inside i_size or new_size.
2665 */
2666 may_zeroout = ee_block + ee_len <= eof_block;
2667
2651 err = ext4_ext_get_access(handle, inode, path + depth); 2668 err = ext4_ext_get_access(handle, inode, path + depth);
2652 if (err) 2669 if (err)
2653 goto out; 2670 goto out;
2654 /* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */ 2671 /* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */
2655 if (ee_len <= 2*EXT4_EXT_ZERO_LEN) { 2672 if (ee_len <= 2*EXT4_EXT_ZERO_LEN && may_zeroout) {
2656 err = ext4_ext_zeroout(inode, &orig_ex); 2673 err = ext4_ext_zeroout(inode, &orig_ex);
2657 if (err) 2674 if (err)
2658 goto fix_extent_len; 2675 goto fix_extent_len;
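
The may_zeroout test introduced above only allows the explicit-zeroout fallback when the whole extent lies inside i_size (rounded up to a full block) or inside the range the current write will cover, so the fallback never marks blocks beyond end-of-file as initialized. The rounding is eof_block = (i_size + blocksize - 1) >> blocksize_bits, then raised to map->m_lblk + map->m_len if the write extends the file. As a worked example, with a 4096-byte block size (blocksize_bits = 12) and i_size = 10000, eof_block = (10000 + 4095) >> 12 = 3: an extent covering logical blocks 0..2 satisfies ee_block + ee_len <= eof_block and may be zeroed out, while one that also covers block 3 may not, unless the write itself reaches that far.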
@@ -2665,10 +2682,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2665 return allocated; 2682 return allocated;
2666 } 2683 }
2667 2684
2668 /* ex1: ee_block to iblock - 1 : uninitialized */ 2685 /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
2669 if (iblock > ee_block) { 2686 if (map->m_lblk > ee_block) {
2670 ex1 = ex; 2687 ex1 = ex;
2671 ex1->ee_len = cpu_to_le16(iblock - ee_block); 2688 ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
2672 ext4_ext_mark_uninitialized(ex1); 2689 ext4_ext_mark_uninitialized(ex1);
2673 ex2 = &newex; 2690 ex2 = &newex;
2674 } 2691 }
@@ -2677,15 +2694,15 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2677 * we insert ex3, if ex1 is NULL. This is to avoid temporary 2694 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2678 * overlap of blocks. 2695 * overlap of blocks.
2679 */ 2696 */
2680 if (!ex1 && allocated > max_blocks) 2697 if (!ex1 && allocated > map->m_len)
2681 ex2->ee_len = cpu_to_le16(max_blocks); 2698 ex2->ee_len = cpu_to_le16(map->m_len);
2682 /* ex3: to ee_block + ee_len : uninitialised */ 2699 /* ex3: to ee_block + ee_len : uninitialised */
2683 if (allocated > max_blocks) { 2700 if (allocated > map->m_len) {
2684 unsigned int newdepth; 2701 unsigned int newdepth;
2685 /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */ 2702 /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
2686 if (allocated <= EXT4_EXT_ZERO_LEN) { 2703 if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
2687 /* 2704 /*
2688 * iblock == ee_block is handled by the zerouout 2705 * map->m_lblk == ee_block is handled by the zerouout
2689 * at the beginning. 2706 * at the beginning.
2690 * Mark first half uninitialized. 2707 * Mark first half uninitialized.
2691 * Mark second half initialized and zero out the 2708 * Mark second half initialized and zero out the
@@ -2698,7 +2715,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2698 ext4_ext_dirty(handle, inode, path + depth); 2715 ext4_ext_dirty(handle, inode, path + depth);
2699 2716
2700 ex3 = &newex; 2717 ex3 = &newex;
2701 ex3->ee_block = cpu_to_le32(iblock); 2718 ex3->ee_block = cpu_to_le32(map->m_lblk);
2702 ext4_ext_store_pblock(ex3, newblock); 2719 ext4_ext_store_pblock(ex3, newblock);
2703 ex3->ee_len = cpu_to_le16(allocated); 2720 ex3->ee_len = cpu_to_le16(allocated);
2704 err = ext4_ext_insert_extent(handle, inode, path, 2721 err = ext4_ext_insert_extent(handle, inode, path,
@@ -2711,7 +2728,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2711 ex->ee_len = orig_ex.ee_len; 2728 ex->ee_len = orig_ex.ee_len;
2712 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 2729 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2713 ext4_ext_dirty(handle, inode, path + depth); 2730 ext4_ext_dirty(handle, inode, path + depth);
2714 /* blocks available from iblock */ 2731 /* blocks available from map->m_lblk */
2715 return allocated; 2732 return allocated;
2716 2733
2717 } else if (err) 2734 } else if (err)
@@ -2733,8 +2750,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2733 */ 2750 */
2734 depth = ext_depth(inode); 2751 depth = ext_depth(inode);
2735 ext4_ext_drop_refs(path); 2752 ext4_ext_drop_refs(path);
2736 path = ext4_ext_find_extent(inode, 2753 path = ext4_ext_find_extent(inode, map->m_lblk,
2737 iblock, path); 2754 path);
2738 if (IS_ERR(path)) { 2755 if (IS_ERR(path)) {
2739 err = PTR_ERR(path); 2756 err = PTR_ERR(path);
2740 return err; 2757 return err;
@@ -2754,12 +2771,12 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2754 return allocated; 2771 return allocated;
2755 } 2772 }
2756 ex3 = &newex; 2773 ex3 = &newex;
2757 ex3->ee_block = cpu_to_le32(iblock + max_blocks); 2774 ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
2758 ext4_ext_store_pblock(ex3, newblock + max_blocks); 2775 ext4_ext_store_pblock(ex3, newblock + map->m_len);
2759 ex3->ee_len = cpu_to_le16(allocated - max_blocks); 2776 ex3->ee_len = cpu_to_le16(allocated - map->m_len);
2760 ext4_ext_mark_uninitialized(ex3); 2777 ext4_ext_mark_uninitialized(ex3);
2761 err = ext4_ext_insert_extent(handle, inode, path, ex3, 0); 2778 err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
2762 if (err == -ENOSPC) { 2779 if (err == -ENOSPC && may_zeroout) {
2763 err = ext4_ext_zeroout(inode, &orig_ex); 2780 err = ext4_ext_zeroout(inode, &orig_ex);
2764 if (err) 2781 if (err)
2765 goto fix_extent_len; 2782 goto fix_extent_len;
@@ -2769,7 +2786,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2769 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 2786 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2770 ext4_ext_dirty(handle, inode, path + depth); 2787 ext4_ext_dirty(handle, inode, path + depth);
2771 /* zeroed the full extent */ 2788 /* zeroed the full extent */
2772 /* blocks available from iblock */ 2789 /* blocks available from map->m_lblk */
2773 return allocated; 2790 return allocated;
2774 2791
2775 } else if (err) 2792 } else if (err)
@@ -2783,11 +2800,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2783 * update the extent length after successful insert of the 2800 * update the extent length after successful insert of the
2784 * split extent 2801 * split extent
2785 */ 2802 */
2786 orig_ex.ee_len = cpu_to_le16(ee_len - 2803 ee_len -= ext4_ext_get_actual_len(ex3);
2787 ext4_ext_get_actual_len(ex3)); 2804 orig_ex.ee_len = cpu_to_le16(ee_len);
2805 may_zeroout = ee_block + ee_len <= eof_block;
2806
2788 depth = newdepth; 2807 depth = newdepth;
2789 ext4_ext_drop_refs(path); 2808 ext4_ext_drop_refs(path);
2790 path = ext4_ext_find_extent(inode, iblock, path); 2809 path = ext4_ext_find_extent(inode, map->m_lblk, path);
2791 if (IS_ERR(path)) { 2810 if (IS_ERR(path)) {
2792 err = PTR_ERR(path); 2811 err = PTR_ERR(path);
2793 goto out; 2812 goto out;
@@ -2801,14 +2820,14 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2801 if (err) 2820 if (err)
2802 goto out; 2821 goto out;
2803 2822
2804 allocated = max_blocks; 2823 allocated = map->m_len;
2805 2824
2806 /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying 2825 /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying
2807 * to insert a extent in the middle zerout directly 2826 * to insert a extent in the middle zerout directly
2808 * otherwise give the extent a chance to merge to left 2827 * otherwise give the extent a chance to merge to left
2809 */ 2828 */
2810 if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN && 2829 if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
2811 iblock != ee_block) { 2830 map->m_lblk != ee_block && may_zeroout) {
2812 err = ext4_ext_zeroout(inode, &orig_ex); 2831 err = ext4_ext_zeroout(inode, &orig_ex);
2813 if (err) 2832 if (err)
2814 goto fix_extent_len; 2833 goto fix_extent_len;
@@ -2818,7 +2837,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2818 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 2837 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2819 ext4_ext_dirty(handle, inode, path + depth); 2838 ext4_ext_dirty(handle, inode, path + depth);
2820 /* zero out the first half */ 2839 /* zero out the first half */
2821 /* blocks available from iblock */ 2840 /* blocks available from map->m_lblk */
2822 return allocated; 2841 return allocated;
2823 } 2842 }
2824 } 2843 }
@@ -2829,12 +2848,12 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2829 */ 2848 */
2830 if (ex1 && ex1 != ex) { 2849 if (ex1 && ex1 != ex) {
2831 ex1 = ex; 2850 ex1 = ex;
2832 ex1->ee_len = cpu_to_le16(iblock - ee_block); 2851 ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
2833 ext4_ext_mark_uninitialized(ex1); 2852 ext4_ext_mark_uninitialized(ex1);
2834 ex2 = &newex; 2853 ex2 = &newex;
2835 } 2854 }
2836 /* ex2: iblock to iblock + maxblocks-1 : initialised */ 2855 /* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */
2837 ex2->ee_block = cpu_to_le32(iblock); 2856 ex2->ee_block = cpu_to_le32(map->m_lblk);
2838 ext4_ext_store_pblock(ex2, newblock); 2857 ext4_ext_store_pblock(ex2, newblock);
2839 ex2->ee_len = cpu_to_le16(allocated); 2858 ex2->ee_len = cpu_to_le16(allocated);
2840 if (ex2 != ex) 2859 if (ex2 != ex)
@@ -2877,7 +2896,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
2877 goto out; 2896 goto out;
2878insert: 2897insert:
2879 err = ext4_ext_insert_extent(handle, inode, path, &newex, 0); 2898 err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
2880 if (err == -ENOSPC) { 2899 if (err == -ENOSPC && may_zeroout) {
2881 err = ext4_ext_zeroout(inode, &orig_ex); 2900 err = ext4_ext_zeroout(inode, &orig_ex);
2882 if (err) 2901 if (err)
2883 goto fix_extent_len; 2902 goto fix_extent_len;
@@ -2904,7 +2923,7 @@ fix_extent_len:
2904} 2923}
2905 2924
2906/* 2925/*
2907 * This function is called by ext4_ext_get_blocks() from 2926 * This function is called by ext4_ext_map_blocks() from
2908 * ext4_get_blocks_dio_write() when DIO to write 2927 * ext4_get_blocks_dio_write() when DIO to write
2909 * to an uninitialized extent. 2928 * to an uninitialized extent.
2910 * 2929 *
@@ -2927,9 +2946,8 @@ fix_extent_len:
2927 */ 2946 */
2928static int ext4_split_unwritten_extents(handle_t *handle, 2947static int ext4_split_unwritten_extents(handle_t *handle,
2929 struct inode *inode, 2948 struct inode *inode,
2949 struct ext4_map_blocks *map,
2930 struct ext4_ext_path *path, 2950 struct ext4_ext_path *path,
2931 ext4_lblk_t iblock,
2932 unsigned int max_blocks,
2933 int flags) 2951 int flags)
2934{ 2952{
2935 struct ext4_extent *ex, newex, orig_ex; 2953 struct ext4_extent *ex, newex, orig_ex;
@@ -2937,41 +2955,55 @@ static int ext4_split_unwritten_extents(handle_t *handle,
2937 struct ext4_extent *ex2 = NULL; 2955 struct ext4_extent *ex2 = NULL;
2938 struct ext4_extent *ex3 = NULL; 2956 struct ext4_extent *ex3 = NULL;
2939 struct ext4_extent_header *eh; 2957 struct ext4_extent_header *eh;
2940 ext4_lblk_t ee_block; 2958 ext4_lblk_t ee_block, eof_block;
2941 unsigned int allocated, ee_len, depth; 2959 unsigned int allocated, ee_len, depth;
2942 ext4_fsblk_t newblock; 2960 ext4_fsblk_t newblock;
2943 int err = 0; 2961 int err = 0;
2962 int may_zeroout;
2963
2964 ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
2965 "block %llu, max_blocks %u\n", inode->i_ino,
2966 (unsigned long long)map->m_lblk, map->m_len);
2967
2968 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
2969 inode->i_sb->s_blocksize_bits;
2970 if (eof_block < map->m_lblk + map->m_len)
2971 eof_block = map->m_lblk + map->m_len;
2944 2972
2945 ext_debug("ext4_split_unwritten_extents: inode %lu,"
2946 "iblock %llu, max_blocks %u\n", inode->i_ino,
2947 (unsigned long long)iblock, max_blocks);
2948 depth = ext_depth(inode); 2973 depth = ext_depth(inode);
2949 eh = path[depth].p_hdr; 2974 eh = path[depth].p_hdr;
2950 ex = path[depth].p_ext; 2975 ex = path[depth].p_ext;
2951 ee_block = le32_to_cpu(ex->ee_block); 2976 ee_block = le32_to_cpu(ex->ee_block);
2952 ee_len = ext4_ext_get_actual_len(ex); 2977 ee_len = ext4_ext_get_actual_len(ex);
2953 allocated = ee_len - (iblock - ee_block); 2978 allocated = ee_len - (map->m_lblk - ee_block);
2954 newblock = iblock - ee_block + ext_pblock(ex); 2979 newblock = map->m_lblk - ee_block + ext_pblock(ex);
2980
2955 ex2 = ex; 2981 ex2 = ex;
2956 orig_ex.ee_block = ex->ee_block; 2982 orig_ex.ee_block = ex->ee_block;
2957 orig_ex.ee_len = cpu_to_le16(ee_len); 2983 orig_ex.ee_len = cpu_to_le16(ee_len);
2958 ext4_ext_store_pblock(&orig_ex, ext_pblock(ex)); 2984 ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2959 2985
2960 /* 2986 /*
2987 * It is safe to convert extent to initialized via explicit
2988 * zeroout only if extent is fully inside i_size or new_size.
2989 */
2990 may_zeroout = ee_block + ee_len <= eof_block;
2991
2992 /*
2961 * If the uninitialized extent begins at the same logical 2993 * If the uninitialized extent begins at the same logical
2962 * block where the write begins, and the write completely 2994 * block where the write begins, and the write completely
2963 * covers the extent, then we don't need to split it. 2995 * covers the extent, then we don't need to split it.
2964 */ 2996 */
2965 if ((iblock == ee_block) && (allocated <= max_blocks)) 2997 if ((map->m_lblk == ee_block) && (allocated <= map->m_len))
2966 return allocated; 2998 return allocated;
2967 2999
2968 err = ext4_ext_get_access(handle, inode, path + depth); 3000 err = ext4_ext_get_access(handle, inode, path + depth);
2969 if (err) 3001 if (err)
2970 goto out; 3002 goto out;
2971 /* ex1: ee_block to iblock - 1 : uninitialized */ 3003 /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
2972 if (iblock > ee_block) { 3004 if (map->m_lblk > ee_block) {
2973 ex1 = ex; 3005 ex1 = ex;
2974 ex1->ee_len = cpu_to_le16(iblock - ee_block); 3006 ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
2975 ext4_ext_mark_uninitialized(ex1); 3007 ext4_ext_mark_uninitialized(ex1);
2976 ex2 = &newex; 3008 ex2 = &newex;
2977 } 3009 }
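
The hunk above introduces eof_block and may_zeroout: when a later extent insert fails with -ENOSPC, the code may fall back to zeroing out the whole original extent, but only if that extent lies entirely inside i_size (rounded up to a block) or inside the size this mapping will extend the file to. A small userspace sketch of just that arithmetic; the block size and all sizes below are invented for illustration:

    #include <stdio.h>

    int main(void)
    {
        /* all values are made-up examples, not taken from the patch */
        unsigned int blkbits = 12;                 /* 4096-byte blocks */
        unsigned long long i_size = 10000;         /* file size in bytes */
        unsigned int m_lblk = 2, m_len = 4;        /* requested mapping (map->m_lblk/m_len) */
        unsigned int ee_block = 0, ee_len = 8;     /* the uninitialized extent being split */

        /* round i_size up to a whole block, as the patch does */
        unsigned int eof_block = (i_size + (1ULL << blkbits) - 1) >> blkbits;
        /* the write may extend the file, so never use less than the end of the map */
        if (eof_block < m_lblk + m_len)
            eof_block = m_lblk + m_len;

        /* zeroing out is safe only if the extent ends at or before eof_block */
        int may_zeroout = ee_block + ee_len <= eof_block;

        printf("eof_block=%u may_zeroout=%d\n", eof_block, may_zeroout);
        return 0;
    }
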
@@ -2980,18 +3012,18 @@ static int ext4_split_unwritten_extents(handle_t *handle,
2980 * we insert ex3, if ex1 is NULL. This is to avoid temporary 3012 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2981 * overlap of blocks. 3013 * overlap of blocks.
2982 */ 3014 */
2983 if (!ex1 && allocated > max_blocks) 3015 if (!ex1 && allocated > map->m_len)
2984 ex2->ee_len = cpu_to_le16(max_blocks); 3016 ex2->ee_len = cpu_to_le16(map->m_len);
2985 /* ex3: to ee_block + ee_len : uninitialised */ 3017 /* ex3: to ee_block + ee_len : uninitialised */
2986 if (allocated > max_blocks) { 3018 if (allocated > map->m_len) {
2987 unsigned int newdepth; 3019 unsigned int newdepth;
2988 ex3 = &newex; 3020 ex3 = &newex;
2989 ex3->ee_block = cpu_to_le32(iblock + max_blocks); 3021 ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
2990 ext4_ext_store_pblock(ex3, newblock + max_blocks); 3022 ext4_ext_store_pblock(ex3, newblock + map->m_len);
2991 ex3->ee_len = cpu_to_le16(allocated - max_blocks); 3023 ex3->ee_len = cpu_to_le16(allocated - map->m_len);
2992 ext4_ext_mark_uninitialized(ex3); 3024 ext4_ext_mark_uninitialized(ex3);
2993 err = ext4_ext_insert_extent(handle, inode, path, ex3, flags); 3025 err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
2994 if (err == -ENOSPC) { 3026 if (err == -ENOSPC && may_zeroout) {
2995 err = ext4_ext_zeroout(inode, &orig_ex); 3027 err = ext4_ext_zeroout(inode, &orig_ex);
2996 if (err) 3028 if (err)
2997 goto fix_extent_len; 3029 goto fix_extent_len;
@@ -3001,7 +3033,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3001 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); 3033 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
3002 ext4_ext_dirty(handle, inode, path + depth); 3034 ext4_ext_dirty(handle, inode, path + depth);
3003 /* zeroed the full extent */ 3035 /* zeroed the full extent */
3004 /* blocks available from iblock */ 3036 /* blocks available from map->m_lblk */
3005 return allocated; 3037 return allocated;
3006 3038
3007 } else if (err) 3039 } else if (err)
@@ -3015,11 +3047,13 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3015 * update the extent length after successful insert of the 3047 * update the extent length after successful insert of the
3016 * split extent 3048 * split extent
3017 */ 3049 */
3018 orig_ex.ee_len = cpu_to_le16(ee_len - 3050 ee_len -= ext4_ext_get_actual_len(ex3);
3019 ext4_ext_get_actual_len(ex3)); 3051 orig_ex.ee_len = cpu_to_le16(ee_len);
3052 may_zeroout = ee_block + ee_len <= eof_block;
3053
3020 depth = newdepth; 3054 depth = newdepth;
3021 ext4_ext_drop_refs(path); 3055 ext4_ext_drop_refs(path);
3022 path = ext4_ext_find_extent(inode, iblock, path); 3056 path = ext4_ext_find_extent(inode, map->m_lblk, path);
3023 if (IS_ERR(path)) { 3057 if (IS_ERR(path)) {
3024 err = PTR_ERR(path); 3058 err = PTR_ERR(path);
3025 goto out; 3059 goto out;
@@ -3033,7 +3067,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3033 if (err) 3067 if (err)
3034 goto out; 3068 goto out;
3035 3069
3036 allocated = max_blocks; 3070 allocated = map->m_len;
3037 } 3071 }
3038 /* 3072 /*
3039 * If there was a change of depth as part of the 3073 * If there was a change of depth as part of the
@@ -3042,15 +3076,15 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3042 */ 3076 */
3043 if (ex1 && ex1 != ex) { 3077 if (ex1 && ex1 != ex) {
3044 ex1 = ex; 3078 ex1 = ex;
3045 ex1->ee_len = cpu_to_le16(iblock - ee_block); 3079 ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
3046 ext4_ext_mark_uninitialized(ex1); 3080 ext4_ext_mark_uninitialized(ex1);
3047 ex2 = &newex; 3081 ex2 = &newex;
3048 } 3082 }
3049 /* 3083 /*
3050 * ex2: iblock to iblock + maxblocks-1 : to be direct IO written, 3084 * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written
3051 * uninitialised still. 3085 * using direct I/O, uninitialised still.
3052 */ 3086 */
3053 ex2->ee_block = cpu_to_le32(iblock); 3087 ex2->ee_block = cpu_to_le32(map->m_lblk);
3054 ext4_ext_store_pblock(ex2, newblock); 3088 ext4_ext_store_pblock(ex2, newblock);
3055 ex2->ee_len = cpu_to_le16(allocated); 3089 ex2->ee_len = cpu_to_le16(allocated);
3056 ext4_ext_mark_uninitialized(ex2); 3090 ext4_ext_mark_uninitialized(ex2);
@@ -3062,7 +3096,7 @@ static int ext4_split_unwritten_extents(handle_t *handle,
3062 goto out; 3096 goto out;
3063insert: 3097insert:
3064 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 3098 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3065 if (err == -ENOSPC) { 3099 if (err == -ENOSPC && may_zeroout) {
3066 err = ext4_ext_zeroout(inode, &orig_ex); 3100 err = ext4_ext_zeroout(inode, &orig_ex);
3067 if (err) 3101 if (err)
3068 goto fix_extent_len; 3102 goto fix_extent_len;
@@ -3152,10 +3186,9 @@ static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3152 3186
3153static int 3187static int
3154ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, 3188ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3155 ext4_lblk_t iblock, unsigned int max_blocks, 3189 struct ext4_map_blocks *map,
3156 struct ext4_ext_path *path, int flags, 3190 struct ext4_ext_path *path, int flags,
3157 unsigned int allocated, struct buffer_head *bh_result, 3191 unsigned int allocated, ext4_fsblk_t newblock)
3158 ext4_fsblk_t newblock)
3159{ 3192{
3160 int ret = 0; 3193 int ret = 0;
3161 int err = 0; 3194 int err = 0;
@@ -3163,15 +3196,14 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3163 3196
3164 ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical" 3197 ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
3165 "block %llu, max_blocks %u, flags %d, allocated %u", 3198 "block %llu, max_blocks %u, flags %d, allocated %u",
3166 inode->i_ino, (unsigned long long)iblock, max_blocks, 3199 inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3167 flags, allocated); 3200 flags, allocated);
3168 ext4_ext_show_leaf(inode, path); 3201 ext4_ext_show_leaf(inode, path);
3169 3202
3170 /* get_block() before submit the IO, split the extent */ 3203 /* get_block() before submit the IO, split the extent */
3171 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { 3204 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3172 ret = ext4_split_unwritten_extents(handle, 3205 ret = ext4_split_unwritten_extents(handle, inode, map,
3173 inode, path, iblock, 3206 path, flags);
3174 max_blocks, flags);
3175 /* 3207 /*
3176 * Flag the inode(non aio case) or end_io struct (aio case) 3208 * Flag the inode(non aio case) or end_io struct (aio case)
3177 * that this IO needs conversion to written when IO is 3209
@@ -3182,7 +3214,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3182 else 3214 else
3183 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3215 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3184 if (ext4_should_dioread_nolock(inode)) 3216 if (ext4_should_dioread_nolock(inode))
3185 set_buffer_uninit(bh_result); 3217 map->m_flags |= EXT4_MAP_UNINIT;
3186 goto out; 3218 goto out;
3187 } 3219 }
3188 /* IO end_io complete, convert the filled extent to written */ 3220 /* IO end_io complete, convert the filled extent to written */
@@ -3210,14 +3242,12 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3210 * the buffer head will be unmapped so that 3242 * the buffer head will be unmapped so that
3211 * a read from the block returns 0s. 3243 * a read from the block returns 0s.
3212 */ 3244 */
3213 set_buffer_unwritten(bh_result); 3245 map->m_flags |= EXT4_MAP_UNWRITTEN;
3214 goto out1; 3246 goto out1;
3215 } 3247 }
3216 3248
3217 /* buffered write, writepage time, convert*/ 3249 /* buffered write, writepage time, convert*/
3218 ret = ext4_ext_convert_to_initialized(handle, inode, 3250 ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
3219 path, iblock,
3220 max_blocks);
3221 if (ret >= 0) 3251 if (ret >= 0)
3222 ext4_update_inode_fsync_trans(handle, inode, 1); 3252 ext4_update_inode_fsync_trans(handle, inode, 1);
3223out: 3253out:
@@ -3226,7 +3256,7 @@ out:
3226 goto out2; 3256 goto out2;
3227 } else 3257 } else
3228 allocated = ret; 3258 allocated = ret;
3229 set_buffer_new(bh_result); 3259 map->m_flags |= EXT4_MAP_NEW;
3230 /* 3260 /*
3231 * if we allocated more blocks than requested 3261 * if we allocated more blocks than requested
3232 * we need to make sure we unmap the extra block 3262 * we need to make sure we unmap the extra block
@@ -3234,11 +3264,11 @@ out:
3234 * unmapped later when we find the buffer_head marked 3264 * unmapped later when we find the buffer_head marked
3235 * new. 3265 * new.
3236 */ 3266 */
3237 if (allocated > max_blocks) { 3267 if (allocated > map->m_len) {
3238 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, 3268 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3239 newblock + max_blocks, 3269 newblock + map->m_len,
3240 allocated - max_blocks); 3270 allocated - map->m_len);
3241 allocated = max_blocks; 3271 allocated = map->m_len;
3242 } 3272 }
3243 3273
3244 /* 3274 /*
@@ -3252,13 +3282,13 @@ out:
3252 ext4_da_update_reserve_space(inode, allocated, 0); 3282 ext4_da_update_reserve_space(inode, allocated, 0);
3253 3283
3254map_out: 3284map_out:
3255 set_buffer_mapped(bh_result); 3285 map->m_flags |= EXT4_MAP_MAPPED;
3256out1: 3286out1:
3257 if (allocated > max_blocks) 3287 if (allocated > map->m_len)
3258 allocated = max_blocks; 3288 allocated = map->m_len;
3259 ext4_ext_show_leaf(inode, path); 3289 ext4_ext_show_leaf(inode, path);
3260 bh_result->b_bdev = inode->i_sb->s_bdev; 3290 map->m_pblk = newblock;
3261 bh_result->b_blocknr = newblock; 3291 map->m_len = allocated;
3262out2: 3292out2:
3263 if (path) { 3293 if (path) {
3264 ext4_ext_drop_refs(path); 3294 ext4_ext_drop_refs(path);
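
The hunks above replace the buffer_head result bits with flags carried in the new map structure: set_buffer_new, set_buffer_mapped, set_buffer_unwritten and set_buffer_uninit become EXT4_MAP_NEW, EXT4_MAP_MAPPED, EXT4_MAP_UNWRITTEN and EXT4_MAP_UNINIT, and b_blocknr/b_size give way to m_pblk/m_len. A self-contained sketch of that result object follows; the field names match what the diff uses, but the flag values and the exact declaration (which lives in fs/ext4/ext4.h) are assumptions here:

    #include <stdio.h>

    /* illustrative flag values only; the real ones are defined in fs/ext4/ext4.h */
    #define EXT4_MAP_NEW        0x01
    #define EXT4_MAP_MAPPED     0x02
    #define EXT4_MAP_UNWRITTEN  0x04
    #define EXT4_MAP_BOUNDARY   0x08
    #define EXT4_MAP_UNINIT     0x10

    /* shape of the result object used throughout this patch (simplified types) */
    struct ext4_map_blocks_sketch {
        unsigned long long m_pblk;   /* first physical block (was bh->b_blocknr) */
        unsigned int m_lblk;         /* first logical block requested */
        unsigned int m_len;          /* number of blocks requested, then mapped */
        unsigned int m_flags;        /* EXT4_MAP_* bits (were BH_* state bits) */
    };

    int main(void)
    {
        struct ext4_map_blocks_sketch map = { .m_lblk = 100, .m_len = 8 };

        /* roughly what ext4_ext_map_blocks() does after a fresh allocation */
        map.m_pblk = 123456;              /* made-up physical block number */
        map.m_flags |= EXT4_MAP_NEW | EXT4_MAP_MAPPED;

        printf("lblk %u -> pblk %llu, len %u, flags %#x\n",
               map.m_lblk, map.m_pblk, map.m_len, map.m_flags);
        return 0;
    }
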
@@ -3284,26 +3314,23 @@ out2:
3284 * 3314 *
3285 * return < 0, error case. 3315 * return < 0, error case.
3286 */ 3316 */
3287int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, 3317int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3288 ext4_lblk_t iblock, 3318 struct ext4_map_blocks *map, int flags)
3289 unsigned int max_blocks, struct buffer_head *bh_result,
3290 int flags)
3291{ 3319{
3292 struct ext4_ext_path *path = NULL; 3320 struct ext4_ext_path *path = NULL;
3293 struct ext4_extent_header *eh; 3321 struct ext4_extent_header *eh;
3294 struct ext4_extent newex, *ex, *last_ex; 3322 struct ext4_extent newex, *ex, *last_ex;
3295 ext4_fsblk_t newblock; 3323 ext4_fsblk_t newblock;
3296 int err = 0, depth, ret, cache_type; 3324 int i, err = 0, depth, ret, cache_type;
3297 unsigned int allocated = 0; 3325 unsigned int allocated = 0;
3298 struct ext4_allocation_request ar; 3326 struct ext4_allocation_request ar;
3299 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; 3327 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3300 3328
3301 __clear_bit(BH_New, &bh_result->b_state);
3302 ext_debug("blocks %u/%u requested for inode %lu\n", 3329 ext_debug("blocks %u/%u requested for inode %lu\n",
3303 iblock, max_blocks, inode->i_ino); 3330 map->m_lblk, map->m_len, inode->i_ino);
3304 3331
3305 /* check in cache */ 3332 /* check in cache */
3306 cache_type = ext4_ext_in_cache(inode, iblock, &newex); 3333 cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex);
3307 if (cache_type) { 3334 if (cache_type) {
3308 if (cache_type == EXT4_EXT_CACHE_GAP) { 3335 if (cache_type == EXT4_EXT_CACHE_GAP) {
3309 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 3336 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
@@ -3316,12 +3343,12 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3316 /* we should allocate requested block */ 3343 /* we should allocate requested block */
3317 } else if (cache_type == EXT4_EXT_CACHE_EXTENT) { 3344 } else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
3318 /* block is already allocated */ 3345 /* block is already allocated */
3319 newblock = iblock 3346 newblock = map->m_lblk
3320 - le32_to_cpu(newex.ee_block) 3347 - le32_to_cpu(newex.ee_block)
3321 + ext_pblock(&newex); 3348 + ext_pblock(&newex);
3322 /* number of remaining blocks in the extent */ 3349 /* number of remaining blocks in the extent */
3323 allocated = ext4_ext_get_actual_len(&newex) - 3350 allocated = ext4_ext_get_actual_len(&newex) -
3324 (iblock - le32_to_cpu(newex.ee_block)); 3351 (map->m_lblk - le32_to_cpu(newex.ee_block));
3325 goto out; 3352 goto out;
3326 } else { 3353 } else {
3327 BUG(); 3354 BUG();
@@ -3329,7 +3356,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3329 } 3356 }
3330 3357
3331 /* find extent for this block */ 3358 /* find extent for this block */
3332 path = ext4_ext_find_extent(inode, iblock, NULL); 3359 path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
3333 if (IS_ERR(path)) { 3360 if (IS_ERR(path)) {
3334 err = PTR_ERR(path); 3361 err = PTR_ERR(path);
3335 path = NULL; 3362 path = NULL;
@@ -3345,8 +3372,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3345 */ 3372 */
3346 if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 3373 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3347 EXT4_ERROR_INODE(inode, "bad extent address " 3374 EXT4_ERROR_INODE(inode, "bad extent address "
3348 "iblock: %d, depth: %d pblock %lld", 3375 "lblock: %lu, depth: %d pblock %lld",
3349 iblock, depth, path[depth].p_block); 3376 (unsigned long) map->m_lblk, depth,
3377 path[depth].p_block);
3350 err = -EIO; 3378 err = -EIO;
3351 goto out2; 3379 goto out2;
3352 } 3380 }
@@ -3364,12 +3392,12 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3364 */ 3392 */
3365 ee_len = ext4_ext_get_actual_len(ex); 3393 ee_len = ext4_ext_get_actual_len(ex);
3366 /* if found extent covers block, simply return it */ 3394 /* if found extent covers block, simply return it */
3367 if (in_range(iblock, ee_block, ee_len)) { 3395 if (in_range(map->m_lblk, ee_block, ee_len)) {
3368 newblock = iblock - ee_block + ee_start; 3396 newblock = map->m_lblk - ee_block + ee_start;
3369 /* number of remaining blocks in the extent */ 3397 /* number of remaining blocks in the extent */
3370 allocated = ee_len - (iblock - ee_block); 3398 allocated = ee_len - (map->m_lblk - ee_block);
3371 ext_debug("%u fit into %u:%d -> %llu\n", iblock, 3399 ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
3372 ee_block, ee_len, newblock); 3400 ee_block, ee_len, newblock);
3373 3401
3374 /* Do not put uninitialized extent in the cache */ 3402 /* Do not put uninitialized extent in the cache */
3375 if (!ext4_ext_is_uninitialized(ex)) { 3403 if (!ext4_ext_is_uninitialized(ex)) {
@@ -3379,8 +3407,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3379 goto out; 3407 goto out;
3380 } 3408 }
3381 ret = ext4_ext_handle_uninitialized_extents(handle, 3409 ret = ext4_ext_handle_uninitialized_extents(handle,
3382 inode, iblock, max_blocks, path, 3410 inode, map, path, flags, allocated,
3383 flags, allocated, bh_result, newblock); 3411 newblock);
3384 return ret; 3412 return ret;
3385 } 3413 }
3386 } 3414 }
@@ -3394,7 +3422,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3394 * put just found gap into cache to speed up 3422 * put just found gap into cache to speed up
3395 * subsequent requests 3423 * subsequent requests
3396 */ 3424 */
3397 ext4_ext_put_gap_in_cache(inode, path, iblock); 3425 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
3398 goto out2; 3426 goto out2;
3399 } 3427 }
3400 /* 3428 /*
@@ -3402,11 +3430,11 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3402 */ 3430 */
3403 3431
3404 /* find neighbour allocated blocks */ 3432 /* find neighbour allocated blocks */
3405 ar.lleft = iblock; 3433 ar.lleft = map->m_lblk;
3406 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 3434 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
3407 if (err) 3435 if (err)
3408 goto out2; 3436 goto out2;
3409 ar.lright = iblock; 3437 ar.lright = map->m_lblk;
3410 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright); 3438 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
3411 if (err) 3439 if (err)
3412 goto out2; 3440 goto out2;
@@ -3417,26 +3445,26 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3417 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is 3445 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
3418 * EXT_UNINIT_MAX_LEN. 3446 * EXT_UNINIT_MAX_LEN.
3419 */ 3447 */
3420 if (max_blocks > EXT_INIT_MAX_LEN && 3448 if (map->m_len > EXT_INIT_MAX_LEN &&
3421 !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 3449 !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
3422 max_blocks = EXT_INIT_MAX_LEN; 3450 map->m_len = EXT_INIT_MAX_LEN;
3423 else if (max_blocks > EXT_UNINIT_MAX_LEN && 3451 else if (map->m_len > EXT_UNINIT_MAX_LEN &&
3424 (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 3452 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
3425 max_blocks = EXT_UNINIT_MAX_LEN; 3453 map->m_len = EXT_UNINIT_MAX_LEN;
3426 3454
3427 /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */ 3455 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
3428 newex.ee_block = cpu_to_le32(iblock); 3456 newex.ee_block = cpu_to_le32(map->m_lblk);
3429 newex.ee_len = cpu_to_le16(max_blocks); 3457 newex.ee_len = cpu_to_le16(map->m_len);
3430 err = ext4_ext_check_overlap(inode, &newex, path); 3458 err = ext4_ext_check_overlap(inode, &newex, path);
3431 if (err) 3459 if (err)
3432 allocated = ext4_ext_get_actual_len(&newex); 3460 allocated = ext4_ext_get_actual_len(&newex);
3433 else 3461 else
3434 allocated = max_blocks; 3462 allocated = map->m_len;
3435 3463
3436 /* allocate new block */ 3464 /* allocate new block */
3437 ar.inode = inode; 3465 ar.inode = inode;
3438 ar.goal = ext4_ext_find_goal(inode, path, iblock); 3466 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
3439 ar.logical = iblock; 3467 ar.logical = map->m_lblk;
3440 ar.len = allocated; 3468 ar.len = allocated;
3441 if (S_ISREG(inode->i_mode)) 3469 if (S_ISREG(inode->i_mode))
3442 ar.flags = EXT4_MB_HINT_DATA; 3470 ar.flags = EXT4_MB_HINT_DATA;
@@ -3470,21 +3498,33 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3470 EXT4_STATE_DIO_UNWRITTEN); 3498 EXT4_STATE_DIO_UNWRITTEN);
3471 } 3499 }
3472 if (ext4_should_dioread_nolock(inode)) 3500 if (ext4_should_dioread_nolock(inode))
3473 set_buffer_uninit(bh_result); 3501 map->m_flags |= EXT4_MAP_UNINIT;
3474 } 3502 }
3475 3503
3476 if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) { 3504 if (unlikely(ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))) {
3477 if (unlikely(!eh->eh_entries)) { 3505 if (unlikely(!eh->eh_entries)) {
3478 EXT4_ERROR_INODE(inode, 3506 EXT4_ERROR_INODE(inode,
3479 "eh->eh_entries == 0 ee_block %d", 3507 "eh->eh_entries == 0 and "
3480 ex->ee_block); 3508 "EOFBLOCKS_FL set");
3481 err = -EIO; 3509 err = -EIO;
3482 goto out2; 3510 goto out2;
3483 } 3511 }
3484 last_ex = EXT_LAST_EXTENT(eh); 3512 last_ex = EXT_LAST_EXTENT(eh);
3485 if (iblock + ar.len > le32_to_cpu(last_ex->ee_block) 3513 /*
3486 + ext4_ext_get_actual_len(last_ex)) 3514 * If the current leaf block was reached by looking at
3487 EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL; 3515 * the last index block all the way down the tree, and
3516 * we are extending the inode beyond the last extent
3517 * in the current leaf block, then clear the
3518 * EOFBLOCKS_FL flag.
3519 */
3520 for (i = depth-1; i >= 0; i--) {
3521 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3522 break;
3523 }
3524 if ((i < 0) &&
3525 (map->m_lblk + ar.len > le32_to_cpu(last_ex->ee_block) +
3526 ext4_ext_get_actual_len(last_ex)))
3527 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3488 } 3528 }
3489 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 3529 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3490 if (err) { 3530 if (err) {
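
The loop added above clears EOFBLOCKS_FL only when the allocation really extends past the last extent of the whole tree: at every index level the path must point at the last index in its block, otherwise leaves further to the right still exist and the flag has to stay set. A standalone sketch of that predicate; the arrays stand in for path[i].p_idx and EXT_LAST_INDEX(path[i].p_hdr), and the values are invented:

    #include <stdio.h>

    /*
     * For each level i of an extent-tree path, cur_idx[i] is the index we
     * descended through and last_idx[i] is the last index present in that
     * block.  We only reached the rightmost leaf if they match at every level,
     * which is what the kernel's "i < 0" test after the loop detects.
     */
    static int reached_rightmost_leaf(const int *cur_idx, const int *last_idx,
                                      int depth)
    {
        int i;

        for (i = depth - 1; i >= 0; i--)
            if (cur_idx[i] != last_idx[i])
                return 0;    /* some level still has entries to the right */
        return 1;
    }

    int main(void)
    {
        int cur[2]  = { 3, 7 };    /* made-up example: depth-2 tree */
        int last[2] = { 3, 7 };

        printf("rightmost leaf: %d\n", reached_rightmost_leaf(cur, last, 2));
        cur[1] = 5;                /* not the last index at level 1 */
        printf("rightmost leaf: %d\n", reached_rightmost_leaf(cur, last, 2));
        return 0;
    }
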
@@ -3500,9 +3540,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3500 /* previous routine could use block we allocated */ 3540 /* previous routine could use block we allocated */
3501 newblock = ext_pblock(&newex); 3541 newblock = ext_pblock(&newex);
3502 allocated = ext4_ext_get_actual_len(&newex); 3542 allocated = ext4_ext_get_actual_len(&newex);
3503 if (allocated > max_blocks) 3543 if (allocated > map->m_len)
3504 allocated = max_blocks; 3544 allocated = map->m_len;
3505 set_buffer_new(bh_result); 3545 map->m_flags |= EXT4_MAP_NEW;
3506 3546
3507 /* 3547 /*
3508 * Update reserved blocks/metadata blocks after successful 3548 * Update reserved blocks/metadata blocks after successful
@@ -3516,18 +3556,18 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3516 * when it is _not_ an uninitialized extent. 3556 * when it is _not_ an uninitialized extent.
3517 */ 3557 */
3518 if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { 3558 if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
3519 ext4_ext_put_in_cache(inode, iblock, allocated, newblock, 3559 ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock,
3520 EXT4_EXT_CACHE_EXTENT); 3560 EXT4_EXT_CACHE_EXTENT);
3521 ext4_update_inode_fsync_trans(handle, inode, 1); 3561 ext4_update_inode_fsync_trans(handle, inode, 1);
3522 } else 3562 } else
3523 ext4_update_inode_fsync_trans(handle, inode, 0); 3563 ext4_update_inode_fsync_trans(handle, inode, 0);
3524out: 3564out:
3525 if (allocated > max_blocks) 3565 if (allocated > map->m_len)
3526 allocated = max_blocks; 3566 allocated = map->m_len;
3527 ext4_ext_show_leaf(inode, path); 3567 ext4_ext_show_leaf(inode, path);
3528 set_buffer_mapped(bh_result); 3568 map->m_flags |= EXT4_MAP_MAPPED;
3529 bh_result->b_bdev = inode->i_sb->s_bdev; 3569 map->m_pblk = newblock;
3530 bh_result->b_blocknr = newblock; 3570 map->m_len = allocated;
3531out2: 3571out2:
3532 if (path) { 3572 if (path) {
3533 ext4_ext_drop_refs(path); 3573 ext4_ext_drop_refs(path);
@@ -3625,7 +3665,7 @@ static void ext4_falloc_update_inode(struct inode *inode,
3625 * can proceed even if the new size is the same as i_size. 3665 * can proceed even if the new size is the same as i_size.
3626 */ 3666 */
3627 if (new_size > i_size_read(inode)) 3667 if (new_size > i_size_read(inode))
3628 EXT4_I(inode)->i_flags |= EXT4_EOFBLOCKS_FL; 3668 ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3629 } 3669 }
3630 3670
3631} 3671}
@@ -3640,55 +3680,57 @@ static void ext4_falloc_update_inode(struct inode *inode,
3640long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) 3680long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
3641{ 3681{
3642 handle_t *handle; 3682 handle_t *handle;
3643 ext4_lblk_t block;
3644 loff_t new_size; 3683 loff_t new_size;
3645 unsigned int max_blocks; 3684 unsigned int max_blocks;
3646 int ret = 0; 3685 int ret = 0;
3647 int ret2 = 0; 3686 int ret2 = 0;
3648 int retries = 0; 3687 int retries = 0;
3649 struct buffer_head map_bh; 3688 struct ext4_map_blocks map;
3650 unsigned int credits, blkbits = inode->i_blkbits; 3689 unsigned int credits, blkbits = inode->i_blkbits;
3651 3690
3652 /* 3691 /*
3653 * currently supporting (pre)allocate mode for extent-based 3692 * currently supporting (pre)allocate mode for extent-based
3654 * files _only_ 3693 * files _only_
3655 */ 3694 */
3656 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 3695 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
3657 return -EOPNOTSUPP; 3696 return -EOPNOTSUPP;
3658 3697
3659 /* preallocation to directories is currently not supported */ 3698 /* preallocation to directories is currently not supported */
3660 if (S_ISDIR(inode->i_mode)) 3699 if (S_ISDIR(inode->i_mode))
3661 return -ENODEV; 3700 return -ENODEV;
3662 3701
3663 block = offset >> blkbits; 3702 map.m_lblk = offset >> blkbits;
3664 /* 3703 /*
3665 * We can't just convert len to max_blocks because 3704 * We can't just convert len to max_blocks because
3666 * If blocksize = 4096 offset = 3072 and len = 2048 3705 * If blocksize = 4096 offset = 3072 and len = 2048
3667 */ 3706 */
3668 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) 3707 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
3669 - block; 3708 - map.m_lblk;
3670 /* 3709 /*
3671 * credits to insert 1 extent into extent tree 3710 * credits to insert 1 extent into extent tree
3672 */ 3711 */
3673 credits = ext4_chunk_trans_blocks(inode, max_blocks); 3712 credits = ext4_chunk_trans_blocks(inode, max_blocks);
3674 mutex_lock(&inode->i_mutex); 3713 mutex_lock(&inode->i_mutex);
3714 ret = inode_newsize_ok(inode, (len + offset));
3715 if (ret) {
3716 mutex_unlock(&inode->i_mutex);
3717 return ret;
3718 }
3675retry: 3719retry:
3676 while (ret >= 0 && ret < max_blocks) { 3720 while (ret >= 0 && ret < max_blocks) {
3677 block = block + ret; 3721 map.m_lblk = map.m_lblk + ret;
3678 max_blocks = max_blocks - ret; 3722 map.m_len = max_blocks = max_blocks - ret;
3679 handle = ext4_journal_start(inode, credits); 3723 handle = ext4_journal_start(inode, credits);
3680 if (IS_ERR(handle)) { 3724 if (IS_ERR(handle)) {
3681 ret = PTR_ERR(handle); 3725 ret = PTR_ERR(handle);
3682 break; 3726 break;
3683 } 3727 }
3684 map_bh.b_state = 0; 3728 ret = ext4_map_blocks(handle, inode, &map,
3685 ret = ext4_get_blocks(handle, inode, block,
3686 max_blocks, &map_bh,
3687 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT); 3729 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
3688 if (ret <= 0) { 3730 if (ret <= 0) {
3689#ifdef EXT4FS_DEBUG 3731#ifdef EXT4FS_DEBUG
3690 WARN_ON(ret <= 0); 3732 WARN_ON(ret <= 0);
3691 printk(KERN_ERR "%s: ext4_ext_get_blocks " 3733 printk(KERN_ERR "%s: ext4_ext_map_blocks "
3692 "returned error inode#%lu, block=%u, " 3734 "returned error inode#%lu, block=%u, "
3693 "max_blocks=%u", __func__, 3735 "max_blocks=%u", __func__,
3694 inode->i_ino, block, max_blocks); 3736 inode->i_ino, block, max_blocks);
@@ -3697,14 +3739,14 @@ retry:
3697 ret2 = ext4_journal_stop(handle); 3739 ret2 = ext4_journal_stop(handle);
3698 break; 3740 break;
3699 } 3741 }
3700 if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len, 3742 if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
3701 blkbits) >> blkbits)) 3743 blkbits) >> blkbits))
3702 new_size = offset + len; 3744 new_size = offset + len;
3703 else 3745 else
3704 new_size = (block + ret) << blkbits; 3746 new_size = (map.m_lblk + ret) << blkbits;
3705 3747
3706 ext4_falloc_update_inode(inode, mode, new_size, 3748 ext4_falloc_update_inode(inode, mode, new_size,
3707 buffer_new(&map_bh)); 3749 (map.m_flags & EXT4_MAP_NEW));
3708 ext4_mark_inode_dirty(handle, inode); 3750 ext4_mark_inode_dirty(handle, inode);
3709 ret2 = ext4_journal_stop(handle); 3751 ret2 = ext4_journal_stop(handle);
3710 if (ret2) 3752 if (ret2)
@@ -3733,42 +3775,39 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3733 ssize_t len) 3775 ssize_t len)
3734{ 3776{
3735 handle_t *handle; 3777 handle_t *handle;
3736 ext4_lblk_t block;
3737 unsigned int max_blocks; 3778 unsigned int max_blocks;
3738 int ret = 0; 3779 int ret = 0;
3739 int ret2 = 0; 3780 int ret2 = 0;
3740 struct buffer_head map_bh; 3781 struct ext4_map_blocks map;
3741 unsigned int credits, blkbits = inode->i_blkbits; 3782 unsigned int credits, blkbits = inode->i_blkbits;
3742 3783
3743 block = offset >> blkbits; 3784 map.m_lblk = offset >> blkbits;
3744 /* 3785 /*
3745 * We can't just convert len to max_blocks because 3786 * We can't just convert len to max_blocks because
3746 * If blocksize = 4096 offset = 3072 and len = 2048 3787 * If blocksize = 4096 offset = 3072 and len = 2048
3747 */ 3788 */
3748 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) 3789 max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
3749 - block; 3790 map.m_lblk);
3750 /* 3791 /*
3751 * credits to insert 1 extent into extent tree 3792 * credits to insert 1 extent into extent tree
3752 */ 3793 */
3753 credits = ext4_chunk_trans_blocks(inode, max_blocks); 3794 credits = ext4_chunk_trans_blocks(inode, max_blocks);
3754 while (ret >= 0 && ret < max_blocks) { 3795 while (ret >= 0 && ret < max_blocks) {
3755 block = block + ret; 3796 map.m_lblk += ret;
3756 max_blocks = max_blocks - ret; 3797 map.m_len = (max_blocks -= ret);
3757 handle = ext4_journal_start(inode, credits); 3798 handle = ext4_journal_start(inode, credits);
3758 if (IS_ERR(handle)) { 3799 if (IS_ERR(handle)) {
3759 ret = PTR_ERR(handle); 3800 ret = PTR_ERR(handle);
3760 break; 3801 break;
3761 } 3802 }
3762 map_bh.b_state = 0; 3803 ret = ext4_map_blocks(handle, inode, &map,
3763 ret = ext4_get_blocks(handle, inode, block,
3764 max_blocks, &map_bh,
3765 EXT4_GET_BLOCKS_IO_CONVERT_EXT); 3804 EXT4_GET_BLOCKS_IO_CONVERT_EXT);
3766 if (ret <= 0) { 3805 if (ret <= 0) {
3767 WARN_ON(ret <= 0); 3806 WARN_ON(ret <= 0);
3768 printk(KERN_ERR "%s: ext4_ext_get_blocks " 3807 printk(KERN_ERR "%s: ext4_ext_map_blocks "
3769 "returned error inode#%lu, block=%u, " 3808 "returned error inode#%lu, block=%u, "
3770 "max_blocks=%u", __func__, 3809 "max_blocks=%u", __func__,
3771 inode->i_ino, block, max_blocks); 3810 inode->i_ino, map.m_lblk, map.m_len);
3772 } 3811 }
3773 ext4_mark_inode_dirty(handle, inode); 3812 ext4_mark_inode_dirty(handle, inode);
3774 ret2 = ext4_journal_stop(handle); 3813 ret2 = ext4_journal_stop(handle);
@@ -3898,7 +3937,7 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3898 int error = 0; 3937 int error = 0;
3899 3938
3900 /* fallback to generic here if not in extents fmt */ 3939 /* fallback to generic here if not in extents fmt */
3901 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 3940 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
3902 return generic_block_fiemap(inode, fieinfo, start, len, 3941 return generic_block_fiemap(inode, fieinfo, start, len,
3903 ext4_get_block); 3942 ext4_get_block);
3904 3943
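
Taken together, the extents.c changes move every mapping entry point from the (iblock, max_blocks, bh_result) convention to a single struct ext4_map_blocks argument: the caller fills m_lblk and m_len, calls ext4_map_blocks(), advances by the returned block count, and reads the result back from m_pblk and m_flags. A self-contained userspace sketch of that loop shape, modelled on the fallocate retry loop above; the stub and all numbers are invented and are not kernel code:

    #include <stdio.h>

    struct map_sketch {
        unsigned long long m_pblk;
        unsigned int m_lblk;
        unsigned int m_len;
        unsigned int m_flags;
    };

    #define MAP_NEW 0x01    /* stands in for EXT4_MAP_NEW */

    /* stub: "maps" at most 8 blocks per call at a made-up physical offset */
    static int map_blocks_stub(struct map_sketch *map)
    {
        unsigned int n = map->m_len > 8 ? 8 : map->m_len;

        map->m_pblk = 1000 + map->m_lblk;
        map->m_len = n;
        map->m_flags |= MAP_NEW;
        return (int)n;              /* >0: blocks mapped, <=0: error or hole */
    }

    int main(void)
    {
        unsigned int max_blocks = 20;    /* example request: 20 blocks */
        struct map_sketch map = { .m_lblk = 0 };
        int ret = 0;

        /* same shape as the fallocate retry loop in the patch */
        while (ret >= 0 && ret < (int)max_blocks) {
            map.m_lblk += ret;
            map.m_len = max_blocks = max_blocks - ret;
            ret = map_blocks_stub(&map);
            if (ret <= 0)
                break;
            printf("mapped %u..%u -> pblk %llu%s\n",
                   map.m_lblk, map.m_lblk + map.m_len - 1, map.m_pblk,
                   (map.m_flags & MAP_NEW) ? " (new)" : "");
        }
        return 0;
    }
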
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index d0776e410f34..5313ae4cda2d 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -66,7 +66,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
66 * is smaller than s_maxbytes, which is for extent-mapped files. 66 * is smaller than s_maxbytes, which is for extent-mapped files.
67 */ 67 */
68 68
69 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) { 69 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
70 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 70 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
71 size_t length = iov_length(iov, nr_segs); 71 size_t length = iov_length(iov, nr_segs);
72 72
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index ef3d980e67cb..592adf2e546e 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -35,6 +35,29 @@
35#include <trace/events/ext4.h> 35#include <trace/events/ext4.h>
36 36
37/* 37/*
38 * If we're not journaling and this is a just-created file, we have to
39 * sync our parent directory (if it was freshly created) since
40 * otherwise it will only be written by writeback, leaving a huge
41 * window during which a crash may lose the file. This may apply for
42 * the parent directory's parent as well, and so on recursively, if
43 * they are also freshly created.
44 */
45static void ext4_sync_parent(struct inode *inode)
46{
47 struct dentry *dentry = NULL;
48
49 while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
50 ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
51 dentry = list_entry(inode->i_dentry.next,
52 struct dentry, d_alias);
53 if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
54 break;
55 inode = dentry->d_parent->d_inode;
56 sync_mapping_buffers(inode->i_mapping);
57 }
58}
59
60/*
38 * akpm: A new design for ext4_sync_file(). 61 * akpm: A new design for ext4_sync_file().
39 * 62 *
40 * This is only called from sys_fsync(), sys_fdatasync() and sys_msync(). 63 * This is only called from sys_fsync(), sys_fdatasync() and sys_msync().
@@ -48,9 +71,9 @@
48 * i_mutex lock is held when entering and exiting this function 71 * i_mutex lock is held when entering and exiting this function
49 */ 72 */
50 73
51int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) 74int ext4_sync_file(struct file *file, int datasync)
52{ 75{
53 struct inode *inode = dentry->d_inode; 76 struct inode *inode = file->f_mapping->host;
54 struct ext4_inode_info *ei = EXT4_I(inode); 77 struct ext4_inode_info *ei = EXT4_I(inode);
55 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 78 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
56 int ret; 79 int ret;
@@ -58,7 +81,7 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
58 81
59 J_ASSERT(ext4_journal_current_handle() == NULL); 82 J_ASSERT(ext4_journal_current_handle() == NULL);
60 83
61 trace_ext4_sync_file(file, dentry, datasync); 84 trace_ext4_sync_file(file, datasync);
62 85
63 if (inode->i_sb->s_flags & MS_RDONLY) 86 if (inode->i_sb->s_flags & MS_RDONLY)
64 return 0; 87 return 0;
@@ -66,9 +89,13 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
66 ret = flush_completed_IO(inode); 89 ret = flush_completed_IO(inode);
67 if (ret < 0) 90 if (ret < 0)
68 return ret; 91 return ret;
69 92
70 if (!journal) 93 if (!journal) {
71 return simple_fsync(file, dentry, datasync); 94 ret = generic_file_fsync(file, datasync);
95 if (!ret && !list_empty(&inode->i_dentry))
96 ext4_sync_parent(inode);
97 return ret;
98 }
72 99
73 /* 100 /*
74 * data=writeback,ordered: 101 * data=writeback,ordered:
@@ -102,7 +129,7 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
102 (journal->j_flags & JBD2_BARRIER)) 129 (journal->j_flags & JBD2_BARRIER))
103 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, 130 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
104 NULL, BLKDEV_IFL_WAIT); 131 NULL, BLKDEV_IFL_WAIT);
105 jbd2_log_wait_commit(journal, commit_tid); 132 ret = jbd2_log_wait_commit(journal, commit_tid);
106 } else if (journal->j_flags & JBD2_BARRIER) 133 } else if (journal->j_flags & JBD2_BARRIER)
107 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL, 134 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
108 BLKDEV_IFL_WAIT); 135 BLKDEV_IFL_WAIT);
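
The new ext4_sync_parent() covers the no-journal case: fsync of a freshly created file is only durable if the directory entry, which lives in the parent directory's buffers, is written out too, and the same applies recursively to freshly created ancestors. A tiny illustrative walk with made-up stand-in types; the real code keys off the EXT4_STATE_NEWENTRY inode state bit and calls sync_mapping_buffers() on each parent:

    #include <stdio.h>

    struct node_sketch {
        const char *name;
        int new_entry;               /* stands in for EXT4_STATE_NEWENTRY */
        struct node_sketch *parent;
    };

    /* walk up while each node's directory entry is itself freshly created */
    static void sync_new_parents(struct node_sketch *n)
    {
        while (n && n->new_entry) {
            n->new_entry = 0;        /* clear the mark, as the patch does */
            if (!n->parent)
                break;
            n = n->parent;
            printf("sync buffers of %s\n", n->name);  /* ~ sync_mapping_buffers() */
        }
    }

    int main(void)
    {
        struct node_sketch root = { "/", 0, NULL };
        struct node_sketch dir  = { "/newdir", 1, &root };
        struct node_sketch file = { "/newdir/file", 1, &dir };

        sync_new_parents(&file);     /* syncs /newdir, then /, then stops */
        return 0;
    }
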
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 1a0e183a2f04..25c4b3173fd9 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -240,56 +240,49 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
240 if (fatal) 240 if (fatal)
241 goto error_return; 241 goto error_return;
242 242
243 /* Ok, now we can actually update the inode bitmaps.. */ 243 fatal = -ESRCH;
244 cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group), 244 gdp = ext4_get_group_desc(sb, block_group, &bh2);
245 bit, bitmap_bh->b_data); 245 if (gdp) {
246 if (!cleared)
247 ext4_error(sb, "bit already cleared for inode %lu", ino);
248 else {
249 gdp = ext4_get_group_desc(sb, block_group, &bh2);
250
251 BUFFER_TRACE(bh2, "get_write_access"); 246 BUFFER_TRACE(bh2, "get_write_access");
252 fatal = ext4_journal_get_write_access(handle, bh2); 247 fatal = ext4_journal_get_write_access(handle, bh2);
253 if (fatal) goto error_return; 248 }
254 249 ext4_lock_group(sb, block_group);
255 if (gdp) { 250 cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
256 ext4_lock_group(sb, block_group); 251 if (fatal || !cleared) {
257 count = ext4_free_inodes_count(sb, gdp) + 1; 252 ext4_unlock_group(sb, block_group);
258 ext4_free_inodes_set(sb, gdp, count); 253 goto out;
259 if (is_directory) { 254 }
260 count = ext4_used_dirs_count(sb, gdp) - 1;
261 ext4_used_dirs_set(sb, gdp, count);
262 if (sbi->s_log_groups_per_flex) {
263 ext4_group_t f;
264
265 f = ext4_flex_group(sbi, block_group);
266 atomic_dec(&sbi->s_flex_groups[f].used_dirs);
267 }
268 255
269 } 256 count = ext4_free_inodes_count(sb, gdp) + 1;
270 gdp->bg_checksum = ext4_group_desc_csum(sbi, 257 ext4_free_inodes_set(sb, gdp, count);
271 block_group, gdp); 258 if (is_directory) {
272 ext4_unlock_group(sb, block_group); 259 count = ext4_used_dirs_count(sb, gdp) - 1;
273 percpu_counter_inc(&sbi->s_freeinodes_counter); 260 ext4_used_dirs_set(sb, gdp, count);
274 if (is_directory) 261 percpu_counter_dec(&sbi->s_dirs_counter);
275 percpu_counter_dec(&sbi->s_dirs_counter);
276
277 if (sbi->s_log_groups_per_flex) {
278 ext4_group_t f;
279
280 f = ext4_flex_group(sbi, block_group);
281 atomic_inc(&sbi->s_flex_groups[f].free_inodes);
282 }
283 }
284 BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
285 err = ext4_handle_dirty_metadata(handle, NULL, bh2);
286 if (!fatal) fatal = err;
287 } 262 }
288 BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata"); 263 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
289 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 264 ext4_unlock_group(sb, block_group);
290 if (!fatal) 265
291 fatal = err; 266 percpu_counter_inc(&sbi->s_freeinodes_counter);
292 sb->s_dirt = 1; 267 if (sbi->s_log_groups_per_flex) {
268 ext4_group_t f = ext4_flex_group(sbi, block_group);
269
270 atomic_inc(&sbi->s_flex_groups[f].free_inodes);
271 if (is_directory)
272 atomic_dec(&sbi->s_flex_groups[f].used_dirs);
273 }
274 BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
275 fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
276out:
277 if (cleared) {
278 BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
279 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
280 if (!fatal)
281 fatal = err;
282 sb->s_dirt = 1;
283 } else
284 ext4_error(sb, "bit already cleared for inode %lu", ino);
285
293error_return: 286error_return:
294 brelse(bitmap_bh); 287 brelse(bitmap_bh);
295 ext4_std_error(sb, fatal); 288 ext4_std_error(sb, fatal);
@@ -499,7 +492,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
499 492
500 if (S_ISDIR(mode) && 493 if (S_ISDIR(mode) &&
501 ((parent == sb->s_root->d_inode) || 494 ((parent == sb->s_root->d_inode) ||
502 (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL))) { 495 (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
503 int best_ndir = inodes_per_group; 496 int best_ndir = inodes_per_group;
504 int ret = -1; 497 int ret = -1;
505 498
@@ -1041,7 +1034,7 @@ got:
1041 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { 1034 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
1042 /* set extent flag only for directory, file and normal symlink*/ 1035 /* set extent flag only for directory, file and normal symlink*/
1043 if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) { 1036 if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
1044 EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL; 1037 ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
1045 ext4_ext_tree_init(handle, inode); 1038 ext4_ext_tree_init(handle, inode);
1046 } 1039 }
1047 } 1040 }
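
The ialloc.c rework above flattens the old nested error handling in ext4_free_inode(): write access to the group descriptor is obtained first, the bitmap bit is cleared under the group lock, and the counter updates only happen when both succeeded; the "bit already cleared" complaint now falls out at the end. A compact standalone sketch of that control flow, where the bitmap, the group lock and the counters are simplified stand-ins with invented values:

    #include <stdio.h>

    static unsigned long inode_bitmap = 0x5;    /* bits 0 and 2 in use (example) */
    static int free_inodes_count = 10;

    /* returns the old value of the bit, like ext4_clear_bit() in the patch */
    static int clear_bit_sketch(int bit, unsigned long *map)
    {
        int old = (*map >> bit) & 1;
        *map &= ~(1UL << bit);
        return old;
    }

    static void free_inode_sketch(int bit, int journal_ok)
    {
        int cleared;

        /* ext4_lock_group(...) would be taken here */
        cleared = clear_bit_sketch(bit, &inode_bitmap);
        if (!journal_ok || !cleared) {
            /* ext4_unlock_group(...); skip all counter updates */
            if (!cleared)
                printf("bit %d already cleared\n", bit);
            return;
        }
        free_inodes_count++;        /* bookkeeping only when the bit was set */
        /* ext4_unlock_group(...); dirty descriptor and bitmap buffers here */
        printf("freed inode bit %d, free inodes now %d\n", bit, free_inodes_count);
    }

    int main(void)
    {
        free_inode_sketch(2, 1);    /* normal free */
        free_inode_sketch(2, 1);    /* double free -> already cleared */
        return 0;
    }
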
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3e0f6af9d08d..19df61c321fd 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -149,7 +149,7 @@ int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
149 int ret; 149 int ret;
150 150
151 /* 151 /*
152 * Drop i_data_sem to avoid deadlock with ext4_get_blocks At this 152 * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
153 * moment, get_block can be called only for blocks inside i_size since 153 * moment, get_block can be called only for blocks inside i_size since
154 * page cache has been already dropped and writes are blocked by 154 * page cache has been already dropped and writes are blocked by
155 * i_mutex. So we can safely drop the i_data_sem here. 155 * i_mutex. So we can safely drop the i_data_sem here.
@@ -348,9 +348,8 @@ static int __ext4_check_blockref(const char *function, struct inode *inode,
348 if (blk && 348 if (blk &&
349 unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), 349 unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
350 blk, 1))) { 350 blk, 1))) {
351 __ext4_error(inode->i_sb, function, 351 ext4_error_inode(function, inode,
352 "invalid block reference %u " 352 "invalid block reference %u", blk);
353 "in inode #%lu", blk, inode->i_ino);
354 return -EIO; 353 return -EIO;
355 } 354 }
356 } 355 }
@@ -785,7 +784,7 @@ failed:
785 /* Allocation failed, free what we already allocated */ 784 /* Allocation failed, free what we already allocated */
786 ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0); 785 ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
787 for (i = 1; i <= n ; i++) { 786 for (i = 1; i <= n ; i++) {
788 /* 787 /*
789 * branch[i].bh is newly allocated, so there is no 788 * branch[i].bh is newly allocated, so there is no
790 * need to revoke the block, which is why we don't 789 * need to revoke the block, which is why we don't
791 * need to set EXT4_FREE_BLOCKS_METADATA. 790 * need to set EXT4_FREE_BLOCKS_METADATA.
@@ -875,7 +874,7 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
875 874
876err_out: 875err_out:
877 for (i = 1; i <= num; i++) { 876 for (i = 1; i <= num; i++) {
878 /* 877 /*
879 * branch[i].bh is newly allocated, so there is no 878 * branch[i].bh is newly allocated, so there is no
880 * need to revoke the block, which is why we don't 879 * need to revoke the block, which is why we don't
881 * need to set EXT4_FREE_BLOCKS_METADATA. 880 * need to set EXT4_FREE_BLOCKS_METADATA.
@@ -890,9 +889,9 @@ err_out:
890} 889}
891 890
892/* 891/*
893 * The ext4_ind_get_blocks() function handles non-extents inodes 892 * The ext4_ind_map_blocks() function handles non-extents inodes
894 * (i.e., using the traditional indirect/double-indirect i_blocks 893 * (i.e., using the traditional indirect/double-indirect i_blocks
895 * scheme) for ext4_get_blocks(). 894 * scheme) for ext4_map_blocks().
896 * 895 *
897 * Allocation strategy is simple: if we have to allocate something, we will 896 * Allocation strategy is simple: if we have to allocate something, we will
898 * have to go the whole way to leaf. So let's do it before attaching anything 897 * have to go the whole way to leaf. So let's do it before attaching anything
@@ -917,9 +916,8 @@ err_out:
917 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system 916 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
918 * blocks. 917 * blocks.
919 */ 918 */
920static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, 919static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
921 ext4_lblk_t iblock, unsigned int maxblocks, 920 struct ext4_map_blocks *map,
922 struct buffer_head *bh_result,
923 int flags) 921 int flags)
924{ 922{
925 int err = -EIO; 923 int err = -EIO;
@@ -933,9 +931,9 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
933 int count = 0; 931 int count = 0;
934 ext4_fsblk_t first_block = 0; 932 ext4_fsblk_t first_block = 0;
935 933
936 J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)); 934 J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
937 J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0); 935 J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
938 depth = ext4_block_to_path(inode, iblock, offsets, 936 depth = ext4_block_to_path(inode, map->m_lblk, offsets,
939 &blocks_to_boundary); 937 &blocks_to_boundary);
940 938
941 if (depth == 0) 939 if (depth == 0)
@@ -946,10 +944,9 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
946 /* Simplest case - block found, no allocation needed */ 944 /* Simplest case - block found, no allocation needed */
947 if (!partial) { 945 if (!partial) {
948 first_block = le32_to_cpu(chain[depth - 1].key); 946 first_block = le32_to_cpu(chain[depth - 1].key);
949 clear_buffer_new(bh_result);
950 count++; 947 count++;
951 /*map more blocks*/ 948 /*map more blocks*/
952 while (count < maxblocks && count <= blocks_to_boundary) { 949 while (count < map->m_len && count <= blocks_to_boundary) {
953 ext4_fsblk_t blk; 950 ext4_fsblk_t blk;
954 951
955 blk = le32_to_cpu(*(chain[depth-1].p + count)); 952 blk = le32_to_cpu(*(chain[depth-1].p + count));
@@ -969,7 +966,7 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
969 /* 966 /*
970 * Okay, we need to do block allocation. 967 * Okay, we need to do block allocation.
971 */ 968 */
972 goal = ext4_find_goal(inode, iblock, partial); 969 goal = ext4_find_goal(inode, map->m_lblk, partial);
973 970
974 /* the number of blocks need to allocate for [d,t]indirect blocks */ 971 /* the number of blocks need to allocate for [d,t]indirect blocks */
975 indirect_blks = (chain + depth) - partial - 1; 972 indirect_blks = (chain + depth) - partial - 1;
@@ -979,11 +976,11 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
979 * direct blocks to allocate for this branch. 976 * direct blocks to allocate for this branch.
980 */ 977 */
981 count = ext4_blks_to_allocate(partial, indirect_blks, 978 count = ext4_blks_to_allocate(partial, indirect_blks,
982 maxblocks, blocks_to_boundary); 979 map->m_len, blocks_to_boundary);
983 /* 980 /*
984 * Block out ext4_truncate while we alter the tree 981 * Block out ext4_truncate while we alter the tree
985 */ 982 */
986 err = ext4_alloc_branch(handle, inode, iblock, indirect_blks, 983 err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
987 &count, goal, 984 &count, goal,
988 offsets + (partial - chain), partial); 985 offsets + (partial - chain), partial);
989 986
@@ -995,18 +992,20 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
995 * may need to return -EAGAIN upwards in the worst case. --sct 992 * may need to return -EAGAIN upwards in the worst case. --sct
996 */ 993 */
997 if (!err) 994 if (!err)
998 err = ext4_splice_branch(handle, inode, iblock, 995 err = ext4_splice_branch(handle, inode, map->m_lblk,
999 partial, indirect_blks, count); 996 partial, indirect_blks, count);
1000 if (err) 997 if (err)
1001 goto cleanup; 998 goto cleanup;
1002 999
1003 set_buffer_new(bh_result); 1000 map->m_flags |= EXT4_MAP_NEW;
1004 1001
1005 ext4_update_inode_fsync_trans(handle, inode, 1); 1002 ext4_update_inode_fsync_trans(handle, inode, 1);
1006got_it: 1003got_it:
1007 map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); 1004 map->m_flags |= EXT4_MAP_MAPPED;
1005 map->m_pblk = le32_to_cpu(chain[depth-1].key);
1006 map->m_len = count;
1008 if (count > blocks_to_boundary) 1007 if (count > blocks_to_boundary)
1009 set_buffer_boundary(bh_result); 1008 map->m_flags |= EXT4_MAP_BOUNDARY;
1010 err = count; 1009 err = count;
1011 /* Clean up and exit */ 1010 /* Clean up and exit */
1012 partial = chain + depth - 1; /* the whole chain */ 1011 partial = chain + depth - 1; /* the whole chain */
@@ -1016,7 +1015,6 @@ cleanup:
1016 brelse(partial->bh); 1015 brelse(partial->bh);
1017 partial--; 1016 partial--;
1018 } 1017 }
1019 BUFFER_TRACE(bh_result, "returned");
1020out: 1018out:
1021 return err; 1019 return err;
1022} 1020}
@@ -1061,7 +1059,7 @@ static int ext4_indirect_calc_metadata_amount(struct inode *inode,
1061 */ 1059 */
1062static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock) 1060static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
1063{ 1061{
1064 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) 1062 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1065 return ext4_ext_calc_metadata_amount(inode, lblock); 1063 return ext4_ext_calc_metadata_amount(inode, lblock);
1066 1064
1067 return ext4_indirect_calc_metadata_amount(inode, lblock); 1065 return ext4_indirect_calc_metadata_amount(inode, lblock);
@@ -1076,7 +1074,6 @@ void ext4_da_update_reserve_space(struct inode *inode,
1076{ 1074{
1077 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1075 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1078 struct ext4_inode_info *ei = EXT4_I(inode); 1076 struct ext4_inode_info *ei = EXT4_I(inode);
1079 int mdb_free = 0, allocated_meta_blocks = 0;
1080 1077
1081 spin_lock(&ei->i_block_reservation_lock); 1078 spin_lock(&ei->i_block_reservation_lock);
1082 trace_ext4_da_update_reserve_space(inode, used); 1079 trace_ext4_da_update_reserve_space(inode, used);
@@ -1091,11 +1088,10 @@ void ext4_da_update_reserve_space(struct inode *inode,
1091 1088
1092 /* Update per-inode reservations */ 1089 /* Update per-inode reservations */
1093 ei->i_reserved_data_blocks -= used; 1090 ei->i_reserved_data_blocks -= used;
1094 used += ei->i_allocated_meta_blocks;
1095 ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks; 1091 ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
1096 allocated_meta_blocks = ei->i_allocated_meta_blocks; 1092 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
1093 used + ei->i_allocated_meta_blocks);
1097 ei->i_allocated_meta_blocks = 0; 1094 ei->i_allocated_meta_blocks = 0;
1098 percpu_counter_sub(&sbi->s_dirtyblocks_counter, used);
1099 1095
1100 if (ei->i_reserved_data_blocks == 0) { 1096 if (ei->i_reserved_data_blocks == 0) {
1101 /* 1097 /*
@@ -1103,30 +1099,23 @@ void ext4_da_update_reserve_space(struct inode *inode,
1103 * only when we have written all of the delayed 1099 * only when we have written all of the delayed
1104 * allocation blocks. 1100 * allocation blocks.
1105 */ 1101 */
1106 mdb_free = ei->i_reserved_meta_blocks; 1102 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
1103 ei->i_reserved_meta_blocks);
1107 ei->i_reserved_meta_blocks = 0; 1104 ei->i_reserved_meta_blocks = 0;
1108 ei->i_da_metadata_calc_len = 0; 1105 ei->i_da_metadata_calc_len = 0;
1109 percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
1110 } 1106 }
1111 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1107 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1112 1108
1113 /* Update quota subsystem */ 1109 /* Update quota subsystem for data blocks */
1114 if (quota_claim) { 1110 if (quota_claim)
1115 dquot_claim_block(inode, used); 1111 dquot_claim_block(inode, used);
1116 if (mdb_free) 1112 else {
1117 dquot_release_reservation_block(inode, mdb_free);
1118 } else {
1119 /* 1113 /*
1120 * We did fallocate with an offset that is already delayed 1114 * We did fallocate with an offset that is already delayed
1121 * allocated. So on delayed allocated writeback we should 1115 * allocated. So on delayed allocated writeback we should
1122 * not update the quota for allocated blocks. But then 1116 * not re-claim the quota for fallocated blocks.
1123 * converting an fallocate region to initialized region would
1124 * have caused a metadata allocation. So claim quota for
1125 * that
1126 */ 1117 */
1127 if (allocated_meta_blocks) 1118 dquot_release_reservation_block(inode, used);
1128 dquot_claim_block(inode, allocated_meta_blocks);
1129 dquot_release_reservation_block(inode, mdb_free + used);
1130 } 1119 }
1131 1120
1132 /* 1121 /*
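
The reserve-space rework above folds the two percpu counter updates into one: the dirty-blocks counter is decremented by used + i_allocated_meta_blocks in a single call, and the quota side either claims the data blocks (normal delayed-allocation writeback) or, per the new comment, releases the earlier reservation when the region had already been fallocated. A toy accounting walk-through with invented numbers; the claim-versus-release semantics are summarised roughly, not taken from dquot internals:

    #include <stdio.h>

    int main(void)
    {
        /* invented starting state */
        long dirty_blocks = 100;     /* fs-wide "dirty but not yet accounted" */
        long reserved_data = 12;     /* per-inode reserved data blocks */
        long allocated_meta = 3;     /* metadata blocks allocated for this write */
        long quota_used = 50;        /* blocks currently charged to quota */
        long quota_reserved = 15;    /* blocks reserved against quota */

        long used = 8;               /* data blocks actually written back */
        int quota_claim = 1;         /* 1 = normal delalloc, 0 = was fallocated */

        reserved_data -= used;
        dirty_blocks -= used + allocated_meta;   /* single decrement, as in the patch */
        allocated_meta = 0;

        if (quota_claim) {
            quota_used += used;      /* ~ dquot_claim_block(): reservation becomes a charge */
            quota_reserved -= used;
        } else {
            quota_reserved -= used;  /* ~ dquot_release_reservation_block() */
        }

        printf("dirty=%ld reserved_data=%ld quota_used=%ld quota_reserved=%ld\n",
               dirty_blocks, reserved_data, quota_used, quota_reserved);
        return 0;
    }
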
@@ -1139,15 +1128,15 @@ void ext4_da_update_reserve_space(struct inode *inode,
1139 ext4_discard_preallocations(inode); 1128 ext4_discard_preallocations(inode);
1140} 1129}
1141 1130
1142static int check_block_validity(struct inode *inode, const char *msg, 1131static int check_block_validity(struct inode *inode, const char *func,
1143 sector_t logical, sector_t phys, int len) 1132 struct ext4_map_blocks *map)
1144{ 1133{
1145 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) { 1134 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
1146 __ext4_error(inode->i_sb, msg, 1135 map->m_len)) {
1147 "inode #%lu logical block %llu mapped to %llu " 1136 ext4_error_inode(func, inode,
1148 "(size %d)", inode->i_ino, 1137 "lblock %lu mapped to illegal pblock %llu "
1149 (unsigned long long) logical, 1138 "(length %d)", (unsigned long) map->m_lblk,
1150 (unsigned long long) phys, len); 1139 map->m_pblk, map->m_len);
1151 return -EIO; 1140 return -EIO;
1152 } 1141 }
1153 return 0; 1142 return 0;
@@ -1212,15 +1201,15 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
1212} 1201}
1213 1202
1214/* 1203/*
1215 * The ext4_get_blocks() function tries to look up the requested blocks, 1204 * The ext4_map_blocks() function tries to look up the requested blocks,
1216 * and returns if the blocks are already mapped. 1205 * and returns if the blocks are already mapped.
1217 * 1206 *
1218 * Otherwise it takes the write lock of the i_data_sem and allocate blocks 1207 * Otherwise it takes the write lock of the i_data_sem and allocate blocks
1219 * and store the allocated blocks in the result buffer head and mark it 1208 * and store the allocated blocks in the result buffer head and mark it
1220 * mapped. 1209 * mapped.
1221 * 1210 *
1222 * If file type is extents based, it will call ext4_ext_get_blocks(), 1211 * If file type is extents based, it will call ext4_ext_map_blocks(),
1223 * Otherwise, call with ext4_ind_get_blocks() to handle indirect mapping 1212 * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
1224 * based files 1213 * based files
1225 * 1214 *
1226 * On success, it returns the number of blocks being mapped or allocate. 1215 * On success, it returns the number of blocks being mapped or allocate.
@@ -1233,35 +1222,29 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
1233 * 1222 *
1234 * It returns the error in case of allocation failure. 1223 * It returns the error in case of allocation failure.
1235 */ 1224 */
1236int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, 1225int ext4_map_blocks(handle_t *handle, struct inode *inode,
1237 unsigned int max_blocks, struct buffer_head *bh, 1226 struct ext4_map_blocks *map, int flags)
1238 int flags)
1239{ 1227{
1240 int retval; 1228 int retval;
1241 1229
1242 clear_buffer_mapped(bh); 1230 map->m_flags = 0;
1243 clear_buffer_unwritten(bh); 1231 ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
1244 1232 "logical block %lu\n", inode->i_ino, flags, map->m_len,
1245 ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u," 1233 (unsigned long) map->m_lblk);
1246 "logical block %lu\n", inode->i_ino, flags, max_blocks,
1247 (unsigned long)block);
1248 /* 1234 /*
1249 * Try to see if we can get the block without requesting a new 1235 * Try to see if we can get the block without requesting a new
1250 * file system block. 1236 * file system block.
1251 */ 1237 */
1252 down_read((&EXT4_I(inode)->i_data_sem)); 1238 down_read((&EXT4_I(inode)->i_data_sem));
1253 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { 1239 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
1254 retval = ext4_ext_get_blocks(handle, inode, block, max_blocks, 1240 retval = ext4_ext_map_blocks(handle, inode, map, 0);
1255 bh, 0);
1256 } else { 1241 } else {
1257 retval = ext4_ind_get_blocks(handle, inode, block, max_blocks, 1242 retval = ext4_ind_map_blocks(handle, inode, map, 0);
1258 bh, 0);
1259 } 1243 }
1260 up_read((&EXT4_I(inode)->i_data_sem)); 1244 up_read((&EXT4_I(inode)->i_data_sem));
1261 1245
1262 if (retval > 0 && buffer_mapped(bh)) { 1246 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
1263 int ret = check_block_validity(inode, "file system corruption", 1247 int ret = check_block_validity(inode, __func__, map);
1264 block, bh->b_blocknr, retval);
1265 if (ret != 0) 1248 if (ret != 0)
1266 return ret; 1249 return ret;
1267 } 1250 }
@@ -1277,7 +1260,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
1277 * ext4_ext_get_block() returns th create = 0 1260 * ext4_ext_get_block() returns th create = 0
1278 * with buffer head unmapped. 1261 * with buffer head unmapped.
1279 */ 1262 */
1280 if (retval > 0 && buffer_mapped(bh)) 1263 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
1281 return retval; 1264 return retval;
1282 1265
1283 /* 1266 /*
@@ -1290,7 +1273,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
1290 * of BH_Unwritten and BH_Mapped flags being simultaneously 1273 * of BH_Unwritten and BH_Mapped flags being simultaneously
1291 * set on the buffer_head. 1274 * set on the buffer_head.
1292 */ 1275 */
1293 clear_buffer_unwritten(bh); 1276 map->m_flags &= ~EXT4_MAP_UNWRITTEN;
1294 1277
1295 /* 1278 /*
1296 * New blocks allocate and/or writing to uninitialized extent 1279 * New blocks allocate and/or writing to uninitialized extent
@@ -1312,14 +1295,12 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
1312 * We need to check for EXT4 here because migrate 1295 * We need to check for EXT4 here because migrate
1313 * could have changed the inode type in between 1296 * could have changed the inode type in between
1314 */ 1297 */
1315 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { 1298 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
1316 retval = ext4_ext_get_blocks(handle, inode, block, max_blocks, 1299 retval = ext4_ext_map_blocks(handle, inode, map, flags);
1317 bh, flags);
1318 } else { 1300 } else {
1319 retval = ext4_ind_get_blocks(handle, inode, block, 1301 retval = ext4_ind_map_blocks(handle, inode, map, flags);
1320 max_blocks, bh, flags);
1321 1302
1322 if (retval > 0 && buffer_new(bh)) { 1303 if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
1323 /* 1304 /*
1324 * We allocated new blocks which will result in 1305 * We allocated new blocks which will result in
1325 * i_data's format changing. Force the migrate 1306 * i_data's format changing. Force the migrate
@@ -1342,10 +1323,10 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
1342 EXT4_I(inode)->i_delalloc_reserved_flag = 0; 1323 EXT4_I(inode)->i_delalloc_reserved_flag = 0;
1343 1324
1344 up_write((&EXT4_I(inode)->i_data_sem)); 1325 up_write((&EXT4_I(inode)->i_data_sem));
1345 if (retval > 0 && buffer_mapped(bh)) { 1326 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
1346 int ret = check_block_validity(inode, "file system " 1327 int ret = check_block_validity(inode,
1347 "corruption after allocation", 1328 "ext4_map_blocks_after_alloc",
1348 block, bh->b_blocknr, retval); 1329 map);
1349 if (ret != 0) 1330 if (ret != 0)
1350 return ret; 1331 return ret;
1351 } 1332 }
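The hunk above collapses the old (block, max_blocks, bh) calling convention into a single struct ext4_map_blocks that carries both the request and the result. A minimal sketch of the new interface, with the field set inferred from the code above (the definitive struct lives in fs/ext4/ext4.h and may carry more members):

	/* Sketch only: fields as used in this patch. */
	struct ext4_map_blocks {
		ext4_fsblk_t	m_pblk;		/* result: first physical block */
		ext4_lblk_t	m_lblk;		/* input: first logical block to map */
		unsigned int	m_len;		/* input: blocks wanted; the call returns blocks mapped */
		unsigned int	m_flags;	/* result: EXT4_MAP_MAPPED/NEW/UNWRITTEN/UNINIT bits */
	};

	static int example_lookup(handle_t *handle, struct inode *inode,
				  ext4_lblk_t lblk, unsigned int len)
	{
		struct ext4_map_blocks map;
		int ret;

		map.m_lblk = lblk;
		map.m_len = len;
		ret = ext4_map_blocks(handle, inode, &map, 0);	/* flags == 0: lookup only */
		if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
			return ret;	/* map.m_pblk .. map.m_pblk + ret - 1 are on disk */
		return ret;		/* 0 == hole, < 0 == error */
	}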
@@ -1355,109 +1336,109 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
1355/* Maximum number of blocks we map for direct IO at once. */ 1336/* Maximum number of blocks we map for direct IO at once. */
1356#define DIO_MAX_BLOCKS 4096 1337#define DIO_MAX_BLOCKS 4096
1357 1338
1358int ext4_get_block(struct inode *inode, sector_t iblock, 1339static int _ext4_get_block(struct inode *inode, sector_t iblock,
1359 struct buffer_head *bh_result, int create) 1340 struct buffer_head *bh, int flags)
1360{ 1341{
1361 handle_t *handle = ext4_journal_current_handle(); 1342 handle_t *handle = ext4_journal_current_handle();
1343 struct ext4_map_blocks map;
1362 int ret = 0, started = 0; 1344 int ret = 0, started = 0;
1363 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1364 int dio_credits; 1345 int dio_credits;
1365 1346
1366 if (create && !handle) { 1347 map.m_lblk = iblock;
1348 map.m_len = bh->b_size >> inode->i_blkbits;
1349
1350 if (flags && !handle) {
1367 /* Direct IO write... */ 1351 /* Direct IO write... */
1368 if (max_blocks > DIO_MAX_BLOCKS) 1352 if (map.m_len > DIO_MAX_BLOCKS)
1369 max_blocks = DIO_MAX_BLOCKS; 1353 map.m_len = DIO_MAX_BLOCKS;
1370 dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); 1354 dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
1371 handle = ext4_journal_start(inode, dio_credits); 1355 handle = ext4_journal_start(inode, dio_credits);
1372 if (IS_ERR(handle)) { 1356 if (IS_ERR(handle)) {
1373 ret = PTR_ERR(handle); 1357 ret = PTR_ERR(handle);
1374 goto out; 1358 return ret;
1375 } 1359 }
1376 started = 1; 1360 started = 1;
1377 } 1361 }
1378 1362
1379 ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, 1363 ret = ext4_map_blocks(handle, inode, &map, flags);
1380 create ? EXT4_GET_BLOCKS_CREATE : 0);
1381 if (ret > 0) { 1364 if (ret > 0) {
1382 bh_result->b_size = (ret << inode->i_blkbits); 1365 map_bh(bh, inode->i_sb, map.m_pblk);
1366 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
1367 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
1383 ret = 0; 1368 ret = 0;
1384 } 1369 }
1385 if (started) 1370 if (started)
1386 ext4_journal_stop(handle); 1371 ext4_journal_stop(handle);
1387out:
1388 return ret; 1372 return ret;
1389} 1373}
1390 1374
1375int ext4_get_block(struct inode *inode, sector_t iblock,
1376 struct buffer_head *bh, int create)
1377{
1378 return _ext4_get_block(inode, iblock, bh,
1379 create ? EXT4_GET_BLOCKS_CREATE : 0);
1380}
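With the mapping logic centralized, the remaining get_block callbacks in this file differ only in the flags they hand to _ext4_get_block(); the later hunks for noalloc_get_block_write() and ext4_get_block_write() reduce to the same one-liner. Summarized as a sketch:

	/*
	 * get_block callbacks after this patch (sketch):
	 *
	 *   ext4_get_block()           flags = create ? EXT4_GET_BLOCKS_CREATE : 0
	 *   noalloc_get_block_write()  flags = 0; writepage path, never allocates
	 *   ext4_get_block_write()     flags = EXT4_GET_BLOCKS_IO_CREATE_EXT; DIO and
	 *                              buffer writes allocate an uninitialized extent
	 *                              that is converted to initialized after the I/O
	 *                              completes
	 */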
1381
1391/* 1382/*
1392 * `handle' can be NULL if create is zero 1383 * `handle' can be NULL if create is zero
1393 */ 1384 */
1394struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, 1385struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1395 ext4_lblk_t block, int create, int *errp) 1386 ext4_lblk_t block, int create, int *errp)
1396{ 1387{
1397 struct buffer_head dummy; 1388 struct ext4_map_blocks map;
1389 struct buffer_head *bh;
1398 int fatal = 0, err; 1390 int fatal = 0, err;
1399 int flags = 0;
1400 1391
1401 J_ASSERT(handle != NULL || create == 0); 1392 J_ASSERT(handle != NULL || create == 0);
1402 1393
1403 dummy.b_state = 0; 1394 map.m_lblk = block;
1404 dummy.b_blocknr = -1000; 1395 map.m_len = 1;
1405 buffer_trace_init(&dummy.b_history); 1396 err = ext4_map_blocks(handle, inode, &map,
1406 if (create) 1397 create ? EXT4_GET_BLOCKS_CREATE : 0);
1407 flags |= EXT4_GET_BLOCKS_CREATE; 1398
1408 err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags); 1399 if (err < 0)
1409 /* 1400 *errp = err;
1410 * ext4_get_blocks() returns number of blocks mapped. 0 in 1401 if (err <= 0)
1411 * case of a HOLE. 1402 return NULL;
1412 */ 1403 *errp = 0;
1413 if (err > 0) { 1404
1414 if (err > 1) 1405 bh = sb_getblk(inode->i_sb, map.m_pblk);
1415 WARN_ON(1); 1406 if (!bh) {
1416 err = 0; 1407 *errp = -EIO;
1408 return NULL;
1417 } 1409 }
1418 *errp = err; 1410 if (map.m_flags & EXT4_MAP_NEW) {
1419 if (!err && buffer_mapped(&dummy)) { 1411 J_ASSERT(create != 0);
1420 struct buffer_head *bh; 1412 J_ASSERT(handle != NULL);
1421 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1422 if (!bh) {
1423 *errp = -EIO;
1424 goto err;
1425 }
1426 if (buffer_new(&dummy)) {
1427 J_ASSERT(create != 0);
1428 J_ASSERT(handle != NULL);
1429 1413
1430 /* 1414 /*
1431 * Now that we do not always journal data, we should 1415 * Now that we do not always journal data, we should
1432 * keep in mind whether this should always journal the 1416 * keep in mind whether this should always journal the
1433 * new buffer as metadata. For now, regular file 1417 * new buffer as metadata. For now, regular file
1434 * writes use ext4_get_block instead, so it's not a 1418 * writes use ext4_get_block instead, so it's not a
1435 * problem. 1419 * problem.
1436 */ 1420 */
1437 lock_buffer(bh); 1421 lock_buffer(bh);
1438 BUFFER_TRACE(bh, "call get_create_access"); 1422 BUFFER_TRACE(bh, "call get_create_access");
1439 fatal = ext4_journal_get_create_access(handle, bh); 1423 fatal = ext4_journal_get_create_access(handle, bh);
1440 if (!fatal && !buffer_uptodate(bh)) { 1424 if (!fatal && !buffer_uptodate(bh)) {
1441 memset(bh->b_data, 0, inode->i_sb->s_blocksize); 1425 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1442 set_buffer_uptodate(bh); 1426 set_buffer_uptodate(bh);
1443 }
1444 unlock_buffer(bh);
1445 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1446 err = ext4_handle_dirty_metadata(handle, inode, bh);
1447 if (!fatal)
1448 fatal = err;
1449 } else {
1450 BUFFER_TRACE(bh, "not a new buffer");
1451 }
1452 if (fatal) {
1453 *errp = fatal;
1454 brelse(bh);
1455 bh = NULL;
1456 } 1427 }
1457 return bh; 1428 unlock_buffer(bh);
1429 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1430 err = ext4_handle_dirty_metadata(handle, inode, bh);
1431 if (!fatal)
1432 fatal = err;
1433 } else {
1434 BUFFER_TRACE(bh, "not a new buffer");
1458 } 1435 }
1459err: 1436 if (fatal) {
1460 return NULL; 1437 *errp = fatal;
1438 brelse(bh);
1439 bh = NULL;
1440 }
1441 return bh;
1461} 1442}
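The rewritten ext4_getblk() keeps the old contract for its callers: NULL with a negative *errp on failure, NULL without an error for a hole, otherwise a buffer head referring to map.m_pblk, with freshly allocated blocks zeroed and journalled as metadata. A hedged caller sketch (illustrative only; ext4_bread() below is the real in-tree user):

	struct buffer_head *bh;
	int err = 0;

	bh = ext4_getblk(handle, inode, block, create, &err);
	if (!bh) {
		if (err < 0)
			return err;	/* mapping error or sb_getblk() failure */
		return 0;		/* nothing mapped (hole, create == 0) */
	}
	/* If the block was just allocated, it has already been zeroed and passed
	 * through ext4_journal_get_create_access()/ext4_handle_dirty_metadata(). */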
1462 1443
1463struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, 1444struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
@@ -1860,7 +1841,7 @@ static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
1860 int retries = 0; 1841 int retries = 0;
1861 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1842 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1862 struct ext4_inode_info *ei = EXT4_I(inode); 1843 struct ext4_inode_info *ei = EXT4_I(inode);
1863 unsigned long md_needed, md_reserved; 1844 unsigned long md_needed;
1864 int ret; 1845 int ret;
1865 1846
1866 /* 1847 /*
@@ -1870,22 +1851,24 @@ static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
1870 */ 1851 */
1871repeat: 1852repeat:
1872 spin_lock(&ei->i_block_reservation_lock); 1853 spin_lock(&ei->i_block_reservation_lock);
1873 md_reserved = ei->i_reserved_meta_blocks;
1874 md_needed = ext4_calc_metadata_amount(inode, lblock); 1854 md_needed = ext4_calc_metadata_amount(inode, lblock);
1875 trace_ext4_da_reserve_space(inode, md_needed); 1855 trace_ext4_da_reserve_space(inode, md_needed);
1876 spin_unlock(&ei->i_block_reservation_lock); 1856 spin_unlock(&ei->i_block_reservation_lock);
1877 1857
1878 /* 1858 /*
1879 * Make quota reservation here to prevent quota overflow 1859 * We will charge metadata quota at writeout time; this saves
1880 * later. Real quota accounting is done at pages writeout 1860 * us from metadata over-estimation, though we may go over by
1881 * time. 1861 * a small amount in the end. Here we just reserve for data.
1882 */ 1862 */
1883 ret = dquot_reserve_block(inode, md_needed + 1); 1863 ret = dquot_reserve_block(inode, 1);
1884 if (ret) 1864 if (ret)
1885 return ret; 1865 return ret;
1886 1866 /*
1867 * We do still charge estimated metadata to the sb though;
1868 * we cannot afford to run out of free blocks.
1869 */
1887 if (ext4_claim_free_blocks(sbi, md_needed + 1)) { 1870 if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
1888 dquot_release_reservation_block(inode, md_needed + 1); 1871 dquot_release_reservation_block(inode, 1);
1889 if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1872 if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1890 yield(); 1873 yield();
1891 goto repeat; 1874 goto repeat;
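The reservation split introduced here is worth spelling out: quota is now charged for the data block only, while the estimated metadata is still claimed from the superblock counters so the allocator cannot overcommit. Sketch of the per-block bookkeeping, using the names from the code above:

	/*
	 * Reserved per delalloc data block (sketch):
	 *
	 *   dquot_reserve_block(inode, 1);               quota: the data block only;
	 *                                                metadata is charged at
	 *                                                writeout, once its real size
	 *                                                is known
	 *   ext4_claim_free_blocks(sbi, md_needed + 1);  sb counters: data block plus
	 *                                                estimated metadata
	 *
	 * If the superblock claim fails, the quota reservation is released again with
	 * dquot_release_reservation_block(inode, 1) and the step is retried.
	 */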
@@ -1910,6 +1893,7 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
1910 1893
1911 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1894 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1912 1895
1896 trace_ext4_da_release_space(inode, to_free);
1913 if (unlikely(to_free > ei->i_reserved_data_blocks)) { 1897 if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1914 /* 1898 /*
1915 * if there aren't enough reserved blocks, then the 1899 * if there aren't enough reserved blocks, then the
@@ -1932,12 +1916,13 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
1932 * only when we have written all of the delayed 1916 * only when we have written all of the delayed
1933 * allocation blocks. 1917 * allocation blocks.
1934 */ 1918 */
1935 to_free += ei->i_reserved_meta_blocks; 1919 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
1920 ei->i_reserved_meta_blocks);
1936 ei->i_reserved_meta_blocks = 0; 1921 ei->i_reserved_meta_blocks = 0;
1937 ei->i_da_metadata_calc_len = 0; 1922 ei->i_da_metadata_calc_len = 0;
1938 } 1923 }
1939 1924
1940 /* update fs dirty blocks counter */ 1925 /* update fs dirty data blocks counter */
1941 percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free); 1926 percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free);
1942 1927
1943 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1928 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
@@ -2042,28 +2027,23 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd)
2042/* 2027/*
2043 * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers 2028 * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
2044 * 2029 *
2045 * @mpd->inode - inode to walk through
2046 * @exbh->b_blocknr - first block on a disk
2047 * @exbh->b_size - amount of space in bytes
2048 * @logical - first logical block to start assignment with
2049 *
2050 * the function goes through all passed space and put actual disk 2030 * the function goes through all passed space and put actual disk
2051 * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten 2031 * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten
2052 */ 2032 */
2053static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, 2033static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd,
2054 struct buffer_head *exbh) 2034 struct ext4_map_blocks *map)
2055{ 2035{
2056 struct inode *inode = mpd->inode; 2036 struct inode *inode = mpd->inode;
2057 struct address_space *mapping = inode->i_mapping; 2037 struct address_space *mapping = inode->i_mapping;
2058 int blocks = exbh->b_size >> inode->i_blkbits; 2038 int blocks = map->m_len;
2059 sector_t pblock = exbh->b_blocknr, cur_logical; 2039 sector_t pblock = map->m_pblk, cur_logical;
2060 struct buffer_head *head, *bh; 2040 struct buffer_head *head, *bh;
2061 pgoff_t index, end; 2041 pgoff_t index, end;
2062 struct pagevec pvec; 2042 struct pagevec pvec;
2063 int nr_pages, i; 2043 int nr_pages, i;
2064 2044
2065 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 2045 index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2066 end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 2046 end = (map->m_lblk + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
2067 cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 2047 cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2068 2048
2069 pagevec_init(&pvec, 0); 2049 pagevec_init(&pvec, 0);
@@ -2090,17 +2070,16 @@ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
2090 2070
2091 /* skip blocks out of the range */ 2071 /* skip blocks out of the range */
2092 do { 2072 do {
2093 if (cur_logical >= logical) 2073 if (cur_logical >= map->m_lblk)
2094 break; 2074 break;
2095 cur_logical++; 2075 cur_logical++;
2096 } while ((bh = bh->b_this_page) != head); 2076 } while ((bh = bh->b_this_page) != head);
2097 2077
2098 do { 2078 do {
2099 if (cur_logical >= logical + blocks) 2079 if (cur_logical >= map->m_lblk + blocks)
2100 break; 2080 break;
2101 2081
2102 if (buffer_delay(bh) || 2082 if (buffer_delay(bh) || buffer_unwritten(bh)) {
2103 buffer_unwritten(bh)) {
2104 2083
2105 BUG_ON(bh->b_bdev != inode->i_sb->s_bdev); 2084 BUG_ON(bh->b_bdev != inode->i_sb->s_bdev);
2106 2085
@@ -2119,7 +2098,7 @@ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
2119 } else if (buffer_mapped(bh)) 2098 } else if (buffer_mapped(bh))
2120 BUG_ON(bh->b_blocknr != pblock); 2099 BUG_ON(bh->b_blocknr != pblock);
2121 2100
2122 if (buffer_uninit(exbh)) 2101 if (map->m_flags & EXT4_MAP_UNINIT)
2123 set_buffer_uninit(bh); 2102 set_buffer_uninit(bh);
2124 cur_logical++; 2103 cur_logical++;
2125 pblock++; 2104 pblock++;
@@ -2130,21 +2109,6 @@ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
2130} 2109}
2131 2110
2132 2111
2133/*
2134 * __unmap_underlying_blocks - just a helper function to unmap
2135 * set of blocks described by @bh
2136 */
2137static inline void __unmap_underlying_blocks(struct inode *inode,
2138 struct buffer_head *bh)
2139{
2140 struct block_device *bdev = inode->i_sb->s_bdev;
2141 int blocks, i;
2142
2143 blocks = bh->b_size >> inode->i_blkbits;
2144 for (i = 0; i < blocks; i++)
2145 unmap_underlying_metadata(bdev, bh->b_blocknr + i);
2146}
2147
2148static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd, 2112static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
2149 sector_t logical, long blk_cnt) 2113 sector_t logical, long blk_cnt)
2150{ 2114{
@@ -2206,7 +2170,7 @@ static void ext4_print_free_blocks(struct inode *inode)
2206static int mpage_da_map_blocks(struct mpage_da_data *mpd) 2170static int mpage_da_map_blocks(struct mpage_da_data *mpd)
2207{ 2171{
2208 int err, blks, get_blocks_flags; 2172 int err, blks, get_blocks_flags;
2209 struct buffer_head new; 2173 struct ext4_map_blocks map;
2210 sector_t next = mpd->b_blocknr; 2174 sector_t next = mpd->b_blocknr;
2211 unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; 2175 unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
2212 loff_t disksize = EXT4_I(mpd->inode)->i_disksize; 2176 loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
@@ -2247,15 +2211,15 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
2247 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting 2211 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
2248 * variables are updated after the blocks have been allocated. 2212 * variables are updated after the blocks have been allocated.
2249 */ 2213 */
2250 new.b_state = 0; 2214 map.m_lblk = next;
2215 map.m_len = max_blocks;
2251 get_blocks_flags = EXT4_GET_BLOCKS_CREATE; 2216 get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
2252 if (ext4_should_dioread_nolock(mpd->inode)) 2217 if (ext4_should_dioread_nolock(mpd->inode))
2253 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 2218 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2254 if (mpd->b_state & (1 << BH_Delay)) 2219 if (mpd->b_state & (1 << BH_Delay))
2255 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 2220 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2256 2221
2257 blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks, 2222 blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
2258 &new, get_blocks_flags);
2259 if (blks < 0) { 2223 if (blks < 0) {
2260 err = blks; 2224 err = blks;
2261 /* 2225 /*
@@ -2282,7 +2246,7 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
2282 ext4_msg(mpd->inode->i_sb, KERN_CRIT, 2246 ext4_msg(mpd->inode->i_sb, KERN_CRIT,
2283 "delayed block allocation failed for inode %lu at " 2247 "delayed block allocation failed for inode %lu at "
2284 "logical offset %llu with max blocks %zd with " 2248 "logical offset %llu with max blocks %zd with "
2285 "error %d\n", mpd->inode->i_ino, 2249 "error %d", mpd->inode->i_ino,
2286 (unsigned long long) next, 2250 (unsigned long long) next,
2287 mpd->b_size >> mpd->inode->i_blkbits, err); 2251 mpd->b_size >> mpd->inode->i_blkbits, err);
2288 printk(KERN_CRIT "This should not happen!! " 2252 printk(KERN_CRIT "This should not happen!! "
@@ -2297,10 +2261,13 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
2297 } 2261 }
2298 BUG_ON(blks == 0); 2262 BUG_ON(blks == 0);
2299 2263
2300 new.b_size = (blks << mpd->inode->i_blkbits); 2264 if (map.m_flags & EXT4_MAP_NEW) {
2265 struct block_device *bdev = mpd->inode->i_sb->s_bdev;
2266 int i;
2301 2267
2302 if (buffer_new(&new)) 2268 for (i = 0; i < map.m_len; i++)
2303 __unmap_underlying_blocks(mpd->inode, &new); 2269 unmap_underlying_metadata(bdev, map.m_pblk + i);
2270 }
2304 2271
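The open-coded loop above replaces the removed __unmap_underlying_blocks() helper: when EXT4_MAP_NEW is set the physical blocks were just allocated, and unmap_underlying_metadata() drops any stale buffer_heads the block device may still hold for them (for instance from an earlier life as metadata), so a delayed writeback of those stale buffers cannot clobber the new data.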
2305 /* 2272 /*
2306 * If blocks are delayed marked, we need to 2273 * If blocks are delayed marked, we need to
@@ -2308,7 +2275,7 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
2308 */ 2275 */
2309 if ((mpd->b_state & (1 << BH_Delay)) || 2276 if ((mpd->b_state & (1 << BH_Delay)) ||
2310 (mpd->b_state & (1 << BH_Unwritten))) 2277 (mpd->b_state & (1 << BH_Unwritten)))
2311 mpage_put_bnr_to_bhs(mpd, next, &new); 2278 mpage_put_bnr_to_bhs(mpd, &map);
2312 2279
2313 if (ext4_should_order_data(mpd->inode)) { 2280 if (ext4_should_order_data(mpd->inode)) {
2314 err = ext4_jbd2_file_inode(handle, mpd->inode); 2281 err = ext4_jbd2_file_inode(handle, mpd->inode);
@@ -2349,8 +2316,17 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
2349 sector_t next; 2316 sector_t next;
2350 int nrblocks = mpd->b_size >> mpd->inode->i_blkbits; 2317 int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
2351 2318
2319 /*
2320 * XXX Don't go larger than mballoc is willing to allocate
2321 * This is a stopgap solution. We eventually need to fold
2322 * mpage_da_submit_io() into this function and then call
2323 * ext4_get_blocks() multiple times in a loop
2324 */
2325 if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
2326 goto flush_it;
2327
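For scale: with the default 4 KiB block size this caps a single mapping call at 8*1024*1024/4096 = 2048 blocks (8 MiB of data); with 1 KiB blocks the cap is 8192 blocks.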
2352 /* check if the reserved journal credits might overflow */ 2328 /* check if the reserved journal credits might overflow */
2353 if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) { 2329 if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
2354 if (nrblocks >= EXT4_MAX_TRANS_DATA) { 2330 if (nrblocks >= EXT4_MAX_TRANS_DATA) {
2355 /* 2331 /*
2356 * With non-extent format we are limited by the journal 2332 * With non-extent format we are limited by the journal
@@ -2423,17 +2399,6 @@ static int __mpage_da_writepage(struct page *page,
2423 struct buffer_head *bh, *head; 2399 struct buffer_head *bh, *head;
2424 sector_t logical; 2400 sector_t logical;
2425 2401
2426 if (mpd->io_done) {
2427 /*
2428 * Rest of the page in the page_vec
2429 * redirty then and skip then. We will
2430 * try to write them again after
2431 * starting a new transaction
2432 */
2433 redirty_page_for_writepage(wbc, page);
2434 unlock_page(page);
2435 return MPAGE_DA_EXTENT_TAIL;
2436 }
2437 /* 2402 /*
2438 * Can we merge this page to current extent? 2403 * Can we merge this page to current extent?
2439 */ 2404 */
@@ -2528,8 +2493,9 @@ static int __mpage_da_writepage(struct page *page,
2528 * initialized properly. 2493 * initialized properly.
2529 */ 2494 */
2530static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 2495static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2531 struct buffer_head *bh_result, int create) 2496 struct buffer_head *bh, int create)
2532{ 2497{
2498 struct ext4_map_blocks map;
2533 int ret = 0; 2499 int ret = 0;
2534 sector_t invalid_block = ~((sector_t) 0xffff); 2500 sector_t invalid_block = ~((sector_t) 0xffff);
2535 2501
@@ -2537,16 +2503,22 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2537 invalid_block = ~0; 2503 invalid_block = ~0;
2538 2504
2539 BUG_ON(create == 0); 2505 BUG_ON(create == 0);
2540 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); 2506 BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
2507
2508 map.m_lblk = iblock;
2509 map.m_len = 1;
2541 2510
2542 /* 2511 /*
2543 * first, we need to know whether the block is allocated already 2512 * first, we need to know whether the block is allocated already
2544 * preallocated blocks are unmapped but should be treated 2513 * preallocated blocks are unmapped but should be treated
2545 * the same as allocated blocks. 2514 * the same as allocated blocks.
2546 */ 2515 */
2547 ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0); 2516 ret = ext4_map_blocks(NULL, inode, &map, 0);
2548 if ((ret == 0) && !buffer_delay(bh_result)) { 2517 if (ret < 0)
2549 /* the block isn't (pre)allocated yet, let's reserve space */ 2518 return ret;
2519 if (ret == 0) {
2520 if (buffer_delay(bh))
2521 return 0; /* Not sure this could or should happen */
2550 /* 2522 /*
2551 * XXX: __block_prepare_write() unmaps passed block, 2523 * XXX: __block_prepare_write() unmaps passed block,
2552 * is it OK? 2524 * is it OK?
@@ -2556,26 +2528,26 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2556 /* not enough space to reserve */ 2528 /* not enough space to reserve */
2557 return ret; 2529 return ret;
2558 2530
2559 map_bh(bh_result, inode->i_sb, invalid_block); 2531 map_bh(bh, inode->i_sb, invalid_block);
2560 set_buffer_new(bh_result); 2532 set_buffer_new(bh);
2561 set_buffer_delay(bh_result); 2533 set_buffer_delay(bh);
2562 } else if (ret > 0) { 2534 return 0;
2563 bh_result->b_size = (ret << inode->i_blkbits);
2564 if (buffer_unwritten(bh_result)) {
2565 /* A delayed write to unwritten bh should
2566 * be marked new and mapped. Mapped ensures
2567 * that we don't do get_block multiple times
2568 * when we write to the same offset and new
2569 * ensures that we do proper zero out for
2570 * partial write.
2571 */
2572 set_buffer_new(bh_result);
2573 set_buffer_mapped(bh_result);
2574 }
2575 ret = 0;
2576 } 2535 }
2577 2536
2578 return ret; 2537 map_bh(bh, inode->i_sb, map.m_pblk);
2538 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
2539
2540 if (buffer_unwritten(bh)) {
2541 /* A delayed write to unwritten bh should be marked
2542 * new and mapped. Mapped ensures that we don't do
2543 * get_block multiple times when we write to the same
2544 * offset and new ensures that we do proper zero out
2545 * for partial write.
2546 */
2547 set_buffer_new(bh);
2548 set_buffer_mapped(bh);
2549 }
2550 return 0;
2579} 2551}
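The rewritten ext4_da_get_block_prep() boils down to four outcomes of the single-block lookup, sketched here with the flag names used in this patch:

	/*
	 * ext4_da_get_block_prep() outcomes (sketch):
	 *
	 *   ret < 0                       propagate the error
	 *   ret == 0 (hole)               ext4_da_reserve_space(), then map the bh to
	 *                                 the dummy invalid_block and set BH_New|BH_Delay
	 *   ret > 0, EXT4_MAP_UNWRITTEN   map the bh and also set BH_New|BH_Mapped so
	 *                                 partial writes get zeroed and get_block is not
	 *                                 called again for the same offset
	 *   ret > 0, EXT4_MAP_MAPPED      plain map_bh() to map.m_pblk
	 */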
2580 2552
2581/* 2553/*
@@ -2597,21 +2569,8 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2597static int noalloc_get_block_write(struct inode *inode, sector_t iblock, 2569static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
2598 struct buffer_head *bh_result, int create) 2570 struct buffer_head *bh_result, int create)
2599{ 2571{
2600 int ret = 0;
2601 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
2602
2603 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); 2572 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
2604 2573 return _ext4_get_block(inode, iblock, bh_result, 0);
2605 /*
2606 * we don't want to do block allocation in writepage
2607 * so call get_block_wrap with create = 0
2608 */
2609 ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0);
2610 if (ret > 0) {
2611 bh_result->b_size = (ret << inode->i_blkbits);
2612 ret = 0;
2613 }
2614 return ret;
2615} 2574}
2616 2575
2617static int bget_one(handle_t *handle, struct buffer_head *bh) 2576static int bget_one(handle_t *handle, struct buffer_head *bh)
@@ -2821,13 +2780,131 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
2821 * number of contiguous block. So we will limit 2780 * number of contiguous block. So we will limit
2822 * number of contiguous block to a sane value 2781 * number of contiguous block to a sane value
2823 */ 2782 */
2824 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) && 2783 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2825 (max_blocks > EXT4_MAX_TRANS_DATA)) 2784 (max_blocks > EXT4_MAX_TRANS_DATA))
2826 max_blocks = EXT4_MAX_TRANS_DATA; 2785 max_blocks = EXT4_MAX_TRANS_DATA;
2827 2786
2828 return ext4_chunk_trans_blocks(inode, max_blocks); 2787 return ext4_chunk_trans_blocks(inode, max_blocks);
2829} 2788}
2830 2789
2790/*
2791 * write_cache_pages_da - walk the list of dirty pages of the given
2792 * address space and call the callback function (which usually writes
2793 * the pages).
2794 *
2795 * This is a forked version of write_cache_pages(). Differences:
2796 * Range cyclic is ignored.
2797 * no_nrwrite_index_update is always presumed true
2798 */
2799static int write_cache_pages_da(struct address_space *mapping,
2800 struct writeback_control *wbc,
2801 struct mpage_da_data *mpd)
2802{
2803 int ret = 0;
2804 int done = 0;
2805 struct pagevec pvec;
2806 int nr_pages;
2807 pgoff_t index;
2808 pgoff_t end; /* Inclusive */
2809 long nr_to_write = wbc->nr_to_write;
2810
2811 pagevec_init(&pvec, 0);
2812 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2813 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2814
2815 while (!done && (index <= end)) {
2816 int i;
2817
2818 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2819 PAGECACHE_TAG_DIRTY,
2820 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2821 if (nr_pages == 0)
2822 break;
2823
2824 for (i = 0; i < nr_pages; i++) {
2825 struct page *page = pvec.pages[i];
2826
2827 /*
2828 * At this point, the page may be truncated or
2829 * invalidated (changing page->mapping to NULL), or
2830 * even swizzled back from swapper_space to tmpfs file
2831 * mapping. However, page->index will not change
2832 * because we have a reference on the page.
2833 */
2834 if (page->index > end) {
2835 done = 1;
2836 break;
2837 }
2838
2839 lock_page(page);
2840
2841 /*
2842 * Page truncated or invalidated. We can freely skip it
2843 * then, even for data integrity operations: the page
2844 * has disappeared concurrently, so there could be no
2845 * real expectation of this data integrity operation
2846 * even if there is now a new, dirty page at the same
2847 * pagecache address.
2848 */
2849 if (unlikely(page->mapping != mapping)) {
2850continue_unlock:
2851 unlock_page(page);
2852 continue;
2853 }
2854
2855 if (!PageDirty(page)) {
2856 /* someone wrote it for us */
2857 goto continue_unlock;
2858 }
2859
2860 if (PageWriteback(page)) {
2861 if (wbc->sync_mode != WB_SYNC_NONE)
2862 wait_on_page_writeback(page);
2863 else
2864 goto continue_unlock;
2865 }
2866
2867 BUG_ON(PageWriteback(page));
2868 if (!clear_page_dirty_for_io(page))
2869 goto continue_unlock;
2870
2871 ret = __mpage_da_writepage(page, wbc, mpd);
2872 if (unlikely(ret)) {
2873 if (ret == AOP_WRITEPAGE_ACTIVATE) {
2874 unlock_page(page);
2875 ret = 0;
2876 } else {
2877 done = 1;
2878 break;
2879 }
2880 }
2881
2882 if (nr_to_write > 0) {
2883 nr_to_write--;
2884 if (nr_to_write == 0 &&
2885 wbc->sync_mode == WB_SYNC_NONE) {
2886 /*
2887 * We stop writing back only if we are
2888 * not doing integrity sync. In case of
2889 * integrity sync we have to keep going
2890 * because someone may be concurrently
2891 * dirtying pages, and we might have
2892 * synced a lot of newly appeared dirty
2893 * pages, but have not synced all of the
2894 * old dirty pages.
2895 */
2896 done = 1;
2897 break;
2898 }
2899 }
2900 }
2901 pagevec_release(&pvec);
2902 cond_resched();
2903 }
2904 return ret;
2905}
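write_cache_pages_da() takes over from write_cache_pages() in ext4_da_writepages() (see the retry loop further down). A hedged sketch of the calling sequence, limited to the mpd fields visible in this patch:

	struct mpage_da_data mpd;
	int ret;

	mpd.wbc = wbc;
	mpd.inode = mapping->host;
	mpd.b_size = 0;
	mpd.b_state = 0;
	mpd.io_done = 0;
	mpd.pages_written = 0;
	mpd.retval = 0;
	ret = write_cache_pages_da(mapping, wbc, &mpd);
	/* __mpage_da_writepage() has accumulated one contiguous extent in mpd; if no
	 * I/O was issued yet, mpage_da_map_blocks() and mpage_da_submit_io() finish
	 * the job, exactly as in the existing retry loop below. */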
2906
2907
2831static int ext4_da_writepages(struct address_space *mapping, 2908static int ext4_da_writepages(struct address_space *mapping,
2832 struct writeback_control *wbc) 2909 struct writeback_control *wbc)
2833{ 2910{
@@ -2836,7 +2913,6 @@ static int ext4_da_writepages(struct address_space *mapping,
2836 handle_t *handle = NULL; 2913 handle_t *handle = NULL;
2837 struct mpage_da_data mpd; 2914 struct mpage_da_data mpd;
2838 struct inode *inode = mapping->host; 2915 struct inode *inode = mapping->host;
2839 int no_nrwrite_index_update;
2840 int pages_written = 0; 2916 int pages_written = 0;
2841 long pages_skipped; 2917 long pages_skipped;
2842 unsigned int max_pages; 2918 unsigned int max_pages;
@@ -2916,12 +2992,6 @@ static int ext4_da_writepages(struct address_space *mapping,
2916 mpd.wbc = wbc; 2992 mpd.wbc = wbc;
2917 mpd.inode = mapping->host; 2993 mpd.inode = mapping->host;
2918 2994
2919 /*
2920 * we don't want write_cache_pages to update
2921 * nr_to_write and writeback_index
2922 */
2923 no_nrwrite_index_update = wbc->no_nrwrite_index_update;
2924 wbc->no_nrwrite_index_update = 1;
2925 pages_skipped = wbc->pages_skipped; 2995 pages_skipped = wbc->pages_skipped;
2926 2996
2927retry: 2997retry:
@@ -2941,7 +3011,7 @@ retry:
2941 if (IS_ERR(handle)) { 3011 if (IS_ERR(handle)) {
2942 ret = PTR_ERR(handle); 3012 ret = PTR_ERR(handle);
2943 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 3013 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2944 "%ld pages, ino %lu; err %d\n", __func__, 3014 "%ld pages, ino %lu; err %d", __func__,
2945 wbc->nr_to_write, inode->i_ino, ret); 3015 wbc->nr_to_write, inode->i_ino, ret);
2946 goto out_writepages; 3016 goto out_writepages;
2947 } 3017 }
@@ -2963,8 +3033,7 @@ retry:
2963 mpd.io_done = 0; 3033 mpd.io_done = 0;
2964 mpd.pages_written = 0; 3034 mpd.pages_written = 0;
2965 mpd.retval = 0; 3035 mpd.retval = 0;
2966 ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, 3036 ret = write_cache_pages_da(mapping, wbc, &mpd);
2967 &mpd);
2968 /* 3037 /*
2969 * If we have a contiguous extent of pages and we 3038 * If we have a contiguous extent of pages and we
2970 * haven't done the I/O yet, map the blocks and submit 3039 * haven't done the I/O yet, map the blocks and submit
@@ -3016,7 +3085,7 @@ retry:
3016 if (pages_skipped != wbc->pages_skipped) 3085 if (pages_skipped != wbc->pages_skipped)
3017 ext4_msg(inode->i_sb, KERN_CRIT, 3086 ext4_msg(inode->i_sb, KERN_CRIT,
3018 "This should not happen leaving %s " 3087 "This should not happen leaving %s "
3019 "with nr_to_write = %ld ret = %d\n", 3088 "with nr_to_write = %ld ret = %d",
3020 __func__, wbc->nr_to_write, ret); 3089 __func__, wbc->nr_to_write, ret);
3021 3090
3022 /* Update index */ 3091 /* Update index */
@@ -3030,8 +3099,6 @@ retry:
3030 mapping->writeback_index = index; 3099 mapping->writeback_index = index;
3031 3100
3032out_writepages: 3101out_writepages:
3033 if (!no_nrwrite_index_update)
3034 wbc->no_nrwrite_index_update = 0;
3035 wbc->nr_to_write -= nr_to_writebump; 3102 wbc->nr_to_write -= nr_to_writebump;
3036 wbc->range_start = range_start; 3103 wbc->range_start = range_start;
3037 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); 3104 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
@@ -3076,7 +3143,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
3076 loff_t pos, unsigned len, unsigned flags, 3143 loff_t pos, unsigned len, unsigned flags,
3077 struct page **pagep, void **fsdata) 3144 struct page **pagep, void **fsdata)
3078{ 3145{
3079 int ret, retries = 0, quota_retries = 0; 3146 int ret, retries = 0;
3080 struct page *page; 3147 struct page *page;
3081 pgoff_t index; 3148 pgoff_t index;
3082 unsigned from, to; 3149 unsigned from, to;
@@ -3135,22 +3202,6 @@ retry:
3135 3202
3136 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 3203 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3137 goto retry; 3204 goto retry;
3138
3139 if ((ret == -EDQUOT) &&
3140 EXT4_I(inode)->i_reserved_meta_blocks &&
3141 (quota_retries++ < 3)) {
3142 /*
3143 * Since we often over-estimate the number of meta
3144 * data blocks required, we may sometimes get a
3145 * spurios out of quota error even though there would
3146 * be enough space once we write the data blocks and
3147 * find out how many meta data blocks were _really_
3148 * required. So try forcing the inode write to see if
3149 * that helps.
3150 */
3151 write_inode_now(inode, (quota_retries == 3));
3152 goto retry;
3153 }
3154out: 3205out:
3155 return ret; 3206 return ret;
3156} 3207}
@@ -3546,46 +3597,18 @@ out:
3546 return ret; 3597 return ret;
3547} 3598}
3548 3599
3600/*
3601 * ext4_get_block used when preparing for a DIO write or buffer write.
3602 * We allocate an uinitialized extent if blocks haven't been allocated.
3603 * The extent will be converted to initialized after the IO is complete.
3604 */
3549static int ext4_get_block_write(struct inode *inode, sector_t iblock, 3605static int ext4_get_block_write(struct inode *inode, sector_t iblock,
3550 struct buffer_head *bh_result, int create) 3606 struct buffer_head *bh_result, int create)
3551{ 3607{
3552 handle_t *handle = ext4_journal_current_handle();
3553 int ret = 0;
3554 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
3555 int dio_credits;
3556 int started = 0;
3557
3558 ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", 3608 ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
3559 inode->i_ino, create); 3609 inode->i_ino, create);
3560 /* 3610 return _ext4_get_block(inode, iblock, bh_result,
3561 * ext4_get_block in prepare for a DIO write or buffer write. 3611 EXT4_GET_BLOCKS_IO_CREATE_EXT);
3562 * We allocate an uinitialized extent if blocks haven't been allocated.
3563 * The extent will be converted to initialized after IO complete.
3564 */
3565 create = EXT4_GET_BLOCKS_IO_CREATE_EXT;
3566
3567 if (!handle) {
3568 if (max_blocks > DIO_MAX_BLOCKS)
3569 max_blocks = DIO_MAX_BLOCKS;
3570 dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
3571 handle = ext4_journal_start(inode, dio_credits);
3572 if (IS_ERR(handle)) {
3573 ret = PTR_ERR(handle);
3574 goto out;
3575 }
3576 started = 1;
3577 }
3578
3579 ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
3580 create);
3581 if (ret > 0) {
3582 bh_result->b_size = (ret << inode->i_blkbits);
3583 ret = 0;
3584 }
3585 if (started)
3586 ext4_journal_stop(handle);
3587out:
3588 return ret;
3589} 3612}
3590 3613
3591static void dump_completed_IO(struct inode * inode) 3614static void dump_completed_IO(struct inode * inode)
@@ -3973,7 +3996,7 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3973 struct file *file = iocb->ki_filp; 3996 struct file *file = iocb->ki_filp;
3974 struct inode *inode = file->f_mapping->host; 3997 struct inode *inode = file->f_mapping->host;
3975 3998
3976 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) 3999 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3977 return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs); 4000 return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3978 4001
3979 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 4002 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
@@ -4302,10 +4325,9 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
4302 4325
4303 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, 4326 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free,
4304 count)) { 4327 count)) {
4305 ext4_error(inode->i_sb, "inode #%lu: " 4328 EXT4_ERROR_INODE(inode, "attempt to clear invalid "
4306 "attempt to clear blocks %llu len %lu, invalid", 4329 "blocks %llu len %lu",
4307 inode->i_ino, (unsigned long long) block_to_free, 4330 (unsigned long long) block_to_free, count);
4308 count);
4309 return 1; 4331 return 1;
4310 } 4332 }
4311 4333
@@ -4410,11 +4432,10 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
4410 if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) 4432 if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
4411 ext4_handle_dirty_metadata(handle, inode, this_bh); 4433 ext4_handle_dirty_metadata(handle, inode, this_bh);
4412 else 4434 else
4413 ext4_error(inode->i_sb, 4435 EXT4_ERROR_INODE(inode,
4414 "circular indirect block detected, " 4436 "circular indirect block detected at "
4415 "inode=%lu, block=%llu", 4437 "block %llu",
4416 inode->i_ino, 4438 (unsigned long long) this_bh->b_blocknr);
4417 (unsigned long long) this_bh->b_blocknr);
4418 } 4439 }
4419} 4440}
4420 4441
@@ -4452,11 +4473,10 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
4452 4473
4453 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), 4474 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb),
4454 nr, 1)) { 4475 nr, 1)) {
4455 ext4_error(inode->i_sb, 4476 EXT4_ERROR_INODE(inode,
4456 "indirect mapped block in inode " 4477 "invalid indirect mapped "
4457 "#%lu invalid (level %d, blk #%lu)", 4478 "block %lu (level %d)",
4458 inode->i_ino, depth, 4479 (unsigned long) nr, depth);
4459 (unsigned long) nr);
4460 break; 4480 break;
4461 } 4481 }
4462 4482
@@ -4468,9 +4488,9 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
4468 * (should be rare). 4488 * (should be rare).
4469 */ 4489 */
4470 if (!bh) { 4490 if (!bh) {
4471 ext4_error(inode->i_sb, 4491 EXT4_ERROR_INODE(inode,
4472 "Read failure, inode=%lu, block=%llu", 4492 "Read failure block=%llu",
4473 inode->i_ino, nr); 4493 (unsigned long long) nr);
4474 continue; 4494 continue;
4475 } 4495 }
4476 4496
@@ -4612,12 +4632,12 @@ void ext4_truncate(struct inode *inode)
4612 if (!ext4_can_truncate(inode)) 4632 if (!ext4_can_truncate(inode))
4613 return; 4633 return;
4614 4634
4615 EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL; 4635 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4616 4636
4617 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 4637 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4618 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 4638 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4619 4639
4620 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { 4640 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4621 ext4_ext_truncate(inode); 4641 ext4_ext_truncate(inode);
4622 return; 4642 return;
4623 } 4643 }
@@ -4785,8 +4805,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
4785 4805
4786 bh = sb_getblk(sb, block); 4806 bh = sb_getblk(sb, block);
4787 if (!bh) { 4807 if (!bh) {
4788 ext4_error(sb, "unable to read inode block - " 4808 EXT4_ERROR_INODE(inode, "unable to read inode block - "
4789 "inode=%lu, block=%llu", inode->i_ino, block); 4809 "block %llu", block);
4790 return -EIO; 4810 return -EIO;
4791 } 4811 }
4792 if (!buffer_uptodate(bh)) { 4812 if (!buffer_uptodate(bh)) {
@@ -4884,8 +4904,8 @@ make_io:
4884 submit_bh(READ_META, bh); 4904 submit_bh(READ_META, bh);
4885 wait_on_buffer(bh); 4905 wait_on_buffer(bh);
4886 if (!buffer_uptodate(bh)) { 4906 if (!buffer_uptodate(bh)) {
4887 ext4_error(sb, "unable to read inode block - inode=%lu," 4907 EXT4_ERROR_INODE(inode, "unable to read inode "
4888 " block=%llu", inode->i_ino, block); 4908 "block %llu", block);
4889 brelse(bh); 4909 brelse(bh);
4890 return -EIO; 4910 return -EIO;
4891 } 4911 }
@@ -5096,8 +5116,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
5096 ret = 0; 5116 ret = 0;
5097 if (ei->i_file_acl && 5117 if (ei->i_file_acl &&
5098 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 5118 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
5099 ext4_error(sb, "bad extended attribute block %llu inode #%lu", 5119 EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
5100 ei->i_file_acl, inode->i_ino); 5120 ei->i_file_acl);
5101 ret = -EIO; 5121 ret = -EIO;
5102 goto bad_inode; 5122 goto bad_inode;
5103 } else if (ei->i_flags & EXT4_EXTENTS_FL) { 5123 } else if (ei->i_flags & EXT4_EXTENTS_FL) {
@@ -5142,8 +5162,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
5142 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 5162 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
5143 } else { 5163 } else {
5144 ret = -EIO; 5164 ret = -EIO;
5145 ext4_error(inode->i_sb, "bogus i_mode (%o) for inode=%lu", 5165 EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
5146 inode->i_mode, inode->i_ino);
5147 goto bad_inode; 5166 goto bad_inode;
5148 } 5167 }
5149 brelse(iloc.bh); 5168 brelse(iloc.bh);
@@ -5381,9 +5400,9 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5381 if (wbc->sync_mode == WB_SYNC_ALL) 5400 if (wbc->sync_mode == WB_SYNC_ALL)
5382 sync_dirty_buffer(iloc.bh); 5401 sync_dirty_buffer(iloc.bh);
5383 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { 5402 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5384 ext4_error(inode->i_sb, "IO error syncing inode, " 5403 EXT4_ERROR_INODE(inode,
5385 "inode=%lu, block=%llu", inode->i_ino, 5404 "IO error syncing inode (block=%llu)",
5386 (unsigned long long)iloc.bh->b_blocknr); 5405 (unsigned long long) iloc.bh->b_blocknr);
5387 err = -EIO; 5406 err = -EIO;
5388 } 5407 }
5389 brelse(iloc.bh); 5408 brelse(iloc.bh);
@@ -5455,7 +5474,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5455 } 5474 }
5456 5475
5457 if (attr->ia_valid & ATTR_SIZE) { 5476 if (attr->ia_valid & ATTR_SIZE) {
5458 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) { 5477 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5459 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5478 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5460 5479
5461 if (attr->ia_size > sbi->s_bitmap_maxbytes) { 5480 if (attr->ia_size > sbi->s_bitmap_maxbytes) {
@@ -5468,7 +5487,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5468 if (S_ISREG(inode->i_mode) && 5487 if (S_ISREG(inode->i_mode) &&
5469 attr->ia_valid & ATTR_SIZE && 5488 attr->ia_valid & ATTR_SIZE &&
5470 (attr->ia_size < inode->i_size || 5489 (attr->ia_size < inode->i_size ||
5471 (EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL))) { 5490 (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))) {
5472 handle_t *handle; 5491 handle_t *handle;
5473 5492
5474 handle = ext4_journal_start(inode, 3); 5493 handle = ext4_journal_start(inode, 3);
@@ -5500,7 +5519,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5500 } 5519 }
5501 } 5520 }
5502 /* ext4_truncate will clear the flag */ 5521 /* ext4_truncate will clear the flag */
5503 if ((EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) 5522 if ((ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))
5504 ext4_truncate(inode); 5523 ext4_truncate(inode);
5505 } 5524 }
5506 5525
@@ -5576,7 +5595,7 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
5576 5595
5577static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 5596static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
5578{ 5597{
5579 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 5598 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5580 return ext4_indirect_trans_blocks(inode, nrblocks, chunk); 5599 return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
5581 return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); 5600 return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
5582} 5601}
@@ -5911,9 +5930,9 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
5911 */ 5930 */
5912 5931
5913 if (val) 5932 if (val)
5914 EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL; 5933 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5915 else 5934 else
5916 EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL; 5935 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5917 ext4_set_aops(inode); 5936 ext4_set_aops(inode);
5918 5937
5919 jbd2_journal_unlock_updates(journal); 5938 jbd2_journal_unlock_updates(journal);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 016d0249294f..bf5ae883b1bd 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -258,7 +258,7 @@ setversion_out:
258 if (me.moved_len > 0) 258 if (me.moved_len > 0)
259 file_remove_suid(donor_filp); 259 file_remove_suid(donor_filp);
260 260
261 if (copy_to_user((struct move_extent __user *)arg, 261 if (copy_to_user((struct move_extent __user *)arg,
262 &me, sizeof(me))) 262 &me, sizeof(me)))
263 err = -EFAULT; 263 err = -EFAULT;
264mext_out: 264mext_out:
@@ -373,7 +373,30 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
373 case EXT4_IOC32_SETRSVSZ: 373 case EXT4_IOC32_SETRSVSZ:
374 cmd = EXT4_IOC_SETRSVSZ; 374 cmd = EXT4_IOC_SETRSVSZ;
375 break; 375 break;
376 case EXT4_IOC_GROUP_ADD: 376 case EXT4_IOC32_GROUP_ADD: {
377 struct compat_ext4_new_group_input __user *uinput;
378 struct ext4_new_group_input input;
379 mm_segment_t old_fs;
380 int err;
381
382 uinput = compat_ptr(arg);
383 err = get_user(input.group, &uinput->group);
384 err |= get_user(input.block_bitmap, &uinput->block_bitmap);
385 err |= get_user(input.inode_bitmap, &uinput->inode_bitmap);
386 err |= get_user(input.inode_table, &uinput->inode_table);
387 err |= get_user(input.blocks_count, &uinput->blocks_count);
388 err |= get_user(input.reserved_blocks,
389 &uinput->reserved_blocks);
390 if (err)
391 return -EFAULT;
392 old_fs = get_fs();
393 set_fs(KERNEL_DS);
394 err = ext4_ioctl(file, EXT4_IOC_GROUP_ADD,
395 (unsigned long) &input);
396 set_fs(old_fs);
397 return err;
398 }
399 case EXT4_IOC_MOVE_EXT:
377 break; 400 break;
378 default: 401 default:
379 return -ENOIOCTLCMD; 402 return -ENOIOCTLCMD;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index b423a364dca3..12b3bc026a68 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -658,6 +658,27 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
658 } 658 }
659} 659}
660 660
661/*
662 * Cache the order of the largest free extent we have available in this block
663 * group.
664 */
665static void
666mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
667{
668 int i;
669 int bits;
670
671 grp->bb_largest_free_order = -1; /* uninit */
672
673 bits = sb->s_blocksize_bits + 1;
674 for (i = bits; i >= 0; i--) {
675 if (grp->bb_counters[i] > 0) {
676 grp->bb_largest_free_order = i;
677 break;
678 }
679 }
680}
681
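bb_largest_free_order caches, per block group, the highest buddy order that still has a free chunk; ext4_mb_good_group() consults it (in the cr == 0 hunk further down) to reject a group without touching the buddy page at all. As a worked example, with 4 KiB blocks s_blocksize_bits is 12, so the loop scans orders 13 down to 0; a group whose largest free buddy chunk is 64 contiguous blocks ends up with bb_largest_free_order == 6 and is skipped for any cr == 0 request whose ac_2order exceeds 6.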
661static noinline_for_stack 682static noinline_for_stack
662void ext4_mb_generate_buddy(struct super_block *sb, 683void ext4_mb_generate_buddy(struct super_block *sb,
663 void *buddy, void *bitmap, ext4_group_t group) 684 void *buddy, void *bitmap, ext4_group_t group)
@@ -700,6 +721,7 @@ void ext4_mb_generate_buddy(struct super_block *sb,
700 */ 721 */
701 grp->bb_free = free; 722 grp->bb_free = free;
702 } 723 }
724 mb_set_largest_free_order(sb, grp);
703 725
704 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); 726 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
705 727
@@ -725,6 +747,9 @@ void ext4_mb_generate_buddy(struct super_block *sb,
725 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks. 747 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
726 * So it can have information regarding groups_per_page which 748 * So it can have information regarding groups_per_page which
727 * is blocks_per_page/2 749 * is blocks_per_page/2
750 *
751 * Locking note: This routine takes the block group lock of all groups
752 * for this page; do not hold this lock when calling this routine!
728 */ 753 */
729 754
730static int ext4_mb_init_cache(struct page *page, char *incore) 755static int ext4_mb_init_cache(struct page *page, char *incore)
@@ -865,6 +890,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
865 BUG_ON(incore == NULL); 890 BUG_ON(incore == NULL);
866 mb_debug(1, "put buddy for group %u in page %lu/%x\n", 891 mb_debug(1, "put buddy for group %u in page %lu/%x\n",
867 group, page->index, i * blocksize); 892 group, page->index, i * blocksize);
893 trace_ext4_mb_buddy_bitmap_load(sb, group);
868 grinfo = ext4_get_group_info(sb, group); 894 grinfo = ext4_get_group_info(sb, group);
869 grinfo->bb_fragments = 0; 895 grinfo->bb_fragments = 0;
870 memset(grinfo->bb_counters, 0, 896 memset(grinfo->bb_counters, 0,
@@ -882,6 +908,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
882 BUG_ON(incore != NULL); 908 BUG_ON(incore != NULL);
883 mb_debug(1, "put bitmap for group %u in page %lu/%x\n", 909 mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
884 group, page->index, i * blocksize); 910 group, page->index, i * blocksize);
911 trace_ext4_mb_bitmap_load(sb, group);
885 912
886 /* see comments in ext4_mb_put_pa() */ 913 /* see comments in ext4_mb_put_pa() */
887 ext4_lock_group(sb, group); 914 ext4_lock_group(sb, group);
@@ -910,6 +937,11 @@ out:
910 return err; 937 return err;
911} 938}
912 939
940/*
941 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
942 * block group lock of all groups for this page; do not hold the BG lock when
943 * calling this routine!
944 */
913static noinline_for_stack 945static noinline_for_stack
914int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) 946int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
915{ 947{
@@ -1004,6 +1036,11 @@ err:
1004 return ret; 1036 return ret;
1005} 1037}
1006 1038
1039/*
1040 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
1041 * block group lock of all groups for this page; do not hold the BG lock when
1042 * calling this routine!
1043 */
1007static noinline_for_stack int 1044static noinline_for_stack int
1008ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1045ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1009 struct ext4_buddy *e4b) 1046 struct ext4_buddy *e4b)
@@ -1150,7 +1187,7 @@ err:
1150 return ret; 1187 return ret;
1151} 1188}
1152 1189
1153static void ext4_mb_release_desc(struct ext4_buddy *e4b) 1190static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1154{ 1191{
1155 if (e4b->bd_bitmap_page) 1192 if (e4b->bd_bitmap_page)
1156 page_cache_release(e4b->bd_bitmap_page); 1193 page_cache_release(e4b->bd_bitmap_page);
@@ -1299,6 +1336,7 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1299 buddy = buddy2; 1336 buddy = buddy2;
1300 } while (1); 1337 } while (1);
1301 } 1338 }
1339 mb_set_largest_free_order(sb, e4b->bd_info);
1302 mb_check_buddy(e4b); 1340 mb_check_buddy(e4b);
1303} 1341}
1304 1342
@@ -1427,6 +1465,7 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1427 e4b->bd_info->bb_counters[ord]++; 1465 e4b->bd_info->bb_counters[ord]++;
1428 e4b->bd_info->bb_counters[ord]++; 1466 e4b->bd_info->bb_counters[ord]++;
1429 } 1467 }
1468 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1430 1469
1431 mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0); 1470 mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
1432 mb_check_buddy(e4b); 1471 mb_check_buddy(e4b);
@@ -1617,7 +1656,7 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1617 } 1656 }
1618 1657
1619 ext4_unlock_group(ac->ac_sb, group); 1658 ext4_unlock_group(ac->ac_sb, group);
1620 ext4_mb_release_desc(e4b); 1659 ext4_mb_unload_buddy(e4b);
1621 1660
1622 return 0; 1661 return 0;
1623} 1662}
@@ -1672,7 +1711,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1672 ext4_mb_use_best_found(ac, e4b); 1711 ext4_mb_use_best_found(ac, e4b);
1673 } 1712 }
1674 ext4_unlock_group(ac->ac_sb, group); 1713 ext4_unlock_group(ac->ac_sb, group);
1675 ext4_mb_release_desc(e4b); 1714 ext4_mb_unload_buddy(e4b);
1676 1715
1677 return 0; 1716 return 0;
1678} 1717}
@@ -1821,16 +1860,22 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1821 } 1860 }
1822} 1861}
1823 1862
1863/* This is now called BEFORE we load the buddy bitmap. */
1824static int ext4_mb_good_group(struct ext4_allocation_context *ac, 1864static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1825 ext4_group_t group, int cr) 1865 ext4_group_t group, int cr)
1826{ 1866{
1827 unsigned free, fragments; 1867 unsigned free, fragments;
1828 unsigned i, bits;
1829 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 1868 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
1830 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 1869 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1831 1870
1832 BUG_ON(cr < 0 || cr >= 4); 1871 BUG_ON(cr < 0 || cr >= 4);
1833 BUG_ON(EXT4_MB_GRP_NEED_INIT(grp)); 1872
1873 /* We only do this if the grp has never been initialized */
1874 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1875 int ret = ext4_mb_init_group(ac->ac_sb, group);
1876 if (ret)
1877 return 0;
1878 }
1834 1879
1835 free = grp->bb_free; 1880 free = grp->bb_free;
1836 fragments = grp->bb_fragments; 1881 fragments = grp->bb_fragments;
@@ -1843,17 +1888,16 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1843 case 0: 1888 case 0:
1844 BUG_ON(ac->ac_2order == 0); 1889 BUG_ON(ac->ac_2order == 0);
1845 1890
1891 if (grp->bb_largest_free_order < ac->ac_2order)
1892 return 0;
1893
1846 /* Avoid using the first bg of a flexgroup for data files */ 1894 /* Avoid using the first bg of a flexgroup for data files */
1847 if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 1895 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
1848 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 1896 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
1849 ((group % flex_size) == 0)) 1897 ((group % flex_size) == 0))
1850 return 0; 1898 return 0;
1851 1899
1852 bits = ac->ac_sb->s_blocksize_bits + 1; 1900 return 1;
1853 for (i = ac->ac_2order; i <= bits; i++)
1854 if (grp->bb_counters[i] > 0)
1855 return 1;
1856 break;
1857 case 1: 1901 case 1:
1858 if ((free / fragments) >= ac->ac_g_ex.fe_len) 1902 if ((free / fragments) >= ac->ac_g_ex.fe_len)
1859 return 1; 1903 return 1;
@@ -1964,7 +2008,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1964 sbi = EXT4_SB(sb); 2008 sbi = EXT4_SB(sb);
1965 ngroups = ext4_get_groups_count(sb); 2009 ngroups = ext4_get_groups_count(sb);
1966 /* non-extent files are limited to low blocks/groups */ 2010 /* non-extent files are limited to low blocks/groups */
1967 if (!(EXT4_I(ac->ac_inode)->i_flags & EXT4_EXTENTS_FL)) 2011 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
1968 ngroups = sbi->s_blockfile_groups; 2012 ngroups = sbi->s_blockfile_groups;
1969 2013
1970 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2014 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
@@ -2024,15 +2068,11 @@ repeat:
2024 group = ac->ac_g_ex.fe_group; 2068 group = ac->ac_g_ex.fe_group;
2025 2069
2026 for (i = 0; i < ngroups; group++, i++) { 2070 for (i = 0; i < ngroups; group++, i++) {
2027 struct ext4_group_info *grp;
2028 struct ext4_group_desc *desc;
2029
2030 if (group == ngroups) 2071 if (group == ngroups)
2031 group = 0; 2072 group = 0;
2032 2073
2033 /* quick check to skip empty groups */ 2074 /* This now checks without needing the buddy page */
2034 grp = ext4_get_group_info(sb, group); 2075 if (!ext4_mb_good_group(ac, group, cr))
2035 if (grp->bb_free == 0)
2036 continue; 2076 continue;
2037 2077
2038 err = ext4_mb_load_buddy(sb, group, &e4b); 2078 err = ext4_mb_load_buddy(sb, group, &e4b);
@@ -2040,15 +2080,18 @@ repeat:
2040 goto out; 2080 goto out;
2041 2081
2042 ext4_lock_group(sb, group); 2082 ext4_lock_group(sb, group);
2083
2084 /*
2085 * We need to check again after locking the
2086 * block group
2087 */
2043 if (!ext4_mb_good_group(ac, group, cr)) { 2088 if (!ext4_mb_good_group(ac, group, cr)) {
2044 /* someone did allocation from this group */
2045 ext4_unlock_group(sb, group); 2089 ext4_unlock_group(sb, group);
2046 ext4_mb_release_desc(&e4b); 2090 ext4_mb_unload_buddy(&e4b);
2047 continue; 2091 continue;
2048 } 2092 }
2049 2093
2050 ac->ac_groups_scanned++; 2094 ac->ac_groups_scanned++;
2051 desc = ext4_get_group_desc(sb, group, NULL);
2052 if (cr == 0) 2095 if (cr == 0)
2053 ext4_mb_simple_scan_group(ac, &e4b); 2096 ext4_mb_simple_scan_group(ac, &e4b);
2054 else if (cr == 1 && 2097 else if (cr == 1 &&
@@ -2058,7 +2101,7 @@ repeat:
2058 ext4_mb_complex_scan_group(ac, &e4b); 2101 ext4_mb_complex_scan_group(ac, &e4b);
2059 2102
2060 ext4_unlock_group(sb, group); 2103 ext4_unlock_group(sb, group);
2061 ext4_mb_release_desc(&e4b); 2104 ext4_mb_unload_buddy(&e4b);
2062 2105
2063 if (ac->ac_status != AC_STATUS_CONTINUE) 2106 if (ac->ac_status != AC_STATUS_CONTINUE)
2064 break; 2107 break;
@@ -2148,7 +2191,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2148 ext4_lock_group(sb, group); 2191 ext4_lock_group(sb, group);
2149 memcpy(&sg, ext4_get_group_info(sb, group), i); 2192 memcpy(&sg, ext4_get_group_info(sb, group), i);
2150 ext4_unlock_group(sb, group); 2193 ext4_unlock_group(sb, group);
2151 ext4_mb_release_desc(&e4b); 2194 ext4_mb_unload_buddy(&e4b);
2152 2195
2153 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2196 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2154 sg.info.bb_fragments, sg.info.bb_first_free); 2197 sg.info.bb_fragments, sg.info.bb_first_free);
@@ -2255,6 +2298,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2255 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 2298 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2256 init_rwsem(&meta_group_info[i]->alloc_sem); 2299 init_rwsem(&meta_group_info[i]->alloc_sem);
2257 meta_group_info[i]->bb_free_root = RB_ROOT; 2300 meta_group_info[i]->bb_free_root = RB_ROOT;
2301 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
2258 2302
2259#ifdef DOUBLE_CHECK 2303#ifdef DOUBLE_CHECK
2260 { 2304 {
@@ -2536,6 +2580,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2536 entry->count, entry->group, entry); 2580 entry->count, entry->group, entry);
2537 2581
2538 if (test_opt(sb, DISCARD)) { 2582 if (test_opt(sb, DISCARD)) {
2583 int ret;
2539 ext4_fsblk_t discard_block; 2584 ext4_fsblk_t discard_block;
2540 2585
2541 discard_block = entry->start_blk + 2586 discard_block = entry->start_blk +
@@ -2543,7 +2588,12 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2543 trace_ext4_discard_blocks(sb, 2588 trace_ext4_discard_blocks(sb,
2544 (unsigned long long)discard_block, 2589 (unsigned long long)discard_block,
2545 entry->count); 2590 entry->count);
2546 sb_issue_discard(sb, discard_block, entry->count); 2591 ret = sb_issue_discard(sb, discard_block, entry->count);
2592 if (ret == EOPNOTSUPP) {
2593 ext4_warning(sb,
2594 "discard not supported, disabling");
2595 clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
2596 }
2547 } 2597 }
2548 2598
2549 err = ext4_mb_load_buddy(sb, entry->group, &e4b); 2599 err = ext4_mb_load_buddy(sb, entry->group, &e4b);
@@ -2568,7 +2618,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2568 } 2618 }
2569 ext4_unlock_group(sb, entry->group); 2619 ext4_unlock_group(sb, entry->group);
2570 kmem_cache_free(ext4_free_ext_cachep, entry); 2620 kmem_cache_free(ext4_free_ext_cachep, entry);
2571 ext4_mb_release_desc(&e4b); 2621 ext4_mb_unload_buddy(&e4b);
2572 } 2622 }
2573 2623
2574 mb_debug(1, "freed %u blocks in %u structures\n", count, count2); 2624 mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
@@ -2641,7 +2691,7 @@ int __init init_ext4_mballoc(void)
2641 2691
2642void exit_ext4_mballoc(void) 2692void exit_ext4_mballoc(void)
2643{ 2693{
2644 /* 2694 /*
2645 * Wait for completion of call_rcu()'s on ext4_pspace_cachep 2695 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
2646 * before destroying the slab cache. 2696 * before destroying the slab cache.
2647 */ 2697 */
@@ -2981,7 +3031,7 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
2981 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { 3031 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
2982 atomic_inc(&sbi->s_bal_reqs); 3032 atomic_inc(&sbi->s_bal_reqs);
2983 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 3033 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
2984 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len) 3034 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
2985 atomic_inc(&sbi->s_bal_success); 3035 atomic_inc(&sbi->s_bal_success);
2986 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 3036 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
2987 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 3037 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
@@ -3123,7 +3173,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3123 continue; 3173 continue;
3124 3174
3125 /* non-extent files can't have physical blocks past 2^32 */ 3175 /* non-extent files can't have physical blocks past 2^32 */
3126 if (!(EXT4_I(ac->ac_inode)->i_flags & EXT4_EXTENTS_FL) && 3176 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3127 pa->pa_pstart + pa->pa_len > EXT4_MAX_BLOCK_FILE_PHYS) 3177 pa->pa_pstart + pa->pa_len > EXT4_MAX_BLOCK_FILE_PHYS)
3128 continue; 3178 continue;
3129 3179
@@ -3280,7 +3330,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3280 spin_unlock(&pa->pa_lock); 3330 spin_unlock(&pa->pa_lock);
3281 3331
3282 grp_blk = pa->pa_pstart; 3332 grp_blk = pa->pa_pstart;
3283 /* 3333 /*
3284 * If doing group-based preallocation, pa_pstart may be in the 3334 * If doing group-based preallocation, pa_pstart may be in the
3285 * next group when pa is used up 3335 * next group when pa is used up
3286 */ 3336 */
@@ -3697,7 +3747,7 @@ out:
3697 ext4_unlock_group(sb, group); 3747 ext4_unlock_group(sb, group);
3698 if (ac) 3748 if (ac)
3699 kmem_cache_free(ext4_ac_cachep, ac); 3749 kmem_cache_free(ext4_ac_cachep, ac);
3700 ext4_mb_release_desc(&e4b); 3750 ext4_mb_unload_buddy(&e4b);
3701 put_bh(bitmap_bh); 3751 put_bh(bitmap_bh);
3702 return free; 3752 return free;
3703} 3753}
@@ -3801,7 +3851,7 @@ repeat:
3801 if (bitmap_bh == NULL) { 3851 if (bitmap_bh == NULL) {
3802 ext4_error(sb, "Error reading block bitmap for %u", 3852 ext4_error(sb, "Error reading block bitmap for %u",
3803 group); 3853 group);
3804 ext4_mb_release_desc(&e4b); 3854 ext4_mb_unload_buddy(&e4b);
3805 continue; 3855 continue;
3806 } 3856 }
3807 3857
@@ -3810,7 +3860,7 @@ repeat:
3810 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac); 3860 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
3811 ext4_unlock_group(sb, group); 3861 ext4_unlock_group(sb, group);
3812 3862
3813 ext4_mb_release_desc(&e4b); 3863 ext4_mb_unload_buddy(&e4b);
3814 put_bh(bitmap_bh); 3864 put_bh(bitmap_bh);
3815 3865
3816 list_del(&pa->u.pa_tmp_list); 3866 list_del(&pa->u.pa_tmp_list);
@@ -4074,7 +4124,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
4074 ext4_mb_release_group_pa(&e4b, pa, ac); 4124 ext4_mb_release_group_pa(&e4b, pa, ac);
4075 ext4_unlock_group(sb, group); 4125 ext4_unlock_group(sb, group);
4076 4126
4077 ext4_mb_release_desc(&e4b); 4127 ext4_mb_unload_buddy(&e4b);
4078 list_del(&pa->u.pa_tmp_list); 4128 list_del(&pa->u.pa_tmp_list);
4079 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4129 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4080 } 4130 }
@@ -4484,12 +4534,12 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
4484 if (!bh) 4534 if (!bh)
4485 tbh = sb_find_get_block(inode->i_sb, 4535 tbh = sb_find_get_block(inode->i_sb,
4486 block + i); 4536 block + i);
4487 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 4537 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
4488 inode, tbh, block + i); 4538 inode, tbh, block + i);
4489 } 4539 }
4490 } 4540 }
4491 4541
4492 /* 4542 /*
4493 * We need to make sure we don't reuse the freed block until 4543 * We need to make sure we don't reuse the freed block until
4494 * after the transaction is committed, which we can do by 4544 * after the transaction is committed, which we can do by
4495 * treating the block as metadata, below. We make an 4545 * treating the block as metadata, below. We make an
@@ -4610,7 +4660,7 @@ do_more:
4610 atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks); 4660 atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks);
4611 } 4661 }
4612 4662
4613 ext4_mb_release_desc(&e4b); 4663 ext4_mb_unload_buddy(&e4b);
4614 4664
4615 freed += count; 4665 freed += count;
4616 4666
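
The ext4_mb_regular_allocator hunks above follow a common check/lock/re-check pattern: ext4_mb_good_group() is now called once without the group lock to skip unsuitable groups cheaply, and again after ext4_lock_group() before the buddy data is actually used. A minimal standalone sketch of that pattern, using pthreads and made-up is_good()/try_alloc() helpers rather than the real mballoc code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct group {
	pthread_mutex_t lock;
	int free_blocks;
};

/* cheap test that may run unlocked; its answer can go stale */
static bool is_good(struct group *g, int want)
{
	return g->free_blocks >= want;
}

/* take 'want' blocks from the group, re-validating under the lock */
static bool try_alloc(struct group *g, int want)
{
	if (!is_good(g, want))			/* unlocked pre-filter, as in the hunk */
		return false;

	pthread_mutex_lock(&g->lock);
	if (!is_good(g, want)) {		/* re-check: another thread may have raced us */
		pthread_mutex_unlock(&g->lock);
		return false;
	}
	g->free_blocks -= want;
	pthread_mutex_unlock(&g->lock);
	return true;
}

int main(void)
{
	struct group g = { PTHREAD_MUTEX_INITIALIZER, 100 };

	printf("allocated: %d\n", try_alloc(&g, 10));
	return 0;
}
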
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 34dcfc52ef44..6f3a27ec30bf 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -475,7 +475,7 @@ int ext4_ext_migrate(struct inode *inode)
475 */ 475 */
476 if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb, 476 if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
477 EXT4_FEATURE_INCOMPAT_EXTENTS) || 477 EXT4_FEATURE_INCOMPAT_EXTENTS) ||
478 (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 478 (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
479 return -EINVAL; 479 return -EINVAL;
480 480
481 if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0) 481 if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
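
The migrate.c hunk, like many of the surrounding ones, replaces an open-coded mask test on i_flags (EXT4_EXTENTS_FL) with ext4_test_inode_flag(), which takes a bit number instead of a mask. A rough standalone sketch of that style of helper; the toy_* names and values are invented, and plain bit operations stand in for the kernel's bit-op primitives:

#include <stdbool.h>
#include <stdio.h>

/* bit numbers, not masks -- the values here are made up for the sketch */
enum toy_inode_bits {
	TOY_INODE_INDEX   = 12,
	TOY_INODE_EXTENTS = 19,
};

struct toy_inode {
	unsigned long flags;
};

static bool toy_test_flag(const struct toy_inode *i, int bit)
{
	return (i->flags >> bit) & 1UL;
}

static void toy_set_flag(struct toy_inode *i, int bit)
{
	i->flags |= 1UL << bit;
}

static void toy_clear_flag(struct toy_inode *i, int bit)
{
	i->flags &= ~(1UL << bit);
}

int main(void)
{
	struct toy_inode ino = { 0 };

	toy_set_flag(&ino, TOY_INODE_EXTENTS);
	printf("extents? %d\n", toy_test_flag(&ino, TOY_INODE_EXTENTS));
	toy_clear_flag(&ino, TOY_INODE_EXTENTS);
	printf("extents? %d\n", toy_test_flag(&ino, TOY_INODE_EXTENTS));
	return 0;
}
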
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index d1fc662cc311..3a6c92ac131c 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -482,6 +482,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
482 int depth = ext_depth(orig_inode); 482 int depth = ext_depth(orig_inode);
483 int ret; 483 int ret;
484 484
485 start_ext.ee_block = end_ext.ee_block = 0;
485 o_start = o_end = oext = orig_path[depth].p_ext; 486 o_start = o_end = oext = orig_path[depth].p_ext;
486 oext_alen = ext4_ext_get_actual_len(oext); 487 oext_alen = ext4_ext_get_actual_len(oext);
487 start_ext.ee_len = end_ext.ee_len = 0; 488 start_ext.ee_len = end_ext.ee_len = 0;
@@ -529,7 +530,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
529 * new_ext |-------| 530 * new_ext |-------|
530 */ 531 */
531 if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) { 532 if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) {
532 ext4_error(orig_inode->i_sb, 533 EXT4_ERROR_INODE(orig_inode,
533 "new_ext_end(%u) should be less than or equal to " 534 "new_ext_end(%u) should be less than or equal to "
534 "oext->ee_block(%u) + oext_alen(%d) - 1", 535 "oext->ee_block(%u) + oext_alen(%d) - 1",
535 new_ext_end, le32_to_cpu(oext->ee_block), 536 new_ext_end, le32_to_cpu(oext->ee_block),
@@ -692,12 +693,12 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
692 while (1) { 693 while (1) {
693 /* The extent for donor must be found. */ 694 /* The extent for donor must be found. */
694 if (!dext) { 695 if (!dext) {
695 ext4_error(donor_inode->i_sb, 696 EXT4_ERROR_INODE(donor_inode,
696 "The extent for donor must be found"); 697 "The extent for donor must be found");
697 *err = -EIO; 698 *err = -EIO;
698 goto out; 699 goto out;
699 } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) { 700 } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) {
700 ext4_error(donor_inode->i_sb, 701 EXT4_ERROR_INODE(donor_inode,
701 "Donor offset(%u) and the first block of donor " 702 "Donor offset(%u) and the first block of donor "
702 "extent(%u) should be equal", 703 "extent(%u) should be equal",
703 donor_off, 704 donor_off,
@@ -976,11 +977,11 @@ mext_check_arguments(struct inode *orig_inode,
976 } 977 }
977 978
978 /* Ext4 move extent supports only extent based file */ 979 /* Ext4 move extent supports only extent based file */
979 if (!(EXT4_I(orig_inode)->i_flags & EXT4_EXTENTS_FL)) { 980 if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
980 ext4_debug("ext4 move extent: orig file is not extents " 981 ext4_debug("ext4 move extent: orig file is not extents "
981 "based file [ino:orig %lu]\n", orig_inode->i_ino); 982 "based file [ino:orig %lu]\n", orig_inode->i_ino);
982 return -EOPNOTSUPP; 983 return -EOPNOTSUPP;
983 } else if (!(EXT4_I(donor_inode)->i_flags & EXT4_EXTENTS_FL)) { 984 } else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
984 ext4_debug("ext4 move extent: donor file is not extents " 985 ext4_debug("ext4 move extent: donor file is not extents "
985 "based file [ino:donor %lu]\n", donor_inode->i_ino); 986 "based file [ino:donor %lu]\n", donor_inode->i_ino);
986 return -EOPNOTSUPP; 987 return -EOPNOTSUPP;
@@ -1354,7 +1355,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
1354 if (ret1 < 0) 1355 if (ret1 < 0)
1355 break; 1356 break;
1356 if (*moved_len > len) { 1357 if (*moved_len > len) {
1357 ext4_error(orig_inode->i_sb, 1358 EXT4_ERROR_INODE(orig_inode,
1358 "We replaced blocks too much! " 1359 "We replaced blocks too much! "
1359 "sum of replaced: %llu requested: %llu", 1360 "sum of replaced: %llu requested: %llu",
1360 *moved_len, len); 1361 *moved_len, len);
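
The ext4_error() to EXT4_ERROR_INODE() conversions in move_extent.c and the files that follow share one idea: a macro supplies the inode context so call sites stop repeating "inode %lu". A hedged sketch of such a macro outside the kernel, using fprintf and the GNU ##__VA_ARGS__ extension; the toy_* names are made up:

#include <stdio.h>

struct toy_inode {
	unsigned long i_ino;
};

/* prepend the inode number automatically; callers pass only their message */
#define TOY_ERROR_INODE(inode, fmt, ...) \
	fprintf(stderr, "inode #%lu: " fmt "\n", (inode)->i_ino, ##__VA_ARGS__)

int main(void)
{
	struct toy_inode ino = { .i_ino = 12 };

	TOY_ERROR_INODE(&ino, "bad block %llu", 345ULL);
	return 0;
}
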
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 0c070fabd108..a43e6617b351 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -187,7 +187,7 @@ unsigned int ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
187 return blocksize; 187 return blocksize;
188 return (len & 65532) | ((len & 3) << 16); 188 return (len & 65532) | ((len & 3) << 16);
189} 189}
190 190
191__le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize) 191__le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
192{ 192{
193 if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3)) 193 if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3))
@@ -197,7 +197,7 @@ __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
197 if (len == blocksize) { 197 if (len == blocksize) {
198 if (blocksize == 65536) 198 if (blocksize == 65536)
199 return cpu_to_le16(EXT4_MAX_REC_LEN); 199 return cpu_to_le16(EXT4_MAX_REC_LEN);
200 else 200 else
201 return cpu_to_le16(0); 201 return cpu_to_le16(0);
202 } 202 }
203 return cpu_to_le16((len & 65532) | ((len >> 16) & 3)); 203 return cpu_to_le16((len & 65532) | ((len >> 16) & 3));
@@ -349,7 +349,7 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
349 brelse(bh); 349 brelse(bh);
350 } 350 }
351 if (bcount) 351 if (bcount)
352 printk(KERN_DEBUG "%snames %u, fullness %u (%u%%)\n", 352 printk(KERN_DEBUG "%snames %u, fullness %u (%u%%)\n",
353 levels ? "" : " ", names, space/bcount, 353 levels ? "" : " ", names, space/bcount,
354 (space/bcount)*100/blocksize); 354 (space/bcount)*100/blocksize);
355 return (struct stats) { names, space, bcount}; 355 return (struct stats) { names, space, bcount};
@@ -653,10 +653,10 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
653 int ret, err; 653 int ret, err;
654 __u32 hashval; 654 __u32 hashval;
655 655
656 dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n", 656 dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
657 start_hash, start_minor_hash)); 657 start_hash, start_minor_hash));
658 dir = dir_file->f_path.dentry->d_inode; 658 dir = dir_file->f_path.dentry->d_inode;
659 if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) { 659 if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) {
660 hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; 660 hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
661 if (hinfo.hash_version <= DX_HASH_TEA) 661 if (hinfo.hash_version <= DX_HASH_TEA)
662 hinfo.hash_version += 662 hinfo.hash_version +=
@@ -801,7 +801,7 @@ static void ext4_update_dx_flag(struct inode *inode)
801{ 801{
802 if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb, 802 if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
803 EXT4_FEATURE_COMPAT_DIR_INDEX)) 803 EXT4_FEATURE_COMPAT_DIR_INDEX))
804 EXT4_I(inode)->i_flags &= ~EXT4_INDEX_FL; 804 ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
805} 805}
806 806
807/* 807/*
@@ -943,8 +943,8 @@ restart:
943 wait_on_buffer(bh); 943 wait_on_buffer(bh);
944 if (!buffer_uptodate(bh)) { 944 if (!buffer_uptodate(bh)) {
945 /* read error, skip block & hope for the best */ 945 /* read error, skip block & hope for the best */
946 ext4_error(sb, "reading directory #%lu offset %lu", 946 EXT4_ERROR_INODE(dir, "reading directory lblock %lu",
947 dir->i_ino, (unsigned long)block); 947 (unsigned long) block);
948 brelse(bh); 948 brelse(bh);
949 goto next; 949 goto next;
950 } 950 }
@@ -1066,15 +1066,15 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
1066 __u32 ino = le32_to_cpu(de->inode); 1066 __u32 ino = le32_to_cpu(de->inode);
1067 brelse(bh); 1067 brelse(bh);
1068 if (!ext4_valid_inum(dir->i_sb, ino)) { 1068 if (!ext4_valid_inum(dir->i_sb, ino)) {
1069 ext4_error(dir->i_sb, "bad inode number: %u", ino); 1069 EXT4_ERROR_INODE(dir, "bad inode number: %u", ino);
1070 return ERR_PTR(-EIO); 1070 return ERR_PTR(-EIO);
1071 } 1071 }
1072 inode = ext4_iget(dir->i_sb, ino); 1072 inode = ext4_iget(dir->i_sb, ino);
1073 if (unlikely(IS_ERR(inode))) { 1073 if (unlikely(IS_ERR(inode))) {
1074 if (PTR_ERR(inode) == -ESTALE) { 1074 if (PTR_ERR(inode) == -ESTALE) {
1075 ext4_error(dir->i_sb, 1075 EXT4_ERROR_INODE(dir,
1076 "deleted inode referenced: %u", 1076 "deleted inode referenced: %u",
1077 ino); 1077 ino);
1078 return ERR_PTR(-EIO); 1078 return ERR_PTR(-EIO);
1079 } else { 1079 } else {
1080 return ERR_CAST(inode); 1080 return ERR_CAST(inode);
@@ -1104,8 +1104,8 @@ struct dentry *ext4_get_parent(struct dentry *child)
1104 brelse(bh); 1104 brelse(bh);
1105 1105
1106 if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { 1106 if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
1107 ext4_error(child->d_inode->i_sb, 1107 EXT4_ERROR_INODE(child->d_inode,
1108 "bad inode number: %u", ino); 1108 "bad parent inode number: %u", ino);
1109 return ERR_PTR(-EIO); 1109 return ERR_PTR(-EIO);
1110 } 1110 }
1111 1111
@@ -1141,7 +1141,7 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count,
1141 unsigned rec_len = 0; 1141 unsigned rec_len = 0;
1142 1142
1143 while (count--) { 1143 while (count--) {
1144 struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) 1144 struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
1145 (from + (map->offs<<2)); 1145 (from + (map->offs<<2));
1146 rec_len = EXT4_DIR_REC_LEN(de->name_len); 1146 rec_len = EXT4_DIR_REC_LEN(de->name_len);
1147 memcpy (to, de, rec_len); 1147 memcpy (to, de, rec_len);
@@ -1404,9 +1404,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1404 de = (struct ext4_dir_entry_2 *)((char *)fde + 1404 de = (struct ext4_dir_entry_2 *)((char *)fde +
1405 ext4_rec_len_from_disk(fde->rec_len, blocksize)); 1405 ext4_rec_len_from_disk(fde->rec_len, blocksize));
1406 if ((char *) de >= (((char *) root) + blocksize)) { 1406 if ((char *) de >= (((char *) root) + blocksize)) {
1407 ext4_error(dir->i_sb, 1407 EXT4_ERROR_INODE(dir, "invalid rec_len for '..'");
1408 "invalid rec_len for '..' in inode %lu",
1409 dir->i_ino);
1410 brelse(bh); 1408 brelse(bh);
1411 return -EIO; 1409 return -EIO;
1412 } 1410 }
@@ -1418,7 +1416,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1418 brelse(bh); 1416 brelse(bh);
1419 return retval; 1417 return retval;
1420 } 1418 }
1421 EXT4_I(dir)->i_flags |= EXT4_INDEX_FL; 1419 ext4_set_inode_flag(dir, EXT4_INODE_INDEX);
1422 data1 = bh2->b_data; 1420 data1 = bh2->b_data;
1423 1421
1424 memcpy (data1, de, len); 1422 memcpy (data1, de, len);
@@ -1491,7 +1489,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1491 retval = ext4_dx_add_entry(handle, dentry, inode); 1489 retval = ext4_dx_add_entry(handle, dentry, inode);
1492 if (!retval || (retval != ERR_BAD_DX_DIR)) 1490 if (!retval || (retval != ERR_BAD_DX_DIR))
1493 return retval; 1491 return retval;
1494 EXT4_I(dir)->i_flags &= ~EXT4_INDEX_FL; 1492 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
1495 dx_fallback++; 1493 dx_fallback++;
1496 ext4_mark_inode_dirty(handle, dir); 1494 ext4_mark_inode_dirty(handle, dir);
1497 } 1495 }
@@ -1519,6 +1517,8 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1519 de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize); 1517 de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize);
1520 retval = add_dirent_to_buf(handle, dentry, inode, de, bh); 1518 retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
1521 brelse(bh); 1519 brelse(bh);
1520 if (retval == 0)
1521 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
1522 return retval; 1522 return retval;
1523} 1523}
1524 1524
@@ -1915,9 +1915,8 @@ static int empty_dir(struct inode *inode)
1915 if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) || 1915 if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
1916 !(bh = ext4_bread(NULL, inode, 0, 0, &err))) { 1916 !(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
1917 if (err) 1917 if (err)
1918 ext4_error(inode->i_sb, 1918 EXT4_ERROR_INODE(inode,
1919 "error %d reading directory #%lu offset 0", 1919 "error %d reading directory lblock 0", err);
1920 err, inode->i_ino);
1921 else 1920 else
1922 ext4_warning(inode->i_sb, 1921 ext4_warning(inode->i_sb,
1923 "bad directory (dir #%lu) - no data block", 1922 "bad directory (dir #%lu) - no data block",
@@ -1941,17 +1940,17 @@ static int empty_dir(struct inode *inode)
1941 de = ext4_next_entry(de1, sb->s_blocksize); 1940 de = ext4_next_entry(de1, sb->s_blocksize);
1942 while (offset < inode->i_size) { 1941 while (offset < inode->i_size) {
1943 if (!bh || 1942 if (!bh ||
1944 (void *) de >= (void *) (bh->b_data+sb->s_blocksize)) { 1943 (void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
1944 unsigned int lblock;
1945 err = 0; 1945 err = 0;
1946 brelse(bh); 1946 brelse(bh);
1947 bh = ext4_bread(NULL, inode, 1947 lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
1948 offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err); 1948 bh = ext4_bread(NULL, inode, lblock, 0, &err);
1949 if (!bh) { 1949 if (!bh) {
1950 if (err) 1950 if (err)
1951 ext4_error(sb, 1951 EXT4_ERROR_INODE(inode,
1952 "error %d reading directory" 1952 "error %d reading directory "
1953 " #%lu offset %u", 1953 "lblock %u", err, lblock);
1954 err, inode->i_ino, offset);
1955 offset += sb->s_blocksize; 1954 offset += sb->s_blocksize;
1956 continue; 1955 continue;
1957 } 1956 }
@@ -2297,7 +2296,7 @@ retry:
2297 } 2296 }
2298 } else { 2297 } else {
2299 /* clear the extent format for fast symlink */ 2298 /* clear the extent format for fast symlink */
2300 EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL; 2299 ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
2301 inode->i_op = &ext4_fast_symlink_inode_operations; 2300 inode->i_op = &ext4_fast_symlink_inode_operations;
2302 memcpy((char *)&EXT4_I(inode)->i_data, symname, l); 2301 memcpy((char *)&EXT4_I(inode)->i_data, symname, l);
2303 inode->i_size = l-1; 2302 inode->i_size = l-1;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 5692c48754a0..6df797eb9aeb 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -911,7 +911,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
911 percpu_counter_add(&sbi->s_freeinodes_counter, 911 percpu_counter_add(&sbi->s_freeinodes_counter,
912 EXT4_INODES_PER_GROUP(sb)); 912 EXT4_INODES_PER_GROUP(sb));
913 913
914 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) { 914 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
915 sbi->s_log_groups_per_flex) {
915 ext4_group_t flex_group; 916 ext4_group_t flex_group;
916 flex_group = ext4_flex_group(sbi, input->group); 917 flex_group = ext4_flex_group(sbi, input->group);
917 atomic_add(input->free_blocks_count, 918 atomic_add(input->free_blocks_count,
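
The resize.c hunk adds an extra sbi->s_log_groups_per_flex test before touching the flex-group counters, guarding against the case where the FLEX_BG feature bit is set but the flex bookkeeping was never initialized. A small sketch of that kind of guard with invented types, loosely modelled on the shift-by-log indexing ext4 uses for flex groups:

#include <stdio.h>

struct toy_sb {
	int   log_groups_per_flex;	/* 0: flex bookkeeping was never set up */
	long *flex_free_blocks;		/* one counter per flex group, or NULL */
};

/* only touch the per-flex counters when they really exist */
static void toy_add_free_blocks(struct toy_sb *sb, unsigned int group, long count)
{
	if (!sb->log_groups_per_flex || !sb->flex_free_blocks)
		return;

	sb->flex_free_blocks[group >> sb->log_groups_per_flex] += count;
}

int main(void)
{
	long counters[4] = { 0 };
	struct toy_sb sb = { 2, counters };	/* 4 groups per flex group */

	toy_add_free_blocks(&sb, 5, 100);	/* lands in flex group 1 */
	printf("%ld\n", counters[1]);
	return 0;
}
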
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index e14d22c170d5..4e8983a9811b 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -241,6 +241,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
241 if (sb->s_flags & MS_RDONLY) 241 if (sb->s_flags & MS_RDONLY)
242 return ERR_PTR(-EROFS); 242 return ERR_PTR(-EROFS);
243 243
244 vfs_check_frozen(sb, SB_FREEZE_WRITE);
244 /* Special case here: if the journal has aborted behind our 245 /* Special case here: if the journal has aborted behind our
245 * backs (eg. EIO in the commit thread), then we still need to 246 * backs (eg. EIO in the commit thread), then we still need to
246 * take the FS itself readonly cleanly. */ 247 * take the FS itself readonly cleanly. */
@@ -645,6 +646,8 @@ static void ext4_put_super(struct super_block *sb)
645 struct ext4_super_block *es = sbi->s_es; 646 struct ext4_super_block *es = sbi->s_es;
646 int i, err; 647 int i, err;
647 648
649 dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
650
648 flush_workqueue(sbi->dio_unwritten_wq); 651 flush_workqueue(sbi->dio_unwritten_wq);
649 destroy_workqueue(sbi->dio_unwritten_wq); 652 destroy_workqueue(sbi->dio_unwritten_wq);
650 653
@@ -941,6 +944,8 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
941 seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0"); 944 seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
942 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) 945 if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
943 seq_puts(seq, ",journal_async_commit"); 946 seq_puts(seq, ",journal_async_commit");
947 else if (test_opt(sb, JOURNAL_CHECKSUM))
948 seq_puts(seq, ",journal_checksum");
944 if (test_opt(sb, NOBH)) 949 if (test_opt(sb, NOBH))
945 seq_puts(seq, ",nobh"); 950 seq_puts(seq, ",nobh");
946 if (test_opt(sb, I_VERSION)) 951 if (test_opt(sb, I_VERSION))
@@ -1059,7 +1064,7 @@ static int ext4_release_dquot(struct dquot *dquot);
1059static int ext4_mark_dquot_dirty(struct dquot *dquot); 1064static int ext4_mark_dquot_dirty(struct dquot *dquot);
1060static int ext4_write_info(struct super_block *sb, int type); 1065static int ext4_write_info(struct super_block *sb, int type);
1061static int ext4_quota_on(struct super_block *sb, int type, int format_id, 1066static int ext4_quota_on(struct super_block *sb, int type, int format_id,
1062 char *path, int remount); 1067 char *path);
1063static int ext4_quota_on_mount(struct super_block *sb, int type); 1068static int ext4_quota_on_mount(struct super_block *sb, int type);
1064static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, 1069static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
1065 size_t len, loff_t off); 1070 size_t len, loff_t off);
@@ -1081,12 +1086,12 @@ static const struct dquot_operations ext4_quota_operations = {
1081 1086
1082static const struct quotactl_ops ext4_qctl_operations = { 1087static const struct quotactl_ops ext4_qctl_operations = {
1083 .quota_on = ext4_quota_on, 1088 .quota_on = ext4_quota_on,
1084 .quota_off = vfs_quota_off, 1089 .quota_off = dquot_quota_off,
1085 .quota_sync = vfs_quota_sync, 1090 .quota_sync = dquot_quota_sync,
1086 .get_info = vfs_get_dqinfo, 1091 .get_info = dquot_get_dqinfo,
1087 .set_info = vfs_set_dqinfo, 1092 .set_info = dquot_set_dqinfo,
1088 .get_dqblk = vfs_get_dqblk, 1093 .get_dqblk = dquot_get_dqblk,
1089 .set_dqblk = vfs_set_dqblk 1094 .set_dqblk = dquot_set_dqblk
1090}; 1095};
1091#endif 1096#endif
1092 1097
@@ -2051,7 +2056,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
2051 /* Turn quotas off */ 2056 /* Turn quotas off */
2052 for (i = 0; i < MAXQUOTAS; i++) { 2057 for (i = 0; i < MAXQUOTAS; i++) {
2053 if (sb_dqopt(sb)->files[i]) 2058 if (sb_dqopt(sb)->files[i])
2054 vfs_quota_off(sb, i, 0); 2059 dquot_quota_off(sb, i);
2055 } 2060 }
2056#endif 2061#endif
2057 sb->s_flags = s_flags; /* Restore MS_RDONLY status */ 2062 sb->s_flags = s_flags; /* Restore MS_RDONLY status */
@@ -2213,7 +2218,7 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
2213struct ext4_attr { 2218struct ext4_attr {
2214 struct attribute attr; 2219 struct attribute attr;
2215 ssize_t (*show)(struct ext4_attr *, struct ext4_sb_info *, char *); 2220 ssize_t (*show)(struct ext4_attr *, struct ext4_sb_info *, char *);
2216 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *, 2221 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
2217 const char *, size_t); 2222 const char *, size_t);
2218 int offset; 2223 int offset;
2219}; 2224};
@@ -2430,6 +2435,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2430 __releases(kernel_lock) 2435 __releases(kernel_lock)
2431 __acquires(kernel_lock) 2436 __acquires(kernel_lock)
2432{ 2437{
2438 char *orig_data = kstrdup(data, GFP_KERNEL);
2433 struct buffer_head *bh; 2439 struct buffer_head *bh;
2434 struct ext4_super_block *es = NULL; 2440 struct ext4_super_block *es = NULL;
2435 struct ext4_sb_info *sbi; 2441 struct ext4_sb_info *sbi;
@@ -2793,24 +2799,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2793 get_random_bytes(&sbi->s_next_generation, sizeof(u32)); 2799 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
2794 spin_lock_init(&sbi->s_next_gen_lock); 2800 spin_lock_init(&sbi->s_next_gen_lock);
2795 2801
2796 err = percpu_counter_init(&sbi->s_freeblocks_counter,
2797 ext4_count_free_blocks(sb));
2798 if (!err) {
2799 err = percpu_counter_init(&sbi->s_freeinodes_counter,
2800 ext4_count_free_inodes(sb));
2801 }
2802 if (!err) {
2803 err = percpu_counter_init(&sbi->s_dirs_counter,
2804 ext4_count_dirs(sb));
2805 }
2806 if (!err) {
2807 err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
2808 }
2809 if (err) {
2810 ext4_msg(sb, KERN_ERR, "insufficient memory");
2811 goto failed_mount3;
2812 }
2813
2814 sbi->s_stripe = ext4_get_stripe_size(sbi); 2802 sbi->s_stripe = ext4_get_stripe_size(sbi);
2815 sbi->s_max_writeback_mb_bump = 128; 2803 sbi->s_max_writeback_mb_bump = 128;
2816 2804
@@ -2910,6 +2898,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2910 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); 2898 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
2911 2899
2912no_journal: 2900no_journal:
2901 err = percpu_counter_init(&sbi->s_freeblocks_counter,
2902 ext4_count_free_blocks(sb));
2903 if (!err)
2904 err = percpu_counter_init(&sbi->s_freeinodes_counter,
2905 ext4_count_free_inodes(sb));
2906 if (!err)
2907 err = percpu_counter_init(&sbi->s_dirs_counter,
2908 ext4_count_dirs(sb));
2909 if (!err)
2910 err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
2911 if (err) {
2912 ext4_msg(sb, KERN_ERR, "insufficient memory");
2913 goto failed_mount_wq;
2914 }
2913 if (test_opt(sb, NOBH)) { 2915 if (test_opt(sb, NOBH)) {
2914 if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) { 2916 if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
2915 ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - " 2917 ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - "
@@ -3001,7 +3003,7 @@ no_journal:
3001 err = ext4_setup_system_zone(sb); 3003 err = ext4_setup_system_zone(sb);
3002 if (err) { 3004 if (err) {
3003 ext4_msg(sb, KERN_ERR, "failed to initialize system " 3005 ext4_msg(sb, KERN_ERR, "failed to initialize system "
3004 "zone (%d)\n", err); 3006 "zone (%d)", err);
3005 goto failed_mount4; 3007 goto failed_mount4;
3006 } 3008 }
3007 3009
@@ -3040,9 +3042,11 @@ no_journal:
3040 } else 3042 } else
3041 descr = "out journal"; 3043 descr = "out journal";
3042 3044
3043 ext4_msg(sb, KERN_INFO, "mounted filesystem with%s", descr); 3045 ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
3046 "Opts: %s", descr, orig_data);
3044 3047
3045 lock_kernel(); 3048 lock_kernel();
3049 kfree(orig_data);
3046 return 0; 3050 return 0;
3047 3051
3048cantfind_ext4: 3052cantfind_ext4:
@@ -3059,6 +3063,10 @@ failed_mount_wq:
3059 jbd2_journal_destroy(sbi->s_journal); 3063 jbd2_journal_destroy(sbi->s_journal);
3060 sbi->s_journal = NULL; 3064 sbi->s_journal = NULL;
3061 } 3065 }
3066 percpu_counter_destroy(&sbi->s_freeblocks_counter);
3067 percpu_counter_destroy(&sbi->s_freeinodes_counter);
3068 percpu_counter_destroy(&sbi->s_dirs_counter);
3069 percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
3062failed_mount3: 3070failed_mount3:
3063 if (sbi->s_flex_groups) { 3071 if (sbi->s_flex_groups) {
3064 if (is_vmalloc_addr(sbi->s_flex_groups)) 3072 if (is_vmalloc_addr(sbi->s_flex_groups))
@@ -3066,10 +3074,6 @@ failed_mount3:
3066 else 3074 else
3067 kfree(sbi->s_flex_groups); 3075 kfree(sbi->s_flex_groups);
3068 } 3076 }
3069 percpu_counter_destroy(&sbi->s_freeblocks_counter);
3070 percpu_counter_destroy(&sbi->s_freeinodes_counter);
3071 percpu_counter_destroy(&sbi->s_dirs_counter);
3072 percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
3073failed_mount2: 3077failed_mount2:
3074 for (i = 0; i < db_count; i++) 3078 for (i = 0; i < db_count; i++)
3075 brelse(sbi->s_group_desc[i]); 3079 brelse(sbi->s_group_desc[i]);
@@ -3089,6 +3093,7 @@ out_fail:
3089 kfree(sbi->s_blockgroup_lock); 3093 kfree(sbi->s_blockgroup_lock);
3090 kfree(sbi); 3094 kfree(sbi);
3091 lock_kernel(); 3095 lock_kernel();
3096 kfree(orig_data);
3092 return ret; 3097 return ret;
3093} 3098}
3094 3099
@@ -3380,7 +3385,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
3380 if (!(sb->s_flags & MS_RDONLY)) 3385 if (!(sb->s_flags & MS_RDONLY))
3381 es->s_wtime = cpu_to_le32(get_seconds()); 3386 es->s_wtime = cpu_to_le32(get_seconds());
3382 es->s_kbytes_written = 3387 es->s_kbytes_written =
3383 cpu_to_le64(EXT4_SB(sb)->s_kbytes_written + 3388 cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
3384 ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) - 3389 ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
3385 EXT4_SB(sb)->s_sectors_written_start) >> 1)); 3390 EXT4_SB(sb)->s_sectors_written_start) >> 1));
3386 ext4_free_blocks_count_set(es, percpu_counter_sum_positive( 3391 ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
@@ -3485,8 +3490,10 @@ int ext4_force_commit(struct super_block *sb)
3485 return 0; 3490 return 0;
3486 3491
3487 journal = EXT4_SB(sb)->s_journal; 3492 journal = EXT4_SB(sb)->s_journal;
3488 if (journal) 3493 if (journal) {
3494 vfs_check_frozen(sb, SB_FREEZE_WRITE);
3489 ret = ext4_journal_force_commit(journal); 3495 ret = ext4_journal_force_commit(journal);
3496 }
3490 3497
3491 return ret; 3498 return ret;
3492} 3499}
@@ -3535,18 +3542,16 @@ static int ext4_freeze(struct super_block *sb)
3535 * the journal. 3542 * the journal.
3536 */ 3543 */
3537 error = jbd2_journal_flush(journal); 3544 error = jbd2_journal_flush(journal);
3538 if (error < 0) { 3545 if (error < 0)
3539 out: 3546 goto out;
3540 jbd2_journal_unlock_updates(journal);
3541 return error;
3542 }
3543 3547
3544 /* Journal blocked and flushed, clear needs_recovery flag. */ 3548 /* Journal blocked and flushed, clear needs_recovery flag. */
3545 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); 3549 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3546 error = ext4_commit_super(sb, 1); 3550 error = ext4_commit_super(sb, 1);
3547 if (error) 3551out:
3548 goto out; 3552 /* we rely on s_frozen to stop further updates */
3549 return 0; 3553 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
3554 return error;
3550} 3555}
3551 3556
3552/* 3557/*
@@ -3563,7 +3568,6 @@ static int ext4_unfreeze(struct super_block *sb)
3563 EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); 3568 EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3564 ext4_commit_super(sb, 1); 3569 ext4_commit_super(sb, 1);
3565 unlock_super(sb); 3570 unlock_super(sb);
3566 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
3567 return 0; 3571 return 0;
3568} 3572}
3569 3573
@@ -3574,12 +3578,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
3574 ext4_fsblk_t n_blocks_count = 0; 3578 ext4_fsblk_t n_blocks_count = 0;
3575 unsigned long old_sb_flags; 3579 unsigned long old_sb_flags;
3576 struct ext4_mount_options old_opts; 3580 struct ext4_mount_options old_opts;
3581 int enable_quota = 0;
3577 ext4_group_t g; 3582 ext4_group_t g;
3578 unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; 3583 unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
3579 int err; 3584 int err;
3580#ifdef CONFIG_QUOTA 3585#ifdef CONFIG_QUOTA
3581 int i; 3586 int i;
3582#endif 3587#endif
3588 char *orig_data = kstrdup(data, GFP_KERNEL);
3583 3589
3584 lock_kernel(); 3590 lock_kernel();
3585 3591
@@ -3630,6 +3636,10 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
3630 } 3636 }
3631 3637
3632 if (*flags & MS_RDONLY) { 3638 if (*flags & MS_RDONLY) {
3639 err = dquot_suspend(sb, -1);
3640 if (err < 0)
3641 goto restore_opts;
3642
3633 /* 3643 /*
3634 * First of all, the unconditional stuff we have to do 3644 * First of all, the unconditional stuff we have to do
3635 * to disable replay of the journal when we next remount 3645 * to disable replay of the journal when we next remount
@@ -3698,6 +3708,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
3698 goto restore_opts; 3708 goto restore_opts;
3699 if (!ext4_setup_super(sb, es, 0)) 3709 if (!ext4_setup_super(sb, es, 0))
3700 sb->s_flags &= ~MS_RDONLY; 3710 sb->s_flags &= ~MS_RDONLY;
3711 enable_quota = 1;
3701 } 3712 }
3702 } 3713 }
3703 ext4_setup_system_zone(sb); 3714 ext4_setup_system_zone(sb);
@@ -3713,6 +3724,11 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
3713#endif 3724#endif
3714 unlock_super(sb); 3725 unlock_super(sb);
3715 unlock_kernel(); 3726 unlock_kernel();
3727 if (enable_quota)
3728 dquot_resume(sb, -1);
3729
3730 ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
3731 kfree(orig_data);
3716 return 0; 3732 return 0;
3717 3733
3718restore_opts: 3734restore_opts:
@@ -3734,6 +3750,7 @@ restore_opts:
3734#endif 3750#endif
3735 unlock_super(sb); 3751 unlock_super(sb);
3736 unlock_kernel(); 3752 unlock_kernel();
3753 kfree(orig_data);
3737 return err; 3754 return err;
3738} 3755}
3739 3756
@@ -3906,24 +3923,21 @@ static int ext4_write_info(struct super_block *sb, int type)
3906 */ 3923 */
3907static int ext4_quota_on_mount(struct super_block *sb, int type) 3924static int ext4_quota_on_mount(struct super_block *sb, int type)
3908{ 3925{
3909 return vfs_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type], 3926 return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
3910 EXT4_SB(sb)->s_jquota_fmt, type); 3927 EXT4_SB(sb)->s_jquota_fmt, type);
3911} 3928}
3912 3929
3913/* 3930/*
3914 * Standard function to be called on quota_on 3931 * Standard function to be called on quota_on
3915 */ 3932 */
3916static int ext4_quota_on(struct super_block *sb, int type, int format_id, 3933static int ext4_quota_on(struct super_block *sb, int type, int format_id,
3917 char *name, int remount) 3934 char *name)
3918{ 3935{
3919 int err; 3936 int err;
3920 struct path path; 3937 struct path path;
3921 3938
3922 if (!test_opt(sb, QUOTA)) 3939 if (!test_opt(sb, QUOTA))
3923 return -EINVAL; 3940 return -EINVAL;
3924 /* When remounting, no checks are needed and in fact, name is NULL */
3925 if (remount)
3926 return vfs_quota_on(sb, type, format_id, name, remount);
3927 3941
3928 err = kern_path(name, LOOKUP_FOLLOW, &path); 3942 err = kern_path(name, LOOKUP_FOLLOW, &path);
3929 if (err) 3943 if (err)
@@ -3962,7 +3976,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
3962 } 3976 }
3963 } 3977 }
3964 3978
3965 err = vfs_quota_on_path(sb, type, format_id, &path); 3979 err = dquot_quota_on_path(sb, type, format_id, &path);
3966 path_put(&path); 3980 path_put(&path);
3967 return err; 3981 return err;
3968} 3982}
@@ -4141,6 +4155,7 @@ static int __init init_ext4_fs(void)
4141{ 4155{
4142 int err; 4156 int err;
4143 4157
4158 ext4_check_flag_values();
4144 err = init_ext4_system_zone(); 4159 err = init_ext4_system_zone();
4145 if (err) 4160 if (err)
4146 return err; 4161 return err;
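
The super.c hunks move the percpu counter setup after journal initialization and add matching percpu_counter_destroy() calls to the failed_mount_wq label, keeping teardown the mirror image of setup. A generic standalone sketch of that goto-based unwinding idiom, with malloc/free standing in for the real resources:

#include <stdio.h>
#include <stdlib.h>

struct toy_ctx {
	void *a, *b, *c;
};

/* set up three resources; on failure, release in reverse order of setup */
static int toy_ctx_init(struct toy_ctx *ctx)
{
	ctx->a = malloc(16);
	if (!ctx->a)
		goto fail_a;
	ctx->b = malloc(16);
	if (!ctx->b)
		goto fail_b;
	ctx->c = malloc(16);
	if (!ctx->c)
		goto fail_c;
	return 0;

fail_c:
	free(ctx->b);
fail_b:
	free(ctx->a);
fail_a:
	return -1;
}

int main(void)
{
	struct toy_ctx ctx;

	if (toy_ctx_init(&ctx) == 0) {
		puts("initialized");
		free(ctx.c);
		free(ctx.b);
		free(ctx.a);
	}
	return 0;
}
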
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 00740cb32be3..ed9354aff279 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -34,6 +34,7 @@ const struct inode_operations ext4_symlink_inode_operations = {
34 .readlink = generic_readlink, 34 .readlink = generic_readlink,
35 .follow_link = page_follow_link_light, 35 .follow_link = page_follow_link_light,
36 .put_link = page_put_link, 36 .put_link = page_put_link,
37 .setattr = ext4_setattr,
37#ifdef CONFIG_EXT4_FS_XATTR 38#ifdef CONFIG_EXT4_FS_XATTR
38 .setxattr = generic_setxattr, 39 .setxattr = generic_setxattr,
39 .getxattr = generic_getxattr, 40 .getxattr = generic_getxattr,
@@ -45,6 +46,7 @@ const struct inode_operations ext4_symlink_inode_operations = {
45const struct inode_operations ext4_fast_symlink_inode_operations = { 46const struct inode_operations ext4_fast_symlink_inode_operations = {
46 .readlink = generic_readlink, 47 .readlink = generic_readlink,
47 .follow_link = ext4_follow_link, 48 .follow_link = ext4_follow_link,
49 .setattr = ext4_setattr,
48#ifdef CONFIG_EXT4_FS_XATTR 50#ifdef CONFIG_EXT4_FS_XATTR
49 .setxattr = generic_setxattr, 51 .setxattr = generic_setxattr,
50 .getxattr = generic_getxattr, 52 .getxattr = generic_getxattr,
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 2de0e9515089..04338009793a 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -228,9 +228,8 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
228 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); 228 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
229 if (ext4_xattr_check_block(bh)) { 229 if (ext4_xattr_check_block(bh)) {
230bad_block: 230bad_block:
231 ext4_error(inode->i_sb, 231 EXT4_ERROR_INODE(inode, "bad block %llu",
232 "inode %lu: bad block %llu", inode->i_ino, 232 EXT4_I(inode)->i_file_acl);
233 EXT4_I(inode)->i_file_acl);
234 error = -EIO; 233 error = -EIO;
235 goto cleanup; 234 goto cleanup;
236 } 235 }
@@ -372,9 +371,8 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
372 ea_bdebug(bh, "b_count=%d, refcount=%d", 371 ea_bdebug(bh, "b_count=%d, refcount=%d",
373 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); 372 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
374 if (ext4_xattr_check_block(bh)) { 373 if (ext4_xattr_check_block(bh)) {
375 ext4_error(inode->i_sb, 374 EXT4_ERROR_INODE(inode, "bad block %llu",
376 "inode %lu: bad block %llu", inode->i_ino, 375 EXT4_I(inode)->i_file_acl);
377 EXT4_I(inode)->i_file_acl);
378 error = -EIO; 376 error = -EIO;
379 goto cleanup; 377 goto cleanup;
380 } 378 }
@@ -666,8 +664,8 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
666 atomic_read(&(bs->bh->b_count)), 664 atomic_read(&(bs->bh->b_count)),
667 le32_to_cpu(BHDR(bs->bh)->h_refcount)); 665 le32_to_cpu(BHDR(bs->bh)->h_refcount));
668 if (ext4_xattr_check_block(bs->bh)) { 666 if (ext4_xattr_check_block(bs->bh)) {
669 ext4_error(sb, "inode %lu: bad block %llu", 667 EXT4_ERROR_INODE(inode, "bad block %llu",
670 inode->i_ino, EXT4_I(inode)->i_file_acl); 668 EXT4_I(inode)->i_file_acl);
671 error = -EIO; 669 error = -EIO;
672 goto cleanup; 670 goto cleanup;
673 } 671 }
@@ -820,7 +818,7 @@ inserted:
820 EXT4_I(inode)->i_block_group); 818 EXT4_I(inode)->i_block_group);
821 819
822 /* non-extent files can't have physical blocks past 2^32 */ 820 /* non-extent files can't have physical blocks past 2^32 */
823 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 821 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
824 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; 822 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
825 823
826 block = ext4_new_meta_blocks(handle, inode, 824 block = ext4_new_meta_blocks(handle, inode,
@@ -828,7 +826,7 @@ inserted:
828 if (error) 826 if (error)
829 goto cleanup; 827 goto cleanup;
830 828
831 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 829 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
832 BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS); 830 BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
833 831
834 ea_idebug(inode, "creating block %d", block); 832 ea_idebug(inode, "creating block %d", block);
@@ -880,8 +878,8 @@ cleanup_dquot:
880 goto cleanup; 878 goto cleanup;
881 879
882bad_block: 880bad_block:
883 ext4_error(inode->i_sb, "inode %lu: bad block %llu", 881 EXT4_ERROR_INODE(inode, "bad block %llu",
884 inode->i_ino, EXT4_I(inode)->i_file_acl); 882 EXT4_I(inode)->i_file_acl);
885 goto cleanup; 883 goto cleanup;
886 884
887#undef header 885#undef header
@@ -1194,8 +1192,8 @@ retry:
1194 if (!bh) 1192 if (!bh)
1195 goto cleanup; 1193 goto cleanup;
1196 if (ext4_xattr_check_block(bh)) { 1194 if (ext4_xattr_check_block(bh)) {
1197 ext4_error(inode->i_sb, "inode %lu: bad block %llu", 1195 EXT4_ERROR_INODE(inode, "bad block %llu",
1198 inode->i_ino, EXT4_I(inode)->i_file_acl); 1196 EXT4_I(inode)->i_file_acl);
1199 error = -EIO; 1197 error = -EIO;
1200 goto cleanup; 1198 goto cleanup;
1201 } 1199 }
@@ -1372,14 +1370,14 @@ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
1372 goto cleanup; 1370 goto cleanup;
1373 bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); 1371 bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
1374 if (!bh) { 1372 if (!bh) {
1375 ext4_error(inode->i_sb, "inode %lu: block %llu read error", 1373 EXT4_ERROR_INODE(inode, "block %llu read error",
1376 inode->i_ino, EXT4_I(inode)->i_file_acl); 1374 EXT4_I(inode)->i_file_acl);
1377 goto cleanup; 1375 goto cleanup;
1378 } 1376 }
1379 if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || 1377 if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
1380 BHDR(bh)->h_blocks != cpu_to_le32(1)) { 1378 BHDR(bh)->h_blocks != cpu_to_le32(1)) {
1381 ext4_error(inode->i_sb, "inode %lu: bad block %llu", 1379 EXT4_ERROR_INODE(inode, "bad block %llu",
1382 inode->i_ino, EXT4_I(inode)->i_file_acl); 1380 EXT4_I(inode)->i_file_acl);
1383 goto cleanup; 1381 goto cleanup;
1384 } 1382 }
1385 ext4_xattr_release_block(handle, inode, bh); 1383 ext4_xattr_release_block(handle, inode, bh);
@@ -1504,9 +1502,8 @@ again:
1504 } 1502 }
1505 bh = sb_bread(inode->i_sb, ce->e_block); 1503 bh = sb_bread(inode->i_sb, ce->e_block);
1506 if (!bh) { 1504 if (!bh) {
1507 ext4_error(inode->i_sb, 1505 EXT4_ERROR_INODE(inode, "block %lu read error",
1508 "inode %lu: block %lu read error", 1506 (unsigned long) ce->e_block);
1509 inode->i_ino, (unsigned long) ce->e_block);
1510 } else if (le32_to_cpu(BHDR(bh)->h_refcount) >= 1507 } else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
1511 EXT4_XATTR_REFCOUNT_MAX) { 1508 EXT4_XATTR_REFCOUNT_MAX) {
1512 ea_idebug(inode, "block %lu refcount %d>=%d", 1509 ea_idebug(inode, "block %lu refcount %d>=%d",
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 53dba57b49a1..27ac25725954 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -306,11 +306,11 @@ extern long fat_generic_ioctl(struct file *filp, unsigned int cmd,
306extern const struct file_operations fat_file_operations; 306extern const struct file_operations fat_file_operations;
307extern const struct inode_operations fat_file_inode_operations; 307extern const struct inode_operations fat_file_inode_operations;
308extern int fat_setattr(struct dentry * dentry, struct iattr * attr); 308extern int fat_setattr(struct dentry * dentry, struct iattr * attr);
309extern void fat_truncate(struct inode *inode); 309extern int fat_setsize(struct inode *inode, loff_t offset);
310extern void fat_truncate_blocks(struct inode *inode, loff_t offset);
310extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, 311extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry,
311 struct kstat *stat); 312 struct kstat *stat);
312extern int fat_file_fsync(struct file *file, struct dentry *dentry, 313extern int fat_file_fsync(struct file *file, int datasync);
313 int datasync);
314 314
315/* fat/inode.c */ 315/* fat/inode.c */
316extern void fat_attach(struct inode *inode, loff_t i_pos); 316extern void fat_attach(struct inode *inode, loff_t i_pos);
diff --git a/fs/fat/file.c b/fs/fat/file.c
index a14c2f6a489e..990dfae022e5 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -149,12 +149,12 @@ static int fat_file_release(struct inode *inode, struct file *filp)
149 return 0; 149 return 0;
150} 150}
151 151
152int fat_file_fsync(struct file *filp, struct dentry *dentry, int datasync) 152int fat_file_fsync(struct file *filp, int datasync)
153{ 153{
154 struct inode *inode = dentry->d_inode; 154 struct inode *inode = filp->f_mapping->host;
155 int res, err; 155 int res, err;
156 156
157 res = simple_fsync(filp, dentry, datasync); 157 res = generic_file_fsync(filp, datasync);
158 err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping); 158 err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping);
159 159
160 return res ? res : err; 160 return res ? res : err;
@@ -283,7 +283,7 @@ static int fat_free(struct inode *inode, int skip)
283 return fat_free_clusters(inode, free_start); 283 return fat_free_clusters(inode, free_start);
284} 284}
285 285
286void fat_truncate(struct inode *inode) 286void fat_truncate_blocks(struct inode *inode, loff_t offset)
287{ 287{
288 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); 288 struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
289 const unsigned int cluster_size = sbi->cluster_size; 289 const unsigned int cluster_size = sbi->cluster_size;
@@ -293,10 +293,10 @@ void fat_truncate(struct inode *inode)
293 * This protects against truncating a file bigger than it was then 293 * This protects against truncating a file bigger than it was then
294 * trying to write into the hole. 294 * trying to write into the hole.
295 */ 295 */
296 if (MSDOS_I(inode)->mmu_private > inode->i_size) 296 if (MSDOS_I(inode)->mmu_private > offset)
297 MSDOS_I(inode)->mmu_private = inode->i_size; 297 MSDOS_I(inode)->mmu_private = offset;
298 298
299 nr_clusters = (inode->i_size + (cluster_size - 1)) >> sbi->cluster_bits; 299 nr_clusters = (offset + (cluster_size - 1)) >> sbi->cluster_bits;
300 300
301 fat_free(inode, nr_clusters); 301 fat_free(inode, nr_clusters);
302 fat_flush_inodes(inode->i_sb, inode, NULL); 302 fat_flush_inodes(inode->i_sb, inode, NULL);
@@ -364,6 +364,18 @@ static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode)
364 return 0; 364 return 0;
365} 365}
366 366
367int fat_setsize(struct inode *inode, loff_t offset)
368{
369 int error;
370
371 error = simple_setsize(inode, offset);
372 if (error)
373 return error;
374 fat_truncate_blocks(inode, offset);
375
376 return error;
377}
378
367#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET) 379#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
368/* valid file mode bits */ 380/* valid file mode bits */
369#define FAT_VALID_MODE (S_IFREG | S_IFDIR | S_IRWXUGO) 381#define FAT_VALID_MODE (S_IFREG | S_IFDIR | S_IRWXUGO)
@@ -378,7 +390,8 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
378 /* 390 /*
379 * Expand the file. Since inode_setattr() updates ->i_size 391 * Expand the file. Since inode_setattr() updates ->i_size
380 * before calling the ->truncate(), but FAT needs to fill the 392 * before calling the ->truncate(), but FAT needs to fill the
381 * hole before it. 393 * hole before it. XXX: this is no longer true with new truncate
394 * sequence.
382 */ 395 */
383 if (attr->ia_valid & ATTR_SIZE) { 396 if (attr->ia_valid & ATTR_SIZE) {
384 if (attr->ia_size > inode->i_size) { 397 if (attr->ia_size > inode->i_size) {
@@ -427,15 +440,20 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
427 attr->ia_valid &= ~ATTR_MODE; 440 attr->ia_valid &= ~ATTR_MODE;
428 } 441 }
429 442
430 if (attr->ia_valid) 443 if (attr->ia_valid & ATTR_SIZE) {
431 error = inode_setattr(inode, attr); 444 error = fat_setsize(inode, attr->ia_size);
445 if (error)
446 goto out;
447 }
448
449 generic_setattr(inode, attr);
450 mark_inode_dirty(inode);
432out: 451out:
433 return error; 452 return error;
434} 453}
435EXPORT_SYMBOL_GPL(fat_setattr); 454EXPORT_SYMBOL_GPL(fat_setattr);
436 455
437const struct inode_operations fat_file_inode_operations = { 456const struct inode_operations fat_file_inode_operations = {
438 .truncate = fat_truncate,
439 .setattr = fat_setattr, 457 .setattr = fat_setattr,
440 .getattr = fat_getattr, 458 .getattr = fat_getattr,
441}; 459};
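
The fat_setattr() hunk stops relying on inode_setattr() invoking ->truncate and instead handles ATTR_SIZE explicitly through fat_setsize(), which updates the size and then frees the backing clusters. A heavily reduced sketch of that ordering with invented toy_* types; it is not the VFS API, only the shape of the change:

#include <stdbool.h>
#include <stdio.h>

struct toy_inode {
	long long i_size;	/* size visible to userspace */
	long long allocated;	/* bytes backed by clusters (simplified) */
};

/* shrink the visible size first, then release backing storage */
static int toy_setsize(struct toy_inode *inode, long long size)
{
	inode->i_size = size;
	if (inode->allocated > size)
		inode->allocated = size;	/* stand-in for freeing clusters */
	return 0;
}

static int toy_setattr(struct toy_inode *inode, bool set_size, long long size)
{
	if (set_size)
		return toy_setsize(inode, size);
	return 0;				/* other attributes left out */
}

int main(void)
{
	struct toy_inode ino = { 4096, 4096 };

	toy_setattr(&ino, true, 1000);
	printf("size=%lld allocated=%lld\n", ino.i_size, ino.allocated);
	return 0;
}
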
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index ed33904926ee..7bf45aee56d7 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -142,14 +142,29 @@ static int fat_readpages(struct file *file, struct address_space *mapping,
142 return mpage_readpages(mapping, pages, nr_pages, fat_get_block); 142 return mpage_readpages(mapping, pages, nr_pages, fat_get_block);
143} 143}
144 144
145static void fat_write_failed(struct address_space *mapping, loff_t to)
146{
147 struct inode *inode = mapping->host;
148
149 if (to > inode->i_size) {
150 truncate_pagecache(inode, to, inode->i_size);
151 fat_truncate_blocks(inode, inode->i_size);
152 }
153}
154
145static int fat_write_begin(struct file *file, struct address_space *mapping, 155static int fat_write_begin(struct file *file, struct address_space *mapping,
146 loff_t pos, unsigned len, unsigned flags, 156 loff_t pos, unsigned len, unsigned flags,
147 struct page **pagep, void **fsdata) 157 struct page **pagep, void **fsdata)
148{ 158{
159 int err;
160
149 *pagep = NULL; 161 *pagep = NULL;
150 return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 162 err = cont_write_begin_newtrunc(file, mapping, pos, len, flags,
151 fat_get_block, 163 pagep, fsdata, fat_get_block,
152 &MSDOS_I(mapping->host)->mmu_private); 164 &MSDOS_I(mapping->host)->mmu_private);
165 if (err < 0)
166 fat_write_failed(mapping, pos + len);
167 return err;
153} 168}
154 169
155static int fat_write_end(struct file *file, struct address_space *mapping, 170static int fat_write_end(struct file *file, struct address_space *mapping,
@@ -159,6 +174,8 @@ static int fat_write_end(struct file *file, struct address_space *mapping,
159 struct inode *inode = mapping->host; 174 struct inode *inode = mapping->host;
160 int err; 175 int err;
161 err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); 176 err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
177 if (err < len)
178 fat_write_failed(mapping, pos + len);
162 if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) { 179 if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) {
163 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 180 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
164 MSDOS_I(inode)->i_attrs |= ATTR_ARCH; 181 MSDOS_I(inode)->i_attrs |= ATTR_ARCH;
@@ -172,7 +189,9 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
172 loff_t offset, unsigned long nr_segs) 189 loff_t offset, unsigned long nr_segs)
173{ 190{
174 struct file *file = iocb->ki_filp; 191 struct file *file = iocb->ki_filp;
175 struct inode *inode = file->f_mapping->host; 192 struct address_space *mapping = file->f_mapping;
193 struct inode *inode = mapping->host;
194 ssize_t ret;
176 195
177 if (rw == WRITE) { 196 if (rw == WRITE) {
178 /* 197 /*
@@ -193,8 +212,12 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
193 * FAT need to use the DIO_LOCKING for avoiding the race 212 * FAT need to use the DIO_LOCKING for avoiding the race
194 * condition of fat_get_block() and ->truncate(). 213 * condition of fat_get_block() and ->truncate().
195 */ 214 */
196 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 215 ret = blockdev_direct_IO_newtrunc(rw, iocb, inode, inode->i_sb->s_bdev,
197 offset, nr_segs, fat_get_block, NULL); 216 iov, offset, nr_segs, fat_get_block, NULL);
217 if (ret < 0 && (rw & WRITE))
218 fat_write_failed(mapping, offset + iov_length(iov, nr_segs));
219
220 return ret;
198} 221}
199 222
200static sector_t _fat_bmap(struct address_space *mapping, sector_t block) 223static sector_t _fat_bmap(struct address_space *mapping, sector_t block)
@@ -429,7 +452,7 @@ static void fat_delete_inode(struct inode *inode)
429{ 452{
430 truncate_inode_pages(&inode->i_data, 0); 453 truncate_inode_pages(&inode->i_data, 0);
431 inode->i_size = 0; 454 inode->i_size = 0;
432 fat_truncate(inode); 455 fat_truncate_blocks(inode, 0);
433 clear_inode(inode); 456 clear_inode(inode);
434} 457}
435 458
diff --git a/fs/file_table.c b/fs/file_table.c
index 32d12b78bac8..5c7d10ead4ad 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -194,14 +194,6 @@ struct file *alloc_file(struct path *path, fmode_t mode,
194} 194}
195EXPORT_SYMBOL(alloc_file); 195EXPORT_SYMBOL(alloc_file);
196 196
197void fput(struct file *file)
198{
199 if (atomic_long_dec_and_test(&file->f_count))
200 __fput(file);
201}
202
203EXPORT_SYMBOL(fput);
204
205/** 197/**
206 * drop_file_write_access - give up ability to write to a file 198 * drop_file_write_access - give up ability to write to a file
207 * @file: the file to which we will stop writing 199 * @file: the file to which we will stop writing
@@ -227,10 +219,9 @@ void drop_file_write_access(struct file *file)
227} 219}
228EXPORT_SYMBOL_GPL(drop_file_write_access); 220EXPORT_SYMBOL_GPL(drop_file_write_access);
229 221
230/* __fput is called from task context when aio completion releases the last 222/* the real guts of fput() - releasing the last reference to file
231 * last use of a struct file *. Do not use otherwise.
232 */ 223 */
233void __fput(struct file *file) 224static void __fput(struct file *file)
234{ 225{
235 struct dentry *dentry = file->f_path.dentry; 226 struct dentry *dentry = file->f_path.dentry;
236 struct vfsmount *mnt = file->f_path.mnt; 227 struct vfsmount *mnt = file->f_path.mnt;
@@ -268,6 +259,14 @@ void __fput(struct file *file)
268 mntput(mnt); 259 mntput(mnt);
269} 260}
270 261
262void fput(struct file *file)
263{
264 if (atomic_long_dec_and_test(&file->f_count))
265 __fput(file);
266}
267
268EXPORT_SYMBOL(fput);
269
271struct file *fget(unsigned int fd) 270struct file *fget(unsigned int fd)
272{ 271{
273 struct file *file; 272 struct file *file;
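The file_table.c change is purely structural: __fput() becomes static and fput() moves below it, so the "drop a reference, tear down on the last one" pair lives in one place with no external declaration. A small userspace analogue of that pattern, for illustration only (none of the names below are kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct file_like {
        atomic_long refcount;
};

/* plays the role of the now-static __fput(): runs only on the last put */
static void file_like_destroy(struct file_like *f)
{
        printf("last reference gone, releasing resources\n");
        free(f);
}

/* plays the role of fput(): the only public entry point */
static void file_like_put(struct file_like *f)
{
        /* atomic_fetch_sub returns the old value; 1 means we dropped it to 0 */
        if (atomic_fetch_sub(&f->refcount, 1) == 1)
                file_like_destroy(f);
}

int main(void)
{
        struct file_like *f = malloc(sizeof(*f));

        atomic_init(&f->refcount, 2);
        file_like_put(f);       /* one reference still held elsewhere */
        file_like_put(f);       /* destroy runs here */
        return 0;
}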
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index aee049cb9f84..0ec7bb2c95c6 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -57,6 +57,8 @@ const struct inode_operations vxfs_dir_inode_ops = {
57}; 57};
58 58
59const struct file_operations vxfs_dir_operations = { 59const struct file_operations vxfs_dir_operations = {
60 .llseek = generic_file_llseek,
61 .read = generic_read_dir,
60 .readdir = vxfs_readdir, 62 .readdir = vxfs_readdir,
61}; 63};
62 64
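vxfs (and isofs further down) gain .llseek and .read entries in their directory file_operations. Without a .read method the VFS falls back to -EINVAL for read(2) on a directory fd; wiring in generic_read_dir gives the conventional -EISDIR instead, and generic_file_llseek makes lseek(2) on the directory well defined. A minimal table in the same shape, with a hypothetical readdir callback:

const struct file_operations example_dir_operations = {
        .llseek  = generic_file_llseek, /* lseek(2) on the directory fd */
        .read    = generic_read_dir,    /* read(2) on a directory -> -EISDIR */
        .readdir = example_readdir,     /* per-filesystem callback, hypothetical */
};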
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index 1e1f286dd70e..4a8eb31c5338 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -103,7 +103,7 @@ static struct fscache_object *fscache_objlist_lookup(loff_t *_pos)
103 /* banners (can't represent line 0 by pos 0 as that would involve 103 /* banners (can't represent line 0 by pos 0 as that would involve
104 * returning a NULL pointer) */ 104 * returning a NULL pointer) */
105 if (pos == 0) 105 if (pos == 0)
106 return (struct fscache_object *) ++(*_pos); 106 return (struct fscache_object *)(long)++(*_pos);
107 if (pos < 3) 107 if (pos < 3)
108 return (struct fscache_object *)pos; 108 return (struct fscache_object *)pos;
109 109
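The object-list change only adds an intermediate (long) cast. *_pos is a loff_t (always 64-bit), and casting a 64-bit integer straight to a pointer trips GCC's "cast to pointer from integer of different size" warning on 32-bit builds where the sizes differ; going through long first matches the pointer width, and the small value being encoded (1 or 2) survives the narrowing. A standalone reproduction of the idea, not the fscache code:

#include <stdio.h>

int main(void)
{
        long long pos = 1;                      /* stands in for loff_t */
        void *direct   = (void *)pos;           /* may warn on 32-bit: sizes differ */
        void *via_long = (void *)(long)pos;     /* same width as a pointer */

        printf("%p %p\n", direct, via_long);
        return 0;
}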
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index e53df5ebb2b8..9424796d6634 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -16,6 +16,9 @@
16#include <linux/pagemap.h> 16#include <linux/pagemap.h>
17#include <linux/file.h> 17#include <linux/file.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/pipe_fs_i.h>
20#include <linux/swap.h>
21#include <linux/splice.h>
19 22
20MODULE_ALIAS_MISCDEV(FUSE_MINOR); 23MODULE_ALIAS_MISCDEV(FUSE_MINOR);
21MODULE_ALIAS("devname:fuse"); 24MODULE_ALIAS("devname:fuse");
@@ -499,6 +502,9 @@ struct fuse_copy_state {
499 int write; 502 int write;
500 struct fuse_req *req; 503 struct fuse_req *req;
501 const struct iovec *iov; 504 const struct iovec *iov;
505 struct pipe_buffer *pipebufs;
506 struct pipe_buffer *currbuf;
507 struct pipe_inode_info *pipe;
502 unsigned long nr_segs; 508 unsigned long nr_segs;
503 unsigned long seglen; 509 unsigned long seglen;
504 unsigned long addr; 510 unsigned long addr;
@@ -506,16 +512,16 @@ struct fuse_copy_state {
506 void *mapaddr; 512 void *mapaddr;
507 void *buf; 513 void *buf;
508 unsigned len; 514 unsigned len;
515 unsigned move_pages:1;
509}; 516};
510 517
511static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc, 518static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
512 int write, struct fuse_req *req, 519 int write,
513 const struct iovec *iov, unsigned long nr_segs) 520 const struct iovec *iov, unsigned long nr_segs)
514{ 521{
515 memset(cs, 0, sizeof(*cs)); 522 memset(cs, 0, sizeof(*cs));
516 cs->fc = fc; 523 cs->fc = fc;
517 cs->write = write; 524 cs->write = write;
518 cs->req = req;
519 cs->iov = iov; 525 cs->iov = iov;
520 cs->nr_segs = nr_segs; 526 cs->nr_segs = nr_segs;
521} 527}
@@ -523,7 +529,18 @@ static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
523/* Unmap and put previous page of userspace buffer */ 529/* Unmap and put previous page of userspace buffer */
524static void fuse_copy_finish(struct fuse_copy_state *cs) 530static void fuse_copy_finish(struct fuse_copy_state *cs)
525{ 531{
526 if (cs->mapaddr) { 532 if (cs->currbuf) {
533 struct pipe_buffer *buf = cs->currbuf;
534
535 if (!cs->write) {
536 buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
537 } else {
538 kunmap_atomic(cs->mapaddr, KM_USER0);
539 buf->len = PAGE_SIZE - cs->len;
540 }
541 cs->currbuf = NULL;
542 cs->mapaddr = NULL;
543 } else if (cs->mapaddr) {
527 kunmap_atomic(cs->mapaddr, KM_USER0); 544 kunmap_atomic(cs->mapaddr, KM_USER0);
528 if (cs->write) { 545 if (cs->write) {
529 flush_dcache_page(cs->pg); 546 flush_dcache_page(cs->pg);
@@ -545,26 +562,61 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
545 562
546 unlock_request(cs->fc, cs->req); 563 unlock_request(cs->fc, cs->req);
547 fuse_copy_finish(cs); 564 fuse_copy_finish(cs);
548 if (!cs->seglen) { 565 if (cs->pipebufs) {
549 BUG_ON(!cs->nr_segs); 566 struct pipe_buffer *buf = cs->pipebufs;
550 cs->seglen = cs->iov[0].iov_len; 567
551 cs->addr = (unsigned long) cs->iov[0].iov_base; 568 if (!cs->write) {
552 cs->iov++; 569 err = buf->ops->confirm(cs->pipe, buf);
553 cs->nr_segs--; 570 if (err)
571 return err;
572
573 BUG_ON(!cs->nr_segs);
574 cs->currbuf = buf;
575 cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
576 cs->len = buf->len;
577 cs->buf = cs->mapaddr + buf->offset;
578 cs->pipebufs++;
579 cs->nr_segs--;
580 } else {
581 struct page *page;
582
583 if (cs->nr_segs == cs->pipe->buffers)
584 return -EIO;
585
586 page = alloc_page(GFP_HIGHUSER);
587 if (!page)
588 return -ENOMEM;
589
590 buf->page = page;
591 buf->offset = 0;
592 buf->len = 0;
593
594 cs->currbuf = buf;
595 cs->mapaddr = kmap_atomic(page, KM_USER0);
596 cs->buf = cs->mapaddr;
597 cs->len = PAGE_SIZE;
598 cs->pipebufs++;
599 cs->nr_segs++;
600 }
601 } else {
602 if (!cs->seglen) {
603 BUG_ON(!cs->nr_segs);
604 cs->seglen = cs->iov[0].iov_len;
605 cs->addr = (unsigned long) cs->iov[0].iov_base;
606 cs->iov++;
607 cs->nr_segs--;
608 }
609 err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
610 if (err < 0)
611 return err;
612 BUG_ON(err != 1);
613 offset = cs->addr % PAGE_SIZE;
614 cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
615 cs->buf = cs->mapaddr + offset;
616 cs->len = min(PAGE_SIZE - offset, cs->seglen);
617 cs->seglen -= cs->len;
618 cs->addr += cs->len;
554 } 619 }
555 down_read(&current->mm->mmap_sem);
556 err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
557 &cs->pg, NULL);
558 up_read(&current->mm->mmap_sem);
559 if (err < 0)
560 return err;
561 BUG_ON(err != 1);
562 offset = cs->addr % PAGE_SIZE;
563 cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
564 cs->buf = cs->mapaddr + offset;
565 cs->len = min(PAGE_SIZE - offset, cs->seglen);
566 cs->seglen -= cs->len;
567 cs->addr += cs->len;
568 620
569 return lock_request(cs->fc, cs->req); 621 return lock_request(cs->fc, cs->req);
570} 622}
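After this hunk fuse_copy_fill() has two sources: when cs->pipebufs is set it maps (read side) or allocates (write side) pipe buffer pages, otherwise it falls back to the original iovec path, now pinning the user page with get_user_pages_fast() instead of taking mmap_sem around get_user_pages(). The pinning pattern in isolation, as a kernel-style fragment (error handling trimmed; the fuse code keeps the page in cs->pg until the segment is consumed and then releases it):

struct page *pg;
int err;

/* pin exactly one page of the user buffer; no explicit mmap_sem needed */
err = get_user_pages_fast(addr, 1, write, &pg);
if (err < 0)
        return err;

/* kmap_atomic(pg, KM_USER0), copy to or from the mapping, kunmap_atomic ... */

page_cache_release(pg);         /* drop the pin once the data has been copied */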
@@ -586,23 +638,178 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
586 return ncpy; 638 return ncpy;
587} 639}
588 640
641static int fuse_check_page(struct page *page)
642{
643 if (page_mapcount(page) ||
644 page->mapping != NULL ||
645 page_count(page) != 1 ||
646 (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
647 ~(1 << PG_locked |
648 1 << PG_referenced |
649 1 << PG_uptodate |
650 1 << PG_lru |
651 1 << PG_active |
652 1 << PG_reclaim))) {
653 printk(KERN_WARNING "fuse: trying to steal weird page\n");
654 printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
655 return 1;
656 }
657 return 0;
658}
659
660static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
661{
662 int err;
663 struct page *oldpage = *pagep;
664 struct page *newpage;
665 struct pipe_buffer *buf = cs->pipebufs;
666 struct address_space *mapping;
667 pgoff_t index;
668
669 unlock_request(cs->fc, cs->req);
670 fuse_copy_finish(cs);
671
672 err = buf->ops->confirm(cs->pipe, buf);
673 if (err)
674 return err;
675
676 BUG_ON(!cs->nr_segs);
677 cs->currbuf = buf;
678 cs->len = buf->len;
679 cs->pipebufs++;
680 cs->nr_segs--;
681
682 if (cs->len != PAGE_SIZE)
683 goto out_fallback;
684
685 if (buf->ops->steal(cs->pipe, buf) != 0)
686 goto out_fallback;
687
688 newpage = buf->page;
689
690 if (WARN_ON(!PageUptodate(newpage)))
691 return -EIO;
692
693 ClearPageMappedToDisk(newpage);
694
695 if (fuse_check_page(newpage) != 0)
696 goto out_fallback_unlock;
697
698 mapping = oldpage->mapping;
699 index = oldpage->index;
700
701 /*
702 * This is a new and locked page, it shouldn't be mapped or
703 * have any special flags on it
704 */
705 if (WARN_ON(page_mapped(oldpage)))
706 goto out_fallback_unlock;
707 if (WARN_ON(page_has_private(oldpage)))
708 goto out_fallback_unlock;
709 if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
710 goto out_fallback_unlock;
711 if (WARN_ON(PageMlocked(oldpage)))
712 goto out_fallback_unlock;
713
714 remove_from_page_cache(oldpage);
715 page_cache_release(oldpage);
716
717 err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
718 if (err) {
719 printk(KERN_WARNING "fuse_try_move_page: failed to add page");
720 goto out_fallback_unlock;
721 }
722 page_cache_get(newpage);
723
724 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
725 lru_cache_add_file(newpage);
726
727 err = 0;
728 spin_lock(&cs->fc->lock);
729 if (cs->req->aborted)
730 err = -ENOENT;
731 else
732 *pagep = newpage;
733 spin_unlock(&cs->fc->lock);
734
735 if (err) {
736 unlock_page(newpage);
737 page_cache_release(newpage);
738 return err;
739 }
740
741 unlock_page(oldpage);
742 page_cache_release(oldpage);
743 cs->len = 0;
744
745 return 0;
746
747out_fallback_unlock:
748 unlock_page(newpage);
749out_fallback:
750 cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
751 cs->buf = cs->mapaddr + buf->offset;
752
753 err = lock_request(cs->fc, cs->req);
754 if (err)
755 return err;
756
757 return 1;
758}
759
760static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
761 unsigned offset, unsigned count)
762{
763 struct pipe_buffer *buf;
764
765 if (cs->nr_segs == cs->pipe->buffers)
766 return -EIO;
767
768 unlock_request(cs->fc, cs->req);
769 fuse_copy_finish(cs);
770
771 buf = cs->pipebufs;
772 page_cache_get(page);
773 buf->page = page;
774 buf->offset = offset;
775 buf->len = count;
776
777 cs->pipebufs++;
778 cs->nr_segs++;
779 cs->len = 0;
780
781 return 0;
782}
783
589/* 784/*
590 * Copy a page in the request to/from the userspace buffer. Must be 785 * Copy a page in the request to/from the userspace buffer. Must be
591 * done atomically 786 * done atomically
592 */ 787 */
593static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page, 788static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
594 unsigned offset, unsigned count, int zeroing) 789 unsigned offset, unsigned count, int zeroing)
595{ 790{
791 int err;
792 struct page *page = *pagep;
793
596 if (page && zeroing && count < PAGE_SIZE) { 794 if (page && zeroing && count < PAGE_SIZE) {
597 void *mapaddr = kmap_atomic(page, KM_USER1); 795 void *mapaddr = kmap_atomic(page, KM_USER1);
598 memset(mapaddr, 0, PAGE_SIZE); 796 memset(mapaddr, 0, PAGE_SIZE);
599 kunmap_atomic(mapaddr, KM_USER1); 797 kunmap_atomic(mapaddr, KM_USER1);
600 } 798 }
601 while (count) { 799 while (count) {
602 if (!cs->len) { 800 if (cs->write && cs->pipebufs && page) {
603 int err = fuse_copy_fill(cs); 801 return fuse_ref_page(cs, page, offset, count);
604 if (err) 802 } else if (!cs->len) {
605 return err; 803 if (cs->move_pages && page &&
804 offset == 0 && count == PAGE_SIZE) {
805 err = fuse_try_move_page(cs, pagep);
806 if (err <= 0)
807 return err;
808 } else {
809 err = fuse_copy_fill(cs);
810 if (err)
811 return err;
812 }
606 } 813 }
607 if (page) { 814 if (page) {
608 void *mapaddr = kmap_atomic(page, KM_USER1); 815 void *mapaddr = kmap_atomic(page, KM_USER1);
@@ -627,8 +834,10 @@ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
627 unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset); 834 unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);
628 835
629 for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) { 836 for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
630 struct page *page = req->pages[i]; 837 int err;
631 int err = fuse_copy_page(cs, page, offset, count, zeroing); 838
839 err = fuse_copy_page(cs, &req->pages[i], offset, count,
840 zeroing);
632 if (err) 841 if (err)
633 return err; 842 return err;
634 843
@@ -705,11 +914,10 @@ __acquires(&fc->lock)
705 * 914 *
706 * Called with fc->lock held, releases it 915 * Called with fc->lock held, releases it
707 */ 916 */
708static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req, 917static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
709 const struct iovec *iov, unsigned long nr_segs) 918 size_t nbytes, struct fuse_req *req)
710__releases(&fc->lock) 919__releases(&fc->lock)
711{ 920{
712 struct fuse_copy_state cs;
713 struct fuse_in_header ih; 921 struct fuse_in_header ih;
714 struct fuse_interrupt_in arg; 922 struct fuse_interrupt_in arg;
715 unsigned reqsize = sizeof(ih) + sizeof(arg); 923 unsigned reqsize = sizeof(ih) + sizeof(arg);
@@ -725,14 +933,13 @@ __releases(&fc->lock)
725 arg.unique = req->in.h.unique; 933 arg.unique = req->in.h.unique;
726 934
727 spin_unlock(&fc->lock); 935 spin_unlock(&fc->lock);
728 if (iov_length(iov, nr_segs) < reqsize) 936 if (nbytes < reqsize)
729 return -EINVAL; 937 return -EINVAL;
730 938
731 fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs); 939 err = fuse_copy_one(cs, &ih, sizeof(ih));
732 err = fuse_copy_one(&cs, &ih, sizeof(ih));
733 if (!err) 940 if (!err)
734 err = fuse_copy_one(&cs, &arg, sizeof(arg)); 941 err = fuse_copy_one(cs, &arg, sizeof(arg));
735 fuse_copy_finish(&cs); 942 fuse_copy_finish(cs);
736 943
737 return err ? err : reqsize; 944 return err ? err : reqsize;
738} 945}
@@ -746,18 +953,13 @@ __releases(&fc->lock)
746 * request_end(). Otherwise add it to the processing list, and set 953 * request_end(). Otherwise add it to the processing list, and set
747 * the 'sent' flag. 954 * the 'sent' flag.
748 */ 955 */
749static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, 956static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
750 unsigned long nr_segs, loff_t pos) 957 struct fuse_copy_state *cs, size_t nbytes)
751{ 958{
752 int err; 959 int err;
753 struct fuse_req *req; 960 struct fuse_req *req;
754 struct fuse_in *in; 961 struct fuse_in *in;
755 struct fuse_copy_state cs;
756 unsigned reqsize; 962 unsigned reqsize;
757 struct file *file = iocb->ki_filp;
758 struct fuse_conn *fc = fuse_get_conn(file);
759 if (!fc)
760 return -EPERM;
761 963
762 restart: 964 restart:
763 spin_lock(&fc->lock); 965 spin_lock(&fc->lock);
@@ -777,7 +979,7 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
777 if (!list_empty(&fc->interrupts)) { 979 if (!list_empty(&fc->interrupts)) {
778 req = list_entry(fc->interrupts.next, struct fuse_req, 980 req = list_entry(fc->interrupts.next, struct fuse_req,
779 intr_entry); 981 intr_entry);
780 return fuse_read_interrupt(fc, req, iov, nr_segs); 982 return fuse_read_interrupt(fc, cs, nbytes, req);
781 } 983 }
782 984
783 req = list_entry(fc->pending.next, struct fuse_req, list); 985 req = list_entry(fc->pending.next, struct fuse_req, list);
@@ -787,7 +989,7 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
787 in = &req->in; 989 in = &req->in;
788 reqsize = in->h.len; 990 reqsize = in->h.len;
789 /* If request is too large, reply with an error and restart the read */ 991 /* If request is too large, reply with an error and restart the read */
790 if (iov_length(iov, nr_segs) < reqsize) { 992 if (nbytes < reqsize) {
791 req->out.h.error = -EIO; 993 req->out.h.error = -EIO;
792 /* SETXATTR is special, since it may contain too large data */ 994 /* SETXATTR is special, since it may contain too large data */
793 if (in->h.opcode == FUSE_SETXATTR) 995 if (in->h.opcode == FUSE_SETXATTR)
@@ -796,12 +998,12 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
796 goto restart; 998 goto restart;
797 } 999 }
798 spin_unlock(&fc->lock); 1000 spin_unlock(&fc->lock);
799 fuse_copy_init(&cs, fc, 1, req, iov, nr_segs); 1001 cs->req = req;
800 err = fuse_copy_one(&cs, &in->h, sizeof(in->h)); 1002 err = fuse_copy_one(cs, &in->h, sizeof(in->h));
801 if (!err) 1003 if (!err)
802 err = fuse_copy_args(&cs, in->numargs, in->argpages, 1004 err = fuse_copy_args(cs, in->numargs, in->argpages,
803 (struct fuse_arg *) in->args, 0); 1005 (struct fuse_arg *) in->args, 0);
804 fuse_copy_finish(&cs); 1006 fuse_copy_finish(cs);
805 spin_lock(&fc->lock); 1007 spin_lock(&fc->lock);
806 req->locked = 0; 1008 req->locked = 0;
807 if (req->aborted) { 1009 if (req->aborted) {
@@ -829,6 +1031,110 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
829 return err; 1031 return err;
830} 1032}
831 1033
1034static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
1035 unsigned long nr_segs, loff_t pos)
1036{
1037 struct fuse_copy_state cs;
1038 struct file *file = iocb->ki_filp;
1039 struct fuse_conn *fc = fuse_get_conn(file);
1040 if (!fc)
1041 return -EPERM;
1042
1043 fuse_copy_init(&cs, fc, 1, iov, nr_segs);
1044
1045 return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
1046}
1047
1048static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
1049 struct pipe_buffer *buf)
1050{
1051 return 1;
1052}
1053
1054static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
1055 .can_merge = 0,
1056 .map = generic_pipe_buf_map,
1057 .unmap = generic_pipe_buf_unmap,
1058 .confirm = generic_pipe_buf_confirm,
1059 .release = generic_pipe_buf_release,
1060 .steal = fuse_dev_pipe_buf_steal,
1061 .get = generic_pipe_buf_get,
1062};
1063
1064static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1065 struct pipe_inode_info *pipe,
1066 size_t len, unsigned int flags)
1067{
1068 int ret;
1069 int page_nr = 0;
1070 int do_wakeup = 0;
1071 struct pipe_buffer *bufs;
1072 struct fuse_copy_state cs;
1073 struct fuse_conn *fc = fuse_get_conn(in);
1074 if (!fc)
1075 return -EPERM;
1076
1077 bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
1078 if (!bufs)
1079 return -ENOMEM;
1080
1081 fuse_copy_init(&cs, fc, 1, NULL, 0);
1082 cs.pipebufs = bufs;
1083 cs.pipe = pipe;
1084 ret = fuse_dev_do_read(fc, in, &cs, len);
1085 if (ret < 0)
1086 goto out;
1087
1088 ret = 0;
1089 pipe_lock(pipe);
1090
1091 if (!pipe->readers) {
1092 send_sig(SIGPIPE, current, 0);
1093 if (!ret)
1094 ret = -EPIPE;
1095 goto out_unlock;
1096 }
1097
1098 if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
1099 ret = -EIO;
1100 goto out_unlock;
1101 }
1102
1103 while (page_nr < cs.nr_segs) {
1104 int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
1105 struct pipe_buffer *buf = pipe->bufs + newbuf;
1106
1107 buf->page = bufs[page_nr].page;
1108 buf->offset = bufs[page_nr].offset;
1109 buf->len = bufs[page_nr].len;
1110 buf->ops = &fuse_dev_pipe_buf_ops;
1111
1112 pipe->nrbufs++;
1113 page_nr++;
1114 ret += buf->len;
1115
1116 if (pipe->inode)
1117 do_wakeup = 1;
1118 }
1119
1120out_unlock:
1121 pipe_unlock(pipe);
1122
1123 if (do_wakeup) {
1124 smp_mb();
1125 if (waitqueue_active(&pipe->wait))
1126 wake_up_interruptible(&pipe->wait);
1127 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
1128 }
1129
1130out:
1131 for (; page_nr < cs.nr_segs; page_nr++)
1132 page_cache_release(bufs[page_nr].page);
1133
1134 kfree(bufs);
1135 return ret;
1136}
1137
832static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size, 1138static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
833 struct fuse_copy_state *cs) 1139 struct fuse_copy_state *cs)
834{ 1140{
@@ -988,23 +1294,17 @@ static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
988 * it from the list and copy the rest of the buffer to the request. 1294 * it from the list and copy the rest of the buffer to the request.
989 * The request is finished by calling request_end() 1295 * The request is finished by calling request_end()
990 */ 1296 */
991static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, 1297static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
992 unsigned long nr_segs, loff_t pos) 1298 struct fuse_copy_state *cs, size_t nbytes)
993{ 1299{
994 int err; 1300 int err;
995 size_t nbytes = iov_length(iov, nr_segs);
996 struct fuse_req *req; 1301 struct fuse_req *req;
997 struct fuse_out_header oh; 1302 struct fuse_out_header oh;
998 struct fuse_copy_state cs;
999 struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1000 if (!fc)
1001 return -EPERM;
1002 1303
1003 fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
1004 if (nbytes < sizeof(struct fuse_out_header)) 1304 if (nbytes < sizeof(struct fuse_out_header))
1005 return -EINVAL; 1305 return -EINVAL;
1006 1306
1007 err = fuse_copy_one(&cs, &oh, sizeof(oh)); 1307 err = fuse_copy_one(cs, &oh, sizeof(oh));
1008 if (err) 1308 if (err)
1009 goto err_finish; 1309 goto err_finish;
1010 1310
@@ -1017,7 +1317,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
1017 * and error contains notification code. 1317 * and error contains notification code.
1018 */ 1318 */
1019 if (!oh.unique) { 1319 if (!oh.unique) {
1020 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs); 1320 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1021 return err ? err : nbytes; 1321 return err ? err : nbytes;
1022 } 1322 }
1023 1323
@@ -1036,7 +1336,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
1036 1336
1037 if (req->aborted) { 1337 if (req->aborted) {
1038 spin_unlock(&fc->lock); 1338 spin_unlock(&fc->lock);
1039 fuse_copy_finish(&cs); 1339 fuse_copy_finish(cs);
1040 spin_lock(&fc->lock); 1340 spin_lock(&fc->lock);
1041 request_end(fc, req); 1341 request_end(fc, req);
1042 return -ENOENT; 1342 return -ENOENT;
@@ -1053,7 +1353,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
1053 queue_interrupt(fc, req); 1353 queue_interrupt(fc, req);
1054 1354
1055 spin_unlock(&fc->lock); 1355 spin_unlock(&fc->lock);
1056 fuse_copy_finish(&cs); 1356 fuse_copy_finish(cs);
1057 return nbytes; 1357 return nbytes;
1058 } 1358 }
1059 1359
@@ -1061,11 +1361,13 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
1061 list_move(&req->list, &fc->io); 1361 list_move(&req->list, &fc->io);
1062 req->out.h = oh; 1362 req->out.h = oh;
1063 req->locked = 1; 1363 req->locked = 1;
1064 cs.req = req; 1364 cs->req = req;
1365 if (!req->out.page_replace)
1366 cs->move_pages = 0;
1065 spin_unlock(&fc->lock); 1367 spin_unlock(&fc->lock);
1066 1368
1067 err = copy_out_args(&cs, &req->out, nbytes); 1369 err = copy_out_args(cs, &req->out, nbytes);
1068 fuse_copy_finish(&cs); 1370 fuse_copy_finish(cs);
1069 1371
1070 spin_lock(&fc->lock); 1372 spin_lock(&fc->lock);
1071 req->locked = 0; 1373 req->locked = 0;
@@ -1081,10 +1383,101 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
1081 err_unlock: 1383 err_unlock:
1082 spin_unlock(&fc->lock); 1384 spin_unlock(&fc->lock);
1083 err_finish: 1385 err_finish:
1084 fuse_copy_finish(&cs); 1386 fuse_copy_finish(cs);
1085 return err; 1387 return err;
1086} 1388}
1087 1389
1390static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
1391 unsigned long nr_segs, loff_t pos)
1392{
1393 struct fuse_copy_state cs;
1394 struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1395 if (!fc)
1396 return -EPERM;
1397
1398 fuse_copy_init(&cs, fc, 0, iov, nr_segs);
1399
1400 return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
1401}
1402
1403static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1404 struct file *out, loff_t *ppos,
1405 size_t len, unsigned int flags)
1406{
1407 unsigned nbuf;
1408 unsigned idx;
1409 struct pipe_buffer *bufs;
1410 struct fuse_copy_state cs;
1411 struct fuse_conn *fc;
1412 size_t rem;
1413 ssize_t ret;
1414
1415 fc = fuse_get_conn(out);
1416 if (!fc)
1417 return -EPERM;
1418
1419 bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
1420 if (!bufs)
1421 return -ENOMEM;
1422
1423 pipe_lock(pipe);
1424 nbuf = 0;
1425 rem = 0;
1426 for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
1427 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
1428
1429 ret = -EINVAL;
1430 if (rem < len) {
1431 pipe_unlock(pipe);
1432 goto out;
1433 }
1434
1435 rem = len;
1436 while (rem) {
1437 struct pipe_buffer *ibuf;
1438 struct pipe_buffer *obuf;
1439
1440 BUG_ON(nbuf >= pipe->buffers);
1441 BUG_ON(!pipe->nrbufs);
1442 ibuf = &pipe->bufs[pipe->curbuf];
1443 obuf = &bufs[nbuf];
1444
1445 if (rem >= ibuf->len) {
1446 *obuf = *ibuf;
1447 ibuf->ops = NULL;
1448 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
1449 pipe->nrbufs--;
1450 } else {
1451 ibuf->ops->get(pipe, ibuf);
1452 *obuf = *ibuf;
1453 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1454 obuf->len = rem;
1455 ibuf->offset += obuf->len;
1456 ibuf->len -= obuf->len;
1457 }
1458 nbuf++;
1459 rem -= obuf->len;
1460 }
1461 pipe_unlock(pipe);
1462
1463 fuse_copy_init(&cs, fc, 0, NULL, nbuf);
1464 cs.pipebufs = bufs;
1465 cs.pipe = pipe;
1466
1467 if (flags & SPLICE_F_MOVE)
1468 cs.move_pages = 1;
1469
1470 ret = fuse_dev_do_write(fc, &cs, len);
1471
1472 for (idx = 0; idx < nbuf; idx++) {
1473 struct pipe_buffer *buf = &bufs[idx];
1474 buf->ops->release(pipe, buf);
1475 }
1476out:
1477 kfree(bufs);
1478 return ret;
1479}
1480
1088static unsigned fuse_dev_poll(struct file *file, poll_table *wait) 1481static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
1089{ 1482{
1090 unsigned mask = POLLOUT | POLLWRNORM; 1483 unsigned mask = POLLOUT | POLLWRNORM;
@@ -1226,8 +1619,10 @@ const struct file_operations fuse_dev_operations = {
1226 .llseek = no_llseek, 1619 .llseek = no_llseek,
1227 .read = do_sync_read, 1620 .read = do_sync_read,
1228 .aio_read = fuse_dev_read, 1621 .aio_read = fuse_dev_read,
1622 .splice_read = fuse_dev_splice_read,
1229 .write = do_sync_write, 1623 .write = do_sync_write,
1230 .aio_write = fuse_dev_write, 1624 .aio_write = fuse_dev_write,
1625 .splice_write = fuse_dev_splice_write,
1231 .poll = fuse_dev_poll, 1626 .poll = fuse_dev_poll,
1232 .release = fuse_dev_release, 1627 .release = fuse_dev_release,
1233 .fasync = fuse_dev_fasync, 1628 .fasync = fuse_dev_fasync,
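With .splice_read and .splice_write wired into fuse_dev_operations, a userspace filesystem daemon can shuttle request payloads through a pipe instead of copying them through read(2)/write(2) buffers, and SPLICE_F_MOVE lets the kernel steal whole pages on the reply path (fuse_try_move_page above). A hedged userspace sketch of the idea; a real daemon still has to parse the fuse headers, this only shows the splice plumbing:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Move one request's worth of data from the fuse device into some backing
 * fd without copying it through a userspace buffer. fuse_fd and backing_fd
 * are assumed to be open already; error handling is minimal. */
static int forward_request(int fuse_fd, int backing_fd, size_t len)
{
        int pfd[2];
        ssize_t n;

        if (pipe(pfd) < 0)
                return -1;

        /* served by fuse_dev_splice_read(): the kernel fills the pipe */
        n = splice(fuse_fd, NULL, pfd[1], NULL, len, SPLICE_F_MOVE);
        if (n > 0)
                n = splice(pfd[0], NULL, backing_fd, NULL, n, SPLICE_F_MOVE);

        close(pfd[0]);
        close(pfd[1]);
        return n < 0 ? -1 : 0;
}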
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 4787ae6c5c1c..3cdc5f78a406 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1156,10 +1156,9 @@ static int fuse_dir_release(struct inode *inode, struct file *file)
1156 return 0; 1156 return 0;
1157} 1157}
1158 1158
1159static int fuse_dir_fsync(struct file *file, struct dentry *de, int datasync) 1159static int fuse_dir_fsync(struct file *file, int datasync)
1160{ 1160{
1161 /* nfsd can call this with no file */ 1161 return fuse_fsync_common(file, datasync, 1);
1162 return file ? fuse_fsync_common(file, de, datasync, 1) : 0;
1163} 1162}
1164 1163
1165static bool update_mtime(unsigned ivalid) 1164static bool update_mtime(unsigned ivalid)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index a9f5e137f1d3..ada0adeb3bb5 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -351,10 +351,9 @@ static void fuse_sync_writes(struct inode *inode)
351 fuse_release_nowrite(inode); 351 fuse_release_nowrite(inode);
352} 352}
353 353
354int fuse_fsync_common(struct file *file, struct dentry *de, int datasync, 354int fuse_fsync_common(struct file *file, int datasync, int isdir)
355 int isdir)
356{ 355{
357 struct inode *inode = de->d_inode; 356 struct inode *inode = file->f_mapping->host;
358 struct fuse_conn *fc = get_fuse_conn(inode); 357 struct fuse_conn *fc = get_fuse_conn(inode);
359 struct fuse_file *ff = file->private_data; 358 struct fuse_file *ff = file->private_data;
360 struct fuse_req *req; 359 struct fuse_req *req;
@@ -403,9 +402,9 @@ int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
403 return err; 402 return err;
404} 403}
405 404
406static int fuse_fsync(struct file *file, struct dentry *de, int datasync) 405static int fuse_fsync(struct file *file, int datasync)
407{ 406{
408 return fuse_fsync_common(file, de, datasync, 0); 407 return fuse_fsync_common(file, datasync, 0);
409} 408}
410 409
411void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, 410void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
@@ -517,17 +516,26 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
517 int i; 516 int i;
518 size_t count = req->misc.read.in.size; 517 size_t count = req->misc.read.in.size;
519 size_t num_read = req->out.args[0].size; 518 size_t num_read = req->out.args[0].size;
520 struct inode *inode = req->pages[0]->mapping->host; 519 struct address_space *mapping = NULL;
521 520
522 /* 521 for (i = 0; mapping == NULL && i < req->num_pages; i++)
523 * Short read means EOF. If file size is larger, truncate it 522 mapping = req->pages[i]->mapping;
524 */
525 if (!req->out.h.error && num_read < count) {
526 loff_t pos = page_offset(req->pages[0]) + num_read;
527 fuse_read_update_size(inode, pos, req->misc.read.attr_ver);
528 }
529 523
530 fuse_invalidate_attr(inode); /* atime changed */ 524 if (mapping) {
525 struct inode *inode = mapping->host;
526
527 /*
528 * Short read means EOF. If file size is larger, truncate it
529 */
530 if (!req->out.h.error && num_read < count) {
531 loff_t pos;
532
533 pos = page_offset(req->pages[0]) + num_read;
534 fuse_read_update_size(inode, pos,
535 req->misc.read.attr_ver);
536 }
537 fuse_invalidate_attr(inode); /* atime changed */
538 }
531 539
532 for (i = 0; i < req->num_pages; i++) { 540 for (i = 0; i < req->num_pages; i++) {
533 struct page *page = req->pages[i]; 541 struct page *page = req->pages[i];
@@ -536,6 +544,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
536 else 544 else
537 SetPageError(page); 545 SetPageError(page);
538 unlock_page(page); 546 unlock_page(page);
547 page_cache_release(page);
539 } 548 }
540 if (req->ff) 549 if (req->ff)
541 fuse_file_put(req->ff); 550 fuse_file_put(req->ff);
@@ -550,6 +559,7 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file)
550 559
551 req->out.argpages = 1; 560 req->out.argpages = 1;
552 req->out.page_zeroing = 1; 561 req->out.page_zeroing = 1;
562 req->out.page_replace = 1;
553 fuse_read_fill(req, file, pos, count, FUSE_READ); 563 fuse_read_fill(req, file, pos, count, FUSE_READ);
554 req->misc.read.attr_ver = fuse_get_attr_version(fc); 564 req->misc.read.attr_ver = fuse_get_attr_version(fc);
555 if (fc->async_read) { 565 if (fc->async_read) {
@@ -589,6 +599,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
589 return PTR_ERR(req); 599 return PTR_ERR(req);
590 } 600 }
591 } 601 }
602 page_cache_get(page);
592 req->pages[req->num_pages] = page; 603 req->pages[req->num_pages] = page;
593 req->num_pages++; 604 req->num_pages++;
594 return 0; 605 return 0;
@@ -994,10 +1005,7 @@ static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
994 nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT); 1005 nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
995 npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; 1006 npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
996 npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ); 1007 npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
997 down_read(&current->mm->mmap_sem); 1008 npages = get_user_pages_fast(user_addr, npages, !write, req->pages);
998 npages = get_user_pages(current, current->mm, user_addr, npages, !write,
999 0, req->pages, NULL);
1000 up_read(&current->mm->mmap_sem);
1001 if (npages < 0) 1009 if (npages < 0)
1002 return npages; 1010 return npages;
1003 1011
@@ -1580,9 +1588,9 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
1580 while (iov_iter_count(&ii)) { 1588 while (iov_iter_count(&ii)) {
1581 struct page *page = pages[page_idx++]; 1589 struct page *page = pages[page_idx++];
1582 size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii)); 1590 size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
1583 void *kaddr, *map; 1591 void *kaddr;
1584 1592
1585 kaddr = map = kmap(page); 1593 kaddr = kmap(page);
1586 1594
1587 while (todo) { 1595 while (todo) {
1588 char __user *uaddr = ii.iov->iov_base + ii.iov_offset; 1596 char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 01cc462ff45d..8f309f04064e 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -177,6 +177,9 @@ struct fuse_out {
177 /** Zero partially or not copied pages */ 177 /** Zero partially or not copied pages */
178 unsigned page_zeroing:1; 178 unsigned page_zeroing:1;
179 179
180 /** Pages may be replaced with new ones */
181 unsigned page_replace:1;
182
180 /** Number or arguments */ 183 /** Number or arguments */
181 unsigned numargs; 184 unsigned numargs;
182 185
@@ -568,8 +571,7 @@ void fuse_release_common(struct file *file, int opcode);
568/** 571/**
569 * Send FSYNC or FSYNCDIR request 572 * Send FSYNC or FSYNCDIR request
570 */ 573 */
571int fuse_fsync_common(struct file *file, struct dentry *de, int datasync, 574int fuse_fsync_common(struct file *file, int datasync, int isdir);
572 int isdir);
573 575
574/** 576/**
575 * Notify poll wakeup 577 * Notify poll wakeup
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index a739a0a48067..9f8b52500d63 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -700,8 +700,14 @@ out:
700 return 0; 700 return 0;
701 701
702 page_cache_release(page); 702 page_cache_release(page);
703
704 /*
705 * XXX(hch): the call below should probably be replaced with
706 * a call to the gfs2-specific truncate blocks helper to actually
707 * release disk blocks..
708 */
703 if (pos + len > ip->i_inode.i_size) 709 if (pos + len > ip->i_inode.i_size)
704 vmtruncate(&ip->i_inode, ip->i_inode.i_size); 710 simple_setsize(&ip->i_inode, ip->i_inode.i_size);
705out_endtrans: 711out_endtrans:
706 gfs2_trans_end(sdp); 712 gfs2_trans_end(sdp);
707out_trans_fail: 713out_trans_fail:
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index b20bfcc9fa2d..ed9a94f0ef15 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -554,9 +554,9 @@ static int gfs2_close(struct inode *inode, struct file *file)
554 * Returns: errno 554 * Returns: errno
555 */ 555 */
556 556
557static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync) 557static int gfs2_fsync(struct file *file, int datasync)
558{ 558{
559 struct inode *inode = dentry->d_inode; 559 struct inode *inode = file->f_mapping->host;
560 int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC); 560 int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
561 int ret = 0; 561 int ret = 0;
562 562
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 4e64352d49de..98cdd05f3316 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -1071,6 +1071,9 @@ int gfs2_permission(struct inode *inode, int mask)
1071 return error; 1071 return error;
1072} 1072}
1073 1073
1074/*
1075 * XXX: should be changed to have proper ordering by opencoding simple_setsize
1076 */
1074static int setattr_size(struct inode *inode, struct iattr *attr) 1077static int setattr_size(struct inode *inode, struct iattr *attr)
1075{ 1078{
1076 struct gfs2_inode *ip = GFS2_I(inode); 1079 struct gfs2_inode *ip = GFS2_I(inode);
@@ -1081,7 +1084,7 @@ static int setattr_size(struct inode *inode, struct iattr *attr)
1081 error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); 1084 error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
1082 if (error) 1085 if (error)
1083 return error; 1086 return error;
1084 error = vmtruncate(inode, attr->ia_size); 1087 error = simple_setsize(inode, attr->ia_size);
1085 gfs2_trans_end(sdp); 1088 gfs2_trans_end(sdp);
1086 if (error) 1089 if (error)
1087 return error; 1090 return error;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 3a029d8f4cf1..87ac1891a185 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -411,9 +411,9 @@ int hostfs_file_open(struct inode *ino, struct file *file)
411 return 0; 411 return 0;
412} 412}
413 413
414int hostfs_fsync(struct file *file, struct dentry *dentry, int datasync) 414int hostfs_fsync(struct file *file, int datasync)
415{ 415{
416 return fsync_file(HOSTFS_I(dentry->d_inode)->fd, datasync); 416 return fsync_file(HOSTFS_I(file->f_mapping->host)->fd, datasync);
417} 417}
418 418
419static const struct file_operations hostfs_file_fops = { 419static const struct file_operations hostfs_file_fops = {
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 3efabff00367..a9ae9bfa752f 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -19,9 +19,9 @@ static int hpfs_file_release(struct inode *inode, struct file *file)
19 return 0; 19 return 0;
20} 20}
21 21
22int hpfs_file_fsync(struct file *file, struct dentry *dentry, int datasync) 22int hpfs_file_fsync(struct file *file, int datasync)
23{ 23{
24 /*return file_fsync(file, dentry);*/ 24 /*return file_fsync(file, datasync);*/
25 return 0; /* Don't fsync :-) */ 25 return 0; /* Don't fsync :-) */
26} 26}
27 27
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 97bf738cd5d6..75f9d4324851 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -268,7 +268,7 @@ void hpfs_set_ea(struct inode *, struct fnode *, const char *,
268 268
269/* file.c */ 269/* file.c */
270 270
271int hpfs_file_fsync(struct file *, struct dentry *, int); 271int hpfs_file_fsync(struct file *, int);
272extern const struct file_operations hpfs_file_ops; 272extern const struct file_operations hpfs_file_ops;
273extern const struct inode_operations hpfs_file_iops; 273extern const struct inode_operations hpfs_file_iops;
274extern const struct address_space_operations hpfs_aops; 274extern const struct address_space_operations hpfs_aops;
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index 2e4dfa8593da..826c3f9d29ac 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -587,7 +587,7 @@ static int hppfs_readdir(struct file *file, void *ent, filldir_t filldir)
587 return err; 587 return err;
588} 588}
589 589
590static int hppfs_fsync(struct file *file, struct dentry *dentry, int datasync) 590static int hppfs_fsync(struct file *file, int datasync)
591{ 591{
592 return 0; 592 return 0;
593} 593}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a0bbd3d1b41a..a4e9a7ec3691 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -688,7 +688,7 @@ static void init_once(void *foo)
688const struct file_operations hugetlbfs_file_operations = { 688const struct file_operations hugetlbfs_file_operations = {
689 .read = hugetlbfs_read, 689 .read = hugetlbfs_read,
690 .mmap = hugetlbfs_file_mmap, 690 .mmap = hugetlbfs_file_mmap,
691 .fsync = simple_sync_file, 691 .fsync = noop_fsync,
692 .get_unmapped_area = hugetlb_get_unmapped_area, 692 .get_unmapped_area = hugetlb_get_unmapped_area,
693}; 693};
694 694
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index b9ab69b3a482..e0aca9a0ac68 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -272,6 +272,7 @@ static int isofs_readdir(struct file *filp,
272 272
273const struct file_operations isofs_dir_operations = 273const struct file_operations isofs_dir_operations =
274{ 274{
275 .llseek = generic_file_llseek,
275 .read = generic_read_dir, 276 .read = generic_read_dir,
276 .readdir = isofs_readdir, 277 .readdir = isofs_readdir,
277}; 278};
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index bfc70f57900f..e214d68620ac 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1311,7 +1311,6 @@ int jbd2_journal_stop(handle_t *handle)
1311 if (handle->h_sync) 1311 if (handle->h_sync)
1312 transaction->t_synchronous_commit = 1; 1312 transaction->t_synchronous_commit = 1;
1313 current->journal_info = NULL; 1313 current->journal_info = NULL;
1314 spin_lock(&journal->j_state_lock);
1315 spin_lock(&transaction->t_handle_lock); 1314 spin_lock(&transaction->t_handle_lock);
1316 transaction->t_outstanding_credits -= handle->h_buffer_credits; 1315 transaction->t_outstanding_credits -= handle->h_buffer_credits;
1317 transaction->t_updates--; 1316 transaction->t_updates--;
@@ -1340,8 +1339,7 @@ int jbd2_journal_stop(handle_t *handle)
1340 jbd_debug(2, "transaction too old, requesting commit for " 1339 jbd_debug(2, "transaction too old, requesting commit for "
1341 "handle %p\n", handle); 1340 "handle %p\n", handle);
1342 /* This is non-blocking */ 1341 /* This is non-blocking */
1343 __jbd2_log_start_commit(journal, transaction->t_tid); 1342 jbd2_log_start_commit(journal, transaction->t_tid);
1344 spin_unlock(&journal->j_state_lock);
1345 1343
1346 /* 1344 /*
1347 * Special case: JBD2_SYNC synchronous updates require us 1345 * Special case: JBD2_SYNC synchronous updates require us
@@ -1351,7 +1349,6 @@ int jbd2_journal_stop(handle_t *handle)
1351 err = jbd2_log_wait_commit(journal, tid); 1349 err = jbd2_log_wait_commit(journal, tid);
1352 } else { 1350 } else {
1353 spin_unlock(&transaction->t_handle_lock); 1351 spin_unlock(&transaction->t_handle_lock);
1354 spin_unlock(&journal->j_state_lock);
1355 } 1352 }
1356 1353
1357 lock_map_release(&handle->h_lockdep_map); 1354 lock_map_release(&handle->h_lockdep_map);
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index e7291c161a19..813497024437 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -26,9 +26,9 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
26 struct page **pagep, void **fsdata); 26 struct page **pagep, void **fsdata);
27static int jffs2_readpage (struct file *filp, struct page *pg); 27static int jffs2_readpage (struct file *filp, struct page *pg);
28 28
29int jffs2_fsync(struct file *filp, struct dentry *dentry, int datasync) 29int jffs2_fsync(struct file *filp, int datasync)
30{ 30{
31 struct inode *inode = dentry->d_inode; 31 struct inode *inode = filp->f_mapping->host;
32 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); 32 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
33 33
34 /* Trigger GC to flush any pending writes for this inode */ 34 /* Trigger GC to flush any pending writes for this inode */
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 86e0821fc989..8bc2c80ab159 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -169,13 +169,13 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
169 mutex_unlock(&f->sem); 169 mutex_unlock(&f->sem);
170 jffs2_complete_reservation(c); 170 jffs2_complete_reservation(c);
171 171
172 /* We have to do the vmtruncate() without f->sem held, since 172 /* We have to do the simple_setsize() without f->sem held, since
173 some pages may be locked and waiting for it in readpage(). 173 some pages may be locked and waiting for it in readpage().
174 We are protected from a simultaneous write() extending i_size 174 We are protected from a simultaneous write() extending i_size
175 back past iattr->ia_size, because do_truncate() holds the 175 back past iattr->ia_size, because do_truncate() holds the
176 generic inode semaphore. */ 176 generic inode semaphore. */
177 if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) { 177 if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
178 vmtruncate(inode, iattr->ia_size); 178 simple_setsize(inode, iattr->ia_size);
179 inode->i_blocks = (inode->i_size + 511) >> 9; 179 inode->i_blocks = (inode->i_size + 511) >> 9;
180 } 180 }
181 181
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 035a767f958b..4791aacf3084 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -158,7 +158,7 @@ extern const struct inode_operations jffs2_dir_inode_operations;
158extern const struct file_operations jffs2_file_operations; 158extern const struct file_operations jffs2_file_operations;
159extern const struct inode_operations jffs2_file_inode_operations; 159extern const struct inode_operations jffs2_file_inode_operations;
160extern const struct address_space_operations jffs2_file_address_operations; 160extern const struct address_space_operations jffs2_file_address_operations;
161int jffs2_fsync(struct file *, struct dentry *, int); 161int jffs2_fsync(struct file *, int);
162int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg); 162int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg);
163 163
164/* ioctl.c */ 164/* ioctl.c */
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 85d9ec659225..127263cc8657 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -27,9 +27,9 @@
27#include "jfs_acl.h" 27#include "jfs_acl.h"
28#include "jfs_debug.h" 28#include "jfs_debug.h"
29 29
30int jfs_fsync(struct file *file, struct dentry *dentry, int datasync) 30int jfs_fsync(struct file *file, int datasync)
31{ 31{
32 struct inode *inode = dentry->d_inode; 32 struct inode *inode = file->f_mapping->host;
33 int rc = 0; 33 int rc = 0;
34 34
35 if (!(inode->i_state & I_DIRTY) || 35 if (!(inode->i_state & I_DIRTY) ||
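This is one instance of a tree-wide prototype change visible throughout these hunks (fuse, gfs2, hostfs, hpfs, hppfs, jffs2, jfs, logfs): ->fsync() loses its struct dentry argument, and implementations fetch the inode from file->f_mapping->host instead of dentry->d_inode. Converting another filesystem follows the same two-line pattern; a generic sketch with hypothetical foofs names:

static int foofs_fsync(struct file *file, int datasync)
{
        struct inode *inode = file->f_mapping->host;    /* was dentry->d_inode */

        /* flush whatever foofs tracks for this inode; datasync still means
         * "skip non-essential metadata", exactly as before */
        return foofs_flush_inode(inode, datasync);
}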
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 9e6bda30a6e8..11042b1f44b5 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -21,7 +21,7 @@
21struct fid; 21struct fid;
22 22
23extern struct inode *ialloc(struct inode *, umode_t); 23extern struct inode *ialloc(struct inode *, umode_t);
24extern int jfs_fsync(struct file *, struct dentry *, int); 24extern int jfs_fsync(struct file *, int);
25extern long jfs_ioctl(struct file *, unsigned int, unsigned long); 25extern long jfs_ioctl(struct file *, unsigned int, unsigned long);
26extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long); 26extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long);
27extern struct inode *jfs_iget(struct super_block *, unsigned long); 27extern struct inode *jfs_iget(struct super_block *, unsigned long);
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index b66832ac33ac..b38f96bef829 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -179,6 +179,8 @@ static void jfs_put_super(struct super_block *sb)
179 179
180 jfs_info("In jfs_put_super"); 180 jfs_info("In jfs_put_super");
181 181
182 dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
183
182 lock_kernel(); 184 lock_kernel();
183 185
184 rc = jfs_umount(sb); 186 rc = jfs_umount(sb);
@@ -396,10 +398,20 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data)
396 398
397 JFS_SBI(sb)->flag = flag; 399 JFS_SBI(sb)->flag = flag;
398 ret = jfs_mount_rw(sb, 1); 400 ret = jfs_mount_rw(sb, 1);
401
402 /* mark the fs r/w for quota activity */
403 sb->s_flags &= ~MS_RDONLY;
404
399 unlock_kernel(); 405 unlock_kernel();
406 dquot_resume(sb, -1);
400 return ret; 407 return ret;
401 } 408 }
402 if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) { 409 if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
410 rc = dquot_suspend(sb, -1);
411 if (rc < 0) {
412 unlock_kernel();
413 return rc;
414 }
403 rc = jfs_umount_rw(sb); 415 rc = jfs_umount_rw(sb);
404 JFS_SBI(sb)->flag = flag; 416 JFS_SBI(sb)->flag = flag;
405 unlock_kernel(); 417 unlock_kernel();
@@ -469,6 +481,10 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
469 */ 481 */
470 sb->s_op = &jfs_super_operations; 482 sb->s_op = &jfs_super_operations;
471 sb->s_export_op = &jfs_export_operations; 483 sb->s_export_op = &jfs_export_operations;
484#ifdef CONFIG_QUOTA
485 sb->dq_op = &dquot_operations;
486 sb->s_qcop = &dquot_quotactl_ops;
487#endif
472 488
473 /* 489 /*
474 * Initialize direct-mapping inode/address-space 490 * Initialize direct-mapping inode/address-space
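The jfs/super.c hunk hands quota handling to the dquot_* helpers: quotas are disabled in put_super, suspended before a remount to read-only, and resumed after a remount back to read-write (with MS_RDONLY cleared so quota writes are allowed). The ordering is the point; a condensed sketch of the remount path, names as in the diff:

/* remount to read-write: mount, mark the sb r/w for quota activity,
 * then bring the quota files back */
ret = jfs_mount_rw(sb, 1);
sb->s_flags &= ~MS_RDONLY;
dquot_resume(sb, -1);           /* -1 means all quota types */

/* remount to read-only: park quotas while the fs can still write */
rc = dquot_suspend(sb, -1);
if (rc < 0)
        return rc;
rc = jfs_umount_rw(sb);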
diff --git a/fs/libfs.c b/fs/libfs.c
index 232bea425b09..09e1016eb774 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -8,6 +8,7 @@
8#include <linux/slab.h> 8#include <linux/slab.h>
9#include <linux/mount.h> 9#include <linux/mount.h>
10#include <linux/vfs.h> 10#include <linux/vfs.h>
11#include <linux/quotaops.h>
11#include <linux/mutex.h> 12#include <linux/mutex.h>
12#include <linux/exportfs.h> 13#include <linux/exportfs.h>
13#include <linux/writeback.h> 14#include <linux/writeback.h>
@@ -58,11 +59,6 @@ struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct na
58 return NULL; 59 return NULL;
59} 60}
60 61
61int simple_sync_file(struct file * file, struct dentry *dentry, int datasync)
62{
63 return 0;
64}
65
66int dcache_dir_open(struct inode *inode, struct file *file) 62int dcache_dir_open(struct inode *inode, struct file *file)
67{ 63{
68 static struct qstr cursor_name = {.len = 1, .name = "."}; 64 static struct qstr cursor_name = {.len = 1, .name = "."};
@@ -190,7 +186,7 @@ const struct file_operations simple_dir_operations = {
190 .llseek = dcache_dir_lseek, 186 .llseek = dcache_dir_lseek,
191 .read = generic_read_dir, 187 .read = generic_read_dir,
192 .readdir = dcache_readdir, 188 .readdir = dcache_readdir,
193 .fsync = simple_sync_file, 189 .fsync = noop_fsync,
194}; 190};
195 191
196const struct inode_operations simple_dir_inode_operations = { 192const struct inode_operations simple_dir_inode_operations = {
@@ -330,6 +326,81 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
330 return 0; 326 return 0;
331} 327}
332 328
329/**
330 * simple_setsize - handle core mm and vfs requirements for file size change
331 * @inode: inode
332 * @newsize: new file size
333 *
334 * Returns 0 on success, -error on failure.
335 *
336 * simple_setsize must be called with inode_mutex held.
337 *
338 * simple_setsize will check that the requested new size is OK (see
339 * inode_newsize_ok), and then will perform the necessary i_size update
340 * and pagecache truncation (if necessary). It will be typically be called
341 * from the filesystem's setattr function when ATTR_SIZE is passed in.
342 *
343 * The inode itself must have correct permissions and attributes to allow
344 * i_size to be changed, this function then just checks that the new size
345 * requested is valid.
346 *
347 * In the case of simple in-memory filesystems with inodes stored solely
348 * in the inode cache, and file data in the pagecache, nothing more needs
349 * to be done to satisfy a truncate request. Filesystems with on-disk
350 * blocks for example will need to free them in the case of truncate, in
351 * that case it may be easier not to use simple_setsize (but each of its
352 * components will likely be required at some point to update pagecache
353 * and inode etc).
354 */
355int simple_setsize(struct inode *inode, loff_t newsize)
356{
357 loff_t oldsize;
358 int error;
359
360 error = inode_newsize_ok(inode, newsize);
361 if (error)
362 return error;
363
364 oldsize = inode->i_size;
365 i_size_write(inode, newsize);
366 truncate_pagecache(inode, oldsize, newsize);
367
368 return error;
369}
370EXPORT_SYMBOL(simple_setsize);
371
372/**
373 * simple_setattr - setattr for simple in-memory filesystem
374 * @dentry: dentry
375 * @iattr: iattr structure
376 *
377 * Returns 0 on success, -error on failure.
378 *
379 * simple_setattr implements setattr for an in-memory filesystem which
380 * does not store its own file data or metadata (eg. uses the page cache
381 * and inode cache as its data store).
382 */
383int simple_setattr(struct dentry *dentry, struct iattr *iattr)
384{
385 struct inode *inode = dentry->d_inode;
386 int error;
387
388 error = inode_change_ok(inode, iattr);
389 if (error)
390 return error;
391
392 if (iattr->ia_valid & ATTR_SIZE) {
393 error = simple_setsize(inode, iattr->ia_size);
394 if (error)
395 return error;
396 }
397
398 generic_setattr(inode, iattr);
399
400 return error;
401}
402EXPORT_SYMBOL(simple_setattr);
403
333int simple_readpage(struct file *file, struct page *page) 404int simple_readpage(struct file *file, struct page *page)
334{ 405{
335 clear_highpage(page); 406 clear_highpage(page);
@@ -851,13 +922,22 @@ struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid,
851} 922}
852EXPORT_SYMBOL_GPL(generic_fh_to_parent); 923EXPORT_SYMBOL_GPL(generic_fh_to_parent);
853 924
854int simple_fsync(struct file *file, struct dentry *dentry, int datasync) 925/**
926 * generic_file_fsync - generic fsync implementation for simple filesystems
927 * @file: file to synchronize
928 * @datasync: only synchronize essential metadata if true
929 *
930 * This is a generic implementation of the fsync method for simple
931 * filesystems which track all non-inode metadata in the buffers list
932 * hanging off the address_space structure.
933 */
934int generic_file_fsync(struct file *file, int datasync)
855{ 935{
856 struct writeback_control wbc = { 936 struct writeback_control wbc = {
857 .sync_mode = WB_SYNC_ALL, 937 .sync_mode = WB_SYNC_ALL,
858 .nr_to_write = 0, /* metadata-only; caller takes care of data */ 938 .nr_to_write = 0, /* metadata-only; caller takes care of data */
859 }; 939 };
860 struct inode *inode = dentry->d_inode; 940 struct inode *inode = file->f_mapping->host;
861 int err; 941 int err;
862 int ret; 942 int ret;
863 943
@@ -872,7 +952,15 @@ int simple_fsync(struct file *file, struct dentry *dentry, int datasync)
872 ret = err; 952 ret = err;
873 return ret; 953 return ret;
874} 954}
875EXPORT_SYMBOL(simple_fsync); 955EXPORT_SYMBOL(generic_file_fsync);
956
957/*
958 * No-op implementation of ->fsync for in-memory filesystems.
959 */
960int noop_fsync(struct file *file, int datasync)
961{
962 return 0;
963}
876 964
877EXPORT_SYMBOL(dcache_dir_close); 965EXPORT_SYMBOL(dcache_dir_close);
878EXPORT_SYMBOL(dcache_dir_lseek); 966EXPORT_SYMBOL(dcache_dir_lseek);
@@ -895,7 +983,7 @@ EXPORT_SYMBOL(simple_release_fs);
895EXPORT_SYMBOL(simple_rename); 983EXPORT_SYMBOL(simple_rename);
896EXPORT_SYMBOL(simple_rmdir); 984EXPORT_SYMBOL(simple_rmdir);
897EXPORT_SYMBOL(simple_statfs); 985EXPORT_SYMBOL(simple_statfs);
898EXPORT_SYMBOL(simple_sync_file); 986EXPORT_SYMBOL(noop_fsync);
899EXPORT_SYMBOL(simple_unlink); 987EXPORT_SYMBOL(simple_unlink);
900EXPORT_SYMBOL(simple_read_from_buffer); 988EXPORT_SYMBOL(simple_read_from_buffer);
901EXPORT_SYMBOL(simple_write_to_buffer); 989EXPORT_SYMBOL(simple_write_to_buffer);
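libfs grows simple_setsize()/simple_setattr() for in-memory filesystems and renames simple_fsync() to generic_file_fsync(), with noop_fsync() covering the callers that used simple_sync_file(). For a ramfs-style filesystem the new helpers wire up roughly as below; the foofs_* names are hypothetical:

static const struct inode_operations foofs_file_inode_operations = {
        .setattr        = simple_setattr,       /* checks, i_size update, pagecache trim */
        .getattr        = simple_getattr,
};

static const struct file_operations foofs_file_operations = {
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
        .write          = do_sync_write,
        .aio_write      = generic_file_aio_write,
        .mmap           = generic_file_mmap,
        .fsync          = noop_fsync,           /* nothing to write back for a RAM-backed fs */
        .llseek         = generic_file_llseek,
};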
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
index 0de524071870..abe1cafbd4c2 100644
--- a/fs/logfs/file.c
+++ b/fs/logfs/file.c
@@ -219,9 +219,9 @@ int logfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
219 } 219 }
220} 220}
221 221
222int logfs_fsync(struct file *file, struct dentry *dentry, int datasync) 222int logfs_fsync(struct file *file, int datasync)
223{ 223{
224 struct super_block *sb = dentry->d_inode->i_sb; 224 struct super_block *sb = file->f_mapping->host->i_sb;
225 225
226 logfs_write_anchor(sb); 226 logfs_write_anchor(sb);
227 return 0; 227 return 0;
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index 1a9db84f8d8f..c838c4d72111 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -506,7 +506,7 @@ extern const struct address_space_operations logfs_reg_aops;
506int logfs_readpage(struct file *file, struct page *page); 506int logfs_readpage(struct file *file, struct page *page);
507int logfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 507int logfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
508 unsigned long arg); 508 unsigned long arg);
509int logfs_fsync(struct file *file, struct dentry *dentry, int datasync); 509int logfs_fsync(struct file *file, int datasync);
510 510
511/* gc.c */ 511/* gc.c */
512u32 get_best_cand(struct super_block *sb, struct candidate_list *list, u32 *ec); 512u32 get_best_cand(struct super_block *sb, struct candidate_list *list, u32 *ec);
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index 6198731d7fcd..91969589131c 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -22,7 +22,7 @@ const struct file_operations minix_dir_operations = {
22 .llseek = generic_file_llseek, 22 .llseek = generic_file_llseek,
23 .read = generic_read_dir, 23 .read = generic_read_dir,
24 .readdir = minix_readdir, 24 .readdir = minix_readdir,
25 .fsync = simple_fsync, 25 .fsync = generic_file_fsync,
26}; 26};
27 27
28static inline void dir_put_page(struct page *page) 28static inline void dir_put_page(struct page *page)
@@ -72,11 +72,8 @@ static struct page * dir_get_page(struct inode *dir, unsigned long n)
72{ 72{
73 struct address_space *mapping = dir->i_mapping; 73 struct address_space *mapping = dir->i_mapping;
74 struct page *page = read_mapping_page(mapping, n, NULL); 74 struct page *page = read_mapping_page(mapping, n, NULL);
75 if (!IS_ERR(page)) { 75 if (!IS_ERR(page))
76 kmap(page); 76 kmap(page);
77 if (!PageUptodate(page))
78 goto fail;
79 }
80 return page; 77 return page;
81 78
82fail: 79fail:
diff --git a/fs/minix/file.c b/fs/minix/file.c
index 3eec3e607a87..d5320ff23faf 100644
--- a/fs/minix/file.c
+++ b/fs/minix/file.c
@@ -19,7 +19,7 @@ const struct file_operations minix_file_operations = {
19 .write = do_sync_write, 19 .write = do_sync_write,
20 .aio_write = generic_file_aio_write, 20 .aio_write = generic_file_aio_write,
21 .mmap = generic_file_mmap, 21 .mmap = generic_file_mmap,
22 .fsync = simple_fsync, 22 .fsync = generic_file_fsync,
23 .splice_read = generic_file_splice_read, 23 .splice_read = generic_file_splice_read,
24}; 24};
25 25
diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c
index f23010969369..13487ad16894 100644
--- a/fs/minix/itree_v2.c
+++ b/fs/minix/itree_v2.c
@@ -20,6 +20,9 @@ static inline block_t *i_data(struct inode *inode)
20 return (block_t *)minix_i(inode)->u.i2_data; 20 return (block_t *)minix_i(inode)->u.i2_data;
21} 21}
22 22
23#define DIRCOUNT 7
24#define INDIRCOUNT(sb) (1 << ((sb)->s_blocksize_bits - 2))
25
23static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) 26static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
24{ 27{
25 int n = 0; 28 int n = 0;
@@ -34,21 +37,21 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
34 printk("MINIX-fs: block_to_path: " 37 printk("MINIX-fs: block_to_path: "
35 "block %ld too big on dev %s\n", 38 "block %ld too big on dev %s\n",
36 block, bdevname(sb->s_bdev, b)); 39 block, bdevname(sb->s_bdev, b));
37 } else if (block < 7) { 40 } else if (block < DIRCOUNT) {
38 offsets[n++] = block; 41 offsets[n++] = block;
39 } else if ((block -= 7) < 256) { 42 } else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) {
40 offsets[n++] = 7; 43 offsets[n++] = DIRCOUNT;
41 offsets[n++] = block; 44 offsets[n++] = block;
42 } else if ((block -= 256) < 256*256) { 45 } else if ((block -= INDIRCOUNT(sb)) < INDIRCOUNT(sb) * INDIRCOUNT(sb)) {
43 offsets[n++] = 8; 46 offsets[n++] = DIRCOUNT + 1;
44 offsets[n++] = block>>8; 47 offsets[n++] = block / INDIRCOUNT(sb);
45 offsets[n++] = block & 255; 48 offsets[n++] = block % INDIRCOUNT(sb);
46 } else { 49 } else {
47 block -= 256*256; 50 block -= INDIRCOUNT(sb) * INDIRCOUNT(sb);
48 offsets[n++] = 9; 51 offsets[n++] = DIRCOUNT + 2;
49 offsets[n++] = block>>16; 52 offsets[n++] = (block / INDIRCOUNT(sb)) / INDIRCOUNT(sb);
50 offsets[n++] = (block>>8) & 255; 53 offsets[n++] = (block / INDIRCOUNT(sb)) % INDIRCOUNT(sb);
51 offsets[n++] = block & 255; 54 offsets[n++] = block % INDIRCOUNT(sb);
52 } 55 }
53 return n; 56 return n;
54} 57}
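
The itree_v2.c change replaces the hard-coded 7/256 boundaries with DIRCOUNT and an INDIRCOUNT derived from the superblock's block size, so Minix V2 filesystems with blocks larger than 1 KiB resolve indirect blocks correctly. A standalone re-derivation of the arithmetic (userspace illustration, not kernel code): with 1 KiB blocks an indirect block holds 256 pointers, so block 300 resolves through the double-indirect slot as {8, 0, 37}; with 2 KiB blocks INDIRCOUNT is 512 and the same block stays in the single-indirect range as {7, 293}.

    /* Userspace illustration of the patched block_to_path() arithmetic. */
    #include <stdio.h>

    #define DIRCOUNT 7
    #define DEPTH    4

    static int block_to_path(long block, int blocksize_bits, int offsets[DEPTH])
    {
            long ind = 1L << (blocksize_bits - 2);  /* pointers per indirect block */
            int n = 0;

            if (block < DIRCOUNT) {
                    offsets[n++] = block;
            } else if ((block -= DIRCOUNT) < ind) {
                    offsets[n++] = DIRCOUNT;
                    offsets[n++] = block;
            } else if ((block -= ind) < ind * ind) {
                    offsets[n++] = DIRCOUNT + 1;
                    offsets[n++] = block / ind;
                    offsets[n++] = block % ind;
            } else {
                    block -= ind * ind;
                    offsets[n++] = DIRCOUNT + 2;
                    offsets[n++] = (block / ind) / ind;
                    offsets[n++] = (block / ind) % ind;
                    offsets[n++] = block % ind;
            }
            return n;
    }

    int main(void)
    {
            int off[DEPTH], n, i;

            n = block_to_path(300, 10, off);        /* 1 KiB blocks */
            for (i = 0; i < n; i++)
                    printf("%d ", off[i]);          /* prints: 8 0 37 */
            printf("\n");
            return 0;
    }
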
diff --git a/fs/namei.c b/fs/namei.c
index 48e1f60520ea..868d0cb9d473 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1621,6 +1621,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
1621 case LAST_DOTDOT: 1621 case LAST_DOTDOT:
1622 follow_dotdot(nd); 1622 follow_dotdot(nd);
1623 dir = nd->path.dentry; 1623 dir = nd->path.dentry;
1624 case LAST_DOT:
1624 if (nd->path.mnt->mnt_sb->s_type->fs_flags & FS_REVAL_DOT) { 1625 if (nd->path.mnt->mnt_sb->s_type->fs_flags & FS_REVAL_DOT) {
1625 if (!dir->d_op->d_revalidate(dir, nd)) { 1626 if (!dir->d_op->d_revalidate(dir, nd)) {
1626 error = -ESTALE; 1627 error = -ESTALE;
@@ -1628,7 +1629,6 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
1628 } 1629 }
1629 } 1630 }
1630 /* fallthrough */ 1631 /* fallthrough */
1631 case LAST_DOT:
1632 case LAST_ROOT: 1632 case LAST_ROOT:
1633 if (open_flag & O_CREAT) 1633 if (open_flag & O_CREAT)
1634 goto exit; 1634 goto exit;
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 92dde6f8d893..9578cbe0cd58 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -49,6 +49,7 @@ extern int ncp_symlink(struct inode *, struct dentry *, const char *);
49 49
50const struct file_operations ncp_dir_operations = 50const struct file_operations ncp_dir_operations =
51{ 51{
52 .llseek = generic_file_llseek,
52 .read = generic_read_dir, 53 .read = generic_read_dir,
53 .readdir = ncp_readdir, 54 .readdir = ncp_readdir,
54 .unlocked_ioctl = ncp_ioctl, 55 .unlocked_ioctl = ncp_ioctl,
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index b93870892892..3639cc5cbdae 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -22,7 +22,7 @@
22#include <linux/ncp_fs.h> 22#include <linux/ncp_fs.h>
23#include "ncplib_kernel.h" 23#include "ncplib_kernel.h"
24 24
25static int ncp_fsync(struct file *file, struct dentry *dentry, int datasync) 25static int ncp_fsync(struct file *file, int datasync)
26{ 26{
27 return 0; 27 return 0;
28} 28}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index ee9a179ebdf3..782b431ef91c 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -53,7 +53,7 @@ static int nfs_link(struct dentry *, struct inode *, struct dentry *);
53static int nfs_mknod(struct inode *, struct dentry *, int, dev_t); 53static int nfs_mknod(struct inode *, struct dentry *, int, dev_t);
54static int nfs_rename(struct inode *, struct dentry *, 54static int nfs_rename(struct inode *, struct dentry *,
55 struct inode *, struct dentry *); 55 struct inode *, struct dentry *);
56static int nfs_fsync_dir(struct file *, struct dentry *, int); 56static int nfs_fsync_dir(struct file *, int);
57static loff_t nfs_llseek_dir(struct file *, loff_t, int); 57static loff_t nfs_llseek_dir(struct file *, loff_t, int);
58 58
59const struct file_operations nfs_dir_operations = { 59const struct file_operations nfs_dir_operations = {
@@ -641,8 +641,10 @@ out:
641 * All directory operations under NFS are synchronous, so fsync() 641 * All directory operations under NFS are synchronous, so fsync()
642 * is a dummy operation. 642 * is a dummy operation.
643 */ 643 */
644static int nfs_fsync_dir(struct file *filp, struct dentry *dentry, int datasync) 644static int nfs_fsync_dir(struct file *filp, int datasync)
645{ 645{
646 struct dentry *dentry = filp->f_path.dentry;
647
646 dfprintk(FILE, "NFS: fsync dir(%s/%s) datasync %d\n", 648 dfprintk(FILE, "NFS: fsync dir(%s/%s) datasync %d\n",
647 dentry->d_parent->d_name.name, dentry->d_name.name, 649 dentry->d_parent->d_name.name, dentry->d_name.name,
648 datasync); 650 datasync);
@@ -1741,6 +1743,7 @@ remove_lru_entry:
1741 clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags); 1743 clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
1742 smp_mb__after_clear_bit(); 1744 smp_mb__after_clear_bit();
1743 } 1745 }
1746 spin_unlock(&inode->i_lock);
1744 } 1747 }
1745 spin_unlock(&nfs_access_lru_lock); 1748 spin_unlock(&nfs_access_lru_lock);
1746 nfs_access_free_list(&head); 1749 nfs_access_free_list(&head);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index cac96bcc91e4..36a5e74f51b4 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -53,7 +53,7 @@ static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
53static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov, 53static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov,
54 unsigned long nr_segs, loff_t pos); 54 unsigned long nr_segs, loff_t pos);
55static int nfs_file_flush(struct file *, fl_owner_t id); 55static int nfs_file_flush(struct file *, fl_owner_t id);
56static int nfs_file_fsync(struct file *, struct dentry *dentry, int datasync); 56static int nfs_file_fsync(struct file *, int datasync);
57static int nfs_check_flags(int flags); 57static int nfs_check_flags(int flags);
58static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl); 58static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
59static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl); 59static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
@@ -322,8 +322,9 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
322 * whether any write errors occurred for this process. 322 * whether any write errors occurred for this process.
323 */ 323 */
324static int 324static int
325nfs_file_fsync(struct file *file, struct dentry *dentry, int datasync) 325nfs_file_fsync(struct file *file, int datasync)
326{ 326{
327 struct dentry *dentry = file->f_path.dentry;
327 struct nfs_open_context *ctx = nfs_file_open_context(file); 328 struct nfs_open_context *ctx = nfs_file_open_context(file);
328 struct inode *inode = dentry->d_inode; 329 struct inode *inode = dentry->d_inode;
329 330
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 3aea3ca98ab7..91679e2631ee 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1386,7 +1386,7 @@ static int nfs_commit_inode(struct inode *inode, int how)
1386 int res = 0; 1386 int res = 0;
1387 1387
1388 if (!nfs_commit_set_lock(NFS_I(inode), may_wait)) 1388 if (!nfs_commit_set_lock(NFS_I(inode), may_wait))
1389 goto out; 1389 goto out_mark_dirty;
1390 spin_lock(&inode->i_lock); 1390 spin_lock(&inode->i_lock);
1391 res = nfs_scan_commit(inode, &head, 0, 0); 1391 res = nfs_scan_commit(inode, &head, 0, 0);
1392 spin_unlock(&inode->i_lock); 1392 spin_unlock(&inode->i_lock);
@@ -1398,9 +1398,18 @@ static int nfs_commit_inode(struct inode *inode, int how)
1398 wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT, 1398 wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT,
1399 nfs_wait_bit_killable, 1399 nfs_wait_bit_killable,
1400 TASK_KILLABLE); 1400 TASK_KILLABLE);
1401 else
1402 goto out_mark_dirty;
1401 } else 1403 } else
1402 nfs_commit_clear_lock(NFS_I(inode)); 1404 nfs_commit_clear_lock(NFS_I(inode));
1403out: 1405 return res;
1406 /* Note: If we exit without ensuring that the commit is complete,
1407 * we must mark the inode as dirty. Otherwise, future calls to
1408 * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
1409 * that the data is on the disk.
1410 */
1411out_mark_dirty:
1412 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1404 return res; 1413 return res;
1405} 1414}
1406 1415
@@ -1509,14 +1518,17 @@ int nfs_wb_page(struct inode *inode, struct page *page)
1509 }; 1518 };
1510 int ret; 1519 int ret;
1511 1520
1512 while(PagePrivate(page)) { 1521 for (;;) {
1513 wait_on_page_writeback(page); 1522 wait_on_page_writeback(page);
1514 if (clear_page_dirty_for_io(page)) { 1523 if (clear_page_dirty_for_io(page)) {
1515 ret = nfs_writepage_locked(page, &wbc); 1524 ret = nfs_writepage_locked(page, &wbc);
1516 if (ret < 0) 1525 if (ret < 0)
1517 goto out_error; 1526 goto out_error;
1527 continue;
1518 } 1528 }
1519 ret = sync_inode(inode, &wbc); 1529 if (!PagePrivate(page))
1530 break;
1531 ret = nfs_commit_inode(inode, FLUSH_SYNC);
1520 if (ret < 0) 1532 if (ret < 0)
1521 goto out_error; 1533 goto out_error;
1522 } 1534 }
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 30292df443ce..c9a30d7ff6fc 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -27,7 +27,7 @@
27#include "nilfs.h" 27#include "nilfs.h"
28#include "segment.h" 28#include "segment.h"
29 29
30int nilfs_sync_file(struct file *file, struct dentry *dentry, int datasync) 30int nilfs_sync_file(struct file *file, int datasync)
31{ 31{
32 /* 32 /*
33 * Called from fsync() system call 33 * Called from fsync() system call
@@ -37,7 +37,7 @@ int nilfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
37 * This function should be implemented when the writeback function 37 * This function should be implemented when the writeback function
38 * will be implemented. 38 * will be implemented.
39 */ 39 */
40 struct inode *inode = dentry->d_inode; 40 struct inode *inode = file->f_mapping->host;
41 int err; 41 int err;
42 42
43 if (!nilfs_inode_dirty(inode)) 43 if (!nilfs_inode_dirty(inode))
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 8723e5bfd071..47d6d7928122 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -228,7 +228,7 @@ extern void nilfs_set_link(struct inode *, struct nilfs_dir_entry *,
228 struct page *, struct inode *); 228 struct page *, struct inode *);
229 229
230/* file.c */ 230/* file.c */
231extern int nilfs_sync_file(struct file *, struct dentry *, int); 231extern int nilfs_sync_file(struct file *, int);
232 232
233/* ioctl.c */ 233/* ioctl.c */
234long nilfs_ioctl(struct file *, unsigned int, unsigned long); 234long nilfs_ioctl(struct file *, unsigned int, unsigned long);
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index fe44d3feee4a..0f48e7c5d9e1 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -1527,10 +1527,9 @@ static int ntfs_dir_open(struct inode *vi, struct file *filp)
1527 * this problem for now. We do write the $BITMAP attribute if it is present 1527 * this problem for now. We do write the $BITMAP attribute if it is present
1528 * which is the important one for a directory so things are not too bad. 1528 * which is the important one for a directory so things are not too bad.
1529 */ 1529 */
1530static int ntfs_dir_fsync(struct file *filp, struct dentry *dentry, 1530static int ntfs_dir_fsync(struct file *filp, int datasync)
1531 int datasync)
1532{ 1531{
1533 struct inode *bmp_vi, *vi = dentry->d_inode; 1532 struct inode *bmp_vi, *vi = filp->f_mapping->host;
1534 int err, ret; 1533 int err, ret;
1535 ntfs_attr na; 1534 ntfs_attr na;
1536 1535
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index a1924a0d2ab0..113ebd9f25a4 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2133,7 +2133,6 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2133/** 2133/**
2134 * ntfs_file_fsync - sync a file to disk 2134 * ntfs_file_fsync - sync a file to disk
2135 * @filp: file to be synced 2135 * @filp: file to be synced
2136 * @dentry: dentry describing the file to sync
2137 * @datasync: if non-zero only flush user data and not metadata 2136 * @datasync: if non-zero only flush user data and not metadata
2138 * 2137 *
2139 * Data integrity sync of a file to disk. Used for fsync, fdatasync, and msync 2138 * Data integrity sync of a file to disk. Used for fsync, fdatasync, and msync
@@ -2149,19 +2148,15 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2149 * Also, if @datasync is true, we do not wait on the inode to be written out 2148 * Also, if @datasync is true, we do not wait on the inode to be written out
2150 * but we always wait on the page cache pages to be written out. 2149 * but we always wait on the page cache pages to be written out.
2151 * 2150 *
2152 * Note: In the past @filp could be NULL so we ignore it as we don't need it
2153 * anyway.
2154 *
2155 * Locking: Caller must hold i_mutex on the inode. 2151 * Locking: Caller must hold i_mutex on the inode.
2156 * 2152 *
2157 * TODO: We should probably also write all attribute/index inodes associated 2153 * TODO: We should probably also write all attribute/index inodes associated
2158 * with this inode but since we have no simple way of getting to them we ignore 2154 * with this inode but since we have no simple way of getting to them we ignore
2159 * this problem for now. 2155 * this problem for now.
2160 */ 2156 */
2161static int ntfs_file_fsync(struct file *filp, struct dentry *dentry, 2157static int ntfs_file_fsync(struct file *filp, int datasync)
2162 int datasync)
2163{ 2158{
2164 struct inode *vi = dentry->d_inode; 2159 struct inode *vi = filp->f_mapping->host;
2165 int err, ret = 0; 2160 int err, ret = 0;
2166 2161
2167 ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); 2162 ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 97e54b9e654b..6a13ea64c447 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -175,13 +175,12 @@ static int ocfs2_dir_release(struct inode *inode, struct file *file)
175 return 0; 175 return 0;
176} 176}
177 177
178static int ocfs2_sync_file(struct file *file, 178static int ocfs2_sync_file(struct file *file, int datasync)
179 struct dentry *dentry,
180 int datasync)
181{ 179{
182 int err = 0; 180 int err = 0;
183 journal_t *journal; 181 journal_t *journal;
184 struct inode *inode = dentry->d_inode; 182 struct dentry *dentry = file->f_path.dentry;
183 struct inode *inode = file->f_mapping->host;
185 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 184 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
186 185
187 mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync, 186 mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync,
@@ -1053,7 +1052,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
1053 } 1052 }
1054 1053
1055 /* 1054 /*
1056 * This will intentionally not wind up calling vmtruncate(), 1055 * This will intentionally not wind up calling simple_setsize(),
1057 * since all the work for a size change has been done above. 1056 * since all the work for a size change has been done above.
1058 * Otherwise, we could get into problems with truncate as 1057 * Otherwise, we could get into problems with truncate as
1059 * ip_alloc_sem is used there to protect against i_size 1058 * ip_alloc_sem is used there to protect against i_size
@@ -2119,9 +2118,13 @@ relock:
2119 * direct write may have instantiated a few 2118 * direct write may have instantiated a few
2120 * blocks outside i_size. Trim these off again. 2119 * blocks outside i_size. Trim these off again.
2121 * Don't need i_size_read because we hold i_mutex. 2120 * Don't need i_size_read because we hold i_mutex.
2121 *
2122 * XXX(hch): this looks buggy because ocfs2 did not
2123 * actually implement ->truncate. Take a look at
2124 * the new truncate sequence and update this accordingly
2122 */ 2125 */
2123 if (*ppos + count > inode->i_size) 2126 if (*ppos + count > inode->i_size)
2124 vmtruncate(inode, inode->i_size); 2127 simple_setsize(inode, inode->i_size);
2125 ret = written; 2128 ret = written;
2126 goto out_dio; 2129 goto out_dio;
2127 } 2130 }
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 2c26ce251cb3..0eaa929a4dbf 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -879,13 +879,15 @@ static int ocfs2_susp_quotas(struct ocfs2_super *osb, int unsuspend)
879 if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type])) 879 if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
880 continue; 880 continue;
881 if (unsuspend) 881 if (unsuspend)
882 status = vfs_quota_enable( 882 status = dquot_resume(sb, type);
883 sb_dqopt(sb)->files[type], 883 else {
884 type, QFMT_OCFS2, 884 struct ocfs2_mem_dqinfo *oinfo;
885 DQUOT_SUSPENDED); 885
886 else 886 /* Cancel periodic syncing before suspending */
887 status = vfs_quota_disable(sb, type, 887 oinfo = sb_dqinfo(sb, type)->dqi_priv;
888 DQUOT_SUSPENDED); 888 cancel_delayed_work_sync(&oinfo->dqi_sync_work);
889 status = dquot_suspend(sb, type);
890 }
889 if (status < 0) 891 if (status < 0)
890 break; 892 break;
891 } 893 }
@@ -916,8 +918,8 @@ static int ocfs2_enable_quotas(struct ocfs2_super *osb)
916 status = -ENOENT; 918 status = -ENOENT;
917 goto out_quota_off; 919 goto out_quota_off;
918 } 920 }
919 status = vfs_quota_enable(inode[type], type, QFMT_OCFS2, 921 status = dquot_enable(inode[type], type, QFMT_OCFS2,
920 DQUOT_USAGE_ENABLED); 922 DQUOT_USAGE_ENABLED);
921 if (status < 0) 923 if (status < 0)
922 goto out_quota_off; 924 goto out_quota_off;
923 } 925 }
@@ -952,8 +954,8 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
952 /* Turn off quotas. This will remove all dquot structures from 954 /* Turn off quotas. This will remove all dquot structures from
953 * memory and so they will be automatically synced to global 955 * memory and so they will be automatically synced to global
954 * quota files */ 956 * quota files */
955 vfs_quota_disable(sb, type, DQUOT_USAGE_ENABLED | 957 dquot_disable(sb, type, DQUOT_USAGE_ENABLED |
956 DQUOT_LIMITS_ENABLED); 958 DQUOT_LIMITS_ENABLED);
957 if (!inode) 959 if (!inode)
958 continue; 960 continue;
959 iput(inode); 961 iput(inode);
@@ -962,7 +964,7 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
962 964
963/* Handle quota on quotactl */ 965/* Handle quota on quotactl */
964static int ocfs2_quota_on(struct super_block *sb, int type, int format_id, 966static int ocfs2_quota_on(struct super_block *sb, int type, int format_id,
965 char *path, int remount) 967 char *path)
966{ 968{
967 unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, 969 unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
968 OCFS2_FEATURE_RO_COMPAT_GRPQUOTA}; 970 OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
@@ -970,30 +972,24 @@ static int ocfs2_quota_on(struct super_block *sb, int type, int format_id,
970 if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type])) 972 if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
971 return -EINVAL; 973 return -EINVAL;
972 974
973 if (remount) 975 return dquot_enable(sb_dqopt(sb)->files[type], type,
974 return 0; /* Just ignore it has been handled in 976 format_id, DQUOT_LIMITS_ENABLED);
975 * ocfs2_remount() */
976 return vfs_quota_enable(sb_dqopt(sb)->files[type], type,
977 format_id, DQUOT_LIMITS_ENABLED);
978} 977}
979 978
980/* Handle quota off quotactl */ 979/* Handle quota off quotactl */
981static int ocfs2_quota_off(struct super_block *sb, int type, int remount) 980static int ocfs2_quota_off(struct super_block *sb, int type)
982{ 981{
983 if (remount) 982 return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
984 return 0; /* Ignore now and handle later in
985 * ocfs2_remount() */
986 return vfs_quota_disable(sb, type, DQUOT_LIMITS_ENABLED);
987} 983}
988 984
989static const struct quotactl_ops ocfs2_quotactl_ops = { 985static const struct quotactl_ops ocfs2_quotactl_ops = {
990 .quota_on = ocfs2_quota_on, 986 .quota_on = ocfs2_quota_on,
991 .quota_off = ocfs2_quota_off, 987 .quota_off = ocfs2_quota_off,
992 .quota_sync = vfs_quota_sync, 988 .quota_sync = dquot_quota_sync,
993 .get_info = vfs_get_dqinfo, 989 .get_info = dquot_get_dqinfo,
994 .set_info = vfs_set_dqinfo, 990 .set_info = dquot_set_dqinfo,
995 .get_dqblk = vfs_get_dqblk, 991 .get_dqblk = dquot_get_dqblk,
996 .set_dqblk = vfs_set_dqblk, 992 .set_dqblk = dquot_set_dqblk,
997}; 993};
998 994
999static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) 995static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index 399487c09364..6e7a3291bbe8 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -329,7 +329,7 @@ const struct file_operations omfs_file_operations = {
329 .aio_read = generic_file_aio_read, 329 .aio_read = generic_file_aio_read,
330 .aio_write = generic_file_aio_write, 330 .aio_write = generic_file_aio_write,
331 .mmap = generic_file_mmap, 331 .mmap = generic_file_mmap,
332 .fsync = simple_fsync, 332 .fsync = generic_file_fsync,
333 .splice_read = generic_file_splice_read, 333 .splice_read = generic_file_splice_read,
334}; 334};
335 335
diff --git a/fs/pipe.c b/fs/pipe.c
index d79872eba09a..db6eaaba0dd8 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -230,6 +230,7 @@ void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
230 230
231 return kmap(buf->page); 231 return kmap(buf->page);
232} 232}
233EXPORT_SYMBOL(generic_pipe_buf_map);
233 234
234/** 235/**
235 * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer 236 * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
@@ -249,6 +250,7 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
249 } else 250 } else
250 kunmap(buf->page); 251 kunmap(buf->page);
251} 252}
253EXPORT_SYMBOL(generic_pipe_buf_unmap);
252 254
253/** 255/**
254 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer 256 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
@@ -279,6 +281,7 @@ int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
279 281
280 return 1; 282 return 1;
281} 283}
284EXPORT_SYMBOL(generic_pipe_buf_steal);
282 285
283/** 286/**
284 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer 287 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
@@ -294,6 +297,7 @@ void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
294{ 297{
295 page_cache_get(buf->page); 298 page_cache_get(buf->page);
296} 299}
300EXPORT_SYMBOL(generic_pipe_buf_get);
297 301
298/** 302/**
299 * generic_pipe_buf_confirm - verify contents of the pipe buffer 303 * generic_pipe_buf_confirm - verify contents of the pipe buffer
@@ -309,6 +313,7 @@ int generic_pipe_buf_confirm(struct pipe_inode_info *info,
309{ 313{
310 return 0; 314 return 0;
311} 315}
316EXPORT_SYMBOL(generic_pipe_buf_confirm);
312 317
313/** 318/**
314 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer 319 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
@@ -323,6 +328,7 @@ void generic_pipe_buf_release(struct pipe_inode_info *pipe,
323{ 328{
324 page_cache_release(buf->page); 329 page_cache_release(buf->page);
325} 330}
331EXPORT_SYMBOL(generic_pipe_buf_release);
326 332
327static const struct pipe_buf_operations anon_pipe_buf_ops = { 333static const struct pipe_buf_operations anon_pipe_buf_ops = {
328 .can_merge = 1, 334 .can_merge = 1,
@@ -1169,14 +1175,18 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
1169 1175
1170 switch (cmd) { 1176 switch (cmd) {
1171 case F_SETPIPE_SZ: 1177 case F_SETPIPE_SZ:
1172 if (!capable(CAP_SYS_ADMIN) && arg > pipe_max_pages) 1178 if (!capable(CAP_SYS_ADMIN) && arg > pipe_max_pages) {
1173 return -EINVAL; 1179 ret = -EINVAL;
1180 goto out;
1181 }
1174 /* 1182 /*
1175 * The pipe needs to be at least 2 pages large to 1183 * The pipe needs to be at least 2 pages large to
1176 * guarantee POSIX behaviour. 1184 * guarantee POSIX behaviour.
1177 */ 1185 */
1178 if (arg < 2) 1186 if (arg < 2) {
1179 return -EINVAL; 1187 ret = -EINVAL;
1188 goto out;
1189 }
1180 ret = pipe_set_size(pipe, arg); 1190 ret = pipe_set_size(pipe, arg);
1181 break; 1191 break;
1182 case F_GETPIPE_SZ: 1192 case F_GETPIPE_SZ:
@@ -1187,6 +1197,7 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
1187 break; 1197 break;
1188 } 1198 }
1189 1199
1200out:
1190 mutex_unlock(&pipe->inode->i_mutex); 1201 mutex_unlock(&pipe->inode->i_mutex);
1191 return ret; 1202 return ret;
1192} 1203}
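
Besides exporting the generic pipe_buf helpers for use by modules, the pipe.c hunk fixes the F_SETPIPE_SZ error paths: the early returns for a too-small or over-limit argument used to leave pipe->inode->i_mutex held, so they now branch to the common unlock. From userspace the interface this code serves looks roughly like the sketch below (illustration only; at this point in the series the argument is a page count, at least two pages, and exceeding the pipe_max_pages limit requires CAP_SYS_ADMIN):

    /* Userspace illustration: resizing a pipe with fcntl(). */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #ifndef F_SETPIPE_SZ                    /* not yet in older libc headers */
    #define F_SETPIPE_SZ 1031
    #define F_GETPIPE_SZ 1032
    #endif

    int main(void)
    {
            int fds[2];

            if (pipe(fds))
                    return 1;

            if (fcntl(fds[1], F_SETPIPE_SZ, 16) < 0)        /* ask for 16 buffers */
                    perror("F_SETPIPE_SZ");
            printf("pipe capacity now: %d\n", fcntl(fds[1], F_GETPIPE_SZ));

            close(fds[0]);
            close(fds[1]);
            return 0;
    }
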
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 885ab5513ac5..9b58d38bc911 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -267,7 +267,7 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
267 shpending = p->signal->shared_pending.signal; 267 shpending = p->signal->shared_pending.signal;
268 blocked = p->blocked; 268 blocked = p->blocked;
269 collect_sigign_sigcatch(p, &ignored, &caught); 269 collect_sigign_sigcatch(p, &ignored, &caught);
270 num_threads = atomic_read(&p->signal->count); 270 num_threads = get_nr_threads(p);
271 rcu_read_lock(); /* FIXME: is this correct? */ 271 rcu_read_lock(); /* FIXME: is this correct? */
272 qsize = atomic_read(&__task_cred(p)->user->sigpending); 272 qsize = atomic_read(&__task_cred(p)->user->sigpending);
273 rcu_read_unlock(); 273 rcu_read_unlock();
@@ -410,7 +410,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
410 tty_nr = new_encode_dev(tty_devnum(sig->tty)); 410 tty_nr = new_encode_dev(tty_devnum(sig->tty));
411 } 411 }
412 412
413 num_threads = atomic_read(&sig->count); 413 num_threads = get_nr_threads(task);
414 collect_sigign_sigcatch(task, &sigign, &sigcatch); 414 collect_sigign_sigcatch(task, &sigign, &sigcatch);
415 415
416 cmin_flt = sig->cmin_flt; 416 cmin_flt = sig->cmin_flt;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c7f9f23449dc..acb7ef80ea4f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -166,18 +166,6 @@ static int get_fs_path(struct task_struct *task, struct path *path, bool root)
166 return result; 166 return result;
167} 167}
168 168
169static int get_nr_threads(struct task_struct *tsk)
170{
171 unsigned long flags;
172 int count = 0;
173
174 if (lock_task_sighand(tsk, &flags)) {
175 count = atomic_read(&tsk->signal->count);
176 unlock_task_sighand(tsk, &flags);
177 }
178 return count;
179}
180
181static int proc_cwd_link(struct inode *inode, struct path *path) 169static int proc_cwd_link(struct inode *inode, struct path *path)
182{ 170{
183 struct task_struct *task = get_proc_task(inode); 171 struct task_struct *task = get_proc_task(inode);
@@ -2444,7 +2432,7 @@ static struct dentry *proc_base_instantiate(struct inode *dir,
2444 const struct pid_entry *p = ptr; 2432 const struct pid_entry *p = ptr;
2445 struct inode *inode; 2433 struct inode *inode;
2446 struct proc_inode *ei; 2434 struct proc_inode *ei;
2447 struct dentry *error = ERR_PTR(-EINVAL); 2435 struct dentry *error;
2448 2436
2449 /* Allocate the inode */ 2437 /* Allocate the inode */
2450 error = ERR_PTR(-ENOMEM); 2438 error = ERR_PTR(-ENOMEM);
@@ -2794,7 +2782,7 @@ out:
2794 2782
2795struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd) 2783struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
2796{ 2784{
2797 struct dentry *result = ERR_PTR(-ENOENT); 2785 struct dentry *result;
2798 struct task_struct *task; 2786 struct task_struct *task;
2799 unsigned tgid; 2787 unsigned tgid;
2800 struct pid_namespace *ns; 2788 struct pid_namespace *ns;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 43c127490606..2791907744ed 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -343,21 +343,6 @@ static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */
343/* 343/*
344 * Return an inode number between PROC_DYNAMIC_FIRST and 344 * Return an inode number between PROC_DYNAMIC_FIRST and
345 * 0xffffffff, or zero on failure. 345 * 0xffffffff, or zero on failure.
346 *
347 * Current inode allocations in the proc-fs (hex-numbers):
348 *
349 * 00000000 reserved
350 * 00000001-00000fff static entries (goners)
351 * 001 root-ino
352 *
353 * 00001000-00001fff unused
354 * 0001xxxx-7fffxxxx pid-dir entries for pid 1-7fff
355 * 80000000-efffffff unused
356 * f0000000-ffffffff dynamic entries
357 *
358 * Goal:
359 * Once we split the thing into several virtual filesystems,
360 * we will get rid of magical ranges (and this comment, BTW).
361 */ 346 */
362static unsigned int get_inode_number(void) 347static unsigned int get_inode_number(void)
363{ 348{
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index c837a77351be..6f37c391468d 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -588,7 +588,7 @@ static struct kcore_list kcore_text;
588 */ 588 */
589static void __init proc_kcore_text_init(void) 589static void __init proc_kcore_text_init(void)
590{ 590{
591 kclist_add(&kcore_text, _stext, _end - _stext, KCORE_TEXT); 591 kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
592} 592}
593#else 593#else
594static void __init proc_kcore_text_init(void) 594static void __init proc_kcore_text_init(void)
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 757c069f2a65..4258384ed22d 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -110,7 +110,6 @@ void __init proc_root_init(void)
110 if (err) 110 if (err)
111 return; 111 return;
112 proc_mnt = kern_mount_data(&proc_fs_type, &init_pid_ns); 112 proc_mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
113 err = PTR_ERR(proc_mnt);
114 if (IS_ERR(proc_mnt)) { 113 if (IS_ERR(proc_mnt)) {
115 unregister_filesystem(&proc_fs_type); 114 unregister_filesystem(&proc_fs_type);
116 return; 115 return;
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index 6f30c3d5bcbf..6e8fc62b40a8 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -77,9 +77,10 @@ out:
77 77
78const struct file_operations qnx4_dir_operations = 78const struct file_operations qnx4_dir_operations =
79{ 79{
80 .llseek = generic_file_llseek,
80 .read = generic_read_dir, 81 .read = generic_read_dir,
81 .readdir = qnx4_readdir, 82 .readdir = qnx4_readdir,
82 .fsync = simple_fsync, 83 .fsync = generic_file_fsync,
83}; 84};
84 85
85const struct inode_operations qnx4_dir_inode_operations = 86const struct inode_operations qnx4_dir_inode_operations =
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 655a4c52b8c3..12c233da1b6b 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -228,10 +228,6 @@ static struct hlist_head *dquot_hash;
228 228
229struct dqstats dqstats; 229struct dqstats dqstats;
230EXPORT_SYMBOL(dqstats); 230EXPORT_SYMBOL(dqstats);
231#ifdef CONFIG_SMP
232struct dqstats *dqstats_pcpu;
233EXPORT_SYMBOL(dqstats_pcpu);
234#endif
235 231
236static qsize_t inode_get_rsv_space(struct inode *inode); 232static qsize_t inode_get_rsv_space(struct inode *inode);
237static void __dquot_initialize(struct inode *inode, int type); 233static void __dquot_initialize(struct inode *inode, int type);
@@ -584,7 +580,7 @@ out:
584} 580}
585EXPORT_SYMBOL(dquot_scan_active); 581EXPORT_SYMBOL(dquot_scan_active);
586 582
587int vfs_quota_sync(struct super_block *sb, int type, int wait) 583int dquot_quota_sync(struct super_block *sb, int type, int wait)
588{ 584{
589 struct list_head *dirty; 585 struct list_head *dirty;
590 struct dquot *dquot; 586 struct dquot *dquot;
@@ -656,7 +652,7 @@ int vfs_quota_sync(struct super_block *sb, int type, int wait)
656 652
657 return 0; 653 return 0;
658} 654}
659EXPORT_SYMBOL(vfs_quota_sync); 655EXPORT_SYMBOL(dquot_quota_sync);
660 656
661/* Free unused dquots from cache */ 657/* Free unused dquots from cache */
662static void prune_dqcache(int count) 658static void prune_dqcache(int count)
@@ -676,27 +672,10 @@ static void prune_dqcache(int count)
676 } 672 }
677} 673}
678 674
679static int dqstats_read(unsigned int type)
680{
681 int count = 0;
682#ifdef CONFIG_SMP
683 int cpu;
684 for_each_possible_cpu(cpu)
685 count += per_cpu_ptr(dqstats_pcpu, cpu)->stat[type];
686 /* Statistics reading is racy, but absolute accuracy isn't required */
687 if (count < 0)
688 count = 0;
689#else
690 count = dqstats.stat[type];
691#endif
692 return count;
693}
694
695/* 675/*
696 * This is called from kswapd when we think we need some 676 * This is called from kswapd when we think we need some
697 * more memory 677 * more memory
698 */ 678 */
699
700static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) 679static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
701{ 680{
702 if (nr) { 681 if (nr) {
@@ -704,7 +683,9 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
704 prune_dqcache(nr); 683 prune_dqcache(nr);
705 spin_unlock(&dq_list_lock); 684 spin_unlock(&dq_list_lock);
706 } 685 }
707 return (dqstats_read(DQST_FREE_DQUOTS)/100) * sysctl_vfs_cache_pressure; 686 return ((unsigned)
687 percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS])
688 /100) * sysctl_vfs_cache_pressure;
708} 689}
709 690
710static struct shrinker dqcache_shrinker = { 691static struct shrinker dqcache_shrinker = {
@@ -1514,11 +1495,13 @@ static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
1514/* 1495/*
1515 * This operation can block, but only after everything is updated 1496 * This operation can block, but only after everything is updated
1516 */ 1497 */
1517int __dquot_alloc_space(struct inode *inode, qsize_t number, 1498int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1518 int warn, int reserve)
1519{ 1499{
1520 int cnt, ret = 0; 1500 int cnt, ret = 0;
1521 char warntype[MAXQUOTAS]; 1501 char warntype[MAXQUOTAS];
1502 int warn = flags & DQUOT_SPACE_WARN;
1503 int reserve = flags & DQUOT_SPACE_RESERVE;
1504 int nofail = flags & DQUOT_SPACE_NOFAIL;
1522 1505
1523 /* 1506 /*
1524 * First test before acquiring mutex - solves deadlocks when we 1507 * First test before acquiring mutex - solves deadlocks when we
@@ -1539,7 +1522,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
1539 continue; 1522 continue;
1540 ret = check_bdq(inode->i_dquot[cnt], number, !warn, 1523 ret = check_bdq(inode->i_dquot[cnt], number, !warn,
1541 warntype+cnt); 1524 warntype+cnt);
1542 if (ret) { 1525 if (ret && !nofail) {
1543 spin_unlock(&dq_data_lock); 1526 spin_unlock(&dq_data_lock);
1544 goto out_flush_warn; 1527 goto out_flush_warn;
1545 } 1528 }
@@ -1638,10 +1621,11 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
1638/* 1621/*
1639 * This operation can block, but only after everything is updated 1622 * This operation can block, but only after everything is updated
1640 */ 1623 */
1641void __dquot_free_space(struct inode *inode, qsize_t number, int reserve) 1624void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
1642{ 1625{
1643 unsigned int cnt; 1626 unsigned int cnt;
1644 char warntype[MAXQUOTAS]; 1627 char warntype[MAXQUOTAS];
1628 int reserve = flags & DQUOT_SPACE_RESERVE;
1645 1629
1646 /* First test before acquiring mutex - solves deadlocks when we 1630 /* First test before acquiring mutex - solves deadlocks when we
1647 * re-enter the quota code and are already holding the mutex */ 1631 * re-enter the quota code and are already holding the mutex */
@@ -1812,7 +1796,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
1812 if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) 1796 if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid)
1813 transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA); 1797 transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA);
1814 if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) 1798 if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)
1815 transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_uid, GRPQUOTA); 1799 transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_gid, GRPQUOTA);
1816 1800
1817 ret = __dquot_transfer(inode, transfer_to); 1801 ret = __dquot_transfer(inode, transfer_to);
1818 dqput_all(transfer_to); 1802 dqput_all(transfer_to);
@@ -1847,6 +1831,7 @@ const struct dquot_operations dquot_operations = {
1847 .alloc_dquot = dquot_alloc, 1831 .alloc_dquot = dquot_alloc,
1848 .destroy_dquot = dquot_destroy, 1832 .destroy_dquot = dquot_destroy,
1849}; 1833};
1834EXPORT_SYMBOL(dquot_operations);
1850 1835
1851/* 1836/*
1852 * Generic helper for ->open on filesystems supporting disk quotas. 1837 * Generic helper for ->open on filesystems supporting disk quotas.
@@ -1865,7 +1850,7 @@ EXPORT_SYMBOL(dquot_file_open);
1865/* 1850/*
1866 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) 1851 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
1867 */ 1852 */
1868int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) 1853int dquot_disable(struct super_block *sb, int type, unsigned int flags)
1869{ 1854{
1870 int cnt, ret = 0; 1855 int cnt, ret = 0;
1871 struct quota_info *dqopt = sb_dqopt(sb); 1856 struct quota_info *dqopt = sb_dqopt(sb);
@@ -1995,14 +1980,15 @@ put_inodes:
1995 } 1980 }
1996 return ret; 1981 return ret;
1997} 1982}
1998EXPORT_SYMBOL(vfs_quota_disable); 1983EXPORT_SYMBOL(dquot_disable);
1999 1984
2000int vfs_quota_off(struct super_block *sb, int type, int remount) 1985int dquot_quota_off(struct super_block *sb, int type)
2001{ 1986{
2002 return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED : 1987 return dquot_disable(sb, type,
2003 (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED)); 1988 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
2004} 1989}
2005EXPORT_SYMBOL(vfs_quota_off); 1990EXPORT_SYMBOL(dquot_quota_off);
1991
2006/* 1992/*
2007 * Turn quotas on on a device 1993 * Turn quotas on on a device
2008 */ 1994 */
@@ -2120,36 +2106,43 @@ out_fmt:
2120} 2106}
2121 2107
2122/* Reenable quotas on remount RW */ 2108/* Reenable quotas on remount RW */
2123static int vfs_quota_on_remount(struct super_block *sb, int type) 2109int dquot_resume(struct super_block *sb, int type)
2124{ 2110{
2125 struct quota_info *dqopt = sb_dqopt(sb); 2111 struct quota_info *dqopt = sb_dqopt(sb);
2126 struct inode *inode; 2112 struct inode *inode;
2127 int ret; 2113 int ret = 0, cnt;
2128 unsigned int flags; 2114 unsigned int flags;
2129 2115
2130 mutex_lock(&dqopt->dqonoff_mutex); 2116 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2131 if (!sb_has_quota_suspended(sb, type)) { 2117 if (type != -1 && cnt != type)
2118 continue;
2119
2120 mutex_lock(&dqopt->dqonoff_mutex);
2121 if (!sb_has_quota_suspended(sb, cnt)) {
2122 mutex_unlock(&dqopt->dqonoff_mutex);
2123 continue;
2124 }
2125 inode = dqopt->files[cnt];
2126 dqopt->files[cnt] = NULL;
2127 spin_lock(&dq_state_lock);
2128 flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
2129 DQUOT_LIMITS_ENABLED,
2130 cnt);
2131 dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
2132 spin_unlock(&dq_state_lock);
2132 mutex_unlock(&dqopt->dqonoff_mutex); 2133 mutex_unlock(&dqopt->dqonoff_mutex);
2133 return 0;
2134 }
2135 inode = dqopt->files[type];
2136 dqopt->files[type] = NULL;
2137 spin_lock(&dq_state_lock);
2138 flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
2139 DQUOT_LIMITS_ENABLED, type);
2140 dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type);
2141 spin_unlock(&dq_state_lock);
2142 mutex_unlock(&dqopt->dqonoff_mutex);
2143 2134
2144 flags = dquot_generic_flag(flags, type); 2135 flags = dquot_generic_flag(flags, cnt);
2145 ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id, 2136 ret = vfs_load_quota_inode(inode, cnt,
2146 flags); 2137 dqopt->info[cnt].dqi_fmt_id, flags);
2147 iput(inode); 2138 iput(inode);
2139 }
2148 2140
2149 return ret; 2141 return ret;
2150} 2142}
2143EXPORT_SYMBOL(dquot_resume);
2151 2144
2152int vfs_quota_on_path(struct super_block *sb, int type, int format_id, 2145int dquot_quota_on_path(struct super_block *sb, int type, int format_id,
2153 struct path *path) 2146 struct path *path)
2154{ 2147{
2155 int error = security_quota_on(path->dentry); 2148 int error = security_quota_on(path->dentry);
@@ -2164,40 +2157,36 @@ int vfs_quota_on_path(struct super_block *sb, int type, int format_id,
2164 DQUOT_LIMITS_ENABLED); 2157 DQUOT_LIMITS_ENABLED);
2165 return error; 2158 return error;
2166} 2159}
2167EXPORT_SYMBOL(vfs_quota_on_path); 2160EXPORT_SYMBOL(dquot_quota_on_path);
2168 2161
2169int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, 2162int dquot_quota_on(struct super_block *sb, int type, int format_id, char *name)
2170 int remount)
2171{ 2163{
2172 struct path path; 2164 struct path path;
2173 int error; 2165 int error;
2174 2166
2175 if (remount)
2176 return vfs_quota_on_remount(sb, type);
2177
2178 error = kern_path(name, LOOKUP_FOLLOW, &path); 2167 error = kern_path(name, LOOKUP_FOLLOW, &path);
2179 if (!error) { 2168 if (!error) {
2180 error = vfs_quota_on_path(sb, type, format_id, &path); 2169 error = dquot_quota_on_path(sb, type, format_id, &path);
2181 path_put(&path); 2170 path_put(&path);
2182 } 2171 }
2183 return error; 2172 return error;
2184} 2173}
2185EXPORT_SYMBOL(vfs_quota_on); 2174EXPORT_SYMBOL(dquot_quota_on);
2186 2175
2187/* 2176/*
2188 * More powerful function for turning on quotas allowing setting 2177 * More powerful function for turning on quotas allowing setting
2189 * of individual quota flags 2178 * of individual quota flags
2190 */ 2179 */
2191int vfs_quota_enable(struct inode *inode, int type, int format_id, 2180int dquot_enable(struct inode *inode, int type, int format_id,
2192 unsigned int flags) 2181 unsigned int flags)
2193{ 2182{
2194 int ret = 0; 2183 int ret = 0;
2195 struct super_block *sb = inode->i_sb; 2184 struct super_block *sb = inode->i_sb;
2196 struct quota_info *dqopt = sb_dqopt(sb); 2185 struct quota_info *dqopt = sb_dqopt(sb);
2197 2186
2198 /* Just unsuspend quotas? */ 2187 /* Just unsuspend quotas? */
2199 if (flags & DQUOT_SUSPENDED) 2188 BUG_ON(flags & DQUOT_SUSPENDED);
2200 return vfs_quota_on_remount(sb, type); 2189
2201 if (!flags) 2190 if (!flags)
2202 return 0; 2191 return 0;
2203 /* Just updating flags needed? */ 2192 /* Just updating flags needed? */
@@ -2229,13 +2218,13 @@ out_lock:
2229load_quota: 2218load_quota:
2230 return vfs_load_quota_inode(inode, type, format_id, flags); 2219 return vfs_load_quota_inode(inode, type, format_id, flags);
2231} 2220}
2232EXPORT_SYMBOL(vfs_quota_enable); 2221EXPORT_SYMBOL(dquot_enable);
2233 2222
2234/* 2223/*
2235 * This function is used when filesystem needs to initialize quotas 2224 * This function is used when filesystem needs to initialize quotas
2236 * during mount time. 2225 * during mount time.
2237 */ 2226 */
2238int vfs_quota_on_mount(struct super_block *sb, char *qf_name, 2227int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
2239 int format_id, int type) 2228 int format_id, int type)
2240{ 2229{
2241 struct dentry *dentry; 2230 struct dentry *dentry;
@@ -2261,24 +2250,7 @@ out:
2261 dput(dentry); 2250 dput(dentry);
2262 return error; 2251 return error;
2263} 2252}
2264EXPORT_SYMBOL(vfs_quota_on_mount); 2253EXPORT_SYMBOL(dquot_quota_on_mount);
2265
2266/* Wrapper to turn on quotas when remounting rw */
2267int vfs_dq_quota_on_remount(struct super_block *sb)
2268{
2269 int cnt;
2270 int ret = 0, err;
2271
2272 if (!sb->s_qcop || !sb->s_qcop->quota_on)
2273 return -ENOSYS;
2274 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2275 err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1);
2276 if (err < 0 && !ret)
2277 ret = err;
2278 }
2279 return ret;
2280}
2281EXPORT_SYMBOL(vfs_dq_quota_on_remount);
2282 2254
2283static inline qsize_t qbtos(qsize_t blocks) 2255static inline qsize_t qbtos(qsize_t blocks)
2284{ 2256{
@@ -2313,8 +2285,8 @@ static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
2313 spin_unlock(&dq_data_lock); 2285 spin_unlock(&dq_data_lock);
2314} 2286}
2315 2287
2316int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, 2288int dquot_get_dqblk(struct super_block *sb, int type, qid_t id,
2317 struct fs_disk_quota *di) 2289 struct fs_disk_quota *di)
2318{ 2290{
2319 struct dquot *dquot; 2291 struct dquot *dquot;
2320 2292
@@ -2326,7 +2298,7 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
2326 2298
2327 return 0; 2299 return 0;
2328} 2300}
2329EXPORT_SYMBOL(vfs_get_dqblk); 2301EXPORT_SYMBOL(dquot_get_dqblk);
2330 2302
2331#define VFS_FS_DQ_MASK \ 2303#define VFS_FS_DQ_MASK \
2332 (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \ 2304 (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
@@ -2425,7 +2397,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
2425 return 0; 2397 return 0;
2426} 2398}
2427 2399
2428int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, 2400int dquot_set_dqblk(struct super_block *sb, int type, qid_t id,
2429 struct fs_disk_quota *di) 2401 struct fs_disk_quota *di)
2430{ 2402{
2431 struct dquot *dquot; 2403 struct dquot *dquot;
@@ -2441,10 +2413,10 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
2441out: 2413out:
2442 return rc; 2414 return rc;
2443} 2415}
2444EXPORT_SYMBOL(vfs_set_dqblk); 2416EXPORT_SYMBOL(dquot_set_dqblk);
2445 2417
2446/* Generic routine for getting common part of quota file information */ 2418/* Generic routine for getting common part of quota file information */
2447int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) 2419int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2448{ 2420{
2449 struct mem_dqinfo *mi; 2421 struct mem_dqinfo *mi;
2450 2422
@@ -2463,10 +2435,10 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2463 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 2435 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2464 return 0; 2436 return 0;
2465} 2437}
2466EXPORT_SYMBOL(vfs_get_dqinfo); 2438EXPORT_SYMBOL(dquot_get_dqinfo);
2467 2439
2468/* Generic routine for setting common part of quota file information */ 2440/* Generic routine for setting common part of quota file information */
2469int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) 2441int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2470{ 2442{
2471 struct mem_dqinfo *mi; 2443 struct mem_dqinfo *mi;
2472 int err = 0; 2444 int err = 0;
@@ -2493,27 +2465,27 @@ out:
2493 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 2465 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2494 return err; 2466 return err;
2495} 2467}
2496EXPORT_SYMBOL(vfs_set_dqinfo); 2468EXPORT_SYMBOL(dquot_set_dqinfo);
2497 2469
2498const struct quotactl_ops vfs_quotactl_ops = { 2470const struct quotactl_ops dquot_quotactl_ops = {
2499 .quota_on = vfs_quota_on, 2471 .quota_on = dquot_quota_on,
2500 .quota_off = vfs_quota_off, 2472 .quota_off = dquot_quota_off,
2501 .quota_sync = vfs_quota_sync, 2473 .quota_sync = dquot_quota_sync,
2502 .get_info = vfs_get_dqinfo, 2474 .get_info = dquot_get_dqinfo,
2503 .set_info = vfs_set_dqinfo, 2475 .set_info = dquot_set_dqinfo,
2504 .get_dqblk = vfs_get_dqblk, 2476 .get_dqblk = dquot_get_dqblk,
2505 .set_dqblk = vfs_set_dqblk 2477 .set_dqblk = dquot_set_dqblk
2506}; 2478};
2507 2479EXPORT_SYMBOL(dquot_quotactl_ops);
2508 2480
2509static int do_proc_dqstats(struct ctl_table *table, int write, 2481static int do_proc_dqstats(struct ctl_table *table, int write,
2510 void __user *buffer, size_t *lenp, loff_t *ppos) 2482 void __user *buffer, size_t *lenp, loff_t *ppos)
2511{ 2483{
2512#ifdef CONFIG_SMP
2513 /* Update global table */
2514 unsigned int type = (int *)table->data - dqstats.stat; 2484 unsigned int type = (int *)table->data - dqstats.stat;
2515 dqstats.stat[type] = dqstats_read(type); 2485
2516#endif 2486 /* Update global table */
2487 dqstats.stat[type] =
2488 percpu_counter_sum_positive(&dqstats.counter[type]);
2517 return proc_dointvec(table, write, buffer, lenp, ppos); 2489 return proc_dointvec(table, write, buffer, lenp, ppos);
2518} 2490}
2519 2491
@@ -2606,7 +2578,7 @@ static ctl_table sys_table[] = {
2606 2578
2607static int __init dquot_init(void) 2579static int __init dquot_init(void)
2608{ 2580{
2609 int i; 2581 int i, ret;
2610 unsigned long nr_hash, order; 2582 unsigned long nr_hash, order;
2611 2583
2612 printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__); 2584 printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
@@ -2624,12 +2596,11 @@ static int __init dquot_init(void)
2624 if (!dquot_hash) 2596 if (!dquot_hash)
2625 panic("Cannot create dquot hash table"); 2597 panic("Cannot create dquot hash table");
2626 2598
2627#ifdef CONFIG_SMP 2599 for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
2628 dqstats_pcpu = alloc_percpu(struct dqstats); 2600 ret = percpu_counter_init(&dqstats.counter[i], 0);
2629 if (!dqstats_pcpu) 2601 if (ret)
2630 panic("Cannot create dquot stats table"); 2602 panic("Cannot create dquot stat counters");
2631#endif 2603 }
2632 memset(&dqstats, 0, sizeof(struct dqstats));
2633 2604
2634 /* Find power-of-two hlist_heads which can fit into allocation */ 2605 /* Find power-of-two hlist_heads which can fit into allocation */
2635 nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head); 2606 nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
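
Most of the dquot.c hunk is the rename of the generic quota entry points from vfs_quota_* to dquot_*, together with the move of the statistics to percpu counters and the folding of the old remount path into dquot_resume(). For a filesystem that keeps its quota files as regular files, the wiring after this series looks roughly as follows; examplefs and its remount handler are hypothetical, and a real filesystem would also enable quotas at mount time (compare dquot_quota_on_mount() above):

    /* Hypothetical sketch: using the renamed generic quota operations. */
    static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
    {
            /* ... usual superblock setup ... */
            sb->s_qcop = &dquot_quotactl_ops;       /* was vfs_quotactl_ops */
            sb->dq_op  = &dquot_operations;         /* now exported as well */
            return 0;
    }

    static int examplefs_remount(struct super_block *sb, int *flags, char *data)
    {
            /* The 'remount' argument to ->quota_on/->quota_off is gone;
             * suspend/resume is now the filesystem's explicit job. */
            if (*flags & MS_RDONLY)
                    dquot_suspend(sb, -1);          /* -1: all quota types */
            else
                    dquot_resume(sb, -1);
            return 0;
    }
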
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index ce3dfd066f59..b299961e1edb 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -73,7 +73,7 @@ static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
73 if (IS_ERR(pathname)) 73 if (IS_ERR(pathname))
74 return PTR_ERR(pathname); 74 return PTR_ERR(pathname);
75 if (sb->s_qcop->quota_on) 75 if (sb->s_qcop->quota_on)
76 ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); 76 ret = sb->s_qcop->quota_on(sb, type, id, pathname);
77 putname(pathname); 77 putname(pathname);
78 return ret; 78 return ret;
79} 79}
@@ -260,7 +260,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
260 case Q_QUOTAOFF: 260 case Q_QUOTAOFF:
261 if (!sb->s_qcop->quota_off) 261 if (!sb->s_qcop->quota_off)
262 return -ENOSYS; 262 return -ENOSYS;
263 return sb->s_qcop->quota_off(sb, type, 0); 263 return sb->s_qcop->quota_off(sb, type);
264 case Q_GETFMT: 264 case Q_GETFMT:
265 return quota_getfmt(sb, type, addr); 265 return quota_getfmt(sb, type, addr);
266 case Q_GETINFO: 266 case Q_GETINFO:
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c
index 78f613cb9c76..4884ac5ae9be 100644
--- a/fs/ramfs/file-mmu.c
+++ b/fs/ramfs/file-mmu.c
@@ -43,12 +43,13 @@ const struct file_operations ramfs_file_operations = {
43 .write = do_sync_write, 43 .write = do_sync_write,
44 .aio_write = generic_file_aio_write, 44 .aio_write = generic_file_aio_write,
45 .mmap = generic_file_mmap, 45 .mmap = generic_file_mmap,
46 .fsync = simple_sync_file, 46 .fsync = noop_fsync,
47 .splice_read = generic_file_splice_read, 47 .splice_read = generic_file_splice_read,
48 .splice_write = generic_file_splice_write, 48 .splice_write = generic_file_splice_write,
49 .llseek = generic_file_llseek, 49 .llseek = generic_file_llseek,
50}; 50};
51 51
52const struct inode_operations ramfs_file_inode_operations = { 52const struct inode_operations ramfs_file_inode_operations = {
53 .setattr = simple_setattr,
53 .getattr = simple_getattr, 54 .getattr = simple_getattr,
54}; 55};
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 5ea4ad81a429..d532c20fc179 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -42,7 +42,7 @@ const struct file_operations ramfs_file_operations = {
42 .aio_read = generic_file_aio_read, 42 .aio_read = generic_file_aio_read,
43 .write = do_sync_write, 43 .write = do_sync_write,
44 .aio_write = generic_file_aio_write, 44 .aio_write = generic_file_aio_write,
45 .fsync = simple_sync_file, 45 .fsync = noop_fsync,
46 .splice_read = generic_file_splice_read, 46 .splice_read = generic_file_splice_read,
47 .splice_write = generic_file_splice_write, 47 .splice_write = generic_file_splice_write,
48 .llseek = generic_file_llseek, 48 .llseek = generic_file_llseek,
@@ -146,7 +146,7 @@ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
146 return ret; 146 return ret;
147 } 147 }
148 148
149 ret = vmtruncate(inode, newsize); 149 ret = simple_setsize(inode, newsize);
150 150
151 return ret; 151 return ret;
152} 152}
@@ -169,7 +169,8 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
169 169
170 /* pick out size-changing events */ 170 /* pick out size-changing events */
171 if (ia->ia_valid & ATTR_SIZE) { 171 if (ia->ia_valid & ATTR_SIZE) {
172 loff_t size = i_size_read(inode); 172 loff_t size = inode->i_size;
173
173 if (ia->ia_size != size) { 174 if (ia->ia_size != size) {
174 ret = ramfs_nommu_resize(inode, ia->ia_size, size); 175 ret = ramfs_nommu_resize(inode, ia->ia_size, size);
175 if (ret < 0 || ia->ia_valid == ATTR_SIZE) 176 if (ret < 0 || ia->ia_valid == ATTR_SIZE)
@@ -182,7 +183,7 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
182 } 183 }
183 } 184 }
184 185
185 ret = inode_setattr(inode, ia); 186 generic_setattr(inode, ia);
186 out: 187 out:
187 ia->ia_valid = old_ia_valid; 188 ia->ia_valid = old_ia_valid;
188 return ret; 189 return ret;
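
The ramfs conversion swaps simple_sync_file for noop_fsync and, together with the new .setattr wiring in file-mmu.c above, routes size changes through the generic setattr helpers instead of vmtruncate(). A hedged sketch of the inode_operations a small in-memory filesystem ends up with (examplefs is hypothetical):

    /* Hypothetical in-memory filesystem: generic helpers cover setattr/getattr. */
    static const struct inode_operations examplefs_file_inode_ops = {
            .setattr = simple_setattr,      /* size changes handled via simple_setsize() */
            .getattr = simple_getattr,
    };
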
diff --git a/fs/read_write.c b/fs/read_write.c
index 113386d6fd2d..9c0485236e68 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -97,6 +97,23 @@ loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
97} 97}
98EXPORT_SYMBOL(generic_file_llseek); 98EXPORT_SYMBOL(generic_file_llseek);
99 99
100/**
101 * noop_llseek - No Operation Performed llseek implementation
102 * @file: file structure to seek on
103 * @offset: file offset to seek to
104 * @origin: type of seek
105 *
106 * This is an implementation of ->llseek useable for the rare special case when
107 * userspace expects the seek to succeed but the (device) file is actually not
108 * able to perform the seek. In this case you use noop_llseek() instead of
109 * falling back to the default implementation of ->llseek.
110 */
111loff_t noop_llseek(struct file *file, loff_t offset, int origin)
112{
113 return file->f_pos;
114}
115EXPORT_SYMBOL(noop_llseek);
116
100loff_t no_llseek(struct file *file, loff_t offset, int origin) 117loff_t no_llseek(struct file *file, loff_t offset, int origin)
101{ 118{
102 return -ESPIPE; 119 return -ESPIPE;
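
The new noop_llseek() complements no_llseek(): rather than failing with -ESPIPE it reports success while leaving f_pos alone, which is what some device drivers have historically let userspace expect. A sketch of a driver advertising it (exampledev_* are placeholders, not from this merge):

    /* Hypothetical character-device fops: lseek() "succeeds" but moves nothing. */
    static ssize_t exampledev_read(struct file *file, char __user *buf,
                                   size_t count, loff_t *ppos)
    {
            return 0;                       /* placeholder read handler */
    }

    static const struct file_operations exampledev_fops = {
            .owner  = THIS_MODULE,
            .read   = exampledev_read,
            .llseek = noop_llseek,          /* vs. no_llseek, which returns -ESPIPE */
    };
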
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 07930449a958..198dabf1b2bb 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -14,10 +14,10 @@
14extern const struct reiserfs_key MIN_KEY; 14extern const struct reiserfs_key MIN_KEY;
15 15
16static int reiserfs_readdir(struct file *, void *, filldir_t); 16static int reiserfs_readdir(struct file *, void *, filldir_t);
17static int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry, 17static int reiserfs_dir_fsync(struct file *filp, int datasync);
18 int datasync);
19 18
20const struct file_operations reiserfs_dir_operations = { 19const struct file_operations reiserfs_dir_operations = {
20 .llseek = generic_file_llseek,
21 .read = generic_read_dir, 21 .read = generic_read_dir,
22 .readdir = reiserfs_readdir, 22 .readdir = reiserfs_readdir,
23 .fsync = reiserfs_dir_fsync, 23 .fsync = reiserfs_dir_fsync,
@@ -27,10 +27,9 @@ const struct file_operations reiserfs_dir_operations = {
27#endif 27#endif
28}; 28};
29 29
30static int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry, 30static int reiserfs_dir_fsync(struct file *filp, int datasync)
31 int datasync)
32{ 31{
33 struct inode *inode = dentry->d_inode; 32 struct inode *inode = filp->f_mapping->host;
34 int err; 33 int err;
35 reiserfs_write_lock(inode->i_sb); 34 reiserfs_write_lock(inode->i_sb);
36 err = reiserfs_commit_for_inode(inode); 35 err = reiserfs_commit_for_inode(inode);
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 9977df9f3a54..b82cdd8a45dd 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -134,10 +134,9 @@ static void reiserfs_vfs_truncate_file(struct inode *inode)
134 * be removed... 134 * be removed...
135 */ 135 */
136 136
137static int reiserfs_sync_file(struct file *filp, 137static int reiserfs_sync_file(struct file *filp, int datasync)
138 struct dentry *dentry, int datasync)
139{ 138{
140 struct inode *inode = dentry->d_inode; 139 struct inode *inode = filp->f_mapping->host;
141 int err; 140 int err;
142 int barrier_done; 141 int barrier_done;
143 142
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 59125fb36d42..9822fa15118b 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -158,6 +158,7 @@ static int finish_unfinished(struct super_block *s)
158#ifdef CONFIG_QUOTA 158#ifdef CONFIG_QUOTA
159 int i; 159 int i;
160 int ms_active_set; 160 int ms_active_set;
161 int quota_enabled[MAXQUOTAS];
161#endif 162#endif
162 163
163 /* compose key to look for "save" links */ 164 /* compose key to look for "save" links */
@@ -179,8 +180,15 @@ static int finish_unfinished(struct super_block *s)
179 } 180 }
180 /* Turn on quotas so that they are updated correctly */ 181 /* Turn on quotas so that they are updated correctly */
181 for (i = 0; i < MAXQUOTAS; i++) { 182 for (i = 0; i < MAXQUOTAS; i++) {
183 quota_enabled[i] = 1;
182 if (REISERFS_SB(s)->s_qf_names[i]) { 184 if (REISERFS_SB(s)->s_qf_names[i]) {
183 int ret = reiserfs_quota_on_mount(s, i); 185 int ret;
186
187 if (sb_has_quota_active(s, i)) {
188 quota_enabled[i] = 0;
189 continue;
190 }
191 ret = reiserfs_quota_on_mount(s, i);
184 if (ret < 0) 192 if (ret < 0)
185 reiserfs_warning(s, "reiserfs-2500", 193 reiserfs_warning(s, "reiserfs-2500",
186 "cannot turn on journaled " 194 "cannot turn on journaled "
@@ -304,8 +312,8 @@ static int finish_unfinished(struct super_block *s)
304#ifdef CONFIG_QUOTA 312#ifdef CONFIG_QUOTA
305 /* Turn quotas off */ 313 /* Turn quotas off */
306 for (i = 0; i < MAXQUOTAS; i++) { 314 for (i = 0; i < MAXQUOTAS; i++) {
307 if (sb_dqopt(s)->files[i]) 315 if (sb_dqopt(s)->files[i] && quota_enabled[i])
308 vfs_quota_off(s, i, 0); 316 dquot_quota_off(s, i);
309 } 317 }
310 if (ms_active_set) 318 if (ms_active_set)
311 /* Restore the flag back */ 319 /* Restore the flag back */
@@ -466,6 +474,8 @@ static void reiserfs_put_super(struct super_block *s)
466 struct reiserfs_transaction_handle th; 474 struct reiserfs_transaction_handle th;
467 th.t_trans_id = 0; 475 th.t_trans_id = 0;
468 476
477 dquot_disable(s, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
478
469 reiserfs_write_lock(s); 479 reiserfs_write_lock(s);
470 480
471 if (s->s_dirt) 481 if (s->s_dirt)
@@ -620,7 +630,7 @@ static int reiserfs_acquire_dquot(struct dquot *);
620static int reiserfs_release_dquot(struct dquot *); 630static int reiserfs_release_dquot(struct dquot *);
621static int reiserfs_mark_dquot_dirty(struct dquot *); 631static int reiserfs_mark_dquot_dirty(struct dquot *);
622static int reiserfs_write_info(struct super_block *, int); 632static int reiserfs_write_info(struct super_block *, int);
623static int reiserfs_quota_on(struct super_block *, int, int, char *, int); 633static int reiserfs_quota_on(struct super_block *, int, int, char *);
624 634
625static const struct dquot_operations reiserfs_quota_operations = { 635static const struct dquot_operations reiserfs_quota_operations = {
626 .write_dquot = reiserfs_write_dquot, 636 .write_dquot = reiserfs_write_dquot,
@@ -634,12 +644,12 @@ static const struct dquot_operations reiserfs_quota_operations = {
634 644
635static const struct quotactl_ops reiserfs_qctl_operations = { 645static const struct quotactl_ops reiserfs_qctl_operations = {
636 .quota_on = reiserfs_quota_on, 646 .quota_on = reiserfs_quota_on,
637 .quota_off = vfs_quota_off, 647 .quota_off = dquot_quota_off,
638 .quota_sync = vfs_quota_sync, 648 .quota_sync = dquot_quota_sync,
639 .get_info = vfs_get_dqinfo, 649 .get_info = dquot_get_dqinfo,
640 .set_info = vfs_set_dqinfo, 650 .set_info = dquot_set_dqinfo,
641 .get_dqblk = vfs_get_dqblk, 651 .get_dqblk = dquot_get_dqblk,
642 .set_dqblk = vfs_set_dqblk, 652 .set_dqblk = dquot_set_dqblk,
643}; 653};
644#endif 654#endif
645 655
@@ -1242,6 +1252,11 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1242 if (s->s_flags & MS_RDONLY) 1252 if (s->s_flags & MS_RDONLY)
1243 /* it is read-only already */ 1253 /* it is read-only already */
1244 goto out_ok; 1254 goto out_ok;
1255
1256 err = dquot_suspend(s, -1);
1257 if (err < 0)
1258 goto out_err;
1259
1245 /* try to remount file system with read-only permissions */ 1260 /* try to remount file system with read-only permissions */
1246 if (sb_umount_state(rs) == REISERFS_VALID_FS 1261 if (sb_umount_state(rs) == REISERFS_VALID_FS
1247 || REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) { 1262 || REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) {
@@ -1295,6 +1310,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1295 s->s_dirt = 0; 1310 s->s_dirt = 0;
1296 1311
1297 if (!(*mount_flags & MS_RDONLY)) { 1312 if (!(*mount_flags & MS_RDONLY)) {
1313 dquot_resume(s, -1);
1298 finish_unfinished(s); 1314 finish_unfinished(s);
1299 reiserfs_xattr_init(s, *mount_flags); 1315 reiserfs_xattr_init(s, *mount_flags);
1300 } 1316 }
@@ -2022,15 +2038,15 @@ static int reiserfs_write_info(struct super_block *sb, int type)
2022 */ 2038 */
2023static int reiserfs_quota_on_mount(struct super_block *sb, int type) 2039static int reiserfs_quota_on_mount(struct super_block *sb, int type)
2024{ 2040{
2025 return vfs_quota_on_mount(sb, REISERFS_SB(sb)->s_qf_names[type], 2041 return dquot_quota_on_mount(sb, REISERFS_SB(sb)->s_qf_names[type],
2026 REISERFS_SB(sb)->s_jquota_fmt, type); 2042 REISERFS_SB(sb)->s_jquota_fmt, type);
2027} 2043}
2028 2044
2029/* 2045/*
2030 * Standard function to be called on quota_on 2046 * Standard function to be called on quota_on
2031 */ 2047 */
2032static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, 2048static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
2033 char *name, int remount) 2049 char *name)
2034{ 2050{
2035 int err; 2051 int err;
2036 struct path path; 2052 struct path path;
@@ -2039,9 +2055,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
2039 2055
2040 if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA))) 2056 if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA)))
2041 return -EINVAL; 2057 return -EINVAL;
2042 /* No more checks needed? Path and format_id are bogus anyway... */ 2058
2043 if (remount)
2044 return vfs_quota_on(sb, type, format_id, name, 1);
2045 err = kern_path(name, LOOKUP_FOLLOW, &path); 2059 err = kern_path(name, LOOKUP_FOLLOW, &path);
2046 if (err) 2060 if (err)
2047 return err; 2061 return err;
@@ -2085,7 +2099,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
2085 if (err) 2099 if (err)
2086 goto out; 2100 goto out;
2087 } 2101 }
2088 err = vfs_quota_on_path(sb, type, format_id, &path); 2102 err = dquot_quota_on_path(sb, type, format_id, &path);
2089out: 2103out:
2090 path_put(&path); 2104 path_put(&path);
2091 return err; 2105 return err;
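
The reiserfs_remount() hunks above move the quota state changes to the generic helpers: dquot_suspend() runs before the filesystem drops to read-only and dquot_resume() after it returns to read-write. A condensed sketch of that pattern, with every filesystem-specific step elided and example_remount() as a hypothetical ->remount_fs implementation:

    #include <linux/fs.h>
    #include <linux/quotaops.h>

    /* Sketch of the remount-time quota handling used by this patch. */
    static int example_remount(struct super_block *sb, int *flags, char *data)
    {
            int err;

            if (*flags & MS_RDONLY) {
                    /* Going read-only: park the quota files first. */
                    err = dquot_suspend(sb, -1);    /* -1 == all quota types */
                    if (err < 0)
                            return err;
                    /* ... filesystem-specific read-only transition ... */
            } else {
                    /* ... filesystem-specific read-write transition ... */
                    dquot_resume(sb, -1);           /* re-enable suspended quotas */
            }
            return 0;
    }
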
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c
index 6c978428892d..00a70cab1f36 100644
--- a/fs/smbfs/dir.c
+++ b/fs/smbfs/dir.c
@@ -37,6 +37,7 @@ static int smb_link(struct dentry *, struct inode *, struct dentry *);
37 37
38const struct file_operations smb_dir_operations = 38const struct file_operations smb_dir_operations =
39{ 39{
40 .llseek = generic_file_llseek,
40 .read = generic_read_dir, 41 .read = generic_read_dir,
41 .readdir = smb_readdir, 42 .readdir = smb_readdir,
42 .unlocked_ioctl = smb_ioctl, 43 .unlocked_ioctl = smb_ioctl,
diff --git a/fs/smbfs/file.c b/fs/smbfs/file.c
index 84ecf0e43f91..8e187a0f94bb 100644
--- a/fs/smbfs/file.c
+++ b/fs/smbfs/file.c
@@ -28,8 +28,9 @@
28#include "proto.h" 28#include "proto.h"
29 29
30static int 30static int
31smb_fsync(struct file *file, struct dentry * dentry, int datasync) 31smb_fsync(struct file *file, int datasync)
32{ 32{
33 struct dentry *dentry = file->f_path.dentry;
33 struct smb_sb_info *server = server_from_dentry(dentry); 34 struct smb_sb_info *server = server_from_dentry(dentry);
34 int result; 35 int result;
35 36
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index dfa1d67f8fca..9551cb6f7fe4 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -714,7 +714,7 @@ smb_notify_change(struct dentry *dentry, struct iattr *attr)
714 error = server->ops->truncate(inode, attr->ia_size); 714 error = server->ops->truncate(inode, attr->ia_size);
715 if (error) 715 if (error)
716 goto out; 716 goto out;
717 error = vmtruncate(inode, attr->ia_size); 717 error = simple_setsize(inode, attr->ia_size);
718 if (error) 718 if (error)
719 goto out; 719 goto out;
720 refresh = 1; 720 refresh = 1;
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index 25a00d19d686..cc6ce8a84c21 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -26,6 +26,17 @@ config SQUASHFS
26 26
27 If unsure, say N. 27 If unsure, say N.
28 28
29config SQUASHFS_XATTRS
30 bool "Squashfs XATTR support"
31 depends on SQUASHFS
32 default n
33 help
34 Saying Y here includes support for extended attributes (xattrs).
35 Xattrs are name:value pairs associated with inodes by
36 the kernel or by users (see the attr(5) manual page).
37
38 If unsure, say N.
39
29config SQUASHFS_EMBEDDED 40config SQUASHFS_EMBEDDED
30 41
31 bool "Additional option for memory-constrained systems" 42 bool "Additional option for memory-constrained systems"
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index df8a19ef870d..2cee3e9fa452 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -5,3 +5,5 @@
5obj-$(CONFIG_SQUASHFS) += squashfs.o 5obj-$(CONFIG_SQUASHFS) += squashfs.o
6squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o 6squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o
7squashfs-y += namei.o super.o symlink.o zlib_wrapper.o decompressor.o 7squashfs-y += namei.o super.o symlink.o zlib_wrapper.o decompressor.o
8squashfs-$(CONFIG_SQUASHFS_XATTRS) += xattr.o xattr_id.o
9
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index 49daaf669e41..62e63ad25075 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -40,11 +40,13 @@
40 40
41#include <linux/fs.h> 41#include <linux/fs.h>
42#include <linux/vfs.h> 42#include <linux/vfs.h>
43#include <linux/xattr.h>
43 44
44#include "squashfs_fs.h" 45#include "squashfs_fs.h"
45#include "squashfs_fs_sb.h" 46#include "squashfs_fs_sb.h"
46#include "squashfs_fs_i.h" 47#include "squashfs_fs_i.h"
47#include "squashfs.h" 48#include "squashfs.h"
49#include "xattr.h"
48 50
49/* 51/*
50 * Initialise VFS inode with the base inode information common to all 52 * Initialise VFS inode with the base inode information common to all
@@ -111,6 +113,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
111 int err, type, offset = SQUASHFS_INODE_OFFSET(ino); 113 int err, type, offset = SQUASHFS_INODE_OFFSET(ino);
112 union squashfs_inode squashfs_ino; 114 union squashfs_inode squashfs_ino;
113 struct squashfs_base_inode *sqshb_ino = &squashfs_ino.base; 115 struct squashfs_base_inode *sqshb_ino = &squashfs_ino.base;
116 int xattr_id = SQUASHFS_INVALID_XATTR;
114 117
115 TRACE("Entered squashfs_read_inode\n"); 118 TRACE("Entered squashfs_read_inode\n");
116 119
@@ -199,8 +202,10 @@ int squashfs_read_inode(struct inode *inode, long long ino)
199 frag_offset = 0; 202 frag_offset = 0;
200 } 203 }
201 204
205 xattr_id = le32_to_cpu(sqsh_ino->xattr);
202 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink); 206 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
203 inode->i_size = le64_to_cpu(sqsh_ino->file_size); 207 inode->i_size = le64_to_cpu(sqsh_ino->file_size);
208 inode->i_op = &squashfs_inode_ops;
204 inode->i_fop = &generic_ro_fops; 209 inode->i_fop = &generic_ro_fops;
205 inode->i_mode |= S_IFREG; 210 inode->i_mode |= S_IFREG;
206 inode->i_blocks = ((inode->i_size - 211 inode->i_blocks = ((inode->i_size -
@@ -251,6 +256,7 @@ int squashfs_read_inode(struct inode *inode, long long ino)
251 if (err < 0) 256 if (err < 0)
252 goto failed_read; 257 goto failed_read;
253 258
259 xattr_id = le32_to_cpu(sqsh_ino->xattr);
254 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink); 260 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
255 inode->i_size = le32_to_cpu(sqsh_ino->file_size); 261 inode->i_size = le32_to_cpu(sqsh_ino->file_size);
256 inode->i_op = &squashfs_dir_inode_ops; 262 inode->i_op = &squashfs_dir_inode_ops;
@@ -280,21 +286,33 @@ int squashfs_read_inode(struct inode *inode, long long ino)
280 286
281 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink); 287 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
282 inode->i_size = le32_to_cpu(sqsh_ino->symlink_size); 288 inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
283 inode->i_op = &page_symlink_inode_operations; 289 inode->i_op = &squashfs_symlink_inode_ops;
284 inode->i_data.a_ops = &squashfs_symlink_aops; 290 inode->i_data.a_ops = &squashfs_symlink_aops;
285 inode->i_mode |= S_IFLNK; 291 inode->i_mode |= S_IFLNK;
286 squashfs_i(inode)->start = block; 292 squashfs_i(inode)->start = block;
287 squashfs_i(inode)->offset = offset; 293 squashfs_i(inode)->offset = offset;
288 294
295 if (type == SQUASHFS_LSYMLINK_TYPE) {
296 __le32 xattr;
297
298 err = squashfs_read_metadata(sb, NULL, &block,
299 &offset, inode->i_size);
300 if (err < 0)
301 goto failed_read;
302 err = squashfs_read_metadata(sb, &xattr, &block,
303 &offset, sizeof(xattr));
304 if (err < 0)
305 goto failed_read;
306 xattr_id = le32_to_cpu(xattr);
307 }
308
289 TRACE("Symbolic link inode %x:%x, start_block %llx, offset " 309 TRACE("Symbolic link inode %x:%x, start_block %llx, offset "
290 "%x\n", SQUASHFS_INODE_BLK(ino), offset, 310 "%x\n", SQUASHFS_INODE_BLK(ino), offset,
291 block, offset); 311 block, offset);
292 break; 312 break;
293 } 313 }
294 case SQUASHFS_BLKDEV_TYPE: 314 case SQUASHFS_BLKDEV_TYPE:
295 case SQUASHFS_CHRDEV_TYPE: 315 case SQUASHFS_CHRDEV_TYPE: {
296 case SQUASHFS_LBLKDEV_TYPE:
297 case SQUASHFS_LCHRDEV_TYPE: {
298 struct squashfs_dev_inode *sqsh_ino = &squashfs_ino.dev; 316 struct squashfs_dev_inode *sqsh_ino = &squashfs_ino.dev;
299 unsigned int rdev; 317 unsigned int rdev;
300 318
@@ -315,10 +333,32 @@ int squashfs_read_inode(struct inode *inode, long long ino)
315 SQUASHFS_INODE_BLK(ino), offset, rdev); 333 SQUASHFS_INODE_BLK(ino), offset, rdev);
316 break; 334 break;
317 } 335 }
336 case SQUASHFS_LBLKDEV_TYPE:
337 case SQUASHFS_LCHRDEV_TYPE: {
338 struct squashfs_ldev_inode *sqsh_ino = &squashfs_ino.ldev;
339 unsigned int rdev;
340
341 err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
342 sizeof(*sqsh_ino));
343 if (err < 0)
344 goto failed_read;
345
346 if (type == SQUASHFS_LCHRDEV_TYPE)
347 inode->i_mode |= S_IFCHR;
348 else
349 inode->i_mode |= S_IFBLK;
350 xattr_id = le32_to_cpu(sqsh_ino->xattr);
351 inode->i_op = &squashfs_inode_ops;
352 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
353 rdev = le32_to_cpu(sqsh_ino->rdev);
354 init_special_inode(inode, inode->i_mode, new_decode_dev(rdev));
355
356 TRACE("Device inode %x:%x, rdev %x\n",
357 SQUASHFS_INODE_BLK(ino), offset, rdev);
358 break;
359 }
318 case SQUASHFS_FIFO_TYPE: 360 case SQUASHFS_FIFO_TYPE:
319 case SQUASHFS_SOCKET_TYPE: 361 case SQUASHFS_SOCKET_TYPE: {
320 case SQUASHFS_LFIFO_TYPE:
321 case SQUASHFS_LSOCKET_TYPE: {
322 struct squashfs_ipc_inode *sqsh_ino = &squashfs_ino.ipc; 362 struct squashfs_ipc_inode *sqsh_ino = &squashfs_ino.ipc;
323 363
324 err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, 364 err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
@@ -334,14 +374,52 @@ int squashfs_read_inode(struct inode *inode, long long ino)
334 init_special_inode(inode, inode->i_mode, 0); 374 init_special_inode(inode, inode->i_mode, 0);
335 break; 375 break;
336 } 376 }
377 case SQUASHFS_LFIFO_TYPE:
378 case SQUASHFS_LSOCKET_TYPE: {
379 struct squashfs_lipc_inode *sqsh_ino = &squashfs_ino.lipc;
380
381 err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset,
382 sizeof(*sqsh_ino));
383 if (err < 0)
384 goto failed_read;
385
386 if (type == SQUASHFS_LFIFO_TYPE)
387 inode->i_mode |= S_IFIFO;
388 else
389 inode->i_mode |= S_IFSOCK;
390 xattr_id = le32_to_cpu(sqsh_ino->xattr);
391 inode->i_op = &squashfs_inode_ops;
392 inode->i_nlink = le32_to_cpu(sqsh_ino->nlink);
393 init_special_inode(inode, inode->i_mode, 0);
394 break;
395 }
337 default: 396 default:
338 ERROR("Unknown inode type %d in squashfs_iget!\n", type); 397 ERROR("Unknown inode type %d in squashfs_iget!\n", type);
339 return -EINVAL; 398 return -EINVAL;
340 } 399 }
341 400
401 if (xattr_id != SQUASHFS_INVALID_XATTR && msblk->xattr_id_table) {
402 err = squashfs_xattr_lookup(sb, xattr_id,
403 &squashfs_i(inode)->xattr_count,
404 &squashfs_i(inode)->xattr_size,
405 &squashfs_i(inode)->xattr);
406 if (err < 0)
407 goto failed_read;
408 inode->i_blocks += ((squashfs_i(inode)->xattr_size - 1) >> 9)
409 + 1;
410 } else
411 squashfs_i(inode)->xattr_count = 0;
412
342 return 0; 413 return 0;
343 414
344failed_read: 415failed_read:
345 ERROR("Unable to read inode 0x%llx\n", ino); 416 ERROR("Unable to read inode 0x%llx\n", ino);
346 return err; 417 return err;
347} 418}
419
420
421const struct inode_operations squashfs_inode_ops = {
422 .getxattr = generic_getxattr,
423 .listxattr = squashfs_listxattr
424};
425
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index 5266bd8ad932..7a9464d08cf6 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -57,11 +57,13 @@
57#include <linux/slab.h> 57#include <linux/slab.h>
58#include <linux/string.h> 58#include <linux/string.h>
59#include <linux/dcache.h> 59#include <linux/dcache.h>
60#include <linux/xattr.h>
60 61
61#include "squashfs_fs.h" 62#include "squashfs_fs.h"
62#include "squashfs_fs_sb.h" 63#include "squashfs_fs_sb.h"
63#include "squashfs_fs_i.h" 64#include "squashfs_fs_i.h"
64#include "squashfs.h" 65#include "squashfs.h"
66#include "xattr.h"
65 67
66/* 68/*
67 * Lookup name in the directory index, returning the location of the metadata 69 * Lookup name in the directory index, returning the location of the metadata
@@ -237,5 +239,7 @@ failed:
237 239
238 240
239const struct inode_operations squashfs_dir_inode_ops = { 241const struct inode_operations squashfs_dir_inode_ops = {
240 .lookup = squashfs_lookup 242 .lookup = squashfs_lookup,
243 .getxattr = generic_getxattr,
244 .listxattr = squashfs_listxattr
241}; 245};
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index fe2587af5512..733a17c42945 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -73,8 +73,11 @@ extern struct inode *squashfs_iget(struct super_block *, long long,
73 unsigned int); 73 unsigned int);
74extern int squashfs_read_inode(struct inode *, long long); 74extern int squashfs_read_inode(struct inode *, long long);
75 75
76/* xattr.c */
77extern ssize_t squashfs_listxattr(struct dentry *, char *, size_t);
78
76/* 79/*
77 * Inodes, files and decompressor operations 80 * Inodes, files, decompressor and xattr operations
78 */ 81 */
79 82
80/* dir.c */ 83/* dir.c */
@@ -86,11 +89,18 @@ extern const struct export_operations squashfs_export_ops;
86/* file.c */ 89/* file.c */
87extern const struct address_space_operations squashfs_aops; 90extern const struct address_space_operations squashfs_aops;
88 91
92/* inode.c */
93extern const struct inode_operations squashfs_inode_ops;
94
89/* namei.c */ 95/* namei.c */
90extern const struct inode_operations squashfs_dir_inode_ops; 96extern const struct inode_operations squashfs_dir_inode_ops;
91 97
92/* symlink.c */ 98/* symlink.c */
93extern const struct address_space_operations squashfs_symlink_aops; 99extern const struct address_space_operations squashfs_symlink_aops;
100extern const struct inode_operations squashfs_symlink_inode_ops;
101
102/* xattr.c */
103extern const struct xattr_handler *squashfs_xattr_handlers[];
94 104
95/* zlib_wrapper.c */ 105/* zlib_wrapper.c */
96extern const struct squashfs_decompressor squashfs_zlib_comp_ops; 106extern const struct squashfs_decompressor squashfs_zlib_comp_ops;
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 79024245ea00..8eabb808b78d 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -46,6 +46,7 @@
46#define SQUASHFS_NAME_LEN 256 46#define SQUASHFS_NAME_LEN 256
47 47
48#define SQUASHFS_INVALID_FRAG (0xffffffffU) 48#define SQUASHFS_INVALID_FRAG (0xffffffffU)
49#define SQUASHFS_INVALID_XATTR (0xffffffffU)
49#define SQUASHFS_INVALID_BLK (-1LL) 50#define SQUASHFS_INVALID_BLK (-1LL)
50 51
51/* Filesystem flags */ 52/* Filesystem flags */
@@ -96,6 +97,13 @@
96#define SQUASHFS_LFIFO_TYPE 13 97#define SQUASHFS_LFIFO_TYPE 13
97#define SQUASHFS_LSOCKET_TYPE 14 98#define SQUASHFS_LSOCKET_TYPE 14
98 99
100/* Xattr types */
101#define SQUASHFS_XATTR_USER 0
102#define SQUASHFS_XATTR_TRUSTED 1
103#define SQUASHFS_XATTR_SECURITY 2
104#define SQUASHFS_XATTR_VALUE_OOL 256
105#define SQUASHFS_XATTR_PREFIX_MASK 0xff
106
99/* Flag whether block is compressed or uncompressed, bit is set if block is 107/* Flag whether block is compressed or uncompressed, bit is set if block is
100 * uncompressed */ 108 * uncompressed */
101#define SQUASHFS_COMPRESSED_BIT (1 << 15) 109#define SQUASHFS_COMPRESSED_BIT (1 << 15)
@@ -174,6 +182,24 @@
174 182
175#define SQUASHFS_ID_BLOCK_BYTES(A) (SQUASHFS_ID_BLOCKS(A) *\ 183#define SQUASHFS_ID_BLOCK_BYTES(A) (SQUASHFS_ID_BLOCKS(A) *\
176 sizeof(u64)) 184 sizeof(u64))
185/* xattr id lookup table defines */
186#define SQUASHFS_XATTR_BYTES(A) ((A) * sizeof(struct squashfs_xattr_id))
187
188#define SQUASHFS_XATTR_BLOCK(A) (SQUASHFS_XATTR_BYTES(A) / \
189 SQUASHFS_METADATA_SIZE)
190
191#define SQUASHFS_XATTR_BLOCK_OFFSET(A) (SQUASHFS_XATTR_BYTES(A) % \
192 SQUASHFS_METADATA_SIZE)
193
194#define SQUASHFS_XATTR_BLOCKS(A) ((SQUASHFS_XATTR_BYTES(A) + \
195 SQUASHFS_METADATA_SIZE - 1) / \
196 SQUASHFS_METADATA_SIZE)
197
198#define SQUASHFS_XATTR_BLOCK_BYTES(A) (SQUASHFS_XATTR_BLOCKS(A) *\
199 sizeof(u64))
200#define SQUASHFS_XATTR_BLK(A) ((unsigned int) ((A) >> 16))
201
202#define SQUASHFS_XATTR_OFFSET(A) ((unsigned int) ((A) & 0xffff))
177 203
178/* cached data constants for filesystem */ 204/* cached data constants for filesystem */
179#define SQUASHFS_CACHED_BLKS 8 205#define SQUASHFS_CACHED_BLKS 8
@@ -228,7 +254,7 @@ struct squashfs_super_block {
228 __le64 root_inode; 254 __le64 root_inode;
229 __le64 bytes_used; 255 __le64 bytes_used;
230 __le64 id_table_start; 256 __le64 id_table_start;
231 __le64 xattr_table_start; 257 __le64 xattr_id_table_start;
232 __le64 inode_table_start; 258 __le64 inode_table_start;
233 __le64 directory_table_start; 259 __le64 directory_table_start;
234 __le64 fragment_table_start; 260 __le64 fragment_table_start;
@@ -261,6 +287,17 @@ struct squashfs_ipc_inode {
261 __le32 nlink; 287 __le32 nlink;
262}; 288};
263 289
290struct squashfs_lipc_inode {
291 __le16 inode_type;
292 __le16 mode;
293 __le16 uid;
294 __le16 guid;
295 __le32 mtime;
296 __le32 inode_number;
297 __le32 nlink;
298 __le32 xattr;
299};
300
264struct squashfs_dev_inode { 301struct squashfs_dev_inode {
265 __le16 inode_type; 302 __le16 inode_type;
266 __le16 mode; 303 __le16 mode;
@@ -272,6 +309,18 @@ struct squashfs_dev_inode {
272 __le32 rdev; 309 __le32 rdev;
273}; 310};
274 311
312struct squashfs_ldev_inode {
313 __le16 inode_type;
314 __le16 mode;
315 __le16 uid;
316 __le16 guid;
317 __le32 mtime;
318 __le32 inode_number;
319 __le32 nlink;
320 __le32 rdev;
321 __le32 xattr;
322};
323
275struct squashfs_symlink_inode { 324struct squashfs_symlink_inode {
276 __le16 inode_type; 325 __le16 inode_type;
277 __le16 mode; 326 __le16 mode;
@@ -349,12 +398,14 @@ struct squashfs_ldir_inode {
349union squashfs_inode { 398union squashfs_inode {
350 struct squashfs_base_inode base; 399 struct squashfs_base_inode base;
351 struct squashfs_dev_inode dev; 400 struct squashfs_dev_inode dev;
401 struct squashfs_ldev_inode ldev;
352 struct squashfs_symlink_inode symlink; 402 struct squashfs_symlink_inode symlink;
353 struct squashfs_reg_inode reg; 403 struct squashfs_reg_inode reg;
354 struct squashfs_lreg_inode lreg; 404 struct squashfs_lreg_inode lreg;
355 struct squashfs_dir_inode dir; 405 struct squashfs_dir_inode dir;
356 struct squashfs_ldir_inode ldir; 406 struct squashfs_ldir_inode ldir;
357 struct squashfs_ipc_inode ipc; 407 struct squashfs_ipc_inode ipc;
408 struct squashfs_lipc_inode lipc;
358}; 409};
359 410
360struct squashfs_dir_entry { 411struct squashfs_dir_entry {
@@ -377,4 +428,27 @@ struct squashfs_fragment_entry {
377 unsigned int unused; 428 unsigned int unused;
378}; 429};
379 430
431struct squashfs_xattr_entry {
432 __le16 type;
433 __le16 size;
434 char data[0];
435};
436
437struct squashfs_xattr_val {
438 __le32 vsize;
439 char value[0];
440};
441
442struct squashfs_xattr_id {
443 __le64 xattr;
444 __le32 count;
445 __le32 size;
446};
447
448struct squashfs_xattr_id_table {
449 __le64 xattr_table_start;
450 __le32 xattr_ids;
451 __le32 unused;
452};
453
380#endif 454#endif
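
SQUASHFS_XATTR_BLK() and SQUASHFS_XATTR_OFFSET() split a 64-bit xattr reference into the start of a metadata block (upper bits) and an offset within that block (low 16 bits); the kernel then adds the block value to msblk->xattr_table to reach the data on disk. A standalone sanity check of the split, using a made-up reference value:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the macros added to squashfs_fs.h above. */
    #define SQUASHFS_XATTR_BLK(A)           ((unsigned int) ((A) >> 16))
    #define SQUASHFS_XATTR_OFFSET(A)        ((unsigned int) ((A) & 0xffff))

    int main(void)
    {
            /* Hypothetical xattr reference: block 0x1234, offset 0x0042. */
            uint64_t ref = ((uint64_t)0x1234 << 16) | 0x0042;

            printf("metadata block start: 0x%x\n", SQUASHFS_XATTR_BLK(ref));
            printf("offset within block:  0x%x\n", SQUASHFS_XATTR_OFFSET(ref));
            return 0;
    }
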
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h
index fbfca30c0c68..d3e3a37f28a1 100644
--- a/fs/squashfs/squashfs_fs_i.h
+++ b/fs/squashfs/squashfs_fs_i.h
@@ -26,6 +26,9 @@
26struct squashfs_inode_info { 26struct squashfs_inode_info {
27 u64 start; 27 u64 start;
28 int offset; 28 int offset;
29 u64 xattr;
30 unsigned int xattr_size;
31 int xattr_count;
29 union { 32 union {
30 struct { 33 struct {
31 u64 fragment_block; 34 u64 fragment_block;
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 2e77dc547e25..d9037a5215f0 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -61,6 +61,7 @@ struct squashfs_sb_info {
61 int next_meta_index; 61 int next_meta_index;
62 __le64 *id_table; 62 __le64 *id_table;
63 __le64 *fragment_index; 63 __le64 *fragment_index;
64 __le64 *xattr_id_table;
64 struct mutex read_data_mutex; 65 struct mutex read_data_mutex;
65 struct mutex meta_index_mutex; 66 struct mutex meta_index_mutex;
66 struct meta_index *meta_index; 67 struct meta_index *meta_index;
@@ -68,9 +69,11 @@ struct squashfs_sb_info {
68 __le64 *inode_lookup_table; 69 __le64 *inode_lookup_table;
69 u64 inode_table; 70 u64 inode_table;
70 u64 directory_table; 71 u64 directory_table;
72 u64 xattr_table;
71 unsigned int block_size; 73 unsigned int block_size;
72 unsigned short block_log; 74 unsigned short block_log;
73 long long bytes_used; 75 long long bytes_used;
74 unsigned int inodes; 76 unsigned int inodes;
77 int xattr_ids;
75}; 78};
76#endif 79#endif
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 48b6f4a385a6..88b4f8606652 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -36,12 +36,14 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/magic.h> 38#include <linux/magic.h>
39#include <linux/xattr.h>
39 40
40#include "squashfs_fs.h" 41#include "squashfs_fs.h"
41#include "squashfs_fs_sb.h" 42#include "squashfs_fs_sb.h"
42#include "squashfs_fs_i.h" 43#include "squashfs_fs_i.h"
43#include "squashfs.h" 44#include "squashfs.h"
44#include "decompressor.h" 45#include "decompressor.h"
46#include "xattr.h"
45 47
46static struct file_system_type squashfs_fs_type; 48static struct file_system_type squashfs_fs_type;
47static const struct super_operations squashfs_super_ops; 49static const struct super_operations squashfs_super_ops;
@@ -82,7 +84,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
82 long long root_inode; 84 long long root_inode;
83 unsigned short flags; 85 unsigned short flags;
84 unsigned int fragments; 86 unsigned int fragments;
85 u64 lookup_table_start; 87 u64 lookup_table_start, xattr_id_table_start;
86 int err; 88 int err;
87 89
88 TRACE("Entered squashfs_fill_superblock\n"); 90 TRACE("Entered squashfs_fill_superblock\n");
@@ -139,13 +141,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
139 if (msblk->decompressor == NULL) 141 if (msblk->decompressor == NULL)
140 goto failed_mount; 142 goto failed_mount;
141 143
142 /*
143 * Check if there's xattrs in the filesystem. These are not
144 * supported in this version, so warn that they will be ignored.
145 */
146 if (le64_to_cpu(sblk->xattr_table_start) != SQUASHFS_INVALID_BLK)
147 ERROR("Xattrs in filesystem, these will be ignored\n");
148
149 /* Check the filesystem does not extend beyond the end of the 144 /* Check the filesystem does not extend beyond the end of the
150 block device */ 145 block device */
151 msblk->bytes_used = le64_to_cpu(sblk->bytes_used); 146 msblk->bytes_used = le64_to_cpu(sblk->bytes_used);
@@ -253,7 +248,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
253allocate_lookup_table: 248allocate_lookup_table:
254 lookup_table_start = le64_to_cpu(sblk->lookup_table_start); 249 lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
255 if (lookup_table_start == SQUASHFS_INVALID_BLK) 250 if (lookup_table_start == SQUASHFS_INVALID_BLK)
256 goto allocate_root; 251 goto allocate_xattr_table;
257 252
258 /* Allocate and read inode lookup table */ 253 /* Allocate and read inode lookup table */
259 msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb, 254 msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
@@ -266,6 +261,21 @@ allocate_lookup_table:
266 261
267 sb->s_export_op = &squashfs_export_ops; 262 sb->s_export_op = &squashfs_export_ops;
268 263
264allocate_xattr_table:
265 sb->s_xattr = squashfs_xattr_handlers;
266 xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start);
267 if (xattr_id_table_start == SQUASHFS_INVALID_BLK)
268 goto allocate_root;
269
270 /* Allocate and read xattr id lookup table */
271 msblk->xattr_id_table = squashfs_read_xattr_id_table(sb,
272 xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids);
273 if (IS_ERR(msblk->xattr_id_table)) {
274 err = PTR_ERR(msblk->xattr_id_table);
275 msblk->xattr_id_table = NULL;
276 if (err != -ENOTSUPP)
277 goto failed_mount;
278 }
269allocate_root: 279allocate_root:
270 root = new_inode(sb); 280 root = new_inode(sb);
271 if (!root) { 281 if (!root) {
@@ -301,6 +311,7 @@ failed_mount:
301 kfree(msblk->inode_lookup_table); 311 kfree(msblk->inode_lookup_table);
302 kfree(msblk->fragment_index); 312 kfree(msblk->fragment_index);
303 kfree(msblk->id_table); 313 kfree(msblk->id_table);
314 kfree(msblk->xattr_id_table);
304 kfree(sb->s_fs_info); 315 kfree(sb->s_fs_info);
305 sb->s_fs_info = NULL; 316 sb->s_fs_info = NULL;
306 kfree(sblk); 317 kfree(sblk);
@@ -355,6 +366,7 @@ static void squashfs_put_super(struct super_block *sb)
355 kfree(sbi->fragment_index); 366 kfree(sbi->fragment_index);
356 kfree(sbi->meta_index); 367 kfree(sbi->meta_index);
357 kfree(sbi->inode_lookup_table); 368 kfree(sbi->inode_lookup_table);
369 kfree(sbi->xattr_id_table);
358 kfree(sb->s_fs_info); 370 kfree(sb->s_fs_info);
359 sb->s_fs_info = NULL; 371 sb->s_fs_info = NULL;
360 } 372 }
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index 32b911f4ee39..ec86434921e1 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -35,11 +35,13 @@
35#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/string.h> 36#include <linux/string.h>
37#include <linux/pagemap.h> 37#include <linux/pagemap.h>
38#include <linux/xattr.h>
38 39
39#include "squashfs_fs.h" 40#include "squashfs_fs.h"
40#include "squashfs_fs_sb.h" 41#include "squashfs_fs_sb.h"
41#include "squashfs_fs_i.h" 42#include "squashfs_fs_i.h"
42#include "squashfs.h" 43#include "squashfs.h"
44#include "xattr.h"
43 45
44static int squashfs_symlink_readpage(struct file *file, struct page *page) 46static int squashfs_symlink_readpage(struct file *file, struct page *page)
45{ 47{
@@ -114,3 +116,12 @@ error_out:
114const struct address_space_operations squashfs_symlink_aops = { 116const struct address_space_operations squashfs_symlink_aops = {
115 .readpage = squashfs_symlink_readpage 117 .readpage = squashfs_symlink_readpage
116}; 118};
119
120const struct inode_operations squashfs_symlink_inode_ops = {
121 .readlink = generic_readlink,
122 .follow_link = page_follow_link_light,
123 .put_link = page_put_link,
124 .getxattr = generic_getxattr,
125 .listxattr = squashfs_listxattr
126};
127
diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
new file mode 100644
index 000000000000..c7655e8b31cd
--- /dev/null
+++ b/fs/squashfs/xattr.c
@@ -0,0 +1,323 @@
1/*
2 * Squashfs - a compressed read only filesystem for Linux
3 *
4 * Copyright (c) 2010
5 * Phillip Lougher <phillip@lougher.demon.co.uk>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2,
10 * or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 *
21 * xattr_id.c
22 */
23
24#include <linux/init.h>
25#include <linux/module.h>
26#include <linux/string.h>
27#include <linux/fs.h>
28#include <linux/vfs.h>
29#include <linux/xattr.h>
30#include <linux/slab.h>
31
32#include "squashfs_fs.h"
33#include "squashfs_fs_sb.h"
34#include "squashfs_fs_i.h"
35#include "squashfs.h"
36
37static const struct xattr_handler *squashfs_xattr_handler(int);
38
39ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
40 size_t buffer_size)
41{
42 struct inode *inode = d->d_inode;
43 struct super_block *sb = inode->i_sb;
44 struct squashfs_sb_info *msblk = sb->s_fs_info;
45 u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr)
46 + msblk->xattr_table;
47 int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
48 int count = squashfs_i(inode)->xattr_count;
49 size_t rest = buffer_size;
50 int err;
51
52 /* check that the file system has xattrs */
53 if (msblk->xattr_id_table == NULL)
54 return -EOPNOTSUPP;
55
56 /* loop reading each xattr name */
57 while (count--) {
58 struct squashfs_xattr_entry entry;
59 struct squashfs_xattr_val val;
60 const struct xattr_handler *handler;
61 int name_size, prefix_size = 0;
62
63 err = squashfs_read_metadata(sb, &entry, &start, &offset,
64 sizeof(entry));
65 if (err < 0)
66 goto failed;
67
68 name_size = le16_to_cpu(entry.size);
69 handler = squashfs_xattr_handler(le16_to_cpu(entry.type));
70 if (handler)
71 prefix_size = handler->list(d, buffer, rest, NULL,
72 name_size, handler->flags);
73 if (prefix_size) {
74 if (buffer) {
75 if (prefix_size + name_size + 1 > rest) {
76 err = -ERANGE;
77 goto failed;
78 }
79 buffer += prefix_size;
80 }
81 err = squashfs_read_metadata(sb, buffer, &start,
82 &offset, name_size);
83 if (err < 0)
84 goto failed;
85 if (buffer) {
86 buffer[name_size] = '\0';
87 buffer += name_size + 1;
88 }
89 rest -= prefix_size + name_size + 1;
90 } else {
 91			/* no handler or insufficient privileges, so skip */
92 err = squashfs_read_metadata(sb, NULL, &start,
93 &offset, name_size);
94 if (err < 0)
95 goto failed;
96 }
97
98
99 /* skip remaining xattr entry */
100 err = squashfs_read_metadata(sb, &val, &start, &offset,
101 sizeof(val));
102 if (err < 0)
103 goto failed;
104
105 err = squashfs_read_metadata(sb, NULL, &start, &offset,
106 le32_to_cpu(val.vsize));
107 if (err < 0)
108 goto failed;
109 }
110 err = buffer_size - rest;
111
112failed:
113 return err;
114}
115
116
117static int squashfs_xattr_get(struct inode *inode, int name_index,
118 const char *name, void *buffer, size_t buffer_size)
119{
120 struct super_block *sb = inode->i_sb;
121 struct squashfs_sb_info *msblk = sb->s_fs_info;
122 u64 start = SQUASHFS_XATTR_BLK(squashfs_i(inode)->xattr)
123 + msblk->xattr_table;
124 int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
125 int count = squashfs_i(inode)->xattr_count;
126 int name_len = strlen(name);
127 int err, vsize;
128 char *target = kmalloc(name_len, GFP_KERNEL);
129
130 if (target == NULL)
131 return -ENOMEM;
132
133 /* loop reading each xattr name */
134 for (; count; count--) {
135 struct squashfs_xattr_entry entry;
136 struct squashfs_xattr_val val;
137 int type, prefix, name_size;
138
139 err = squashfs_read_metadata(sb, &entry, &start, &offset,
140 sizeof(entry));
141 if (err < 0)
142 goto failed;
143
144 name_size = le16_to_cpu(entry.size);
145 type = le16_to_cpu(entry.type);
146 prefix = type & SQUASHFS_XATTR_PREFIX_MASK;
147
148 if (prefix == name_index && name_size == name_len)
149 err = squashfs_read_metadata(sb, target, &start,
150 &offset, name_size);
151 else
152 err = squashfs_read_metadata(sb, NULL, &start,
153 &offset, name_size);
154 if (err < 0)
155 goto failed;
156
157 if (prefix == name_index && name_size == name_len &&
158 strncmp(target, name, name_size) == 0) {
159 /* found xattr */
160 if (type & SQUASHFS_XATTR_VALUE_OOL) {
161 __le64 xattr;
162 /* val is a reference to the real location */
163 err = squashfs_read_metadata(sb, &val, &start,
164 &offset, sizeof(val));
165 if (err < 0)
166 goto failed;
167 err = squashfs_read_metadata(sb, &xattr, &start,
168 &offset, sizeof(xattr));
169 if (err < 0)
170 goto failed;
171 xattr = le64_to_cpu(xattr);
172 start = SQUASHFS_XATTR_BLK(xattr) +
173 msblk->xattr_table;
174 offset = SQUASHFS_XATTR_OFFSET(xattr);
175 }
176 /* read xattr value */
177 err = squashfs_read_metadata(sb, &val, &start, &offset,
178 sizeof(val));
179 if (err < 0)
180 goto failed;
181
182 vsize = le32_to_cpu(val.vsize);
183 if (buffer) {
184 if (vsize > buffer_size) {
185 err = -ERANGE;
186 goto failed;
187 }
188 err = squashfs_read_metadata(sb, buffer, &start,
189 &offset, vsize);
190 if (err < 0)
191 goto failed;
192 }
193 break;
194 }
195
196 /* no match, skip remaining xattr entry */
197 err = squashfs_read_metadata(sb, &val, &start, &offset,
198 sizeof(val));
199 if (err < 0)
200 goto failed;
201 err = squashfs_read_metadata(sb, NULL, &start, &offset,
202 le32_to_cpu(val.vsize));
203 if (err < 0)
204 goto failed;
205 }
206 err = count ? vsize : -ENODATA;
207
208failed:
209 kfree(target);
210 return err;
211}
212
213
214/*
215 * User namespace support
216 */
217static size_t squashfs_user_list(struct dentry *d, char *list, size_t list_size,
218 const char *name, size_t name_len, int type)
219{
220 if (list && XATTR_USER_PREFIX_LEN <= list_size)
221 memcpy(list, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
222 return XATTR_USER_PREFIX_LEN;
223}
224
225static int squashfs_user_get(struct dentry *d, const char *name, void *buffer,
226 size_t size, int type)
227{
228 if (name[0] == '\0')
229 return -EINVAL;
230
231 return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_USER, name,
232 buffer, size);
233}
234
235static const struct xattr_handler squashfs_xattr_user_handler = {
236 .prefix = XATTR_USER_PREFIX,
237 .list = squashfs_user_list,
238 .get = squashfs_user_get
239};
240
241/*
242 * Trusted namespace support
243 */
244static size_t squashfs_trusted_list(struct dentry *d, char *list,
245 size_t list_size, const char *name, size_t name_len, int type)
246{
247 if (!capable(CAP_SYS_ADMIN))
248 return 0;
249
250 if (list && XATTR_TRUSTED_PREFIX_LEN <= list_size)
251 memcpy(list, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
252 return XATTR_TRUSTED_PREFIX_LEN;
253}
254
255static int squashfs_trusted_get(struct dentry *d, const char *name,
256 void *buffer, size_t size, int type)
257{
258 if (name[0] == '\0')
259 return -EINVAL;
260
261 return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_TRUSTED, name,
262 buffer, size);
263}
264
265static const struct xattr_handler squashfs_xattr_trusted_handler = {
266 .prefix = XATTR_TRUSTED_PREFIX,
267 .list = squashfs_trusted_list,
268 .get = squashfs_trusted_get
269};
270
271/*
272 * Security namespace support
273 */
274static size_t squashfs_security_list(struct dentry *d, char *list,
275 size_t list_size, const char *name, size_t name_len, int type)
276{
277 if (list && XATTR_SECURITY_PREFIX_LEN <= list_size)
278 memcpy(list, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN);
279 return XATTR_SECURITY_PREFIX_LEN;
280}
281
282static int squashfs_security_get(struct dentry *d, const char *name,
283 void *buffer, size_t size, int type)
284{
285 if (name[0] == '\0')
286 return -EINVAL;
287
288 return squashfs_xattr_get(d->d_inode, SQUASHFS_XATTR_SECURITY, name,
289 buffer, size);
290}
291
292static const struct xattr_handler squashfs_xattr_security_handler = {
293 .prefix = XATTR_SECURITY_PREFIX,
294 .list = squashfs_security_list,
295 .get = squashfs_security_get
296};
297
298static inline const struct xattr_handler *squashfs_xattr_handler(int type)
299{
300 if (type & ~(SQUASHFS_XATTR_PREFIX_MASK | SQUASHFS_XATTR_VALUE_OOL))
301 /* ignore unrecognised type */
302 return NULL;
303
304 switch (type & SQUASHFS_XATTR_PREFIX_MASK) {
305 case SQUASHFS_XATTR_USER:
306 return &squashfs_xattr_user_handler;
307 case SQUASHFS_XATTR_TRUSTED:
308 return &squashfs_xattr_trusted_handler;
309 case SQUASHFS_XATTR_SECURITY:
310 return &squashfs_xattr_security_handler;
311 default:
312 /* ignore unrecognised type */
313 return NULL;
314 }
315}
316
317const struct xattr_handler *squashfs_xattr_handlers[] = {
318 &squashfs_xattr_user_handler,
319 &squashfs_xattr_trusted_handler,
320 &squashfs_xattr_security_handler,
321 NULL
322};
323
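
squashfs_listxattr() returns the conventional NUL-separated list of fully prefixed names, with the "user.", "trusted." or "security." prefix emitted by the per-namespace handler and the rest of the name read from the metadata stream. From userspace the list is consumed through the ordinary listxattr(2) interface; a small example, where /mnt/file is a hypothetical path on a mounted squashfs image:

    #include <stdio.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(void)
    {
            const char *path = "/mnt/file";  /* hypothetical squashfs file */
            char list[4096];
            ssize_t len = listxattr(path, list, sizeof(list));

            if (len < 0) {
                    /* e.g. EOPNOTSUPP when xattr support is compiled out */
                    perror("listxattr");
                    return 1;
            }

            /* The buffer holds NUL-terminated names back to back. */
            for (char *name = list; name < list + len; name += strlen(name) + 1)
                    printf("%s\n", name);
            return 0;
    }
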
diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
new file mode 100644
index 000000000000..9da071ae181c
--- /dev/null
+++ b/fs/squashfs/xattr.h
@@ -0,0 +1,46 @@
1/*
2 * Squashfs - a compressed read only filesystem for Linux
3 *
4 * Copyright (c) 2010
5 * Phillip Lougher <phillip@lougher.demon.co.uk>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2,
10 * or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 *
21 * xattr.h
22 */
23
24#ifdef CONFIG_SQUASHFS_XATTRS
25extern __le64 *squashfs_read_xattr_id_table(struct super_block *, u64,
26 u64 *, int *);
27extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
28 int *, unsigned long long *);
29#else
30static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
31 u64 start, u64 *xattr_table_start, int *xattr_ids)
32{
33 ERROR("Xattrs in filesystem, these will be ignored\n");
34 return ERR_PTR(-ENOTSUPP);
35}
36
37static inline int squashfs_xattr_lookup(struct super_block *sb,
38 unsigned int index, int *count, int *size,
39 unsigned long long *xattr)
40{
41 return 0;
42}
43#define squashfs_listxattr NULL
44#define generic_getxattr NULL
45#define squashfs_xattr_handlers NULL
46#endif
diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
new file mode 100644
index 000000000000..cfb41106098f
--- /dev/null
+++ b/fs/squashfs/xattr_id.c
@@ -0,0 +1,100 @@
1/*
2 * Squashfs - a compressed read only filesystem for Linux
3 *
4 * Copyright (c) 2010
5 * Phillip Lougher <phillip@lougher.demon.co.uk>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2,
10 * or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20 *
21 * xattr_id.c
22 */
23
24/*
25 * This file implements code to map the 32-bit xattr id stored in the inode
26 * into the on disk location of the xattr data.
27 */
28
29#include <linux/fs.h>
30#include <linux/vfs.h>
31#include <linux/slab.h>
32
33#include "squashfs_fs.h"
34#include "squashfs_fs_sb.h"
35#include "squashfs_fs_i.h"
36#include "squashfs.h"
37
38/*
39 * Map xattr id using the xattr id look up table
40 */
41int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
42 int *count, unsigned int *size, unsigned long long *xattr)
43{
44 struct squashfs_sb_info *msblk = sb->s_fs_info;
45 int block = SQUASHFS_XATTR_BLOCK(index);
46 int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
47 u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]);
48 struct squashfs_xattr_id id;
49 int err;
50
51 err = squashfs_read_metadata(sb, &id, &start_block, &offset,
52 sizeof(id));
53 if (err < 0)
54 return err;
55
56 *xattr = le64_to_cpu(id.xattr);
57 *size = le32_to_cpu(id.size);
58 *count = le32_to_cpu(id.count);
59 return 0;
60}
61
62
63/*
64 * Read uncompressed xattr id lookup table indexes from disk into memory
65 */
66__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
67 u64 *xattr_table_start, int *xattr_ids)
68{
69 unsigned int len;
70 __le64 *xid_table;
71 struct squashfs_xattr_id_table id_table;
72 int err;
73
74 err = squashfs_read_table(sb, &id_table, start, sizeof(id_table));
75 if (err < 0) {
76 ERROR("unable to read xattr id table\n");
77 return ERR_PTR(err);
78 }
79 *xattr_table_start = le64_to_cpu(id_table.xattr_table_start);
80 *xattr_ids = le32_to_cpu(id_table.xattr_ids);
81 len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
82
83 TRACE("In read_xattr_index_table, length %d\n", len);
84
85 /* Allocate xattr id lookup table indexes */
86 xid_table = kmalloc(len, GFP_KERNEL);
87 if (xid_table == NULL) {
88 ERROR("Failed to allocate xattr id index table\n");
89 return ERR_PTR(-ENOMEM);
90 }
91
92 err = squashfs_read_table(sb, xid_table, start + sizeof(id_table), len);
93 if (err < 0) {
94 ERROR("unable to read xattr id index table\n");
95 kfree(xid_table);
96 return ERR_PTR(err);
97 }
98
99 return xid_table;
100}
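
squashfs_read_xattr_id_table() only caches the metadata-block addresses of the id table; squashfs_xattr_lookup() then converts an xattr id into an index block plus an offset using the SQUASHFS_XATTR_BLOCK*() macros. The arithmetic, shown standalone with the 8192-byte metadata block size and the 16-byte squashfs_xattr_id entry from squashfs_fs.h and a made-up id:

    #include <stdio.h>

    #define METADATA_SIZE   8192    /* SQUASHFS_METADATA_SIZE */
    #define ID_ENTRY_SIZE   16      /* sizeof(struct squashfs_xattr_id) */

    int main(void)
    {
            unsigned int id = 1000;                 /* hypothetical xattr id */
            unsigned int bytes = id * ID_ENTRY_SIZE;

            /* Same computation as SQUASHFS_XATTR_BLOCK()/..._BLOCK_OFFSET(). */
            printf("index block:  %u\n", bytes / METADATA_SIZE);    /* 1 */
            printf("block offset: %u\n", bytes % METADATA_SIZE);    /* 7808 */
            return 0;
    }
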
diff --git a/fs/super.c b/fs/super.c
index 69688b15f1fa..5c35bc7a499e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -24,7 +24,6 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/acct.h> 25#include <linux/acct.h>
26#include <linux/blkdev.h> 26#include <linux/blkdev.h>
27#include <linux/quotaops.h>
28#include <linux/mount.h> 27#include <linux/mount.h>
29#include <linux/security.h> 28#include <linux/security.h>
30#include <linux/writeback.h> /* for the emergency remount stuff */ 29#include <linux/writeback.h> /* for the emergency remount stuff */
@@ -94,8 +93,6 @@ static struct super_block *alloc_super(struct file_system_type *type)
94 init_rwsem(&s->s_dquot.dqptr_sem); 93 init_rwsem(&s->s_dquot.dqptr_sem);
95 init_waitqueue_head(&s->s_wait_unfrozen); 94 init_waitqueue_head(&s->s_wait_unfrozen);
96 s->s_maxbytes = MAX_NON_LFS; 95 s->s_maxbytes = MAX_NON_LFS;
97 s->dq_op = sb_dquot_ops;
98 s->s_qcop = sb_quotactl_ops;
99 s->s_op = &default_op; 96 s->s_op = &default_op;
100 s->s_time_gran = 1000000000; 97 s->s_time_gran = 1000000000;
101 } 98 }
@@ -160,7 +157,6 @@ void deactivate_locked_super(struct super_block *s)
160{ 157{
161 struct file_system_type *fs = s->s_type; 158 struct file_system_type *fs = s->s_type;
162 if (atomic_dec_and_test(&s->s_active)) { 159 if (atomic_dec_and_test(&s->s_active)) {
163 vfs_dq_off(s, 0);
164 fs->kill_sb(s); 160 fs->kill_sb(s);
165 put_filesystem(fs); 161 put_filesystem(fs);
166 put_super(s); 162 put_super(s);
@@ -524,7 +520,7 @@ rescan:
524int do_remount_sb(struct super_block *sb, int flags, void *data, int force) 520int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
525{ 521{
526 int retval; 522 int retval;
527 int remount_rw, remount_ro; 523 int remount_ro;
528 524
529 if (sb->s_frozen != SB_UNFROZEN) 525 if (sb->s_frozen != SB_UNFROZEN)
530 return -EBUSY; 526 return -EBUSY;
@@ -540,7 +536,6 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
540 sync_filesystem(sb); 536 sync_filesystem(sb);
541 537
542 remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY); 538 remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
543 remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY);
544 539
545 /* If we are remounting RDONLY and current sb is read/write, 540 /* If we are remounting RDONLY and current sb is read/write,
546 make sure there are no rw files opened */ 541 make sure there are no rw files opened */
@@ -549,9 +544,6 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
549 mark_files_ro(sb); 544 mark_files_ro(sb);
550 else if (!fs_may_remount_ro(sb)) 545 else if (!fs_may_remount_ro(sb))
551 return -EBUSY; 546 return -EBUSY;
552 retval = vfs_dq_off(sb, 1);
553 if (retval < 0 && retval != -ENOSYS)
554 return -EBUSY;
555 } 547 }
556 548
557 if (sb->s_op->remount_fs) { 549 if (sb->s_op->remount_fs) {
@@ -560,8 +552,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
560 return retval; 552 return retval;
561 } 553 }
562 sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK); 554 sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
563 if (remount_rw) 555
564 vfs_dq_quota_on_remount(sb);
565 /* 556 /*
566 * Some filesystems modify their metadata via some other path than the 557 * Some filesystems modify their metadata via some other path than the
567 * bdev buffer cache (eg. use a private mapping, or directories in 558 * bdev buffer cache (eg. use a private mapping, or directories in
@@ -946,8 +937,8 @@ out:
946EXPORT_SYMBOL_GPL(vfs_kern_mount); 937EXPORT_SYMBOL_GPL(vfs_kern_mount);
947 938
948/** 939/**
949 * freeze_super -- lock the filesystem and force it into a consistent state 940 * freeze_super - lock the filesystem and force it into a consistent state
950 * @super: the super to lock 941 * @sb: the super to lock
951 * 942 *
952 * Syncs the super to make sure the filesystem is consistent and calls the fs's 943 * Syncs the super to make sure the filesystem is consistent and calls the fs's
953 * freeze_fs. Subsequent calls to this without first thawing the fs will return 944 * freeze_fs. Subsequent calls to this without first thawing the fs will return
diff --git a/fs/sync.c b/fs/sync.c
index e8cbd415e50a..c9f83f480ec5 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -130,12 +130,10 @@ void emergency_sync(void)
130 130
131/* 131/*
132 * Generic function to fsync a file. 132 * Generic function to fsync a file.
133 *
134 * filp may be NULL if called via the msync of a vma.
135 */ 133 */
136int file_fsync(struct file *filp, struct dentry *dentry, int datasync) 134int file_fsync(struct file *filp, int datasync)
137{ 135{
138 struct inode * inode = dentry->d_inode; 136 struct inode *inode = filp->f_mapping->host;
139 struct super_block * sb; 137 struct super_block * sb;
140 int ret, err; 138 int ret, err;
141 139
@@ -183,7 +181,7 @@ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
183 * livelocks in fsync_buffers_list(). 181 * livelocks in fsync_buffers_list().
184 */ 182 */
185 mutex_lock(&mapping->host->i_mutex); 183 mutex_lock(&mapping->host->i_mutex);
186 err = file->f_op->fsync(file, file->f_path.dentry, datasync); 184 err = file->f_op->fsync(file, datasync);
187 if (!ret) 185 if (!ret)
188 ret = err; 186 ret = err;
189 mutex_unlock(&mapping->host->i_mutex); 187 mutex_unlock(&mapping->host->i_mutex);
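
These hunks are part of the tree-wide ->fsync() prototype change: the dentry argument is dropped and implementations obtain the inode from file->f_mapping->host, as the reiserfs, smbfs and ubifs conversions earlier in this diff do. A skeletal fsync under the new prototype, with example_fsync() and example_file_ops as illustrative names and the actual write-back left as comments:

    #include <linux/fs.h>

    /* Post-patch ->fsync(): no dentry argument, inode comes from the mapping. */
    static int example_fsync(struct file *file, int datasync)
    {
            struct inode *inode = file->f_mapping->host;

            /* ... write back this inode's dirty data and metadata ... */
            if (datasync) {
                    /* fdatasync(): non-essential metadata may be skipped */
            }
            return 0;
    }

    static const struct file_operations example_file_ops = {
            .llseek = generic_file_llseek,
            .fsync  = example_fsync,
    };
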
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index bbd77e95cf7f..bde1a4c3679a 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -117,13 +117,11 @@ int sysfs_setattr(struct dentry *dentry, struct iattr *iattr)
117 if (error) 117 if (error)
118 goto out; 118 goto out;
119 119
120 iattr->ia_valid &= ~ATTR_SIZE; /* ignore size changes */ 120 /* this ignores size changes */
121 121 generic_setattr(inode, iattr);
122 error = inode_setattr(inode, iattr);
123 if (error)
124 goto out;
125 122
126 error = sysfs_sd_setattr(sd, iattr); 123 error = sysfs_sd_setattr(sd, iattr);
124
127out: 125out:
128 mutex_unlock(&sysfs_mutex); 126 mutex_unlock(&sysfs_mutex);
129 return error; 127 return error;
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 1dabed286b4c..79941e4964a4 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -24,7 +24,7 @@ const struct file_operations sysv_dir_operations = {
24 .llseek = generic_file_llseek, 24 .llseek = generic_file_llseek,
25 .read = generic_read_dir, 25 .read = generic_read_dir,
26 .readdir = sysv_readdir, 26 .readdir = sysv_readdir,
27 .fsync = simple_fsync, 27 .fsync = generic_file_fsync,
28}; 28};
29 29
30static inline void dir_put_page(struct page *page) 30static inline void dir_put_page(struct page *page)
diff --git a/fs/sysv/file.c b/fs/sysv/file.c
index 96340c01f4a7..750cc22349bd 100644
--- a/fs/sysv/file.c
+++ b/fs/sysv/file.c
@@ -26,7 +26,7 @@ const struct file_operations sysv_file_operations = {
26 .write = do_sync_write, 26 .write = do_sync_write,
27 .aio_write = generic_file_aio_write, 27 .aio_write = generic_file_aio_write,
28 .mmap = generic_file_mmap, 28 .mmap = generic_file_mmap,
29 .fsync = simple_fsync, 29 .fsync = generic_file_fsync,
30 .splice_read = generic_file_splice_read, 30 .splice_read = generic_file_splice_read,
31}; 31};
32 32
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 4573734d723d..d4a5380b5669 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -43,6 +43,7 @@ static int sysv_sync_fs(struct super_block *sb, int wait)
43 * then attach current time stamp. 43 * then attach current time stamp.
44 * But if the filesystem was marked clean, keep it clean. 44 * But if the filesystem was marked clean, keep it clean.
45 */ 45 */
46 sb->s_dirt = 0;
46 old_time = fs32_to_cpu(sbi, *sbi->s_sb_time); 47 old_time = fs32_to_cpu(sbi, *sbi->s_sb_time);
47 if (sbi->s_type == FSTYPE_SYSV4) { 48 if (sbi->s_type == FSTYPE_SYSV4) {
48 if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38 - old_time)) 49 if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38 - old_time))
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 5692cf72b807..12f445cee9f7 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -967,12 +967,15 @@ static int do_writepage(struct page *page, int len)
967 * the page locked, and it locks @ui_mutex. However, write-back does take inode 967 * the page locked, and it locks @ui_mutex. However, write-back does take inode
968 * @i_mutex, which means other VFS operations may be run on this inode at the 968 * @i_mutex, which means other VFS operations may be run on this inode at the
969 * same time. And the problematic one is truncation to smaller size, from where 969 * same time. And the problematic one is truncation to smaller size, from where
970 * we have to call 'vmtruncate()', which first changes @inode->i_size, then 970 * we have to call 'simple_setsize()', which first changes @inode->i_size, then
971 * drops the truncated pages. And while dropping the pages, it takes the page 971 * drops the truncated pages. And while dropping the pages, it takes the page
972 * lock. This means that 'do_truncation()' cannot call 'vmtruncate()' with 972 * lock. This means that 'do_truncation()' cannot call 'simple_setsize()' with
973 * @ui_mutex locked, because it would deadlock with 'ubifs_writepage()'. This 973 * @ui_mutex locked, because it would deadlock with 'ubifs_writepage()'. This
974 * means that @inode->i_size is changed while @ui_mutex is unlocked. 974 * means that @inode->i_size is changed while @ui_mutex is unlocked.
975 * 975 *
976 * XXX: with the new truncate the above is not true anymore, the simple_setsize
977 * calls can be replaced with the individual components.
978 *
976 * But in 'ubifs_writepage()' we have to guarantee that we do not write beyond 979 * But in 'ubifs_writepage()' we have to guarantee that we do not write beyond
977 * inode size. How do we do this if @inode->i_size may became smaller while we 980 * inode size. How do we do this if @inode->i_size may became smaller while we
978 * are in the middle of 'ubifs_writepage()'? The UBIFS solution is the 981 * are in the middle of 'ubifs_writepage()'? The UBIFS solution is the
@@ -1125,7 +1128,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
1125 budgeted = 0; 1128 budgeted = 0;
1126 } 1129 }
1127 1130
1128 err = vmtruncate(inode, new_size); 1131 err = simple_setsize(inode, new_size);
1129 if (err) 1132 if (err)
1130 goto out_budg; 1133 goto out_budg;
1131 1134
@@ -1214,7 +1217,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
1214 1217
1215 if (attr->ia_valid & ATTR_SIZE) { 1218 if (attr->ia_valid & ATTR_SIZE) {
1216 dbg_gen("size %lld -> %lld", inode->i_size, new_size); 1219 dbg_gen("size %lld -> %lld", inode->i_size, new_size);
1217 err = vmtruncate(inode, new_size); 1220 err = simple_setsize(inode, new_size);
1218 if (err) 1221 if (err)
1219 goto out; 1222 goto out;
1220 } 1223 }
@@ -1223,7 +1226,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
1223 if (attr->ia_valid & ATTR_SIZE) { 1226 if (attr->ia_valid & ATTR_SIZE) {
1224 /* Truncation changes inode [mc]time */ 1227 /* Truncation changes inode [mc]time */
1225 inode->i_mtime = inode->i_ctime = ubifs_current_time(inode); 1228 inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
1226 /* 'vmtruncate()' changed @i_size, update @ui_size */ 1229 /* 'simple_setsize()' changed @i_size, update @ui_size */
1227 ui->ui_size = inode->i_size; 1230 ui->ui_size = inode->i_size;
1228 } 1231 }
1229 1232
@@ -1304,9 +1307,9 @@ static void *ubifs_follow_link(struct dentry *dentry, struct nameidata *nd)
1304 return NULL; 1307 return NULL;
1305} 1308}
1306 1309
1307int ubifs_fsync(struct file *file, struct dentry *dentry, int datasync) 1310int ubifs_fsync(struct file *file, int datasync)
1308{ 1311{
1309 struct inode *inode = dentry->d_inode; 1312 struct inode *inode = file->f_mapping->host;
1310 struct ubifs_info *c = inode->i_sb->s_fs_info; 1313 struct ubifs_info *c = inode->i_sb->s_fs_info;
1311 int err; 1314 int err;
1312 1315
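
Two interface changes meet in the ubifs/file.c hunks: truncation now goes through simple_setsize() instead of vmtruncate(), and the ->fsync method loses its dentry argument, so the inode has to be taken from the file's mapping instead. A minimal sketch of the new prototype; the body is a stand-in, only the signature and the f_mapping->host idiom come from the patch:

#include <linux/fs.h>

/* Sketch of the new-style ->fsync signature that UBIFS (and XFS, below)
 * convert to in this merge. */
static int example_fsync(struct file *file, int datasync)
{
        struct inode *inode = file->f_mapping->host;

        /* write back dirty data and metadata for @inode here */
        (void)inode;
        return 0;
}
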
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index bd2542dad014..2eef553d50c8 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -379,7 +379,7 @@ struct ubifs_gced_idx_leb {
379 * The @ui_size is a "shadow" variable for @inode->i_size and UBIFS uses 379 * The @ui_size is a "shadow" variable for @inode->i_size and UBIFS uses
380 * @ui_size instead of @inode->i_size. The reason for this is that UBIFS cannot 380 * @ui_size instead of @inode->i_size. The reason for this is that UBIFS cannot
381 * make sure @inode->i_size is always changed under @ui_mutex, because it 381 * make sure @inode->i_size is always changed under @ui_mutex, because it
382 * cannot call 'vmtruncate()' with @ui_mutex locked, because it would deadlock 382 * cannot call 'simple_setsize()' with @ui_mutex locked, because it would deadlock
383 * with 'ubifs_writepage()' (see file.c). All the other inode fields are 383 * with 'ubifs_writepage()' (see file.c). All the other inode fields are
384 * changed under @ui_mutex, so they do not need "shadow" fields. Note, one 384 * changed under @ui_mutex, so they do not need "shadow" fields. Note, one
385 * could consider to rework locking and base it on "shadow" fields. 385 * could consider to rework locking and base it on "shadow" fields.
@@ -1678,7 +1678,7 @@ const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c);
1678int ubifs_calc_dark(const struct ubifs_info *c, int spc); 1678int ubifs_calc_dark(const struct ubifs_info *c, int spc);
1679 1679
1680/* file.c */ 1680/* file.c */
1681int ubifs_fsync(struct file *file, struct dentry *dentry, int datasync); 1681int ubifs_fsync(struct file *file, int datasync);
1682int ubifs_setattr(struct dentry *dentry, struct iattr *attr); 1682int ubifs_setattr(struct dentry *dentry, struct iattr *attr);
1683 1683
1684/* dir.c */ 1684/* dir.c */
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 9a9378b4eb5a..b608efaa4cee 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -21,7 +21,6 @@
21 21
22#include "udfdecl.h" 22#include "udfdecl.h"
23 23
24#include <linux/quotaops.h>
25#include <linux/buffer_head.h> 24#include <linux/buffer_head.h>
26#include <linux/bitops.h> 25#include <linux/bitops.h>
27 26
@@ -159,8 +158,6 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
159 udf_debug("byte=%2x\n", 158 udf_debug("byte=%2x\n",
160 ((char *)bh->b_data)[(bit + i) >> 3]); 159 ((char *)bh->b_data)[(bit + i) >> 3]);
161 } else { 160 } else {
162 if (inode)
163 dquot_free_block(inode, 1);
164 udf_add_free_space(sb, sbi->s_partition, 1); 161 udf_add_free_space(sb, sbi->s_partition, 1);
165 } 162 }
166 } 163 }
@@ -210,15 +207,8 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
210 bit = block % (sb->s_blocksize << 3); 207 bit = block % (sb->s_blocksize << 3);
211 208
212 while (bit < (sb->s_blocksize << 3) && block_count > 0) { 209 while (bit < (sb->s_blocksize << 3) && block_count > 0) {
213 if (!udf_test_bit(bit, bh->b_data)) 210 if (!udf_clear_bit(bit, bh->b_data))
214 goto out; 211 goto out;
215 else if (dquot_prealloc_block(inode, 1))
216 goto out;
217 else if (!udf_clear_bit(bit, bh->b_data)) {
218 udf_debug("bit already cleared for block %d\n", bit);
219 dquot_free_block(inode, 1);
220 goto out;
221 }
222 block_count--; 212 block_count--;
223 alloc_count++; 213 alloc_count++;
224 bit++; 214 bit++;
@@ -338,20 +328,6 @@ search_back:
338 } 328 }
339 329
340got_block: 330got_block:
341
342 /*
343 * Check quota for allocation of this block.
344 */
345 if (inode) {
346 int ret = dquot_alloc_block(inode, 1);
347
348 if (ret) {
349 mutex_unlock(&sbi->s_alloc_mutex);
350 *err = ret;
351 return 0;
352 }
353 }
354
355 newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) - 331 newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
356 (sizeof(struct spaceBitmapDesc) << 3); 332 (sizeof(struct spaceBitmapDesc) << 3);
357 333
@@ -401,10 +377,6 @@ static void udf_table_free_blocks(struct super_block *sb,
401 } 377 }
402 378
403 iinfo = UDF_I(table); 379 iinfo = UDF_I(table);
404 /* We do this up front - There are some error conditions that
405 could occure, but.. oh well */
406 if (inode)
407 dquot_free_block(inode, count);
408 udf_add_free_space(sb, sbi->s_partition, count); 380 udf_add_free_space(sb, sbi->s_partition, count);
409 381
410 start = bloc->logicalBlockNum + offset; 382 start = bloc->logicalBlockNum + offset;
@@ -649,10 +621,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
649 epos.offset -= adsize; 621 epos.offset -= adsize;
650 622
651 alloc_count = (elen >> sb->s_blocksize_bits); 623 alloc_count = (elen >> sb->s_blocksize_bits);
652 if (inode && dquot_prealloc_block(inode, 624 if (alloc_count > block_count) {
653 alloc_count > block_count ? block_count : alloc_count))
654 alloc_count = 0;
655 else if (alloc_count > block_count) {
656 alloc_count = block_count; 625 alloc_count = block_count;
657 eloc.logicalBlockNum += alloc_count; 626 eloc.logicalBlockNum += alloc_count;
658 elen -= (alloc_count << sb->s_blocksize_bits); 627 elen -= (alloc_count << sb->s_blocksize_bits);
@@ -752,14 +721,6 @@ static int udf_table_new_block(struct super_block *sb,
752 newblock = goal_eloc.logicalBlockNum; 721 newblock = goal_eloc.logicalBlockNum;
753 goal_eloc.logicalBlockNum++; 722 goal_eloc.logicalBlockNum++;
754 goal_elen -= sb->s_blocksize; 723 goal_elen -= sb->s_blocksize;
755 if (inode) {
756 *err = dquot_alloc_block(inode, 1);
757 if (*err) {
758 brelse(goal_epos.bh);
759 mutex_unlock(&sbi->s_alloc_mutex);
760 return 0;
761 }
762 }
763 724
764 if (goal_elen) 725 if (goal_elen)
765 udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1); 726 udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
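
The preallocation path in udf_bitmap_prealloc_blocks() also gets simpler once the quota charge is gone: udf_clear_bit() returns the previous bit value, so one test-and-clear replaces the old probe, charge and clear sequence. The resulting loop body, pulled out of the hunk above for readability:

        while (bit < (sb->s_blocksize << 3) && block_count > 0) {
                if (!udf_clear_bit(bit, bh->b_data))
                        goto out;       /* bit already clear: block in use */
                block_count--;
                alloc_count++;
                bit++;
        }
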
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 3a84455c2a77..51552bf50225 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -207,8 +207,9 @@ static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
207 207
208/* readdir and lookup functions */ 208/* readdir and lookup functions */
209const struct file_operations udf_dir_operations = { 209const struct file_operations udf_dir_operations = {
210 .llseek = generic_file_llseek,
210 .read = generic_read_dir, 211 .read = generic_read_dir,
211 .readdir = udf_readdir, 212 .readdir = udf_readdir,
212 .unlocked_ioctl = udf_ioctl, 213 .unlocked_ioctl = udf_ioctl,
213 .fsync = simple_fsync, 214 .fsync = generic_file_fsync,
214}; 215};
diff --git a/fs/udf/file.c b/fs/udf/file.c
index baae3a723946..94e06d6bddbd 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -34,7 +34,6 @@
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/smp_lock.h> 35#include <linux/smp_lock.h>
36#include <linux/pagemap.h> 36#include <linux/pagemap.h>
37#include <linux/quotaops.h>
38#include <linux/buffer_head.h> 37#include <linux/buffer_head.h>
39#include <linux/aio.h> 38#include <linux/aio.h>
40#include <linux/smp_lock.h> 39#include <linux/smp_lock.h>
@@ -219,39 +218,16 @@ const struct file_operations udf_file_operations = {
219 .read = do_sync_read, 218 .read = do_sync_read,
220 .aio_read = generic_file_aio_read, 219 .aio_read = generic_file_aio_read,
221 .unlocked_ioctl = udf_ioctl, 220 .unlocked_ioctl = udf_ioctl,
222 .open = dquot_file_open, 221 .open = generic_file_open,
223 .mmap = generic_file_mmap, 222 .mmap = generic_file_mmap,
224 .write = do_sync_write, 223 .write = do_sync_write,
225 .aio_write = udf_file_aio_write, 224 .aio_write = udf_file_aio_write,
226 .release = udf_release_file, 225 .release = udf_release_file,
227 .fsync = simple_fsync, 226 .fsync = generic_file_fsync,
228 .splice_read = generic_file_splice_read, 227 .splice_read = generic_file_splice_read,
229 .llseek = generic_file_llseek, 228 .llseek = generic_file_llseek,
230}; 229};
231 230
232int udf_setattr(struct dentry *dentry, struct iattr *iattr)
233{
234 struct inode *inode = dentry->d_inode;
235 int error;
236
237 error = inode_change_ok(inode, iattr);
238 if (error)
239 return error;
240
241 if (is_quota_modification(inode, iattr))
242 dquot_initialize(inode);
243
244 if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
245 (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
246 error = dquot_transfer(inode, iattr);
247 if (error)
248 return error;
249 }
250
251 return inode_setattr(inode, iattr);
252}
253
254const struct inode_operations udf_file_inode_operations = { 231const struct inode_operations udf_file_inode_operations = {
255 .truncate = udf_truncate, 232 .truncate = udf_truncate,
256 .setattr = udf_setattr,
257}; 233};
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 2b5586c7f02a..18cd7111185d 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -20,7 +20,6 @@
20 20
21#include "udfdecl.h" 21#include "udfdecl.h"
22#include <linux/fs.h> 22#include <linux/fs.h>
23#include <linux/quotaops.h>
24#include <linux/sched.h> 23#include <linux/sched.h>
25#include <linux/slab.h> 24#include <linux/slab.h>
26 25
@@ -32,13 +31,6 @@ void udf_free_inode(struct inode *inode)
32 struct super_block *sb = inode->i_sb; 31 struct super_block *sb = inode->i_sb;
33 struct udf_sb_info *sbi = UDF_SB(sb); 32 struct udf_sb_info *sbi = UDF_SB(sb);
34 33
35 /*
36 * Note: we must free any quota before locking the superblock,
37 * as writing the quota to disk may need the lock as well.
38 */
39 dquot_free_inode(inode);
40 dquot_drop(inode);
41
42 clear_inode(inode); 34 clear_inode(inode);
43 35
44 mutex_lock(&sbi->s_alloc_mutex); 36 mutex_lock(&sbi->s_alloc_mutex);
@@ -61,7 +53,7 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
61 struct super_block *sb = dir->i_sb; 53 struct super_block *sb = dir->i_sb;
62 struct udf_sb_info *sbi = UDF_SB(sb); 54 struct udf_sb_info *sbi = UDF_SB(sb);
63 struct inode *inode; 55 struct inode *inode;
64 int block, ret; 56 int block;
65 uint32_t start = UDF_I(dir)->i_location.logicalBlockNum; 57 uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
66 struct udf_inode_info *iinfo; 58 struct udf_inode_info *iinfo;
67 struct udf_inode_info *dinfo = UDF_I(dir); 59 struct udf_inode_info *dinfo = UDF_I(dir);
@@ -146,17 +138,6 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
146 insert_inode_hash(inode); 138 insert_inode_hash(inode);
147 mark_inode_dirty(inode); 139 mark_inode_dirty(inode);
148 140
149 dquot_initialize(inode);
150 ret = dquot_alloc_inode(inode);
151 if (ret) {
152 dquot_drop(inode);
153 inode->i_flags |= S_NOQUOTA;
154 inode->i_nlink = 0;
155 iput(inode);
156 *err = ret;
157 return NULL;
158 }
159
160 *err = 0; 141 *err = 0;
161 return inode; 142 return inode;
162} 143}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 8a3fbd177cab..124852bcf6fe 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -36,7 +36,6 @@
36#include <linux/pagemap.h> 36#include <linux/pagemap.h>
37#include <linux/buffer_head.h> 37#include <linux/buffer_head.h>
38#include <linux/writeback.h> 38#include <linux/writeback.h>
39#include <linux/quotaops.h>
40#include <linux/slab.h> 39#include <linux/slab.h>
41#include <linux/crc-itu-t.h> 40#include <linux/crc-itu-t.h>
42 41
@@ -71,9 +70,6 @@ static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
71 70
72void udf_delete_inode(struct inode *inode) 71void udf_delete_inode(struct inode *inode)
73{ 72{
74 if (!is_bad_inode(inode))
75 dquot_initialize(inode);
76
77 truncate_inode_pages(&inode->i_data, 0); 73 truncate_inode_pages(&inode->i_data, 0);
78 74
79 if (is_bad_inode(inode)) 75 if (is_bad_inode(inode))
@@ -113,7 +109,6 @@ void udf_clear_inode(struct inode *inode)
113 (unsigned long long)iinfo->i_lenExtents); 109 (unsigned long long)iinfo->i_lenExtents);
114 } 110 }
115 111
116 dquot_drop(inode);
117 kfree(iinfo->i_ext.i_data); 112 kfree(iinfo->i_ext.i_data);
118 iinfo->i_ext.i_data = NULL; 113 iinfo->i_ext.i_data = NULL;
119} 114}
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 585f733615dc..bf5fc674193c 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -27,7 +27,6 @@
27#include <linux/errno.h> 27#include <linux/errno.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/quotaops.h>
31#include <linux/smp_lock.h> 30#include <linux/smp_lock.h>
32#include <linux/buffer_head.h> 31#include <linux/buffer_head.h>
33#include <linux/sched.h> 32#include <linux/sched.h>
@@ -563,8 +562,6 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
563 int err; 562 int err;
564 struct udf_inode_info *iinfo; 563 struct udf_inode_info *iinfo;
565 564
566 dquot_initialize(dir);
567
568 lock_kernel(); 565 lock_kernel();
569 inode = udf_new_inode(dir, mode, &err); 566 inode = udf_new_inode(dir, mode, &err);
570 if (!inode) { 567 if (!inode) {
@@ -617,8 +614,6 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
617 if (!old_valid_dev(rdev)) 614 if (!old_valid_dev(rdev))
618 return -EINVAL; 615 return -EINVAL;
619 616
620 dquot_initialize(dir);
621
622 lock_kernel(); 617 lock_kernel();
623 err = -EIO; 618 err = -EIO;
624 inode = udf_new_inode(dir, mode, &err); 619 inode = udf_new_inode(dir, mode, &err);
@@ -664,8 +659,6 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
664 struct udf_inode_info *dinfo = UDF_I(dir); 659 struct udf_inode_info *dinfo = UDF_I(dir);
665 struct udf_inode_info *iinfo; 660 struct udf_inode_info *iinfo;
666 661
667 dquot_initialize(dir);
668
669 lock_kernel(); 662 lock_kernel();
670 err = -EMLINK; 663 err = -EMLINK;
671 if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) 664 if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1)
@@ -800,8 +793,6 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
800 struct fileIdentDesc *fi, cfi; 793 struct fileIdentDesc *fi, cfi;
801 struct kernel_lb_addr tloc; 794 struct kernel_lb_addr tloc;
802 795
803 dquot_initialize(dir);
804
805 retval = -ENOENT; 796 retval = -ENOENT;
806 lock_kernel(); 797 lock_kernel();
807 fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); 798 fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
@@ -848,8 +839,6 @@ static int udf_unlink(struct inode *dir, struct dentry *dentry)
848 struct fileIdentDesc cfi; 839 struct fileIdentDesc cfi;
849 struct kernel_lb_addr tloc; 840 struct kernel_lb_addr tloc;
850 841
851 dquot_initialize(dir);
852
853 retval = -ENOENT; 842 retval = -ENOENT;
854 lock_kernel(); 843 lock_kernel();
855 fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); 844 fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
@@ -904,8 +893,6 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
904 struct buffer_head *bh; 893 struct buffer_head *bh;
905 struct udf_inode_info *iinfo; 894 struct udf_inode_info *iinfo;
906 895
907 dquot_initialize(dir);
908
909 lock_kernel(); 896 lock_kernel();
910 inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err); 897 inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err);
911 if (!inode) 898 if (!inode)
@@ -1075,8 +1062,6 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
1075 int err; 1062 int err;
1076 struct buffer_head *bh; 1063 struct buffer_head *bh;
1077 1064
1078 dquot_initialize(dir);
1079
1080 lock_kernel(); 1065 lock_kernel();
1081 if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { 1066 if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) {
1082 unlock_kernel(); 1067 unlock_kernel();
@@ -1139,9 +1124,6 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
1139 struct kernel_lb_addr tloc; 1124 struct kernel_lb_addr tloc;
1140 struct udf_inode_info *old_iinfo = UDF_I(old_inode); 1125 struct udf_inode_info *old_iinfo = UDF_I(old_inode);
1141 1126
1142 dquot_initialize(old_dir);
1143 dquot_initialize(new_dir);
1144
1145 lock_kernel(); 1127 lock_kernel();
1146 ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); 1128 ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
1147 if (ofi) { 1129 if (ofi) {
@@ -1387,7 +1369,6 @@ const struct export_operations udf_export_ops = {
1387const struct inode_operations udf_dir_inode_operations = { 1369const struct inode_operations udf_dir_inode_operations = {
1388 .lookup = udf_lookup, 1370 .lookup = udf_lookup,
1389 .create = udf_create, 1371 .create = udf_create,
1390 .setattr = udf_setattr,
1391 .link = udf_link, 1372 .link = udf_link,
1392 .unlink = udf_unlink, 1373 .unlink = udf_unlink,
1393 .symlink = udf_symlink, 1374 .symlink = udf_symlink,
@@ -1400,5 +1381,4 @@ const struct inode_operations udf_symlink_inode_operations = {
1400 .readlink = generic_readlink, 1381 .readlink = generic_readlink,
1401 .follow_link = page_follow_link_light, 1382 .follow_link = page_follow_link_light,
1402 .put_link = page_put_link, 1383 .put_link = page_put_link,
1403 .setattr = udf_setattr,
1404}; 1384};
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 1e4543cbcd27..612d1e2e285a 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -557,6 +557,7 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
557{ 557{
558 struct udf_options uopt; 558 struct udf_options uopt;
559 struct udf_sb_info *sbi = UDF_SB(sb); 559 struct udf_sb_info *sbi = UDF_SB(sb);
560 int error = 0;
560 561
561 uopt.flags = sbi->s_flags; 562 uopt.flags = sbi->s_flags;
562 uopt.uid = sbi->s_uid; 563 uopt.uid = sbi->s_uid;
@@ -582,17 +583,17 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
582 *flags |= MS_RDONLY; 583 *flags |= MS_RDONLY;
583 } 584 }
584 585
585 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) { 586 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
586 unlock_kernel(); 587 goto out_unlock;
587 return 0; 588
588 }
589 if (*flags & MS_RDONLY) 589 if (*flags & MS_RDONLY)
590 udf_close_lvid(sb); 590 udf_close_lvid(sb);
591 else 591 else
592 udf_open_lvid(sb); 592 udf_open_lvid(sb);
593 593
594out_unlock:
594 unlock_kernel(); 595 unlock_kernel();
595 return 0; 596 return error;
596} 597}
597 598
598/* Check Volume Structure Descriptors (ECMA 167 2/9.1) */ 599/* Check Volume Structure Descriptors (ECMA 167 2/9.1) */
@@ -1939,7 +1940,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
1939 /* Fill in the rest of the superblock */ 1940 /* Fill in the rest of the superblock */
1940 sb->s_op = &udf_sb_ops; 1941 sb->s_op = &udf_sb_ops;
1941 sb->s_export_op = &udf_export_ops; 1942 sb->s_export_op = &udf_export_ops;
1942 sb->dq_op = NULL; 1943
1943 sb->s_dirt = 0; 1944 sb->s_dirt = 0;
1944 sb->s_magic = UDF_SUPER_MAGIC; 1945 sb->s_magic = UDF_SUPER_MAGIC;
1945 sb->s_time_gran = 1000; 1946 sb->s_time_gran = 1000;
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 9079ff7d6255..2bac0354891f 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -131,7 +131,6 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
131 131
132/* file.c */ 132/* file.c */
133extern long udf_ioctl(struct file *, unsigned int, unsigned long); 133extern long udf_ioctl(struct file *, unsigned int, unsigned long);
134extern int udf_setattr(struct dentry *dentry, struct iattr *iattr);
135/* inode.c */ 134/* inode.c */
136extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *); 135extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
137extern int udf_sync_inode(struct inode *); 136extern int udf_sync_inode(struct inode *);
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 5cfa4d85ccf2..048484fb10d2 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -12,7 +12,6 @@
12#include <linux/stat.h> 12#include <linux/stat.h>
13#include <linux/time.h> 13#include <linux/time.h>
14#include <linux/string.h> 14#include <linux/string.h>
15#include <linux/quotaops.h>
16#include <linux/buffer_head.h> 15#include <linux/buffer_head.h>
17#include <linux/capability.h> 16#include <linux/capability.h>
18#include <linux/bitops.h> 17#include <linux/bitops.h>
@@ -85,9 +84,6 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
85 "bit already cleared for fragment %u", i); 84 "bit already cleared for fragment %u", i);
86 } 85 }
87 86
88 dquot_free_block(inode, count);
89
90
91 fs32_add(sb, &ucg->cg_cs.cs_nffree, count); 87 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
92 uspi->cs_total.cs_nffree += count; 88 uspi->cs_total.cs_nffree += count;
93 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); 89 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
@@ -195,7 +191,6 @@ do_more:
195 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 191 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
196 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 192 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
197 ufs_clusteracct (sb, ucpi, blkno, 1); 193 ufs_clusteracct (sb, ucpi, blkno, 1);
198 dquot_free_block(inode, uspi->s_fpb);
199 194
200 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); 195 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
201 uspi->cs_total.cs_nbfree++; 196 uspi->cs_total.cs_nbfree++;
@@ -511,7 +506,6 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
511 struct ufs_cg_private_info * ucpi; 506 struct ufs_cg_private_info * ucpi;
512 struct ufs_cylinder_group * ucg; 507 struct ufs_cylinder_group * ucg;
513 unsigned cgno, fragno, fragoff, count, fragsize, i; 508 unsigned cgno, fragno, fragoff, count, fragsize, i;
514 int ret;
515 509
516 UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n", 510 UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
517 (unsigned long long)fragment, oldcount, newcount); 511 (unsigned long long)fragment, oldcount, newcount);
@@ -557,11 +551,6 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
557 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1); 551 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
558 for (i = oldcount; i < newcount; i++) 552 for (i = oldcount; i < newcount; i++)
559 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i); 553 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
560 ret = dquot_alloc_block(inode, count);
561 if (ret) {
562 *err = ret;
563 return 0;
564 }
565 554
566 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count); 555 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
567 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); 556 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
@@ -598,7 +587,6 @@ static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
598 struct ufs_cylinder_group * ucg; 587 struct ufs_cylinder_group * ucg;
599 unsigned oldcg, i, j, k, allocsize; 588 unsigned oldcg, i, j, k, allocsize;
600 u64 result; 589 u64 result;
601 int ret;
602 590
603 UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n", 591 UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
604 inode->i_ino, cgno, (unsigned long long)goal, count); 592 inode->i_ino, cgno, (unsigned long long)goal, count);
@@ -667,7 +655,6 @@ cg_found:
667 for (i = count; i < uspi->s_fpb; i++) 655 for (i = count; i < uspi->s_fpb; i++)
668 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); 656 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
669 i = uspi->s_fpb - count; 657 i = uspi->s_fpb - count;
670 dquot_free_block(inode, i);
671 658
672 fs32_add(sb, &ucg->cg_cs.cs_nffree, i); 659 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
673 uspi->cs_total.cs_nffree += i; 660 uspi->cs_total.cs_nffree += i;
@@ -679,11 +666,6 @@ cg_found:
679 result = ufs_bitmap_search (sb, ucpi, goal, allocsize); 666 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
680 if (result == INVBLOCK) 667 if (result == INVBLOCK)
681 return 0; 668 return 0;
682 ret = dquot_alloc_block(inode, count);
683 if (ret) {
684 *err = ret;
685 return 0;
686 }
687 for (i = 0; i < count; i++) 669 for (i = 0; i < count; i++)
688 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i); 670 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
689 671
@@ -718,7 +700,6 @@ static u64 ufs_alloccg_block(struct inode *inode,
718 struct ufs_super_block_first * usb1; 700 struct ufs_super_block_first * usb1;
719 struct ufs_cylinder_group * ucg; 701 struct ufs_cylinder_group * ucg;
720 u64 result, blkno; 702 u64 result, blkno;
721 int ret;
722 703
723 UFSD("ENTER, goal %llu\n", (unsigned long long)goal); 704 UFSD("ENTER, goal %llu\n", (unsigned long long)goal);
724 705
@@ -752,11 +733,6 @@ gotit:
752 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 733 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
753 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 734 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
754 ufs_clusteracct (sb, ucpi, blkno, -1); 735 ufs_clusteracct (sb, ucpi, blkno, -1);
755 ret = dquot_alloc_block(inode, uspi->s_fpb);
756 if (ret) {
757 *err = ret;
758 return INVBLOCK;
759 }
760 736
761 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1); 737 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
762 uspi->cs_total.cs_nbfree--; 738 uspi->cs_total.cs_nbfree--;
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 317a0d444f6b..ec784756dc65 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -666,6 +666,6 @@ not_empty:
666const struct file_operations ufs_dir_operations = { 666const struct file_operations ufs_dir_operations = {
667 .read = generic_read_dir, 667 .read = generic_read_dir,
668 .readdir = ufs_readdir, 668 .readdir = ufs_readdir,
669 .fsync = simple_fsync, 669 .fsync = generic_file_fsync,
670 .llseek = generic_file_llseek, 670 .llseek = generic_file_llseek,
671}; 671};
diff --git a/fs/ufs/file.c b/fs/ufs/file.c
index a8962cecde5b..33afa20d4509 100644
--- a/fs/ufs/file.c
+++ b/fs/ufs/file.c
@@ -24,7 +24,6 @@
24 */ 24 */
25 25
26#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/quotaops.h>
28 27
29#include "ufs_fs.h" 28#include "ufs_fs.h"
30#include "ufs.h" 29#include "ufs.h"
@@ -41,7 +40,7 @@ const struct file_operations ufs_file_operations = {
41 .write = do_sync_write, 40 .write = do_sync_write,
42 .aio_write = generic_file_aio_write, 41 .aio_write = generic_file_aio_write,
43 .mmap = generic_file_mmap, 42 .mmap = generic_file_mmap,
44 .open = dquot_file_open, 43 .open = generic_file_open,
45 .fsync = simple_fsync, 44 .fsync = generic_file_fsync,
46 .splice_read = generic_file_splice_read, 45 .splice_read = generic_file_splice_read,
47}; 46};
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 3a959d55084d..594480e537d2 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -27,7 +27,6 @@
27#include <linux/time.h> 27#include <linux/time.h>
28#include <linux/stat.h> 28#include <linux/stat.h>
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/quotaops.h>
31#include <linux/buffer_head.h> 30#include <linux/buffer_head.h>
32#include <linux/sched.h> 31#include <linux/sched.h>
33#include <linux/bitops.h> 32#include <linux/bitops.h>
@@ -95,9 +94,6 @@ void ufs_free_inode (struct inode * inode)
95 94
96 is_directory = S_ISDIR(inode->i_mode); 95 is_directory = S_ISDIR(inode->i_mode);
97 96
98 dquot_free_inode(inode);
99 dquot_drop(inode);
100
101 clear_inode (inode); 97 clear_inode (inode);
102 98
103 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit)) 99 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
@@ -347,21 +343,12 @@ cg_found:
347 343
348 unlock_super (sb); 344 unlock_super (sb);
349 345
350 dquot_initialize(inode);
351 err = dquot_alloc_inode(inode);
352 if (err) {
353 dquot_drop(inode);
354 goto fail_without_unlock;
355 }
356
357 UFSD("allocating inode %lu\n", inode->i_ino); 346 UFSD("allocating inode %lu\n", inode->i_ino);
358 UFSD("EXIT\n"); 347 UFSD("EXIT\n");
359 return inode; 348 return inode;
360 349
361fail_remove_inode: 350fail_remove_inode:
362 unlock_super(sb); 351 unlock_super(sb);
363fail_without_unlock:
364 inode->i_flags |= S_NOQUOTA;
365 inode->i_nlink = 0; 352 inode->i_nlink = 0;
366 iput(inode); 353 iput(inode);
367 UFSD("EXIT (FAILED): err %d\n", err); 354 UFSD("EXIT (FAILED): err %d\n", err);
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index cffa756f1047..73fe773aa034 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -37,7 +37,6 @@
37#include <linux/smp_lock.h> 37#include <linux/smp_lock.h>
38#include <linux/buffer_head.h> 38#include <linux/buffer_head.h>
39#include <linux/writeback.h> 39#include <linux/writeback.h>
40#include <linux/quotaops.h>
41 40
42#include "ufs_fs.h" 41#include "ufs_fs.h"
43#include "ufs.h" 42#include "ufs.h"
@@ -910,9 +909,6 @@ void ufs_delete_inode (struct inode * inode)
910{ 909{
911 loff_t old_i_size; 910 loff_t old_i_size;
912 911
913 if (!is_bad_inode(inode))
914 dquot_initialize(inode);
915
916 truncate_inode_pages(&inode->i_data, 0); 912 truncate_inode_pages(&inode->i_data, 0);
917 if (is_bad_inode(inode)) 913 if (is_bad_inode(inode))
918 goto no_delete; 914 goto no_delete;
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index eabc02eb1294..b056f02b1fb3 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -30,7 +30,6 @@
30#include <linux/time.h> 30#include <linux/time.h>
31#include <linux/fs.h> 31#include <linux/fs.h>
32#include <linux/smp_lock.h> 32#include <linux/smp_lock.h>
33#include <linux/quotaops.h>
34 33
35#include "ufs_fs.h" 34#include "ufs_fs.h"
36#include "ufs.h" 35#include "ufs.h"
@@ -86,8 +85,6 @@ static int ufs_create (struct inode * dir, struct dentry * dentry, int mode,
86 85
87 UFSD("BEGIN\n"); 86 UFSD("BEGIN\n");
88 87
89 dquot_initialize(dir);
90
91 inode = ufs_new_inode(dir, mode); 88 inode = ufs_new_inode(dir, mode);
92 err = PTR_ERR(inode); 89 err = PTR_ERR(inode);
93 90
@@ -112,8 +109,6 @@ static int ufs_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t
112 if (!old_valid_dev(rdev)) 109 if (!old_valid_dev(rdev))
113 return -EINVAL; 110 return -EINVAL;
114 111
115 dquot_initialize(dir);
116
117 inode = ufs_new_inode(dir, mode); 112 inode = ufs_new_inode(dir, mode);
118 err = PTR_ERR(inode); 113 err = PTR_ERR(inode);
119 if (!IS_ERR(inode)) { 114 if (!IS_ERR(inode)) {
@@ -138,8 +133,6 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
138 if (l > sb->s_blocksize) 133 if (l > sb->s_blocksize)
139 goto out_notlocked; 134 goto out_notlocked;
140 135
141 dquot_initialize(dir);
142
143 lock_kernel(); 136 lock_kernel();
144 inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO); 137 inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
145 err = PTR_ERR(inode); 138 err = PTR_ERR(inode);
@@ -185,8 +178,6 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
185 return -EMLINK; 178 return -EMLINK;
186 } 179 }
187 180
188 dquot_initialize(dir);
189
190 inode->i_ctime = CURRENT_TIME_SEC; 181 inode->i_ctime = CURRENT_TIME_SEC;
191 inode_inc_link_count(inode); 182 inode_inc_link_count(inode);
192 atomic_inc(&inode->i_count); 183 atomic_inc(&inode->i_count);
@@ -204,8 +195,6 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
204 if (dir->i_nlink >= UFS_LINK_MAX) 195 if (dir->i_nlink >= UFS_LINK_MAX)
205 goto out; 196 goto out;
206 197
207 dquot_initialize(dir);
208
209 lock_kernel(); 198 lock_kernel();
210 inode_inc_link_count(dir); 199 inode_inc_link_count(dir);
211 200
@@ -250,8 +239,6 @@ static int ufs_unlink(struct inode *dir, struct dentry *dentry)
250 struct page *page; 239 struct page *page;
251 int err = -ENOENT; 240 int err = -ENOENT;
252 241
253 dquot_initialize(dir);
254
255 de = ufs_find_entry(dir, &dentry->d_name, &page); 242 de = ufs_find_entry(dir, &dentry->d_name, &page);
256 if (!de) 243 if (!de)
257 goto out; 244 goto out;
@@ -296,9 +283,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
296 struct ufs_dir_entry *old_de; 283 struct ufs_dir_entry *old_de;
297 int err = -ENOENT; 284 int err = -ENOENT;
298 285
299 dquot_initialize(old_dir);
300 dquot_initialize(new_dir);
301
302 old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page); 286 old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
303 if (!old_de) 287 if (!old_de)
304 goto out; 288 goto out;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 14743d935a93..3ec5a9eb6efb 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -77,7 +77,6 @@
77 77
78#include <linux/errno.h> 78#include <linux/errno.h>
79#include <linux/fs.h> 79#include <linux/fs.h>
80#include <linux/quotaops.h>
81#include <linux/slab.h> 80#include <linux/slab.h>
82#include <linux/time.h> 81#include <linux/time.h>
83#include <linux/stat.h> 82#include <linux/stat.h>
@@ -918,6 +917,7 @@ again:
918 sbi->s_bytesex = BYTESEX_LE; 917 sbi->s_bytesex = BYTESEX_LE;
919 switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) { 918 switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
920 case UFS_MAGIC: 919 case UFS_MAGIC:
920 case UFS_MAGIC_BW:
921 case UFS2_MAGIC: 921 case UFS2_MAGIC:
922 case UFS_MAGIC_LFN: 922 case UFS_MAGIC_LFN:
923 case UFS_MAGIC_FEA: 923 case UFS_MAGIC_FEA:
@@ -927,6 +927,7 @@ again:
927 sbi->s_bytesex = BYTESEX_BE; 927 sbi->s_bytesex = BYTESEX_BE;
928 switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) { 928 switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) {
929 case UFS_MAGIC: 929 case UFS_MAGIC:
930 case UFS_MAGIC_BW:
930 case UFS2_MAGIC: 931 case UFS2_MAGIC:
931 case UFS_MAGIC_LFN: 932 case UFS_MAGIC_LFN:
932 case UFS_MAGIC_FEA: 933 case UFS_MAGIC_FEA:
@@ -1045,7 +1046,7 @@ magic_found:
1045 */ 1046 */
1046 sb->s_op = &ufs_super_ops; 1047 sb->s_op = &ufs_super_ops;
1047 sb->s_export_op = &ufs_export_ops; 1048 sb->s_export_op = &ufs_export_ops;
1048 sb->dq_op = NULL; /***/ 1049
1049 sb->s_magic = fs32_to_cpu(sb, usb3->fs_magic); 1050 sb->s_magic = fs32_to_cpu(sb, usb3->fs_magic);
1050 1051
1051 uspi->s_sblkno = fs32_to_cpu(sb, usb1->fs_sblkno); 1052 uspi->s_sblkno = fs32_to_cpu(sb, usb1->fs_sblkno);
@@ -1435,126 +1436,19 @@ static void destroy_inodecache(void)
1435 kmem_cache_destroy(ufs_inode_cachep); 1436 kmem_cache_destroy(ufs_inode_cachep);
1436} 1437}
1437 1438
1438static void ufs_clear_inode(struct inode *inode)
1439{
1440 dquot_drop(inode);
1441}
1442
1443#ifdef CONFIG_QUOTA
1444static ssize_t ufs_quota_read(struct super_block *, int, char *,size_t, loff_t);
1445static ssize_t ufs_quota_write(struct super_block *, int, const char *, size_t, loff_t);
1446#endif
1447
1448static const struct super_operations ufs_super_ops = { 1439static const struct super_operations ufs_super_ops = {
1449 .alloc_inode = ufs_alloc_inode, 1440 .alloc_inode = ufs_alloc_inode,
1450 .destroy_inode = ufs_destroy_inode, 1441 .destroy_inode = ufs_destroy_inode,
1451 .write_inode = ufs_write_inode, 1442 .write_inode = ufs_write_inode,
1452 .delete_inode = ufs_delete_inode, 1443 .delete_inode = ufs_delete_inode,
1453 .clear_inode = ufs_clear_inode,
1454 .put_super = ufs_put_super, 1444 .put_super = ufs_put_super,
1455 .write_super = ufs_write_super, 1445 .write_super = ufs_write_super,
1456 .sync_fs = ufs_sync_fs, 1446 .sync_fs = ufs_sync_fs,
1457 .statfs = ufs_statfs, 1447 .statfs = ufs_statfs,
1458 .remount_fs = ufs_remount, 1448 .remount_fs = ufs_remount,
1459 .show_options = ufs_show_options, 1449 .show_options = ufs_show_options,
1460#ifdef CONFIG_QUOTA
1461 .quota_read = ufs_quota_read,
1462 .quota_write = ufs_quota_write,
1463#endif
1464}; 1450};
1465 1451
1466#ifdef CONFIG_QUOTA
1467
1468/* Read data from quotafile - avoid pagecache and such because we cannot afford
1469 * acquiring the locks... As quota files are never truncated and quota code
1470 * itself serializes the operations (and noone else should touch the files)
1471 * we don't have to be afraid of races */
1472static ssize_t ufs_quota_read(struct super_block *sb, int type, char *data,
1473 size_t len, loff_t off)
1474{
1475 struct inode *inode = sb_dqopt(sb)->files[type];
1476 sector_t blk = off >> sb->s_blocksize_bits;
1477 int err = 0;
1478 int offset = off & (sb->s_blocksize - 1);
1479 int tocopy;
1480 size_t toread;
1481 struct buffer_head *bh;
1482 loff_t i_size = i_size_read(inode);
1483
1484 if (off > i_size)
1485 return 0;
1486 if (off+len > i_size)
1487 len = i_size-off;
1488 toread = len;
1489 while (toread > 0) {
1490 tocopy = sb->s_blocksize - offset < toread ?
1491 sb->s_blocksize - offset : toread;
1492
1493 bh = ufs_bread(inode, blk, 0, &err);
1494 if (err)
1495 return err;
1496 if (!bh) /* A hole? */
1497 memset(data, 0, tocopy);
1498 else {
1499 memcpy(data, bh->b_data+offset, tocopy);
1500 brelse(bh);
1501 }
1502 offset = 0;
1503 toread -= tocopy;
1504 data += tocopy;
1505 blk++;
1506 }
1507 return len;
1508}
1509
1510/* Write to quotafile */
1511static ssize_t ufs_quota_write(struct super_block *sb, int type,
1512 const char *data, size_t len, loff_t off)
1513{
1514 struct inode *inode = sb_dqopt(sb)->files[type];
1515 sector_t blk = off >> sb->s_blocksize_bits;
1516 int err = 0;
1517 int offset = off & (sb->s_blocksize - 1);
1518 int tocopy;
1519 size_t towrite = len;
1520 struct buffer_head *bh;
1521
1522 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
1523 while (towrite > 0) {
1524 tocopy = sb->s_blocksize - offset < towrite ?
1525 sb->s_blocksize - offset : towrite;
1526
1527 bh = ufs_bread(inode, blk, 1, &err);
1528 if (!bh)
1529 goto out;
1530 lock_buffer(bh);
1531 memcpy(bh->b_data+offset, data, tocopy);
1532 flush_dcache_page(bh->b_page);
1533 set_buffer_uptodate(bh);
1534 mark_buffer_dirty(bh);
1535 unlock_buffer(bh);
1536 brelse(bh);
1537 offset = 0;
1538 towrite -= tocopy;
1539 data += tocopy;
1540 blk++;
1541 }
1542out:
1543 if (len == towrite) {
1544 mutex_unlock(&inode->i_mutex);
1545 return err;
1546 }
1547 if (inode->i_size < off+len-towrite)
1548 i_size_write(inode, off+len-towrite);
1549 inode->i_version++;
1550 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
1551 mark_inode_dirty(inode);
1552 mutex_unlock(&inode->i_mutex);
1553 return len - towrite;
1554}
1555
1556#endif
1557
1558static int ufs_get_sb(struct file_system_type *fs_type, 1452static int ufs_get_sb(struct file_system_type *fs_type,
1559 int flags, const char *dev_name, void *data, struct vfsmount *mnt) 1453 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
1560{ 1454{
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index f294c44577dc..589e01a465ba 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -44,7 +44,6 @@
44#include <linux/buffer_head.h> 44#include <linux/buffer_head.h>
45#include <linux/blkdev.h> 45#include <linux/blkdev.h>
46#include <linux/sched.h> 46#include <linux/sched.h>
47#include <linux/quotaops.h>
48 47
49#include "ufs_fs.h" 48#include "ufs_fs.h"
50#include "ufs.h" 49#include "ufs.h"
@@ -501,12 +500,10 @@ out:
501 return err; 500 return err;
502} 501}
503 502
504
505/* 503/*
506 * We don't define our `inode->i_op->truncate', and call it here, 504 * TODO:
507 * because of: 505 * - truncate case should use proper ordering instead of using
508 * - there is no way to know old size 506 * simple_setsize
509 * - there is no way inform user about error, if it happens in `truncate'
510 */ 507 */
511int ufs_setattr(struct dentry *dentry, struct iattr *attr) 508int ufs_setattr(struct dentry *dentry, struct iattr *attr)
512{ 509{
@@ -518,19 +515,10 @@ int ufs_setattr(struct dentry *dentry, struct iattr *attr)
518 if (error) 515 if (error)
519 return error; 516 return error;
520 517
521 if (is_quota_modification(inode, attr))
522 dquot_initialize(inode);
523
524 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
525 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
526 error = dquot_transfer(inode, attr);
527 if (error)
528 return error;
529 }
530 if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) { 518 if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
531 loff_t old_i_size = inode->i_size; 519 loff_t old_i_size = inode->i_size;
532 520
533 error = vmtruncate(inode, attr->ia_size); 521 error = simple_setsize(inode, attr->ia_size);
534 if (error) 522 if (error)
535 return error; 523 return error;
536 error = ufs_truncate(inode, old_i_size); 524 error = ufs_truncate(inode, old_i_size);
diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h
index 6943ec677c0b..8aba544f9fad 100644
--- a/fs/ufs/ufs_fs.h
+++ b/fs/ufs/ufs_fs.h
@@ -48,6 +48,7 @@ typedef __u16 __bitwise __fs16;
48#define UFS_SECTOR_SIZE 512 48#define UFS_SECTOR_SIZE 512
49#define UFS_SECTOR_BITS 9 49#define UFS_SECTOR_BITS 9
50#define UFS_MAGIC 0x00011954 50#define UFS_MAGIC 0x00011954
51#define UFS_MAGIC_BW 0x0f242697
51#define UFS2_MAGIC 0x19540119 52#define UFS2_MAGIC 0x19540119
52#define UFS_CIGAM 0x54190100 /* byteswapped MAGIC */ 53#define UFS_CIGAM 0x54190100 /* byteswapped MAGIC */
53 54
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index d8fb1b5d6cb5..257a56b127cf 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -100,10 +100,10 @@ xfs_iozero(
100STATIC int 100STATIC int
101xfs_file_fsync( 101xfs_file_fsync(
102 struct file *file, 102 struct file *file,
103 struct dentry *dentry,
104 int datasync) 103 int datasync)
105{ 104{
106 struct xfs_inode *ip = XFS_I(dentry->d_inode); 105 struct inode *inode = file->f_mapping->host;
106 struct xfs_inode *ip = XFS_I(inode);
107 struct xfs_trans *tp; 107 struct xfs_trans *tp;
108 int error = 0; 108 int error = 0;
109 int log_flushed = 0; 109 int log_flushed = 0;
@@ -140,8 +140,8 @@ xfs_file_fsync(
140 * might gets cleared when the inode gets written out via the AIL 140 * might gets cleared when the inode gets written out via the AIL
141 * or xfs_iflush_cluster. 141 * or xfs_iflush_cluster.
142 */ 142 */
143 if (((dentry->d_inode->i_state & I_DIRTY_DATASYNC) || 143 if (((inode->i_state & I_DIRTY_DATASYNC) ||
144 ((dentry->d_inode->i_state & I_DIRTY_SYNC) && !datasync)) && 144 ((inode->i_state & I_DIRTY_SYNC) && !datasync)) &&
145 ip->i_update_core) { 145 ip->i_update_core) {
146 /* 146 /*
147 * Kick off a transaction to log the inode core to get the 147 * Kick off a transaction to log the inode core to get the
@@ -868,7 +868,7 @@ write_retry:
868 mutex_lock(&inode->i_mutex); 868 mutex_lock(&inode->i_mutex);
869 xfs_ilock(ip, iolock); 869 xfs_ilock(ip, iolock);
870 870
871 error2 = -xfs_file_fsync(file, file->f_path.dentry, 871 error2 = -xfs_file_fsync(file,
872 (file->f_flags & __O_SYNC) ? 0 : 1); 872 (file->f_flags & __O_SYNC) ? 0 : 1);
873 if (!error) 873 if (!error)
874 error = error2; 874 error = error2;
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 7bf83ddf82e0..baacd98e7cc6 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -373,7 +373,7 @@ struct acpi_pci_root {
373 struct acpi_pci_id id; 373 struct acpi_pci_id id;
374 struct pci_bus *bus; 374 struct pci_bus *bus;
375 u16 segment; 375 u16 segment;
376 u8 bus_nr; 376 struct resource secondary; /* downstream bus range */
377 377
378 u32 osc_support_set; /* _OSC state of support bits */ 378 u32 osc_support_set; /* _OSC state of support bits */
379 u32 osc_control_set; /* _OSC state of control bits */ 379 u32 osc_control_set; /* _OSC state of control bits */
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 4f7b44866b76..23d78b4d088b 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -104,8 +104,7 @@ int acpi_pci_bind_root(struct acpi_device *device);
104 104
105/* Arch-defined function to add a bus to the system */ 105/* Arch-defined function to add a bus to the system */
106 106
107struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain, 107struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root);
108 int bus);
109void pci_acpi_crs_quirks(void); 108void pci_acpi_crs_quirks(void);
110 109
111/* -------------------------------------------------------------------------- 110/* --------------------------------------------------------------------------
diff --git a/include/acpi/acpi_hest.h b/include/acpi/acpi_hest.h
deleted file mode 100644
index 63194d03cb2d..000000000000
--- a/include/acpi/acpi_hest.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef __ACPI_HEST_H
2#define __ACPI_HEST_H
3
4#include <linux/pci.h>
5
6#ifdef CONFIG_ACPI
7extern int acpi_hest_firmware_first_pci(struct pci_dev *pci);
8#else
9static inline int acpi_hest_firmware_first_pci(struct pci_dev *pci) { return 0; }
10#endif
11
12#endif
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
new file mode 100644
index 000000000000..b3365025ff8d
--- /dev/null
+++ b/include/acpi/apei.h
@@ -0,0 +1,34 @@
1/*
2 * apei.h - ACPI Platform Error Interface
3 */
4
5#ifndef ACPI_APEI_H
6#define ACPI_APEI_H
7
8#include <linux/acpi.h>
9#include <linux/cper.h>
10#include <asm/ioctls.h>
11
12#define APEI_ERST_INVALID_RECORD_ID 0xffffffffffffffffULL
13
14#define APEI_ERST_CLEAR_RECORD _IOW('E', 1, u64)
15#define APEI_ERST_GET_RECORD_COUNT _IOR('E', 2, u32)
16
17#ifdef __KERNEL__
18
19extern int hest_disable;
20extern int erst_disable;
21
22typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
23int apei_hest_parse(apei_hest_func_t func, void *data);
24
25int erst_write(const struct cper_record_header *record);
26ssize_t erst_get_record_count(void);
27int erst_get_next_record_id(u64 *record_id);
28ssize_t erst_read(u64 record_id, struct cper_record_header *record,
29 size_t buflen);
30ssize_t erst_read_next(struct cper_record_header *record, size_t buflen);
31int erst_clear(u64 record_id);
32
33#endif
34#endif
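
The new <acpi/apei.h> header above declares the ERST record interface end to end. A sketch of how an in-kernel consumer could walk the persistent store with it; the buffer handling and error policy are assumptions, not part of the header:

#include <acpi/apei.h>

/* Sketch only: @buf and @buflen are a caller-provided record buffer. */
static void example_drain_erst(struct cper_record_header *buf, size_t buflen)
{
        u64 id;

        while (!erst_get_next_record_id(&id) &&
               id != APEI_ERST_INVALID_RECORD_ID) {
                if (erst_read(id, buf, buflen) > 0) {
                        /* consume the CPER record in @buf, then drop it */
                        erst_clear(id);
                }
        }
}
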
diff --git a/include/acpi/atomicio.h b/include/acpi/atomicio.h
new file mode 100644
index 000000000000..8b9fb4b0b9ce
--- /dev/null
+++ b/include/acpi/atomicio.h
@@ -0,0 +1,10 @@
1#ifndef ACPI_ATOMIC_IO_H
2#define ACPI_ATOMIC_IO_H
3
4int acpi_pre_map_gar(struct acpi_generic_address *reg);
5int acpi_post_unmap_gar(struct acpi_generic_address *reg);
6
7int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg);
8int acpi_atomic_write(u64 val, struct acpi_generic_address *reg);
9
10#endif
diff --git a/include/acpi/hed.h b/include/acpi/hed.h
new file mode 100644
index 000000000000..46e1249b70cc
--- /dev/null
+++ b/include/acpi/hed.h
@@ -0,0 +1,18 @@
1/*
2 * hed.h - ACPI Hardware Error Device
3 *
4 * Copyright (C) 2009, Intel Corp.
5 * Author: Huang Ying <ying.huang@intel.com>
6 *
7 * This file is released under the GPLv2.
8 */
9
10#ifndef ACPI_HED_H
11#define ACPI_HED_H
12
13#include <linux/notifier.h>
14
15int register_acpi_hed_notifier(struct notifier_block *nb);
16void unregister_acpi_hed_notifier(struct notifier_block *nb);
17
18#endif
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 86825ddbe14e..da565a48240e 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -52,17 +52,6 @@ struct acpi_power_register {
52 u64 address; 52 u64 address;
53} __attribute__ ((packed)); 53} __attribute__ ((packed));
54 54
55struct acpi_processor_cx_policy {
56 u32 count;
57 struct acpi_processor_cx *state;
58 struct {
59 u32 time;
60 u32 ticks;
61 u32 count;
62 u32 bm;
63 } threshold;
64};
65
66struct acpi_processor_cx { 55struct acpi_processor_cx {
67 u8 valid; 56 u8 valid;
68 u8 type; 57 u8 type;
@@ -74,8 +63,6 @@ struct acpi_processor_cx {
74 u32 power; 63 u32 power;
75 u32 usage; 64 u32 usage;
76 u64 time; 65 u64 time;
77 struct acpi_processor_cx_policy promotion;
78 struct acpi_processor_cx_policy demotion;
79 char desc[ACPI_CX_DESC_LEN]; 66 char desc[ACPI_CX_DESC_LEN];
80}; 67};
81 68
diff --git a/include/acpi/video.h b/include/acpi/video.h
index cf7be3dd157b..551793c9b6e8 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -1,12 +1,28 @@
1#ifndef __ACPI_VIDEO_H 1#ifndef __ACPI_VIDEO_H
2#define __ACPI_VIDEO_H 2#define __ACPI_VIDEO_H
3 3
4#define ACPI_VIDEO_DISPLAY_CRT 1
5#define ACPI_VIDEO_DISPLAY_TV 2
6#define ACPI_VIDEO_DISPLAY_DVI 3
7#define ACPI_VIDEO_DISPLAY_LCD 4
8
9#define ACPI_VIDEO_DISPLAY_LEGACY_MONITOR 0x0100
10#define ACPI_VIDEO_DISPLAY_LEGACY_PANEL 0x0110
11#define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200
12
4#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE) 13#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
5extern int acpi_video_register(void); 14extern int acpi_video_register(void);
6extern void acpi_video_unregister(void); 15extern void acpi_video_unregister(void);
16extern int acpi_video_get_edid(struct acpi_device *device, int type,
17 int device_id, void **edid);
7#else 18#else
8static inline int acpi_video_register(void) { return 0; } 19static inline int acpi_video_register(void) { return 0; }
9static inline void acpi_video_unregister(void) { return; } 20static inline void acpi_video_unregister(void) { return; }
21static inline int acpi_video_get_edid(struct acpi_device *device, int type,
22 int device_id, void **edid)
23{
24 return -ENODEV;
25}
10#endif 26#endif
11 27
12#endif 28#endif
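
acpi_video_get_edid() is the only functional addition in the video.h hunk. A hedged usage sketch, assuming the return value is the EDID length on success and that a device_id of -1 selects any matching output:

#include <linux/acpi.h>
#include <acpi/video.h>

/* Sketch: @adev and the -1 device id are illustrative assumptions. */
static int example_fetch_lcd_edid(struct acpi_device *adev, void **edid)
{
        int len;

        len = acpi_video_get_edid(adev, ACPI_VIDEO_DISPLAY_LCD, -1, edid);
        if (len < 0)
                return len;     /* no EDID available through ACPI */

        /* *edid now points at @len bytes of EDID data */
        return len;
}
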
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 69206957b72c..0c80bb38773f 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -123,15 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
123 size_t size, 123 size_t size,
124 enum dma_data_direction dir) 124 enum dma_data_direction dir)
125{ 125{
126 struct dma_map_ops *ops = get_dma_ops(dev); 126 dma_sync_single_for_cpu(dev, addr + offset, size, dir);
127
128 BUG_ON(!valid_dma_direction(dir));
129 if (ops->sync_single_range_for_cpu) {
130 ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
131 debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
132
133 } else
134 dma_sync_single_for_cpu(dev, addr + offset, size, dir);
135} 127}
136 128
137static inline void dma_sync_single_range_for_device(struct device *dev, 129static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -140,15 +132,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
140 size_t size, 132 size_t size,
141 enum dma_data_direction dir) 133 enum dma_data_direction dir)
142{ 134{
143 struct dma_map_ops *ops = get_dma_ops(dev); 135 dma_sync_single_for_device(dev, addr + offset, size, dir);
144
145 BUG_ON(!valid_dma_direction(dir));
146 if (ops->sync_single_range_for_device) {
147 ops->sync_single_range_for_device(dev, addr, offset, size, dir);
148 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
149
150 } else
151 dma_sync_single_for_device(dev, addr + offset, size, dir);
152} 136}
153 137
154static inline void 138static inline void
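
After the simplification above, the ranged sync helpers are plain wrappers that add the offset and forward to the single-buffer variants. A typical caller, sketched with made-up device and mapping parameters:

#include <linux/dma-mapping.h>

/* Sketch: sync a sub-range of a streaming mapping before the CPU reads
 * it, then hand it back to the device.  @dev, @handle, @offset and @len
 * are assumptions for illustration. */
static void example_peek_at_rx_buffer(struct device *dev, dma_addr_t handle,
                                      unsigned long offset, size_t len)
{
        dma_sync_single_range_for_cpu(dev, handle, offset, len,
                                      DMA_FROM_DEVICE);
        /* ... CPU may now read the freshly DMA'd bytes at @offset ... */
        dma_sync_single_range_for_device(dev, handle, offset, len,
                                         DMA_FROM_DEVICE);
}
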
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 979c6a57f2f1..4f3d75e1ad39 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -60,7 +60,9 @@ struct module;
60 * @names: if set, must be an array of strings to use as alternative 60 * @names: if set, must be an array of strings to use as alternative
61 * names for the GPIOs in this chip. Any entry in the array 61 * names for the GPIOs in this chip. Any entry in the array
62 * may be NULL if there is no alias for the GPIO, however the 62 * may be NULL if there is no alias for the GPIO, however the
63 * array must be @ngpio entries long. 63 * array must be @ngpio entries long. A name can include a single printk
64 * format specifier for an unsigned int. It is substituted by the actual
65 * number of the gpio.
64 * 66 *
65 * A gpio_chip can help platforms abstract various sources of GPIOs so 67 * A gpio_chip can help platforms abstract various sources of GPIOs so
66 * they can all be accessed through a common programing interface. 68 * they can all be accessed through a common programing interface.
@@ -88,6 +90,9 @@ struct gpio_chip {
88 unsigned offset); 90 unsigned offset);
89 int (*direction_output)(struct gpio_chip *chip, 91 int (*direction_output)(struct gpio_chip *chip,
90 unsigned offset, int value); 92 unsigned offset, int value);
93 int (*set_debounce)(struct gpio_chip *chip,
94 unsigned offset, unsigned debounce);
95
91 void (*set)(struct gpio_chip *chip, 96 void (*set)(struct gpio_chip *chip,
92 unsigned offset, int value); 97 unsigned offset, int value);
93 98
@@ -98,7 +103,7 @@ struct gpio_chip {
98 struct gpio_chip *chip); 103 struct gpio_chip *chip);
99 int base; 104 int base;
100 u16 ngpio; 105 u16 ngpio;
101 char **names; 106 const char *const *names;
102 unsigned can_sleep:1; 107 unsigned can_sleep:1;
103 unsigned exported:1; 108 unsigned exported:1;
104}; 109};
@@ -121,6 +126,8 @@ extern void gpio_free(unsigned gpio);
121extern int gpio_direction_input(unsigned gpio); 126extern int gpio_direction_input(unsigned gpio);
122extern int gpio_direction_output(unsigned gpio, int value); 127extern int gpio_direction_output(unsigned gpio, int value);
123 128
129extern int gpio_set_debounce(unsigned gpio, unsigned debounce);
130
124extern int gpio_get_value_cansleep(unsigned gpio); 131extern int gpio_get_value_cansleep(unsigned gpio);
125extern void gpio_set_value_cansleep(unsigned gpio, int value); 132extern void gpio_set_value_cansleep(unsigned gpio, int value);
126 133
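
gpio.h grows a per-chip ->set_debounce() hook and a matching gpio_set_debounce() consumer call. A sketch of how board or driver code might use it; the GPIO number, label and 5 ms value are illustrative, and the interval is assumed to be in microseconds:

#include <linux/gpio.h>
#include <linux/kernel.h>

static int example_setup_button(unsigned gpio)
{
        int err;

        err = gpio_request(gpio, "wakeup-button");
        if (err)
                return err;

        gpio_direction_input(gpio);

        /* fall back gracefully if the gpio_chip lacks ->set_debounce */
        if (gpio_set_debounce(gpio, 5000))
                pr_warning("gpio %u: hardware debounce unavailable\n", gpio);

        return 0;
}
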
diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h
index 8b9454496a7c..5de07355fad4 100644
--- a/include/asm-generic/scatterlist.h
+++ b/include/asm-generic/scatterlist.h
@@ -11,7 +11,9 @@ struct scatterlist {
11 unsigned int offset; 11 unsigned int offset;
12 unsigned int length; 12 unsigned int length;
13 dma_addr_t dma_address; 13 dma_addr_t dma_address;
14#ifdef CONFIG_NEED_SG_DMA_LENGTH
14 unsigned int dma_length; 15 unsigned int dma_length;
16#endif
15}; 17};
16 18
17/* 19/*
@@ -22,22 +24,11 @@ struct scatterlist {
22 * is 0. 24 * is 0.
23 */ 25 */
24#define sg_dma_address(sg) ((sg)->dma_address) 26#define sg_dma_address(sg) ((sg)->dma_address)
25#ifndef sg_dma_len 27
26/* 28#ifdef CONFIG_NEED_SG_DMA_LENGTH
27 * Normally, you have an iommu on 64 bit machines, but not on 32 bit
28 * machines. Architectures that are differnt should override this.
29 */
30#if __BITS_PER_LONG == 64
31#define sg_dma_len(sg) ((sg)->dma_length) 29#define sg_dma_len(sg) ((sg)->dma_length)
32#else 30#else
33#define sg_dma_len(sg) ((sg)->length) 31#define sg_dma_len(sg) ((sg)->length)
34#endif /* 64 bit */
35#endif /* sg_dma_len */
36
37#ifndef ISA_DMA_THRESHOLD
38#define ISA_DMA_THRESHOLD (~0UL)
39#endif 32#endif
40 33
41#define ARCH_HAS_SG_CHAIN
42
43#endif /* __ASM_GENERIC_SCATTERLIST_H */ 34#endif /* __ASM_GENERIC_SCATTERLIST_H */
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 510df36dd5d4..fd60700503c8 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -34,6 +34,9 @@
34#ifndef cpu_to_node 34#ifndef cpu_to_node
35#define cpu_to_node(cpu) ((void)(cpu),0) 35#define cpu_to_node(cpu) ((void)(cpu),0)
36#endif 36#endif
37#ifndef cpu_to_mem
38#define cpu_to_mem(cpu) ((void)(cpu),0)
39#endif
37#ifndef parent_node 40#ifndef parent_node
38#define parent_node(node) ((void)(node),0) 41#define parent_node(node) ((void)(node),0)
39#endif 42#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 67e652068e0e..ef779c6fc3d7 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -247,10 +247,10 @@
247 } \ 247 } \
248 \ 248 \
249 /* RapidIO route ops */ \ 249 /* RapidIO route ops */ \
250 .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \ 250 .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \
251 VMLINUX_SYMBOL(__start_rio_route_ops) = .; \ 251 VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \
252 *(.rio_route_ops) \ 252 *(.rio_switch_ops) \
253 VMLINUX_SYMBOL(__end_rio_route_ops) = .; \ 253 VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \
254 } \ 254 } \
255 \ 255 \
256 TRACEDATA \ 256 TRACEDATA \
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 3da73f5f0ae9..224a38c960d4 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -248,11 +248,12 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
248int acpi_check_mem_region(resource_size_t start, resource_size_t n, 248int acpi_check_mem_region(resource_size_t start, resource_size_t n,
249 const char *name); 249 const char *name);
250 250
251int acpi_resources_are_enforced(void);
252
251#ifdef CONFIG_PM_SLEEP 253#ifdef CONFIG_PM_SLEEP
252void __init acpi_no_s4_hw_signature(void); 254void __init acpi_no_s4_hw_signature(void);
253void __init acpi_old_suspend_ordering(void); 255void __init acpi_old_suspend_ordering(void);
254void __init acpi_s4_no_nvs(void); 256void __init acpi_s4_no_nvs(void);
255void __init acpi_set_sci_en_on_resume(void);
256#endif /* CONFIG_PM_SLEEP */ 257#endif /* CONFIG_PM_SLEEP */
257 258
258struct acpi_osc_context { 259struct acpi_osc_context {
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 811dbb369379..7a8db4155281 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -212,6 +212,8 @@ extern void kick_iocb(struct kiocb *iocb);
212extern int aio_complete(struct kiocb *iocb, long res, long res2); 212extern int aio_complete(struct kiocb *iocb, long res, long res2);
213struct mm_struct; 213struct mm_struct;
214extern void exit_aio(struct mm_struct *mm); 214extern void exit_aio(struct mm_struct *mm);
215extern long do_io_submit(aio_context_t ctx_id, long nr,
216 struct iocb __user *__user *iocbpp, bool compat);
215#else 217#else
216static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; } 218static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
217static inline int aio_put_req(struct kiocb *iocb) { return 0; } 219static inline int aio_put_req(struct kiocb *iocb) { return 0; }
@@ -219,6 +221,9 @@ static inline void kick_iocb(struct kiocb *iocb) { }
219static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; } 221static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
220struct mm_struct; 222struct mm_struct;
221static inline void exit_aio(struct mm_struct *mm) { } 223static inline void exit_aio(struct mm_struct *mm) { }
224static inline long do_io_submit(aio_context_t ctx_id, long nr,
225 struct iocb __user * __user *iocbpp,
226 bool compat) { return 0; }
222#endif /* CONFIG_AIO */ 227#endif /* CONFIG_AIO */
223 228
224static inline struct kiocb *list_kiocb(struct list_head *h) 229static inline struct kiocb *list_kiocb(struct list_head *h)
diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h
new file mode 100644
index 000000000000..cbee7de7dd36
--- /dev/null
+++ b/include/linux/amba/pl330.h
@@ -0,0 +1,45 @@
1/* linux/include/linux/amba/pl330.h
2 *
3 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __AMBA_PL330_H_
13#define __AMBA_PL330_H_
14
15#include <asm/hardware/pl330.h>
16
17struct dma_pl330_peri {
18 /*
 19 * Peri_Req interface of the DMAC through
 20 * which the peripheral can be reached.
21 */
22 u8 peri_id; /* {0, 31} */
23 enum pl330_reqtype rqtype;
24
25 /* For M->D and D->M Channels */
26 int burst_sz; /* in power of 2 */
27 dma_addr_t fifo_addr;
28};
29
30struct dma_pl330_platdata {
31 /*
32 * Number of valid peripherals connected to DMAC.
33 * This may be different from the value read from
34 * CR0, as the PL330 implementation might have 'holes'
35 * in the peri list or the peri could also be reached
36 * from another DMAC which the platform prefers.
37 */
38 u8 nr_valid_peri;
39 /* Array of valid peripherals */
40 struct dma_pl330_peri *peri;
41 /* Bytes to allocate for MC buffer */
42 unsigned mcbuf_sz;
43};
44
45#endif /* __AMBA_PL330_H_ */
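A hedged sketch of how a platform might describe one peripheral to the PL330 driver. The peri_id, FIFO address and sizes below are illustrative, and the rqtype constant is assumed to come from <asm/hardware/pl330.h>:

#include <linux/amba/pl330.h>
#include <linux/kernel.h>

static struct dma_pl330_peri example_peri[] = {
	{
		.peri_id   = 4,			/* Peri_Req line the device is wired to */
		.rqtype    = MEMTODEV,		/* memory-to-device transfers */
		.burst_sz  = 3,			/* power of 2: 8-byte bursts */
		.fifo_addr = 0x12340000,	/* device FIFO address, illustrative */
	},
};

static struct dma_pl330_platdata example_pl330_pdata = {
	.nr_valid_peri = ARRAY_SIZE(example_peri),
	.peri          = example_peri,
	.mcbuf_sz      = 4096,			/* microcode buffer per channel */
};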
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 16ed0284d780..1b9ba193b789 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -203,6 +203,9 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
203int block_read_full_page(struct page*, get_block_t*); 203int block_read_full_page(struct page*, get_block_t*);
204int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, 204int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
205 unsigned long from); 205 unsigned long from);
206int block_write_begin_newtrunc(struct file *, struct address_space *,
207 loff_t, unsigned, unsigned,
208 struct page **, void **, get_block_t*);
206int block_write_begin(struct file *, struct address_space *, 209int block_write_begin(struct file *, struct address_space *,
207 loff_t, unsigned, unsigned, 210 loff_t, unsigned, unsigned,
208 struct page **, void **, get_block_t*); 211 struct page **, void **, get_block_t*);
@@ -214,6 +217,9 @@ int generic_write_end(struct file *, struct address_space *,
214 struct page *, void *); 217 struct page *, void *);
215void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); 218void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
216int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*); 219int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
220int cont_write_begin_newtrunc(struct file *, struct address_space *, loff_t,
221 unsigned, unsigned, struct page **, void **,
222 get_block_t *, loff_t *);
217int cont_write_begin(struct file *, struct address_space *, loff_t, 223int cont_write_begin(struct file *, struct address_space *, loff_t,
218 unsigned, unsigned, struct page **, void **, 224 unsigned, unsigned, struct page **, void **,
219 get_block_t *, loff_t *); 225 get_block_t *, loff_t *);
@@ -224,7 +230,10 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
224void block_sync_page(struct page *); 230void block_sync_page(struct page *);
225sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); 231sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
226int block_truncate_page(struct address_space *, loff_t, get_block_t *); 232int block_truncate_page(struct address_space *, loff_t, get_block_t *);
227int file_fsync(struct file *, struct dentry *, int); 233int file_fsync(struct file *, int);
234int nobh_write_begin_newtrunc(struct file *, struct address_space *,
235 loff_t, unsigned, unsigned,
236 struct page **, void **, get_block_t*);
228int nobh_write_begin(struct file *, struct address_space *, 237int nobh_write_begin(struct file *, struct address_space *,
229 loff_t, unsigned, unsigned, 238 loff_t, unsigned, unsigned,
230 struct page **, void **, get_block_t*); 239 struct page **, void **, get_block_t*);
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
index d53a67dff018..3c80fd7e8b56 100644
--- a/include/linux/byteorder/big_endian.h
+++ b/include/linux/byteorder/big_endian.h
@@ -7,9 +7,6 @@
7#ifndef __BIG_ENDIAN_BITFIELD 7#ifndef __BIG_ENDIAN_BITFIELD
8#define __BIG_ENDIAN_BITFIELD 8#define __BIG_ENDIAN_BITFIELD
9#endif 9#endif
10#ifndef __BYTE_ORDER
11#define __BYTE_ORDER __BIG_ENDIAN
12#endif
13 10
14#include <linux/types.h> 11#include <linux/types.h>
15#include <linux/swab.h> 12#include <linux/swab.h>
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
index f7f8ad13adb6..83195fb82962 100644
--- a/include/linux/byteorder/little_endian.h
+++ b/include/linux/byteorder/little_endian.h
@@ -7,9 +7,6 @@
7#ifndef __LITTLE_ENDIAN_BITFIELD 7#ifndef __LITTLE_ENDIAN_BITFIELD
8#define __LITTLE_ENDIAN_BITFIELD 8#define __LITTLE_ENDIAN_BITFIELD
9#endif 9#endif
10#ifndef __BYTE_ORDER
11#define __BYTE_ORDER __LITTLE_ENDIAN
12#endif
13 10
14#include <linux/types.h> 11#include <linux/types.h>
15#include <linux/swab.h> 12#include <linux/swab.h>
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 8f78073d7caa..0c621604baa1 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -397,7 +397,7 @@ struct cftype {
397 * This callback must be implemented, if you want provide 397 * This callback must be implemented, if you want provide
398 * notification functionality. 398 * notification functionality.
399 */ 399 */
400 int (*unregister_event)(struct cgroup *cgrp, struct cftype *cft, 400 void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
401 struct eventfd_ctx *eventfd); 401 struct eventfd_ctx *eventfd);
402}; 402};
403 403
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 717c691ecd8e..168f7daa7bde 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -356,5 +356,9 @@ asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
356asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename, 356asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
357 int flags, int mode); 357 int flags, int mode);
358 358
359extern ssize_t compat_rw_copy_check_uvector(int type,
360 const struct compat_iovec __user *uvector, unsigned long nr_segs,
361 unsigned long fast_segs, struct iovec *fast_pointer,
362 struct iovec **ret_pointer);
359#endif /* CONFIG_COMPAT */ 363#endif /* CONFIG_COMPAT */
360#endif /* _LINUX_COMPAT_H */ 364#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 4a6b604ef7e4..51e3145196f6 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -83,6 +83,8 @@ extern unsigned long wait_for_completion_timeout(struct completion *x,
83 unsigned long timeout); 83 unsigned long timeout);
84extern unsigned long wait_for_completion_interruptible_timeout( 84extern unsigned long wait_for_completion_interruptible_timeout(
85 struct completion *x, unsigned long timeout); 85 struct completion *x, unsigned long timeout);
86extern unsigned long wait_for_completion_killable_timeout(
87 struct completion *x, unsigned long timeout);
86extern bool try_wait_for_completion(struct completion *x); 88extern bool try_wait_for_completion(struct completion *x);
87extern bool completion_done(struct completion *x); 89extern bool completion_done(struct completion *x);
88 90
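wait_for_completion_killable_timeout() combines the killable and timeout variants: only fatal signals interrupt the sleep. A minimal caller sketch, assuming a 500 ms budget:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_wait(struct completion *done)
{
	long left;

	left = wait_for_completion_killable_timeout(done, msecs_to_jiffies(500));
	if (left < 0)
		return left;		/* fatal signal pending, e.g. -ERESTARTSYS */
	if (left == 0)
		return -ETIMEDOUT;	/* timed out */
	return 0;			/* completed with 'left' jiffies to spare */
}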
diff --git a/include/linux/cper.h b/include/linux/cper.h
new file mode 100644
index 000000000000..4b38f905b705
--- /dev/null
+++ b/include/linux/cper.h
@@ -0,0 +1,314 @@
1/*
2 * UEFI Common Platform Error Record
3 *
4 * Copyright (C) 2010, Intel Corp.
5 * Author: Huang Ying <ying.huang@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef LINUX_CPER_H
22#define LINUX_CPER_H
23
24#include <linux/uuid.h>
25
26/* CPER record signature and the size */
27#define CPER_SIG_RECORD "CPER"
28#define CPER_SIG_SIZE 4
29/* Used in signature_end field in struct cper_record_header */
30#define CPER_SIG_END 0xffffffff
31
32/*
33 * CPER record header revision, used in revision field in struct
34 * cper_record_header
35 */
36#define CPER_RECORD_REV 0x0100
37
38/*
 39 * Severity definition for error_severity in struct cper_record_header
40 * and section_severity in struct cper_section_descriptor
41 */
42#define CPER_SER_RECOVERABLE 0x0
43#define CPER_SER_FATAL 0x1
44#define CPER_SER_CORRECTED 0x2
45#define CPER_SER_INFORMATIONAL 0x3
46
47/*
 48 * Validation bits definition for validation_bits in struct
49 * cper_record_header. If set, corresponding fields in struct
50 * cper_record_header contain valid information.
51 *
52 * corresponds platform_id
53 */
54#define CPER_VALID_PLATFORM_ID 0x0001
55/* corresponds timestamp */
56#define CPER_VALID_TIMESTAMP 0x0002
57/* corresponds partition_id */
58#define CPER_VALID_PARTITION_ID 0x0004
59
60/*
61 * Notification type used to generate error record, used in
62 * notification_type in struct cper_record_header
63 *
64 * Corrected Machine Check
65 */
66#define CPER_NOTIFY_CMC \
67 UUID_LE(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
68 0xEB, 0xD4, 0xF8, 0x90)
69/* Corrected Platform Error */
70#define CPER_NOTIFY_CPE \
71 UUID_LE(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \
72 0xF2, 0x7E, 0xBE, 0xEE)
73/* Machine Check Exception */
74#define CPER_NOTIFY_MCE \
75 UUID_LE(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
76 0xE1, 0x49, 0x13, 0xBB)
77/* PCI Express Error */
78#define CPER_NOTIFY_PCIE \
79 UUID_LE(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \
80 0xAF, 0x67, 0xC1, 0x04)
81/* INIT Record (for IPF) */
82#define CPER_NOTIFY_INIT \
83 UUID_LE(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \
84 0xD3, 0x9B, 0xC9, 0x8E)
85/* Non-Maskable Interrupt */
86#define CPER_NOTIFY_NMI \
87 UUID_LE(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \
88 0x85, 0xD6, 0xE9, 0x8A)
89/* BOOT Error Record */
90#define CPER_NOTIFY_BOOT \
91 UUID_LE(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
92 0xD4, 0x64, 0xB3, 0x8F)
93/* DMA Remapping Error */
94#define CPER_NOTIFY_DMAR \
95 UUID_LE(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
96 0x72, 0x2D, 0xEB, 0x41)
97
98/*
99 * Flags bits definitions for flags in struct cper_record_header
100 * If set, the error has been recovered
101 */
102#define CPER_HW_ERROR_FLAGS_RECOVERED 0x1
103/* If set, the error is for previous boot */
104#define CPER_HW_ERROR_FLAGS_PREVERR 0x2
105/* If set, the error is injected for testing */
106#define CPER_HW_ERROR_FLAGS_SIMULATED 0x4
107
108/*
109 * CPER section header revision, used in revision field in struct
110 * cper_section_descriptor
111 */
112#define CPER_SEC_REV 0x0100
113
114/*
 115 * Validation bits definition for validation_bits in struct
116 * cper_section_descriptor. If set, corresponding fields in struct
117 * cper_section_descriptor contain valid information.
118 *
119 * corresponds fru_id
120 */
121#define CPER_SEC_VALID_FRU_ID 0x1
122/* corresponds fru_text */
123#define CPER_SEC_VALID_FRU_TEXT 0x2
124
125/*
126 * Flags bits definitions for flags in struct cper_section_descriptor
127 *
128 * If set, the section is associated with the error condition
129 * directly, and should be focused on
130 */
131#define CPER_SEC_PRIMARY 0x0001
132/*
133 * If set, the error was not contained within the processor or memory
134 * hierarchy and the error may have propagated to persistent storage
135 * or network
136 */
137#define CPER_SEC_CONTAINMENT_WARNING 0x0002
138/* If set, the component must be re-initialized or re-enabled prior to use */
139#define CPER_SEC_RESET 0x0004
140/* If set, Linux may choose to discontinue use of the resource */
141#define CPER_SEC_ERROR_THRESHOLD_EXCEEDED 0x0008
142/*
143 * If set, resource could not be queried for error information due to
144 * conflicts with other system software or resources. Some fields of
145 * the section will be invalid
146 */
147#define CPER_SEC_RESOURCE_NOT_ACCESSIBLE 0x0010
148/*
149 * If set, action has been taken to ensure error containment (such as
150 * poisoning data), but the error has not been fully corrected and the
151 * data has not been consumed. Linux may choose to take further
152 * corrective action before the data is consumed
153 */
154#define CPER_SEC_LATENT_ERROR 0x0020
155
156/*
157 * Section type definitions, used in section_type field in struct
158 * cper_section_descriptor
159 *
160 * Processor Generic
161 */
162#define CPER_SEC_PROC_GENERIC \
163 UUID_LE(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \
164 0x93, 0xC4, 0xF3, 0xDB)
165/* Processor Specific: X86/X86_64 */
166#define CPER_SEC_PROC_IA \
167 UUID_LE(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
168 0x24, 0x2B, 0x6E, 0x1D)
169/* Processor Specific: IA64 */
170#define CPER_SEC_PROC_IPF \
171 UUID_LE(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \
172 0x80, 0xC7, 0x3C, 0x88, 0x81)
173/* Platform Memory */
174#define CPER_SEC_PLATFORM_MEM \
175 UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
176 0xED, 0x7C, 0x83, 0xB1)
177#define CPER_SEC_PCIE \
178 UUID_LE(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \
179 0xCB, 0x3C, 0x6F, 0x35)
180/* Firmware Error Record Reference */
181#define CPER_SEC_FW_ERR_REC_REF \
182 UUID_LE(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \
183 0x9C, 0x8E, 0x69, 0xED)
184/* PCI/PCI-X Bus */
185#define CPER_SEC_PCI_X_BUS \
186 UUID_LE(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \
187 0xD3, 0xF9, 0xC9, 0xDD)
188/* PCI Component/Device */
189#define CPER_SEC_PCI_DEV \
190 UUID_LE(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \
191 0x8B, 0x00, 0x13, 0x26)
192#define CPER_SEC_DMAR_GENERIC \
193 UUID_LE(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \
194 0xDE, 0x3E, 0x2C, 0x64)
195/* Intel VT for Directed I/O specific DMAr */
196#define CPER_SEC_DMAR_VT \
197 UUID_LE(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \
198 0xDD, 0x93, 0xE8, 0xCF)
199/* IOMMU specific DMAr */
200#define CPER_SEC_DMAR_IOMMU \
201 UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \
202 0xDF, 0xAA, 0x84, 0xEC)
203
204/*
205 * All tables and structs must be byte-packed to match CPER
206 * specification, since the tables are provided by the system BIOS
207 */
208#pragma pack(1)
209
210struct cper_record_header {
211 char signature[CPER_SIG_SIZE]; /* must be CPER_SIG_RECORD */
212 __u16 revision; /* must be CPER_RECORD_REV */
213 __u32 signature_end; /* must be CPER_SIG_END */
214 __u16 section_count;
215 __u32 error_severity;
216 __u32 validation_bits;
217 __u32 record_length;
218 __u64 timestamp;
219 uuid_le platform_id;
220 uuid_le partition_id;
221 uuid_le creator_id;
222 uuid_le notification_type;
223 __u64 record_id;
224 __u32 flags;
225 __u64 persistence_information;
226 __u8 reserved[12]; /* must be zero */
227};
228
229struct cper_section_descriptor {
230 __u32 section_offset; /* Offset in bytes of the
231 * section body from the base
232 * of the record header */
233 __u32 section_length;
234 __u16 revision; /* must be CPER_RECORD_REV */
235 __u8 validation_bits;
236 __u8 reserved; /* must be zero */
237 __u32 flags;
238 uuid_le section_type;
239 uuid_le fru_id;
240 __u32 section_severity;
241 __u8 fru_text[20];
242};
243
244/* Generic Processor Error Section */
245struct cper_sec_proc_generic {
246 __u64 validation_bits;
247 __u8 proc_type;
248 __u8 proc_isa;
249 __u8 proc_error_type;
250 __u8 operation;
251 __u8 flags;
252 __u8 level;
253 __u16 reserved;
254 __u64 cpu_version;
255 char cpu_brand[128];
256 __u64 proc_id;
257 __u64 target_addr;
258 __u64 requestor_id;
259 __u64 responder_id;
260 __u64 ip;
261};
262
263/* IA32/X64 Processor Error Section */
264struct cper_sec_proc_ia {
265 __u64 validation_bits;
266 __u8 lapic_id;
267 __u8 cpuid[48];
268};
269
 270/* IA32/X64 Processor Error Information Structure */
271struct cper_ia_err_info {
272 uuid_le err_type;
273 __u64 validation_bits;
274 __u64 check_info;
275 __u64 target_id;
276 __u64 requestor_id;
277 __u64 responder_id;
278 __u64 ip;
279};
280
281/* IA32/X64 Processor Context Information Structure */
282struct cper_ia_proc_ctx {
283 __u16 reg_ctx_type;
284 __u16 reg_arr_size;
285 __u32 msr_addr;
286 __u64 mm_reg_addr;
287};
288
289/* Memory Error Section */
290struct cper_sec_mem_err {
291 __u64 validation_bits;
292 __u64 error_status;
293 __u64 physical_addr;
294 __u64 physical_addr_mask;
295 __u16 node;
296 __u16 card;
297 __u16 module;
298 __u16 bank;
299 __u16 device;
300 __u16 row;
301 __u16 column;
302 __u16 bit_pos;
303 __u64 requestor_id;
304 __u64 responder_id;
305 __u64 target_id;
306 __u8 error_type;
307};
308
309/* Reset to default packing */
310#pragma pack()
311
312u64 cper_next_record_id(void);
313
314#endif
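A hedged consumer sketch for the new header: validate the record signature and walk its section descriptors. The layout assumption (descriptors packed right after the record header, bodies at section_offset from the start of the record) follows the UEFI CPER format these structures mirror:

#include <linux/cper.h>
#include <linux/string.h>

static void example_walk_cper(const void *record)
{
	const struct cper_record_header *hdr = record;
	const struct cper_section_descriptor *sec;
	int i;

	if (memcmp(hdr->signature, CPER_SIG_RECORD, CPER_SIG_SIZE) ||
	    hdr->signature_end != CPER_SIG_END)
		return;

	sec = (const struct cper_section_descriptor *)(hdr + 1);
	for (i = 0; i < hdr->section_count; i++, sec++) {
		const void *body = (const char *)record + sec->section_offset;

		/* dispatch on sec->section_type, e.g. CPER_SEC_PLATFORM_MEM */
		(void)body;
	}
}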
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index dcf77fa826b5..55215cce5005 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -125,6 +125,7 @@ struct cpuidle_driver {
125#ifdef CONFIG_CPU_IDLE 125#ifdef CONFIG_CPU_IDLE
126 126
127extern int cpuidle_register_driver(struct cpuidle_driver *drv); 127extern int cpuidle_register_driver(struct cpuidle_driver *drv);
128struct cpuidle_driver *cpuidle_get_driver(void);
128extern void cpuidle_unregister_driver(struct cpuidle_driver *drv); 129extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
129extern int cpuidle_register_device(struct cpuidle_device *dev); 130extern int cpuidle_register_device(struct cpuidle_device *dev);
130extern void cpuidle_unregister_device(struct cpuidle_device *dev); 131extern void cpuidle_unregister_device(struct cpuidle_device *dev);
@@ -137,16 +138,17 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
137#else 138#else
138 139
139static inline int cpuidle_register_driver(struct cpuidle_driver *drv) 140static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
140{return 0;} 141{return -ENODEV; }
142static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
141static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { } 143static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
142static inline int cpuidle_register_device(struct cpuidle_device *dev) 144static inline int cpuidle_register_device(struct cpuidle_device *dev)
143{return 0;} 145{return -ENODEV; }
144static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { } 146static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
145 147
146static inline void cpuidle_pause_and_lock(void) { } 148static inline void cpuidle_pause_and_lock(void) { }
147static inline void cpuidle_resume_and_unlock(void) { } 149static inline void cpuidle_resume_and_unlock(void) { }
148static inline int cpuidle_enable_device(struct cpuidle_device *dev) 150static inline int cpuidle_enable_device(struct cpuidle_device *dev)
149{return 0;} 151{return -ENODEV; }
150static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } 152static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
151 153
152#endif 154#endif
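With CONFIG_CPU_IDLE disabled the stubs now return -ENODEV instead of pretending to succeed, and cpuidle_get_driver() exposes the active driver. A hedged registration sketch for a hypothetical driver:

#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static struct cpuidle_driver example_idle_driver = {
	.name  = "example_idle",
	.owner = THIS_MODULE,
};

static int __init example_idle_init(void)
{
	int ret = cpuidle_register_driver(&example_idle_driver);

	if (ret)	/* -ENODEV when cpuidle is compiled out, or a real failure */
		return ret;

	pr_info("cpuidle driver in use: %s\n", cpuidle_get_driver()->name);
	return 0;
}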
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 20b51cab6593..457ed765a116 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -69,6 +69,7 @@ extern void cpuset_task_status_allowed(struct seq_file *m,
69 struct task_struct *task); 69 struct task_struct *task);
70 70
71extern int cpuset_mem_spread_node(void); 71extern int cpuset_mem_spread_node(void);
72extern int cpuset_slab_spread_node(void);
72 73
73static inline int cpuset_do_page_mem_spread(void) 74static inline int cpuset_do_page_mem_spread(void)
74{ 75{
@@ -194,6 +195,11 @@ static inline int cpuset_mem_spread_node(void)
194 return 0; 195 return 0;
195} 196}
196 197
198static inline int cpuset_slab_spread_node(void)
199{
200 return 0;
201}
202
197static inline int cpuset_do_page_mem_spread(void) 203static inline int cpuset_do_page_mem_spread(void)
198{ 204{
199 return 0; 205 return 0;
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 52507c3e1387..75c0fa881308 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -156,7 +156,6 @@ extern int copy_creds(struct task_struct *, unsigned long);
156extern struct cred *cred_alloc_blank(void); 156extern struct cred *cred_alloc_blank(void);
157extern struct cred *prepare_creds(void); 157extern struct cred *prepare_creds(void);
158extern struct cred *prepare_exec_creds(void); 158extern struct cred *prepare_exec_creds(void);
159extern struct cred *prepare_usermodehelper_creds(void);
160extern int commit_creds(struct cred *); 159extern int commit_creds(struct cred *);
161extern void abort_creds(struct cred *); 160extern void abort_creds(struct cred *);
162extern const struct cred *override_creds(const struct cred *); 161extern const struct cred *override_creds(const struct cred *);
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index fc1b930f246c..e7d9b20ddc5b 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -63,6 +63,8 @@ struct dentry *debugfs_create_x16(const char *name, mode_t mode,
63 struct dentry *parent, u16 *value); 63 struct dentry *parent, u16 *value);
64struct dentry *debugfs_create_x32(const char *name, mode_t mode, 64struct dentry *debugfs_create_x32(const char *name, mode_t mode,
65 struct dentry *parent, u32 *value); 65 struct dentry *parent, u32 *value);
66struct dentry *debugfs_create_x64(const char *name, mode_t mode,
67 struct dentry *parent, u64 *value);
66struct dentry *debugfs_create_size_t(const char *name, mode_t mode, 68struct dentry *debugfs_create_size_t(const char *name, mode_t mode,
67 struct dentry *parent, size_t *value); 69 struct dentry *parent, size_t *value);
68struct dentry *debugfs_create_bool(const char *name, mode_t mode, 70struct dentry *debugfs_create_bool(const char *name, mode_t mode,
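debugfs_create_x64() rounds out the x8/x16/x32 family for 64-bit values shown in hex. A short sketch (file name, mode and backing variable are illustrative):

#include <linux/debugfs.h>
#include <linux/types.h>

static u64 example_last_dma_addr;

static void example_debugfs_init(struct dentry *parent)
{
	/* read/write hex view of a 64-bit counter or address */
	debugfs_create_x64("last_dma_addr", 0644, parent, &example_last_dma_addr);
}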
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ca32ed78b057..89b7e1a605b8 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -40,16 +40,6 @@ struct dma_map_ops {
40 void (*sync_single_for_device)(struct device *dev, 40 void (*sync_single_for_device)(struct device *dev,
41 dma_addr_t dma_handle, size_t size, 41 dma_addr_t dma_handle, size_t size,
42 enum dma_data_direction dir); 42 enum dma_data_direction dir);
43 void (*sync_single_range_for_cpu)(struct device *dev,
44 dma_addr_t dma_handle,
45 unsigned long offset,
46 size_t size,
47 enum dma_data_direction dir);
48 void (*sync_single_range_for_device)(struct device *dev,
49 dma_addr_t dma_handle,
50 unsigned long offset,
51 size_t size,
52 enum dma_data_direction dir);
53 void (*sync_sg_for_cpu)(struct device *dev, 43 void (*sync_sg_for_cpu)(struct device *dev,
54 struct scatterlist *sg, int nents, 44 struct scatterlist *sg, int nents,
55 enum dma_data_direction dir); 45 enum dma_data_direction dir);
@@ -105,21 +95,6 @@ static inline int is_device_dma_capable(struct device *dev)
105#include <asm-generic/dma-mapping-broken.h> 95#include <asm-generic/dma-mapping-broken.h>
106#endif 96#endif
107 97
108/* for backwards compatibility, removed soon */
109static inline void __deprecated dma_sync_single(struct device *dev,
110 dma_addr_t addr, size_t size,
111 enum dma_data_direction dir)
112{
113 dma_sync_single_for_cpu(dev, addr, size, dir);
114}
115
116static inline void __deprecated dma_sync_sg(struct device *dev,
117 struct scatterlist *sg, int nelems,
118 enum dma_data_direction dir)
119{
120 dma_sync_sg_for_cpu(dev, sg, nelems, dir);
121}
122
123static inline u64 dma_get_mask(struct device *dev) 98static inline u64 dma_get_mask(struct device *dev)
124{ 99{
125 if (dev && dev->dma_mask && *dev->dma_mask) 100 if (dev && dev->dma_mask && *dev->dma_mask)
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 5f494b465097..7fc62d4550b2 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -868,7 +868,7 @@ extern int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
868extern void ext3_htree_free_dir_info(struct dir_private_info *p); 868extern void ext3_htree_free_dir_info(struct dir_private_info *p);
869 869
870/* fsync.c */ 870/* fsync.c */
871extern int ext3_sync_file (struct file *, struct dentry *, int); 871extern int ext3_sync_file(struct file *, int);
872 872
873/* hash.c */ 873/* hash.c */
874extern int ext3fs_dirhash(const char *name, int len, struct 874extern int ext3fs_dirhash(const char *name, int len, struct
diff --git a/include/linux/fb.h b/include/linux/fb.h
index f3793ebc241c..907ace3a64c8 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -4,8 +4,6 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/i2c.h> 5#include <linux/i2c.h>
6 6
7struct dentry;
8
9/* Definitions of frame buffers */ 7/* Definitions of frame buffers */
10 8
11#define FB_MAX 32 /* sufficient for now */ 9#define FB_MAX 32 /* sufficient for now */
@@ -1017,8 +1015,7 @@ extern void fb_deferred_io_open(struct fb_info *info,
1017 struct inode *inode, 1015 struct inode *inode,
1018 struct file *file); 1016 struct file *file);
1019extern void fb_deferred_io_cleanup(struct fb_info *info); 1017extern void fb_deferred_io_cleanup(struct fb_info *info);
1020extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, 1018extern int fb_deferred_io_fsync(struct file *file, int datasync);
1021 int datasync);
1022 1019
1023static inline bool fb_be_math(struct fb_info *info) 1020static inline bool fb_be_math(struct fb_info *info)
1024{ 1021{
diff --git a/include/linux/file.h b/include/linux/file.h
index 5555508fd517..b1e12970f617 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -11,7 +11,6 @@
11 11
12struct file; 12struct file;
13 13
14extern void __fput(struct file *);
15extern void fput(struct file *); 14extern void fput(struct file *);
16extern void drop_file_write_access(struct file *file); 15extern void drop_file_write_access(struct file *file);
17 16
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 4bd94bf5e739..72e2b8ac2a5a 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -55,13 +55,11 @@
55#define CSR_DESCRIPTOR 0x01 55#define CSR_DESCRIPTOR 0x01
56#define CSR_VENDOR 0x03 56#define CSR_VENDOR 0x03
57#define CSR_HARDWARE_VERSION 0x04 57#define CSR_HARDWARE_VERSION 0x04
58#define CSR_NODE_CAPABILITIES 0x0c
59#define CSR_UNIT 0x11 58#define CSR_UNIT 0x11
60#define CSR_SPECIFIER_ID 0x12 59#define CSR_SPECIFIER_ID 0x12
61#define CSR_VERSION 0x13 60#define CSR_VERSION 0x13
62#define CSR_DEPENDENT_INFO 0x14 61#define CSR_DEPENDENT_INFO 0x14
63#define CSR_MODEL 0x17 62#define CSR_MODEL 0x17
64#define CSR_INSTANCE 0x18
65#define CSR_DIRECTORY_ID 0x20 63#define CSR_DIRECTORY_ID 0x20
66 64
67struct fw_csr_iterator { 65struct fw_csr_iterator {
@@ -89,7 +87,6 @@ struct fw_card {
89 int current_tlabel; 87 int current_tlabel;
90 u64 tlabel_mask; 88 u64 tlabel_mask;
91 struct list_head transaction_list; 89 struct list_head transaction_list;
92 struct timer_list flush_timer;
93 unsigned long reset_jiffies; 90 unsigned long reset_jiffies;
94 91
95 unsigned long long guid; 92 unsigned long long guid;
@@ -290,6 +287,8 @@ struct fw_transaction {
290 int tlabel; 287 int tlabel;
291 int timestamp; 288 int timestamp;
292 struct list_head link; 289 struct list_head link;
290 struct fw_card *card;
291 struct timer_list split_timeout_timer;
293 292
294 struct fw_packet packet; 293 struct fw_packet packet;
295 294
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b336cb9ca9a0..3428393942a6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -954,6 +954,7 @@ extern spinlock_t files_lock;
954#define file_list_unlock() spin_unlock(&files_lock); 954#define file_list_unlock() spin_unlock(&files_lock);
955 955
956#define get_file(x) atomic_long_inc(&(x)->f_count) 956#define get_file(x) atomic_long_inc(&(x)->f_count)
957#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
957#define file_count(x) atomic_long_read(&(x)->f_count) 958#define file_count(x) atomic_long_read(&(x)->f_count)
958 959
959#ifdef CONFIG_DEBUG_WRITECOUNT 960#ifdef CONFIG_DEBUG_WRITECOUNT
@@ -1497,7 +1498,7 @@ struct file_operations {
1497 int (*open) (struct inode *, struct file *); 1498 int (*open) (struct inode *, struct file *);
1498 int (*flush) (struct file *, fl_owner_t id); 1499 int (*flush) (struct file *, fl_owner_t id);
1499 int (*release) (struct inode *, struct file *); 1500 int (*release) (struct inode *, struct file *);
1500 int (*fsync) (struct file *, struct dentry *, int datasync); 1501 int (*fsync) (struct file *, int datasync);
1501 int (*aio_fsync) (struct kiocb *, int datasync); 1502 int (*aio_fsync) (struct kiocb *, int datasync);
1502 int (*fasync) (int, struct file *, int); 1503 int (*fasync) (int, struct file *, int);
1503 int (*lock) (struct file *, int, struct file_lock *); 1504 int (*lock) (struct file *, int, struct file_lock *);
@@ -2212,7 +2213,7 @@ extern int generic_segment_checks(const struct iovec *iov,
2212/* fs/block_dev.c */ 2213/* fs/block_dev.c */
2213extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, 2214extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
2214 unsigned long nr_segs, loff_t pos); 2215 unsigned long nr_segs, loff_t pos);
2215extern int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync); 2216extern int blkdev_fsync(struct file *filp, int datasync);
2216 2217
2217/* fs/splice.c */ 2218/* fs/splice.c */
2218extern ssize_t generic_file_splice_read(struct file *, loff_t *, 2219extern ssize_t generic_file_splice_read(struct file *, loff_t *,
@@ -2228,6 +2229,7 @@ extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
2228 2229
2229extern void 2230extern void
2230file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); 2231file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
2232extern loff_t noop_llseek(struct file *file, loff_t offset, int origin);
2231extern loff_t no_llseek(struct file *file, loff_t offset, int origin); 2233extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
2232extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin); 2234extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
2233extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset, 2235extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset,
@@ -2250,10 +2252,19 @@ static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
2250#endif 2252#endif
2251 2253
2252#ifdef CONFIG_BLOCK 2254#ifdef CONFIG_BLOCK
2255struct bio;
2256typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
2257 loff_t file_offset);
2258void dio_end_io(struct bio *bio, int error);
2259
2260ssize_t __blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, struct inode *inode,
2261 struct block_device *bdev, const struct iovec *iov, loff_t offset,
2262 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
2263 dio_submit_t submit_io, int lock_type);
2253ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, 2264ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
2254 struct block_device *bdev, const struct iovec *iov, loff_t offset, 2265 struct block_device *bdev, const struct iovec *iov, loff_t offset,
2255 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, 2266 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
2256 int lock_type); 2267 dio_submit_t submit_io, int lock_type);
2257 2268
2258enum { 2269enum {
2259 /* need locking between buffered and direct access */ 2270 /* need locking between buffered and direct access */
@@ -2263,13 +2274,31 @@ enum {
2263 DIO_SKIP_HOLES = 0x02, 2274 DIO_SKIP_HOLES = 0x02,
2264}; 2275};
2265 2276
2277static inline ssize_t blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb,
2278 struct inode *inode, struct block_device *bdev, const struct iovec *iov,
2279 loff_t offset, unsigned long nr_segs, get_block_t get_block,
2280 dio_iodone_t end_io)
2281{
2282 return __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov, offset,
2283 nr_segs, get_block, end_io, NULL,
2284 DIO_LOCKING | DIO_SKIP_HOLES);
2285}
2286
2287static inline ssize_t blockdev_direct_IO_no_locking_newtrunc(int rw, struct kiocb *iocb,
2288 struct inode *inode, struct block_device *bdev, const struct iovec *iov,
2289 loff_t offset, unsigned long nr_segs, get_block_t get_block,
2290 dio_iodone_t end_io)
2291{
2292 return __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov, offset,
2293 nr_segs, get_block, end_io, NULL, 0);
2294}
2266static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, 2295static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
2267 struct inode *inode, struct block_device *bdev, const struct iovec *iov, 2296 struct inode *inode, struct block_device *bdev, const struct iovec *iov,
2268 loff_t offset, unsigned long nr_segs, get_block_t get_block, 2297 loff_t offset, unsigned long nr_segs, get_block_t get_block,
2269 dio_iodone_t end_io) 2298 dio_iodone_t end_io)
2270{ 2299{
2271 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, 2300 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
2272 nr_segs, get_block, end_io, 2301 nr_segs, get_block, end_io, NULL,
2273 DIO_LOCKING | DIO_SKIP_HOLES); 2302 DIO_LOCKING | DIO_SKIP_HOLES);
2274} 2303}
2275 2304
@@ -2279,7 +2308,7 @@ static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
2279 dio_iodone_t end_io) 2308 dio_iodone_t end_io)
2280{ 2309{
2281 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, 2310 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
2282 nr_segs, get_block, end_io, 0); 2311 nr_segs, get_block, end_io, NULL, 0);
2283} 2312}
2284#endif 2313#endif
2285 2314
@@ -2335,13 +2364,15 @@ extern int dcache_dir_open(struct inode *, struct file *);
2335extern int dcache_dir_close(struct inode *, struct file *); 2364extern int dcache_dir_close(struct inode *, struct file *);
2336extern loff_t dcache_dir_lseek(struct file *, loff_t, int); 2365extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
2337extern int dcache_readdir(struct file *, void *, filldir_t); 2366extern int dcache_readdir(struct file *, void *, filldir_t);
2367extern int simple_setattr(struct dentry *, struct iattr *);
2338extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); 2368extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
2339extern int simple_statfs(struct dentry *, struct kstatfs *); 2369extern int simple_statfs(struct dentry *, struct kstatfs *);
2340extern int simple_link(struct dentry *, struct inode *, struct dentry *); 2370extern int simple_link(struct dentry *, struct inode *, struct dentry *);
2341extern int simple_unlink(struct inode *, struct dentry *); 2371extern int simple_unlink(struct inode *, struct dentry *);
2342extern int simple_rmdir(struct inode *, struct dentry *); 2372extern int simple_rmdir(struct inode *, struct dentry *);
2343extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); 2373extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
2344extern int simple_sync_file(struct file *, struct dentry *, int); 2374extern int simple_setsize(struct inode *, loff_t);
2375extern int noop_fsync(struct file *, int);
2345extern int simple_empty(struct dentry *); 2376extern int simple_empty(struct dentry *);
2346extern int simple_readpage(struct file *file, struct page *page); 2377extern int simple_readpage(struct file *file, struct page *page);
2347extern int simple_write_begin(struct file *file, struct address_space *mapping, 2378extern int simple_write_begin(struct file *file, struct address_space *mapping,
@@ -2366,7 +2397,7 @@ extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
2366extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, 2397extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
2367 const void __user *from, size_t count); 2398 const void __user *from, size_t count);
2368 2399
2369extern int simple_fsync(struct file *, struct dentry *, int); 2400extern int generic_file_fsync(struct file *, int);
2370 2401
2371#ifdef CONFIG_MIGRATION 2402#ifdef CONFIG_MIGRATION
2372extern int buffer_migrate_page(struct address_space *, 2403extern int buffer_migrate_page(struct address_space *,
@@ -2377,7 +2408,8 @@ extern int buffer_migrate_page(struct address_space *,
2377 2408
2378extern int inode_change_ok(const struct inode *, struct iattr *); 2409extern int inode_change_ok(const struct inode *, struct iattr *);
2379extern int inode_newsize_ok(const struct inode *, loff_t offset); 2410extern int inode_newsize_ok(const struct inode *, loff_t offset);
2380extern int __must_check inode_setattr(struct inode *, struct iattr *); 2411extern int __must_check inode_setattr(struct inode *, const struct iattr *);
2412extern void generic_setattr(struct inode *inode, const struct iattr *attr);
2381 2413
2382extern void file_update_time(struct file *file); 2414extern void file_update_time(struct file *file);
2383 2415
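The ->fsync() method drops its dentry argument; the inode is reachable from the struct file, and simple filesystems can delegate to the new generic_file_fsync() or use noop_fsync()/noop_llseek() where nothing needs doing. A hedged sketch of the new shape:

#include <linux/fs.h>

static int example_fsync(struct file *file, int datasync)
{
	struct inode *inode = file->f_mapping->host;

	/* filesystem-specific flushing would go here, keyed off the inode */
	(void)inode;

	/* buffer_head based filesystems can simply delegate */
	return generic_file_fsync(file, datasync);
}

static const struct file_operations example_fops = {
	.fsync  = example_fsync,
	.llseek = noop_llseek,	/* new helper: lseek() succeeds without moving f_pos */
};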
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index c082f223e2fe..3167f2df4126 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -73,18 +73,25 @@ struct trace_iterator {
73}; 73};
74 74
75 75
76struct trace_event;
77
76typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, 78typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
77 int flags); 79 int flags, struct trace_event *event);
78struct trace_event { 80
79 struct hlist_node node; 81struct trace_event_functions {
80 struct list_head list;
81 int type;
82 trace_print_func trace; 82 trace_print_func trace;
83 trace_print_func raw; 83 trace_print_func raw;
84 trace_print_func hex; 84 trace_print_func hex;
85 trace_print_func binary; 85 trace_print_func binary;
86}; 86};
87 87
88struct trace_event {
89 struct hlist_node node;
90 struct list_head list;
91 int type;
92 struct trace_event_functions *funcs;
93};
94
88extern int register_ftrace_event(struct trace_event *event); 95extern int register_ftrace_event(struct trace_event *event);
89extern int unregister_ftrace_event(struct trace_event *event); 96extern int unregister_ftrace_event(struct trace_event *event);
90 97
@@ -116,28 +123,70 @@ void tracing_record_cmdline(struct task_struct *tsk);
116 123
117struct event_filter; 124struct event_filter;
118 125
126enum trace_reg {
127 TRACE_REG_REGISTER,
128 TRACE_REG_UNREGISTER,
129 TRACE_REG_PERF_REGISTER,
130 TRACE_REG_PERF_UNREGISTER,
131};
132
133struct ftrace_event_call;
134
135struct ftrace_event_class {
136 char *system;
137 void *probe;
138#ifdef CONFIG_PERF_EVENTS
139 void *perf_probe;
140#endif
141 int (*reg)(struct ftrace_event_call *event,
142 enum trace_reg type);
143 int (*define_fields)(struct ftrace_event_call *);
144 struct list_head *(*get_fields)(struct ftrace_event_call *);
145 struct list_head fields;
146 int (*raw_init)(struct ftrace_event_call *);
147};
148
149enum {
150 TRACE_EVENT_FL_ENABLED_BIT,
151 TRACE_EVENT_FL_FILTERED_BIT,
152};
153
154enum {
155 TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT),
156 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
157};
158
119struct ftrace_event_call { 159struct ftrace_event_call {
120 struct list_head list; 160 struct list_head list;
161 struct ftrace_event_class *class;
121 char *name; 162 char *name;
122 char *system;
123 struct dentry *dir; 163 struct dentry *dir;
124 struct trace_event *event; 164 struct trace_event event;
125 int enabled;
126 int (*regfunc)(struct ftrace_event_call *);
127 void (*unregfunc)(struct ftrace_event_call *);
128 int id;
129 const char *print_fmt; 165 const char *print_fmt;
130 int (*raw_init)(struct ftrace_event_call *);
131 int (*define_fields)(struct ftrace_event_call *);
132 struct list_head fields;
133 int filter_active;
134 struct event_filter *filter; 166 struct event_filter *filter;
135 void *mod; 167 void *mod;
136 void *data; 168 void *data;
137 169
170 /*
171 * 32 bit flags:
172 * bit 1: enabled
173 * bit 2: filter_active
174 *
175 * Changes to flags must hold the event_mutex.
176 *
177 * Note: Reads of flags do not hold the event_mutex since
178 * they occur in critical sections. But the way flags
 179 * is currently used, these changes do not affect the code
180 * except that when a change is made, it may have a slight
181 * delay in propagating the changes to other CPUs due to
182 * caching and such.
183 */
184 unsigned int flags;
185
186#ifdef CONFIG_PERF_EVENTS
138 int perf_refcount; 187 int perf_refcount;
139 int (*perf_event_enable)(struct ftrace_event_call *); 188 struct hlist_head *perf_events;
140 void (*perf_event_disable)(struct ftrace_event_call *); 189#endif
141}; 190};
142 191
143#define PERF_MAX_TRACE_SIZE 2048 192#define PERF_MAX_TRACE_SIZE 2048
@@ -194,24 +243,22 @@ struct perf_event;
194 243
195DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); 244DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
196 245
197extern int perf_trace_enable(int event_id); 246extern int perf_trace_init(struct perf_event *event);
198extern void perf_trace_disable(int event_id); 247extern void perf_trace_destroy(struct perf_event *event);
199extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, 248extern int perf_trace_enable(struct perf_event *event);
249extern void perf_trace_disable(struct perf_event *event);
250extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
200 char *filter_str); 251 char *filter_str);
201extern void ftrace_profile_free_filter(struct perf_event *event); 252extern void ftrace_profile_free_filter(struct perf_event *event);
202extern void * 253extern void *perf_trace_buf_prepare(int size, unsigned short type,
203perf_trace_buf_prepare(int size, unsigned short type, int *rctxp, 254 struct pt_regs *regs, int *rctxp);
204 unsigned long *irq_flags);
205 255
206static inline void 256static inline void
207perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, 257perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
208 u64 count, unsigned long irq_flags, struct pt_regs *regs) 258 u64 count, struct pt_regs *regs, void *head)
209{ 259{
210 struct trace_entry *entry = raw_data; 260 perf_tp_event(addr, count, raw_data, size, regs, head);
211
212 perf_tp_event(entry->type, addr, count, raw_data, size, regs);
213 perf_swevent_put_recursion_context(rctx); 261 perf_swevent_put_recursion_context(rctx);
214 local_irq_restore(irq_flags);
215} 262}
216#endif 263#endif
217 264
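The output callbacks move out of struct trace_event into a shared struct trace_event_functions, so many events can point at one table. A heavily hedged sketch of the new wiring for a hand-rolled event (names are illustrative; most users go through the TRACE_EVENT() macros instead):

#include <linux/errno.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/trace_seq.h>

static enum print_line_t example_print(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	trace_seq_printf(&iter->seq, "example event\n");
	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions example_funcs = {
	.trace = example_print,		/* raw/hex/binary fall back to defaults */
};

static struct trace_event example_trace_event = {
	.funcs = &example_funcs,
};

static int __init example_event_init(void)
{
	/* register_ftrace_event() returns the assigned type, 0 on failure */
	return register_ftrace_event(&example_trace_event) ? 0 : -EBUSY;
}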
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 3e2925a34bf0..88e0eb596919 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -34,6 +34,9 @@
34 * 7.13 34 * 7.13
35 * - make max number of background requests and congestion threshold 35 * - make max number of background requests and congestion threshold
36 * tunables 36 * tunables
37 *
38 * 7.14
39 * - add splice support to fuse device
37 */ 40 */
38 41
39#ifndef _LINUX_FUSE_H 42#ifndef _LINUX_FUSE_H
@@ -65,7 +68,7 @@
65#define FUSE_KERNEL_VERSION 7 68#define FUSE_KERNEL_VERSION 7
66 69
67/** Minor version number of this interface */ 70/** Minor version number of this interface */
68#define FUSE_KERNEL_MINOR_VERSION 13 71#define FUSE_KERNEL_MINOR_VERSION 14
69 72
70/** The node ID of the root inode */ 73/** The node ID of the root inode */
71#define FUSE_ROOT_ID 1 74#define FUSE_ROOT_ID 1
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 4e949a5b5b85..03f616b78cfa 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -51,6 +51,11 @@ static inline int gpio_direction_output(unsigned gpio, int value)
51 return -ENOSYS; 51 return -ENOSYS;
52} 52}
53 53
54static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
55{
56 return -ENOSYS;
57}
58
54static inline int gpio_get_value(unsigned gpio) 59static inline int gpio_get_value(unsigned gpio)
55{ 60{
56 /* GPIO can never have been requested or set as {in,out}put */ 61 /* GPIO can never have been requested or set as {in,out}put */
diff --git a/include/linux/i2c/adp8860.h b/include/linux/i2c/adp8860.h
new file mode 100644
index 000000000000..0b4d39855c91
--- /dev/null
+++ b/include/linux/i2c/adp8860.h
@@ -0,0 +1,154 @@
1/*
2 * Definitions and platform data for Analog Devices
3 * Backlight drivers ADP8860
4 *
5 * Copyright 2009-2010 Analog Devices Inc.
6 *
7 * Licensed under the GPL-2 or later.
8 */
9
10#ifndef __LINUX_I2C_ADP8860_H
11#define __LINUX_I2C_ADP8860_H
12
13#include <linux/leds.h>
14#include <linux/types.h>
15
16#define ID_ADP8860 8860
17
18#define ADP8860_MAX_BRIGHTNESS 0x7F
19#define FLAG_OFFT_SHIFT 8
20
21/*
22 * LEDs subdevice platform data
23 */
24
25#define ADP8860_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT)
26#define ADP8860_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT)
27#define ADP8860_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT)
28#define ADP8860_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT)
29
30#define ADP8860_LED_ONT_200ms 0
31#define ADP8860_LED_ONT_600ms 1
32#define ADP8860_LED_ONT_800ms 2
33#define ADP8860_LED_ONT_1200ms 3
34
35#define ADP8860_LED_D7 (7)
36#define ADP8860_LED_D6 (6)
37#define ADP8860_LED_D5 (5)
38#define ADP8860_LED_D4 (4)
39#define ADP8860_LED_D3 (3)
40#define ADP8860_LED_D2 (2)
41#define ADP8860_LED_D1 (1)
42
43/*
44 * Backlight subdevice platform data
45 */
46
47#define ADP8860_BL_D7 (1 << 6)
48#define ADP8860_BL_D6 (1 << 5)
49#define ADP8860_BL_D5 (1 << 4)
50#define ADP8860_BL_D4 (1 << 3)
51#define ADP8860_BL_D3 (1 << 2)
52#define ADP8860_BL_D2 (1 << 1)
53#define ADP8860_BL_D1 (1 << 0)
54
55#define ADP8860_FADE_T_DIS 0 /* Fade Timer Disabled */
56#define ADP8860_FADE_T_300ms 1 /* 0.3 Sec */
57#define ADP8860_FADE_T_600ms 2
58#define ADP8860_FADE_T_900ms 3
59#define ADP8860_FADE_T_1200ms 4
60#define ADP8860_FADE_T_1500ms 5
61#define ADP8860_FADE_T_1800ms 6
62#define ADP8860_FADE_T_2100ms 7
63#define ADP8860_FADE_T_2400ms 8
64#define ADP8860_FADE_T_2700ms 9
65#define ADP8860_FADE_T_3000ms 10
66#define ADP8860_FADE_T_3500ms 11
67#define ADP8860_FADE_T_4000ms 12
68#define ADP8860_FADE_T_4500ms 13
69#define ADP8860_FADE_T_5000ms 14
70#define ADP8860_FADE_T_5500ms 15 /* 5.5 Sec */
71
72#define ADP8860_FADE_LAW_LINEAR 0
73#define ADP8860_FADE_LAW_SQUARE 1
74#define ADP8860_FADE_LAW_CUBIC1 2
75#define ADP8860_FADE_LAW_CUBIC2 3
76
77#define ADP8860_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */
78#define ADP8860_BL_AMBL_FILT_160ms 1
79#define ADP8860_BL_AMBL_FILT_320ms 2
80#define ADP8860_BL_AMBL_FILT_640ms 3
81#define ADP8860_BL_AMBL_FILT_1280ms 4
82#define ADP8860_BL_AMBL_FILT_2560ms 5
83#define ADP8860_BL_AMBL_FILT_5120ms 6
84#define ADP8860_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */
85
86/*
 87 * Backlight current 0..30mA
88 */
89#define ADP8860_BL_CUR_mA(I) ((I * 127) / 30)
90
91/*
92 * L2 comparator current 0..1106uA
93 */
94#define ADP8860_L2_COMP_CURR_uA(I) ((I * 255) / 1106)
95
96/*
97 * L3 comparator current 0..138uA
98 */
99#define ADP8860_L3_COMP_CURR_uA(I) ((I * 255) / 138)
100
101struct adp8860_backlight_platform_data {
102 u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */
103
104 u8 bl_fade_in; /* Backlight Fade-In Timer */
105 u8 bl_fade_out; /* Backlight Fade-Out Timer */
106 u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */
107
108 u8 en_ambl_sens; /* 1 = enable ambient light sensor */
109 u8 abml_filt; /* Light sensor filter time */
110
111 u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
112 u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
113 u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
114 u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
115 u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
116 u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
117
118 u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
119 u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
120 u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
121 u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
122
123 /**
124 * Independent Current Sinks / LEDS
125 * Sinks not assigned to the Backlight can be exposed to
126 * user space using the LEDS CLASS interface
127 */
128
129 int num_leds;
130 struct led_info *leds;
131 u8 led_fade_in; /* LED Fade-In Timer */
132 u8 led_fade_out; /* LED Fade-Out Timer */
133 u8 led_fade_law; /* fade-on/fade-off transfer characteristic */
134 u8 led_on_time;
135
136 /**
137 * Gain down disable. Setting this option does not allow the
138 * charge pump to switch to lower gains. NOT AVAILABLE on ADP8860
139 * 1 = the charge pump doesn't switch down in gain until all LEDs are 0.
140 * The charge pump switches up in gain as needed. This feature is
141 * useful if the ADP8863 charge pump is used to drive an external load.
142 * This feature must be used when utilizing small fly capacitors
143 * (0402 or smaller).
144 * 0 = the charge pump automatically switches up and down in gain.
145 * This provides optimal efficiency, but is not suitable for driving
146 * loads that are not connected through the ADP8863 diode drivers.
147 * Additionally, the charge pump fly capacitors should be low ESR
148 * and sized 0603 or greater.
149 */
150
151 u8 gdwn_dis;
152};
153
154#endif /* __LINUX_I2C_ADP8860_H */
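A hedged board-file sketch using the new platform data: backlight driven from sinks D1..D6, one user-visible LED on D7, and thresholds filled in via the current-conversion macros. All values below are illustrative, not recommendations:

#include <linux/i2c/adp8860.h>
#include <linux/kernel.h>

static struct led_info example_adp8860_leds[] = {
	{
		.name  = "adp8860:status",
		.flags = ADP8860_LED_D7 | ADP8860_LED_OFFT_600ms,
	},
};

static struct adp8860_backlight_platform_data example_adp8860_pdata = {
	.bl_led_assign   = ADP8860_BL_D1 | ADP8860_BL_D2 | ADP8860_BL_D3 |
			   ADP8860_BL_D4 | ADP8860_BL_D5 | ADP8860_BL_D6,
	.bl_fade_in      = ADP8860_FADE_T_600ms,
	.bl_fade_out     = ADP8860_FADE_T_600ms,
	.bl_fade_law     = ADP8860_FADE_LAW_LINEAR,
	.en_ambl_sens    = 1,
	.abml_filt       = ADP8860_BL_AMBL_FILT_640ms,
	.l1_daylight_max = ADP8860_BL_CUR_mA(20),	/* 20 mA in daylight */
	.l2_office_max   = ADP8860_BL_CUR_mA(10),
	.l3_dark_max     = ADP8860_BL_CUR_mA(5),
	.l2_trip         = ADP8860_L2_COMP_CURR_uA(700),
	.l3_trip         = ADP8860_L3_COMP_CURR_uA(100),
	.num_leds        = ARRAY_SIZE(example_adp8860_leds),
	.leds            = example_adp8860_leds,
	.led_on_time     = ADP8860_LED_ONT_600ms,
};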
diff --git a/include/linux/i2c/max732x.h b/include/linux/i2c/max732x.h
index e10336631c62..c04bac8bf2fe 100644
--- a/include/linux/i2c/max732x.h
+++ b/include/linux/i2c/max732x.h
@@ -7,6 +7,9 @@ struct max732x_platform_data {
7 /* number of the first GPIO */ 7 /* number of the first GPIO */
8 unsigned gpio_base; 8 unsigned gpio_base;
9 9
10 /* interrupt base */
11 int irq_base;
12
10 void *context; /* param to setup/teardown */ 13 void *context; /* param to setup/teardown */
11 14
12 int (*setup)(struct i2c_client *client, 15 int (*setup)(struct i2c_client *client,
diff --git a/include/linux/i2c/pca953x.h b/include/linux/i2c/pca953x.h
index d5c5a60c8a0b..139ba52667c8 100644
--- a/include/linux/i2c/pca953x.h
+++ b/include/linux/i2c/pca953x.h
@@ -24,7 +24,7 @@ struct pca953x_platform_data {
24 int (*teardown)(struct i2c_client *client, 24 int (*teardown)(struct i2c_client *client,
25 unsigned gpio, unsigned ngpio, 25 unsigned gpio, unsigned ngpio,
26 void *context); 26 void *context);
27 char **names; 27 const char *const *names;
28}; 28};
29 29
30#endif /* _LINUX_PCA953X_H */ 30#endif /* _LINUX_PCA953X_H */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7996fc2c9ba9..2beaa13492be 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -16,7 +16,7 @@ extern struct files_struct init_files;
16extern struct fs_struct init_fs; 16extern struct fs_struct init_fs;
17 17
18#define INIT_SIGNALS(sig) { \ 18#define INIT_SIGNALS(sig) { \
19 .count = ATOMIC_INIT(1), \ 19 .nr_threads = 1, \
20 .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\ 20 .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
21 .shared_pending = { \ 21 .shared_pending = { \
22 .list = LIST_HEAD_INIT(sig.shared_pending.list), \ 22 .list = LIST_HEAD_INIT(sig.shared_pending.list), \
@@ -35,7 +35,7 @@ extern struct nsproxy init_nsproxy;
35 35
36#define INIT_SIGHAND(sighand) { \ 36#define INIT_SIGHAND(sighand) { \
37 .count = ATOMIC_INIT(1), \ 37 .count = ATOMIC_INIT(1), \
38 .action = { { { .sa_handler = NULL, } }, }, \ 38 .action = { { { .sa_handler = SIG_DFL, } }, }, \
39 .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \ 39 .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
40 .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \ 40 .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
41} 41}
@@ -45,9 +45,9 @@ extern struct group_info init_groups;
45#define INIT_STRUCT_PID { \ 45#define INIT_STRUCT_PID { \
46 .count = ATOMIC_INIT(1), \ 46 .count = ATOMIC_INIT(1), \
47 .tasks = { \ 47 .tasks = { \
48 { .first = &init_task.pids[PIDTYPE_PID].node }, \ 48 { .first = NULL }, \
49 { .first = &init_task.pids[PIDTYPE_PGID].node }, \ 49 { .first = NULL }, \
50 { .first = &init_task.pids[PIDTYPE_SID].node }, \ 50 { .first = NULL }, \
51 }, \ 51 }, \
52 .level = 0, \ 52 .level = 0, \
53 .numbers = { { \ 53 .numbers = { { \
@@ -61,7 +61,7 @@ extern struct group_info init_groups;
61{ \ 61{ \
62 .node = { \ 62 .node = { \
63 .next = NULL, \ 63 .next = NULL, \
64 .pprev = &init_struct_pid.tasks[type].first, \ 64 .pprev = NULL, \
65 }, \ 65 }, \
66 .pid = &init_struct_pid, \ 66 .pid = &init_struct_pid, \
67} 67}
@@ -163,6 +163,7 @@ extern struct cred init_cred;
163 [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ 163 [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
164 [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ 164 [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
165 }, \ 165 }, \
166 .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
166 .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ 167 .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
167 INIT_IDS \ 168 INIT_IDS \
168 INIT_PERF_EVENTS(tsk) \ 169 INIT_PERF_EVENTS(tsk) \
diff --git a/include/linux/input.h b/include/linux/input.h
index 83524e4f3290..6fcc9101beeb 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1155,7 +1155,7 @@ struct input_dev {
1155 1155
1156 int sync; 1156 int sync;
1157 1157
1158 int abs[ABS_MAX + 1]; 1158 int abs[ABS_CNT];
1159 int rep[REP_MAX + 1]; 1159 int rep[REP_MAX + 1];
1160 1160
1161 unsigned long key[BITS_TO_LONGS(KEY_CNT)]; 1161 unsigned long key[BITS_TO_LONGS(KEY_CNT)];
@@ -1163,11 +1163,11 @@ struct input_dev {
1163 unsigned long snd[BITS_TO_LONGS(SND_CNT)]; 1163 unsigned long snd[BITS_TO_LONGS(SND_CNT)];
1164 unsigned long sw[BITS_TO_LONGS(SW_CNT)]; 1164 unsigned long sw[BITS_TO_LONGS(SW_CNT)];
1165 1165
1166 int absmax[ABS_MAX + 1]; 1166 int absmax[ABS_CNT];
1167 int absmin[ABS_MAX + 1]; 1167 int absmin[ABS_CNT];
1168 int absfuzz[ABS_MAX + 1]; 1168 int absfuzz[ABS_CNT];
1169 int absflat[ABS_MAX + 1]; 1169 int absflat[ABS_CNT];
1170 int absres[ABS_MAX + 1]; 1170 int absres[ABS_CNT];
1171 1171
1172 int (*open)(struct input_dev *dev); 1172 int (*open)(struct input_dev *dev);
1173 void (*close)(struct input_dev *dev); 1173 void (*close)(struct input_dev *dev);
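
ABS_CNT is defined as ABS_MAX + 1 in the same header, so the array sizes are unchanged; the _CNT spelling simply matches how the arrays are iterated. A small sketch of a loop over every axis slot:

/* Sketch: copy the absolute-axis limits out of an input device.
 * ABS_CNT == ABS_MAX + 1, so this covers every axis slot. */
#include <linux/input.h>

static void copy_abs_limits(const struct input_dev *dev, int *min, int *max)
{
	int i;

	for (i = 0; i < ABS_CNT; i++) {
		min[i] = dev->absmin[i];
		max[i] = dev->absmax[i];
	}
}
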
diff --git a/include/linux/input/tps6507x-ts.h b/include/linux/input/tps6507x-ts.h
new file mode 100644
index 000000000000..ab1440313924
--- /dev/null
+++ b/include/linux/input/tps6507x-ts.h
@@ -0,0 +1,24 @@
1/* linux/i2c/tps6507x-ts.h
2 *
3 * Functions to access TPS65070 touch screen chip.
4 *
5 * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
6 *
7 *
8 * For licensing details see kernel-base/COPYING
9 */
10
11#ifndef __LINUX_I2C_TPS6507X_TS_H
12#define __LINUX_I2C_TPS6507X_TS_H
13
14/* Board specific touch screen initial values */
15struct touchscreen_init_data {
16 int poll_period; /* ms */
17 int vref; /* non-zero to leave vref on */
18 __u16 min_pressure; /* min reading to be treated as a touch */
19 __u16 vendor;
20 __u16 product;
21 __u16 version;
22};
23
24#endif /* __LINUX_I2C_TPS6507X_TS_H */
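
Board files fill this structure in and pass it through the tps6507x MFD board data (see the include/linux/mfd/tps6507x.h hunk further down). A sketch with assumed values:

#include <linux/input/tps6507x-ts.h>

/* Illustrative board values; the poll period and pressure threshold
 * are assumptions, not taken from a real board file. */
static struct touchscreen_init_data board_tps_ts_data = {
	.poll_period	= 30,		/* ms between samples */
	.min_pressure	= 0x30,		/* readings below this are ignored */
	.vref		= 0,		/* do not leave vref on */
	.vendor		= 0,
	.product	= 65070,
	.version	= 0x100,
};
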
diff --git a/include/linux/joystick.h b/include/linux/joystick.h
index 9e20c29c1e14..47199b13e0eb 100644
--- a/include/linux/joystick.h
+++ b/include/linux/joystick.h
@@ -64,8 +64,8 @@ struct js_event {
64#define JSIOCSCORR _IOW('j', 0x21, struct js_corr) /* set correction values */ 64#define JSIOCSCORR _IOW('j', 0x21, struct js_corr) /* set correction values */
65#define JSIOCGCORR _IOR('j', 0x22, struct js_corr) /* get correction values */ 65#define JSIOCGCORR _IOR('j', 0x22, struct js_corr) /* get correction values */
66 66
67#define JSIOCSAXMAP _IOW('j', 0x31, __u8[ABS_MAX + 1]) /* set axis mapping */ 67#define JSIOCSAXMAP _IOW('j', 0x31, __u8[ABS_CNT]) /* set axis mapping */
68#define JSIOCGAXMAP _IOR('j', 0x32, __u8[ABS_MAX + 1]) /* get axis mapping */ 68#define JSIOCGAXMAP _IOR('j', 0x32, __u8[ABS_CNT]) /* get axis mapping */
69#define JSIOCSBTNMAP _IOW('j', 0x33, __u16[KEY_MAX - BTN_MISC + 1]) /* set button mapping */ 69#define JSIOCSBTNMAP _IOW('j', 0x33, __u16[KEY_MAX - BTN_MISC + 1]) /* set button mapping */
70#define JSIOCGBTNMAP _IOR('j', 0x34, __u16[KEY_MAX - BTN_MISC + 1]) /* get button mapping */ 70#define JSIOCGBTNMAP _IOR('j', 0x34, __u16[KEY_MAX - BTN_MISC + 1]) /* get button mapping */
71 71
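
Rebuilding a user-space program against the updated header picks up the ABS_CNT-sized axis-map ioctls automatically. A minimal sketch of reading the axis map (device path illustrative, error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/joystick.h>

int main(void)
{
	__u8 axmap[ABS_CNT];
	int fd = open("/dev/input/js0", O_RDONLY);

	if (fd < 0 || ioctl(fd, JSIOCGAXMAP, axmap) < 0)
		return 1;
	printf("first axis maps to code %u\n", axmap[0]);
	return 0;
}
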
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index facb27fe7de0..6efd7a78de6a 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -23,6 +23,7 @@
23#include <linux/stddef.h> 23#include <linux/stddef.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/compiler.h> 25#include <linux/compiler.h>
26#include <linux/workqueue.h>
26 27
27#define KMOD_PATH_LEN 256 28#define KMOD_PATH_LEN 256
28 29
@@ -45,19 +46,6 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS;
45 46
46struct key; 47struct key;
47struct file; 48struct file;
48struct subprocess_info;
49
50/* Allocate a subprocess_info structure */
51struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
52 char **envp, gfp_t gfp_mask);
53
54/* Set various pieces of state into the subprocess_info structure */
55void call_usermodehelper_setkeys(struct subprocess_info *info,
56 struct key *session_keyring);
57int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
58 struct file **filp);
59void call_usermodehelper_setcleanup(struct subprocess_info *info,
60 void (*cleanup)(char **argv, char **envp));
61 49
62enum umh_wait { 50enum umh_wait {
63 UMH_NO_WAIT = -1, /* don't wait at all */ 51 UMH_NO_WAIT = -1, /* don't wait at all */
@@ -65,6 +53,29 @@ enum umh_wait {
65 UMH_WAIT_PROC = 1, /* wait for the process to complete */ 53 UMH_WAIT_PROC = 1, /* wait for the process to complete */
66}; 54};
67 55
56struct subprocess_info {
57 struct work_struct work;
58 struct completion *complete;
59 char *path;
60 char **argv;
61 char **envp;
62 enum umh_wait wait;
63 int retval;
64 int (*init)(struct subprocess_info *info);
65 void (*cleanup)(struct subprocess_info *info);
66 void *data;
67};
68
69/* Allocate a subprocess_info structure */
70struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
71 char **envp, gfp_t gfp_mask);
72
73/* Set various pieces of state into the subprocess_info structure */
74void call_usermodehelper_setfns(struct subprocess_info *info,
75 int (*init)(struct subprocess_info *info),
76 void (*cleanup)(struct subprocess_info *info),
77 void *data);
78
68/* Actually execute the sub-process */ 79/* Actually execute the sub-process */
69int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait); 80int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
70 81
@@ -73,38 +84,33 @@ int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
73void call_usermodehelper_freeinfo(struct subprocess_info *info); 84void call_usermodehelper_freeinfo(struct subprocess_info *info);
74 85
75static inline int 86static inline int
76call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait) 87call_usermodehelper_fns(char *path, char **argv, char **envp,
88 enum umh_wait wait,
89 int (*init)(struct subprocess_info *info),
90 void (*cleanup)(struct subprocess_info *), void *data)
77{ 91{
78 struct subprocess_info *info; 92 struct subprocess_info *info;
79 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; 93 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
80 94
81 info = call_usermodehelper_setup(path, argv, envp, gfp_mask); 95 info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
96
82 if (info == NULL) 97 if (info == NULL)
83 return -ENOMEM; 98 return -ENOMEM;
99
100 call_usermodehelper_setfns(info, init, cleanup, data);
101
84 return call_usermodehelper_exec(info, wait); 102 return call_usermodehelper_exec(info, wait);
85} 103}
86 104
87static inline int 105static inline int
88call_usermodehelper_keys(char *path, char **argv, char **envp, 106call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
89 struct key *session_keyring, enum umh_wait wait)
90{ 107{
91 struct subprocess_info *info; 108 return call_usermodehelper_fns(path, argv, envp, wait,
92 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; 109 NULL, NULL, NULL);
93
94 info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
95 if (info == NULL)
96 return -ENOMEM;
97
98 call_usermodehelper_setkeys(info, session_keyring);
99 return call_usermodehelper_exec(info, wait);
100} 110}
101 111
102extern void usermodehelper_init(void); 112extern void usermodehelper_init(void);
103 113
104struct file;
105extern int call_usermodehelper_pipe(char *path, char *argv[], char *envp[],
106 struct file **filp);
107
108extern int usermodehelper_disable(void); 114extern int usermodehelper_disable(void);
109extern void usermodehelper_enable(void); 115extern void usermodehelper_enable(void);
110 116
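
The keyring and stdin-pipe setup helpers are folded into a single init/cleanup hook: plain callers keep using call_usermodehelper(), while callers that previously used call_usermodehelper_setkeys() or call_usermodehelper_pipe() now pass an init() callback that performs the setup before exec. A hedged sketch of the new calling convention (helper path and callback body illustrative):

#include <linux/kmod.h>

static int example_umh_init(struct subprocess_info *info)
{
	/* runs in the child before exec; e.g. install a session keyring
	 * or set up file descriptors using info->data */
	return 0;
}

static int run_example_helper(void *data)
{
	char *argv[] = { "/sbin/example-helper", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

	return call_usermodehelper_fns("/sbin/example-helper", argv, envp,
				       UMH_WAIT_PROC,
				       example_umh_init, NULL, data);
}
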
diff --git a/include/linux/lcd.h b/include/linux/lcd.h
index c67fecafff90..8877123f2d6e 100644
--- a/include/linux/lcd.h
+++ b/include/linux/lcd.h
@@ -69,6 +69,29 @@ struct lcd_device {
69 struct device dev; 69 struct device dev;
70}; 70};
71 71
72struct lcd_platform_data {
73 /* reset lcd panel device. */
74 int (*reset)(struct lcd_device *ld);
75	/* switch the lcd panel on or off. if 'enable' is 0 the
76	   panel is powered off; if 1, it is powered on. */
77 int (*power_on)(struct lcd_device *ld, int enable);
78
79	/* indicates whether the lcd panel was already enabled
80	   by the bootloader. */
81 int lcd_enabled;
82	/* delay needed for the reset line to settle after it toggles;
83	   depends on whether the reset gpio is
84	   active low or active high. */
85 unsigned int reset_delay;
86	/* settling time needed after the lcd panel is powered on. */
87	unsigned int power_on_delay;
88	/* settling time needed after the lcd panel is powered off. */
89 unsigned int power_off_delay;
90
91 /* it could be used for any purpose. */
92 void *pdata;
93};
94
72static inline void lcd_set_power(struct lcd_device *ld, int power) 95static inline void lcd_set_power(struct lcd_device *ld, int power)
73{ 96{
74 mutex_lock(&ld->update_lock); 97 mutex_lock(&ld->update_lock);
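
The new lcd_platform_data is supplied by board code with panel reset and power callbacks plus timing values. A board-side sketch, assuming an arbitrary reset gpio and delay values:

#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/lcd.h>

#define BOARD_LCD_RESET_GPIO	17	/* illustrative gpio number */

static int board_lcd_reset(struct lcd_device *ld)
{
	gpio_set_value(BOARD_LCD_RESET_GPIO, 0);
	mdelay(10);
	gpio_set_value(BOARD_LCD_RESET_GPIO, 1);
	return 0;
}

static int board_lcd_power_on(struct lcd_device *ld, int enable)
{
	/* enable or disable the panel supply here */
	return 0;
}

static struct lcd_platform_data board_lcd_pdata = {
	.reset		= board_lcd_reset,
	.power_on	= board_lcd_power_on,
	.lcd_enabled	= 0,
	.reset_delay	= 100,	/* assumed values; units follow the panel driver */
	.power_on_delay	= 130,
	.power_off_delay = 120,
};
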
diff --git a/include/linux/leds.h b/include/linux/leds.h
index d8bf9665e70c..ba6986a11663 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -149,14 +149,18 @@ struct gpio_led {
149 unsigned default_state : 2; 149 unsigned default_state : 2;
150 /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ 150 /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
151}; 151};
152#define LEDS_GPIO_DEFSTATE_OFF 0 152#define LEDS_GPIO_DEFSTATE_OFF 0
153#define LEDS_GPIO_DEFSTATE_ON 1 153#define LEDS_GPIO_DEFSTATE_ON 1
154#define LEDS_GPIO_DEFSTATE_KEEP 2 154#define LEDS_GPIO_DEFSTATE_KEEP 2
155 155
156struct gpio_led_platform_data { 156struct gpio_led_platform_data {
157 int num_leds; 157 int num_leds;
158 struct gpio_led *leds; 158 struct gpio_led *leds;
159 int (*gpio_blink_set)(unsigned gpio, 159
160#define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */
161#define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */
162#define GPIO_LED_BLINK 2 /* Please, blink */
163 int (*gpio_blink_set)(unsigned gpio, int state,
160 unsigned long *delay_on, 164 unsigned long *delay_on,
161 unsigned long *delay_off); 165 unsigned long *delay_off);
162}; 166};
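
gpio_blink_set() gains a state argument, so the platform hook now also covers the non-blinking states selected by the three new constants. A sketch of a platform callback honouring them (how the blink hardware is driven is entirely board specific):

#include <linux/gpio.h>
#include <linux/leds.h>

static int board_gpio_blink_set(unsigned gpio, int state,
				unsigned long *delay_on,
				unsigned long *delay_off)
{
	switch (state) {
	case GPIO_LED_NO_BLINK_LOW:
		gpio_set_value(gpio, 0);
		return 0;
	case GPIO_LED_NO_BLINK_HIGH:
		gpio_set_value(gpio, 1);
		return 0;
	case GPIO_LED_BLINK:
	default:
		/* hand *delay_on / *delay_off to the blink hardware */
		return 0;
	}
}

static struct gpio_led_platform_data board_led_pdata = {
	.num_leds	= 0,	/* leds table omitted in this sketch */
	.gpio_blink_set	= board_gpio_blink_set,
};
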
diff --git a/include/linux/libata.h b/include/linux/libata.h
index ee84e7e12039..3bad2701bfa6 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -386,6 +386,7 @@ enum {
386 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ 386 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
387 ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ 387 ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
388 ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ 388 ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
389 ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
389 390
390 /* DMA mask for user DMA control: User visible values; DO NOT 391 /* DMA mask for user DMA control: User visible values; DO NOT
391 renumber */ 392 renumber */
@@ -513,7 +514,9 @@ struct ata_ioports {
513 void __iomem *command_addr; 514 void __iomem *command_addr;
514 void __iomem *altstatus_addr; 515 void __iomem *altstatus_addr;
515 void __iomem *ctl_addr; 516 void __iomem *ctl_addr;
517#ifdef CONFIG_ATA_BMDMA
516 void __iomem *bmdma_addr; 518 void __iomem *bmdma_addr;
519#endif /* CONFIG_ATA_BMDMA */
517 void __iomem *scr_addr; 520 void __iomem *scr_addr;
518}; 521};
519#endif /* CONFIG_ATA_SFF */ 522#endif /* CONFIG_ATA_SFF */
@@ -721,8 +724,10 @@ struct ata_port {
721 u8 ctl; /* cache of ATA control register */ 724 u8 ctl; /* cache of ATA control register */
722 u8 last_ctl; /* Cache last written value */ 725 u8 last_ctl; /* Cache last written value */
723 struct delayed_work sff_pio_task; 726 struct delayed_work sff_pio_task;
727#ifdef CONFIG_ATA_BMDMA
724 struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ 728 struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */
725 dma_addr_t bmdma_prd_dma; /* and its DMA mapping */ 729 dma_addr_t bmdma_prd_dma; /* and its DMA mapping */
730#endif /* CONFIG_ATA_BMDMA */
726#endif /* CONFIG_ATA_SFF */ 731#endif /* CONFIG_ATA_SFF */
727 732
728 unsigned int pio_mask; 733 unsigned int pio_mask;
@@ -856,10 +861,12 @@ struct ata_port_operations {
856 void (*sff_irq_clear)(struct ata_port *); 861 void (*sff_irq_clear)(struct ata_port *);
857 void (*sff_drain_fifo)(struct ata_queued_cmd *qc); 862 void (*sff_drain_fifo)(struct ata_queued_cmd *qc);
858 863
864#ifdef CONFIG_ATA_BMDMA
859 void (*bmdma_setup)(struct ata_queued_cmd *qc); 865 void (*bmdma_setup)(struct ata_queued_cmd *qc);
860 void (*bmdma_start)(struct ata_queued_cmd *qc); 866 void (*bmdma_start)(struct ata_queued_cmd *qc);
861 void (*bmdma_stop)(struct ata_queued_cmd *qc); 867 void (*bmdma_stop)(struct ata_queued_cmd *qc);
862 u8 (*bmdma_status)(struct ata_port *ap); 868 u8 (*bmdma_status)(struct ata_port *ap);
869#endif /* CONFIG_ATA_BMDMA */
863#endif /* CONFIG_ATA_SFF */ 870#endif /* CONFIG_ATA_SFF */
864 871
865 ssize_t (*em_show)(struct ata_port *ap, char *buf); 872 ssize_t (*em_show)(struct ata_port *ap, char *buf);
@@ -1555,7 +1562,6 @@ extern void sata_pmp_error_handler(struct ata_port *ap);
1555#ifdef CONFIG_ATA_SFF 1562#ifdef CONFIG_ATA_SFF
1556 1563
1557extern const struct ata_port_operations ata_sff_port_ops; 1564extern const struct ata_port_operations ata_sff_port_ops;
1558extern const struct ata_port_operations ata_bmdma_port_ops;
1559extern const struct ata_port_operations ata_bmdma32_port_ops; 1565extern const struct ata_port_operations ata_bmdma32_port_ops;
1560 1566
1561/* PIO only, sg_tablesize and dma_boundary limits can be removed */ 1567/* PIO only, sg_tablesize and dma_boundary limits can be removed */
@@ -1564,11 +1570,6 @@ extern const struct ata_port_operations ata_bmdma32_port_ops;
1564 .sg_tablesize = LIBATA_MAX_PRD, \ 1570 .sg_tablesize = LIBATA_MAX_PRD, \
1565 .dma_boundary = ATA_DMA_BOUNDARY 1571 .dma_boundary = ATA_DMA_BOUNDARY
1566 1572
1567#define ATA_BMDMA_SHT(drv_name) \
1568 ATA_BASE_SHT(drv_name), \
1569 .sg_tablesize = LIBATA_MAX_PRD, \
1570 .dma_boundary = ATA_DMA_BOUNDARY
1571
1572extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device); 1573extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
1573extern u8 ata_sff_check_status(struct ata_port *ap); 1574extern u8 ata_sff_check_status(struct ata_port *ap);
1574extern void ata_sff_pause(struct ata_port *ap); 1575extern void ata_sff_pause(struct ata_port *ap);
@@ -1593,7 +1594,7 @@ extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1593extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay); 1594extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay);
1594extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); 1595extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
1595extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); 1596extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
1596extern unsigned int ata_sff_host_intr(struct ata_port *ap, 1597extern unsigned int ata_sff_port_intr(struct ata_port *ap,
1597 struct ata_queued_cmd *qc); 1598 struct ata_queued_cmd *qc);
1598extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance); 1599extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance);
1599extern void ata_sff_lost_interrupt(struct ata_port *ap); 1600extern void ata_sff_lost_interrupt(struct ata_port *ap);
@@ -1625,11 +1626,24 @@ extern int ata_pci_sff_init_one(struct pci_dev *pdev,
1625 struct scsi_host_template *sht, void *host_priv, int hflags); 1626 struct scsi_host_template *sht, void *host_priv, int hflags);
1626#endif /* CONFIG_PCI */ 1627#endif /* CONFIG_PCI */
1627 1628
1629#ifdef CONFIG_ATA_BMDMA
1630
1631extern const struct ata_port_operations ata_bmdma_port_ops;
1632
1633#define ATA_BMDMA_SHT(drv_name) \
1634 ATA_BASE_SHT(drv_name), \
1635 .sg_tablesize = LIBATA_MAX_PRD, \
1636 .dma_boundary = ATA_DMA_BOUNDARY
1637
1628extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc); 1638extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
1629extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc); 1639extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
1630extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); 1640extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
1641extern unsigned int ata_bmdma_port_intr(struct ata_port *ap,
1642 struct ata_queued_cmd *qc);
1643extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);
1631extern void ata_bmdma_error_handler(struct ata_port *ap); 1644extern void ata_bmdma_error_handler(struct ata_port *ap);
1632extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc); 1645extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
1646extern void ata_bmdma_irq_clear(struct ata_port *ap);
1633extern void ata_bmdma_setup(struct ata_queued_cmd *qc); 1647extern void ata_bmdma_setup(struct ata_queued_cmd *qc);
1634extern void ata_bmdma_start(struct ata_queued_cmd *qc); 1648extern void ata_bmdma_start(struct ata_queued_cmd *qc);
1635extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 1649extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
@@ -1640,7 +1654,15 @@ extern int ata_bmdma_port_start32(struct ata_port *ap);
1640#ifdef CONFIG_PCI 1654#ifdef CONFIG_PCI
1641extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev); 1655extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev);
1642extern void ata_pci_bmdma_init(struct ata_host *host); 1656extern void ata_pci_bmdma_init(struct ata_host *host);
1657extern int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
1658 const struct ata_port_info * const * ppi,
1659 struct ata_host **r_host);
1660extern int ata_pci_bmdma_init_one(struct pci_dev *pdev,
1661 const struct ata_port_info * const * ppi,
1662 struct scsi_host_template *sht,
1663 void *host_priv, int hflags);
1643#endif /* CONFIG_PCI */ 1664#endif /* CONFIG_PCI */
1665#endif /* CONFIG_ATA_BMDMA */
1644 1666
1645/** 1667/**
1646 * ata_sff_busy_wait - Wait for a port status register 1668 * ata_sff_busy_wait - Wait for a port status register
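
With the BMDMA symbols now guarded by CONFIG_ATA_BMDMA, a driver that needs them selects that option and keeps using ata_bmdma_port_ops and ATA_BMDMA_SHT, while PIO-only SFF drivers lose the dependency. A sketch of the driver-side pattern (driver name illustrative):

#include <linux/libata.h>

#ifdef CONFIG_ATA_BMDMA
static struct scsi_host_template example_sht = {
	ATA_BMDMA_SHT("pata_example"),
};

static struct ata_port_operations example_port_ops = {
	.inherits	= &ata_bmdma_port_ops,
};
#endif /* CONFIG_ATA_BMDMA */
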
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 05894795fdc1..9411d32840b0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -90,7 +90,8 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
90extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem); 90extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);
91 91
92extern int 92extern int
93mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr); 93mem_cgroup_prepare_migration(struct page *page,
94 struct page *newpage, struct mem_cgroup **ptr);
94extern void mem_cgroup_end_migration(struct mem_cgroup *mem, 95extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
95 struct page *oldpage, struct page *newpage); 96 struct page *oldpage, struct page *newpage);
96 97
@@ -227,7 +228,8 @@ static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
227} 228}
228 229
229static inline int 230static inline int
230mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) 231mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
232 struct mem_cgroup **ptr)
231{ 233{
232 return 0; 234 return 0;
233} 235}
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
index e3c4ff8c3e38..bfd23bef7363 100644
--- a/include/linux/mfd/88pm860x.h
+++ b/include/linux/mfd/88pm860x.h
@@ -370,7 +370,7 @@ extern int pm860x_set_bits(struct i2c_client *, int, unsigned char,
370 unsigned char); 370 unsigned char);
371 371
372extern int pm860x_device_init(struct pm860x_chip *chip, 372extern int pm860x_device_init(struct pm860x_chip *chip,
373 struct pm860x_platform_data *pdata); 373 struct pm860x_platform_data *pdata) __devinit ;
374extern void pm860x_device_exit(struct pm860x_chip *chip); 374extern void pm860x_device_exit(struct pm860x_chip *chip) __devexit ;
375 375
376#endif /* __LINUX_MFD_88PM860X_H */ 376#endif /* __LINUX_MFD_88PM860X_H */
diff --git a/include/linux/mfd/ab4500.h b/include/linux/mfd/ab4500.h
deleted file mode 100644
index a42a7033ae53..000000000000
--- a/include/linux/mfd/ab4500.h
+++ /dev/null
@@ -1,262 +0,0 @@
1/*
2 * Copyright (C) 2009 ST-Ericsson
3 *
4 * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2, as
8 * published by the Free Software Foundation.
9 *
10 * AB4500 device core functions, for client access
11 */
12#ifndef MFD_AB4500_H
13#define MFD_AB4500_H
14
15#include <linux/device.h>
16
17/*
18 * AB4500 bank addresses
19 */
20#define AB4500_SYS_CTRL1_BLOCK 0x1
21#define AB4500_SYS_CTRL2_BLOCK 0x2
22#define AB4500_REGU_CTRL1 0x3
23#define AB4500_REGU_CTRL2 0x4
24#define AB4500_USB 0x5
25#define AB4500_TVOUT 0x6
26#define AB4500_DBI 0x7
27#define AB4500_ECI_AV_ACC 0x8
28#define AB4500_RESERVED 0x9
29#define AB4500_GPADC 0xA
30#define AB4500_CHARGER 0xB
31#define AB4500_GAS_GAUGE 0xC
32#define AB4500_AUDIO 0xD
33#define AB4500_INTERRUPT 0xE
34#define AB4500_RTC 0xF
35#define AB4500_MISC 0x10
36#define AB4500_DEBUG 0x12
37#define AB4500_PROD_TEST 0x13
38#define AB4500_OTP_EMUL 0x15
39
40/*
41 * System control 1 register offsets.
42 * Bank = 0x01
43 */
44#define AB4500_TURNON_STAT_REG 0x0100
45#define AB4500_RESET_STAT_REG 0x0101
46#define AB4500_PONKEY1_PRESS_STAT_REG 0x0102
47
48#define AB4500_FSM_STAT1_REG 0x0140
49#define AB4500_FSM_STAT2_REG 0x0141
50#define AB4500_SYSCLK_REQ_STAT_REG 0x0142
51#define AB4500_USB_STAT1_REG 0x0143
52#define AB4500_USB_STAT2_REG 0x0144
53#define AB4500_STATUS_SPARE1_REG 0x0145
54#define AB4500_STATUS_SPARE2_REG 0x0146
55
56#define AB4500_CTRL1_REG 0x0180
57#define AB4500_CTRL2_REG 0x0181
58
59/*
60 * System control 2 register offsets.
61 * bank = 0x02
62 */
63#define AB4500_CTRL3_REG 0x0200
64#define AB4500_MAIN_WDOG_CTRL_REG 0x0201
65#define AB4500_MAIN_WDOG_TIMER_REG 0x0202
66#define AB4500_LOW_BAT_REG 0x0203
67#define AB4500_BATT_OK_REG 0x0204
68#define AB4500_SYSCLK_TIMER_REG 0x0205
69#define AB4500_SMPSCLK_CTRL_REG 0x0206
70#define AB4500_SMPSCLK_SEL1_REG 0x0207
71#define AB4500_SMPSCLK_SEL2_REG 0x0208
72#define AB4500_SMPSCLK_SEL3_REG 0x0209
73#define AB4500_SYSULPCLK_CONF_REG 0x020A
74#define AB4500_SYSULPCLK_CTRL1_REG 0x020B
75#define AB4500_SYSCLK_CTRL_REG 0x020C
76#define AB4500_SYSCLK_REQ1_VALID_REG 0x020D
77#define AB4500_SYSCLK_REQ_VALID_REG 0x020E
78#define AB4500_SYSCTRL_SPARE_REG 0x020F
79#define AB4500_PAD_CONF_REG 0x0210
80
81/*
82 * Regu control1 register offsets
83 * Bank = 0x03
84 */
85#define AB4500_REGU_SERIAL_CTRL1_REG 0x0300
86#define AB4500_REGU_SERIAL_CTRL2_REG 0x0301
87#define AB4500_REGU_SERIAL_CTRL3_REG 0x0302
88#define AB4500_REGU_REQ_CTRL1_REG 0x0303
89#define AB4500_REGU_REQ_CTRL2_REG 0x0304
90#define AB4500_REGU_REQ_CTRL3_REG 0x0305
91#define AB4500_REGU_REQ_CTRL4_REG 0x0306
92#define AB4500_REGU_MISC1_REG 0x0380
93#define AB4500_REGU_OTGSUPPLY_CTRL_REG 0x0381
94#define AB4500_REGU_VUSB_CTRL_REG 0x0382
95#define AB4500_REGU_VAUDIO_SUPPLY_REG 0x0383
96#define AB4500_REGU_CTRL1_SPARE_REG 0x0384
97
98/*
99 * Regu control2 Vmod register offsets
100 */
101#define AB4500_REGU_VMOD_REGU_REG 0x0440
102#define AB4500_REGU_VMOD_SEL1_REG 0x0441
103#define AB4500_REGU_VMOD_SEL2_REG 0x0442
104#define AB4500_REGU_CTRL_DISCH_REG 0x0443
105#define AB4500_REGU_CTRL_DISCH2_REG 0x0444
106
107/*
108 * USB/ULPI register offsets
109 * Bank : 0x5
110 */
111#define AB4500_USB_LINE_STAT_REG 0x0580
112#define AB4500_USB_LINE_CTRL1_REG 0x0581
113#define AB4500_USB_LINE_CTRL2_REG 0x0582
114#define AB4500_USB_LINE_CTRL3_REG 0x0583
115#define AB4500_USB_LINE_CTRL4_REG 0x0584
116#define AB4500_USB_LINE_CTRL5_REG 0x0585
117#define AB4500_USB_OTG_CTRL_REG 0x0587
118#define AB4500_USB_OTG_STAT_REG 0x0588
119#define AB4500_USB_OTG_STAT_REG 0x0588
120#define AB4500_USB_CTRL_SPARE_REG 0x0589
121#define AB4500_USB_PHY_CTRL_REG 0x058A
122
123/*
124 * TVOUT / CTRL register offsets
125 * Bank : 0x06
126 */
127#define AB4500_TVOUT_CTRL_REG 0x0680
128
129/*
130 * DBI register offsets
131 * Bank : 0x07
132 */
133#define AB4500_DBI_REG1_REG 0x0700
134#define AB4500_DBI_REG2_REG 0x0701
135
136/*
137 * ECI register offsets
138 * Bank : 0x08
139 */
140#define AB4500_ECI_CTRL_REG 0x0800
141#define AB4500_ECI_HOOKLEVEL_REG 0x0801
142#define AB4500_ECI_DATAOUT_REG 0x0802
143#define AB4500_ECI_DATAIN_REG 0x0803
144
145/*
146 * AV Connector register offsets
147 * Bank : 0x08
148 */
149#define AB4500_AV_CONN_REG 0x0840
150
151/*
152 * Accessory detection register offsets
153 * Bank : 0x08
154 */
155#define AB4500_ACC_DET_DB1_REG 0x0880
156#define AB4500_ACC_DET_DB2_REG 0x0881
157
158/*
159 * GPADC register offsets
160 * Bank : 0x0A
161 */
162#define AB4500_GPADC_CTRL1_REG 0x0A00
163#define AB4500_GPADC_CTRL2_REG 0x0A01
164#define AB4500_GPADC_CTRL3_REG 0x0A02
165#define AB4500_GPADC_AUTO_TIMER_REG 0x0A03
166#define AB4500_GPADC_STAT_REG 0x0A04
167#define AB4500_GPADC_MANDATAL_REG 0x0A05
168#define AB4500_GPADC_MANDATAH_REG 0x0A06
169#define AB4500_GPADC_AUTODATAL_REG 0x0A07
170#define AB4500_GPADC_AUTODATAH_REG 0x0A08
171#define AB4500_GPADC_MUX_CTRL_REG 0x0A09
172
173/*
174 * Charger / status register offsets
175 * Bank : 0x0B
176 */
177#define AB4500_CH_STATUS1_REG 0x0B00
178#define AB4500_CH_STATUS2_REG 0x0B01
179#define AB4500_CH_USBCH_STAT1_REG 0x0B02
180#define AB4500_CH_USBCH_STAT2_REG 0x0B03
181#define AB4500_CH_FSM_STAT_REG 0x0B04
182#define AB4500_CH_STAT_REG 0x0B05
183
184/*
185 * Charger / control register offsets
186 * Bank : 0x0B
187 */
188#define AB4500_CH_VOLT_LVL_REG 0x0B40
189
190/*
191 * Charger / main control register offsets
192 * Bank : 0x0B
193 */
194#define AB4500_MCH_CTRL1 0x0B80
195#define AB4500_MCH_CTRL2 0x0B81
196#define AB4500_MCH_IPT_CURLVL_REG 0x0B82
197#define AB4500_CH_WD_REG 0x0B83
198
199/*
200 * Charger / USB control register offsets
201 * Bank : 0x0B
202 */
203#define AB4500_USBCH_CTRL1_REG 0x0BC0
204#define AB4500_USBCH_CTRL2_REG 0x0BC1
205#define AB4500_USBCH_IPT_CRNTLVL_REG 0x0BC2
206
207/*
208 * RTC bank register offsets
209 * Bank : 0xF
210 */
211#define AB4500_RTC_SOFF_STAT_REG 0x0F00
212#define AB4500_RTC_CC_CONF_REG 0x0F01
213#define AB4500_RTC_READ_REQ_REG 0x0F02
214#define AB4500_RTC_WATCH_TSECMID_REG 0x0F03
215#define AB4500_RTC_WATCH_TSECHI_REG 0x0F04
216#define AB4500_RTC_WATCH_TMIN_LOW_REG 0x0F05
217#define AB4500_RTC_WATCH_TMIN_MID_REG 0x0F06
218#define AB4500_RTC_WATCH_TMIN_HI_REG 0x0F07
219#define AB4500_RTC_ALRM_MIN_LOW_REG 0x0F08
220#define AB4500_RTC_ALRM_MIN_MID_REG 0x0F09
221#define AB4500_RTC_ALRM_MIN_HI_REG 0x0F0A
222#define AB4500_RTC_STAT_REG 0x0F0B
223#define AB4500_RTC_BKUP_CHG_REG 0x0F0C
224#define AB4500_RTC_FORCE_BKUP_REG 0x0F0D
225#define AB4500_RTC_CALIB_REG 0x0F0E
226#define AB4500_RTC_SWITCH_STAT_REG 0x0F0F
227
228/*
229 * PWM Out generators
230 * Bank: 0x10
231 */
232#define AB4500_PWM_OUT_CTRL1_REG 0x1060
233#define AB4500_PWM_OUT_CTRL2_REG 0x1061
234#define AB4500_PWM_OUT_CTRL3_REG 0x1062
235#define AB4500_PWM_OUT_CTRL4_REG 0x1063
236#define AB4500_PWM_OUT_CTRL5_REG 0x1064
237#define AB4500_PWM_OUT_CTRL6_REG 0x1065
238#define AB4500_PWM_OUT_CTRL7_REG 0x1066
239
240#define AB4500_I2C_PAD_CTRL_REG 0x1067
241#define AB4500_REV_REG 0x1080
242
243/**
244 * struct ab4500
245 * @spi: spi device structure
246 * @tx_buf: transmit buffer
247 * @rx_buf: receive buffer
248 * @lock: sync primitive
249 */
250struct ab4500 {
251 struct spi_device *spi;
252 unsigned long tx_buf[4];
253 unsigned long rx_buf[4];
254 struct mutex lock;
255};
256
257int ab4500_write(struct ab4500 *ab4500, unsigned char block,
258 unsigned long addr, unsigned char data);
259int ab4500_read(struct ab4500 *ab4500, unsigned char block,
260 unsigned long addr);
261
262#endif /* MFD_AB4500_H */
diff --git a/include/linux/mfd/ab8500.h b/include/linux/mfd/ab8500.h
new file mode 100644
index 000000000000..b63ff3ba3351
--- /dev/null
+++ b/include/linux/mfd/ab8500.h
@@ -0,0 +1,128 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License v2
5 * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
6 */
7#ifndef MFD_AB8500_H
8#define MFD_AB8500_H
9
10#include <linux/device.h>
11
12/*
13 * Interrupts
14 */
15
16#define AB8500_INT_MAIN_EXT_CH_NOT_OK 0
17#define AB8500_INT_UN_PLUG_TV_DET 1
18#define AB8500_INT_PLUG_TV_DET 2
19#define AB8500_INT_TEMP_WARM 3
20#define AB8500_INT_PON_KEY2DB_F 4
21#define AB8500_INT_PON_KEY2DB_R 5
22#define AB8500_INT_PON_KEY1DB_F 6
23#define AB8500_INT_PON_KEY1DB_R 7
24#define AB8500_INT_BATT_OVV 8
25#define AB8500_INT_MAIN_CH_UNPLUG_DET 10
26#define AB8500_INT_MAIN_CH_PLUG_DET 11
27#define AB8500_INT_USB_ID_DET_F 12
28#define AB8500_INT_USB_ID_DET_R 13
29#define AB8500_INT_VBUS_DET_F 14
30#define AB8500_INT_VBUS_DET_R 15
31#define AB8500_INT_VBUS_CH_DROP_END 16
32#define AB8500_INT_RTC_60S 17
33#define AB8500_INT_RTC_ALARM 18
34#define AB8500_INT_BAT_CTRL_INDB 20
35#define AB8500_INT_CH_WD_EXP 21
36#define AB8500_INT_VBUS_OVV 22
37#define AB8500_INT_MAIN_CH_DROP_END 23
38#define AB8500_INT_CCN_CONV_ACC 24
39#define AB8500_INT_INT_AUD 25
40#define AB8500_INT_CCEOC 26
41#define AB8500_INT_CC_INT_CALIB 27
42#define AB8500_INT_LOW_BAT_F 28
43#define AB8500_INT_LOW_BAT_R 29
44#define AB8500_INT_BUP_CHG_NOT_OK 30
45#define AB8500_INT_BUP_CHG_OK 31
46#define AB8500_INT_GP_HW_ADC_CONV_END 32
47#define AB8500_INT_ACC_DETECT_1DB_F 33
48#define AB8500_INT_ACC_DETECT_1DB_R 34
49#define AB8500_INT_ACC_DETECT_22DB_F 35
50#define AB8500_INT_ACC_DETECT_22DB_R 36
51#define AB8500_INT_ACC_DETECT_21DB_F 37
52#define AB8500_INT_ACC_DETECT_21DB_R 38
53#define AB8500_INT_GP_SW_ADC_CONV_END 39
54#define AB8500_INT_BTEMP_LOW 72
55#define AB8500_INT_BTEMP_LOW_MEDIUM 73
56#define AB8500_INT_BTEMP_MEDIUM_HIGH 74
57#define AB8500_INT_BTEMP_HIGH 75
58#define AB8500_INT_USB_CHARGER_NOT_OK 81
59#define AB8500_INT_ID_WAKEUP_R 82
60#define AB8500_INT_ID_DET_R1R 84
61#define AB8500_INT_ID_DET_R2R 85
62#define AB8500_INT_ID_DET_R3R 86
63#define AB8500_INT_ID_DET_R4R 87
64#define AB8500_INT_ID_WAKEUP_F 88
65#define AB8500_INT_ID_DET_R1F 90
66#define AB8500_INT_ID_DET_R2F 91
67#define AB8500_INT_ID_DET_R3F 92
68#define AB8500_INT_ID_DET_R4F 93
69#define AB8500_INT_USB_CHG_DET_DONE 94
70#define AB8500_INT_USB_CH_TH_PROT_F 96
71#define AB8500_INT_USB_CH_TH_PROP_R 97
72#define AB8500_INT_MAIN_CH_TH_PROP_F 98
73#define AB8500_INT_MAIN_CH_TH_PROT_R 99
74#define AB8500_INT_USB_CHARGER_NOT_OKF 103
75
76#define AB8500_NR_IRQS 104
77#define AB8500_NUM_IRQ_REGS 13
78
79/**
80 * struct ab8500 - ab8500 internal structure
81 * @dev: parent device
82 * @lock: read/write operations lock
83 * @irq_lock: genirq bus lock
84 * @revision: chip revision
85 * @irq: irq line
86 * @write: register write
87 * @read: register read
88 * @rx_buf: rx buf for SPI
89 * @tx_buf: tx buf for SPI
90 * @mask: cache of IRQ regs for bus lock
91 * @oldmask: cache of previous IRQ regs for bus lock
92 */
93struct ab8500 {
94 struct device *dev;
95 struct mutex lock;
96 struct mutex irq_lock;
97 int revision;
98 int irq_base;
99 int irq;
100
101 int (*write) (struct ab8500 *a8500, u16 addr, u8 data);
102 int (*read) (struct ab8500 *a8500, u16 addr);
103
104 unsigned long tx_buf[4];
105 unsigned long rx_buf[4];
106
107 u8 mask[AB8500_NUM_IRQ_REGS];
108 u8 oldmask[AB8500_NUM_IRQ_REGS];
109};
110
111/**
112 * struct ab8500_platform_data - AB8500 platform data
113 * @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used
114 * @init: board-specific initialization after detection of ab8500
115 */
116struct ab8500_platform_data {
117 int irq_base;
118 void (*init) (struct ab8500 *);
119};
120
121extern int ab8500_write(struct ab8500 *a8500, u16 addr, u8 data);
122extern int ab8500_read(struct ab8500 *a8500, u16 addr);
123extern int ab8500_set_bits(struct ab8500 *a8500, u16 addr, u8 mask, u8 data);
124
125extern int __devinit ab8500_init(struct ab8500 *ab8500);
126extern int __devexit ab8500_exit(struct ab8500 *ab8500);
127
128#endif /* MFD_AB8500_H */
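
Boards describe the chip with ab8500_platform_data; the init() hook runs once the device has been detected. A sketch with an assumed IRQ base:

#include <linux/mfd/ab8500.h>

/* Illustrative board hook-up; BOARD_AB8500_IRQ_BASE is an assumed
 * platform-specific constant, not defined by this header. */
#define BOARD_AB8500_IRQ_BASE	400

static void board_ab8500_init(struct ab8500 *ab8500)
{
	/* board specific setup once the chip has been detected */
}

static struct ab8500_platform_data board_ab8500_pdata = {
	.irq_base	= BOARD_AB8500_IRQ_BASE,
	.init		= board_ab8500_init,
};
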
diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/abx500.h
index 9a881c305a50..390726fcbcb1 100644
--- a/include/linux/mfd/ab3100.h
+++ b/include/linux/mfd/abx500.h
@@ -3,17 +3,37 @@
3 * License terms: GNU General Public License (GPL) version 2 3 * License terms: GNU General Public License (GPL) version 2
4 * AB3100 core access functions 4 * AB3100 core access functions
5 * Author: Linus Walleij <linus.walleij@stericsson.com> 5 * Author: Linus Walleij <linus.walleij@stericsson.com>
6 *
7 * ABX500 core access functions.
8 * The abx500 interface is used for the Analog Baseband chip
 9 * ab3100, ab3550, ab5500 and possibly forthcoming chips. It is not used for
 10 * ab4500 and ab8500 since they are another family of chips.
11 *
12 * Author: Mattias Wallin <mattias.wallin@stericsson.com>
13 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
14 * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
15 * Author: Rickard Andersson <rickard.andersson@stericsson.com>
6 */ 16 */
7 17
8#include <linux/device.h> 18#include <linux/device.h>
9#include <linux/regulator/machine.h> 19#include <linux/regulator/machine.h>
10 20
11#ifndef MFD_AB3100_H 21#ifndef MFD_ABX500_H
12#define MFD_AB3100_H 22#define MFD_ABX500_H
13 23
14#define ABUNKNOWN 0 24#define AB3100_P1A 0xc0
15#define AB3000 1 25#define AB3100_P1B 0xc1
16#define AB3100 2 26#define AB3100_P1C 0xc2
27#define AB3100_P1D 0xc3
28#define AB3100_P1E 0xc4
29#define AB3100_P1F 0xc5
30#define AB3100_P1G 0xc6
31#define AB3100_R2A 0xc7
32#define AB3100_R2B 0xc8
33#define AB3550_P1A 0x10
34#define AB5500_1_0 0x20
35#define AB5500_2_0 0x21
36#define AB5500_2_1 0x22
17 37
18/* 38/*
19 * AB3100, EVENTA1, A2 and A3 event register flags 39 * AB3100, EVENTA1, A2 and A3 event register flags
@@ -89,7 +109,7 @@ struct ab3100 {
89 char chip_name[32]; 109 char chip_name[32];
90 u8 chip_id; 110 u8 chip_id;
91 struct blocking_notifier_head event_subscribers; 111 struct blocking_notifier_head event_subscribers;
92 u32 startup_events; 112 u8 startup_events[3];
93 bool startup_events_read; 113 bool startup_events_read;
94}; 114};
95 115
@@ -112,18 +132,102 @@ struct ab3100_platform_data {
112 int external_voltage; 132 int external_voltage;
113}; 133};
114 134
115int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval);
116int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval);
117int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
118 u8 first_reg, u8 *regvals, u8 numregs);
119int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
120 u8 reg, u8 andmask, u8 ormask);
121u8 ab3100_get_chip_type(struct ab3100 *ab3100);
122int ab3100_event_register(struct ab3100 *ab3100, 135int ab3100_event_register(struct ab3100 *ab3100,
123 struct notifier_block *nb); 136 struct notifier_block *nb);
124int ab3100_event_unregister(struct ab3100 *ab3100, 137int ab3100_event_unregister(struct ab3100 *ab3100,
125 struct notifier_block *nb); 138 struct notifier_block *nb);
126int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100,
127 u32 *fatevent);
128 139
140/* AB3550, STR register flags */
141#define AB3550_STR_ONSWA (0x01)
142#define AB3550_STR_ONSWB (0x02)
143#define AB3550_STR_ONSWC (0x04)
144#define AB3550_STR_DCIO (0x08)
145#define AB3550_STR_BOOT_MODE (0x10)
146#define AB3550_STR_SIM_OFF (0x20)
147#define AB3550_STR_BATT_REMOVAL (0x40)
148#define AB3550_STR_VBUS (0x80)
149
150/* Interrupt mask registers */
151#define AB3550_IMR1 0x29
152#define AB3550_IMR2 0x2a
153#define AB3550_IMR3 0x2b
154#define AB3550_IMR4 0x2c
155#define AB3550_IMR5 0x2d
156
157enum ab3550_devid {
158 AB3550_DEVID_ADC,
159 AB3550_DEVID_DAC,
160 AB3550_DEVID_LEDS,
161 AB3550_DEVID_POWER,
162 AB3550_DEVID_REGULATORS,
163 AB3550_DEVID_SIM,
164 AB3550_DEVID_UART,
165 AB3550_DEVID_RTC,
166 AB3550_DEVID_CHARGER,
167 AB3550_DEVID_FUELGAUGE,
168 AB3550_DEVID_VIBRATOR,
169 AB3550_DEVID_CODEC,
170 AB3550_NUM_DEVICES,
171};
172
173/**
174 * struct abx500_init_setting
175 * Initial value of the registers for driver to use during setup.
176 */
177struct abx500_init_settings {
178 u8 bank;
179 u8 reg;
180 u8 setting;
181};
182
183/**
184 * struct ab3550_platform_data
185 * Data supplied to initialize board connections to the AB3550
186 */
187struct ab3550_platform_data {
188 struct {unsigned int base; unsigned int count; } irq;
189 void *dev_data[AB3550_NUM_DEVICES];
190 size_t dev_data_sz[AB3550_NUM_DEVICES];
191 struct abx500_init_settings *init_settings;
192 unsigned int init_settings_sz;
193};
194
195int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
196 u8 value);
197int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
198 u8 *value);
199int abx500_get_register_page_interruptible(struct device *dev, u8 bank,
200 u8 first_reg, u8 *regvals, u8 numregs);
201int abx500_set_register_page_interruptible(struct device *dev, u8 bank,
202 u8 first_reg, u8 *regvals, u8 numregs);
203/**
 204 * abx500_mask_and_set_register_interruptible() - Modifies selected bits of a
205 * target register
206 *
207 * @dev: The AB sub device.
208 * @bank: The i2c bank number.
209 * @bitmask: The bit mask to use.
210 * @bitvalues: The new bit values.
211 *
212 * Updates the value of an AB register:
213 * value -> ((value & ~bitmask) | (bitvalues & bitmask))
214 */
215int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank,
216 u8 reg, u8 bitmask, u8 bitvalues);
217int abx500_get_chip_id(struct device *dev);
218int abx500_event_registers_startup_state_get(struct device *dev, u8 *event);
219int abx500_startup_irq_enabled(struct device *dev, unsigned int irq);
220
221struct abx500_ops {
222 int (*get_chip_id) (struct device *);
223 int (*get_register) (struct device *, u8, u8, u8 *);
224 int (*set_register) (struct device *, u8, u8, u8);
225 int (*get_register_page) (struct device *, u8, u8, u8 *, u8);
226 int (*set_register_page) (struct device *, u8, u8, u8 *, u8);
227 int (*mask_and_set_register) (struct device *, u8, u8, u8, u8);
228 int (*event_registers_startup_state_get) (struct device *, u8 *);
229 int (*startup_irq_enabled) (struct device *, unsigned int);
230};
231
232int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
129#endif 233#endif
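
Client drivers now go through the abx500 ops that the core registers with abx500_register_ops(), addressing registers by (bank, reg) pairs instead of calling the ab3100_* accessors directly. A sketch of a sub-driver read-modify-write (bank, register and bit values made up for the example):

#include <linux/device.h>
#include <linux/mfd/abx500.h>

static int example_enable_feature(struct device *dev)
{
	int chip_id = abx500_get_chip_id(dev);

	if (chip_id < 0)
		return chip_id;

	/* set bit 0 of a made-up register, leaving the other bits alone */
	return abx500_mask_and_set_register_interruptible(dev, 0x02, 0x10,
							  0x01, 0x01);
}
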
diff --git a/include/linux/mfd/janz.h b/include/linux/mfd/janz.h
new file mode 100644
index 000000000000..e9994c469803
--- /dev/null
+++ b/include/linux/mfd/janz.h
@@ -0,0 +1,54 @@
1/*
2 * Common Definitions for Janz MODULbus devices
3 *
4 * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#ifndef JANZ_H
13#define JANZ_H
14
15struct janz_platform_data {
16 /* MODULbus Module Number */
17 unsigned int modno;
18};
19
20/* PLX bridge chip onboard registers */
21struct janz_cmodio_onboard_regs {
22 u8 unused1;
23
24 /*
25 * Read access: interrupt status
26 * Write access: interrupt disable
27 */
28 u8 int_disable;
29 u8 unused2;
30
31 /*
32 * Read access: MODULbus number (hex switch)
33 * Write access: interrupt enable
34 */
35 u8 int_enable;
36 u8 unused3;
37
38 /* write-only */
39 u8 reset_assert;
40 u8 unused4;
41
42 /* write-only */
43 u8 reset_deassert;
44 u8 unused5;
45
46 /* read-write access to serial EEPROM */
47 u8 eep;
48 u8 unused6;
49
50 /* write-only access to EEPROM chip select */
51 u8 enid;
52};
53
54#endif /* JANZ_H */
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
index 8895d9d8879c..4a894f688549 100644
--- a/include/linux/mfd/mc13783.h
+++ b/include/linux/mfd/mc13783.h
@@ -64,6 +64,70 @@ static inline int mc13783_ackirq(struct mc13783 *mc13783, int irq)
64 MC13783_ADC0_TSMOD1 | \ 64 MC13783_ADC0_TSMOD1 | \
65 MC13783_ADC0_TSMOD2) 65 MC13783_ADC0_TSMOD2)
66 66
67struct mc13783_led_platform_data {
68#define MC13783_LED_MD 0
69#define MC13783_LED_AD 1
70#define MC13783_LED_KP 2
71#define MC13783_LED_R1 3
72#define MC13783_LED_G1 4
73#define MC13783_LED_B1 5
74#define MC13783_LED_R2 6
75#define MC13783_LED_G2 7
76#define MC13783_LED_B2 8
77#define MC13783_LED_R3 9
78#define MC13783_LED_G3 10
79#define MC13783_LED_B3 11
80#define MC13783_LED_MAX MC13783_LED_B3
81 int id;
82 const char *name;
83 const char *default_trigger;
84
 85/* Two or three bit current selection, depending on the led */
86 char max_current;
87};
88
89struct mc13783_leds_platform_data {
90 int num_leds;
91 struct mc13783_led_platform_data *led;
92
93#define MC13783_LED_TRIODE_MD (1 << 0)
94#define MC13783_LED_TRIODE_AD (1 << 1)
95#define MC13783_LED_TRIODE_KP (1 << 2)
96#define MC13783_LED_BOOST_EN (1 << 3)
97#define MC13783_LED_TC1HALF (1 << 4)
98#define MC13783_LED_SLEWLIMTC (1 << 5)
99#define MC13783_LED_SLEWLIMBL (1 << 6)
100#define MC13783_LED_TRIODE_TC1 (1 << 7)
101#define MC13783_LED_TRIODE_TC2 (1 << 8)
102#define MC13783_LED_TRIODE_TC3 (1 << 9)
103 int flags;
104
105#define MC13783_LED_AB_DISABLED 0
106#define MC13783_LED_AB_MD1 1
107#define MC13783_LED_AB_MD12 2
108#define MC13783_LED_AB_MD123 3
109#define MC13783_LED_AB_MD1234 4
110#define MC13783_LED_AB_MD1234_AD1 5
111#define MC13783_LED_AB_MD1234_AD12 6
112#define MC13783_LED_AB_MD1_AD 7
113 char abmode;
114
115#define MC13783_LED_ABREF_200MV 0
116#define MC13783_LED_ABREF_400MV 1
117#define MC13783_LED_ABREF_600MV 2
118#define MC13783_LED_ABREF_800MV 3
119 char abref;
120
121#define MC13783_LED_PERIOD_10MS 0
122#define MC13783_LED_PERIOD_100MS 1
123#define MC13783_LED_PERIOD_500MS 2
124#define MC13783_LED_PERIOD_2S 3
125 char bl_period;
126 char tc1_period;
127 char tc2_period;
128 char tc3_period;
129};
130
67/* to be cleaned up */ 131/* to be cleaned up */
68struct regulator_init_data; 132struct regulator_init_data;
69 133
@@ -80,12 +144,14 @@ struct mc13783_regulator_platform_data {
80struct mc13783_platform_data { 144struct mc13783_platform_data {
81 int num_regulators; 145 int num_regulators;
82 struct mc13783_regulator_init_data *regulators; 146 struct mc13783_regulator_init_data *regulators;
147 struct mc13783_leds_platform_data *leds;
83 148
84#define MC13783_USE_TOUCHSCREEN (1 << 0) 149#define MC13783_USE_TOUCHSCREEN (1 << 0)
85#define MC13783_USE_CODEC (1 << 1) 150#define MC13783_USE_CODEC (1 << 1)
86#define MC13783_USE_ADC (1 << 2) 151#define MC13783_USE_ADC (1 << 2)
87#define MC13783_USE_RTC (1 << 3) 152#define MC13783_USE_RTC (1 << 3)
88#define MC13783_USE_REGULATOR (1 << 4) 153#define MC13783_USE_REGULATOR (1 << 4)
154#define MC13783_USE_LED (1 << 5)
89 unsigned int flags; 155 unsigned int flags;
90}; 156};
91 157
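
LED support is configured through the new leds platform data and enabled with the MC13783_USE_LED flag. A board sketch with assumed LED ids, currents and periods:

#include <linux/kernel.h>
#include <linux/mfd/mc13783.h>

static struct mc13783_led_platform_data board_mc13783_leds[] = {
	{
		.id		= MC13783_LED_R1,
		.name		= "board:red:status",
		.max_current	= 2,
	},
};

static struct mc13783_leds_platform_data board_mc13783_leds_pdata = {
	.num_leds	= ARRAY_SIZE(board_mc13783_leds),
	.led		= board_mc13783_leds,
	.flags		= MC13783_LED_TRIODE_TC1,
	.abmode		= MC13783_LED_AB_DISABLED,
	.bl_period	= MC13783_LED_PERIOD_10MS,
	.tc1_period	= MC13783_LED_PERIOD_100MS,
};

static struct mc13783_platform_data board_mc13783_pdata = {
	.leds	= &board_mc13783_leds_pdata,
	.flags	= MC13783_USE_LED | MC13783_USE_RTC,
};
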
diff --git a/include/linux/mfd/pcf50633/backlight.h b/include/linux/mfd/pcf50633/backlight.h
new file mode 100644
index 000000000000..83747e217b27
--- /dev/null
+++ b/include/linux/mfd/pcf50633/backlight.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
3 * PCF50633 backlight device driver
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * You should have received a copy of the GNU General Public License along
11 * with this program; if not, write to the Free Software Foundation, Inc.,
12 * 675 Mass Ave, Cambridge, MA 02139, USA.
13 *
14 */
15
16#ifndef __LINUX_MFD_PCF50633_BACKLIGHT
17#define __LINUX_MFD_PCF50633_BACKLIGHT
18
19/*
20* @default_brightness: Backlight brightness is initialized to this value
21*
22* Brightness to be used after the driver has been probed.
23* Valid range 0-63.
24*
25* @default_brightness_limit: The actual brightness is limited by this value
26*
27* Brightness limit to be used after the driver has been probed. This is useful
28* when it is not known how much power is available for the backlight during
29* probe.
30* Valid range 0-63. Can be changed later with pcf50633_bl_set_brightness_limit.
31*
32* @ramp_time: Display ramp time when changing brightness
33*
 34* When changing the backlight's brightness the change is not instant; instead
 35* it fades smoothly from one state to another. This value specifies how long
36* the fade should take. The lower the value the higher the fade time.
37* Valid range 0-255
38*/
39struct pcf50633_bl_platform_data {
40 unsigned int default_brightness;
41 unsigned int default_brightness_limit;
42 uint8_t ramp_time;
43};
44
45
46struct pcf50633;
47
48int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit);
49
50#endif
51
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
index 3398bd9aab11..ad411a78870c 100644
--- a/include/linux/mfd/pcf50633/core.h
+++ b/include/linux/mfd/pcf50633/core.h
@@ -18,6 +18,7 @@
18#include <linux/regulator/driver.h> 18#include <linux/regulator/driver.h>
19#include <linux/regulator/machine.h> 19#include <linux/regulator/machine.h>
20#include <linux/power_supply.h> 20#include <linux/power_supply.h>
21#include <linux/mfd/pcf50633/backlight.h>
21 22
22struct pcf50633; 23struct pcf50633;
23 24
@@ -43,6 +44,8 @@ struct pcf50633_platform_data {
43 void (*force_shutdown)(struct pcf50633 *); 44 void (*force_shutdown)(struct pcf50633 *);
44 45
45 u8 resumers[5]; 46 u8 resumers[5];
47
48 struct pcf50633_bl_platform_data *backlight_data;
46}; 49};
47 50
48struct pcf50633_irq { 51struct pcf50633_irq {
@@ -152,6 +155,7 @@ struct pcf50633 {
152 struct platform_device *mbc_pdev; 155 struct platform_device *mbc_pdev;
153 struct platform_device *adc_pdev; 156 struct platform_device *adc_pdev;
154 struct platform_device *input_pdev; 157 struct platform_device *input_pdev;
158 struct platform_device *bl_pdev;
155 struct platform_device *regulator_pdev[PCF50633_NUM_REGULATORS]; 159 struct platform_device *regulator_pdev[PCF50633_NUM_REGULATORS];
156}; 160};
157 161
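
The backlight sub-device gets its defaults from pcf50633_bl_platform_data, which board code hangs off the new backlight_data pointer. A sketch with assumed brightness and ramp values:

#include <linux/mfd/pcf50633/core.h>

static struct pcf50633_bl_platform_data board_bl_data = {
	.default_brightness		= 0x3f,
	.default_brightness_limit	= 0x3f,
	.ramp_time			= 5,
};

static struct pcf50633_platform_data board_pcf50633_pdata = {
	.backlight_data	= &board_bl_data,
	/* regulator and charger setup omitted in this sketch */
};
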
diff --git a/include/linux/mfd/rdc321x.h b/include/linux/mfd/rdc321x.h
new file mode 100644
index 000000000000..4bdf19c8eedf
--- /dev/null
+++ b/include/linux/mfd/rdc321x.h
@@ -0,0 +1,26 @@
1#ifndef __RDC321X_MFD_H
2#define __RDC321X_MFD_H
3
4#include <linux/types.h>
5#include <linux/pci.h>
6
7/* Offsets to be accessed in the southbridge PCI
8 * device configuration register */
9#define RDC321X_WDT_CTRL 0x44
10#define RDC321X_GPIO_CTRL_REG1 0x48
11#define RDC321X_GPIO_DATA_REG1 0x4c
12#define RDC321X_GPIO_CTRL_REG2 0x84
13#define RDC321X_GPIO_DATA_REG2 0x88
14
15#define RDC321X_MAX_GPIO 58
16
17struct rdc321x_gpio_pdata {
18 struct pci_dev *sb_pdev;
19 unsigned max_gpios;
20};
21
22struct rdc321x_wdt_pdata {
23 struct pci_dev *sb_pdev;
24};
25
26#endif /* __RDC321X_MFD_H */
diff --git a/include/linux/mfd/tc35892.h b/include/linux/mfd/tc35892.h
new file mode 100644
index 000000000000..e47f770d3068
--- /dev/null
+++ b/include/linux/mfd/tc35892.h
@@ -0,0 +1,132 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 *
4 * License Terms: GNU General Public License, version 2
5 */
6
7#ifndef __LINUX_MFD_TC35892_H
8#define __LINUX_MFD_TC35892_H
9
10#include <linux/device.h>
11
12#define TC35892_RSTCTRL_IRQRST (1 << 4)
13#define TC35892_RSTCTRL_TIMRST (1 << 3)
14#define TC35892_RSTCTRL_ROTRST (1 << 2)
15#define TC35892_RSTCTRL_KBDRST (1 << 1)
16#define TC35892_RSTCTRL_GPIRST (1 << 0)
17
18#define TC35892_IRQST 0x91
19
20#define TC35892_MANFCODE_MAGIC 0x03
21#define TC35892_MANFCODE 0x80
22#define TC35892_VERSION 0x81
23#define TC35892_IOCFG 0xA7
24
25#define TC35892_CLKMODE 0x88
26#define TC35892_CLKCFG 0x89
27#define TC35892_CLKEN 0x8A
28
29#define TC35892_RSTCTRL 0x82
30#define TC35892_EXTRSTN 0x83
31#define TC35892_RSTINTCLR 0x84
32
33#define TC35892_GPIOIS0 0xC9
34#define TC35892_GPIOIS1 0xCA
35#define TC35892_GPIOIS2 0xCB
36#define TC35892_GPIOIBE0 0xCC
37#define TC35892_GPIOIBE1 0xCD
38#define TC35892_GPIOIBE2 0xCE
39#define TC35892_GPIOIEV0 0xCF
40#define TC35892_GPIOIEV1 0xD0
41#define TC35892_GPIOIEV2 0xD1
42#define TC35892_GPIOIE0 0xD2
43#define TC35892_GPIOIE1 0xD3
44#define TC35892_GPIOIE2 0xD4
45#define TC35892_GPIORIS0 0xD6
46#define TC35892_GPIORIS1 0xD7
47#define TC35892_GPIORIS2 0xD8
48#define TC35892_GPIOMIS0 0xD9
49#define TC35892_GPIOMIS1 0xDA
50#define TC35892_GPIOMIS2 0xDB
51#define TC35892_GPIOIC0 0xDC
52#define TC35892_GPIOIC1 0xDD
53#define TC35892_GPIOIC2 0xDE
54
55#define TC35892_GPIODATA0 0xC0
56#define TC35892_GPIOMASK0 0xc1
57#define TC35892_GPIODATA1 0xC2
58#define TC35892_GPIOMASK1 0xc3
59#define TC35892_GPIODATA2 0xC4
60#define TC35892_GPIOMASK2 0xC5
61
62#define TC35892_GPIODIR0 0xC6
63#define TC35892_GPIODIR1 0xC7
64#define TC35892_GPIODIR2 0xC8
65
66#define TC35892_GPIOSYNC0 0xE6
67#define TC35892_GPIOSYNC1 0xE7
68#define TC35892_GPIOSYNC2 0xE8
69
70#define TC35892_GPIOWAKE0 0xE9
71#define TC35892_GPIOWAKE1 0xEA
72#define TC35892_GPIOWAKE2 0xEB
73
74#define TC35892_GPIOODM0 0xE0
75#define TC35892_GPIOODE0 0xE1
76#define TC35892_GPIOODM1 0xE2
77#define TC35892_GPIOODE1 0xE3
78#define TC35892_GPIOODM2 0xE4
79#define TC35892_GPIOODE2 0xE5
80
81#define TC35892_INT_GPIIRQ 0
82#define TC35892_INT_TI0IRQ 1
83#define TC35892_INT_TI1IRQ 2
84#define TC35892_INT_TI2IRQ 3
85#define TC35892_INT_ROTIRQ 5
86#define TC35892_INT_KBDIRQ 6
87#define TC35892_INT_PORIRQ 7
88
89#define TC35892_NR_INTERNAL_IRQS 8
90#define TC35892_INT_GPIO(x) (TC35892_NR_INTERNAL_IRQS + (x))
91
92struct tc35892 {
93 struct mutex lock;
94 struct device *dev;
95 struct i2c_client *i2c;
96
97 int irq_base;
98 int num_gpio;
99 struct tc35892_platform_data *pdata;
100};
101
102extern int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data);
103extern int tc35892_reg_read(struct tc35892 *tc35892, u8 reg);
104extern int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length,
105 u8 *values);
106extern int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length,
107 const u8 *values);
108extern int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val);
109
110/**
111 * struct tc35892_gpio_platform_data - TC35892 GPIO platform data
112 * @gpio_base: first gpio number assigned to TC35892. A maximum of
113 * %TC35892_NR_GPIOS GPIOs will be allocated.
114 */
115struct tc35892_gpio_platform_data {
116 int gpio_base;
117};
118
119/**
120 * struct tc35892_platform_data - TC35892 platform data
121 * @irq_base: base IRQ number. %TC35892_NR_IRQS irqs will be used.
122 * @gpio: GPIO-specific platform data
123 */
124struct tc35892_platform_data {
125 int irq_base;
126 struct tc35892_gpio_platform_data *gpio;
127};
128
129#define TC35892_NR_GPIOS 24
130#define TC35892_NR_IRQS TC35892_INT_GPIO(TC35892_NR_GPIOS)
131
132#endif
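
Board code supplies the IRQ base and the GPIO sub-block data; the expander then provides TC35892_NR_GPIOS GPIOs starting at gpio_base. A sketch with illustrative bases and I2C address:

#include <linux/i2c.h>
#include <linux/mfd/tc35892.h>

static struct tc35892_gpio_platform_data board_tc35892_gpio = {
	.gpio_base	= 160,
};

static struct tc35892_platform_data board_tc35892_pdata = {
	.irq_base	= 368,
	.gpio		= &board_tc35892_gpio,
};

static struct i2c_board_info board_tc35892_dev __initdata = {
	I2C_BOARD_INFO("tc35892", 0x42),
	.platform_data	= &board_tc35892_pdata,
};
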
diff --git a/include/linux/mfd/tps6507x.h b/include/linux/mfd/tps6507x.h
new file mode 100644
index 000000000000..c923e4864f55
--- /dev/null
+++ b/include/linux/mfd/tps6507x.h
@@ -0,0 +1,169 @@
1/* linux/mfd/tps6507x.h
2 *
3 * Functions to access TPS65070 power management chip.
4 *
5 * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
6 *
7 *
 8 * For licensing details see kernel-base/COPYING
9 */
10
11#ifndef __LINUX_MFD_TPS6507X_H
12#define __LINUX_MFD_TPS6507X_H
13
14/*
15 * ----------------------------------------------------------------------------
16 * Registers, all 8 bits
17 * ----------------------------------------------------------------------------
18 */
19
20
21/* Register definitions */
22#define TPS6507X_REG_PPATH1 0X01
23#define TPS6507X_CHG_USB BIT(7)
24#define TPS6507X_CHG_AC BIT(6)
25#define TPS6507X_CHG_USB_PW_ENABLE BIT(5)
26#define TPS6507X_CHG_AC_PW_ENABLE BIT(4)
27#define TPS6507X_CHG_AC_CURRENT BIT(2)
28#define TPS6507X_CHG_USB_CURRENT BIT(0)
29
30#define TPS6507X_REG_INT 0X02
31#define TPS6507X_REG_MASK_AC_USB BIT(7)
32#define TPS6507X_REG_MASK_TSC BIT(6)
33#define TPS6507X_REG_MASK_PB_IN BIT(5)
34#define TPS6507X_REG_TSC_INT BIT(3)
35#define TPS6507X_REG_PB_IN_INT BIT(2)
36#define TPS6507X_REG_AC_USB_APPLIED BIT(1)
37#define TPS6507X_REG_AC_USB_REMOVED BIT(0)
38
39#define TPS6507X_REG_CHGCONFIG0 0X03
40
41#define TPS6507X_REG_CHGCONFIG1 0X04
42#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4)
43#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3)
44#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2)
45#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1)
46#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0)
47
48#define TPS6507X_REG_CHGCONFIG2 0X05
49
50#define TPS6507X_REG_CHGCONFIG3 0X06
51
52#define TPS6507X_REG_ADCONFIG 0X07
53#define TPS6507X_ADCONFIG_AD_ENABLE BIT(7)
54#define TPS6507X_ADCONFIG_START_CONVERSION BIT(6)
55#define TPS6507X_ADCONFIG_CONVERSION_DONE BIT(5)
56#define TPS6507X_ADCONFIG_VREF_ENABLE BIT(4)
57#define TPS6507X_ADCONFIG_INPUT_AD_IN1 0
58#define TPS6507X_ADCONFIG_INPUT_AD_IN2 1
59#define TPS6507X_ADCONFIG_INPUT_AD_IN3 2
60#define TPS6507X_ADCONFIG_INPUT_AD_IN4 3
61#define TPS6507X_ADCONFIG_INPUT_TS_PIN 4
62#define TPS6507X_ADCONFIG_INPUT_BAT_CURRENT 5
63#define TPS6507X_ADCONFIG_INPUT_AC_VOLTAGE 6
64#define TPS6507X_ADCONFIG_INPUT_SYS_VOLTAGE 7
65#define TPS6507X_ADCONFIG_INPUT_CHARGER_VOLTAGE 8
66#define TPS6507X_ADCONFIG_INPUT_BAT_VOLTAGE 9
67#define TPS6507X_ADCONFIG_INPUT_THRESHOLD_VOLTAGE 10
68#define TPS6507X_ADCONFIG_INPUT_ISET1_VOLTAGE 11
69#define TPS6507X_ADCONFIG_INPUT_ISET2_VOLTAGE 12
70#define TPS6507X_ADCONFIG_INPUT_REAL_TSC 14
71#define TPS6507X_ADCONFIG_INPUT_TSC 15
72
73#define TPS6507X_REG_TSCMODE 0X08
74#define TPS6507X_TSCMODE_X_POSITION 0
75#define TPS6507X_TSCMODE_Y_POSITION 1
76#define TPS6507X_TSCMODE_PRESSURE 2
77#define TPS6507X_TSCMODE_X_PLATE 3
78#define TPS6507X_TSCMODE_Y_PLATE 4
79#define TPS6507X_TSCMODE_STANDBY 5
80#define TPS6507X_TSCMODE_ADC_INPUT 6
81#define TPS6507X_TSCMODE_DISABLE 7
82
83#define TPS6507X_REG_ADRESULT_1 0X09
84
85#define TPS6507X_REG_ADRESULT_2 0X0A
86#define TPS6507X_REG_ADRESULT_2_MASK (BIT(1) | BIT(0))
87
88#define TPS6507X_REG_PGOOD 0X0B
89
90#define TPS6507X_REG_PGOODMASK 0X0C
91
92#define TPS6507X_REG_CON_CTRL1 0X0D
93#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4)
94#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3)
95#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2)
96#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1)
97#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0)
98
99#define TPS6507X_REG_CON_CTRL2 0X0E
100
101#define TPS6507X_REG_CON_CTRL3 0X0F
102
103#define TPS6507X_REG_DEFDCDC1 0X10
104#define TPS6507X_DEFDCDC1_DCDC1_EXT_ADJ_EN BIT(7)
105#define TPS6507X_DEFDCDC1_DCDC1_MASK 0X3F
106
107#define TPS6507X_REG_DEFDCDC2_LOW 0X11
108#define TPS6507X_DEFDCDC2_LOW_DCDC2_MASK 0X3F
109
110#define TPS6507X_REG_DEFDCDC2_HIGH 0X12
111#define TPS6507X_DEFDCDC2_HIGH_DCDC2_MASK 0X3F
112
113#define TPS6507X_REG_DEFDCDC3_LOW 0X13
114#define TPS6507X_DEFDCDC3_LOW_DCDC3_MASK 0X3F
115
116#define TPS6507X_REG_DEFDCDC3_HIGH 0X14
117#define TPS6507X_DEFDCDC3_HIGH_DCDC3_MASK 0X3F
118
119#define TPS6507X_REG_DEFSLEW 0X15
120
121#define TPS6507X_REG_LDO_CTRL1 0X16
122#define TPS6507X_REG_LDO_CTRL1_LDO1_MASK 0X0F
123
124#define TPS6507X_REG_DEFLDO2 0X17
125#define TPS6507X_REG_DEFLDO2_LDO2_MASK 0X3F
126
127#define TPS6507X_REG_WLED_CTRL1 0X18
128
129#define TPS6507X_REG_WLED_CTRL2 0X19
130
131/* VDCDC MASK */
132#define TPS6507X_DEFDCDCX_DCDC_MASK 0X3F
133
134#define TPS6507X_MAX_REGISTER 0X19
135
136/**
137 * struct tps6507x_board - packages regulator and touchscreen init data
138 * @tps6507x_regulator_data: regulator initialization values
139 *
140 * Board data may be used to initialize regulator and touchscreen.
141 */
142
143struct tps6507x_board {
144 struct regulator_init_data *tps6507x_pmic_init_data;
145 struct touchscreen_init_data *tps6507x_ts_init_data;
146};
147
148/**
149 * struct tps6507x_dev - tps6507x sub-driver chip access routines
150 * @read_dev() - I2C register read function
151 * @write_dev() - I2C register write function
152 *
153 * Device data may be used to access the TPS6507x chip
154 */
155
156struct tps6507x_dev {
157 struct device *dev;
158 struct i2c_client *i2c_client;
159 int (*read_dev)(struct tps6507x_dev *tps6507x, char reg, int size,
160 void *dest);
161 int (*write_dev)(struct tps6507x_dev *tps6507x, char reg, int size,
162 void *src);
163
164 /* Client devices */
165 struct tps6507x_pmic *pmic;
166 struct tps6507x_ts *ts;
167};
168
169#endif /* __LINUX_MFD_TPS6507X_H */
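As a rough orientation (not part of the header itself), a tps6507x sub-driver reaches chip registers only through the read_dev/write_dev hooks filled in by the MFD core; the wrapper name and single-byte size below are illustrative, not taken from the patch:

/* Illustrative only: read one byte from a TPS6507x register via the MFD hook. */
static int tps6507x_read_u8(struct tps6507x_dev *tps6507x, char reg, u8 *val)
{
        return tps6507x->read_dev(tps6507x, reg, 1, val);
}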
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 5915f6e3d9ab..eb5bd4e0e03c 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -256,8 +256,9 @@ struct wm831x {
256 int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */ 256 int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
257 257
258 /* Chip revision based flags */ 258 /* Chip revision based flags */
259 unsigned has_gpio_ena:1; /* Has GPIO enable bit */ 259 unsigned has_gpio_ena:1; /* Has GPIO enable bit */
260 unsigned has_cs_sts:1; /* Has current sink status bit */ 260 unsigned has_cs_sts:1; /* Has current sink status bit */
261 unsigned charger_irq_wake:1; /* Are charger IRQs a wake source? */
261 262
262 int num_gpio; 263 int num_gpio;
263 264
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 3196c84cc630..f65913c9f5a4 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -230,7 +230,7 @@ static inline void *mmc_priv(struct mmc_host *host)
230#define mmc_classdev(x) (&(x)->class_dev) 230#define mmc_classdev(x) (&(x)->class_dev)
231#define mmc_hostname(x) (dev_name(&(x)->class_dev)) 231#define mmc_hostname(x) (dev_name(&(x)->class_dev))
232 232
233extern int mmc_suspend_host(struct mmc_host *, pm_message_t); 233extern int mmc_suspend_host(struct mmc_host *);
234extern int mmc_resume_host(struct mmc_host *); 234extern int mmc_resume_host(struct mmc_host *);
235 235
236extern void mmc_power_save_host(struct mmc_host *host); 236extern void mmc_power_save_host(struct mmc_host *host);
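With the pm_message_t argument dropped, host drivers pass only the host pointer to the suspend helper. A minimal sketch of the new calling convention; the surrounding driver plumbing (function name, drvdata layout) is hypothetical:

/* Sketch of a host driver's suspend path after this API change. */
static int my_host_suspend(struct device *dev)
{
        struct mmc_host *mmc = dev_get_drvdata(dev);

        return mmc_suspend_host(mmc);   /* no pm_message_t argument any more */
}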
diff --git a/include/linux/mmc/sdhci-spear.h b/include/linux/mmc/sdhci-spear.h
new file mode 100644
index 000000000000..9188c973f3e1
--- /dev/null
+++ b/include/linux/mmc/sdhci-spear.h
@@ -0,0 +1,42 @@
1/*
2 * include/linux/mmc/sdhci-spear.h
3 *
4 * SDHCI declarations specific to ST SPEAr platform
5 *
6 * Copyright (C) 2010 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#ifndef MMC_SDHCI_SPEAR_H
15#define MMC_SDHCI_SPEAR_H
16
17#include <linux/platform_device.h>
18/*
19 * struct sdhci_plat_data: spear sdhci platform data structure
20 *
21 * @card_power_gpio: gpio pin for enabling/disabling power to sdhci socket
22 * @power_active_high: if set, enable power to sdhci socket by setting
23 * card_power_gpio
24 * @power_always_enb: If set, then enable power on probe, otherwise enable only
25 * on card insertion and disable on card removal.
26 * @card_int_gpio: gpio pin used for card detection
27 */
28struct sdhci_plat_data {
29 int card_power_gpio;
30 int power_active_high;
31 int power_always_enb;
32 int card_int_gpio;
33};
34
35/* Helper to set the platform_data field of pdev->dev */
36static inline void
37sdhci_set_plat_data(struct platform_device *pdev, struct sdhci_plat_data *data)
38{
39 pdev->dev.platform_data = data;
40}
41
42#endif /* MMC_SDHCI_SPEAR_H */
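A board file would typically hand the SPEAr SDHCI driver its platform data through the helper above; the GPIO numbers and variable names in this sketch are placeholders, not values from the patch:

/* Hypothetical board setup: attach platform data to the SDHCI platform device. */
static struct sdhci_plat_data board_sdhci_pdata = {
        .card_power_gpio   = 32,        /* placeholder GPIO numbers */
        .power_active_high = 1,
        .power_always_enb  = 0,
        .card_int_gpio     = 33,
};

/* in board init code, with pdev being the SDHCI platform_device: */
/*      sdhci_set_plat_data(pdev, &board_sdhci_pdata);           */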
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index c6c0cceba5fe..31baaf82f458 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -145,6 +145,9 @@ extern void sdio_writew(struct sdio_func *func, u16 b,
145extern void sdio_writel(struct sdio_func *func, u32 b, 145extern void sdio_writel(struct sdio_func *func, u32 b,
146 unsigned int addr, int *err_ret); 146 unsigned int addr, int *err_ret);
147 147
148extern u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
149 unsigned int addr, int *err_ret);
150
148extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr, 151extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr,
149 void *src, int count); 152 void *src, int count);
150extern int sdio_writesb(struct sdio_func *func, unsigned int addr, 153extern int sdio_writesb(struct sdio_func *func, unsigned int addr,
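The new sdio_writeb_readb() helper performs a single write-then-read cycle on a function register. A hedged usage sketch; the wrapper function is purely illustrative:

/* Sketch: update a function register and observe the value it reads back. */
static u8 my_func_update_reg(struct sdio_func *func, unsigned int addr, u8 val, int *err)
{
        return sdio_writeb_readb(func, val, addr, err); /* one write-then-read cycle */
}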
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
new file mode 100644
index 000000000000..aafe832f18aa
--- /dev/null
+++ b/include/linux/mmc/sh_mmcif.h
@@ -0,0 +1,39 @@
1/*
2 * include/linux/mmc/sh_mmcif.h
3 *
4 * platform data for eMMC driver
5 *
6 * Copyright (C) 2010 Renesas Solutions Corp.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License.
11 *
12 */
13
14#ifndef __SH_MMCIF_H__
15#define __SH_MMCIF_H__
16
17/*
18 * MMCIF : CE_CLK_CTRL [19:16]
19 * 1000 : Peripheral clock / 512
20 * 0111 : Peripheral clock / 256
21 * 0110 : Peripheral clock / 128
22 * 0101 : Peripheral clock / 64
23 * 0100 : Peripheral clock / 32
24 * 0011 : Peripheral clock / 16
25 * 0010 : Peripheral clock / 8
26 * 0001 : Peripheral clock / 4
27 * 0000 : Peripheral clock / 2
28 * 1111 : Peripheral clock (sup_pclk set '1')
29 */
30
31struct sh_mmcif_plat_data {
32 void (*set_pwr)(struct platform_device *pdev, int state);
33 void (*down_pwr)(struct platform_device *pdev);
34        u8      sup_pclk;       /* 1: SH7757, 0: SH7724/SH7372 */
35 unsigned long caps;
36 u32 ocr;
37};
38
39#endif /* __SH_MMCIF_H__ */
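Board code supplies this structure via platform_data. A minimal, hypothetical example for an SH7724-class board; the callback, flag and OCR values are illustrative only:

/* Hypothetical board definition for the sh_mmcif driver. */
static void board_mmcif_set_pwr(struct platform_device *pdev, int state)
{
        /* toggle the eMMC power rail here (board specific) */
}

static struct sh_mmcif_plat_data board_mmcif_pdata = {
        .set_pwr  = board_mmcif_set_pwr,
        .sup_pclk = 0,                          /* SH7724/SH7372-class clocking */
        .ocr      = MMC_VDD_32_33 | MMC_VDD_33_34,
        .caps     = MMC_CAP_4_BIT_DATA,
};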
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0fa491326c4a..b4d109e389b8 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -671,6 +671,12 @@ void memory_present(int nid, unsigned long start, unsigned long end);
671static inline void memory_present(int nid, unsigned long start, unsigned long end) {} 671static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
672#endif 672#endif
673 673
674#ifdef CONFIG_HAVE_MEMORYLESS_NODES
675int local_memory_node(int node_id);
676#else
677static inline int local_memory_node(int node_id) { return node_id; };
678#endif
679
674#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE 680#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
675unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); 681unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
676#endif 682#endif
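local_memory_node() maps a possibly memoryless node to the nearest node that actually has memory; without CONFIG_HAVE_MEMORYLESS_NODES it is simply the identity. A small hedged illustration, with the helper function being hypothetical:

/* Sketch: allocate an order-0 page from the nearest node that has memory. */
static struct page *alloc_page_near_cpu(void)
{
        int nid = local_memory_node(numa_node_id());

        return alloc_pages_node(nid, GFP_KERNEL, 0);
}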
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 7c3609622334..540703b555cb 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -164,7 +164,10 @@ extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
164/* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */ 164/* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */
165static inline int notifier_from_errno(int err) 165static inline int notifier_from_errno(int err)
166{ 166{
167 return NOTIFY_STOP_MASK | (NOTIFY_OK - err); 167 if (err)
168 return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
169
170 return NOTIFY_OK;
168} 171}
169 172
170/* Restore (negative) errno value from notify return value. */ 173/* Restore (negative) errno value from notify return value. */
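With this fix an errno of zero maps back to NOTIFY_OK instead of being encoded as a stop value, so callbacks can unconditionally return notifier_from_errno(err) and callers recover the errno with notifier_to_errno(). A brief illustrative callback; the action value and -EBUSY choice are placeholders:

/* Sketch of a notifier callback that propagates an errno correctly. */
static int my_notifier_call(struct notifier_block *nb, unsigned long action, void *data)
{
        int err = 0;

        if (action == SOME_ACTION)              /* SOME_ACTION is a placeholder */
                err = -EBUSY;                   /* pretend the action must be refused */

        return notifier_from_errno(err);        /* 0 -> NOTIFY_OK, <0 -> stop + errno */
}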
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index aef22ae2af47..5bb13b3db84d 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -40,6 +40,7 @@ enum {
40 PCG_USED, /* this object is in use. */ 40 PCG_USED, /* this object is in use. */
41 PCG_ACCT_LRU, /* page has been accounted for */ 41 PCG_ACCT_LRU, /* page has been accounted for */
42 PCG_FILE_MAPPED, /* page is accounted as "mapped" */ 42 PCG_FILE_MAPPED, /* page is accounted as "mapped" */
43 PCG_MIGRATION, /* under page migration */
43}; 44};
44 45
45#define TESTPCGFLAG(uname, lname) \ 46#define TESTPCGFLAG(uname, lname) \
@@ -79,6 +80,10 @@ SETPCGFLAG(FileMapped, FILE_MAPPED)
79CLEARPCGFLAG(FileMapped, FILE_MAPPED) 80CLEARPCGFLAG(FileMapped, FILE_MAPPED)
80TESTPCGFLAG(FileMapped, FILE_MAPPED) 81TESTPCGFLAG(FileMapped, FILE_MAPPED)
81 82
83SETPCGFLAG(Migration, MIGRATION)
84CLEARPCGFLAG(Migration, MIGRATION)
85TESTPCGFLAG(Migration, MIGRATION)
86
82static inline int page_cgroup_nid(struct page_cgroup *pc) 87static inline int page_cgroup_nid(struct page_cgroup *pc)
83{ 88{
84 return page_to_nid(pc->page); 89 return page_to_nid(pc->page);
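The SETPCGFLAG/CLEARPCGFLAG/TESTPCGFLAG macros generate SetPageCgroup*/ClearPageCgroup*/PageCgroup* helpers, so the new migration bit is driven roughly as below; this is a fragment, assuming pc is a struct page_cgroup * held under the appropriate lock:

/* Sketch (fragment): mark a page_cgroup as under migration, then clear it. */
SetPageCgroupMigration(pc);             /* from SETPCGFLAG(Migration, MIGRATION) */
if (PageCgroupMigration(pc))            /* from TESTPCGFLAG(Migration, MIGRATION) */
        ClearPageCgroupMigration(pc);   /* from CLEARPCGFLAG(Migration, MIGRATION) */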
diff --git a/include/linux/pci.h b/include/linux/pci.h
index a327322a33ab..6a471aba3b07 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -311,7 +311,8 @@ struct pci_dev {
311 unsigned int is_virtfn:1; 311 unsigned int is_virtfn:1;
312 unsigned int reset_fn:1; 312 unsigned int reset_fn:1;
313 unsigned int is_hotplug_bridge:1; 313 unsigned int is_hotplug_bridge:1;
314 unsigned int aer_firmware_first:1; 314 unsigned int __aer_firmware_first_valid:1;
315 unsigned int __aer_firmware_first:1;
315 pci_dev_flags_t dev_flags; 316 pci_dev_flags_t dev_flags;
316 atomic_t enable_cnt; /* pci_enable_device has been called */ 317 atomic_t enable_cnt; /* pci_enable_device has been called */
317 318
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 3fd5c82e0e18..fb6c91eac7e3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -485,6 +485,7 @@ struct perf_guest_info_callbacks {
485#include <linux/ftrace.h> 485#include <linux/ftrace.h>
486#include <linux/cpu.h> 486#include <linux/cpu.h>
487#include <asm/atomic.h> 487#include <asm/atomic.h>
488#include <asm/local.h>
488 489
489#define PERF_MAX_STACK_DEPTH 255 490#define PERF_MAX_STACK_DEPTH 255
490 491
@@ -587,21 +588,19 @@ struct perf_mmap_data {
587 struct rcu_head rcu_head; 588 struct rcu_head rcu_head;
588#ifdef CONFIG_PERF_USE_VMALLOC 589#ifdef CONFIG_PERF_USE_VMALLOC
589 struct work_struct work; 590 struct work_struct work;
591 int page_order; /* allocation order */
590#endif 592#endif
591 int data_order;
592 int nr_pages; /* nr of data pages */ 593 int nr_pages; /* nr of data pages */
593 int writable; /* are we writable */ 594 int writable; /* are we writable */
594 int nr_locked; /* nr pages mlocked */ 595 int nr_locked; /* nr pages mlocked */
595 596
596 atomic_t poll; /* POLL_ for wakeups */ 597 atomic_t poll; /* POLL_ for wakeups */
597 atomic_t events; /* event_id limit */
598 598
599 atomic_long_t head; /* write position */ 599 local_t head; /* write position */
600 atomic_long_t done_head; /* completed head */ 600 local_t nest; /* nested writers */
601 601 local_t events; /* event limit */
602 atomic_t lock; /* concurrent writes */ 602 local_t wakeup; /* wakeup stamp */
603 atomic_t wakeup; /* needs a wakeup */ 603 local_t lost; /* nr records lost */
604 atomic_t lost; /* nr records lost */
605 604
606 long watermark; /* wakeup watermark */ 605 long watermark; /* wakeup watermark */
607 606
@@ -728,6 +727,7 @@ struct perf_event {
728 perf_overflow_handler_t overflow_handler; 727 perf_overflow_handler_t overflow_handler;
729 728
730#ifdef CONFIG_EVENT_TRACING 729#ifdef CONFIG_EVENT_TRACING
730 struct ftrace_event_call *tp_event;
731 struct event_filter *filter; 731 struct event_filter *filter;
732#endif 732#endif
733 733
@@ -803,11 +803,12 @@ struct perf_cpu_context {
803struct perf_output_handle { 803struct perf_output_handle {
804 struct perf_event *event; 804 struct perf_event *event;
805 struct perf_mmap_data *data; 805 struct perf_mmap_data *data;
806 unsigned long head; 806 unsigned long wakeup;
807 unsigned long offset; 807 unsigned long size;
808 void *addr;
809 int page;
808 int nmi; 810 int nmi;
809 int sample; 811 int sample;
810 int locked;
811}; 812};
812 813
813#ifdef CONFIG_PERF_EVENTS 814#ifdef CONFIG_PERF_EVENTS
@@ -993,8 +994,9 @@ static inline bool perf_paranoid_kernel(void)
993} 994}
994 995
995extern void perf_event_init(void); 996extern void perf_event_init(void);
996extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, 997extern void perf_tp_event(u64 addr, u64 count, void *record,
997 int entry_size, struct pt_regs *regs); 998 int entry_size, struct pt_regs *regs,
999 struct hlist_head *head);
998extern void perf_bp_event(struct perf_event *event, void *data); 1000extern void perf_bp_event(struct perf_event *event, void *data);
999 1001
1000#ifndef perf_misc_flags 1002#ifndef perf_misc_flags
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 7126a15467f1..94c1f03b50eb 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -174,8 +174,7 @@ enum {
174#include <linux/rwsem.h> 174#include <linux/rwsem.h>
175#include <linux/spinlock.h> 175#include <linux/spinlock.h>
176#include <linux/wait.h> 176#include <linux/wait.h>
177#include <linux/percpu.h> 177#include <linux/percpu_counter.h>
178#include <linux/smp.h>
179 178
180#include <linux/dqblk_xfs.h> 179#include <linux/dqblk_xfs.h>
181#include <linux/dqblk_v1.h> 180#include <linux/dqblk_v1.h>
@@ -254,6 +253,7 @@ enum {
254 253
255struct dqstats { 254struct dqstats {
256 int stat[_DQST_DQSTAT_LAST]; 255 int stat[_DQST_DQSTAT_LAST];
256 struct percpu_counter counter[_DQST_DQSTAT_LAST];
257}; 257};
258 258
259extern struct dqstats *dqstats_pcpu; 259extern struct dqstats *dqstats_pcpu;
@@ -261,20 +261,12 @@ extern struct dqstats dqstats;
261 261
262static inline void dqstats_inc(unsigned int type) 262static inline void dqstats_inc(unsigned int type)
263{ 263{
264#ifdef CONFIG_SMP 264 percpu_counter_inc(&dqstats.counter[type]);
265 per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]++;
266#else
267 dqstats.stat[type]++;
268#endif
269} 265}
270 266
271static inline void dqstats_dec(unsigned int type) 267static inline void dqstats_dec(unsigned int type)
272{ 268{
273#ifdef CONFIG_SMP 269 percpu_counter_dec(&dqstats.counter[type]);
274 per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]--;
275#else
276 dqstats.stat[type]--;
277#endif
278} 270}
279 271
280#define DQ_MOD_B 0 /* dquot modified since read */ 272#define DQ_MOD_B 0 /* dquot modified since read */
@@ -332,8 +324,8 @@ struct dquot_operations {
332 324
333/* Operations handling requests from userspace */ 325/* Operations handling requests from userspace */
334struct quotactl_ops { 326struct quotactl_ops {
335 int (*quota_on)(struct super_block *, int, int, char *, int); 327 int (*quota_on)(struct super_block *, int, int, char *);
336 int (*quota_off)(struct super_block *, int, int); 328 int (*quota_off)(struct super_block *, int);
337 int (*quota_sync)(struct super_block *, int, int); 329 int (*quota_sync)(struct super_block *, int, int);
338 int (*get_info)(struct super_block *, int, struct if_dqinfo *); 330 int (*get_info)(struct super_block *, int, struct if_dqinfo *);
339 int (*set_info)(struct super_block *, int, struct if_dqinfo *); 331 int (*set_info)(struct super_block *, int, struct if_dqinfo *);
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 370abb1e99cb..aa36793b48bd 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -9,6 +9,10 @@
9 9
10#include <linux/fs.h> 10#include <linux/fs.h>
11 11
12#define DQUOT_SPACE_WARN 0x1
13#define DQUOT_SPACE_RESERVE 0x2
14#define DQUOT_SPACE_NOFAIL 0x4
15
12static inline struct quota_info *sb_dqopt(struct super_block *sb) 16static inline struct quota_info *sb_dqopt(struct super_block *sb)
13{ 17{
14 return &sb->s_dquot; 18 return &sb->s_dquot;
@@ -41,15 +45,22 @@ int dquot_scan_active(struct super_block *sb,
41struct dquot *dquot_alloc(struct super_block *sb, int type); 45struct dquot *dquot_alloc(struct super_block *sb, int type);
42void dquot_destroy(struct dquot *dquot); 46void dquot_destroy(struct dquot *dquot);
43 47
44int __dquot_alloc_space(struct inode *inode, qsize_t number, 48int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags);
45 int warn, int reserve); 49void __dquot_free_space(struct inode *inode, qsize_t number, int flags);
46void __dquot_free_space(struct inode *inode, qsize_t number, int reserve);
47 50
48int dquot_alloc_inode(const struct inode *inode); 51int dquot_alloc_inode(const struct inode *inode);
49 52
50int dquot_claim_space_nodirty(struct inode *inode, qsize_t number); 53int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
51void dquot_free_inode(const struct inode *inode); 54void dquot_free_inode(const struct inode *inode);
52 55
56int dquot_disable(struct super_block *sb, int type, unsigned int flags);
57/* Suspend quotas on remount RO */
58static inline int dquot_suspend(struct super_block *sb, int type)
59{
60 return dquot_disable(sb, type, DQUOT_SUSPENDED);
61}
62int dquot_resume(struct super_block *sb, int type);
63
53int dquot_commit(struct dquot *dquot); 64int dquot_commit(struct dquot *dquot);
54int dquot_acquire(struct dquot *dquot); 65int dquot_acquire(struct dquot *dquot);
55int dquot_release(struct dquot *dquot); 66int dquot_release(struct dquot *dquot);
@@ -58,27 +69,25 @@ int dquot_mark_dquot_dirty(struct dquot *dquot);
58 69
59int dquot_file_open(struct inode *inode, struct file *file); 70int dquot_file_open(struct inode *inode, struct file *file);
60 71
61int vfs_quota_on(struct super_block *sb, int type, int format_id, 72int dquot_quota_on(struct super_block *sb, int type, int format_id,
62 char *path, int remount); 73 char *path);
63int vfs_quota_enable(struct inode *inode, int type, int format_id, 74int dquot_enable(struct inode *inode, int type, int format_id,
64 unsigned int flags); 75 unsigned int flags);
65int vfs_quota_on_path(struct super_block *sb, int type, int format_id, 76int dquot_quota_on_path(struct super_block *sb, int type, int format_id,
66 struct path *path); 77 struct path *path);
67int vfs_quota_on_mount(struct super_block *sb, char *qf_name, 78int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
68 int format_id, int type); 79 int format_id, int type);
69int vfs_quota_off(struct super_block *sb, int type, int remount); 80int dquot_quota_off(struct super_block *sb, int type);
70int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags); 81int dquot_quota_sync(struct super_block *sb, int type, int wait);
71int vfs_quota_sync(struct super_block *sb, int type, int wait); 82int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
72int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 83int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
73int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); 84int dquot_get_dqblk(struct super_block *sb, int type, qid_t id,
74int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
75 struct fs_disk_quota *di); 85 struct fs_disk_quota *di);
76int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, 86int dquot_set_dqblk(struct super_block *sb, int type, qid_t id,
77 struct fs_disk_quota *di); 87 struct fs_disk_quota *di);
78 88
79int __dquot_transfer(struct inode *inode, struct dquot **transfer_to); 89int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
80int dquot_transfer(struct inode *inode, struct iattr *iattr); 90int dquot_transfer(struct inode *inode, struct iattr *iattr);
81int vfs_dq_quota_on_remount(struct super_block *sb);
82 91
83static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type) 92static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type)
84{ 93{
@@ -145,20 +154,7 @@ static inline unsigned sb_any_quota_active(struct super_block *sb)
145 * Operations supported for diskquotas. 154 * Operations supported for diskquotas.
146 */ 155 */
147extern const struct dquot_operations dquot_operations; 156extern const struct dquot_operations dquot_operations;
148extern const struct quotactl_ops vfs_quotactl_ops; 157extern const struct quotactl_ops dquot_quotactl_ops;
149
150#define sb_dquot_ops (&dquot_operations)
151#define sb_quotactl_ops (&vfs_quotactl_ops)
152
153/* Cannot be called inside a transaction */
154static inline int vfs_dq_off(struct super_block *sb, int remount)
155{
156 int ret = -ENOSYS;
157
158 if (sb->s_qcop && sb->s_qcop->quota_off)
159 ret = sb->s_qcop->quota_off(sb, -1, remount);
160 return ret;
161}
162 158
163#else 159#else
164 160
@@ -203,12 +199,6 @@ static inline int sb_any_quota_active(struct super_block *sb)
203 return 0; 199 return 0;
204} 200}
205 201
206/*
207 * NO-OP when quota not configured.
208 */
209#define sb_dquot_ops (NULL)
210#define sb_quotactl_ops (NULL)
211
212static inline void dquot_initialize(struct inode *inode) 202static inline void dquot_initialize(struct inode *inode)
213{ 203{
214} 204}
@@ -226,39 +216,45 @@ static inline void dquot_free_inode(const struct inode *inode)
226{ 216{
227} 217}
228 218
229static inline int vfs_dq_off(struct super_block *sb, int remount) 219static inline int dquot_transfer(struct inode *inode, struct iattr *iattr)
230{ 220{
231 return 0; 221 return 0;
232} 222}
233 223
234static inline int vfs_dq_quota_on_remount(struct super_block *sb) 224static inline int __dquot_alloc_space(struct inode *inode, qsize_t number,
225 int flags)
235{ 226{
227 if (!(flags & DQUOT_SPACE_RESERVE))
228 inode_add_bytes(inode, number);
236 return 0; 229 return 0;
237} 230}
238 231
239static inline int dquot_transfer(struct inode *inode, struct iattr *iattr) 232static inline void __dquot_free_space(struct inode *inode, qsize_t number,
233 int flags)
234{
235 if (!(flags & DQUOT_SPACE_RESERVE))
236 inode_sub_bytes(inode, number);
237}
238
239static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
240{ 240{
241 inode_add_bytes(inode, number);
241 return 0; 242 return 0;
242} 243}
243 244
244static inline int __dquot_alloc_space(struct inode *inode, qsize_t number, 245static inline int dquot_disable(struct super_block *sb, int type,
245 int warn, int reserve) 246 unsigned int flags)
246{ 247{
247 if (!reserve)
248 inode_add_bytes(inode, number);
249 return 0; 248 return 0;
250} 249}
251 250
252static inline void __dquot_free_space(struct inode *inode, qsize_t number, 251static inline int dquot_suspend(struct super_block *sb, int type)
253 int reserve)
254{ 252{
255 if (!reserve) 253 return 0;
256 inode_sub_bytes(inode, number);
257} 254}
258 255
259static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) 256static inline int dquot_resume(struct super_block *sb, int type)
260{ 257{
261 inode_add_bytes(inode, number);
262 return 0; 258 return 0;
263} 259}
264 260
@@ -268,7 +264,13 @@ static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
268 264
269static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr) 265static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr)
270{ 266{
271 return __dquot_alloc_space(inode, nr, 1, 0); 267 return __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN);
268}
269
270static inline void dquot_alloc_space_nofail(struct inode *inode, qsize_t nr)
271{
272 __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN|DQUOT_SPACE_NOFAIL);
273 mark_inode_dirty(inode);
272} 274}
273 275
274static inline int dquot_alloc_space(struct inode *inode, qsize_t nr) 276static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
@@ -286,6 +288,11 @@ static inline int dquot_alloc_block_nodirty(struct inode *inode, qsize_t nr)
286 return dquot_alloc_space_nodirty(inode, nr << inode->i_blkbits); 288 return dquot_alloc_space_nodirty(inode, nr << inode->i_blkbits);
287} 289}
288 290
291static inline void dquot_alloc_block_nofail(struct inode *inode, qsize_t nr)
292{
293 dquot_alloc_space_nofail(inode, nr << inode->i_blkbits);
294}
295
289static inline int dquot_alloc_block(struct inode *inode, qsize_t nr) 296static inline int dquot_alloc_block(struct inode *inode, qsize_t nr)
290{ 297{
291 return dquot_alloc_space(inode, nr << inode->i_blkbits); 298 return dquot_alloc_space(inode, nr << inode->i_blkbits);
@@ -293,7 +300,7 @@ static inline int dquot_alloc_block(struct inode *inode, qsize_t nr)
293 300
294static inline int dquot_prealloc_block_nodirty(struct inode *inode, qsize_t nr) 301static inline int dquot_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
295{ 302{
296 return __dquot_alloc_space(inode, nr << inode->i_blkbits, 0, 0); 303 return __dquot_alloc_space(inode, nr << inode->i_blkbits, 0);
297} 304}
298 305
299static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr) 306static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr)
@@ -308,7 +315,8 @@ static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr)
308 315
309static inline int dquot_reserve_block(struct inode *inode, qsize_t nr) 316static inline int dquot_reserve_block(struct inode *inode, qsize_t nr)
310{ 317{
311 return __dquot_alloc_space(inode, nr << inode->i_blkbits, 1, 1); 318 return __dquot_alloc_space(inode, nr << inode->i_blkbits,
319 DQUOT_SPACE_WARN|DQUOT_SPACE_RESERVE);
312} 320}
313 321
314static inline int dquot_claim_block(struct inode *inode, qsize_t nr) 322static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
@@ -345,7 +353,7 @@ static inline void dquot_free_block(struct inode *inode, qsize_t nr)
345static inline void dquot_release_reservation_block(struct inode *inode, 353static inline void dquot_release_reservation_block(struct inode *inode,
346 qsize_t nr) 354 qsize_t nr)
347{ 355{
348 __dquot_free_space(inode, nr << inode->i_blkbits, 1); 356 __dquot_free_space(inode, nr << inode->i_blkbits, DQUOT_SPACE_RESERVE);
349} 357}
350 358
351#endif /* _LINUX_QUOTAOPS_ */ 359#endif /* _LINUX_QUOTAOPS_ */
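The old warn/reserve boolean pair becomes a single flags word: a reservation passes DQUOT_SPACE_WARN|DQUOT_SPACE_RESERVE, a prealloc passes no flags. A compact sketch of a direct call; the wrapper function and the mark_inode_dirty() step simply mirror dquot_alloc_space() and are illustrative:

/* Sketch: charge nr bytes to the inode's quota, warning the user on overrun. */
static int charge_bytes(struct inode *inode, qsize_t nr)
{
        int ret = __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN);

        if (!ret)
                mark_inode_dirty(inode);        /* same pattern as dquot_alloc_space() */
        return ret;
}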
diff --git a/include/linux/random.h b/include/linux/random.h
index 25d02fe5c9b5..fb7ab9de5f36 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -40,6 +40,10 @@ struct rand_pool_info {
40 __u32 buf[0]; 40 __u32 buf[0];
41}; 41};
42 42
43struct rnd_state {
44 __u32 s1, s2, s3;
45};
46
43/* Exported functions */ 47/* Exported functions */
44 48
45#ifdef __KERNEL__ 49#ifdef __KERNEL__
@@ -74,6 +78,30 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
74u32 random32(void); 78u32 random32(void);
75void srandom32(u32 seed); 79void srandom32(u32 seed);
76 80
81u32 prandom32(struct rnd_state *);
82
83/*
84 * Handle minimum values for seeds
85 */
86static inline u32 __seed(u32 x, u32 m)
87{
88 return (x < m) ? x + m : x;
89}
90
91/**
92 * prandom32_seed - set seed for prandom32().
93 * @state: pointer to state structure to receive the seed.
94 * @seed: arbitrary 64-bit value to use as a seed.
95 */
96static inline void prandom32_seed(struct rnd_state *state, u64 seed)
97{
98 u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
99
100 state->s1 = __seed(i, 1);
101 state->s2 = __seed(i, 7);
102 state->s3 = __seed(i, 15);
103}
104
77#endif /* __KERNEL___ */ 105#endif /* __KERNEL___ */
78 106
79#endif /* _LINUX_RANDOM_H */ 107#endif /* _LINUX_RANDOM_H */
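prandom32() is a small per-state generator for callers that need reproducible, non-cryptographic pseudo-random numbers independent of the global random32() state. A minimal hedged sketch; the wrapper function is illustrative:

/* Sketch: seed a private PRNG state and draw a value from it. */
static u32 draw_value(u64 seed)
{
        struct rnd_state rnd;

        prandom32_seed(&rnd, seed);     /* any 64-bit value is acceptable as a seed */
        return prandom32(&rnd);         /* independent of the global random32() state */
}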
diff --git a/include/linux/rio.h b/include/linux/rio.h
index dc0c75556c63..bd6eb0ed34a7 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -64,10 +64,13 @@
64#define RIO_INB_MBOX_RESOURCE 1 64#define RIO_INB_MBOX_RESOURCE 1
65#define RIO_OUTB_MBOX_RESOURCE 2 65#define RIO_OUTB_MBOX_RESOURCE 2
66 66
67#define RIO_PW_MSG_SIZE 64
68
67extern struct bus_type rio_bus_type; 69extern struct bus_type rio_bus_type;
68extern struct list_head rio_devices; /* list of all devices */ 70extern struct list_head rio_devices; /* list of all devices */
69 71
70struct rio_mport; 72struct rio_mport;
73union rio_pw_msg;
71 74
72/** 75/**
73 * struct rio_dev - RIO device info 76 * struct rio_dev - RIO device info
@@ -85,11 +88,15 @@ struct rio_mport;
85 * @swpinfo: Switch port info 88 * @swpinfo: Switch port info
86 * @src_ops: Source operation capabilities 89 * @src_ops: Source operation capabilities
87 * @dst_ops: Destination operation capabilities 90 * @dst_ops: Destination operation capabilities
91 * @comp_tag: RIO component tag
92 * @phys_efptr: RIO device extended features pointer
93 * @em_efptr: RIO Error Management features pointer
88 * @dma_mask: Mask of bits of RIO address this device implements 94 * @dma_mask: Mask of bits of RIO address this device implements
89 * @rswitch: Pointer to &struct rio_switch if valid for this device 95 * @rswitch: Pointer to &struct rio_switch if valid for this device
90 * @driver: Driver claiming this device 96 * @driver: Driver claiming this device
91 * @dev: Device model device 97 * @dev: Device model device
92 * @riores: RIO resources this device owns 98 * @riores: RIO resources this device owns
99 * @pwcback: port-write callback function for this device
93 * @destid: Network destination ID 100 * @destid: Network destination ID
94 */ 101 */
95struct rio_dev { 102struct rio_dev {
@@ -107,11 +114,15 @@ struct rio_dev {
107 u32 swpinfo; /* Only used for switches */ 114 u32 swpinfo; /* Only used for switches */
108 u32 src_ops; 115 u32 src_ops;
109 u32 dst_ops; 116 u32 dst_ops;
117 u32 comp_tag;
118 u32 phys_efptr;
119 u32 em_efptr;
110 u64 dma_mask; 120 u64 dma_mask;
111 struct rio_switch *rswitch; /* RIO switch info */ 121 struct rio_switch *rswitch; /* RIO switch info */
112 struct rio_driver *driver; /* RIO driver claiming this device */ 122 struct rio_driver *driver; /* RIO driver claiming this device */
113 struct device dev; /* LDM device structure */ 123 struct device dev; /* LDM device structure */
114 struct resource riores[RIO_MAX_DEV_RESOURCES]; 124 struct resource riores[RIO_MAX_DEV_RESOURCES];
125 int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step);
115 u16 destid; 126 u16 destid;
116}; 127};
117 128
@@ -211,8 +222,14 @@ struct rio_net {
211 * @hopcount: Hopcount to this switch 222 * @hopcount: Hopcount to this switch
212 * @destid: Associated destid in the path 223 * @destid: Associated destid in the path
213 * @route_table: Copy of switch routing table 224 * @route_table: Copy of switch routing table
225 * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0
214 * @add_entry: Callback for switch-specific route add function 226 * @add_entry: Callback for switch-specific route add function
215 * @get_entry: Callback for switch-specific route get function 227 * @get_entry: Callback for switch-specific route get function
228 * @clr_table: Callback for switch-specific clear route table function
229 * @set_domain: Callback for switch-specific domain setting function
230 * @get_domain: Callback for switch-specific domain get function
231 * @em_init: Callback for switch-specific error management initialization function
232 * @em_handle: Callback for switch-specific error management handler function
216 */ 233 */
217struct rio_switch { 234struct rio_switch {
218 struct list_head node; 235 struct list_head node;
@@ -220,10 +237,19 @@ struct rio_switch {
220 u16 hopcount; 237 u16 hopcount;
221 u16 destid; 238 u16 destid;
222 u8 *route_table; 239 u8 *route_table;
240 u32 port_ok;
223 int (*add_entry) (struct rio_mport * mport, u16 destid, u8 hopcount, 241 int (*add_entry) (struct rio_mport * mport, u16 destid, u8 hopcount,
224 u16 table, u16 route_destid, u8 route_port); 242 u16 table, u16 route_destid, u8 route_port);
225 int (*get_entry) (struct rio_mport * mport, u16 destid, u8 hopcount, 243 int (*get_entry) (struct rio_mport * mport, u16 destid, u8 hopcount,
226 u16 table, u16 route_destid, u8 * route_port); 244 u16 table, u16 route_destid, u8 * route_port);
245 int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount,
246 u16 table);
247 int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
248 u8 sw_domain);
249 int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
250 u8 *sw_domain);
251 int (*em_init) (struct rio_dev *dev);
252 int (*em_handle) (struct rio_dev *dev, u8 swport);
227}; 253};
228 254
229/* Low-level architecture-dependent routines */ 255/* Low-level architecture-dependent routines */
@@ -235,6 +261,7 @@ struct rio_switch {
235 * @cread: Callback to perform network read of config space. 261 * @cread: Callback to perform network read of config space.
236 * @cwrite: Callback to perform network write of config space. 262 * @cwrite: Callback to perform network write of config space.
237 * @dsend: Callback to send a doorbell message. 263 * @dsend: Callback to send a doorbell message.
264 * @pwenable: Callback to enable/disable port-write message handling.
238 */ 265 */
239struct rio_ops { 266struct rio_ops {
240 int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len, 267 int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len,
@@ -246,6 +273,7 @@ struct rio_ops {
246 int (*cwrite) (struct rio_mport *mport, int index, u16 destid, 273 int (*cwrite) (struct rio_mport *mport, int index, u16 destid,
247 u8 hopcount, u32 offset, int len, u32 data); 274 u8 hopcount, u32 offset, int len, u32 data);
248 int (*dsend) (struct rio_mport *mport, int index, u16 destid, u16 data); 275 int (*dsend) (struct rio_mport *mport, int index, u16 destid, u16 data);
276 int (*pwenable) (struct rio_mport *mport, int enable);
249}; 277};
250 278
251#define RIO_RESOURCE_MEM 0x00000100 279#define RIO_RESOURCE_MEM 0x00000100
@@ -302,21 +330,28 @@ struct rio_device_id {
302}; 330};
303 331
304/** 332/**
305 * struct rio_route_ops - Per-switch route operations 333 * struct rio_switch_ops - Per-switch operations
306 * @vid: RIO vendor ID 334 * @vid: RIO vendor ID
307 * @did: RIO device ID 335 * @did: RIO device ID
308 * @add_hook: Callback that adds a route entry 336 * @init_hook: Callback that performs switch device initialization
309 * @get_hook: Callback that gets a route entry
310 * 337 *
311 * Defines the operations that are necessary to manipulate the route 338 * Defines the operations that are necessary to initialize/control
312 * tables for a particular RIO switch device. 339 * a particular RIO switch device.
313 */ 340 */
314struct rio_route_ops { 341struct rio_switch_ops {
315 u16 vid, did; 342 u16 vid, did;
316 int (*add_hook) (struct rio_mport * mport, u16 destid, u8 hopcount, 343 int (*init_hook) (struct rio_dev *rdev, int do_enum);
317 u16 table, u16 route_destid, u8 route_port); 344};
318 int (*get_hook) (struct rio_mport * mport, u16 destid, u8 hopcount, 345
319 u16 table, u16 route_destid, u8 * route_port); 346union rio_pw_msg {
347 struct {
348 u32 comptag; /* Component Tag CSR */
349 u32 errdetect; /* Port N Error Detect CSR */
350 u32 is_port; /* Implementation specific + PortID */
351 u32 ltlerrdet; /* LTL Error Detect CSR */
352 u32 padding[12];
353 } em;
354 u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)];
320}; 355};
321 356
322/* Architecture and hardware-specific functions */ 357/* Architecture and hardware-specific functions */
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index c93a58a40033..edc55da717b3 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -413,6 +413,12 @@ void rio_release_regions(struct rio_dev *);
413int rio_request_region(struct rio_dev *, int, char *); 413int rio_request_region(struct rio_dev *, int, char *);
414void rio_release_region(struct rio_dev *, int); 414void rio_release_region(struct rio_dev *, int);
415 415
416/* Port-Write management */
417extern int rio_request_inb_pwrite(struct rio_dev *,
418 int (*)(struct rio_dev *, union rio_pw_msg*, int));
419extern int rio_release_inb_pwrite(struct rio_dev *);
420extern int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg);
421
416/* LDM support */ 422/* LDM support */
417int rio_register_driver(struct rio_driver *); 423int rio_register_driver(struct rio_driver *);
418void rio_unregister_driver(struct rio_driver *); 424void rio_unregister_driver(struct rio_driver *);
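Drivers register a per-device port-write handler with rio_request_inb_pwrite(); the handler receives the raw message defined by union rio_pw_msg. A hedged sketch, with the handler name and the printed fields chosen only for illustration:

/* Sketch: illustrative inbound port-write callback for a RapidIO device. */
static int my_pwrite_handler(struct rio_dev *rdev, union rio_pw_msg *msg, int step)
{
        pr_info("rio: port-write from %s, errdetect=0x%08x\n",
                dev_name(&rdev->dev), msg->em.errdetect);
        return 0;
}

/* during device setup: */
/*      rio_request_inb_pwrite(rdev, my_pwrite_handler);       */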
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index 919d4e07d54e..db50e1c288b7 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -20,5 +20,19 @@
20 20
21#define RIO_VID_TUNDRA 0x000d 21#define RIO_VID_TUNDRA 0x000d
22#define RIO_DID_TSI500 0x0500 22#define RIO_DID_TSI500 0x0500
23#define RIO_DID_TSI568 0x0568
24#define RIO_DID_TSI572 0x0572
25#define RIO_DID_TSI574 0x0574
26#define RIO_DID_TSI576 0x0578 /* Same ID as Tsi578 */
27#define RIO_DID_TSI577 0x0577
28#define RIO_DID_TSI578 0x0578
29
30#define RIO_VID_IDT 0x0038
31#define RIO_DID_IDT70K200 0x0310
32#define RIO_DID_IDTCPS8 0x035c
33#define RIO_DID_IDTCPS12 0x035d
34#define RIO_DID_IDTCPS16 0x035b
35#define RIO_DID_IDTCPS6Q 0x035f
36#define RIO_DID_IDTCPS10Q 0x035e
23 37
24#endif /* LINUX_RIO_IDS_H */ 38#endif /* LINUX_RIO_IDS_H */
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index 326540f9b54e..aedee0489fb4 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -39,6 +39,8 @@
39#define RIO_PEF_INB_MBOX2 0x00200000 /* [II] Mailbox 2 */ 39#define RIO_PEF_INB_MBOX2 0x00200000 /* [II] Mailbox 2 */
40#define RIO_PEF_INB_MBOX3 0x00100000 /* [II] Mailbox 3 */ 40#define RIO_PEF_INB_MBOX3 0x00100000 /* [II] Mailbox 3 */
41#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II] Doorbells */ 41#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II] Doorbells */
42#define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */
43#define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */
42#define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */ 44#define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */
43#define RIO_PEF_EXT_FEATURES 0x00000008 /* [I] EFT_PTR valid */ 45#define RIO_PEF_EXT_FEATURES 0x00000008 /* [I] EFT_PTR valid */
44#define RIO_PEF_ADDR_66 0x00000004 /* [I] 66 bits */ 46#define RIO_PEF_ADDR_66 0x00000004 /* [I] 66 bits */
@@ -91,7 +93,10 @@
91#define RIO_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */ 93#define RIO_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */
92#define RIO_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */ 94#define RIO_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */
93 95
94 /* 0x20-0x3c *//* Reserved */ 96 /* 0x20-0x30 *//* Reserved */
97
98#define RIO_SWITCH_RT_LIMIT 0x34 /* [III, 1.3] Switch Route Table Destination ID Limit CAR */
99#define RIO_RT_MAX_DESTID 0x0000ffff
95 100
96#define RIO_MBOX_CSR 0x40 /* [II] Mailbox CSR */ 101#define RIO_MBOX_CSR 0x40 /* [II] Mailbox CSR */
97#define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */ 102#define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */
@@ -153,7 +158,11 @@
153#define RIO_HOST_DID_LOCK_CSR 0x68 /* [III] Host Base Device ID Lock CSR */ 158#define RIO_HOST_DID_LOCK_CSR 0x68 /* [III] Host Base Device ID Lock CSR */
154#define RIO_COMPONENT_TAG_CSR 0x6c /* [III] Component Tag CSR */ 159#define RIO_COMPONENT_TAG_CSR 0x6c /* [III] Component Tag CSR */
155 160
156 /* 0x70-0xf8 *//* Reserved */ 161#define RIO_STD_RTE_CONF_DESTID_SEL_CSR 0x70
162#define RIO_STD_RTE_CONF_PORT_SEL_CSR 0x74
163#define RIO_STD_RTE_DEFAULT_PORT 0x78
164
165 /* 0x7c-0xf8 *//* Reserved */
157 /* 0x100-0xfff8 *//* [I] Extended Features Space */ 166 /* 0x100-0xfff8 *//* [I] Extended Features Space */
158 /* 0x10000-0xfffff8 *//* [I] Implementation-defined Space */ 167 /* 0x10000-0xfffff8 *//* [I] Implementation-defined Space */
159 168
@@ -183,9 +192,14 @@
183#define RIO_EFB_PAR_EP_ID 0x0001 /* [IV] LP/LVDS EP Devices */ 192#define RIO_EFB_PAR_EP_ID 0x0001 /* [IV] LP/LVDS EP Devices */
184#define RIO_EFB_PAR_EP_REC_ID 0x0002 /* [IV] LP/LVDS EP Recovery Devices */ 193#define RIO_EFB_PAR_EP_REC_ID 0x0002 /* [IV] LP/LVDS EP Recovery Devices */
185#define RIO_EFB_PAR_EP_FREE_ID 0x0003 /* [IV] LP/LVDS EP Free Devices */ 194#define RIO_EFB_PAR_EP_FREE_ID 0x0003 /* [IV] LP/LVDS EP Free Devices */
195#define RIO_EFB_SER_EP_ID_V13P 0x0001 /* [VI] LP/Serial EP Devices, RapidIO Spec ver 1.3 and above */
196#define RIO_EFB_SER_EP_REC_ID_V13P 0x0002 /* [VI] LP/Serial EP Recovery Devices, RapidIO Spec ver 1.3 and above */
197#define RIO_EFB_SER_EP_FREE_ID_V13P 0x0003 /* [VI] LP/Serial EP Free Devices, RapidIO Spec ver 1.3 and above */
186#define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP/Serial EP Devices */ 198#define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP/Serial EP Devices */
187#define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP/Serial EP Recovery Devices */ 199#define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP/Serial EP Recovery Devices */
188#define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP/Serial EP Free Devices */ 200#define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP/Serial EP Free Devices */
201#define RIO_EFB_SER_EP_FREC_ID 0x0009 /* [VI] LP/Serial EP Free Recovery Devices */
202#define RIO_EFB_ERR_MGMNT 0x0007 /* [VIII] Error Management Extensions */
189 203
190/* 204/*
191 * Physical 8/16 LP-LVDS 205 * Physical 8/16 LP-LVDS
@@ -201,15 +215,71 @@
201#define RIO_PORT_MNT_HEADER 0x0000 215#define RIO_PORT_MNT_HEADER 0x0000
202#define RIO_PORT_REQ_CTL_CSR 0x0020 216#define RIO_PORT_REQ_CTL_CSR 0x0020
203#define RIO_PORT_RSP_CTL_CSR 0x0024 /* 0x0001/0x0002 */ 217#define RIO_PORT_RSP_CTL_CSR 0x0024 /* 0x0001/0x0002 */
218#define RIO_PORT_LINKTO_CTL_CSR 0x0020 /* Serial */
219#define RIO_PORT_RSPTO_CTL_CSR 0x0024 /* Serial */
204#define RIO_PORT_GEN_CTL_CSR 0x003c 220#define RIO_PORT_GEN_CTL_CSR 0x003c
205#define RIO_PORT_GEN_HOST 0x80000000 221#define RIO_PORT_GEN_HOST 0x80000000
206#define RIO_PORT_GEN_MASTER 0x40000000 222#define RIO_PORT_GEN_MASTER 0x40000000
207#define RIO_PORT_GEN_DISCOVERED 0x20000000 223#define RIO_PORT_GEN_DISCOVERED 0x20000000
208#define RIO_PORT_N_MNT_REQ_CSR(x) (0x0040 + x*0x20) /* 0x0002 */ 224#define RIO_PORT_N_MNT_REQ_CSR(x) (0x0040 + x*0x20) /* 0x0002 */
209#define RIO_PORT_N_MNT_RSP_CSR(x) (0x0044 + x*0x20) /* 0x0002 */ 225#define RIO_PORT_N_MNT_RSP_CSR(x) (0x0044 + x*0x20) /* 0x0002 */
226#define RIO_PORT_N_MNT_RSP_RVAL 0x80000000 /* Response Valid */
227#define RIO_PORT_N_MNT_RSP_ASTAT 0x000003e0 /* ackID Status */
228#define RIO_PORT_N_MNT_RSP_LSTAT 0x0000001f /* Link Status */
210#define RIO_PORT_N_ACK_STS_CSR(x) (0x0048 + x*0x20) /* 0x0002 */ 229#define RIO_PORT_N_ACK_STS_CSR(x) (0x0048 + x*0x20) /* 0x0002 */
211#define RIO_PORT_N_ERR_STS_CSR(x) (0x58 + x*0x20) 230#define RIO_PORT_N_ACK_CLEAR 0x80000000
212#define PORT_N_ERR_STS_PORT_OK 0x00000002 231#define RIO_PORT_N_ACK_INBOUND 0x1f000000
213#define RIO_PORT_N_CTL_CSR(x) (0x5c + x*0x20) 232#define RIO_PORT_N_ACK_OUTSTAND 0x00001f00
233#define RIO_PORT_N_ACK_OUTBOUND 0x0000001f
234#define RIO_PORT_N_ERR_STS_CSR(x) (0x0058 + x*0x20)
235#define RIO_PORT_N_ERR_STS_PW_OUT_ES 0x00010000 /* Output Error-stopped */
236#define RIO_PORT_N_ERR_STS_PW_INP_ES 0x00000100 /* Input Error-stopped */
237#define RIO_PORT_N_ERR_STS_PW_PEND 0x00000010 /* Port-Write Pending */
238#define RIO_PORT_N_ERR_STS_PORT_ERR 0x00000004
239#define RIO_PORT_N_ERR_STS_PORT_OK 0x00000002
240#define RIO_PORT_N_ERR_STS_PORT_UNINIT 0x00000001
241#define RIO_PORT_N_ERR_STS_CLR_MASK 0x07120204
242#define RIO_PORT_N_CTL_CSR(x) (0x005c + x*0x20)
243#define RIO_PORT_N_CTL_PWIDTH 0xc0000000
244#define RIO_PORT_N_CTL_PWIDTH_1 0x00000000
245#define RIO_PORT_N_CTL_PWIDTH_4 0x40000000
246#define RIO_PORT_N_CTL_P_TYP_SER 0x00000001
247#define RIO_PORT_N_CTL_LOCKOUT 0x00000002
248#define RIO_PORT_N_CTL_EN_RX_SER 0x00200000
249#define RIO_PORT_N_CTL_EN_TX_SER 0x00400000
250#define RIO_PORT_N_CTL_EN_RX_PAR 0x08000000
251#define RIO_PORT_N_CTL_EN_TX_PAR 0x40000000
252
253/*
254 * Error Management Extensions (RapidIO 1.3+, Part 8)
255 *
256 * Extended Features Block ID=0x0007
257 */
258
259/* General EM Registers (Common for all Ports) */
260
261#define RIO_EM_EFB_HEADER 0x000 /* Error Management Extensions Block Header */
262#define RIO_EM_LTL_ERR_DETECT 0x008 /* Logical/Transport Layer Error Detect CSR */
263#define RIO_EM_LTL_ERR_EN 0x00c /* Logical/Transport Layer Error Enable CSR */
264#define RIO_EM_LTL_HIADDR_CAP 0x010 /* Logical/Transport Layer High Address Capture CSR */
265#define RIO_EM_LTL_ADDR_CAP 0x014 /* Logical/Transport Layer Address Capture CSR */
266#define RIO_EM_LTL_DEVID_CAP 0x018 /* Logical/Transport Layer Device ID Capture CSR */
267#define RIO_EM_LTL_CTRL_CAP 0x01c /* Logical/Transport Layer Control Capture CSR */
268#define RIO_EM_PW_TGT_DEVID 0x028 /* Port-write Target deviceID CSR */
269#define RIO_EM_PKT_TTL 0x02c /* Packet Time-to-live CSR */
270
271/* Per-Port EM Registers */
272
273#define RIO_EM_PN_ERR_DETECT(x) (0x040 + x*0x40) /* Port N Error Detect CSR */
274#define REM_PED_IMPL_SPEC 0x80000000
275#define REM_PED_LINK_TO 0x00000001
276#define RIO_EM_PN_ERRRATE_EN(x) (0x044 + x*0x40) /* Port N Error Rate Enable CSR */
277#define RIO_EM_PN_ATTRIB_CAP(x) (0x048 + x*0x40) /* Port N Attributes Capture CSR */
278#define RIO_EM_PN_PKT_CAP_0(x) (0x04c + x*0x40) /* Port N Packet/Control Symbol Capture 0 CSR */
279#define RIO_EM_PN_PKT_CAP_1(x) (0x050 + x*0x40) /* Port N Packet Capture 1 CSR */
280#define RIO_EM_PN_PKT_CAP_2(x) (0x054 + x*0x40) /* Port N Packet Capture 2 CSR */
281#define RIO_EM_PN_PKT_CAP_3(x) (0x058 + x*0x40) /* Port N Packet Capture 3 CSR */
282#define RIO_EM_PN_ERRRATE(x) (0x068 + x*0x40) /* Port N Error Rate CSR */
283#define RIO_EM_PN_ERRRATE_TR(x) (0x06c + x*0x40) /* Port N Error Rate Threshold CSR */
214 284
215#endif /* LINUX_RIO_REGS_H */ 285#endif /* LINUX_RIO_REGS_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c0151ffd3541..f118809c953f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -268,7 +268,6 @@ extern void init_idle(struct task_struct *idle, int cpu);
268extern void init_idle_bootup_task(struct task_struct *idle); 268extern void init_idle_bootup_task(struct task_struct *idle);
269 269
270extern int runqueue_is_locked(int cpu); 270extern int runqueue_is_locked(int cpu);
271extern void task_rq_unlock_wait(struct task_struct *p);
272 271
273extern cpumask_var_t nohz_cpu_mask; 272extern cpumask_var_t nohz_cpu_mask;
274#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) 273#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
@@ -527,8 +526,9 @@ struct thread_group_cputimer {
527 * the locking of signal_struct. 526 * the locking of signal_struct.
528 */ 527 */
529struct signal_struct { 528struct signal_struct {
530 atomic_t count; 529 atomic_t sigcnt;
531 atomic_t live; 530 atomic_t live;
531 int nr_threads;
532 532
533 wait_queue_head_t wait_chldexit; /* for wait4() */ 533 wait_queue_head_t wait_chldexit; /* for wait4() */
534 534
@@ -1423,6 +1423,7 @@ struct task_struct {
1423 nodemask_t mems_allowed; /* Protected by alloc_lock */ 1423 nodemask_t mems_allowed; /* Protected by alloc_lock */
1424 int mems_allowed_change_disable; 1424 int mems_allowed_change_disable;
1425 int cpuset_mem_spread_rotor; 1425 int cpuset_mem_spread_rotor;
1426 int cpuset_slab_spread_rotor;
1426#endif 1427#endif
1427#ifdef CONFIG_CGROUPS 1428#ifdef CONFIG_CGROUPS
1428 /* Control Group info protected by css_set_lock */ 1429 /* Control Group info protected by css_set_lock */
@@ -2035,7 +2036,7 @@ extern int do_notify_parent(struct task_struct *, int);
2035extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); 2036extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2036extern void force_sig(int, struct task_struct *); 2037extern void force_sig(int, struct task_struct *);
2037extern int send_sig(int, struct task_struct *, int); 2038extern int send_sig(int, struct task_struct *, int);
2038extern void zap_other_threads(struct task_struct *p); 2039extern int zap_other_threads(struct task_struct *p);
2039extern struct sigqueue *sigqueue_alloc(void); 2040extern struct sigqueue *sigqueue_alloc(void);
2040extern void sigqueue_free(struct sigqueue *); 2041extern void sigqueue_free(struct sigqueue *);
2041extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); 2042extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
@@ -2100,7 +2101,6 @@ extern void flush_thread(void);
2100extern void exit_thread(void); 2101extern void exit_thread(void);
2101 2102
2102extern void exit_files(struct task_struct *); 2103extern void exit_files(struct task_struct *);
2103extern void __cleanup_signal(struct signal_struct *);
2104extern void __cleanup_sighand(struct sighand_struct *); 2104extern void __cleanup_sighand(struct sighand_struct *);
2105 2105
2106extern void exit_itimers(struct signal_struct *); 2106extern void exit_itimers(struct signal_struct *);
@@ -2147,6 +2147,11 @@ extern bool current_is_single_threaded(void);
2147#define while_each_thread(g, t) \ 2147#define while_each_thread(g, t) \
2148 while ((t = next_thread(t)) != g) 2148 while ((t = next_thread(t)) != g)
2149 2149
2150static inline int get_nr_threads(struct task_struct *tsk)
2151{
2152 return tsk->signal->nr_threads;
2153}
2154
2150/* de_thread depends on thread_group_leader not being a pid based check */ 2155/* de_thread depends on thread_group_leader not being a pid based check */
2151#define thread_group_leader(p) (p == p->group_leader) 2156#define thread_group_leader(p) (p == p->group_leader)
2152 2157
@@ -2393,10 +2398,6 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
2393 spin_lock_init(&sig->cputimer.lock); 2398 spin_lock_init(&sig->cputimer.lock);
2394} 2399}
2395 2400
2396static inline void thread_group_cputime_free(struct signal_struct *sig)
2397{
2398}
2399
2400/* 2401/*
2401 * Reevaluate whether the task has signals pending delivery. 2402 * Reevaluate whether the task has signals pending delivery.
2402 * Wake the task if so. 2403 * Wake the task if so.
diff --git a/include/linux/sdhci-pltfm.h b/include/linux/sdhci-pltfm.h
new file mode 100644
index 000000000000..0239bd70241e
--- /dev/null
+++ b/include/linux/sdhci-pltfm.h
@@ -0,0 +1,35 @@
1/*
2 * Platform data declarations for the sdhci-pltfm driver.
3 *
4 * Copyright (c) 2010 MontaVista Software, LLC.
5 *
6 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or (at
11 * your option) any later version.
12 */
13
14#ifndef _SDHCI_PLTFM_H
15#define _SDHCI_PLTFM_H
16
17struct sdhci_ops;
18struct sdhci_host;
19
20/**
21 * struct sdhci_pltfm_data - SDHCI platform-specific information & hooks
22 * @ops: optional pointer to the platform-provided SDHCI ops
23 * @quirks: optional SDHCI quirks
24 * @init: optional hook that is called during device probe, before the
25 * driver tries to access any SDHCI registers
26 * @exit: optional hook that is called during device removal
27 */
28struct sdhci_pltfm_data {
29 struct sdhci_ops *ops;
30 unsigned int quirks;
31 int (*init)(struct sdhci_host *host);
32 void (*exit)(struct sdhci_host *host);
33};
34
35#endif /* _SDHCI_PLTFM_H */
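Platform code hands this structure to the sdhci-pltfm driver (typically as the platform device's platform_data); a minimal hypothetical definition with an init hook, names and quirk value invented for the example:

/* Hypothetical platform glue for the sdhci-pltfm driver. */
static int board_sdhci_init(struct sdhci_host *host)
{
        /* enable clocks / pad setup before the core touches SDHCI registers */
        return 0;
}

static struct sdhci_pltfm_data board_sdhci_pltfm_data = {
        .quirks = 0,
        .init   = board_sdhci_init,
};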
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 8a4adbef8a0f..f2961afa2f66 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -79,6 +79,7 @@ struct seminfo {
79#ifdef __KERNEL__ 79#ifdef __KERNEL__
80#include <asm/atomic.h> 80#include <asm/atomic.h>
81#include <linux/rcupdate.h> 81#include <linux/rcupdate.h>
82#include <linux/cache.h>
82 83
83struct task_struct; 84struct task_struct;
84 85
@@ -91,7 +92,8 @@ struct sem {
91 92
92/* One sem_array data structure for each set of semaphores in the system. */ 93/* One sem_array data structure for each set of semaphores in the system. */
93struct sem_array { 94struct sem_array {
94 struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */ 95 struct kern_ipc_perm ____cacheline_aligned_in_smp
96 sem_perm; /* permissions .. see ipc.h */
95 time_t sem_otime; /* last semop time */ 97 time_t sem_otime; /* last semop time */
96 time_t sem_ctime; /* last change time */ 98 time_t sem_ctime; /* last change time */
97 struct sem *sem_base; /* ptr to first semaphore in array */ 99 struct sem *sem_base; /* ptr to first semaphore in array */
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
index 9a6f7607174e..0299b4ce63db 100644
--- a/include/linux/sfi.h
+++ b/include/linux/sfi.h
@@ -73,6 +73,8 @@
73#define SFI_SIG_SPIB "SPIB" 73#define SFI_SIG_SPIB "SPIB"
74#define SFI_SIG_I2CB "I2CB" 74#define SFI_SIG_I2CB "I2CB"
75#define SFI_SIG_GPEM "GPEM" 75#define SFI_SIG_GPEM "GPEM"
76#define SFI_SIG_DEVS "DEVS"
77#define SFI_SIG_GPIO "GPIO"
76 78
77#define SFI_SIGNATURE_SIZE 4 79#define SFI_SIGNATURE_SIZE 4
78#define SFI_OEM_ID_SIZE 6 80#define SFI_OEM_ID_SIZE 6
@@ -145,6 +147,27 @@ struct sfi_rtc_table_entry {
145 u32 irq; 147 u32 irq;
146} __packed; 148} __packed;
147 149
150struct sfi_device_table_entry {
151 u8 type; /* bus type, I2C, SPI or ...*/
152#define SFI_DEV_TYPE_SPI 0
153#define SFI_DEV_TYPE_I2C 1
154#define SFI_DEV_TYPE_UART 2
155#define SFI_DEV_TYPE_HSI 3
156#define SFI_DEV_TYPE_IPC 4
157
158 u8 host_num; /* attached to host 0, 1...*/
159 u16 addr;
160 u8 irq;
161 u32 max_freq;
162 char name[16];
163} __packed;
164
165struct sfi_gpio_table_entry {
166 char controller_name[16];
167 u16 pin_no;
168 char pin_name[16];
169} __packed;
170
148struct sfi_spi_table_entry { 171struct sfi_spi_table_entry {
149 u16 host_num; /* attached to host 0, 1...*/ 172 u16 host_num; /* attached to host 0, 1...*/
150 u16 cs; /* chip select */ 173 u16 cs; /* chip select */
@@ -166,7 +189,6 @@ struct sfi_gpe_table_entry {
166 u16 phys_id; /* physical GPE id */ 189 u16 phys_id; /* physical GPE id */
167} __packed; 190} __packed;
168 191
169
170typedef int (*sfi_table_handler) (struct sfi_table_header *table); 192typedef int (*sfi_table_handler) (struct sfi_table_header *table);
171 193
172#ifdef CONFIG_SFI 194#ifdef CONFIG_SFI
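Platform setup code consumes the new DEVS entries by registering a handler for the table signature. A hedged sketch assuming the existing sfi_table_parse(), struct sfi_table_simple and SFI_GET_NUM_ENTRIES helpers from this header; the handler itself is illustrative:

/* Sketch: count the device entries carried in the SFI DEVS table. */
static int __init sfi_parse_devs(struct sfi_table_header *table)
{
        struct sfi_table_simple *sb = (struct sfi_table_simple *)table;
        int num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);

        pr_info("SFI: DEVS table describes %d devices\n", num);
        return 0;
}

/* during boot:  sfi_table_parse(SFI_SIG_DEVS, NULL, NULL, sfi_parse_devs); */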
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 55695c8d2f8a..4ba59cfc1f75 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -75,12 +75,6 @@ struct kmem_cache {
75 int offset; /* Free pointer offset. */ 75 int offset; /* Free pointer offset. */
76 struct kmem_cache_order_objects oo; 76 struct kmem_cache_order_objects oo;
77 77
78 /*
79 * Avoid an extra cache line for UP, SMP and for the node local to
80 * struct kmem_cache.
81 */
82 struct kmem_cache_node local_node;
83
84 /* Allocation and freeing of slabs */ 78 /* Allocation and freeing of slabs */
85 struct kmem_cache_order_objects max; 79 struct kmem_cache_order_objects max;
86 struct kmem_cache_order_objects min; 80 struct kmem_cache_order_objects min;
@@ -102,6 +96,9 @@ struct kmem_cache {
102 */ 96 */
103 int remote_node_defrag_ratio; 97 int remote_node_defrag_ratio;
104 struct kmem_cache_node *node[MAX_NUMNODES]; 98 struct kmem_cache_node *node[MAX_NUMNODES];
99#else
100 /* Avoid an extra cache line for UP */
101 struct kmem_cache_node local_node;
105#endif 102#endif
106}; 103};
107 104
@@ -140,7 +137,7 @@ struct kmem_cache {
140#ifdef CONFIG_ZONE_DMA 137#ifdef CONFIG_ZONE_DMA
141#define SLUB_DMA __GFP_DMA 138#define SLUB_DMA __GFP_DMA
142/* Reserve extra caches for potential DMA use */ 139/* Reserve extra caches for potential DMA use */
143#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6) 140#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
144#else 141#else
145/* Disable DMA functionality */ 142/* Disable DMA functionality */
146#define SLUB_DMA (__force gfp_t)0 143#define SLUB_DMA (__force gfp_t)0
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b6b614364dd8..ff4acea9bbdb 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -282,6 +282,11 @@ extern void kswapd_stop(int nid);
282extern int shmem_unuse(swp_entry_t entry, struct page *page); 282extern int shmem_unuse(swp_entry_t entry, struct page *page);
283#endif /* CONFIG_MMU */ 283#endif /* CONFIG_MMU */
284 284
285#ifdef CONFIG_CGROUP_MEM_RES_CTLR
286extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
287 struct page **pagep, swp_entry_t *ent);
288#endif
289
285extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *); 290extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
286 291
287#ifdef CONFIG_SWAP 292#ifdef CONFIG_SWAP
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index febedcf67c7e..81a4e213c6cf 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -73,16 +73,6 @@ extern void
73swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, 73swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
74 int nelems, enum dma_data_direction dir); 74 int nelems, enum dma_data_direction dir);
75 75
76extern void
77swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
78 unsigned long offset, size_t size,
79 enum dma_data_direction dir);
80
81extern void
82swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
83 unsigned long offset, size_t size,
84 enum dma_data_direction dir);
85
86extern int 76extern int
87swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); 77swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
88 78
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 057929b0a651..a1a86a53bc73 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -103,22 +103,6 @@ struct perf_event_attr;
103#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) 103#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
104#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) 104#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
105 105
106#ifdef CONFIG_PERF_EVENTS
107
108#define TRACE_SYS_ENTER_PERF_INIT(sname) \
109 .perf_event_enable = perf_sysenter_enable, \
110 .perf_event_disable = perf_sysenter_disable,
111
112#define TRACE_SYS_EXIT_PERF_INIT(sname) \
113 .perf_event_enable = perf_sysexit_enable, \
114 .perf_event_disable = perf_sysexit_disable,
115#else
116#define TRACE_SYS_ENTER_PERF(sname)
117#define TRACE_SYS_ENTER_PERF_INIT(sname)
118#define TRACE_SYS_EXIT_PERF(sname)
119#define TRACE_SYS_EXIT_PERF_INIT(sname)
120#endif /* CONFIG_PERF_EVENTS */
121
122#ifdef CONFIG_FTRACE_SYSCALLS 106#ifdef CONFIG_FTRACE_SYSCALLS
123#define __SC_STR_ADECL1(t, a) #a 107#define __SC_STR_ADECL1(t, a) #a
124#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__) 108#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__)
@@ -134,54 +118,43 @@ struct perf_event_attr;
134#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__) 118#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__)
135#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__) 119#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
136 120
121extern struct ftrace_event_class event_class_syscall_enter;
122extern struct ftrace_event_class event_class_syscall_exit;
123extern struct trace_event_functions enter_syscall_print_funcs;
124extern struct trace_event_functions exit_syscall_print_funcs;
125
137#define SYSCALL_TRACE_ENTER_EVENT(sname) \ 126#define SYSCALL_TRACE_ENTER_EVENT(sname) \
138 static const struct syscall_metadata __syscall_meta_##sname; \ 127 static struct syscall_metadata __syscall_meta_##sname; \
139 static struct ftrace_event_call \ 128 static struct ftrace_event_call \
140 __attribute__((__aligned__(4))) event_enter_##sname; \ 129 __attribute__((__aligned__(4))) event_enter_##sname; \
141 static struct trace_event enter_syscall_print_##sname = { \
142 .trace = print_syscall_enter, \
143 }; \
144 static struct ftrace_event_call __used \ 130 static struct ftrace_event_call __used \
145 __attribute__((__aligned__(4))) \ 131 __attribute__((__aligned__(4))) \
146 __attribute__((section("_ftrace_events"))) \ 132 __attribute__((section("_ftrace_events"))) \
147 event_enter_##sname = { \ 133 event_enter_##sname = { \
148 .name = "sys_enter"#sname, \ 134 .name = "sys_enter"#sname, \
149 .system = "syscalls", \ 135 .class = &event_class_syscall_enter, \
150 .event = &enter_syscall_print_##sname, \ 136 .event.funcs = &enter_syscall_print_funcs, \
151 .raw_init = init_syscall_trace, \
152 .define_fields = syscall_enter_define_fields, \
153 .regfunc = reg_event_syscall_enter, \
154 .unregfunc = unreg_event_syscall_enter, \
155 .data = (void *)&__syscall_meta_##sname,\ 137 .data = (void *)&__syscall_meta_##sname,\
156 TRACE_SYS_ENTER_PERF_INIT(sname) \
157 } 138 }
158 139
159#define SYSCALL_TRACE_EXIT_EVENT(sname) \ 140#define SYSCALL_TRACE_EXIT_EVENT(sname) \
160 static const struct syscall_metadata __syscall_meta_##sname; \ 141 static struct syscall_metadata __syscall_meta_##sname; \
161 static struct ftrace_event_call \ 142 static struct ftrace_event_call \
162 __attribute__((__aligned__(4))) event_exit_##sname; \ 143 __attribute__((__aligned__(4))) event_exit_##sname; \
163 static struct trace_event exit_syscall_print_##sname = { \
164 .trace = print_syscall_exit, \
165 }; \
166 static struct ftrace_event_call __used \ 144 static struct ftrace_event_call __used \
167 __attribute__((__aligned__(4))) \ 145 __attribute__((__aligned__(4))) \
168 __attribute__((section("_ftrace_events"))) \ 146 __attribute__((section("_ftrace_events"))) \
169 event_exit_##sname = { \ 147 event_exit_##sname = { \
170 .name = "sys_exit"#sname, \ 148 .name = "sys_exit"#sname, \
171 .system = "syscalls", \ 149 .class = &event_class_syscall_exit, \
172 .event = &exit_syscall_print_##sname, \ 150 .event.funcs = &exit_syscall_print_funcs, \
173 .raw_init = init_syscall_trace, \
174 .define_fields = syscall_exit_define_fields, \
175 .regfunc = reg_event_syscall_exit, \
176 .unregfunc = unreg_event_syscall_exit, \
177 .data = (void *)&__syscall_meta_##sname,\ 151 .data = (void *)&__syscall_meta_##sname,\
178 TRACE_SYS_EXIT_PERF_INIT(sname) \
179 } 152 }
180 153
181#define SYSCALL_METADATA(sname, nb) \ 154#define SYSCALL_METADATA(sname, nb) \
182 SYSCALL_TRACE_ENTER_EVENT(sname); \ 155 SYSCALL_TRACE_ENTER_EVENT(sname); \
183 SYSCALL_TRACE_EXIT_EVENT(sname); \ 156 SYSCALL_TRACE_EXIT_EVENT(sname); \
184 static const struct syscall_metadata __used \ 157 static struct syscall_metadata __used \
185 __attribute__((__aligned__(4))) \ 158 __attribute__((__aligned__(4))) \
186 __attribute__((section("__syscalls_metadata"))) \ 159 __attribute__((section("__syscalls_metadata"))) \
187 __syscall_meta_##sname = { \ 160 __syscall_meta_##sname = { \
@@ -191,12 +164,14 @@ struct perf_event_attr;
191 .args = args_##sname, \ 164 .args = args_##sname, \
192 .enter_event = &event_enter_##sname, \ 165 .enter_event = &event_enter_##sname, \
193 .exit_event = &event_exit_##sname, \ 166 .exit_event = &event_exit_##sname, \
167 .enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
168 .exit_fields = LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \
194 }; 169 };
195 170
196#define SYSCALL_DEFINE0(sname) \ 171#define SYSCALL_DEFINE0(sname) \
197 SYSCALL_TRACE_ENTER_EVENT(_##sname); \ 172 SYSCALL_TRACE_ENTER_EVENT(_##sname); \
198 SYSCALL_TRACE_EXIT_EVENT(_##sname); \ 173 SYSCALL_TRACE_EXIT_EVENT(_##sname); \
199 static const struct syscall_metadata __used \ 174 static struct syscall_metadata __used \
200 __attribute__((__aligned__(4))) \ 175 __attribute__((__aligned__(4))) \
201 __attribute__((section("__syscalls_metadata"))) \ 176 __attribute__((section("__syscalls_metadata"))) \
202 __syscall_meta__##sname = { \ 177 __syscall_meta__##sname = { \
@@ -204,6 +179,8 @@ struct perf_event_attr;
204 .nb_args = 0, \ 179 .nb_args = 0, \
205 .enter_event = &event_enter__##sname, \ 180 .enter_event = &event_enter__##sname, \
206 .exit_event = &event_exit__##sname, \ 181 .exit_event = &event_exit__##sname, \
182 .enter_fields = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
183 .exit_fields = LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \
207 }; \ 184 }; \
208 asmlinkage long sys_##sname(void) 185 asmlinkage long sys_##sname(void)
209#else 186#else
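
For illustration only (not part of this patch): with the macros above, a syscall defined through SYSCALL_DEFINEx() picks up its enter/exit trace events automatically, and the new .class/.event.funcs wiring replaces the per-event regfunc/unregfunc/print callbacks. A minimal sketch with a hypothetical syscall name:

    #include <linux/syscalls.h>
    #include <linux/errno.h>

    /*
     * Hypothetical syscall: when CONFIG_FTRACE_SYSCALLS is enabled,
     * SYSCALL_DEFINE1() expands SYSCALL_METADATA(), which emits the
     * sys_enter/sys_exit events bound to the shared
     * event_class_syscall_enter / event_class_syscall_exit classes.
     */
    SYSCALL_DEFINE1(example, unsigned int, flags)
    {
            return flags ? -EINVAL : 0;
    }
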
diff --git a/include/linux/threads.h b/include/linux/threads.h
index 052b12bec8bd..383ab9592bec 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -33,4 +33,13 @@
33#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \ 33#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \
34 (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT)) 34 (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT))
35 35
36/*
37 * Define a minimum number of pids per cpu. Heuristically based
38 * on original pid max of 32k for 32 cpus. Also, increase the
39 * minimum settable value for pid_max on the running system based
40 * on similar defaults. See kernel/pid.c:pidmap_init() for details.
41 */
42#define PIDS_PER_CPU_DEFAULT 1024
43#define PIDS_PER_CPU_MIN 8
44
36#endif 45#endif
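
For illustration only (not part of this patch): the two constants above let the pid space scale with the CPU count, as the comment describes. A rough sketch of the boot-time clamping with assumed starting values; the authoritative logic lives in kernel/pid.c:pidmap_init():

    #include <linux/threads.h>

    /* Rough sketch only: scale pid_max and its settable minimum by CPU count. */
    static void example_scale_pid_limits(unsigned int ncpus)
    {
            unsigned int pid_max = PID_MAX_DEFAULT;         /* typically 32768 */
            unsigned int pid_max_min = 301;                 /* assumed floor */

            if (pid_max < PIDS_PER_CPU_DEFAULT * ncpus)     /* ~1024 pids per CPU */
                    pid_max = PIDS_PER_CPU_DEFAULT * ncpus;
            if (pid_max > PID_MAX_LIMIT)                    /* never exceed the hard cap */
                    pid_max = PID_MAX_LIMIT;

            if (pid_max_min < PIDS_PER_CPU_MIN * ncpus)     /* at least 8 pids per CPU */
                    pid_max_min = PIDS_PER_CPU_MIN * ncpus;

            (void)pid_max;
            (void)pid_max_min;
    }
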
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 5b81156780b1..c44df50a05ab 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -31,6 +31,7 @@
31#include <linux/bitops.h> 31#include <linux/bitops.h>
32#include <linux/mmzone.h> 32#include <linux/mmzone.h>
33#include <linux/smp.h> 33#include <linux/smp.h>
34#include <linux/percpu.h>
34#include <asm/topology.h> 35#include <asm/topology.h>
35 36
36#ifndef node_has_online_mem 37#ifndef node_has_online_mem
@@ -203,8 +204,114 @@ int arch_update_cpu_topology(void);
203#ifndef SD_NODE_INIT 204#ifndef SD_NODE_INIT
204#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! 205#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!!
205#endif 206#endif
207
206#endif /* CONFIG_NUMA */ 208#endif /* CONFIG_NUMA */
207 209
210#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
211DECLARE_PER_CPU(int, numa_node);
212
213#ifndef numa_node_id
214/* Returns the number of the current Node. */
215static inline int numa_node_id(void)
216{
217 return __this_cpu_read(numa_node);
218}
219#endif
220
221#ifndef cpu_to_node
222static inline int cpu_to_node(int cpu)
223{
224 return per_cpu(numa_node, cpu);
225}
226#endif
227
228#ifndef set_numa_node
229static inline void set_numa_node(int node)
230{
231 percpu_write(numa_node, node);
232}
233#endif
234
235#ifndef set_cpu_numa_node
236static inline void set_cpu_numa_node(int cpu, int node)
237{
238 per_cpu(numa_node, cpu) = node;
239}
240#endif
241
242#else /* !CONFIG_USE_PERCPU_NUMA_NODE_ID */
243
244/* Returns the number of the current Node. */
245#ifndef numa_node_id
246static inline int numa_node_id(void)
247{
248 return cpu_to_node(raw_smp_processor_id());
249}
250#endif
251
252#endif /* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */
253
254#ifdef CONFIG_HAVE_MEMORYLESS_NODES
255
256/*
257 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
258 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
259 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
260 */
261DECLARE_PER_CPU(int, _numa_mem_);
262
263#ifndef set_numa_mem
264static inline void set_numa_mem(int node)
265{
266 percpu_write(_numa_mem_, node);
267}
268#endif
269
270#ifndef numa_mem_id
271/* Returns the number of the nearest Node with memory */
272static inline int numa_mem_id(void)
273{
274 return __this_cpu_read(_numa_mem_);
275}
276#endif
277
278#ifndef cpu_to_mem
279static inline int cpu_to_mem(int cpu)
280{
281 return per_cpu(_numa_mem_, cpu);
282}
283#endif
284
285#ifndef set_cpu_numa_mem
286static inline void set_cpu_numa_mem(int cpu, int node)
287{
288 per_cpu(_numa_mem_, cpu) = node;
289}
290#endif
291
292#else /* !CONFIG_HAVE_MEMORYLESS_NODES */
293
294static inline void set_numa_mem(int node) {}
295
296static inline void set_cpu_numa_mem(int cpu, int node) {}
297
298#ifndef numa_mem_id
299/* Returns the number of the nearest Node with memory */
300static inline int numa_mem_id(void)
301{
302 return numa_node_id();
303}
304#endif
305
306#ifndef cpu_to_mem
307static inline int cpu_to_mem(int cpu)
308{
309 return cpu_to_node(cpu);
310}
311#endif
312
313#endif /* [!]CONFIG_HAVE_MEMORYLESS_NODES */
314
208#ifndef topology_physical_package_id 315#ifndef topology_physical_package_id
209#define topology_physical_package_id(cpu) ((void)(cpu), -1) 316#define topology_physical_package_id(cpu) ((void)(cpu), -1)
210#endif 317#endif
@@ -218,9 +325,4 @@ int arch_update_cpu_topology(void);
218#define topology_core_cpumask(cpu) cpumask_of(cpu) 325#define topology_core_cpumask(cpu) cpumask_of(cpu)
219#endif 326#endif
220 327
221/* Returns the number of the current Node. */
222#ifndef numa_node_id
223#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
224#endif
225
226#endif /* _LINUX_TOPOLOGY_H */ 328#endif /* _LINUX_TOPOLOGY_H */
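
For illustration only (not part of this patch): the numa_mem_id()/cpu_to_mem() accessors added above exist so that allocations on a memoryless node can be directed at the nearest node that actually has memory. A minimal usage sketch; the helper name is hypothetical:

    #include <linux/slab.h>
    #include <linux/topology.h>

    /* Hypothetical helper: allocate near the calling CPU, even on a memoryless node. */
    static void *example_alloc_local(size_t size)
    {
            /*
             * numa_node_id() may name a node with no memory; numa_mem_id()
             * returns the nearest node that has memory, and simply falls
             * back to numa_node_id() when CONFIG_HAVE_MEMORYLESS_NODES is off.
             */
            return kmalloc_node(size, GFP_KERNEL, numa_mem_id());
    }
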
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 1d85f9a6a199..9a59d1f98cd4 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -20,12 +20,17 @@
20struct module; 20struct module;
21struct tracepoint; 21struct tracepoint;
22 22
23struct tracepoint_func {
24 void *func;
25 void *data;
26};
27
23struct tracepoint { 28struct tracepoint {
24 const char *name; /* Tracepoint name */ 29 const char *name; /* Tracepoint name */
25 int state; /* State. */ 30 int state; /* State. */
26 void (*regfunc)(void); 31 void (*regfunc)(void);
27 void (*unregfunc)(void); 32 void (*unregfunc)(void);
28 void **funcs; 33 struct tracepoint_func *funcs;
29} __attribute__((aligned(32))); /* 34} __attribute__((aligned(32))); /*
30 * Aligned on 32 bytes because it is 35 * Aligned on 32 bytes because it is
31 * globally visible and gcc happily 36 * globally visible and gcc happily
@@ -37,16 +42,19 @@ struct tracepoint {
37 * Connect a probe to a tracepoint. 42 * Connect a probe to a tracepoint.
38 * Internal API, should not be used directly. 43 * Internal API, should not be used directly.
39 */ 44 */
40extern int tracepoint_probe_register(const char *name, void *probe); 45extern int tracepoint_probe_register(const char *name, void *probe, void *data);
41 46
42/* 47/*
43 * Disconnect a probe from a tracepoint. 48 * Disconnect a probe from a tracepoint.
44 * Internal API, should not be used directly. 49 * Internal API, should not be used directly.
45 */ 50 */
46extern int tracepoint_probe_unregister(const char *name, void *probe); 51extern int
52tracepoint_probe_unregister(const char *name, void *probe, void *data);
47 53
48extern int tracepoint_probe_register_noupdate(const char *name, void *probe); 54extern int tracepoint_probe_register_noupdate(const char *name, void *probe,
49extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe); 55 void *data);
56extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
57 void *data);
50extern void tracepoint_probe_update_all(void); 58extern void tracepoint_probe_update_all(void);
51 59
52struct tracepoint_iter { 60struct tracepoint_iter {
@@ -102,17 +110,27 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
102/* 110/*
103 * it_func[0] is never NULL because there is at least one element in the array 111 * it_func[0] is never NULL because there is at least one element in the array
104 * when the array itself is non NULL. 112 * when the array itself is non NULL.
113 *
114 * Note, the proto and args passed in includes "__data" as the first parameter.
115 * The reason for this is to handle the "void" prototype. If a tracepoint
116 * has a "void" prototype, then it is invalid to declare a function
117 * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
118 * "void *data", whereas the DECLARE_TRACE() will pass in "void *data, proto".
105 */ 119 */
106#define __DO_TRACE(tp, proto, args) \ 120#define __DO_TRACE(tp, proto, args) \
107 do { \ 121 do { \
108 void **it_func; \ 122 struct tracepoint_func *it_func_ptr; \
123 void *it_func; \
124 void *__data; \
109 \ 125 \
110 rcu_read_lock_sched_notrace(); \ 126 rcu_read_lock_sched_notrace(); \
111 it_func = rcu_dereference_sched((tp)->funcs); \ 127 it_func_ptr = rcu_dereference_sched((tp)->funcs); \
112 if (it_func) { \ 128 if (it_func_ptr) { \
113 do { \ 129 do { \
114 ((void(*)(proto))(*it_func))(args); \ 130 it_func = (it_func_ptr)->func; \
115 } while (*(++it_func)); \ 131 __data = (it_func_ptr)->data; \
132 ((void(*)(proto))(it_func))(args); \
133 } while ((++it_func_ptr)->func); \
116 } \ 134 } \
117 rcu_read_unlock_sched_notrace(); \ 135 rcu_read_unlock_sched_notrace(); \
118 } while (0) 136 } while (0)
@@ -122,24 +140,32 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
122 * not add unwanted padding between the beginning of the section and the 140 * not add unwanted padding between the beginning of the section and the
123 * structure. Force alignment to the same alignment as the section start. 141 * structure. Force alignment to the same alignment as the section start.
124 */ 142 */
125#define DECLARE_TRACE(name, proto, args) \ 143#define __DECLARE_TRACE(name, proto, args, data_proto, data_args) \
126 extern struct tracepoint __tracepoint_##name; \ 144 extern struct tracepoint __tracepoint_##name; \
127 static inline void trace_##name(proto) \ 145 static inline void trace_##name(proto) \
128 { \ 146 { \
129 if (unlikely(__tracepoint_##name.state)) \ 147 if (unlikely(__tracepoint_##name.state)) \
130 __DO_TRACE(&__tracepoint_##name, \ 148 __DO_TRACE(&__tracepoint_##name, \
131 TP_PROTO(proto), TP_ARGS(args)); \ 149 TP_PROTO(data_proto), \
150 TP_ARGS(data_args)); \
151 } \
152 static inline int \
153 register_trace_##name(void (*probe)(data_proto), void *data) \
154 { \
155 return tracepoint_probe_register(#name, (void *)probe, \
156 data); \
132 } \ 157 } \
133 static inline int register_trace_##name(void (*probe)(proto)) \ 158 static inline int \
159 unregister_trace_##name(void (*probe)(data_proto), void *data) \
134 { \ 160 { \
135 return tracepoint_probe_register(#name, (void *)probe); \ 161 return tracepoint_probe_unregister(#name, (void *)probe, \
162 data); \
136 } \ 163 } \
137 static inline int unregister_trace_##name(void (*probe)(proto)) \ 164 static inline void \
165 check_trace_callback_type_##name(void (*cb)(data_proto)) \
138 { \ 166 { \
139 return tracepoint_probe_unregister(#name, (void *)probe);\
140 } 167 }
141 168
142
143#define DEFINE_TRACE_FN(name, reg, unreg) \ 169#define DEFINE_TRACE_FN(name, reg, unreg) \
144 static const char __tpstrtab_##name[] \ 170 static const char __tpstrtab_##name[] \
145 __attribute__((section("__tracepoints_strings"))) = #name; \ 171 __attribute__((section("__tracepoints_strings"))) = #name; \
@@ -156,18 +182,23 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
156 EXPORT_SYMBOL(__tracepoint_##name) 182 EXPORT_SYMBOL(__tracepoint_##name)
157 183
158#else /* !CONFIG_TRACEPOINTS */ 184#else /* !CONFIG_TRACEPOINTS */
159#define DECLARE_TRACE(name, proto, args) \ 185#define __DECLARE_TRACE(name, proto, args, data_proto, data_args) \
160 static inline void _do_trace_##name(struct tracepoint *tp, proto) \
161 { } \
162 static inline void trace_##name(proto) \ 186 static inline void trace_##name(proto) \
163 { } \ 187 { } \
164 static inline int register_trace_##name(void (*probe)(proto)) \ 188 static inline int \
189 register_trace_##name(void (*probe)(data_proto), \
190 void *data) \
165 { \ 191 { \
166 return -ENOSYS; \ 192 return -ENOSYS; \
167 } \ 193 } \
168 static inline int unregister_trace_##name(void (*probe)(proto)) \ 194 static inline int \
195 unregister_trace_##name(void (*probe)(data_proto), \
196 void *data) \
169 { \ 197 { \
170 return -ENOSYS; \ 198 return -ENOSYS; \
199 } \
200 static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \
201 { \
171 } 202 }
172 203
173#define DEFINE_TRACE_FN(name, reg, unreg) 204#define DEFINE_TRACE_FN(name, reg, unreg)
@@ -176,6 +207,29 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
176#define EXPORT_TRACEPOINT_SYMBOL(name) 207#define EXPORT_TRACEPOINT_SYMBOL(name)
177 208
178#endif /* CONFIG_TRACEPOINTS */ 209#endif /* CONFIG_TRACEPOINTS */
210
211/*
212 * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype
213 * (void). "void" is a special value in a function prototype and can
214 * not be combined with other arguments. Since the DECLARE_TRACE()
215 * macro adds a data element at the beginning of the prototype,
216 * we need a way to differentiate "(void *data, proto)" from
217 * "(void *data, void)". The second prototype is invalid.
218 *
219 * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype
220 * and "void *__data" as the callback prototype.
221 *
222 * DECLARE_TRACE() passes "proto" as the tracepoint prototype and
223 * "void *__data, proto" as the callback prototype.
224 */
225#define DECLARE_TRACE_NOARGS(name) \
226 __DECLARE_TRACE(name, void, , void *__data, __data)
227
228#define DECLARE_TRACE(name, proto, args) \
229 __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
230 PARAMS(void *__data, proto), \
231 PARAMS(__data, args))
232
179#endif /* DECLARE_TRACE */ 233#endif /* DECLARE_TRACE */
180 234
181#ifndef TRACE_EVENT 235#ifndef TRACE_EVENT
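
For illustration only (not part of this patch): every probe now receives a private void *data pointer as its first argument, and the register/unregister helpers carry that same pointer. A sketch against a hypothetical tracepoint foo, assuming a header that declares DECLARE_TRACE(foo, TP_PROTO(int val), TP_ARGS(val)):

    #include <linux/kernel.h>

    /* Probe for the hypothetical tracepoint: note the leading void *data argument. */
    static void foo_probe(void *data, int val)
    {
            long threshold = (long)data;

            if (val > threshold)
                    pr_info("foo fired: val=%d\n", val);
    }

    static int example_attach(void)
    {
            /* The same (probe, data) pair must be used to unregister later. */
            return register_trace_foo(foo_probe, (void *)100L);
    }

    static void example_detach(void)
    {
            unregister_trace_foo(foo_probe, (void *)100L);
    }
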
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
index 15ddd4483b09..60c81da77f0f 100644
--- a/include/linux/uinput.h
+++ b/include/linux/uinput.h
@@ -166,11 +166,11 @@ struct uinput_ff_erase {
166struct uinput_user_dev { 166struct uinput_user_dev {
167 char name[UINPUT_MAX_NAME_SIZE]; 167 char name[UINPUT_MAX_NAME_SIZE];
168 struct input_id id; 168 struct input_id id;
169 int ff_effects_max; 169 int ff_effects_max;
170 int absmax[ABS_MAX + 1]; 170 int absmax[ABS_CNT];
171 int absmin[ABS_MAX + 1]; 171 int absmin[ABS_CNT];
172 int absfuzz[ABS_MAX + 1]; 172 int absfuzz[ABS_CNT];
173 int absflat[ABS_MAX + 1]; 173 int absflat[ABS_CNT];
174}; 174};
175#endif /* __UINPUT_H_ */ 175#endif /* __UINPUT_H_ */
176 176
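
For illustration only (not part of this patch): the absmax/absmin/absfuzz/absflat arrays are now sized with ABS_CNT (equal to ABS_MAX + 1), so user space should iterate with the same constant. A minimal user-space sketch for a hypothetical device:

    #include <linux/uinput.h>
    #include <string.h>

    /* Illustrative only: populate absolute-axis ranges before UI_DEV_CREATE. */
    static void example_fill_abs(struct uinput_user_dev *dev)
    {
            int code;

            memset(dev, 0, sizeof(*dev));
            strncpy(dev->name, "example-abs-device", UINPUT_MAX_NAME_SIZE);
            for (code = 0; code < ABS_CNT; code++) {    /* ABS_CNT == ABS_MAX + 1 */
                    dev->absmin[code] = 0;
                    dev->absmax[code] = 255;
            }
    }
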
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h
index 2389f93a28b5..92f1d99f0f17 100644
--- a/include/linux/usb/audio-v2.h
+++ b/include/linux/usb/audio-v2.h
@@ -105,6 +105,22 @@ struct uac_as_header_descriptor_v2 {
105 __u8 iChannelNames; 105 __u8 iChannelNames;
106} __attribute__((packed)); 106} __attribute__((packed));
107 107
108/* 4.10.1.2 Class-Specific AS Isochronous Audio Data Endpoint Descriptor */
109
110struct uac2_iso_endpoint_descriptor {
111 __u8 bLength; /* in bytes: 8 */
112 __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */
113 __u8 bDescriptorSubtype; /* EP_GENERAL */
114 __u8 bmAttributes;
115 __u8 bmControls;
116 __u8 bLockDelayUnits;
117 __le16 wLockDelay;
118} __attribute__((packed));
119
120#define UAC2_CONTROL_PITCH (3 << 0)
121#define UAC2_CONTROL_DATA_OVERRUN (3 << 2)
122#define UAC2_CONTROL_DATA_UNDERRUN (3 << 4)
123
108/* 6.1 Interrupt Data Message */ 124/* 6.1 Interrupt Data Message */
109 125
110#define UAC2_INTERRUPT_DATA_MSG_VENDOR (1 << 0) 126#define UAC2_INTERRUPT_DATA_MSG_VENDOR (1 << 0)
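
For illustration only (not part of this patch): the new class-specific isochronous endpoint descriptor carries pitch/overrun/underrun capability bits in bmControls. A hedged parsing sketch; the buffer handling and function name are hypothetical:

    #include <linux/usb/audio-v2.h>

    /* Illustrative only: does an AS isochronous data endpoint descriptor
     * advertise a programmable pitch control? */
    static bool example_has_pitch_control(const unsigned char *extra, int extralen)
    {
            const struct uac2_iso_endpoint_descriptor *epd =
                    (const struct uac2_iso_endpoint_descriptor *)extra;

            if (extralen < (int)sizeof(*epd) ||
                epd->bDescriptorSubtype != 0x01 /* EP_GENERAL */)
                    return false;

            /* Each control occupies two bits; a non-zero field means "present". */
            return (epd->bmControls & UAC2_CONTROL_PITCH) != 0;
    }
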
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
new file mode 100644
index 000000000000..5b7efbfcee4e
--- /dev/null
+++ b/include/linux/uuid.h
@@ -0,0 +1,70 @@
1/*
2 * UUID/GUID definition
3 *
4 * Copyright (C) 2010, Intel Corp.
5 * Huang Ying <ying.huang@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation;
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef _LINUX_UUID_H_
22#define _LINUX_UUID_H_
23
24#include <linux/types.h>
25#include <linux/string.h>
26
27typedef struct {
28 __u8 b[16];
29} uuid_le;
30
31typedef struct {
32 __u8 b[16];
33} uuid_be;
34
35#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
36((uuid_le) \
37{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
38 (b) & 0xff, ((b) >> 8) & 0xff, \
39 (c) & 0xff, ((c) >> 8) & 0xff, \
40 (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
41
42#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
43((uuid_be) \
44{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
45 ((b) >> 8) & 0xff, (b) & 0xff, \
46 ((c) >> 8) & 0xff, (c) & 0xff, \
47 (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
48
49#define NULL_UUID_LE \
50 UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
51 0x00, 0x00, 0x00, 0x00)
52
53#define NULL_UUID_BE \
54 UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
55 0x00, 0x00, 0x00, 0x00)
56
57static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
58{
59 return memcmp(&u1, &u2, sizeof(uuid_le));
60}
61
62static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2)
63{
64 return memcmp(&u1, &u2, sizeof(uuid_be));
65}
66
67extern void uuid_le_gen(uuid_le *u);
68extern void uuid_be_gen(uuid_be *u);
69
70#endif
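
For illustration only (not part of this patch): the new header gives the kernel a byte-order-explicit UUID type plus constructors, comparison, and random generation. A small usage sketch; the GUID value and helper names are arbitrary:

    #include <linux/uuid.h>

    /* Arbitrary example GUID, stored little-endian as EFI/SMBIOS expect. */
    static const uuid_le example_guid =
            UUID_LE(0x12345678, 0x9abc, 0xdef0,
                    0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef);

    static bool example_guid_matches(const uuid_le *candidate)
    {
            return uuid_le_cmp(*candidate, example_guid) == 0;
    }

    static void example_make_random(uuid_le *out)
    {
            uuid_le_gen(out);       /* fills all 16 bytes with a random UUID */
    }
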
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index 6cf44866cecd..726cc3536409 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -39,7 +39,7 @@ extern int net_cls_subsys_id;
39static inline u32 task_cls_classid(struct task_struct *p) 39static inline u32 task_cls_classid(struct task_struct *p)
40{ 40{
41 int id; 41 int id;
42 u32 classid; 42 u32 classid = 0;
43 43
44 if (in_interrupt()) 44 if (in_interrupt())
45 return 0; 45 return 0;
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 6173c619913a..4b860116e096 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -876,7 +876,7 @@ struct sctp_transport {
876 876
877 /* Reference counting. */ 877 /* Reference counting. */
878 atomic_t refcnt; 878 atomic_t refcnt;
879 int dead:1, 879 __u32 dead:1,
880 /* RTO-Pending : A flag used to track if one of the DATA 880 /* RTO-Pending : A flag used to track if one of the DATA
881 * chunks sent to this address is currently being 881 * chunks sent to this address is currently being
882 * used to compute a RTT. If this flag is 0, 882 * used to compute a RTT. If this flag is 0,
diff --git a/include/net/sock.h b/include/net/sock.h
index d2a71b04a5ae..ca241ea14875 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1026,15 +1026,23 @@ extern void release_sock(struct sock *sk);
1026 SINGLE_DEPTH_NESTING) 1026 SINGLE_DEPTH_NESTING)
1027#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) 1027#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
1028 1028
1029static inline void lock_sock_bh(struct sock *sk) 1029extern bool lock_sock_fast(struct sock *sk);
1030/**
1031 * unlock_sock_fast - complement of lock_sock_fast
1032 * @sk: socket
1033 * @slow: slow mode
1034 *
1035 * fast unlock socket for user context.
1036 * If slow mode is on, we call regular release_sock()
1037 */
1038static inline void unlock_sock_fast(struct sock *sk, bool slow)
1030{ 1039{
1031 spin_lock_bh(&sk->sk_lock.slock); 1040 if (slow)
1041 release_sock(sk);
1042 else
1043 spin_unlock_bh(&sk->sk_lock.slock);
1032} 1044}
1033 1045
1034static inline void unlock_sock_bh(struct sock *sk)
1035{
1036 spin_unlock_bh(&sk->sk_lock.slock);
1037}
1038 1046
1039extern struct sock *sk_alloc(struct net *net, int family, 1047extern struct sock *sk_alloc(struct net *net, int family,
1040 gfp_t priority, 1048 gfp_t priority,
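
For illustration only (not part of this patch): lock_sock_bh()/unlock_sock_bh() give way to lock_sock_fast()/unlock_sock_fast(), where the bool returned by the lock side tells the unlock side which path was taken. The expected pairing, with a hypothetical critical section:

    /* Illustrative only: the pairing expected by lock_sock_fast(). */
    static void example_touch_sock(struct sock *sk)
    {
            bool slow = lock_sock_fast(sk);

            /* ... short critical section, safe against BH and socket owners ... */

            unlock_sock_fast(sk, slow);     /* release_sock() or spin_unlock_bh() */
    }
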
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 2aa6aa3e8f61..f5b1ba90e952 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -353,7 +353,7 @@ TRACE_EVENT(ext4_discard_blocks,
353 jbd2_dev_to_name(__entry->dev), __entry->blk, __entry->count) 353 jbd2_dev_to_name(__entry->dev), __entry->blk, __entry->count)
354); 354);
355 355
356TRACE_EVENT(ext4_mb_new_inode_pa, 356DECLARE_EVENT_CLASS(ext4__mb_new_pa,
357 TP_PROTO(struct ext4_allocation_context *ac, 357 TP_PROTO(struct ext4_allocation_context *ac,
358 struct ext4_prealloc_space *pa), 358 struct ext4_prealloc_space *pa),
359 359
@@ -381,32 +381,20 @@ TRACE_EVENT(ext4_mb_new_inode_pa,
381 __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart) 381 __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
382); 382);
383 383
384TRACE_EVENT(ext4_mb_new_group_pa, 384DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa,
385
385 TP_PROTO(struct ext4_allocation_context *ac, 386 TP_PROTO(struct ext4_allocation_context *ac,
386 struct ext4_prealloc_space *pa), 387 struct ext4_prealloc_space *pa),
387 388
388 TP_ARGS(ac, pa), 389 TP_ARGS(ac, pa)
389 390);
390 TP_STRUCT__entry(
391 __field( dev_t, dev )
392 __field( ino_t, ino )
393 __field( __u64, pa_pstart )
394 __field( __u32, pa_len )
395 __field( __u64, pa_lstart )
396 391
397 ), 392DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
398 393
399 TP_fast_assign( 394 TP_PROTO(struct ext4_allocation_context *ac,
400 __entry->dev = ac->ac_sb->s_dev; 395 struct ext4_prealloc_space *pa),
401 __entry->ino = ac->ac_inode->i_ino;
402 __entry->pa_pstart = pa->pa_pstart;
403 __entry->pa_len = pa->pa_len;
404 __entry->pa_lstart = pa->pa_lstart;
405 ),
406 396
407 TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu", 397 TP_ARGS(ac, pa)
408 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
409 __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
410); 398);
411 399
412TRACE_EVENT(ext4_mb_release_inode_pa, 400TRACE_EVENT(ext4_mb_release_inode_pa,
@@ -618,9 +606,9 @@ TRACE_EVENT(ext4_free_blocks,
618); 606);
619 607
620TRACE_EVENT(ext4_sync_file, 608TRACE_EVENT(ext4_sync_file,
621 TP_PROTO(struct file *file, struct dentry *dentry, int datasync), 609 TP_PROTO(struct file *file, int datasync),
622 610
623 TP_ARGS(file, dentry, datasync), 611 TP_ARGS(file, datasync),
624 612
625 TP_STRUCT__entry( 613 TP_STRUCT__entry(
626 __field( dev_t, dev ) 614 __field( dev_t, dev )
@@ -630,6 +618,8 @@ TRACE_EVENT(ext4_sync_file,
630 ), 618 ),
631 619
632 TP_fast_assign( 620 TP_fast_assign(
621 struct dentry *dentry = file->f_path.dentry;
622
633 __entry->dev = dentry->d_inode->i_sb->s_dev; 623 __entry->dev = dentry->d_inode->i_sb->s_dev;
634 __entry->ino = dentry->d_inode->i_ino; 624 __entry->ino = dentry->d_inode->i_ino;
635 __entry->datasync = datasync; 625 __entry->datasync = datasync;
@@ -790,7 +780,7 @@ TRACE_EVENT(ext4_mballoc_prealloc,
790 __entry->result_len, __entry->result_logical) 780 __entry->result_len, __entry->result_logical)
791); 781);
792 782
793TRACE_EVENT(ext4_mballoc_discard, 783DECLARE_EVENT_CLASS(ext4__mballoc,
794 TP_PROTO(struct ext4_allocation_context *ac), 784 TP_PROTO(struct ext4_allocation_context *ac),
795 785
796 TP_ARGS(ac), 786 TP_ARGS(ac),
@@ -819,33 +809,18 @@ TRACE_EVENT(ext4_mballoc_discard,
819 __entry->result_len, __entry->result_logical) 809 __entry->result_len, __entry->result_logical)
820); 810);
821 811
822TRACE_EVENT(ext4_mballoc_free, 812DEFINE_EVENT(ext4__mballoc, ext4_mballoc_discard,
813
823 TP_PROTO(struct ext4_allocation_context *ac), 814 TP_PROTO(struct ext4_allocation_context *ac),
824 815
825 TP_ARGS(ac), 816 TP_ARGS(ac)
817);
826 818
827 TP_STRUCT__entry( 819DEFINE_EVENT(ext4__mballoc, ext4_mballoc_free,
828 __field( dev_t, dev )
829 __field( ino_t, ino )
830 __field( __u32, result_logical )
831 __field( int, result_start )
832 __field( __u32, result_group )
833 __field( int, result_len )
834 ),
835 820
836 TP_fast_assign( 821 TP_PROTO(struct ext4_allocation_context *ac),
837 __entry->dev = ac->ac_inode->i_sb->s_dev;
838 __entry->ino = ac->ac_inode->i_ino;
839 __entry->result_logical = ac->ac_b_ex.fe_logical;
840 __entry->result_start = ac->ac_b_ex.fe_start;
841 __entry->result_group = ac->ac_b_ex.fe_group;
842 __entry->result_len = ac->ac_b_ex.fe_len;
843 ),
844 822
845 TP_printk("dev %s inode %lu extent %u/%d/%u@%u ", 823 TP_ARGS(ac)
846 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
847 __entry->result_group, __entry->result_start,
848 __entry->result_len, __entry->result_logical)
849); 824);
850 825
851TRACE_EVENT(ext4_forget, 826TRACE_EVENT(ext4_forget,
@@ -974,6 +949,39 @@ TRACE_EVENT(ext4_da_release_space,
974 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks) 949 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
975); 950);
976 951
952DECLARE_EVENT_CLASS(ext4__bitmap_load,
953 TP_PROTO(struct super_block *sb, unsigned long group),
954
955 TP_ARGS(sb, group),
956
957 TP_STRUCT__entry(
958 __field( dev_t, dev )
959 __field( __u32, group )
960
961 ),
962
963 TP_fast_assign(
964 __entry->dev = sb->s_dev;
965 __entry->group = group;
966 ),
967
968 TP_printk("dev %s group %u",
969 jbd2_dev_to_name(__entry->dev), __entry->group)
970);
971
972DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load,
973
974 TP_PROTO(struct super_block *sb, unsigned long group),
975
976 TP_ARGS(sb, group)
977);
978
979DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load,
980
981 TP_PROTO(struct super_block *sb, unsigned long group),
982
983 TP_ARGS(sb, group)
984);
977 985
978#endif /* _TRACE_EXT4_H */ 986#endif /* _TRACE_EXT4_H */
979 987
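
For illustration only (not part of this patch): the ext4 tracepoints above are being deduplicated, so a single DECLARE_EVENT_CLASS() carries the TP_STRUCT__entry/TP_fast_assign/TP_printk body and each DEFINE_EVENT() supplies only a name plus TP_PROTO/TP_ARGS. A generic sketch of the pattern with hypothetical names:

    /* Hypothetical example of the class/event split used above. */
    DECLARE_EVENT_CLASS(example_class,
            TP_PROTO(struct inode *inode, int nr),
            TP_ARGS(inode, nr),
            TP_STRUCT__entry(
                    __field(ino_t, ino)
                    __field(int,   nr)
            ),
            TP_fast_assign(
                    __entry->ino = inode->i_ino;
                    __entry->nr  = nr;
            ),
            TP_printk("ino %lu nr %d",
                      (unsigned long)__entry->ino, __entry->nr)
    );

    DEFINE_EVENT(example_class, example_event_one,
            TP_PROTO(struct inode *inode, int nr),
            TP_ARGS(inode, nr)
    );
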
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 88c59c13ea7b..3d685d1f2a03 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -62,10 +62,13 @@
62 struct trace_entry ent; \ 62 struct trace_entry ent; \
63 tstruct \ 63 tstruct \
64 char __data[0]; \ 64 char __data[0]; \
65 }; 65 }; \
66 \
67 static struct ftrace_event_class event_class_##name;
68
66#undef DEFINE_EVENT 69#undef DEFINE_EVENT
67#define DEFINE_EVENT(template, name, proto, args) \ 70#define DEFINE_EVENT(template, name, proto, args) \
68 static struct ftrace_event_call \ 71 static struct ftrace_event_call __used \
69 __attribute__((__aligned__(4))) event_##name 72 __attribute__((__aligned__(4))) event_##name
70 73
71#undef DEFINE_EVENT_PRINT 74#undef DEFINE_EVENT_PRINT
@@ -147,7 +150,7 @@
147 * 150 *
148 * entry = iter->ent; 151 * entry = iter->ent;
149 * 152 *
150 * if (entry->type != event_<call>.id) { 153 * if (entry->type != event_<call>->event.type) {
151 * WARN_ON_ONCE(1); 154 * WARN_ON_ONCE(1);
152 * return TRACE_TYPE_UNHANDLED; 155 * return TRACE_TYPE_UNHANDLED;
153 * } 156 * }
@@ -206,18 +209,22 @@
206#undef DECLARE_EVENT_CLASS 209#undef DECLARE_EVENT_CLASS
207#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 210#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
208static notrace enum print_line_t \ 211static notrace enum print_line_t \
209ftrace_raw_output_id_##call(int event_id, const char *name, \ 212ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
210 struct trace_iterator *iter, int flags) \ 213 struct trace_event *trace_event) \
211{ \ 214{ \
215 struct ftrace_event_call *event; \
212 struct trace_seq *s = &iter->seq; \ 216 struct trace_seq *s = &iter->seq; \
213 struct ftrace_raw_##call *field; \ 217 struct ftrace_raw_##call *field; \
214 struct trace_entry *entry; \ 218 struct trace_entry *entry; \
215 struct trace_seq *p; \ 219 struct trace_seq *p; \
216 int ret; \ 220 int ret; \
217 \ 221 \
222 event = container_of(trace_event, struct ftrace_event_call, \
223 event); \
224 \
218 entry = iter->ent; \ 225 entry = iter->ent; \
219 \ 226 \
220 if (entry->type != event_id) { \ 227 if (entry->type != event->event.type) { \
221 WARN_ON_ONCE(1); \ 228 WARN_ON_ONCE(1); \
222 return TRACE_TYPE_UNHANDLED; \ 229 return TRACE_TYPE_UNHANDLED; \
223 } \ 230 } \
@@ -226,7 +233,7 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
226 \ 233 \
227 p = &get_cpu_var(ftrace_event_seq); \ 234 p = &get_cpu_var(ftrace_event_seq); \
228 trace_seq_init(p); \ 235 trace_seq_init(p); \
229 ret = trace_seq_printf(s, "%s: ", name); \ 236 ret = trace_seq_printf(s, "%s: ", event->name); \
230 if (ret) \ 237 if (ret) \
231 ret = trace_seq_printf(s, print); \ 238 ret = trace_seq_printf(s, print); \
232 put_cpu(); \ 239 put_cpu(); \
@@ -234,21 +241,16 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \
234 return TRACE_TYPE_PARTIAL_LINE; \ 241 return TRACE_TYPE_PARTIAL_LINE; \
235 \ 242 \
236 return TRACE_TYPE_HANDLED; \ 243 return TRACE_TYPE_HANDLED; \
237} 244} \
238 245static struct trace_event_functions ftrace_event_type_funcs_##call = { \
239#undef DEFINE_EVENT 246 .trace = ftrace_raw_output_##call, \
240#define DEFINE_EVENT(template, name, proto, args) \ 247};
241static notrace enum print_line_t \
242ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \
243{ \
244 return ftrace_raw_output_id_##template(event_##name.id, \
245 #name, iter, flags); \
246}
247 248
248#undef DEFINE_EVENT_PRINT 249#undef DEFINE_EVENT_PRINT
249#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ 250#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
250static notrace enum print_line_t \ 251static notrace enum print_line_t \
251ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ 252ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
253 struct trace_event *event) \
252{ \ 254{ \
253 struct trace_seq *s = &iter->seq; \ 255 struct trace_seq *s = &iter->seq; \
254 struct ftrace_raw_##template *field; \ 256 struct ftrace_raw_##template *field; \
@@ -258,7 +260,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
258 \ 260 \
259 entry = iter->ent; \ 261 entry = iter->ent; \
260 \ 262 \
261 if (entry->type != event_##call.id) { \ 263 if (entry->type != event_##call.event.type) { \
262 WARN_ON_ONCE(1); \ 264 WARN_ON_ONCE(1); \
263 return TRACE_TYPE_UNHANDLED; \ 265 return TRACE_TYPE_UNHANDLED; \
264 } \ 266 } \
@@ -275,7 +277,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
275 return TRACE_TYPE_PARTIAL_LINE; \ 277 return TRACE_TYPE_PARTIAL_LINE; \
276 \ 278 \
277 return TRACE_TYPE_HANDLED; \ 279 return TRACE_TYPE_HANDLED; \
278} 280} \
281static struct trace_event_functions ftrace_event_type_funcs_##call = { \
282 .trace = ftrace_raw_output_##call, \
283};
279 284
280#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 285#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
281 286
@@ -381,80 +386,18 @@ static inline notrace int ftrace_get_offsets_##call( \
381 386
382#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 387#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
383 388
384#ifdef CONFIG_PERF_EVENTS
385
386/*
387 * Generate the functions needed for tracepoint perf_event support.
388 *
389 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
390 *
391 * static int ftrace_profile_enable_<call>(void)
392 * {
393 * return register_trace_<call>(ftrace_profile_<call>);
394 * }
395 *
396 * static void ftrace_profile_disable_<call>(void)
397 * {
398 * unregister_trace_<call>(ftrace_profile_<call>);
399 * }
400 *
401 */
402
403#undef DECLARE_EVENT_CLASS
404#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)
405
406#undef DEFINE_EVENT
407#define DEFINE_EVENT(template, name, proto, args) \
408 \
409static void perf_trace_##name(proto); \
410 \
411static notrace int \
412perf_trace_enable_##name(struct ftrace_event_call *unused) \
413{ \
414 return register_trace_##name(perf_trace_##name); \
415} \
416 \
417static notrace void \
418perf_trace_disable_##name(struct ftrace_event_call *unused) \
419{ \
420 unregister_trace_##name(perf_trace_##name); \
421}
422
423#undef DEFINE_EVENT_PRINT
424#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
425 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
426
427#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
428
429#endif /* CONFIG_PERF_EVENTS */
430
431/* 389/*
432 * Stage 4 of the trace events. 390 * Stage 4 of the trace events.
433 * 391 *
434 * Override the macros in <trace/trace_events.h> to include the following: 392 * Override the macros in <trace/trace_events.h> to include the following:
435 * 393 *
436 * static void ftrace_event_<call>(proto)
437 * {
438 * event_trace_printk(_RET_IP_, "<call>: " <fmt>);
439 * }
440 *
441 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
442 * {
443 * return register_trace_<call>(ftrace_event_<call>);
444 * }
445 *
446 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
447 * {
448 * unregister_trace_<call>(ftrace_event_<call>);
449 * }
450 *
451 *
452 * For those macros defined with TRACE_EVENT: 394 * For those macros defined with TRACE_EVENT:
453 * 395 *
454 * static struct ftrace_event_call event_<call>; 396 * static struct ftrace_event_call event_<call>;
455 * 397 *
456 * static void ftrace_raw_event_<call>(proto) 398 * static void ftrace_raw_event_<call>(void *__data, proto)
457 * { 399 * {
400 * struct ftrace_event_call *event_call = __data;
458 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; 401 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
459 * struct ring_buffer_event *event; 402 * struct ring_buffer_event *event;
460 * struct ftrace_raw_<call> *entry; <-- defined in stage 1 403 * struct ftrace_raw_<call> *entry; <-- defined in stage 1
@@ -469,7 +412,7 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
469 * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); 412 * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
470 * 413 *
471 * event = trace_current_buffer_lock_reserve(&buffer, 414 * event = trace_current_buffer_lock_reserve(&buffer,
472 * event_<call>.id, 415 * event_<call>->event.type,
473 * sizeof(*entry) + __data_size, 416 * sizeof(*entry) + __data_size,
474 * irq_flags, pc); 417 * irq_flags, pc);
475 * if (!event) 418 * if (!event)
@@ -484,43 +427,42 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
484 * event, irq_flags, pc); 427 * event, irq_flags, pc);
485 * } 428 * }
486 * 429 *
487 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
488 * {
489 * return register_trace_<call>(ftrace_raw_event_<call>);
490 * }
491 *
492 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
493 * {
494 * unregister_trace_<call>(ftrace_raw_event_<call>);
495 * }
496 *
497 * static struct trace_event ftrace_event_type_<call> = { 430 * static struct trace_event ftrace_event_type_<call> = {
498 * .trace = ftrace_raw_output_<call>, <-- stage 2 431 * .trace = ftrace_raw_output_<call>, <-- stage 2
499 * }; 432 * };
500 * 433 *
501 * static const char print_fmt_<call>[] = <TP_printk>; 434 * static const char print_fmt_<call>[] = <TP_printk>;
502 * 435 *
436 * static struct ftrace_event_class __used event_class_<template> = {
437 * .system = "<system>",
438 * .define_fields = ftrace_define_fields_<call>,
439 * .fields = LIST_HEAD_INIT(event_class_##call.fields),
440 * .raw_init = trace_event_raw_init,
441 * .probe = ftrace_raw_event_##call,
442 * };
443 *
503 * static struct ftrace_event_call __used 444 * static struct ftrace_event_call __used
504 * __attribute__((__aligned__(4))) 445 * __attribute__((__aligned__(4)))
505 * __attribute__((section("_ftrace_events"))) event_<call> = { 446 * __attribute__((section("_ftrace_events"))) event_<call> = {
506 * .name = "<call>", 447 * .name = "<call>",
507 * .system = "<system>", 448 * .class = event_class_<template>,
508 * .raw_init = trace_event_raw_init, 449 * .event = &ftrace_event_type_<call>,
509 * .regfunc = ftrace_reg_event_<call>,
510 * .unregfunc = ftrace_unreg_event_<call>,
511 * .print_fmt = print_fmt_<call>, 450 * .print_fmt = print_fmt_<call>,
512 * .define_fields = ftrace_define_fields_<call>, 451 * };
513 * }
514 * 452 *
515 */ 453 */
516 454
517#ifdef CONFIG_PERF_EVENTS 455#ifdef CONFIG_PERF_EVENTS
518 456
457#define _TRACE_PERF_PROTO(call, proto) \
458 static notrace void \
459 perf_trace_##call(void *__data, proto);
460
519#define _TRACE_PERF_INIT(call) \ 461#define _TRACE_PERF_INIT(call) \
520 .perf_event_enable = perf_trace_enable_##call, \ 462 .perf_probe = perf_trace_##call,
521 .perf_event_disable = perf_trace_disable_##call,
522 463
523#else 464#else
465#define _TRACE_PERF_PROTO(call, proto)
524#define _TRACE_PERF_INIT(call) 466#define _TRACE_PERF_INIT(call)
525#endif /* CONFIG_PERF_EVENTS */ 467#endif /* CONFIG_PERF_EVENTS */
526 468
@@ -554,9 +496,9 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
554#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 496#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
555 \ 497 \
556static notrace void \ 498static notrace void \
557ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \ 499ftrace_raw_event_##call(void *__data, proto) \
558 proto) \
559{ \ 500{ \
501 struct ftrace_event_call *event_call = __data; \
560 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ 502 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
561 struct ring_buffer_event *event; \ 503 struct ring_buffer_event *event; \
562 struct ftrace_raw_##call *entry; \ 504 struct ftrace_raw_##call *entry; \
@@ -571,7 +513,7 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
571 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ 513 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
572 \ 514 \
573 event = trace_current_buffer_lock_reserve(&buffer, \ 515 event = trace_current_buffer_lock_reserve(&buffer, \
574 event_call->id, \ 516 event_call->event.type, \
575 sizeof(*entry) + __data_size, \ 517 sizeof(*entry) + __data_size, \
576 irq_flags, pc); \ 518 irq_flags, pc); \
577 if (!event) \ 519 if (!event) \
@@ -586,34 +528,21 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
586 trace_nowake_buffer_unlock_commit(buffer, \ 528 trace_nowake_buffer_unlock_commit(buffer, \
587 event, irq_flags, pc); \ 529 event, irq_flags, pc); \
588} 530}
531/*
532 * The ftrace_test_probe is compiled out, it is only here as a build time check
533 * to make sure that if the tracepoint handling changes, the ftrace probe will
534 * fail to compile unless it too is updated.
535 */
589 536
590#undef DEFINE_EVENT 537#undef DEFINE_EVENT
591#define DEFINE_EVENT(template, call, proto, args) \ 538#define DEFINE_EVENT(template, call, proto, args) \
592 \ 539static inline void ftrace_test_probe_##call(void) \
593static notrace void ftrace_raw_event_##call(proto) \
594{ \
595 ftrace_raw_event_id_##template(&event_##call, args); \
596} \
597 \
598static notrace int \
599ftrace_raw_reg_event_##call(struct ftrace_event_call *unused) \
600{ \ 540{ \
601 return register_trace_##call(ftrace_raw_event_##call); \ 541 check_trace_callback_type_##call(ftrace_raw_event_##template); \
602} \ 542}
603 \
604static notrace void \
605ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused) \
606{ \
607 unregister_trace_##call(ftrace_raw_event_##call); \
608} \
609 \
610static struct trace_event ftrace_event_type_##call = { \
611 .trace = ftrace_raw_output_##call, \
612};
613 543
614#undef DEFINE_EVENT_PRINT 544#undef DEFINE_EVENT_PRINT
615#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ 545#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
616 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
617 546
618#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 547#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
619 548
@@ -630,7 +559,16 @@ static struct trace_event ftrace_event_type_##call = { \
630 559
631#undef DECLARE_EVENT_CLASS 560#undef DECLARE_EVENT_CLASS
632#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 561#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
633static const char print_fmt_##call[] = print; 562_TRACE_PERF_PROTO(call, PARAMS(proto)); \
563static const char print_fmt_##call[] = print; \
564static struct ftrace_event_class __used event_class_##call = { \
565 .system = __stringify(TRACE_SYSTEM), \
566 .define_fields = ftrace_define_fields_##call, \
567 .fields = LIST_HEAD_INIT(event_class_##call.fields),\
568 .raw_init = trace_event_raw_init, \
569 .probe = ftrace_raw_event_##call, \
570 _TRACE_PERF_INIT(call) \
571};
634 572
635#undef DEFINE_EVENT 573#undef DEFINE_EVENT
636#define DEFINE_EVENT(template, call, proto, args) \ 574#define DEFINE_EVENT(template, call, proto, args) \
@@ -639,15 +577,10 @@ static struct ftrace_event_call __used \
639__attribute__((__aligned__(4))) \ 577__attribute__((__aligned__(4))) \
640__attribute__((section("_ftrace_events"))) event_##call = { \ 578__attribute__((section("_ftrace_events"))) event_##call = { \
641 .name = #call, \ 579 .name = #call, \
642 .system = __stringify(TRACE_SYSTEM), \ 580 .class = &event_class_##template, \
643 .event = &ftrace_event_type_##call, \ 581 .event.funcs = &ftrace_event_type_funcs_##template, \
644 .raw_init = trace_event_raw_init, \
645 .regfunc = ftrace_raw_reg_event_##call, \
646 .unregfunc = ftrace_raw_unreg_event_##call, \
647 .print_fmt = print_fmt_##template, \ 582 .print_fmt = print_fmt_##template, \
648 .define_fields = ftrace_define_fields_##template, \ 583};
649 _TRACE_PERF_INIT(call) \
650}
651 584
652#undef DEFINE_EVENT_PRINT 585#undef DEFINE_EVENT_PRINT
653#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ 586#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
@@ -658,14 +591,9 @@ static struct ftrace_event_call __used \
658__attribute__((__aligned__(4))) \ 591__attribute__((__aligned__(4))) \
659__attribute__((section("_ftrace_events"))) event_##call = { \ 592__attribute__((section("_ftrace_events"))) event_##call = { \
660 .name = #call, \ 593 .name = #call, \
661 .system = __stringify(TRACE_SYSTEM), \ 594 .class = &event_class_##template, \
662 .event = &ftrace_event_type_##call, \ 595 .event.funcs = &ftrace_event_type_funcs_##call, \
663 .raw_init = trace_event_raw_init, \
664 .regfunc = ftrace_raw_reg_event_##call, \
665 .unregfunc = ftrace_raw_unreg_event_##call, \
666 .print_fmt = print_fmt_##call, \ 596 .print_fmt = print_fmt_##call, \
667 .define_fields = ftrace_define_fields_##template, \
668 _TRACE_PERF_INIT(call) \
669} 597}
670 598
671#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 599#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
@@ -765,17 +693,20 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
765#undef DECLARE_EVENT_CLASS 693#undef DECLARE_EVENT_CLASS
766#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 694#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
767static notrace void \ 695static notrace void \
768perf_trace_templ_##call(struct ftrace_event_call *event_call, \ 696perf_trace_##call(void *__data, proto) \
769 struct pt_regs *__regs, proto) \
770{ \ 697{ \
698 struct ftrace_event_call *event_call = __data; \
771 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ 699 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
772 struct ftrace_raw_##call *entry; \ 700 struct ftrace_raw_##call *entry; \
701 struct pt_regs __regs; \
773 u64 __addr = 0, __count = 1; \ 702 u64 __addr = 0, __count = 1; \
774 unsigned long irq_flags; \ 703 struct hlist_head *head; \
775 int __entry_size; \ 704 int __entry_size; \
776 int __data_size; \ 705 int __data_size; \
777 int rctx; \ 706 int rctx; \
778 \ 707 \
708 perf_fetch_caller_regs(&__regs, 1); \
709 \
779 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ 710 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
780 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\ 711 __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
781 sizeof(u64)); \ 712 sizeof(u64)); \
@@ -784,32 +715,34 @@ perf_trace_templ_##call(struct ftrace_event_call *event_call, \
784 if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \ 715 if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
785 "profile buffer not large enough")) \ 716 "profile buffer not large enough")) \
786 return; \ 717 return; \
718 \
787 entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ 719 entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
788 __entry_size, event_call->id, &rctx, &irq_flags); \ 720 __entry_size, event_call->event.type, &__regs, &rctx); \
789 if (!entry) \ 721 if (!entry) \
790 return; \ 722 return; \
723 \
791 tstruct \ 724 tstruct \
792 \ 725 \
793 { assign; } \ 726 { assign; } \
794 \ 727 \
728 head = per_cpu_ptr(event_call->perf_events, smp_processor_id());\
795 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ 729 perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
796 __count, irq_flags, __regs); \ 730 __count, &__regs, head); \
797} 731}
798 732
733/*
734 * This part is compiled out, it is only here as a build time check
735 * to make sure that if the tracepoint handling changes, the
736 * perf probe will fail to compile unless it too is updated.
737 */
799#undef DEFINE_EVENT 738#undef DEFINE_EVENT
800#define DEFINE_EVENT(template, call, proto, args) \ 739#define DEFINE_EVENT(template, call, proto, args) \
801static notrace void perf_trace_##call(proto) \ 740static inline void perf_test_probe_##call(void) \
802{ \ 741{ \
803 struct ftrace_event_call *event_call = &event_##call; \ 742 check_trace_callback_type_##call(perf_trace_##template); \
804 struct pt_regs *__regs = &get_cpu_var(perf_trace_regs); \
805 \
806 perf_fetch_caller_regs(__regs, 1); \
807 \
808 perf_trace_templ_##template(event_call, __regs, args); \
809 \
810 put_cpu_var(perf_trace_regs); \
811} 743}
812 744
745
813#undef DEFINE_EVENT_PRINT 746#undef DEFINE_EVENT_PRINT
814#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ 747#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
815 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) 748 DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
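
For illustration only (not part of this patch), and as a conceptual sketch rather than literal kernel code: after this rework the shared class probe ftrace_raw_event_<class>(void *__data, proto) serves every event of a class, and the individual ftrace_event_call travels through the tracepoint's new per-probe data slot, which is how the probe recovers its event:

    #include <linux/ftrace_event.h>
    #include <linux/tracepoint.h>

    /* Conceptual sketch only: attach the class probe for one event call. */
    static int example_enable_event(struct ftrace_event_call *call)
    {
            /*
             * call->class->probe is ftrace_raw_event_<class>(); the event
             * call itself is delivered back to that probe as __data via
             * the tracepoint data pointer introduced in this series.
             */
            return tracepoint_probe_register(call->name,
                                             call->class->probe, call);
    }
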
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index e5e5f48dbfb3..257e08960d7b 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -25,6 +25,8 @@ struct syscall_metadata {
25 int nb_args; 25 int nb_args;
26 const char **types; 26 const char **types;
27 const char **args; 27 const char **args;
28 struct list_head enter_fields;
29 struct list_head exit_fields;
28 30
29 struct ftrace_event_call *enter_event; 31 struct ftrace_event_call *enter_event;
30 struct ftrace_event_call *exit_event; 32 struct ftrace_event_call *exit_event;
@@ -34,16 +36,16 @@ struct syscall_metadata {
34extern unsigned long arch_syscall_addr(int nr); 36extern unsigned long arch_syscall_addr(int nr);
35extern int init_syscall_trace(struct ftrace_event_call *call); 37extern int init_syscall_trace(struct ftrace_event_call *call);
36 38
37extern int syscall_enter_define_fields(struct ftrace_event_call *call);
38extern int syscall_exit_define_fields(struct ftrace_event_call *call);
39extern int reg_event_syscall_enter(struct ftrace_event_call *call); 39extern int reg_event_syscall_enter(struct ftrace_event_call *call);
40extern void unreg_event_syscall_enter(struct ftrace_event_call *call); 40extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
41extern int reg_event_syscall_exit(struct ftrace_event_call *call); 41extern int reg_event_syscall_exit(struct ftrace_event_call *call);
42extern void unreg_event_syscall_exit(struct ftrace_event_call *call); 42extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
43extern int 43extern int
44ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s); 44ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
45enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags); 45enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
46enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags); 46 struct trace_event *event);
47enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
48 struct trace_event *event);
47#endif 49#endif
48 50
49#ifdef CONFIG_PERF_EVENTS 51#ifdef CONFIG_PERF_EVENTS
diff --git a/ipc/sem.c b/ipc/sem.c
index dbef95b15941..506c8491a8d1 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -3,56 +3,6 @@
3 * Copyright (C) 1992 Krishna Balasubramanian 3 * Copyright (C) 1992 Krishna Balasubramanian
4 * Copyright (C) 1995 Eric Schenk, Bruno Haible 4 * Copyright (C) 1995 Eric Schenk, Bruno Haible
5 * 5 *
6 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
7 * This code underwent a massive rewrite in order to solve some problems
8 * with the original code. In particular the original code failed to
9 * wake up processes that were waiting for semval to go to 0 if the
10 * value went to 0 and was then incremented rapidly enough. In solving
11 * this problem I have also modified the implementation so that it
12 * processes pending operations in a FIFO manner, thus give a guarantee
13 * that processes waiting for a lock on the semaphore won't starve
14 * unless another locking process fails to unlock.
15 * In addition the following two changes in behavior have been introduced:
16 * - The original implementation of semop returned the value
17 * last semaphore element examined on success. This does not
18 * match the manual page specifications, and effectively
19 * allows the user to read the semaphore even if they do not
20 * have read permissions. The implementation now returns 0
21 * on success as stated in the manual page.
22 * - There is some confusion over whether the set of undo adjustments
23 * to be performed at exit should be done in an atomic manner.
24 * That is, if we are attempting to decrement the semval should we queue
25 * up and wait until we can do so legally?
26 * The original implementation attempted to do this.
27 * The current implementation does not do so. This is because I don't
28 * think it is the right thing (TM) to do, and because I couldn't
29 * see a clean way to get the old behavior with the new design.
30 * The POSIX standard and SVID should be consulted to determine
31 * what behavior is mandated.
32 *
33 * Further notes on refinement (Christoph Rohland, December 1998):
34 * - The POSIX standard says, that the undo adjustments simply should
35 * redo. So the current implementation is o.K.
36 * - The previous code had two flaws:
37 * 1) It actively gave the semaphore to the next waiting process
38 * sleeping on the semaphore. Since this process did not have the
39 * cpu this led to many unnecessary context switches and bad
40 * performance. Now we only check which process should be able to
41 * get the semaphore and if this process wants to reduce some
42 * semaphore value we simply wake it up without doing the
43 * operation. So it has to try to get it later. Thus e.g. the
44 * running process may reacquire the semaphore during the current
45 * time slice. If it only waits for zero or increases the semaphore,
46 * we do the operation in advance and wake it up.
47 * 2) It did not wake up all zero waiting processes. We try to do
48 * better but only get the semops right which only wait for zero or
49 * increase. If there are decrement operations in the operations
50 * array we do the same as before.
51 *
52 * With the incarnation of O(1) scheduler, it becomes unnecessary to perform
53 * check/retry algorithm for waking up blocked processes as the new scheduler
54 * is better at handling thread switch than the old one.
55 *
56 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com> 6 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
57 * 7 *
58 * SMP-threaded, sysctl's added 8 * SMP-threaded, sysctl's added
@@ -61,6 +11,8 @@
61 * (c) 2001 Red Hat Inc 11 * (c) 2001 Red Hat Inc
62 * Lockless wakeup 12 * Lockless wakeup
63 * (c) 2003 Manfred Spraul <manfred@colorfullife.com> 13 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
14 * Further wakeup optimizations, documentation
15 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
64 * 16 *
65 * support for audit of ipc object properties and permission changes 17 * support for audit of ipc object properties and permission changes
66 * Dustin Kirkland <dustin.kirkland@us.ibm.com> 18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
@@ -68,6 +20,57 @@
68 * namespaces support 20 * namespaces support
69 * OpenVZ, SWsoft Inc. 21 * OpenVZ, SWsoft Inc.
70 * Pavel Emelianov <xemul@openvz.org> 22 * Pavel Emelianov <xemul@openvz.org>
23 *
24 * Implementation notes: (May 2010)
25 * This file implements System V semaphores.
26 *
27 * User space visible behavior:
28 * - FIFO ordering for semop() operations (just FIFO, not starvation
29 * protection)
30 * - multiple semaphore operations that alter the same semaphore in
31 * one semop() are handled.
32 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
33 * SETALL calls.
34 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
35 * - undo adjustments at process exit are limited to 0..SEMVMX.
 36 * - namespaces are supported.
 37 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
38 * to /proc/sys/kernel/sem.
39 * - statistics about the usage are reported in /proc/sysvipc/sem.
40 *
41 * Internals:
42 * - scalability:
43 * - all global variables are read-mostly.
44 * - semop() calls and semctl(RMID) are synchronized by RCU.
45 * - most operations do write operations (actually: spin_lock calls) to
46 * the per-semaphore array structure.
47 * Thus: Perfect SMP scaling between independent semaphore arrays.
48 * If multiple semaphores in one array are used, then cache line
 49 * thrashing on the semaphore array spinlock will limit the scaling.
50 * - semncnt and semzcnt are calculated on demand in count_semncnt() and
51 * count_semzcnt()
52 * - the task that performs a successful semop() scans the list of all
53 * sleeping tasks and completes any pending operations that can be fulfilled.
54 * Semaphores are actively given to waiting tasks (necessary for FIFO).
55 * (see update_queue())
56 * - To improve the scalability, the actual wake-up calls are performed after
57 * dropping all locks. (see wake_up_sem_queue_prepare(),
58 * wake_up_sem_queue_do())
59 * - All work is done by the waker, the woken up task does not have to do
60 * anything - not even acquiring a lock or dropping a refcount.
61 * - A woken up task may not even touch the semaphore array anymore, it may
62 * have been destroyed already by a semctl(RMID).
 63 * - The synchronization between wake-ups due to a timeout/signal and a
64 * wake-up due to a completed semaphore operation is achieved by using an
65 * intermediate state (IN_WAKEUP).
66 * - UNDO values are stored in an array (one per process and per
67 * semaphore array, lazily allocated). For backwards compatibility, multiple
68 * modes for the UNDO variables are supported (per process, per thread)
69 * (see copy_semundo, CLONE_SYSVSEM)
70 * - There are two lists of the pending operations: a per-array list
 71 * and a per-semaphore list (stored in the array). This allows achieving FIFO
72 * ordering without always scanning all pending operations.
73 * The worst-case behavior is nevertheless O(N^2) for N wakeups.
71 */ 74 */
72 75
73#include <linux/slab.h> 76#include <linux/slab.h>
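
A minimal userspace sketch (an assumed example, not part of the patch) of the behaviour the new comment block above documents: a wait-for-zero semop() and a SETVAL that updates sem_ctime. Everything below is hypothetical illustration code, not kernel source.

#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun {			/* the caller must define this on Linux */
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	struct sembuf wait_for_zero = { .sem_num = 0, .sem_op = 0, .sem_flg = 0 };
	union semun arg = { .val = 1 };

	if (semid < 0)
		return 1;

	/* semval starts at 0 on Linux, so this wait-for-zero returns at once */
	if (semop(semid, &wait_for_zero, 1) < 0)
		perror("semop");

	/* SETVAL changes semval and updates sem_ctime, as documented above */
	if (semctl(semid, 0, SETVAL, arg) < 0)
		perror("semctl");

	semctl(semid, 0, IPC_RMID);
	return 0;
}
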
@@ -381,7 +384,6 @@ static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
381 sop--; 384 sop--;
382 } 385 }
383 386
384 sma->sem_otime = get_seconds();
385 return 0; 387 return 0;
386 388
387out_of_range: 389out_of_range:
@@ -404,25 +406,51 @@ undo:
404 return result; 406 return result;
405} 407}
406 408
407/* 409/** wake_up_sem_queue_prepare(q, error): Prepare wake-up
408 * Wake up a process waiting on the sem queue with a given error. 410 * @q: queue entry that must be signaled
409 * The queue is invalid (may not be accessed) after the function returns. 411 * @error: Error value for the signal
412 *
413 * Prepare the wake-up of the queue entry q.
410 */ 414 */
411static void wake_up_sem_queue(struct sem_queue *q, int error) 415static void wake_up_sem_queue_prepare(struct list_head *pt,
416 struct sem_queue *q, int error)
412{ 417{
413 /* 418 if (list_empty(pt)) {
414 * Hold preempt off so that we don't get preempted and have the 419 /*
415 * wakee busy-wait until we're scheduled back on. We're holding 420 * Hold preempt off so that we don't get preempted and have the
416 * locks here so it may not strictly be needed, however if the 421 * wakee busy-wait until we're scheduled back on.
417 * locks become preemptible then this prevents such a problem. 422 */
418 */ 423 preempt_disable();
419 preempt_disable(); 424 }
420 q->status = IN_WAKEUP; 425 q->status = IN_WAKEUP;
421 wake_up_process(q->sleeper); 426 q->pid = error;
422 /* hands-off: q can disappear immediately after writing q->status. */ 427
423 smp_wmb(); 428 list_add_tail(&q->simple_list, pt);
424 q->status = error; 429}
425 preempt_enable(); 430
431/**
432 * wake_up_sem_queue_do(pt) - do the actual wake-up
433 * @pt: list of tasks to be woken up
434 *
435 * Do the actual wake-up.
436 * The function is called without any locks held, thus the semaphore array
437 * could be destroyed already and the tasks can disappear as soon as the
438 * status is set to the actual return code.
439 */
440static void wake_up_sem_queue_do(struct list_head *pt)
441{
442 struct sem_queue *q, *t;
443 int did_something;
444
445 did_something = !list_empty(pt);
446 list_for_each_entry_safe(q, t, pt, simple_list) {
447 wake_up_process(q->sleeper);
448 /* q can disappear immediately after writing q->status. */
449 smp_wmb();
450 q->status = q->pid;
451 }
452 if (did_something)
453 preempt_enable();
426} 454}
427 455
428static void unlink_queue(struct sem_array *sma, struct sem_queue *q) 456static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
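
The hunk above only shows the waker's half of the IN_WAKEUP handshake: wake_up_sem_queue_prepare() publishes IN_WAKEUP under the lock, and wake_up_sem_queue_do() stores the real return code only after wake_up_process(). A sketch of how the sleeping side has to consume that status is shown below; the helper name is hypothetical and only assumes the IN_WAKEUP protocol described in the new header comment.

/*
 * Sketch of the sleeper side of the IN_WAKEUP handshake.  A woken task
 * may observe the transient IN_WAKEUP value and must spin until the
 * waker has stored the final return code.
 */
static int sem_queue_wait_result(struct sem_queue *q)
{
	int error = q->status;

	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = q->status;
	}
	return error;
}
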
@@ -434,22 +462,90 @@ static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
434 sma->complex_count--; 462 sma->complex_count--;
435} 463}
436 464
465/** check_restart(sma, q)
466 * @sma: semaphore array
467 * @q: the operation that just completed
468 *
469 * update_queue is O(N^2) when it restarts scanning the whole queue of
470 * waiting operations. Therefore this function checks if the restart is
471 * really necessary. It is called after a previously waiting operation
472 * was completed.
473 */
474static int check_restart(struct sem_array *sma, struct sem_queue *q)
475{
476 struct sem *curr;
477 struct sem_queue *h;
478
479 /* if the operation didn't modify the array, then no restart */
480 if (q->alter == 0)
481 return 0;
482
483 /* pending complex operations are too difficult to analyse */
484 if (sma->complex_count)
485 return 1;
486
487 /* we were a sleeping complex operation. Too difficult */
488 if (q->nsops > 1)
489 return 1;
490
491 curr = sma->sem_base + q->sops[0].sem_num;
492
493 /* No-one waits on this queue */
494 if (list_empty(&curr->sem_pending))
495 return 0;
496
497 /* the new semaphore value */
498 if (curr->semval) {
499 /* It is impossible that someone waits for the new value:
500 * - q is a previously sleeping simple operation that
501 * altered the array. It must be a decrement, because
502 * simple increments never sleep.
503 * - The value is not 0, thus wait-for-zero won't proceed.
504 * - If there are older (higher priority) decrements
505 * in the queue, then they have observed the original
506 * semval value and couldn't proceed. The operation
507 * decremented to value - thus they won't proceed either.
508 */
509 BUG_ON(q->sops[0].sem_op >= 0);
510 return 0;
511 }
512 /*
513 * semval is 0. Check if there are wait-for-zero semops.
514 * They must be the first entries in the per-semaphore simple queue
515 */
516 h = list_first_entry(&curr->sem_pending, struct sem_queue, simple_list);
517 BUG_ON(h->nsops != 1);
518 BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);
519
520 /* Yes, there is a wait-for-zero semop. Restart */
521 if (h->sops[0].sem_op == 0)
522 return 1;
523
524 /* Again - no-one is waiting for the new value. */
525 return 0;
526}
527
437 528
438/** 529/**
439 * update_queue(sma, semnum): Look for tasks that can be completed. 530 * update_queue(sma, semnum): Look for tasks that can be completed.
440 * @sma: semaphore array. 531 * @sma: semaphore array.
441 * @semnum: semaphore that was modified. 532 * @semnum: semaphore that was modified.
533 * @pt: list head for the tasks that must be woken up.
442 * 534 *
443 * update_queue must be called after a semaphore in a semaphore array 535 * update_queue must be called after a semaphore in a semaphore array
 444 * was modified. If multiple semaphores were modified, then @semnum 536 * was modified. If multiple semaphores were modified, then @semnum
445 * must be set to -1. 537 * must be set to -1.
538 * The tasks that must be woken up are added to @pt. The return code
539 * is stored in q->pid.
 540 * The function returns 1 if at least one semop was completed successfully.
446 */ 541 */
447static void update_queue(struct sem_array *sma, int semnum) 542static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
448{ 543{
449 struct sem_queue *q; 544 struct sem_queue *q;
450 struct list_head *walk; 545 struct list_head *walk;
451 struct list_head *pending_list; 546 struct list_head *pending_list;
452 int offset; 547 int offset;
548 int semop_completed = 0;
453 549
454 /* if there are complex operations around, then knowing the semaphore 550 /* if there are complex operations around, then knowing the semaphore
455 * that was modified doesn't help us. Assume that multiple semaphores 551 * that was modified doesn't help us. Assume that multiple semaphores
@@ -469,7 +565,7 @@ static void update_queue(struct sem_array *sma, int semnum)
469again: 565again:
470 walk = pending_list->next; 566 walk = pending_list->next;
471 while (walk != pending_list) { 567 while (walk != pending_list) {
472 int error, alter; 568 int error, restart;
473 569
474 q = (struct sem_queue *)((char *)walk - offset); 570 q = (struct sem_queue *)((char *)walk - offset);
475 walk = walk->next; 571 walk = walk->next;
@@ -494,22 +590,58 @@ again:
494 590
495 unlink_queue(sma, q); 591 unlink_queue(sma, q);
496 592
497 /* 593 if (error) {
498 * The next operation that must be checked depends on the type 594 restart = 0;
499 * of the completed operation: 595 } else {
500 * - if the operation modified the array, then restart from the 596 semop_completed = 1;
501 * head of the queue and check for threads that might be 597 restart = check_restart(sma, q);
502 * waiting for the new semaphore values. 598 }
503 * - if the operation didn't modify the array, then just 599
504 * continue. 600 wake_up_sem_queue_prepare(pt, q, error);
505 */ 601 if (restart)
506 alter = q->alter;
507 wake_up_sem_queue(q, error);
508 if (alter && !error)
509 goto again; 602 goto again;
510 } 603 }
604 return semop_completed;
605}
606
607/**
608 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
609 * @sma: semaphore array
610 * @sops: operations that were performed
611 * @nsops: number of operations
612 * @otime: force setting otime
613 * @pt: list head of the tasks that must be woken up.
614 *
 615 * do_smart_update() does the required calls to update_queue, based on the
616 * actual changes that were performed on the semaphore array.
617 * Note that the function does not do the actual wake-up: the caller is
618 * responsible for calling wake_up_sem_queue_do(@pt).
619 * It is safe to perform this call after dropping all locks.
620 */
621static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
622 int otime, struct list_head *pt)
623{
624 int i;
625
626 if (sma->complex_count || sops == NULL) {
627 if (update_queue(sma, -1, pt))
628 otime = 1;
629 goto done;
630 }
631
632 for (i = 0; i < nsops; i++) {
633 if (sops[i].sem_op > 0 ||
634 (sops[i].sem_op < 0 &&
635 sma->sem_base[sops[i].sem_num].semval == 0))
636 if (update_queue(sma, sops[i].sem_num, pt))
637 otime = 1;
638 }
639done:
640 if (otime)
641 sma->sem_otime = get_seconds();
511} 642}
512 643
644
513/* The following counts are associated to each semaphore: 645/* The following counts are associated to each semaphore:
514 * semncnt number of tasks waiting on semval being nonzero 646 * semncnt number of tasks waiting on semval being nonzero
515 * semzcnt number of tasks waiting on semval being zero 647 * semzcnt number of tasks waiting on semval being zero
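
The hunks that follow convert every caller to the same pattern: build a local task list, let do_smart_update() fill it while the semaphore array lock is held, and call wake_up_sem_queue_do() only after sem_unlock(). A hedged sketch of that calling convention (example_caller is hypothetical):

static void example_caller(struct sem_array *sma, struct sembuf *sops,
			   int nsops)
{
	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);

	/* under sma->sem_perm.lock */
	do_smart_update(sma, sops, nsops, 0, &tasks);
	sem_unlock(sma);

	/* safe without any locks; sma may even be gone by now */
	wake_up_sem_queue_do(&tasks);
}
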
@@ -572,6 +704,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
572 struct sem_undo *un, *tu; 704 struct sem_undo *un, *tu;
573 struct sem_queue *q, *tq; 705 struct sem_queue *q, *tq;
574 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm); 706 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
707 struct list_head tasks;
575 708
576 /* Free the existing undo structures for this semaphore set. */ 709 /* Free the existing undo structures for this semaphore set. */
577 assert_spin_locked(&sma->sem_perm.lock); 710 assert_spin_locked(&sma->sem_perm.lock);
@@ -585,15 +718,17 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
585 } 718 }
586 719
587 /* Wake up all pending processes and let them fail with EIDRM. */ 720 /* Wake up all pending processes and let them fail with EIDRM. */
721 INIT_LIST_HEAD(&tasks);
588 list_for_each_entry_safe(q, tq, &sma->sem_pending, list) { 722 list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
589 unlink_queue(sma, q); 723 unlink_queue(sma, q);
590 wake_up_sem_queue(q, -EIDRM); 724 wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
591 } 725 }
592 726
593 /* Remove the semaphore set from the IDR */ 727 /* Remove the semaphore set from the IDR */
594 sem_rmid(ns, sma); 728 sem_rmid(ns, sma);
595 sem_unlock(sma); 729 sem_unlock(sma);
596 730
731 wake_up_sem_queue_do(&tasks);
597 ns->used_sems -= sma->sem_nsems; 732 ns->used_sems -= sma->sem_nsems;
598 security_sem_free(sma); 733 security_sem_free(sma);
599 ipc_rcu_putref(sma); 734 ipc_rcu_putref(sma);
@@ -715,11 +850,13 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
715 ushort fast_sem_io[SEMMSL_FAST]; 850 ushort fast_sem_io[SEMMSL_FAST];
716 ushort* sem_io = fast_sem_io; 851 ushort* sem_io = fast_sem_io;
717 int nsems; 852 int nsems;
853 struct list_head tasks;
718 854
719 sma = sem_lock_check(ns, semid); 855 sma = sem_lock_check(ns, semid);
720 if (IS_ERR(sma)) 856 if (IS_ERR(sma))
721 return PTR_ERR(sma); 857 return PTR_ERR(sma);
722 858
859 INIT_LIST_HEAD(&tasks);
723 nsems = sma->sem_nsems; 860 nsems = sma->sem_nsems;
724 861
725 err = -EACCES; 862 err = -EACCES;
@@ -807,7 +944,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
807 } 944 }
808 sma->sem_ctime = get_seconds(); 945 sma->sem_ctime = get_seconds();
809 /* maybe some queued-up processes were waiting for this */ 946 /* maybe some queued-up processes were waiting for this */
810 update_queue(sma, -1); 947 do_smart_update(sma, NULL, 0, 0, &tasks);
811 err = 0; 948 err = 0;
812 goto out_unlock; 949 goto out_unlock;
813 } 950 }
@@ -849,13 +986,15 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
849 curr->sempid = task_tgid_vnr(current); 986 curr->sempid = task_tgid_vnr(current);
850 sma->sem_ctime = get_seconds(); 987 sma->sem_ctime = get_seconds();
851 /* maybe some queued-up processes were waiting for this */ 988 /* maybe some queued-up processes were waiting for this */
852 update_queue(sma, semnum); 989 do_smart_update(sma, NULL, 0, 0, &tasks);
853 err = 0; 990 err = 0;
854 goto out_unlock; 991 goto out_unlock;
855 } 992 }
856 } 993 }
857out_unlock: 994out_unlock:
858 sem_unlock(sma); 995 sem_unlock(sma);
996 wake_up_sem_queue_do(&tasks);
997
859out_free: 998out_free:
860 if(sem_io != fast_sem_io) 999 if(sem_io != fast_sem_io)
861 ipc_free(sem_io, sizeof(ushort)*nsems); 1000 ipc_free(sem_io, sizeof(ushort)*nsems);
@@ -1069,7 +1208,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1069 /* step 1: figure out the size of the semaphore array */ 1208 /* step 1: figure out the size of the semaphore array */
1070 sma = sem_lock_check(ns, semid); 1209 sma = sem_lock_check(ns, semid);
1071 if (IS_ERR(sma)) 1210 if (IS_ERR(sma))
1072 return ERR_PTR(PTR_ERR(sma)); 1211 return ERR_CAST(sma);
1073 1212
1074 nsems = sma->sem_nsems; 1213 nsems = sma->sem_nsems;
1075 sem_getref_and_unlock(sma); 1214 sem_getref_and_unlock(sma);
@@ -1129,6 +1268,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1129 struct sem_queue queue; 1268 struct sem_queue queue;
1130 unsigned long jiffies_left = 0; 1269 unsigned long jiffies_left = 0;
1131 struct ipc_namespace *ns; 1270 struct ipc_namespace *ns;
1271 struct list_head tasks;
1132 1272
1133 ns = current->nsproxy->ipc_ns; 1273 ns = current->nsproxy->ipc_ns;
1134 1274
@@ -1177,6 +1317,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1177 } else 1317 } else
1178 un = NULL; 1318 un = NULL;
1179 1319
1320 INIT_LIST_HEAD(&tasks);
1321
1180 sma = sem_lock_check(ns, semid); 1322 sma = sem_lock_check(ns, semid);
1181 if (IS_ERR(sma)) { 1323 if (IS_ERR(sma)) {
1182 if (un) 1324 if (un)
@@ -1225,7 +1367,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1225 error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current)); 1367 error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
1226 if (error <= 0) { 1368 if (error <= 0) {
1227 if (alter && error == 0) 1369 if (alter && error == 0)
1228 update_queue(sma, (nsops == 1) ? sops[0].sem_num : -1); 1370 do_smart_update(sma, sops, nsops, 1, &tasks);
1229 1371
1230 goto out_unlock_free; 1372 goto out_unlock_free;
1231 } 1373 }
@@ -1302,6 +1444,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1302 1444
1303out_unlock_free: 1445out_unlock_free:
1304 sem_unlock(sma); 1446 sem_unlock(sma);
1447
1448 wake_up_sem_queue_do(&tasks);
1305out_free: 1449out_free:
1306 if(sops != fast_sops) 1450 if(sops != fast_sops)
1307 kfree(sops); 1451 kfree(sops);
@@ -1362,6 +1506,7 @@ void exit_sem(struct task_struct *tsk)
1362 for (;;) { 1506 for (;;) {
1363 struct sem_array *sma; 1507 struct sem_array *sma;
1364 struct sem_undo *un; 1508 struct sem_undo *un;
1509 struct list_head tasks;
1365 int semid; 1510 int semid;
1366 int i; 1511 int i;
1367 1512
@@ -1425,10 +1570,11 @@ void exit_sem(struct task_struct *tsk)
1425 semaphore->sempid = task_tgid_vnr(current); 1570 semaphore->sempid = task_tgid_vnr(current);
1426 } 1571 }
1427 } 1572 }
1428 sma->sem_otime = get_seconds();
1429 /* maybe some queued-up processes were waiting for this */ 1573 /* maybe some queued-up processes were waiting for this */
1430 update_queue(sma, -1); 1574 INIT_LIST_HEAD(&tasks);
1575 do_smart_update(sma, NULL, 0, 1, &tasks);
1431 sem_unlock(sma); 1576 sem_unlock(sma);
1577 wake_up_sem_queue_do(&tasks);
1432 1578
1433 call_rcu(&un->rcu, free_un); 1579 call_rcu(&un->rcu, free_un);
1434 } 1580 }
diff --git a/ipc/shm.c b/ipc/shm.c
index 1a314c89f93c..52ed77eb9713 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -273,16 +273,13 @@ static int shm_release(struct inode *ino, struct file *file)
273 return 0; 273 return 0;
274} 274}
275 275
276static int shm_fsync(struct file *file, struct dentry *dentry, int datasync) 276static int shm_fsync(struct file *file, int datasync)
277{ 277{
278 int (*fsync) (struct file *, struct dentry *, int datasync);
279 struct shm_file_data *sfd = shm_file_data(file); 278 struct shm_file_data *sfd = shm_file_data(file);
280 int ret = -EINVAL;
281 279
282 fsync = sfd->file->f_op->fsync; 280 if (!sfd->file->f_op->fsync)
283 if (fsync) 281 return -EINVAL;
284 ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync); 282 return sfd->file->f_op->fsync(sfd->file, datasync);
285 return ret;
286} 283}
287 284
288static unsigned long shm_get_unmapped_area(struct file *file, 285static unsigned long shm_get_unmapped_area(struct file *file,
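
The shm_fsync() change above follows the tree-wide switch of the ->fsync() file operation from (file, dentry, datasync) to (file, datasync). A minimal sketch of an implementation under the new prototype (example_fsync and example_fops are hypothetical):

static int example_fsync(struct file *file, int datasync)
{
	/* the dentry is still reachable when an implementation needs it */
	struct inode *inode = file->f_path.dentry->d_inode;

	/* flush whatever the filesystem needs for 'inode' here */
	(void)inode;
	return 0;
}

static const struct file_operations example_fops = {
	.fsync = example_fsync,
};
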
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 291775021b2e..422cb19f156e 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2994,7 +2994,6 @@ static void cgroup_event_remove(struct work_struct *work)
2994 remove); 2994 remove);
2995 struct cgroup *cgrp = event->cgrp; 2995 struct cgroup *cgrp = event->cgrp;
2996 2996
2997 /* TODO: check return code */
2998 event->cft->unregister_event(cgrp, event->cft, event->eventfd); 2997 event->cft->unregister_event(cgrp, event->cft, event->eventfd);
2999 2998
3000 eventfd_ctx_put(event->eventfd); 2999 eventfd_ctx_put(event->eventfd);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 124ad9d6be16..8b92539b4754 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -20,6 +20,20 @@
20/* Serializes the updates to cpu_online_mask, cpu_present_mask */ 20/* Serializes the updates to cpu_online_mask, cpu_present_mask */
21static DEFINE_MUTEX(cpu_add_remove_lock); 21static DEFINE_MUTEX(cpu_add_remove_lock);
22 22
23/*
24 * The following two API's must be used when attempting
25 * to serialize the updates to cpu_online_mask, cpu_present_mask.
26 */
27void cpu_maps_update_begin(void)
28{
29 mutex_lock(&cpu_add_remove_lock);
30}
31
32void cpu_maps_update_done(void)
33{
34 mutex_unlock(&cpu_add_remove_lock);
35}
36
23static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain); 37static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
24 38
25/* If set, cpu_up and cpu_down will return -EBUSY and do nothing. 39/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
@@ -27,6 +41,8 @@ static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
27 */ 41 */
28static int cpu_hotplug_disabled; 42static int cpu_hotplug_disabled;
29 43
44#ifdef CONFIG_HOTPLUG_CPU
45
30static struct { 46static struct {
31 struct task_struct *active_writer; 47 struct task_struct *active_writer;
32 struct mutex lock; /* Synchronizes accesses to refcount, */ 48 struct mutex lock; /* Synchronizes accesses to refcount, */
@@ -41,8 +57,6 @@ static struct {
41 .refcount = 0, 57 .refcount = 0,
42}; 58};
43 59
44#ifdef CONFIG_HOTPLUG_CPU
45
46void get_online_cpus(void) 60void get_online_cpus(void)
47{ 61{
48 might_sleep(); 62 might_sleep();
@@ -67,22 +81,6 @@ void put_online_cpus(void)
67} 81}
68EXPORT_SYMBOL_GPL(put_online_cpus); 82EXPORT_SYMBOL_GPL(put_online_cpus);
69 83
70#endif /* CONFIG_HOTPLUG_CPU */
71
72/*
73 * The following two API's must be used when attempting
74 * to serialize the updates to cpu_online_mask, cpu_present_mask.
75 */
76void cpu_maps_update_begin(void)
77{
78 mutex_lock(&cpu_add_remove_lock);
79}
80
81void cpu_maps_update_done(void)
82{
83 mutex_unlock(&cpu_add_remove_lock);
84}
85
86/* 84/*
87 * This ensures that the hotplug operation can begin only when the 85 * This ensures that the hotplug operation can begin only when the
88 * refcount goes to zero. 86 * refcount goes to zero.
@@ -124,6 +122,12 @@ static void cpu_hotplug_done(void)
124 cpu_hotplug.active_writer = NULL; 122 cpu_hotplug.active_writer = NULL;
125 mutex_unlock(&cpu_hotplug.lock); 123 mutex_unlock(&cpu_hotplug.lock);
126} 124}
125
126#else /* #if CONFIG_HOTPLUG_CPU */
127static void cpu_hotplug_begin(void) {}
128static void cpu_hotplug_done(void) {}
 129#endif /* #else #if CONFIG_HOTPLUG_CPU */
130
127/* Need to know about CPUs going up/down? */ 131/* Need to know about CPUs going up/down? */
128int __ref register_cpu_notifier(struct notifier_block *nb) 132int __ref register_cpu_notifier(struct notifier_block *nb)
129{ 133{
@@ -134,8 +138,29 @@ int __ref register_cpu_notifier(struct notifier_block *nb)
134 return ret; 138 return ret;
135} 139}
136 140
141static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
142 int *nr_calls)
143{
144 int ret;
145
146 ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
147 nr_calls);
148
149 return notifier_to_errno(ret);
150}
151
152static int cpu_notify(unsigned long val, void *v)
153{
154 return __cpu_notify(val, v, -1, NULL);
155}
156
137#ifdef CONFIG_HOTPLUG_CPU 157#ifdef CONFIG_HOTPLUG_CPU
138 158
159static void cpu_notify_nofail(unsigned long val, void *v)
160{
161 BUG_ON(cpu_notify(val, v));
162}
163
139EXPORT_SYMBOL(register_cpu_notifier); 164EXPORT_SYMBOL(register_cpu_notifier);
140 165
141void __ref unregister_cpu_notifier(struct notifier_block *nb) 166void __ref unregister_cpu_notifier(struct notifier_block *nb)
@@ -181,8 +206,7 @@ static int __ref take_cpu_down(void *_param)
181 if (err < 0) 206 if (err < 0)
182 return err; 207 return err;
183 208
184 raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod, 209 cpu_notify(CPU_DYING | param->mod, param->hcpu);
185 param->hcpu);
186 210
187 if (task_cpu(param->caller) == cpu) 211 if (task_cpu(param->caller) == cpu)
188 move_task_off_dead_cpu(cpu, param->caller); 212 move_task_off_dead_cpu(cpu, param->caller);
@@ -212,17 +236,14 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
212 236
213 cpu_hotplug_begin(); 237 cpu_hotplug_begin();
214 set_cpu_active(cpu, false); 238 set_cpu_active(cpu, false);
215 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, 239 err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
216 hcpu, -1, &nr_calls); 240 if (err) {
217 if (err == NOTIFY_BAD) {
218 set_cpu_active(cpu, true); 241 set_cpu_active(cpu, true);
219 242
220 nr_calls--; 243 nr_calls--;
221 __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, 244 __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
222 hcpu, nr_calls, NULL);
223 printk("%s: attempt to take down CPU %u failed\n", 245 printk("%s: attempt to take down CPU %u failed\n",
224 __func__, cpu); 246 __func__, cpu);
225 err = -EINVAL;
226 goto out_release; 247 goto out_release;
227 } 248 }
228 249
@@ -230,9 +251,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
230 if (err) { 251 if (err) {
231 set_cpu_active(cpu, true); 252 set_cpu_active(cpu, true);
232 /* CPU didn't die: tell everyone. Can't complain. */ 253 /* CPU didn't die: tell everyone. Can't complain. */
233 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, 254 cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
234 hcpu) == NOTIFY_BAD)
235 BUG();
236 255
237 goto out_release; 256 goto out_release;
238 } 257 }
@@ -246,19 +265,14 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
246 __cpu_die(cpu); 265 __cpu_die(cpu);
247 266
248 /* CPU is completely dead: tell everyone. Too late to complain. */ 267 /* CPU is completely dead: tell everyone. Too late to complain. */
249 if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod, 268 cpu_notify_nofail(CPU_DEAD | mod, hcpu);
250 hcpu) == NOTIFY_BAD)
251 BUG();
252 269
253 check_for_tasks(cpu); 270 check_for_tasks(cpu);
254 271
255out_release: 272out_release:
256 cpu_hotplug_done(); 273 cpu_hotplug_done();
257 if (!err) { 274 if (!err)
258 if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod, 275 cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
259 hcpu) == NOTIFY_BAD)
260 BUG();
261 }
262 return err; 276 return err;
263} 277}
264 278
@@ -293,13 +307,11 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
293 return -EINVAL; 307 return -EINVAL;
294 308
295 cpu_hotplug_begin(); 309 cpu_hotplug_begin();
296 ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu, 310 ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
297 -1, &nr_calls); 311 if (ret) {
298 if (ret == NOTIFY_BAD) {
299 nr_calls--; 312 nr_calls--;
300 printk("%s: attempt to bring up CPU %u failed\n", 313 printk("%s: attempt to bring up CPU %u failed\n",
301 __func__, cpu); 314 __func__, cpu);
302 ret = -EINVAL;
303 goto out_notify; 315 goto out_notify;
304 } 316 }
305 317
@@ -312,12 +324,11 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
312 set_cpu_active(cpu, true); 324 set_cpu_active(cpu, true);
313 325
314 /* Now call notifier in preparation. */ 326 /* Now call notifier in preparation. */
315 raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu); 327 cpu_notify(CPU_ONLINE | mod, hcpu);
316 328
317out_notify: 329out_notify:
318 if (ret != 0) 330 if (ret != 0)
319 __raw_notifier_call_chain(&cpu_chain, 331 __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
320 CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
321 cpu_hotplug_done(); 332 cpu_hotplug_done();
322 333
323 return ret; 334 return ret;
@@ -383,7 +394,7 @@ static cpumask_var_t frozen_cpus;
383 394
384int disable_nonboot_cpus(void) 395int disable_nonboot_cpus(void)
385{ 396{
386 int cpu, first_cpu, error; 397 int cpu, first_cpu, error = 0;
387 398
388 cpu_maps_update_begin(); 399 cpu_maps_update_begin();
389 first_cpu = cpumask_first(cpu_online_mask); 400 first_cpu = cpumask_first(cpu_online_mask);
@@ -481,7 +492,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
481 if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus)) 492 if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
482 val = CPU_STARTING_FROZEN; 493 val = CPU_STARTING_FROZEN;
483#endif /* CONFIG_PM_SLEEP_SMP */ 494#endif /* CONFIG_PM_SLEEP_SMP */
484 raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu); 495 cpu_notify(val, (void *)(long)cpu);
485} 496}
486 497
487#endif /* CONFIG_SMP */ 498#endif /* CONFIG_SMP */
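
With the __cpu_notify()/notifier_to_errno() conversion above, hotplug callbacks are expected to report failures as errno values wrapped by notifier_from_errno() rather than returning NOTIFY_BAD (the padata.c hunk further below makes exactly that change). A sketch of a callback written to this convention; example_cpu_callback and example_prepare_cpu are hypothetical:

static int example_prepare_cpu(unsigned int cpu)
{
	/* per-CPU setup that may fail, e.g. an allocation */
	return 0;
}

static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int err = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		err = example_prepare_cpu(cpu);
		break;
	default:
		break;
	}

	/* 0 becomes NOTIFY_OK; an errno is turned into a stop value */
	return notifier_from_errno(err);
}
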
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 61d6af7fa676..02b9611eadde 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2469,7 +2469,8 @@ void cpuset_unlock(void)
2469} 2469}
2470 2470
2471/** 2471/**
2472 * cpuset_mem_spread_node() - On which node to begin search for a page 2472 * cpuset_mem_spread_node() - On which node to begin search for a file page
2473 * cpuset_slab_spread_node() - On which node to begin search for a slab page
2473 * 2474 *
2474 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for 2475 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2475 * tasks in a cpuset with is_spread_page or is_spread_slab set), 2476 * tasks in a cpuset with is_spread_page or is_spread_slab set),
@@ -2494,16 +2495,27 @@ void cpuset_unlock(void)
2494 * See kmem_cache_alloc_node(). 2495 * See kmem_cache_alloc_node().
2495 */ 2496 */
2496 2497
2497int cpuset_mem_spread_node(void) 2498static int cpuset_spread_node(int *rotor)
2498{ 2499{
2499 int node; 2500 int node;
2500 2501
2501 node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed); 2502 node = next_node(*rotor, current->mems_allowed);
2502 if (node == MAX_NUMNODES) 2503 if (node == MAX_NUMNODES)
2503 node = first_node(current->mems_allowed); 2504 node = first_node(current->mems_allowed);
2504 current->cpuset_mem_spread_rotor = node; 2505 *rotor = node;
2505 return node; 2506 return node;
2506} 2507}
2508
2509int cpuset_mem_spread_node(void)
2510{
2511 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
2512}
2513
2514int cpuset_slab_spread_node(void)
2515{
2516 return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
2517}
2518
2507EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); 2519EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
2508 2520
2509/** 2521/**
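
cpuset_slab_spread_node() above gives slab allocations their own round-robin rotor, separate from the page-cache one. A hypothetical caller illustrating the intended use (example_spread_alloc is not real kernel code; the real user is the slab allocator for PF_SPREAD_SLAB tasks, as the comment notes):

static void *example_spread_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	int nid = -1;	/* -1: no node preference */

	if (current->flags & PF_SPREAD_SLAB)
		nid = cpuset_slab_spread_node();

	return kmem_cache_alloc_node(cachep, flags, nid);
}
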
diff --git a/kernel/cred.c b/kernel/cred.c
index 2c24870c55d1..a2d5504fbcc2 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -347,66 +347,6 @@ struct cred *prepare_exec_creds(void)
347} 347}
348 348
349/* 349/*
350 * prepare new credentials for the usermode helper dispatcher
351 */
352struct cred *prepare_usermodehelper_creds(void)
353{
354#ifdef CONFIG_KEYS
355 struct thread_group_cred *tgcred = NULL;
356#endif
357 struct cred *new;
358
359#ifdef CONFIG_KEYS
360 tgcred = kzalloc(sizeof(*new->tgcred), GFP_ATOMIC);
361 if (!tgcred)
362 return NULL;
363#endif
364
365 new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
366 if (!new)
367 goto free_tgcred;
368
369 kdebug("prepare_usermodehelper_creds() alloc %p", new);
370
371 memcpy(new, &init_cred, sizeof(struct cred));
372
373 atomic_set(&new->usage, 1);
374 set_cred_subscribers(new, 0);
375 get_group_info(new->group_info);
376 get_uid(new->user);
377
378#ifdef CONFIG_KEYS
379 new->thread_keyring = NULL;
380 new->request_key_auth = NULL;
381 new->jit_keyring = KEY_REQKEY_DEFL_DEFAULT;
382
383 atomic_set(&tgcred->usage, 1);
384 spin_lock_init(&tgcred->lock);
385 new->tgcred = tgcred;
386#endif
387
388#ifdef CONFIG_SECURITY
389 new->security = NULL;
390#endif
391 if (security_prepare_creds(new, &init_cred, GFP_ATOMIC) < 0)
392 goto error;
393 validate_creds(new);
394
395 BUG_ON(atomic_read(&new->usage) != 1);
396 return new;
397
398error:
399 put_cred(new);
400 return NULL;
401
402free_tgcred:
403#ifdef CONFIG_KEYS
404 kfree(tgcred);
405#endif
406 return NULL;
407}
408
409/*
410 * Copy credentials for the new process created by fork() 350 * Copy credentials for the new process created by fork()
411 * 351 *
412 * We share if we can, but under some circumstances we have to generate a new 352 * We share if we can, but under some circumstances we have to generate a new
diff --git a/kernel/exit.c b/kernel/exit.c
index 019a2843bf95..ceffc67b564a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -58,11 +58,11 @@
58 58
59static void exit_mm(struct task_struct * tsk); 59static void exit_mm(struct task_struct * tsk);
60 60
61static void __unhash_process(struct task_struct *p) 61static void __unhash_process(struct task_struct *p, bool group_dead)
62{ 62{
63 nr_threads--; 63 nr_threads--;
64 detach_pid(p, PIDTYPE_PID); 64 detach_pid(p, PIDTYPE_PID);
65 if (thread_group_leader(p)) { 65 if (group_dead) {
66 detach_pid(p, PIDTYPE_PGID); 66 detach_pid(p, PIDTYPE_PGID);
67 detach_pid(p, PIDTYPE_SID); 67 detach_pid(p, PIDTYPE_SID);
68 68
@@ -79,10 +79,9 @@ static void __unhash_process(struct task_struct *p)
79static void __exit_signal(struct task_struct *tsk) 79static void __exit_signal(struct task_struct *tsk)
80{ 80{
81 struct signal_struct *sig = tsk->signal; 81 struct signal_struct *sig = tsk->signal;
82 bool group_dead = thread_group_leader(tsk);
82 struct sighand_struct *sighand; 83 struct sighand_struct *sighand;
83 84 struct tty_struct *uninitialized_var(tty);
84 BUG_ON(!sig);
85 BUG_ON(!atomic_read(&sig->count));
86 85
87 sighand = rcu_dereference_check(tsk->sighand, 86 sighand = rcu_dereference_check(tsk->sighand,
88 rcu_read_lock_held() || 87 rcu_read_lock_held() ||
@@ -90,14 +89,16 @@ static void __exit_signal(struct task_struct *tsk)
90 spin_lock(&sighand->siglock); 89 spin_lock(&sighand->siglock);
91 90
92 posix_cpu_timers_exit(tsk); 91 posix_cpu_timers_exit(tsk);
93 if (atomic_dec_and_test(&sig->count)) 92 if (group_dead) {
94 posix_cpu_timers_exit_group(tsk); 93 posix_cpu_timers_exit_group(tsk);
95 else { 94 tty = sig->tty;
95 sig->tty = NULL;
96 } else {
96 /* 97 /*
97 * If there is any task waiting for the group exit 98 * If there is any task waiting for the group exit
98 * then notify it: 99 * then notify it:
99 */ 100 */
100 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) 101 if (sig->notify_count > 0 && !--sig->notify_count)
101 wake_up_process(sig->group_exit_task); 102 wake_up_process(sig->group_exit_task);
102 103
103 if (tsk == sig->curr_target) 104 if (tsk == sig->curr_target)
@@ -123,32 +124,24 @@ static void __exit_signal(struct task_struct *tsk)
123 sig->oublock += task_io_get_oublock(tsk); 124 sig->oublock += task_io_get_oublock(tsk);
124 task_io_accounting_add(&sig->ioac, &tsk->ioac); 125 task_io_accounting_add(&sig->ioac, &tsk->ioac);
125 sig->sum_sched_runtime += tsk->se.sum_exec_runtime; 126 sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
126 sig = NULL; /* Marker for below. */
127 } 127 }
128 128
129 __unhash_process(tsk); 129 sig->nr_threads--;
130 __unhash_process(tsk, group_dead);
130 131
131 /* 132 /*
132 * Do this under ->siglock, we can race with another thread 133 * Do this under ->siglock, we can race with another thread
133 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 134 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
134 */ 135 */
135 flush_sigqueue(&tsk->pending); 136 flush_sigqueue(&tsk->pending);
136
137 tsk->signal = NULL;
138 tsk->sighand = NULL; 137 tsk->sighand = NULL;
139 spin_unlock(&sighand->siglock); 138 spin_unlock(&sighand->siglock);
140 139
141 __cleanup_sighand(sighand); 140 __cleanup_sighand(sighand);
142 clear_tsk_thread_flag(tsk,TIF_SIGPENDING); 141 clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
143 if (sig) { 142 if (group_dead) {
144 flush_sigqueue(&sig->shared_pending); 143 flush_sigqueue(&sig->shared_pending);
145 taskstats_tgid_free(sig); 144 tty_kref_put(tty);
146 /*
147 * Make sure ->signal can't go away under rq->lock,
148 * see account_group_exec_runtime().
149 */
150 task_rq_unlock_wait(tsk);
151 __cleanup_signal(sig);
152 } 145 }
153} 146}
154 147
@@ -856,12 +849,9 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
856 849
857 tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE; 850 tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
858 851
859 /* mt-exec, de_thread() is waiting for us */ 852 /* mt-exec, de_thread() is waiting for group leader */
860 if (thread_group_leader(tsk) && 853 if (unlikely(tsk->signal->notify_count < 0))
861 tsk->signal->group_exit_task &&
862 tsk->signal->notify_count < 0)
863 wake_up_process(tsk->signal->group_exit_task); 854 wake_up_process(tsk->signal->group_exit_task);
864
865 write_unlock_irq(&tasklist_lock); 855 write_unlock_irq(&tasklist_lock);
866 856
867 tracehook_report_death(tsk, signal, cookie, group_dead); 857 tracehook_report_death(tsk, signal, cookie, group_dead);
diff --git a/kernel/fork.c b/kernel/fork.c
index 4d57d9e3a6e9..b6cce14ba047 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -165,6 +165,18 @@ void free_task(struct task_struct *tsk)
165} 165}
166EXPORT_SYMBOL(free_task); 166EXPORT_SYMBOL(free_task);
167 167
168static inline void free_signal_struct(struct signal_struct *sig)
169{
170 taskstats_tgid_free(sig);
171 kmem_cache_free(signal_cachep, sig);
172}
173
174static inline void put_signal_struct(struct signal_struct *sig)
175{
176 if (atomic_dec_and_test(&sig->sigcnt))
177 free_signal_struct(sig);
178}
179
168void __put_task_struct(struct task_struct *tsk) 180void __put_task_struct(struct task_struct *tsk)
169{ 181{
170 WARN_ON(!tsk->exit_state); 182 WARN_ON(!tsk->exit_state);
@@ -173,6 +185,7 @@ void __put_task_struct(struct task_struct *tsk)
173 185
174 exit_creds(tsk); 186 exit_creds(tsk);
175 delayacct_tsk_free(tsk); 187 delayacct_tsk_free(tsk);
188 put_signal_struct(tsk->signal);
176 189
177 if (!profile_handoff_task(tsk)) 190 if (!profile_handoff_task(tsk))
178 free_task(tsk); 191 free_task(tsk);
@@ -864,8 +877,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
864 if (!sig) 877 if (!sig)
865 return -ENOMEM; 878 return -ENOMEM;
866 879
867 atomic_set(&sig->count, 1); 880 sig->nr_threads = 1;
868 atomic_set(&sig->live, 1); 881 atomic_set(&sig->live, 1);
882 atomic_set(&sig->sigcnt, 1);
869 init_waitqueue_head(&sig->wait_chldexit); 883 init_waitqueue_head(&sig->wait_chldexit);
870 if (clone_flags & CLONE_NEWPID) 884 if (clone_flags & CLONE_NEWPID)
871 sig->flags |= SIGNAL_UNKILLABLE; 885 sig->flags |= SIGNAL_UNKILLABLE;
@@ -889,13 +903,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
889 return 0; 903 return 0;
890} 904}
891 905
892void __cleanup_signal(struct signal_struct *sig)
893{
894 thread_group_cputime_free(sig);
895 tty_kref_put(sig->tty);
896 kmem_cache_free(signal_cachep, sig);
897}
898
899static void copy_flags(unsigned long clone_flags, struct task_struct *p) 906static void copy_flags(unsigned long clone_flags, struct task_struct *p)
900{ 907{
901 unsigned long new_flags = p->flags; 908 unsigned long new_flags = p->flags;
@@ -1245,8 +1252,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1245 } 1252 }
1246 1253
1247 if (clone_flags & CLONE_THREAD) { 1254 if (clone_flags & CLONE_THREAD) {
1248 atomic_inc(&current->signal->count); 1255 current->signal->nr_threads++;
1249 atomic_inc(&current->signal->live); 1256 atomic_inc(&current->signal->live);
1257 atomic_inc(&current->signal->sigcnt);
1250 p->group_leader = current->group_leader; 1258 p->group_leader = current->group_leader;
1251 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); 1259 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1252 } 1260 }
@@ -1259,7 +1267,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1259 p->nsproxy->pid_ns->child_reaper = p; 1267 p->nsproxy->pid_ns->child_reaper = p;
1260 1268
1261 p->signal->leader_pid = pid; 1269 p->signal->leader_pid = pid;
1262 tty_kref_put(p->signal->tty);
1263 p->signal->tty = tty_kref_get(current->signal->tty); 1270 p->signal->tty = tty_kref_get(current->signal->tty);
1264 attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); 1271 attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
1265 attach_pid(p, PIDTYPE_SID, task_session(current)); 1272 attach_pid(p, PIDTYPE_SID, task_session(current));
@@ -1292,7 +1299,7 @@ bad_fork_cleanup_mm:
1292 mmput(p->mm); 1299 mmput(p->mm);
1293bad_fork_cleanup_signal: 1300bad_fork_cleanup_signal:
1294 if (!(clone_flags & CLONE_THREAD)) 1301 if (!(clone_flags & CLONE_THREAD))
1295 __cleanup_signal(p->signal); 1302 free_signal_struct(p->signal);
1296bad_fork_cleanup_sighand: 1303bad_fork_cleanup_sighand:
1297 __cleanup_sighand(p->sighand); 1304 __cleanup_sighand(p->sighand);
1298bad_fork_cleanup_fs: 1305bad_fork_cleanup_fs:
@@ -1327,6 +1334,16 @@ noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_re
1327 return regs; 1334 return regs;
1328} 1335}
1329 1336
1337static inline void init_idle_pids(struct pid_link *links)
1338{
1339 enum pid_type type;
1340
1341 for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
1342 INIT_HLIST_NODE(&links[type].node); /* not really needed */
1343 links[type].pid = &init_struct_pid;
1344 }
1345}
1346
1330struct task_struct * __cpuinit fork_idle(int cpu) 1347struct task_struct * __cpuinit fork_idle(int cpu)
1331{ 1348{
1332 struct task_struct *task; 1349 struct task_struct *task;
@@ -1334,8 +1351,10 @@ struct task_struct * __cpuinit fork_idle(int cpu)
1334 1351
1335 task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, 1352 task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
1336 &init_struct_pid, 0); 1353 &init_struct_pid, 0);
1337 if (!IS_ERR(task)) 1354 if (!IS_ERR(task)) {
1355 init_idle_pids(task->pids);
1338 init_idle(task, cpu); 1356 init_idle(task, cpu);
1357 }
1339 1358
1340 return task; 1359 return task;
1341} 1360}
@@ -1507,14 +1526,6 @@ static void check_unshare_flags(unsigned long *flags_ptr)
1507 *flags_ptr |= CLONE_SIGHAND; 1526 *flags_ptr |= CLONE_SIGHAND;
1508 1527
1509 /* 1528 /*
1510 * If unsharing signal handlers and the task was created
1511 * using CLONE_THREAD, then must unshare the thread
1512 */
1513 if ((*flags_ptr & CLONE_SIGHAND) &&
1514 (atomic_read(&current->signal->count) > 1))
1515 *flags_ptr |= CLONE_THREAD;
1516
1517 /*
1518 * If unsharing namespace, must also unshare filesystem information. 1529 * If unsharing namespace, must also unshare filesystem information.
1519 */ 1530 */
1520 if (*flags_ptr & CLONE_NEWNS) 1531 if (*flags_ptr & CLONE_NEWNS)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index b9b134b35088..5c69e996bd0f 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -89,7 +89,7 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
89 89
90 do { 90 do {
91 seq = read_seqbegin(&xtime_lock); 91 seq = read_seqbegin(&xtime_lock);
92 xts = current_kernel_time(); 92 xts = __current_kernel_time();
93 tom = wall_to_monotonic; 93 tom = wall_to_monotonic;
94 } while (read_seqretry(&xtime_lock, seq)); 94 } while (read_seqretry(&xtime_lock, seq));
95 95
diff --git a/kernel/kmod.c b/kernel/kmod.c
index bf0e231d9702..6e9b19667a8d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -116,27 +116,16 @@ int __request_module(bool wait, const char *fmt, ...)
116 116
117 trace_module_request(module_name, wait, _RET_IP_); 117 trace_module_request(module_name, wait, _RET_IP_);
118 118
119 ret = call_usermodehelper(modprobe_path, argv, envp, 119 ret = call_usermodehelper_fns(modprobe_path, argv, envp,
120 wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC); 120 wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC,
121 NULL, NULL, NULL);
122
121 atomic_dec(&kmod_concurrent); 123 atomic_dec(&kmod_concurrent);
122 return ret; 124 return ret;
123} 125}
124EXPORT_SYMBOL(__request_module); 126EXPORT_SYMBOL(__request_module);
125#endif /* CONFIG_MODULES */ 127#endif /* CONFIG_MODULES */
126 128
127struct subprocess_info {
128 struct work_struct work;
129 struct completion *complete;
130 struct cred *cred;
131 char *path;
132 char **argv;
133 char **envp;
134 enum umh_wait wait;
135 int retval;
136 struct file *stdin;
137 void (*cleanup)(char **argv, char **envp);
138};
139
140/* 129/*
141 * This is the task which runs the usermode application 130 * This is the task which runs the usermode application
142 */ 131 */
@@ -145,36 +134,10 @@ static int ____call_usermodehelper(void *data)
145 struct subprocess_info *sub_info = data; 134 struct subprocess_info *sub_info = data;
146 int retval; 135 int retval;
147 136
148 BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
149
150 /* Unblock all signals */
151 spin_lock_irq(&current->sighand->siglock); 137 spin_lock_irq(&current->sighand->siglock);
152 flush_signal_handlers(current, 1); 138 flush_signal_handlers(current, 1);
153 sigemptyset(&current->blocked);
154 recalc_sigpending();
155 spin_unlock_irq(&current->sighand->siglock); 139 spin_unlock_irq(&current->sighand->siglock);
156 140
157 /* Install the credentials */
158 commit_creds(sub_info->cred);
159 sub_info->cred = NULL;
160
161 /* Install input pipe when needed */
162 if (sub_info->stdin) {
163 struct files_struct *f = current->files;
164 struct fdtable *fdt;
165 /* no races because files should be private here */
166 sys_close(0);
167 fd_install(0, sub_info->stdin);
168 spin_lock(&f->file_lock);
169 fdt = files_fdtable(f);
170 FD_SET(0, fdt->open_fds);
171 FD_CLR(0, fdt->close_on_exec);
172 spin_unlock(&f->file_lock);
173
174 /* and disallow core files too */
175 current->signal->rlim[RLIMIT_CORE] = (struct rlimit){0, 0};
176 }
177
178 /* We can run anywhere, unlike our parent keventd(). */ 141 /* We can run anywhere, unlike our parent keventd(). */
179 set_cpus_allowed_ptr(current, cpu_all_mask); 142 set_cpus_allowed_ptr(current, cpu_all_mask);
180 143
@@ -184,9 +147,16 @@ static int ____call_usermodehelper(void *data)
184 */ 147 */
185 set_user_nice(current, 0); 148 set_user_nice(current, 0);
186 149
150 if (sub_info->init) {
151 retval = sub_info->init(sub_info);
152 if (retval)
153 goto fail;
154 }
155
187 retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp); 156 retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp);
188 157
189 /* Exec failed? */ 158 /* Exec failed? */
159fail:
190 sub_info->retval = retval; 160 sub_info->retval = retval;
191 do_exit(0); 161 do_exit(0);
192} 162}
@@ -194,9 +164,7 @@ static int ____call_usermodehelper(void *data)
194void call_usermodehelper_freeinfo(struct subprocess_info *info) 164void call_usermodehelper_freeinfo(struct subprocess_info *info)
195{ 165{
196 if (info->cleanup) 166 if (info->cleanup)
197 (*info->cleanup)(info->argv, info->envp); 167 (*info->cleanup)(info);
198 if (info->cred)
199 put_cred(info->cred);
200 kfree(info); 168 kfree(info);
201} 169}
202EXPORT_SYMBOL(call_usermodehelper_freeinfo); 170EXPORT_SYMBOL(call_usermodehelper_freeinfo);
@@ -207,16 +175,16 @@ static int wait_for_helper(void *data)
207 struct subprocess_info *sub_info = data; 175 struct subprocess_info *sub_info = data;
208 pid_t pid; 176 pid_t pid;
209 177
210 /* Install a handler: if SIGCLD isn't handled sys_wait4 won't 178 /* If SIGCLD is ignored sys_wait4 won't populate the status. */
211 * populate the status, but will return -ECHILD. */ 179 spin_lock_irq(&current->sighand->siglock);
212 allow_signal(SIGCHLD); 180 current->sighand->action[SIGCHLD-1].sa.sa_handler = SIG_DFL;
181 spin_unlock_irq(&current->sighand->siglock);
213 182
214 pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD); 183 pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
215 if (pid < 0) { 184 if (pid < 0) {
216 sub_info->retval = pid; 185 sub_info->retval = pid;
217 } else { 186 } else {
218 int ret; 187 int ret = -ECHILD;
219
220 /* 188 /*
221 * Normally it is bogus to call wait4() from in-kernel because 189 * Normally it is bogus to call wait4() from in-kernel because
222 * wait4() wants to write the exit code to a userspace address. 190 * wait4() wants to write the exit code to a userspace address.
@@ -237,10 +205,7 @@ static int wait_for_helper(void *data)
237 sub_info->retval = ret; 205 sub_info->retval = ret;
238 } 206 }
239 207
240 if (sub_info->wait == UMH_NO_WAIT) 208 complete(sub_info->complete);
241 call_usermodehelper_freeinfo(sub_info);
242 else
243 complete(sub_info->complete);
244 return 0; 209 return 0;
245} 210}
246 211
@@ -249,15 +214,13 @@ static void __call_usermodehelper(struct work_struct *work)
249{ 214{
250 struct subprocess_info *sub_info = 215 struct subprocess_info *sub_info =
251 container_of(work, struct subprocess_info, work); 216 container_of(work, struct subprocess_info, work);
252 pid_t pid;
253 enum umh_wait wait = sub_info->wait; 217 enum umh_wait wait = sub_info->wait;
254 218 pid_t pid;
255 BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
256 219
257 /* CLONE_VFORK: wait until the usermode helper has execve'd 220 /* CLONE_VFORK: wait until the usermode helper has execve'd
258 * successfully We need the data structures to stay around 221 * successfully We need the data structures to stay around
259 * until that is done. */ 222 * until that is done. */
260 if (wait == UMH_WAIT_PROC || wait == UMH_NO_WAIT) 223 if (wait == UMH_WAIT_PROC)
261 pid = kernel_thread(wait_for_helper, sub_info, 224 pid = kernel_thread(wait_for_helper, sub_info,
262 CLONE_FS | CLONE_FILES | SIGCHLD); 225 CLONE_FS | CLONE_FILES | SIGCHLD);
263 else 226 else
@@ -266,15 +229,16 @@ static void __call_usermodehelper(struct work_struct *work)
266 229
267 switch (wait) { 230 switch (wait) {
268 case UMH_NO_WAIT: 231 case UMH_NO_WAIT:
232 call_usermodehelper_freeinfo(sub_info);
269 break; 233 break;
270 234
271 case UMH_WAIT_PROC: 235 case UMH_WAIT_PROC:
272 if (pid > 0) 236 if (pid > 0)
273 break; 237 break;
274 sub_info->retval = pid;
275 /* FALLTHROUGH */ 238 /* FALLTHROUGH */
276
277 case UMH_WAIT_EXEC: 239 case UMH_WAIT_EXEC:
240 if (pid < 0)
241 sub_info->retval = pid;
278 complete(sub_info->complete); 242 complete(sub_info->complete);
279 } 243 }
280} 244}
@@ -376,80 +340,37 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
376 sub_info->path = path; 340 sub_info->path = path;
377 sub_info->argv = argv; 341 sub_info->argv = argv;
378 sub_info->envp = envp; 342 sub_info->envp = envp;
379 sub_info->cred = prepare_usermodehelper_creds();
380 if (!sub_info->cred) {
381 kfree(sub_info);
382 return NULL;
383 }
384
385 out: 343 out:
386 return sub_info; 344 return sub_info;
387} 345}
388EXPORT_SYMBOL(call_usermodehelper_setup); 346EXPORT_SYMBOL(call_usermodehelper_setup);
389 347
390/** 348/**
391 * call_usermodehelper_setkeys - set the session keys for usermode helper 349 * call_usermodehelper_setfns - set a cleanup/init function
392 * @info: a subprocess_info returned by call_usermodehelper_setup
393 * @session_keyring: the session keyring for the process
394 */
395void call_usermodehelper_setkeys(struct subprocess_info *info,
396 struct key *session_keyring)
397{
398#ifdef CONFIG_KEYS
399 struct thread_group_cred *tgcred = info->cred->tgcred;
400 key_put(tgcred->session_keyring);
401 tgcred->session_keyring = key_get(session_keyring);
402#else
403 BUG();
404#endif
405}
406EXPORT_SYMBOL(call_usermodehelper_setkeys);
407
408/**
409 * call_usermodehelper_setcleanup - set a cleanup function
410 * @info: a subprocess_info returned by call_usermodehelper_setup 350 * @info: a subprocess_info returned by call_usermodehelper_setup
411 * @cleanup: a cleanup function 351 * @cleanup: a cleanup function
352 * @init: an init function
353 * @data: arbitrary context sensitive data
412 * 354 *
413 * The cleanup function is just befor ethe subprocess_info is about to 355 * The init function is used to customize the helper process prior to
356 * exec. A non-zero return code causes the process to error out, exit,
357 * and return the failure to the calling process
358 *
 359 * The cleanup function is called just before the subprocess_info is about to
414 * be freed. This can be used for freeing the argv and envp. The 360 * be freed. This can be used for freeing the argv and envp. The
 415 * function must be runnable in either a process context or the 361 * function must be runnable in either a process context or the
416 * context in which call_usermodehelper_exec is called. 362 * context in which call_usermodehelper_exec is called.
417 */ 363 */
418void call_usermodehelper_setcleanup(struct subprocess_info *info, 364void call_usermodehelper_setfns(struct subprocess_info *info,
419 void (*cleanup)(char **argv, char **envp)) 365 int (*init)(struct subprocess_info *info),
366 void (*cleanup)(struct subprocess_info *info),
367 void *data)
420{ 368{
421 info->cleanup = cleanup; 369 info->cleanup = cleanup;
370 info->init = init;
371 info->data = data;
422} 372}
423EXPORT_SYMBOL(call_usermodehelper_setcleanup); 373EXPORT_SYMBOL(call_usermodehelper_setfns);
424
425/**
426 * call_usermodehelper_stdinpipe - set up a pipe to be used for stdin
427 * @sub_info: a subprocess_info returned by call_usermodehelper_setup
428 * @filp: set to the write-end of a pipe
429 *
430 * This constructs a pipe, and sets the read end to be the stdin of the
431 * subprocess, and returns the write-end in *@filp.
432 */
433int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
434 struct file **filp)
435{
436 struct file *f;
437
438 f = create_write_pipe(0);
439 if (IS_ERR(f))
440 return PTR_ERR(f);
441 *filp = f;
442
443 f = create_read_pipe(f, 0);
444 if (IS_ERR(f)) {
445 free_write_pipe(*filp);
446 return PTR_ERR(f);
447 }
448 sub_info->stdin = f;
449
450 return 0;
451}
452EXPORT_SYMBOL(call_usermodehelper_stdinpipe);
453 374
454/** 375/**
455 * call_usermodehelper_exec - start a usermode application 376 * call_usermodehelper_exec - start a usermode application
@@ -469,9 +390,6 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
469 DECLARE_COMPLETION_ONSTACK(done); 390 DECLARE_COMPLETION_ONSTACK(done);
470 int retval = 0; 391 int retval = 0;
471 392
472 BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
473 validate_creds(sub_info->cred);
474
475 helper_lock(); 393 helper_lock();
476 if (sub_info->path[0] == '\0') 394 if (sub_info->path[0] == '\0')
477 goto out; 395 goto out;
@@ -498,41 +416,6 @@ unlock:
498} 416}
499EXPORT_SYMBOL(call_usermodehelper_exec); 417EXPORT_SYMBOL(call_usermodehelper_exec);
500 418
501/**
502 * call_usermodehelper_pipe - call a usermode helper process with a pipe stdin
503 * @path: path to usermode executable
504 * @argv: arg vector for process
505 * @envp: environment for process
506 * @filp: set to the write-end of a pipe
507 *
508 * This is a simple wrapper which executes a usermode-helper function
509 * with a pipe as stdin. It is implemented entirely in terms of
510 * lower-level call_usermodehelper_* functions.
511 */
512int call_usermodehelper_pipe(char *path, char **argv, char **envp,
513 struct file **filp)
514{
515 struct subprocess_info *sub_info;
516 int ret;
517
518 sub_info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL);
519 if (sub_info == NULL)
520 return -ENOMEM;
521
522 ret = call_usermodehelper_stdinpipe(sub_info, filp);
523 if (ret < 0) {
524 call_usermodehelper_freeinfo(sub_info);
525 return ret;
526 }
527
528 ret = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
529 if (ret < 0) /* Failed to execute helper, close pipe */
530 filp_close(*filp, NULL);
531
532 return ret;
533}
534EXPORT_SYMBOL(call_usermodehelper_pipe);
535
536void __init usermodehelper_init(void) 419void __init usermodehelper_init(void)
537{ 420{
538 khelper_wq = create_singlethread_workqueue("khelper"); 421 khelper_wq = create_singlethread_workqueue("khelper");
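A minimal usage sketch of the consolidated API above: allocate the subprocess_info, attach the optional init/cleanup callbacks with call_usermodehelper_setfns(), then exec. It mirrors the orderly_poweroff() conversion in kernel/sys.c further down this diff; the helper path and the run_my_helper()/my_cleanup() names are illustrative, not part of the patch.

static void my_cleanup(struct subprocess_info *info)
{
	argv_free(info->argv);		/* free what argv_split() handed us */
}

static int run_my_helper(void)
{
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	struct subprocess_info *info;
	char **argv;
	int argc;

	argv = argv_split(GFP_KERNEL, "/sbin/my-helper --now", &argc);
	if (!argv)
		return -ENOMEM;

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL);
	if (!info) {
		argv_free(argv);
		return -ENOMEM;
	}

	/* init callback and opaque data are optional; pass NULL when unused */
	call_usermodehelper_setfns(info, NULL, my_cleanup, NULL);

	return call_usermodehelper_exec(info, UMH_WAIT_EXEC);
}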
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 632f04c57d82..4c0b7b3e6d2e 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -172,6 +172,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
172 struct thread_info *owner; 172 struct thread_info *owner;
173 173
174 /* 174 /*
175 * If we own the BKL, then don't spin. The owner of
176 * the mutex might be waiting on us to release the BKL.
177 */
178 if (unlikely(current->lock_depth >= 0))
179 break;
180
181 /*
175 * If there's an owner, wait for it to either 182 * If there's an owner, wait for it to either
176 * release the lock or go to sleep. 183 * release the lock or go to sleep.
177 */ 184 */
diff --git a/kernel/padata.c b/kernel/padata.c
index b1c9857f8402..fdd8ae609ce3 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -659,7 +659,7 @@ static int padata_cpu_callback(struct notifier_block *nfb,
659 err = __padata_add_cpu(pinst, cpu); 659 err = __padata_add_cpu(pinst, cpu);
660 mutex_unlock(&pinst->lock); 660 mutex_unlock(&pinst->lock);
661 if (err) 661 if (err)
662 return NOTIFY_BAD; 662 return notifier_from_errno(err);
663 break; 663 break;
664 664
665 case CPU_DOWN_PREPARE: 665 case CPU_DOWN_PREPARE:
@@ -670,7 +670,7 @@ static int padata_cpu_callback(struct notifier_block *nfb,
670 err = __padata_remove_cpu(pinst, cpu); 670 err = __padata_remove_cpu(pinst, cpu);
671 mutex_unlock(&pinst->lock); 671 mutex_unlock(&pinst->lock);
672 if (err) 672 if (err)
673 return NOTIFY_BAD; 673 return notifier_from_errno(err);
674 break; 674 break;
675 675
676 case CPU_UP_CANCELED: 676 case CPU_UP_CANCELED:
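The NOTIFY_BAD -> notifier_from_errno() conversions here (and in profile.c, relay.c, smp.c, softirq.c and timer.c later in this series) preserve the real errno for the notifier caller, which can recover it with notifier_to_errno(). A hedged sketch of the round trip; the callback and its failure are made up for illustration.

static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = -ENOMEM;			/* pretend an allocation failed */
		break;
	}

	if (err)
		return notifier_from_errno(err);	/* encodes -ENOMEM */
	return NOTIFY_OK;
}

/* The CPU-hotplug core can then turn the encoded value back into an
 * errno with notifier_to_errno() and fail the online/offline cleanly. */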
diff --git a/kernel/panic.c b/kernel/panic.c
index dbe13dbb057a..3b16cd93fa7d 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -87,6 +87,7 @@ NORET_TYPE void panic(const char * fmt, ...)
87 */ 87 */
88 preempt_disable(); 88 preempt_disable();
89 89
90 console_verbose();
90 bust_spinlocks(1); 91 bust_spinlocks(1);
91 va_start(args, fmt); 92 va_start(args, fmt);
92 vsnprintf(buf, sizeof(buf), fmt, args); 93 vsnprintf(buf, sizeof(buf), fmt, args);
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a4fa381db3c2..bd7ce8ca5bb9 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2297,11 +2297,6 @@ unlock:
2297 rcu_read_unlock(); 2297 rcu_read_unlock();
2298} 2298}
2299 2299
2300static unsigned long perf_data_size(struct perf_mmap_data *data)
2301{
2302 return data->nr_pages << (PAGE_SHIFT + data->data_order);
2303}
2304
2305#ifndef CONFIG_PERF_USE_VMALLOC 2300#ifndef CONFIG_PERF_USE_VMALLOC
2306 2301
2307/* 2302/*
@@ -2320,6 +2315,19 @@ perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2320 return virt_to_page(data->data_pages[pgoff - 1]); 2315 return virt_to_page(data->data_pages[pgoff - 1]);
2321} 2316}
2322 2317
2318static void *perf_mmap_alloc_page(int cpu)
2319{
2320 struct page *page;
2321 int node;
2322
2323 node = (cpu == -1) ? cpu : cpu_to_node(cpu);
2324 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
2325 if (!page)
2326 return NULL;
2327
2328 return page_address(page);
2329}
2330
2323static struct perf_mmap_data * 2331static struct perf_mmap_data *
2324perf_mmap_data_alloc(struct perf_event *event, int nr_pages) 2332perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2325{ 2333{
@@ -2336,17 +2344,16 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2336 if (!data) 2344 if (!data)
2337 goto fail; 2345 goto fail;
2338 2346
2339 data->user_page = (void *)get_zeroed_page(GFP_KERNEL); 2347 data->user_page = perf_mmap_alloc_page(event->cpu);
2340 if (!data->user_page) 2348 if (!data->user_page)
2341 goto fail_user_page; 2349 goto fail_user_page;
2342 2350
2343 for (i = 0; i < nr_pages; i++) { 2351 for (i = 0; i < nr_pages; i++) {
2344 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL); 2352 data->data_pages[i] = perf_mmap_alloc_page(event->cpu);
2345 if (!data->data_pages[i]) 2353 if (!data->data_pages[i])
2346 goto fail_data_pages; 2354 goto fail_data_pages;
2347 } 2355 }
2348 2356
2349 data->data_order = 0;
2350 data->nr_pages = nr_pages; 2357 data->nr_pages = nr_pages;
2351 2358
2352 return data; 2359 return data;
@@ -2382,6 +2389,11 @@ static void perf_mmap_data_free(struct perf_mmap_data *data)
2382 kfree(data); 2389 kfree(data);
2383} 2390}
2384 2391
2392static inline int page_order(struct perf_mmap_data *data)
2393{
2394 return 0;
2395}
2396
2385#else 2397#else
2386 2398
2387/* 2399/*
@@ -2390,10 +2402,15 @@ static void perf_mmap_data_free(struct perf_mmap_data *data)
2390 * Required for architectures that have d-cache aliasing issues. 2402 * Required for architectures that have d-cache aliasing issues.
2391 */ 2403 */
2392 2404
2405static inline int page_order(struct perf_mmap_data *data)
2406{
2407 return data->page_order;
2408}
2409
2393static struct page * 2410static struct page *
2394perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) 2411perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2395{ 2412{
2396 if (pgoff > (1UL << data->data_order)) 2413 if (pgoff > (1UL << page_order(data)))
2397 return NULL; 2414 return NULL;
2398 2415
2399 return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE); 2416 return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
@@ -2413,7 +2430,7 @@ static void perf_mmap_data_free_work(struct work_struct *work)
2413 int i, nr; 2430 int i, nr;
2414 2431
2415 data = container_of(work, struct perf_mmap_data, work); 2432 data = container_of(work, struct perf_mmap_data, work);
2416 nr = 1 << data->data_order; 2433 nr = 1 << page_order(data);
2417 2434
2418 base = data->user_page; 2435 base = data->user_page;
2419 for (i = 0; i < nr + 1; i++) 2436 for (i = 0; i < nr + 1; i++)
@@ -2452,7 +2469,7 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2452 2469
2453 data->user_page = all_buf; 2470 data->user_page = all_buf;
2454 data->data_pages[0] = all_buf + PAGE_SIZE; 2471 data->data_pages[0] = all_buf + PAGE_SIZE;
2455 data->data_order = ilog2(nr_pages); 2472 data->page_order = ilog2(nr_pages);
2456 data->nr_pages = 1; 2473 data->nr_pages = 1;
2457 2474
2458 return data; 2475 return data;
@@ -2466,6 +2483,11 @@ fail:
2466 2483
2467#endif 2484#endif
2468 2485
2486static unsigned long perf_data_size(struct perf_mmap_data *data)
2487{
2488 return data->nr_pages << (PAGE_SHIFT + page_order(data));
2489}
2490
2469static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2491static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2470{ 2492{
2471 struct perf_event *event = vma->vm_file->private_data; 2493 struct perf_event *event = vma->vm_file->private_data;
@@ -2506,8 +2528,6 @@ perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
2506{ 2528{
2507 long max_size = perf_data_size(data); 2529 long max_size = perf_data_size(data);
2508 2530
2509 atomic_set(&data->lock, -1);
2510
2511 if (event->attr.watermark) { 2531 if (event->attr.watermark) {
2512 data->watermark = min_t(long, max_size, 2532 data->watermark = min_t(long, max_size,
2513 event->attr.wakeup_watermark); 2533 event->attr.wakeup_watermark);
@@ -2580,6 +2600,14 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2580 long user_extra, extra; 2600 long user_extra, extra;
2581 int ret = 0; 2601 int ret = 0;
2582 2602
2603 /*
2604 * Don't allow mmap() of inherited per-task counters. This would
2605 * create a performance issue due to all children writing to the
2606 * same buffer.
2607 */
2608 if (event->cpu == -1 && event->attr.inherit)
2609 return -EINVAL;
2610
2583 if (!(vma->vm_flags & VM_SHARED)) 2611 if (!(vma->vm_flags & VM_SHARED))
2584 return -EINVAL; 2612 return -EINVAL;
2585 2613
@@ -2885,120 +2913,80 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
2885} 2913}
2886 2914
2887/* 2915/*
2888 * Curious locking construct.
2889 *
2890 * We need to ensure a later event_id doesn't publish a head when a former 2916 * We need to ensure a later event_id doesn't publish a head when a former
2891 * event_id isn't done writing. However since we need to deal with NMIs we 2917 * event isn't done writing. However since we need to deal with NMIs we
2892 * cannot fully serialize things. 2918 * cannot fully serialize things.
2893 * 2919 *
2894 * What we do is serialize between CPUs so we only have to deal with NMI
2895 * nesting on a single CPU.
2896 *
2897 * We only publish the head (and generate a wakeup) when the outer-most 2920 * We only publish the head (and generate a wakeup) when the outer-most
2898 * event_id completes. 2921 * event completes.
2899 */ 2922 */
2900static void perf_output_lock(struct perf_output_handle *handle) 2923static void perf_output_get_handle(struct perf_output_handle *handle)
2901{ 2924{
2902 struct perf_mmap_data *data = handle->data; 2925 struct perf_mmap_data *data = handle->data;
2903 int cur, cpu = get_cpu();
2904
2905 handle->locked = 0;
2906 2926
2907 for (;;) { 2927 preempt_disable();
2908 cur = atomic_cmpxchg(&data->lock, -1, cpu); 2928 local_inc(&data->nest);
2909 if (cur == -1) { 2929 handle->wakeup = local_read(&data->wakeup);
2910 handle->locked = 1;
2911 break;
2912 }
2913 if (cur == cpu)
2914 break;
2915
2916 cpu_relax();
2917 }
2918} 2930}
2919 2931
2920static void perf_output_unlock(struct perf_output_handle *handle) 2932static void perf_output_put_handle(struct perf_output_handle *handle)
2921{ 2933{
2922 struct perf_mmap_data *data = handle->data; 2934 struct perf_mmap_data *data = handle->data;
2923 unsigned long head; 2935 unsigned long head;
2924 int cpu;
2925
2926 data->done_head = data->head;
2927
2928 if (!handle->locked)
2929 goto out;
2930 2936
2931again: 2937again:
2932 /* 2938 head = local_read(&data->head);
2933 * The xchg implies a full barrier that ensures all writes are done
2934 * before we publish the new head, matched by a rmb() in userspace when
2935 * reading this position.
2936 */
2937 while ((head = atomic_long_xchg(&data->done_head, 0)))
2938 data->user_page->data_head = head;
2939 2939
2940 /* 2940 /*
2941 * NMI can happen here, which means we can miss a done_head update. 2941 * IRQ/NMI can happen here, which means we can miss a head update.
2942 */ 2942 */
2943 2943
2944 cpu = atomic_xchg(&data->lock, -1); 2944 if (!local_dec_and_test(&data->nest))
2945 WARN_ON_ONCE(cpu != smp_processor_id()); 2945 goto out;
2946 2946
2947 /* 2947 /*
2948 * Therefore we have to validate we did not indeed do so. 2948 * Publish the known good head. Rely on the full barrier implied
2949 * by atomic_dec_and_test() order the data->head read and this
2950 * write.
2949 */ 2951 */
2950 if (unlikely(atomic_long_read(&data->done_head))) { 2952 data->user_page->data_head = head;
2951 /*
2952 * Since we had it locked, we can lock it again.
2953 */
2954 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2955 cpu_relax();
2956 2953
2954 /*
2955 * Now check if we missed an update, rely on the (compiler)
2956 * barrier in atomic_dec_and_test() to re-read data->head.
2957 */
2958 if (unlikely(head != local_read(&data->head))) {
2959 local_inc(&data->nest);
2957 goto again; 2960 goto again;
2958 } 2961 }
2959 2962
2960 if (atomic_xchg(&data->wakeup, 0)) 2963 if (handle->wakeup != local_read(&data->wakeup))
2961 perf_output_wakeup(handle); 2964 perf_output_wakeup(handle);
2962out: 2965
2963 put_cpu(); 2966 out:
2967 preempt_enable();
2964} 2968}
2965 2969
2966void perf_output_copy(struct perf_output_handle *handle, 2970__always_inline void perf_output_copy(struct perf_output_handle *handle,
2967 const void *buf, unsigned int len) 2971 const void *buf, unsigned int len)
2968{ 2972{
2969 unsigned int pages_mask;
2970 unsigned long offset;
2971 unsigned int size;
2972 void **pages;
2973
2974 offset = handle->offset;
2975 pages_mask = handle->data->nr_pages - 1;
2976 pages = handle->data->data_pages;
2977
2978 do { 2973 do {
2979 unsigned long page_offset; 2974 unsigned long size = min_t(unsigned long, handle->size, len);
2980 unsigned long page_size;
2981 int nr;
2982 2975
2983 nr = (offset >> PAGE_SHIFT) & pages_mask; 2976 memcpy(handle->addr, buf, size);
2984 page_size = 1UL << (handle->data->data_order + PAGE_SHIFT);
2985 page_offset = offset & (page_size - 1);
2986 size = min_t(unsigned int, page_size - page_offset, len);
2987 2977
2988 memcpy(pages[nr] + page_offset, buf, size); 2978 len -= size;
2979 handle->addr += size;
2980 handle->size -= size;
2981 if (!handle->size) {
2982 struct perf_mmap_data *data = handle->data;
2989 2983
2990 len -= size; 2984 handle->page++;
2991 buf += size; 2985 handle->page &= data->nr_pages - 1;
2992 offset += size; 2986 handle->addr = data->data_pages[handle->page];
2987 handle->size = PAGE_SIZE << page_order(data);
2988 }
2993 } while (len); 2989 } while (len);
2994
2995 handle->offset = offset;
2996
2997 /*
2998 * Check we didn't copy past our reservation window, taking the
2999 * possible unsigned int wrap into account.
3000 */
3001 WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
3002} 2990}
3003 2991
3004int perf_output_begin(struct perf_output_handle *handle, 2992int perf_output_begin(struct perf_output_handle *handle,
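A condensed sketch of what the new perf_output_get_handle()/perf_output_put_handle() pair above does: per-CPU nesting is tracked in data->nest, only the outermost writer publishes data_head, and a re-check catches a head advanced by a nested NMI writer in the meantime. The function names below are illustrative and the wakeup bookkeeping is left out.

static void writer_begin(struct perf_mmap_data *data)
{
	preempt_disable();
	local_inc(&data->nest);			/* a writer is active on this CPU */
}

static void writer_end(struct perf_mmap_data *data)
{
	unsigned long head;

again:
	head = local_read(&data->head);

	if (!local_dec_and_test(&data->nest))	/* nested (IRQ/NMI) writer */
		goto out;

	data->user_page->data_head = head;	/* publish the outermost head */

	if (head != local_read(&data->head)) {	/* a nested writer moved it */
		local_inc(&data->nest);
		goto again;
	}
out:
	preempt_enable();
}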
@@ -3036,13 +3024,13 @@ int perf_output_begin(struct perf_output_handle *handle,
3036 handle->sample = sample; 3024 handle->sample = sample;
3037 3025
3038 if (!data->nr_pages) 3026 if (!data->nr_pages)
3039 goto fail; 3027 goto out;
3040 3028
3041 have_lost = atomic_read(&data->lost); 3029 have_lost = local_read(&data->lost);
3042 if (have_lost) 3030 if (have_lost)
3043 size += sizeof(lost_event); 3031 size += sizeof(lost_event);
3044 3032
3045 perf_output_lock(handle); 3033 perf_output_get_handle(handle);
3046 3034
3047 do { 3035 do {
3048 /* 3036 /*
@@ -3052,24 +3040,28 @@ int perf_output_begin(struct perf_output_handle *handle,
3052 */ 3040 */
3053 tail = ACCESS_ONCE(data->user_page->data_tail); 3041 tail = ACCESS_ONCE(data->user_page->data_tail);
3054 smp_rmb(); 3042 smp_rmb();
3055 offset = head = atomic_long_read(&data->head); 3043 offset = head = local_read(&data->head);
3056 head += size; 3044 head += size;
3057 if (unlikely(!perf_output_space(data, tail, offset, head))) 3045 if (unlikely(!perf_output_space(data, tail, offset, head)))
3058 goto fail; 3046 goto fail;
3059 } while (atomic_long_cmpxchg(&data->head, offset, head) != offset); 3047 } while (local_cmpxchg(&data->head, offset, head) != offset);
3060 3048
3061 handle->offset = offset; 3049 if (head - local_read(&data->wakeup) > data->watermark)
3062 handle->head = head; 3050 local_add(data->watermark, &data->wakeup);
3063 3051
3064 if (head - tail > data->watermark) 3052 handle->page = offset >> (PAGE_SHIFT + page_order(data));
3065 atomic_set(&data->wakeup, 1); 3053 handle->page &= data->nr_pages - 1;
3054 handle->size = offset & ((PAGE_SIZE << page_order(data)) - 1);
3055 handle->addr = data->data_pages[handle->page];
3056 handle->addr += handle->size;
3057 handle->size = (PAGE_SIZE << page_order(data)) - handle->size;
3066 3058
3067 if (have_lost) { 3059 if (have_lost) {
3068 lost_event.header.type = PERF_RECORD_LOST; 3060 lost_event.header.type = PERF_RECORD_LOST;
3069 lost_event.header.misc = 0; 3061 lost_event.header.misc = 0;
3070 lost_event.header.size = sizeof(lost_event); 3062 lost_event.header.size = sizeof(lost_event);
3071 lost_event.id = event->id; 3063 lost_event.id = event->id;
3072 lost_event.lost = atomic_xchg(&data->lost, 0); 3064 lost_event.lost = local_xchg(&data->lost, 0);
3073 3065
3074 perf_output_put(handle, lost_event); 3066 perf_output_put(handle, lost_event);
3075 } 3067 }
@@ -3077,8 +3069,8 @@ int perf_output_begin(struct perf_output_handle *handle,
3077 return 0; 3069 return 0;
3078 3070
3079fail: 3071fail:
3080 atomic_inc(&data->lost); 3072 local_inc(&data->lost);
3081 perf_output_unlock(handle); 3073 perf_output_put_handle(handle);
3082out: 3074out:
3083 rcu_read_unlock(); 3075 rcu_read_unlock();
3084 3076
@@ -3093,14 +3085,14 @@ void perf_output_end(struct perf_output_handle *handle)
3093 int wakeup_events = event->attr.wakeup_events; 3085 int wakeup_events = event->attr.wakeup_events;
3094 3086
3095 if (handle->sample && wakeup_events) { 3087 if (handle->sample && wakeup_events) {
3096 int events = atomic_inc_return(&data->events); 3088 int events = local_inc_return(&data->events);
3097 if (events >= wakeup_events) { 3089 if (events >= wakeup_events) {
3098 atomic_sub(wakeup_events, &data->events); 3090 local_sub(wakeup_events, &data->events);
3099 atomic_set(&data->wakeup, 1); 3091 local_inc(&data->wakeup);
3100 } 3092 }
3101 } 3093 }
3102 3094
3103 perf_output_unlock(handle); 3095 perf_output_put_handle(handle);
3104 rcu_read_unlock(); 3096 rcu_read_unlock();
3105} 3097}
3106 3098
@@ -3436,22 +3428,13 @@ static void perf_event_task_output(struct perf_event *event,
3436{ 3428{
3437 struct perf_output_handle handle; 3429 struct perf_output_handle handle;
3438 struct task_struct *task = task_event->task; 3430 struct task_struct *task = task_event->task;
3439 unsigned long flags;
3440 int size, ret; 3431 int size, ret;
3441 3432
3442 /*
3443 * If this CPU attempts to acquire an rq lock held by a CPU spinning
3444 * in perf_output_lock() from interrupt context, it's game over.
3445 */
3446 local_irq_save(flags);
3447
3448 size = task_event->event_id.header.size; 3433 size = task_event->event_id.header.size;
3449 ret = perf_output_begin(&handle, event, size, 0, 0); 3434 ret = perf_output_begin(&handle, event, size, 0, 0);
3450 3435
3451 if (ret) { 3436 if (ret)
3452 local_irq_restore(flags);
3453 return; 3437 return;
3454 }
3455 3438
3456 task_event->event_id.pid = perf_event_pid(event, task); 3439 task_event->event_id.pid = perf_event_pid(event, task);
3457 task_event->event_id.ppid = perf_event_pid(event, current); 3440 task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3462,7 +3445,6 @@ static void perf_event_task_output(struct perf_event *event,
3462 perf_output_put(&handle, task_event->event_id); 3445 perf_output_put(&handle, task_event->event_id);
3463 3446
3464 perf_output_end(&handle); 3447 perf_output_end(&handle);
3465 local_irq_restore(flags);
3466} 3448}
3467 3449
3468static int perf_event_task_match(struct perf_event *event) 3450static int perf_event_task_match(struct perf_event *event)
@@ -4020,9 +4002,6 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
4020 perf_swevent_overflow(event, 0, nmi, data, regs); 4002 perf_swevent_overflow(event, 0, nmi, data, regs);
4021} 4003}
4022 4004
4023static int perf_tp_event_match(struct perf_event *event,
4024 struct perf_sample_data *data);
4025
4026static int perf_exclude_event(struct perf_event *event, 4005static int perf_exclude_event(struct perf_event *event,
4027 struct pt_regs *regs) 4006 struct pt_regs *regs)
4028{ 4007{
@@ -4052,10 +4031,6 @@ static int perf_swevent_match(struct perf_event *event,
4052 if (perf_exclude_event(event, regs)) 4031 if (perf_exclude_event(event, regs))
4053 return 0; 4032 return 0;
4054 4033
4055 if (event->attr.type == PERF_TYPE_TRACEPOINT &&
4056 !perf_tp_event_match(event, data))
4057 return 0;
4058
4059 return 1; 4034 return 1;
4060} 4035}
4061 4036
@@ -4066,19 +4041,46 @@ static inline u64 swevent_hash(u64 type, u32 event_id)
4066 return hash_64(val, SWEVENT_HLIST_BITS); 4041 return hash_64(val, SWEVENT_HLIST_BITS);
4067} 4042}
4068 4043
4069static struct hlist_head * 4044static inline struct hlist_head *
4070find_swevent_head(struct perf_cpu_context *ctx, u64 type, u32 event_id) 4045__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
4071{ 4046{
4072 u64 hash; 4047 u64 hash = swevent_hash(type, event_id);
4073 struct swevent_hlist *hlist;
4074 4048
4075 hash = swevent_hash(type, event_id); 4049 return &hlist->heads[hash];
4050}
4051
4052/* For the read side: events when they trigger */
4053static inline struct hlist_head *
4054find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
4055{
4056 struct swevent_hlist *hlist;
4076 4057
4077 hlist = rcu_dereference(ctx->swevent_hlist); 4058 hlist = rcu_dereference(ctx->swevent_hlist);
4078 if (!hlist) 4059 if (!hlist)
4079 return NULL; 4060 return NULL;
4080 4061
4081 return &hlist->heads[hash]; 4062 return __find_swevent_head(hlist, type, event_id);
4063}
4064
4065/* For the event head insertion and removal in the hlist */
4066static inline struct hlist_head *
4067find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
4068{
4069 struct swevent_hlist *hlist;
4070 u32 event_id = event->attr.config;
4071 u64 type = event->attr.type;
4072
4073 /*
4074 * Event scheduling is always serialized against hlist allocation
4075 * and release. Which makes the protected version suitable here.
4076 * The context lock guarantees that.
4077 */
4078 hlist = rcu_dereference_protected(ctx->swevent_hlist,
4079 lockdep_is_held(&event->ctx->lock));
4080 if (!hlist)
4081 return NULL;
4082
4083 return __find_swevent_head(hlist, type, event_id);
4082} 4084}
4083 4085
4084static void do_perf_sw_event(enum perf_type_id type, u32 event_id, 4086static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
@@ -4095,7 +4097,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4095 4097
4096 rcu_read_lock(); 4098 rcu_read_lock();
4097 4099
4098 head = find_swevent_head(cpuctx, type, event_id); 4100 head = find_swevent_head_rcu(cpuctx, type, event_id);
4099 4101
4100 if (!head) 4102 if (!head)
4101 goto end; 4103 goto end;
@@ -4110,7 +4112,7 @@ end:
4110 4112
4111int perf_swevent_get_recursion_context(void) 4113int perf_swevent_get_recursion_context(void)
4112{ 4114{
4113 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); 4115 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4114 int rctx; 4116 int rctx;
4115 4117
4116 if (in_nmi()) 4118 if (in_nmi())
@@ -4122,10 +4124,8 @@ int perf_swevent_get_recursion_context(void)
4122 else 4124 else
4123 rctx = 0; 4125 rctx = 0;
4124 4126
4125 if (cpuctx->recursion[rctx]) { 4127 if (cpuctx->recursion[rctx])
4126 put_cpu_var(perf_cpu_context);
4127 return -1; 4128 return -1;
4128 }
4129 4129
4130 cpuctx->recursion[rctx]++; 4130 cpuctx->recursion[rctx]++;
4131 barrier(); 4131 barrier();
@@ -4139,7 +4139,6 @@ void perf_swevent_put_recursion_context(int rctx)
4139 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 4139 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4140 barrier(); 4140 barrier();
4141 cpuctx->recursion[rctx]--; 4141 cpuctx->recursion[rctx]--;
4142 put_cpu_var(perf_cpu_context);
4143} 4142}
4144EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context); 4143EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
4145 4144
@@ -4150,6 +4149,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4150 struct perf_sample_data data; 4149 struct perf_sample_data data;
4151 int rctx; 4150 int rctx;
4152 4151
4152 preempt_disable_notrace();
4153 rctx = perf_swevent_get_recursion_context(); 4153 rctx = perf_swevent_get_recursion_context();
4154 if (rctx < 0) 4154 if (rctx < 0)
4155 return; 4155 return;
@@ -4159,6 +4159,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4159 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs); 4159 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
4160 4160
4161 perf_swevent_put_recursion_context(rctx); 4161 perf_swevent_put_recursion_context(rctx);
4162 preempt_enable_notrace();
4162} 4163}
4163 4164
4164static void perf_swevent_read(struct perf_event *event) 4165static void perf_swevent_read(struct perf_event *event)
@@ -4178,7 +4179,7 @@ static int perf_swevent_enable(struct perf_event *event)
4178 perf_swevent_set_period(event); 4179 perf_swevent_set_period(event);
4179 } 4180 }
4180 4181
4181 head = find_swevent_head(cpuctx, event->attr.type, event->attr.config); 4182 head = find_swevent_head(cpuctx, event);
4182 if (WARN_ON_ONCE(!head)) 4183 if (WARN_ON_ONCE(!head))
4183 return -EINVAL; 4184 return -EINVAL;
4184 4185
@@ -4366,6 +4367,14 @@ static const struct pmu perf_ops_task_clock = {
4366 .read = task_clock_perf_event_read, 4367 .read = task_clock_perf_event_read,
4367}; 4368};
4368 4369
4370/* Deref the hlist from the update side */
4371static inline struct swevent_hlist *
4372swevent_hlist_deref(struct perf_cpu_context *cpuctx)
4373{
4374 return rcu_dereference_protected(cpuctx->swevent_hlist,
4375 lockdep_is_held(&cpuctx->hlist_mutex));
4376}
4377
4369static void swevent_hlist_release_rcu(struct rcu_head *rcu_head) 4378static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
4370{ 4379{
4371 struct swevent_hlist *hlist; 4380 struct swevent_hlist *hlist;
@@ -4376,12 +4385,11 @@ static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
4376 4385
4377static void swevent_hlist_release(struct perf_cpu_context *cpuctx) 4386static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
4378{ 4387{
4379 struct swevent_hlist *hlist; 4388 struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);
4380 4389
4381 if (!cpuctx->swevent_hlist) 4390 if (!hlist)
4382 return; 4391 return;
4383 4392
4384 hlist = cpuctx->swevent_hlist;
4385 rcu_assign_pointer(cpuctx->swevent_hlist, NULL); 4393 rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
4386 call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu); 4394 call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
4387} 4395}
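swevent_hlist_deref() introduced above is the update-side twin of the rcu_dereference() used on the event path: it documents, and lets lockdep verify, that hlist_mutex is what makes the plain load safe. A generic hedged sketch of the pattern with made-up names (foo_ctx, foo_release).

struct foo_ctx {
	struct mutex	lock;
	struct foo	*ptr;			/* RCU-managed pointer */
};

static struct foo *foo_deref_locked(struct foo_ctx *ctx)
{
	/* only legal while ctx->lock is held */
	return rcu_dereference_protected(ctx->ptr,
					 lockdep_is_held(&ctx->lock));
}

static void foo_release(struct foo_ctx *ctx)
{
	struct foo *old;

	mutex_lock(&ctx->lock);
	old = foo_deref_locked(ctx);
	rcu_assign_pointer(ctx->ptr, NULL);
	mutex_unlock(&ctx->lock);

	if (old) {
		synchronize_rcu();		/* wait out readers still using 'old' */
		kfree(old);
	}
}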
@@ -4418,7 +4426,7 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
4418 4426
4419 mutex_lock(&cpuctx->hlist_mutex); 4427 mutex_lock(&cpuctx->hlist_mutex);
4420 4428
4421 if (!cpuctx->swevent_hlist && cpu_online(cpu)) { 4429 if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
4422 struct swevent_hlist *hlist; 4430 struct swevent_hlist *hlist;
4423 4431
4424 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); 4432 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
@@ -4467,10 +4475,46 @@ static int swevent_hlist_get(struct perf_event *event)
4467 4475
4468#ifdef CONFIG_EVENT_TRACING 4476#ifdef CONFIG_EVENT_TRACING
4469 4477
4470void perf_tp_event(int event_id, u64 addr, u64 count, void *record, 4478static const struct pmu perf_ops_tracepoint = {
4471 int entry_size, struct pt_regs *regs) 4479 .enable = perf_trace_enable,
4480 .disable = perf_trace_disable,
4481 .read = perf_swevent_read,
4482 .unthrottle = perf_swevent_unthrottle,
4483};
4484
4485static int perf_tp_filter_match(struct perf_event *event,
4486 struct perf_sample_data *data)
4487{
4488 void *record = data->raw->data;
4489
4490 if (likely(!event->filter) || filter_match_preds(event->filter, record))
4491 return 1;
4492 return 0;
4493}
4494
4495static int perf_tp_event_match(struct perf_event *event,
4496 struct perf_sample_data *data,
4497 struct pt_regs *regs)
4498{
4499 /*
4500 * All tracepoints are from kernel-space.
4501 */
4502 if (event->attr.exclude_kernel)
4503 return 0;
4504
4505 if (!perf_tp_filter_match(event, data))
4506 return 0;
4507
4508 return 1;
4509}
4510
4511void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
4512 struct pt_regs *regs, struct hlist_head *head)
4472{ 4513{
4473 struct perf_sample_data data; 4514 struct perf_sample_data data;
4515 struct perf_event *event;
4516 struct hlist_node *node;
4517
4474 struct perf_raw_record raw = { 4518 struct perf_raw_record raw = {
4475 .size = entry_size, 4519 .size = entry_size,
4476 .data = record, 4520 .data = record,
@@ -4479,26 +4523,18 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
4479 perf_sample_data_init(&data, addr); 4523 perf_sample_data_init(&data, addr);
4480 data.raw = &raw; 4524 data.raw = &raw;
4481 4525
4482 /* Trace events already protected against recursion */ 4526 rcu_read_lock();
4483 do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, 4527 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4484 &data, regs); 4528 if (perf_tp_event_match(event, &data, regs))
4529 perf_swevent_add(event, count, 1, &data, regs);
4530 }
4531 rcu_read_unlock();
4485} 4532}
4486EXPORT_SYMBOL_GPL(perf_tp_event); 4533EXPORT_SYMBOL_GPL(perf_tp_event);
4487 4534
4488static int perf_tp_event_match(struct perf_event *event,
4489 struct perf_sample_data *data)
4490{
4491 void *record = data->raw->data;
4492
4493 if (likely(!event->filter) || filter_match_preds(event->filter, record))
4494 return 1;
4495 return 0;
4496}
4497
4498static void tp_perf_event_destroy(struct perf_event *event) 4535static void tp_perf_event_destroy(struct perf_event *event)
4499{ 4536{
4500 perf_trace_disable(event->attr.config); 4537 perf_trace_destroy(event);
4501 swevent_hlist_put(event);
4502} 4538}
4503 4539
4504static const struct pmu *tp_perf_event_init(struct perf_event *event) 4540static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4514,17 +4550,13 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
4514 !capable(CAP_SYS_ADMIN)) 4550 !capable(CAP_SYS_ADMIN))
4515 return ERR_PTR(-EPERM); 4551 return ERR_PTR(-EPERM);
4516 4552
4517 if (perf_trace_enable(event->attr.config)) 4553 err = perf_trace_init(event);
4554 if (err)
4518 return NULL; 4555 return NULL;
4519 4556
4520 event->destroy = tp_perf_event_destroy; 4557 event->destroy = tp_perf_event_destroy;
4521 err = swevent_hlist_get(event);
4522 if (err) {
4523 perf_trace_disable(event->attr.config);
4524 return ERR_PTR(err);
4525 }
4526 4558
4527 return &perf_ops_generic; 4559 return &perf_ops_tracepoint;
4528} 4560}
4529 4561
4530static int perf_event_set_filter(struct perf_event *event, void __user *arg) 4562static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@ -4552,12 +4584,6 @@ static void perf_event_free_filter(struct perf_event *event)
4552 4584
4553#else 4585#else
4554 4586
4555static int perf_tp_event_match(struct perf_event *event,
4556 struct perf_sample_data *data)
4557{
4558 return 1;
4559}
4560
4561static const struct pmu *tp_perf_event_init(struct perf_event *event) 4587static const struct pmu *tp_perf_event_init(struct perf_event *event)
4562{ 4588{
4563 return NULL; 4589 return NULL;
@@ -4894,6 +4920,13 @@ static int perf_event_set_output(struct perf_event *event, int output_fd)
4894 int fput_needed = 0; 4920 int fput_needed = 0;
4895 int ret = -EINVAL; 4921 int ret = -EINVAL;
4896 4922
4923 /*
4924 * Don't allow output of inherited per-task events. This would
4925 * create performance issues due to cross cpu access.
4926 */
4927 if (event->cpu == -1 && event->attr.inherit)
4928 return -EINVAL;
4929
4897 if (!output_fd) 4930 if (!output_fd)
4898 goto set; 4931 goto set;
4899 4932
@@ -4914,6 +4947,18 @@ static int perf_event_set_output(struct perf_event *event, int output_fd)
4914 if (event->data) 4947 if (event->data)
4915 goto out; 4948 goto out;
4916 4949
4950 /*
4951 * Don't allow cross-cpu buffers
4952 */
4953 if (output_event->cpu != event->cpu)
4954 goto out;
4955
4956 /*
4957 * If its not a per-cpu buffer, it must be the same task.
4958 */
4959 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
4960 goto out;
4961
4917 atomic_long_inc(&output_file->f_count); 4962 atomic_long_inc(&output_file->f_count);
4918 4963
4919set: 4964set:
@@ -4954,8 +4999,8 @@ SYSCALL_DEFINE5(perf_event_open,
4954 struct perf_event_context *ctx; 4999 struct perf_event_context *ctx;
4955 struct file *event_file = NULL; 5000 struct file *event_file = NULL;
4956 struct file *group_file = NULL; 5001 struct file *group_file = NULL;
5002 int event_fd;
4957 int fput_needed = 0; 5003 int fput_needed = 0;
4958 int fput_needed2 = 0;
4959 int err; 5004 int err;
4960 5005
4961 /* for future expandability... */ 5006 /* for future expandability... */
@@ -4976,12 +5021,18 @@ SYSCALL_DEFINE5(perf_event_open,
4976 return -EINVAL; 5021 return -EINVAL;
4977 } 5022 }
4978 5023
5024 event_fd = get_unused_fd_flags(O_RDWR);
5025 if (event_fd < 0)
5026 return event_fd;
5027
4979 /* 5028 /*
4980 * Get the target context (task or percpu): 5029 * Get the target context (task or percpu):
4981 */ 5030 */
4982 ctx = find_get_context(pid, cpu); 5031 ctx = find_get_context(pid, cpu);
4983 if (IS_ERR(ctx)) 5032 if (IS_ERR(ctx)) {
4984 return PTR_ERR(ctx); 5033 err = PTR_ERR(ctx);
5034 goto err_fd;
5035 }
4985 5036
4986 /* 5037 /*
4987 * Look up the group leader (we will attach this event to it): 5038 * Look up the group leader (we will attach this event to it):
@@ -5021,13 +5072,11 @@ SYSCALL_DEFINE5(perf_event_open,
5021 if (IS_ERR(event)) 5072 if (IS_ERR(event))
5022 goto err_put_context; 5073 goto err_put_context;
5023 5074
5024 err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR); 5075 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
5025 if (err < 0) 5076 if (IS_ERR(event_file)) {
5026 goto err_free_put_context; 5077 err = PTR_ERR(event_file);
5027
5028 event_file = fget_light(err, &fput_needed2);
5029 if (!event_file)
5030 goto err_free_put_context; 5078 goto err_free_put_context;
5079 }
5031 5080
5032 if (flags & PERF_FLAG_FD_OUTPUT) { 5081 if (flags & PERF_FLAG_FD_OUTPUT) {
5033 err = perf_event_set_output(event, group_fd); 5082 err = perf_event_set_output(event, group_fd);
@@ -5048,19 +5097,19 @@ SYSCALL_DEFINE5(perf_event_open,
5048 list_add_tail(&event->owner_entry, &current->perf_event_list); 5097 list_add_tail(&event->owner_entry, &current->perf_event_list);
5049 mutex_unlock(&current->perf_event_mutex); 5098 mutex_unlock(&current->perf_event_mutex);
5050 5099
5051err_fput_free_put_context: 5100 fput_light(group_file, fput_needed);
5052 fput_light(event_file, fput_needed2); 5101 fd_install(event_fd, event_file);
5102 return event_fd;
5053 5103
5104err_fput_free_put_context:
5105 fput(event_file);
5054err_free_put_context: 5106err_free_put_context:
5055 if (err < 0) 5107 free_event(event);
5056 free_event(event);
5057
5058err_put_context: 5108err_put_context:
5059 if (err < 0)
5060 put_ctx(ctx);
5061
5062 fput_light(group_file, fput_needed); 5109 fput_light(group_file, fput_needed);
5063 5110 put_ctx(ctx);
5111err_fd:
5112 put_unused_fd(event_fd);
5064 return err; 5113 return err;
5065} 5114}
5066 5115
diff --git a/kernel/pid.c b/kernel/pid.c
index aebb30d9c233..e9fd8c132d26 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -513,6 +513,13 @@ void __init pidhash_init(void)
513 513
514void __init pidmap_init(void) 514void __init pidmap_init(void)
515{ 515{
516 /* bump default and minimum pid_max based on number of cpus */
517 pid_max = min(pid_max_max, max_t(int, pid_max,
518 PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
519 pid_max_min = max_t(int, pid_max_min,
520 PIDS_PER_CPU_MIN * num_possible_cpus());
521 pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
522
516 init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL); 523 init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
517 /* Reserve PID 0. We never call free_pidmap(0) */ 524 /* Reserve PID 0. We never call free_pidmap(0) */
518 set_bit(0, init_pid_ns.pidmap[0].page); 525 set_bit(0, init_pid_ns.pidmap[0].page);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 00bb252f29a2..9829646d399c 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -363,7 +363,7 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
363 } 363 }
364 } else { 364 } else {
365 read_lock(&tasklist_lock); 365 read_lock(&tasklist_lock);
366 if (thread_group_leader(p) && p->signal) { 366 if (thread_group_leader(p) && p->sighand) {
367 error = 367 error =
368 cpu_clock_sample_group(which_clock, 368 cpu_clock_sample_group(which_clock,
369 p, &rtn); 369 p, &rtn);
@@ -439,7 +439,7 @@ int posix_cpu_timer_del(struct k_itimer *timer)
439 439
440 if (likely(p != NULL)) { 440 if (likely(p != NULL)) {
441 read_lock(&tasklist_lock); 441 read_lock(&tasklist_lock);
442 if (unlikely(p->signal == NULL)) { 442 if (unlikely(p->sighand == NULL)) {
443 /* 443 /*
444 * We raced with the reaping of the task. 444 * We raced with the reaping of the task.
445 * The deletion should have cleared us off the list. 445 * The deletion should have cleared us off the list.
@@ -691,10 +691,10 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
691 read_lock(&tasklist_lock); 691 read_lock(&tasklist_lock);
692 /* 692 /*
693 * We need the tasklist_lock to protect against reaping that 693 * We need the tasklist_lock to protect against reaping that
694 * clears p->signal. If p has just been reaped, we can no 694 * clears p->sighand. If p has just been reaped, we can no
695 * longer get any information about it at all. 695 * longer get any information about it at all.
696 */ 696 */
697 if (unlikely(p->signal == NULL)) { 697 if (unlikely(p->sighand == NULL)) {
698 read_unlock(&tasklist_lock); 698 read_unlock(&tasklist_lock);
699 put_task_struct(p); 699 put_task_struct(p);
700 timer->it.cpu.task = NULL; 700 timer->it.cpu.task = NULL;
@@ -863,7 +863,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
863 clear_dead = p->exit_state; 863 clear_dead = p->exit_state;
864 } else { 864 } else {
865 read_lock(&tasklist_lock); 865 read_lock(&tasklist_lock);
866 if (unlikely(p->signal == NULL)) { 866 if (unlikely(p->sighand == NULL)) {
867 /* 867 /*
868 * The process has been reaped. 868 * The process has been reaped.
869 * We can't even collect a sample any more. 869 * We can't even collect a sample any more.
@@ -1199,7 +1199,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
1199 spin_lock(&p->sighand->siglock); 1199 spin_lock(&p->sighand->siglock);
1200 } else { 1200 } else {
1201 read_lock(&tasklist_lock); 1201 read_lock(&tasklist_lock);
1202 if (unlikely(p->signal == NULL)) { 1202 if (unlikely(p->sighand == NULL)) {
1203 /* 1203 /*
1204 * The process has been reaped. 1204 * The process has been reaped.
1205 * We can't even collect a sample any more. 1205 * We can't even collect a sample any more.
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 00d1fda58ab6..ad723420acc3 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -559,14 +559,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
559 new_timer->it_id = (timer_t) new_timer_id; 559 new_timer->it_id = (timer_t) new_timer_id;
560 new_timer->it_clock = which_clock; 560 new_timer->it_clock = which_clock;
561 new_timer->it_overrun = -1; 561 new_timer->it_overrun = -1;
562 error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
563 if (error)
564 goto out;
565 562
566 /*
567 * return the timer_id now. The next step is hard to
568 * back out if there is an error.
569 */
570 if (copy_to_user(created_timer_id, 563 if (copy_to_user(created_timer_id,
571 &new_timer_id, sizeof (new_timer_id))) { 564 &new_timer_id, sizeof (new_timer_id))) {
572 error = -EFAULT; 565 error = -EFAULT;
@@ -597,6 +590,10 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
597 new_timer->sigq->info.si_tid = new_timer->it_id; 590 new_timer->sigq->info.si_tid = new_timer->it_id;
598 new_timer->sigq->info.si_code = SI_TIMER; 591 new_timer->sigq->info.si_code = SI_TIMER;
599 592
593 error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
594 if (error)
595 goto out;
596
600 spin_lock_irq(&current->sighand->siglock); 597 spin_lock_irq(&current->sighand->siglock);
601 new_timer->it_signal = current->signal; 598 new_timer->it_signal = current->signal;
602 list_add(&new_timer->list, &current->signal->posix_timers); 599 list_add(&new_timer->list, &current->signal->posix_timers);
diff --git a/kernel/profile.c b/kernel/profile.c
index dfadc5b729f1..b22a899934cc 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -365,14 +365,14 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info,
365 switch (action) { 365 switch (action) {
366 case CPU_UP_PREPARE: 366 case CPU_UP_PREPARE:
367 case CPU_UP_PREPARE_FROZEN: 367 case CPU_UP_PREPARE_FROZEN:
368 node = cpu_to_node(cpu); 368 node = cpu_to_mem(cpu);
369 per_cpu(cpu_profile_flip, cpu) = 0; 369 per_cpu(cpu_profile_flip, cpu) = 0;
370 if (!per_cpu(cpu_profile_hits, cpu)[1]) { 370 if (!per_cpu(cpu_profile_hits, cpu)[1]) {
371 page = alloc_pages_exact_node(node, 371 page = alloc_pages_exact_node(node,
372 GFP_KERNEL | __GFP_ZERO, 372 GFP_KERNEL | __GFP_ZERO,
373 0); 373 0);
374 if (!page) 374 if (!page)
375 return NOTIFY_BAD; 375 return notifier_from_errno(-ENOMEM);
376 per_cpu(cpu_profile_hits, cpu)[1] = page_address(page); 376 per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
377 } 377 }
378 if (!per_cpu(cpu_profile_hits, cpu)[0]) { 378 if (!per_cpu(cpu_profile_hits, cpu)[0]) {
@@ -388,7 +388,7 @@ out_free:
388 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]); 388 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
389 per_cpu(cpu_profile_hits, cpu)[1] = NULL; 389 per_cpu(cpu_profile_hits, cpu)[1] = NULL;
390 __free_page(page); 390 __free_page(page);
391 return NOTIFY_BAD; 391 return notifier_from_errno(-ENOMEM);
392 case CPU_ONLINE: 392 case CPU_ONLINE:
393 case CPU_ONLINE_FROZEN: 393 case CPU_ONLINE_FROZEN:
394 if (prof_cpu_mask != NULL) 394 if (prof_cpu_mask != NULL)
@@ -567,7 +567,7 @@ static int create_hash_tables(void)
567 int cpu; 567 int cpu;
568 568
569 for_each_online_cpu(cpu) { 569 for_each_online_cpu(cpu) {
570 int node = cpu_to_node(cpu); 570 int node = cpu_to_mem(cpu);
571 struct page *page; 571 struct page *page;
572 572
573 page = alloc_pages_exact_node(node, 573 page = alloc_pages_exact_node(node,
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 6af9cdd558b7..74a3d693c196 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -594,6 +594,32 @@ int ptrace_request(struct task_struct *child, long request,
594 ret = ptrace_detach(child, data); 594 ret = ptrace_detach(child, data);
595 break; 595 break;
596 596
597#ifdef CONFIG_BINFMT_ELF_FDPIC
598 case PTRACE_GETFDPIC: {
599 struct mm_struct *mm = get_task_mm(child);
600 unsigned long tmp = 0;
601
602 ret = -ESRCH;
603 if (!mm)
604 break;
605
606 switch (addr) {
607 case PTRACE_GETFDPIC_EXEC:
608 tmp = mm->context.exec_fdpic_loadmap;
609 break;
610 case PTRACE_GETFDPIC_INTERP:
611 tmp = mm->context.interp_fdpic_loadmap;
612 break;
613 default:
614 break;
615 }
616 mmput(mm);
617
618 ret = put_user(tmp, (unsigned long __user *) data);
619 break;
620 }
621#endif
622
597#ifdef PTRACE_SINGLESTEP 623#ifdef PTRACE_SINGLESTEP
598 case PTRACE_SINGLESTEP: 624 case PTRACE_SINGLESTEP:
599#endif 625#endif
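From the tracer's side, the new request copies one of the stopped tracee's FDPIC loadmap addresses into the pointer passed as the data argument; addr selects which one. A hedged userspace sketch (the PTRACE_GETFDPIC* constants come from the architecture's ptrace headers on CONFIG_BINFMT_ELF_FDPIC targets; error handling trimmed).

	unsigned long exec_map = 0, interp_map = 0;

	/* 'pid' must already be ptrace-stopped by this tracer */
	ptrace(PTRACE_GETFDPIC, pid, (void *)PTRACE_GETFDPIC_EXEC, &exec_map);
	ptrace(PTRACE_GETFDPIC, pid, (void *)PTRACE_GETFDPIC_INTERP, &interp_map);

	printf("exec loadmap %#lx, interp loadmap %#lx\n", exec_map, interp_map);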
diff --git a/kernel/relay.c b/kernel/relay.c
index 4268287148c1..c7cf397fb929 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -539,7 +539,7 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
539 "relay_hotcpu_callback: cpu %d buffer " 539 "relay_hotcpu_callback: cpu %d buffer "
540 "creation failed\n", hotcpu); 540 "creation failed\n", hotcpu);
541 mutex_unlock(&relay_channels_mutex); 541 mutex_unlock(&relay_channels_mutex);
542 return NOTIFY_BAD; 542 return notifier_from_errno(-ENOMEM);
543 } 543 }
544 } 544 }
545 mutex_unlock(&relay_channels_mutex); 545 mutex_unlock(&relay_channels_mutex);
diff --git a/kernel/sched.c b/kernel/sched.c
index 054a6012de99..d48408142503 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -969,14 +969,6 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
969 } 969 }
970} 970}
971 971
972void task_rq_unlock_wait(struct task_struct *p)
973{
974 struct rq *rq = task_rq(p);
975
976 smp_mb(); /* spin-unlock-wait is not a full memory barrier */
977 raw_spin_unlock_wait(&rq->lock);
978}
979
980static void __task_rq_unlock(struct rq *rq) 972static void __task_rq_unlock(struct rq *rq)
981 __releases(rq->lock) 973 __releases(rq->lock)
982{ 974{
@@ -4062,6 +4054,23 @@ int __sched wait_for_completion_killable(struct completion *x)
4062EXPORT_SYMBOL(wait_for_completion_killable); 4054EXPORT_SYMBOL(wait_for_completion_killable);
4063 4055
4064/** 4056/**
4057 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
4058 * @x: holds the state of this particular completion
4059 * @timeout: timeout value in jiffies
4060 *
4061 * This waits for either a completion of a specific task to be
4062 * signaled or for a specified timeout to expire. It can be
4063 * interrupted by a kill signal. The timeout is in jiffies.
4064 */
4065unsigned long __sched
4066wait_for_completion_killable_timeout(struct completion *x,
4067 unsigned long timeout)
4068{
4069 return wait_for_common(x, timeout, TASK_KILLABLE);
4070}
4071EXPORT_SYMBOL(wait_for_completion_killable_timeout);
4072
4073/**
4065 * try_wait_for_completion - try to decrement a completion without blocking 4074 * try_wait_for_completion - try to decrement a completion without blocking
4066 * @x: completion structure 4075 * @x: completion structure
4067 * 4076 *
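Usage of the new helper matches the other *_timeout waiters: 0 means the timeout expired, a negative value means a fatal signal ended the wait, and a positive value is the jiffies left over. A short sketch; the completion in 'dev->ready' and the 5-second timeout are illustrative.

	long t;

	t = wait_for_completion_killable_timeout(&dev->ready, 5 * HZ);
	if (t == 0)
		return -ETIMEDOUT;		/* timed out */
	if (t < 0)
		return t;			/* interrupted by a fatal signal */

	/* t > 0: completed with 't' jiffies of the timeout to spare */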
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 87a330a7185f..35565395d00d 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -381,15 +381,9 @@ __initcall(init_sched_debug_procfs);
381void proc_sched_show_task(struct task_struct *p, struct seq_file *m) 381void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
382{ 382{
383 unsigned long nr_switches; 383 unsigned long nr_switches;
384 unsigned long flags;
385 int num_threads = 1;
386
387 if (lock_task_sighand(p, &flags)) {
388 num_threads = atomic_read(&p->signal->count);
389 unlock_task_sighand(p, &flags);
390 }
391 384
392 SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads); 385 SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
386 get_nr_threads(p));
393 SEQ_printf(m, 387 SEQ_printf(m,
394 "---------------------------------------------------------\n"); 388 "---------------------------------------------------------\n");
395#define __P(F) \ 389#define __P(F) \
diff --git a/kernel/signal.c b/kernel/signal.c
index 825a3f24ad76..906ae5a1779c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -642,7 +642,7 @@ static inline bool si_fromuser(const struct siginfo *info)
642static int check_kill_permission(int sig, struct siginfo *info, 642static int check_kill_permission(int sig, struct siginfo *info,
643 struct task_struct *t) 643 struct task_struct *t)
644{ 644{
645 const struct cred *cred = current_cred(), *tcred; 645 const struct cred *cred, *tcred;
646 struct pid *sid; 646 struct pid *sid;
647 int error; 647 int error;
648 648
@@ -656,8 +656,10 @@ static int check_kill_permission(int sig, struct siginfo *info,
656 if (error) 656 if (error)
657 return error; 657 return error;
658 658
659 cred = current_cred();
659 tcred = __task_cred(t); 660 tcred = __task_cred(t);
660 if ((cred->euid ^ tcred->suid) && 661 if (!same_thread_group(current, t) &&
662 (cred->euid ^ tcred->suid) &&
661 (cred->euid ^ tcred->uid) && 663 (cred->euid ^ tcred->uid) &&
662 (cred->uid ^ tcred->suid) && 664 (cred->uid ^ tcred->suid) &&
663 (cred->uid ^ tcred->uid) && 665 (cred->uid ^ tcred->uid) &&
@@ -1083,23 +1085,24 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1083/* 1085/*
1084 * Nuke all other threads in the group. 1086 * Nuke all other threads in the group.
1085 */ 1087 */
1086void zap_other_threads(struct task_struct *p) 1088int zap_other_threads(struct task_struct *p)
1087{ 1089{
1088 struct task_struct *t; 1090 struct task_struct *t = p;
1091 int count = 0;
1089 1092
1090 p->signal->group_stop_count = 0; 1093 p->signal->group_stop_count = 0;
1091 1094
1092 for (t = next_thread(p); t != p; t = next_thread(t)) { 1095 while_each_thread(p, t) {
1093 /* 1096 count++;
1094 * Don't bother with already dead threads 1097
1095 */ 1098 /* Don't bother with already dead threads */
1096 if (t->exit_state) 1099 if (t->exit_state)
1097 continue; 1100 continue;
1098
1099 /* SIGKILL will be handled before any pending SIGSTOP */
1100 sigaddset(&t->pending.signal, SIGKILL); 1101 sigaddset(&t->pending.signal, SIGKILL);
1101 signal_wake_up(t, 1); 1102 signal_wake_up(t, 1);
1102 } 1103 }
1104
1105 return count;
1103} 1106}
1104 1107
1105struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags) 1108struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
diff --git a/kernel/smp.c b/kernel/smp.c
index 3fc697336183..75c970c715d3 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -52,7 +52,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
52 case CPU_UP_PREPARE_FROZEN: 52 case CPU_UP_PREPARE_FROZEN:
53 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, 53 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
54 cpu_to_node(cpu))) 54 cpu_to_node(cpu)))
55 return NOTIFY_BAD; 55 return notifier_from_errno(-ENOMEM);
56 break; 56 break;
57 57
58#ifdef CONFIG_HOTPLUG_CPU 58#ifdef CONFIG_HOTPLUG_CPU
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0db913a5c60f..825e1126008f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -808,7 +808,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
808 p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu); 808 p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
809 if (IS_ERR(p)) { 809 if (IS_ERR(p)) {
810 printk("ksoftirqd for %i failed\n", hotcpu); 810 printk("ksoftirqd for %i failed\n", hotcpu);
811 return NOTIFY_BAD; 811 return notifier_from_errno(PTR_ERR(p));
812 } 812 }
813 kthread_bind(p, hotcpu); 813 kthread_bind(p, hotcpu);
814 per_cpu(ksoftirqd, hotcpu) = p; 814 per_cpu(ksoftirqd, hotcpu) = p;
diff --git a/kernel/sys.c b/kernel/sys.c
index 0d36d889c74d..e83ddbbaf89d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1632,9 +1632,9 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
1632 1632
1633char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; 1633char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
1634 1634
1635static void argv_cleanup(char **argv, char **envp) 1635static void argv_cleanup(struct subprocess_info *info)
1636{ 1636{
1637 argv_free(argv); 1637 argv_free(info->argv);
1638} 1638}
1639 1639
1640/** 1640/**
@@ -1668,7 +1668,7 @@ int orderly_poweroff(bool force)
1668 goto out; 1668 goto out;
1669 } 1669 }
1670 1670
1671 call_usermodehelper_setcleanup(info, argv_cleanup); 1671 call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL);
1672 1672
1673 ret = call_usermodehelper_exec(info, UMH_NO_WAIT); 1673 ret = call_usermodehelper_exec(info, UMH_NO_WAIT);
1674 1674
diff --git a/kernel/timer.c b/kernel/timer.c
index be394af5bc22..2454172a80d3 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -752,11 +752,15 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
752 752
753 expires_limit = expires; 753 expires_limit = expires;
754 754
755 if (timer->slack > -1) 755 if (timer->slack >= 0) {
756 expires_limit = expires + timer->slack; 756 expires_limit = expires + timer->slack;
757 else if (time_after(expires, jiffies)) /* auto slack: use 0.4% */ 757 } else {
758 expires_limit = expires + (expires - jiffies)/256; 758 unsigned long now = jiffies;
759 759
760 /* No slack, if already expired else auto slack 0.4% */
761 if (time_after(expires, now))
762 expires_limit = expires + (expires - now)/256;
763 }
760 mask = expires ^ expires_limit; 764 mask = expires ^ expires_limit;
761 if (mask == 0) 765 if (mask == 0)
762 return expires; 766 return expires;
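Worked example of the auto-slack branch above: with timer->slack negative and a timer due 1024 jiffies from now, expires_limit becomes expires + 1024/256 = expires + 4, i.e. about 0.4% of the remaining delay, which bounds how far apply_slack() may round the expiry; a timer whose expiry is not in the future (time_after() false) keeps expires_limit == expires and so gets no slack at all.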
@@ -1680,11 +1684,14 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
1680 unsigned long action, void *hcpu) 1684 unsigned long action, void *hcpu)
1681{ 1685{
1682 long cpu = (long)hcpu; 1686 long cpu = (long)hcpu;
1687 int err;
1688
1683 switch(action) { 1689 switch(action) {
1684 case CPU_UP_PREPARE: 1690 case CPU_UP_PREPARE:
1685 case CPU_UP_PREPARE_FROZEN: 1691 case CPU_UP_PREPARE_FROZEN:
1686 if (init_timers_cpu(cpu) < 0) 1692 err = init_timers_cpu(cpu);
1687 return NOTIFY_BAD; 1693 if (err < 0)
1694 return notifier_from_errno(err);
1688 break; 1695 break;
1689#ifdef CONFIG_HOTPLUG_CPU 1696#ifdef CONFIG_HOTPLUG_CPU
1690 case CPU_DEAD: 1697 case CPU_DEAD:
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b3bc91a3f510..36ea2b65dcdc 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -675,28 +675,33 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
675 } 675 }
676} 676}
677 677
678static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq) 678static void blk_add_trace_rq_abort(void *ignore,
679 struct request_queue *q, struct request *rq)
679{ 680{
680 blk_add_trace_rq(q, rq, BLK_TA_ABORT); 681 blk_add_trace_rq(q, rq, BLK_TA_ABORT);
681} 682}
682 683
683static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq) 684static void blk_add_trace_rq_insert(void *ignore,
685 struct request_queue *q, struct request *rq)
684{ 686{
685 blk_add_trace_rq(q, rq, BLK_TA_INSERT); 687 blk_add_trace_rq(q, rq, BLK_TA_INSERT);
686} 688}
687 689
688static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq) 690static void blk_add_trace_rq_issue(void *ignore,
691 struct request_queue *q, struct request *rq)
689{ 692{
690 blk_add_trace_rq(q, rq, BLK_TA_ISSUE); 693 blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
691} 694}
692 695
693static void blk_add_trace_rq_requeue(struct request_queue *q, 696static void blk_add_trace_rq_requeue(void *ignore,
697 struct request_queue *q,
694 struct request *rq) 698 struct request *rq)
695{ 699{
696 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); 700 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
697} 701}
698 702
699static void blk_add_trace_rq_complete(struct request_queue *q, 703static void blk_add_trace_rq_complete(void *ignore,
704 struct request_queue *q,
700 struct request *rq) 705 struct request *rq)
701{ 706{
702 blk_add_trace_rq(q, rq, BLK_TA_COMPLETE); 707 blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
@@ -724,34 +729,40 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
724 !bio_flagged(bio, BIO_UPTODATE), 0, NULL); 729 !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
725} 730}
726 731
727static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio) 732static void blk_add_trace_bio_bounce(void *ignore,
733 struct request_queue *q, struct bio *bio)
728{ 734{
729 blk_add_trace_bio(q, bio, BLK_TA_BOUNCE); 735 blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
730} 736}
731 737
732static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio) 738static void blk_add_trace_bio_complete(void *ignore,
739 struct request_queue *q, struct bio *bio)
733{ 740{
734 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE); 741 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
735} 742}
736 743
737static void blk_add_trace_bio_backmerge(struct request_queue *q, 744static void blk_add_trace_bio_backmerge(void *ignore,
745 struct request_queue *q,
738 struct bio *bio) 746 struct bio *bio)
739{ 747{
740 blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); 748 blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
741} 749}
742 750
743static void blk_add_trace_bio_frontmerge(struct request_queue *q, 751static void blk_add_trace_bio_frontmerge(void *ignore,
752 struct request_queue *q,
744 struct bio *bio) 753 struct bio *bio)
745{ 754{
746 blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); 755 blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
747} 756}
748 757
749static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio) 758static void blk_add_trace_bio_queue(void *ignore,
759 struct request_queue *q, struct bio *bio)
750{ 760{
751 blk_add_trace_bio(q, bio, BLK_TA_QUEUE); 761 blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
752} 762}
753 763
754static void blk_add_trace_getrq(struct request_queue *q, 764static void blk_add_trace_getrq(void *ignore,
765 struct request_queue *q,
755 struct bio *bio, int rw) 766 struct bio *bio, int rw)
756{ 767{
757 if (bio) 768 if (bio)
@@ -765,7 +776,8 @@ static void blk_add_trace_getrq(struct request_queue *q,
765} 776}
766 777
767 778
768static void blk_add_trace_sleeprq(struct request_queue *q, 779static void blk_add_trace_sleeprq(void *ignore,
780 struct request_queue *q,
769 struct bio *bio, int rw) 781 struct bio *bio, int rw)
770{ 782{
771 if (bio) 783 if (bio)
@@ -779,7 +791,7 @@ static void blk_add_trace_sleeprq(struct request_queue *q,
779 } 791 }
780} 792}
781 793
782static void blk_add_trace_plug(struct request_queue *q) 794static void blk_add_trace_plug(void *ignore, struct request_queue *q)
783{ 795{
784 struct blk_trace *bt = q->blk_trace; 796 struct blk_trace *bt = q->blk_trace;
785 797
@@ -787,7 +799,7 @@ static void blk_add_trace_plug(struct request_queue *q)
787 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); 799 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
788} 800}
789 801
790static void blk_add_trace_unplug_io(struct request_queue *q) 802static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
791{ 803{
792 struct blk_trace *bt = q->blk_trace; 804 struct blk_trace *bt = q->blk_trace;
793 805
@@ -800,7 +812,7 @@ static void blk_add_trace_unplug_io(struct request_queue *q)
800 } 812 }
801} 813}
802 814
803static void blk_add_trace_unplug_timer(struct request_queue *q) 815static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
804{ 816{
805 struct blk_trace *bt = q->blk_trace; 817 struct blk_trace *bt = q->blk_trace;
806 818
@@ -813,7 +825,8 @@ static void blk_add_trace_unplug_timer(struct request_queue *q)
813 } 825 }
814} 826}
815 827
816static void blk_add_trace_split(struct request_queue *q, struct bio *bio, 828static void blk_add_trace_split(void *ignore,
829 struct request_queue *q, struct bio *bio,
817 unsigned int pdu) 830 unsigned int pdu)
818{ 831{
819 struct blk_trace *bt = q->blk_trace; 832 struct blk_trace *bt = q->blk_trace;
@@ -839,8 +852,9 @@ static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
839 * it spans a stripe (or similar). Add a trace for that action. 852 * it spans a stripe (or similar). Add a trace for that action.
840 * 853 *
841 **/ 854 **/
842static void blk_add_trace_remap(struct request_queue *q, struct bio *bio, 855static void blk_add_trace_remap(void *ignore,
843 dev_t dev, sector_t from) 856 struct request_queue *q, struct bio *bio,
857 dev_t dev, sector_t from)
844{ 858{
845 struct blk_trace *bt = q->blk_trace; 859 struct blk_trace *bt = q->blk_trace;
846 struct blk_io_trace_remap r; 860 struct blk_io_trace_remap r;
@@ -869,7 +883,8 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
869 * Add a trace for that action. 883 * Add a trace for that action.
870 * 884 *
871 **/ 885 **/
872static void blk_add_trace_rq_remap(struct request_queue *q, 886static void blk_add_trace_rq_remap(void *ignore,
887 struct request_queue *q,
873 struct request *rq, dev_t dev, 888 struct request *rq, dev_t dev,
874 sector_t from) 889 sector_t from)
875{ 890{
@@ -921,64 +936,64 @@ static void blk_register_tracepoints(void)
921{ 936{
922 int ret; 937 int ret;
923 938
924 ret = register_trace_block_rq_abort(blk_add_trace_rq_abort); 939 ret = register_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
925 WARN_ON(ret); 940 WARN_ON(ret);
926 ret = register_trace_block_rq_insert(blk_add_trace_rq_insert); 941 ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
927 WARN_ON(ret); 942 WARN_ON(ret);
928 ret = register_trace_block_rq_issue(blk_add_trace_rq_issue); 943 ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
929 WARN_ON(ret); 944 WARN_ON(ret);
930 ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue); 945 ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
931 WARN_ON(ret); 946 WARN_ON(ret);
932 ret = register_trace_block_rq_complete(blk_add_trace_rq_complete); 947 ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
933 WARN_ON(ret); 948 WARN_ON(ret);
934 ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce); 949 ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
935 WARN_ON(ret); 950 WARN_ON(ret);
936 ret = register_trace_block_bio_complete(blk_add_trace_bio_complete); 951 ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
937 WARN_ON(ret); 952 WARN_ON(ret);
938 ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge); 953 ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
939 WARN_ON(ret); 954 WARN_ON(ret);
940 ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge); 955 ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
941 WARN_ON(ret); 956 WARN_ON(ret);
942 ret = register_trace_block_bio_queue(blk_add_trace_bio_queue); 957 ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
943 WARN_ON(ret); 958 WARN_ON(ret);
944 ret = register_trace_block_getrq(blk_add_trace_getrq); 959 ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
945 WARN_ON(ret); 960 WARN_ON(ret);
946 ret = register_trace_block_sleeprq(blk_add_trace_sleeprq); 961 ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
947 WARN_ON(ret); 962 WARN_ON(ret);
948 ret = register_trace_block_plug(blk_add_trace_plug); 963 ret = register_trace_block_plug(blk_add_trace_plug, NULL);
949 WARN_ON(ret); 964 WARN_ON(ret);
950 ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer); 965 ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
951 WARN_ON(ret); 966 WARN_ON(ret);
952 ret = register_trace_block_unplug_io(blk_add_trace_unplug_io); 967 ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
953 WARN_ON(ret); 968 WARN_ON(ret);
954 ret = register_trace_block_split(blk_add_trace_split); 969 ret = register_trace_block_split(blk_add_trace_split, NULL);
955 WARN_ON(ret); 970 WARN_ON(ret);
956 ret = register_trace_block_remap(blk_add_trace_remap); 971 ret = register_trace_block_remap(blk_add_trace_remap, NULL);
957 WARN_ON(ret); 972 WARN_ON(ret);
958 ret = register_trace_block_rq_remap(blk_add_trace_rq_remap); 973 ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
959 WARN_ON(ret); 974 WARN_ON(ret);
960} 975}
961 976
962static void blk_unregister_tracepoints(void) 977static void blk_unregister_tracepoints(void)
963{ 978{
964 unregister_trace_block_rq_remap(blk_add_trace_rq_remap); 979 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
965 unregister_trace_block_remap(blk_add_trace_remap); 980 unregister_trace_block_remap(blk_add_trace_remap, NULL);
966 unregister_trace_block_split(blk_add_trace_split); 981 unregister_trace_block_split(blk_add_trace_split, NULL);
967 unregister_trace_block_unplug_io(blk_add_trace_unplug_io); 982 unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
968 unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer); 983 unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
969 unregister_trace_block_plug(blk_add_trace_plug); 984 unregister_trace_block_plug(blk_add_trace_plug, NULL);
970 unregister_trace_block_sleeprq(blk_add_trace_sleeprq); 985 unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
971 unregister_trace_block_getrq(blk_add_trace_getrq); 986 unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
972 unregister_trace_block_bio_queue(blk_add_trace_bio_queue); 987 unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
973 unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge); 988 unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
974 unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge); 989 unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
975 unregister_trace_block_bio_complete(blk_add_trace_bio_complete); 990 unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
976 unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce); 991 unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
977 unregister_trace_block_rq_complete(blk_add_trace_rq_complete); 992 unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
978 unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue); 993 unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
979 unregister_trace_block_rq_issue(blk_add_trace_rq_issue); 994 unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
980 unregister_trace_block_rq_insert(blk_add_trace_rq_insert); 995 unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
981 unregister_trace_block_rq_abort(blk_add_trace_rq_abort); 996 unregister_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
982 997
983 tracepoint_synchronize_unregister(); 998 tracepoint_synchronize_unregister();
984} 999}
@@ -1321,7 +1336,7 @@ out:
1321} 1336}
1322 1337
1323static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, 1338static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
1324 int flags) 1339 int flags, struct trace_event *event)
1325{ 1340{
1326 return print_one_line(iter, false); 1341 return print_one_line(iter, false);
1327} 1342}
@@ -1343,7 +1358,8 @@ static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
1343} 1358}
1344 1359
1345static enum print_line_t 1360static enum print_line_t
1346blk_trace_event_print_binary(struct trace_iterator *iter, int flags) 1361blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
1362 struct trace_event *event)
1347{ 1363{
1348 return blk_trace_synthesize_old_trace(iter) ? 1364 return blk_trace_synthesize_old_trace(iter) ?
1349 TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; 1365 TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
@@ -1381,12 +1397,16 @@ static struct tracer blk_tracer __read_mostly = {
1381 .set_flag = blk_tracer_set_flag, 1397 .set_flag = blk_tracer_set_flag,
1382}; 1398};
1383 1399
1384static struct trace_event trace_blk_event = { 1400static struct trace_event_functions trace_blk_event_funcs = {
1385 .type = TRACE_BLK,
1386 .trace = blk_trace_event_print, 1401 .trace = blk_trace_event_print,
1387 .binary = blk_trace_event_print_binary, 1402 .binary = blk_trace_event_print_binary,
1388}; 1403};
1389 1404
1405static struct trace_event trace_blk_event = {
1406 .type = TRACE_BLK,
1407 .funcs = &trace_blk_event_funcs,
1408};
1409
1390static int __init init_blk_tracer(void) 1410static int __init init_blk_tracer(void)
1391{ 1411{
1392 if (!register_ftrace_event(&trace_blk_event)) { 1412 if (!register_ftrace_event(&trace_blk_event)) {
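The blktrace hunks above all follow the new tracepoint calling convention: each probe gains an opaque data pointer as its first argument, and registration/unregistration now take a (probe, data) pair, with NULL passed here since blktrace keeps no per-registration state. A minimal sketch of that pattern, with hypothetical names, not code from this patch:

#include <linux/kernel.h>
#include <trace/events/block.h>

/* Probes now receive whatever data pointer was supplied at registration. */
static void my_plug_probe(void *ignore, struct request_queue *q)
{
	/* 'ignore' is the NULL registered below; callers may pass real context. */
	pr_debug("queue %p plugged\n", q);
}

static int my_probe_attach(void)
{
	/* The second argument is the private data forwarded to the probe. */
	return register_trace_block_plug(my_plug_probe, NULL);
}

static void my_probe_detach(void)
{
	/* Unregistration must name the same (probe, data) pair. */
	unregister_trace_block_plug(my_plug_probe, NULL);
}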
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 32837e19e3bd..6d2cb14f9449 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3234,7 +3234,8 @@ free:
3234} 3234}
3235 3235
3236static void 3236static void
3237ftrace_graph_probe_sched_switch(struct task_struct *prev, struct task_struct *next) 3237ftrace_graph_probe_sched_switch(void *ignore,
3238 struct task_struct *prev, struct task_struct *next)
3238{ 3239{
3239 unsigned long long timestamp; 3240 unsigned long long timestamp;
3240 int index; 3241 int index;
@@ -3288,7 +3289,7 @@ static int start_graph_tracing(void)
3288 } while (ret == -EAGAIN); 3289 } while (ret == -EAGAIN);
3289 3290
3290 if (!ret) { 3291 if (!ret) {
3291 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch); 3292 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3292 if (ret) 3293 if (ret)
3293 pr_info("ftrace_graph: Couldn't activate tracepoint" 3294 pr_info("ftrace_graph: Couldn't activate tracepoint"
3294 " probe to kernel_sched_switch\n"); 3295 " probe to kernel_sched_switch\n");
@@ -3364,7 +3365,7 @@ void unregister_ftrace_graph(void)
3364 ftrace_graph_entry = ftrace_graph_entry_stub; 3365 ftrace_graph_entry = ftrace_graph_entry_stub;
3365 ftrace_shutdown(FTRACE_STOP_FUNC_RET); 3366 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
3366 unregister_pm_notifier(&ftrace_suspend_notifier); 3367 unregister_pm_notifier(&ftrace_suspend_notifier);
3367 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch); 3368 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3368 3369
3369 out: 3370 out:
3370 mutex_unlock(&ftrace_lock); 3371 mutex_unlock(&ftrace_lock);
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index a91da69f153a..bbfc1bb1660b 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -95,7 +95,8 @@ static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
95 trace_wake_up(); 95 trace_wake_up();
96} 96}
97 97
98static void kmemtrace_kmalloc(unsigned long call_site, 98static void kmemtrace_kmalloc(void *ignore,
99 unsigned long call_site,
99 const void *ptr, 100 const void *ptr,
100 size_t bytes_req, 101 size_t bytes_req,
101 size_t bytes_alloc, 102 size_t bytes_alloc,
@@ -105,7 +106,8 @@ static void kmemtrace_kmalloc(unsigned long call_site,
105 bytes_req, bytes_alloc, gfp_flags, -1); 106 bytes_req, bytes_alloc, gfp_flags, -1);
106} 107}
107 108
108static void kmemtrace_kmem_cache_alloc(unsigned long call_site, 109static void kmemtrace_kmem_cache_alloc(void *ignore,
110 unsigned long call_site,
109 const void *ptr, 111 const void *ptr,
110 size_t bytes_req, 112 size_t bytes_req,
111 size_t bytes_alloc, 113 size_t bytes_alloc,
@@ -115,7 +117,8 @@ static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
115 bytes_req, bytes_alloc, gfp_flags, -1); 117 bytes_req, bytes_alloc, gfp_flags, -1);
116} 118}
117 119
118static void kmemtrace_kmalloc_node(unsigned long call_site, 120static void kmemtrace_kmalloc_node(void *ignore,
121 unsigned long call_site,
119 const void *ptr, 122 const void *ptr,
120 size_t bytes_req, 123 size_t bytes_req,
121 size_t bytes_alloc, 124 size_t bytes_alloc,
@@ -126,7 +129,8 @@ static void kmemtrace_kmalloc_node(unsigned long call_site,
126 bytes_req, bytes_alloc, gfp_flags, node); 129 bytes_req, bytes_alloc, gfp_flags, node);
127} 130}
128 131
129static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site, 132static void kmemtrace_kmem_cache_alloc_node(void *ignore,
133 unsigned long call_site,
130 const void *ptr, 134 const void *ptr,
131 size_t bytes_req, 135 size_t bytes_req,
132 size_t bytes_alloc, 136 size_t bytes_alloc,
@@ -137,12 +141,14 @@ static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
137 bytes_req, bytes_alloc, gfp_flags, node); 141 bytes_req, bytes_alloc, gfp_flags, node);
138} 142}
139 143
140static void kmemtrace_kfree(unsigned long call_site, const void *ptr) 144static void
145kmemtrace_kfree(void *ignore, unsigned long call_site, const void *ptr)
141{ 146{
142 kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr); 147 kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
143} 148}
144 149
145static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr) 150static void kmemtrace_kmem_cache_free(void *ignore,
151 unsigned long call_site, const void *ptr)
146{ 152{
147 kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr); 153 kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
148} 154}
@@ -151,34 +157,34 @@ static int kmemtrace_start_probes(void)
151{ 157{
152 int err; 158 int err;
153 159
154 err = register_trace_kmalloc(kmemtrace_kmalloc); 160 err = register_trace_kmalloc(kmemtrace_kmalloc, NULL);
155 if (err) 161 if (err)
156 return err; 162 return err;
157 err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc); 163 err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
158 if (err) 164 if (err)
159 return err; 165 return err;
160 err = register_trace_kmalloc_node(kmemtrace_kmalloc_node); 166 err = register_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
161 if (err) 167 if (err)
162 return err; 168 return err;
163 err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node); 169 err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
164 if (err) 170 if (err)
165 return err; 171 return err;
166 err = register_trace_kfree(kmemtrace_kfree); 172 err = register_trace_kfree(kmemtrace_kfree, NULL);
167 if (err) 173 if (err)
168 return err; 174 return err;
169 err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free); 175 err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);
170 176
171 return err; 177 return err;
172} 178}
173 179
174static void kmemtrace_stop_probes(void) 180static void kmemtrace_stop_probes(void)
175{ 181{
176 unregister_trace_kmalloc(kmemtrace_kmalloc); 182 unregister_trace_kmalloc(kmemtrace_kmalloc, NULL);
177 unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc); 183 unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
178 unregister_trace_kmalloc_node(kmemtrace_kmalloc_node); 184 unregister_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
179 unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node); 185 unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
180 unregister_trace_kfree(kmemtrace_kfree); 186 unregister_trace_kfree(kmemtrace_kfree, NULL);
181 unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free); 187 unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);
182} 188}
183 189
184static int kmem_trace_init(struct trace_array *tr) 190static int kmem_trace_init(struct trace_array *tr)
@@ -237,7 +243,8 @@ struct kmemtrace_user_event_alloc {
237}; 243};
238 244
239static enum print_line_t 245static enum print_line_t
240kmemtrace_print_alloc(struct trace_iterator *iter, int flags) 246kmemtrace_print_alloc(struct trace_iterator *iter, int flags,
247 struct trace_event *event)
241{ 248{
242 struct trace_seq *s = &iter->seq; 249 struct trace_seq *s = &iter->seq;
243 struct kmemtrace_alloc_entry *entry; 250 struct kmemtrace_alloc_entry *entry;
@@ -257,7 +264,8 @@ kmemtrace_print_alloc(struct trace_iterator *iter, int flags)
257} 264}
258 265
259static enum print_line_t 266static enum print_line_t
260kmemtrace_print_free(struct trace_iterator *iter, int flags) 267kmemtrace_print_free(struct trace_iterator *iter, int flags,
268 struct trace_event *event)
261{ 269{
262 struct trace_seq *s = &iter->seq; 270 struct trace_seq *s = &iter->seq;
263 struct kmemtrace_free_entry *entry; 271 struct kmemtrace_free_entry *entry;
@@ -275,7 +283,8 @@ kmemtrace_print_free(struct trace_iterator *iter, int flags)
275} 283}
276 284
277static enum print_line_t 285static enum print_line_t
278kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags) 286kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags,
287 struct trace_event *event)
279{ 288{
280 struct trace_seq *s = &iter->seq; 289 struct trace_seq *s = &iter->seq;
281 struct kmemtrace_alloc_entry *entry; 290 struct kmemtrace_alloc_entry *entry;
@@ -309,7 +318,8 @@ kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags)
309} 318}
310 319
311static enum print_line_t 320static enum print_line_t
312kmemtrace_print_free_user(struct trace_iterator *iter, int flags) 321kmemtrace_print_free_user(struct trace_iterator *iter, int flags,
322 struct trace_event *event)
313{ 323{
314 struct trace_seq *s = &iter->seq; 324 struct trace_seq *s = &iter->seq;
315 struct kmemtrace_free_entry *entry; 325 struct kmemtrace_free_entry *entry;
@@ -463,18 +473,26 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
463 } 473 }
464} 474}
465 475
466static struct trace_event kmem_trace_alloc = { 476static struct trace_event_functions kmem_trace_alloc_funcs = {
467 .type = TRACE_KMEM_ALLOC,
468 .trace = kmemtrace_print_alloc, 477 .trace = kmemtrace_print_alloc,
469 .binary = kmemtrace_print_alloc_user, 478 .binary = kmemtrace_print_alloc_user,
470}; 479};
471 480
472static struct trace_event kmem_trace_free = { 481static struct trace_event kmem_trace_alloc = {
473 .type = TRACE_KMEM_FREE, 482 .type = TRACE_KMEM_ALLOC,
483 .funcs = &kmem_trace_alloc_funcs,
484};
485
486static struct trace_event_functions kmem_trace_free_funcs = {
474 .trace = kmemtrace_print_free, 487 .trace = kmemtrace_print_free,
475 .binary = kmemtrace_print_free_user, 488 .binary = kmemtrace_print_free_user,
476}; 489};
477 490
491static struct trace_event kmem_trace_free = {
492 .type = TRACE_KMEM_FREE,
493 .funcs = &kmem_trace_free_funcs,
494};
495
478static struct tracer kmem_tracer __read_mostly = { 496static struct tracer kmem_tracer __read_mostly = {
479 .name = "kmemtrace", 497 .name = "kmemtrace",
480 .init = kmem_trace_init, 498 .init = kmem_trace_init,
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7f6059c5aa94..1da7b6ea8b85 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1768,6 +1768,14 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1768 * must fill the old tail_page with padding. 1768 * must fill the old tail_page with padding.
1769 */ 1769 */
1770 if (tail >= BUF_PAGE_SIZE) { 1770 if (tail >= BUF_PAGE_SIZE) {
1771 /*
1772 * If the page was filled, then we still need
1773 * to update the real_end. Reset it to zero
1774 * and the reader will ignore it.
1775 */
1776 if (tail == BUF_PAGE_SIZE)
1777 tail_page->real_end = 0;
1778
1771 local_sub(length, &tail_page->write); 1779 local_sub(length, &tail_page->write);
1772 return; 1780 return;
1773 } 1781 }
@@ -3894,12 +3902,12 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
3894 ret = read; 3902 ret = read;
3895 3903
3896 cpu_buffer->lost_events = 0; 3904 cpu_buffer->lost_events = 0;
3905
3906 commit = local_read(&bpage->commit);
3897 /* 3907 /*
3898 * Set a flag in the commit field if we lost events 3908 * Set a flag in the commit field if we lost events
3899 */ 3909 */
3900 if (missed_events) { 3910 if (missed_events) {
3901 commit = local_read(&bpage->commit);
3902
3903 /* If there is room at the end of the page to save the 3911 /* If there is room at the end of the page to save the
3904 * missed events, then record it there. 3912 * missed events, then record it there.
3905 */ 3913 */
@@ -3907,10 +3915,17 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
3907 memcpy(&bpage->data[commit], &missed_events, 3915 memcpy(&bpage->data[commit], &missed_events,
3908 sizeof(missed_events)); 3916 sizeof(missed_events));
3909 local_add(RB_MISSED_STORED, &bpage->commit); 3917 local_add(RB_MISSED_STORED, &bpage->commit);
3918 commit += sizeof(missed_events);
3910 } 3919 }
3911 local_add(RB_MISSED_EVENTS, &bpage->commit); 3920 local_add(RB_MISSED_EVENTS, &bpage->commit);
3912 } 3921 }
3913 3922
3923 /*
3924 * This page may be off to user land. Zero it out here.
3925 */
3926 if (commit < BUF_PAGE_SIZE)
3927 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
3928
3914 out_unlock: 3929 out_unlock:
3915 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3930 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3916 3931
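The ring_buffer change above makes the commit count the single source of truth for how much of a reader page is valid: it is read unconditionally, advanced when lost-event information is appended, and everything beyond it is cleared before the page reaches user space (the matching memset removed from tracing_buffers_read() in the trace.c hunk further below relied on the old behaviour). Condensed sketch of the added logic, using the names from the hunk and with the room check elided:

	commit = local_read(&bpage->commit);
	if (missed_events) {
		/* if there is room at the end of the page, record the count there */
		memcpy(&bpage->data[commit], &missed_events, sizeof(missed_events));
		local_add(RB_MISSED_STORED, &bpage->commit);
		commit += sizeof(missed_events);
		local_add(RB_MISSED_EVENTS, &bpage->commit);
	}

	/* never hand stale buffer contents past the commit to user land */
	if (commit < BUF_PAGE_SIZE)
		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);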
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8a76339a9e65..086d36316805 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1936,7 +1936,7 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1936 } 1936 }
1937 1937
1938 if (event) 1938 if (event)
1939 return event->trace(iter, sym_flags); 1939 return event->funcs->trace(iter, sym_flags, event);
1940 1940
1941 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) 1941 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
1942 goto partial; 1942 goto partial;
@@ -1962,7 +1962,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
1962 1962
1963 event = ftrace_find_event(entry->type); 1963 event = ftrace_find_event(entry->type);
1964 if (event) 1964 if (event)
1965 return event->raw(iter, 0); 1965 return event->funcs->raw(iter, 0, event);
1966 1966
1967 if (!trace_seq_printf(s, "%d ?\n", entry->type)) 1967 if (!trace_seq_printf(s, "%d ?\n", entry->type))
1968 goto partial; 1968 goto partial;
@@ -1989,7 +1989,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1989 1989
1990 event = ftrace_find_event(entry->type); 1990 event = ftrace_find_event(entry->type);
1991 if (event) { 1991 if (event) {
1992 enum print_line_t ret = event->hex(iter, 0); 1992 enum print_line_t ret = event->funcs->hex(iter, 0, event);
1993 if (ret != TRACE_TYPE_HANDLED) 1993 if (ret != TRACE_TYPE_HANDLED)
1994 return ret; 1994 return ret;
1995 } 1995 }
@@ -2014,7 +2014,8 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2014 } 2014 }
2015 2015
2016 event = ftrace_find_event(entry->type); 2016 event = ftrace_find_event(entry->type);
2017 return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED; 2017 return event ? event->funcs->binary(iter, 0, event) :
2018 TRACE_TYPE_HANDLED;
2018} 2019}
2019 2020
2020int trace_empty(struct trace_iterator *iter) 2021int trace_empty(struct trace_iterator *iter)
@@ -3665,7 +3666,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
3665 size_t count, loff_t *ppos) 3666 size_t count, loff_t *ppos)
3666{ 3667{
3667 struct ftrace_buffer_info *info = filp->private_data; 3668 struct ftrace_buffer_info *info = filp->private_data;
3668 unsigned int pos;
3669 ssize_t ret; 3669 ssize_t ret;
3670 size_t size; 3670 size_t size;
3671 3671
@@ -3692,11 +3692,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
3692 if (ret < 0) 3692 if (ret < 0)
3693 return 0; 3693 return 0;
3694 3694
3695 pos = ring_buffer_page_len(info->spare);
3696
3697 if (pos < PAGE_SIZE)
3698 memset(info->spare + pos, 0, PAGE_SIZE - pos);
3699
3700read: 3695read:
3701 size = PAGE_SIZE - info->read; 3696 size = PAGE_SIZE - info->read;
3702 if (size > count) 3697 if (size > count)
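Together with the trace_blk_event and kmemtrace changes earlier, the trace.c hunks above show both halves of the trace_event split: the output callbacks (trace, raw, hex, binary) move into a shared struct trace_event_functions, and every call site dispatches through event->funcs while passing the event itself as a third argument. A sketch of the new shape, with hypothetical names:

static enum print_line_t my_event_trace(struct trace_iterator *iter,
					int flags, struct trace_event *event)
{
	return TRACE_TYPE_HANDLED;	/* real handlers format iter->seq here */
}

/* Several events can now share one set of output functions. */
static struct trace_event_functions my_event_funcs = {
	.trace	= my_event_trace,
};

static struct trace_event my_event = {
	.type	= 0,		/* 0 lets register_ftrace_event() assign an id */
	.funcs	= &my_event_funcs,
};

/* Callers no longer invoke event->trace() directly:                */
/*	ret = event->funcs->trace(iter, sym_flags, event);           */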
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d1ce0bec1b3f..2cd96399463f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -405,12 +405,12 @@ void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
405void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, 405void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
406 int pc); 406 int pc);
407#else 407#else
408static inline void ftrace_trace_stack(struct trace_array *tr, 408static inline void ftrace_trace_stack(struct ring_buffer *buffer,
409 unsigned long flags, int skip, int pc) 409 unsigned long flags, int skip, int pc)
410{ 410{
411} 411}
412 412
413static inline void ftrace_trace_userstack(struct trace_array *tr, 413static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
414 unsigned long flags, int pc) 414 unsigned long flags, int pc)
415{ 415{
416} 416}
@@ -778,12 +778,15 @@ extern void print_subsystem_event_filter(struct event_subsystem *system,
778 struct trace_seq *s); 778 struct trace_seq *s);
779extern int filter_assign_type(const char *type); 779extern int filter_assign_type(const char *type);
780 780
781struct list_head *
782trace_get_fields(struct ftrace_event_call *event_call);
783
781static inline int 784static inline int
782filter_check_discard(struct ftrace_event_call *call, void *rec, 785filter_check_discard(struct ftrace_event_call *call, void *rec,
783 struct ring_buffer *buffer, 786 struct ring_buffer *buffer,
784 struct ring_buffer_event *event) 787 struct ring_buffer_event *event)
785{ 788{
786 if (unlikely(call->filter_active) && 789 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
787 !filter_match_preds(call->filter, rec)) { 790 !filter_match_preds(call->filter, rec)) {
788 ring_buffer_discard_commit(buffer, event); 791 ring_buffer_discard_commit(buffer, event);
789 return 1; 792 return 1;
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index b9bc4d470177..8d3538b4ea5f 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -143,7 +143,7 @@ static void branch_trace_reset(struct trace_array *tr)
143} 143}
144 144
145static enum print_line_t trace_branch_print(struct trace_iterator *iter, 145static enum print_line_t trace_branch_print(struct trace_iterator *iter,
146 int flags) 146 int flags, struct trace_event *event)
147{ 147{
148 struct trace_branch *field; 148 struct trace_branch *field;
149 149
@@ -167,9 +167,13 @@ static void branch_print_header(struct seq_file *s)
167 " |\n"); 167 " |\n");
168} 168}
169 169
170static struct trace_event_functions trace_branch_funcs = {
171 .trace = trace_branch_print,
172};
173
170static struct trace_event trace_branch_event = { 174static struct trace_event trace_branch_event = {
171 .type = TRACE_BRANCH, 175 .type = TRACE_BRANCH,
172 .trace = trace_branch_print, 176 .funcs = &trace_branch_funcs,
173}; 177};
174 178
175static struct tracer branch_trace __read_mostly = 179static struct tracer branch_trace __read_mostly =
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 0565bb42566f..cb6f365016e4 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -9,13 +9,9 @@
9#include <linux/kprobes.h> 9#include <linux/kprobes.h>
10#include "trace.h" 10#include "trace.h"
11 11
12DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
13EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);
14
15EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); 12EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
16 13
17static char *perf_trace_buf; 14static char *perf_trace_buf[4];
18static char *perf_trace_buf_nmi;
19 15
20/* 16/*
21 * Force it to be aligned to unsigned long to avoid misaligned accesses 17 * Force it to be aligned to unsigned long to avoid misaligned accesses
@@ -27,57 +23,82 @@ typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
27/* Count the events in use (per event id, not per instance) */ 23/* Count the events in use (per event id, not per instance) */
28static int total_ref_count; 24static int total_ref_count;
29 25
30static int perf_trace_event_enable(struct ftrace_event_call *event) 26static int perf_trace_event_init(struct ftrace_event_call *tp_event,
27 struct perf_event *p_event)
31{ 28{
32 char *buf; 29 struct hlist_head *list;
33 int ret = -ENOMEM; 30 int ret = -ENOMEM;
31 int cpu;
34 32
35 if (event->perf_refcount++ > 0) 33 p_event->tp_event = tp_event;
34 if (tp_event->perf_refcount++ > 0)
36 return 0; 35 return 0;
37 36
38 if (!total_ref_count) { 37 list = alloc_percpu(struct hlist_head);
39 buf = (char *)alloc_percpu(perf_trace_t); 38 if (!list)
40 if (!buf) 39 goto fail;
41 goto fail_buf;
42 40
43 rcu_assign_pointer(perf_trace_buf, buf); 41 for_each_possible_cpu(cpu)
42 INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));
44 43
45 buf = (char *)alloc_percpu(perf_trace_t); 44 tp_event->perf_events = list;
46 if (!buf)
47 goto fail_buf_nmi;
48 45
49 rcu_assign_pointer(perf_trace_buf_nmi, buf); 46 if (!total_ref_count) {
50 } 47 char *buf;
48 int i;
51 49
52 ret = event->perf_event_enable(event); 50 for (i = 0; i < 4; i++) {
53 if (!ret) { 51 buf = (char *)alloc_percpu(perf_trace_t);
54 total_ref_count++; 52 if (!buf)
55 return 0; 53 goto fail;
54
55 perf_trace_buf[i] = buf;
56 }
56 } 57 }
57 58
58fail_buf_nmi: 59 if (tp_event->class->reg)
60 ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
61 else
62 ret = tracepoint_probe_register(tp_event->name,
63 tp_event->class->perf_probe,
64 tp_event);
65
66 if (ret)
67 goto fail;
68
69 total_ref_count++;
70 return 0;
71
72fail:
59 if (!total_ref_count) { 73 if (!total_ref_count) {
60 free_percpu(perf_trace_buf_nmi); 74 int i;
61 free_percpu(perf_trace_buf); 75
62 perf_trace_buf_nmi = NULL; 76 for (i = 0; i < 4; i++) {
63 perf_trace_buf = NULL; 77 free_percpu(perf_trace_buf[i]);
78 perf_trace_buf[i] = NULL;
79 }
80 }
81
82 if (!--tp_event->perf_refcount) {
83 free_percpu(tp_event->perf_events);
84 tp_event->perf_events = NULL;
64 } 85 }
65fail_buf:
66 event->perf_refcount--;
67 86
68 return ret; 87 return ret;
69} 88}
70 89
71int perf_trace_enable(int event_id) 90int perf_trace_init(struct perf_event *p_event)
72{ 91{
73 struct ftrace_event_call *event; 92 struct ftrace_event_call *tp_event;
93 int event_id = p_event->attr.config;
74 int ret = -EINVAL; 94 int ret = -EINVAL;
75 95
76 mutex_lock(&event_mutex); 96 mutex_lock(&event_mutex);
77 list_for_each_entry(event, &ftrace_events, list) { 97 list_for_each_entry(tp_event, &ftrace_events, list) {
78 if (event->id == event_id && event->perf_event_enable && 98 if (tp_event->event.type == event_id &&
79 try_module_get(event->mod)) { 99 tp_event->class && tp_event->class->perf_probe &&
80 ret = perf_trace_event_enable(event); 100 try_module_get(tp_event->mod)) {
101 ret = perf_trace_event_init(tp_event, p_event);
81 break; 102 break;
82 } 103 }
83 } 104 }
@@ -86,90 +107,78 @@ int perf_trace_enable(int event_id)
86 return ret; 107 return ret;
87} 108}
88 109
89static void perf_trace_event_disable(struct ftrace_event_call *event) 110int perf_trace_enable(struct perf_event *p_event)
90{ 111{
91 char *buf, *nmi_buf; 112 struct ftrace_event_call *tp_event = p_event->tp_event;
92 113 struct hlist_head *list;
93 if (--event->perf_refcount > 0)
94 return;
95
96 event->perf_event_disable(event);
97 114
98 if (!--total_ref_count) { 115 list = tp_event->perf_events;
99 buf = perf_trace_buf; 116 if (WARN_ON_ONCE(!list))
100 rcu_assign_pointer(perf_trace_buf, NULL); 117 return -EINVAL;
101 118
102 nmi_buf = perf_trace_buf_nmi; 119 list = per_cpu_ptr(list, smp_processor_id());
103 rcu_assign_pointer(perf_trace_buf_nmi, NULL); 120 hlist_add_head_rcu(&p_event->hlist_entry, list);
104 121
105 /* 122 return 0;
106 * Ensure every events in profiling have finished before 123}
107 * releasing the buffers
108 */
109 synchronize_sched();
110 124
111 free_percpu(buf); 125void perf_trace_disable(struct perf_event *p_event)
112 free_percpu(nmi_buf); 126{
113 } 127 hlist_del_rcu(&p_event->hlist_entry);
114} 128}
115 129
116void perf_trace_disable(int event_id) 130void perf_trace_destroy(struct perf_event *p_event)
117{ 131{
118 struct ftrace_event_call *event; 132 struct ftrace_event_call *tp_event = p_event->tp_event;
133 int i;
119 134
120 mutex_lock(&event_mutex); 135 if (--tp_event->perf_refcount > 0)
121 list_for_each_entry(event, &ftrace_events, list) { 136 return;
122 if (event->id == event_id) { 137
123 perf_trace_event_disable(event); 138 if (tp_event->class->reg)
124 module_put(event->mod); 139 tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
125 break; 140 else
141 tracepoint_probe_unregister(tp_event->name,
142 tp_event->class->perf_probe,
143 tp_event);
144
145 free_percpu(tp_event->perf_events);
146 tp_event->perf_events = NULL;
147
148 if (!--total_ref_count) {
149 for (i = 0; i < 4; i++) {
150 free_percpu(perf_trace_buf[i]);
151 perf_trace_buf[i] = NULL;
126 } 152 }
127 } 153 }
128 mutex_unlock(&event_mutex);
129} 154}
130 155
131__kprobes void *perf_trace_buf_prepare(int size, unsigned short type, 156__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
132 int *rctxp, unsigned long *irq_flags) 157 struct pt_regs *regs, int *rctxp)
133{ 158{
134 struct trace_entry *entry; 159 struct trace_entry *entry;
135 char *trace_buf, *raw_data; 160 unsigned long flags;
136 int pc, cpu; 161 char *raw_data;
162 int pc;
137 163
138 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long)); 164 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
139 165
140 pc = preempt_count(); 166 pc = preempt_count();
141 167
142 /* Protect the per cpu buffer, begin the rcu read side */
143 local_irq_save(*irq_flags);
144
145 *rctxp = perf_swevent_get_recursion_context(); 168 *rctxp = perf_swevent_get_recursion_context();
146 if (*rctxp < 0) 169 if (*rctxp < 0)
147 goto err_recursion; 170 return NULL;
148
149 cpu = smp_processor_id();
150
151 if (in_nmi())
152 trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
153 else
154 trace_buf = rcu_dereference_sched(perf_trace_buf);
155
156 if (!trace_buf)
157 goto err;
158 171
159 raw_data = per_cpu_ptr(trace_buf, cpu); 172 raw_data = per_cpu_ptr(perf_trace_buf[*rctxp], smp_processor_id());
160 173
161 /* zero the dead bytes from align to not leak stack to user */ 174 /* zero the dead bytes from align to not leak stack to user */
162 memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64)); 175 memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
163 176
164 entry = (struct trace_entry *)raw_data; 177 entry = (struct trace_entry *)raw_data;
165 tracing_generic_entry_update(entry, *irq_flags, pc); 178 local_save_flags(flags);
179 tracing_generic_entry_update(entry, flags, pc);
166 entry->type = type; 180 entry->type = type;
167 181
168 return raw_data; 182 return raw_data;
169err:
170 perf_swevent_put_recursion_context(*rctxp);
171err_recursion:
172 local_irq_restore(*irq_flags);
173 return NULL;
174} 183}
175EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); 184EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
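The trace_event_perf.c rewrite above drops the separate normal/NMI buffers in favour of four per-cpu scratch buffers indexed by the perf software-event recursion context (one level each for task, softirq, hardirq and NMI), and tracks active perf events on a per-cpu hlist hung off the traced event. Sketch of the buffer selection inside perf_trace_buf_prepare(), same names as the hunk:

	/* pick the scratch buffer matching the current context level */
	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = per_cpu_ptr(perf_trace_buf[*rctxp], smp_processor_id());

	/* zero the alignment padding so no stack bytes leak to user space */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));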
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c697c7043349..53cffc0b0801 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -29,11 +29,23 @@ DEFINE_MUTEX(event_mutex);
29 29
30LIST_HEAD(ftrace_events); 30LIST_HEAD(ftrace_events);
31 31
32struct list_head *
33trace_get_fields(struct ftrace_event_call *event_call)
34{
35 if (!event_call->class->get_fields)
36 return &event_call->class->fields;
37 return event_call->class->get_fields(event_call);
38}
39
32int trace_define_field(struct ftrace_event_call *call, const char *type, 40int trace_define_field(struct ftrace_event_call *call, const char *type,
33 const char *name, int offset, int size, int is_signed, 41 const char *name, int offset, int size, int is_signed,
34 int filter_type) 42 int filter_type)
35{ 43{
36 struct ftrace_event_field *field; 44 struct ftrace_event_field *field;
45 struct list_head *head;
46
47 if (WARN_ON(!call->class))
48 return 0;
37 49
38 field = kzalloc(sizeof(*field), GFP_KERNEL); 50 field = kzalloc(sizeof(*field), GFP_KERNEL);
39 if (!field) 51 if (!field)
@@ -56,7 +68,8 @@ int trace_define_field(struct ftrace_event_call *call, const char *type,
56 field->size = size; 68 field->size = size;
57 field->is_signed = is_signed; 69 field->is_signed = is_signed;
58 70
59 list_add(&field->link, &call->fields); 71 head = trace_get_fields(call);
72 list_add(&field->link, head);
60 73
61 return 0; 74 return 0;
62 75
@@ -94,8 +107,10 @@ static int trace_define_common_fields(struct ftrace_event_call *call)
94void trace_destroy_fields(struct ftrace_event_call *call) 107void trace_destroy_fields(struct ftrace_event_call *call)
95{ 108{
96 struct ftrace_event_field *field, *next; 109 struct ftrace_event_field *field, *next;
110 struct list_head *head;
97 111
98 list_for_each_entry_safe(field, next, &call->fields, link) { 112 head = trace_get_fields(call);
113 list_for_each_entry_safe(field, next, head, link) {
99 list_del(&field->link); 114 list_del(&field->link);
100 kfree(field->type); 115 kfree(field->type);
101 kfree(field->name); 116 kfree(field->name);
@@ -107,11 +122,9 @@ int trace_event_raw_init(struct ftrace_event_call *call)
107{ 122{
108 int id; 123 int id;
109 124
110 id = register_ftrace_event(call->event); 125 id = register_ftrace_event(&call->event);
111 if (!id) 126 if (!id)
112 return -ENODEV; 127 return -ENODEV;
113 call->id = id;
114 INIT_LIST_HEAD(&call->fields);
115 128
116 return 0; 129 return 0;
117} 130}
@@ -124,23 +137,33 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
124 137
125 switch (enable) { 138 switch (enable) {
126 case 0: 139 case 0:
127 if (call->enabled) { 140 if (call->flags & TRACE_EVENT_FL_ENABLED) {
128 call->enabled = 0; 141 call->flags &= ~TRACE_EVENT_FL_ENABLED;
129 tracing_stop_cmdline_record(); 142 tracing_stop_cmdline_record();
130 call->unregfunc(call); 143 if (call->class->reg)
144 call->class->reg(call, TRACE_REG_UNREGISTER);
145 else
146 tracepoint_probe_unregister(call->name,
147 call->class->probe,
148 call);
131 } 149 }
132 break; 150 break;
133 case 1: 151 case 1:
134 if (!call->enabled) { 152 if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
135 tracing_start_cmdline_record(); 153 tracing_start_cmdline_record();
136 ret = call->regfunc(call); 154 if (call->class->reg)
155 ret = call->class->reg(call, TRACE_REG_REGISTER);
156 else
157 ret = tracepoint_probe_register(call->name,
158 call->class->probe,
159 call);
137 if (ret) { 160 if (ret) {
138 tracing_stop_cmdline_record(); 161 tracing_stop_cmdline_record();
139 pr_info("event trace: Could not enable event " 162 pr_info("event trace: Could not enable event "
140 "%s\n", call->name); 163 "%s\n", call->name);
141 break; 164 break;
142 } 165 }
143 call->enabled = 1; 166 call->flags |= TRACE_EVENT_FL_ENABLED;
144 } 167 }
145 break; 168 break;
146 } 169 }
@@ -171,15 +194,16 @@ static int __ftrace_set_clr_event(const char *match, const char *sub,
171 mutex_lock(&event_mutex); 194 mutex_lock(&event_mutex);
172 list_for_each_entry(call, &ftrace_events, list) { 195 list_for_each_entry(call, &ftrace_events, list) {
173 196
174 if (!call->name || !call->regfunc) 197 if (!call->name || !call->class ||
198 (!call->class->probe && !call->class->reg))
175 continue; 199 continue;
176 200
177 if (match && 201 if (match &&
178 strcmp(match, call->name) != 0 && 202 strcmp(match, call->name) != 0 &&
179 strcmp(match, call->system) != 0) 203 strcmp(match, call->class->system) != 0)
180 continue; 204 continue;
181 205
182 if (sub && strcmp(sub, call->system) != 0) 206 if (sub && strcmp(sub, call->class->system) != 0)
183 continue; 207 continue;
184 208
185 if (event && strcmp(event, call->name) != 0) 209 if (event && strcmp(event, call->name) != 0)
@@ -297,7 +321,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
297 * The ftrace subsystem is for showing formats only. 321 * The ftrace subsystem is for showing formats only.
298 * They can not be enabled or disabled via the event files. 322 * They can not be enabled or disabled via the event files.
299 */ 323 */
300 if (call->regfunc) 324 if (call->class && (call->class->probe || call->class->reg))
301 return call; 325 return call;
302 } 326 }
303 327
@@ -328,7 +352,7 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
328 (*pos)++; 352 (*pos)++;
329 353
330 list_for_each_entry_continue(call, &ftrace_events, list) { 354 list_for_each_entry_continue(call, &ftrace_events, list) {
331 if (call->enabled) 355 if (call->flags & TRACE_EVENT_FL_ENABLED)
332 return call; 356 return call;
333 } 357 }
334 358
@@ -355,8 +379,8 @@ static int t_show(struct seq_file *m, void *v)
355{ 379{
356 struct ftrace_event_call *call = v; 380 struct ftrace_event_call *call = v;
357 381
358 if (strcmp(call->system, TRACE_SYSTEM) != 0) 382 if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
359 seq_printf(m, "%s:", call->system); 383 seq_printf(m, "%s:", call->class->system);
360 seq_printf(m, "%s\n", call->name); 384 seq_printf(m, "%s\n", call->name);
361 385
362 return 0; 386 return 0;
@@ -387,7 +411,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
387 struct ftrace_event_call *call = filp->private_data; 411 struct ftrace_event_call *call = filp->private_data;
388 char *buf; 412 char *buf;
389 413
390 if (call->enabled) 414 if (call->flags & TRACE_EVENT_FL_ENABLED)
391 buf = "1\n"; 415 buf = "1\n";
392 else 416 else
393 buf = "0\n"; 417 buf = "0\n";
@@ -450,10 +474,11 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
450 474
451 mutex_lock(&event_mutex); 475 mutex_lock(&event_mutex);
452 list_for_each_entry(call, &ftrace_events, list) { 476 list_for_each_entry(call, &ftrace_events, list) {
453 if (!call->name || !call->regfunc) 477 if (!call->name || !call->class ||
478 (!call->class->probe && !call->class->reg))
454 continue; 479 continue;
455 480
456 if (system && strcmp(call->system, system) != 0) 481 if (system && strcmp(call->class->system, system) != 0)
457 continue; 482 continue;
458 483
459 /* 484 /*
@@ -461,7 +486,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
461 * or if all events or cleared, or if we have 486 * or if all events or cleared, or if we have
462 * a mixture. 487 * a mixture.
463 */ 488 */
464 set |= (1 << !!call->enabled); 489 set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));
465 490
466 /* 491 /*
467 * If we have a mixture, no need to look further. 492 * If we have a mixture, no need to look further.
@@ -525,6 +550,7 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
525{ 550{
526 struct ftrace_event_call *call = filp->private_data; 551 struct ftrace_event_call *call = filp->private_data;
527 struct ftrace_event_field *field; 552 struct ftrace_event_field *field;
553 struct list_head *head;
528 struct trace_seq *s; 554 struct trace_seq *s;
529 int common_field_count = 5; 555 int common_field_count = 5;
530 char *buf; 556 char *buf;
@@ -540,10 +566,11 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
540 trace_seq_init(s); 566 trace_seq_init(s);
541 567
542 trace_seq_printf(s, "name: %s\n", call->name); 568 trace_seq_printf(s, "name: %s\n", call->name);
543 trace_seq_printf(s, "ID: %d\n", call->id); 569 trace_seq_printf(s, "ID: %d\n", call->event.type);
544 trace_seq_printf(s, "format:\n"); 570 trace_seq_printf(s, "format:\n");
545 571
546 list_for_each_entry_reverse(field, &call->fields, link) { 572 head = trace_get_fields(call);
573 list_for_each_entry_reverse(field, head, link) {
547 /* 574 /*
548 * Smartly shows the array type(except dynamic array). 575 * Smartly shows the array type(except dynamic array).
549 * Normal: 576 * Normal:
@@ -613,7 +640,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
613 return -ENOMEM; 640 return -ENOMEM;
614 641
615 trace_seq_init(s); 642 trace_seq_init(s);
616 trace_seq_printf(s, "%d\n", call->id); 643 trace_seq_printf(s, "%d\n", call->event.type);
617 644
618 r = simple_read_from_buffer(ubuf, cnt, ppos, 645 r = simple_read_from_buffer(ubuf, cnt, ppos,
619 s->buffer, s->len); 646 s->buffer, s->len);
@@ -919,14 +946,15 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
919 const struct file_operations *filter, 946 const struct file_operations *filter,
920 const struct file_operations *format) 947 const struct file_operations *format)
921{ 948{
949 struct list_head *head;
922 int ret; 950 int ret;
923 951
924 /* 952 /*
925 * If the trace point header did not define TRACE_SYSTEM 953 * If the trace point header did not define TRACE_SYSTEM
926 * then the system would be called "TRACE_SYSTEM". 954 * then the system would be called "TRACE_SYSTEM".
927 */ 955 */
928 if (strcmp(call->system, TRACE_SYSTEM) != 0) 956 if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
929 d_events = event_subsystem_dir(call->system, d_events); 957 d_events = event_subsystem_dir(call->class->system, d_events);
930 958
931 call->dir = debugfs_create_dir(call->name, d_events); 959 call->dir = debugfs_create_dir(call->name, d_events);
932 if (!call->dir) { 960 if (!call->dir) {
@@ -935,22 +963,31 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
935 return -1; 963 return -1;
936 } 964 }
937 965
938 if (call->regfunc) 966 if (call->class->probe || call->class->reg)
939 trace_create_file("enable", 0644, call->dir, call, 967 trace_create_file("enable", 0644, call->dir, call,
940 enable); 968 enable);
941 969
942 if (call->id && call->perf_event_enable) 970#ifdef CONFIG_PERF_EVENTS
971 if (call->event.type && (call->class->perf_probe || call->class->reg))
943 trace_create_file("id", 0444, call->dir, call, 972 trace_create_file("id", 0444, call->dir, call,
944 id); 973 id);
974#endif
945 975
946 if (call->define_fields) { 976 if (call->class->define_fields) {
947 ret = trace_define_common_fields(call); 977 /*
948 if (!ret) 978 * Other events may have the same class. Only update
949 ret = call->define_fields(call); 979 * the fields if they are not already defined.
950 if (ret < 0) { 980 */
951 pr_warning("Could not initialize trace point" 981 head = trace_get_fields(call);
952 " events/%s\n", call->name); 982 if (list_empty(head)) {
953 return ret; 983 ret = trace_define_common_fields(call);
984 if (!ret)
985 ret = call->class->define_fields(call);
986 if (ret < 0) {
987 pr_warning("Could not initialize trace point"
988 " events/%s\n", call->name);
989 return ret;
990 }
954 } 991 }
955 trace_create_file("filter", 0644, call->dir, call, 992 trace_create_file("filter", 0644, call->dir, call,
956 filter); 993 filter);
@@ -970,8 +1007,8 @@ static int __trace_add_event_call(struct ftrace_event_call *call)
970 if (!call->name) 1007 if (!call->name)
971 return -EINVAL; 1008 return -EINVAL;
972 1009
973 if (call->raw_init) { 1010 if (call->class->raw_init) {
974 ret = call->raw_init(call); 1011 ret = call->class->raw_init(call);
975 if (ret < 0) { 1012 if (ret < 0) {
976 if (ret != -ENOSYS) 1013 if (ret != -ENOSYS)
977 pr_warning("Could not initialize trace " 1014 pr_warning("Could not initialize trace "
@@ -1035,13 +1072,13 @@ static void remove_subsystem_dir(const char *name)
1035static void __trace_remove_event_call(struct ftrace_event_call *call) 1072static void __trace_remove_event_call(struct ftrace_event_call *call)
1036{ 1073{
1037 ftrace_event_enable_disable(call, 0); 1074 ftrace_event_enable_disable(call, 0);
1038 if (call->event) 1075 if (call->event.funcs)
1039 __unregister_ftrace_event(call->event); 1076 __unregister_ftrace_event(&call->event);
1040 debugfs_remove_recursive(call->dir); 1077 debugfs_remove_recursive(call->dir);
1041 list_del(&call->list); 1078 list_del(&call->list);
1042 trace_destroy_fields(call); 1079 trace_destroy_fields(call);
1043 destroy_preds(call); 1080 destroy_preds(call);
1044 remove_subsystem_dir(call->system); 1081 remove_subsystem_dir(call->class->system);
1045} 1082}
1046 1083
1047/* Remove an event_call */ 1084/* Remove an event_call */
@@ -1132,8 +1169,8 @@ static void trace_module_add_events(struct module *mod)
1132 /* The linker may leave blanks */ 1169 /* The linker may leave blanks */
1133 if (!call->name) 1170 if (!call->name)
1134 continue; 1171 continue;
1135 if (call->raw_init) { 1172 if (call->class->raw_init) {
1136 ret = call->raw_init(call); 1173 ret = call->class->raw_init(call);
1137 if (ret < 0) { 1174 if (ret < 0) {
1138 if (ret != -ENOSYS) 1175 if (ret != -ENOSYS)
1139 pr_warning("Could not initialize trace " 1176 pr_warning("Could not initialize trace "
@@ -1286,8 +1323,8 @@ static __init int event_trace_init(void)
1286 /* The linker may leave blanks */ 1323 /* The linker may leave blanks */
1287 if (!call->name) 1324 if (!call->name)
1288 continue; 1325 continue;
1289 if (call->raw_init) { 1326 if (call->class->raw_init) {
1290 ret = call->raw_init(call); 1327 ret = call->class->raw_init(call);
1291 if (ret < 0) { 1328 if (ret < 0) {
1292 if (ret != -ENOSYS) 1329 if (ret != -ENOSYS)
1293 pr_warning("Could not initialize trace " 1330 pr_warning("Could not initialize trace "
@@ -1388,8 +1425,8 @@ static __init void event_trace_self_tests(void)
1388 1425
1389 list_for_each_entry(call, &ftrace_events, list) { 1426 list_for_each_entry(call, &ftrace_events, list) {
1390 1427
1391 /* Only test those that have a regfunc */ 1428 /* Only test those that have a probe */
1392 if (!call->regfunc) 1429 if (!call->class || !call->class->probe)
1393 continue; 1430 continue;
1394 1431
1395/* 1432/*
@@ -1399,8 +1436,8 @@ static __init void event_trace_self_tests(void)
1399 * syscalls as we test. 1436 * syscalls as we test.
1400 */ 1437 */
1401#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS 1438#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
1402 if (call->system && 1439 if (call->class->system &&
1403 strcmp(call->system, "syscalls") == 0) 1440 strcmp(call->class->system, "syscalls") == 0)
1404 continue; 1441 continue;
1405#endif 1442#endif
1406 1443
@@ -1410,7 +1447,7 @@ static __init void event_trace_self_tests(void)
1410 * If an event is already enabled, someone is using 1447 * If an event is already enabled, someone is using
1411 * it and the self test should not be on. 1448 * it and the self test should not be on.
1412 */ 1449 */
1413 if (call->enabled) { 1450 if (call->flags & TRACE_EVENT_FL_ENABLED) {
1414 pr_warning("Enabled event during self test!\n"); 1451 pr_warning("Enabled event during self test!\n");
1415 WARN_ON_ONCE(1); 1452 WARN_ON_ONCE(1);
1416 continue; 1453 continue;
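Across trace_events.c the per-call ints 'enabled' and (in the filter code below) 'filter_active' collapse into bits of call->flags, and registration is routed through the event's class: a class reg() hook when one exists, otherwise a plain tracepoint probe. Condensed sketch of the enable path, same logic as the hunk above:

	if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
		int ret;

		tracing_start_cmdline_record();
		if (call->class->reg)
			ret = call->class->reg(call, TRACE_REG_REGISTER);
		else
			ret = tracepoint_probe_register(call->name,
							call->class->probe,
							call);
		if (ret)
			tracing_stop_cmdline_record();
		else
			call->flags |= TRACE_EVENT_FL_ENABLED;
	}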
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 58092d844a1f..57bb1bb32999 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -500,8 +500,10 @@ static struct ftrace_event_field *
500find_event_field(struct ftrace_event_call *call, char *name) 500find_event_field(struct ftrace_event_call *call, char *name)
501{ 501{
502 struct ftrace_event_field *field; 502 struct ftrace_event_field *field;
503 struct list_head *head;
503 504
504 list_for_each_entry(field, &call->fields, link) { 505 head = trace_get_fields(call);
506 list_for_each_entry(field, head, link) {
505 if (!strcmp(field->name, name)) 507 if (!strcmp(field->name, name))
506 return field; 508 return field;
507 } 509 }
@@ -545,7 +547,7 @@ static void filter_disable_preds(struct ftrace_event_call *call)
545 struct event_filter *filter = call->filter; 547 struct event_filter *filter = call->filter;
546 int i; 548 int i;
547 549
548 call->filter_active = 0; 550 call->flags &= ~TRACE_EVENT_FL_FILTERED;
549 filter->n_preds = 0; 551 filter->n_preds = 0;
550 552
551 for (i = 0; i < MAX_FILTER_PRED; i++) 553 for (i = 0; i < MAX_FILTER_PRED; i++)
@@ -572,7 +574,7 @@ void destroy_preds(struct ftrace_event_call *call)
572{ 574{
573 __free_preds(call->filter); 575 __free_preds(call->filter);
574 call->filter = NULL; 576 call->filter = NULL;
575 call->filter_active = 0; 577 call->flags &= ~TRACE_EVENT_FL_FILTERED;
576} 578}
577 579
578static struct event_filter *__alloc_preds(void) 580static struct event_filter *__alloc_preds(void)
@@ -611,7 +613,7 @@ static int init_preds(struct ftrace_event_call *call)
611 if (call->filter) 613 if (call->filter)
612 return 0; 614 return 0;
613 615
614 call->filter_active = 0; 616 call->flags &= ~TRACE_EVENT_FL_FILTERED;
615 call->filter = __alloc_preds(); 617 call->filter = __alloc_preds();
616 if (IS_ERR(call->filter)) 618 if (IS_ERR(call->filter))
617 return PTR_ERR(call->filter); 619 return PTR_ERR(call->filter);
@@ -625,10 +627,10 @@ static int init_subsystem_preds(struct event_subsystem *system)
625 int err; 627 int err;
626 628
627 list_for_each_entry(call, &ftrace_events, list) { 629 list_for_each_entry(call, &ftrace_events, list) {
628 if (!call->define_fields) 630 if (!call->class || !call->class->define_fields)
629 continue; 631 continue;
630 632
631 if (strcmp(call->system, system->name) != 0) 633 if (strcmp(call->class->system, system->name) != 0)
632 continue; 634 continue;
633 635
634 err = init_preds(call); 636 err = init_preds(call);
@@ -644,10 +646,10 @@ static void filter_free_subsystem_preds(struct event_subsystem *system)
644 struct ftrace_event_call *call; 646 struct ftrace_event_call *call;
645 647
646 list_for_each_entry(call, &ftrace_events, list) { 648 list_for_each_entry(call, &ftrace_events, list) {
647 if (!call->define_fields) 649 if (!call->class || !call->class->define_fields)
648 continue; 650 continue;
649 651
650 if (strcmp(call->system, system->name) != 0) 652 if (strcmp(call->class->system, system->name) != 0)
651 continue; 653 continue;
652 654
653 filter_disable_preds(call); 655 filter_disable_preds(call);
@@ -1249,10 +1251,10 @@ static int replace_system_preds(struct event_subsystem *system,
1249 list_for_each_entry(call, &ftrace_events, list) { 1251 list_for_each_entry(call, &ftrace_events, list) {
1250 struct event_filter *filter = call->filter; 1252 struct event_filter *filter = call->filter;
1251 1253
1252 if (!call->define_fields) 1254 if (!call->class || !call->class->define_fields)
1253 continue; 1255 continue;
1254 1256
1255 if (strcmp(call->system, system->name) != 0) 1257 if (strcmp(call->class->system, system->name) != 0)
1256 continue; 1258 continue;
1257 1259
1258 /* try to see if the filter can be applied */ 1260 /* try to see if the filter can be applied */
@@ -1266,7 +1268,7 @@ static int replace_system_preds(struct event_subsystem *system,
1266 if (err) 1268 if (err)
1267 filter_disable_preds(call); 1269 filter_disable_preds(call);
1268 else { 1270 else {
1269 call->filter_active = 1; 1271 call->flags |= TRACE_EVENT_FL_FILTERED;
1270 replace_filter_string(filter, filter_string); 1272 replace_filter_string(filter, filter_string);
1271 } 1273 }
1272 fail = false; 1274 fail = false;
@@ -1315,7 +1317,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1315 if (err) 1317 if (err)
1316 append_filter_err(ps, call->filter); 1318 append_filter_err(ps, call->filter);
1317 else 1319 else
1318 call->filter_active = 1; 1320 call->flags |= TRACE_EVENT_FL_FILTERED;
1319out: 1321out:
1320 filter_opstack_clear(ps); 1322 filter_opstack_clear(ps);
1321 postfix_clear(ps); 1323 postfix_clear(ps);
@@ -1393,7 +1395,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
1393 mutex_lock(&event_mutex); 1395 mutex_lock(&event_mutex);
1394 1396
1395 list_for_each_entry(call, &ftrace_events, list) { 1397 list_for_each_entry(call, &ftrace_events, list) {
1396 if (call->id == event_id) 1398 if (call->event.type == event_id)
1397 break; 1399 break;
1398 } 1400 }
1399 1401
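
The trace_events_filter.c hunks above replace the per-call filter_active field with a bit in call->flags and reach the system name and define_fields callback through call->class. A minimal sketch of that convention, using only names visible in this diff (the helper itself is hypothetical):

	/* Hypothetical helper; TRACE_EVENT_FL_FILTERED and struct
	 * ftrace_event_call are taken as given from the hunks above. */
	static void demo_mark_filtered(struct ftrace_event_call *call, int on)
	{
		if (on)
			call->flags |= TRACE_EVENT_FL_FILTERED;	/* filter attached */
		else
			call->flags &= ~TRACE_EVENT_FL_FILTERED;	/* filter removed */
	}
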
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index e091f64ba6ce..8536e2a65969 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -127,7 +127,7 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
127 127
128static int ftrace_raw_init_event(struct ftrace_event_call *call) 128static int ftrace_raw_init_event(struct ftrace_event_call *call)
129{ 129{
130 INIT_LIST_HEAD(&call->fields); 130 INIT_LIST_HEAD(&call->class->fields);
131 return 0; 131 return 0;
132} 132}
133 133
@@ -153,17 +153,21 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call)
153#define F_printk(fmt, args...) #fmt ", " __stringify(args) 153#define F_printk(fmt, args...) #fmt ", " __stringify(args)
154 154
155#undef FTRACE_ENTRY 155#undef FTRACE_ENTRY
156#define FTRACE_ENTRY(call, struct_name, type, tstruct, print) \ 156#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print) \
157 \
158struct ftrace_event_class event_class_ftrace_##call = { \
159 .system = __stringify(TRACE_SYSTEM), \
160 .define_fields = ftrace_define_fields_##call, \
161 .raw_init = ftrace_raw_init_event, \
162}; \
157 \ 163 \
158struct ftrace_event_call __used \ 164struct ftrace_event_call __used \
159__attribute__((__aligned__(4))) \ 165__attribute__((__aligned__(4))) \
160__attribute__((section("_ftrace_events"))) event_##call = { \ 166__attribute__((section("_ftrace_events"))) event_##call = { \
161 .name = #call, \ 167 .name = #call, \
162 .id = type, \ 168 .event.type = etype, \
163 .system = __stringify(TRACE_SYSTEM), \ 169 .class = &event_class_ftrace_##call, \
164 .raw_init = ftrace_raw_init_event, \
165 .print_fmt = print, \ 170 .print_fmt = print, \
166 .define_fields = ftrace_define_fields_##call, \
167}; \ 171}; \
168 172
169#include "trace_entries.h" 173#include "trace_entries.h"
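
For reference, the reworked FTRACE_ENTRY() macro above would expand for a hypothetical event named "sample" roughly as follows; sample, TRACE_SAMPLE and the print format are placeholders, everything else mirrors the macro body shown in the hunk:

	struct ftrace_event_class event_class_ftrace_sample = {
		.system		= __stringify(TRACE_SYSTEM),
		.define_fields	= ftrace_define_fields_sample,
		.raw_init	= ftrace_raw_init_event,
	};

	struct ftrace_event_call __used
	__attribute__((__aligned__(4)))
	__attribute__((section("_ftrace_events"))) event_sample = {
		.name		= "sample",
		.event.type	= TRACE_SAMPLE,		/* placeholder etype */
		.class		= &event_class_ftrace_sample,
		.print_fmt	= "\"sample\"",		/* placeholder F_printk() result */
	};
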
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index dd11c830eb84..79f4bac99a94 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1025,7 +1025,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1025 if (!event) 1025 if (!event)
1026 return TRACE_TYPE_UNHANDLED; 1026 return TRACE_TYPE_UNHANDLED;
1027 1027
1028 ret = event->trace(iter, sym_flags); 1028 ret = event->funcs->trace(iter, sym_flags, event);
1029 if (ret != TRACE_TYPE_HANDLED) 1029 if (ret != TRACE_TYPE_HANDLED)
1030 return ret; 1030 return ret;
1031 } 1031 }
@@ -1112,7 +1112,8 @@ print_graph_function(struct trace_iterator *iter)
1112} 1112}
1113 1113
1114static enum print_line_t 1114static enum print_line_t
1115print_graph_function_event(struct trace_iterator *iter, int flags) 1115print_graph_function_event(struct trace_iterator *iter, int flags,
1116 struct trace_event *event)
1116{ 1117{
1117 return print_graph_function(iter); 1118 return print_graph_function(iter);
1118} 1119}
@@ -1225,14 +1226,18 @@ void graph_trace_close(struct trace_iterator *iter)
1225 } 1226 }
1226} 1227}
1227 1228
1229static struct trace_event_functions graph_functions = {
1230 .trace = print_graph_function_event,
1231};
1232
1228static struct trace_event graph_trace_entry_event = { 1233static struct trace_event graph_trace_entry_event = {
1229 .type = TRACE_GRAPH_ENT, 1234 .type = TRACE_GRAPH_ENT,
1230 .trace = print_graph_function_event, 1235 .funcs = &graph_functions,
1231}; 1236};
1232 1237
1233static struct trace_event graph_trace_ret_event = { 1238static struct trace_event graph_trace_ret_event = {
1234 .type = TRACE_GRAPH_RET, 1239 .type = TRACE_GRAPH_RET,
1235 .trace = print_graph_function_event, 1240 .funcs = &graph_functions
1236}; 1241};
1237 1242
1238static struct tracer graph_trace __read_mostly = { 1243static struct tracer graph_trace __read_mostly = {
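
The graph tracer hunks above show the new split between struct trace_event and struct trace_event_functions: the print callbacks now take the trace_event as a third argument and live in a shared funcs table. A minimal sketch of an output handler under that layout (the demo_* names are hypothetical; register_ftrace_event() is the function patched in trace_output.c further below):

	static enum print_line_t demo_trace(struct trace_iterator *iter, int flags,
					    struct trace_event *event)
	{
		/* format iter->ent into iter->seq here */
		return TRACE_TYPE_HANDLED;
	}

	static struct trace_event_functions demo_funcs = {
		.trace	= demo_trace,	/* unset handlers default to trace_nop_print */
	};

	static struct trace_event demo_event = {
		.type	= 0,		/* 0 lets register_ftrace_event() assign a type */
		.funcs	= &demo_funcs,
	};

	/* registration now warns and fails if event->funcs is NULL:
	 *	ret = register_ftrace_event(&demo_event);
	 */
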
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index a7514326052b..faf7cefd15da 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -324,8 +324,8 @@ struct trace_probe {
324 unsigned long nhit; 324 unsigned long nhit;
325 unsigned int flags; /* For TP_FLAG_* */ 325 unsigned int flags; /* For TP_FLAG_* */
326 const char *symbol; /* symbol name */ 326 const char *symbol; /* symbol name */
327 struct ftrace_event_class class;
327 struct ftrace_event_call call; 328 struct ftrace_event_call call;
328 struct trace_event event;
329 ssize_t size; /* trace entry size */ 329 ssize_t size; /* trace entry size */
330 unsigned int nr_args; 330 unsigned int nr_args;
331 struct probe_arg args[]; 331 struct probe_arg args[];
@@ -404,6 +404,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
404 goto error; 404 goto error;
405 } 405 }
406 406
407 tp->call.class = &tp->class;
407 tp->call.name = kstrdup(event, GFP_KERNEL); 408 tp->call.name = kstrdup(event, GFP_KERNEL);
408 if (!tp->call.name) 409 if (!tp->call.name)
409 goto error; 410 goto error;
@@ -413,8 +414,8 @@ static struct trace_probe *alloc_trace_probe(const char *group,
413 goto error; 414 goto error;
414 } 415 }
415 416
416 tp->call.system = kstrdup(group, GFP_KERNEL); 417 tp->class.system = kstrdup(group, GFP_KERNEL);
417 if (!tp->call.system) 418 if (!tp->class.system)
418 goto error; 419 goto error;
419 420
420 INIT_LIST_HEAD(&tp->list); 421 INIT_LIST_HEAD(&tp->list);
@@ -443,7 +444,7 @@ static void free_trace_probe(struct trace_probe *tp)
443 for (i = 0; i < tp->nr_args; i++) 444 for (i = 0; i < tp->nr_args; i++)
444 free_probe_arg(&tp->args[i]); 445 free_probe_arg(&tp->args[i]);
445 446
446 kfree(tp->call.system); 447 kfree(tp->call.class->system);
447 kfree(tp->call.name); 448 kfree(tp->call.name);
448 kfree(tp->symbol); 449 kfree(tp->symbol);
449 kfree(tp); 450 kfree(tp);
@@ -456,7 +457,7 @@ static struct trace_probe *find_probe_event(const char *event,
456 457
457 list_for_each_entry(tp, &probe_list, list) 458 list_for_each_entry(tp, &probe_list, list)
458 if (strcmp(tp->call.name, event) == 0 && 459 if (strcmp(tp->call.name, event) == 0 &&
459 strcmp(tp->call.system, group) == 0) 460 strcmp(tp->call.class->system, group) == 0)
460 return tp; 461 return tp;
461 return NULL; 462 return NULL;
462} 463}
@@ -481,7 +482,7 @@ static int register_trace_probe(struct trace_probe *tp)
481 mutex_lock(&probe_lock); 482 mutex_lock(&probe_lock);
482 483
483 /* register as an event */ 484 /* register as an event */
484 old_tp = find_probe_event(tp->call.name, tp->call.system); 485 old_tp = find_probe_event(tp->call.name, tp->call.class->system);
485 if (old_tp) { 486 if (old_tp) {
486 /* delete old event */ 487 /* delete old event */
487 unregister_trace_probe(old_tp); 488 unregister_trace_probe(old_tp);
@@ -904,7 +905,7 @@ static int probes_seq_show(struct seq_file *m, void *v)
904 int i; 905 int i;
905 906
906 seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p'); 907 seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
907 seq_printf(m, ":%s/%s", tp->call.system, tp->call.name); 908 seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
908 909
909 if (!tp->symbol) 910 if (!tp->symbol)
910 seq_printf(m, " 0x%p", tp->rp.kp.addr); 911 seq_printf(m, " 0x%p", tp->rp.kp.addr);
@@ -1061,8 +1062,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
1061 1062
1062 size = sizeof(*entry) + tp->size; 1063 size = sizeof(*entry) + tp->size;
1063 1064
1064 event = trace_current_buffer_lock_reserve(&buffer, call->id, size, 1065 event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
1065 irq_flags, pc); 1066 size, irq_flags, pc);
1066 if (!event) 1067 if (!event)
1067 return; 1068 return;
1068 1069
@@ -1094,8 +1095,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
1094 1095
1095 size = sizeof(*entry) + tp->size; 1096 size = sizeof(*entry) + tp->size;
1096 1097
1097 event = trace_current_buffer_lock_reserve(&buffer, call->id, size, 1098 event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
1098 irq_flags, pc); 1099 size, irq_flags, pc);
1099 if (!event) 1100 if (!event)
1100 return; 1101 return;
1101 1102
@@ -1112,18 +1113,17 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
1112 1113
1113/* Event entry printers */ 1114/* Event entry printers */
1114enum print_line_t 1115enum print_line_t
1115print_kprobe_event(struct trace_iterator *iter, int flags) 1116print_kprobe_event(struct trace_iterator *iter, int flags,
1117 struct trace_event *event)
1116{ 1118{
1117 struct kprobe_trace_entry_head *field; 1119 struct kprobe_trace_entry_head *field;
1118 struct trace_seq *s = &iter->seq; 1120 struct trace_seq *s = &iter->seq;
1119 struct trace_event *event;
1120 struct trace_probe *tp; 1121 struct trace_probe *tp;
1121 u8 *data; 1122 u8 *data;
1122 int i; 1123 int i;
1123 1124
1124 field = (struct kprobe_trace_entry_head *)iter->ent; 1125 field = (struct kprobe_trace_entry_head *)iter->ent;
1125 event = ftrace_find_event(field->ent.type); 1126 tp = container_of(event, struct trace_probe, call.event);
1126 tp = container_of(event, struct trace_probe, event);
1127 1127
1128 if (!trace_seq_printf(s, "%s: (", tp->call.name)) 1128 if (!trace_seq_printf(s, "%s: (", tp->call.name))
1129 goto partial; 1129 goto partial;
@@ -1149,18 +1149,17 @@ partial:
1149} 1149}
1150 1150
1151enum print_line_t 1151enum print_line_t
1152print_kretprobe_event(struct trace_iterator *iter, int flags) 1152print_kretprobe_event(struct trace_iterator *iter, int flags,
1153 struct trace_event *event)
1153{ 1154{
1154 struct kretprobe_trace_entry_head *field; 1155 struct kretprobe_trace_entry_head *field;
1155 struct trace_seq *s = &iter->seq; 1156 struct trace_seq *s = &iter->seq;
1156 struct trace_event *event;
1157 struct trace_probe *tp; 1157 struct trace_probe *tp;
1158 u8 *data; 1158 u8 *data;
1159 int i; 1159 int i;
1160 1160
1161 field = (struct kretprobe_trace_entry_head *)iter->ent; 1161 field = (struct kretprobe_trace_entry_head *)iter->ent;
1162 event = ftrace_find_event(field->ent.type); 1162 tp = container_of(event, struct trace_probe, call.event);
1163 tp = container_of(event, struct trace_probe, event);
1164 1163
1165 if (!trace_seq_printf(s, "%s: (", tp->call.name)) 1164 if (!trace_seq_printf(s, "%s: (", tp->call.name))
1166 goto partial; 1165 goto partial;
@@ -1217,8 +1216,6 @@ static void probe_event_disable(struct ftrace_event_call *call)
1217 1216
1218static int probe_event_raw_init(struct ftrace_event_call *event_call) 1217static int probe_event_raw_init(struct ftrace_event_call *event_call)
1219{ 1218{
1220 INIT_LIST_HEAD(&event_call->fields);
1221
1222 return 0; 1219 return 0;
1223} 1220}
1224 1221
@@ -1341,9 +1338,9 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
1341 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); 1338 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1342 struct ftrace_event_call *call = &tp->call; 1339 struct ftrace_event_call *call = &tp->call;
1343 struct kprobe_trace_entry_head *entry; 1340 struct kprobe_trace_entry_head *entry;
1341 struct hlist_head *head;
1344 u8 *data; 1342 u8 *data;
1345 int size, __size, i; 1343 int size, __size, i;
1346 unsigned long irq_flags;
1347 int rctx; 1344 int rctx;
1348 1345
1349 __size = sizeof(*entry) + tp->size; 1346 __size = sizeof(*entry) + tp->size;
@@ -1353,7 +1350,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
1353 "profile buffer not large enough")) 1350 "profile buffer not large enough"))
1354 return; 1351 return;
1355 1352
1356 entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); 1353 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1357 if (!entry) 1354 if (!entry)
1358 return; 1355 return;
1359 1356
@@ -1362,7 +1359,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
1362 for (i = 0; i < tp->nr_args; i++) 1359 for (i = 0; i < tp->nr_args; i++)
1363 call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); 1360 call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
1364 1361
1365 perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); 1362 head = per_cpu_ptr(call->perf_events, smp_processor_id());
1363 perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
1366} 1364}
1367 1365
1368/* Kretprobe profile handler */ 1366/* Kretprobe profile handler */
@@ -1372,9 +1370,9 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
1372 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); 1370 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1373 struct ftrace_event_call *call = &tp->call; 1371 struct ftrace_event_call *call = &tp->call;
1374 struct kretprobe_trace_entry_head *entry; 1372 struct kretprobe_trace_entry_head *entry;
1373 struct hlist_head *head;
1375 u8 *data; 1374 u8 *data;
1376 int size, __size, i; 1375 int size, __size, i;
1377 unsigned long irq_flags;
1378 int rctx; 1376 int rctx;
1379 1377
1380 __size = sizeof(*entry) + tp->size; 1378 __size = sizeof(*entry) + tp->size;
@@ -1384,7 +1382,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
1384 "profile buffer not large enough")) 1382 "profile buffer not large enough"))
1385 return; 1383 return;
1386 1384
1387 entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); 1385 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1388 if (!entry) 1386 if (!entry)
1389 return; 1387 return;
1390 1388
@@ -1394,8 +1392,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
1394 for (i = 0; i < tp->nr_args; i++) 1392 for (i = 0; i < tp->nr_args; i++)
1395 call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); 1393 call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
1396 1394
1397 perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, 1395 head = per_cpu_ptr(call->perf_events, smp_processor_id());
1398 irq_flags, regs); 1396 perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
1399} 1397}
1400 1398
1401static int probe_perf_enable(struct ftrace_event_call *call) 1399static int probe_perf_enable(struct ftrace_event_call *call)
@@ -1425,6 +1423,26 @@ static void probe_perf_disable(struct ftrace_event_call *call)
1425} 1423}
1426#endif /* CONFIG_PERF_EVENTS */ 1424#endif /* CONFIG_PERF_EVENTS */
1427 1425
1426static __kprobes
1427int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
1428{
1429 switch (type) {
1430 case TRACE_REG_REGISTER:
1431 return probe_event_enable(event);
1432 case TRACE_REG_UNREGISTER:
1433 probe_event_disable(event);
1434 return 0;
1435
1436#ifdef CONFIG_PERF_EVENTS
1437 case TRACE_REG_PERF_REGISTER:
1438 return probe_perf_enable(event);
1439 case TRACE_REG_PERF_UNREGISTER:
1440 probe_perf_disable(event);
1441 return 0;
1442#endif
1443 }
1444 return 0;
1445}
1428 1446
1429static __kprobes 1447static __kprobes
1430int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) 1448int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
@@ -1454,6 +1472,14 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1454 return 0; /* We don't tweek kernel, so just return 0 */ 1472 return 0; /* We don't tweek kernel, so just return 0 */
1455} 1473}
1456 1474
1475static struct trace_event_functions kretprobe_funcs = {
1476 .trace = print_kretprobe_event
1477};
1478
1479static struct trace_event_functions kprobe_funcs = {
1480 .trace = print_kprobe_event
1481};
1482
1457static int register_probe_event(struct trace_probe *tp) 1483static int register_probe_event(struct trace_probe *tp)
1458{ 1484{
1459 struct ftrace_event_call *call = &tp->call; 1485 struct ftrace_event_call *call = &tp->call;
@@ -1461,36 +1487,31 @@ static int register_probe_event(struct trace_probe *tp)
1461 1487
1462 /* Initialize ftrace_event_call */ 1488 /* Initialize ftrace_event_call */
1463 if (probe_is_return(tp)) { 1489 if (probe_is_return(tp)) {
1464 tp->event.trace = print_kretprobe_event; 1490 INIT_LIST_HEAD(&call->class->fields);
1465 call->raw_init = probe_event_raw_init; 1491 call->event.funcs = &kretprobe_funcs;
1466 call->define_fields = kretprobe_event_define_fields; 1492 call->class->raw_init = probe_event_raw_init;
1493 call->class->define_fields = kretprobe_event_define_fields;
1467 } else { 1494 } else {
1468 tp->event.trace = print_kprobe_event; 1495 INIT_LIST_HEAD(&call->class->fields);
1469 call->raw_init = probe_event_raw_init; 1496 call->event.funcs = &kprobe_funcs;
1470 call->define_fields = kprobe_event_define_fields; 1497 call->class->raw_init = probe_event_raw_init;
1498 call->class->define_fields = kprobe_event_define_fields;
1471 } 1499 }
1472 if (set_print_fmt(tp) < 0) 1500 if (set_print_fmt(tp) < 0)
1473 return -ENOMEM; 1501 return -ENOMEM;
1474 call->event = &tp->event; 1502 ret = register_ftrace_event(&call->event);
1475 call->id = register_ftrace_event(&tp->event); 1503 if (!ret) {
1476 if (!call->id) {
1477 kfree(call->print_fmt); 1504 kfree(call->print_fmt);
1478 return -ENODEV; 1505 return -ENODEV;
1479 } 1506 }
1480 call->enabled = 0; 1507 call->flags = 0;
1481 call->regfunc = probe_event_enable; 1508 call->class->reg = kprobe_register;
1482 call->unregfunc = probe_event_disable;
1483
1484#ifdef CONFIG_PERF_EVENTS
1485 call->perf_event_enable = probe_perf_enable;
1486 call->perf_event_disable = probe_perf_disable;
1487#endif
1488 call->data = tp; 1509 call->data = tp;
1489 ret = trace_add_event_call(call); 1510 ret = trace_add_event_call(call);
1490 if (ret) { 1511 if (ret) {
1491 pr_info("Failed to register kprobe event: %s\n", call->name); 1512 pr_info("Failed to register kprobe event: %s\n", call->name);
1492 kfree(call->print_fmt); 1513 kfree(call->print_fmt);
1493 unregister_ftrace_event(&tp->event); 1514 unregister_ftrace_event(&call->event);
1494 } 1515 }
1495 return ret; 1516 return ret;
1496} 1517}
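
register_probe_event() above now routes enable/disable through a single class->reg callback instead of the old regfunc/unregfunc and perf_event_enable/disable pointers. The shape of such a callback, mirroring kprobe_register() from the hunks above (the demo_* enable/disable functions are placeholders):

	static int demo_register(struct ftrace_event_call *event, enum trace_reg type)
	{
		switch (type) {
		case TRACE_REG_REGISTER:
			return demo_event_enable(event);
		case TRACE_REG_UNREGISTER:
			demo_event_disable(event);
			return 0;
	#ifdef CONFIG_PERF_EVENTS
		case TRACE_REG_PERF_REGISTER:
			return demo_perf_enable(event);
		case TRACE_REG_PERF_UNREGISTER:
			demo_perf_disable(event);
			return 0;
	#endif
		}
		return 0;
	}

	/* hooked up as: call->class->reg = demo_register; */
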
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index ab13d7008061..57c1b4596470 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -742,6 +742,9 @@ int register_ftrace_event(struct trace_event *event)
742 if (WARN_ON(!event)) 742 if (WARN_ON(!event))
743 goto out; 743 goto out;
744 744
745 if (WARN_ON(!event->funcs))
746 goto out;
747
745 INIT_LIST_HEAD(&event->list); 748 INIT_LIST_HEAD(&event->list);
746 749
747 if (!event->type) { 750 if (!event->type) {
@@ -774,14 +777,14 @@ int register_ftrace_event(struct trace_event *event)
774 goto out; 777 goto out;
775 } 778 }
776 779
777 if (event->trace == NULL) 780 if (event->funcs->trace == NULL)
778 event->trace = trace_nop_print; 781 event->funcs->trace = trace_nop_print;
779 if (event->raw == NULL) 782 if (event->funcs->raw == NULL)
780 event->raw = trace_nop_print; 783 event->funcs->raw = trace_nop_print;
781 if (event->hex == NULL) 784 if (event->funcs->hex == NULL)
782 event->hex = trace_nop_print; 785 event->funcs->hex = trace_nop_print;
783 if (event->binary == NULL) 786 if (event->funcs->binary == NULL)
784 event->binary = trace_nop_print; 787 event->funcs->binary = trace_nop_print;
785 788
786 key = event->type & (EVENT_HASHSIZE - 1); 789 key = event->type & (EVENT_HASHSIZE - 1);
787 790
@@ -823,13 +826,15 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_event);
823 * Standard events 826 * Standard events
824 */ 827 */
825 828
826enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags) 829enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
830 struct trace_event *event)
827{ 831{
828 return TRACE_TYPE_HANDLED; 832 return TRACE_TYPE_HANDLED;
829} 833}
830 834
831/* TRACE_FN */ 835/* TRACE_FN */
832static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags) 836static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
837 struct trace_event *event)
833{ 838{
834 struct ftrace_entry *field; 839 struct ftrace_entry *field;
835 struct trace_seq *s = &iter->seq; 840 struct trace_seq *s = &iter->seq;
@@ -856,7 +861,8 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
856 return TRACE_TYPE_PARTIAL_LINE; 861 return TRACE_TYPE_PARTIAL_LINE;
857} 862}
858 863
859static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags) 864static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
865 struct trace_event *event)
860{ 866{
861 struct ftrace_entry *field; 867 struct ftrace_entry *field;
862 868
@@ -870,7 +876,8 @@ static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
870 return TRACE_TYPE_HANDLED; 876 return TRACE_TYPE_HANDLED;
871} 877}
872 878
873static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags) 879static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
880 struct trace_event *event)
874{ 881{
875 struct ftrace_entry *field; 882 struct ftrace_entry *field;
876 struct trace_seq *s = &iter->seq; 883 struct trace_seq *s = &iter->seq;
@@ -883,7 +890,8 @@ static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
883 return TRACE_TYPE_HANDLED; 890 return TRACE_TYPE_HANDLED;
884} 891}
885 892
886static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags) 893static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
894 struct trace_event *event)
887{ 895{
888 struct ftrace_entry *field; 896 struct ftrace_entry *field;
889 struct trace_seq *s = &iter->seq; 897 struct trace_seq *s = &iter->seq;
@@ -896,14 +904,18 @@ static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
896 return TRACE_TYPE_HANDLED; 904 return TRACE_TYPE_HANDLED;
897} 905}
898 906
899static struct trace_event trace_fn_event = { 907static struct trace_event_functions trace_fn_funcs = {
900 .type = TRACE_FN,
901 .trace = trace_fn_trace, 908 .trace = trace_fn_trace,
902 .raw = trace_fn_raw, 909 .raw = trace_fn_raw,
903 .hex = trace_fn_hex, 910 .hex = trace_fn_hex,
904 .binary = trace_fn_bin, 911 .binary = trace_fn_bin,
905}; 912};
906 913
914static struct trace_event trace_fn_event = {
915 .type = TRACE_FN,
916 .funcs = &trace_fn_funcs,
917};
918
907/* TRACE_CTX an TRACE_WAKE */ 919/* TRACE_CTX an TRACE_WAKE */
908static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter, 920static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
909 char *delim) 921 char *delim)
@@ -932,13 +944,14 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
932 return TRACE_TYPE_HANDLED; 944 return TRACE_TYPE_HANDLED;
933} 945}
934 946
935static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags) 947static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
948 struct trace_event *event)
936{ 949{
937 return trace_ctxwake_print(iter, "==>"); 950 return trace_ctxwake_print(iter, "==>");
938} 951}
939 952
940static enum print_line_t trace_wake_print(struct trace_iterator *iter, 953static enum print_line_t trace_wake_print(struct trace_iterator *iter,
941 int flags) 954 int flags, struct trace_event *event)
942{ 955{
943 return trace_ctxwake_print(iter, " +"); 956 return trace_ctxwake_print(iter, " +");
944} 957}
@@ -966,12 +979,14 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
966 return TRACE_TYPE_HANDLED; 979 return TRACE_TYPE_HANDLED;
967} 980}
968 981
969static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags) 982static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
983 struct trace_event *event)
970{ 984{
971 return trace_ctxwake_raw(iter, 0); 985 return trace_ctxwake_raw(iter, 0);
972} 986}
973 987
974static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags) 988static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
989 struct trace_event *event)
975{ 990{
976 return trace_ctxwake_raw(iter, '+'); 991 return trace_ctxwake_raw(iter, '+');
977} 992}
@@ -1000,18 +1015,20 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
1000 return TRACE_TYPE_HANDLED; 1015 return TRACE_TYPE_HANDLED;
1001} 1016}
1002 1017
1003static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags) 1018static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
1019 struct trace_event *event)
1004{ 1020{
1005 return trace_ctxwake_hex(iter, 0); 1021 return trace_ctxwake_hex(iter, 0);
1006} 1022}
1007 1023
1008static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags) 1024static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
1025 struct trace_event *event)
1009{ 1026{
1010 return trace_ctxwake_hex(iter, '+'); 1027 return trace_ctxwake_hex(iter, '+');
1011} 1028}
1012 1029
1013static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter, 1030static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
1014 int flags) 1031 int flags, struct trace_event *event)
1015{ 1032{
1016 struct ctx_switch_entry *field; 1033 struct ctx_switch_entry *field;
1017 struct trace_seq *s = &iter->seq; 1034 struct trace_seq *s = &iter->seq;
@@ -1028,25 +1045,33 @@ static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
1028 return TRACE_TYPE_HANDLED; 1045 return TRACE_TYPE_HANDLED;
1029} 1046}
1030 1047
1031static struct trace_event trace_ctx_event = { 1048static struct trace_event_functions trace_ctx_funcs = {
1032 .type = TRACE_CTX,
1033 .trace = trace_ctx_print, 1049 .trace = trace_ctx_print,
1034 .raw = trace_ctx_raw, 1050 .raw = trace_ctx_raw,
1035 .hex = trace_ctx_hex, 1051 .hex = trace_ctx_hex,
1036 .binary = trace_ctxwake_bin, 1052 .binary = trace_ctxwake_bin,
1037}; 1053};
1038 1054
1039static struct trace_event trace_wake_event = { 1055static struct trace_event trace_ctx_event = {
1040 .type = TRACE_WAKE, 1056 .type = TRACE_CTX,
1057 .funcs = &trace_ctx_funcs,
1058};
1059
1060static struct trace_event_functions trace_wake_funcs = {
1041 .trace = trace_wake_print, 1061 .trace = trace_wake_print,
1042 .raw = trace_wake_raw, 1062 .raw = trace_wake_raw,
1043 .hex = trace_wake_hex, 1063 .hex = trace_wake_hex,
1044 .binary = trace_ctxwake_bin, 1064 .binary = trace_ctxwake_bin,
1045}; 1065};
1046 1066
1067static struct trace_event trace_wake_event = {
1068 .type = TRACE_WAKE,
1069 .funcs = &trace_wake_funcs,
1070};
1071
1047/* TRACE_SPECIAL */ 1072/* TRACE_SPECIAL */
1048static enum print_line_t trace_special_print(struct trace_iterator *iter, 1073static enum print_line_t trace_special_print(struct trace_iterator *iter,
1049 int flags) 1074 int flags, struct trace_event *event)
1050{ 1075{
1051 struct special_entry *field; 1076 struct special_entry *field;
1052 1077
@@ -1062,7 +1087,7 @@ static enum print_line_t trace_special_print(struct trace_iterator *iter,
1062} 1087}
1063 1088
1064static enum print_line_t trace_special_hex(struct trace_iterator *iter, 1089static enum print_line_t trace_special_hex(struct trace_iterator *iter,
1065 int flags) 1090 int flags, struct trace_event *event)
1066{ 1091{
1067 struct special_entry *field; 1092 struct special_entry *field;
1068 struct trace_seq *s = &iter->seq; 1093 struct trace_seq *s = &iter->seq;
@@ -1077,7 +1102,7 @@ static enum print_line_t trace_special_hex(struct trace_iterator *iter,
1077} 1102}
1078 1103
1079static enum print_line_t trace_special_bin(struct trace_iterator *iter, 1104static enum print_line_t trace_special_bin(struct trace_iterator *iter,
1080 int flags) 1105 int flags, struct trace_event *event)
1081{ 1106{
1082 struct special_entry *field; 1107 struct special_entry *field;
1083 struct trace_seq *s = &iter->seq; 1108 struct trace_seq *s = &iter->seq;
@@ -1091,18 +1116,22 @@ static enum print_line_t trace_special_bin(struct trace_iterator *iter,
1091 return TRACE_TYPE_HANDLED; 1116 return TRACE_TYPE_HANDLED;
1092} 1117}
1093 1118
1094static struct trace_event trace_special_event = { 1119static struct trace_event_functions trace_special_funcs = {
1095 .type = TRACE_SPECIAL,
1096 .trace = trace_special_print, 1120 .trace = trace_special_print,
1097 .raw = trace_special_print, 1121 .raw = trace_special_print,
1098 .hex = trace_special_hex, 1122 .hex = trace_special_hex,
1099 .binary = trace_special_bin, 1123 .binary = trace_special_bin,
1100}; 1124};
1101 1125
1126static struct trace_event trace_special_event = {
1127 .type = TRACE_SPECIAL,
1128 .funcs = &trace_special_funcs,
1129};
1130
1102/* TRACE_STACK */ 1131/* TRACE_STACK */
1103 1132
1104static enum print_line_t trace_stack_print(struct trace_iterator *iter, 1133static enum print_line_t trace_stack_print(struct trace_iterator *iter,
1105 int flags) 1134 int flags, struct trace_event *event)
1106{ 1135{
1107 struct stack_entry *field; 1136 struct stack_entry *field;
1108 struct trace_seq *s = &iter->seq; 1137 struct trace_seq *s = &iter->seq;
@@ -1130,17 +1159,21 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
1130 return TRACE_TYPE_PARTIAL_LINE; 1159 return TRACE_TYPE_PARTIAL_LINE;
1131} 1160}
1132 1161
1133static struct trace_event trace_stack_event = { 1162static struct trace_event_functions trace_stack_funcs = {
1134 .type = TRACE_STACK,
1135 .trace = trace_stack_print, 1163 .trace = trace_stack_print,
1136 .raw = trace_special_print, 1164 .raw = trace_special_print,
1137 .hex = trace_special_hex, 1165 .hex = trace_special_hex,
1138 .binary = trace_special_bin, 1166 .binary = trace_special_bin,
1139}; 1167};
1140 1168
1169static struct trace_event trace_stack_event = {
1170 .type = TRACE_STACK,
1171 .funcs = &trace_stack_funcs,
1172};
1173
1141/* TRACE_USER_STACK */ 1174/* TRACE_USER_STACK */
1142static enum print_line_t trace_user_stack_print(struct trace_iterator *iter, 1175static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
1143 int flags) 1176 int flags, struct trace_event *event)
1144{ 1177{
1145 struct userstack_entry *field; 1178 struct userstack_entry *field;
1146 struct trace_seq *s = &iter->seq; 1179 struct trace_seq *s = &iter->seq;
@@ -1159,17 +1192,22 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
1159 return TRACE_TYPE_PARTIAL_LINE; 1192 return TRACE_TYPE_PARTIAL_LINE;
1160} 1193}
1161 1194
1162static struct trace_event trace_user_stack_event = { 1195static struct trace_event_functions trace_user_stack_funcs = {
1163 .type = TRACE_USER_STACK,
1164 .trace = trace_user_stack_print, 1196 .trace = trace_user_stack_print,
1165 .raw = trace_special_print, 1197 .raw = trace_special_print,
1166 .hex = trace_special_hex, 1198 .hex = trace_special_hex,
1167 .binary = trace_special_bin, 1199 .binary = trace_special_bin,
1168}; 1200};
1169 1201
1202static struct trace_event trace_user_stack_event = {
1203 .type = TRACE_USER_STACK,
1204 .funcs = &trace_user_stack_funcs,
1205};
1206
1170/* TRACE_BPRINT */ 1207/* TRACE_BPRINT */
1171static enum print_line_t 1208static enum print_line_t
1172trace_bprint_print(struct trace_iterator *iter, int flags) 1209trace_bprint_print(struct trace_iterator *iter, int flags,
1210 struct trace_event *event)
1173{ 1211{
1174 struct trace_entry *entry = iter->ent; 1212 struct trace_entry *entry = iter->ent;
1175 struct trace_seq *s = &iter->seq; 1213 struct trace_seq *s = &iter->seq;
@@ -1194,7 +1232,8 @@ trace_bprint_print(struct trace_iterator *iter, int flags)
1194 1232
1195 1233
1196static enum print_line_t 1234static enum print_line_t
1197trace_bprint_raw(struct trace_iterator *iter, int flags) 1235trace_bprint_raw(struct trace_iterator *iter, int flags,
1236 struct trace_event *event)
1198{ 1237{
1199 struct bprint_entry *field; 1238 struct bprint_entry *field;
1200 struct trace_seq *s = &iter->seq; 1239 struct trace_seq *s = &iter->seq;
@@ -1213,16 +1252,19 @@ trace_bprint_raw(struct trace_iterator *iter, int flags)
1213 return TRACE_TYPE_PARTIAL_LINE; 1252 return TRACE_TYPE_PARTIAL_LINE;
1214} 1253}
1215 1254
1255static struct trace_event_functions trace_bprint_funcs = {
1256 .trace = trace_bprint_print,
1257 .raw = trace_bprint_raw,
1258};
1216 1259
1217static struct trace_event trace_bprint_event = { 1260static struct trace_event trace_bprint_event = {
1218 .type = TRACE_BPRINT, 1261 .type = TRACE_BPRINT,
1219 .trace = trace_bprint_print, 1262 .funcs = &trace_bprint_funcs,
1220 .raw = trace_bprint_raw,
1221}; 1263};
1222 1264
1223/* TRACE_PRINT */ 1265/* TRACE_PRINT */
1224static enum print_line_t trace_print_print(struct trace_iterator *iter, 1266static enum print_line_t trace_print_print(struct trace_iterator *iter,
1225 int flags) 1267 int flags, struct trace_event *event)
1226{ 1268{
1227 struct print_entry *field; 1269 struct print_entry *field;
1228 struct trace_seq *s = &iter->seq; 1270 struct trace_seq *s = &iter->seq;
@@ -1241,7 +1283,8 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter,
1241 return TRACE_TYPE_PARTIAL_LINE; 1283 return TRACE_TYPE_PARTIAL_LINE;
1242} 1284}
1243 1285
1244static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags) 1286static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
1287 struct trace_event *event)
1245{ 1288{
1246 struct print_entry *field; 1289 struct print_entry *field;
1247 1290
@@ -1256,12 +1299,16 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
1256 return TRACE_TYPE_PARTIAL_LINE; 1299 return TRACE_TYPE_PARTIAL_LINE;
1257} 1300}
1258 1301
1259static struct trace_event trace_print_event = { 1302static struct trace_event_functions trace_print_funcs = {
1260 .type = TRACE_PRINT,
1261 .trace = trace_print_print, 1303 .trace = trace_print_print,
1262 .raw = trace_print_raw, 1304 .raw = trace_print_raw,
1263}; 1305};
1264 1306
1307static struct trace_event trace_print_event = {
1308 .type = TRACE_PRINT,
1309 .funcs = &trace_print_funcs,
1310};
1311
1265 1312
1266static struct trace_event *events[] __initdata = { 1313static struct trace_event *events[] __initdata = {
1267 &trace_fn_event, 1314 &trace_fn_event,
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index 9d91c72ba38b..c038eba0492b 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -25,7 +25,7 @@ extern void trace_event_read_unlock(void);
25extern struct trace_event *ftrace_find_event(int type); 25extern struct trace_event *ftrace_find_event(int type);
26 26
27extern enum print_line_t trace_nop_print(struct trace_iterator *iter, 27extern enum print_line_t trace_nop_print(struct trace_iterator *iter,
28 int flags); 28 int flags, struct trace_event *event);
29extern int 29extern int
30trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry); 30trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry);
31 31
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index a55fccfede5d..8f758d070c43 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -50,7 +50,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
50} 50}
51 51
52static void 52static void
53probe_sched_switch(struct task_struct *prev, struct task_struct *next) 53probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
54{ 54{
55 struct trace_array_cpu *data; 55 struct trace_array_cpu *data;
56 unsigned long flags; 56 unsigned long flags;
@@ -108,7 +108,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
108} 108}
109 109
110static void 110static void
111probe_sched_wakeup(struct task_struct *wakee, int success) 111probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
112{ 112{
113 struct trace_array_cpu *data; 113 struct trace_array_cpu *data;
114 unsigned long flags; 114 unsigned long flags;
@@ -138,21 +138,21 @@ static int tracing_sched_register(void)
138{ 138{
139 int ret; 139 int ret;
140 140
141 ret = register_trace_sched_wakeup(probe_sched_wakeup); 141 ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
142 if (ret) { 142 if (ret) {
143 pr_info("wakeup trace: Couldn't activate tracepoint" 143 pr_info("wakeup trace: Couldn't activate tracepoint"
144 " probe to kernel_sched_wakeup\n"); 144 " probe to kernel_sched_wakeup\n");
145 return ret; 145 return ret;
146 } 146 }
147 147
148 ret = register_trace_sched_wakeup_new(probe_sched_wakeup); 148 ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
149 if (ret) { 149 if (ret) {
150 pr_info("wakeup trace: Couldn't activate tracepoint" 150 pr_info("wakeup trace: Couldn't activate tracepoint"
151 " probe to kernel_sched_wakeup_new\n"); 151 " probe to kernel_sched_wakeup_new\n");
152 goto fail_deprobe; 152 goto fail_deprobe;
153 } 153 }
154 154
155 ret = register_trace_sched_switch(probe_sched_switch); 155 ret = register_trace_sched_switch(probe_sched_switch, NULL);
156 if (ret) { 156 if (ret) {
157 pr_info("sched trace: Couldn't activate tracepoint" 157 pr_info("sched trace: Couldn't activate tracepoint"
158 " probe to kernel_sched_switch\n"); 158 " probe to kernel_sched_switch\n");
@@ -161,17 +161,17 @@ static int tracing_sched_register(void)
161 161
162 return ret; 162 return ret;
163fail_deprobe_wake_new: 163fail_deprobe_wake_new:
164 unregister_trace_sched_wakeup_new(probe_sched_wakeup); 164 unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
165fail_deprobe: 165fail_deprobe:
166 unregister_trace_sched_wakeup(probe_sched_wakeup); 166 unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
167 return ret; 167 return ret;
168} 168}
169 169
170static void tracing_sched_unregister(void) 170static void tracing_sched_unregister(void)
171{ 171{
172 unregister_trace_sched_switch(probe_sched_switch); 172 unregister_trace_sched_switch(probe_sched_switch, NULL);
173 unregister_trace_sched_wakeup_new(probe_sched_wakeup); 173 unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
174 unregister_trace_sched_wakeup(probe_sched_wakeup); 174 unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
175} 175}
176 176
177static void tracing_start_sched_switch(void) 177static void tracing_start_sched_switch(void)
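
The sched-switch hunks above reflect the new tracepoint probe convention: every probe gains a leading void *data argument, and the register/unregister helpers take that data pointer as a second argument (NULL throughout this patch). A minimal sketch, assuming the register_trace_sched_switch() pair shown above:

	static void demo_probe(void *ignore,
			       struct task_struct *prev, struct task_struct *next)
	{
		/* per-probe private data would arrive via the first argument */
	}

	/* registration and teardown now pass the private-data pointer:
	 *	ret = register_trace_sched_switch(demo_probe, NULL);
	 *	...
	 *	unregister_trace_sched_switch(demo_probe, NULL);
	 */
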
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 8052446ceeaa..0e73bc2ef8c5 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -98,7 +98,8 @@ static int report_latency(cycle_t delta)
98 return 1; 98 return 1;
99} 99}
100 100
101static void probe_wakeup_migrate_task(struct task_struct *task, int cpu) 101static void
102probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
102{ 103{
103 if (task != wakeup_task) 104 if (task != wakeup_task)
104 return; 105 return;
@@ -107,7 +108,8 @@ static void probe_wakeup_migrate_task(struct task_struct *task, int cpu)
107} 108}
108 109
109static void notrace 110static void notrace
110probe_wakeup_sched_switch(struct task_struct *prev, struct task_struct *next) 111probe_wakeup_sched_switch(void *ignore,
112 struct task_struct *prev, struct task_struct *next)
111{ 113{
112 struct trace_array_cpu *data; 114 struct trace_array_cpu *data;
113 cycle_t T0, T1, delta; 115 cycle_t T0, T1, delta;
@@ -199,7 +201,7 @@ static void wakeup_reset(struct trace_array *tr)
199} 201}
200 202
201static void 203static void
202probe_wakeup(struct task_struct *p, int success) 204probe_wakeup(void *ignore, struct task_struct *p, int success)
203{ 205{
204 struct trace_array_cpu *data; 206 struct trace_array_cpu *data;
205 int cpu = smp_processor_id(); 207 int cpu = smp_processor_id();
@@ -263,28 +265,28 @@ static void start_wakeup_tracer(struct trace_array *tr)
263{ 265{
264 int ret; 266 int ret;
265 267
266 ret = register_trace_sched_wakeup(probe_wakeup); 268 ret = register_trace_sched_wakeup(probe_wakeup, NULL);
267 if (ret) { 269 if (ret) {
268 pr_info("wakeup trace: Couldn't activate tracepoint" 270 pr_info("wakeup trace: Couldn't activate tracepoint"
269 " probe to kernel_sched_wakeup\n"); 271 " probe to kernel_sched_wakeup\n");
270 return; 272 return;
271 } 273 }
272 274
273 ret = register_trace_sched_wakeup_new(probe_wakeup); 275 ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
274 if (ret) { 276 if (ret) {
275 pr_info("wakeup trace: Couldn't activate tracepoint" 277 pr_info("wakeup trace: Couldn't activate tracepoint"
276 " probe to kernel_sched_wakeup_new\n"); 278 " probe to kernel_sched_wakeup_new\n");
277 goto fail_deprobe; 279 goto fail_deprobe;
278 } 280 }
279 281
280 ret = register_trace_sched_switch(probe_wakeup_sched_switch); 282 ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
281 if (ret) { 283 if (ret) {
282 pr_info("sched trace: Couldn't activate tracepoint" 284 pr_info("sched trace: Couldn't activate tracepoint"
283 " probe to kernel_sched_switch\n"); 285 " probe to kernel_sched_switch\n");
284 goto fail_deprobe_wake_new; 286 goto fail_deprobe_wake_new;
285 } 287 }
286 288
287 ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task); 289 ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
288 if (ret) { 290 if (ret) {
289 pr_info("wakeup trace: Couldn't activate tracepoint" 291 pr_info("wakeup trace: Couldn't activate tracepoint"
290 " probe to kernel_sched_migrate_task\n"); 292 " probe to kernel_sched_migrate_task\n");
@@ -311,19 +313,19 @@ static void start_wakeup_tracer(struct trace_array *tr)
311 313
312 return; 314 return;
313fail_deprobe_wake_new: 315fail_deprobe_wake_new:
314 unregister_trace_sched_wakeup_new(probe_wakeup); 316 unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
315fail_deprobe: 317fail_deprobe:
316 unregister_trace_sched_wakeup(probe_wakeup); 318 unregister_trace_sched_wakeup(probe_wakeup, NULL);
317} 319}
318 320
319static void stop_wakeup_tracer(struct trace_array *tr) 321static void stop_wakeup_tracer(struct trace_array *tr)
320{ 322{
321 tracer_enabled = 0; 323 tracer_enabled = 0;
322 unregister_ftrace_function(&trace_ops); 324 unregister_ftrace_function(&trace_ops);
323 unregister_trace_sched_switch(probe_wakeup_sched_switch); 325 unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
324 unregister_trace_sched_wakeup_new(probe_wakeup); 326 unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
325 unregister_trace_sched_wakeup(probe_wakeup); 327 unregister_trace_sched_wakeup(probe_wakeup, NULL);
326 unregister_trace_sched_migrate_task(probe_wakeup_migrate_task); 328 unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
327} 329}
328 330
329static int __wakeup_tracer_init(struct trace_array *tr) 331static int __wakeup_tracer_init(struct trace_array *tr)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 4d6d711717f2..d2c859cec9ea 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -15,6 +15,54 @@ static int sys_refcount_exit;
15static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); 15static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
16static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); 16static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
17 17
18static int syscall_enter_register(struct ftrace_event_call *event,
19 enum trace_reg type);
20static int syscall_exit_register(struct ftrace_event_call *event,
21 enum trace_reg type);
22
23static int syscall_enter_define_fields(struct ftrace_event_call *call);
24static int syscall_exit_define_fields(struct ftrace_event_call *call);
25
26static struct list_head *
27syscall_get_enter_fields(struct ftrace_event_call *call)
28{
29 struct syscall_metadata *entry = call->data;
30
31 return &entry->enter_fields;
32}
33
34static struct list_head *
35syscall_get_exit_fields(struct ftrace_event_call *call)
36{
37 struct syscall_metadata *entry = call->data;
38
39 return &entry->exit_fields;
40}
41
42struct trace_event_functions enter_syscall_print_funcs = {
43 .trace = print_syscall_enter,
44};
45
46struct trace_event_functions exit_syscall_print_funcs = {
47 .trace = print_syscall_exit,
48};
49
50struct ftrace_event_class event_class_syscall_enter = {
51 .system = "syscalls",
52 .reg = syscall_enter_register,
53 .define_fields = syscall_enter_define_fields,
54 .get_fields = syscall_get_enter_fields,
55 .raw_init = init_syscall_trace,
56};
57
58struct ftrace_event_class event_class_syscall_exit = {
59 .system = "syscalls",
60 .reg = syscall_exit_register,
61 .define_fields = syscall_exit_define_fields,
62 .get_fields = syscall_get_exit_fields,
63 .raw_init = init_syscall_trace,
64};
65
18extern unsigned long __start_syscalls_metadata[]; 66extern unsigned long __start_syscalls_metadata[];
19extern unsigned long __stop_syscalls_metadata[]; 67extern unsigned long __stop_syscalls_metadata[];
20 68
@@ -53,7 +101,8 @@ static struct syscall_metadata *syscall_nr_to_meta(int nr)
53} 101}
54 102
55enum print_line_t 103enum print_line_t
56print_syscall_enter(struct trace_iterator *iter, int flags) 104print_syscall_enter(struct trace_iterator *iter, int flags,
105 struct trace_event *event)
57{ 106{
58 struct trace_seq *s = &iter->seq; 107 struct trace_seq *s = &iter->seq;
59 struct trace_entry *ent = iter->ent; 108 struct trace_entry *ent = iter->ent;
@@ -68,7 +117,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags)
68 if (!entry) 117 if (!entry)
69 goto end; 118 goto end;
70 119
71 if (entry->enter_event->id != ent->type) { 120 if (entry->enter_event->event.type != ent->type) {
72 WARN_ON_ONCE(1); 121 WARN_ON_ONCE(1);
73 goto end; 122 goto end;
74 } 123 }
@@ -105,7 +154,8 @@ end:
105} 154}
106 155
107enum print_line_t 156enum print_line_t
108print_syscall_exit(struct trace_iterator *iter, int flags) 157print_syscall_exit(struct trace_iterator *iter, int flags,
158 struct trace_event *event)
109{ 159{
110 struct trace_seq *s = &iter->seq; 160 struct trace_seq *s = &iter->seq;
111 struct trace_entry *ent = iter->ent; 161 struct trace_entry *ent = iter->ent;
@@ -123,7 +173,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags)
123 return TRACE_TYPE_HANDLED; 173 return TRACE_TYPE_HANDLED;
124 } 174 }
125 175
126 if (entry->exit_event->id != ent->type) { 176 if (entry->exit_event->event.type != ent->type) {
127 WARN_ON_ONCE(1); 177 WARN_ON_ONCE(1);
128 return TRACE_TYPE_UNHANDLED; 178 return TRACE_TYPE_UNHANDLED;
129 } 179 }
@@ -205,7 +255,7 @@ static void free_syscall_print_fmt(struct ftrace_event_call *call)
205 kfree(call->print_fmt); 255 kfree(call->print_fmt);
206} 256}
207 257
208int syscall_enter_define_fields(struct ftrace_event_call *call) 258static int syscall_enter_define_fields(struct ftrace_event_call *call)
209{ 259{
210 struct syscall_trace_enter trace; 260 struct syscall_trace_enter trace;
211 struct syscall_metadata *meta = call->data; 261 struct syscall_metadata *meta = call->data;
@@ -228,7 +278,7 @@ int syscall_enter_define_fields(struct ftrace_event_call *call)
228 return ret; 278 return ret;
229} 279}
230 280
231int syscall_exit_define_fields(struct ftrace_event_call *call) 281static int syscall_exit_define_fields(struct ftrace_event_call *call)
232{ 282{
233 struct syscall_trace_exit trace; 283 struct syscall_trace_exit trace;
234 int ret; 284 int ret;
@@ -243,7 +293,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
243 return ret; 293 return ret;
244} 294}
245 295
246void ftrace_syscall_enter(struct pt_regs *regs, long id) 296void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
247{ 297{
248 struct syscall_trace_enter *entry; 298 struct syscall_trace_enter *entry;
249 struct syscall_metadata *sys_data; 299 struct syscall_metadata *sys_data;
@@ -265,7 +315,7 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id)
265 size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; 315 size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
266 316
267 event = trace_current_buffer_lock_reserve(&buffer, 317 event = trace_current_buffer_lock_reserve(&buffer,
268 sys_data->enter_event->id, size, 0, 0); 318 sys_data->enter_event->event.type, size, 0, 0);
269 if (!event) 319 if (!event)
270 return; 320 return;
271 321
@@ -278,7 +328,7 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id)
278 trace_current_buffer_unlock_commit(buffer, event, 0, 0); 328 trace_current_buffer_unlock_commit(buffer, event, 0, 0);
279} 329}
280 330
281void ftrace_syscall_exit(struct pt_regs *regs, long ret) 331void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
282{ 332{
283 struct syscall_trace_exit *entry; 333 struct syscall_trace_exit *entry;
284 struct syscall_metadata *sys_data; 334 struct syscall_metadata *sys_data;
@@ -297,7 +347,7 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret)
297 return; 347 return;
298 348
299 event = trace_current_buffer_lock_reserve(&buffer, 349 event = trace_current_buffer_lock_reserve(&buffer,
300 sys_data->exit_event->id, sizeof(*entry), 0, 0); 350 sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
301 if (!event) 351 if (!event)
302 return; 352 return;
303 353
@@ -320,7 +370,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call)
320 return -ENOSYS; 370 return -ENOSYS;
321 mutex_lock(&syscall_trace_lock); 371 mutex_lock(&syscall_trace_lock);
322 if (!sys_refcount_enter) 372 if (!sys_refcount_enter)
323 ret = register_trace_sys_enter(ftrace_syscall_enter); 373 ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
324 if (!ret) { 374 if (!ret) {
325 set_bit(num, enabled_enter_syscalls); 375 set_bit(num, enabled_enter_syscalls);
326 sys_refcount_enter++; 376 sys_refcount_enter++;
@@ -340,7 +390,7 @@ void unreg_event_syscall_enter(struct ftrace_event_call *call)
340 sys_refcount_enter--; 390 sys_refcount_enter--;
341 clear_bit(num, enabled_enter_syscalls); 391 clear_bit(num, enabled_enter_syscalls);
342 if (!sys_refcount_enter) 392 if (!sys_refcount_enter)
343 unregister_trace_sys_enter(ftrace_syscall_enter); 393 unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
344 mutex_unlock(&syscall_trace_lock); 394 mutex_unlock(&syscall_trace_lock);
345} 395}
346 396
@@ -354,7 +404,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call)
354 return -ENOSYS; 404 return -ENOSYS;
355 mutex_lock(&syscall_trace_lock); 405 mutex_lock(&syscall_trace_lock);
356 if (!sys_refcount_exit) 406 if (!sys_refcount_exit)
357 ret = register_trace_sys_exit(ftrace_syscall_exit); 407 ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
358 if (!ret) { 408 if (!ret) {
359 set_bit(num, enabled_exit_syscalls); 409 set_bit(num, enabled_exit_syscalls);
360 sys_refcount_exit++; 410 sys_refcount_exit++;
@@ -374,7 +424,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call)
374 sys_refcount_exit--; 424 sys_refcount_exit--;
375 clear_bit(num, enabled_exit_syscalls); 425 clear_bit(num, enabled_exit_syscalls);
376 if (!sys_refcount_exit) 426 if (!sys_refcount_exit)
377 unregister_trace_sys_exit(ftrace_syscall_exit); 427 unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
378 mutex_unlock(&syscall_trace_lock); 428 mutex_unlock(&syscall_trace_lock);
379} 429}
380 430
@@ -434,11 +484,11 @@ static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
434static int sys_perf_refcount_enter; 484static int sys_perf_refcount_enter;
435static int sys_perf_refcount_exit; 485static int sys_perf_refcount_exit;
436 486
437static void perf_syscall_enter(struct pt_regs *regs, long id) 487static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
438{ 488{
439 struct syscall_metadata *sys_data; 489 struct syscall_metadata *sys_data;
440 struct syscall_trace_enter *rec; 490 struct syscall_trace_enter *rec;
441 unsigned long flags; 491 struct hlist_head *head;
442 int syscall_nr; 492 int syscall_nr;
443 int rctx; 493 int rctx;
444 int size; 494 int size;
@@ -461,14 +511,16 @@ static void perf_syscall_enter(struct pt_regs *regs, long id)
461 return; 511 return;
462 512
463 rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, 513 rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
464 sys_data->enter_event->id, &rctx, &flags); 514 sys_data->enter_event->event.type, regs, &rctx);
465 if (!rec) 515 if (!rec)
466 return; 516 return;
467 517
468 rec->nr = syscall_nr; 518 rec->nr = syscall_nr;
469 syscall_get_arguments(current, regs, 0, sys_data->nb_args, 519 syscall_get_arguments(current, regs, 0, sys_data->nb_args,
470 (unsigned long *)&rec->args); 520 (unsigned long *)&rec->args);
471 perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); 521
522 head = per_cpu_ptr(sys_data->enter_event->perf_events, smp_processor_id());
523 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
472} 524}
473 525
474int perf_sysenter_enable(struct ftrace_event_call *call) 526int perf_sysenter_enable(struct ftrace_event_call *call)
@@ -480,7 +532,7 @@ int perf_sysenter_enable(struct ftrace_event_call *call)
480 532
481 mutex_lock(&syscall_trace_lock); 533 mutex_lock(&syscall_trace_lock);
482 if (!sys_perf_refcount_enter) 534 if (!sys_perf_refcount_enter)
483 ret = register_trace_sys_enter(perf_syscall_enter); 535 ret = register_trace_sys_enter(perf_syscall_enter, NULL);
484 if (ret) { 536 if (ret) {
485 pr_info("event trace: Could not activate" 537 pr_info("event trace: Could not activate"
486 "syscall entry trace point"); 538 "syscall entry trace point");
@@ -502,15 +554,15 @@ void perf_sysenter_disable(struct ftrace_event_call *call)
502 sys_perf_refcount_enter--; 554 sys_perf_refcount_enter--;
503 clear_bit(num, enabled_perf_enter_syscalls); 555 clear_bit(num, enabled_perf_enter_syscalls);
504 if (!sys_perf_refcount_enter) 556 if (!sys_perf_refcount_enter)
505 unregister_trace_sys_enter(perf_syscall_enter); 557 unregister_trace_sys_enter(perf_syscall_enter, NULL);
506 mutex_unlock(&syscall_trace_lock); 558 mutex_unlock(&syscall_trace_lock);
507} 559}
508 560
509static void perf_syscall_exit(struct pt_regs *regs, long ret) 561static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
510{ 562{
511 struct syscall_metadata *sys_data; 563 struct syscall_metadata *sys_data;
512 struct syscall_trace_exit *rec; 564 struct syscall_trace_exit *rec;
513 unsigned long flags; 565 struct hlist_head *head;
514 int syscall_nr; 566 int syscall_nr;
515 int rctx; 567 int rctx;
516 int size; 568 int size;
@@ -536,14 +588,15 @@ static void perf_syscall_exit(struct pt_regs *regs, long ret)
536 return; 588 return;
537 589
538 rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, 590 rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
539 sys_data->exit_event->id, &rctx, &flags); 591 sys_data->exit_event->event.type, regs, &rctx);
540 if (!rec) 592 if (!rec)
541 return; 593 return;
542 594
543 rec->nr = syscall_nr; 595 rec->nr = syscall_nr;
544 rec->ret = syscall_get_return_value(current, regs); 596 rec->ret = syscall_get_return_value(current, regs);
545 597
546 perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); 598 head = per_cpu_ptr(sys_data->exit_event->perf_events, smp_processor_id());
599 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
547} 600}
548 601
549int perf_sysexit_enable(struct ftrace_event_call *call) 602int perf_sysexit_enable(struct ftrace_event_call *call)
@@ -555,7 +608,7 @@ int perf_sysexit_enable(struct ftrace_event_call *call)
555 608
556 mutex_lock(&syscall_trace_lock); 609 mutex_lock(&syscall_trace_lock);
557 if (!sys_perf_refcount_exit) 610 if (!sys_perf_refcount_exit)
558 ret = register_trace_sys_exit(perf_syscall_exit); 611 ret = register_trace_sys_exit(perf_syscall_exit, NULL);
559 if (ret) { 612 if (ret) {
560 pr_info("event trace: Could not activate" 613 pr_info("event trace: Could not activate"
561 "syscall exit trace point"); 614 "syscall exit trace point");
@@ -577,9 +630,50 @@ void perf_sysexit_disable(struct ftrace_event_call *call)
577 sys_perf_refcount_exit--; 630 sys_perf_refcount_exit--;
578 clear_bit(num, enabled_perf_exit_syscalls); 631 clear_bit(num, enabled_perf_exit_syscalls);
579 if (!sys_perf_refcount_exit) 632 if (!sys_perf_refcount_exit)
580 unregister_trace_sys_exit(perf_syscall_exit); 633 unregister_trace_sys_exit(perf_syscall_exit, NULL);
581 mutex_unlock(&syscall_trace_lock); 634 mutex_unlock(&syscall_trace_lock);
582} 635}
583 636
584#endif /* CONFIG_PERF_EVENTS */ 637#endif /* CONFIG_PERF_EVENTS */
585 638
639static int syscall_enter_register(struct ftrace_event_call *event,
640 enum trace_reg type)
641{
642 switch (type) {
643 case TRACE_REG_REGISTER:
644 return reg_event_syscall_enter(event);
645 case TRACE_REG_UNREGISTER:
646 unreg_event_syscall_enter(event);
647 return 0;
648
649#ifdef CONFIG_PERF_EVENTS
650 case TRACE_REG_PERF_REGISTER:
651 return perf_sysenter_enable(event);
652 case TRACE_REG_PERF_UNREGISTER:
653 perf_sysenter_disable(event);
654 return 0;
655#endif
656 }
657 return 0;
658}
659
660static int syscall_exit_register(struct ftrace_event_call *event,
661 enum trace_reg type)
662{
663 switch (type) {
664 case TRACE_REG_REGISTER:
665 return reg_event_syscall_exit(event);
666 case TRACE_REG_UNREGISTER:
667 unreg_event_syscall_exit(event);
668 return 0;
669
670#ifdef CONFIG_PERF_EVENTS
671 case TRACE_REG_PERF_REGISTER:
672 return perf_sysexit_enable(event);
673 case TRACE_REG_PERF_UNREGISTER:
674 perf_sysexit_disable(event);
675 return 0;
676#endif
677 }
678 return 0;
679}
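The trace_syscalls.c hunks above show the API shift this merge carries through the tracing code: a tracepoint probe now receives the data pointer supplied at registration time as its first argument, and register/unregister calls take the function and that pointer as a pair. A minimal sketch of a probe written against the new convention (the probe, its counter and the module hooks are invented for illustration; it assumes the sys_enter tracepoint from <trace/events/syscalls.h> is available on the build):

#include <linux/module.h>
#include <linux/ptrace.h>
#include <trace/events/syscalls.h>

static unsigned long my_hits;	/* racy counter, good enough for a sketch */

/* New-style probe: the first argument is the registration-time data
 * pointer, the remaining arguments are the tracepoint payload. */
static void my_sys_enter_probe(void *data, struct pt_regs *regs, long id)
{
	unsigned long *hits = data;

	(*hits)++;
}

static int __init my_probe_init(void)
{
	/* The function and its data are registered as a pair ... */
	return register_trace_sys_enter(my_sys_enter_probe, &my_hits);
}

static void __exit my_probe_exit(void)
{
	/* ... and must be unregistered with the same pair. */
	unregister_trace_sys_enter(my_sys_enter_probe, &my_hits);
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");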
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index cc2d2faa7d9e..a7cc3793baf6 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -49,7 +49,8 @@ static void cpu_workqueue_stat_free(struct kref *kref)
49 49
50/* Insertion of a work */ 50/* Insertion of a work */
51static void 51static void
52probe_workqueue_insertion(struct task_struct *wq_thread, 52probe_workqueue_insertion(void *ignore,
53 struct task_struct *wq_thread,
53 struct work_struct *work) 54 struct work_struct *work)
54{ 55{
55 int cpu = cpumask_first(&wq_thread->cpus_allowed); 56 int cpu = cpumask_first(&wq_thread->cpus_allowed);
@@ -70,7 +71,8 @@ found:
70 71
71/* Execution of a work */ 72/* Execution of a work */
72static void 73static void
73probe_workqueue_execution(struct task_struct *wq_thread, 74probe_workqueue_execution(void *ignore,
75 struct task_struct *wq_thread,
74 struct work_struct *work) 76 struct work_struct *work)
75{ 77{
76 int cpu = cpumask_first(&wq_thread->cpus_allowed); 78 int cpu = cpumask_first(&wq_thread->cpus_allowed);
@@ -90,7 +92,8 @@ found:
90} 92}
91 93
92/* Creation of a cpu workqueue thread */ 94/* Creation of a cpu workqueue thread */
93static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu) 95static void probe_workqueue_creation(void *ignore,
96 struct task_struct *wq_thread, int cpu)
94{ 97{
95 struct cpu_workqueue_stats *cws; 98 struct cpu_workqueue_stats *cws;
96 unsigned long flags; 99 unsigned long flags;
@@ -114,7 +117,8 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
114} 117}
115 118
116/* Destruction of a cpu workqueue thread */ 119/* Destruction of a cpu workqueue thread */
117static void probe_workqueue_destruction(struct task_struct *wq_thread) 120static void
121probe_workqueue_destruction(void *ignore, struct task_struct *wq_thread)
118{ 122{
119 /* Workqueue only execute on one cpu */ 123 /* Workqueue only execute on one cpu */
120 int cpu = cpumask_first(&wq_thread->cpus_allowed); 124 int cpu = cpumask_first(&wq_thread->cpus_allowed);
@@ -259,19 +263,19 @@ int __init trace_workqueue_early_init(void)
259{ 263{
260 int ret, cpu; 264 int ret, cpu;
261 265
262 ret = register_trace_workqueue_insertion(probe_workqueue_insertion); 266 ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
263 if (ret) 267 if (ret)
264 goto out; 268 goto out;
265 269
266 ret = register_trace_workqueue_execution(probe_workqueue_execution); 270 ret = register_trace_workqueue_execution(probe_workqueue_execution, NULL);
267 if (ret) 271 if (ret)
268 goto no_insertion; 272 goto no_insertion;
269 273
270 ret = register_trace_workqueue_creation(probe_workqueue_creation); 274 ret = register_trace_workqueue_creation(probe_workqueue_creation, NULL);
271 if (ret) 275 if (ret)
272 goto no_execution; 276 goto no_execution;
273 277
274 ret = register_trace_workqueue_destruction(probe_workqueue_destruction); 278 ret = register_trace_workqueue_destruction(probe_workqueue_destruction, NULL);
275 if (ret) 279 if (ret)
276 goto no_creation; 280 goto no_creation;
277 281
@@ -283,11 +287,11 @@ int __init trace_workqueue_early_init(void)
283 return 0; 287 return 0;
284 288
285no_creation: 289no_creation:
286 unregister_trace_workqueue_creation(probe_workqueue_creation); 290 unregister_trace_workqueue_creation(probe_workqueue_creation, NULL);
287no_execution: 291no_execution:
288 unregister_trace_workqueue_execution(probe_workqueue_execution); 292 unregister_trace_workqueue_execution(probe_workqueue_execution, NULL);
289no_insertion: 293no_insertion:
290 unregister_trace_workqueue_insertion(probe_workqueue_insertion); 294 unregister_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
291out: 295out:
292 pr_warning("trace_workqueue: unable to trace workqueues\n"); 296 pr_warning("trace_workqueue: unable to trace workqueues\n");
293 297
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index cc89be5bc0f8..c77f3eceea25 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -54,7 +54,7 @@ static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
54 */ 54 */
55struct tracepoint_entry { 55struct tracepoint_entry {
56 struct hlist_node hlist; 56 struct hlist_node hlist;
57 void **funcs; 57 struct tracepoint_func *funcs;
58 int refcount; /* Number of times armed. 0 if disarmed. */ 58 int refcount; /* Number of times armed. 0 if disarmed. */
59 char name[0]; 59 char name[0];
60}; 60};
@@ -64,12 +64,12 @@ struct tp_probes {
64 struct rcu_head rcu; 64 struct rcu_head rcu;
65 struct list_head list; 65 struct list_head list;
66 } u; 66 } u;
67 void *probes[0]; 67 struct tracepoint_func probes[0];
68}; 68};
69 69
70static inline void *allocate_probes(int count) 70static inline void *allocate_probes(int count)
71{ 71{
72 struct tp_probes *p = kmalloc(count * sizeof(void *) 72 struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func)
73 + sizeof(struct tp_probes), GFP_KERNEL); 73 + sizeof(struct tp_probes), GFP_KERNEL);
74 return p == NULL ? NULL : p->probes; 74 return p == NULL ? NULL : p->probes;
75} 75}
@@ -79,7 +79,7 @@ static void rcu_free_old_probes(struct rcu_head *head)
79 kfree(container_of(head, struct tp_probes, u.rcu)); 79 kfree(container_of(head, struct tp_probes, u.rcu));
80} 80}
81 81
82static inline void release_probes(void *old) 82static inline void release_probes(struct tracepoint_func *old)
83{ 83{
84 if (old) { 84 if (old) {
85 struct tp_probes *tp_probes = container_of(old, 85 struct tp_probes *tp_probes = container_of(old,
@@ -95,15 +95,16 @@ static void debug_print_probes(struct tracepoint_entry *entry)
95 if (!tracepoint_debug || !entry->funcs) 95 if (!tracepoint_debug || !entry->funcs)
96 return; 96 return;
97 97
98 for (i = 0; entry->funcs[i]; i++) 98 for (i = 0; entry->funcs[i].func; i++)
99 printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i]); 99 printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func);
100} 100}
101 101
102static void * 102static struct tracepoint_func *
103tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe) 103tracepoint_entry_add_probe(struct tracepoint_entry *entry,
104 void *probe, void *data)
104{ 105{
105 int nr_probes = 0; 106 int nr_probes = 0;
106 void **old, **new; 107 struct tracepoint_func *old, *new;
107 108
108 WARN_ON(!probe); 109 WARN_ON(!probe);
109 110
@@ -111,8 +112,9 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
111 old = entry->funcs; 112 old = entry->funcs;
112 if (old) { 113 if (old) {
113 /* (N -> N+1), (N != 0, 1) probes */ 114 /* (N -> N+1), (N != 0, 1) probes */
114 for (nr_probes = 0; old[nr_probes]; nr_probes++) 115 for (nr_probes = 0; old[nr_probes].func; nr_probes++)
115 if (old[nr_probes] == probe) 116 if (old[nr_probes].func == probe &&
117 old[nr_probes].data == data)
116 return ERR_PTR(-EEXIST); 118 return ERR_PTR(-EEXIST);
117 } 119 }
118 /* + 2 : one for new probe, one for NULL func */ 120 /* + 2 : one for new probe, one for NULL func */
@@ -120,9 +122,10 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
120 if (new == NULL) 122 if (new == NULL)
121 return ERR_PTR(-ENOMEM); 123 return ERR_PTR(-ENOMEM);
122 if (old) 124 if (old)
123 memcpy(new, old, nr_probes * sizeof(void *)); 125 memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
124 new[nr_probes] = probe; 126 new[nr_probes].func = probe;
125 new[nr_probes + 1] = NULL; 127 new[nr_probes].data = data;
128 new[nr_probes + 1].func = NULL;
126 entry->refcount = nr_probes + 1; 129 entry->refcount = nr_probes + 1;
127 entry->funcs = new; 130 entry->funcs = new;
128 debug_print_probes(entry); 131 debug_print_probes(entry);
@@ -130,10 +133,11 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
130} 133}
131 134
132static void * 135static void *
133tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe) 136tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
137 void *probe, void *data)
134{ 138{
135 int nr_probes = 0, nr_del = 0, i; 139 int nr_probes = 0, nr_del = 0, i;
136 void **old, **new; 140 struct tracepoint_func *old, *new;
137 141
138 old = entry->funcs; 142 old = entry->funcs;
139 143
@@ -142,8 +146,10 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
142 146
143 debug_print_probes(entry); 147 debug_print_probes(entry);
144 /* (N -> M), (N > 1, M >= 0) probes */ 148 /* (N -> M), (N > 1, M >= 0) probes */
145 for (nr_probes = 0; old[nr_probes]; nr_probes++) { 149 for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
146 if ((!probe || old[nr_probes] == probe)) 150 if (!probe ||
151 (old[nr_probes].func == probe &&
152 old[nr_probes].data == data))
147 nr_del++; 153 nr_del++;
148 } 154 }
149 155
@@ -160,10 +166,11 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
160 new = allocate_probes(nr_probes - nr_del + 1); 166 new = allocate_probes(nr_probes - nr_del + 1);
161 if (new == NULL) 167 if (new == NULL)
162 return ERR_PTR(-ENOMEM); 168 return ERR_PTR(-ENOMEM);
163 for (i = 0; old[i]; i++) 169 for (i = 0; old[i].func; i++)
164 if ((probe && old[i] != probe)) 170 if (probe &&
171 (old[i].func != probe || old[i].data != data))
165 new[j++] = old[i]; 172 new[j++] = old[i];
166 new[nr_probes - nr_del] = NULL; 173 new[nr_probes - nr_del].func = NULL;
167 entry->refcount = nr_probes - nr_del; 174 entry->refcount = nr_probes - nr_del;
168 entry->funcs = new; 175 entry->funcs = new;
169 } 176 }
@@ -315,18 +322,19 @@ static void tracepoint_update_probes(void)
315 module_update_tracepoints(); 322 module_update_tracepoints();
316} 323}
317 324
318static void *tracepoint_add_probe(const char *name, void *probe) 325static struct tracepoint_func *
326tracepoint_add_probe(const char *name, void *probe, void *data)
319{ 327{
320 struct tracepoint_entry *entry; 328 struct tracepoint_entry *entry;
321 void *old; 329 struct tracepoint_func *old;
322 330
323 entry = get_tracepoint(name); 331 entry = get_tracepoint(name);
324 if (!entry) { 332 if (!entry) {
325 entry = add_tracepoint(name); 333 entry = add_tracepoint(name);
326 if (IS_ERR(entry)) 334 if (IS_ERR(entry))
327 return entry; 335 return (struct tracepoint_func *)entry;
328 } 336 }
329 old = tracepoint_entry_add_probe(entry, probe); 337 old = tracepoint_entry_add_probe(entry, probe, data);
330 if (IS_ERR(old) && !entry->refcount) 338 if (IS_ERR(old) && !entry->refcount)
331 remove_tracepoint(entry); 339 remove_tracepoint(entry);
332 return old; 340 return old;
@@ -340,12 +348,12 @@ static void *tracepoint_add_probe(const char *name, void *probe)
340 * Returns 0 if ok, error value on error. 348 * Returns 0 if ok, error value on error.
341 * The probe address must at least be aligned on the architecture pointer size. 349 * The probe address must at least be aligned on the architecture pointer size.
342 */ 350 */
343int tracepoint_probe_register(const char *name, void *probe) 351int tracepoint_probe_register(const char *name, void *probe, void *data)
344{ 352{
345 void *old; 353 struct tracepoint_func *old;
346 354
347 mutex_lock(&tracepoints_mutex); 355 mutex_lock(&tracepoints_mutex);
348 old = tracepoint_add_probe(name, probe); 356 old = tracepoint_add_probe(name, probe, data);
349 mutex_unlock(&tracepoints_mutex); 357 mutex_unlock(&tracepoints_mutex);
350 if (IS_ERR(old)) 358 if (IS_ERR(old))
351 return PTR_ERR(old); 359 return PTR_ERR(old);
@@ -356,15 +364,16 @@ int tracepoint_probe_register(const char *name, void *probe)
356} 364}
357EXPORT_SYMBOL_GPL(tracepoint_probe_register); 365EXPORT_SYMBOL_GPL(tracepoint_probe_register);
358 366
359static void *tracepoint_remove_probe(const char *name, void *probe) 367static struct tracepoint_func *
368tracepoint_remove_probe(const char *name, void *probe, void *data)
360{ 369{
361 struct tracepoint_entry *entry; 370 struct tracepoint_entry *entry;
362 void *old; 371 struct tracepoint_func *old;
363 372
364 entry = get_tracepoint(name); 373 entry = get_tracepoint(name);
365 if (!entry) 374 if (!entry)
366 return ERR_PTR(-ENOENT); 375 return ERR_PTR(-ENOENT);
367 old = tracepoint_entry_remove_probe(entry, probe); 376 old = tracepoint_entry_remove_probe(entry, probe, data);
368 if (IS_ERR(old)) 377 if (IS_ERR(old))
369 return old; 378 return old;
370 if (!entry->refcount) 379 if (!entry->refcount)
@@ -382,12 +391,12 @@ static void *tracepoint_remove_probe(const char *name, void *probe)
382 * itself uses stop_machine(), which insures that every preempt disabled section 391 * itself uses stop_machine(), which insures that every preempt disabled section
383 * have finished. 392 * have finished.
384 */ 393 */
385int tracepoint_probe_unregister(const char *name, void *probe) 394int tracepoint_probe_unregister(const char *name, void *probe, void *data)
386{ 395{
387 void *old; 396 struct tracepoint_func *old;
388 397
389 mutex_lock(&tracepoints_mutex); 398 mutex_lock(&tracepoints_mutex);
390 old = tracepoint_remove_probe(name, probe); 399 old = tracepoint_remove_probe(name, probe, data);
391 mutex_unlock(&tracepoints_mutex); 400 mutex_unlock(&tracepoints_mutex);
392 if (IS_ERR(old)) 401 if (IS_ERR(old))
393 return PTR_ERR(old); 402 return PTR_ERR(old);
@@ -418,12 +427,13 @@ static void tracepoint_add_old_probes(void *old)
418 * 427 *
419 * caller must call tracepoint_probe_update_all() 428 * caller must call tracepoint_probe_update_all()
420 */ 429 */
421int tracepoint_probe_register_noupdate(const char *name, void *probe) 430int tracepoint_probe_register_noupdate(const char *name, void *probe,
431 void *data)
422{ 432{
423 void *old; 433 struct tracepoint_func *old;
424 434
425 mutex_lock(&tracepoints_mutex); 435 mutex_lock(&tracepoints_mutex);
426 old = tracepoint_add_probe(name, probe); 436 old = tracepoint_add_probe(name, probe, data);
427 if (IS_ERR(old)) { 437 if (IS_ERR(old)) {
428 mutex_unlock(&tracepoints_mutex); 438 mutex_unlock(&tracepoints_mutex);
429 return PTR_ERR(old); 439 return PTR_ERR(old);
@@ -441,12 +451,13 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
441 * 451 *
442 * caller must call tracepoint_probe_update_all() 452 * caller must call tracepoint_probe_update_all()
443 */ 453 */
444int tracepoint_probe_unregister_noupdate(const char *name, void *probe) 454int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
455 void *data)
445{ 456{
446 void *old; 457 struct tracepoint_func *old;
447 458
448 mutex_lock(&tracepoints_mutex); 459 mutex_lock(&tracepoints_mutex);
449 old = tracepoint_remove_probe(name, probe); 460 old = tracepoint_remove_probe(name, probe, data);
450 if (IS_ERR(old)) { 461 if (IS_ERR(old)) {
451 mutex_unlock(&tracepoints_mutex); 462 mutex_unlock(&tracepoints_mutex);
452 return PTR_ERR(old); 463 return PTR_ERR(old);
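In kernel/tracepoint.c the per-tracepoint callback list becomes a NULL-terminated array of struct tracepoint_func pairs, and a registration only counts as a duplicate when both the function and the data pointer match, so one probe function can now be attached several times with different contexts. The dispatch the tracepoint macros expand to walks that array roughly as follows (a sketch only, reusing the sys_enter payload from the earlier example and ignoring the RCU dereference the real macro performs):

#include <linux/tracepoint.h>	/* struct tracepoint_func is declared here in this series */

typedef void (*sys_enter_probe_t)(void *data, struct pt_regs *regs, long id);

static void call_sys_enter_probes(struct tracepoint_func *funcs,
				  struct pt_regs *regs, long id)
{
	struct tracepoint_func *it;

	if (!funcs)
		return;

	for (it = funcs; it->func; it++) {	/* array ends at .func == NULL */
		sys_enter_probe_t fn = (sys_enter_probe_t)it->func;

		fn(it->data, regs, id);		/* per-registration data */
	}
}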
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 77dabbf64b8f..327d2deb4451 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1110,7 +1110,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
1110 unsigned int cpu = (unsigned long)hcpu; 1110 unsigned int cpu = (unsigned long)hcpu;
1111 struct cpu_workqueue_struct *cwq; 1111 struct cpu_workqueue_struct *cwq;
1112 struct workqueue_struct *wq; 1112 struct workqueue_struct *wq;
1113 int ret = NOTIFY_OK; 1113 int err = 0;
1114 1114
1115 action &= ~CPU_TASKS_FROZEN; 1115 action &= ~CPU_TASKS_FROZEN;
1116 1116
@@ -1124,12 +1124,13 @@ undo:
1124 1124
1125 switch (action) { 1125 switch (action) {
1126 case CPU_UP_PREPARE: 1126 case CPU_UP_PREPARE:
1127 if (!create_workqueue_thread(cwq, cpu)) 1127 err = create_workqueue_thread(cwq, cpu);
1128 if (!err)
1128 break; 1129 break;
1129 printk(KERN_ERR "workqueue [%s] for %i failed\n", 1130 printk(KERN_ERR "workqueue [%s] for %i failed\n",
1130 wq->name, cpu); 1131 wq->name, cpu);
1131 action = CPU_UP_CANCELED; 1132 action = CPU_UP_CANCELED;
1132 ret = NOTIFY_BAD; 1133 err = -ENOMEM;
1133 goto undo; 1134 goto undo;
1134 1135
1135 case CPU_ONLINE: 1136 case CPU_ONLINE:
@@ -1150,7 +1151,7 @@ undo:
1150 cpumask_clear_cpu(cpu, cpu_populated_map); 1151 cpumask_clear_cpu(cpu, cpu_populated_map);
1151 } 1152 }
1152 1153
1153 return ret; 1154 return notifier_from_errno(err);
1154} 1155}
1155 1156
1156#ifdef CONFIG_SMP 1157#ifdef CONFIG_SMP
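The kernel/workqueue.c hunk stops collapsing callback failures into a bare NOTIFY_BAD and instead returns notifier_from_errno(err), so the original errno survives the notifier chain and can be recovered with notifier_to_errno(); that is also what gives the error-injection module added below something meaningful to inject. The resulting callback pattern, with a made-up per-CPU prepare step standing in for real work:

#include <linux/cpu.h>
#include <linux/notifier.h>

/* Hypothetical per-CPU setup used only to show the pattern. */
static int my_prepare_cpu(unsigned int cpu)
{
	return 0;			/* or a negative errno on failure */
}

static int my_cpu_callback(struct notifier_block *nb,
			   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int err = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		err = my_prepare_cpu(cpu);
		break;
	}

	/* 0 becomes NOTIFY_OK; a negative errno is encoded so that the
	 * hotplug core can read it back with notifier_to_errno(). */
	return notifier_from_errno(err);
}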
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 231208948363..e722e9d62221 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -898,6 +898,18 @@ config LKDTM
898 Documentation on how to use the module can be found in 898 Documentation on how to use the module can be found in
899 Documentation/fault-injection/provoke-crashes.txt 899 Documentation/fault-injection/provoke-crashes.txt
900 900
901config CPU_NOTIFIER_ERROR_INJECT
902 tristate "CPU notifier error injection module"
903 depends on HOTPLUG_CPU && DEBUG_KERNEL
904 help
905 This option provides a kernel module that can be used to test
906 the error handling of the cpu notifiers
907
908 To compile this code as a module, choose M here: the module will
909 be called cpu-notifier-error-inject.
910
911 If unsure, say N.
912
901config FAULT_INJECTION 913config FAULT_INJECTION
902 bool "Fault-injection framework" 914 bool "Fault-injection framework"
903 depends on DEBUG_KERNEL 915 depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 9e6d3c29d73a..3f1062cbbff4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o
21 21
22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 22obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 23 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
24 string_helpers.o gcd.o lcm.o list_sort.o 24 string_helpers.o gcd.o lcm.o list_sort.o uuid.o
25 25
26ifeq ($(CONFIG_DEBUG_KOBJECT),y) 26ifeq ($(CONFIG_DEBUG_KOBJECT),y)
27CFLAGS_kobject.o += -DDEBUG 27CFLAGS_kobject.o += -DDEBUG
@@ -85,6 +85,7 @@ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
85obj-$(CONFIG_SWIOTLB) += swiotlb.o 85obj-$(CONFIG_SWIOTLB) += swiotlb.o
86obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o 86obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
87obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o 87obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
88obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
88 89
89lib-$(CONFIG_GENERIC_BUG) += bug.o 90lib-$(CONFIG_GENERIC_BUG) += bug.o
90 91
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 65e482caf5e9..9087d71537dd 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -9,6 +9,7 @@
9 * (at your option) any later version. 9 * (at your option) any later version.
10 */ 10 */
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/kernel.h>
12#include <asm/atomic.h> 13#include <asm/atomic.h>
13 14
14#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0) 15#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c
new file mode 100644
index 000000000000..4dc20321b0d5
--- /dev/null
+++ b/lib/cpu-notifier-error-inject.c
@@ -0,0 +1,63 @@
1#include <linux/kernel.h>
2#include <linux/cpu.h>
3#include <linux/module.h>
4#include <linux/notifier.h>
5
6static int priority;
7static int cpu_up_prepare_error;
8static int cpu_down_prepare_error;
9
10module_param(priority, int, 0);
11MODULE_PARM_DESC(priority, "specify cpu notifier priority");
12
13module_param(cpu_up_prepare_error, int, 0644);
14MODULE_PARM_DESC(cpu_up_prepare_error,
15 "specify error code to inject CPU_UP_PREPARE action");
16
17module_param(cpu_down_prepare_error, int, 0644);
18MODULE_PARM_DESC(cpu_down_prepare_error,
19 "specify error code to inject CPU_DOWN_PREPARE action");
20
21static int err_inject_cpu_callback(struct notifier_block *nfb,
22 unsigned long action, void *hcpu)
23{
24 int err = 0;
25
26 switch (action) {
27 case CPU_UP_PREPARE:
28 case CPU_UP_PREPARE_FROZEN:
29 err = cpu_up_prepare_error;
30 break;
31 case CPU_DOWN_PREPARE:
32 case CPU_DOWN_PREPARE_FROZEN:
33 err = cpu_down_prepare_error;
34 break;
35 }
36 if (err)
37 printk(KERN_INFO "Injecting error (%d) at cpu notifier\n", err);
38
39 return notifier_from_errno(err);
40}
41
42static struct notifier_block err_inject_cpu_notifier = {
43 .notifier_call = err_inject_cpu_callback,
44};
45
46static int err_inject_init(void)
47{
48 err_inject_cpu_notifier.priority = priority;
49
50 return register_hotcpu_notifier(&err_inject_cpu_notifier);
51}
52
53static void err_inject_exit(void)
54{
55 unregister_hotcpu_notifier(&err_inject_cpu_notifier);
56}
57
58module_init(err_inject_init);
59module_exit(err_inject_exit);
60
61MODULE_DESCRIPTION("CPU notifier error injection module");
62MODULE_LICENSE("GPL");
63MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
diff --git a/lib/crc32.c b/lib/crc32.c
index 3087ed899ee3..4855995fcde9 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -50,7 +50,7 @@ MODULE_LICENSE("GPL");
50static inline u32 50static inline u32
51crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) 51crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
52{ 52{
53# if __BYTE_ORDER == __LITTLE_ENDIAN 53# ifdef __LITTLE_ENDIAN
54# define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8) 54# define DO_CRC(x) crc = tab[0][(crc ^ (x)) & 255] ^ (crc >> 8)
55# define DO_CRC4 crc = tab[3][(crc) & 255] ^ \ 55# define DO_CRC4 crc = tab[3][(crc) & 255] ^ \
56 tab[2][(crc >> 8) & 255] ^ \ 56 tab[2][(crc >> 8) & 255] ^ \
diff --git a/lib/idr.c b/lib/idr.c
index 422a9d5069cc..c1a206901761 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -445,6 +445,7 @@ EXPORT_SYMBOL(idr_remove);
445void idr_remove_all(struct idr *idp) 445void idr_remove_all(struct idr *idp)
446{ 446{
447 int n, id, max; 447 int n, id, max;
448 int bt_mask;
448 struct idr_layer *p; 449 struct idr_layer *p;
449 struct idr_layer *pa[MAX_LEVEL]; 450 struct idr_layer *pa[MAX_LEVEL];
450 struct idr_layer **paa = &pa[0]; 451 struct idr_layer **paa = &pa[0];
@@ -462,8 +463,10 @@ void idr_remove_all(struct idr *idp)
462 p = p->ary[(id >> n) & IDR_MASK]; 463 p = p->ary[(id >> n) & IDR_MASK];
463 } 464 }
464 465
466 bt_mask = id;
465 id += 1 << n; 467 id += 1 << n;
466 while (n < fls(id)) { 468 /* Get the highest bit that the above add changed from 0->1. */
469 while (n < fls(id ^ bt_mask)) {
467 if (p) 470 if (p)
468 free_layer(p); 471 free_layer(p);
469 n += IDR_BITS; 472 n += IDR_BITS;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 2a087e0f9863..05da38bcc298 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -656,7 +656,7 @@ EXPORT_SYMBOL(radix_tree_next_hole);
656 * 656 *
657 * Returns: the index of the hole if found, otherwise returns an index 657 * Returns: the index of the hole if found, otherwise returns an index
658 * outside of the set specified (in which case 'index - return >= max_scan' 658 * outside of the set specified (in which case 'index - return >= max_scan'
659 * will be true). In rare cases of wrap-around, LONG_MAX will be returned. 659 * will be true). In rare cases of wrap-around, ULONG_MAX will be returned.
660 * 660 *
661 * radix_tree_next_hole may be called under rcu_read_lock. However, like 661 * radix_tree_next_hole may be called under rcu_read_lock. However, like
662 * radix_tree_gang_lookup, this will not atomically search a snapshot of 662 * radix_tree_gang_lookup, this will not atomically search a snapshot of
@@ -674,7 +674,7 @@ unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
674 if (!radix_tree_lookup(root, index)) 674 if (!radix_tree_lookup(root, index))
675 break; 675 break;
676 index--; 676 index--;
677 if (index == LONG_MAX) 677 if (index == ULONG_MAX)
678 break; 678 break;
679 } 679 }
680 680
diff --git a/lib/random32.c b/lib/random32.c
index 217d5c4b666d..870dc3fc0f0f 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -39,13 +39,16 @@
39#include <linux/jiffies.h> 39#include <linux/jiffies.h>
40#include <linux/random.h> 40#include <linux/random.h>
41 41
42struct rnd_state {
43 u32 s1, s2, s3;
44};
45
46static DEFINE_PER_CPU(struct rnd_state, net_rand_state); 42static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
47 43
48static u32 __random32(struct rnd_state *state) 44/**
45 * prandom32 - seeded pseudo-random number generator.
46 * @state: pointer to state structure holding seeded state.
47 *
48 * This is used for pseudo-randomness with no outside seeding.
49 * For more random results, use random32().
50 */
51u32 prandom32(struct rnd_state *state)
49{ 52{
50#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b) 53#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
51 54
@@ -55,14 +58,7 @@ static u32 __random32(struct rnd_state *state)
55 58
56 return (state->s1 ^ state->s2 ^ state->s3); 59 return (state->s1 ^ state->s2 ^ state->s3);
57} 60}
58 61EXPORT_SYMBOL(prandom32);
59/*
60 * Handle minimum values for seeds
61 */
62static inline u32 __seed(u32 x, u32 m)
63{
64 return (x < m) ? x + m : x;
65}
66 62
67/** 63/**
68 * random32 - pseudo random number generator 64 * random32 - pseudo random number generator
@@ -75,7 +71,7 @@ u32 random32(void)
75{ 71{
76 unsigned long r; 72 unsigned long r;
77 struct rnd_state *state = &get_cpu_var(net_rand_state); 73 struct rnd_state *state = &get_cpu_var(net_rand_state);
78 r = __random32(state); 74 r = prandom32(state);
79 put_cpu_var(state); 75 put_cpu_var(state);
80 return r; 76 return r;
81} 77}
@@ -118,12 +114,12 @@ static int __init random32_init(void)
118 state->s3 = __seed(LCG(state->s2), 15); 114 state->s3 = __seed(LCG(state->s2), 15);
119 115
120 /* "warm it up" */ 116 /* "warm it up" */
121 __random32(state); 117 prandom32(state);
122 __random32(state); 118 prandom32(state);
123 __random32(state); 119 prandom32(state);
124 __random32(state); 120 prandom32(state);
125 __random32(state); 121 prandom32(state);
126 __random32(state); 122 prandom32(state);
127 } 123 }
128 return 0; 124 return 0;
129} 125}
@@ -147,7 +143,7 @@ static int __init random32_reseed(void)
147 state->s3 = __seed(seeds[2], 15); 143 state->s3 = __seed(seeds[2], 15);
148 144
149 /* mix it in */ 145 /* mix it in */
150 __random32(state); 146 prandom32(state);
151 } 147 }
152 return 0; 148 return 0;
153} 149}
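lib/random32.c renames the internal generator to prandom32() and exports it, with struct rnd_state evidently moving out of this file so that callers can keep private, reproducible generator state while random32() keeps serving the shared per-CPU state. A minimal caller sketch (the state layout is assumed to be available from linux/random.h after this series; the seed values are arbitrary but well above the small per-field minimums that __seed() enforces, and a real user would warm the state up the way random32_init() does):

#include <linux/types.h>
#include <linux/random.h>

static struct rnd_state my_state = {
	.s1 = 0x12345678U,
	.s2 = 0x23456789U,
	.s3 = 0x34567890U,
};

static u32 my_next_random(void)
{
	return prandom32(&my_state);	/* advances only this private state */
}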
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 5fddf720da73..a009055140ec 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -757,37 +757,6 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
757EXPORT_SYMBOL(swiotlb_sync_single_for_device); 757EXPORT_SYMBOL(swiotlb_sync_single_for_device);
758 758
759/* 759/*
760 * Same as above, but for a sub-range of the mapping.
761 */
762static void
763swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
764 unsigned long offset, size_t size,
765 int dir, int target)
766{
767 swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
768}
769
770void
771swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
772 unsigned long offset, size_t size,
773 enum dma_data_direction dir)
774{
775 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
776 SYNC_FOR_CPU);
777}
778EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
779
780void
781swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
782 unsigned long offset, size_t size,
783 enum dma_data_direction dir)
784{
785 swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
786 SYNC_FOR_DEVICE);
787}
788EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
789
790/*
791 * Map a set of buffers described by scatterlist in streaming mode for DMA. 760 * Map a set of buffers described by scatterlist in streaming mode for DMA.
792 * This is the scatter-gather version of the above swiotlb_map_page 761 * This is the scatter-gather version of the above swiotlb_map_page
793 * interface. Here the scatter gather list elements are each tagged with the 762 * interface. Here the scatter gather list elements are each tagged with the
diff --git a/lib/uuid.c b/lib/uuid.c
new file mode 100644
index 000000000000..8fadd7cef46c
--- /dev/null
+++ b/lib/uuid.c
@@ -0,0 +1,53 @@
1/*
2 * Unified UUID/GUID definition
3 *
4 * Copyright (C) 2009, Intel Corp.
5 * Huang Ying <ying.huang@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation;
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/uuid.h>
24#include <linux/random.h>
25
26static void __uuid_gen_common(__u8 b[16])
27{
28 int i;
29 u32 r;
30
31 for (i = 0; i < 4; i++) {
32 r = random32();
33 memcpy(b + i * 4, &r, 4);
34 }
35 /* reversion 0b10 */
36 b[8] = (b[8] & 0x3F) | 0x80;
37}
38
39void uuid_le_gen(uuid_le *lu)
40{
41 __uuid_gen_common(lu->b);
42 /* version 4 : random generation */
43 lu->b[7] = (lu->b[7] & 0x0F) | 0x40;
44}
45EXPORT_SYMBOL_GPL(uuid_le_gen);
46
47void uuid_be_gen(uuid_be *bu)
48{
49 __uuid_gen_common(bu->b);
50 /* version 4 : random generation */
51 bu->b[6] = (bu->b[6] & 0x0F) | 0x40;
52}
53EXPORT_SYMBOL_GPL(uuid_be_gen);
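lib/uuid.c adds random (version 4) UUID generation for both layouts: uuid_le keeps the little-endian byte order commonly called a GUID, uuid_be the big-endian RFC 4122 order. Usage is just (the types are assumed to come from the new linux/uuid.h header this file includes):

#include <linux/uuid.h>

static void example_make_uuids(void)
{
	uuid_le guid;	/* little-endian ("GUID") byte order */
	uuid_be uuid;	/* big-endian (RFC 4122) byte order */

	uuid_le_gen(&guid);
	uuid_be_gen(&uuid);

	/* Both are filled from random32(); __uuid_gen_common() sets the
	 * variant bits in byte 8 and each _gen helper then sets the
	 * version nibble (0x4) in the byte appropriate to its layout. */
}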
diff --git a/mm/filemap.c b/mm/filemap.c
index 88d719665a28..20e5642e9f9f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -151,6 +151,7 @@ void remove_from_page_cache(struct page *page)
151 spin_unlock_irq(&mapping->tree_lock); 151 spin_unlock_irq(&mapping->tree_lock);
152 mem_cgroup_uncharge_cache_page(page); 152 mem_cgroup_uncharge_cache_page(page);
153} 153}
154EXPORT_SYMBOL(remove_from_page_cache);
154 155
155static int sync_page(void *word) 156static int sync_page(void *word)
156{ 157{
@@ -1105,6 +1106,12 @@ page_not_up_to_date_locked:
1105 } 1106 }
1106 1107
1107readpage: 1108readpage:
1109 /*
1110 * A previous I/O error may have been due to temporary
1111 * failures, eg. multipath errors.
1112 * PG_error will be set again if readpage fails.
1113 */
1114 ClearPageError(page);
1108 /* Start the actual read. The read will unlock the page. */ 1115 /* Start the actual read. The read will unlock the page. */
1109 error = mapping->a_ops->readpage(filp, page); 1116 error = mapping->a_ops->readpage(filp, page);
1110 1117
@@ -1269,7 +1276,7 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1269{ 1276{
1270 struct file *filp = iocb->ki_filp; 1277 struct file *filp = iocb->ki_filp;
1271 ssize_t retval; 1278 ssize_t retval;
1272 unsigned long seg; 1279 unsigned long seg = 0;
1273 size_t count; 1280 size_t count;
1274 loff_t *ppos = &iocb->ki_pos; 1281 loff_t *ppos = &iocb->ki_pos;
1275 1282
@@ -1296,21 +1303,47 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1296 retval = mapping->a_ops->direct_IO(READ, iocb, 1303 retval = mapping->a_ops->direct_IO(READ, iocb,
1297 iov, pos, nr_segs); 1304 iov, pos, nr_segs);
1298 } 1305 }
1299 if (retval > 0) 1306 if (retval > 0) {
1300 *ppos = pos + retval; 1307 *ppos = pos + retval;
1301 if (retval) { 1308 count -= retval;
1309 }
1310
1311 /*
1312 * Btrfs can have a short DIO read if we encounter
1313 * compressed extents, so if there was an error, or if
1314 * we've already read everything we wanted to, or if
1315 * there was a short read because we hit EOF, go ahead
1316 * and return. Otherwise fallthrough to buffered io for
1317 * the rest of the read.
1318 */
1319 if (retval < 0 || !count || *ppos >= size) {
1302 file_accessed(filp); 1320 file_accessed(filp);
1303 goto out; 1321 goto out;
1304 } 1322 }
1305 } 1323 }
1306 } 1324 }
1307 1325
1326 count = retval;
1308 for (seg = 0; seg < nr_segs; seg++) { 1327 for (seg = 0; seg < nr_segs; seg++) {
1309 read_descriptor_t desc; 1328 read_descriptor_t desc;
1329 loff_t offset = 0;
1330
1331 /*
1332 * If we did a short DIO read we need to skip the section of the
1333 * iov that we've already read data into.
1334 */
1335 if (count) {
1336 if (count > iov[seg].iov_len) {
1337 count -= iov[seg].iov_len;
1338 continue;
1339 }
1340 offset = count;
1341 count = 0;
1342 }
1310 1343
1311 desc.written = 0; 1344 desc.written = 0;
1312 desc.arg.buf = iov[seg].iov_base; 1345 desc.arg.buf = iov[seg].iov_base + offset;
1313 desc.count = iov[seg].iov_len; 1346 desc.count = iov[seg].iov_len - offset;
1314 if (desc.count == 0) 1347 if (desc.count == 0)
1315 continue; 1348 continue;
1316 desc.error = 0; 1349 desc.error = 0;
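The generic_file_aio_read() change above handles short direct-I/O reads (the Btrfs compressed-extent case named in the comment): whatever direct I/O already delivered is subtracted from count, and the buffered fallback must then skip that many bytes across the iovec array before resuming. In isolation the skip works like this (a standalone, userspace-flavoured sketch of the same arithmetic, not kernel code):

#include <stddef.h>
#include <sys/uio.h>

/* "done" bytes were already satisfied by direct I/O; work out where the
 * buffered fallback resumes inside each segment. */
static void resume_after_short_dio(const struct iovec *iov,
				   unsigned long nr_segs, size_t done)
{
	unsigned long seg;

	for (seg = 0; seg < nr_segs; seg++) {
		size_t offset = 0;

		if (done) {
			if (done > iov[seg].iov_len) {
				done -= iov[seg].iov_len;	/* whole segment consumed */
				continue;
			}
			offset = done;				/* partially consumed */
			done = 0;
		}

		/* Buffered read for this segment starts at
		 * iov[seg].iov_base + offset and covers
		 * iov[seg].iov_len - offset bytes (zero-length leftovers
		 * are skipped, matching the desc.count == 0 check above). */
	}
}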
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c8569bc298ff..c6ece0a57595 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -149,16 +149,35 @@ struct mem_cgroup_threshold {
149 u64 threshold; 149 u64 threshold;
150}; 150};
151 151
152/* For threshold */
152struct mem_cgroup_threshold_ary { 153struct mem_cgroup_threshold_ary {
153 /* An array index points to threshold just below usage. */ 154 /* An array index points to threshold just below usage. */
154 atomic_t current_threshold; 155 int current_threshold;
155 /* Size of entries[] */ 156 /* Size of entries[] */
156 unsigned int size; 157 unsigned int size;
157 /* Array of thresholds */ 158 /* Array of thresholds */
158 struct mem_cgroup_threshold entries[0]; 159 struct mem_cgroup_threshold entries[0];
159}; 160};
160 161
162struct mem_cgroup_thresholds {
163 /* Primary thresholds array */
164 struct mem_cgroup_threshold_ary *primary;
165 /*
166 * Spare threshold array.
167 * This is needed to make mem_cgroup_unregister_event() "never fail".
168 * It must be able to store at least primary->size - 1 entries.
169 */
170 struct mem_cgroup_threshold_ary *spare;
171};
172
173/* for OOM */
174struct mem_cgroup_eventfd_list {
175 struct list_head list;
176 struct eventfd_ctx *eventfd;
177};
178
161static void mem_cgroup_threshold(struct mem_cgroup *mem); 179static void mem_cgroup_threshold(struct mem_cgroup *mem);
180static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
162 181
163/* 182/*
164 * The memory controller data structure. The memory controller controls both 183 * The memory controller data structure. The memory controller controls both
@@ -207,6 +226,8 @@ struct mem_cgroup {
207 atomic_t refcnt; 226 atomic_t refcnt;
208 227
209 unsigned int swappiness; 228 unsigned int swappiness;
229 /* OOM-Killer disable */
230 int oom_kill_disable;
210 231
211 /* set when res.limit == memsw.limit */ 232 /* set when res.limit == memsw.limit */
212 bool memsw_is_minimum; 233 bool memsw_is_minimum;
@@ -215,17 +236,19 @@ struct mem_cgroup {
215 struct mutex thresholds_lock; 236 struct mutex thresholds_lock;
216 237
217 /* thresholds for memory usage. RCU-protected */ 238 /* thresholds for memory usage. RCU-protected */
218 struct mem_cgroup_threshold_ary *thresholds; 239 struct mem_cgroup_thresholds thresholds;
219 240
220 /* thresholds for mem+swap usage. RCU-protected */ 241 /* thresholds for mem+swap usage. RCU-protected */
221 struct mem_cgroup_threshold_ary *memsw_thresholds; 242 struct mem_cgroup_thresholds memsw_thresholds;
243
244 /* For oom notifier event fd */
245 struct list_head oom_notify;
222 246
223 /* 247 /*
224 * Should we move charges of a task when a task is moved into this 248 * Should we move charges of a task when a task is moved into this
225 * mem_cgroup ? And what type of charges should we move ? 249 * mem_cgroup ? And what type of charges should we move ?
226 */ 250 */
227 unsigned long move_charge_at_immigrate; 251 unsigned long move_charge_at_immigrate;
228
229 /* 252 /*
230 * percpu counter. 253 * percpu counter.
231 */ 254 */
@@ -239,6 +262,7 @@ struct mem_cgroup {
239 */ 262 */
240enum move_type { 263enum move_type {
241 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */ 264 MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */
265 MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */
242 NR_MOVE_TYPE, 266 NR_MOVE_TYPE,
243}; 267};
244 268
@@ -255,6 +279,18 @@ static struct move_charge_struct {
255 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), 279 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
256}; 280};
257 281
282static bool move_anon(void)
283{
284 return test_bit(MOVE_CHARGE_TYPE_ANON,
285 &mc.to->move_charge_at_immigrate);
286}
287
288static bool move_file(void)
289{
290 return test_bit(MOVE_CHARGE_TYPE_FILE,
291 &mc.to->move_charge_at_immigrate);
292}
293
258/* 294/*
259 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 295 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
260 * limit reclaim to prevent infinite loops, if they ever occur. 296 * limit reclaim to prevent infinite loops, if they ever occur.
@@ -282,9 +318,12 @@ enum charge_type {
282/* for encoding cft->private value on file */ 318/* for encoding cft->private value on file */
283#define _MEM (0) 319#define _MEM (0)
284#define _MEMSWAP (1) 320#define _MEMSWAP (1)
321#define _OOM_TYPE (2)
285#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val)) 322#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
286#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff) 323#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
287#define MEMFILE_ATTR(val) ((val) & 0xffff) 324#define MEMFILE_ATTR(val) ((val) & 0xffff)
325/* Used for OOM notifier */
326#define OOM_CONTROL (0)
288 327
289/* 328/*
290 * Reclaim flags for mem_cgroup_hierarchical_reclaim 329 * Reclaim flags for mem_cgroup_hierarchical_reclaim
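For reference, the cft->private encoding used by these macros packs a resource type into the high 16 bits and an attribute into the low 16, so the new OOM control value works out as:

	MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL) == (2 << 16) | 0 == 0x20000
	MEMFILE_TYPE(0x20000)  == 2	/* _OOM_TYPE   */
	MEMFILE_ATTR(0x20000)  == 0	/* OOM_CONTROL */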
@@ -1293,14 +1332,62 @@ static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
1293static DEFINE_MUTEX(memcg_oom_mutex); 1332static DEFINE_MUTEX(memcg_oom_mutex);
1294static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); 1333static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1295 1334
1335struct oom_wait_info {
1336 struct mem_cgroup *mem;
1337 wait_queue_t wait;
1338};
1339
1340static int memcg_oom_wake_function(wait_queue_t *wait,
1341 unsigned mode, int sync, void *arg)
1342{
1343 struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
1344 struct oom_wait_info *oom_wait_info;
1345
1346 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1347
1348 if (oom_wait_info->mem == wake_mem)
1349 goto wakeup;
1350 /* if no hierarchy, no match */
1351 if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
1352 return 0;
1353 /*
1354 * Both of oom_wait_info->mem and wake_mem are stable under us.
1355 * Then we can use css_is_ancestor without taking care of RCU.
1356 */
1357 if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
1358 !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
1359 return 0;
1360
1361wakeup:
1362 return autoremove_wake_function(wait, mode, sync, arg);
1363}
1364
1365static void memcg_wakeup_oom(struct mem_cgroup *mem)
1366{
1367 /* for filtering, pass "mem" as argument. */
1368 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
1369}
1370
1371static void memcg_oom_recover(struct mem_cgroup *mem)
1372{
1373 if (mem->oom_kill_disable && atomic_read(&mem->oom_lock))
1374 memcg_wakeup_oom(mem);
1375}
1376
1296/* 1377/*
1297 * try to call OOM killer. returns false if we should exit memory-reclaim loop. 1378 * try to call OOM killer. returns false if we should exit memory-reclaim loop.
1298 */ 1379 */
1299bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) 1380bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
1300{ 1381{
1301 DEFINE_WAIT(wait); 1382 struct oom_wait_info owait;
1302 bool locked; 1383 bool locked, need_to_kill;
1303 1384
1385 owait.mem = mem;
1386 owait.wait.flags = 0;
1387 owait.wait.func = memcg_oom_wake_function;
1388 owait.wait.private = current;
1389 INIT_LIST_HEAD(&owait.wait.task_list);
1390 need_to_kill = true;
1304 /* At first, try to OOM lock hierarchy under mem.*/ 1391 /* At first, try to OOM lock hierarchy under mem.*/
1305 mutex_lock(&memcg_oom_mutex); 1392 mutex_lock(&memcg_oom_mutex);
1306 locked = mem_cgroup_oom_lock(mem); 1393 locked = mem_cgroup_oom_lock(mem);
@@ -1309,32 +1396,23 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
1309 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL 1396 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1310 * under OOM is always welcomed, use TASK_KILLABLE here. 1397 * under OOM is always welcomed, use TASK_KILLABLE here.
1311 */ 1398 */
1312 if (!locked) 1399 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1313 prepare_to_wait(&memcg_oom_waitq, &wait, TASK_KILLABLE); 1400 if (!locked || mem->oom_kill_disable)
1401 need_to_kill = false;
1402 if (locked)
1403 mem_cgroup_oom_notify(mem);
1314 mutex_unlock(&memcg_oom_mutex); 1404 mutex_unlock(&memcg_oom_mutex);
1315 1405
1316 if (locked) 1406 if (need_to_kill) {
1407 finish_wait(&memcg_oom_waitq, &owait.wait);
1317 mem_cgroup_out_of_memory(mem, mask); 1408 mem_cgroup_out_of_memory(mem, mask);
1318 else { 1409 } else {
1319 schedule(); 1410 schedule();
1320 finish_wait(&memcg_oom_waitq, &wait); 1411 finish_wait(&memcg_oom_waitq, &owait.wait);
1321 } 1412 }
1322 mutex_lock(&memcg_oom_mutex); 1413 mutex_lock(&memcg_oom_mutex);
1323 mem_cgroup_oom_unlock(mem); 1414 mem_cgroup_oom_unlock(mem);
1324 /* 1415 memcg_wakeup_oom(mem);
1325 * Here, we use global waitq .....more fine grained waitq ?
1326 * Assume following hierarchy.
1327 * A/
1328 * 01
1329 * 02
1330 * assume OOM happens both in A and 01 at the same time. Tthey are
1331 * mutually exclusive by lock. (kill in 01 helps A.)
1332 * When we use per memcg waitq, we have to wake up waiters on A and 02
1333 * in addtion to waiters on 01. We use global waitq for avoiding mess.
1334 * It will not be a big problem.
1335 * (And a task may be moved to other groups while it's waiting for OOM.)
1336 */
1337 wake_up_all(&memcg_oom_waitq);
1338 mutex_unlock(&memcg_oom_mutex); 1416 mutex_unlock(&memcg_oom_mutex);
1339 1417
1340 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current)) 1418 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
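With oom_kill_disable set, the rewritten mem_cgroup_handle_oom() above parks the faulting tasks on the OOM waitqueue instead of killing, and mem_cgroup_oom_notify() signals any eventfds registered against the group, so a userspace manager can react and then restore progress (for example by raising the limit, which the later memcg_oom_recover() hunks wake up on). A rough sketch of the listening side, assuming the eventfd-based cgroup.event_control / memory.oom_control interface that accompanies this memcg work; the cgroup path is made up and error handling is omitted:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	int efd = eventfd(0, 0);
	int ofd = open("/cgroups/A/memory.oom_control", O_RDONLY);
	int cfd = open("/cgroups/A/cgroup.event_control", O_WRONLY);
	char cmd[64];
	uint64_t count;

	/* Arm the notification: "<eventfd> <fd of the control file>". */
	snprintf(cmd, sizeof(cmd), "%d %d", efd, ofd);
	write(cfd, cmd, strlen(cmd));

	/* Blocks until the group hits OOM; the counter says how many
	 * events fired since the last read. */
	read(efd, &count, sizeof(count));
	printf("memcg OOM events: %llu\n", (unsigned long long)count);
	return 0;
}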
@@ -2118,15 +2196,6 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
2118 /* If swapout, usage of swap doesn't decrease */ 2196 /* If swapout, usage of swap doesn't decrease */
2119 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) 2197 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2120 uncharge_memsw = false; 2198 uncharge_memsw = false;
2121 /*
2122 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
2123 * In those cases, all pages freed continously can be expected to be in
2124 * the same cgroup and we have chance to coalesce uncharges.
2125 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
2126 * because we want to do uncharge as soon as possible.
2127 */
2128 if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE))
2129 goto direct_uncharge;
2130 2199
2131 batch = &current->memcg_batch; 2200 batch = &current->memcg_batch;
2132 /* 2201 /*
@@ -2137,6 +2206,17 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
2137 if (!batch->memcg) 2206 if (!batch->memcg)
2138 batch->memcg = mem; 2207 batch->memcg = mem;
2139 /* 2208 /*
2209 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
2210 * In those cases, all pages freed continously can be expected to be in
2211 * the same cgroup and we have chance to coalesce uncharges.
2212 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
2213 * because we want to do uncharge as soon as possible.
2214 */
2215
2216 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2217 goto direct_uncharge;
2218
2219 /*
2140 * In typical case, batch->memcg == mem. This means we can 2220 * In typical case, batch->memcg == mem. This means we can
2141 * merge a series of uncharges to an uncharge of res_counter. 2221 * merge a series of uncharges to an uncharge of res_counter.
2142 * If not, we uncharge res_counter ony by one. 2222 * If not, we uncharge res_counter ony by one.
@@ -2152,6 +2232,8 @@ direct_uncharge:
2152 res_counter_uncharge(&mem->res, PAGE_SIZE); 2232 res_counter_uncharge(&mem->res, PAGE_SIZE);
2153 if (uncharge_memsw) 2233 if (uncharge_memsw)
2154 res_counter_uncharge(&mem->memsw, PAGE_SIZE); 2234 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
2235 if (unlikely(batch->memcg != mem))
2236 memcg_oom_recover(mem);
2155 return; 2237 return;
2156} 2238}
2157 2239
@@ -2188,7 +2270,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2188 switch (ctype) { 2270 switch (ctype) {
2189 case MEM_CGROUP_CHARGE_TYPE_MAPPED: 2271 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2190 case MEM_CGROUP_CHARGE_TYPE_DROP: 2272 case MEM_CGROUP_CHARGE_TYPE_DROP:
2191 if (page_mapped(page)) 2273 /* See mem_cgroup_prepare_migration() */
2274 if (page_mapped(page) || PageCgroupMigration(pc))
2192 goto unlock_out; 2275 goto unlock_out;
2193 break; 2276 break;
2194 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT: 2277 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
@@ -2288,6 +2371,7 @@ void mem_cgroup_uncharge_end(void)
2288 res_counter_uncharge(&batch->memcg->res, batch->bytes); 2371 res_counter_uncharge(&batch->memcg->res, batch->bytes);
2289 if (batch->memsw_bytes) 2372 if (batch->memsw_bytes)
2290 res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes); 2373 res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
2374 memcg_oom_recover(batch->memcg);
2291 /* forget this pointer (for sanity check) */ 2375 /* forget this pointer (for sanity check) */
2292 batch->memcg = NULL; 2376 batch->memcg = NULL;
2293} 2377}
@@ -2410,10 +2494,12 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2410 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old 2494 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
2411 * page belongs to. 2495 * page belongs to.
2412 */ 2496 */
2413int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) 2497int mem_cgroup_prepare_migration(struct page *page,
2498 struct page *newpage, struct mem_cgroup **ptr)
2414{ 2499{
2415 struct page_cgroup *pc; 2500 struct page_cgroup *pc;
2416 struct mem_cgroup *mem = NULL; 2501 struct mem_cgroup *mem = NULL;
2502 enum charge_type ctype;
2417 int ret = 0; 2503 int ret = 0;
2418 2504
2419 if (mem_cgroup_disabled()) 2505 if (mem_cgroup_disabled())
@@ -2424,69 +2510,125 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
2424 if (PageCgroupUsed(pc)) { 2510 if (PageCgroupUsed(pc)) {
2425 mem = pc->mem_cgroup; 2511 mem = pc->mem_cgroup;
2426 css_get(&mem->css); 2512 css_get(&mem->css);
2513 /*
2514 * At migrating an anonymous page, its mapcount goes down
2515 * to 0 and uncharge() will be called. But, even if it's fully
2516 * unmapped, migration may fail and this page has to be
2517 * charged again. We set MIGRATION flag here and delay uncharge
2518 * until end_migration() is called
2519 *
2520 * Corner Case Thinking
2521 * A)
2522 * When the old page was mapped as Anon and it's unmap-and-freed
2523 * while migration was ongoing.
2524 * If unmap finds the old page, uncharge() of it will be delayed
2525 * until end_migration(). If unmap finds a new page, it's
2526 * uncharged when it make mapcount to be 1->0. If unmap code
2527 * finds swap_migration_entry, the new page will not be mapped
2528 * and end_migration() will find it(mapcount==0).
2529 *
2530 * B)
2531 * When the old page was mapped but migration fails, the kernel
2532 * remaps it. A charge for it is kept by MIGRATION flag even
2533 * if mapcount goes down to 0. We can do remap successfully
2534 * without charging it again.
2535 *
2536 * C)
2537 * The "old" page is under lock_page() until the end of
2538 * migration, so, the old page itself will not be swapped-out.
2539 * If the new page is swapped out before end_migration, our
2540 * hook to usual swap-out path will catch the event.
2541 */
2542 if (PageAnon(page))
2543 SetPageCgroupMigration(pc);
2427 } 2544 }
2428 unlock_page_cgroup(pc); 2545 unlock_page_cgroup(pc);
2546 /*
2547 * If the page is not charged at this point,
2548 * we return here.
2549 */
2550 if (!mem)
2551 return 0;
2429 2552
2430 *ptr = mem; 2553 *ptr = mem;
2431 if (mem) { 2554 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
2432 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false); 2555 css_put(&mem->css);/* drop extra refcnt */
2433 css_put(&mem->css); 2556 if (ret || *ptr == NULL) {
2557 if (PageAnon(page)) {
2558 lock_page_cgroup(pc);
2559 ClearPageCgroupMigration(pc);
2560 unlock_page_cgroup(pc);
2561 /*
2562 * The old page may be fully unmapped while we kept it.
2563 */
2564 mem_cgroup_uncharge_page(page);
2565 }
2566 return -ENOMEM;
2434 } 2567 }
2568 /*
2569 * We charge new page before it's used/mapped. So, even if unlock_page()
2570 * is called before end_migration, we can catch all events on this new
2571 * page. In the case new page is migrated but not remapped, new page's
2572 * mapcount will be finally 0 and we call uncharge in end_migration().
2573 */
2574 pc = lookup_page_cgroup(newpage);
2575 if (PageAnon(page))
2576 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2577 else if (page_is_file_cache(page))
2578 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2579 else
2580 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2581 __mem_cgroup_commit_charge(mem, pc, ctype);
2435 return ret; 2582 return ret;
2436} 2583}
2437 2584
2438/* remove redundant charge if migration failed*/ 2585/* remove redundant charge if migration failed*/
2439void mem_cgroup_end_migration(struct mem_cgroup *mem, 2586void mem_cgroup_end_migration(struct mem_cgroup *mem,
2440 struct page *oldpage, struct page *newpage) 2587 struct page *oldpage, struct page *newpage)
2441{ 2588{
2442 struct page *target, *unused; 2589 struct page *used, *unused;
2443 struct page_cgroup *pc; 2590 struct page_cgroup *pc;
2444 enum charge_type ctype;
2445 2591
2446 if (!mem) 2592 if (!mem)
2447 return; 2593 return;
2594 /* blocks rmdir() */
2448 cgroup_exclude_rmdir(&mem->css); 2595 cgroup_exclude_rmdir(&mem->css);
2449 /* at migration success, oldpage->mapping is NULL. */ 2596 /* at migration success, oldpage->mapping is NULL. */
2450 if (oldpage->mapping) { 2597 if (oldpage->mapping) {
2451 target = oldpage; 2598 used = oldpage;
2452 unused = NULL; 2599 unused = newpage;
2453 } else { 2600 } else {
2454 target = newpage; 2601 used = newpage;
2455 unused = oldpage; 2602 unused = oldpage;
2456 } 2603 }
2457
2458 if (PageAnon(target))
2459 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2460 else if (page_is_file_cache(target))
2461 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2462 else
2463 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2464
2465 /* unused page is not on radix-tree now. */
2466 if (unused)
2467 __mem_cgroup_uncharge_common(unused, ctype);
2468
2469 pc = lookup_page_cgroup(target);
2470 /* 2604 /*
2471 * __mem_cgroup_commit_charge() check PCG_USED bit of page_cgroup. 2605 * We disallowed uncharge of pages under migration because mapcount
2472 * So, double-counting is effectively avoided. 2606 * of the page goes down to zero, temporarly.
2607 * Clear the flag and check the page should be charged.
2473 */ 2608 */
2474 __mem_cgroup_commit_charge(mem, pc, ctype); 2609 pc = lookup_page_cgroup(oldpage);
2610 lock_page_cgroup(pc);
2611 ClearPageCgroupMigration(pc);
2612 unlock_page_cgroup(pc);
2613
2614 if (unused != oldpage)
2615 pc = lookup_page_cgroup(unused);
2616 __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
2475 2617
2618 pc = lookup_page_cgroup(used);
2476 /* 2619 /*
2477 * Both of oldpage and newpage are still under lock_page(). 2620 * If a page is a file cache, radix-tree replacement is very atomic
2478 * Then, we don't have to care about race in radix-tree. 2621 * and we can skip this check. When it was an Anon page, its mapcount
2479 * But we have to be careful that this page is unmapped or not. 2622 * goes down to 0. But because we added MIGRATION flage, it's not
2480 * 2623 * uncharged yet. There are several case but page->mapcount check
2481 * There is a case for !page_mapped(). At the start of 2624 * and USED bit check in mem_cgroup_uncharge_page() will do enough
2482 * migration, oldpage was mapped. But now, it's zapped. 2625 * check. (see prepare_charge() also)
2483 * But we know *target* page is not freed/reused under us.
2484 * mem_cgroup_uncharge_page() does all necessary checks.
2485 */ 2626 */
2486 if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED) 2627 if (PageAnon(used))
2487 mem_cgroup_uncharge_page(target); 2628 mem_cgroup_uncharge_page(used);
2488 /* 2629 /*
2489 * At migration, we may charge account against cgroup which has no tasks 2630 * At migration, we may charge account against cgroup which has no
2631 * tasks.
2490 * So, rmdir()->pre_destroy() can be called while we do this charge. 2632 * So, rmdir()->pre_destroy() can be called while we do this charge.
2491 * In that case, we need to call pre_destroy() again. check it here. 2633 * In that case, we need to call pre_destroy() again. check it here.
2492 */ 2634 */
@@ -2524,10 +2666,11 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2524 unsigned long long val) 2666 unsigned long long val)
2525{ 2667{
2526 int retry_count; 2668 int retry_count;
2527 u64 memswlimit; 2669 u64 memswlimit, memlimit;
2528 int ret = 0; 2670 int ret = 0;
2529 int children = mem_cgroup_count_children(memcg); 2671 int children = mem_cgroup_count_children(memcg);
2530 u64 curusage, oldusage; 2672 u64 curusage, oldusage;
2673 int enlarge;
2531 2674
2532 /* 2675 /*
2533 * For keeping hierarchical_reclaim simple, how long we should retry 2676 * For keeping hierarchical_reclaim simple, how long we should retry
@@ -2538,6 +2681,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2538 2681
2539 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE); 2682 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2540 2683
2684 enlarge = 0;
2541 while (retry_count) { 2685 while (retry_count) {
2542 if (signal_pending(current)) { 2686 if (signal_pending(current)) {
2543 ret = -EINTR; 2687 ret = -EINTR;
@@ -2555,6 +2699,11 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2555 mutex_unlock(&set_limit_mutex); 2699 mutex_unlock(&set_limit_mutex);
2556 break; 2700 break;
2557 } 2701 }
2702
2703 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2704 if (memlimit < val)
2705 enlarge = 1;
2706
2558 ret = res_counter_set_limit(&memcg->res, val); 2707 ret = res_counter_set_limit(&memcg->res, val);
2559 if (!ret) { 2708 if (!ret) {
2560 if (memswlimit == val) 2709 if (memswlimit == val)
@@ -2576,6 +2725,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2576 else 2725 else
2577 oldusage = curusage; 2726 oldusage = curusage;
2578 } 2727 }
2728 if (!ret && enlarge)
2729 memcg_oom_recover(memcg);
2579 2730
2580 return ret; 2731 return ret;
2581} 2732}
@@ -2584,9 +2735,10 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2584 unsigned long long val) 2735 unsigned long long val)
2585{ 2736{
2586 int retry_count; 2737 int retry_count;
2587 u64 memlimit, oldusage, curusage; 2738 u64 memlimit, memswlimit, oldusage, curusage;
2588 int children = mem_cgroup_count_children(memcg); 2739 int children = mem_cgroup_count_children(memcg);
2589 int ret = -EBUSY; 2740 int ret = -EBUSY;
2741 int enlarge = 0;
2590 2742
2591 /* see mem_cgroup_resize_res_limit */ 2743 /* see mem_cgroup_resize_res_limit */
2592 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES; 2744 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
@@ -2608,6 +2760,9 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2608 mutex_unlock(&set_limit_mutex); 2760 mutex_unlock(&set_limit_mutex);
2609 break; 2761 break;
2610 } 2762 }
2763 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2764 if (memswlimit < val)
2765 enlarge = 1;
2611 ret = res_counter_set_limit(&memcg->memsw, val); 2766 ret = res_counter_set_limit(&memcg->memsw, val);
2612 if (!ret) { 2767 if (!ret) {
2613 if (memlimit == val) 2768 if (memlimit == val)
@@ -2630,6 +2785,8 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2630 else 2785 else
2631 oldusage = curusage; 2786 oldusage = curusage;
2632 } 2787 }
2788 if (!ret && enlarge)
2789 memcg_oom_recover(memcg);
2633 return ret; 2790 return ret;
2634} 2791}
2635 2792
@@ -2821,6 +2978,7 @@ move_account:
2821 if (ret) 2978 if (ret)
2822 break; 2979 break;
2823 } 2980 }
2981 memcg_oom_recover(mem);
2824 /* it seems parent cgroup doesn't have enough mem */ 2982 /* it seems parent cgroup doesn't have enough mem */
2825 if (ret == -ENOMEM) 2983 if (ret == -ENOMEM)
2826 goto try_to_free; 2984 goto try_to_free;
@@ -3311,9 +3469,9 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3311 3469
3312 rcu_read_lock(); 3470 rcu_read_lock();
3313 if (!swap) 3471 if (!swap)
3314 t = rcu_dereference(memcg->thresholds); 3472 t = rcu_dereference(memcg->thresholds.primary);
3315 else 3473 else
3316 t = rcu_dereference(memcg->memsw_thresholds); 3474 t = rcu_dereference(memcg->memsw_thresholds.primary);
3317 3475
3318 if (!t) 3476 if (!t)
3319 goto unlock; 3477 goto unlock;
@@ -3325,7 +3483,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3325 * If it's not true, a threshold was crossed after last 3483 * If it's not true, a threshold was crossed after last
3326 * call of __mem_cgroup_threshold(). 3484 * call of __mem_cgroup_threshold().
3327 */ 3485 */
3328 i = atomic_read(&t->current_threshold); 3486 i = t->current_threshold;
3329 3487
3330 /* 3488 /*
3331 * Iterate backward over array of thresholds starting from 3489 * Iterate backward over array of thresholds starting from
@@ -3349,7 +3507,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3349 eventfd_signal(t->entries[i].eventfd, 1); 3507 eventfd_signal(t->entries[i].eventfd, 1);
3350 3508
3351 /* Update current_threshold */ 3509 /* Update current_threshold */
3352 atomic_set(&t->current_threshold, i - 1); 3510 t->current_threshold = i - 1;
3353unlock: 3511unlock:
3354 rcu_read_unlock(); 3512 rcu_read_unlock();
3355} 3513}
@@ -3369,106 +3527,117 @@ static int compare_thresholds(const void *a, const void *b)
3369 return _a->threshold - _b->threshold; 3527 return _a->threshold - _b->threshold;
3370} 3528}
3371 3529
3372static int mem_cgroup_register_event(struct cgroup *cgrp, struct cftype *cft, 3530static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
3373 struct eventfd_ctx *eventfd, const char *args) 3531{
3532 struct mem_cgroup_eventfd_list *ev;
3533
3534 list_for_each_entry(ev, &mem->oom_notify, list)
3535 eventfd_signal(ev->eventfd, 1);
3536 return 0;
3537}
3538
3539static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
3540{
3541 mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_notify_cb);
3542}
3543
3544static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
3545 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3374{ 3546{
3375 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 3547 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3376 struct mem_cgroup_threshold_ary *thresholds, *thresholds_new; 3548 struct mem_cgroup_thresholds *thresholds;
3549 struct mem_cgroup_threshold_ary *new;
3377 int type = MEMFILE_TYPE(cft->private); 3550 int type = MEMFILE_TYPE(cft->private);
3378 u64 threshold, usage; 3551 u64 threshold, usage;
3379 int size; 3552 int i, size, ret;
3380 int i, ret;
3381 3553
3382 ret = res_counter_memparse_write_strategy(args, &threshold); 3554 ret = res_counter_memparse_write_strategy(args, &threshold);
3383 if (ret) 3555 if (ret)
3384 return ret; 3556 return ret;
3385 3557
3386 mutex_lock(&memcg->thresholds_lock); 3558 mutex_lock(&memcg->thresholds_lock);
3559
3387 if (type == _MEM) 3560 if (type == _MEM)
3388 thresholds = memcg->thresholds; 3561 thresholds = &memcg->thresholds;
3389 else if (type == _MEMSWAP) 3562 else if (type == _MEMSWAP)
3390 thresholds = memcg->memsw_thresholds; 3563 thresholds = &memcg->memsw_thresholds;
3391 else 3564 else
3392 BUG(); 3565 BUG();
3393 3566
3394 usage = mem_cgroup_usage(memcg, type == _MEMSWAP); 3567 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3395 3568
3396 /* Check if a threshold crossed before adding a new one */ 3569 /* Check if a threshold crossed before adding a new one */
3397 if (thresholds) 3570 if (thresholds->primary)
3398 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3571 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3399 3572
3400 if (thresholds) 3573 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3401 size = thresholds->size + 1;
3402 else
3403 size = 1;
3404 3574
3405 /* Allocate memory for new array of thresholds */ 3575 /* Allocate memory for new array of thresholds */
3406 thresholds_new = kmalloc(sizeof(*thresholds_new) + 3576 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3407 size * sizeof(struct mem_cgroup_threshold),
3408 GFP_KERNEL); 3577 GFP_KERNEL);
3409 if (!thresholds_new) { 3578 if (!new) {
3410 ret = -ENOMEM; 3579 ret = -ENOMEM;
3411 goto unlock; 3580 goto unlock;
3412 } 3581 }
3413 thresholds_new->size = size; 3582 new->size = size;
3414 3583
3415 /* Copy thresholds (if any) to new array */ 3584 /* Copy thresholds (if any) to new array */
3416 if (thresholds) 3585 if (thresholds->primary) {
3417 memcpy(thresholds_new->entries, thresholds->entries, 3586 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3418 thresholds->size *
3419 sizeof(struct mem_cgroup_threshold)); 3587 sizeof(struct mem_cgroup_threshold));
3588 }
3589
3420 /* Add new threshold */ 3590 /* Add new threshold */
3421 thresholds_new->entries[size - 1].eventfd = eventfd; 3591 new->entries[size - 1].eventfd = eventfd;
3422 thresholds_new->entries[size - 1].threshold = threshold; 3592 new->entries[size - 1].threshold = threshold;
3423 3593
3424 /* Sort thresholds. Registering of new threshold isn't time-critical */ 3594 /* Sort thresholds. Registering of new threshold isn't time-critical */
3425 sort(thresholds_new->entries, size, 3595 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3426 sizeof(struct mem_cgroup_threshold),
3427 compare_thresholds, NULL); 3596 compare_thresholds, NULL);
3428 3597
3429 /* Find current threshold */ 3598 /* Find current threshold */
3430 atomic_set(&thresholds_new->current_threshold, -1); 3599 new->current_threshold = -1;
3431 for (i = 0; i < size; i++) { 3600 for (i = 0; i < size; i++) {
3432 if (thresholds_new->entries[i].threshold < usage) { 3601 if (new->entries[i].threshold < usage) {
3433 /* 3602 /*
3434 * thresholds_new->current_threshold will not be used 3603 * new->current_threshold will not be used until
3435 * until rcu_assign_pointer(), so it's safe to increment 3604 * rcu_assign_pointer(), so it's safe to increment
3436 * it here. 3605 * it here.
3437 */ 3606 */
3438 atomic_inc(&thresholds_new->current_threshold); 3607 ++new->current_threshold;
3439 } 3608 }
3440 } 3609 }
3441 3610
3442 if (type == _MEM) 3611 /* Free old spare buffer and save old primary buffer as spare */
3443 rcu_assign_pointer(memcg->thresholds, thresholds_new); 3612 kfree(thresholds->spare);
3444 else 3613 thresholds->spare = thresholds->primary;
3445 rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new); 3614
3615 rcu_assign_pointer(thresholds->primary, new);
3446 3616
3447 /* To be sure that nobody uses thresholds before freeing it */ 3617 /* To be sure that nobody uses thresholds */
3448 synchronize_rcu(); 3618 synchronize_rcu();
3449 3619
3450 kfree(thresholds);
3451unlock: 3620unlock:
3452 mutex_unlock(&memcg->thresholds_lock); 3621 mutex_unlock(&memcg->thresholds_lock);
3453 3622
3454 return ret; 3623 return ret;
3455} 3624}
3456 3625
3457static int mem_cgroup_unregister_event(struct cgroup *cgrp, struct cftype *cft, 3626static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
3458 struct eventfd_ctx *eventfd) 3627 struct cftype *cft, struct eventfd_ctx *eventfd)
3459{ 3628{
3460 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); 3629 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3461 struct mem_cgroup_threshold_ary *thresholds, *thresholds_new; 3630 struct mem_cgroup_thresholds *thresholds;
3631 struct mem_cgroup_threshold_ary *new;
3462 int type = MEMFILE_TYPE(cft->private); 3632 int type = MEMFILE_TYPE(cft->private);
3463 u64 usage; 3633 u64 usage;
3464 int size = 0; 3634 int i, j, size;
3465 int i, j, ret;
3466 3635
3467 mutex_lock(&memcg->thresholds_lock); 3636 mutex_lock(&memcg->thresholds_lock);
3468 if (type == _MEM) 3637 if (type == _MEM)
3469 thresholds = memcg->thresholds; 3638 thresholds = &memcg->thresholds;
3470 else if (type == _MEMSWAP) 3639 else if (type == _MEMSWAP)
3471 thresholds = memcg->memsw_thresholds; 3640 thresholds = &memcg->memsw_thresholds;
3472 else 3641 else
3473 BUG(); 3642 BUG();
3474 3643
@@ -3484,59 +3653,136 @@ static int mem_cgroup_unregister_event(struct cgroup *cgrp, struct cftype *cft,
3484 __mem_cgroup_threshold(memcg, type == _MEMSWAP); 3653 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3485 3654
3486 /* Calculate new number of threshold */ 3655 /* Calculate new number of threshold */
3487 for (i = 0; i < thresholds->size; i++) { 3656 size = 0;
3488 if (thresholds->entries[i].eventfd != eventfd) 3657 for (i = 0; i < thresholds->primary->size; i++) {
3658 if (thresholds->primary->entries[i].eventfd != eventfd)
3489 size++; 3659 size++;
3490 } 3660 }
3491 3661
3662 new = thresholds->spare;
3663
3492 /* Set thresholds array to NULL if we don't have thresholds */ 3664 /* Set thresholds array to NULL if we don't have thresholds */
3493 if (!size) { 3665 if (!size) {
3494 thresholds_new = NULL; 3666 kfree(new);
3495 goto assign; 3667 new = NULL;
3668 goto swap_buffers;
3496 } 3669 }
3497 3670
3498 /* Allocate memory for new array of thresholds */ 3671 new->size = size;
3499 thresholds_new = kmalloc(sizeof(*thresholds_new) +
3500 size * sizeof(struct mem_cgroup_threshold),
3501 GFP_KERNEL);
3502 if (!thresholds_new) {
3503 ret = -ENOMEM;
3504 goto unlock;
3505 }
3506 thresholds_new->size = size;
3507 3672
3508 /* Copy thresholds and find current threshold */ 3673 /* Copy thresholds and find current threshold */
3509 atomic_set(&thresholds_new->current_threshold, -1); 3674 new->current_threshold = -1;
3510 for (i = 0, j = 0; i < thresholds->size; i++) { 3675 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3511 if (thresholds->entries[i].eventfd == eventfd) 3676 if (thresholds->primary->entries[i].eventfd == eventfd)
3512 continue; 3677 continue;
3513 3678
3514 thresholds_new->entries[j] = thresholds->entries[i]; 3679 new->entries[j] = thresholds->primary->entries[i];
3515 if (thresholds_new->entries[j].threshold < usage) { 3680 if (new->entries[j].threshold < usage) {
3516 /* 3681 /*
3517 * thresholds_new->current_threshold will not be used 3682 * new->current_threshold will not be used
3518 * until rcu_assign_pointer(), so it's safe to increment 3683 * until rcu_assign_pointer(), so it's safe to increment
3519 * it here. 3684 * it here.
3520 */ 3685 */
3521 atomic_inc(&thresholds_new->current_threshold); 3686 ++new->current_threshold;
3522 } 3687 }
3523 j++; 3688 j++;
3524 } 3689 }
3525 3690
3526assign: 3691swap_buffers:
3527 if (type == _MEM) 3692 /* Swap primary and spare array */
3528 rcu_assign_pointer(memcg->thresholds, thresholds_new); 3693 thresholds->spare = thresholds->primary;
3529 else 3694 rcu_assign_pointer(thresholds->primary, new);
3530 rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
3531 3695
3532 /* To be sure that nobody uses thresholds before freeing it */ 3696 /* To be sure that nobody uses thresholds */
3533 synchronize_rcu(); 3697 synchronize_rcu();
3534 3698
3535 kfree(thresholds);
3536unlock:
3537 mutex_unlock(&memcg->thresholds_lock); 3699 mutex_unlock(&memcg->thresholds_lock);
3700}
3538 3701
3539 return ret; 3702static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
3703 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3704{
3705 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3706 struct mem_cgroup_eventfd_list *event;
3707 int type = MEMFILE_TYPE(cft->private);
3708
3709 BUG_ON(type != _OOM_TYPE);
3710 event = kmalloc(sizeof(*event), GFP_KERNEL);
3711 if (!event)
3712 return -ENOMEM;
3713
3714 mutex_lock(&memcg_oom_mutex);
3715
3716 event->eventfd = eventfd;
3717 list_add(&event->list, &memcg->oom_notify);
3718
3719 /* already in OOM ? */
3720 if (atomic_read(&memcg->oom_lock))
3721 eventfd_signal(eventfd, 1);
3722 mutex_unlock(&memcg_oom_mutex);
3723
3724 return 0;
3725}
3726
3727static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
3728 struct cftype *cft, struct eventfd_ctx *eventfd)
3729{
3730 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3731 struct mem_cgroup_eventfd_list *ev, *tmp;
3732 int type = MEMFILE_TYPE(cft->private);
3733
3734 BUG_ON(type != _OOM_TYPE);
3735
3736 mutex_lock(&memcg_oom_mutex);
3737
3738 list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
3739 if (ev->eventfd == eventfd) {
3740 list_del(&ev->list);
3741 kfree(ev);
3742 }
3743 }
3744
3745 mutex_unlock(&memcg_oom_mutex);
3746}
3747
3748static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
3749 struct cftype *cft, struct cgroup_map_cb *cb)
3750{
3751 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3752
3753 cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
3754
3755 if (atomic_read(&mem->oom_lock))
3756 cb->fill(cb, "under_oom", 1);
3757 else
3758 cb->fill(cb, "under_oom", 0);
3759 return 0;
3760}
3761
3762/* Write handler for memory.oom_control: toggle oom_kill_disable for
3763 * this memcg's sub-hierarchy; only 0 and 1 are accepted. */
3764static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
3765 struct cftype *cft, u64 val)
3766{
3767 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3768 struct mem_cgroup *parent;
3769
3770 /* cannot set to root cgroup and only 0 and 1 are allowed */
3771 if (!cgrp->parent || !((val == 0) || (val == 1)))
3772 return -EINVAL;
3773
3774 parent = mem_cgroup_from_cont(cgrp->parent);
3775
3776 cgroup_lock();
3777 /* oom-kill-disable is a flag for subhierarchy. */
3778 if ((parent->use_hierarchy) ||
3779 (mem->use_hierarchy && !list_empty(&cgrp->children))) {
3780 cgroup_unlock();
3781 return -EINVAL;
3782 }
3783 mem->oom_kill_disable = val;
3784 cgroup_unlock();
3785 return 0;
3540} 3786}
3541 3787
3542static struct cftype mem_cgroup_files[] = { 3788static struct cftype mem_cgroup_files[] = {
@@ -3544,8 +3790,8 @@ static struct cftype mem_cgroup_files[] = {
3544 .name = "usage_in_bytes", 3790 .name = "usage_in_bytes",
3545 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), 3791 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3546 .read_u64 = mem_cgroup_read, 3792 .read_u64 = mem_cgroup_read,
3547 .register_event = mem_cgroup_register_event, 3793 .register_event = mem_cgroup_usage_register_event,
3548 .unregister_event = mem_cgroup_unregister_event, 3794 .unregister_event = mem_cgroup_usage_unregister_event,
3549 }, 3795 },
3550 { 3796 {
3551 .name = "max_usage_in_bytes", 3797 .name = "max_usage_in_bytes",
@@ -3594,6 +3840,14 @@ static struct cftype mem_cgroup_files[] = {
3594 .read_u64 = mem_cgroup_move_charge_read, 3840 .read_u64 = mem_cgroup_move_charge_read,
3595 .write_u64 = mem_cgroup_move_charge_write, 3841 .write_u64 = mem_cgroup_move_charge_write,
3596 }, 3842 },
3843 {
3844 .name = "oom_control",
3845 .read_map = mem_cgroup_oom_control_read,
3846 .write_u64 = mem_cgroup_oom_control_write,
3847 .register_event = mem_cgroup_oom_register_event,
3848 .unregister_event = mem_cgroup_oom_unregister_event,
3849 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3850 },
3597}; 3851};
3598 3852
3599#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 3853#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -3602,8 +3856,8 @@ static struct cftype memsw_cgroup_files[] = {
3602 .name = "memsw.usage_in_bytes", 3856 .name = "memsw.usage_in_bytes",
3603 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), 3857 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
3604 .read_u64 = mem_cgroup_read, 3858 .read_u64 = mem_cgroup_read,
3605 .register_event = mem_cgroup_register_event, 3859 .register_event = mem_cgroup_usage_register_event,
3606 .unregister_event = mem_cgroup_unregister_event, 3860 .unregister_event = mem_cgroup_usage_unregister_event,
3607 }, 3861 },
3608 { 3862 {
3609 .name = "memsw.max_usage_in_bytes", 3863 .name = "memsw.max_usage_in_bytes",
@@ -3831,6 +4085,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
3831 } else { 4085 } else {
3832 parent = mem_cgroup_from_cont(cont->parent); 4086 parent = mem_cgroup_from_cont(cont->parent);
3833 mem->use_hierarchy = parent->use_hierarchy; 4087 mem->use_hierarchy = parent->use_hierarchy;
4088 mem->oom_kill_disable = parent->oom_kill_disable;
3834 } 4089 }
3835 4090
3836 if (parent && parent->use_hierarchy) { 4091 if (parent && parent->use_hierarchy) {
@@ -3849,6 +4104,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
3849 } 4104 }
3850 mem->last_scanned_child = 0; 4105 mem->last_scanned_child = 0;
3851 spin_lock_init(&mem->reclaim_param_lock); 4106 spin_lock_init(&mem->reclaim_param_lock);
4107 INIT_LIST_HEAD(&mem->oom_notify);
3852 4108
3853 if (parent) 4109 if (parent)
3854 mem->swappiness = get_swappiness(parent); 4110 mem->swappiness = get_swappiness(parent);
@@ -3976,6 +4232,80 @@ enum mc_target_type {
3976 MC_TARGET_SWAP, 4232 MC_TARGET_SWAP,
3977}; 4233};
3978 4234
4235static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4236 unsigned long addr, pte_t ptent)
4237{
4238 struct page *page = vm_normal_page(vma, addr, ptent);
4239
4240 if (!page || !page_mapped(page))
4241 return NULL;
4242 if (PageAnon(page)) {
4243 /* we don't move shared anon */
4244 if (!move_anon() || page_mapcount(page) > 2)
4245 return NULL;
4246 } else if (!move_file())
4247 /* we ignore mapcount for file pages */
4248 return NULL;
4249 if (!get_page_unless_zero(page))
4250 return NULL;
4251
4252 return page;
4253}
4254
4255static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4256 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4257{
4258 int usage_count;
4259 struct page *page = NULL;
4260 swp_entry_t ent = pte_to_swp_entry(ptent);
4261
4262 if (!move_anon() || non_swap_entry(ent))
4263 return NULL;
4264 usage_count = mem_cgroup_count_swap_user(ent, &page);
4265 if (usage_count > 1) { /* we don't move shared anon */
4266 if (page)
4267 put_page(page);
4268 return NULL;
4269 }
4270 if (do_swap_account)
4271 entry->val = ent.val;
4272
4273 return page;
4274}
4275
4276static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4277 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4278{
4279 struct page *page = NULL;
4280 struct inode *inode;
4281 struct address_space *mapping;
4282 pgoff_t pgoff;
4283
4284 if (!vma->vm_file) /* anonymous vma */
4285 return NULL;
4286 if (!move_file())
4287 return NULL;
4288
4289 inode = vma->vm_file->f_path.dentry->d_inode;
4290 mapping = vma->vm_file->f_mapping;
4291 if (pte_none(ptent))
4292 pgoff = linear_page_index(vma, addr);
4293 else /* pte_file(ptent) is true */
4294 pgoff = pte_to_pgoff(ptent);
4295
4296 /* page is moved even if it's not RSS of this task (page-faulted). */
4297 if (!mapping_cap_swap_backed(mapping)) { /* normal file */
4298 page = find_get_page(mapping, pgoff);
4299 } else { /* shmem/tmpfs file. we should take account of swap too. */
4300 swp_entry_t ent;
4301 mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
4302 if (do_swap_account)
4303 entry->val = ent.val;
4304 }
4305
4306 return page;
4307}
4308
3979static int is_target_pte_for_mc(struct vm_area_struct *vma, 4309static int is_target_pte_for_mc(struct vm_area_struct *vma,
3980 unsigned long addr, pte_t ptent, union mc_target *target) 4310 unsigned long addr, pte_t ptent, union mc_target *target)
3981{ 4311{
@@ -3983,43 +4313,16 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma,
3983 struct page_cgroup *pc; 4313 struct page_cgroup *pc;
3984 int ret = 0; 4314 int ret = 0;
3985 swp_entry_t ent = { .val = 0 }; 4315 swp_entry_t ent = { .val = 0 };
3986 int usage_count = 0;
3987 bool move_anon = test_bit(MOVE_CHARGE_TYPE_ANON,
3988 &mc.to->move_charge_at_immigrate);
3989 4316
3990 if (!pte_present(ptent)) { 4317 if (pte_present(ptent))
3991 /* TODO: handle swap of shmes/tmpfs */ 4318 page = mc_handle_present_pte(vma, addr, ptent);
3992 if (pte_none(ptent) || pte_file(ptent)) 4319 else if (is_swap_pte(ptent))
3993 return 0; 4320 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
3994 else if (is_swap_pte(ptent)) { 4321 else if (pte_none(ptent) || pte_file(ptent))
3995 ent = pte_to_swp_entry(ptent); 4322 page = mc_handle_file_pte(vma, addr, ptent, &ent);
3996 if (!move_anon || non_swap_entry(ent)) 4323
3997 return 0; 4324 if (!page && !ent.val)
3998 usage_count = mem_cgroup_count_swap_user(ent, &page);
3999 }
4000 } else {
4001 page = vm_normal_page(vma, addr, ptent);
4002 if (!page || !page_mapped(page))
4003 return 0;
4004 /*
4005 * TODO: We don't move charges of file(including shmem/tmpfs)
4006 * pages for now.
4007 */
4008 if (!move_anon || !PageAnon(page))
4009 return 0;
4010 if (!get_page_unless_zero(page))
4011 return 0;
4012 usage_count = page_mapcount(page);
4013 }
4014 if (usage_count > 1) {
4015 /*
4016 * TODO: We don't move charges of shared(used by multiple
4017 * processes) pages for now.
4018 */
4019 if (page)
4020 put_page(page);
4021 return 0; 4325 return 0;
4022 }
4023 if (page) { 4326 if (page) {
4024 pc = lookup_page_cgroup(page); 4327 pc = lookup_page_cgroup(page);
4025 /* 4328 /*
@@ -4035,8 +4338,8 @@ static int is_target_pte_for_mc(struct vm_area_struct *vma,
4035 if (!ret || !target) 4338 if (!ret || !target)
4036 put_page(page); 4339 put_page(page);
4037 } 4340 }
4038 /* throught */ 4341 /* There is a swap entry and a page doesn't exist or isn't charged */
4039 if (ent.val && do_swap_account && !ret && 4342 if (ent.val && !ret &&
4040 css_id(&mc.from->css) == lookup_swap_cgroup(ent)) { 4343 css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
4041 ret = MC_TARGET_SWAP; 4344 ret = MC_TARGET_SWAP;
4042 if (target) 4345 if (target)
@@ -4077,9 +4380,6 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4077 }; 4380 };
4078 if (is_vm_hugetlb_page(vma)) 4381 if (is_vm_hugetlb_page(vma))
4079 continue; 4382 continue;
4080 /* TODO: We don't move charges of shmem/tmpfs pages for now. */
4081 if (vma->vm_flags & VM_SHARED)
4082 continue;
4083 walk_page_range(vma->vm_start, vma->vm_end, 4383 walk_page_range(vma->vm_start, vma->vm_end,
4084 &mem_cgroup_count_precharge_walk); 4384 &mem_cgroup_count_precharge_walk);
4085 } 4385 }
@@ -4102,6 +4402,7 @@ static void mem_cgroup_clear_mc(void)
4102 if (mc.precharge) { 4402 if (mc.precharge) {
4103 __mem_cgroup_cancel_charge(mc.to, mc.precharge); 4403 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
4104 mc.precharge = 0; 4404 mc.precharge = 0;
4405 memcg_oom_recover(mc.to);
4105 } 4406 }
4106 /* 4407 /*
4107 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so 4408 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
@@ -4110,6 +4411,7 @@ static void mem_cgroup_clear_mc(void)
4110 if (mc.moved_charge) { 4411 if (mc.moved_charge) {
4111 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge); 4412 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
4112 mc.moved_charge = 0; 4413 mc.moved_charge = 0;
4414 memcg_oom_recover(mc.from);
4113 } 4415 }
4114 /* we must fixup refcnts and charges */ 4416 /* we must fixup refcnts and charges */
4115 if (mc.moved_swap) { 4417 if (mc.moved_swap) {
@@ -4274,9 +4576,6 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
4274 }; 4576 };
4275 if (is_vm_hugetlb_page(vma)) 4577 if (is_vm_hugetlb_page(vma))
4276 continue; 4578 continue;
4277 /* TODO: We don't move charges of shmem/tmpfs pages for now. */
4278 if (vma->vm_flags & VM_SHARED)
4279 continue;
4280 ret = walk_page_range(vma->vm_start, vma->vm_end, 4579 ret = walk_page_range(vma->vm_start, vma->vm_end,
4281 &mem_cgroup_move_charge_walk); 4580 &mem_cgroup_move_charge_walk);
4282 if (ret) 4581 if (ret)
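
The memcontrol.c hunks above split the old register/unregister handlers into usage-threshold and OOM variants and add the new memory.oom_control file with eventfd-based OOM notification. As a rough orientation only, here is a minimal userspace sketch (not part of the patch) of how such a notification would be armed through the cgroup v1 cgroup.event_control interface; the mount point and cgroup name "/sys/fs/cgroup/memory/demo" are illustrative assumptions.

/* Sketch: wait for an OOM notification from one memory cgroup. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	char buf[64];
	uint64_t count;
	int efd = eventfd(0, 0);   /* counter the kernel bumps via eventfd_signal() */
	int cfd = open("/sys/fs/cgroup/memory/demo/memory.oom_control", O_RDONLY);
	int ecfd = open("/sys/fs/cgroup/memory/demo/cgroup.event_control", O_WRONLY);

	if (efd < 0 || cfd < 0 || ecfd < 0)
		return 1;

	/* "<eventfd> <fd of memory.oom_control>" is routed to mem_cgroup_oom_register_event() */
	snprintf(buf, sizeof(buf), "%d %d", efd, cfd);
	if (write(ecfd, buf, strlen(buf)) < 0)
		return 1;

	/* blocks until mem_cgroup_oom_notify() signals the eventfd */
	if (read(efd, &count, sizeof(count)) == sizeof(count))
		printf("memcg hit OOM %llu time(s)\n", (unsigned long long)count);
	return 0;
}

The usage thresholds handled by mem_cgroup_usage_register_event() are armed the same way, with a threshold appended to the write, e.g. "<eventfd> <fd of memory.usage_in_bytes> 50M".
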
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 75751012c552..5d6fb339de03 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2098,7 +2098,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2098 /* contextualize the tmpfs mount point mempolicy */ 2098 /* contextualize the tmpfs mount point mempolicy */
2099 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); 2099 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2100 if (IS_ERR(new)) 2100 if (IS_ERR(new))
2101 goto put_free; /* no valid nodemask intersection */ 2101 goto free_scratch; /* no valid nodemask intersection */
2102 2102
2103 task_lock(current); 2103 task_lock(current);
2104 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); 2104 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
@@ -2114,6 +2114,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2114 2114
2115put_free: 2115put_free:
2116 mpol_put(new); /* drop initial ref */ 2116 mpol_put(new); /* drop initial ref */
2117free_scratch:
2117 NODEMASK_SCRATCH_FREE(scratch); 2118 NODEMASK_SCRATCH_FREE(scratch);
2118 } 2119 }
2119} 2120}
diff --git a/mm/migrate.c b/mm/migrate.c
index 09e2471afa0f..4205b1d6049e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -590,7 +590,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
590 } 590 }
591 591
592 /* charge against new page */ 592 /* charge against new page */
593 charge = mem_cgroup_prepare_migration(page, &mem); 593 charge = mem_cgroup_prepare_migration(page, newpage, &mem);
594 if (charge == -ENOMEM) { 594 if (charge == -ENOMEM) {
595 rc = -ENOMEM; 595 rc = -ENOMEM;
596 goto unlock; 596 goto unlock;
diff --git a/mm/nommu.c b/mm/nommu.c
index 63fa17d121f0..b76f3ee0abe0 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -918,14 +918,6 @@ static int validate_mmap_request(struct file *file,
918 if (!(capabilities & BDI_CAP_MAP_DIRECT)) 918 if (!(capabilities & BDI_CAP_MAP_DIRECT))
919 return -ENODEV; 919 return -ENODEV;
920 920
921 if (((prot & PROT_READ) && !(capabilities & BDI_CAP_READ_MAP)) ||
922 ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
923 ((prot & PROT_EXEC) && !(capabilities & BDI_CAP_EXEC_MAP))
924 ) {
925 printk("MAP_SHARED not completely supported on !MMU\n");
926 return -EINVAL;
927 }
928
929 /* we mustn't privatise shared mappings */ 921 /* we mustn't privatise shared mappings */
930 capabilities &= ~BDI_CAP_MAP_COPY; 922 capabilities &= ~BDI_CAP_MAP_COPY;
931 } 923 }
@@ -941,6 +933,20 @@ static int validate_mmap_request(struct file *file,
941 capabilities &= ~BDI_CAP_MAP_DIRECT; 933 capabilities &= ~BDI_CAP_MAP_DIRECT;
942 } 934 }
943 935
936 if (capabilities & BDI_CAP_MAP_DIRECT) {
937 if (((prot & PROT_READ) && !(capabilities & BDI_CAP_READ_MAP)) ||
938 ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
939 ((prot & PROT_EXEC) && !(capabilities & BDI_CAP_EXEC_MAP))
940 ) {
941 capabilities &= ~BDI_CAP_MAP_DIRECT;
942 if (flags & MAP_SHARED) {
943 printk(KERN_WARNING
944 "MAP_SHARED not completely supported on !MMU\n");
945 return -EINVAL;
946 }
947 }
948 }
949
944 /* handle executable mappings and implied executable 950 /* handle executable mappings and implied executable
945 * mappings */ 951 * mappings */
946 if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) { 952 if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
@@ -996,22 +1002,20 @@ static unsigned long determine_vm_flags(struct file *file,
996 unsigned long vm_flags; 1002 unsigned long vm_flags;
997 1003
998 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags); 1004 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
999 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1000 /* vm_flags |= mm->def_flags; */ 1005 /* vm_flags |= mm->def_flags; */
1001 1006
1002 if (!(capabilities & BDI_CAP_MAP_DIRECT)) { 1007 if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
1003 /* attempt to share read-only copies of mapped file chunks */ 1008 /* attempt to share read-only copies of mapped file chunks */
1009 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1004 if (file && !(prot & PROT_WRITE)) 1010 if (file && !(prot & PROT_WRITE))
1005 vm_flags |= VM_MAYSHARE; 1011 vm_flags |= VM_MAYSHARE;
1006 } 1012 } else {
1007 else {
1008 /* overlay a shareable mapping on the backing device or inode 1013 /* overlay a shareable mapping on the backing device or inode
1009 * if possible - used for chardevs, ramfs/tmpfs/shmfs and 1014 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
1010 * romfs/cramfs */ 1015 * romfs/cramfs */
1016 vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
1011 if (flags & MAP_SHARED) 1017 if (flags & MAP_SHARED)
1012 vm_flags |= VM_MAYSHARE | VM_SHARED; 1018 vm_flags |= VM_SHARED;
1013 else if ((((vm_flags & capabilities) ^ vm_flags) & BDI_CAP_VMFLAGS) == 0)
1014 vm_flags |= VM_MAYSHARE;
1015 } 1019 }
1016 1020
1017 /* refuse to let anyone share private mappings with this process if 1021 /* refuse to let anyone share private mappings with this process if
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b68e802a7a7d..709aedfaa014 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -479,12 +479,9 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
479 read_lock(&tasklist_lock); 479 read_lock(&tasklist_lock);
480retry: 480retry:
481 p = select_bad_process(&points, mem); 481 p = select_bad_process(&points, mem);
482 if (PTR_ERR(p) == -1UL) 482 if (!p || PTR_ERR(p) == -1UL)
483 goto out; 483 goto out;
484 484
485 if (!p)
486 p = current;
487
488 if (oom_kill_process(p, gfp_mask, 0, points, mem, 485 if (oom_kill_process(p, gfp_mask, 0, points, mem,
489 "Memory cgroup out of memory")) 486 "Memory cgroup out of memory"))
490 goto retry; 487 goto retry;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 08b349931ebc..431214b941ac 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,22 @@
57#include <asm/div64.h> 57#include <asm/div64.h>
58#include "internal.h" 58#include "internal.h"
59 59
60#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
61DEFINE_PER_CPU(int, numa_node);
62EXPORT_PER_CPU_SYMBOL(numa_node);
63#endif
64
65#ifdef CONFIG_HAVE_MEMORYLESS_NODES
66/*
67 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
68 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
69 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
70 * defined in <linux/topology.h>.
71 */
72DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
73EXPORT_PER_CPU_SYMBOL(_numa_mem_);
74#endif
75
60/* 76/*
61 * Array of node states. 77 * Array of node states.
62 */ 78 */
@@ -2856,6 +2872,24 @@ static void build_zonelist_cache(pg_data_t *pgdat)
2856 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z); 2872 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2857} 2873}
2858 2874
2875#ifdef CONFIG_HAVE_MEMORYLESS_NODES
2876/*
2877 * Return node id of node used for "local" allocations.
2878 * I.e., first node id of first zone in arg node's generic zonelist.
2879 * Used for initializing percpu 'numa_mem', which is used primarily
2880 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
2881 */
2882int local_memory_node(int node)
2883{
2884 struct zone *zone;
2885
2886 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
2887 gfp_zone(GFP_KERNEL),
2888 NULL,
2889 &zone);
2890 return zone->node;
2891}
2892#endif
2859 2893
2860#else /* CONFIG_NUMA */ 2894#else /* CONFIG_NUMA */
2861 2895
@@ -2970,9 +3004,23 @@ static __init_refok int __build_all_zonelists(void *data)
2970 * needs the percpu allocator in order to allocate its pagesets 3004 * needs the percpu allocator in order to allocate its pagesets
2971 * (a chicken-egg dilemma). 3005 * (a chicken-egg dilemma).
2972 */ 3006 */
2973 for_each_possible_cpu(cpu) 3007 for_each_possible_cpu(cpu) {
2974 setup_pageset(&per_cpu(boot_pageset, cpu), 0); 3008 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
2975 3009
3010#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3011 /*
3012 * We now know the "local memory node" for each node--
3013 * i.e., the node of the first zone in the generic zonelist.
3014 * Set up numa_mem percpu variable for on-line cpus. During
3015 * boot, only the boot cpu should be on-line; we'll init the
3016 * secondary cpus' numa_mem as they come on-line. During
3017 * node/memory hotplug, we'll fixup all on-line cpus.
3018 */
3019 if (cpu_online(cpu))
3020 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3021#endif
3022 }
3023
2976 return 0; 3024 return 0;
2977} 3025}
2978 3026
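
The numa_mem/_numa_mem_ percpu variables introduced above give memoryless-node platforms a nearby node that actually has memory; the conversion of slab from numa_node_id()/cpu_to_node() to numa_mem_id()/cpu_to_mem() later in this diff is the main consumer. Below is a hedged sketch of the intended calling convention, using only the accessors referenced in these hunks; kzalloc_node() and the helper names are illustrative, not part of the patch.

/* Sketch: pick an allocation node that is known to have memory. */
static void *alloc_near_cpu(int cpu, size_t size, gfp_t gfp)
{
	int nid = cpu_to_mem(cpu);	/* expected to fall back to cpu_to_node()
					 * when CONFIG_HAVE_MEMORYLESS_NODES is off */

	return kzalloc_node(size, gfp, nid);
}

/* On the hot path, numa_mem_id() takes the role numa_node_id() used to play. */
static void *alloc_local(size_t size, gfp_t gfp)
{
	return kzalloc_node(size, gfp, numa_mem_id());
}

When a secondary CPU comes online, the mapping would be refreshed with set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))), as __build_all_zonelists() does above for the CPUs that are online at boot.
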
diff --git a/mm/shmem.c b/mm/shmem.c
index 4ef9797bd430..7e5030ae18ff 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -727,10 +727,11 @@ done2:
727 if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) { 727 if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
728 /* 728 /*
729 * Call truncate_inode_pages again: racing shmem_unuse_inode 729 * Call truncate_inode_pages again: racing shmem_unuse_inode
730 * may have swizzled a page in from swap since vmtruncate or 730 * may have swizzled a page in from swap since
731 * generic_delete_inode did it, before we lowered next_index. 731 * truncate_pagecache or generic_delete_inode did it, before we
732 * Also, though shmem_getpage checks i_size before adding to 732 * lowered next_index. Also, though shmem_getpage checks
733 * cache, no recheck after: so fix the narrow window there too. 733 * i_size before adding to cache, no recheck after: so fix the
734 * narrow window there too.
734 * 735 *
735 * Recalling truncate_inode_pages_range and unmap_mapping_range 736 * Recalling truncate_inode_pages_range and unmap_mapping_range
736 * every time for punch_hole (which never got a chance to clear 737 * every time for punch_hole (which never got a chance to clear
@@ -760,19 +761,16 @@ done2:
760 } 761 }
761} 762}
762 763
763static void shmem_truncate(struct inode *inode)
764{
765 shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
766}
767
768static int shmem_notify_change(struct dentry *dentry, struct iattr *attr) 764static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
769{ 765{
770 struct inode *inode = dentry->d_inode; 766 struct inode *inode = dentry->d_inode;
771 struct page *page = NULL;
772 int error; 767 int error;
773 768
774 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 769 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
775 if (attr->ia_size < inode->i_size) { 770 loff_t newsize = attr->ia_size;
771 struct page *page = NULL;
772
773 if (newsize < inode->i_size) {
776 /* 774 /*
777 * If truncating down to a partial page, then 775 * If truncating down to a partial page, then
778 * if that page is already allocated, hold it 776 * if that page is already allocated, hold it
@@ -780,9 +778,9 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
780 * truncate_partial_page cannnot miss it were 778 * truncate_partial_page cannnot miss it were
781 * it assigned to swap. 779 * it assigned to swap.
782 */ 780 */
783 if (attr->ia_size & (PAGE_CACHE_SIZE-1)) { 781 if (newsize & (PAGE_CACHE_SIZE-1)) {
784 (void) shmem_getpage(inode, 782 (void) shmem_getpage(inode,
785 attr->ia_size>>PAGE_CACHE_SHIFT, 783 newsize >> PAGE_CACHE_SHIFT,
786 &page, SGP_READ, NULL); 784 &page, SGP_READ, NULL);
787 if (page) 785 if (page)
788 unlock_page(page); 786 unlock_page(page);
@@ -794,24 +792,29 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
794 * if it's being fully truncated to zero-length: the 792 * if it's being fully truncated to zero-length: the
795 * nrpages check is efficient enough in that case. 793 * nrpages check is efficient enough in that case.
796 */ 794 */
797 if (attr->ia_size) { 795 if (newsize) {
798 struct shmem_inode_info *info = SHMEM_I(inode); 796 struct shmem_inode_info *info = SHMEM_I(inode);
799 spin_lock(&info->lock); 797 spin_lock(&info->lock);
800 info->flags &= ~SHMEM_PAGEIN; 798 info->flags &= ~SHMEM_PAGEIN;
801 spin_unlock(&info->lock); 799 spin_unlock(&info->lock);
802 } 800 }
803 } 801 }
802
803 error = simple_setsize(inode, newsize);
804 if (page)
805 page_cache_release(page);
806 if (error)
807 return error;
808 shmem_truncate_range(inode, newsize, (loff_t)-1);
804 } 809 }
805 810
806 error = inode_change_ok(inode, attr); 811 error = inode_change_ok(inode, attr);
807 if (!error) 812 if (!error)
808 error = inode_setattr(inode, attr); 813 generic_setattr(inode, attr);
809#ifdef CONFIG_TMPFS_POSIX_ACL 814#ifdef CONFIG_TMPFS_POSIX_ACL
810 if (!error && (attr->ia_valid & ATTR_MODE)) 815 if (!error && (attr->ia_valid & ATTR_MODE))
811 error = generic_acl_chmod(inode); 816 error = generic_acl_chmod(inode);
812#endif 817#endif
813 if (page)
814 page_cache_release(page);
815 return error; 818 return error;
816} 819}
817 820
@@ -819,11 +822,11 @@ static void shmem_delete_inode(struct inode *inode)
819{ 822{
820 struct shmem_inode_info *info = SHMEM_I(inode); 823 struct shmem_inode_info *info = SHMEM_I(inode);
821 824
822 if (inode->i_op->truncate == shmem_truncate) { 825 if (inode->i_mapping->a_ops == &shmem_aops) {
823 truncate_inode_pages(inode->i_mapping, 0); 826 truncate_inode_pages(inode->i_mapping, 0);
824 shmem_unacct_size(info->flags, inode->i_size); 827 shmem_unacct_size(info->flags, inode->i_size);
825 inode->i_size = 0; 828 inode->i_size = 0;
826 shmem_truncate(inode); 829 shmem_truncate_range(inode, 0, (loff_t)-1);
827 if (!list_empty(&info->swaplist)) { 830 if (!list_empty(&info->swaplist)) {
828 mutex_lock(&shmem_swaplist_mutex); 831 mutex_lock(&shmem_swaplist_mutex);
829 list_del_init(&info->swaplist); 832 list_del_init(&info->swaplist);
@@ -2022,7 +2025,6 @@ static const struct inode_operations shmem_symlink_inline_operations = {
2022}; 2025};
2023 2026
2024static const struct inode_operations shmem_symlink_inode_operations = { 2027static const struct inode_operations shmem_symlink_inode_operations = {
2025 .truncate = shmem_truncate,
2026 .readlink = generic_readlink, 2028 .readlink = generic_readlink,
2027 .follow_link = shmem_follow_link, 2029 .follow_link = shmem_follow_link,
2028 .put_link = shmem_put_link, 2030 .put_link = shmem_put_link,
@@ -2433,14 +2435,13 @@ static const struct file_operations shmem_file_operations = {
2433 .write = do_sync_write, 2435 .write = do_sync_write,
2434 .aio_read = shmem_file_aio_read, 2436 .aio_read = shmem_file_aio_read,
2435 .aio_write = generic_file_aio_write, 2437 .aio_write = generic_file_aio_write,
2436 .fsync = simple_sync_file, 2438 .fsync = noop_fsync,
2437 .splice_read = generic_file_splice_read, 2439 .splice_read = generic_file_splice_read,
2438 .splice_write = generic_file_splice_write, 2440 .splice_write = generic_file_splice_write,
2439#endif 2441#endif
2440}; 2442};
2441 2443
2442static const struct inode_operations shmem_inode_operations = { 2444static const struct inode_operations shmem_inode_operations = {
2443 .truncate = shmem_truncate,
2444 .setattr = shmem_notify_change, 2445 .setattr = shmem_notify_change,
2445 .truncate_range = shmem_truncate_range, 2446 .truncate_range = shmem_truncate_range,
2446#ifdef CONFIG_TMPFS_POSIX_ACL 2447#ifdef CONFIG_TMPFS_POSIX_ACL
@@ -2559,6 +2560,45 @@ out4:
2559 return error; 2560 return error;
2560} 2561}
2561 2562
2563#ifdef CONFIG_CGROUP_MEM_RES_CTLR
2564/**
2565 * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
2566 * @inode: the inode to be searched
2567 * @pgoff: the offset to be searched
2568 * @pagep: the pointer for the found page to be stored
2569 * @ent: the pointer for the found swap entry to be stored
2570 *
2571 * If a page is found, its refcount is incremented. Callers should handle
2572 * this refcount.
2573 */
2574void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
2575 struct page **pagep, swp_entry_t *ent)
2576{
2577 swp_entry_t entry = { .val = 0 }, *ptr;
2578 struct page *page = NULL;
2579 struct shmem_inode_info *info = SHMEM_I(inode);
2580
2581 if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
2582 goto out;
2583
2584 spin_lock(&info->lock);
2585 ptr = shmem_swp_entry(info, pgoff, NULL);
2586#ifdef CONFIG_SWAP
2587 if (ptr && ptr->val) {
2588 entry.val = ptr->val;
2589 page = find_get_page(&swapper_space, entry.val);
2590 } else
2591#endif
2592 page = find_get_page(inode->i_mapping, pgoff);
2593 if (ptr)
2594 shmem_swp_unmap(ptr);
2595 spin_unlock(&info->lock);
2596out:
2597 *pagep = page;
2598 *ent = entry;
2599}
2600#endif
2601
2562#else /* !CONFIG_SHMEM */ 2602#else /* !CONFIG_SHMEM */
2563 2603
2564/* 2604/*
@@ -2598,6 +2638,31 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
2598 return 0; 2638 return 0;
2599} 2639}
2600 2640
2641#ifdef CONFIG_CGROUP_MEM_RES_CTLR
2642/**
2643 * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
2644 * @inode: the inode to be searched
2645 * @pgoff: the offset to be searched
2646 * @pagep: the pointer for the found page to be stored
2647 * @ent: the pointer for the found swap entry to be stored
2648 *
2649 * If a page is found, its refcount is incremented. Callers should handle
2650 * this refcount.
2651 */
2652void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
2653 struct page **pagep, swp_entry_t *ent)
2654{
2655 struct page *page = NULL;
2656
2657 if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
2658 goto out;
2659 page = find_get_page(inode->i_mapping, pgoff);
2660out:
2661 *pagep = page;
2662 *ent = (swp_entry_t){ .val = 0 };
2663}
2664#endif
2665
2601#define shmem_vm_ops generic_file_vm_ops 2666#define shmem_vm_ops generic_file_vm_ops
2602#define shmem_file_operations ramfs_file_operations 2667#define shmem_file_operations ramfs_file_operations
2603#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) 2668#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
diff --git a/mm/slab.c b/mm/slab.c
index 02786e1a32d2..e49f8f46f46d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -821,7 +821,7 @@ static void init_reap_node(int cpu)
821{ 821{
822 int node; 822 int node;
823 823
824 node = next_node(cpu_to_node(cpu), node_online_map); 824 node = next_node(cpu_to_mem(cpu), node_online_map);
825 if (node == MAX_NUMNODES) 825 if (node == MAX_NUMNODES)
826 node = first_node(node_online_map); 826 node = first_node(node_online_map);
827 827
@@ -1050,7 +1050,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1050 struct array_cache *alien = NULL; 1050 struct array_cache *alien = NULL;
1051 int node; 1051 int node;
1052 1052
1053 node = numa_node_id(); 1053 node = numa_mem_id();
1054 1054
1055 /* 1055 /*
1056 * Make sure we are not freeing a object from another node to the array 1056 * Make sure we are not freeing a object from another node to the array
@@ -1129,7 +1129,7 @@ static void __cpuinit cpuup_canceled(long cpu)
1129{ 1129{
1130 struct kmem_cache *cachep; 1130 struct kmem_cache *cachep;
1131 struct kmem_list3 *l3 = NULL; 1131 struct kmem_list3 *l3 = NULL;
1132 int node = cpu_to_node(cpu); 1132 int node = cpu_to_mem(cpu);
1133 const struct cpumask *mask = cpumask_of_node(node); 1133 const struct cpumask *mask = cpumask_of_node(node);
1134 1134
1135 list_for_each_entry(cachep, &cache_chain, next) { 1135 list_for_each_entry(cachep, &cache_chain, next) {
@@ -1194,7 +1194,7 @@ static int __cpuinit cpuup_prepare(long cpu)
1194{ 1194{
1195 struct kmem_cache *cachep; 1195 struct kmem_cache *cachep;
1196 struct kmem_list3 *l3 = NULL; 1196 struct kmem_list3 *l3 = NULL;
1197 int node = cpu_to_node(cpu); 1197 int node = cpu_to_mem(cpu);
1198 int err; 1198 int err;
1199 1199
1200 /* 1200 /*
@@ -1321,7 +1321,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1321 mutex_unlock(&cache_chain_mutex); 1321 mutex_unlock(&cache_chain_mutex);
1322 break; 1322 break;
1323 } 1323 }
1324 return err ? NOTIFY_BAD : NOTIFY_OK; 1324 return notifier_from_errno(err);
1325} 1325}
1326 1326
1327static struct notifier_block __cpuinitdata cpucache_notifier = { 1327static struct notifier_block __cpuinitdata cpucache_notifier = {
@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
1479 * 6) Resize the head arrays of the kmalloc caches to their final sizes. 1479 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1480 */ 1480 */
1481 1481
1482 node = numa_node_id(); 1482 node = numa_mem_id();
1483 1483
1484 /* 1) create the cache_cache */ 1484 /* 1) create the cache_cache */
1485 INIT_LIST_HEAD(&cache_chain); 1485 INIT_LIST_HEAD(&cache_chain);
@@ -2121,7 +2121,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
2121 } 2121 }
2122 } 2122 }
2123 } 2123 }
2124 cachep->nodelists[numa_node_id()]->next_reap = 2124 cachep->nodelists[numa_mem_id()]->next_reap =
2125 jiffies + REAPTIMEOUT_LIST3 + 2125 jiffies + REAPTIMEOUT_LIST3 +
2126 ((unsigned long)cachep) % REAPTIMEOUT_LIST3; 2126 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2127 2127
@@ -2452,7 +2452,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
2452{ 2452{
2453#ifdef CONFIG_SMP 2453#ifdef CONFIG_SMP
2454 check_irq_off(); 2454 check_irq_off();
2455 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); 2455 assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock);
2456#endif 2456#endif
2457} 2457}
2458 2458
@@ -2479,7 +2479,7 @@ static void do_drain(void *arg)
2479{ 2479{
2480 struct kmem_cache *cachep = arg; 2480 struct kmem_cache *cachep = arg;
2481 struct array_cache *ac; 2481 struct array_cache *ac;
2482 int node = numa_node_id(); 2482 int node = numa_mem_id();
2483 2483
2484 check_irq_off(); 2484 check_irq_off();
2485 ac = cpu_cache_get(cachep); 2485 ac = cpu_cache_get(cachep);
@@ -3012,7 +3012,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
3012 3012
3013retry: 3013retry:
3014 check_irq_off(); 3014 check_irq_off();
3015 node = numa_node_id(); 3015 node = numa_mem_id();
3016 ac = cpu_cache_get(cachep); 3016 ac = cpu_cache_get(cachep);
3017 batchcount = ac->batchcount; 3017 batchcount = ac->batchcount;
3018 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { 3018 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
@@ -3216,10 +3216,10 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3216 3216
3217 if (in_interrupt() || (flags & __GFP_THISNODE)) 3217 if (in_interrupt() || (flags & __GFP_THISNODE))
3218 return NULL; 3218 return NULL;
3219 nid_alloc = nid_here = numa_node_id(); 3219 nid_alloc = nid_here = numa_mem_id();
3220 get_mems_allowed(); 3220 get_mems_allowed();
3221 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) 3221 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3222 nid_alloc = cpuset_mem_spread_node(); 3222 nid_alloc = cpuset_slab_spread_node();
3223 else if (current->mempolicy) 3223 else if (current->mempolicy)
3224 nid_alloc = slab_node(current->mempolicy); 3224 nid_alloc = slab_node(current->mempolicy);
3225 put_mems_allowed(); 3225 put_mems_allowed();
@@ -3281,7 +3281,7 @@ retry:
3281 if (local_flags & __GFP_WAIT) 3281 if (local_flags & __GFP_WAIT)
3282 local_irq_enable(); 3282 local_irq_enable();
3283 kmem_flagcheck(cache, flags); 3283 kmem_flagcheck(cache, flags);
3284 obj = kmem_getpages(cache, local_flags, numa_node_id()); 3284 obj = kmem_getpages(cache, local_flags, numa_mem_id());
3285 if (local_flags & __GFP_WAIT) 3285 if (local_flags & __GFP_WAIT)
3286 local_irq_disable(); 3286 local_irq_disable();
3287 if (obj) { 3287 if (obj) {
@@ -3389,6 +3389,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3389{ 3389{
3390 unsigned long save_flags; 3390 unsigned long save_flags;
3391 void *ptr; 3391 void *ptr;
3392 int slab_node = numa_mem_id();
3392 3393
3393 flags &= gfp_allowed_mask; 3394 flags &= gfp_allowed_mask;
3394 3395
@@ -3401,7 +3402,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3401 local_irq_save(save_flags); 3402 local_irq_save(save_flags);
3402 3403
3403 if (nodeid == -1) 3404 if (nodeid == -1)
3404 nodeid = numa_node_id(); 3405 nodeid = slab_node;
3405 3406
3406 if (unlikely(!cachep->nodelists[nodeid])) { 3407 if (unlikely(!cachep->nodelists[nodeid])) {
3407 /* Node not bootstrapped yet */ 3408 /* Node not bootstrapped yet */
@@ -3409,7 +3410,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3409 goto out; 3410 goto out;
3410 } 3411 }
3411 3412
3412 if (nodeid == numa_node_id()) { 3413 if (nodeid == slab_node) {
3413 /* 3414 /*
3414 * Use the locally cached objects if possible. 3415 * Use the locally cached objects if possible.
3415 * However ____cache_alloc does not allow fallback 3416 * However ____cache_alloc does not allow fallback
@@ -3453,8 +3454,8 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3453 * We may just have run out of memory on the local node. 3454 * We may just have run out of memory on the local node.
3454 * ____cache_alloc_node() knows how to locate memory on other nodes 3455 * ____cache_alloc_node() knows how to locate memory on other nodes
3455 */ 3456 */
3456 if (!objp) 3457 if (!objp)
3457 objp = ____cache_alloc_node(cache, flags, numa_node_id()); 3458 objp = ____cache_alloc_node(cache, flags, numa_mem_id());
3458 3459
3459 out: 3460 out:
3460 return objp; 3461 return objp;
@@ -3551,7 +3552,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3551{ 3552{
3552 int batchcount; 3553 int batchcount;
3553 struct kmem_list3 *l3; 3554 struct kmem_list3 *l3;
3554 int node = numa_node_id(); 3555 int node = numa_mem_id();
3555 3556
3556 batchcount = ac->batchcount; 3557 batchcount = ac->batchcount;
3557#if DEBUG 3558#if DEBUG
@@ -3985,7 +3986,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3985 return -ENOMEM; 3986 return -ENOMEM;
3986 3987
3987 for_each_online_cpu(i) { 3988 for_each_online_cpu(i) {
3988 new->new[i] = alloc_arraycache(cpu_to_node(i), limit, 3989 new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
3989 batchcount, gfp); 3990 batchcount, gfp);
3990 if (!new->new[i]) { 3991 if (!new->new[i]) {
3991 for (i--; i >= 0; i--) 3992 for (i--; i >= 0; i--)
@@ -4007,9 +4008,9 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
4007 struct array_cache *ccold = new->new[i]; 4008 struct array_cache *ccold = new->new[i];
4008 if (!ccold) 4009 if (!ccold)
4009 continue; 4010 continue;
4010 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 4011 spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
4011 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); 4012 free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
4012 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); 4013 spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
4013 kfree(ccold); 4014 kfree(ccold);
4014 } 4015 }
4015 kfree(new); 4016 kfree(new);
@@ -4115,7 +4116,7 @@ static void cache_reap(struct work_struct *w)
4115{ 4116{
4116 struct kmem_cache *searchp; 4117 struct kmem_cache *searchp;
4117 struct kmem_list3 *l3; 4118 struct kmem_list3 *l3;
4118 int node = numa_node_id(); 4119 int node = numa_mem_id();
4119 struct delayed_work *work = to_delayed_work(w); 4120 struct delayed_work *work = to_delayed_work(w);
4120 4121
4121 if (!mutex_trylock(&cache_chain_mutex)) 4122 if (!mutex_trylock(&cache_chain_mutex))
diff --git a/mm/slub.c b/mm/slub.c
index 26f0cb9cc584..578f68f3c51f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2137,7 +2137,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
2137 2137
2138 for_each_node_state(node, N_NORMAL_MEMORY) { 2138 for_each_node_state(node, N_NORMAL_MEMORY) {
2139 struct kmem_cache_node *n = s->node[node]; 2139 struct kmem_cache_node *n = s->node[node];
2140 if (n && n != &s->local_node) 2140 if (n)
2141 kmem_cache_free(kmalloc_caches, n); 2141 kmem_cache_free(kmalloc_caches, n);
2142 s->node[node] = NULL; 2142 s->node[node] = NULL;
2143 } 2143 }
@@ -2146,33 +2146,22 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
2146static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 2146static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2147{ 2147{
2148 int node; 2148 int node;
2149 int local_node;
2150
2151 if (slab_state >= UP && (s < kmalloc_caches ||
2152 s >= kmalloc_caches + KMALLOC_CACHES))
2153 local_node = page_to_nid(virt_to_page(s));
2154 else
2155 local_node = 0;
2156 2149
2157 for_each_node_state(node, N_NORMAL_MEMORY) { 2150 for_each_node_state(node, N_NORMAL_MEMORY) {
2158 struct kmem_cache_node *n; 2151 struct kmem_cache_node *n;
2159 2152
2160 if (local_node == node) 2153 if (slab_state == DOWN) {
2161 n = &s->local_node; 2154 early_kmem_cache_node_alloc(gfpflags, node);
2162 else { 2155 continue;
2163 if (slab_state == DOWN) { 2156 }
2164 early_kmem_cache_node_alloc(gfpflags, node); 2157 n = kmem_cache_alloc_node(kmalloc_caches,
2165 continue; 2158 gfpflags, node);
2166 }
2167 n = kmem_cache_alloc_node(kmalloc_caches,
2168 gfpflags, node);
2169
2170 if (!n) {
2171 free_kmem_cache_nodes(s);
2172 return 0;
2173 }
2174 2159
2160 if (!n) {
2161 free_kmem_cache_nodes(s);
2162 return 0;
2175 } 2163 }
2164
2176 s->node[node] = n; 2165 s->node[node] = n;
2177 init_kmem_cache_node(n, s); 2166 init_kmem_cache_node(n, s);
2178 } 2167 }
diff --git a/mm/swap.c b/mm/swap.c
index 7cd60bf0a972..3ce7bc373a52 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -224,6 +224,7 @@ void __lru_cache_add(struct page *page, enum lru_list lru)
224 ____pagevec_lru_add(pvec, lru); 224 ____pagevec_lru_add(pvec, lru);
225 put_cpu_var(lru_add_pvecs); 225 put_cpu_var(lru_add_pvecs);
226} 226}
227EXPORT_SYMBOL(__lru_cache_add);
227 228
228/** 229/**
229 * lru_cache_add_lru - add a page to a page list 230 * lru_cache_add_lru - add a page to a page list
diff --git a/mm/truncate.c b/mm/truncate.c
index f42675a3615d..937571b8b233 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -548,18 +548,18 @@ EXPORT_SYMBOL(truncate_pagecache);
548 * NOTE! We have to be ready to update the memory sharing 548 * NOTE! We have to be ready to update the memory sharing
549 * between the file and the memory map for a potential last 549 * between the file and the memory map for a potential last
550 * incomplete page. Ugly, but necessary. 550 * incomplete page. Ugly, but necessary.
551 *
552 * This function is deprecated and simple_setsize or truncate_pagecache
553 * should be used instead.
551 */ 554 */
552int vmtruncate(struct inode *inode, loff_t offset) 555int vmtruncate(struct inode *inode, loff_t offset)
553{ 556{
554 loff_t oldsize;
555 int error; 557 int error;
556 558
557 error = inode_newsize_ok(inode, offset); 559 error = simple_setsize(inode, offset);
558 if (error) 560 if (error)
559 return error; 561 return error;
560 oldsize = inode->i_size; 562
561 i_size_write(inode, offset);
562 truncate_pagecache(inode, oldsize, offset);
563 if (inode->i_op->truncate) 563 if (inode->i_op->truncate)
564 inode->i_op->truncate(inode); 564 inode->i_op->truncate(inode);
565 565
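
The shmem and vmtruncate hunks above move callers onto simple_setsize(), which validates the new size, updates i_size and shoots down the page cache, leaving any on-disk work to the filesystem. A hedged sketch of the resulting setattr shape for a filesystem that trims its own blocks; myfs_setattr() and myfs_truncate_blocks() are made-up names, only inode_change_ok(), simple_setsize() and generic_setattr() come from the code above.

static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		/* update i_size and truncate the page cache */
		error = simple_setsize(inode, attr->ia_size);
		if (error)
			return error;
		myfs_truncate_blocks(inode, attr->ia_size);	/* hypothetical helper */
	}

	generic_setattr(inode, attr);
	return 0;
}
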
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e0097531417a..f5b6f43a4c2e 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -229,15 +229,17 @@ EXPORT_SYMBOL(skb_free_datagram);
229 229
230void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb) 230void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
231{ 231{
232 bool slow;
233
232 if (likely(atomic_read(&skb->users) == 1)) 234 if (likely(atomic_read(&skb->users) == 1))
233 smp_rmb(); 235 smp_rmb();
234 else if (likely(!atomic_dec_and_test(&skb->users))) 236 else if (likely(!atomic_dec_and_test(&skb->users)))
235 return; 237 return;
236 238
237 lock_sock_bh(sk); 239 slow = lock_sock_fast(sk);
238 skb_orphan(skb); 240 skb_orphan(skb);
239 sk_mem_reclaim_partial(sk); 241 sk_mem_reclaim_partial(sk);
240 unlock_sock_bh(sk); 242 unlock_sock_fast(sk, slow);
241 243
242 /* skb is now orphaned, can be freed outside of locked section */ 244 /* skb is now orphaned, can be freed outside of locked section */
243 __kfree_skb(skb); 245 __kfree_skb(skb);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index cf208d8042b1..ad41529fb60f 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -172,12 +172,12 @@ out:
172 return; 172 return;
173} 173}
174 174
175static void trace_kfree_skb_hit(struct sk_buff *skb, void *location) 175static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
176{ 176{
177 trace_drop_common(skb, location); 177 trace_drop_common(skb, location);
178} 178}
179 179
180static void trace_napi_poll_hit(struct napi_struct *napi) 180static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi)
181{ 181{
182 struct dm_hw_stat_delta *new_stat; 182 struct dm_hw_stat_delta *new_stat;
183 183
@@ -225,12 +225,12 @@ static int set_all_monitor_traces(int state)
225 225
226 switch (state) { 226 switch (state) {
227 case TRACE_ON: 227 case TRACE_ON:
228 rc |= register_trace_kfree_skb(trace_kfree_skb_hit); 228 rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
229 rc |= register_trace_napi_poll(trace_napi_poll_hit); 229 rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
230 break; 230 break;
231 case TRACE_OFF: 231 case TRACE_OFF:
232 rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit); 232 rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
233 rc |= unregister_trace_napi_poll(trace_napi_poll_hit); 233 rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);
234 234
235 tracepoint_synchronize_unregister(); 235 tracepoint_synchronize_unregister();
236 236
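
The drop_monitor conversion above reflects the tracepoint API change merged here: every probe now receives the registration-time data pointer as its first argument, and the register/unregister helpers take that pointer as a second parameter so one probe can be attached several times with different state. Below is a minimal kernel-module-style sketch of the new convention, not a standalone program; my_drop_count, my_kfree_skb_probe, my_start and my_stop are illustrative names, while register_trace_kfree_skb/unregister_trace_kfree_skb are the calls used in the hunk above.

static unsigned long my_drop_count;

/* Probes gain a leading void * carrying the cookie given at registration. */
static void my_kfree_skb_probe(void *data, struct sk_buff *skb, void *location)
{
	unsigned long *drops = data;

	(*drops)++;
}

static int my_start(void)
{
	/* The second argument is the new per-probe data pointer (NULL is allowed). */
	return register_trace_kfree_skb(my_kfree_skb_probe, &my_drop_count);
}

static void my_stop(void)
{
	/* Unregistration must pass the same probe/data pair. */
	unregister_trace_kfree_skb(my_kfree_skb_probe, &my_drop_count);
	tracepoint_synchronize_unregister();	/* wait for in-flight probe calls */
}
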
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index bff37908bd55..6ba1c0eece03 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -934,6 +934,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
934 kfree_skb(buff); 934 kfree_skb(buff);
935 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); 935 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
936 } 936 }
937 skb_dst_force(skb);
937 __skb_queue_tail(&neigh->arp_queue, skb); 938 __skb_queue_tail(&neigh->arp_queue, skb);
938 } 939 }
939 rc = 1; 940 rc = 1;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 7ab86f3a1ea4..1a2af24e9e3d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -650,11 +650,12 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev)
650 if (dev->dev.parent && dev_is_pci(dev->dev.parent)) { 650 if (dev->dev.parent && dev_is_pci(dev->dev.parent)) {
651 651
652 int num_vfs = dev_num_vf(dev->dev.parent); 652 int num_vfs = dev_num_vf(dev->dev.parent);
653 size_t size = nlmsg_total_size(sizeof(struct nlattr)); 653 size_t size = nla_total_size(sizeof(struct nlattr));
654 size += nlmsg_total_size(num_vfs * sizeof(struct nlattr)); 654 size += nla_total_size(num_vfs * sizeof(struct nlattr));
655 size += num_vfs * (sizeof(struct ifla_vf_mac) + 655 size += num_vfs *
656 sizeof(struct ifla_vf_vlan) + 656 (nla_total_size(sizeof(struct ifla_vf_mac)) +
657 sizeof(struct ifla_vf_tx_rate)); 657 nla_total_size(sizeof(struct ifla_vf_vlan)) +
658 nla_total_size(sizeof(struct ifla_vf_tx_rate)));
658 return size; 659 return size;
659 } else 660 } else
660 return 0; 661 return 0;
@@ -722,14 +723,13 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
722 723
723 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) { 724 for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
724 vf_port = nla_nest_start(skb, IFLA_VF_PORT); 725 vf_port = nla_nest_start(skb, IFLA_VF_PORT);
725 if (!vf_port) { 726 if (!vf_port)
726 nla_nest_cancel(skb, vf_ports); 727 goto nla_put_failure;
727 return -EMSGSIZE;
728 }
729 NLA_PUT_U32(skb, IFLA_PORT_VF, vf); 728 NLA_PUT_U32(skb, IFLA_PORT_VF, vf);
730 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); 729 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
730 if (err == -EMSGSIZE)
731 goto nla_put_failure;
731 if (err) { 732 if (err) {
732nla_put_failure:
733 nla_nest_cancel(skb, vf_port); 733 nla_nest_cancel(skb, vf_port);
734 continue; 734 continue;
735 } 735 }
@@ -739,6 +739,10 @@ nla_put_failure:
739 nla_nest_end(skb, vf_ports); 739 nla_nest_end(skb, vf_ports);
740 740
741 return 0; 741 return 0;
742
743nla_put_failure:
744 nla_nest_cancel(skb, vf_ports);
745 return -EMSGSIZE;
742} 746}
743 747
744static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) 748static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
@@ -753,7 +757,7 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
753 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb); 757 err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
754 if (err) { 758 if (err) {
755 nla_nest_cancel(skb, port_self); 759 nla_nest_cancel(skb, port_self);
756 return err; 760 return (err == -EMSGSIZE) ? err : 0;
757 } 761 }
758 762
759 nla_nest_end(skb, port_self); 763 nla_nest_end(skb, port_self);
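
The sizing fix above matters because the two helpers pad differently: nlmsg_total_size() reserves space for a full 16-byte netlink message header, while per-attribute cost is the 4-byte struct nlattr header plus the payload rounded up to 4-byte alignment, which is what nla_total_size() computes; the old calculation also added bare sizeof() values with no attribute headers at all. A standalone sketch of the per-attribute arithmetic follows; the struct layout and alignment constants mirror my reading of the netlink uapi headers and are reproduced here only for illustration.

#include <stdio.h>

/* Mirrors the netlink attribute header: 2-byte length + 2-byte type. */
struct nlattr { unsigned short nla_len; unsigned short nla_type; };

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	((int) NLA_ALIGN(sizeof(struct nlattr)))

/* Space one attribute with 'payload' bytes of data occupies in a message. */
static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* A 6-byte payload costs 4 (header) + 6, padded up to 12 bytes. */
	printf("%d\n", nla_total_size(6));
	return 0;
}
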
diff --git a/net/core/sock.c b/net/core/sock.c
index 37fe9b6adade..2cf7f9f7e775 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2007,6 +2007,39 @@ void release_sock(struct sock *sk)
2007} 2007}
2008EXPORT_SYMBOL(release_sock); 2008EXPORT_SYMBOL(release_sock);
2009 2009
2010/**
2011 * lock_sock_fast - fast version of lock_sock
2012 * @sk: socket
2013 *
2014 * This version should be used for very small sections, where the process won't block.
2015 * Returns false if the fast path was taken:
2016 * sk_lock.slock locked, owned = 0, BH disabled
2017 * Returns true if the slow path was taken:
2018 * sk_lock.slock unlocked, owned = 1, BH enabled
2019 */
2020bool lock_sock_fast(struct sock *sk)
2021{
2022 might_sleep();
2023 spin_lock_bh(&sk->sk_lock.slock);
2024
2025 if (!sk->sk_lock.owned)
2026 /*
2027 * Note : We must disable BH
2028 */
2029 return false;
2030
2031 __lock_sock(sk);
2032 sk->sk_lock.owned = 1;
2033 spin_unlock(&sk->sk_lock.slock);
2034 /*
2035 * The sk_lock has mutex_lock() semantics here:
2036 */
2037 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2038 local_bh_enable();
2039 return true;
2040}
2041EXPORT_SYMBOL(lock_sock_fast);
2042
2010int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) 2043int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2011{ 2044{
2012 struct timeval tv; 2045 struct timeval tv;
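
lock_sock_fast() above returns which path it took, and the callers converted in this series hand that flag straight to unlock_sock_fast(). The unlock side is not part of this hunk, so the sketch below is inferred from the fast/slow contract documented in the comment rather than copied from the patch: on the slow path the socket is owned exactly as after lock_sock(), on the fast path only the spinlock is held with BH disabled.

static inline void unlock_sock_fast(struct sock *sk, bool slow)
{
	if (slow)
		release_sock(sk);	/* slow path: lock is owned, release it as after lock_sock() */
	else
		spin_unlock_bh(&sk->sk_lock.slock);	/* fast path: undo spin_lock_bh() only */
}
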
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 45889103b3e2..856123fe32f9 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1911,7 +1911,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
1911 struct rtattr *mp_head; 1911 struct rtattr *mp_head;
1912 1912
1913 /* If cache is unresolved, don't try to parse IIF and OIF */ 1913 /* If cache is unresolved, don't try to parse IIF and OIF */
1914 if (c->mfc_parent > MAXVIFS) 1914 if (c->mfc_parent >= MAXVIFS)
1915 return -ENOENT; 1915 return -ENOENT;
1916 1916
1917 if (VIF_EXISTS(mrt, c->mfc_parent)) 1917 if (VIF_EXISTS(mrt, c->mfc_parent))
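
The one-character change above is an off-by-one fix: mfc_parent indexes a table of MAXVIFS entries, so any value greater than or equal to MAXVIFS is out of range, and the out-of-range sentinel used for unresolved cache entries must be rejected before VIF_EXISTS() touches the table. A standalone illustration of why the comparison has to be ">=" (the MAXVIFS value here is made up for the example):

#include <stdio.h>

#define MAXVIFS 32	/* illustrative table size */

static int vif_lookup(const int table[MAXVIFS], unsigned short parent)
{
	/*
	 * Valid indices are 0 .. MAXVIFS-1.  With "> MAXVIFS" the value
	 * parent == MAXVIFS would pass the check and read one element
	 * past the end of the table; ">=" rejects it.
	 */
	if (parent >= MAXVIFS)
		return -1;
	return table[parent];
}

int main(void)
{
	int table[MAXVIFS] = { 0 };

	printf("%d\n", vif_lookup(table, MAXVIFS));	/* -1: out of range */
	printf("%d\n", vif_lookup(table, 0));		/* 0: in range */
	return 0;
}
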
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index baeec29fe0f1..58585748bdac 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk)
1063 spin_unlock_bh(&rcvq->lock); 1063 spin_unlock_bh(&rcvq->lock);
1064 1064
1065 if (!skb_queue_empty(&list_kill)) { 1065 if (!skb_queue_empty(&list_kill)) {
1066 lock_sock_bh(sk); 1066 bool slow = lock_sock_fast(sk);
1067
1067 __skb_queue_purge(&list_kill); 1068 __skb_queue_purge(&list_kill);
1068 sk_mem_reclaim_partial(sk); 1069 sk_mem_reclaim_partial(sk);
1069 unlock_sock_bh(sk); 1070 unlock_sock_fast(sk, slow);
1070 } 1071 }
1071 return res; 1072 return res;
1072} 1073}
@@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1123 int peeked; 1124 int peeked;
1124 int err; 1125 int err;
1125 int is_udplite = IS_UDPLITE(sk); 1126 int is_udplite = IS_UDPLITE(sk);
1127 bool slow;
1126 1128
1127 /* 1129 /*
1128 * Check any passed addresses 1130 * Check any passed addresses
@@ -1197,10 +1199,10 @@ out:
1197 return err; 1199 return err;
1198 1200
1199csum_copy_err: 1201csum_copy_err:
1200 lock_sock_bh(sk); 1202 slow = lock_sock_fast(sk);
1201 if (!skb_kill_datagram(sk, skb, flags)) 1203 if (!skb_kill_datagram(sk, skb, flags))
1202 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 1204 UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1203 unlock_sock_bh(sk); 1205 unlock_sock_fast(sk, slow);
1204 1206
1205 if (noblock) 1207 if (noblock)
1206 return -EAGAIN; 1208 return -EAGAIN;
@@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb)
1625 1627
1626void udp_destroy_sock(struct sock *sk) 1628void udp_destroy_sock(struct sock *sk)
1627{ 1629{
1628 lock_sock_bh(sk); 1630 bool slow = lock_sock_fast(sk);
1629 udp_flush_pending_frames(sk); 1631 udp_flush_pending_frames(sk);
1630 unlock_sock_bh(sk); 1632 unlock_sock_fast(sk, slow);
1631} 1633}
1632 1634
1633/* 1635/*
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cd963f64e27c..89425af0684c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -507,7 +507,7 @@ int ip6_forward(struct sk_buff *skb)
507 if (mtu < IPV6_MIN_MTU) 507 if (mtu < IPV6_MIN_MTU)
508 mtu = IPV6_MIN_MTU; 508 mtu = IPV6_MIN_MTU;
509 509
510 if (skb->len > mtu) { 510 if (skb->len > mtu && !skb_is_gso(skb)) {
511 /* Again, force OUTPUT device used as source address */ 511 /* Again, force OUTPUT device used as source address */
512 skb->dev = dst->dev; 512 skb->dev = dst->dev;
513 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 513 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index bd9e7d3e9c8e..073071f2b75b 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2017,7 +2017,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2017 struct rtattr *mp_head; 2017 struct rtattr *mp_head;
2018 2018
2019 /* If cache is unresolved, don't try to parse IIF and OIF */ 2019 /* If cache is unresolved, don't try to parse IIF and OIF */
2020 if (c->mf6c_parent > MAXMIFS) 2020 if (c->mf6c_parent >= MAXMIFS)
2021 return -ENOENT; 2021 return -ENOENT;
2022 2022
2023 if (MIF_EXISTS(mrt, c->mf6c_parent)) 2023 if (MIF_EXISTS(mrt, c->mf6c_parent))
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3d7a2c0b836a..87be58673b55 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -328,6 +328,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
328 int err; 328 int err;
329 int is_udplite = IS_UDPLITE(sk); 329 int is_udplite = IS_UDPLITE(sk);
330 int is_udp4; 330 int is_udp4;
331 bool slow;
331 332
332 if (addr_len) 333 if (addr_len)
333 *addr_len=sizeof(struct sockaddr_in6); 334 *addr_len=sizeof(struct sockaddr_in6);
@@ -424,7 +425,7 @@ out:
424 return err; 425 return err;
425 426
426csum_copy_err: 427csum_copy_err:
427 lock_sock_bh(sk); 428 slow = lock_sock_fast(sk);
428 if (!skb_kill_datagram(sk, skb, flags)) { 429 if (!skb_kill_datagram(sk, skb, flags)) {
429 if (is_udp4) 430 if (is_udp4)
430 UDP_INC_STATS_USER(sock_net(sk), 431 UDP_INC_STATS_USER(sock_net(sk),
@@ -433,7 +434,7 @@ csum_copy_err:
433 UDP6_INC_STATS_USER(sock_net(sk), 434 UDP6_INC_STATS_USER(sock_net(sk),
434 UDP_MIB_INERRORS, is_udplite); 435 UDP_MIB_INERRORS, is_udplite);
435 } 436 }
436 unlock_sock_bh(sk); 437 unlock_sock_fast(sk, slow);
437 438
438 if (flags & MSG_DONTWAIT) 439 if (flags & MSG_DONTWAIT)
439 return -EAGAIN; 440 return -EAGAIN;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c8b4599a752e..9637e45744fa 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1619,7 +1619,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
1619save_message: 1619save_message:
1620 save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); 1620 save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
1621 if (!save_msg) 1621 if (!save_msg)
1622 return; 1622 goto out_unlock;
1623 save_msg->path = path; 1623 save_msg->path = path;
1624 save_msg->msg = *msg; 1624 save_msg->msg = *msg;
1625 1625
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index fd8b28361a64..f28ad2cc8428 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -632,13 +632,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
632 iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), 632 iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
633 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); 633 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
634 if (!iucv_irq_data[cpu]) 634 if (!iucv_irq_data[cpu])
635 return NOTIFY_BAD; 635 return notifier_from_errno(-ENOMEM);
636
636 iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), 637 iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
637 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); 638 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
638 if (!iucv_param[cpu]) { 639 if (!iucv_param[cpu]) {
639 kfree(iucv_irq_data[cpu]); 640 kfree(iucv_irq_data[cpu]);
640 iucv_irq_data[cpu] = NULL; 641 iucv_irq_data[cpu] = NULL;
641 return NOTIFY_BAD; 642 return notifier_from_errno(-ENOMEM);
642 } 643 }
643 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), 644 iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
644 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); 645 GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
@@ -647,7 +648,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
647 iucv_param[cpu] = NULL; 648 iucv_param[cpu] = NULL;
648 kfree(iucv_irq_data[cpu]); 649 kfree(iucv_irq_data[cpu]);
649 iucv_irq_data[cpu] = NULL; 650 iucv_irq_data[cpu] = NULL;
650 return NOTIFY_BAD; 651 return notifier_from_errno(-ENOMEM);
651 } 652 }
652 break; 653 break;
653 case CPU_UP_CANCELED: 654 case CPU_UP_CANCELED:
@@ -677,7 +678,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
677 cpu_clear(cpu, cpumask); 678 cpu_clear(cpu, cpumask);
678 if (cpus_empty(cpumask)) 679 if (cpus_empty(cpumask))
679 /* Can't offline last IUCV enabled cpu. */ 680 /* Can't offline last IUCV enabled cpu. */
680 return NOTIFY_BAD; 681 return notifier_from_errno(-EINVAL);
681 smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1); 682 smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
682 if (cpus_empty(iucv_irq_cpumask)) 683 if (cpus_empty(iucv_irq_cpumask))
683 smp_call_function_single(first_cpu(iucv_buffer_cpumask), 684 smp_call_function_single(first_cpu(iucv_buffer_cpumask),
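
Returning notifier_from_errno(-ENOMEM) instead of a bare NOTIFY_BAD lets the hotplug notifier chain carry a real errno back to the caller, which notifier_to_errno() can later recover. The standalone sketch below illustrates that round trip; the constants and exact encoding mirror my reading of include/linux/notifier.h and should be treated as an illustration, not the authoritative definitions.

#include <stdio.h>
#include <errno.h>

#define NOTIFY_OK		0x0001
#define NOTIFY_STOP_MASK	0x8000

/* Pack a -Exxx value into a notifier return code that also stops the chain. */
static int notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
	return NOTIFY_OK;
}

/* Recover the (negative) errno from a notifier return code. */
static int notifier_to_errno(int ret)
{
	ret &= ~NOTIFY_STOP_MASK;
	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}

int main(void)
{
	int ret = notifier_from_errno(-ENOMEM);

	printf("errno carried through the chain: %d\n", notifier_to_errno(ret));	/* -ENOMEM */
	printf("success maps back to: %d\n", notifier_to_errno(notifier_from_errno(0)));	/* 0 */
	return 0;
}
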
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index d7920d9f49e9..859d9fd429c8 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -76,7 +76,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
76 if (ip_route_output_key(net, &rt, &fl) != 0) 76 if (ip_route_output_key(net, &rt, &fl) != 0)
77 return false; 77 return false;
78 78
79 dst_release(skb_dst(skb)); 79 skb_dst_drop(skb);
80 skb_dst_set(skb, &rt->u.dst); 80 skb_dst_set(skb, &rt->u.dst);
81 skb->dev = rt->u.dst.dev; 81 skb->dev = rt->u.dst.dev;
82 skb->protocol = htons(ETH_P_IP); 82 skb->protocol = htons(ETH_P_IP);
@@ -157,7 +157,7 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
157 if (dst == NULL) 157 if (dst == NULL)
158 return false; 158 return false;
159 159
160 dst_release(skb_dst(skb)); 160 skb_dst_drop(skb);
161 skb_dst_set(skb, dst); 161 skb_dst_set(skb, dst);
162 skb->dev = dst->dev; 162 skb->dev = dst->dev;
163 skb->protocol = htons(ETH_P_IPV6); 163 skb->protocol = htons(ETH_P_IPV6);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index b7cd8cccbe72..2a9675136c68 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2293,6 +2293,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2293 struct sockaddr *addr = args->dstaddr; 2293 struct sockaddr *addr = args->dstaddr;
2294 struct rpc_xprt *xprt; 2294 struct rpc_xprt *xprt;
2295 struct sock_xprt *transport; 2295 struct sock_xprt *transport;
2296 struct rpc_xprt *ret;
2296 2297
2297 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries); 2298 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
2298 if (IS_ERR(xprt)) 2299 if (IS_ERR(xprt))
@@ -2330,8 +2331,8 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2330 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6); 2331 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
2331 break; 2332 break;
2332 default: 2333 default:
2333 kfree(xprt); 2334 ret = ERR_PTR(-EAFNOSUPPORT);
2334 return ERR_PTR(-EAFNOSUPPORT); 2335 goto out_err;
2335 } 2336 }
2336 2337
2337 if (xprt_bound(xprt)) 2338 if (xprt_bound(xprt))
@@ -2346,10 +2347,11 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2346 2347
2347 if (try_module_get(THIS_MODULE)) 2348 if (try_module_get(THIS_MODULE))
2348 return xprt; 2349 return xprt;
2349 2350 ret = ERR_PTR(-EINVAL);
2351out_err:
2350 kfree(xprt->slot); 2352 kfree(xprt->slot);
2351 kfree(xprt); 2353 kfree(xprt);
2352 return ERR_PTR(-EINVAL); 2354 return ret;
2353} 2355}
2354 2356
2355static const struct rpc_timeout xs_tcp_default_timeout = { 2357static const struct rpc_timeout xs_tcp_default_timeout = {
@@ -2368,6 +2370,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2368 struct sockaddr *addr = args->dstaddr; 2370 struct sockaddr *addr = args->dstaddr;
2369 struct rpc_xprt *xprt; 2371 struct rpc_xprt *xprt;
2370 struct sock_xprt *transport; 2372 struct sock_xprt *transport;
2373 struct rpc_xprt *ret;
2371 2374
2372 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); 2375 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
2373 if (IS_ERR(xprt)) 2376 if (IS_ERR(xprt))
@@ -2403,8 +2406,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2403 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); 2406 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
2404 break; 2407 break;
2405 default: 2408 default:
2406 kfree(xprt); 2409 ret = ERR_PTR(-EAFNOSUPPORT);
2407 return ERR_PTR(-EAFNOSUPPORT); 2410 goto out_err;
2408 } 2411 }
2409 2412
2410 if (xprt_bound(xprt)) 2413 if (xprt_bound(xprt))
@@ -2420,10 +2423,11 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2420 2423
2421 if (try_module_get(THIS_MODULE)) 2424 if (try_module_get(THIS_MODULE))
2422 return xprt; 2425 return xprt;
2423 2426 ret = ERR_PTR(-EINVAL);
2427out_err:
2424 kfree(xprt->slot); 2428 kfree(xprt->slot);
2425 kfree(xprt); 2429 kfree(xprt);
2426 return ERR_PTR(-EINVAL); 2430 return ret;
2427} 2431}
2428 2432
2429/** 2433/**
@@ -2437,6 +2441,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
2437 struct rpc_xprt *xprt; 2441 struct rpc_xprt *xprt;
2438 struct sock_xprt *transport; 2442 struct sock_xprt *transport;
2439 struct svc_sock *bc_sock; 2443 struct svc_sock *bc_sock;
2444 struct rpc_xprt *ret;
2440 2445
2441 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries); 2446 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
2442 if (IS_ERR(xprt)) 2447 if (IS_ERR(xprt))
@@ -2476,8 +2481,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
2476 RPCBIND_NETID_TCP6); 2481 RPCBIND_NETID_TCP6);
2477 break; 2482 break;
2478 default: 2483 default:
2479 kfree(xprt); 2484 ret = ERR_PTR(-EAFNOSUPPORT);
2480 return ERR_PTR(-EAFNOSUPPORT); 2485 goto out_err;
2481 } 2486 }
2482 2487
2483 if (xprt_bound(xprt)) 2488 if (xprt_bound(xprt))
@@ -2499,9 +2504,11 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
2499 2504
2500 if (try_module_get(THIS_MODULE)) 2505 if (try_module_get(THIS_MODULE))
2501 return xprt; 2506 return xprt;
2507 ret = ERR_PTR(-EINVAL);
2508out_err:
2502 kfree(xprt->slot); 2509 kfree(xprt->slot);
2503 kfree(xprt); 2510 kfree(xprt);
2504 return ERR_PTR(-EINVAL); 2511 return ret;
2505} 2512}
2506 2513
2507static struct xprt_class xs_udp_transport = { 2514static struct xprt_class xs_udp_transport = {
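
The three xs_setup_*() conversions above are the same mechanical change: instead of freeing the transport in every failure branch, set a return value and jump to a single out_err label that performs the cleanup once. A small standalone sketch of this single-exit idiom follows; the names and resources are illustrative, not taken from the sunrpc code.

#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

/* Allocate two resources; on any failure, unwind through one exit label. */
static int setup(int fail_step, void **out_a, void **out_b)
{
	int ret;
	void *a = NULL, *b = NULL;

	a = malloc(64);
	if (!a) {
		ret = -ENOMEM;
		goto out_err;
	}
	b = malloc(64);
	if (!b || fail_step == 2) {
		ret = b ? -EINVAL : -ENOMEM;
		goto out_err;
	}
	*out_a = a;
	*out_b = b;
	return 0;		/* success: ownership passes to the caller */

out_err:
	free(b);		/* one cleanup path for every failure */
	free(a);
	return ret;
}

int main(void)
{
	void *a, *b;

	printf("forced failure -> %d\n", setup(2, &a, &b));	/* -EINVAL */
	if (setup(0, &a, &b) == 0) {
		free(a);
		free(b);
	}
	return 0;
}
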
diff --git a/samples/tracepoints/tp-samples-trace.h b/samples/tracepoints/tp-samples-trace.h
index dffdc49878af..4d46be965961 100644
--- a/samples/tracepoints/tp-samples-trace.h
+++ b/samples/tracepoints/tp-samples-trace.h
@@ -7,7 +7,5 @@
7DECLARE_TRACE(subsys_event, 7DECLARE_TRACE(subsys_event,
8 TP_PROTO(struct inode *inode, struct file *file), 8 TP_PROTO(struct inode *inode, struct file *file),
9 TP_ARGS(inode, file)); 9 TP_ARGS(inode, file));
10DECLARE_TRACE(subsys_eventb, 10DECLARE_TRACE_NOARGS(subsys_eventb);
11 TP_PROTO(void),
12 TP_ARGS());
13#endif 11#endif
diff --git a/samples/tracepoints/tracepoint-probe-sample.c b/samples/tracepoints/tracepoint-probe-sample.c
index 9e60eb6ca2d8..744c0b9652a7 100644
--- a/samples/tracepoints/tracepoint-probe-sample.c
+++ b/samples/tracepoints/tracepoint-probe-sample.c
@@ -13,7 +13,8 @@
13 * Here the caller only guarantees locking for struct file and struct inode. 13 * Here the caller only guarantees locking for struct file and struct inode.
14 * Locking must therefore be done in the probe to use the dentry. 14 * Locking must therefore be done in the probe to use the dentry.
15 */ 15 */
16static void probe_subsys_event(struct inode *inode, struct file *file) 16static void probe_subsys_event(void *ignore,
17 struct inode *inode, struct file *file)
17{ 18{
18 path_get(&file->f_path); 19 path_get(&file->f_path);
19 dget(file->f_path.dentry); 20 dget(file->f_path.dentry);
@@ -23,7 +24,7 @@ static void probe_subsys_event(struct inode *inode, struct file *file)
23 path_put(&file->f_path); 24 path_put(&file->f_path);
24} 25}
25 26
26static void probe_subsys_eventb(void) 27static void probe_subsys_eventb(void *ignore)
27{ 28{
28 printk(KERN_INFO "Event B is encountered\n"); 29 printk(KERN_INFO "Event B is encountered\n");
29} 30}
@@ -32,9 +33,9 @@ static int __init tp_sample_trace_init(void)
32{ 33{
33 int ret; 34 int ret;
34 35
35 ret = register_trace_subsys_event(probe_subsys_event); 36 ret = register_trace_subsys_event(probe_subsys_event, NULL);
36 WARN_ON(ret); 37 WARN_ON(ret);
37 ret = register_trace_subsys_eventb(probe_subsys_eventb); 38 ret = register_trace_subsys_eventb(probe_subsys_eventb, NULL);
38 WARN_ON(ret); 39 WARN_ON(ret);
39 40
40 return 0; 41 return 0;
@@ -44,8 +45,8 @@ module_init(tp_sample_trace_init);
44 45
45static void __exit tp_sample_trace_exit(void) 46static void __exit tp_sample_trace_exit(void)
46{ 47{
47 unregister_trace_subsys_eventb(probe_subsys_eventb); 48 unregister_trace_subsys_eventb(probe_subsys_eventb, NULL);
48 unregister_trace_subsys_event(probe_subsys_event); 49 unregister_trace_subsys_event(probe_subsys_event, NULL);
49 tracepoint_synchronize_unregister(); 50 tracepoint_synchronize_unregister();
50} 51}
51 52
diff --git a/samples/tracepoints/tracepoint-probe-sample2.c b/samples/tracepoints/tracepoint-probe-sample2.c
index be2a960573f1..9fcf990e5d4b 100644
--- a/samples/tracepoints/tracepoint-probe-sample2.c
+++ b/samples/tracepoints/tracepoint-probe-sample2.c
@@ -12,7 +12,8 @@
12 * Here the caller only guarantees locking for struct file and struct inode. 12 * Here the caller only guarantees locking for struct file and struct inode.
13 * Locking must therefore be done in the probe to use the dentry. 13 * Locking must therefore be done in the probe to use the dentry.
14 */ 14 */
15static void probe_subsys_event(struct inode *inode, struct file *file) 15static void probe_subsys_event(void *ignore,
16 struct inode *inode, struct file *file)
16{ 17{
17 printk(KERN_INFO "Event is encountered with inode number %lu\n", 18 printk(KERN_INFO "Event is encountered with inode number %lu\n",
18 inode->i_ino); 19 inode->i_ino);
@@ -22,7 +23,7 @@ static int __init tp_sample_trace_init(void)
22{ 23{
23 int ret; 24 int ret;
24 25
25 ret = register_trace_subsys_event(probe_subsys_event); 26 ret = register_trace_subsys_event(probe_subsys_event, NULL);
26 WARN_ON(ret); 27 WARN_ON(ret);
27 28
28 return 0; 29 return 0;
@@ -32,7 +33,7 @@ module_init(tp_sample_trace_init);
32 33
33static void __exit tp_sample_trace_exit(void) 34static void __exit tp_sample_trace_exit(void)
34{ 35{
35 unregister_trace_subsys_event(probe_subsys_event); 36 unregister_trace_subsys_event(probe_subsys_event, NULL);
36 tracepoint_synchronize_unregister(); 37 tracepoint_synchronize_unregister();
37} 38}
38 39
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 76af5f9623e3..a932ae52f921 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -242,6 +242,7 @@ case "$arg" in
242 echo "$output_file" | grep -q "\.gz$" && compr="gzip -9 -f" 242 echo "$output_file" | grep -q "\.gz$" && compr="gzip -9 -f"
243 echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f" 243 echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
244 echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f" 244 echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
245 echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f"
245 echo "$output_file" | grep -q "\.cpio$" && compr="cat" 246 echo "$output_file" | grep -q "\.cpio$" && compr="cat"
246 shift 247 shift
247 ;; 248 ;;
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index afbd54ac1d83..c70a27d924f0 100644
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -115,7 +115,9 @@ my $ksource = $ARGV[0];
115my $kconfig = $ARGV[1]; 115my $kconfig = $ARGV[1];
116my $lsmod_file = $ARGV[2]; 116my $lsmod_file = $ARGV[2];
117 117
118my @makefiles = `find $ksource -name Makefile`; 118my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
119chomp @makefiles;
120
119my %depends; 121my %depends;
120my %selects; 122my %selects;
121my %prompts; 123my %prompts;
@@ -215,7 +217,6 @@ if ($kconfig) {
215 217
216# Read all Makefiles to map the configs to the objects 218# Read all Makefiles to map the configs to the objects
217foreach my $makefile (@makefiles) { 219foreach my $makefile (@makefiles) {
218 chomp $makefile;
219 220
220 open(MIN,$makefile) || die "Can't open $makefile"; 221 open(MIN,$makefile) || die "Can't open $makefile";
221 while (<MIN>) { 222 while (<MIN>) {
@@ -242,7 +243,7 @@ foreach my $makefile (@makefiles) {
242 foreach my $obj (split /\s+/,$objs) { 243 foreach my $obj (split /\s+/,$objs) {
243 $obj =~ s/-/_/g; 244 $obj =~ s/-/_/g;
244 if ($obj =~ /(.*)\.o$/) { 245 if ($obj =~ /(.*)\.o$/) {
245 # Objects may bes enabled by more than one config. 246 # Objects may be enabled by more than one config.
246 # Store configs in an array. 247 # Store configs in an array.
247 my @arr; 248 my @arr;
248 249
@@ -307,7 +308,7 @@ close (LIN);
307my %configs; 308my %configs;
308foreach my $module (keys(%modules)) { 309foreach my $module (keys(%modules)) {
309 if (defined($objects{$module})) { 310 if (defined($objects{$module})) {
310 @arr = @{$objects{$module}}; 311 my @arr = @{$objects{$module}};
311 foreach my $conf (@arr) { 312 foreach my $conf (@arr) {
312 $configs{$conf} = $module; 313 $configs{$conf} = $module;
313 } 314 }
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 5d4402a1161a..38783dcf6c61 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -124,6 +124,7 @@ extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
124extern int install_user_keyrings(void); 124extern int install_user_keyrings(void);
125extern int install_thread_keyring_to_cred(struct cred *); 125extern int install_thread_keyring_to_cred(struct cred *);
126extern int install_process_keyring_to_cred(struct cred *); 126extern int install_process_keyring_to_cred(struct cred *);
127extern int install_session_keyring_to_cred(struct cred *, struct key *);
127 128
128extern struct key *request_key_and_link(struct key_type *type, 129extern struct key *request_key_and_link(struct key_type *type,
129 const char *description, 130 const char *description,
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 8f4dce1987c4..13074b454743 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1269,7 +1269,7 @@ long keyctl_session_to_parent(void)
1269 goto not_permitted; 1269 goto not_permitted;
1270 1270
1271 /* the parent must be single threaded */ 1271 /* the parent must be single threaded */
1272 if (atomic_read(&parent->signal->count) != 1) 1272 if (!thread_group_empty(parent))
1273 goto not_permitted; 1273 goto not_permitted;
1274 1274
1275 /* the parent and the child must have different session keyrings or 1275 /* the parent and the child must have different session keyrings or
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 20a38fed61b1..6b8e4ff4cc68 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -216,8 +216,7 @@ static int install_process_keyring(void)
216/* 216/*
217 * install a session keyring directly to a credentials struct 217 * install a session keyring directly to a credentials struct
218 */ 218 */
219static int install_session_keyring_to_cred(struct cred *cred, 219int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
220 struct key *keyring)
221{ 220{
222 unsigned long flags; 221 unsigned long flags;
223 struct key *old; 222 struct key *old;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index f656e9c069e3..f5ec9ac5d57c 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -58,6 +58,38 @@ void complete_request_key(struct key_construction *cons, int error)
58} 58}
59EXPORT_SYMBOL(complete_request_key); 59EXPORT_SYMBOL(complete_request_key);
60 60
61static int umh_keys_init(struct subprocess_info *info)
62{
63 struct cred *cred = (struct cred*)current_cred();
64 struct key *keyring = info->data;
65 /*
66 * This is called in context of freshly forked kthread before
67 * kernel_execve(), we can just change our ->session_keyring.
68 */
69 return install_session_keyring_to_cred(cred, keyring);
70}
71
72static void umh_keys_cleanup(struct subprocess_info *info)
73{
74 struct key *keyring = info->data;
75 key_put(keyring);
76}
77
78static int call_usermodehelper_keys(char *path, char **argv, char **envp,
79 struct key *session_keyring, enum umh_wait wait)
80{
81 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
82 struct subprocess_info *info =
83 call_usermodehelper_setup(path, argv, envp, gfp_mask);
84
85 if (!info)
86 return -ENOMEM;
87
88 call_usermodehelper_setfns(info, umh_keys_init, umh_keys_cleanup,
89 key_get(session_keyring));
90 return call_usermodehelper_exec(info, wait);
91}
92
61/* 93/*
62 * request userspace finish the construction of a key 94 * request userspace finish the construction of a key
63 * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" 95 * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index a2ff86189d2a..e9d98be190c5 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -345,7 +345,9 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
345 new_hw_ptr = hw_base + pos; 345 new_hw_ptr = hw_base + pos;
346 } 346 }
347 __delta: 347 __delta:
348 delta = (new_hw_ptr - old_hw_ptr) % runtime->boundary; 348 delta = new_hw_ptr - old_hw_ptr;
349 if (delta < 0)
350 delta += runtime->boundary;
349 if (xrun_debug(substream, in_interrupt ? 351 if (xrun_debug(substream, in_interrupt ?
350 XRUN_DEBUG_PERIODUPDATE : XRUN_DEBUG_HWPTRUPDATE)) { 352 XRUN_DEBUG_PERIODUPDATE : XRUN_DEBUG_HWPTRUPDATE)) {
351 char name[16]; 353 char name[16];
@@ -439,8 +441,13 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
439 snd_pcm_playback_silence(substream, new_hw_ptr); 441 snd_pcm_playback_silence(substream, new_hw_ptr);
440 442
441 if (in_interrupt) { 443 if (in_interrupt) {
442 runtime->hw_ptr_interrupt = new_hw_ptr - 444 delta = new_hw_ptr - runtime->hw_ptr_interrupt;
443 (new_hw_ptr % runtime->period_size); 445 if (delta < 0)
446 delta += runtime->boundary;
447 delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
448 runtime->hw_ptr_interrupt += delta;
449 if (runtime->hw_ptr_interrupt >= runtime->boundary)
450 runtime->hw_ptr_interrupt -= runtime->boundary;
444 } 451 }
445 runtime->hw_ptr_base = hw_base; 452 runtime->hw_ptr_base = hw_base;
446 runtime->status->hw_ptr = new_hw_ptr; 453 runtime->status->hw_ptr = new_hw_ptr;
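
The hw_ptr update above replaces modulo arithmetic with an explicit wrap: take the signed difference between the new and old pointers, and if it is negative because the new pointer wrapped past runtime->boundary, add the boundary back; the interrupt pointer is then advanced by whole periods of that delta. A standalone sketch of the wrap-aware delta, with illustrative types and values:

#include <stdio.h>

typedef unsigned long uframes_t;
typedef long sframes_t;

/* Distance from old to new on a ring of size 'boundary', both in [0, boundary). */
static uframes_t ring_delta(uframes_t old, uframes_t new, uframes_t boundary)
{
	sframes_t delta = (sframes_t)(new - old);

	if (delta < 0)
		delta += boundary;	/* new pointer wrapped past the boundary */
	return (uframes_t)delta;
}

int main(void)
{
	uframes_t boundary = 1UL << 20;

	printf("%lu\n", ring_delta(100, 612, boundary));		/* 512 */
	printf("%lu\n", ring_delta(boundary - 100, 412, boundary));	/* 512, across the wrap */
	return 0;
}
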
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 644c2bb17b86..303ac04ff6e4 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -27,7 +27,6 @@
27#include <linux/pm_qos_params.h> 27#include <linux/pm_qos_params.h>
28#include <linux/uio.h> 28#include <linux/uio.h>
29#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
30#include <linux/math64.h>
31#include <sound/core.h> 30#include <sound/core.h>
32#include <sound/control.h> 31#include <sound/control.h>
33#include <sound/info.h> 32#include <sound/info.h>
@@ -370,38 +369,6 @@ static int period_to_usecs(struct snd_pcm_runtime *runtime)
370 return usecs; 369 return usecs;
371} 370}
372 371
373static int calc_boundary(struct snd_pcm_runtime *runtime)
374{
375 u_int64_t boundary;
376
377 boundary = (u_int64_t)runtime->buffer_size *
378 (u_int64_t)runtime->period_size;
379#if BITS_PER_LONG < 64
380 /* try to find lowest common multiple for buffer and period */
381 if (boundary > LONG_MAX - runtime->buffer_size) {
382 u_int32_t remainder = -1;
383 u_int32_t divident = runtime->buffer_size;
384 u_int32_t divisor = runtime->period_size;
385 while (remainder) {
386 remainder = divident % divisor;
387 if (remainder) {
388 divident = divisor;
389 divisor = remainder;
390 }
391 }
392 boundary = div_u64(boundary, divisor);
393 if (boundary > LONG_MAX - runtime->buffer_size)
394 return -ERANGE;
395 }
396#endif
397 if (boundary == 0)
398 return -ERANGE;
399 runtime->boundary = boundary;
400 while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
401 runtime->boundary *= 2;
402 return 0;
403}
404
405static int snd_pcm_hw_params(struct snd_pcm_substream *substream, 372static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
406 struct snd_pcm_hw_params *params) 373 struct snd_pcm_hw_params *params)
407{ 374{
@@ -477,9 +444,9 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
477 runtime->stop_threshold = runtime->buffer_size; 444 runtime->stop_threshold = runtime->buffer_size;
478 runtime->silence_threshold = 0; 445 runtime->silence_threshold = 0;
479 runtime->silence_size = 0; 446 runtime->silence_size = 0;
480 err = calc_boundary(runtime); 447 runtime->boundary = runtime->buffer_size;
481 if (err < 0) 448 while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
482 goto _error; 449 runtime->boundary *= 2;
483 450
484 snd_pcm_timer_resolution_change(substream); 451 snd_pcm_timer_resolution_change(substream);
485 runtime->status->state = SNDRV_PCM_STATE_SETUP; 452 runtime->status->state = SNDRV_PCM_STATE_SETUP;
diff --git a/sound/mips/au1x00.c b/sound/mips/au1x00.c
index 3e763d6a5d67..446cf9748664 100644
--- a/sound/mips/au1x00.c
+++ b/sound/mips/au1x00.c
@@ -516,6 +516,7 @@ get the interrupt driven case to work efficiently */
516 break; 516 break;
517 if (i == 0x5000) { 517 if (i == 0x5000) {
518 printk(KERN_ERR "au1000 AC97: AC97 command read timeout\n"); 518 printk(KERN_ERR "au1000 AC97: AC97 command read timeout\n");
519 spin_unlock(&au1000->ac97_lock);
519 return 0; 520 return 0;
520 } 521 }
521 522
diff --git a/sound/oss/dmasound/dmasound_atari.c b/sound/oss/dmasound/dmasound_atari.c
index 1f4774123064..13c214466d3b 100644
--- a/sound/oss/dmasound/dmasound_atari.c
+++ b/sound/oss/dmasound/dmasound_atari.c
@@ -1277,7 +1277,7 @@ static irqreturn_t AtaInterrupt(int irq, void *dummy)
1277 * (almost) like on the TT. 1277 * (almost) like on the TT.
1278 */ 1278 */
1279 write_sq_ignore_int = 0; 1279 write_sq_ignore_int = 0;
1280 return IRQ_HANDLED; 1280 goto out;
1281 } 1281 }
1282 1282
1283 if (!write_sq.active) { 1283 if (!write_sq.active) {
@@ -1285,7 +1285,7 @@ static irqreturn_t AtaInterrupt(int irq, void *dummy)
1285 * the sq variables, so better don't do anything here. 1285 * the sq variables, so better don't do anything here.
1286 */ 1286 */
1287 WAKE_UP(write_sq.sync_queue); 1287 WAKE_UP(write_sq.sync_queue);
1288 return IRQ_HANDLED; 1288 goto out;
1289 } 1289 }
1290 1290
1291 /* Probably ;) one frame is finished. Well, in fact it may be that a 1291 /* Probably ;) one frame is finished. Well, in fact it may be that a
@@ -1322,6 +1322,7 @@ static irqreturn_t AtaInterrupt(int irq, void *dummy)
1322 /* We are not playing after AtaPlay(), so there 1322 /* We are not playing after AtaPlay(), so there
1323 is nothing to play any more. Wake up a process 1323 is nothing to play any more. Wake up a process
1324 waiting for audio output to drain. */ 1324 waiting for audio output to drain. */
1325out:
1325 spin_unlock(&dmasound.lock); 1326 spin_unlock(&dmasound.lock);
1326 return IRQ_HANDLED; 1327 return IRQ_HANDLED;
1327} 1328}
diff --git a/sound/pci/asihpi/hpi.h b/sound/pci/asihpi/hpi.h
index 99400de6c075..0173bbe62b67 100644
--- a/sound/pci/asihpi/hpi.h
+++ b/sound/pci/asihpi/hpi.h
@@ -50,7 +50,7 @@ i.e 3.05.02 is a development version
50#define HPI_VER_RELEASE(v) ((int)(v & 0xFF)) 50#define HPI_VER_RELEASE(v) ((int)(v & 0xFF))
51 51
52/* Use single digits for versions less that 10 to avoid octal. */ 52/* Use single digits for versions less that 10 to avoid octal. */
53#define HPI_VER HPI_VERSION_CONSTRUCTOR(4L, 3, 18) 53#define HPI_VER HPI_VERSION_CONSTRUCTOR(4L, 3, 25)
54 54
55/* Library version as documented in hpi-api-versions.txt */ 55/* Library version as documented in hpi-api-versions.txt */
56#define HPI_LIB_VER HPI_VERSION_CONSTRUCTOR(9, 0, 0) 56#define HPI_LIB_VER HPI_VERSION_CONSTRUCTOR(9, 0, 0)
@@ -1632,6 +1632,12 @@ u16 hpi_tuner_get_hd_radio_sdk_version(const struct hpi_hsubsys *ph_subsys,
1632u16 hpi_tuner_get_hd_radio_signal_quality(const struct hpi_hsubsys *ph_subsys, 1632u16 hpi_tuner_get_hd_radio_signal_quality(const struct hpi_hsubsys *ph_subsys,
1633 u32 h_control, u32 *pquality); 1633 u32 h_control, u32 *pquality);
1634 1634
1635u16 hpi_tuner_get_hd_radio_signal_blend(const struct hpi_hsubsys *ph_subsys,
1636 u32 h_control, u32 *pblend);
1637
1638u16 hpi_tuner_set_hd_radio_signal_blend(const struct hpi_hsubsys *ph_subsys,
1639 u32 h_control, const u32 blend);
1640
1635/****************************/ 1641/****************************/
1636/* PADs control */ 1642/* PADs control */
1637/****************************/ 1643/****************************/
diff --git a/sound/pci/asihpi/hpi6000.c b/sound/pci/asihpi/hpi6000.c
index 839ecb2e4b64..12dab5e4892c 100644
--- a/sound/pci/asihpi/hpi6000.c
+++ b/sound/pci/asihpi/hpi6000.c
@@ -691,9 +691,6 @@ static short hpi6000_adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
691 case 0x6200: 691 case 0x6200:
692 boot_load_family = HPI_ADAPTER_FAMILY_ASI(0x6200); 692 boot_load_family = HPI_ADAPTER_FAMILY_ASI(0x6200);
693 break; 693 break;
694 case 0x8800:
695 boot_load_family = HPI_ADAPTER_FAMILY_ASI(0x8800);
696 break;
697 default: 694 default:
698 return HPI6000_ERROR_UNHANDLED_SUBSYS_ID; 695 return HPI6000_ERROR_UNHANDLED_SUBSYS_ID;
699 } 696 }
@@ -1775,7 +1772,6 @@ static void hw_message(struct hpi_adapter_obj *pao, struct hpi_message *phm,
1775 u16 error = 0; 1772 u16 error = 0;
1776 u16 dsp_index = 0; 1773 u16 dsp_index = 0;
1777 u16 num_dsp = ((struct hpi_hw_obj *)pao->priv)->num_dsp; 1774 u16 num_dsp = ((struct hpi_hw_obj *)pao->priv)->num_dsp;
1778 hpios_dsplock_lock(pao);
1779 1775
1780 if (num_dsp < 2) 1776 if (num_dsp < 2)
1781 dsp_index = 0; 1777 dsp_index = 0;
@@ -1796,6 +1792,8 @@ static void hw_message(struct hpi_adapter_obj *pao, struct hpi_message *phm,
1796 } 1792 }
1797 } 1793 }
1798 } 1794 }
1795
1796 hpios_dsplock_lock(pao);
1799 error = hpi6000_message_response_sequence(pao, dsp_index, phm, phr); 1797 error = hpi6000_message_response_sequence(pao, dsp_index, phm, phr);
1800 1798
1801 /* maybe an error response */ 1799 /* maybe an error response */
diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
index 5e88c1fc2b9e..e89991ea3543 100644
--- a/sound/pci/asihpi/hpi6205.c
+++ b/sound/pci/asihpi/hpi6205.c
@@ -966,23 +966,16 @@ static void outstream_write(struct hpi_adapter_obj *pao,
966 status = &interface->outstream_host_buffer_status[phm->obj_index]; 966 status = &interface->outstream_host_buffer_status[phm->obj_index];
967 967
968 if (phw->flag_outstream_just_reset[phm->obj_index]) { 968 if (phw->flag_outstream_just_reset[phm->obj_index]) {
969 /* Format can only change after reset. Must tell DSP. */
970 u16 function = phm->function;
971 phw->flag_outstream_just_reset[phm->obj_index] = 0;
972 phm->function = HPI_OSTREAM_SET_FORMAT;
973 hw_message(pao, phm, phr); /* send the format to the DSP */
974 phm->function = function;
975 if (phr->error)
976 return;
977 }
978#if 1
979 if (phw->flag_outstream_just_reset[phm->obj_index]) {
980 /* First OutStremWrite() call following reset will write data to the 969 /* First OutStremWrite() call following reset will write data to the
981 adapter's buffers, reducing delay before stream can start 970 adapter's buffers, reducing delay before stream can start. The DSP
971 takes care of setting the stream data format using format information
972 embedded in phm.
982 */ 973 */
983 int partial_write = 0; 974 int partial_write = 0;
984 unsigned int original_size = 0; 975 unsigned int original_size = 0;
985 976
977 phw->flag_outstream_just_reset[phm->obj_index] = 0;
978
986 /* Send the first buffer to the DSP the old way. */ 979 /* Send the first buffer to the DSP the old way. */
987 /* Limit size of first transfer - */ 980 /* Limit size of first transfer - */
988 /* expect that this will not usually be triggered. */ 981 /* expect that this will not usually be triggered. */
@@ -1012,7 +1005,6 @@ static void outstream_write(struct hpi_adapter_obj *pao,
1012 original_size - HPI6205_SIZEOF_DATA; 1005 original_size - HPI6205_SIZEOF_DATA;
1013 phm->u.d.u.data.pb_data += HPI6205_SIZEOF_DATA; 1006 phm->u.d.u.data.pb_data += HPI6205_SIZEOF_DATA;
1014 } 1007 }
1015#endif
1016 1008
1017 space_available = outstream_get_space_available(status); 1009 space_available = outstream_get_space_available(status);
1018 if (space_available < (long)phm->u.d.u.data.data_size) { 1010 if (space_available < (long)phm->u.d.u.data.data_size) {
@@ -1369,6 +1361,9 @@ static u16 adapter_boot_load_dsp(struct hpi_adapter_obj *pao,
1369 case HPI_ADAPTER_FAMILY_ASI(0x6500): 1361 case HPI_ADAPTER_FAMILY_ASI(0x6500):
1370 firmware_id = HPI_ADAPTER_FAMILY_ASI(0x6600); 1362 firmware_id = HPI_ADAPTER_FAMILY_ASI(0x6600);
1371 break; 1363 break;
1364 case HPI_ADAPTER_FAMILY_ASI(0x8800):
1365 firmware_id = HPI_ADAPTER_FAMILY_ASI(0x8900);
1366 break;
1372 } 1367 }
1373 boot_code_id[1] = firmware_id; 1368 boot_code_id[1] = firmware_id;
1374 1369
diff --git a/sound/pci/asihpi/hpi_internal.h b/sound/pci/asihpi/hpi_internal.h
index f1cd6f1a0d44..fdd0ce02aa68 100644
--- a/sound/pci/asihpi/hpi_internal.h
+++ b/sound/pci/asihpi/hpi_internal.h
@@ -232,6 +232,8 @@ enum HPI_BUSES {
232#define HPI_TUNER_HDRADIO_SDK_VERSION HPI_CTL_ATTR(TUNER, 13) 232#define HPI_TUNER_HDRADIO_SDK_VERSION HPI_CTL_ATTR(TUNER, 13)
233/** HD Radio DSP firmware version. */ 233/** HD Radio DSP firmware version. */
234#define HPI_TUNER_HDRADIO_DSP_VERSION HPI_CTL_ATTR(TUNER, 14) 234#define HPI_TUNER_HDRADIO_DSP_VERSION HPI_CTL_ATTR(TUNER, 14)
235/** HD Radio signal blend (force analog, or automatic). */
236#define HPI_TUNER_HDRADIO_BLEND HPI_CTL_ATTR(TUNER, 15)
235 237
236/** \} */ 238/** \} */
237 239
@@ -478,8 +480,10 @@ Threshold is a -ve number in units of dB/100,
478 480
479/** First 2 hex digits define the adapter family */ 481/** First 2 hex digits define the adapter family */
480#define HPI_ADAPTER_FAMILY_MASK 0xff00 482#define HPI_ADAPTER_FAMILY_MASK 0xff00
483#define HPI_MODULE_FAMILY_MASK 0xfff0
481 484
482#define HPI_ADAPTER_FAMILY_ASI(f) (f & HPI_ADAPTER_FAMILY_MASK) 485#define HPI_ADAPTER_FAMILY_ASI(f) (f & HPI_ADAPTER_FAMILY_MASK)
486#define HPI_MODULE_FAMILY_ASI(f) (f & HPI_MODULE_FAMILY_MASK)
483#define HPI_ADAPTER_ASI(f) (f) 487#define HPI_ADAPTER_ASI(f) (f)
484 488
485/******************************************* message types */ 489/******************************************* message types */
@@ -970,6 +974,7 @@ struct hpi_control_union_msg {
970 u32 mode; 974 u32 mode;
971 u32 value; 975 u32 value;
972 } mode; 976 } mode;
977 u32 blend;
973 } tuner; 978 } tuner;
974 } u; 979 } u;
975}; 980};
diff --git a/sound/pci/asihpi/hpicmn.c b/sound/pci/asihpi/hpicmn.c
index 565102cae4f8..fcd64539d9ef 100644
--- a/sound/pci/asihpi/hpicmn.c
+++ b/sound/pci/asihpi/hpicmn.c
@@ -347,20 +347,15 @@ short hpi_check_control_cache(struct hpi_control_cache *p_cache,
347 found = 0; 347 found = 0;
348 break; 348 break;
349 case HPI_CONTROL_TUNER: 349 case HPI_CONTROL_TUNER:
350 { 350 if (phm->u.c.attribute == HPI_TUNER_FREQ)
351 struct hpi_control_cache_single *pCT = 351 phr->u.c.param1 = pC->u.t.freq_ink_hz;
352 (struct hpi_control_cache_single *)pI; 352 else if (phm->u.c.attribute == HPI_TUNER_BAND)
353 if (phm->u.c.attribute == HPI_TUNER_FREQ) 353 phr->u.c.param1 = pC->u.t.band;
354 phr->u.c.param1 = pCT->u.t.freq_ink_hz; 354 else if ((phm->u.c.attribute == HPI_TUNER_LEVEL)
355 else if (phm->u.c.attribute == HPI_TUNER_BAND) 355 && (phm->u.c.param1 == HPI_TUNER_LEVEL_AVERAGE))
356 phr->u.c.param1 = pCT->u.t.band; 356 phr->u.c.param1 = pC->u.t.level;
357 else if ((phm->u.c.attribute == HPI_TUNER_LEVEL) 357 else
358 && (phm->u.c.param1 == 358 found = 0;
359 HPI_TUNER_LEVEL_AVERAGE))
360 phr->u.c.param1 = pCT->u.t.level;
361 else
362 found = 0;
363 }
364 break; 359 break;
365 case HPI_CONTROL_AESEBU_RECEIVER: 360 case HPI_CONTROL_AESEBU_RECEIVER:
366 if (phm->u.c.attribute == HPI_AESEBURX_ERRORSTATUS) 361 if (phm->u.c.attribute == HPI_AESEBURX_ERRORSTATUS)
@@ -503,6 +498,9 @@ void hpi_sync_control_cache(struct hpi_control_cache *p_cache,
503 struct hpi_control_cache_single *pC; 498 struct hpi_control_cache_single *pC;
504 struct hpi_control_cache_info *pI; 499 struct hpi_control_cache_info *pI;
505 500
501 if (phr->error)
502 return;
503
506 if (!find_control(phm, p_cache, &pI, &control_index)) 504 if (!find_control(phm, p_cache, &pI, &control_index))
507 return; 505 return;
508 506
@@ -520,8 +518,6 @@ void hpi_sync_control_cache(struct hpi_control_cache *p_cache,
520 break; 518 break;
521 case HPI_CONTROL_MULTIPLEXER: 519 case HPI_CONTROL_MULTIPLEXER:
522 /* mux does not return its setting on Set command. */ 520 /* mux does not return its setting on Set command. */
523 if (phr->error)
524 return;
525 if (phm->u.c.attribute == HPI_MULTIPLEXER_SOURCE) { 521 if (phm->u.c.attribute == HPI_MULTIPLEXER_SOURCE) {
526 pC->u.x.source_node_type = (u16)phm->u.c.param1; 522 pC->u.x.source_node_type = (u16)phm->u.c.param1;
527 pC->u.x.source_node_index = (u16)phm->u.c.param2; 523 pC->u.x.source_node_index = (u16)phm->u.c.param2;
@@ -529,8 +525,6 @@ void hpi_sync_control_cache(struct hpi_control_cache *p_cache,
529 break; 525 break;
530 case HPI_CONTROL_CHANNEL_MODE: 526 case HPI_CONTROL_CHANNEL_MODE:
531 /* mode does not return its setting on Set command. */ 527 /* mode does not return its setting on Set command. */
532 if (phr->error)
533 return;
534 if (phm->u.c.attribute == HPI_CHANNEL_MODE_MODE) 528 if (phm->u.c.attribute == HPI_CHANNEL_MODE_MODE)
535 pC->u.m.mode = (u16)phm->u.c.param1; 529 pC->u.m.mode = (u16)phm->u.c.param1;
536 break; 530 break;
@@ -545,20 +539,14 @@ void hpi_sync_control_cache(struct hpi_control_cache *p_cache,
545 pC->u.phantom_power.state = (u16)phm->u.c.param1; 539 pC->u.phantom_power.state = (u16)phm->u.c.param1;
546 break; 540 break;
547 case HPI_CONTROL_AESEBU_TRANSMITTER: 541 case HPI_CONTROL_AESEBU_TRANSMITTER:
548 if (phr->error)
549 return;
550 if (phm->u.c.attribute == HPI_AESEBUTX_FORMAT) 542 if (phm->u.c.attribute == HPI_AESEBUTX_FORMAT)
551 pC->u.aes3tx.format = phm->u.c.param1; 543 pC->u.aes3tx.format = phm->u.c.param1;
552 break; 544 break;
553 case HPI_CONTROL_AESEBU_RECEIVER: 545 case HPI_CONTROL_AESEBU_RECEIVER:
554 if (phr->error)
555 return;
556 if (phm->u.c.attribute == HPI_AESEBURX_FORMAT) 546 if (phm->u.c.attribute == HPI_AESEBURX_FORMAT)
557 pC->u.aes3rx.source = phm->u.c.param1; 547 pC->u.aes3rx.source = phm->u.c.param1;
558 break; 548 break;
559 case HPI_CONTROL_SAMPLECLOCK: 549 case HPI_CONTROL_SAMPLECLOCK:
560 if (phr->error)
561 return;
562 if (phm->u.c.attribute == HPI_SAMPLECLOCK_SOURCE) 550 if (phm->u.c.attribute == HPI_SAMPLECLOCK_SOURCE)
563 pC->u.clk.source = (u16)phm->u.c.param1; 551 pC->u.clk.source = (u16)phm->u.c.param1;
564 else if (phm->u.c.attribute == HPI_SAMPLECLOCK_SOURCE_INDEX) 552 else if (phm->u.c.attribute == HPI_SAMPLECLOCK_SOURCE_INDEX)
@@ -590,7 +578,7 @@ struct hpi_control_cache *hpi_alloc_control_cache(const u32
590 578
591void hpi_free_control_cache(struct hpi_control_cache *p_cache) 579void hpi_free_control_cache(struct hpi_control_cache *p_cache)
592{ 580{
593 if ((p_cache->init) && (p_cache->p_info)) { 581 if (p_cache->init) {
594 kfree(p_cache->p_info); 582 kfree(p_cache->p_info);
595 p_cache->p_info = NULL; 583 p_cache->p_info = NULL;
596 p_cache->init = 0; 584 p_cache->init = 0;
diff --git a/sound/pci/asihpi/hpifunc.c b/sound/pci/asihpi/hpifunc.c
index eda26b312324..298eef3e20e9 100644
--- a/sound/pci/asihpi/hpifunc.c
+++ b/sound/pci/asihpi/hpifunc.c
@@ -2946,6 +2946,20 @@ u16 hpi_tuner_get_hd_radio_signal_quality(const struct hpi_hsubsys *ph_subsys,
2946 HPI_TUNER_HDRADIO_SIGNAL_QUALITY, 0, 0, pquality, NULL); 2946 HPI_TUNER_HDRADIO_SIGNAL_QUALITY, 0, 0, pquality, NULL);
2947} 2947}
2948 2948
2949u16 hpi_tuner_get_hd_radio_signal_blend(const struct hpi_hsubsys *ph_subsys,
2950 u32 h_control, u32 *pblend)
2951{
2952 return hpi_control_param_get(ph_subsys, h_control,
2953 HPI_TUNER_HDRADIO_BLEND, 0, 0, pblend, NULL);
2954}
2955
2956u16 hpi_tuner_set_hd_radio_signal_blend(const struct hpi_hsubsys *ph_subsys,
2957 u32 h_control, const u32 blend)
2958{
2959 return hpi_control_param_set(ph_subsys, h_control,
2960 HPI_TUNER_HDRADIO_BLEND, blend, 0);
2961}
2962
2949u16 hpi_tuner_getRDS(const struct hpi_hsubsys *ph_subsys, u32 h_control, 2963u16 hpi_tuner_getRDS(const struct hpi_hsubsys *ph_subsys, u32 h_control,
2950 char *p_data) 2964 char *p_data)
2951{ 2965{
@@ -3266,8 +3280,7 @@ u16 hpi_entity_find_next(struct hpi_entity *container_entity,
3266 3280
3267void hpi_entity_free(struct hpi_entity *entity) 3281void hpi_entity_free(struct hpi_entity *entity)
3268{ 3282{
3269 if (entity != NULL) 3283 kfree(entity);
3270 kfree(entity);
3271} 3284}
3272 3285
3273static u16 hpi_entity_alloc_and_copy(struct hpi_entity *src, 3286static u16 hpi_entity_alloc_and_copy(struct hpi_entity *src,
diff --git a/sound/pci/asihpi/hpios.c b/sound/pci/asihpi/hpios.c
index de615cfdb950..742ee12a9e17 100644
--- a/sound/pci/asihpi/hpios.c
+++ b/sound/pci/asihpi/hpios.c
@@ -89,26 +89,3 @@ u16 hpios_locked_mem_free(struct consistent_dma_area *p_mem_area)
89void hpios_locked_mem_free_all(void) 89void hpios_locked_mem_free_all(void)
90{ 90{
91} 91}
92
93void __iomem *hpios_map_io(struct pci_dev *pci_dev, int idx,
94 unsigned int length)
95{
96 HPI_DEBUG_LOG(DEBUG, "mapping %d %s %08llx-%08llx %04llx len 0x%x\n",
97 idx, pci_dev->resource[idx].name,
98 (unsigned long long)pci_resource_start(pci_dev, idx),
99 (unsigned long long)pci_resource_end(pci_dev, idx),
100 (unsigned long long)pci_resource_flags(pci_dev, idx), length);
101
102 if (!(pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM)) {
103 HPI_DEBUG_LOG(ERROR, "not an io memory resource\n");
104 return NULL;
105 }
106
107 if (length > pci_resource_len(pci_dev, idx)) {
108 HPI_DEBUG_LOG(ERROR, "resource too small for requested %d \n",
109 length);
110 return NULL;
111 }
112
113 return ioremap(pci_resource_start(pci_dev, idx), length);
114}
diff --git a/sound/pci/asihpi/hpios.h b/sound/pci/asihpi/hpios.h
index a62c3f1e5f09..370f39b43f85 100644
--- a/sound/pci/asihpi/hpios.h
+++ b/sound/pci/asihpi/hpios.h
@@ -166,13 +166,4 @@ struct hpi_adapter {
166 void __iomem *ap_remapped_mem_base[HPI_MAX_ADAPTER_MEM_SPACES]; 166 void __iomem *ap_remapped_mem_base[HPI_MAX_ADAPTER_MEM_SPACES];
167}; 167};
168 168
169static inline void hpios_unmap_io(void __iomem *addr,
170 unsigned long size)
171{
172 iounmap(addr);
173}
174
175void __iomem *hpios_map_io(struct pci_dev *pci_dev, int idx,
176 unsigned int length);
177
178#endif 169#endif
diff --git a/sound/pci/aw2/aw2-alsa.c b/sound/pci/aw2/aw2-alsa.c
index 67921f93a41e..c15002242d98 100644
--- a/sound/pci/aw2/aw2-alsa.c
+++ b/sound/pci/aw2/aw2-alsa.c
@@ -26,7 +26,7 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <asm/io.h> 29#include <linux/io.h>
30#include <sound/core.h> 30#include <sound/core.h>
31#include <sound/initval.h> 31#include <sound/initval.h>
32#include <sound/pcm.h> 32#include <sound/pcm.h>
@@ -44,9 +44,6 @@ MODULE_LICENSE("GPL");
44/********************************* 44/*********************************
45 * DEFINES 45 * DEFINES
46 ********************************/ 46 ********************************/
47#define PCI_VENDOR_ID_SAA7146 0x1131
48#define PCI_DEVICE_ID_SAA7146 0x7146
49
50#define CTL_ROUTE_ANALOG 0 47#define CTL_ROUTE_ANALOG 0
51#define CTL_ROUTE_DIGITAL 1 48#define CTL_ROUTE_DIGITAL 1
52 49
@@ -165,7 +162,7 @@ module_param_array(enable, bool, NULL, 0444);
165MODULE_PARM_DESC(enable, "Enable Audiowerk2 soundcard."); 162MODULE_PARM_DESC(enable, "Enable Audiowerk2 soundcard.");
166 163
167static DEFINE_PCI_DEVICE_TABLE(snd_aw2_ids) = { 164static DEFINE_PCI_DEVICE_TABLE(snd_aw2_ids) = {
168 {PCI_VENDOR_ID_SAA7146, PCI_DEVICE_ID_SAA7146, 0, 0, 165 {PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA7146, 0, 0,
169 0, 0, 0}, 166 0, 0, 0},
170 {0} 167 {0}
171}; 168};
@@ -419,7 +416,7 @@ static int snd_aw2_pcm_playback_open(struct snd_pcm_substream *substream)
419{ 416{
420 struct snd_pcm_runtime *runtime = substream->runtime; 417 struct snd_pcm_runtime *runtime = substream->runtime;
421 418
422 snd_printdd(KERN_DEBUG "aw2: Playback_open \n"); 419 snd_printdd(KERN_DEBUG "aw2: Playback_open\n");
423 runtime->hw = snd_aw2_playback_hw; 420 runtime->hw = snd_aw2_playback_hw;
424 return 0; 421 return 0;
425} 422}
@@ -435,7 +432,7 @@ static int snd_aw2_pcm_capture_open(struct snd_pcm_substream *substream)
435{ 432{
436 struct snd_pcm_runtime *runtime = substream->runtime; 433 struct snd_pcm_runtime *runtime = substream->runtime;
437 434
438 snd_printdd(KERN_DEBUG "aw2: Capture_open \n"); 435 snd_printdd(KERN_DEBUG "aw2: Capture_open\n");
439 runtime->hw = snd_aw2_capture_hw; 436 runtime->hw = snd_aw2_capture_hw;
440 return 0; 437 return 0;
441} 438}
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 4b302d86f5f2..7a9401462c1c 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -35,6 +35,7 @@
35#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/mutex.h> 37#include <linux/mutex.h>
38#include <linux/moduleparam.h>
38 39
39#include <sound/core.h> 40#include <sound/core.h>
40#include <sound/tlv.h> 41#include <sound/tlv.h>
@@ -50,6 +51,10 @@
50#define EMU10K1_CENTER_LFE_FROM_FRONT 51#define EMU10K1_CENTER_LFE_FROM_FRONT
51#endif 52#endif
52 53
54static bool high_res_gpr_volume;
55module_param(high_res_gpr_volume, bool, 0444);
56MODULE_PARM_DESC(high_res_gpr_volume, "GPR mixer controls use 31-bit range.");
57
53/* 58/*
54 * Tables 59 * Tables
55 */ 60 */
@@ -296,6 +301,7 @@ static const u32 db_table[101] = {
296 301
297/* EMU10k1/EMU10k2 DSP control db gain */ 302/* EMU10k1/EMU10k2 DSP control db gain */
298static const DECLARE_TLV_DB_SCALE(snd_emu10k1_db_scale1, -4000, 40, 1); 303static const DECLARE_TLV_DB_SCALE(snd_emu10k1_db_scale1, -4000, 40, 1);
304static const DECLARE_TLV_DB_LINEAR(snd_emu10k1_db_linear, TLV_DB_GAIN_MUTE, 0);
299 305
300static const u32 onoff_table[2] = { 306static const u32 onoff_table[2] = {
301 0x00000000, 0x00000001 307 0x00000000, 0x00000001
@@ -1072,10 +1078,17 @@ snd_emu10k1_init_mono_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
1072 strcpy(ctl->id.name, name); 1078 strcpy(ctl->id.name, name);
1073 ctl->vcount = ctl->count = 1; 1079 ctl->vcount = ctl->count = 1;
1074 ctl->gpr[0] = gpr + 0; ctl->value[0] = defval; 1080 ctl->gpr[0] = gpr + 0; ctl->value[0] = defval;
1075 ctl->min = 0; 1081 if (high_res_gpr_volume) {
1076 ctl->max = 100; 1082 ctl->min = 0;
1077 ctl->tlv = snd_emu10k1_db_scale1; 1083 ctl->max = 0x7fffffff;
1078 ctl->translation = EMU10K1_GPR_TRANSLATION_TABLE100; 1084 ctl->tlv = snd_emu10k1_db_linear;
1085 ctl->translation = EMU10K1_GPR_TRANSLATION_NONE;
1086 } else {
1087 ctl->min = 0;
1088 ctl->max = 100;
1089 ctl->tlv = snd_emu10k1_db_scale1;
1090 ctl->translation = EMU10K1_GPR_TRANSLATION_TABLE100;
1091 }
1079} 1092}
1080 1093
1081static void __devinit 1094static void __devinit
@@ -1087,10 +1100,17 @@ snd_emu10k1_init_stereo_control(struct snd_emu10k1_fx8010_control_gpr *ctl,
1087 ctl->vcount = ctl->count = 2; 1100 ctl->vcount = ctl->count = 2;
1088 ctl->gpr[0] = gpr + 0; ctl->value[0] = defval; 1101 ctl->gpr[0] = gpr + 0; ctl->value[0] = defval;
1089 ctl->gpr[1] = gpr + 1; ctl->value[1] = defval; 1102 ctl->gpr[1] = gpr + 1; ctl->value[1] = defval;
1090 ctl->min = 0; 1103 if (high_res_gpr_volume) {
1091 ctl->max = 100; 1104 ctl->min = 0;
1092 ctl->tlv = snd_emu10k1_db_scale1; 1105 ctl->max = 0x7fffffff;
1093 ctl->translation = EMU10K1_GPR_TRANSLATION_TABLE100; 1106 ctl->tlv = snd_emu10k1_db_linear;
1107 ctl->translation = EMU10K1_GPR_TRANSLATION_NONE;
1108 } else {
1109 ctl->min = 0;
1110 ctl->max = 100;
1111 ctl->tlv = snd_emu10k1_db_scale1;
1112 ctl->translation = EMU10K1_GPR_TRANSLATION_TABLE100;
1113 }
1094} 1114}
1095 1115
1096static void __devinit 1116static void __devinit
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 170610e1d7da..dc79564fea30 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1097,6 +1097,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
1097 struct azx *chip = dev_id; 1097 struct azx *chip = dev_id;
1098 struct azx_dev *azx_dev; 1098 struct azx_dev *azx_dev;
1099 u32 status; 1099 u32 status;
1100 u8 sd_status;
1100 int i, ok; 1101 int i, ok;
1101 1102
1102 spin_lock(&chip->reg_lock); 1103 spin_lock(&chip->reg_lock);
@@ -1110,8 +1111,10 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
1110 for (i = 0; i < chip->num_streams; i++) { 1111 for (i = 0; i < chip->num_streams; i++) {
1111 azx_dev = &chip->azx_dev[i]; 1112 azx_dev = &chip->azx_dev[i];
1112 if (status & azx_dev->sd_int_sta_mask) { 1113 if (status & azx_dev->sd_int_sta_mask) {
1114 sd_status = azx_sd_readb(azx_dev, SD_STS);
1113 azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK); 1115 azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK);
1114 if (!azx_dev->substream || !azx_dev->running) 1116 if (!azx_dev->substream || !azx_dev->running ||
1117 !(sd_status & SD_INT_COMPLETE))
1115 continue; 1118 continue;
1116 /* check whether this IRQ is really acceptable */ 1119 /* check whether this IRQ is really acceptable */
1117 ok = azx_position_ok(chip, azx_dev); 1120 ok = azx_position_ok(chip, azx_dev);
@@ -2279,12 +2282,16 @@ static int azx_dev_free(struct snd_device *device)
2279 * white/black-listing for position_fix 2282 * white/black-listing for position_fix
2280 */ 2283 */
2281static struct snd_pci_quirk position_fix_list[] __devinitdata = { 2284static struct snd_pci_quirk position_fix_list[] __devinitdata = {
2285 SND_PCI_QUIRK(0x1025, 0x009f, "Acer Aspire 5110", POS_FIX_LPIB),
2282 SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB), 2286 SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
2283 SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB), 2287 SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
2284 SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB), 2288 SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB),
2285 SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB), 2289 SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
2286 SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
2287 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), 2290 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
2291 SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
2292 SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
2293 SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
2294 SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
2288 SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB), 2295 SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
2289 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), 2296 SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
2290 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), 2297 SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index e863649d31f5..2bf2cb5da956 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -2975,6 +2975,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
2975 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), 2975 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
2976 SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), 2976 SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
2977 SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), 2977 SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
2978 SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
2979 SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
2978 SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD), 2980 SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD),
2979 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), 2981 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
2980 {} 2982 {}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 53538b0f9991..17d4548cc353 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7025,6 +7025,14 @@ static struct hda_input_mux alc889A_mb31_capture_source = {
7025 }, 7025 },
7026}; 7026};
7027 7027
7028static struct hda_input_mux alc889A_imac91_capture_source = {
7029 .num_items = 2,
7030 .items = {
7031 { "Mic", 0x01 },
7032 { "Line", 0x2 }, /* Not sure! */
7033 },
7034};
7035
7028/* 7036/*
7029 * 2ch mode 7037 * 2ch mode
7030 */ 7038 */
@@ -7486,15 +7494,8 @@ static struct snd_kcontrol_new alc885_macmini3_mixer[] = {
7486}; 7494};
7487 7495
7488static struct snd_kcontrol_new alc885_imac91_mixer[] = { 7496static struct snd_kcontrol_new alc885_imac91_mixer[] = {
7489 HDA_CODEC_VOLUME("Line-Out Playback Volume", 0x0c, 0x00, HDA_OUTPUT), 7497 HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
7490 HDA_BIND_MUTE ("Line-Out Playback Switch", 0x0c, 0x02, HDA_INPUT), 7498 HDA_BIND_MUTE("Speaker Playback Switch", 0x0c, 0x02, HDA_INPUT),
7491 HDA_CODEC_MUTE ("Speaker Playback Switch", 0x14, 0x00, HDA_OUTPUT),
7492 HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
7493 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
7494 HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
7495 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
7496 HDA_CODEC_MUTE ("Mic Playback Switch", 0x0b, 0x00, HDA_INPUT),
7497 HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x00, HDA_INPUT),
7498 { } /* end */ 7499 { } /* end */
7499}; 7500};
7500 7501
@@ -7995,61 +7996,56 @@ static struct hda_verb alc885_mbp3_init_verbs[] = {
7995 7996
7996/* iMac 9,1 */ 7997/* iMac 9,1 */
7997static struct hda_verb alc885_imac91_init_verbs[] = { 7998static struct hda_verb alc885_imac91_init_verbs[] = {
7998 /* Line-Out mixer: unmute input/output amp left and right (volume = 0) */ 7999 /* Internal Speaker Pin (0x0c) */
7999 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO}, 8000 {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, (PIN_OUT | AC_PINCTL_VREF_50) },
8000 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, 8001 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
8001 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, 8002 {0x18, AC_VERB_SET_CONNECT_SEL, 0x00},
8002 /* Rear mixer */ 8003 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, (PIN_OUT | AC_PINCTL_VREF_50) },
8003 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO}, 8004 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
8004 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, 8005 {0x1a, AC_VERB_SET_CONNECT_SEL, 0x00},
8005 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, 8006 /* HP Pin: Rear */
8006 /* HP Pin: output 0 (0x0c) */
8007 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP}, 8007 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
8008 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, 8008 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
8009 {0x14, AC_VERB_SET_CONNECT_SEL, 0x00}, 8009 {0x14, AC_VERB_SET_CONNECT_SEL, 0x00},
8010 {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN}, 8010 {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, (ALC880_HP_EVENT | AC_USRSP_EN)},
8011 /* Internal Speakers: output 0 (0x0d) */ 8011 /* Line in Rear */
8012 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, 8012 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, AC_PINCTL_VREF_50},
8013 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, 8013 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
8014 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
8015 /* Mic (rear) pin: input vref at 80% */
8016 {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
8017 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
8018 /* Front Mic pin: input vref at 80% */ 8014 /* Front Mic pin: input vref at 80% */
8019 {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, 8015 {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
8020 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, 8016 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
8021 /* Line In pin: use output 1 when in LineOut mode */ 8017 /* Rear mixer */
8022 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, 8018 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
8023 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE}, 8019 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
8024 {0x1a, AC_VERB_SET_CONNECT_SEL, 0x01}, 8020 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
8025 8021 /* Line-Out mixer: unmute input/output amp left and right (volume = 0) */
8026 /* FIXME: use matrix-type input source selection */ 8022 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
8027 /* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */ 8023 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
8028 /* Input mixer1: unmute Mic, F-Mic, Line, CD inputs */ 8024 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
8025 /* 0x24 [Audio Mixer] wcaps 0x20010b: Stereo Amp-In */
8029 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, 8026 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8030 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, 8027 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
8031 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, 8028 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
8032 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)}, 8029 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
8033 /* Input mixer2 */ 8030 /* 0x23 [Audio Mixer] wcaps 0x20010b: Stereo Amp-In */
8034 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, 8031 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8035 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, 8032 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
8036 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, 8033 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
8037 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)}, 8034 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
8038 /* Input mixer3 */ 8035 /* 0x22 [Audio Mixer] wcaps 0x20010b: Stereo Amp-In */
8039 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, 8036 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8040 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, 8037 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
8041 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, 8038 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
8042 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)}, 8039 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
8043 /* ADC1: mute amp left and right */ 8040 /* 0x07 [Audio Input] wcaps 0x10011b: Stereo Amp-In */
8044 {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, 8041 {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
8045 {0x07, AC_VERB_SET_CONNECT_SEL, 0x00}, 8042 {0x07, AC_VERB_SET_CONNECT_SEL, 0x00},
8046 /* ADC2: mute amp left and right */ 8043 /* 0x08 [Audio Input] wcaps 0x10011b: Stereo Amp-In */
8047 {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, 8044 {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
8048 {0x08, AC_VERB_SET_CONNECT_SEL, 0x00}, 8045 {0x08, AC_VERB_SET_CONNECT_SEL, 0x00},
8049 /* ADC3: mute amp left and right */ 8046 /* 0x09 [Audio Input] wcaps 0x10011b: Stereo Amp-In */
8050 {0x09, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, 8047 {0x09, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
8051 {0x09, AC_VERB_SET_CONNECT_SEL, 0x00}, 8048 {0x09, AC_VERB_SET_CONNECT_SEL, 0x00},
8052
8053 { } 8049 { }
8054}; 8050};
8055 8051
@@ -8118,7 +8114,7 @@ static void alc885_imac91_setup(struct hda_codec *codec)
8118 struct alc_spec *spec = codec->spec; 8114 struct alc_spec *spec = codec->spec;
8119 8115
8120 spec->autocfg.hp_pins[0] = 0x14; 8116 spec->autocfg.hp_pins[0] = 0x14;
8121 spec->autocfg.speaker_pins[0] = 0x15; 8117 spec->autocfg.speaker_pins[0] = 0x18;
8122 spec->autocfg.speaker_pins[1] = 0x1a; 8118 spec->autocfg.speaker_pins[1] = 0x1a;
8123} 8119}
8124 8120
@@ -9627,14 +9623,14 @@ static struct alc_config_preset alc882_presets[] = {
9627 .init_hook = alc885_imac24_init_hook, 9623 .init_hook = alc885_imac24_init_hook,
9628 }, 9624 },
9629 [ALC885_IMAC91] = { 9625 [ALC885_IMAC91] = {
9630 .mixers = { alc885_imac91_mixer, alc882_chmode_mixer }, 9626 .mixers = {alc885_imac91_mixer},
9631 .init_verbs = { alc885_imac91_init_verbs, 9627 .init_verbs = { alc885_imac91_init_verbs,
9632 alc880_gpio1_init_verbs }, 9628 alc880_gpio1_init_verbs },
9633 .num_dacs = ARRAY_SIZE(alc882_dac_nids), 9629 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
9634 .dac_nids = alc882_dac_nids, 9630 .dac_nids = alc882_dac_nids,
9635 .channel_mode = alc885_mbp_4ch_modes, 9631 .channel_mode = alc885_mba21_ch_modes,
9636 .num_channel_mode = ARRAY_SIZE(alc885_mbp_4ch_modes), 9632 .num_channel_mode = ARRAY_SIZE(alc885_mba21_ch_modes),
9637 .input_mux = &alc882_capture_source, 9633 .input_mux = &alc889A_imac91_capture_source,
9638 .dig_out_nid = ALC882_DIGOUT_NID, 9634 .dig_out_nid = ALC882_DIGOUT_NID,
9639 .dig_in_nid = ALC882_DIGIN_NID, 9635 .dig_in_nid = ALC882_DIGIN_NID,
9640 .unsol_event = alc_automute_amp_unsol_event, 9636 .unsol_event = alc_automute_amp_unsol_event,
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index a0e06d82da1f..f1e7babd6920 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2078,12 +2078,12 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = {
2078 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2000, 2078 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2000,
2079 "Intel D965", STAC_D965_3ST), 2079 "Intel D965", STAC_D965_3ST),
2080 /* Dell 3 stack systems */ 2080 /* Dell 3 stack systems */
2081 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_3ST),
2082 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01dd, "Dell Dimension E520", STAC_DELL_3ST), 2081 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01dd, "Dell Dimension E520", STAC_DELL_3ST),
2083 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ed, "Dell ", STAC_DELL_3ST), 2082 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ed, "Dell ", STAC_DELL_3ST),
2084 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f4, "Dell ", STAC_DELL_3ST), 2083 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f4, "Dell ", STAC_DELL_3ST),
2085 /* Dell 3 stack systems with verb table in BIOS */ 2084 /* Dell 3 stack systems with verb table in BIOS */
2086 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS), 2085 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS),
2086 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_BIOS),
2087 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS), 2087 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS),
2088 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS), 2088 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS),
2089 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_BIOS), 2089 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_BIOS),
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 8ae20208e7be..0221ca79b3ae 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -426,8 +426,8 @@ static const struct soc_enum wm8350_enum[] = {
426 SOC_ENUM_SINGLE(WM8350_INPUT_MIXER_VOLUME, 15, 2, wm8350_lr), 426 SOC_ENUM_SINGLE(WM8350_INPUT_MIXER_VOLUME, 15, 2, wm8350_lr),
427}; 427};
428 428
429static DECLARE_TLV_DB_LINEAR(pre_amp_tlv, -1200, 3525); 429static DECLARE_TLV_DB_SCALE(pre_amp_tlv, -1200, 3525, 0);
430static DECLARE_TLV_DB_LINEAR(out_pga_tlv, -5700, 600); 430static DECLARE_TLV_DB_SCALE(out_pga_tlv, -5700, 600, 0);
431static DECLARE_TLV_DB_SCALE(dac_pcm_tlv, -7163, 36, 1); 431static DECLARE_TLV_DB_SCALE(dac_pcm_tlv, -7163, 36, 1);
432static DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -12700, 50, 1); 432static DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -12700, 50, 1);
433static DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 1); 433static DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 1);
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 7f5d080536a0..8f294066b0ed 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -107,21 +107,21 @@ static void wm8400_codec_reset(struct snd_soc_codec *codec)
107 wm8400_reset_codec_reg_cache(wm8400->wm8400); 107 wm8400_reset_codec_reg_cache(wm8400->wm8400);
108} 108}
109 109
110static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600); 110static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);
111 111
112static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000); 112static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);
113 113
114static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, -2100, 0); 114static const DECLARE_TLV_DB_SCALE(out_mix_tlv, -2100, 0, 0);
115 115
116static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600); 116static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);
117 117
118static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0); 118static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);
119 119
120static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0); 120static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);
121 121
122static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763); 122static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);
123 123
124static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0); 124static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0);
125 125
126static int wm8400_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol, 126static int wm8400_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
127 struct snd_ctl_elem_value *ucontrol) 127 struct snd_ctl_elem_value *ucontrol)
@@ -440,7 +440,7 @@ static int outmixer_event (struct snd_soc_dapm_widget *w,
440/* INMIX dB values */ 440/* INMIX dB values */
441static const unsigned int in_mix_tlv[] = { 441static const unsigned int in_mix_tlv[] = {
442 TLV_DB_RANGE_HEAD(1), 442 TLV_DB_RANGE_HEAD(1),
443 0,7, TLV_DB_LINEAR_ITEM(-1200, 600), 443 0,7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
444}; 444};
445 445
446/* Left In PGA Connections */ 446/* Left In PGA Connections */
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 7b536d923ea9..c018772cc430 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -111,21 +111,21 @@ static const u16 wm8990_reg[] = {
111 111
112#define wm8990_reset(c) snd_soc_write(c, WM8990_RESET, 0) 112#define wm8990_reset(c) snd_soc_write(c, WM8990_RESET, 0)
113 113
114static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600); 114static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0);
115 115
116static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000); 116static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0);
117 117
118static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, 0, -2100); 118static const DECLARE_TLV_DB_SCALE(out_mix_tlv, 0, -2100, 0);
119 119
120static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600); 120static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0);
121 121
122static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0); 122static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0);
123 123
124static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0); 124static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0);
125 125
126static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763); 126static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0);
127 127
128static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0); 128static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0);
129 129
130static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol, 130static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
131 struct snd_ctl_elem_value *ucontrol) 131 struct snd_ctl_elem_value *ucontrol)
@@ -451,7 +451,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
451/* INMIX dB values */ 451/* INMIX dB values */
452static const unsigned int in_mix_tlv[] = { 452static const unsigned int in_mix_tlv[] = {
453 TLV_DB_RANGE_HEAD(1), 453 TLV_DB_RANGE_HEAD(1),
454 0, 7, TLV_DB_LINEAR_ITEM(-1200, 600), 454 0, 7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
455}; 455};
456 456
457/* Left In PGA Connections */ 457/* Left In PGA Connections */
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c
index 2b31ac673ea4..05f19c9284f4 100644
--- a/sound/soc/imx/imx-pcm-dma-mx2.c
+++ b/sound/soc/imx/imx-pcm-dma-mx2.c
@@ -73,7 +73,8 @@ static void snd_imx_dma_err_callback(int channel, void *data, int err)
73{ 73{
74 struct snd_pcm_substream *substream = data; 74 struct snd_pcm_substream *substream = data;
75 struct snd_soc_pcm_runtime *rtd = substream->private_data; 75 struct snd_soc_pcm_runtime *rtd = substream->private_data;
76 struct imx_pcm_dma_params *dma_params = rtd->dai->cpu_dai->dma_data; 76 struct imx_pcm_dma_params *dma_params =
77 snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);
77 struct snd_pcm_runtime *runtime = substream->runtime; 78 struct snd_pcm_runtime *runtime = substream->runtime;
78 struct imx_pcm_runtime_data *iprtd = runtime->private_data; 79 struct imx_pcm_runtime_data *iprtd = runtime->private_data;
79 int ret; 80 int ret;
@@ -102,7 +103,7 @@ static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream)
102 struct imx_pcm_runtime_data *iprtd = runtime->private_data; 103 struct imx_pcm_runtime_data *iprtd = runtime->private_data;
103 int ret; 104 int ret;
104 105
105 dma_params = snd_soc_get_dma_data(rtd->dai->cpu_dai, substream); 106 dma_params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);
106 107
107 iprtd->dma = imx_dma_request_by_prio(DRV_NAME, DMA_PRIO_HIGH); 108 iprtd->dma = imx_dma_request_by_prio(DRV_NAME, DMA_PRIO_HIGH);
108 if (iprtd->dma < 0) { 109 if (iprtd->dma < 0) {
@@ -212,7 +213,7 @@ static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
212 struct imx_pcm_runtime_data *iprtd = runtime->private_data; 213 struct imx_pcm_runtime_data *iprtd = runtime->private_data;
213 int err; 214 int err;
214 215
215 dma_params = snd_soc_get_dma_data(rtd->dai->cpu_dai, substream); 216 dma_params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);
216 217
217 iprtd->substream = substream; 218 iprtd->substream = substream;
218 iprtd->buf = (unsigned int *)substream->dma_buffer.area; 219 iprtd->buf = (unsigned int *)substream->dma_buffer.area;
diff --git a/sound/soc/sh/siu_dai.c b/sound/soc/sh/siu_dai.c
index d86ee1bfc03a..eeed5edd722b 100644
--- a/sound/soc/sh/siu_dai.c
+++ b/sound/soc/sh/siu_dai.c
@@ -588,6 +588,8 @@ static int siu_dai_prepare(struct snd_pcm_substream *substream,
588 ret = siu_dai_spbstart(port_info); 588 ret = siu_dai_spbstart(port_info);
589 if (ret < 0) 589 if (ret < 0)
590 goto fail; 590 goto fail;
591 } else {
592 ret = 0;
591 } 593 }
592 594
593 port_info->play_cap |= self; 595 port_info->play_cap |= self;
diff --git a/sound/usb/caiaq/control.c b/sound/usb/caiaq/control.c
index 36ed703a7416..91c804cd2782 100644
--- a/sound/usb/caiaq/control.c
+++ b/sound/usb/caiaq/control.c
@@ -42,21 +42,12 @@ static int control_info(struct snd_kcontrol *kcontrol,
42 42
43 switch (dev->chip.usb_id) { 43 switch (dev->chip.usb_id) {
44 case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ): 44 case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO8DJ):
45 if (pos == 0) {
46 /* current input mode of A8DJ */
47 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
48 uinfo->value.integer.min = 0;
49 uinfo->value.integer.max = 2;
50 return 0;
51 }
52 break;
53
54 case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ): 45 case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ):
55 if (pos == 0) { 46 if (pos == 0) {
56 /* current input mode of A4DJ */ 47 /* current input mode of A8DJ and A4DJ */
57 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; 48 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
58 uinfo->value.integer.min = 0; 49 uinfo->value.integer.min = 0;
59 uinfo->value.integer.max = 1; 50 uinfo->value.integer.max = 2;
60 return 0; 51 return 0;
61 } 52 }
62 break; 53 break;
@@ -86,14 +77,6 @@ static int control_get(struct snd_kcontrol *kcontrol,
86 struct snd_usb_caiaqdev *dev = caiaqdev(chip->card); 77 struct snd_usb_caiaqdev *dev = caiaqdev(chip->card);
87 int pos = kcontrol->private_value; 78 int pos = kcontrol->private_value;
88 79
89 if (dev->chip.usb_id ==
90 USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ)) {
91 /* A4DJ has only one control */
92 /* do not expose hardware input mode 0 */
93 ucontrol->value.integer.value[0] = dev->control_state[0] - 1;
94 return 0;
95 }
96
97 if (pos & CNT_INTVAL) 80 if (pos & CNT_INTVAL)
98 ucontrol->value.integer.value[0] 81 ucontrol->value.integer.value[0]
99 = dev->control_state[pos & ~CNT_INTVAL]; 82 = dev->control_state[pos & ~CNT_INTVAL];
@@ -112,20 +95,9 @@ static int control_put(struct snd_kcontrol *kcontrol,
112 int pos = kcontrol->private_value; 95 int pos = kcontrol->private_value;
113 unsigned char cmd = EP1_CMD_WRITE_IO; 96 unsigned char cmd = EP1_CMD_WRITE_IO;
114 97
115 switch (dev->chip.usb_id) { 98 if (dev->chip.usb_id ==
116 case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ): { 99 USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1))
117 /* A4DJ has only one control */
118 /* do not expose hardware input mode 0 */
119 dev->control_state[0] = ucontrol->value.integer.value[0] + 1;
120 snd_usb_caiaq_send_command(dev, EP1_CMD_WRITE_IO,
121 dev->control_state, sizeof(dev->control_state));
122 return 1;
123 }
124
125 case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
126 cmd = EP1_CMD_DIMM_LEDS; 100 cmd = EP1_CMD_DIMM_LEDS;
127 break;
128 }
129 101
130 if (pos & CNT_INTVAL) { 102 if (pos & CNT_INTVAL) {
131 dev->control_state[pos & ~CNT_INTVAL] 103 dev->control_state[pos & ~CNT_INTVAL]
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 805271827675..cdfb856bddd2 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -36,7 +36,7 @@
36#include "input.h" 36#include "input.h"
37 37
38MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); 38MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
39MODULE_DESCRIPTION("caiaq USB audio, version 1.3.20"); 39MODULE_DESCRIPTION("caiaq USB audio, version 1.3.21");
40MODULE_LICENSE("GPL"); 40MODULE_LICENSE("GPL");
41MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2}," 41MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2},"
42 "{Native Instruments, RigKontrol3}," 42 "{Native Instruments, RigKontrol3},"
@@ -320,12 +320,6 @@ static void __devinit setup_card(struct snd_usb_caiaqdev *dev)
320 } 320 }
321 321
322 break; 322 break;
323 case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AUDIO4DJ):
324 /* Audio 4 DJ - default input mode to phono */
325 dev->control_state[0] = 2;
326 snd_usb_caiaq_send_command(dev, EP1_CMD_WRITE_IO,
327 dev->control_state, 1);
328 break;
329 } 323 }
330 324
331 if (dev->spec.num_analog_audio_out + 325 if (dev->spec.num_analog_audio_out +
diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c
index 8bbfbfd4c658..dcb620796d9e 100644
--- a/sound/usb/caiaq/input.c
+++ b/sound/usb/caiaq/input.c
@@ -171,7 +171,7 @@ static void snd_caiaq_input_read_analog(struct snd_usb_caiaqdev *dev,
171 input_report_abs(input_dev, ABS_HAT0Y, (buf[4] << 8) | buf[5]); 171 input_report_abs(input_dev, ABS_HAT0Y, (buf[4] << 8) | buf[5]);
172 input_report_abs(input_dev, ABS_HAT1X, (buf[12] << 8) | buf[13]); 172 input_report_abs(input_dev, ABS_HAT1X, (buf[12] << 8) | buf[13]);
173 input_report_abs(input_dev, ABS_HAT1Y, (buf[2] << 8) | buf[3]); 173 input_report_abs(input_dev, ABS_HAT1Y, (buf[2] << 8) | buf[3]);
174 input_report_abs(input_dev, ABS_HAT2X, (buf[15] << 8) | buf[15]); 174 input_report_abs(input_dev, ABS_HAT2X, (buf[14] << 8) | buf[15]);
175 input_report_abs(input_dev, ABS_HAT2Y, (buf[0] << 8) | buf[1]); 175 input_report_abs(input_dev, ABS_HAT2Y, (buf[0] << 8) | buf[1]);
176 input_report_abs(input_dev, ABS_HAT3X, (buf[10] << 8) | buf[11]); 176 input_report_abs(input_dev, ABS_HAT3X, (buf[10] << 8) | buf[11]);
177 input_report_abs(input_dev, ABS_HAT3Y, (buf[6] << 8) | buf[7]); 177 input_report_abs(input_dev, ABS_HAT3Y, (buf[6] << 8) | buf[7]);
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index ef07a6d0dd5f..28ee1ce3971a 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -149,6 +149,47 @@ int snd_usb_add_audio_endpoint(struct snd_usb_audio *chip, int stream, struct au
149 return 0; 149 return 0;
150} 150}
151 151
152static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
153 struct usb_host_interface *alts,
154 int protocol, int iface_no)
155{
156 /* parsed with a v1 header here. that's ok as we only look at the
157 * header first which is the same for both versions */
158 struct uac_iso_endpoint_descriptor *csep;
159 struct usb_interface_descriptor *altsd = get_iface_desc(alts);
160 int attributes = 0;
161
162 csep = snd_usb_find_desc(alts->endpoint[0].extra, alts->endpoint[0].extralen, NULL, USB_DT_CS_ENDPOINT);
163
164 /* Creamware Noah has this descriptor after the 2nd endpoint */
165 if (!csep && altsd->bNumEndpoints >= 2)
166 csep = snd_usb_find_desc(alts->endpoint[1].extra, alts->endpoint[1].extralen, NULL, USB_DT_CS_ENDPOINT);
167
168 if (!csep || csep->bLength < 7 ||
169 csep->bDescriptorSubtype != UAC_EP_GENERAL) {
170 snd_printk(KERN_WARNING "%d:%u:%d : no or invalid"
171 " class specific endpoint descriptor\n",
172 chip->dev->devnum, iface_no,
173 altsd->bAlternateSetting);
174 return 0;
175 }
176
177 if (protocol == UAC_VERSION_1) {
178 attributes = csep->bmAttributes;
179 } else {
180 struct uac2_iso_endpoint_descriptor *csep2 =
181 (struct uac2_iso_endpoint_descriptor *) csep;
182
183 attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX;
184
185 /* emulate the endpoint attributes of a v1 device */
186 if (csep2->bmControls & UAC2_CONTROL_PITCH)
187 attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL;
188 }
189
190 return attributes;
191}
192
152int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no) 193int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
153{ 194{
154 struct usb_device *dev; 195 struct usb_device *dev;
@@ -158,8 +199,8 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
158 int i, altno, err, stream; 199 int i, altno, err, stream;
159 int format = 0, num_channels = 0; 200 int format = 0, num_channels = 0;
160 struct audioformat *fp = NULL; 201 struct audioformat *fp = NULL;
161 unsigned char *fmt, *csep;
162 int num, protocol; 202 int num, protocol;
203 struct uac_format_type_i_continuous_descriptor *fmt;
163 204
164 dev = chip->dev; 205 dev = chip->dev;
165 206
@@ -256,8 +297,8 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
256 dev->devnum, iface_no, altno); 297 dev->devnum, iface_no, altno);
257 continue; 298 continue;
258 } 299 }
259 if (((protocol == UAC_VERSION_1) && (fmt[0] < 8)) || 300 if (((protocol == UAC_VERSION_1) && (fmt->bLength < 8)) ||
260 ((protocol == UAC_VERSION_2) && (fmt[0] != 6))) { 301 ((protocol == UAC_VERSION_2) && (fmt->bLength != 6))) {
261 snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_FORMAT_TYPE desc\n", 302 snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_FORMAT_TYPE desc\n",
262 dev->devnum, iface_no, altno); 303 dev->devnum, iface_no, altno);
263 continue; 304 continue;
@@ -268,7 +309,9 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
268 * with the previous one, except for a larger packet size, but 309 * with the previous one, except for a larger packet size, but
269 * is actually a mislabeled two-channel setting; ignore it. 310 * is actually a mislabeled two-channel setting; ignore it.
270 */ 311 */
271 if (fmt[4] == 1 && fmt[5] == 2 && altno == 2 && num == 3 && 312 if (fmt->bNrChannels == 1 &&
313 fmt->bSubframeSize == 2 &&
314 altno == 2 && num == 3 &&
272 fp && fp->altsetting == 1 && fp->channels == 1 && 315 fp && fp->altsetting == 1 && fp->channels == 1 &&
273 fp->formats == SNDRV_PCM_FMTBIT_S16_LE && 316 fp->formats == SNDRV_PCM_FMTBIT_S16_LE &&
274 protocol == UAC_VERSION_1 && 317 protocol == UAC_VERSION_1 &&
@@ -276,17 +319,6 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
276 fp->maxpacksize * 2) 319 fp->maxpacksize * 2)
277 continue; 320 continue;
278 321
279 csep = snd_usb_find_desc(alts->endpoint[0].extra, alts->endpoint[0].extralen, NULL, USB_DT_CS_ENDPOINT);
280 /* Creamware Noah has this descriptor after the 2nd endpoint */
281 if (!csep && altsd->bNumEndpoints >= 2)
282 csep = snd_usb_find_desc(alts->endpoint[1].extra, alts->endpoint[1].extralen, NULL, USB_DT_CS_ENDPOINT);
283 if (!csep || csep[0] < 7 || csep[2] != UAC_EP_GENERAL) {
284 snd_printk(KERN_WARNING "%d:%u:%d : no or invalid"
285 " class specific endpoint descriptor\n",
286 dev->devnum, iface_no, altno);
287 csep = NULL;
288 }
289
290 fp = kzalloc(sizeof(*fp), GFP_KERNEL); 322 fp = kzalloc(sizeof(*fp), GFP_KERNEL);
291 if (! fp) { 323 if (! fp) {
292 snd_printk(KERN_ERR "cannot malloc\n"); 324 snd_printk(KERN_ERR "cannot malloc\n");
@@ -305,7 +337,7 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
305 if (snd_usb_get_speed(dev) == USB_SPEED_HIGH) 337 if (snd_usb_get_speed(dev) == USB_SPEED_HIGH)
306 fp->maxpacksize = (((fp->maxpacksize >> 11) & 3) + 1) 338 fp->maxpacksize = (((fp->maxpacksize >> 11) & 3) + 1)
307 * (fp->maxpacksize & 0x7ff); 339 * (fp->maxpacksize & 0x7ff);
308 fp->attributes = csep ? csep[3] : 0; 340 fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
309 341
310 /* some quirks for attributes here */ 342 /* some quirks for attributes here */
311 343
diff --git a/sound/usb/format.c b/sound/usb/format.c
index b87cf87c4e7b..fe29d61de19b 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -278,12 +278,11 @@ err:
278 * parse the format type I and III descriptors 278 * parse the format type I and III descriptors
279 */ 279 */
280static int parse_audio_format_i(struct snd_usb_audio *chip, 280static int parse_audio_format_i(struct snd_usb_audio *chip,
281 struct audioformat *fp, 281 struct audioformat *fp, int format,
282 int format, void *_fmt, 282 struct uac_format_type_i_continuous_descriptor *fmt,
283 struct usb_host_interface *iface) 283 struct usb_host_interface *iface)
284{ 284{
285 struct usb_interface_descriptor *altsd = get_iface_desc(iface); 285 struct usb_interface_descriptor *altsd = get_iface_desc(iface);
286 struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
287 int protocol = altsd->bInterfaceProtocol; 286 int protocol = altsd->bInterfaceProtocol;
288 int pcm_format, ret; 287 int pcm_format, ret;
289 288
@@ -320,7 +319,7 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
320 switch (protocol) { 319 switch (protocol) {
321 case UAC_VERSION_1: 320 case UAC_VERSION_1:
322 fp->channels = fmt->bNrChannels; 321 fp->channels = fmt->bNrChannels;
323 ret = parse_audio_format_rates_v1(chip, fp, _fmt, 7); 322 ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7);
324 break; 323 break;
325 case UAC_VERSION_2: 324 case UAC_VERSION_2:
326 /* fp->channels is already set in this case */ 325 /* fp->channels is already set in this case */
@@ -392,12 +391,12 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip,
392} 391}
393 392
394int snd_usb_parse_audio_format(struct snd_usb_audio *chip, struct audioformat *fp, 393int snd_usb_parse_audio_format(struct snd_usb_audio *chip, struct audioformat *fp,
395 int format, unsigned char *fmt, int stream, 394 int format, struct uac_format_type_i_continuous_descriptor *fmt,
396 struct usb_host_interface *iface) 395 int stream, struct usb_host_interface *iface)
397{ 396{
398 int err; 397 int err;
399 398
400 switch (fmt[3]) { 399 switch (fmt->bFormatType) {
401 case UAC_FORMAT_TYPE_I: 400 case UAC_FORMAT_TYPE_I:
402 case UAC_FORMAT_TYPE_III: 401 case UAC_FORMAT_TYPE_III:
403 err = parse_audio_format_i(chip, fp, format, fmt, iface); 402 err = parse_audio_format_i(chip, fp, format, fmt, iface);
@@ -407,10 +406,11 @@ int snd_usb_parse_audio_format(struct snd_usb_audio *chip, struct audioformat *f
407 break; 406 break;
408 default: 407 default:
409 snd_printd(KERN_INFO "%d:%u:%d : format type %d is not supported yet\n", 408 snd_printd(KERN_INFO "%d:%u:%d : format type %d is not supported yet\n",
410 chip->dev->devnum, fp->iface, fp->altsetting, fmt[3]); 409 chip->dev->devnum, fp->iface, fp->altsetting,
411 return -1; 410 fmt->bFormatType);
411 return -ENOTSUPP;
412 } 412 }
413 fp->fmt_type = fmt[3]; 413 fp->fmt_type = fmt->bFormatType;
414 if (err < 0) 414 if (err < 0)
415 return err; 415 return err;
416#if 1 416#if 1
@@ -421,10 +421,10 @@ int snd_usb_parse_audio_format(struct snd_usb_audio *chip, struct audioformat *f
421 if (chip->usb_id == USB_ID(0x041e, 0x3000) || 421 if (chip->usb_id == USB_ID(0x041e, 0x3000) ||
422 chip->usb_id == USB_ID(0x041e, 0x3020) || 422 chip->usb_id == USB_ID(0x041e, 0x3020) ||
423 chip->usb_id == USB_ID(0x041e, 0x3061)) { 423 chip->usb_id == USB_ID(0x041e, 0x3061)) {
424 if (fmt[3] == UAC_FORMAT_TYPE_I && 424 if (fmt->bFormatType == UAC_FORMAT_TYPE_I &&
425 fp->rates != SNDRV_PCM_RATE_48000 && 425 fp->rates != SNDRV_PCM_RATE_48000 &&
426 fp->rates != SNDRV_PCM_RATE_96000) 426 fp->rates != SNDRV_PCM_RATE_96000)
427 return -1; 427 return -ENOTSUPP;
428 } 428 }
429#endif 429#endif
430 return 0; 430 return 0;
diff --git a/sound/usb/format.h b/sound/usb/format.h
index 8298c4e8ddfa..387924f0af85 100644
--- a/sound/usb/format.h
+++ b/sound/usb/format.h
@@ -1,8 +1,9 @@
1#ifndef __USBAUDIO_FORMAT_H 1#ifndef __USBAUDIO_FORMAT_H
2#define __USBAUDIO_FORMAT_H 2#define __USBAUDIO_FORMAT_H
3 3
4int snd_usb_parse_audio_format(struct snd_usb_audio *chip, struct audioformat *fp, 4int snd_usb_parse_audio_format(struct snd_usb_audio *chip,
5 int format, unsigned char *fmt, int stream, 5 struct audioformat *fp, int format,
6 struct usb_host_interface *iface); 6 struct uac_format_type_i_continuous_descriptor *fmt,
7 int stream, struct usb_host_interface *iface);
7 8
8#endif /* __USBAUDIO_FORMAT_H */ 9#endif /* __USBAUDIO_FORMAT_H */
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 8b1e4b124a9f..46785643c66d 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -645,6 +645,105 @@ static struct usb_protocol_ops snd_usbmidi_cme_ops = {
645}; 645};
646 646
647/* 647/*
648 * AKAI MPD16 protocol:
649 *
650 * For control port (endpoint 1):
651 * ==============================
652 * One or more chunks consisting of first byte of (0x10 | msg_len) and then a
653 * SysEx message (msg_len=9 bytes long).
654 *
655 * For data port (endpoint 2):
656 * ===========================
657 * One or more chunks consisting of first byte of (0x20 | msg_len) and then a
658 * MIDI message (msg_len bytes long)
659 *
660 * Messages sent: Active Sense, Note On, Poly Pressure, Control Change.
661 */
662static void snd_usbmidi_akai_input(struct snd_usb_midi_in_endpoint *ep,
663 uint8_t *buffer, int buffer_length)
664{
665 unsigned int pos = 0;
666 unsigned int len = (unsigned int)buffer_length;
667 while (pos < len) {
668 unsigned int port = (buffer[pos] >> 4) - 1;
669 unsigned int msg_len = buffer[pos] & 0x0f;
670 pos++;
671 if (pos + msg_len <= len && port < 2)
672 snd_usbmidi_input_data(ep, 0, &buffer[pos], msg_len);
673 pos += msg_len;
674 }
675}
676
677#define MAX_AKAI_SYSEX_LEN 9
678
679static void snd_usbmidi_akai_output(struct snd_usb_midi_out_endpoint *ep,
680 struct urb *urb)
681{
682 uint8_t *msg;
683 int pos, end, count, buf_end;
684 uint8_t tmp[MAX_AKAI_SYSEX_LEN];
685 struct snd_rawmidi_substream *substream = ep->ports[0].substream;
686
687 if (!ep->ports[0].active)
688 return;
689
690 msg = urb->transfer_buffer + urb->transfer_buffer_length;
691 buf_end = ep->max_transfer - MAX_AKAI_SYSEX_LEN - 1;
692
693 /* only try adding more data when there's space for at least 1 SysEx */
694 while (urb->transfer_buffer_length < buf_end) {
695 count = snd_rawmidi_transmit_peek(substream,
696 tmp, MAX_AKAI_SYSEX_LEN);
697 if (!count) {
698 ep->ports[0].active = 0;
699 return;
700 }
701 /* try to skip non-SysEx data */
702 for (pos = 0; pos < count && tmp[pos] != 0xF0; pos++)
703 ;
704
705 if (pos > 0) {
706 snd_rawmidi_transmit_ack(substream, pos);
707 continue;
708 }
709
710 /* look for the start or end marker */
711 for (end = 1; end < count && tmp[end] < 0xF0; end++)
712 ;
713
714 /* next SysEx started before the end of current one */
715 if (end < count && tmp[end] == 0xF0) {
716 /* it's incomplete - drop it */
717 snd_rawmidi_transmit_ack(substream, end);
718 continue;
719 }
720 /* SysEx complete */
721 if (end < count && tmp[end] == 0xF7) {
722 /* queue it, ack it, and get the next one */
723 count = end + 1;
724 msg[0] = 0x10 | count;
725 memcpy(&msg[1], tmp, count);
726 snd_rawmidi_transmit_ack(substream, count);
727 urb->transfer_buffer_length += count + 1;
728 msg += count + 1;
729 continue;
730 }
731 /* less than 9 bytes and no end byte - wait for more */
732 if (count < MAX_AKAI_SYSEX_LEN) {
733 ep->ports[0].active = 0;
734 return;
735 }
736 /* 9 bytes and no end marker in sight - malformed, skip it */
737 snd_rawmidi_transmit_ack(substream, count);
738 }
739}
740
741static struct usb_protocol_ops snd_usbmidi_akai_ops = {
742 .input = snd_usbmidi_akai_input,
743 .output = snd_usbmidi_akai_output,
744};
745
746/*
648 * Novation USB MIDI protocol: number of data bytes is in the first byte 747 * Novation USB MIDI protocol: number of data bytes is in the first byte
649 * (when receiving) (+1!) or in the second byte (when sending); data begins 748 * (when receiving) (+1!) or in the second byte (when sending); data begins
650 * at the third byte. 749 * at the third byte.
@@ -1434,6 +1533,11 @@ static struct port_info {
1434 EXTERNAL_PORT(0x086a, 0x0001, 8, "%s Broadcast"), 1533 EXTERNAL_PORT(0x086a, 0x0001, 8, "%s Broadcast"),
1435 EXTERNAL_PORT(0x086a, 0x0002, 8, "%s Broadcast"), 1534 EXTERNAL_PORT(0x086a, 0x0002, 8, "%s Broadcast"),
1436 EXTERNAL_PORT(0x086a, 0x0003, 4, "%s Broadcast"), 1535 EXTERNAL_PORT(0x086a, 0x0003, 4, "%s Broadcast"),
1536 /* Akai MPD16 */
1537 CONTROL_PORT(0x09e8, 0x0062, 0, "%s Control"),
1538 PORT_INFO(0x09e8, 0x0062, 1, "%s MIDI", 0,
1539 SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC |
1540 SNDRV_SEQ_PORT_TYPE_HARDWARE),
1437 /* Access Music Virus TI */ 1541 /* Access Music Virus TI */
1438 EXTERNAL_PORT(0x133e, 0x0815, 0, "%s MIDI"), 1542 EXTERNAL_PORT(0x133e, 0x0815, 0, "%s MIDI"),
1439 PORT_INFO(0x133e, 0x0815, 1, "%s Synth", 0, 1543 PORT_INFO(0x133e, 0x0815, 1, "%s Synth", 0,
@@ -2035,6 +2139,12 @@ int snd_usbmidi_create(struct snd_card *card,
2035 umidi->usb_protocol_ops = &snd_usbmidi_cme_ops; 2139 umidi->usb_protocol_ops = &snd_usbmidi_cme_ops;
2036 err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints); 2140 err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
2037 break; 2141 break;
2142 case QUIRK_MIDI_AKAI:
2143 umidi->usb_protocol_ops = &snd_usbmidi_akai_ops;
2144 err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
2145 /* endpoint 1 is input-only */
2146 endpoints[1].out_cables = 0;
2147 break;
2038 default: 2148 default:
2039 snd_printd(KERN_ERR "invalid quirk type %d\n", quirk->type); 2149 snd_printd(KERN_ERR "invalid quirk type %d\n", quirk->type);
2040 err = -ENXIO; 2150 err = -ENXIO;
diff --git a/sound/usb/midi.h b/sound/usb/midi.h
index 2089ec987c66..2fca80b744c0 100644
--- a/sound/usb/midi.h
+++ b/sound/usb/midi.h
@@ -37,6 +37,8 @@ struct snd_usb_midi_endpoint_info {
37 37
38/* for QUIRK_MIDI_CME, data is NULL */ 38/* for QUIRK_MIDI_CME, data is NULL */
39 39
40/* for QUIRK_MIDI_AKAI, data is NULL */
41
40int snd_usbmidi_create(struct snd_card *card, 42int snd_usbmidi_create(struct snd_card *card,
41 struct usb_interface *iface, 43 struct usb_interface *iface,
42 struct list_head *midi_list, 44 struct list_head *midi_list,
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 97dd17655104..03ce971e0027 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1126,7 +1126,7 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
1126 } else { 1126 } else {
1127 struct uac2_feature_unit_descriptor *ftr = _ftr; 1127 struct uac2_feature_unit_descriptor *ftr = _ftr;
1128 csize = 4; 1128 csize = 4;
1129 channels = (hdr->bLength - 6) / 4; 1129 channels = (hdr->bLength - 6) / 4 - 1;
1130 bmaControls = ftr->bmaControls; 1130 bmaControls = ftr->bmaControls;
1131 } 1131 }
1132 1132
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 2bf0d77d1768..056587de7be4 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -120,10 +120,6 @@ static int init_pitch_v1(struct snd_usb_audio *chip, int iface,
120 120
121 ep = get_endpoint(alts, 0)->bEndpointAddress; 121 ep = get_endpoint(alts, 0)->bEndpointAddress;
122 122
123 /* if endpoint doesn't have pitch control, bail out */
124 if (!(fmt->attributes & UAC_EP_CS_ATTR_PITCH_CONTROL))
125 return 0;
126
127 data[0] = 1; 123 data[0] = 1;
128 if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR, 124 if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR,
129 USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_OUT, 125 USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_OUT,
@@ -137,8 +133,32 @@ static int init_pitch_v1(struct snd_usb_audio *chip, int iface,
137 return 0; 133 return 0;
138} 134}
139 135
136static int init_pitch_v2(struct snd_usb_audio *chip, int iface,
137 struct usb_host_interface *alts,
138 struct audioformat *fmt)
139{
140 struct usb_device *dev = chip->dev;
141 unsigned char data[1];
142 unsigned int ep;
143 int err;
144
145 ep = get_endpoint(alts, 0)->bEndpointAddress;
146
147 data[0] = 1;
148 if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC2_CS_CUR,
149 USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_OUT,
150 UAC2_EP_CS_PITCH << 8, 0,
151 data, sizeof(data), 1000)) < 0) {
152 snd_printk(KERN_ERR "%d:%d:%d: cannot set enable PITCH (v2)\n",
153 dev->devnum, iface, fmt->altsetting);
154 return err;
155 }
156
157 return 0;
158}
159
140/* 160/*
141 * initialize the picth control and sample rate 161 * initialize the pitch control and sample rate
142 */ 162 */
143int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface, 163int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
144 struct usb_host_interface *alts, 164 struct usb_host_interface *alts,
@@ -146,13 +166,16 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
146{ 166{
147 struct usb_interface_descriptor *altsd = get_iface_desc(alts); 167 struct usb_interface_descriptor *altsd = get_iface_desc(alts);
148 168
169 /* if endpoint doesn't have pitch control, bail out */
170 if (!(fmt->attributes & UAC_EP_CS_ATTR_PITCH_CONTROL))
171 return 0;
172
149 switch (altsd->bInterfaceProtocol) { 173 switch (altsd->bInterfaceProtocol) {
150 case UAC_VERSION_1: 174 case UAC_VERSION_1:
151 return init_pitch_v1(chip, iface, alts, fmt); 175 return init_pitch_v1(chip, iface, alts, fmt);
152 176
153 case UAC_VERSION_2: 177 case UAC_VERSION_2:
154 /* not implemented yet */ 178 return init_pitch_v2(chip, iface, alts, fmt);
155 return 0;
156 } 179 }
157 180
158 return -EINVAL; 181 return -EINVAL;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 91ddef31bcbd..f8797f61a24b 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1973,6 +1973,17 @@ YAMAHA_DEVICE(0x7010, "UB99"),
1973 } 1973 }
1974}, 1974},
1975 1975
1976/* AKAI devices */
1977{
1978 USB_DEVICE(0x09e8, 0x0062),
1979 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
1980 .vendor_name = "AKAI",
1981 .product_name = "MPD16",
1982 .ifnum = 0,
1983 .type = QUIRK_MIDI_AKAI,
1984 }
1985},
1986
1976/* TerraTec devices */ 1987/* TerraTec devices */
1977{ 1988{
1978 USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012), 1989 USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 136e5b4cf6de..b45e54c09ba2 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -289,6 +289,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
289 [QUIRK_MIDI_FASTLANE] = create_any_midi_quirk, 289 [QUIRK_MIDI_FASTLANE] = create_any_midi_quirk,
290 [QUIRK_MIDI_EMAGIC] = create_any_midi_quirk, 290 [QUIRK_MIDI_EMAGIC] = create_any_midi_quirk,
291 [QUIRK_MIDI_CME] = create_any_midi_quirk, 291 [QUIRK_MIDI_CME] = create_any_midi_quirk,
292 [QUIRK_MIDI_AKAI] = create_any_midi_quirk,
292 [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk, 293 [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
293 [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk, 294 [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
294 [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk, 295 [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index d679e72a3e5c..06ebf24d3a4d 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -74,6 +74,7 @@ enum quirk_type {
74 QUIRK_MIDI_FASTLANE, 74 QUIRK_MIDI_FASTLANE,
75 QUIRK_MIDI_EMAGIC, 75 QUIRK_MIDI_EMAGIC,
76 QUIRK_MIDI_CME, 76 QUIRK_MIDI_CME,
77 QUIRK_MIDI_AKAI,
77 QUIRK_MIDI_US122L, 78 QUIRK_MIDI_US122L,
78 QUIRK_AUDIO_STANDARD_INTERFACE, 79 QUIRK_AUDIO_STANDARD_INTERFACE,
79 QUIRK_AUDIO_FIXED_ENDPOINT, 80 QUIRK_AUDIO_FIXED_ENDPOINT,
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 2cab8e8c33d0..909fa766fa1c 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -43,6 +43,9 @@ OPTIONS
43-c:: 43-c::
44 scale counter values 44 scale counter values
45 45
46-B::
47 print large numbers with thousands' separators according to locale
48
46EXAMPLES 49EXAMPLES
47-------- 50--------
48 51
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 77bcc9b130f5..96db5248e995 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -277,7 +277,7 @@ static void hist_entry__print_hits(struct hist_entry *self)
277 printf("%*s: %Lu\n", BITS_PER_LONG / 2, "h->sum", h->sum); 277 printf("%*s: %Lu\n", BITS_PER_LONG / 2, "h->sum", h->sum);
278} 278}
279 279
280static void annotate_sym(struct hist_entry *he) 280static int hist_entry__tty_annotate(struct hist_entry *he)
281{ 281{
282 struct map *map = he->ms.map; 282 struct map *map = he->ms.map;
283 struct dso *dso = map->dso; 283 struct dso *dso = map->dso;
@@ -288,7 +288,7 @@ static void annotate_sym(struct hist_entry *he)
288 struct objdump_line *pos, *n; 288 struct objdump_line *pos, *n;
289 289
290 if (hist_entry__annotate(he, &head) < 0) 290 if (hist_entry__annotate(he, &head) < 0)
291 return; 291 return -1;
292 292
293 if (full_paths) 293 if (full_paths)
294 d_filename = filename; 294 d_filename = filename;
@@ -317,30 +317,59 @@ static void annotate_sym(struct hist_entry *he)
317 317
318 if (print_line) 318 if (print_line)
319 free_source_line(he, len); 319 free_source_line(he, len);
320
321 return 0;
320} 322}
321 323
322static void hists__find_annotations(struct hists *self) 324static void hists__find_annotations(struct hists *self)
323{ 325{
324 struct rb_node *nd; 326 struct rb_node *first = rb_first(&self->entries), *nd = first;
327 int key = KEY_RIGHT;
325 328
326 for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) { 329 while (nd) {
327 struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); 330 struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
328 struct sym_priv *priv; 331 struct sym_priv *priv;
329 332
330 if (he->ms.sym == NULL) 333 if (he->ms.sym == NULL || he->ms.map->dso->annotate_warned)
331 continue; 334 goto find_next;
332 335
333 priv = symbol__priv(he->ms.sym); 336 priv = symbol__priv(he->ms.sym);
334 if (priv->hist == NULL) 337 if (priv->hist == NULL) {
338find_next:
339 if (key == KEY_LEFT)
340 nd = rb_prev(nd);
341 else
342 nd = rb_next(nd);
335 continue; 343 continue;
344 }
336 345
337 annotate_sym(he); 346 if (use_browser > 0) {
338 /* 347 key = hist_entry__tui_annotate(he);
339 * Since we have a hist_entry per IP for the same symbol, free 348 if (is_exit_key(key))
340 * he->ms.sym->hist to signal we already processed this symbol. 349 break;
341 */ 350 switch (key) {
342 free(priv->hist); 351 case KEY_RIGHT:
343 priv->hist = NULL; 352 case '\t':
353 nd = rb_next(nd);
354 break;
355 case KEY_LEFT:
356 if (nd == first)
357 continue;
358 nd = rb_prev(nd);
359 default:
360 break;
361 }
362 } else {
363 hist_entry__tty_annotate(he);
364 nd = rb_next(nd);
365 /*
366 * Since we have a hist_entry per IP for the same
367 * symbol, free he->ms.sym->hist to signal we already
368 * processed this symbol.
369 */
370 free(priv->hist);
371 priv->hist = NULL;
372 }
344 } 373 }
345} 374}
346 375
@@ -416,6 +445,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
416{ 445{
417 argc = parse_options(argc, argv, options, annotate_usage, 0); 446 argc = parse_options(argc, argv, options, annotate_usage, 0);
418 447
448 setup_browser();
449
419 symbol_conf.priv_size = sizeof(struct sym_priv); 450 symbol_conf.priv_size = sizeof(struct sym_priv);
420 symbol_conf.try_vmlinux_path = true; 451 symbol_conf.try_vmlinux_path = true;
421 452
@@ -435,8 +466,6 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used)
435 sym_hist_filter = argv[0]; 466 sym_hist_filter = argv[0];
436 } 467 }
437 468
438 setup_pager();
439
440 if (field_sep && *field_sep == '.') { 469 if (field_sep && *field_sep == '.') {
441 pr_err("'.' is the only non valid --field-separator argument\n"); 470 pr_err("'.' is the only non valid --field-separator argument\n");
442 return -1; 471 return -1;
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 61c6d70732c9..e4a4da32a568 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -65,8 +65,10 @@ static int parse_probe_event(const char *str)
65 int ret; 65 int ret;
66 66
67 pr_debug("probe-definition(%d): %s\n", params.nevents, str); 67 pr_debug("probe-definition(%d): %s\n", params.nevents, str);
68 if (++params.nevents == MAX_PROBES) 68 if (++params.nevents == MAX_PROBES) {
69 die("Too many probes (> %d) are specified.", MAX_PROBES); 69 pr_err("Too many probes (> %d) were specified.", MAX_PROBES);
70 return -1;
71 }
70 72
71 /* Parse a perf-probe command into event */ 73 /* Parse a perf-probe command into event */
72 ret = parse_perf_probe_command(str, pev); 74 ret = parse_perf_probe_command(str, pev);
@@ -84,7 +86,9 @@ static int parse_probe_event_argv(int argc, const char **argv)
84 len = 0; 86 len = 0;
85 for (i = 0; i < argc; i++) 87 for (i = 0; i < argc; i++)
86 len += strlen(argv[i]) + 1; 88 len += strlen(argv[i]) + 1;
87 buf = xzalloc(len + 1); 89 buf = zalloc(len + 1);
90 if (buf == NULL)
91 return -ENOMEM;
88 len = 0; 92 len = 0;
89 for (i = 0; i < argc; i++) 93 for (i = 0; i < argc; i++)
90 len += sprintf(&buf[len], "%s ", argv[i]); 94 len += sprintf(&buf[len], "%s ", argv[i]);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index cb46c7d0ea99..9bc89050e6f8 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -25,6 +25,7 @@
25 25
26#include <unistd.h> 26#include <unistd.h>
27#include <sched.h> 27#include <sched.h>
28#include <sys/mman.h>
28 29
29enum write_mode_t { 30enum write_mode_t {
30 WRITE_FORCE, 31 WRITE_FORCE,
@@ -60,13 +61,8 @@ static bool call_graph = false;
60static bool inherit_stat = false; 61static bool inherit_stat = false;
61static bool no_samples = false; 62static bool no_samples = false;
62static bool sample_address = false; 63static bool sample_address = false;
63static bool multiplex = false;
64static int multiplex_fd = -1;
65 64
66static long samples = 0; 65static long samples = 0;
67static struct timeval last_read;
68static struct timeval this_read;
69
70static u64 bytes_written = 0; 66static u64 bytes_written = 0;
71 67
72static struct pollfd *event_array; 68static struct pollfd *event_array;
@@ -86,7 +82,7 @@ struct mmap_data {
86 unsigned int prev; 82 unsigned int prev;
87}; 83};
88 84
89static struct mmap_data *mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; 85static struct mmap_data mmap_array[MAX_NR_CPUS];
90 86
91static unsigned long mmap_read_head(struct mmap_data *md) 87static unsigned long mmap_read_head(struct mmap_data *md)
92{ 88{
@@ -146,8 +142,6 @@ static void mmap_read(struct mmap_data *md)
146 void *buf; 142 void *buf;
147 int diff; 143 int diff;
148 144
149 gettimeofday(&this_read, NULL);
150
151 /* 145 /*
152 * If we're further behind than half the buffer, there's a chance 146 * If we're further behind than half the buffer, there's a chance
153 * the writer will bite our tail and mess up the samples under us. 147 * the writer will bite our tail and mess up the samples under us.
@@ -158,23 +152,13 @@ static void mmap_read(struct mmap_data *md)
 	 */
 	diff = head - old;
 	if (diff < 0) {
-		struct timeval iv;
-		unsigned long msecs;
-
-		timersub(&this_read, &last_read, &iv);
-		msecs = iv.tv_sec*1000 + iv.tv_usec/1000;
-
-		fprintf(stderr, "WARNING: failed to keep up with mmap data."
-			" Last read %lu msecs ago.\n", msecs);
-
+		fprintf(stderr, "WARNING: failed to keep up with mmap data\n");
 		/*
 		 * head points to a known good entry, start there.
 		 */
 		old = head;
 	}
 
-	last_read = this_read;
-
 	if (old != head)
 		samples++;
 
@@ -380,27 +364,30 @@ try_again:
 	 */
 		if (group && group_fd == -1)
 			group_fd = fd[nr_cpu][counter][thread_index];
-		if (multiplex && multiplex_fd == -1)
-			multiplex_fd = fd[nr_cpu][counter][thread_index];
 
-		if (multiplex && fd[nr_cpu][counter][thread_index] != multiplex_fd) {
-
-			ret = ioctl(fd[nr_cpu][counter][thread_index], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd);
-			assert(ret != -1);
+		if (counter || thread_index) {
+			ret = ioctl(fd[nr_cpu][counter][thread_index],
+				    PERF_EVENT_IOC_SET_OUTPUT,
+				    fd[nr_cpu][0][0]);
+			if (ret) {
+				error("failed to set output: %d (%s)\n", errno,
+				      strerror(errno));
+				exit(-1);
+			}
 		} else {
-			event_array[nr_poll].fd = fd[nr_cpu][counter][thread_index];
-			event_array[nr_poll].events = POLLIN;
-			nr_poll++;
-
-			mmap_array[nr_cpu][counter][thread_index].counter = counter;
-			mmap_array[nr_cpu][counter][thread_index].prev = 0;
-			mmap_array[nr_cpu][counter][thread_index].mask = mmap_pages*page_size - 1;
-			mmap_array[nr_cpu][counter][thread_index].base = mmap(NULL, (mmap_pages+1)*page_size,
+			mmap_array[nr_cpu].counter = counter;
+			mmap_array[nr_cpu].prev = 0;
+			mmap_array[nr_cpu].mask = mmap_pages*page_size - 1;
+			mmap_array[nr_cpu].base = mmap(NULL, (mmap_pages+1)*page_size,
 				PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter][thread_index], 0);
-			if (mmap_array[nr_cpu][counter][thread_index].base == MAP_FAILED) {
+			if (mmap_array[nr_cpu].base == MAP_FAILED) {
 				error("failed to mmap with %d (%s)\n", errno, strerror(errno));
 				exit(-1);
 			}
+
+			event_array[nr_poll].fd = fd[nr_cpu][counter][thread_index];
+			event_array[nr_poll].events = POLLIN;
+			nr_poll++;
 		}
 
 		if (filter != NULL) {
@@ -501,16 +488,11 @@ static struct perf_event_header finished_round_event = {
 
 static void mmap_read_all(void)
 {
-	int i, counter, thread;
+	int i;
 
 	for (i = 0; i < nr_cpu; i++) {
-		for (counter = 0; counter < nr_counters; counter++) {
-			for (thread = 0; thread < thread_num; thread++) {
-				if (mmap_array[i][counter][thread].base)
-					mmap_read(&mmap_array[i][counter][thread]);
-			}
-
-		}
+		if (mmap_array[i].base)
+			mmap_read(&mmap_array[i]);
 	}
 
 	if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
@@ -834,8 +816,6 @@ static const struct option options[] = {
834 "Sample addresses"), 816 "Sample addresses"),
835 OPT_BOOLEAN('n', "no-samples", &no_samples, 817 OPT_BOOLEAN('n', "no-samples", &no_samples,
836 "don't sample"), 818 "don't sample"),
837 OPT_BOOLEAN('M', "multiplex", &multiplex,
838 "multiplex counter output in a single channel"),
839 OPT_END() 819 OPT_END()
840}; 820};
841 821
@@ -887,9 +867,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
 	for (i = 0; i < MAX_NR_CPUS; i++) {
 		for (j = 0; j < MAX_COUNTERS; j++) {
 			fd[i][j] = malloc(sizeof(int)*thread_num);
-			mmap_array[i][j] = zalloc(
-				sizeof(struct mmap_data)*thread_num);
-			if (!fd[i][j] || !mmap_array[i][j])
+			if (!fd[i][j])
 				return -ENOMEM;
 		}
 	}
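
The builtin-record.c changes above drop the old -M/--multiplex option and instead always redirect every counter on a CPU into the first counter's mmap ring buffer with PERF_EVENT_IOC_SET_OUTPUT. A minimal standalone sketch of that redirection pattern follows (editorial illustration, not code from this commit; the event choices, buffer size, and error handling are arbitrary):

/*
 * Sketch: one ring buffer per "leader" event, other events redirected
 * into it via PERF_EVENT_IOC_SET_OUTPUT (both events measure this task).
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long page_size = sysconf(_SC_PAGESIZE);
	int leader, other;
	void *base;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	leader = sys_perf_event_open(&attr, 0, -1, -1, 0);	/* gets the mmap */
	attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;
	other = sys_perf_event_open(&attr, 0, -1, -1, 0);	/* redirected */
	if (leader < 0 || other < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* 1 metadata page + 2^n data pages, like mmap_pages in builtin-record */
	base = mmap(NULL, (8 + 1) * page_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, leader, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* the second counter writes its samples into the leader's buffer */
	if (ioctl(other, PERF_EVENT_IOC_SET_OUTPUT, leader))
		perror("PERF_EVENT_IOC_SET_OUTPUT");

	munmap(base, (8 + 1) * page_size);
	close(other);
	close(leader);
	return 0;
}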
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 1d3c1003b43a..359205782964 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -116,7 +116,7 @@ static int perf_session__add_hist_entry(struct perf_session *self,
 	 * so we don't allocated the extra space needed because the stdio
 	 * code will not use it.
 	 */
-	if (use_browser)
+	if (use_browser > 0)
 		err = hist_entry__inc_addr_samples(he, al->addr);
 out_free_syms:
 	free(syms);
@@ -288,6 +288,38 @@ static size_t hists__fprintf_nr_sample_events(struct hists *self,
288 return ret + fprintf(fp, "\n#\n"); 288 return ret + fprintf(fp, "\n#\n");
289} 289}
290 290
291static int hists__tty_browse_tree(struct rb_root *tree, const char *help)
292{
293 struct rb_node *next = rb_first(tree);
294
295 while (next) {
296 struct hists *hists = rb_entry(next, struct hists, rb_node);
297 const char *evname = NULL;
298
299 if (rb_first(&hists->entries) != rb_last(&hists->entries))
300 evname = __event_name(hists->type, hists->config);
301
302 hists__fprintf_nr_sample_events(hists, evname, stdout);
303 hists__fprintf(hists, NULL, false, stdout);
304 fprintf(stdout, "\n\n");
305 next = rb_next(&hists->rb_node);
306 }
307
308 if (sort_order == default_sort_order &&
309 parent_pattern == default_parent_pattern) {
310 fprintf(stdout, "#\n# (%s)\n#\n", help);
311
312 if (show_threads) {
313 bool style = !strcmp(pretty_printing_style, "raw");
314 perf_read_values_display(stdout, &show_threads_values,
315 style);
316 perf_read_values_destroy(&show_threads_values);
317 }
318 }
319
320 return 0;
321}
322
291static int __cmd_report(void) 323static int __cmd_report(void)
292{ 324{
293 int ret = -EINVAL; 325 int ret = -EINVAL;
@@ -330,34 +362,14 @@ static int __cmd_report(void)
 		hists = rb_entry(next, struct hists, rb_node);
 		hists__collapse_resort(hists);
 		hists__output_resort(hists);
-		if (use_browser)
-			hists__browse(hists, help, input_name);
-		else {
-			const char *evname = NULL;
-			if (rb_first(&session->hists.entries) !=
-			    rb_last(&session->hists.entries))
-				evname = __event_name(hists->type, hists->config);
-
-			hists__fprintf_nr_sample_events(hists, evname, stdout);
-
-			hists__fprintf(hists, NULL, false, stdout);
-			fprintf(stdout, "\n\n");
-		}
-
 		next = rb_next(&hists->rb_node);
 	}
 
-	if (!use_browser && sort_order == default_sort_order &&
-	    parent_pattern == default_parent_pattern) {
-		fprintf(stdout, "#\n# (%s)\n#\n", help);
+	if (use_browser > 0)
+		hists__tui_browse_tree(&session->hists_tree, help);
+	else
+		hists__tty_browse_tree(&session->hists_tree, help);
 
-		if (show_threads) {
-			bool style = !strcmp(pretty_printing_style, "raw");
-			perf_read_values_display(stdout, &show_threads_values,
-						 style);
-			perf_read_values_destroy(&show_threads_values);
-		}
-	}
 out_delete:
 	perf_session__delete(session);
 	return ret;
@@ -491,7 +503,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __used)
 	 * so don't allocate extra space that won't be used in the stdio
 	 * implementation.
 	 */
-	if (use_browser)
+	if (use_browser > 0)
 		symbol_conf.priv_size = sizeof(struct sym_priv);
 
 	if (symbol__init() < 0)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index ff8c413b7e73..9a39ca3c3ac4 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -50,6 +50,7 @@
50 50
51#include <sys/prctl.h> 51#include <sys/prctl.h>
52#include <math.h> 52#include <math.h>
53#include <locale.h>
53 54
54static struct perf_event_attr default_attrs[] = { 55static struct perf_event_attr default_attrs[] = {
55 56
@@ -80,6 +81,8 @@ static pid_t *all_tids = NULL;
80static int thread_num = 0; 81static int thread_num = 0;
81static pid_t child_pid = -1; 82static pid_t child_pid = -1;
82static bool null_run = false; 83static bool null_run = false;
84static bool big_num = false;
85
83 86
84static int *fd[MAX_NR_CPUS][MAX_COUNTERS]; 87static int *fd[MAX_NR_CPUS][MAX_COUNTERS];
85 88
@@ -377,7 +380,7 @@ static void nsec_printout(int counter, double avg)
 {
 	double msecs = avg / 1e6;
 
-	fprintf(stderr, " %14.6f %-24s", msecs, event_name(counter));
+	fprintf(stderr, " %18.6f %-24s", msecs, event_name(counter));
 
 	if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
 		fprintf(stderr, " # %10.3f CPUs ",
@@ -389,7 +392,10 @@ static void abs_printout(int counter, double avg)
 {
 	double total, ratio = 0.0;
 
-	fprintf(stderr, " %14.0f %-24s", avg, event_name(counter));
+	if (big_num)
+		fprintf(stderr, " %'18.0f %-24s", avg, event_name(counter));
+	else
+		fprintf(stderr, " %18.0f %-24s", avg, event_name(counter));
 
 	if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
 		total = avg_stats(&runtime_cycles_stats);
@@ -426,7 +432,7 @@ static void print_counter(int counter)
 	int scaled = event_scaled[counter];
 
 	if (scaled == -1) {
-		fprintf(stderr, " %14s %-24s\n",
+		fprintf(stderr, " %18s %-24s\n",
 			"<not counted>", event_name(counter));
 		return;
 	}
@@ -477,7 +483,7 @@ static void print_stat(int argc, const char **argv)
 		print_counter(counter);
 
 	fprintf(stderr, "\n");
-	fprintf(stderr, " %14.9f seconds time elapsed",
+	fprintf(stderr, " %18.9f seconds time elapsed",
 			avg_stats(&walltime_nsecs_stats)/1e9);
 	if (run_count > 1) {
 		fprintf(stderr, " ( +- %7.3f%% )",
@@ -534,6 +540,8 @@ static const struct option options[] = {
534 "repeat command and print average + stddev (max: 100)"), 540 "repeat command and print average + stddev (max: 100)"),
535 OPT_BOOLEAN('n', "null", &null_run, 541 OPT_BOOLEAN('n', "null", &null_run,
536 "null run - dont start any counters"), 542 "null run - dont start any counters"),
543 OPT_BOOLEAN('B', "big-num", &big_num,
544 "print large numbers with thousands\' separators"),
537 OPT_END() 545 OPT_END()
538}; 546};
539 547
@@ -542,6 +550,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
542 int status; 550 int status;
543 int i,j; 551 int i,j;
544 552
553 setlocale(LC_ALL, "");
554
545 argc = parse_options(argc, argv, options, stat_usage, 555 argc = parse_options(argc, argv, options, stat_usage,
546 PARSE_OPT_STOP_AT_NON_OPTION); 556 PARSE_OPT_STOP_AT_NON_OPTION);
547 if (!argc && target_pid == -1 && target_tid == -1) 557 if (!argc && target_pid == -1 && target_tid == -1)
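
The new -B/--big-num path in builtin-stat.c relies on the printf %' flag, which only inserts thousands separators once setlocale() has switched away from the default C locale; that is why cmd_stat() now calls setlocale(LC_ALL, "") before parsing options. A tiny standalone check of that behaviour (illustration only, not part of the commit):

#include <locale.h>
#include <stdio.h>

int main(void)
{
	double avg = 1234567890.0;

	/* without setlocale() the ' flag has no visible effect */
	printf(" %18.0f  plain\n", avg);

	setlocale(LC_ALL, "");			/* as cmd_stat() now does */
	printf(" %'18.0f  grouped\n", avg);	/* e.g. 1,234,567,890 in en_US */
	return 0;
}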
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 397290a0a76e..a66f4272b994 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1060,7 +1060,7 @@ static void event__process_sample(const event_t *self,
 		pr_err("Can't annotate %s", sym->name);
 		if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
 			pr_err(": No vmlinux file was found in the path:\n");
-			vmlinux_path__fprintf(stderr);
+			machine__fprintf_vmlinux_path(machine, stderr);
 		} else
 			pr_err(".\n");
 		exit(1);
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 08e0e5d2b50e..6e4871191138 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -15,15 +15,15 @@
15#include "util/parse-events.h" 15#include "util/parse-events.h"
16#include "util/debugfs.h" 16#include "util/debugfs.h"
17 17
18bool use_browser;
19
20const char perf_usage_string[] = 18const char perf_usage_string[] =
21 "perf [--version] [--help] COMMAND [ARGS]"; 19 "perf [--version] [--help] COMMAND [ARGS]";
22 20
23const char perf_more_info_string[] = 21const char perf_more_info_string[] =
24 "See 'perf help COMMAND' for more information on a specific command."; 22 "See 'perf help COMMAND' for more information on a specific command.";
25 23
24int use_browser = -1;
26static int use_pager = -1; 25static int use_pager = -1;
26
27struct pager_config { 27struct pager_config {
28 const char *cmd; 28 const char *cmd;
29 int val; 29 int val;
@@ -49,6 +49,24 @@ int check_pager_config(const char *cmd)
49 return c.val; 49 return c.val;
50} 50}
51 51
52static int tui_command_config(const char *var, const char *value, void *data)
53{
54 struct pager_config *c = data;
55 if (!prefixcmp(var, "tui.") && !strcmp(var + 4, c->cmd))
56 c->val = perf_config_bool(var, value);
57 return 0;
58}
59
60/* returns 0 for "no tui", 1 for "use tui", and -1 for "not specified" */
61static int check_tui_config(const char *cmd)
62{
63 struct pager_config c;
64 c.cmd = cmd;
65 c.val = -1;
66 perf_config(tui_command_config, &c);
67 return c.val;
68}
69
52static void commit_pager_choice(void) 70static void commit_pager_choice(void)
53{ 71{
54 switch (use_pager) { 72 switch (use_pager) {
@@ -255,6 +273,9 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
255 if (p->option & RUN_SETUP) 273 if (p->option & RUN_SETUP)
256 prefix = NULL; /* setup_perf_directory(); */ 274 prefix = NULL; /* setup_perf_directory(); */
257 275
276 if (use_browser == -1)
277 use_browser = check_tui_config(p->cmd);
278
258 if (use_pager == -1 && p->option & RUN_SETUP) 279 if (use_pager == -1 && p->option & RUN_SETUP)
259 use_pager = check_pager_config(p->cmd); 280 use_pager = check_pager_config(p->cmd);
260 if (use_pager == -1 && p->option & USE_PAGER) 281 if (use_pager == -1 && p->option & USE_PAGER)
diff --git a/tools/perf/util/abspath.c b/tools/perf/util/abspath.c
index a791dd467261..0e76affe9c36 100644
--- a/tools/perf/util/abspath.c
+++ b/tools/perf/util/abspath.c
@@ -1,86 +1,5 @@
1#include "cache.h" 1#include "cache.h"
2 2
3/*
4 * Do not use this for inspecting *tracked* content. When path is a
5 * symlink to a directory, we do not want to say it is a directory when
6 * dealing with tracked content in the working tree.
7 */
8static int is_directory(const char *path)
9{
10 struct stat st;
11 return (!stat(path, &st) && S_ISDIR(st.st_mode));
12}
13
14/* We allow "recursive" symbolic links. Only within reason, though. */
15#define MAXDEPTH 5
16
17const char *make_absolute_path(const char *path)
18{
19 static char bufs[2][PATH_MAX + 1], *buf = bufs[0], *next_buf = bufs[1];
20 char cwd[1024] = "";
21 int buf_index = 1, len;
22
23 int depth = MAXDEPTH;
24 char *last_elem = NULL;
25 struct stat st;
26
27 if (strlcpy(buf, path, PATH_MAX) >= PATH_MAX)
28 die ("Too long path: %.*s", 60, path);
29
30 while (depth--) {
31 if (!is_directory(buf)) {
32 char *last_slash = strrchr(buf, '/');
33 if (last_slash) {
34 *last_slash = '\0';
35 last_elem = xstrdup(last_slash + 1);
36 } else {
37 last_elem = xstrdup(buf);
38 *buf = '\0';
39 }
40 }
41
42 if (*buf) {
43 if (!*cwd && !getcwd(cwd, sizeof(cwd)))
44 die ("Could not get current working directory");
45
46 if (chdir(buf))
47 die ("Could not switch to '%s'", buf);
48 }
49 if (!getcwd(buf, PATH_MAX))
50 die ("Could not get current working directory");
51
52 if (last_elem) {
53 len = strlen(buf);
54
55 if (len + strlen(last_elem) + 2 > PATH_MAX)
56 die ("Too long path name: '%s/%s'",
57 buf, last_elem);
58 buf[len] = '/';
59 strcpy(buf + len + 1, last_elem);
60 free(last_elem);
61 last_elem = NULL;
62 }
63
64 if (!lstat(buf, &st) && S_ISLNK(st.st_mode)) {
65 len = readlink(buf, next_buf, PATH_MAX);
66 if (len < 0)
67 die ("Invalid symlink: %s", buf);
68 if (PATH_MAX <= len)
69 die("symbolic link too long: %s", buf);
70 next_buf[len] = '\0';
71 buf = next_buf;
72 buf_index = 1 - buf_index;
73 next_buf = bufs[buf_index];
74 } else
75 break;
76 }
77
78 if (*cwd && chdir(cwd))
79 die ("Could not change back to '%s'", cwd);
80
81 return buf;
82}
83
84static const char *get_pwd_cwd(void) 3static const char *get_pwd_cwd(void)
85{ 4{
86 static char cwd[PATH_MAX + 1]; 5 static char cwd[PATH_MAX + 1];
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 0f60a3906808..70c5cf87d020 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -6,6 +6,8 @@
6 * Copyright (C) 2009, 2010 Red Hat Inc. 6 * Copyright (C) 2009, 2010 Red Hat Inc.
7 * Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo <acme@redhat.com> 7 * Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo <acme@redhat.com>
8 */ 8 */
9#include "util.h"
10#include <stdio.h>
9#include "build-id.h" 11#include "build-id.h"
10#include "event.h" 12#include "event.h"
11#include "symbol.h" 13#include "symbol.h"
@@ -37,3 +39,23 @@ struct perf_event_ops build_id__mark_dso_hit_ops = {
37 .mmap = event__process_mmap, 39 .mmap = event__process_mmap,
38 .fork = event__process_task, 40 .fork = event__process_task,
39}; 41};
42
43char *dso__build_id_filename(struct dso *self, char *bf, size_t size)
44{
45 char build_id_hex[BUILD_ID_SIZE * 2 + 1];
46 const char *home;
47
48 if (!self->has_build_id)
49 return NULL;
50
51 build_id__sprintf(self->build_id, sizeof(self->build_id), build_id_hex);
52 home = getenv("HOME");
53 if (bf == NULL) {
54 if (asprintf(&bf, "%s/%s/.build-id/%.2s/%s", home,
55 DEBUG_CACHE_DIR, build_id_hex, build_id_hex + 2) < 0)
56 return NULL;
57 } else
58 snprintf(bf, size, "%s/%s/.build-id/%.2s/%s", home,
59 DEBUG_CACHE_DIR, build_id_hex, build_id_hex + 2);
60 return bf;
61}
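
dso__build_id_filename() above maps a DSO's build-id to a file in the user's debug cache. Assuming DEBUG_CACHE_DIR expands to ".debug" (as defined elsewhere in the perf sources) and using a made-up build-id, the resulting path shape looks like this sketch (illustration only, not code from this commit):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* hypothetical 40-character build-id hex string */
	const char *hex = "183134628ea9122f0e1d388e3f52496d6c2fb8df";
	const char *home = getenv("HOME");
	char path[512];

	/* first two hex chars form a fan-out directory, the rest the file name */
	snprintf(path, sizeof(path), "%s/.debug/.build-id/%.2s/%s",
		 home ? home : ".", hex, hex + 2);
	puts(path);	/* ~/.debug/.build-id/18/3134628e... */
	return 0;
}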
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 1d981d63cf9a..5dafb00eaa06 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -5,4 +5,6 @@
5 5
6extern struct perf_event_ops build_id__mark_dso_hit_ops; 6extern struct perf_event_ops build_id__mark_dso_hit_ops;
7 7
8char *dso__build_id_filename(struct dso *self, char *bf, size_t size);
9
8#endif 10#endif
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 4b9aab7f0405..65fe664fddf6 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -13,56 +13,16 @@
13 13
14#define PERF_DIR_ENVIRONMENT "PERF_DIR" 14#define PERF_DIR_ENVIRONMENT "PERF_DIR"
15#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE" 15#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE"
16#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf"
17#define DB_ENVIRONMENT "PERF_OBJECT_DIRECTORY"
18#define INDEX_ENVIRONMENT "PERF_INDEX_FILE"
19#define GRAFT_ENVIRONMENT "PERF_GRAFT_FILE"
20#define TEMPLATE_DIR_ENVIRONMENT "PERF_TEMPLATE_DIR"
21#define CONFIG_ENVIRONMENT "PERF_CONFIG"
22#define EXEC_PATH_ENVIRONMENT "PERF_EXEC_PATH" 16#define EXEC_PATH_ENVIRONMENT "PERF_EXEC_PATH"
23#define CEILING_DIRECTORIES_ENVIRONMENT "PERF_CEILING_DIRECTORIES" 17#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf"
24#define PERFATTRIBUTES_FILE ".perfattributes"
25#define INFOATTRIBUTES_FILE "info/attributes"
26#define ATTRIBUTE_MACRO_PREFIX "[attr]"
27#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR" 18#define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR"
28 19
29typedef int (*config_fn_t)(const char *, const char *, void *); 20typedef int (*config_fn_t)(const char *, const char *, void *);
30extern int perf_default_config(const char *, const char *, void *); 21extern int perf_default_config(const char *, const char *, void *);
31extern int perf_config_from_file(config_fn_t fn, const char *, void *);
32extern int perf_config(config_fn_t fn, void *); 22extern int perf_config(config_fn_t fn, void *);
33extern int perf_parse_ulong(const char *, unsigned long *);
34extern int perf_config_int(const char *, const char *); 23extern int perf_config_int(const char *, const char *);
35extern unsigned long perf_config_ulong(const char *, const char *);
36extern int perf_config_bool_or_int(const char *, const char *, int *);
37extern int perf_config_bool(const char *, const char *); 24extern int perf_config_bool(const char *, const char *);
38extern int perf_config_string(const char **, const char *, const char *);
39extern int perf_config_set(const char *, const char *);
40extern int perf_config_set_multivar(const char *, const char *, const char *, int);
41extern int perf_config_rename_section(const char *, const char *);
42extern const char *perf_etc_perfconfig(void);
43extern int check_repository_format_version(const char *var, const char *value, void *cb);
44extern int perf_config_system(void);
45extern int perf_config_global(void);
46extern int config_error_nonbool(const char *); 25extern int config_error_nonbool(const char *);
47extern const char *config_exclusive_filename;
48
49#define MAX_PERFNAME (1000)
50extern char perf_default_email[MAX_PERFNAME];
51extern char perf_default_name[MAX_PERFNAME];
52extern int user_ident_explicitly_given;
53
54extern const char *perf_log_output_encoding;
55extern const char *perf_mailmap_file;
56
57/* IO helper functions */
58extern void maybe_flush_or_die(FILE *, const char *);
59extern int copy_fd(int ifd, int ofd);
60extern int copy_file(const char *dst, const char *src, int mode);
61extern ssize_t write_in_full(int fd, const void *buf, size_t count);
62extern void write_or_die(int fd, const void *buf, size_t count);
63extern int write_or_whine(int fd, const void *buf, size_t count, const char *msg);
64extern int write_or_whine_pipe(int fd, const void *buf, size_t count, const char *msg);
65extern void fsync_or_die(int fd, const char *);
66 26
67/* pager.c */ 27/* pager.c */
68extern void setup_pager(void); 28extern void setup_pager(void);
@@ -70,7 +30,7 @@ extern const char *pager_program;
 extern int pager_in_use(void);
 extern int pager_use_color;
 
-extern bool use_browser;
+extern int use_browser;
 
 #ifdef NO_NEWT_SUPPORT
 static inline void setup_browser(void)
@@ -83,9 +43,6 @@ void setup_browser(void);
83void exit_browser(bool wait_for_ok); 43void exit_browser(bool wait_for_ok);
84#endif 44#endif
85 45
86extern const char *editor_program;
87extern const char *excludes_file;
88
89char *alias_lookup(const char *alias); 46char *alias_lookup(const char *alias);
90int split_cmdline(char *cmdline, const char ***argv); 47int split_cmdline(char *cmdline, const char ***argv);
91 48
@@ -115,22 +72,12 @@ static inline int is_absolute_path(const char *path)
115 return path[0] == '/'; 72 return path[0] == '/';
116} 73}
117 74
118const char *make_absolute_path(const char *path);
119const char *make_nonrelative_path(const char *path); 75const char *make_nonrelative_path(const char *path);
120const char *make_relative_path(const char *abs, const char *base);
121int normalize_path_copy(char *dst, const char *src);
122int longest_ancestor_length(const char *path, const char *prefix_list);
123char *strip_path_suffix(const char *path, const char *suffix); 76char *strip_path_suffix(const char *path, const char *suffix);
124 77
125extern char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2))); 78extern char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
126extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2))); 79extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
127/* perf_mkstemp() - create tmp file honoring TMPDIR variable */
128extern int perf_mkstemp(char *path, size_t len, const char *template);
129 80
130extern char *mksnpath(char *buf, size_t n, const char *fmt, ...)
131 __attribute__((format (printf, 3, 4)));
132extern char *perf_snpath(char *buf, size_t n, const char *fmt, ...)
133 __attribute__((format (printf, 3, 4)));
134extern char *perf_pathdup(const char *fmt, ...) 81extern char *perf_pathdup(const char *fmt, ...)
135 __attribute__((format (printf, 1, 2))); 82 __attribute__((format (printf, 1, 2)));
136 83
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 21a52e0a4435..62b69ad4aa73 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -15,6 +15,7 @@
15#include <errno.h> 15#include <errno.h>
16#include <math.h> 16#include <math.h>
17 17
18#include "util.h"
18#include "callchain.h" 19#include "callchain.h"
19 20
20bool ip_callchain__valid(struct ip_callchain *chain, event_t *event) 21bool ip_callchain__valid(struct ip_callchain *chain, event_t *event)
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 1cba1f5504e7..1ca73e4a2723 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -5,7 +5,6 @@
5#include <linux/list.h> 5#include <linux/list.h>
6#include <linux/rbtree.h> 6#include <linux/rbtree.h>
7#include "event.h" 7#include "event.h"
8#include "util.h"
9#include "symbol.h" 8#include "symbol.h"
10 9
11enum chain_mode { 10enum chain_mode {
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 8784649109ce..dabe892d0e53 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -16,7 +16,7 @@ static const char *config_file_name;
 static int config_linenr;
 static int config_file_eof;
 
-const char *config_exclusive_filename = NULL;
+static const char *config_exclusive_filename;
 
 static int get_next_char(void)
 {
@@ -291,19 +291,6 @@ static int perf_parse_long(const char *value, long *ret)
291 return 0; 291 return 0;
292} 292}
293 293
294int perf_parse_ulong(const char *value, unsigned long *ret)
295{
296 if (value && *value) {
297 char *end;
298 unsigned long val = strtoul(value, &end, 0);
299 if (!parse_unit_factor(end, &val))
300 return 0;
301 *ret = val;
302 return 1;
303 }
304 return 0;
305}
306
307static void die_bad_config(const char *name) 294static void die_bad_config(const char *name)
308{ 295{
309 if (config_file_name) 296 if (config_file_name)
@@ -319,15 +306,7 @@ int perf_config_int(const char *name, const char *value)
 	return ret;
 }
 
-unsigned long perf_config_ulong(const char *name, const char *value)
-{
-	unsigned long ret;
-	if (!perf_parse_ulong(value, &ret))
-		die_bad_config(name);
-	return ret;
-}
-
-int perf_config_bool_or_int(const char *name, const char *value, int *is_bool)
+static int perf_config_bool_or_int(const char *name, const char *value, int *is_bool)
 {
 	*is_bool = 1;
 	if (!value)
@@ -348,14 +327,6 @@ int perf_config_bool(const char *name, const char *value)
348 return !!perf_config_bool_or_int(name, value, &discard); 327 return !!perf_config_bool_or_int(name, value, &discard);
349} 328}
350 329
351int perf_config_string(const char **dest, const char *var, const char *value)
352{
353 if (!value)
354 return config_error_nonbool(var);
355 *dest = strdup(value);
356 return 0;
357}
358
359static int perf_default_core_config(const char *var __used, const char *value __used) 330static int perf_default_core_config(const char *var __used, const char *value __used)
360{ 331{
361 /* Add other config variables here and to Documentation/config.txt. */ 332 /* Add other config variables here and to Documentation/config.txt. */
@@ -371,7 +342,7 @@ int perf_default_config(const char *var, const char *value, void *dummy __used)
 	return 0;
 }
 
-int perf_config_from_file(config_fn_t fn, const char *filename, void *data)
+static int perf_config_from_file(config_fn_t fn, const char *filename, void *data)
 {
 	int ret;
 	FILE *f = fopen(filename, "r");
@@ -389,7 +360,7 @@ int perf_config_from_file(config_fn_t fn, const char *filename, void *data)
 	return ret;
 }
 
-const char *perf_etc_perfconfig(void)
+static const char *perf_etc_perfconfig(void)
 {
 	static const char *system_wide;
 	if (!system_wide)
@@ -403,12 +374,12 @@ static int perf_env_bool(const char *k, int def)
 	return v ? perf_config_bool(k, v) : def;
 }
 
-int perf_config_system(void)
+static int perf_config_system(void)
 {
 	return !perf_env_bool("PERF_CONFIG_NOSYSTEM", 0);
 }
 
-int perf_config_global(void)
+static int perf_config_global(void)
 {
 	return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0);
 }
@@ -450,426 +421,6 @@ int perf_config(config_fn_t fn, void *data)
450} 421}
451 422
452/* 423/*
453 * Find all the stuff for perf_config_set() below.
454 */
455
456#define MAX_MATCHES 512
457
458static struct {
459 int baselen;
460 char* key;
461 int do_not_match;
462 regex_t* value_regex;
463 int multi_replace;
464 size_t offset[MAX_MATCHES];
465 enum { START, SECTION_SEEN, SECTION_END_SEEN, KEY_SEEN } state;
466 int seen;
467} store;
468
469static int matches(const char* key, const char* value)
470{
471 return !strcmp(key, store.key) &&
472 (store.value_regex == NULL ||
473 (store.do_not_match ^
474 !regexec(store.value_regex, value, 0, NULL, 0)));
475}
476
477static int store_aux(const char* key, const char* value, void *cb __used)
478{
479 int section_len;
480 const char *ep;
481
482 switch (store.state) {
483 case KEY_SEEN:
484 if (matches(key, value)) {
485 if (store.seen == 1 && store.multi_replace == 0) {
486 warning("%s has multiple values", key);
487 } else if (store.seen >= MAX_MATCHES) {
488 error("too many matches for %s", key);
489 return 1;
490 }
491
492 store.offset[store.seen] = ftell(config_file);
493 store.seen++;
494 }
495 break;
496 case SECTION_SEEN:
497 /*
498 * What we are looking for is in store.key (both
499 * section and var), and its section part is baselen
500 * long. We found key (again, both section and var).
501 * We would want to know if this key is in the same
502 * section as what we are looking for. We already
503 * know we are in the same section as what should
504 * hold store.key.
505 */
506 ep = strrchr(key, '.');
507 section_len = ep - key;
508
509 if ((section_len != store.baselen) ||
510 memcmp(key, store.key, section_len+1)) {
511 store.state = SECTION_END_SEEN;
512 break;
513 }
514
515 /*
516 * Do not increment matches: this is no match, but we
517 * just made sure we are in the desired section.
518 */
519 store.offset[store.seen] = ftell(config_file);
520 /* fallthru */
521 case SECTION_END_SEEN:
522 case START:
523 if (matches(key, value)) {
524 store.offset[store.seen] = ftell(config_file);
525 store.state = KEY_SEEN;
526 store.seen++;
527 } else {
528 if (strrchr(key, '.') - key == store.baselen &&
529 !strncmp(key, store.key, store.baselen)) {
530 store.state = SECTION_SEEN;
531 store.offset[store.seen] = ftell(config_file);
532 }
533 }
534 default:
535 break;
536 }
537 return 0;
538}
539
540static int store_write_section(int fd, const char* key)
541{
542 const char *dot;
543 int i, success;
544 struct strbuf sb = STRBUF_INIT;
545
546 dot = memchr(key, '.', store.baselen);
547 if (dot) {
548 strbuf_addf(&sb, "[%.*s \"", (int)(dot - key), key);
549 for (i = dot - key + 1; i < store.baselen; i++) {
550 if (key[i] == '"' || key[i] == '\\')
551 strbuf_addch(&sb, '\\');
552 strbuf_addch(&sb, key[i]);
553 }
554 strbuf_addstr(&sb, "\"]\n");
555 } else {
556 strbuf_addf(&sb, "[%.*s]\n", store.baselen, key);
557 }
558
559 success = (write_in_full(fd, sb.buf, sb.len) == (ssize_t)sb.len);
560 strbuf_release(&sb);
561
562 return success;
563}
564
565static int store_write_pair(int fd, const char* key, const char* value)
566{
567 int i, success;
568 int length = strlen(key + store.baselen + 1);
569 const char *quote = "";
570 struct strbuf sb = STRBUF_INIT;
571
572 /*
573 * Check to see if the value needs to be surrounded with a dq pair.
574 * Note that problematic characters are always backslash-quoted; this
575 * check is about not losing leading or trailing SP and strings that
576 * follow beginning-of-comment characters (i.e. ';' and '#') by the
577 * configuration parser.
578 */
579 if (value[0] == ' ')
580 quote = "\"";
581 for (i = 0; value[i]; i++)
582 if (value[i] == ';' || value[i] == '#')
583 quote = "\"";
584 if (i && value[i - 1] == ' ')
585 quote = "\"";
586
587 strbuf_addf(&sb, "\t%.*s = %s",
588 length, key + store.baselen + 1, quote);
589
590 for (i = 0; value[i]; i++)
591 switch (value[i]) {
592 case '\n':
593 strbuf_addstr(&sb, "\\n");
594 break;
595 case '\t':
596 strbuf_addstr(&sb, "\\t");
597 break;
598 case '"':
599 case '\\':
600 strbuf_addch(&sb, '\\');
601 default:
602 strbuf_addch(&sb, value[i]);
603 break;
604 }
605 strbuf_addf(&sb, "%s\n", quote);
606
607 success = (write_in_full(fd, sb.buf, sb.len) == (ssize_t)sb.len);
608 strbuf_release(&sb);
609
610 return success;
611}
612
613static ssize_t find_beginning_of_line(const char* contents, size_t size,
614 size_t offset_, int* found_bracket)
615{
616 size_t equal_offset = size, bracket_offset = size;
617 ssize_t offset;
618
619contline:
620 for (offset = offset_-2; offset > 0
621 && contents[offset] != '\n'; offset--)
622 switch (contents[offset]) {
623 case '=': equal_offset = offset; break;
624 case ']': bracket_offset = offset; break;
625 default: break;
626 }
627 if (offset > 0 && contents[offset-1] == '\\') {
628 offset_ = offset;
629 goto contline;
630 }
631 if (bracket_offset < equal_offset) {
632 *found_bracket = 1;
633 offset = bracket_offset+1;
634 } else
635 offset++;
636
637 return offset;
638}
639
640int perf_config_set(const char* key, const char* value)
641{
642 return perf_config_set_multivar(key, value, NULL, 0);
643}
644
645/*
646 * If value==NULL, unset in (remove from) config,
647 * if value_regex!=NULL, disregard key/value pairs where value does not match.
648 * if multi_replace==0, nothing, or only one matching key/value is replaced,
649 * else all matching key/values (regardless how many) are removed,
650 * before the new pair is written.
651 *
652 * Returns 0 on success.
653 *
654 * This function does this:
655 *
656 * - it locks the config file by creating ".perf/config.lock"
657 *
658 * - it then parses the config using store_aux() as validator to find
659 * the position on the key/value pair to replace. If it is to be unset,
660 * it must be found exactly once.
661 *
662 * - the config file is mmap()ed and the part before the match (if any) is
663 * written to the lock file, then the changed part and the rest.
664 *
665 * - the config file is removed and the lock file rename()d to it.
666 *
667 */
668int perf_config_set_multivar(const char* key, const char* value,
669 const char* value_regex, int multi_replace)
670{
671 int i, dot;
672 int fd = -1, in_fd;
673 int ret = 0;
674 char* config_filename;
675 const char* last_dot = strrchr(key, '.');
676
677 if (config_exclusive_filename)
678 config_filename = strdup(config_exclusive_filename);
679 else
680 config_filename = perf_pathdup("config");
681
682 /*
683 * Since "key" actually contains the section name and the real
684 * key name separated by a dot, we have to know where the dot is.
685 */
686
687 if (last_dot == NULL) {
688 error("key does not contain a section: %s", key);
689 ret = 2;
690 goto out_free;
691 }
692 store.baselen = last_dot - key;
693
694 store.multi_replace = multi_replace;
695
696 /*
697 * Validate the key and while at it, lower case it for matching.
698 */
699 store.key = malloc(strlen(key) + 1);
700 dot = 0;
701 for (i = 0; key[i]; i++) {
702 unsigned char c = key[i];
703 if (c == '.')
704 dot = 1;
705 /* Leave the extended basename untouched.. */
706 if (!dot || i > store.baselen) {
707 if (!iskeychar(c) || (i == store.baselen+1 && !isalpha(c))) {
708 error("invalid key: %s", key);
709 free(store.key);
710 ret = 1;
711 goto out_free;
712 }
713 c = tolower(c);
714 } else if (c == '\n') {
715 error("invalid key (newline): %s", key);
716 free(store.key);
717 ret = 1;
718 goto out_free;
719 }
720 store.key[i] = c;
721 }
722 store.key[i] = 0;
723
724 /*
725 * If .perf/config does not exist yet, write a minimal version.
726 */
727 in_fd = open(config_filename, O_RDONLY);
728 if ( in_fd < 0 ) {
729 free(store.key);
730
731 if ( ENOENT != errno ) {
732 error("opening %s: %s", config_filename,
733 strerror(errno));
734 ret = 3; /* same as "invalid config file" */
735 goto out_free;
736 }
737 /* if nothing to unset, error out */
738 if (value == NULL) {
739 ret = 5;
740 goto out_free;
741 }
742
743 store.key = (char*)key;
744 if (!store_write_section(fd, key) ||
745 !store_write_pair(fd, key, value))
746 goto write_err_out;
747 } else {
748 struct stat st;
749 char *contents;
750 ssize_t contents_sz, copy_begin, copy_end;
751 int new_line = 0;
752
753 if (value_regex == NULL)
754 store.value_regex = NULL;
755 else {
756 if (value_regex[0] == '!') {
757 store.do_not_match = 1;
758 value_regex++;
759 } else
760 store.do_not_match = 0;
761
762 store.value_regex = (regex_t*)malloc(sizeof(regex_t));
763 if (regcomp(store.value_regex, value_regex,
764 REG_EXTENDED)) {
765 error("invalid pattern: %s", value_regex);
766 free(store.value_regex);
767 ret = 6;
768 goto out_free;
769 }
770 }
771
772 store.offset[0] = 0;
773 store.state = START;
774 store.seen = 0;
775
776 /*
777 * After this, store.offset will contain the *end* offset
778 * of the last match, or remain at 0 if no match was found.
779 * As a side effect, we make sure to transform only a valid
780 * existing config file.
781 */
782 if (perf_config_from_file(store_aux, config_filename, NULL)) {
783 error("invalid config file %s", config_filename);
784 free(store.key);
785 if (store.value_regex != NULL) {
786 regfree(store.value_regex);
787 free(store.value_regex);
788 }
789 ret = 3;
790 goto out_free;
791 }
792
793 free(store.key);
794 if (store.value_regex != NULL) {
795 regfree(store.value_regex);
796 free(store.value_regex);
797 }
798
799 /* if nothing to unset, or too many matches, error out */
800 if ((store.seen == 0 && value == NULL) ||
801 (store.seen > 1 && multi_replace == 0)) {
802 ret = 5;
803 goto out_free;
804 }
805
806 fstat(in_fd, &st);
807 contents_sz = xsize_t(st.st_size);
808 contents = mmap(NULL, contents_sz, PROT_READ,
809 MAP_PRIVATE, in_fd, 0);
810 close(in_fd);
811
812 if (store.seen == 0)
813 store.seen = 1;
814
815 for (i = 0, copy_begin = 0; i < store.seen; i++) {
816 if (store.offset[i] == 0) {
817 store.offset[i] = copy_end = contents_sz;
818 } else if (store.state != KEY_SEEN) {
819 copy_end = store.offset[i];
820 } else
821 copy_end = find_beginning_of_line(
822 contents, contents_sz,
823 store.offset[i]-2, &new_line);
824
825 if (copy_end > 0 && contents[copy_end-1] != '\n')
826 new_line = 1;
827
828 /* write the first part of the config */
829 if (copy_end > copy_begin) {
830 if (write_in_full(fd, contents + copy_begin,
831 copy_end - copy_begin) <
832 copy_end - copy_begin)
833 goto write_err_out;
834 if (new_line &&
835 write_in_full(fd, "\n", 1) != 1)
836 goto write_err_out;
837 }
838 copy_begin = store.offset[i];
839 }
840
841 /* write the pair (value == NULL means unset) */
842 if (value != NULL) {
843 if (store.state == START) {
844 if (!store_write_section(fd, key))
845 goto write_err_out;
846 }
847 if (!store_write_pair(fd, key, value))
848 goto write_err_out;
849 }
850
851 /* write the rest of the config */
852 if (copy_begin < contents_sz)
853 if (write_in_full(fd, contents + copy_begin,
854 contents_sz - copy_begin) <
855 contents_sz - copy_begin)
856 goto write_err_out;
857
858 munmap(contents, contents_sz);
859 }
860
861 ret = 0;
862
863out_free:
864 free(config_filename);
865 return ret;
866
867write_err_out:
868 goto out_free;
869
870}
871
872/*
873 * Call this to report error for your variable that should not 424 * Call this to report error for your variable that should not
874 * get a boolean value (i.e. "[my] var" means "true"). 425 * get a boolean value (i.e. "[my] var" means "true").
875 */ 426 */
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index dd824cf3b628..6cddff2bc970 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -22,7 +22,7 @@ int eprintf(int level, const char *fmt, ...)
 
 	if (verbose >= level) {
 		va_start(args, fmt);
-		if (use_browser)
+		if (use_browser > 0)
 			ret = browser__show_help(fmt, args);
 		else
 			ret = vfprintf(stderr, fmt, args);
diff --git a/tools/perf/util/exec_cmd.c b/tools/perf/util/exec_cmd.c
index 2745605dba11..67eeff571568 100644
--- a/tools/perf/util/exec_cmd.c
+++ b/tools/perf/util/exec_cmd.c
@@ -53,8 +53,8 @@ const char *perf_extract_argv0_path(const char *argv0)
 		slash--;
 
 	if (slash >= argv0) {
-		argv0_path = xstrndup(argv0, slash - argv0);
-		return slash + 1;
+		argv0_path = strndup(argv0, slash - argv0);
+		return argv0_path ? slash + 1 : NULL;
 	}
 
 	return argv0;
@@ -116,7 +116,7 @@ void setup_path(void)
 	strbuf_release(&new_path);
 }
 
-const char **prepare_perf_cmd(const char **argv)
+static const char **prepare_perf_cmd(const char **argv)
 {
 	int argc;
 	const char **nargv;
diff --git a/tools/perf/util/exec_cmd.h b/tools/perf/util/exec_cmd.h
index 31647ac92ed1..bc4b915963f5 100644
--- a/tools/perf/util/exec_cmd.h
+++ b/tools/perf/util/exec_cmd.h
@@ -5,7 +5,6 @@ extern void perf_set_argv_exec_path(const char *exec_path);
5extern const char *perf_extract_argv0_path(const char *path); 5extern const char *perf_extract_argv0_path(const char *path);
6extern const char *perf_exec_path(void); 6extern const char *perf_exec_path(void);
7extern void setup_path(void); 7extern void setup_path(void);
8extern const char **prepare_perf_cmd(const char **argv);
9extern int execv_perf_cmd(const char **argv); /* NULL terminated */ 8extern int execv_perf_cmd(const char **argv); /* NULL terminated */
10extern int execl_perf_cmd(const char *cmd, ...); 9extern int execl_perf_cmd(const char *cmd, ...);
11extern const char *system_path(const char *path); 10extern const char *system_path(const char *path);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 8847bec64c54..1f62435f96c2 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -221,29 +221,38 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
221 return 0; 221 return 0;
222} 222}
223 223
224static int machine__write_buildid_table(struct machine *self, int fd)
225{
226 int err;
227 u16 kmisc = PERF_RECORD_MISC_KERNEL,
228 umisc = PERF_RECORD_MISC_USER;
229
230 if (!machine__is_host(self)) {
231 kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
232 umisc = PERF_RECORD_MISC_GUEST_USER;
233 }
234
235 err = __dsos__write_buildid_table(&self->kernel_dsos, self->pid,
236 kmisc, fd);
237 if (err == 0)
238 err = __dsos__write_buildid_table(&self->user_dsos,
239 self->pid, umisc, fd);
240 return err;
241}
242
224static int dsos__write_buildid_table(struct perf_header *header, int fd) 243static int dsos__write_buildid_table(struct perf_header *header, int fd)
225{ 244{
226 struct perf_session *session = container_of(header, 245 struct perf_session *session = container_of(header,
227 struct perf_session, header); 246 struct perf_session, header);
228 struct rb_node *nd; 247 struct rb_node *nd;
229 int err = 0; 248 int err = machine__write_buildid_table(&session->host_machine, fd);
230 u16 kmisc, umisc; 249
250 if (err)
251 return err;
231 252
232 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { 253 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
233 struct machine *pos = rb_entry(nd, struct machine, rb_node); 254 struct machine *pos = rb_entry(nd, struct machine, rb_node);
234 if (machine__is_host(pos)) { 255 err = machine__write_buildid_table(pos, fd);
235 kmisc = PERF_RECORD_MISC_KERNEL;
236 umisc = PERF_RECORD_MISC_USER;
237 } else {
238 kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
239 umisc = PERF_RECORD_MISC_GUEST_USER;
240 }
241
242 err = __dsos__write_buildid_table(&pos->kernel_dsos, pos->pid,
243 kmisc, fd);
244 if (err == 0)
245 err = __dsos__write_buildid_table(&pos->user_dsos,
246 pos->pid, umisc, fd);
247 if (err) 256 if (err)
248 break; 257 break;
249 } 258 }
@@ -363,12 +372,17 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
363 return err; 372 return err;
364} 373}
365 374
366static int dsos__cache_build_ids(struct perf_header *self) 375static int machine__cache_build_ids(struct machine *self, const char *debugdir)
376{
377 int ret = __dsos__cache_build_ids(&self->kernel_dsos, debugdir);
378 ret |= __dsos__cache_build_ids(&self->user_dsos, debugdir);
379 return ret;
380}
381
382static int perf_session__cache_build_ids(struct perf_session *self)
367{ 383{
368 struct perf_session *session = container_of(self,
369 struct perf_session, header);
370 struct rb_node *nd; 384 struct rb_node *nd;
371 int ret = 0; 385 int ret;
372 char debugdir[PATH_MAX]; 386 char debugdir[PATH_MAX];
373 387
374 snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"), 388 snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"),
@@ -377,25 +391,30 @@ static int dsos__cache_build_ids(struct perf_header *self)
377 if (mkdir(debugdir, 0755) != 0 && errno != EEXIST) 391 if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
378 return -1; 392 return -1;
379 393
380 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { 394 ret = machine__cache_build_ids(&self->host_machine, debugdir);
395
396 for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) {
381 struct machine *pos = rb_entry(nd, struct machine, rb_node); 397 struct machine *pos = rb_entry(nd, struct machine, rb_node);
382 ret |= __dsos__cache_build_ids(&pos->kernel_dsos, debugdir); 398 ret |= machine__cache_build_ids(pos, debugdir);
383 ret |= __dsos__cache_build_ids(&pos->user_dsos, debugdir);
384 } 399 }
385 return ret ? -1 : 0; 400 return ret ? -1 : 0;
386} 401}
387 402
388static bool dsos__read_build_ids(struct perf_header *self, bool with_hits) 403static bool machine__read_build_ids(struct machine *self, bool with_hits)
404{
405 bool ret = __dsos__read_build_ids(&self->kernel_dsos, with_hits);
406 ret |= __dsos__read_build_ids(&self->user_dsos, with_hits);
407 return ret;
408}
409
410static bool perf_session__read_build_ids(struct perf_session *self, bool with_hits)
389{ 411{
390 bool ret = false;
391 struct perf_session *session = container_of(self,
392 struct perf_session, header);
393 struct rb_node *nd; 412 struct rb_node *nd;
413 bool ret = machine__read_build_ids(&self->host_machine, with_hits);
394 414
395 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) { 415 for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) {
396 struct machine *pos = rb_entry(nd, struct machine, rb_node); 416 struct machine *pos = rb_entry(nd, struct machine, rb_node);
397 ret |= __dsos__read_build_ids(&pos->kernel_dsos, with_hits); 417 ret |= machine__read_build_ids(pos, with_hits);
398 ret |= __dsos__read_build_ids(&pos->user_dsos, with_hits);
399 } 418 }
400 419
401 return ret; 420 return ret;
@@ -404,12 +423,14 @@ static bool dsos__read_build_ids(struct perf_header *self, bool with_hits)
 static int perf_header__adds_write(struct perf_header *self, int fd)
 {
 	int nr_sections;
+	struct perf_session *session;
 	struct perf_file_section *feat_sec;
 	int sec_size;
 	u64 sec_start;
 	int idx = 0, err;
 
-	if (dsos__read_build_ids(self, true))
+	session = container_of(self, struct perf_session, header);
+	if (perf_session__read_build_ids(session, true))
 		perf_header__set_feat(self, HEADER_BUILD_ID);
 
 	nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
@@ -450,7 +471,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
 		}
 		buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
 					buildid_sec->offset;
-		dsos__cache_build_ids(self);
+		perf_session__cache_build_ids(session);
 	}
 
 	lseek(fd, sec_start, SEEK_SET);
@@ -490,7 +511,6 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
490 511
491 lseek(fd, sizeof(f_header), SEEK_SET); 512 lseek(fd, sizeof(f_header), SEEK_SET);
492 513
493
494 for (i = 0; i < self->attrs; i++) { 514 for (i = 0; i < self->attrs; i++) {
495 attr = self->attr[i]; 515 attr = self->attr[i];
496 516
diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c
index fbb00978b2e2..6f2975a00358 100644
--- a/tools/perf/util/help.c
+++ b/tools/perf/util/help.c
@@ -4,28 +4,6 @@
4#include "levenshtein.h" 4#include "levenshtein.h"
5#include "help.h" 5#include "help.h"
6 6
7/* most GUI terminals set COLUMNS (although some don't export it) */
8static int term_columns(void)
9{
10 char *col_string = getenv("COLUMNS");
11 int n_cols;
12
13 if (col_string && (n_cols = atoi(col_string)) > 0)
14 return n_cols;
15
16#ifdef TIOCGWINSZ
17 {
18 struct winsize ws;
19 if (!ioctl(1, TIOCGWINSZ, &ws)) {
20 if (ws.ws_col)
21 return ws.ws_col;
22 }
23 }
24#endif
25
26 return 80;
27}
28
29void add_cmdname(struct cmdnames *cmds, const char *name, size_t len) 7void add_cmdname(struct cmdnames *cmds, const char *name, size_t len)
30{ 8{
31 struct cmdname *ent = malloc(sizeof(*ent) + len + 1); 9 struct cmdname *ent = malloc(sizeof(*ent) + len + 1);
@@ -96,9 +74,13 @@ static void pretty_print_string_list(struct cmdnames *cmds, int longest)
 {
 	int cols = 1, rows;
 	int space = longest + 1; /* min 1 SP between words */
-	int max_cols = term_columns() - 1; /* don't print *on* the edge */
+	struct winsize win;
+	int max_cols;
 	int i, j;
 
+	get_term_dimensions(&win);
+	max_cols = win.ws_col - 1; /* don't print *on* the edge */
+
 	if (space < max_cols)
 		cols = max_cols / space;
 	rows = (cmds->cnt + cols - 1) / cols;
@@ -324,7 +306,7 @@ const char *help_unknown_cmd(const char *cmd)
 
 	main_cmds.names[0] = NULL;
 	clean_cmdnames(&main_cmds);
-	fprintf(stderr, "WARNING: You called a Git program named '%s', "
+	fprintf(stderr, "WARNING: You called a perf program named '%s', "
 		"which does not exist.\n"
 		"Continuing under the assumption that you meant '%s'\n",
 		cmd, assumed);
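
The help.c hunks above replace the local term_columns() helper with the shared get_term_dimensions(); both ultimately come down to the TIOCGWINSZ ioctl with a fallback when no terminal is attached. A standalone equivalent of that lookup (a sketch assuming a POSIX terminal, not code from this tree):

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct winsize ws;
	int cols = 80;			/* classic fallback width */

	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0 && ws.ws_col)
		cols = ws.ws_col;
	printf("printing lists %d columns wide (minus one for the edge)\n",
	       cols - 1);
	return 0;
}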
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 9a71c94f057a..cbf7eae2ce09 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1,4 +1,5 @@
1#include "util.h" 1#include "util.h"
2#include "build-id.h"
2#include "hist.h" 3#include "hist.h"
3#include "session.h" 4#include "session.h"
4#include "sort.h" 5#include "sort.h"
@@ -988,22 +989,42 @@ int hist_entry__annotate(struct hist_entry *self, struct list_head *head)
 	struct symbol *sym = self->ms.sym;
 	struct map *map = self->ms.map;
 	struct dso *dso = map->dso;
-	const char *filename = dso->long_name;
+	char *filename = dso__build_id_filename(dso, NULL, 0);
+	bool free_filename = true;
 	char command[PATH_MAX * 2];
 	FILE *file;
+	int err = 0;
 	u64 len;
 
-	if (!filename)
-		return -1;
+	if (filename == NULL) {
+		if (dso->has_build_id) {
+			pr_err("Can't annotate %s: not enough memory\n",
+			       sym->name);
+			return -ENOMEM;
+		}
+		goto fallback;
+	} else if (readlink(filename, command, sizeof(command)) < 0 ||
+		   strstr(command, "[kernel.kallsyms]") ||
+		   access(filename, R_OK)) {
+		free(filename);
+fallback:
+		/*
+		 * If we don't have build-ids or the build-id file isn't in the
+		 * cache, or is just a kallsyms file, well, lets hope that this
+		 * DSO is the same as when 'perf record' ran.
+		 */
+		filename = dso->long_name;
+		free_filename = false;
+	}
 
 	if (dso->origin == DSO__ORIG_KERNEL) {
 		if (dso->annotate_warned)
-			return 0;
+			goto out_free_filename;
+		err = -ENOENT;
 		dso->annotate_warned = 1;
 		pr_err("Can't annotate %s: No vmlinux file was found in the "
-		       "path:\n", sym->name);
-		vmlinux_path__fprintf(stderr);
-		return -1;
+		       "path\n", sym->name);
+		goto out_free_filename;
 	}
 
 	pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
@@ -1025,14 +1046,17 @@ int hist_entry__annotate(struct hist_entry *self, struct list_head *head)
 
 	file = popen(command, "r");
 	if (!file)
-		return -1;
+		goto out_free_filename;
 
 	while (!feof(file))
 		if (hist_entry__parse_objdump_line(self, file, head) < 0)
 			break;
 
 	pclose(file);
-	return 0;
+out_free_filename:
+	if (free_filename)
+		free(filename);
+	return err;
 }
 
 void hists__inc_nr_events(struct hists *self, u32 type)
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 6f17dcd8412c..83fa33a7b38b 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -98,12 +98,32 @@ void hists__filter_by_thread(struct hists *self, const struct thread *thread);
98#ifdef NO_NEWT_SUPPORT 98#ifdef NO_NEWT_SUPPORT
99static inline int hists__browse(struct hists *self __used, 99static inline int hists__browse(struct hists *self __used,
100 const char *helpline __used, 100 const char *helpline __used,
101 const char *input_name __used) 101 const char *ev_name __used)
102{ 102{
103 return 0; 103 return 0;
104} 104}
105
106static inline int hists__tui_browse_tree(struct rb_root *self __used,
107 const char *help __used)
108{
109 return 0;
110}
111
112static inline int hist_entry__tui_annotate(struct hist_entry *self __used)
113{
114 return 0;
115}
116#define KEY_LEFT -1
117#define KEY_RIGHT -2
105#else 118#else
119#include <newt.h>
106int hists__browse(struct hists *self, const char *helpline, 120int hists__browse(struct hists *self, const char *helpline,
107 const char *input_name); 121 const char *ev_name);
122int hist_entry__tui_annotate(struct hist_entry *self);
123
124#define KEY_LEFT NEWT_KEY_LEFT
125#define KEY_RIGHT NEWT_KEY_RIGHT
126
127int hists__tui_browse_tree(struct rb_root *self, const char *help);
108#endif 128#endif
109#endif /* __PERF_HIST_H */ 129#endif /* __PERF_HIST_H */
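
The hist.h change above fleshes out the NO_NEWT_SUPPORT branch: when the TUI is compiled out, static inline stubs and fallback KEY_LEFT/KEY_RIGHT definitions keep every caller compiling without #ifdefs. A sketch of that stub-header pattern with invented names (NO_FANCY_UI, fancy_ui_browse(), KEY_NEXT/KEY_PREV are not perf symbols):

/*
 * Stub pattern: when the TUI backend is unavailable, provide static
 * inline no-ops plus fallback key codes so call sites stay free of
 * conditional compilation.
 */
#include <stdio.h>

#define NO_FANCY_UI 1			/* pretend the TUI library is missing */

#ifdef NO_FANCY_UI
static inline int fancy_ui_browse(const char *title)
{
	(void)title;
	return 0;			/* behave as if the user exited at once */
}
#define KEY_NEXT -1
#define KEY_PREV -2
#else
int fancy_ui_browse(const char *title);	/* real TUI implementation */
#define KEY_NEXT 0x102			/* would come from the TUI headers */
#define KEY_PREV 0x103
#endif

int main(void)
{
	int key = fancy_ui_browse("report");

	printf("browser returned %d (KEY_NEXT=%d, KEY_PREV=%d)\n",
	       key, KEY_NEXT, KEY_PREV);
	return 0;
}
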
diff --git a/tools/perf/util/newt.c b/tools/perf/util/newt.c
index ccb7c5bb269e..cf182ca132fe 100644
--- a/tools/perf/util/newt.c
+++ b/tools/perf/util/newt.c
@@ -1,7 +1,15 @@
1#define _GNU_SOURCE 1#define _GNU_SOURCE
2#include <stdio.h> 2#include <stdio.h>
3#undef _GNU_SOURCE 3#undef _GNU_SOURCE
4 4/*
5 * slang versions <= 2.0.6 have a "#if HAVE_LONG_LONG" that breaks
6 * the build if it isn't defined. Use the equivalent one that glibc
7 * has on features.h.
8 */
9#include <features.h>
10#ifndef HAVE_LONG_LONG
11#define HAVE_LONG_LONG __GLIBC_HAVE_LONG_LONG
12#endif
5#include <slang.h> 13#include <slang.h>
6#include <stdlib.h> 14#include <stdlib.h>
7#include <newt.h> 15#include <newt.h>
@@ -227,6 +235,15 @@ static bool dialog_yesno(const char *msg)
227 return newtWinChoice(NULL, yes, no, (char *)msg) == 1; 235 return newtWinChoice(NULL, yes, no, (char *)msg) == 1;
228} 236}
229 237
238static void ui__error_window(const char *fmt, ...)
239{
240 va_list ap;
241
242 va_start(ap, fmt);
243 newtWinMessagev((char *)"Error", (char *)"Ok", (char *)fmt, ap);
244 va_end(ap);
245}
246
230#define HE_COLORSET_TOP 50 247#define HE_COLORSET_TOP 50
231#define HE_COLORSET_MEDIUM 51 248#define HE_COLORSET_MEDIUM 51
232#define HE_COLORSET_NORMAL 52 249#define HE_COLORSET_NORMAL 52
@@ -375,8 +392,11 @@ static int ui_browser__run(struct ui_browser *self, const char *title,
375 newtFormAddHotKey(self->form, NEWT_KEY_DOWN); 392 newtFormAddHotKey(self->form, NEWT_KEY_DOWN);
376 newtFormAddHotKey(self->form, NEWT_KEY_PGUP); 393 newtFormAddHotKey(self->form, NEWT_KEY_PGUP);
377 newtFormAddHotKey(self->form, NEWT_KEY_PGDN); 394 newtFormAddHotKey(self->form, NEWT_KEY_PGDN);
395 newtFormAddHotKey(self->form, ' ');
378 newtFormAddHotKey(self->form, NEWT_KEY_HOME); 396 newtFormAddHotKey(self->form, NEWT_KEY_HOME);
379 newtFormAddHotKey(self->form, NEWT_KEY_END); 397 newtFormAddHotKey(self->form, NEWT_KEY_END);
398 newtFormAddHotKey(self->form, NEWT_KEY_TAB);
399 newtFormAddHotKey(self->form, NEWT_KEY_RIGHT);
380 400
381 if (ui_browser__refresh_entries(self) < 0) 401 if (ui_browser__refresh_entries(self) < 0)
382 return -1; 402 return -1;
@@ -389,6 +409,8 @@ static int ui_browser__run(struct ui_browser *self, const char *title,
389 409
390 if (es->reason != NEWT_EXIT_HOTKEY) 410 if (es->reason != NEWT_EXIT_HOTKEY)
391 break; 411 break;
412 if (is_exit_key(es->u.key))
413 return es->u.key;
392 switch (es->u.key) { 414 switch (es->u.key) {
393 case NEWT_KEY_DOWN: 415 case NEWT_KEY_DOWN:
394 if (self->index == self->nr_entries - 1) 416 if (self->index == self->nr_entries - 1)
@@ -411,6 +433,7 @@ static int ui_browser__run(struct ui_browser *self, const char *title,
411 } 433 }
412 break; 434 break;
413 case NEWT_KEY_PGDN: 435 case NEWT_KEY_PGDN:
436 case ' ':
414 if (self->first_visible_entry_idx + self->height > self->nr_entries - 1) 437 if (self->first_visible_entry_idx + self->height > self->nr_entries - 1)
415 break; 438 break;
416 439
@@ -461,12 +484,10 @@ static int ui_browser__run(struct ui_browser *self, const char *title,
461 } 484 }
462 } 485 }
463 break; 486 break;
464 case NEWT_KEY_ESCAPE: 487 case NEWT_KEY_RIGHT:
465 case NEWT_KEY_LEFT: 488 case NEWT_KEY_LEFT:
466 case CTRL('c'): 489 case NEWT_KEY_TAB:
467 case 'Q': 490 return es->u.key;
468 case 'q':
469 return 0;
470 default: 491 default:
471 continue; 492 continue;
472 } 493 }
@@ -658,18 +679,24 @@ static size_t hist_entry__append_browser(struct hist_entry *self,
658 return ret; 679 return ret;
659} 680}
660 681
661static void hist_entry__annotate_browser(struct hist_entry *self) 682int hist_entry__tui_annotate(struct hist_entry *self)
662{ 683{
663 struct ui_browser browser; 684 struct ui_browser browser;
664 struct newtExitStruct es; 685 struct newtExitStruct es;
665 struct objdump_line *pos, *n; 686 struct objdump_line *pos, *n;
666 LIST_HEAD(head); 687 LIST_HEAD(head);
688 int ret;
667 689
668 if (self->ms.sym == NULL) 690 if (self->ms.sym == NULL)
669 return; 691 return -1;
670 692
671 if (hist_entry__annotate(self, &head) < 0) 693 if (self->ms.map->dso->annotate_warned)
672 return; 694 return -1;
695
696 if (hist_entry__annotate(self, &head) < 0) {
697 ui__error_window(browser__last_msg);
698 return -1;
699 }
673 700
674 ui_helpline__push("Press <- or ESC to exit"); 701 ui_helpline__push("Press <- or ESC to exit");
675 702
@@ -684,7 +711,7 @@ static void hist_entry__annotate_browser(struct hist_entry *self)
684 } 711 }
685 712
686 browser.width += 18; /* Percentage */ 713 browser.width += 18; /* Percentage */
687 ui_browser__run(&browser, self->ms.sym->name, &es); 714 ret = ui_browser__run(&browser, self->ms.sym->name, &es);
688 newtFormDestroy(browser.form); 715 newtFormDestroy(browser.form);
689 newtPopWindow(); 716 newtPopWindow();
690 list_for_each_entry_safe(pos, n, &head, node) { 717 list_for_each_entry_safe(pos, n, &head, node) {
@@ -692,6 +719,7 @@ static void hist_entry__annotate_browser(struct hist_entry *self)
692 objdump_line__free(pos); 719 objdump_line__free(pos);
693 } 720 }
694 ui_helpline__pop(); 721 ui_helpline__pop();
722 return ret;
695} 723}
696 724
697static const void *newt__symbol_tree_get_current(newtComponent self) 725static const void *newt__symbol_tree_get_current(newtComponent self)
@@ -814,6 +842,8 @@ static int hist_browser__populate(struct hist_browser *self, struct hists *hists
814 newtFormAddHotKey(self->form, 'h'); 842 newtFormAddHotKey(self->form, 'h');
815 newtFormAddHotKey(self->form, NEWT_KEY_F1); 843 newtFormAddHotKey(self->form, NEWT_KEY_F1);
816 newtFormAddHotKey(self->form, NEWT_KEY_RIGHT); 844 newtFormAddHotKey(self->form, NEWT_KEY_RIGHT);
845 newtFormAddHotKey(self->form, NEWT_KEY_TAB);
846 newtFormAddHotKey(self->form, NEWT_KEY_UNTAB);
817 newtFormAddComponents(self->form, self->tree, NULL); 847 newtFormAddComponents(self->form, self->tree, NULL);
818 self->selection = newt__symbol_tree_get_current(self->tree); 848 self->selection = newt__symbol_tree_get_current(self->tree);
819 849
@@ -845,7 +875,7 @@ static struct thread *hist_browser__selected_thread(struct hist_browser *self)
845 return he ? he->thread : NULL; 875 return he ? he->thread : NULL;
846} 876}
847 877
848static int hist_browser__title(char *bf, size_t size, const char *input_name, 878static int hist_browser__title(char *bf, size_t size, const char *ev_name,
849 const struct dso *dso, const struct thread *thread) 879 const struct dso *dso, const struct thread *thread)
850{ 880{
851 int printed = 0; 881 int printed = 0;
@@ -859,18 +889,18 @@ static int hist_browser__title(char *bf, size_t size, const char *input_name,
859 printed += snprintf(bf + printed, size - printed, 889 printed += snprintf(bf + printed, size - printed,
860 "%sDSO: %s", thread ? " " : "", 890 "%sDSO: %s", thread ? " " : "",
861 dso->short_name); 891 dso->short_name);
862 return printed ?: snprintf(bf, size, "Report: %s", input_name); 892 return printed ?: snprintf(bf, size, "Event: %s", ev_name);
863} 893}
864 894
865int hists__browse(struct hists *self, const char *helpline, const char *input_name) 895int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
866{ 896{
867 struct hist_browser *browser = hist_browser__new(); 897 struct hist_browser *browser = hist_browser__new();
868 struct pstack *fstack = pstack__new(2); 898 struct pstack *fstack;
869 const struct thread *thread_filter = NULL; 899 const struct thread *thread_filter = NULL;
870 const struct dso *dso_filter = NULL; 900 const struct dso *dso_filter = NULL;
871 struct newtExitStruct es; 901 struct newtExitStruct es;
872 char msg[160]; 902 char msg[160];
873 int err = -1; 903 int key = -1;
874 904
875 if (browser == NULL) 905 if (browser == NULL)
876 return -1; 906 return -1;
@@ -881,7 +911,7 @@ int hists__browse(struct hists *self, const char *helpline, const char *input_na
881 911
882 ui_helpline__push(helpline); 912 ui_helpline__push(helpline);
883 913
884 hist_browser__title(msg, sizeof(msg), input_name, 914 hist_browser__title(msg, sizeof(msg), ev_name,
885 dso_filter, thread_filter); 915 dso_filter, thread_filter);
886 if (hist_browser__populate(browser, self, msg) < 0) 916 if (hist_browser__populate(browser, self, msg) < 0)
887 goto out_free_stack; 917 goto out_free_stack;
@@ -899,11 +929,27 @@ int hists__browse(struct hists *self, const char *helpline, const char *input_na
899 dso = browser->selection->map ? browser->selection->map->dso : NULL; 929 dso = browser->selection->map ? browser->selection->map->dso : NULL;
900 930
901 if (es.reason == NEWT_EXIT_HOTKEY) { 931 if (es.reason == NEWT_EXIT_HOTKEY) {
902 if (es.u.key == NEWT_KEY_F1) 932 key = es.u.key;
933
934 switch (key) {
935 case NEWT_KEY_F1:
903 goto do_help; 936 goto do_help;
937 case NEWT_KEY_TAB:
938 case NEWT_KEY_UNTAB:
939 /*
940 * Exit the browser, let hists__browser_tree
941 * go to the next or previous
942 */
943 goto out_free_stack;
944 default:;
945 }
904 946
905 switch (toupper(es.u.key)) { 947 key = toupper(key);
948 switch (key) {
906 case 'A': 949 case 'A':
950 if (browser->selection->map == NULL &&
951 browser->selection->map->dso->annotate_warned)
952 continue;
907 goto do_annotate; 953 goto do_annotate;
908 case 'D': 954 case 'D':
909 goto zoom_dso; 955 goto zoom_dso;
@@ -922,14 +968,14 @@ do_help:
922 continue; 968 continue;
923 default:; 969 default:;
924 } 970 }
925 if (toupper(es.u.key) == 'Q' || 971 if (is_exit_key(key)) {
926 es.u.key == CTRL('c')) 972 if (key == NEWT_KEY_ESCAPE) {
927 break; 973 if (dialog_yesno("Do you really want to exit?"))
928 if (es.u.key == NEWT_KEY_ESCAPE) { 974 break;
929 if (dialog_yesno("Do you really want to exit?")) 975 else
976 continue;
977 } else
930 break; 978 break;
931 else
932 continue;
933 } 979 }
934 980
935 if (es.u.key == NEWT_KEY_LEFT) { 981 if (es.u.key == NEWT_KEY_LEFT) {
@@ -947,6 +993,7 @@ do_help:
947 } 993 }
948 994
949 if (browser->selection->sym != NULL && 995 if (browser->selection->sym != NULL &&
996 !browser->selection->map->dso->annotate_warned &&
950 asprintf(&options[nr_options], "Annotate %s", 997 asprintf(&options[nr_options], "Annotate %s",
951 browser->selection->sym->name) > 0) 998 browser->selection->sym->name) > 0)
952 annotate = nr_options++; 999 annotate = nr_options++;
@@ -981,6 +1028,7 @@ do_help:
981 struct hist_entry *he; 1028 struct hist_entry *he;
982do_annotate: 1029do_annotate:
983 if (browser->selection->map->dso->origin == DSO__ORIG_KERNEL) { 1030 if (browser->selection->map->dso->origin == DSO__ORIG_KERNEL) {
1031 browser->selection->map->dso->annotate_warned = 1;
984 ui_helpline__puts("No vmlinux file found, can't " 1032 ui_helpline__puts("No vmlinux file found, can't "
985 "annotate with just a " 1033 "annotate with just a "
986 "kallsyms file"); 1034 "kallsyms file");
@@ -991,7 +1039,7 @@ do_annotate:
991 if (he == NULL) 1039 if (he == NULL)
992 continue; 1040 continue;
993 1041
994 hist_entry__annotate_browser(he); 1042 hist_entry__tui_annotate(he);
995 } else if (choice == zoom_dso) { 1043 } else if (choice == zoom_dso) {
996zoom_dso: 1044zoom_dso:
997 if (dso_filter) { 1045 if (dso_filter) {
@@ -1008,7 +1056,7 @@ zoom_out_dso:
1008 pstack__push(fstack, &dso_filter); 1056 pstack__push(fstack, &dso_filter);
1009 } 1057 }
1010 hists__filter_by_dso(self, dso_filter); 1058 hists__filter_by_dso(self, dso_filter);
1011 hist_browser__title(msg, sizeof(msg), input_name, 1059 hist_browser__title(msg, sizeof(msg), ev_name,
1012 dso_filter, thread_filter); 1060 dso_filter, thread_filter);
1013 if (hist_browser__populate(browser, self, msg) < 0) 1061 if (hist_browser__populate(browser, self, msg) < 0)
1014 goto out; 1062 goto out;
@@ -1027,18 +1075,49 @@ zoom_out_thread:
1027 pstack__push(fstack, &thread_filter); 1075 pstack__push(fstack, &thread_filter);
1028 } 1076 }
1029 hists__filter_by_thread(self, thread_filter); 1077 hists__filter_by_thread(self, thread_filter);
1030 hist_browser__title(msg, sizeof(msg), input_name, 1078 hist_browser__title(msg, sizeof(msg), ev_name,
1031 dso_filter, thread_filter); 1079 dso_filter, thread_filter);
1032 if (hist_browser__populate(browser, self, msg) < 0) 1080 if (hist_browser__populate(browser, self, msg) < 0)
1033 goto out; 1081 goto out;
1034 } 1082 }
1035 } 1083 }
1036 err = 0;
1037out_free_stack: 1084out_free_stack:
1038 pstack__delete(fstack); 1085 pstack__delete(fstack);
1039out: 1086out:
1040 hist_browser__delete(browser); 1087 hist_browser__delete(browser);
1041 return err; 1088 return key;
1089}
1090
1091int hists__tui_browse_tree(struct rb_root *self, const char *help)
1092{
1093 struct rb_node *first = rb_first(self), *nd = first, *next;
1094 int key = 0;
1095
1096 while (nd) {
1097 struct hists *hists = rb_entry(nd, struct hists, rb_node);
1098 const char *ev_name = __event_name(hists->type, hists->config);
1099
1100 key = hists__browse(hists, help, ev_name);
1101
1102 if (is_exit_key(key))
1103 break;
1104
1105 switch (key) {
1106 case NEWT_KEY_TAB:
1107 next = rb_next(nd);
1108 if (next)
1109 nd = next;
1110 break;
1111 case NEWT_KEY_UNTAB:
1112 if (nd == first)
1113 continue;
1114 nd = rb_prev(nd);
1115 default:
1116 break;
1117 }
1118 }
1119
1120 return key;
1042} 1121}
1043 1122
1044static struct newtPercentTreeColors { 1123static struct newtPercentTreeColors {
@@ -1058,10 +1137,14 @@ static struct newtPercentTreeColors {
1058void setup_browser(void) 1137void setup_browser(void)
1059{ 1138{
1060 struct newtPercentTreeColors *c = &defaultPercentTreeColors; 1139 struct newtPercentTreeColors *c = &defaultPercentTreeColors;
1061 if (!isatty(1)) 1140
1141 if (!isatty(1) || !use_browser || dump_trace) {
1142 use_browser = 0;
1143 setup_pager();
1062 return; 1144 return;
1145 }
1063 1146
1064 use_browser = true; 1147 use_browser = 1;
1065 newtInit(); 1148 newtInit();
1066 newtCls(); 1149 newtCls();
1067 ui_helpline__puts(" "); 1150 ui_helpline__puts(" ");
@@ -1074,7 +1157,7 @@ void setup_browser(void)
1074 1157
1075void exit_browser(bool wait_for_ok) 1158void exit_browser(bool wait_for_ok)
1076{ 1159{
1077 if (use_browser) { 1160 if (use_browser > 0) {
1078 if (wait_for_ok) { 1161 if (wait_for_ok) {
1079 char title[] = "Fatal Error", ok[] = "Ok"; 1162 char title[] = "Fatal Error", ok[] = "Ok";
1080 newtWinMessage(title, ok, browser__last_msg); 1163 newtWinMessage(title, ok, browser__last_msg);
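
In the newt.c changes above, ui_browser__run() and hists__browse() now return the key that ended them, and the new hists__tui_browse_tree() walks the rbtree of struct hists, stepping forward on TAB and backward on UNTAB until an exit key arrives. A simplified, self-contained sketch of that navigation loop, using an array in place of the rbtree, invented key constants, and a scripted browse_one() in place of the newt calls:

/*
 * Each browser invocation returns the key that closed it; the outer
 * loop interprets TAB/UNTAB as next/previous and stops on an exit key.
 */
#include <stdbool.h>
#include <stdio.h>

#define KEY_TAB    9
#define KEY_UNTAB  353
#define KEY_QUIT   'q'

static bool is_exit_key(int key)
{
	return key == KEY_QUIT || key == 27 /* ESC */;
}

/* Pretend to run a browser for one event; scripted keys drive the demo. */
static int browse_one(const char *ev_name)
{
	static const int scripted[] = { KEY_TAB, KEY_TAB, KEY_UNTAB, KEY_QUIT };
	static unsigned int i;

	printf("browsing event %s\n", ev_name);
	return scripted[i++ % 4];
}

static int browse_tree(const char *events[], int nr)
{
	int idx = 0, key = 0;

	while (idx >= 0 && idx < nr) {
		key = browse_one(events[idx]);
		if (is_exit_key(key))
			break;
		switch (key) {
		case KEY_TAB:
			if (idx + 1 < nr)	/* stay on the last entry */
				idx++;
			break;
		case KEY_UNTAB:
			if (idx > 0)		/* stay on the first entry */
				idx--;
			break;
		default:
			break;
		}
	}
	return key;
}

int main(void)
{
	const char *events[] = { "cycles", "instructions", "cache-misses" };

	return browse_tree(events, 3) == KEY_QUIT ? 0 : 1;
}
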
diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
index fd1f2faaade4..58a470d036dd 100644
--- a/tools/perf/util/path.c
+++ b/tools/perf/util/path.c
@@ -54,21 +54,6 @@ static char *cleanup_path(char *path)
54 return path; 54 return path;
55} 55}
56 56
57char *mksnpath(char *buf, size_t n, const char *fmt, ...)
58{
59 va_list args;
60 unsigned len;
61
62 va_start(args, fmt);
63 len = vsnprintf(buf, n, fmt, args);
64 va_end(args);
65 if (len >= n) {
66 strlcpy(buf, bad_path, n);
67 return buf;
68 }
69 return cleanup_path(buf);
70}
71
72static char *perf_vsnpath(char *buf, size_t n, const char *fmt, va_list args) 57static char *perf_vsnpath(char *buf, size_t n, const char *fmt, va_list args)
73{ 58{
74 const char *perf_dir = get_perf_dir(); 59 const char *perf_dir = get_perf_dir();
@@ -89,15 +74,6 @@ bad:
89 return buf; 74 return buf;
90} 75}
91 76
92char *perf_snpath(char *buf, size_t n, const char *fmt, ...)
93{
94 va_list args;
95 va_start(args, fmt);
96 (void)perf_vsnpath(buf, n, fmt, args);
97 va_end(args);
98 return buf;
99}
100
101char *perf_pathdup(const char *fmt, ...) 77char *perf_pathdup(const char *fmt, ...)
102{ 78{
103 char path[PATH_MAX]; 79 char path[PATH_MAX];
@@ -143,184 +119,6 @@ char *perf_path(const char *fmt, ...)
143 return cleanup_path(pathname); 119 return cleanup_path(pathname);
144} 120}
145 121
146
147/* perf_mkstemp() - create tmp file honoring TMPDIR variable */
148int perf_mkstemp(char *path, size_t len, const char *template)
149{
150 const char *tmp;
151 size_t n;
152
153 tmp = getenv("TMPDIR");
154 if (!tmp)
155 tmp = "/tmp";
156 n = snprintf(path, len, "%s/%s", tmp, template);
157 if (len <= n) {
158 errno = ENAMETOOLONG;
159 return -1;
160 }
161 return mkstemp(path);
162}
163
164
165const char *make_relative_path(const char *abs_path, const char *base)
166{
167 static char buf[PATH_MAX + 1];
168 int baselen;
169
170 if (!base)
171 return abs_path;
172
173 baselen = strlen(base);
174 if (prefixcmp(abs_path, base))
175 return abs_path;
176 if (abs_path[baselen] == '/')
177 baselen++;
178 else if (base[baselen - 1] != '/')
179 return abs_path;
180
181 strcpy(buf, abs_path + baselen);
182
183 return buf;
184}
185
186/*
187 * It is okay if dst == src, but they should not overlap otherwise.
188 *
189 * Performs the following normalizations on src, storing the result in dst:
190 * - Ensures that components are separated by '/' (Windows only)
191 * - Squashes sequences of '/'.
192 * - Removes "." components.
193 * - Removes ".." components, and the components the precede them.
194 * Returns failure (non-zero) if a ".." component appears as first path
195 * component anytime during the normalization. Otherwise, returns success (0).
196 *
197 * Note that this function is purely textual. It does not follow symlinks,
198 * verify the existence of the path, or make any system calls.
199 */
200int normalize_path_copy(char *dst, const char *src)
201{
202 char *dst0;
203
204 if (has_dos_drive_prefix(src)) {
205 *dst++ = *src++;
206 *dst++ = *src++;
207 }
208 dst0 = dst;
209
210 if (is_dir_sep(*src)) {
211 *dst++ = '/';
212 while (is_dir_sep(*src))
213 src++;
214 }
215
216 for (;;) {
217 char c = *src;
218
219 /*
220 * A path component that begins with . could be
221 * special:
222 * (1) "." and ends -- ignore and terminate.
223 * (2) "./" -- ignore them, eat slash and continue.
224 * (3) ".." and ends -- strip one and terminate.
225 * (4) "../" -- strip one, eat slash and continue.
226 */
227 if (c == '.') {
228 if (!src[1]) {
229 /* (1) */
230 src++;
231 } else if (is_dir_sep(src[1])) {
232 /* (2) */
233 src += 2;
234 while (is_dir_sep(*src))
235 src++;
236 continue;
237 } else if (src[1] == '.') {
238 if (!src[2]) {
239 /* (3) */
240 src += 2;
241 goto up_one;
242 } else if (is_dir_sep(src[2])) {
243 /* (4) */
244 src += 3;
245 while (is_dir_sep(*src))
246 src++;
247 goto up_one;
248 }
249 }
250 }
251
252 /* copy up to the next '/', and eat all '/' */
253 while ((c = *src++) != '\0' && !is_dir_sep(c))
254 *dst++ = c;
255 if (is_dir_sep(c)) {
256 *dst++ = '/';
257 while (is_dir_sep(c))
258 c = *src++;
259 src--;
260 } else if (!c)
261 break;
262 continue;
263
264 up_one:
265 /*
266 * dst0..dst is prefix portion, and dst[-1] is '/';
267 * go up one level.
268 */
269 dst--; /* go to trailing '/' */
270 if (dst <= dst0)
271 return -1;
272 /* Windows: dst[-1] cannot be backslash anymore */
273 while (dst0 < dst && dst[-1] != '/')
274 dst--;
275 }
276 *dst = '\0';
277 return 0;
278}
279
280/*
281 * path = Canonical absolute path
282 * prefix_list = Colon-separated list of absolute paths
283 *
284 * Determines, for each path in prefix_list, whether the "prefix" really
285 * is an ancestor directory of path. Returns the length of the longest
286 * ancestor directory, excluding any trailing slashes, or -1 if no prefix
287 * is an ancestor. (Note that this means 0 is returned if prefix_list is
288 * "/".) "/foo" is not considered an ancestor of "/foobar". Directories
289 * are not considered to be their own ancestors. path must be in a
290 * canonical form: empty components, or "." or ".." components are not
291 * allowed. prefix_list may be null, which is like "".
292 */
293int longest_ancestor_length(const char *path, const char *prefix_list)
294{
295 char buf[PATH_MAX+1];
296 const char *ceil, *colon;
297 int len, max_len = -1;
298
299 if (prefix_list == NULL || !strcmp(path, "/"))
300 return -1;
301
302 for (colon = ceil = prefix_list; *colon; ceil = colon+1) {
303 for (colon = ceil; *colon && *colon != PATH_SEP; colon++);
304 len = colon - ceil;
305 if (len == 0 || len > PATH_MAX || !is_absolute_path(ceil))
306 continue;
307 strlcpy(buf, ceil, len+1);
308 if (normalize_path_copy(buf, buf) < 0)
309 continue;
310 len = strlen(buf);
311 if (len > 0 && buf[len-1] == '/')
312 buf[--len] = '\0';
313
314 if (!strncmp(path, buf, len) &&
315 path[len] == '/' &&
316 len > max_len) {
317 max_len = len;
318 }
319 }
320
321 return max_len;
322}
323
324/* strip arbitrary amount of directory separators at end of path */ 122/* strip arbitrary amount of directory separators at end of path */
325static inline int chomp_trailing_dir_sep(const char *path, int len) 123static inline int chomp_trailing_dir_sep(const char *path, int len)
326{ 124{
@@ -354,5 +152,5 @@ char *strip_path_suffix(const char *path, const char *suffix)
354 152
355 if (path_len && !is_dir_sep(path[path_len - 1])) 153 if (path_len && !is_dir_sep(path[path_len - 1]))
356 return NULL; 154 return NULL;
357 return xstrndup(path, chomp_trailing_dir_sep(path, path_len)); 155 return strndup(path, chomp_trailing_dir_sep(path, path_len));
358} 156}
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 562b1443e785..d964cb199c67 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -668,6 +668,7 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
668 ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1); 668 ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1);
669 if (ret <= 0 || nops == 0) { 669 if (ret <= 0 || nops == 0) {
670 pf->fb_ops = NULL; 670 pf->fb_ops = NULL;
671#if _ELFUTILS_PREREQ(0, 142)
671 } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa && 672 } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
672 pf->cfi != NULL) { 673 pf->cfi != NULL) {
673 Dwarf_Frame *frame; 674 Dwarf_Frame *frame;
@@ -677,6 +678,7 @@ static int convert_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
677 (uintmax_t)pf->addr); 678 (uintmax_t)pf->addr);
678 return -ENOENT; 679 return -ENOENT;
679 } 680 }
681#endif
680 } 682 }
681 683
682 /* Find each argument */ 684 /* Find each argument */
@@ -741,32 +743,36 @@ static int find_lazy_match_lines(struct list_head *head,
741 const char *fname, const char *pat) 743 const char *fname, const char *pat)
742{ 744{
743 char *fbuf, *p1, *p2; 745 char *fbuf, *p1, *p2;
744 int fd, ret, line, nlines = 0; 746 int fd, line, nlines = -1;
745 struct stat st; 747 struct stat st;
746 748
747 fd = open(fname, O_RDONLY); 749 fd = open(fname, O_RDONLY);
748 if (fd < 0) { 750 if (fd < 0) {
749 pr_warning("Failed to open %s: %s\n", fname, strerror(-fd)); 751 pr_warning("Failed to open %s: %s\n", fname, strerror(-fd));
750 return fd; 752 return -errno;
751 } 753 }
752 754
753 ret = fstat(fd, &st); 755 if (fstat(fd, &st) < 0) {
754 if (ret < 0) {
755 pr_warning("Failed to get the size of %s: %s\n", 756 pr_warning("Failed to get the size of %s: %s\n",
756 fname, strerror(errno)); 757 fname, strerror(errno));
757 return ret; 758 nlines = -errno;
759 goto out_close;
758 } 760 }
759 fbuf = xmalloc(st.st_size + 2); 761
760 ret = read(fd, fbuf, st.st_size); 762 nlines = -ENOMEM;
761 if (ret < 0) { 763 fbuf = malloc(st.st_size + 2);
764 if (fbuf == NULL)
765 goto out_close;
766 if (read(fd, fbuf, st.st_size) < 0) {
762 pr_warning("Failed to read %s: %s\n", fname, strerror(errno)); 767 pr_warning("Failed to read %s: %s\n", fname, strerror(errno));
763 return ret; 768 nlines = -errno;
769 goto out_free_fbuf;
764 } 770 }
765 close(fd);
766 fbuf[st.st_size] = '\n'; /* Dummy line */ 771 fbuf[st.st_size] = '\n'; /* Dummy line */
767 fbuf[st.st_size + 1] = '\0'; 772 fbuf[st.st_size + 1] = '\0';
768 p1 = fbuf; 773 p1 = fbuf;
769 line = 1; 774 line = 1;
775 nlines = 0;
770 while ((p2 = strchr(p1, '\n')) != NULL) { 776 while ((p2 = strchr(p1, '\n')) != NULL) {
771 *p2 = '\0'; 777 *p2 = '\0';
772 if (strlazymatch(p1, pat)) { 778 if (strlazymatch(p1, pat)) {
@@ -776,7 +782,10 @@ static int find_lazy_match_lines(struct list_head *head,
776 line++; 782 line++;
777 p1 = p2 + 1; 783 p1 = p2 + 1;
778 } 784 }
785out_free_fbuf:
779 free(fbuf); 786 free(fbuf);
787out_close:
788 close(fd);
780 return nlines; 789 return nlines;
781} 790}
782 791
@@ -953,11 +962,15 @@ int find_kprobe_trace_events(int fd, struct perf_probe_event *pev,
953 if (!dbg) { 962 if (!dbg) {
954 pr_warning("No dwarf info found in the vmlinux - " 963 pr_warning("No dwarf info found in the vmlinux - "
955 "please rebuild with CONFIG_DEBUG_INFO=y.\n"); 964 "please rebuild with CONFIG_DEBUG_INFO=y.\n");
965 free(pf.tevs);
966 *tevs = NULL;
956 return -EBADF; 967 return -EBADF;
957 } 968 }
958 969
970#if _ELFUTILS_PREREQ(0, 142)
959 /* Get the call frame information from this dwarf */ 971 /* Get the call frame information from this dwarf */
960 pf.cfi = dwarf_getcfi(dbg); 972 pf.cfi = dwarf_getcfi(dbg);
973#endif
961 974
962 off = 0; 975 off = 0;
963 line_list__init(&pf.lcache); 976 line_list__init(&pf.lcache);
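
The find_lazy_match_lines() rework above replaces early returns that leaked the file descriptor and buffer with a single exit path: the return variable picks up -errno or -ENOMEM before jumping to labels that free and close in reverse acquisition order. A sketch of that goto-based cleanup in a generic line-counting helper (not the perf function itself):

/*
 * One label per acquired resource, released in reverse order; errors
 * are recorded as negative errno-style codes before jumping.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int count_lines(const char *fname)
{
	struct stat st;
	char *fbuf;
	int fd, nlines = 0;

	fd = open(fname, O_RDONLY);
	if (fd < 0)
		return -errno;

	if (fstat(fd, &st) < 0) {
		nlines = -errno;
		goto out_close;
	}

	fbuf = malloc(st.st_size + 1);
	if (fbuf == NULL) {
		nlines = -ENOMEM;
		goto out_close;
	}

	if (read(fd, fbuf, st.st_size) < 0) {
		nlines = -errno;
		goto out_free;
	}
	fbuf[st.st_size] = '\0';

	for (char *p = fbuf; (p = strchr(p, '\n')) != NULL; p++)
		nlines++;

out_free:
	free(fbuf);
out_close:
	close(fd);
	return nlines;
}

int main(int argc, char **argv)
{
	int n = count_lines(argc > 1 ? argv[1] : "/etc/hostname");

	if (n < 0) {
		fprintf(stderr, "error: %s\n", strerror(-n));
		return 1;
	}
	printf("%d lines\n", n);
	return 0;
}
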
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index 66f1980e3855..e1f61dcd18ff 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -29,6 +29,7 @@ extern int find_line_range(int fd, struct line_range *lr);
29 29
30#include <dwarf.h> 30#include <dwarf.h>
31#include <libdw.h> 31#include <libdw.h>
32#include <version.h>
32 33
33struct probe_finder { 34struct probe_finder {
34 struct perf_probe_event *pev; /* Target probe event */ 35 struct perf_probe_event *pev; /* Target probe event */
@@ -44,7 +45,9 @@ struct probe_finder {
44 struct list_head lcache; /* Line cache for lazy match */ 45 struct list_head lcache; /* Line cache for lazy match */
45 46
46 /* For variable searching */ 47 /* For variable searching */
48#if _ELFUTILS_PREREQ(0, 142)
47 Dwarf_CFI *cfi; /* Call Frame Information */ 49 Dwarf_CFI *cfi; /* Call Frame Information */
50#endif
48 Dwarf_Op *fb_ops; /* Frame base attribute */ 51 Dwarf_Op *fb_ops; /* Frame base attribute */
49 struct perf_probe_arg *pvar; /* Current target variable */ 52 struct perf_probe_arg *pvar; /* Current target variable */
50 struct kprobe_trace_arg *tvar; /* Current result variable */ 53 struct kprobe_trace_arg *tvar; /* Current result variable */
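
probe-finder.c and probe-finder.h above wrap the call-frame-information support in _ELFUTILS_PREREQ(0, 142), so both the struct member and the code that fills it disappear when the installed elfutils predates 0.142. A sketch of that version-prerequisite idea using a made-up library macro (MYLIB_VERSION and MYLIB_PREREQ are invented; the real check comes from elfutils' version.h):

/*
 * A PREREQ(major, minor) macro compares against the library's encoded
 * version so optional members and code paths compile only when the
 * feature exists.
 */
#include <stdio.h>

#define MYLIB_VERSION 142		/* pretend the library reports 0.142 */
#define MYLIB_PREREQ(major, minor) \
	(MYLIB_VERSION >= ((major) * 1000 + (minor)))

struct finder {
	const char *target;
#if MYLIB_PREREQ(0, 142)
	void *cfi;			/* present only when the library can supply it */
#endif
};

int main(void)
{
	struct finder f = { .target = "do_sys_open" };

#if MYLIB_PREREQ(0, 142)
	f.cfi = NULL;			/* would be filled from the library here */
	printf("%s: CFI support compiled in\n", f.target);
#else
	printf("%s: CFI support compiled out\n", f.target);
#endif
	return 0;
}
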
diff --git a/tools/perf/util/quote.c b/tools/perf/util/quote.c
index 2726fe40eb5d..01f03242b86a 100644
--- a/tools/perf/util/quote.c
+++ b/tools/perf/util/quote.c
@@ -1,8 +1,6 @@
1#include "cache.h" 1#include "cache.h"
2#include "quote.h" 2#include "quote.h"
3 3
4int quote_path_fully = 1;
5
6/* Help to copy the thing properly quoted for the shell safety. 4/* Help to copy the thing properly quoted for the shell safety.
7 * any single quote is replaced with '\'', any exclamation point 5 * any single quote is replaced with '\'', any exclamation point
8 * is replaced with '\!', and the whole thing is enclosed in a 6 * is replaced with '\!', and the whole thing is enclosed in a
@@ -19,7 +17,7 @@ static inline int need_bs_quote(char c)
19 return (c == '\'' || c == '!'); 17 return (c == '\'' || c == '!');
20} 18}
21 19
22void sq_quote_buf(struct strbuf *dst, const char *src) 20static void sq_quote_buf(struct strbuf *dst, const char *src)
23{ 21{
24 char *to_free = NULL; 22 char *to_free = NULL;
25 23
@@ -41,23 +39,6 @@ void sq_quote_buf(struct strbuf *dst, const char *src)
41 free(to_free); 39 free(to_free);
42} 40}
43 41
44void sq_quote_print(FILE *stream, const char *src)
45{
46 char c;
47
48 fputc('\'', stream);
49 while ((c = *src++)) {
50 if (need_bs_quote(c)) {
51 fputs("'\\", stream);
52 fputc(c, stream);
53 fputc('\'', stream);
54 } else {
55 fputc(c, stream);
56 }
57 }
58 fputc('\'', stream);
59}
60
61void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen) 42void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
62{ 43{
63 int i; 44 int i;
@@ -71,415 +52,3 @@ void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
71 die("Too many or long arguments"); 52 die("Too many or long arguments");
72 } 53 }
73} 54}
74
75char *sq_dequote_step(char *arg, char **next)
76{
77 char *dst = arg;
78 char *src = arg;
79 char c;
80
81 if (*src != '\'')
82 return NULL;
83 for (;;) {
84 c = *++src;
85 if (!c)
86 return NULL;
87 if (c != '\'') {
88 *dst++ = c;
89 continue;
90 }
91 /* We stepped out of sq */
92 switch (*++src) {
93 case '\0':
94 *dst = 0;
95 if (next)
96 *next = NULL;
97 return arg;
98 case '\\':
99 c = *++src;
100 if (need_bs_quote(c) && *++src == '\'') {
101 *dst++ = c;
102 continue;
103 }
104 /* Fallthrough */
105 default:
106 if (!next || !isspace(*src))
107 return NULL;
108 do {
109 c = *++src;
110 } while (isspace(c));
111 *dst = 0;
112 *next = src;
113 return arg;
114 }
115 }
116}
117
118char *sq_dequote(char *arg)
119{
120 return sq_dequote_step(arg, NULL);
121}
122
123int sq_dequote_to_argv(char *arg, const char ***argv, int *nr, int *alloc)
124{
125 char *next = arg;
126
127 if (!*arg)
128 return 0;
129 do {
130 char *dequoted = sq_dequote_step(next, &next);
131 if (!dequoted)
132 return -1;
133 ALLOC_GROW(*argv, *nr + 1, *alloc);
134 (*argv)[(*nr)++] = dequoted;
135 } while (next);
136
137 return 0;
138}
139
140/* 1 means: quote as octal
141 * 0 means: quote as octal if (quote_path_fully)
142 * -1 means: never quote
143 * c: quote as "\\c"
144 */
145#define X8(x) x, x, x, x, x, x, x, x
146#define X16(x) X8(x), X8(x)
147static signed char const sq_lookup[256] = {
148 /* 0 1 2 3 4 5 6 7 */
149 /* 0x00 */ 1, 1, 1, 1, 1, 1, 1, 'a',
150 /* 0x08 */ 'b', 't', 'n', 'v', 'f', 'r', 1, 1,
151 /* 0x10 */ X16(1),
152 /* 0x20 */ -1, -1, '"', -1, -1, -1, -1, -1,
153 /* 0x28 */ X16(-1), X16(-1), X16(-1),
154 /* 0x58 */ -1, -1, -1, -1,'\\', -1, -1, -1,
155 /* 0x60 */ X16(-1), X8(-1),
156 /* 0x78 */ -1, -1, -1, -1, -1, -1, -1, 1,
157 /* 0x80 */ /* set to 0 */
158};
159
160static inline int sq_must_quote(char c)
161{
162 return sq_lookup[(unsigned char)c] + quote_path_fully > 0;
163}
164
165/*
166 * Returns the longest prefix not needing a quote up to maxlen if
167 * positive.
168 * This stops at the first \0 because it's marked as a character
169 * needing an escape.
170 */
171static ssize_t next_quote_pos(const char *s, ssize_t maxlen)
172{
173 ssize_t len;
174
175 if (maxlen < 0) {
176 for (len = 0; !sq_must_quote(s[len]); len++);
177 } else {
178 for (len = 0; len < maxlen && !sq_must_quote(s[len]); len++);
179 }
180 return len;
181}
182
183/*
184 * C-style name quoting.
185 *
186 * (1) if sb and fp are both NULL, inspect the input name and counts the
187 * number of bytes that are needed to hold c_style quoted version of name,
188 * counting the double quotes around it but not terminating NUL, and
189 * returns it.
190 * However, if name does not need c_style quoting, it returns 0.
191 *
192 * (2) if sb or fp are not NULL, it emits the c_style quoted version
193 * of name, enclosed with double quotes if asked and needed only.
194 * Return value is the same as in (1).
195 */
196static size_t quote_c_style_counted(const char *name, ssize_t maxlen,
197 struct strbuf *sb, FILE *fp, int no_dq)
198{
199#define EMIT(c) \
200 do { \
201 if (sb) strbuf_addch(sb, (c)); \
202 if (fp) fputc((c), fp); \
203 count++; \
204 } while (0)
205
206#define EMITBUF(s, l) \
207 do { \
208 int __ret; \
209 if (sb) strbuf_add(sb, (s), (l)); \
210 if (fp) __ret = fwrite((s), (l), 1, fp); \
211 count += (l); \
212 } while (0)
213
214 ssize_t len, count = 0;
215 const char *p = name;
216
217 for (;;) {
218 int ch;
219
220 len = next_quote_pos(p, maxlen);
221 if (len == maxlen || !p[len])
222 break;
223
224 if (!no_dq && p == name)
225 EMIT('"');
226
227 EMITBUF(p, len);
228 EMIT('\\');
229 p += len;
230 ch = (unsigned char)*p++;
231 if (sq_lookup[ch] >= ' ') {
232 EMIT(sq_lookup[ch]);
233 } else {
234 EMIT(((ch >> 6) & 03) + '0');
235 EMIT(((ch >> 3) & 07) + '0');
236 EMIT(((ch >> 0) & 07) + '0');
237 }
238 }
239
240 EMITBUF(p, len);
241 if (p == name) /* no ending quote needed */
242 return 0;
243
244 if (!no_dq)
245 EMIT('"');
246 return count;
247}
248
249size_t quote_c_style(const char *name, struct strbuf *sb, FILE *fp, int nodq)
250{
251 return quote_c_style_counted(name, -1, sb, fp, nodq);
252}
253
254void quote_two_c_style(struct strbuf *sb, const char *prefix, const char *path, int nodq)
255{
256 if (quote_c_style(prefix, NULL, NULL, 0) ||
257 quote_c_style(path, NULL, NULL, 0)) {
258 if (!nodq)
259 strbuf_addch(sb, '"');
260 quote_c_style(prefix, sb, NULL, 1);
261 quote_c_style(path, sb, NULL, 1);
262 if (!nodq)
263 strbuf_addch(sb, '"');
264 } else {
265 strbuf_addstr(sb, prefix);
266 strbuf_addstr(sb, path);
267 }
268}
269
270void write_name_quoted(const char *name, FILE *fp, int terminator)
271{
272 if (terminator) {
273 quote_c_style(name, NULL, fp, 0);
274 } else {
275 fputs(name, fp);
276 }
277 fputc(terminator, fp);
278}
279
280void write_name_quotedpfx(const char *pfx, ssize_t pfxlen,
281 const char *name, FILE *fp, int terminator)
282{
283 int needquote = 0;
284
285 if (terminator) {
286 needquote = next_quote_pos(pfx, pfxlen) < pfxlen
287 || name[next_quote_pos(name, -1)];
288 }
289 if (needquote) {
290 fputc('"', fp);
291 quote_c_style_counted(pfx, pfxlen, NULL, fp, 1);
292 quote_c_style(name, NULL, fp, 1);
293 fputc('"', fp);
294 } else {
295 int ret;
296
297 ret = fwrite(pfx, pfxlen, 1, fp);
298 fputs(name, fp);
299 }
300 fputc(terminator, fp);
301}
302
303/* quote path as relative to the given prefix */
304char *quote_path_relative(const char *in, int len,
305 struct strbuf *out, const char *prefix)
306{
307 int needquote;
308
309 if (len < 0)
310 len = strlen(in);
311
312 /* "../" prefix itself does not need quoting, but "in" might. */
313 needquote = (next_quote_pos(in, len) < len);
314 strbuf_setlen(out, 0);
315 strbuf_grow(out, len);
316
317 if (needquote)
318 strbuf_addch(out, '"');
319 if (prefix) {
320 int off = 0;
321 while (off < len && prefix[off] && prefix[off] == in[off])
322 if (prefix[off] == '/') {
323 prefix += off + 1;
324 in += off + 1;
325 len -= off + 1;
326 off = 0;
327 } else
328 off++;
329
330 for (; *prefix; prefix++)
331 if (*prefix == '/')
332 strbuf_addstr(out, "../");
333 }
334
335 quote_c_style_counted (in, len, out, NULL, 1);
336
337 if (needquote)
338 strbuf_addch(out, '"');
339 if (!out->len)
340 strbuf_addstr(out, "./");
341
342 return out->buf;
343}
344
345/*
346 * C-style name unquoting.
347 *
348 * Quoted should point at the opening double quote.
349 * + Returns 0 if it was able to unquote the string properly, and appends the
350 * result in the strbuf `sb'.
351 * + Returns -1 in case of error, and doesn't touch the strbuf. Though note
352 * that this function will allocate memory in the strbuf, so calling
353 * strbuf_release is mandatory whichever result unquote_c_style returns.
354 *
355 * Updates endp pointer to point at one past the ending double quote if given.
356 */
357int unquote_c_style(struct strbuf *sb, const char *quoted, const char **endp)
358{
359 size_t oldlen = sb->len, len;
360 int ch, ac;
361
362 if (*quoted++ != '"')
363 return -1;
364
365 for (;;) {
366 len = strcspn(quoted, "\"\\");
367 strbuf_add(sb, quoted, len);
368 quoted += len;
369
370 switch (*quoted++) {
371 case '"':
372 if (endp)
373 *endp = quoted;
374 return 0;
375 case '\\':
376 break;
377 default:
378 goto error;
379 }
380
381 switch ((ch = *quoted++)) {
382 case 'a': ch = '\a'; break;
383 case 'b': ch = '\b'; break;
384 case 'f': ch = '\f'; break;
385 case 'n': ch = '\n'; break;
386 case 'r': ch = '\r'; break;
387 case 't': ch = '\t'; break;
388 case 'v': ch = '\v'; break;
389
390 case '\\': case '"':
391 break; /* verbatim */
392
393 /* octal values with first digit over 4 overflow */
394 case '0': case '1': case '2': case '3':
395 ac = ((ch - '0') << 6);
396 if ((ch = *quoted++) < '0' || '7' < ch)
397 goto error;
398 ac |= ((ch - '0') << 3);
399 if ((ch = *quoted++) < '0' || '7' < ch)
400 goto error;
401 ac |= (ch - '0');
402 ch = ac;
403 break;
404 default:
405 goto error;
406 }
407 strbuf_addch(sb, ch);
408 }
409
410 error:
411 strbuf_setlen(sb, oldlen);
412 return -1;
413}
414
415/* quoting as a string literal for other languages */
416
417void perl_quote_print(FILE *stream, const char *src)
418{
419 const char sq = '\'';
420 const char bq = '\\';
421 char c;
422
423 fputc(sq, stream);
424 while ((c = *src++)) {
425 if (c == sq || c == bq)
426 fputc(bq, stream);
427 fputc(c, stream);
428 }
429 fputc(sq, stream);
430}
431
432void python_quote_print(FILE *stream, const char *src)
433{
434 const char sq = '\'';
435 const char bq = '\\';
436 const char nl = '\n';
437 char c;
438
439 fputc(sq, stream);
440 while ((c = *src++)) {
441 if (c == nl) {
442 fputc(bq, stream);
443 fputc('n', stream);
444 continue;
445 }
446 if (c == sq || c == bq)
447 fputc(bq, stream);
448 fputc(c, stream);
449 }
450 fputc(sq, stream);
451}
452
453void tcl_quote_print(FILE *stream, const char *src)
454{
455 char c;
456
457 fputc('"', stream);
458 while ((c = *src++)) {
459 switch (c) {
460 case '[': case ']':
461 case '{': case '}':
462 case '$': case '\\': case '"':
463 fputc('\\', stream);
464 default:
465 fputc(c, stream);
466 break;
467 case '\f':
468 fputs("\\f", stream);
469 break;
470 case '\r':
471 fputs("\\r", stream);
472 break;
473 case '\n':
474 fputs("\\n", stream);
475 break;
476 case '\t':
477 fputs("\\t", stream);
478 break;
479 case '\v':
480 fputs("\\v", stream);
481 break;
482 }
483 }
484 fputc('"', stream);
485}
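
quote.c above keeps sq_quote_buf() (now static), whose surviving comment describes the escaping scheme: wrap the argument in single quotes and rewrite each embedded ' or ! as '\'' or '\!'. A standalone sketch of that shell-safe quoting, writing to a FILE* instead of the perf strbuf:

/*
 * Close the quoted run, emit the troublesome character backslash-escaped,
 * then reopen the quoted run; everything else passes through verbatim.
 */
#include <stdio.h>

static void sq_quote_stream(FILE *out, const char *src)
{
	fputc('\'', out);
	for (; *src; src++) {
		if (*src == '\'' || *src == '!') {
			fputc('\'', out);	/* close the quoted run  */
			fputc('\\', out);	/* backslash-escape it   */
			fputc(*src, out);
			fputc('\'', out);	/* reopen the quoted run */
		} else {
			fputc(*src, out);
		}
	}
	fputc('\'', out);
}

int main(void)
{
	sq_quote_stream(stdout, "don't panic!");
	fputc('\n', stdout);
	return 0;
}
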
diff --git a/tools/perf/util/quote.h b/tools/perf/util/quote.h
index b6a019733919..172889ea234f 100644
--- a/tools/perf/util/quote.h
+++ b/tools/perf/util/quote.h
@@ -22,47 +22,8 @@
22 * 22 *
23 * Note that the above examples leak memory! Remember to free result from 23 * Note that the above examples leak memory! Remember to free result from
24 * sq_quote() in a real application. 24 * sq_quote() in a real application.
25 *
26 * sq_quote_buf() writes to an existing buffer of specified size; it
27 * will return the number of characters that would have been written
28 * excluding the final null regardless of the buffer size.
29 */ 25 */
30 26
31extern void sq_quote_print(FILE *stream, const char *src);
32
33extern void sq_quote_buf(struct strbuf *, const char *src);
34extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen); 27extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
35 28
36/* This unwraps what sq_quote() produces in place, but returns
37 * NULL if the input does not look like what sq_quote would have
38 * produced.
39 */
40extern char *sq_dequote(char *);
41
42/*
43 * Same as the above, but can be used to unwrap many arguments in the
44 * same string separated by space. "next" is changed to point to the
45 * next argument that should be passed as first parameter. When there
46 * is no more argument to be dequoted, "next" is updated to point to NULL.
47 */
48extern char *sq_dequote_step(char *arg, char **next);
49extern int sq_dequote_to_argv(char *arg, const char ***argv, int *nr, int *alloc);
50
51extern int unquote_c_style(struct strbuf *, const char *quoted, const char **endp);
52extern size_t quote_c_style(const char *name, struct strbuf *, FILE *, int no_dq);
53extern void quote_two_c_style(struct strbuf *, const char *, const char *, int);
54
55extern void write_name_quoted(const char *name, FILE *, int terminator);
56extern void write_name_quotedpfx(const char *pfx, ssize_t pfxlen,
57 const char *name, FILE *, int terminator);
58
59/* quote path as relative to the given prefix */
60char *quote_path_relative(const char *in, int len,
61 struct strbuf *out, const char *prefix);
62
63/* quoting as a string literal for other languages */
64extern void perl_quote_print(FILE *stream, const char *src);
65extern void python_quote_print(FILE *stream, const char *src);
66extern void tcl_quote_print(FILE *stream, const char *src);
67
68#endif /* __PERF_QUOTE_H */ 29#endif /* __PERF_QUOTE_H */
diff --git a/tools/perf/util/run-command.c b/tools/perf/util/run-command.c
index 2b615acf94d7..da8e9b285f51 100644
--- a/tools/perf/util/run-command.c
+++ b/tools/perf/util/run-command.c
@@ -212,93 +212,3 @@ int run_command_v_opt(const char **argv, int opt)
212 prepare_run_command_v_opt(&cmd, argv, opt); 212 prepare_run_command_v_opt(&cmd, argv, opt);
213 return run_command(&cmd); 213 return run_command(&cmd);
214} 214}
215
216int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env)
217{
218 struct child_process cmd;
219 prepare_run_command_v_opt(&cmd, argv, opt);
220 cmd.dir = dir;
221 cmd.env = env;
222 return run_command(&cmd);
223}
224
225int start_async(struct async *async)
226{
227 int pipe_out[2];
228
229 if (pipe(pipe_out) < 0)
230 return error("cannot create pipe: %s", strerror(errno));
231 async->out = pipe_out[0];
232
233 /* Flush stdio before fork() to avoid cloning buffers */
234 fflush(NULL);
235
236 async->pid = fork();
237 if (async->pid < 0) {
238 error("fork (async) failed: %s", strerror(errno));
239 close_pair(pipe_out);
240 return -1;
241 }
242 if (!async->pid) {
243 close(pipe_out[0]);
244 exit(!!async->proc(pipe_out[1], async->data));
245 }
246 close(pipe_out[1]);
247
248 return 0;
249}
250
251int finish_async(struct async *async)
252{
253 int ret = 0;
254
255 if (wait_or_whine(async->pid))
256 ret = error("waitpid (async) failed");
257
258 return ret;
259}
260
261int run_hook(const char *index_file, const char *name, ...)
262{
263 struct child_process hook;
264 const char **argv = NULL, *env[2];
265 char idx[PATH_MAX];
266 va_list args;
267 int ret;
268 size_t i = 0, alloc = 0;
269
270 if (access(perf_path("hooks/%s", name), X_OK) < 0)
271 return 0;
272
273 va_start(args, name);
274 ALLOC_GROW(argv, i + 1, alloc);
275 argv[i++] = perf_path("hooks/%s", name);
276 while (argv[i-1]) {
277 ALLOC_GROW(argv, i + 1, alloc);
278 argv[i++] = va_arg(args, const char *);
279 }
280 va_end(args);
281
282 memset(&hook, 0, sizeof(hook));
283 hook.argv = argv;
284 hook.no_stdin = 1;
285 hook.stdout_to_stderr = 1;
286 if (index_file) {
287 snprintf(idx, sizeof(idx), "PERF_INDEX_FILE=%s", index_file);
288 env[0] = idx;
289 env[1] = NULL;
290 hook.env = env;
291 }
292
293 ret = start_command(&hook);
294 free(argv);
295 if (ret) {
296 warning("Could not spawn %s", argv[0]);
297 return ret;
298 }
299 ret = finish_command(&hook);
300 if (ret == -ERR_RUN_COMMAND_WAITPID_SIGNAL)
301 warning("%s exited due to uncaught signal", argv[0]);
302
303 return ret;
304}
diff --git a/tools/perf/util/run-command.h b/tools/perf/util/run-command.h
index d79028727ce2..1ef264d5069c 100644
--- a/tools/perf/util/run-command.h
+++ b/tools/perf/util/run-command.h
@@ -50,39 +50,9 @@ int start_command(struct child_process *);
50int finish_command(struct child_process *); 50int finish_command(struct child_process *);
51int run_command(struct child_process *); 51int run_command(struct child_process *);
52 52
53extern int run_hook(const char *index_file, const char *name, ...);
54
55#define RUN_COMMAND_NO_STDIN 1 53#define RUN_COMMAND_NO_STDIN 1
56#define RUN_PERF_CMD 2 /*If this is to be perf sub-command */ 54#define RUN_PERF_CMD 2 /*If this is to be perf sub-command */
57#define RUN_COMMAND_STDOUT_TO_STDERR 4 55#define RUN_COMMAND_STDOUT_TO_STDERR 4
58int run_command_v_opt(const char **argv, int opt); 56int run_command_v_opt(const char **argv, int opt);
59 57
60/*
61 * env (the environment) is to be formatted like environ: "VAR=VALUE".
62 * To unset an environment variable use just "VAR".
63 */
64int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env);
65
66/*
67 * The purpose of the following functions is to feed a pipe by running
68 * a function asynchronously and providing output that the caller reads.
69 *
70 * It is expected that no synchronization and mutual exclusion between
71 * the caller and the feed function is necessary so that the function
72 * can run in a thread without interfering with the caller.
73 */
74struct async {
75 /*
76 * proc writes to fd and closes it;
77 * returns 0 on success, non-zero on failure
78 */
79 int (*proc)(int fd, void *data);
80 void *data;
81 int out; /* caller reads from here and closes it */
82 pid_t pid;
83};
84
85int start_async(struct async *async);
86int finish_async(struct async *async);
87
88#endif /* __PERF_RUN_COMMAND_H */ 58#endif /* __PERF_RUN_COMMAND_H */
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 25bfca4f10f0..8f83a1835766 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -5,6 +5,7 @@
5#include <byteswap.h> 5#include <byteswap.h>
6#include <unistd.h> 6#include <unistd.h>
7#include <sys/types.h> 7#include <sys/types.h>
8#include <sys/mman.h>
8 9
9#include "session.h" 10#include "session.h"
10#include "sort.h" 11#include "sort.h"
@@ -894,3 +895,10 @@ size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
894 __dsos__fprintf(&self->host_machine.user_dsos, fp) + 895 __dsos__fprintf(&self->host_machine.user_dsos, fp) +
895 machines__fprintf_dsos(&self->machines, fp); 896 machines__fprintf_dsos(&self->machines, fp);
896} 897}
898
899size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
900 bool with_hits)
901{
902 size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
903 return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
904}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index e7fce486ebe2..55c6881b218d 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -132,12 +132,8 @@ void perf_session__process_machines(struct perf_session *self,
132 132
133size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp); 133size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
134 134
135static inline 135size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
136size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, 136 FILE *fp, bool with_hits);
137 bool with_hits)
138{
139 return machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
140}
141 137
142static inline 138static inline
143size_t perf_session__fprintf_nr_events(struct perf_session *self, FILE *fp) 139size_t perf_session__fprintf_nr_events(struct perf_session *self, FILE *fp)
diff --git a/tools/perf/util/sigchain.c b/tools/perf/util/sigchain.c
index 1118b99e57d3..ba785e9b1841 100644
--- a/tools/perf/util/sigchain.c
+++ b/tools/perf/util/sigchain.c
@@ -16,7 +16,7 @@ static void check_signum(int sig)
16 die("BUG: signal out of range: %d", sig); 16 die("BUG: signal out of range: %d", sig);
17} 17}
18 18
19int sigchain_push(int sig, sigchain_fun f) 19static int sigchain_push(int sig, sigchain_fun f)
20{ 20{
21 struct sigchain_signal *s = signals + sig; 21 struct sigchain_signal *s = signals + sig;
22 check_signum(sig); 22 check_signum(sig);
diff --git a/tools/perf/util/sigchain.h b/tools/perf/util/sigchain.h
index 1a53c11265fd..959d64eb5557 100644
--- a/tools/perf/util/sigchain.h
+++ b/tools/perf/util/sigchain.h
@@ -3,7 +3,6 @@
3 3
4typedef void (*sigchain_fun)(int); 4typedef void (*sigchain_fun)(int);
5 5
6int sigchain_push(int sig, sigchain_fun f);
7int sigchain_pop(int sig); 6int sigchain_pop(int sig);
8 7
9void sigchain_push_common(sigchain_fun f); 8void sigchain_push_common(sigchain_fun f);
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index 5249d5a1b0c2..92e068517c1a 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -41,16 +41,6 @@ char *strbuf_detach(struct strbuf *sb, size_t *sz)
41 return res; 41 return res;
42} 42}
43 43
44void strbuf_attach(struct strbuf *sb, void *buf, size_t len, size_t alloc)
45{
46 strbuf_release(sb);
47 sb->buf = buf;
48 sb->len = len;
49 sb->alloc = alloc;
50 strbuf_grow(sb, 0);
51 sb->buf[sb->len] = '\0';
52}
53
54void strbuf_grow(struct strbuf *sb, size_t extra) 44void strbuf_grow(struct strbuf *sb, size_t extra)
55{ 45{
56 if (sb->len + extra + 1 <= sb->len) 46 if (sb->len + extra + 1 <= sb->len)
@@ -60,94 +50,7 @@ void strbuf_grow(struct strbuf *sb, size_t extra)
60 ALLOC_GROW(sb->buf, sb->len + extra + 1, sb->alloc); 50 ALLOC_GROW(sb->buf, sb->len + extra + 1, sb->alloc);
61} 51}
62 52
63void strbuf_trim(struct strbuf *sb) 53static void strbuf_splice(struct strbuf *sb, size_t pos, size_t len,
64{
65 char *b = sb->buf;
66 while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1]))
67 sb->len--;
68 while (sb->len > 0 && isspace(*b)) {
69 b++;
70 sb->len--;
71 }
72 memmove(sb->buf, b, sb->len);
73 sb->buf[sb->len] = '\0';
74}
75void strbuf_rtrim(struct strbuf *sb)
76{
77 while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1]))
78 sb->len--;
79 sb->buf[sb->len] = '\0';
80}
81
82void strbuf_ltrim(struct strbuf *sb)
83{
84 char *b = sb->buf;
85 while (sb->len > 0 && isspace(*b)) {
86 b++;
87 sb->len--;
88 }
89 memmove(sb->buf, b, sb->len);
90 sb->buf[sb->len] = '\0';
91}
92
93void strbuf_tolower(struct strbuf *sb)
94{
95 unsigned int i;
96
97 for (i = 0; i < sb->len; i++)
98 sb->buf[i] = tolower(sb->buf[i]);
99}
100
101struct strbuf **strbuf_split(const struct strbuf *sb, int delim)
102{
103 int alloc = 2, pos = 0;
104 char *n, *p;
105 struct strbuf **ret;
106 struct strbuf *t;
107
108 ret = calloc(alloc, sizeof(struct strbuf *));
109 p = n = sb->buf;
110 while (n < sb->buf + sb->len) {
111 int len;
112 n = memchr(n, delim, sb->len - (n - sb->buf));
113 if (pos + 1 >= alloc) {
114 alloc = alloc * 2;
115 ret = realloc(ret, sizeof(struct strbuf *) * alloc);
116 }
117 if (!n)
118 n = sb->buf + sb->len - 1;
119 len = n - p + 1;
120 t = malloc(sizeof(struct strbuf));
121 strbuf_init(t, len);
122 strbuf_add(t, p, len);
123 ret[pos] = t;
124 ret[++pos] = NULL;
125 p = ++n;
126 }
127 return ret;
128}
129
130void strbuf_list_free(struct strbuf **sbs)
131{
132 struct strbuf **s = sbs;
133
134 while (*s) {
135 strbuf_release(*s);
136 free(*s++);
137 }
138 free(sbs);
139}
140
141int strbuf_cmp(const struct strbuf *a, const struct strbuf *b)
142{
143 int len = a->len < b->len ? a->len: b->len;
144 int cmp = memcmp(a->buf, b->buf, len);
145 if (cmp)
146 return cmp;
147 return a->len < b->len ? -1: a->len != b->len;
148}
149
150void strbuf_splice(struct strbuf *sb, size_t pos, size_t len,
151 const void *data, size_t dlen) 54 const void *data, size_t dlen)
152{ 55{
153 if (pos + len < pos) 56 if (pos + len < pos)
@@ -166,11 +69,6 @@ void strbuf_splice(struct strbuf *sb, size_t pos, size_t len,
166 strbuf_setlen(sb, sb->len + dlen - len); 69 strbuf_setlen(sb, sb->len + dlen - len);
167} 70}
168 71
169void strbuf_insert(struct strbuf *sb, size_t pos, const void *data, size_t len)
170{
171 strbuf_splice(sb, pos, 0, data, len);
172}
173
174void strbuf_remove(struct strbuf *sb, size_t pos, size_t len) 72void strbuf_remove(struct strbuf *sb, size_t pos, size_t len)
175{ 73{
176 strbuf_splice(sb, pos, len, NULL, 0); 74 strbuf_splice(sb, pos, len, NULL, 0);
@@ -183,13 +81,6 @@ void strbuf_add(struct strbuf *sb, const void *data, size_t len)
183 strbuf_setlen(sb, sb->len + len); 81 strbuf_setlen(sb, sb->len + len);
184} 82}
185 83
186void strbuf_adddup(struct strbuf *sb, size_t pos, size_t len)
187{
188 strbuf_grow(sb, len);
189 memcpy(sb->buf + sb->len, sb->buf + pos, len);
190 strbuf_setlen(sb, sb->len + len);
191}
192
193void strbuf_addf(struct strbuf *sb, const char *fmt, ...) 84void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
194{ 85{
195 int len; 86 int len;
@@ -214,57 +105,6 @@ void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
214 strbuf_setlen(sb, sb->len + len); 105 strbuf_setlen(sb, sb->len + len);
215} 106}
216 107
217void strbuf_expand(struct strbuf *sb, const char *format, expand_fn_t fn,
218 void *context)
219{
220 for (;;) {
221 const char *percent;
222 size_t consumed;
223
224 percent = strchrnul(format, '%');
225 strbuf_add(sb, format, percent - format);
226 if (!*percent)
227 break;
228 format = percent + 1;
229
230 consumed = fn(sb, format, context);
231 if (consumed)
232 format += consumed;
233 else
234 strbuf_addch(sb, '%');
235 }
236}
237
238size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder,
239 void *context)
240{
241 struct strbuf_expand_dict_entry *e = context;
242 size_t len;
243
244 for (; e->placeholder && (len = strlen(e->placeholder)); e++) {
245 if (!strncmp(placeholder, e->placeholder, len)) {
246 if (e->value)
247 strbuf_addstr(sb, e->value);
248 return len;
249 }
250 }
251 return 0;
252}
253
254size_t strbuf_fread(struct strbuf *sb, size_t size, FILE *f)
255{
256 size_t res;
257 size_t oldalloc = sb->alloc;
258
259 strbuf_grow(sb, size);
260 res = fread(sb->buf + sb->len, 1, size, f);
261 if (res > 0)
262 strbuf_setlen(sb, sb->len + res);
263 else if (oldalloc == 0)
264 strbuf_release(sb);
265 return res;
266}
267
268ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint) 108ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint)
269{ 109{
270 size_t oldlen = sb->len; 110 size_t oldlen = sb->len;
@@ -291,70 +131,3 @@ ssize_t strbuf_read(struct strbuf *sb, int fd, ssize_t hint)
291 sb->buf[sb->len] = '\0'; 131 sb->buf[sb->len] = '\0';
292 return sb->len - oldlen; 132 return sb->len - oldlen;
293} 133}
294
295#define STRBUF_MAXLINK (2*PATH_MAX)
296
297int strbuf_readlink(struct strbuf *sb, const char *path, ssize_t hint)
298{
299 size_t oldalloc = sb->alloc;
300
301 if (hint < 32)
302 hint = 32;
303
304 while (hint < STRBUF_MAXLINK) {
305 ssize_t len;
306
307 strbuf_grow(sb, hint);
308 len = readlink(path, sb->buf, hint);
309 if (len < 0) {
310 if (errno != ERANGE)
311 break;
312 } else if (len < hint) {
313 strbuf_setlen(sb, len);
314 return 0;
315 }
316
317 /* .. the buffer was too small - try again */
318 hint *= 2;
319 }
320 if (oldalloc == 0)
321 strbuf_release(sb);
322 return -1;
323}
324
325int strbuf_getline(struct strbuf *sb, FILE *fp, int term)
326{
327 int ch;
328
329 strbuf_grow(sb, 0);
330 if (feof(fp))
331 return EOF;
332
333 strbuf_reset(sb);
334 while ((ch = fgetc(fp)) != EOF) {
335 if (ch == term)
336 break;
337 strbuf_grow(sb, 1);
338 sb->buf[sb->len++] = ch;
339 }
340 if (ch == EOF && sb->len == 0)
341 return EOF;
342
343 sb->buf[sb->len] = '\0';
344 return 0;
345}
346
347int strbuf_read_file(struct strbuf *sb, const char *path, ssize_t hint)
348{
349 int fd, len;
350
351 fd = open(path, O_RDONLY);
352 if (fd < 0)
353 return -1;
354 len = strbuf_read(sb, fd, hint);
355 close(fd);
356 if (len < 0)
357 return -1;
358
359 return len;
360}
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
index a3d121d6c83e..436ac319f6c7 100644
--- a/tools/perf/util/strbuf.h
+++ b/tools/perf/util/strbuf.h
@@ -53,12 +53,6 @@ struct strbuf {
 extern void strbuf_init(struct strbuf *buf, ssize_t hint);
 extern void strbuf_release(struct strbuf *);
 extern char *strbuf_detach(struct strbuf *, size_t *);
-extern void strbuf_attach(struct strbuf *, void *, size_t, size_t);
-static inline void strbuf_swap(struct strbuf *a, struct strbuf *b) {
-        struct strbuf tmp = *a;
-        *a = *b;
-        *b = tmp;
-}
 
 /*----- strbuf size related -----*/
 static inline ssize_t strbuf_avail(const struct strbuf *sb) {
@@ -74,17 +68,6 @@ static inline void strbuf_setlen(struct strbuf *sb, size_t len) {
         sb->len = len;
         sb->buf[len] = '\0';
 }
-#define strbuf_reset(sb) strbuf_setlen(sb, 0)
-
-/*----- content related -----*/
-extern void strbuf_trim(struct strbuf *);
-extern void strbuf_rtrim(struct strbuf *);
-extern void strbuf_ltrim(struct strbuf *);
-extern int strbuf_cmp(const struct strbuf *, const struct strbuf *);
-extern void strbuf_tolower(struct strbuf *);
-
-extern struct strbuf **strbuf_split(const struct strbuf *, int delim);
-extern void strbuf_list_free(struct strbuf **);
 
 /*----- add data in your buffer -----*/
 static inline void strbuf_addch(struct strbuf *sb, int c) {
@@ -93,45 +76,17 @@ static inline void strbuf_addch(struct strbuf *sb, int c) {
         sb->buf[sb->len] = '\0';
 }
 
-extern void strbuf_insert(struct strbuf *, size_t pos, const void *, size_t);
 extern void strbuf_remove(struct strbuf *, size_t pos, size_t len);
 
-/* splice pos..pos+len with given data */
-extern void strbuf_splice(struct strbuf *, size_t pos, size_t len,
-                          const void *, size_t);
-
 extern void strbuf_add(struct strbuf *, const void *, size_t);
 static inline void strbuf_addstr(struct strbuf *sb, const char *s) {
         strbuf_add(sb, s, strlen(s));
 }
-static inline void strbuf_addbuf(struct strbuf *sb, const struct strbuf *sb2) {
-        strbuf_add(sb, sb2->buf, sb2->len);
-}
-extern void strbuf_adddup(struct strbuf *sb, size_t pos, size_t len);
-
-typedef size_t (*expand_fn_t) (struct strbuf *sb, const char *placeholder, void *context);
-extern void strbuf_expand(struct strbuf *sb, const char *format, expand_fn_t fn, void *context);
-struct strbuf_expand_dict_entry {
-        const char *placeholder;
-        const char *value;
-};
-extern size_t strbuf_expand_dict_cb(struct strbuf *sb, const char *placeholder, void *context);
 
 __attribute__((format(printf,2,3)))
 extern void strbuf_addf(struct strbuf *sb, const char *fmt, ...);
 
-extern size_t strbuf_fread(struct strbuf *, size_t, FILE *);
 /* XXX: if read fails, any partial read is undone */
 extern ssize_t strbuf_read(struct strbuf *, int fd, ssize_t hint);
-extern int strbuf_read_file(struct strbuf *sb, const char *path, ssize_t hint);
-extern int strbuf_readlink(struct strbuf *sb, const char *path, ssize_t hint);
-
-extern int strbuf_getline(struct strbuf *, FILE *, int);
-
-extern void stripspace(struct strbuf *buf, int skip_comments);
-extern int launch_editor(const char *path, struct strbuf *buffer, const char *const *env);
-
-extern int strbuf_branchname(struct strbuf *sb, const char *name);
-extern int strbuf_check_branch_ref(struct strbuf *sb, const char *name);
 
 #endif /* __PERF_STRBUF_H */
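
After this trim, the perf copy of strbuf keeps only the init/release/detach, grow/setlen, add/addch/addstr/addf/remove and read entry points. For reference, a minimal toy sketch of how a caller typically drives that remaining shape of API; this is illustrative code, not the real tools/perf/util/strbuf implementation (error handling and the init hint argument are elided):

/* Toy re-implementation for illustration only; the real API lives in
 * tools/perf/util/strbuf.[ch]. */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct strbuf {
        char *buf;
        size_t len, alloc;
};

static void strbuf_grow(struct strbuf *sb, size_t extra)
{
        if (sb->len + extra + 1 > sb->alloc) {
                sb->alloc = (sb->len + extra + 1) * 2;
                sb->buf = realloc(sb->buf, sb->alloc);  /* OOM handling elided */
        }
}

static void strbuf_init(struct strbuf *sb)
{
        sb->buf = calloc(1, 1);
        sb->len = 0;
        sb->alloc = 1;
}

static void strbuf_add(struct strbuf *sb, const void *data, size_t len)
{
        strbuf_grow(sb, len);
        memcpy(sb->buf + sb->len, data, len);
        sb->len += len;
        sb->buf[sb->len] = '\0';
}

static void strbuf_addf(struct strbuf *sb, const char *fmt, ...)
{
        char tmp[256];          /* truncates long formats; fine for a sketch */
        va_list ap;

        va_start(ap, fmt);
        vsnprintf(tmp, sizeof(tmp), fmt, ap);
        va_end(ap);
        strbuf_add(sb, tmp, strlen(tmp));
}

int main(void)
{
        struct strbuf sb;

        strbuf_init(&sb);
        strbuf_add(&sb, "perf", 4);
        strbuf_addf(&sb, ".data.%d", 2010);
        printf("%s (len=%zu)\n", sb.buf, sb.len);  /* perf.data.2010 (len=14) */
        free(sb.buf);
        return 0;
}
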
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index a06131f6259a..7fd6b151feb5 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -11,6 +11,7 @@
 #include <sys/param.h>
 #include <fcntl.h>
 #include <unistd.h>
+#include "build-id.h"
 #include "symbol.h"
 #include "strlist.h"
 
@@ -1131,6 +1132,10 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
         list_for_each_entry(pos, head, node) {
                 if (with_hits && !pos->hit)
                         continue;
+                if (pos->has_build_id) {
+                        have_build_id = true;
+                        continue;
+                }
                 if (filename__read_build_id(pos->long_name, pos->build_id,
                                             sizeof(pos->build_id)) > 0) {
                         have_build_id = true;
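
The new early continue means a DSO whose build-id is already attached (for instance one recorded in the perf.data header) is counted without touching the file on disk again. A tiny standalone sketch of that "use the cached value, only hit the disk on a miss" shape, using plain arrays instead of the kernel-style list (illustrative code, not perf's):

/* Illustration only; stands in for the __dsos__read_build_ids() loop. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct dso {
        const char *name;
        bool has_build_id;
        char build_id[20];
};

/* Pretend "reading from disk" succeeds and is the expensive path. */
static bool read_build_id_from_disk(struct dso *d)
{
        printf("reading build-id of %s from disk\n", d->name);
        memset(d->build_id, 0xab, sizeof(d->build_id));
        return true;
}

int main(void)
{
        struct dso dsos[] = {
                { "vmlinux",      true,  { 0x18 } },  /* already known */
                { "libc-2.11.so", false, { 0 } },     /* must be read */
        };
        bool have_build_id = false;
        int i;

        for (i = 0; i < 2; i++) {
                if (dsos[i].has_build_id) {           /* the new short-circuit */
                        have_build_id = true;
                        continue;
                }
                if (read_build_id_from_disk(&dsos[i])) {
                        dsos[i].has_build_id = true;
                        have_build_id = true;
                }
        }
        printf("have_build_id = %d\n", have_build_id);
        return 0;
}
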
@@ -1289,7 +1294,6 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
         int size = PATH_MAX;
         char *name;
         u8 build_id[BUILD_ID_SIZE];
-        char build_id_hex[BUILD_ID_SIZE * 2 + 1];
         int ret = -1;
         int fd;
         struct machine *machine;
@@ -1321,15 +1325,8 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
         }
 
         self->origin = DSO__ORIG_BUILD_ID_CACHE;
-
-        if (self->has_build_id) {
-                build_id__sprintf(self->build_id, sizeof(self->build_id),
-                                  build_id_hex);
-                snprintf(name, size, "%s/%s/.build-id/%.2s/%s",
-                         getenv("HOME"), DEBUG_CACHE_DIR,
-                         build_id_hex, build_id_hex + 2);
+        if (dso__build_id_filename(self, name, size) != NULL)
                 goto open_file;
-        }
 more:
         do {
                 self->origin++;
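
The open-coded snprintf() that is removed here shows the build-id cache layout: $HOME/<cache dir>/.build-id/<first two hex digits>/<remaining hex digits>. The new dso__build_id_filename() helper (declared in the build-id.h added above) presumably encapsulates exactly that. A small self-contained sketch of the path construction, grounded in the removed lines; build_id_cache__path() is a hypothetical name and DEBUG_CACHE_DIR is replaced by a parameter:

/* Illustrative sketch only, mirroring the layout in the removed code. */
#include <stdio.h>
#include <stdlib.h>

static int build_id_cache__path(char *bf, size_t size,
                                const char *cache_dir,     /* e.g. ".debug" */
                                const char *build_id_hex)  /* 40 hex chars  */
{
        const char *home = getenv("HOME");

        if (home == NULL)
                home = "";
        /* first two hex digits select the subdirectory, the rest is the file */
        return snprintf(bf, size, "%s/%s/.build-id/%.2s/%s",
                        home, cache_dir, build_id_hex, build_id_hex + 2);
}

int main(void)
{
        char path[4096];

        build_id_cache__path(path, sizeof(path), ".debug",
                             "183f4cd0a4f49829d45cdd1a9847b13f6dc94163");
        printf("%s\n", path);
        return 0;
}
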
@@ -1345,6 +1342,7 @@ more:
         case DSO__ORIG_BUILDID:
                 if (filename__read_build_id(self->long_name, build_id,
                                             sizeof(build_id))) {
+                        char build_id_hex[BUILD_ID_SIZE * 2 + 1];
                         build_id__sprintf(build_id, sizeof(build_id),
                                           build_id_hex);
                         snprintf(name, size,
@@ -1697,9 +1695,20 @@ int dso__load_vmlinux_path(struct dso *self, struct map *map,
                            symbol_filter_t filter)
 {
         int i, err = 0;
+        char *filename;
 
         pr_debug("Looking at the vmlinux_path (%d entries long)\n",
-                 vmlinux_path__nr_entries);
+                 vmlinux_path__nr_entries + 1);
+
+        filename = dso__build_id_filename(self, NULL, 0);
+        if (filename != NULL) {
+                err = dso__load_vmlinux(self, map, filename, filter);
+                if (err > 0) {
+                        dso__set_long_name(self, filename);
+                        goto out;
+                }
+                free(filename);
+        }
 
         for (i = 0; i < vmlinux_path__nr_entries; ++i) {
                 err = dso__load_vmlinux(self, map, vmlinux_path[i], filter);
@@ -1708,7 +1717,7 @@ int dso__load_vmlinux_path(struct dso *self, struct map *map,
                         break;
                 }
         }
-
+out:
         return err;
 }
 
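
The two hunks above change dso__load_vmlinux_path() so that a build-id-cache copy of the kernel image, when one exists, is tried before the conventional vmlinux_path candidates. A minimal sketch of that lookup order; try_load() and the candidate list are hypothetical stand-ins, not perf functions:

/* Sketch of the new lookup order: cache copy first, then vmlinux_path[]. */
#include <stdio.h>
#include <stddef.h>

static int try_load(const char *path)
{
        /* Pretend only absolute paths load successfully. */
        return path[0] == '/' ? 1 : -1;
}

static int load_vmlinux_path(const char *build_id_cache_copy,
                             const char * const *vmlinux_path, size_t nr)
{
        size_t i;
        int err = -1;

        /* 1) prefer the build-id cache copy when one exists */
        if (build_id_cache_copy != NULL) {
                err = try_load(build_id_cache_copy);
                if (err > 0)
                        return err;
        }

        /* 2) otherwise fall back to the configured vmlinux_path entries */
        for (i = 0; i < nr; i++) {
                err = try_load(vmlinux_path[i]);
                if (err > 0)
                        break;
        }
        return err;
}

int main(void)
{
        const char *candidates[] = { "vmlinux", "/boot/vmlinux",
                                     "/usr/lib/debug/boot/vmlinux" };

        printf("err = %d\n", load_vmlinux_path(NULL, candidates, 3));
        return 0;
}
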
@@ -1933,6 +1942,12 @@ static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
         return ret;
 }
 
+size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits)
+{
+        return __dsos__fprintf_buildid(&self->kernel_dsos, fp, with_hits) +
+               __dsos__fprintf_buildid(&self->user_dsos, fp, with_hits);
+}
+
 size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits)
 {
         struct rb_node *nd;
@@ -1940,8 +1955,7 @@ size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_
 
         for (nd = rb_first(self); nd; nd = rb_next(nd)) {
                 struct machine *pos = rb_entry(nd, struct machine, rb_node);
-                ret += __dsos__fprintf_buildid(&pos->kernel_dsos, fp, with_hits);
-                ret += __dsos__fprintf_buildid(&pos->user_dsos, fp, with_hits);
+                ret += machine__fprintf_dsos_buildid(pos, fp, with_hits);
         }
         return ret;
 }
@@ -2099,13 +2113,21 @@ out_fail:
         return -1;
 }
 
-size_t vmlinux_path__fprintf(FILE *fp)
+size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp)
 {
         int i;
         size_t printed = 0;
+        struct dso *kdso = self->vmlinux_maps[MAP__FUNCTION]->dso;
+
+        if (kdso->has_build_id) {
+                char filename[PATH_MAX];
+                if (dso__build_id_filename(kdso, filename, sizeof(filename)))
+                        printed += fprintf(fp, "[0] %s\n", filename);
+        }
 
         for (i = 0; i < vmlinux_path__nr_entries; ++i)
-                printed += fprintf(fp, "[%d] %s\n", i, vmlinux_path[i]);
+                printed += fprintf(fp, "[%d] %s\n",
+                                   i + kdso->has_build_id, vmlinux_path[i]);
 
         return printed;
 }
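
In the renamed machine__fprintf_vmlinux_path(), a build-id cache entry (when present) takes slot [0] and the regular vmlinux_path entries are shifted by one. A small standalone sketch of that indexing; the file names are made up for illustration:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
        const char *vmlinux_path[] = { "vmlinux", "/boot/vmlinux",
                                       "/lib/modules/2.6.35/build/vmlinux" };
        bool has_build_id = true;   /* assume a cached copy was found */
        int i;

        if (has_build_id)           /* placeholder path, slot [0] */
                printf("[0] %s\n", "~/.debug/.build-id/18/3f4c...");

        /* vmlinux_path entries start at [1] when slot [0] was used above */
        for (i = 0; i < 3; i++)
                printf("[%d] %s\n", i + has_build_id, vmlinux_path[i]);

        return 0;
}
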
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 032469e41876..5e02d2c17154 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -170,6 +170,7 @@ int machine__load_vmlinux_path(struct machine *self, enum map_type type,
 
 size_t __dsos__fprintf(struct list_head *head, FILE *fp);
 
+size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits);
 size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp);
 size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits);
 
@@ -215,6 +216,6 @@ int machines__create_guest_kernel_maps(struct rb_root *self);
 int symbol__init(void);
 bool symbol_type__is_a(char symbol_type, enum map_type map_type);
 
-size_t vmlinux_path__fprintf(FILE *fp);
+size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp);
 
 #endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index cb54cd002f49..f55cc3a765a1 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -53,12 +53,6 @@ static unsigned long page_size;
 static ssize_t calc_data_size;
 static bool repipe;
 
-/* If it fails, the next read will report it */
-static void skip(int size)
-{
-        lseek(input_fd, size, SEEK_CUR);
-}
-
 static int do_read(int fd, void *buf, int size)
 {
         int rsize = size;
@@ -98,6 +92,19 @@ static int read_or_die(void *data, int size)
         return r;
 }
 
+/* If it fails, the next read will report it */
+static void skip(int size)
+{
+        char buf[BUFSIZ];
+        int r;
+
+        while (size) {
+                r = size > BUFSIZ ? BUFSIZ : size;
+                read_or_die(buf, r);
+                size -= r;
+        };
+}
+
 static unsigned int read4(void)
 {
         unsigned int data;
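
The rewritten skip() consumes the bytes through read_or_die() instead of seeking past them, presumably because the trace stream may arrive on a pipe (lseek() fails with ESPIPE on pipes) and because going through the common read path keeps the calc_data_size/repipe bookkeeping in one place. A minimal demonstration of why seeking cannot be used on piped input; this is stand-alone illustration, not perf code, and skip_by_reading() is a made-up name:

/* lseek() fails on a pipe, so skipping piped data means reading it. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void skip_by_reading(int fd, int size)
{
        char buf[BUFSIZ];

        while (size > 0) {
                int chunk = size > (int)sizeof(buf) ? (int)sizeof(buf) : size;
                ssize_t r = read(fd, buf, chunk);

                if (r <= 0)     /* error or EOF: nothing more to skip */
                        break;
                size -= r;
        }
}

int main(void)
{
        int fds[2];
        char byte = 0;

        pipe(fds);
        write(fds[1], "abcdef", 6);

        if (lseek(fds[0], 3, SEEK_CUR) < 0)
                printf("lseek on a pipe: %s\n", strerror(errno));   /* ESPIPE */

        skip_by_reading(fds[0], 3);                /* consume "abc" instead */
        read(fds[0], &byte, 1);
        printf("next byte after skipping 3: %c\n", byte);           /* 'd' */
        return 0;
}
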
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 406d452956db..b3e86b1e4444 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -233,7 +233,12 @@ static inline unsigned long long __data2host8(unsigned long long data)
 
 #define data2host2(ptr) __data2host2(*(unsigned short *)ptr)
 #define data2host4(ptr) __data2host4(*(unsigned int *)ptr)
-#define data2host8(ptr) __data2host8(*(unsigned long long *)ptr)
+#define data2host8(ptr) ({ \
+        unsigned long long __val; \
+ \
+        memcpy(&__val, (ptr), sizeof(unsigned long long)); \
+        __data2host8(__val); \
+})
 
 extern int header_page_ts_offset;
 extern int header_page_ts_size;
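
The new data2host8() goes through memcpy() instead of casting the pointer to unsigned long long and dereferencing it, which avoids an unaligned 8-byte load when the pointer lands in the middle of a packed trace buffer (a real fault or silent misread on some architectures). A minimal illustration of the two approaches, independent of perf's headers:

/* Safe unaligned read via memcpy vs. a direct cast. The direct cast may
 * fault when p is not 8-byte aligned; memcpy is always valid and compiles
 * to a plain load on architectures with cheap unaligned access. */
#include <stdio.h>
#include <string.h>

static unsigned long long read_u64_safe(const void *p)
{
        unsigned long long val;

        memcpy(&val, p, sizeof(val));
        return val;
}

int main(void)
{
        unsigned char buf[16] = { 0 };
        unsigned long long v = 0x1122334455667788ULL;

        memcpy(buf + 1, &v, sizeof(v));            /* deliberately misaligned */
        printf("%llx\n", read_u64_safe(buf + 1));  /* fine everywhere */
        /* printf("%llx\n", *(unsigned long long *)(buf + 1));  -- may trap */
        return 0;
}
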
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 0795bf304b19..4e8b6b0c551c 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -81,7 +81,7 @@
 #include <inttypes.h>
 #include "../../../include/linux/magic.h"
 #include "types.h"
-
+#include <sys/ttydefaults.h>
 
 #ifndef NO_ICONV
 #include <iconv.h>
@@ -152,7 +152,6 @@ extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2)))
 extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN);
 
 extern int prefixcmp(const char *str, const char *prefix);
-extern time_t tm_to_time_t(const struct tm *tm);
 
 static inline const char *skip_prefix(const char *str, const char *prefix)
 {
@@ -160,119 +159,6 @@ static inline const char *skip_prefix(const char *str, const char *prefix)
         return strncmp(str, prefix, len) ? NULL : str + len;
 }
 
-#if defined(NO_MMAP) || defined(USE_WIN32_MMAP)
-
-#ifndef PROT_READ
-#define PROT_READ 1
-#define PROT_WRITE 2
-#define MAP_PRIVATE 1
-#define MAP_FAILED ((void*)-1)
-#endif
-
-#define mmap git_mmap
-#define munmap git_munmap
-extern void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset);
-extern int git_munmap(void *start, size_t length);
-
-#else /* NO_MMAP || USE_WIN32_MMAP */
-
-#include <sys/mman.h>
-
-#endif /* NO_MMAP || USE_WIN32_MMAP */
-
-#ifdef NO_MMAP
-
-/* This value must be multiple of (pagesize * 2) */
-#define DEFAULT_PACKED_GIT_WINDOW_SIZE (1 * 1024 * 1024)
-
-#else /* NO_MMAP */
-
-/* This value must be multiple of (pagesize * 2) */
-#define DEFAULT_PACKED_GIT_WINDOW_SIZE \
-        (sizeof(void*) >= 8 \
-                ? 1 * 1024 * 1024 * 1024 \
-                : 32 * 1024 * 1024)
-
-#endif /* NO_MMAP */
-
-#ifdef NO_ST_BLOCKS_IN_STRUCT_STAT
-#define on_disk_bytes(st) ((st).st_size)
-#else
-#define on_disk_bytes(st) ((st).st_blocks * 512)
-#endif
-
-#define DEFAULT_PACKED_GIT_LIMIT \
-        ((1024L * 1024L) * (sizeof(void*) >= 8 ? 8192 : 256))
-
-#ifdef NO_PREAD
-#define pread git_pread
-extern ssize_t git_pread(int fd, void *buf, size_t count, off_t offset);
-#endif
-/*
- * Forward decl that will remind us if its twin in cache.h changes.
- * This function is used in compat/pread.c. But we can't include
- * cache.h there.
- */
-extern ssize_t read_in_full(int fd, void *buf, size_t count);
-
-#ifdef NO_SETENV
-#define setenv gitsetenv
-extern int gitsetenv(const char *, const char *, int);
-#endif
-
-#ifdef NO_MKDTEMP
-#define mkdtemp gitmkdtemp
-extern char *gitmkdtemp(char *);
-#endif
-
-#ifdef NO_UNSETENV
-#define unsetenv gitunsetenv
-extern void gitunsetenv(const char *);
-#endif
-
-#ifdef NO_STRCASESTR
-#define strcasestr gitstrcasestr
-extern char *gitstrcasestr(const char *haystack, const char *needle);
-#endif
-
-#ifdef NO_STRLCPY
-#define strlcpy gitstrlcpy
-extern size_t gitstrlcpy(char *, const char *, size_t);
-#endif
-
-#ifdef NO_STRTOUMAX
-#define strtoumax gitstrtoumax
-extern uintmax_t gitstrtoumax(const char *, char **, int);
-#endif
-
-#ifdef NO_HSTRERROR
-#define hstrerror githstrerror
-extern const char *githstrerror(int herror);
-#endif
-
-#ifdef NO_MEMMEM
-#define memmem gitmemmem
-void *gitmemmem(const void *haystack, size_t haystacklen,
-                const void *needle, size_t needlelen);
-#endif
-
-#ifdef FREAD_READS_DIRECTORIES
-#ifdef fopen
-#undef fopen
-#endif
-#define fopen(a,b) git_fopen(a,b)
-extern FILE *git_fopen(const char*, const char*);
-#endif
-
-#ifdef SNPRINTF_RETURNS_BOGUS
-#define snprintf git_snprintf
-extern int git_snprintf(char *str, size_t maxsize,
-                        const char *format, ...);
-#define vsnprintf git_vsnprintf
-extern int git_vsnprintf(char *str, size_t maxsize,
-                         const char *format, va_list ap);
-#endif
-
 #ifdef __GLIBC_PREREQ
 #if __GLIBC_PREREQ(2, 1)
 #define HAVE_STRCHRNUL
@@ -293,28 +179,14 @@ static inline char *gitstrchrnul(const char *s, int c)
  * Wrappers:
  */
 extern char *xstrdup(const char *str);
-extern void *xmalloc(size_t size) __attribute__((weak));
-extern void *xmemdupz(const void *data, size_t len);
-extern char *xstrndup(const char *str, size_t len);
 extern void *xrealloc(void *ptr, size_t size) __attribute__((weak));
 
-static inline void *xzalloc(size_t size)
-{
-        void *buf = xmalloc(size);
-
-        return memset(buf, 0, size);
-}
-
 static inline void *zalloc(size_t size)
 {
         return calloc(1, size);
 }
 
-static inline size_t xsize_t(off_t len)
-{
-        return (size_t)len;
-}
-
 static inline int has_extension(const char *filename, const char *ext)
 {
         size_t len = strlen(filename);
@@ -351,8 +223,6 @@ extern unsigned char sane_ctype[256];
 #define isalpha(x) sane_istest(x,GIT_ALPHA)
 #define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT)
 #define isprint(x) sane_istest(x,GIT_PRINT)
-#define is_glob_special(x) sane_istest(x,GIT_GLOB_SPECIAL)
-#define is_regex_special(x) sane_istest(x,GIT_GLOB_SPECIAL | GIT_REGEX_SPECIAL)
 #define tolower(x) sane_case((unsigned char)(x), 0x20)
 #define toupper(x) sane_case((unsigned char)(x), 0)
 
@@ -363,38 +233,6 @@ static inline int sane_case(int x, int high)
         return x;
 }
 
-static inline int strtoul_ui(char const *s, int base, unsigned int *result)
-{
-        unsigned long ul;
-        char *p;
-
-        errno = 0;
-        ul = strtoul(s, &p, base);
-        if (errno || *p || p == s || (unsigned int) ul != ul)
-                return -1;
-        *result = ul;
-        return 0;
-}
-
-static inline int strtol_i(char const *s, int base, int *result)
-{
-        long ul;
-        char *p;
-
-        errno = 0;
-        ul = strtol(s, &p, base);
-        if (errno || *p || p == s || (int) ul != ul)
-                return -1;
-        *result = ul;
-        return 0;
-}
-
-#ifdef INTERNAL_QSORT
-void git_qsort(void *base, size_t nmemb, size_t size,
-               int(*compar)(const void *, const void *));
-#define qsort git_qsort
-#endif
-
 #ifndef DIR_HAS_BSD_GROUP_SEMANTICS
 # define FORCE_DIR_SET_GID S_ISGID
 #else
@@ -425,6 +263,19 @@ bool strglobmatch(const char *str, const char *pat);
 bool strlazymatch(const char *str, const char *pat);
 unsigned long convert_unit(unsigned long value, char *unit);
 
+#ifndef ESC
+#define ESC 27
+#endif
+
+static inline bool is_exit_key(int key)
+{
+        char up;
+        if (key == CTRL('c') || key == ESC)
+                return true;
+        up = toupper(key);
+        return up == 'Q';
+}
+
 #define _STR(x) #x
 #define STR(x) _STR(x)
 
@@ -425,6 +263,19 @@ bool strglobmatch(const char *str, const char *pat);
425bool strlazymatch(const char *str, const char *pat); 263bool strlazymatch(const char *str, const char *pat);
426unsigned long convert_unit(unsigned long value, char *unit); 264unsigned long convert_unit(unsigned long value, char *unit);
427 265
266#ifndef ESC
267#define ESC 27
268#endif
269
270static inline bool is_exit_key(int key)
271{
272 char up;
273 if (key == CTRL('c') || key == ESC)
274 return true;
275 up = toupper(key);
276 return up == 'Q';
277}
278
428#define _STR(x) #x 279#define _STR(x) #x
429#define STR(x) _STR(x) 280#define STR(x) _STR(x)
430 281
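
The new is_exit_key() centralizes "quit" handling for interactive perf output: Ctrl-C, ESC and q/Q all count as exit, with the CTRL() macro coming from the <sys/ttydefaults.h> include added in the same hunk group. A small sketch of how a key loop might use it; getkey()-style input is simulated here and the loop is illustrative, not perf code:

/* Sketch only: the key array stands in for real terminal input. */
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/ttydefaults.h>    /* CTRL() */

#ifndef ESC
#define ESC 27
#endif

static bool is_exit_key(int key)
{
        if (key == CTRL('c') || key == ESC)
                return true;
        return toupper(key) == 'Q';
}

int main(void)
{
        int keys[] = { 'a', 'j', 'q' };   /* 'a' and 'j' handled, 'q' exits */
        int i;

        for (i = 0; i < 3; i++) {
                if (is_exit_key(keys[i])) {
                        printf("exit on key %d\n", keys[i]);
                        break;
                }
                printf("handled key %c\n", keys[i]);
        }
        return 0;
}
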
diff --git a/tools/perf/util/wrapper.c b/tools/perf/util/wrapper.c
index bf44ca85d23b..73e900edb5a2 100644
--- a/tools/perf/util/wrapper.c
+++ b/tools/perf/util/wrapper.c
@@ -23,46 +23,6 @@ char *xstrdup(const char *str)
         return ret;
 }
 
-void *xmalloc(size_t size)
-{
-        void *ret = malloc(size);
-        if (!ret && !size)
-                ret = malloc(1);
-        if (!ret) {
-                release_pack_memory(size, -1);
-                ret = malloc(size);
-                if (!ret && !size)
-                        ret = malloc(1);
-                if (!ret)
-                        die("Out of memory, malloc failed");
-        }
-#ifdef XMALLOC_POISON
-        memset(ret, 0xA5, size);
-#endif
-        return ret;
-}
-
-/*
- * xmemdupz() allocates (len + 1) bytes of memory, duplicates "len" bytes of
- * "data" to the allocated memory, zero terminates the allocated memory,
- * and returns a pointer to the allocated memory. If the allocation fails,
- * the program dies.
- */
-void *xmemdupz(const void *data, size_t len)
-{
-        char *p = xmalloc(len + 1);
-        memcpy(p, data, len);
-        p[len] = '\0';
-        return p;
-}
-
-char *xstrndup(const char *str, size_t len)
-{
-        char *p = memchr(str, '\0', len);
-
-        return xmemdupz(str, p ? (size_t)(p - str) : len);
-}
-
 void *xrealloc(void *ptr, size_t size)
 {
         void *ret = realloc(ptr, size);
@@ -78,73 +38,3 @@ void *xrealloc(void *ptr, size_t size)
         }
         return ret;
 }
-
-/*
- * xread() is the same a read(), but it automatically restarts read()
- * operations with a recoverable error (EAGAIN and EINTR). xread()
- * DOES NOT GUARANTEE that "len" bytes is read even if the data is available.
- */
-static ssize_t xread(int fd, void *buf, size_t len)
-{
-        ssize_t nr;
-        while (1) {
-                nr = read(fd, buf, len);
-                if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
-                        continue;
-                return nr;
-        }
-}
-
-/*
- * xwrite() is the same a write(), but it automatically restarts write()
- * operations with a recoverable error (EAGAIN and EINTR). xwrite() DOES NOT
- * GUARANTEE that "len" bytes is written even if the operation is successful.
- */
-static ssize_t xwrite(int fd, const void *buf, size_t len)
-{
-        ssize_t nr;
-        while (1) {
-                nr = write(fd, buf, len);
-                if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
-                        continue;
-                return nr;
-        }
-}
-
-ssize_t read_in_full(int fd, void *buf, size_t count)
-{
-        char *p = buf;
-        ssize_t total = 0;
-
-        while (count > 0) {
-                ssize_t loaded = xread(fd, p, count);
-                if (loaded <= 0)
-                        return total ? total : loaded;
-                count -= loaded;
-                p += loaded;
-                total += loaded;
-        }
-
-        return total;
-}
-
-ssize_t write_in_full(int fd, const void *buf, size_t count)
-{
-        const char *p = buf;
-        ssize_t total = 0;
-
-        while (count > 0) {
-                ssize_t written = xwrite(fd, p, count);
-                if (written < 0)
-                        return -1;
-                if (!written) {
-                        errno = ENOSPC;
-                        return -1;
-                }
-                count -= written;
-                p += written;
-                total += written;
-        }
-
-        return total;
-}
diff --git a/usr/Makefile b/usr/Makefile
index 1e6a9e4a72cc..6b4b6da0b67d 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -15,6 +15,9 @@ suffix_$(CONFIG_INITRAMFS_COMPRESSION_BZIP2) = .bz2
 # Lzma
 suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZMA) = .lzma
 
+# Lzo
+suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZO) = .lzo
+
 # Generate builtin.o based on initramfs_data.o
 obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data$(suffix_y).o
 
@@ -45,7 +48,7 @@ endif
 quiet_cmd_initfs = GEN $@
       cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
 
-targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio
+targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.lzo initramfs_data.cpio
 # do not try to update files included in initramfs
 $(deps_initramfs): ;
 
diff --git a/usr/initramfs_data.lzo.S b/usr/initramfs_data.lzo.S
new file mode 100644
index 000000000000..59211905da84
--- /dev/null
+++ b/usr/initramfs_data.lzo.S
@@ -0,0 +1,29 @@
+/*
+  initramfs_data includes the compressed binary that is the
+  filesystem used for early user space.
+  Note: Older versions of "as" (prior to binutils 2.11.90.0.23
+  released on 2001-07-14) dit not support .incbin.
+  If you are forced to use older binutils than that then the
+  following trick can be applied to create the resulting binary:
+
+
+  ld -m elf_i386 --format binary --oformat elf32-i386 -r \
+       -T initramfs_data.scr initramfs_data.cpio.gz -o initramfs_data.o
+   ld -m elf_i386 -r -o built-in.o initramfs_data.o
+
+  initramfs_data.scr looks like this:
+SECTIONS
+{
+       .init.ramfs : { *(.data) }
+}
+
+  The above example is for i386 - the parameters vary from architectures.
+  Eventually look up LDFLAGS_BLOB in an older version of the
+  arch/$(ARCH)/Makefile to see the flags used before .incbin was introduced.
+
+  Using .incbin has the advantage over ld that the correct flags are set
+  in the ELF header, as required by certain architectures.
+*/
+
+.section .init.ramfs,"a"
+.incbin "usr/initramfs_data.cpio.lzo"